max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
src/opnsense/scripts/systemhealth/logformats/syslog.py | roms2000/core | 0 | 12760751 | <filename>src/opnsense/scripts/systemhealth/logformats/syslog.py
"""
Copyright (c) 2020 <NAME> <<EMAIL>>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import re
import datetime
from . import BaseLogFormat
class SysLogFormat(BaseLogFormat):
    """Log format for classic BSD syslog lines ("Mmm dd HH:MM:SS host msg ...")."""

    def __init__(self, filename):
        super(SysLogFormat, self).__init__(filename)
        self._priority = 1
        # Reference point used to guess the (absent) year of each log line.
        self._startup_timestamp = datetime.datetime.now()

    @staticmethod
    def match(line):
        # Characters 7..15 must contain a valid HH:MM:SS time (after "Mmm dd ").
        return len(line) > 15 and re.match(r'(?:[01]\d|2[0123]):(?:[012345]\d):(?:[012345]\d)', line[7:15])

    def timestamp(self, line):
        """Return the ISO-formatted timestamp parsed from the syslog line.

        Syslog timestamps carry no year, so the startup year is assumed; when
        that would place the entry in the future, the previous year is used
        instead (month for this year not reached yet).
        """
        # Fix: the year is already embedded via the "%Y" format below, so the
        # former redundant ts.replace(year=...) call was removed.
        ts = datetime.datetime.strptime(
            "%s %s" % (self._startup_timestamp.year, line[0:15]), "%Y %b %d %H:%M:%S"
        )
        if (self._startup_timestamp - ts).days < 0:
            # likely previous year, (month for this year not reached yet)
            ts = ts.replace(year=ts.year - 1)
        return ts.isoformat()

    @staticmethod
    def line(line):
        # strip the 15-character timestamp from the log line ...
        response = line[16:]
        # ... then the hostname token, leaving only the actual message
        return response[response.find(' ')+1:].strip()
class SysLogFormatEpoch(BaseLogFormat):
    """Log format whose lines start with a millisecond epoch ("1234567890.123 ...")."""

    def __init__(self, filename):
        super(SysLogFormatEpoch, self).__init__(filename)
        self._priority = 2

    @staticmethod
    def match(line):
        # ten digits of seconds, a dot, then (at least) two fractional digits
        if len(line) <= 15:
            return False
        seconds, dot, fraction = line[0:10], line[10], line[11:13]
        return seconds.isdigit() and dot == '.' and fraction.isdigit()

    @staticmethod
    def timestamp(line):
        # the first 13 characters form an epoch with millisecond precision
        return datetime.datetime.fromtimestamp(float(line[0:13])).isoformat()

    @staticmethod
    def line(line):
        # everything after the epoch prefix (and separator) is the message
        return line[14:].strip()
| 1.921875 | 2 |
accounts/admin.py | mariuslihet/CRM | 2 | 12760752 | <gh_stars>1-10
from django.contrib import admin
from accounts.models import Account
# Make the Account model manageable through the Django admin site.
admin.site.register(Account)
| 1.25 | 1 |
cifar10/smallnet.py | MarvinTeichmann/tensorflow_examples | 2 | 12760753 | <reponame>MarvinTeichmann/tensorflow_examples
import tensorflow as tf
import re
import cifar10_input as data_input
# Global constants describing the CIFAR-10 data set.
IMAGE_SIZE = data_input.IMAGE_SIZE
NUM_CHANNELS = data_input.NUM_CHANNELS
IMAGE_PIXELS = IMAGE_SIZE * IMAGE_SIZE
NUM_CLASSES = data_input.NUM_CLASSES
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = data_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = data_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')
flags.DEFINE_integer('max_steps', 200000, 'Number of steps to run trainer.')
# flags.DEFINE_integer('num_filter_1', 32, 'Number of units in hidden layer 1.')
# flags.DEFINE_integer('num_filter_2', 64, 'Number of units in hidden layer 2.')
# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 350.0 # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1 # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.1 # Initial learning rate.
def _variable_with_weight_decay(name, shape, stddev, wd):
    """Helper to create an initialized Variable with weight decay.

    Note that the Variable is initialized with a truncated normal distribution.
    A weight decay is added only if one is specified.

    Args:
      name: name of the variable
      shape: list of ints
      stddev: standard deviation of a truncated Gaussian
      wd: add L2Loss weight decay multiplied by this float. If None, weight
          decay is not added for this Variable.
    Returns:
      Variable Tensor
    """
    initial = tf.truncated_normal(shape, stddev=stddev)
    var = tf.Variable(initial, name=name)
    # NOTE(review): truthiness also skips wd == 0.0 (not only None); callers in
    # this file pass wd=0.0 to mean "no decay", so the behavior matches usage.
    if wd:
        # Accumulate the decay term in the 'losses' collection; loss() sums it.
        weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
def weight_variable(name, shape, stddev=0.1):
    """Create a weight Variable initialized from a truncated normal.

    Args:
      name: name of the variable.
      shape: list of ints describing the tensor shape.
      stddev: standard deviation of the truncated normal initializer.
    Returns:
      Variable Tensor.
    """
    # Bug fix: the stddev argument was previously ignored (0.1 was hard-coded).
    initial = tf.truncated_normal(shape, stddev=stddev)
    return tf.Variable(initial, name=name)
def bias_variable(name, shape, constant=0.1):
    """Create a bias Variable filled with a constant value."""
    return tf.Variable(tf.constant(constant, shape=shape), name=name)
def conv2d(x, W, strides=(1, 1, 1, 1)):
    """2-D convolution with SAME padding.

    Args:
      x: input tensor (NHWC layout, per the reshape in inference()).
      W: filter tensor.
      strides: per-dimension strides; default is stride 1 everywhere.
    """
    # Fix: use an immutable tuple as the default to avoid the shared
    # mutable-default-argument pitfall; converted back to a list for the call.
    return tf.nn.conv2d(x, W, strides=list(strides), padding='SAME')
def max_pool_3x3(x, name):
    """3x3 max pooling with stride 2 and SAME padding."""
    return tf.nn.max_pool(
        x,
        ksize=[1, 3, 3, 1],
        strides=[1, 2, 2, 1],
        padding='SAME',
        name=name,
    )
def normalization(x, name):
    """Local response normalization (AlexNet-style parameters)."""
    return tf.nn.lrn(x, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name=name)
def _activation_summary(x):
    """Helper to create summaries for activations.

    Creates a summary that provides a histogram of activations.
    Creates a summary that measure the sparsity of activations.

    Args:
      x: Tensor
    Returns:
      nothing
    """
    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
    # session. This helps the clarity of presentation on tensorboard.
    # (Multi-GPU stripping is currently disabled; the raw op name is used.)
    tensor_name = x.op.name
    # tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
    tf.histogram_summary(tensor_name + '/activations', x)
    # zero_fraction reports how many activations are exactly zero (sparsity).
    tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
def inference(images, keep_prob, train=True,
              num_filter_1=64, num_filter_2=64):
    """Build the CIFAR-10 model up to where it may be used for inference.

    Args:
      images: Images placeholder, from inputs().
      keep_prob: dropout keep probability (currently unused in this graph).
      train: whether the graph is built for training (currently unused).
      num_filter_1: Amount of filters in conv1.
      num_filter_2: Amount of filters in conv2.
    Returns:
      softmax_linear: Output tensor with the computed logits.
    """
    # First Convolutional Layer
    with tf.variable_scope('Conv1') as scope:
        W_conv1 = _variable_with_weight_decay('weights', shape=[5, 5, NUM_CHANNELS, num_filter_1],
                                              stddev=1e-4, wd=0.0)
        b_conv1 = bias_variable('biases', [num_filter_1], constant=0.0)
        x_image = tf.reshape(images, [-1, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS])
        h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1, name=scope.name)
        _activation_summary(h_conv1)
    # First Pooling Layer
    h_pool1 = max_pool_3x3(h_conv1, name='pool1')
    # First Normalization
    norm1 = normalization(h_pool1, name='norm1')
    # Second Convolutional Layer
    with tf.name_scope('Conv2'):
        W_conv2 = _variable_with_weight_decay('weights', [5, 5, num_filter_1, num_filter_2],
                                              stddev=1e-4, wd=0.0)
        b_conv2 = bias_variable('biases', [num_filter_2])
        h_conv2 = tf.nn.relu(conv2d(norm1, W_conv2) + b_conv2)
        _activation_summary(h_conv2)
    # Second Pooling Layer
    h_pool2 = max_pool_3x3(h_conv2, name='pool2')
    # Second Normalization
    # Bug fix: this op was previously (mis)named 'norm1', colliding with the
    # first normalization layer's name in the graph.
    norm2 = normalization(h_pool2, name='norm2')
    # Fully Connected 1
    with tf.variable_scope('FullC1') as scope:
        # Move everything into depth so we can perform a single matrix multiply.
        dim = 1
        for d in norm2.get_shape()[1:].as_list():
            dim *= d
        reshape = tf.reshape(norm2, [FLAGS.batch_size, dim])
        weights = _variable_with_weight_decay('weights', shape=[dim, 384],
                                              stddev=0.04, wd=0.004)
        biases = bias_variable('biases', [384])
        fullc1 = tf.nn.relu_layer(reshape, weights, biases, name=scope.name)
        _activation_summary(fullc1)
    # Fully Connected 2
    with tf.variable_scope('FullC2') as scope:
        weights = _variable_with_weight_decay('weights', shape=[384, 192],
                                              stddev=0.04, wd=0.004)
        biases = bias_variable('biases', [192])
        fullc2 = tf.nn.relu_layer(fullc1, weights, biases, name=scope.name)
        _activation_summary(fullc2)
    # Computing Softmax (linear layer producing the unnormalized logits)
    with tf.name_scope('logits'):
        W_fc2 = _variable_with_weight_decay('weights', [192, NUM_CLASSES],
                                            stddev=1/192.0, wd=0.0)
        b_fc2 = bias_variable('biases', [NUM_CLASSES])
        logits = tf.matmul(fullc2, W_fc2) + b_fc2
        _activation_summary(logits)
    return logits
def loss(logits, labels):
    """Calculates the loss from the logits and the labels.

    Args:
      logits: Logits tensor, float - [batch_size, NUM_CLASSES].
      labels: Labels tensor, int32 - [batch_size].
    Returns:
      loss: Loss tensor of type float (cross entropy plus every weight-decay
            term previously added to the 'losses' collection).
    """
    # Convert from sparse integer labels in the range [0, NUM_CLASSSES)
    # to 1-hot dense float vectors (that is we will have batch_size vectors,
    # each with NUM_CLASSES values, all of which are 0.0 except there will
    # be a 1.0 in the entry corresponding to the label).
    with tf.name_scope('loss'):
        batch_size = tf.size(labels)
        labels = tf.expand_dims(labels, 1)
        indices = tf.expand_dims(tf.range(0, batch_size), 1)
        # Pair each row index with its label: [[0, l0], [1, l1], ...]
        concated = tf.concat(1, [indices, labels])
        onehot_labels = tf.sparse_to_dense(
            concated, tf.pack([batch_size, NUM_CLASSES]), 1.0, 0.0)
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits,
                                                                onehot_labels,
                                                                name='xentropy')
        cross_entropy_mean = tf.reduce_mean(cross_entropy, name='xentropy_mean')
        # Total loss = cross entropy + the L2 terms contributed by
        # _variable_with_weight_decay via the 'losses' collection.
        tf.add_to_collection('losses', cross_entropy_mean)
        loss = tf.add_n(tf.get_collection('losses'), name='total_loss')
    return loss
def _add_loss_summaries(total_loss):
    """Add summaries for losses in CIFAR-10 model.

    Generates moving average for all losses and associated summaries for
    visualizing the performance of the network.

    Args:
      total_loss: Total loss from loss().
    Returns:
      loss_averages_op: op for generating moving averages of losses.
    """
    # Compute the moving average of all individual losses and the total loss.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    losses = tf.get_collection('losses')
    loss_averages_op = loss_averages.apply(losses + [total_loss])
    # Attach a scalar summmary to all individual losses and the total loss; do the
    # same for the averaged version of the losses.
    for l in losses + [total_loss]:
        # Name each loss as '(raw)' and name the moving average version of the loss
        # as the original loss name (so the smoothed curve gets the plain name).
        tf.scalar_summary(l.op.name + ' (raw)', l)
        tf.scalar_summary(l.op.name, loss_averages.average(l))
    return loss_averages_op
def training(loss, global_step=0, learning_rate=None):
    """Sets up the training Ops.

    Creates a summarizer to track the loss over time in TensorBoard.
    Creates an optimizer and applies the gradients to all trainable variables.
    The Op returned by this function is what must be passed to the
    `sess.run()` call to cause the model to train.

    Args:
      loss: Loss tensor, from loss().
      global_step: Integer Variable counting the number of training steps
        processed.
      learning_rate: unused; the rate is derived from INITIAL_LEARNING_RATE
        with exponential decay (kept for interface compatibility).
    Returns:
      train_op: The Op for training.
    """
    # Variables that affect learning rate.
    num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
    decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)
    # Decay the learning rate exponentially based on the number of steps.
    lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
                                    global_step,
                                    decay_steps,
                                    LEARNING_RATE_DECAY_FACTOR,
                                    staircase=True)
    tf.scalar_summary('learning_rate', lr)
    # Generate moving averages of all losses and associated summaries.
    loss_averages_op = _add_loss_summaries(loss)
    # Compute gradients (only after the loss averages have been updated).
    with tf.control_dependencies([loss_averages_op]):
        opt = tf.train.GradientDescentOptimizer(lr)
        grads = opt.compute_gradients(loss)
    # Apply gradients.
    apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
    # Add histograms for trainable variables.
    for var in tf.trainable_variables():
        tf.histogram_summary(var.op.name, var)
    # Add histograms for gradients.
    for grad, var in grads:
        # Bug fix: compute_gradients returns None for variables the loss does
        # not depend on, and truthiness-testing a Tensor is invalid - test for
        # None explicitly instead of `if grad:`.
        if grad is not None:
            tf.histogram_summary(var.op.name + '/gradients', grad)
    # Track the moving averages of all trainable variables.
    variable_averages = tf.train.ExponentialMovingAverage(
        MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())
    with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
        train_op = tf.no_op(name='train')
    return train_op
def evaluation(logits, labels):
    """Evaluate the quality of the logits at predicting the label.

    Args:
      logits: Logits tensor, float - [batch_size, NUM_CLASSES].
      labels: Labels tensor, int32 - [batch_size], with values in the
        range [0, NUM_CLASSES).
    Returns:
      A scalar int32 tensor with the number of examples (out of batch_size)
      that were predicted correctly.
    """
    with tf.name_scope('eval'):
        # in_top_k yields a [batch_size] bool vector: True where the true
        # label is the top-1 prediction for that example.
        hits = tf.nn.in_top_k(logits, labels, 1)
        # Count the correct predictions.
        return tf.reduce_sum(tf.cast(hits, tf.int32))
bseditor/tests/test_commands.py | cltrudeau/django-bootstrap-editor | 3 | 12760754 | <filename>bseditor/tests/test_commands.py
# bseditor.tests.test_commands.py
import os
from django.core.management import call_command
from django.test import TestCase
from bseditor.models import Version
from bseditor.management.commands.defaultversion import versions
from wrench.contexts import capture_stdout
# ============================================================================
class CommandTests(TestCase):
    """Tests for the bseditor management commands."""

    def test_default_version(self):
        # "defaultversion" should create exactly one Version row.
        with capture_stdout():
            call_command('defaultversion')
        self.assertEqual(1, Version.objects.count())

    def test_create_version(self):
        # Build absolute paths to the bundled vars/compile files of the first
        # known version, then create it via the "createversion" command.
        version = versions[0]
        base_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
        vars_filename = os.path.join(base_dir, version[1], version[2])
        compile_filename = os.path.join(base_dir, version[1], version[3])
        with capture_stdout():
            call_command('createversion', version[0], vars_filename,
                         compile_filename)
        self.assertEqual(1, Version.objects.count())
| 2.28125 | 2 |
triple_agent/parsing/timeline/parse_timeline.py | andrewzwicky/TripleAgent | 3 | 12760755 | <filename>triple_agent/parsing/timeline/parse_timeline.py
import logging
import hashlib
from typing import List, Tuple, Optional, Iterator, Any
import cv2
import numpy as np
from triple_agent.classes.books import Books, COLORS_TO_BOOKS_ENUM
from triple_agent.classes.characters import Characters, PORTRAIT_MD5_DICT
from triple_agent.classes.roles import ROLE_COLORS_TO_ENUM, Roles
from triple_agent.classes.timeline import (
TimelineEvent,
EVENT_IMAGE_HASH_DICT,
ACTOR_IMAGE_HASH_DICT,
DIGIT_DICT,
)
from triple_agent.classes.capture_debug_pictures import capture_debug_picture
from triple_agent.constants.paths import DEBUG_CAPTURES
logger = logging.getLogger("triple_agent")
LINE_SPACING = 20
LINE_HEIGHT = 19
TIMELINE_TOP = 388
TIMELINE_LEFT = 42
TIMELINE_HEIGHT = 596
TIMELINE_WIDTH = 700
OVERALL_CAPTURE_BORDER = 10
LINE_BORDER = 4
BACKGROUND_COLOR = (38, 38, 38)
HIGHLIGHTED_BACKGROUND = (255, 255, 255)
SPY_MISSIONS_COLOR = (255, 204, 0)
ARROW_COLOR = (178, 178, 178)
NUM_LINES = 30
ARROW_ROW = 613
ARROW_COL = 160
ARROW_WIDTH = 18
ARROW_HEIGHT = 2
PORTRAIT_SPACING = 22
SINGLE_PORTRAIT_WIDTH = 22 # 22 accounts for background removed single portraits
PORTRAIT_BACKGROUND_BORDER = 2
SINGLE_BOOK_WIDTH = 17
BOOK_SPACING = 18
TIMER_OFFSET = 54
TEXT_OFFSET = 125
ROLE_BORDER_SIZE = 2
SPY_P_TOP = 668
SPY_P_LEFT = 633
SPY_P_WIDTH = 2
SPY_P_HEIGHT = 25
TIMEOUT = 12 # seconds
TIME_STEP = 1
class TimelineParseException(Exception):
    """Base class for all timeline-parsing failures."""
    pass


class TimelineDigitNotMatchedException(TimelineParseException):
    """A timer digit image hash was not found in DIGIT_DICT."""
    pass


class TimelinePortraitNotMatchedException(TimelineParseException):
    """A character portrait image hash was not found in PORTRAIT_MD5_DICT."""
    pass


class TimelineActorNotMatchedException(TimelineParseException):
    """An actor column image hash was not found in ACTOR_IMAGE_HASH_DICT."""
    pass


class TimelineEventNotMatchedException(TimelineParseException):
    """An event text image hash was not found in EVENT_IMAGE_HASH_DICT."""
    pass


class TimelineOddNumberScreenshots(TimelineParseException):
    """Odd number of captured screenshots (not raised in this module)."""
    pass


class TimelineMismatchedElapsedScreenshots(TimelineParseException):
    """Mismatched elapsed-time screenshots (not raised in this module)."""
    pass
def separate_line_images(screenshot: np.ndarray) -> List[np.ndarray]:
    """Slice the captured timeline screenshot into one image per text line."""
    left = OVERALL_CAPTURE_BORDER
    right = OVERALL_CAPTURE_BORDER + TIMELINE_WIDTH
    line_images = []
    for line_no in range(NUM_LINES):
        # Each line starts LINE_SPACING pixels below the previous one.
        top = OVERALL_CAPTURE_BORDER + line_no * LINE_SPACING
        line_images.append(screenshot[top : top + LINE_HEIGHT, left:right])
    return line_images
def remove_books(line_image: np.ndarray) -> Tuple[Tuple[Optional[Books]], np.ndarray]:
    """Detect book icon(s) on the top row of a line image and blank them out.

    Returns the detected books (a tuple of one or two Books values, or
    ``(None,)`` when no book is present) and the modified line image.
    Note: *line_image* is modified in place.
    """
    # Book icons touch the first pixel row; any non-background pixel there
    # belongs to a book.
    first_row = line_image[0]
    books: Tuple[Optional[Books]] = (None,)
    no_book = np.all(np.all(first_row == BACKGROUND_COLOR, axis=1))
    if not no_book:
        # Column extent of the non-background (book icon) region.
        book_mask = np.any(first_row != BACKGROUND_COLOR, axis=1)
        first_book_index = np.argmax(book_mask)
        last_book_index = len(book_mask) - np.argmax(book_mask[::-1])
        if (last_book_index - first_book_index) > SINGLE_BOOK_WIDTH:
            # Two books: sample a pixel at the center of each icon.
            book_colors = [
                tuple(
                    line_image[
                        0, ((last_book_index - BOOK_SPACING) + first_book_index) // 2
                    ]
                ),
                tuple(
                    line_image[
                        0, (last_book_index + (first_book_index + BOOK_SPACING)) // 2
                    ]
                ),
            ]
        else:
            # Single book: sample its center pixel.
            book_colors = [
                tuple(line_image[0, (last_book_index + first_book_index) // 2])
            ]
        books = tuple(COLORS_TO_BOOKS_ENUM[color] for color in book_colors)
        # Erase the icons so later text hashing sees only background.
        line_image[:, first_book_index:last_book_index] = BACKGROUND_COLOR
    return books, line_image
def separate_portraits(
    line_image: np.ndarray,
) -> Tuple[
    np.ndarray,
    Tuple[Optional[np.ndarray]],
    Tuple[Optional[Roles]],
    Tuple[Optional[Books]],
]:
    """Extract portrait crops, role colors and book icons from a line image.

    Portraits touch the bottom pixel row; the colored strip beneath each
    portrait encodes the character's role.  The portraits (and any book
    icons, via remove_books) are blanked out so the remaining pixels contain
    only the line's text.  Returns (text-only image, portraits, roles, books);
    each tuple is ``(None,)`` when the corresponding element is absent.
    Note: *line_image* is modified in place.
    """
    last_row = line_image[-1]
    # no portrait found
    portraits: Tuple[Optional[np.ndarray], ...] = (None,)
    roles: Tuple[Optional[Roles], ...] = (None,)
    no_portrait = np.all(np.all(last_row == BACKGROUND_COLOR, axis=1))
    if not no_portrait:
        # Column extent of the non-background (portrait) region.
        portrait_mask = np.any(last_row != BACKGROUND_COLOR, axis=1)
        first_portrait_index = np.argmax(portrait_mask)
        last_portrait_index = len(portrait_mask) - np.argmax(portrait_mask[::-1])
        if (last_portrait_index - first_portrait_index) > SINGLE_PORTRAIT_WIDTH:
            # multiple portraits: crop each, trimming the role border rows at
            # the bottom and the background border columns on each side
            portraits = (
                np.copy(
                    line_image[
                        :-ROLE_BORDER_SIZE,
                        first_portrait_index
                        + PORTRAIT_BACKGROUND_BORDER : last_portrait_index
                        - PORTRAIT_SPACING
                        - PORTRAIT_BACKGROUND_BORDER,
                    ]
                ),
                np.copy(
                    line_image[
                        :-ROLE_BORDER_SIZE,
                        first_portrait_index
                        + PORTRAIT_SPACING
                        + PORTRAIT_BACKGROUND_BORDER : last_portrait_index
                        - PORTRAIT_BACKGROUND_BORDER,
                    ]
                ),
            )
        else:
            # single portrait
            portraits = (
                np.copy(
                    line_image[
                        :-ROLE_BORDER_SIZE,
                        first_portrait_index
                        + PORTRAIT_BACKGROUND_BORDER : last_portrait_index
                        - PORTRAIT_BACKGROUND_BORDER,
                    ]
                ),
            )
        # Sample the role border color at the center of each portrait.
        if (last_portrait_index - first_portrait_index) > SINGLE_PORTRAIT_WIDTH:
            role_colors = (
                tuple(
                    line_image[
                        -1,
                        (
                            (last_portrait_index - PORTRAIT_SPACING)
                            + first_portrait_index
                        )
                        // 2,
                    ]
                ),
                tuple(
                    line_image[
                        -1,
                        (
                            last_portrait_index
                            + (first_portrait_index + PORTRAIT_SPACING)
                        )
                        // 2,
                    ]
                ),
            )
        else:
            role_colors = (
                tuple(
                    line_image[-1, (last_portrait_index + first_portrait_index) // 2]
                ),
            )
        # Unknown border colors default to Civilian.
        roles = tuple(
            ROLE_COLORS_TO_ENUM.get(color, Roles.Civilian) for color in role_colors
        )
        # Erase the portrait region, then detect and erase any book icons.
        line_image[:, first_portrait_index:last_portrait_index] = BACKGROUND_COLOR
    books, line_image = remove_books(line_image)
    return line_image, portraits, roles, books
def add_borders(line_image: np.ndarray) -> np.ndarray:
    """Pad the line image on all four sides with the background color."""
    pad = LINE_BORDER
    return cv2.copyMakeBorder(
        line_image,
        pad,
        pad,
        pad,
        pad,
        cv2.BORDER_CONSTANT,
        value=BACKGROUND_COLOR,
    )
def convert_black_white(line_image: np.ndarray) -> np.ndarray:
    """Threshold to pure black/white and invert (light text becomes black)."""
    _, thresholded = cv2.threshold(line_image, 40, 255, cv2.THRESH_BINARY)
    return cv2.bitwise_not(thresholded)
def remove_highlighted_background(line_image: np.ndarray) -> np.ndarray:
    """Convert a highlighted (hovered) timeline line back to the normal theme.

    Highlighted lines have a white background; replace the white pixels with
    the standard background color so the image hashes stay consistent.
    Note: *line_image* is modified in place.
    """
    # The top-left pixel reveals whether this line is highlighted.
    if np.array_equal(line_image[0, 0], HIGHLIGHTED_BACKGROUND):
        blue, green, red = line_image.T
        # Mask of pure-white pixels (the highlight background).
        idx = ((red == 255) & (green == 255) & (blue == 255)).T
        line_image[idx] = BACKGROUND_COLOR[0]
        # Blank the last non-background column on the bottom row - presumably
        # a fade-out gradient left by the highlight (TODO confirm).
        gradient_mask = np.any(line_image[-1] != BACKGROUND_COLOR, axis=1)
        gradient_column_index = len(gradient_mask) - np.argmax(gradient_mask[::-1])
        line_image[:, gradient_column_index - 1] = BACKGROUND_COLOR[0]
    return line_image
def split_into_parts(
    line_image: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """Split a line image into its actor, timer, and event-text columns."""
    columns = (
        line_image[:, :TIMER_OFFSET],             # actor name
        line_image[:, TIMER_OFFSET:TEXT_OFFSET],  # timer digits
        line_image[:, TEXT_OFFSET:],              # event description
    )
    actor, time, text = (np.copy(part) for part in columns)
    return actor, time, text
def name_portrait(
    portraits: Tuple[Optional[np.ndarray]],
) -> Tuple[Optional[Characters], ...]:
    """Map portrait crops to Characters via the MD5 hashes of their pixels.

    Returns ``(None,)`` when no portrait was present on the line.  Raises
    TimelinePortraitNotMatchedException (after saving a debug capture) when a
    portrait hash is unknown.
    """
    characters = []
    for portrait in portraits:
        if portrait is None:
            # No portrait on this line at all.
            return (None,)
        portrait_md5 = hashlib.md5(portrait.tobytes()).hexdigest()
        try:
            characters.append(PORTRAIT_MD5_DICT[portrait_md5])
        except KeyError as key_exec:
            logger.warning("TimelineParseException character portrait not found")
            # Save the unmatched crop so the hash dictionary can be extended.
            capture_debug_picture(DEBUG_CAPTURES.joinpath("portraits"), portrait)
            raise TimelinePortraitNotMatchedException(
                "character portrait not found"
            ) from key_exec
    # noinspection PyTypeChecker
    return tuple(characters)
def remove_overlap(events: Iterator[TimelineEvent]) -> List[TimelineEvent]:
    """Drop events duplicated by overlapping screenshot pages."""
    event_list = list(events)
    event_hashes = [hash(event) for event in event_list]
    overlap_count = find_overlap_last_page_index(event_hashes)
    return trim_overlapped_list(event_list, overlap_count)
def trim_overlapped_list(events: List[Any], num_overlapping_events: int) -> List[Any]:
    """Remove the given number of duplicated entries from the final page."""
    if not num_overlapping_events:
        return events
    if num_overlapping_events == NUM_LINES:
        # The entire last page repeats the previous one - drop it wholesale.
        return events[:-NUM_LINES]
    # Keep everything before the last page, then the non-duplicated remainder.
    head = events[:-NUM_LINES]
    tail = events[(-NUM_LINES + num_overlapping_events):]
    return head + tail
def find_overlap_last_page_index(hashes: List[int]) -> int:
    """Return how many events at the start of the last page duplicate the end
    of the previous page (0 when there is no overlap)."""
    # can't contain overlap with less than one page of results
    if len(hashes) > NUM_LINES:
        # if full pages, assume a number evenly divisible by 30
        # otherwise, we can't know where the last page break is
        assert len(hashes) % NUM_LINES == 0
        last_page_hashes = hashes[-NUM_LINES:]
        second_last_page_hashes = hashes[-(NUM_LINES * 2) : -NUM_LINES]
        # starting with the maximum possible overlap, continuously check
        # smaller and smaller sections, until an overlap range is found
        # This does mean that there is no discernable difference between a
        # doubled event (same hash) that spans the last page boundary
        # and the same even showing up twice because of overlap
        # TODO: Consider using shift to get absolute times to avoid these edge cases
        for num_overlapping_events in range(NUM_LINES, 0, -1):
            if np.array_equal(
                last_page_hashes[:num_overlapping_events],
                second_last_page_hashes[-num_overlapping_events:],
            ):
                # it is possible to return a full page worth of overlap here
                # if the last two pages are identical
                # this seems highly unlikely
                return num_overlapping_events
    # if nothing is returned, it means there's no overlap
    return 0
def parse_time_digits(time_pic: np.ndarray) -> str:
    """OCR the timer column by hashing fixed-position digit crops.

    Two layouts exist: an elapsed format ("SSS.ss", detected by a decimal
    point at a known position) and a clock format ("MMM:SS.s").  Raises
    TimelineDigitNotMatchedException when a digit image hash is unknown.
    """
    digit_width = 8
    digit_height = 12
    digit_top = 5
    # Position where only the elapsed layout shows a decimal point.
    elapsed_decimal_top = 13
    elapsed_decimal_left = 41
    elapsed_decimal_size = 5
    possible_decimal = time_pic[
        elapsed_decimal_top : elapsed_decimal_top + elapsed_decimal_size,
        elapsed_decimal_left : elapsed_decimal_left + elapsed_decimal_size,
    ]
    # both red and black hash for period location
    if hashlib.md5(possible_decimal.tobytes()).hexdigest() in (
        "0b4aa16ffb116f1b8cc4c0d940b6859f",
        "c8b5048bcbc949fff21066780a5ebb4e",
    ):
        # elapsed mode
        digit_offsets = [14, 23, 32, 46, 55]
        elapsed = True
    else:
        # clock mode (MMM:SS.s)
        digit_offsets = [0, 9, 18, 32, 41, 55]
        elapsed = False
    digits = []
    for start in digit_offsets:
        digit = time_pic[
            digit_top : digit_top + digit_height, start : start + digit_width
        ]
        digit_hash = hashlib.md5(digit.tobytes()).hexdigest()
        try:
            digits.append(DIGIT_DICT[digit_hash])
        except KeyError as key_exec:
            logger.warning("TimelineParseException digit not found")
            # Save the timer image so the digit dictionary can be extended.
            capture_debug_picture(DEBUG_CAPTURES.joinpath("digits"), time_pic)
            raise TimelineDigitNotMatchedException("digit not found") from key_exec
    if elapsed:
        # lstrip drops the leading placeholder for blank hundreds digits
        return "{}{}{}.{}{}".format(*digits).lstrip()
    return "{}{}{}:{}{}.{}".format(*digits).lstrip()
def process_line_image(line_image: np.ndarray) -> Optional[TimelineEvent]:
    """Parse a single timeline line image into a TimelineEvent.

    Returns None for blank (all-background) lines.  Raises a
    TimelineParseException subclass (after saving a debug capture) when any
    component image hash is unknown.
    """
    if np.all(line_image == BACKGROUND_COLOR[0]):
        return None
    line_image = remove_highlighted_background(line_image)
    # Pull portraits / roles / books out of the image, leaving only text.
    words, portraits, roles, books = separate_portraits(line_image)
    characters = name_portrait(portraits)
    # Normalize to black & white, then split into the three text columns.
    actor_pic, time_pic, event_pic = split_into_parts(
        convert_black_white(add_borders(words))
    )
    time = parse_time_digits(time_pic)
    # Actor and event text are recognized by exact image hash lookup.
    event_image_hash = hashlib.md5(event_pic.tobytes()).hexdigest()
    actor_image_hash = hashlib.md5(actor_pic.tobytes()).hexdigest()
    try:
        event = EVENT_IMAGE_HASH_DICT[event_image_hash]
    except KeyError as key_exec:
        logger.warning("TimelineParseException event not found")
        capture_debug_picture(DEBUG_CAPTURES.joinpath("events"), line_image)
        raise TimelineEventNotMatchedException("event not found") from key_exec
    try:
        actor = ACTOR_IMAGE_HASH_DICT[actor_image_hash]
    except KeyError as key_exec:
        logger.warning("TimelineParseException actor not found")
        capture_debug_picture(DEBUG_CAPTURES.joinpath("actors"), line_image)
        raise TimelineActorNotMatchedException("actor not found") from key_exec
    return TimelineEvent(actor, time, event, characters, roles, books)
def parse_screenshot(screenshot: np.ndarray) -> List[TimelineEvent]:
    """Parse every timeline line of a screenshot into TimelineEvent objects."""
    parsed = (process_line_image(line) for line in separate_line_images(screenshot))
    # Blank lines parse to None and are discarded.
    return [event for event in parsed if event is not None]
if __name__ == "__main__":
pass
| 2.015625 | 2 |
tmtoolkit/lda_utils/tm_gensim.py | ddomhoff/tmtoolkit | 0 | 12760756 | # -*- coding: utf-8 -*-
import logging
import numpy as np
import gensim
from .common import MultiprocModelsRunner, MultiprocModelsWorkerABC, MultiprocEvaluationRunner, \
MultiprocEvaluationWorkerABC, dtm_to_gensim_corpus
from .eval_metrics import metric_cao_juan_2009
AVAILABLE_METRICS = (
'perplexity',
# 'cross_validation',
'cao_juan_2009',
# 'arun_2010',
)
logger = logging.getLogger('tmtoolkit')
def get_model_perplexity(model, eval_corpus):
    """Return the per-word perplexity of *model* on the bag-of-words corpus."""
    total_words = sum(count for document in eval_corpus for _, count in document)
    per_word_bound = model.bound(eval_corpus) / total_words
    return np.exp2(-per_word_bound)
class MultiprocModelsWorkerGensim(MultiprocModelsWorkerABC):
    """Worker process that fits a single gensim LDA model."""

    package_name = 'gensim'

    def fit_model(self, data, params, return_data=False):
        """Fit an LDA model on the document-term matrix *data*.

        :param data: sparse document-term matrix (converted to a gensim corpus)
        :param params: keyword arguments passed through to gensim's LdaModel
        :param return_data: if True, also return the converted corpus
        :return: the model, or (model, corpus) when `return_data` is True
        """
        data = dtm_to_gensim_corpus(data.tocsr())
        model = gensim.models.ldamodel.LdaModel(data, **params)
        if return_data:
            return model, data
        else:
            return model
class MultiprocEvaluationWorkerGensim(MultiprocEvaluationWorkerABC, MultiprocModelsWorkerGensim):
    """Worker process that fits a gensim LDA model and scores it with the
    requested evaluation metrics."""

    def fit_model(self, data, params, return_data=False):
        """Fit the model, then compute each metric in ``self.eval_metric``.

        :return: dict of metric name -> result (plus 'model' when
                 ``self.return_models`` is set)
        """
        model, data = super(MultiprocEvaluationWorkerGensim, self).fit_model(data, params, return_data=True)
        results = {}
        if self.return_models:
            results['model'] = model
        for metric in self.eval_metric:
            # if metric == 'cross_validation': continue
            if metric == 'cao_juan_2009':
                # metric computed from the topic-term matrix (lambda)
                res = metric_cao_juan_2009(model.state.get_lambda())
            # elif metric == 'arun_2010':  # TODO: fix this (get document topic distr. from gensim model)
            #     results = metric_arun_2010(train_model.state.get_lambda(), train_model[corpus_train], data.sum(axis=1))
            else:  # default: perplexity
                res = get_model_perplexity(model, data)
            logger.info('> evaluation result with metric "%s": %f' % (metric, res))
            results[metric] = res
        return results
def compute_models_parallel(data, varying_parameters=None, constant_parameters=None, n_max_processes=None):
    """
    Compute several Topic Models in parallel using the "gensim" package. Use a single or multiple document term matrices
    `data` and optionally a list of varying parameters `varying_parameters`. Pass parameters in `constant_parameters`
    dict to each model calculation. Use at maximum `n_max_processes` processors or use all available processors if None
    is passed.
    `data` can be either a Document-Term-Matrix (NumPy array/matrix, SciPy sparse matrix) or a dict with document ID ->
    Document-Term-Matrix mapping when calculating models for multiple corpora (named multiple documents).
    If `data` is a dict of named documents, this function will return a dict with document ID -> result list. Otherwise
    it will only return a result list. A result list always is a list containing tuples `(parameter_set, model)` where
    `parameter_set` is a dict of the used parameters.
    """
    # Delegate the fan-out/fan-in to the shared multiprocessing runner.
    mp_models = MultiprocModelsRunner(MultiprocModelsWorkerGensim, data, varying_parameters, constant_parameters,
                                      n_max_processes=n_max_processes)
    return mp_models.run()
def evaluate_topic_models(data, varying_parameters, constant_parameters=None, n_max_processes=None, return_models=False,
                          metric=None, **metric_kwargs):
    """
    Compute several Topic Models in parallel using the "gensim" package. Calculate the models using a list of varying
    parameters `varying_parameters` on a single Document-Term-Matrix `data`. Pass parameters in `constant_parameters`
    dict to each model calculation. Use at maximum `n_max_processes` processors or use all available processors if None
    is passed.
    `data` must be a Document-Term-Matrix (NumPy array/matrix, SciPy sparse matrix).
    `metric` selects the evaluation metric(s) from AVAILABLE_METRICS; extra keyword arguments are forwarded as metric
    options. Set `return_models` to also keep the fitted model in each result.
    Will return a list of size `len(varying_parameters)` containing tuples `(parameter_set, eval_results)` where
    `parameter_set` is a dict of the used parameters and `eval_results` is a dict of metric names -> metric results.
    """
    mp_eval = MultiprocEvaluationRunner(MultiprocEvaluationWorkerGensim, AVAILABLE_METRICS, data,
                                        varying_parameters, constant_parameters,
                                        metric=metric, metric_options=metric_kwargs,
                                        n_max_processes=n_max_processes, return_models=return_models)
    return mp_eval.run()
| 2.125 | 2 |
src/server/handlers/peewee_orm.py | w4n9H/pyfdfs | 26 | 12760757 | # !/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: mango
@contact: <EMAIL>
@create: 16/2/18
"""
__author__ = "mango"
__version__ = "0.1"
import peewee
from peewee import MySQLDatabase, CompositeKey
from playhouse.pool import PooledMySQLDatabase
from settings import MYSQL_CONFIG
from settings import FDFS_DOMAIN
db = MySQLDatabase(host=MYSQL_CONFIG['host'],
port=MYSQL_CONFIG['port'],
user=MYSQL_CONFIG['user'],
passwd=MYSQL_CONFIG['passwd'],
database=MYSQL_CONFIG['db_name'])
pool_db = PooledMySQLDatabase(MYSQL_CONFIG['db_name'],
max_connections=2000,
stale_timeout=10,
host=MYSQL_CONFIG['host'],
port=MYSQL_CONFIG['port'],
user=MYSQL_CONFIG['user'],
passwd=MYSQL_CONFIG['passwd'],)
# noinspection PyPep8Naming,PyMethodMayBeStatic
class fdfs_info(peewee.Model):
    """Peewee model mapping uploaded files to their FastDFS storage location.

    A row is uniquely identified by the composite key (file_name, domain_id).
    """
    file_name = peewee.FixedCharField(max_length=255)         # original file name
    file_size = peewee.IntegerField()                         # size in bytes
    file_md5 = peewee.CharField(default='', max_length=32)    # MD5 hex digest
    file_crc32 = peewee.CharField(default='', max_length=8)   # CRC32 hex digest
    file_group = peewee.CharField(max_length=64)              # FastDFS storage group
    file_local_path = peewee.CharField(max_length=255)        # path within the group
    domain_id = peewee.IntegerField()                         # owning domain/namespace

    class Meta:
        database = db
        primary_key = CompositeKey('file_name', 'domain_id')
        indexes = ((('domain_id', 'file_name'), True), )  # unique composite index
    def conn_finish(self):
        # Close the database connection if it is still open (called from the
        # finally-blocks of every query method below).
        if not db.is_closed():
            db.close()
    def fdfs_insert(self, in_dict):
        """Insert a file record.

        :param in_dict: column -> value mapping for the new row (dict)
        :return: (True, None) on success, (False, error message) on failure
        """
        try:
            iq = (fdfs_info
                  .insert(**in_dict))
            iq.execute()
            return True, None
        except Exception as error:
            return False, str(error)
        finally:
            self.conn_finish()
    def fdfs_update(self, up_dict, file_name, domain_id):
        """Update a file record.

        :param up_dict: column -> new value mapping (dict)
        :param file_name: file name (str)
        :param domain_id: domain/namespace id (int)
        :return: (True, None) on success, (False, error message) on failure
        """
        try:
            uq = (fdfs_info
                  .update(**up_dict)
                  .where(fdfs_info.domain_id == domain_id, fdfs_info.file_name == file_name))
            uq.execute()
            return True, None
        except Exception as error:
            return False, str(error)
        finally:
            self.conn_finish()
    def fdfs_delete(self, file_name, domain_id):
        """Delete a file record.

        :param file_name: file name (str)
        :param domain_id: domain/namespace id (int)
        :return: (True, None) on success, (False, error message) on failure
        """
        try:
            d = (fdfs_info
                 .delete()
                 .where(fdfs_info.domain_id == domain_id, fdfs_info.file_name == file_name))
            d.execute()
            return True, None
        except Exception as error:
            return False, str(error)
        finally:
            self.conn_finish()
def fdfs_exist(self, file_name, domain_id):
"""
判断数据是否存在
:param file_name: 文件名 str
:param domain: 域空间名 str
:return:
0 数据存在
1 数据不存在
2 查询错误
"""
try:
query_data = fdfs_info.select(fdfs_info.file_group, fdfs_info.file_local_path).\
where(fdfs_info.domain_id == domain_id, fdfs_info.file_name == file_name)
if query_data:
return 0, query_data.dicts().get()
else:
return 1, None
except Exception as error:
return 2, str(error)
finally:
self.conn_finish()
def fdfs_file_info(self, file_name, domain_id):
"""
数据查询
:param file_name: 文件名 str
:param domain: 域空间名 str
:return:
0 查询成功
1 未查询到数据
2 查询错误
"""
try:
query_data = fdfs_info.select().where(fdfs_info.domain_id == domain_id, fdfs_info.file_name == file_name)
if query_data:
return 0, query_data.dicts().get()
else:
return 1, None
except Exception as error:
return 2, str(error)
finally:
self.conn_finish()
def fdfs_download(self, file_name, domain_id):
"""
获取下载地址
:param file_name: 文件名 str
:param domain: 域空间名 str
:return: 成功返回 true ,失败返回 false
"""
try:
query_data = fdfs_info.select(fdfs_info.file_group, fdfs_info.file_local_path).\
where(fdfs_info.domain_id == domain_id, fdfs_info.file_name == file_name)
if query_data:
query_info = query_data.dicts().get()
group_info = query_info.get('file_group', '')
group_local_info = query_info.get('file_local_path', '')
http_info = FDFS_DOMAIN.get(group_info, '')
redirect_http = "%s/%s/%s?filename=%s" % (http_info, group_info, group_local_info, file_name)
return True, redirect_http
else:
return False, None
except Exception as error:
return False, str(error)
finally:
self.conn_finish()
def fdfs_empty(self, domain_id):
"""
判断某个domain是否为空
:param domain_id:
:return:
0 domain为空
1 domain中有文件
2 查询错误
"""
try:
result = fdfs_info.select().where(fdfs_info.domain_id == domain_id).count()
if result == 0:
return 0, None
else:
return 1, None
except Exception as error:
return 2, str(error)
finally:
self.conn_finish()
def list_file(self, domain_id, limit):
"""
列出domain 文件列表
:param domain_id:
:param limit:
:return:
0 文件列表
1 domain没有文件
2 查询错误
"""
try:
result = []
query_data = fdfs_info.select(fdfs_info.file_name).where(fdfs_info.domain_id == domain_id).limit(limit)
if query_data:
for i in query_data.dicts():
result.append(i.get('file_name'))
return 0, result
else:
return 1, []
except Exception as error:
return 2, str(error)
finally:
self.conn_finish()
# noinspection PyPep8Naming,PyMethodMayBeStatic
class domain_info(peewee.Model):
    """Peewee model mapping a domain (name space) name to its numeric id.

    The implicit auto-increment ``id`` column serves as the domain id used
    by ``fdfs_info.domain_id``.  Helpers return ``(status, payload)`` tuples.
    """
    # domain_id = peewee.IntegerField()
    domain_name = peewee.CharField(max_length=255, unique=True)
    class Meta:
        database = db  # database connection
    def conn_finish(self):
        # Close the database connection once a call has finished.
        if not db.is_closed():
            db.close()
    def id_exist(self, domain_name):
        """
        Check whether a domain exists.
        :param domain_name: domain name (str)
        :return:
            (0, row dict)  domain exists (dict has 'id' and 'domain_name')
            (1, None)      domain does not exist
            (2, error str) query error
        """
        try:
            query_data = domain_info.select(domain_info.id, domain_info.domain_name).\
                where(domain_info.domain_name == domain_name)
            if query_data:
                return 0, query_data.dicts().get()
            else:
                return 1, None
        except Exception as error:
            return 2, str(error)
        finally:
            self.conn_finish()
    def id_insert(self, domain_name):
        """
        Insert a new domain.
        :param domain_name: domain name (str)
        :return: (True, None) on success, (False, error message) on failure
        """
        try:
            in_dict = {'domain_name': domain_name}
            iq = (domain_info
                  .insert(**in_dict))
            iq.execute()
            return True, None
        except Exception as error:
            return False, str(error)
        finally:
            self.conn_finish()
    def domain_operation(self, domain_name):
        """
        Get-or-create a domain by name.
        :param domain_name: domain name (str)
        :return: (True, row dict) on success, (False, error info) on failure
        """
        try:
            id_exist_status, id_exist_info = self.id_exist(domain_name)
            if id_exist_status == 0:
                return True, id_exist_info
            elif id_exist_status == 1:
                id_insert_status, id_insert_info = self.id_insert(domain_name)
                if id_insert_status:
                    id_query_status, id_query_info = self.id_exist(domain_name)
                    if id_query_status == 0:
                        return True, id_query_info
                    # NOTE(review): if the re-query after insert fails, the
                    # method falls through and implicitly returns None.
                else:
                    return False, id_insert_info
            else:
                return False, id_exist_info
        except Exception as error:
            return False, str(error)
        finally:
            self.conn_finish()
    def get_domain_name(self, domain_id):
        """
        Look up a domain name by its id.
        :param domain_id: domain id (int)
        :return:
            (0, row dict)  success
            (1, None)      no such domain
            (2, error str) query error
        """
        try:
            query_data = domain_info.select(domain_info.domain_name).where(domain_info.id == domain_id)
            if query_data:
                return 0, query_data.dicts().get()
            else:
                return 1, None
        except Exception as error:
            return 2, str(error)
        finally:
            self.conn_finish()
    def get_all_domain(self):
        """
        List every domain name.
        :return:
            (0, [names])   success
            (1, [])        no domains stored
            (2, error str) query error
        """
        result = []
        try:
            query_data = domain_info.select(domain_info.domain_name)
            if query_data:
                for i in query_data.dicts():
                    result.append(i.get('domain_name'))
                return 0, result
            else:
                return 1, result
        except Exception as error:
            return 2, str(error)
        finally:
            self.conn_finish()
    def delete_domain(self, domain):
        """
        Delete a domain by name.
        :param domain: domain name (str)
        :return: (True, None) on success, (False, error message) on failure
        """
        try:
            iq = (domain_info
                  .delete()
                  .where(domain_info.domain_name == domain))
            iq.execute()
            return True, None
        except Exception as error:
            return False, str(error)
        finally:
            self.conn_finish()
if __name__ == '__main__':
    # Ad-hoc smoke test: list every domain name currently stored.
    # `print expr` was Python-2-only syntax (a SyntaxError under Python 3);
    # the parenthesized call form behaves identically on both versions.
    ms = domain_info()
    # ms.query_data()
    # print(ms.fdfs_exist('281cb5c0-d07e-', 'test'))
    print(ms.get_all_domain())
    # print(ms.fdfs_download('281cb5c0-d07e-', 'test'))
    # print(ms.fdfs_update({'file_crc32': 'F'}, '281cb5c0-d07e-4', 'test'))
| 2.203125 | 2 |
custom/iii_2_xml2yolo.py | jason-su/UCGNet | 0 | 12760758 | <gh_stars>0
# coding:utf-8
# xml2txt
import xml.etree.ElementTree as ET
import os
import shutil
from Constants import train_name_txt_1000, crop_1000_xml_dir, train_txt_dir_1000, yolo_trainval_txt_1000, crop_1000_img_dir
classes = ['ignored regions','pedestrian','people','bicycle','car','van','truck','tricycle','awning-tricycle','bus','motor','others'] # class names; must match the <name> labels used in the XML annotations
# train_file = 'images_train.txt' # name of the generated list file (unused)
train_file_txt = '' # accumulates the output list: one image path per line
# wd = os.getcwd()
def convert(size, box):
    """Convert a pixel bounding box to normalized YOLO coordinates.

    ``size`` is the image ``(width, height)``; ``box`` is
    ``(xmin, xmax, ymin, ymax)`` in pixels.  xmax/ymax are clamped to the
    image bounds before normalizing.  Returns
    ``(x_center, y_center, width, height)``, each relative to image size.
    """
    scale_x = 1.0 / size[0]
    scale_y = 1.0 / size[1]
    xmin, ymin = box[0], box[2]
    xmax = min(box[1], size[0])   # clamp right edge to image width
    ymax = min(box[3], size[1])   # clamp bottom edge to image height
    x_center = ((xmin + xmax) / 2.0) * scale_x
    y_center = ((ymin + ymax) / 2.0) * scale_y
    rel_w = (xmax - xmin) * scale_x
    rel_h = (ymax - ymin) * scale_y
    return (x_center, y_center, rel_w, rel_h)
def convert_annotation(image_id):
    """Convert one VOC-style XML annotation into a YOLO .txt label file."""
    in_file = open("{}/{}.xml".format(crop_1000_xml_dir,image_id))
    out_file = open("{}/{}.txt".format(train_txt_dir_1000,image_id),"w")
    tree = ET.parse(in_file)
    root = tree.getroot()
    size = root.find('size')
    w = int(size.find('width').text)
    h = int(size.find('height').text)
    if w==0 or h==0:
        # A zero dimension would divide by zero in convert(); just warn.
        print("Err, w=0 or h=0,",image_id)
    for obj in root.iter('object'):
        cls = obj.find('name').text
        if cls not in classes: # skip labels that are not in the class list
            continue
        cls_id = classes.index(cls)
        if cls_id == 0 or cls_id ==11:
            # Drop 'ignored regions' (0) and 'others' (11).
            continue
        xmlbox = obj.find('bndbox')
        b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text), float(xmlbox.find('ymin').text),
             float(xmlbox.find('ymax').text))
        bb = convert((w, h), b)
        # YOLO line: "<class_id> <x> <y> <w> <h>" (ids shifted down by 1).
        out_file.write(str(cls_id - 1) + " " + " ".join([str(a) for a in bb]) + '\n')
image_ids_train = open(train_name_txt_1000).read().strip().split() # ids of the annotation (xml) files
for image_id in image_ids_train:
    convert_annotation(image_id)
anns = os.listdir(crop_1000_xml_dir)
for ann in anns:
    ans = ''  # NOTE(review): never used
    if ann[-3:] != 'xml':
        continue
    train_file_txt = train_file_txt + crop_1000_img_dir + ann[:-3] + 'jpg\n' # image path list in YOLO format
with open(yolo_trainval_txt_1000, 'w') as outfile:
    outfile.write(train_file_txt)
# # Copy the visdrone_train images into the images folder
# image_old = "/home/jjliao/Visdrone_yolo_cluster/VisDrone2019-DET-train/images_cluster/"
# image_new = yolo_trainval_dir + "/images/train/"
#
# if not os.path.exists(image_new):
#     os.makedirs(image_new)
# for file in os.listdir(image_old):
#     full_file = os.path.join(image_old, file)
#     new_full_file = os.path.join(image_new, file)
#     shutil.copy(full_file, new_full_file)
| 2.578125 | 3 |
client.py | imStudd/BomberMan | 0 | 12760759 | # -*- coding: utf-8 -*-
import queue
import select
import signal
import socket
import sys
import threading
class Client:
    """Threaded TCP client.

    Connects to ``host``/``port`` on construction, then runs a background
    thread that multiplexes reading server data (via select) with sending
    ``(code, data)`` tuples taken from the ``sq`` queue.  Parsed incoming
    messages are pushed to the ``rq`` queue for the consumer.
    """
    def __init__(self, host, port, sq, rq):
        self._host = host
        self._port = port
        self._connect()
        self._send_q = sq   # outgoing (code, data) tuples
        self._recv_q = rq   # parsed incoming messages for the consumer
        # On Linux, SIGUSR2 acts as an external "please disconnect" signal.
        if sys.platform == "linux":
            signal.signal(signal.SIGUSR2, self._handler)
        self._connected = threading.Event()
        self._client_thread = threading.Thread(target=self._receive_data)
        self._connected.set()
        self._client_thread.start()
    def _handler(self, signum, frame):
        # Signal handler: ask the I/O thread to shut down cleanly.
        self._connected.clear()
    def _connect(self):
        # Try every address returned by getaddrinfo (IPv4 and IPv6).
        # NOTE(review): there is no break after a successful connect, so
        # every addrinfo entry is attempted and the last one wins.
        for addrinfo in socket.getaddrinfo(self._host, self._port, socket.AF_UNSPEC, socket.SOCK_STREAM):
            ai_family, sock_type, _, _, sock_addr = addrinfo
            try:
                sock = socket.socket(ai_family, sock_type)
            except socket.error as ex:
                print("(!) sock: %s\n" % ex)
            except Exception as ex:
                print("(!) sock: %s\n" % ex)
            try:
                sock.connect(sock_addr)
            except socket.error as ex:
                print("(!) connect: %s\n" % ex)
                # A failed connect aborts the whole process.
                exit(1)
            sock.setblocking(0)  # non-blocking; reads are driven by select()
            if self._host != sock_addr[0]:
                print("(+) Connected to %s(%s):%d\n" %
                      (self._host, sock_addr[0], self._port))
            else:
                print("(+) Connected to %s:%d\n" % (self._host, self._port))
            self._sock = sock
    def _sending_data_proccess(self, code, data):
        # Wire format: "<CODE> <data>\n", UTF-8 encoded.
        try:
            buf = code + " " + data + "\n"
            self._sock.sendall(buf.encode())
        except Exception as ex:
            print("(!) sending data proccess: %s\n" % ex)
    def _receive_data(self):
        # Background I/O loop: poll the socket and drain the send queue
        # until disconnect is requested or the peer closes the connection.
        while self._connected.is_set():
            try:
                readable, _, _ = select.select([self._sock], [], [], 0.05)
                for s in readable:
                    buf = s.recv(10240)
                    if len(buf) == 0:
                        # Zero-length read means the peer closed the socket.
                        self._connected.clear()
                        break
                    self._received_data_proccess(buf)
                if not self._send_q.empty():
                    d = self._send_q.get()
                    if d[0] == "QUT":
                        self._connected.clear()
                    self._sending_data_proccess(d[0], d[1])
            except select.error as ex:
                print("(!) select: %s\n" % ex)
        # Shutdown path: tell the server we are leaving, wake the consumer.
        self._sending_data_proccess("QUT", "")
        self._connected.clear()
        self._recv_q.put("EXT")
        print("(*) Disconnecting to server...\n")
        self._sock.close()
        print("(-) Disconnected\n")
    def _received_data_proccess(self, data):
        # Split the raw buffer into newline-delimited messages and dispatch.
        try:
            # print("#DEBUG Client# - ", data)
            data = data.decode().splitlines()
            for buf in data:
                buf = buf.strip().split(" ")
                if buf[0] == "ERR":
                    if "FULL" in buf[1]:
                        print("Server full, closing connection with server !\n")
                    if "UNAVAILABLE" in buf[1]:
                        print("Nickname unavailable\n")
                    self._connected.clear()
                else:
                    self._recv_q.put(buf)
        except Exception as ex:
            print("(!) received data process: %s\n" % ex)
| 2.734375 | 3 |
chefkoch.py | florianschmidt1994/chefkoch-api | 22 | 12760760 | <reponame>florianschmidt1994/chefkoch-api<gh_stars>10-100
import requests
from enum import Enum
from bs4 import BeautifulSoup
import re
import tasks
class UserNotFoundError(Exception):
    """Raised when no Chefkoch user exists for the given user id."""

    def __init__(self, user_id):
        # Forward a message to Exception so str(err)/tracebacks are useful
        # (previously args was empty and str(err) was '').
        super(UserNotFoundError, self).__init__('user not found: %s' % user_id)
        self.user_id = user_id
class RecipeNotFoundError(Exception):
    """Raised when no Chefkoch recipe exists for the given recipe id."""

    def __init__(self, recipe_id):
        # Forward a message to Exception so str(err)/tracebacks are useful.
        super(RecipeNotFoundError, self).__init__('recipe not found: %s' % recipe_id)
        self.recipe_id = recipe_id
class LoginException(Exception):
    """Raised when Chefkoch authentication fails for a username."""

    def __init__(self, username):
        # Forward a message to Exception so str(err)/tracebacks are useful.
        super(LoginException, self).__init__('login failed for user: %s' % username)
        self.username = username
class OrderBy(Enum):
    """Sort orders accepted by the Chefkoch search API ('orderBy' values)."""
    relevance = 2
    rating = 3
    difficulty = 4
    max_time_needed = 5
    date = 6
    random = 7
    daily_shuffle = 8
class ChefkochApi:
    """An API Wrapper for www.chefkoch.com

    Uses the public v2 JSON API where available and falls back to scraping
    the HTML profile pages with BeautifulSoup for user data.
    """
    def __init__(self, username="", password=""):
        if username and password:
            self.session = self.login(username, password)
            self.is_logged_in = True
        else:
            self.session = requests.session()
            self.is_logged_in = False
    def login(self, username, password):
        """Login user with username and password"""
        session = requests.Session()
        login_url = "https://www.chefkoch.de/benutzer/authentifizieren"
        login_data = {
            "username": username,
            "password": password,
            "remember_me": "on",
            "context": "login/init"
        }
        res = session.post(login_url, login_data)
        # They send 200 even if the authentication failed...
        if res.url == "https://www.chefkoch.de/benutzer/einloggen":
            raise LoginException(username)
        return session
    def get_recipe(self, recipe_id):
        """Returns a recipe as a dict for a given recipe_id"""
        url = "https://api.chefkoch.de/v2/recipes/%s" % recipe_id
        res = self.session.get(url)
        # Fixed: `is not 200` compared object identity and only worked by
        # accident (CPython small-int caching); use value comparison.
        if res.status_code != 200:
            raise RecipeNotFoundError(recipe_id)
        else:
            return res.json()
    def search_recipe(self, query='',
                      offset=0,
                      limit=50,
                      minimum_rating=0,
                      maximum_time=0,
                      order_by=OrderBy.relevance,
                      descend_categories=1,
                      order=0):
        """Returns a list of recipes that match the given search terms"""
        payload = {
            "query": query,
            "limit": limit,
            "offset": offset,
            "minimumRating": minimum_rating,
            "maximumTime": maximum_time,
            # NOTE(review): the OrderBy enum member is passed through as-is;
            # confirm whether the API expects its numeric .value instead.
            "orderBy": order_by,
            "descendCategories": descend_categories,
            "order": order
        }
        res = self.session.get("https://api.chefkoch.de/v2/recipes", params=payload)
        # Fixed: value comparison instead of identity (`is not 200`).
        if res.status_code != 200:
            raise ConnectionError("Response is not 200")
        else:
            return res.json()
    def get_user(self, user_id):
        """Scrape a user's profile page into a dict.

        Raises UserNotFoundError when the profile page reports an invalid id.
        """
        url = 'http://www.chefkoch.de/user/profil/' + user_id
        r = self.session.get(url)
        response = r.content.decode("utf-8")
        soup = BeautifulSoup(response, 'html.parser')
        title = soup.select(".page-title")
        if len(title) >= 1:
            if "Keine oder ungültige User-ID" in title[0].text.strip():
                raise UserNotFoundError(user_id)
        user_details = soup.select("#user-details tr")
        user = {
            'id': user_id,
            '_id': user_id,
            'friends': [],
            'Schritt-für-Schritt-Anleitungen': []
        }
        # Fixed: the old guard `len(...) >= 0` was always true (IndexError on
        # missing element) and a trailing comma stored a 1-tuple, not a str.
        username_nodes = soup.select('.username')
        if username_nodes:
            user['username'] = username_nodes[0].text.strip()
        for entry in user_details:
            td = entry.select("td")
            if td[0].text.strip() != '':
                img = td[1].select("img")
                if not img:
                    user[td[0].text.strip().replace(':', '')] = td[1].text.strip()
                else:
                    user[td[0].text.strip().replace(':', '')] = img[0].attrs['alt']
        profile_sections = soup.select(".slat__title")
        for section in profile_sections:
            if section.text.strip().find('Über mich') >= 0:
                user['aboutme'] = soup.select("#user-about")[0].text.strip().replace("\r", "")
            if section.text.strip().strip().find('Freunde') >= 0:
                user['Freunde'] = re.findall(r'\d+', section.text.strip())[0]
                user['friends'] = self.get_friends_of_user(user_id)
            # Anzahl der Rezepte (number of recipes)
            if section.text.strip().find('Rezepte') >= 0:
                user['Anzahl_Rezepte'] = re.findall(r'\d+', section.text.strip())[0]
            # Anzahl der Rezeptsammlungen (number of recipe collections)
            if section.text.strip().find('Rezeptsammlungen') >= 0:
                user['AnzahlRezeptsammlungen'] = re.findall(r'\d+', section.text.strip())[0]
            # Recipe collections: url + number of recipes per collection.
            # NOTE(review): rebuilt on every section iteration (redundant).
            user['Rezeptsammlungen'] = []
            for row in soup.select('#table-recipe-collections tr'):
                for link in row.select('a'):
                    url = link.get('href')
                    count = re.findall(r'\d+', row.text)[0]
                    user['Rezeptsammlungen'].append({'url': url, 'nrOfRecipes': count})
            if section.text.strip().find('Schritt-für-Schritt-Anleitungen') >= 0:
                user['Anzahl-Schritt-für-Schritt-Anleitungen'] = re.findall(r'\d+', section.text.strip())[0]
                user['Schritt-für-Schritt-Anleitungen'] = self.get_step_by_step_guides(user_id)
            if section.text.strip().find('Fotoalben') >= 0:
                user['Fotoalben'] = re.findall(r'\d+', section.text.strip())[0]
            if section.text.strip().find('Forenthemen') >= 0:
                user['Forenthemen'] = re.findall(r'\d+', section.text.strip())[0]
            if section.text.strip().find('Gruppen') >= 0:
                user['Gruppen'] = re.findall(r'\d+', section.text.strip())[0]
            # Gruppen (name + url) — NOTE(review): this overwrites the group
            # count set just above with a list of {url, Gruppenname} dicts.
            user['Gruppen'] = []
            for row in soup.select('#user-groups li'):
                name_of_group = row.text.strip()
                link = row.select('a')[0]
                url = link.get('href')
                user['Gruppen'].append({'url': url, 'Gruppenname': name_of_group})
        return user
    def get_friends_of_user(self, user_id):
        """Returns a list of friends for a given user_id"""
        url = 'http://www.chefkoch.de/user/freunde/%s/' % user_id
        response = self.session.get(url).text
        soup = BeautifulSoup(response, 'html.parser')
        friends = []
        for buddy in soup.select('li.user-buddies__buddy'):
            friend = {'username': buddy.text.strip()}
            if buddy.select('a'):
                friend['link'] = buddy.select('a')[0].get('href')
                # Profile links look like /user/profil/<id>/<name>.html
                regex = r"/user/profil/(.*)/.*.html"
                friend['id'] = re.findall(regex, friend['link'])[0]
            friends.append(friend)
        return friends
    def get_rating_by_recipe_id(self, recipe_id, db):
        """Scrape all ratings of a recipe; unknown raters are queued for
        crawling via add_unknown_user."""
        url = 'http://www.chefkoch.de/rezepte/wertungen/' + recipe_id + '/'
        r = self.session.get(url)
        response = r.content.decode("utf-8")
        soup = BeautifulSoup(response, 'html.parser')
        recipe_rating = {}
        recipe_rating['_id'] = recipe_id
        voting_table = soup.select(".voting-table tr")
        if not voting_table:
            recipe_rating["rating"] = []
            return recipe_rating
        voting_table.pop(0)  # drop the table header row
        votings = []
        for entry in voting_table:
            td = entry.select("td")
            voting_by_user = {}
            # The star value is encoded in the second CSS class of the span.
            voting_by_user["voting"] = re.findall(r'\d+', td[0].select("span span")[0].get("class")[1])[0]
            voting_by_user["name"] = td[1].text.strip()
            # check if user account was removed from chefkoch.de
            if td[1].select("a"):
                voting_by_user["id"] = td[1].select("a")[0].get("href").split('/')[3]
                # adds user to db
                # TODO: This logic should be in tasks.py
                self.add_unknown_user(voting_by_user["id"], db)
            else:
                voting_by_user["id"] = "unbekannt"
                print(voting_by_user)
                print(recipe_id)
                print(entry.text.strip())
            voting_by_user["date"] = td[2].text.strip()
            votings.append(voting_by_user)
        recipe_rating["rating"] = votings
        return recipe_rating
    def get_step_by_step_guides(self, user_id):
        """Scrape the step-by-step guides listed on a user's profile."""
        url = 'http://www.chefkoch.de/community/profil/%s/anleitungen' % user_id
        response = self.session.get(url).text
        soup = BeautifulSoup(response, 'html.parser')
        guides = []
        for row in soup.select('.theme-community .without-footer'):
            link = row.select('a')
            if link[1]:
                url = link[1].get('href')
                guides.append({'url': url, 'Titel': link[1].text.strip()})
        return guides
    # TODO: This should also be in tasks.py
    def add_unknown_user(self, id, db):
        """Queue a crawl task for `id` unless the user is already in `db`.

        (Parameter name `id` shadows the builtin but is kept for
        backward compatibility with keyword callers.)
        """
        user_found = False
        db_user = db.users.find({"_id": id})
        for user in db_user:
            user_found = True
        if not user_found:
            tasks.crawl_single_user.delay(id)
| 2.75 | 3 |
anijung/migrations/0003_auto_20200307_1654.py | NullFull/anijung | 1 | 12760761 | # Generated by Django 2.2.10 on 2020-03-07 07:54
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: make Quote.case optional (nullable/blank FK)."""

    dependencies = [
        ('anijung', '0002_quote'),
    ]

    operations = [
        migrations.AlterField(
            model_name='quote',
            name='case',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='anijung.Case'),
        ),
    ]
| 1.453125 | 1 |
model-optimizer/unit_tests/extensions/front/kaldi/tdnn_component_replacer_test.py | monroid/openvino | 2,406 | 12760762 | <filename>model-optimizer/unit_tests/extensions/front/kaldi/tdnn_component_replacer_test.py
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import unittest
import numpy as np
from generator import generator, generate
from extensions.front.kaldi.tdnn_component_replacer import TdnnComponentReplacer
from mo.utils.ir_engine.compare_graphs import compare_graphs
from unit_tests.utils.graph import build_graph, regular_op, result, connect_front, const
@generator
class TdnnComponentReplacerTest(unittest.TestCase):
    """Checks that TdnnComponentReplacer rewrites a `tdnncomponent` node
    into MemoryOffset nodes + Concat + FullyConnected (weights/biases as
    constants)."""

    @generate(*[
        ([[1, 1, 1], [4, 4, 4]], [1, 2], [-1, 1],),
        ([[1, 1, 1], [4, 4, 4]], [1, 2], [-1, 1, 2, 10, 1000],),
        ([[1, 1, 1], [4, 4, 4]], [1, 2], [-1, 0]),
    ])
    def test_tdnnreplacer(self, weights, biases, time_offsets):
        """Build the input graph, apply the transform, compare to reference."""
        def generate_offsets():
            # One MemoryOffset node per non-zero time offset; a zero offset
            # connects the placeholder straight into the Concat input.
            offset_edges = []
            offset_nodes = {}
            for i, t in enumerate(time_offsets):
                offset_nodes.update(**regular_op('memoryoffset_' + str(i), {'type': None}))
                if t != 0:
                    offset_edges.append(('placeholder', 'memoryoffset_' + str(i), {'out': 0, 'in': 0}))
                    offset_edges.append(('memoryoffset_' + str(i), 'concat', {'out': 0, 'in': i}))
                else:
                    offset_edges.append(('placeholder', 'concat', {'out': 0, 'in': i}))
            return offset_nodes, offset_edges
        offset_nodes, ref_offset_edges = generate_offsets()
        nodes = {
            **offset_nodes,
            **regular_op('placeholder', {'type': 'Parameter'}),
            **regular_op('tdnncomponent', {'op': 'tdnncomponent',
                                           'weights': np.array(weights),
                                           'biases': np.array(biases),
                                           'time_offsets': np.array(time_offsets)}),
            **const('weights', np.array(weights)),
            **const('biases', np.array(biases)),
            **regular_op('concat', {'type': 'Concat', 'axis': 1}),
            **regular_op('memoryoffset_0', {'type': None}),
            **regular_op('memoryoffset_1', {'type': None}),
            **regular_op('memoryoffset_2', {'type': None}),
            **regular_op('fully_connected', {'type': 'FullyConnected'}),
            **result('result'),
        }
        graph = build_graph(nodes, [
            *connect_front('placeholder', 'tdnncomponent'),
            *connect_front('tdnncomponent', 'result')
        ], nodes_with_edges_only=True)
        graph.stage = 'front'
        ref_graph = build_graph(nodes, [
            *ref_offset_edges,
            *connect_front('concat', '0:fully_connected'),
            *connect_front('weights', '1:fully_connected'),
            *connect_front('biases', '2:fully_connected'),
            *connect_front('fully_connected', 'result')
        ], nodes_with_edges_only=True)
        TdnnComponentReplacer().find_and_replace_pattern(graph)
        (flag, resp) = compare_graphs(graph, ref_graph, 'result', check_op_attrs=True)
        self.assertTrue(flag, resp)
| 2.0625 | 2 |
examples/matching_queries_to_indexes/main.py | dhermes/endpoints-proto-datastore | 91 | 12760763 | <filename>examples/matching_queries_to_indexes/main.py
# If you have not yet seen the source in basic_with_auth/main.py and
# paging/main.py, please take a look.
# In this sample we use a custom Enum for the "order" property in queries
# to strictly control the indexes used and make sure we have corresponding
# indexes created in index.yaml.
import endpoints
from google.appengine.ext import ndb
# This import allows us to define our own Enum using the ProtoRPC messages
# library. This is not usually needed, since EndpointsModel handles message
# definition, but in this case it is.
from protorpc import messages
from protorpc import remote
# We import EndpointsAliasProperty so that we can define our own helper property
# similar to the properties "id", "entityKey", "limit", "order" and "pageToken"
# provided by EndpointsModel.
from endpoints_proto_datastore.ndb import EndpointsAliasProperty
from endpoints_proto_datastore.ndb import EndpointsModel
# This is an Enum used to strictly define which order values are allowed.
# In this case, we are only allowing two query orders and have an enum value
# corresponding to each.
class Order(messages.Enum):
    """Allowed sort orders for MyModel queries (see MyModel.OrderSet)."""
    MYFIRST = 1
    MYSECOND = 2
class MyModel(EndpointsModel):
    """Datastore model whose query order is restricted to the Order enum."""
    # As in simple_get/main.py, by setting _message_fields_schema, we can set a
    # custom ProtoRPC message schema. We set the schema to the four properties
    # corresponding to the NDB properties and exclude the fifth property, which is
    # the alias property "order". Though the helper property "order" from
    # EndpointsModel is not included in the message schema, since we define our
    # own "order", this would be included if we did not define our own schema.
    _message_fields_schema = ('attr1', 'attr2', 'owner', 'created')

    # The properties attr1 and attr2 are required here so that all entities will
    # have values for performing queries.
    attr1 = ndb.StringProperty(required=True)
    attr2 = ndb.StringProperty(required=True)
    created = ndb.DateTimeProperty(auto_now_add=True)
    # As in basic_with_auth/main.py, an owner property is used and each entity
    # created will have the current user saved as the owner. As with attr1 and
    # attr2 above, we are also requiring the owner field so we can use it for
    # queries too.
    owner = ndb.UserProperty(required=True)

    # This is a setter which will be used by the helper property "order", which we
    # are overriding here. The setter used for that helper property is also named
    # OrderSet. This method will be called when order is set from a ProtoRPC
    # query request.
    def OrderSet(self, value):
        """Translate an Order enum value into the underlying string order."""
        # Since we wish to control which queries are made, we only accept values
        # from our custom Enum type Order.
        if not isinstance(value, Order):
            raise TypeError('Expected an enum, received: %s.' % (value,))
        # For MYFIRST, we order by attr1.
        if value == Order.MYFIRST:
            # Use the method OrderSet from the parent class to set the string value
            # based on the enum.
            super(MyModel, self).OrderSet('attr1')
        # For MYSECOND, we order by attr2, but in descending order.
        elif value == Order.MYSECOND:
            # Use the method OrderSet from the parent class to set the string value
            # based on the enum.
            super(MyModel, self).OrderSet('-attr2')
        # For either case, the order used here will be combined with an equality
        # filter based on the current user, and we have the corresponding indexes
        # specified in index.yaml so no index errors are experienced by our users.
        # If the value is not a valid Enum value, raise a TypeError. This should
        # never occur since value is known to be an instance of Order.
        else:
            raise TypeError('Unexpected value of Order: %s.' % (value,))

    # This EndpointsAliasProperty is our own helper property and overrides the
    # original "order". We specify the setter as the function OrderSet which we
    # just defined. The property_type is the class Order and the default value of
    # the alias property is MYFIRST.
    # Endpoints alias properties must have a corresponding property type, which
    # can be either a ProtoRPC field or a ProtoRPC message class or enum class.
    # Here, by providing a property type of Order, we aid in the creation of a
    # field corresponding to this property in a ProtoRPC message schema.
    # The EndpointsAliasProperty can be used as a decorator as is done here, or
    # can be used in the same way NDB properties are, e.g.
    #   attr1 = ndb.StringProperty()
    # and the similar
    #   order = EndpointsAliasProperty(OrderGet, setter=OrderSet, ...)
    # where OrderGet would be the function defined here.
    @EndpointsAliasProperty(setter=OrderSet, property_type=Order,
                            default=Order.MYFIRST)
    def order(self):
        """Getter for the Enum-typed order alias property."""
        # We only need to limit the values to Order enums, so we can use the getter
        # from the helper property with no changes.
        return super(MyModel, self).order
# Since we are using auth, we want to test with the Google APIs Explorer:
# https://developers.google.com/apis-explorer/
# By default, if allowed_client_ids is not specified, this is enabled by
# default. If you specify allowed_client_ids, you'll need to include
# endpoints.API_EXPLORER_CLIENT_ID in this list. This is necessary for auth
# tokens obtained by the API Explorer (on behalf of users) to be considered
# valid by our API.
@endpoints.api(name='myapi', version='v1', description='My Little API')
class MyApi(remote.Service):
    """Endpoints service exposing insert and (enum-ordered) list methods."""

    # We use specify that request_fields is ('attr1', 'attr2') because the
    # created value is set when the entity is put to the datastore and the owner
    # is set from the current user. As in basic_with_auth, since user_required is
    # set to True, the current user will always be valid.
    # Since no response_fields are set, the four fields from
    # _message_fields_schema will be sent in the response.
    @MyModel.method(request_fields=('attr1', 'attr2'),
                    user_required=True,
                    path='mymodel', http_method='POST', name='mymodel.insert')
    def MyModelInsert(self, my_model):
        """Store a new entity owned by the authenticated caller."""
        my_model.owner = endpoints.get_current_user()
        my_model.put()
        return my_model

    # As in paging/main.py, we use the fields limit, order and pageToken for
    # paging, but here "order" is the Enum-based property we defined above. As
    # mentioned in the definition of OrderSet, these order values are coupled with
    # the filter for current user.
    # Since no collection_fields are set, each value in "items" in the response
    # will use the four fields from _message_fields_schema.
    @MyModel.query_method(query_fields=('limit', 'order', 'pageToken'),
                          user_required=True,
                          path='mymodels', name='mymodel.list')
    def MyModelList(self, query):
        """List the caller's own entities, paged and ordered by `order`."""
        # Current user is valid since user_required is set to True.
        return query.filter(MyModel.owner == endpoints.get_current_user())
application = endpoints.api_server([MyApi], restricted=False)
| 2.328125 | 2 |
misc/Prague19/build/app3.py | NimbleStorage/container-examples | 6 | 12760764 | #!/usr/bin/env python
import datetime
import os
import sys
import time
from os import path
# Ensure the shared data directory exists on first run.
if not os.path.exists('data'):
    os.mkdir('data')

while True:
    # One log line per second: timestamp, hostname, 1-minute load average.
    log = "%s logging app3 from %s LoadAVG-1m: %s\n" \
          % (datetime.datetime.now(), os.uname()[1], round(os.getloadavg()[0], 2))
    # Re-open per iteration so each line is flushed immediately; `with`
    # guarantees the handle is closed even if write() raises.
    with open("data/log.txt", "a") as f:
        f.write(log)
    # sys.stdout.write keeps the "no extra newline" semantics of the original
    # Python-2-only `print log,` while being valid on Python 2 and 3.
    sys.stdout.write(log)
    time.sleep(1)
| 2.609375 | 3 |
classify_tests.py | likedan/keras_lrp | 6 | 12760765 | <reponame>likedan/keras_lrp<gh_stars>1-10
# go over the models dir and classify everything
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import csv, os
import math
from PIL import Image
from keras.models import load_model
from tensorflow.python.keras.models import load_model
from lrp.LRPModel import LRPModel
from keras.applications import *
# Images are fed to the models at a fixed 512x512 RGB resolution.
img_size = 512
img_size_flat = img_size * img_size * 3
img_shape_full = (img_size, img_size, 3)
# When False, models whose formatted test results already exist are skipped.
replace = True
def classify_model(class_name, model_path):
    """Load the Keras model stored under models/<class_name>/<model_path>,
    run LRP over every test image of that class, save the relevance maps
    as .npy files and show them with matplotlib.
    """
    model_path = os.path.join("models", class_name, model_path)
    files = os.listdir(model_path)
    model_file_name = None
    weight_file_name = None
    # Locate the architecture (.h5) and weights (.hdf5) files in the folder.
    for file in files:
        if len(file) > 5:
            if file[-3:] == ".h5":
                model_file_name = file
            if file[-5:] == ".hdf5":
                weight_file_name = file
    if model_file_name == None or weight_file_name == None:
        print(model_path, " model file or weight file missing")
        return
    model = load_model(os.path.join(model_path, model_file_name))
    model.load_weights(os.path.join(model_path, weight_file_name))
    # input_tensor = Input(shape=self.img_shape_full)
    # model = vgg16.VGG16(input_tensor=input_tensor, weights='imagenet', include_top=False
    model = LRPModel(model)
    # NOTE(review): this skip-check runs *after* the model has already been
    # loaded; moving it to the top of the function would avoid the load.
    if replace == False and os.path.exists(os.path.join(model_path, 'test_results_formatted.csv')):
        return
    rows = []
    with open('rank/Tests/question.csv', 'r') as csvfile:
        reader = csv.reader(csvfile)
        tests = []
        for row in reader:
            # Only process test rows belonging to this class.
            if row[1] != class_name:
                continue
            image = Image.open("rank/" + row[0])
            plt.imshow(image)
            plt.show()
            img_array = np.asarray(image)
            if img_array.shape != img_shape_full:
                image = image.resize((img_size, img_size), Image.ANTIALIAS)
                img_array = np.asarray(image)
            tests.append(img_array/ 255)  # NOTE(review): collected but never used
            lrp_result = model.perform_lrp(np.array([img_array / 255]))
            np.save(row[0].replace("/", "-"), lrp_result[0, :, :, :])
            # Double log compresses the relevance dynamic range for display.
            plt.imshow(np.log(np.log(np.abs(lrp_result[0, :, :, :]))), cmap='jet')
            plt.show()
            rows.append(row)
    print("Finished Classifying: ", model_path)
classify_model("neckline_design_labels", "InceptionV3-85") | 2.3125 | 2 |
aulaspythonintermediario/aula26/aula26.py | lel352/Curso-Python | 1 | 12760766 | <filename>aulaspythonintermediario/aula26/aula26.py
# Lesson 26 - Creating, reading, writing and deleting files
# basic form
import os
import json
file = open('abc.txt', 'w+')
file.write('linha 1\n')
file.write('linha 2\n')
file.write('linha 3\n')
print('Lendo Linha')
file.seek(0, 0)  # seek back to the start of the file
print(file.read())
print('#'*25)
file.seek(0, 0)  # seek back to the start of the file
print(file.readline(), end='')  # reading line by line
print(file.readline(), end='')  # reading line by line
print(file.readline(), end='')  # reading line by line
print('#'*25)
file.seek(0, 0)
print(file.readlines())
print('#'*25)
file.seek(0, 0)
for linha in file.readlines():
    print(linha, end='')
print('#'*25)
file.seek(0, 0)
for linha in file.readlines():
    print(linha, end='')
file.close()
print('*'*25)
try:
    file = open('abc2.txt', 'w+')
    file.write('linha')
    file.seek(0)
    print(file.read())
finally:
    file.close()
print('*'*25)
# The usual Python idiom:
# no need to close the file explicitly, the context manager closes it
with open('abc3.txt', 'w+') as file:
    file.write('linha 1 \n')
    file.write('linha 2 \n')
    file.write('linha 3 \n')
    file.seek(0)
    print(file.read())
print('*'*25)
with open('abc3.txt', 'r') as file:
    print(file.read())
print('*'*25)
# append mode does not erase the existing contents
with open('abc3.txt', 'a+') as file:
    file.write('outra linha')
    file.seek(0)
    print(file.read())
print('*'*25)
with open('abc3.txt', 'r') as file:
    print(file.read())
os.remove('abc.txt')
print('*'*25)
d1 = {
    'Pessoa 1': {
        'nome': 'Luiz',
        'idade': 25,
    },
    'Pessoa 2': {
        'nome': 'Rose',
        'idade': 30,
    }
}
# Serialize the dict to a pretty-printed JSON string and save it.
d1_json = json.dumps(d1, indent=True)
print(d1_json)
with open('abc.json', 'w+') as file:
    file.write(d1_json)
| 4.1875 | 4 |
snsapi/third/douban_client/api/base.py | hupili/snsapi | 51 | 12760767 | # -*- coding: utf-8 -*-
import functools

from pyoauth2 import AccessToken

from .error import DoubanAPIError, DoubanOAuthError
# Defaults for list-style API calls: start offset and item count.
DEFAULT_START = 0
DEFAULT_COUNT = 20
def check_execption(func):
    """Decorator for AccessToken HTTP calls: raise on HTTP errors, return
    the parsed response body on success.

    Raises DoubanOAuthError for 403 responses and DoubanAPIError for any
    other status >= 400.  When the body is empty, the raw (falsy) body is
    returned unchanged.  (The misspelled name is kept — it is part of the
    module's public surface.)
    """
    @functools.wraps(func)  # preserve the wrapped method's name/docstring
    def _check(*arg, **kws):
        resp = func(*arg, **kws)
        if resp.status >= 400:
            if resp.status == 403:
                raise DoubanOAuthError(401, 'UNAUTHORIZED')
            else:
                raise DoubanAPIError(resp)
        body = resp.body
        if body:
            return resp.parsed
        return body
    return _check
class DoubanAPIBase(object):
    """Base class for Douban API clients.

    Wraps an OAuth2 ``AccessToken`` and exposes checked HTTP-verb helpers;
    each helper raises on HTTP errors via the ``check_execption`` decorator.
    """

    def __init__(self, access_token):
        self.access_token = access_token
        # Fail fast if the caller did not supply a real AccessToken.
        if not isinstance(self.access_token, AccessToken):
            raise DoubanOAuthError(401, 'UNAUTHORIZED')

    def __repr__(self):
        return '<DoubanAPI Base>'

    @check_execption
    def _get(self, url, **opts):
        return self.access_token.get(url, **opts)

    @check_execption
    def _post(self, url, **opts):
        return self.access_token.post(url, **opts)

    @check_execption
    def _put(self, url, **opts):
        return self.access_token.put(url, **opts)

    @check_execption
    def _patch(self, url, **opts):
        return self.access_token.patch(url, **opts)

    @check_execption
    def _delete(self, url, **opts):
        return self.access_token.delete(url, **opts)
| 2.359375 | 2 |
Arrays/PickingNumbers.py | d3xt3r0/Data-Structures-And-Algorithms | 4 | 12760768 |
# hackerrank problem of problem solving
# problem statement : Picking Numbers
def pickingNumbers(arr):
    """Return the size of the largest subset of ``arr`` in which any two
    elements differ by at most 1 (HackerRank "Picking Numbers").

    BUG FIX: the previous sliding-window version only measured contiguous
    runs whose *adjacent* elements differed by <= 1 and then added 1 to the
    run length. That over-counted, e.g. [1, 2, 1, 2, 1, 3] returned 6 even
    though the element 3 cannot belong to the subset. A valid subset can
    only contain two consecutive values v and v + 1, so counting value
    frequencies is both correct and O(n).
    """
    from collections import Counter  # local import: this module has no import section

    if not arr:
        return 0  # empty input -> empty subset

    counts = Counter(arr)
    # The best subset pairs each value v with its neighbour v + 1.
    return max(counts[v] + counts[v + 1] for v in counts)
# Smoke test: prints the answer for the HackerRank sample input.
if __name__ == "__main__" :
    print(pickingNumbers([1, 2, 2, 3, 1, 2]))
| 3.8125 | 4 |
src/lingcomp/similarity/enums.py | CharlottePouw/interpreting-complexity | 2 | 12760769 | from enum import Enum
class SimilarityFunction(Enum):
    """Supported representation-similarity measures."""
    RSA = "RSA"
    PWCCA = "PWCCA"
class SimilarityStrategy(Enum):
    """How token-level representations are pooled before comparison."""
    CLS_TOKEN = "cls_token"
    REDUCE_MEAN = "reduce_mean"
    PER_TOKEN = "per_token"
class SimilarityPlotMode(Enum):
    """Plotting mode: similarity across models (inter) or within one (intra)."""
    INTER = "inter"
    INTRA = "intra"
| 3.359375 | 3 |
c2t/config.py | ilil01/qdt | 29 | 12760770 | __all__ = [
"C2TConfig"
, "Run"
, "get_new_rsp"
, "DebugClient"
, "DebugServer"
, "TestBuilder"
]
from collections import (
namedtuple
)
from common import (
pypath
)
with pypath("..pyrsp"):
from pyrsp.rsp import (
RSP,
archmap
)
# CPU Testing Tool (c2t) configuration components.

# Top-level configuration: ties together the RSP target description, the
# QEMU and gdbserver launch settings and both compiler definitions.
C2TConfig = namedtuple(
    "C2TConfig",
    "rsp_target qemu gdbserver target_compiler oracle_compiler"
)
# A single command invocation: executable plus its argument string.
Run = namedtuple(
    "Run",
    "executable args"
)
def get_new_rsp(regs, pc, regsize, little_endian = True):
    """Build a pyrsp ``RSP`` subclass describing a custom architecture.

    regs -- ordered register names of the target
    pc -- name of the program-counter register
    regsize -- register width in bits
    little_endian -- target byte order (default: little-endian)

    Returns the subclass itself (not an instance).
    """
    class CustomRSP(RSP):
        def __init__(self, *a, **kw):
            # Describe the architecture before the base class initializes.
            self.arch = dict(
                regs = regs,
                endian = little_endian,
                bitsize = regsize
            )
            self.pc_reg = pc
            super(CustomRSP, self).__init__(*a, **kw)
    return CustomRSP
class DebugClient(object):
    """Settings for the debugged (client) side of a c2t test run."""

    def __init__(self, march, new_rsp = None, user = False, sp = None,
        qemu_reset = False, test_timeout = 10.0
    ):
        self.march = march
        # Prefer a stock pyrsp class for known architectures; fall back to
        # a caller-provided subclass (see `get_new_rsp`), else None.
        if march in archmap:
            self.rsp = archmap[march]
        elif new_rsp is not None:
            self.rsp = new_rsp
        else:
            self.rsp = None
        self.user = user                  # user-mode emulation flag
        self.sp = sp                      # stack pointer register name, if any
        self.qemu_reset = qemu_reset      # reset QEMU between tests
        self.test_timeout = test_timeout  # per-test timeout in seconds
class DebugServer(object):
    """Wraps the command line used to launch a debug server."""

    def __init__(self, run):
        # `run` is a sequence of command tokens (executable followed by args).
        self.run = run

    @property
    def run_script(self):
        """Shell-ready command string built from the stored tokens."""
        return ' '.join(self.run)
class TestBuilder(tuple):
    """Immutable collection of runs, each a sequence of command tokens."""

    def __new__(cls, *runs):
        return tuple.__new__(cls, runs)

    # TODO: how to operate without runs?
    @property
    def run_script(self):
        """Yield every run joined into a single command string."""
        for entry in self:
            yield ' '.join(entry)
| 1.929688 | 2 |
regress.py | roumenguha/ECE143-Final-Project | 0 | 12760771 | <reponame>roumenguha/ECE143-Final-Project
import numpy as np
def polyfit(x, y, degree):
    """Least-squares polynomial regression of ``y`` on ``x``.

    x -- independent values (sequence)
    y -- dependent values (sequence)
    degree -- order of the fitted polynomial

    Returns a dict with 'polynomial' (coefficients, highest order first)
    and 'determination' (the R-squared of the fit).
    """
    coeffs = np.polyfit(x, y, degree)
    poly = np.poly1d(coeffs)

    # R^2 = explained sum of squares over total sum of squares.
    predicted = poly(x)
    mean_y = np.sum(y) / len(y)
    ss_reg = np.sum((predicted - mean_y) ** 2)
    ss_tot = np.sum((y - mean_y) ** 2)

    return {
        'polynomial': coeffs.tolist(),
        'determination': ss_reg / ss_tot,
    }
def regress(i, out, deg):
    """Fit a polynomial and prepare data for plotting it.

    i -- independent values
    out -- dependent values
    deg -- order of the polynomial fitted to i -> out

    Returns (label, poly, xs, ys): an 'r_sq: ...' label, the fitted
    np.poly1d, sample points spanning the input range, and the fitted
    values at those points.
    """
    fit = polyfit(i, out, deg)
    label = 'r_sq: %.4f' % fit['determination']
    poly = np.poly1d(fit['polynomial'])
    xs = np.arange(min(i), max(i), 0.01)
    ys = poly(xs)
    return label, poly, xs, ys
| 3.984375 | 4 |
day 22/Thibaut - python/day22.py | AE-nv/aedvent-code-2021 | 1 | 12760772 | <reponame>AE-nv/aedvent-code-2021
import numpy as np
def parseLine(l):
    """Parse one reboot-step line, e.g. 'on x=-5..5,y=0..3,z=1..2'.

    Every bound is shifted by +50 so the -50..50 problem region maps onto
    the 0..100 array indices. Returns (command, (x_bounds, y_bounds,
    z_bounds)) where each bounds entry is a [low, high] list.
    """
    action, coords = l.strip().split(' ')
    ranges = [
        [int(edge) + 50 for edge in part.split('=')[1].split('..')]
        for part in coords.split(',')
    ]
    return (action, tuple(ranges))
def setCuboid(core, bounds, set):
    """Set every grid cell of ``core`` inside ``bounds`` to ``set`` (0 or 1).

    core -- the (101, 101, 101) numpy reactor grid
    bounds -- ((x0, x1), (y0, y1), (z0, z1)), inclusive, already shifted
              into index space by parseLine
    set -- value to write into the covered cells

    Cells outside the 0..100 grid are ignored, exactly as the original
    per-cell bounds checks did. PERF: the previous triple Python loop
    tested every cell individually; clipping each axis once and using a
    single NumPy slice assignment updates the whole box at C speed.
    """
    (x0, x1), (y0, y1), (z0, z1) = bounds
    # Clip to the grid; an empty (inverted) range after clipping means the
    # cuboid lies entirely outside and nothing is written.
    x0, y0, z0 = max(x0, 0), max(y0, 0), max(z0, 0)
    x1, y1, z1 = min(x1, 100), min(y1, 100), min(z1, 100)
    if x0 <= x1 and y0 <= y1 and z0 <= z1:
        core[x0:x1 + 1, y0:y1 + 1, z0:z1 + 1] = set
    return core
def coreInBounds(bounds):
    """Return True when the cuboid overlaps the 0..100 grid on every axis."""
    return all(upper >= 0 and lower <= 100 for lower, upper in bounds)
if __name__ == '__main__':
    # BUG FIX: the final line of this block had stray dataset metadata
    # ("| 2.8125 | 3") fused onto it, which made the file a syntax error.
    with open("input.txt", 'r') as f:
        data = f.readlines()

    instructions = [parseLine(l) for l in data]
    print(instructions)

    # 101^3 grid covering coordinates -50..50 (shifted +50 by parseLine).
    core = np.zeros((101, 101, 101))
    i = 1
    for command, bounds in instructions:
        if coreInBounds(bounds):
            set = 0
            if command == 'on':
                set = 1
            core = setCuboid(core, bounds, set)
        print("completed instruction ", i)
        i += 1
    # Part-one answer: how many cubes are on.
    print(np.sum(core))
tests/test_result_faces.py | Data-Only-Greater/SNL-Delft3D-CEC-Verify | 0 | 12760773 | # -*- coding: utf-8 -*-
import warnings
import numpy as np
import pandas as pd
import pytest
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
import xarray as xr
from snl_d3d_cec_verify.cases import CaseStudy
from snl_d3d_cec_verify.result.faces import (_check_case_study,
_faces_frame_to_slice,
_faces_frame_to_depth,
_map_to_faces_frame_with_tke,
_map_to_faces_frame,
_get_quadrilateral_centre,
_FMFaces,
_trim_to_faces_frame,
_StructuredFaces)
def test_check_case_study_error():
    # Faces helpers only accept single-valued case studies; a multi-value
    # CaseStudy must raise with a descriptive message.
    case = CaseStudy(dx=[1, 2, 3])
    with pytest.raises(ValueError) as excinfo:
        _check_case_study(case)
    assert "case study must have length one" in str(excinfo)
@pytest.fixture
def faces_frame_fm(data_dir):
    # Canned flexible-mesh (FM) faces frame; only rows from the final
    # recorded time step are returned.
    csv_path = data_dir / "output" / "faces_frame_fm.csv"
    frame = pd.read_csv(csv_path, parse_dates=["time"])
    times = frame.time.unique()
    return frame[frame.time == times[-1]]
@pytest.fixture
def faces_frame_structured(data_dir):
    # Structured-grid counterpart of faces_frame_fm; final time step only.
    csv_path = data_dir / "output" / "faces_frame_structured.csv"
    frame = pd.read_csv(csv_path, parse_dates=["time"])
    times = frame.time.unique()
    return frame[frame.time == times[-1]]
def test_faces_frame_to_slice_sigma(faces_frame_fm):
ts = pd.Timestamp("2001-01-01 01:00:00")
sigma = -0.5
ds = _faces_frame_to_slice(faces_frame_fm, ts, "sigma", sigma)
assert isinstance(ds, xr.Dataset)
assert len(ds["$x$"]) == 18
assert len(ds["$y$"]) == 4
assert np.isclose(ds["$x$"].min(), 0.5)
assert np.isclose(ds["$x$"].max(), 17.5)
assert np.isclose(ds["$y$"].min(), 1.5)
assert np.isclose(ds["$y$"].max(), 4.5)
assert ds[r"$\sigma$"].values.take(0) == sigma
assert ds.time.values.take(0) == ts
assert ds["$z$"].min() > -1.0012
assert ds["$z$"].max() < -1
# Same bounds as the frame
assert ds["$u$"].min() >= faces_frame_fm["u"].min()
assert ds["$u$"].max() <= faces_frame_fm["u"].max()
assert ds["$v$"].min() >= faces_frame_fm["v"].min()
assert ds["$v$"].max() <= faces_frame_fm["v"].max()
assert ds["$w$"].min() >= faces_frame_fm["w"].min()
assert ds["$w$"].max() <= faces_frame_fm["w"].max()
def test_faces_frame_structured_to_slice_sigma(faces_frame_structured):
ts = pd.Timestamp("2001-01-01 01:00:00")
sigma = -0.75
ds = _faces_frame_to_slice(faces_frame_structured, ts, "sigma", sigma)
assert isinstance(ds, xr.Dataset)
assert len(ds["$x$"]) == 18
assert len(ds["$y$"]) == 4
assert np.isclose(ds["$x$"].min(), 0.5)
assert np.isclose(ds["$x$"].max(), 17.5)
assert np.isclose(ds["$y$"].min(), 1.5)
assert np.isclose(ds["$y$"].max(), 4.5)
assert ds[r"$\sigma$"].values.take(0) == sigma
assert ds.time.values.take(0) == ts
assert ds["$z$"].min() > -1.504
assert ds["$z$"].max() < -1.5
# Same bounds as the frame
assert ds["$u$"].min() >= faces_frame_structured["u"].min()
assert ds["$u$"].max() <= faces_frame_structured["u"].max()
assert ds["$v$"].min() >= faces_frame_structured["v"].min()
assert ds["$v$"].max() <= faces_frame_structured["v"].max()
assert ds["$w$"].min() >= faces_frame_structured["w"].min()
assert ds["$w$"].max() <= faces_frame_structured["w"].max()
assert ds["$k$"].min() >= 0
assert ds["$k$"].min() >= faces_frame_structured["tke"].min()
assert ds["$k$"].max() <= faces_frame_structured["tke"].max()
def test_faces_frame_to_slice_sigma_extrapolate_forward(faces_frame_fm):
ts = pd.Timestamp("2001-01-01 01:00:00")
sigma = 0.1
ds = _faces_frame_to_slice(faces_frame_fm, ts, "sigma", sigma)
assert ds["$z$"].min() > 0.2
assert ds["$z$"].max() < 0.2003
def test_faces_frame_to_slice_sigma_extrapolate_backward(faces_frame_fm):
ts = pd.Timestamp("2001-01-01 01:00:00")
sigma = -1.1
ds = _faces_frame_to_slice(faces_frame_fm, ts, "sigma", sigma)
assert ds["$z$"].min() > -2.203
assert ds["$z$"].max() < -2.2
def test_faces_frame_to_slice_z(faces_frame_fm):
ts = pd.Timestamp("2001-01-01 01:00:00")
z = -1
ds = _faces_frame_to_slice(faces_frame_fm, ts, "z", z)
assert isinstance(ds, xr.Dataset)
assert len(ds["$x$"]) == 18
assert len(ds["$y$"]) == 4
assert np.isclose(ds["$x$"].min(), 0.5)
assert np.isclose(ds["$x$"].max(), 17.5)
assert np.isclose(ds["$y$"].min(), 1.5)
assert np.isclose(ds["$y$"].max(), 4.5)
assert ds["$z$"].values.take(0) == z
assert ds.time.values.take(0) == ts
assert ds[r"$\sigma$"].values.min() >= -1
assert ds["$z$"].max() < 1.002
# Same bounds as the frame
assert ds["$u$"].min() >= faces_frame_fm["u"].min()
assert ds["$u$"].max() <= faces_frame_fm["u"].max()
assert ds["$v$"].min() >= faces_frame_fm["v"].min()
assert ds["$v$"].max() <= faces_frame_fm["v"].max()
assert ds["$w$"].min() >= faces_frame_fm["w"].min()
assert ds["$w$"].max() <= faces_frame_fm["w"].max()
def test_faces_frame_to_slice_error():
    # Only recognised slice keys are accepted; anything else must raise.
    with pytest.raises(RuntimeError) as excinfo:
        _faces_frame_to_slice("mock", "mock", "mock", "mock")
    assert "Given key is not valid" in str(excinfo)
def test_faces_frame_to_depth(faces_frame_fm):
ts = pd.Timestamp("2001-01-01 01:00:00")
da = _faces_frame_to_depth(faces_frame_fm, ts)
assert isinstance(da, xr.DataArray)
assert len(da["$x$"]) == 18
assert len(da["$y$"]) == 4
assert da.time.values.take(0) == ts
# Same bounds as the frame
assert da.min() >= faces_frame_fm["depth"].min()
assert da.max() <= faces_frame_fm["depth"].max()
def test_faces_frame_structured_to_depth(faces_frame_structured):
ts = pd.Timestamp("2001-01-01 01:00:00")
da = _faces_frame_to_depth(faces_frame_structured, ts)
assert isinstance(da, xr.DataArray)
assert len(da["$x$"]) == 18
assert len(da["$y$"]) == 4
assert da.time.values.take(0) == ts
# Same bounds as the frame
assert da.min() >= faces_frame_structured["depth"].min()
assert da.max() <= faces_frame_structured["depth"].max()
def test_faces_load_t_step_first(faces):
t_step = -1
expected_t_step = faces._resolve_t_step(t_step)
faces._load_t_step(t_step)
assert len(faces._frame) == 18 * 4 * 7
assert expected_t_step in faces._t_steps
assert faces._t_steps[expected_t_step] == \
pd.Timestamp('2001-01-01 01:00:00')
def test_faces_load_t_step_second(faces):
faces._load_t_step(-1)
faces._load_t_step(0)
assert len(faces._frame) == 18 * 4 * 7 * 2
assert len(faces._t_steps) == 2
assert set(faces._frame["time"]) == set([
pd.Timestamp('2001-01-01 01:00:00'),
pd.Timestamp('2001-01-01')])
def test_faces_load_t_step_no_repeat(faces):
faces._load_t_step(-1)
faces._load_t_step(1)
assert len(faces._frame) == 18 * 4 * 7
assert len(faces._t_steps) == 1
def test_faces_extract_depth(mocker, faces):
mock = mocker.patch('snl_d3d_cec_verify.result.faces.'
'_faces_frame_to_depth')
faces.extract_depth(-1)
mock.assert_called()
def test_faces_extract_sigma(mocker, faces):
mock = mocker.patch('snl_d3d_cec_verify.result.faces.'
'_faces_frame_to_slice')
faces.extract_sigma(-1, 0)
mock.assert_called()
assert 'sigma' in mock.call_args.args[2]
def test_faces_extract_sigma_interp(faces):
t_step = -1
sigma = -0.5
x = 1
y = 3
ds = faces.extract_sigma(t_step, sigma, x, y)
t_step = faces._resolve_t_step(t_step)
ts = faces._t_steps[t_step]
assert isinstance(ds, xr.Dataset)
assert ds[r"$\sigma$"].values.take(0) == sigma
assert ds.time.values.take(0) == ts
assert ds["$x$"].values.take(0) == x
assert ds["$y$"].values.take(0) == y
assert np.isclose(ds["$z$"].values, -1.00114767)
# Same bounds as the frame
assert (faces._frame["u"].min() <= ds["$u$"].values.take(0) <=
faces._frame["u"].max())
assert (faces._frame["v"].min() <= ds["$v$"].values.take(0) <=
faces._frame["v"].max())
assert (faces._frame["w"].min() <= ds["$w$"].values.take(0) <=
faces._frame["w"].max())
def test_faces_extract_z(mocker, faces):
mock = mocker.patch('snl_d3d_cec_verify.result.faces.'
'_faces_frame_to_slice')
faces.extract_z(-1, -1)
mock.assert_called()
assert 'z' in mock.call_args.args[2]
def test_faces_extract_z_interp(faces):
t_step = -1
z = -1
x = 1
y = 3
ds = faces.extract_z(t_step, z, x, y)
t_step = faces._resolve_t_step(t_step)
ts = faces._t_steps[t_step]
assert isinstance(ds, xr.Dataset)
assert ds["$z$"].values.take(0) == z
assert ds.time.values.take(0) == ts
assert ds["$x$"].values.take(0) == x
assert ds["$y$"].values.take(0) == y
assert np.isclose(ds[r"$\sigma$"].values, -0.49942682)
# Same bounds as the frame
assert (faces._frame["u"].min() <= ds["$u$"].values.take(0) <=
faces._frame["u"].max())
assert (faces._frame["v"].min() <= ds["$v$"].values.take(0) <=
faces._frame["v"].max())
assert (faces._frame["w"].min() <= ds["$w$"].values.take(0) <=
faces._frame["w"].max())
@pytest.mark.parametrize("x, y", [
("mock", None),
(None, "mock")])
def test_faces_extract_interp_error(faces, x, y):
with pytest.raises(RuntimeError) as excinfo:
faces.extract_z("mock", "mock", x, y)
assert "x and y must both be set" in str(excinfo)
def test_faces_extract_turbine_z(mocker, faces):
case = CaseStudy()
offset_z = 0.5
t_step = -1
mock = mocker.patch.object(faces, 'extract_z')
faces.extract_turbine_z(t_step, case, offset_z)
mock.assert_called_with(t_step, case.turb_pos_z + offset_z)
def test_faces_extract_turbine_centreline(mocker, faces):
case = CaseStudy()
t_step = -1
x_step = 0.5
offset_x = 0.5
offset_y = 0.5
offset_z = 0.5
mock = mocker.patch.object(faces, 'extract_z')
faces.extract_turbine_centreline(t_step,
case,
x_step,
offset_x,
offset_y,
offset_z)
mock.assert_called()
assert mock.call_args.args[0] == t_step
assert mock.call_args.args[1] == case.turb_pos_z + offset_z
x = mock.call_args.args[2]
y = mock.call_args.args[3]
assert min(x) == case.turb_pos_x + offset_x
assert max(x) <= faces.xmax
assert np.unique(np.diff(x)).take(0) == x_step
assert set(y) == set([case.turb_pos_y + offset_y])
def test_faces_extract_turbine_centre(mocker, faces):
case = CaseStudy()
t_step = -1
offset_x = 0.5
offset_y = 0.5
offset_z = 0.5
mock = mocker.patch.object(faces, 'extract_z')
faces.extract_turbine_centre(t_step,
case,
offset_x,
offset_y,
offset_z)
mock.assert_called()
assert mock.call_args.args[0] == t_step
assert mock.call_args.args[1] == case.turb_pos_z + offset_z
x = mock.call_args.args[2]
y = mock.call_args.args[3]
assert len(x) == 1
assert len(y) == 1
assert x[0] == case.turb_pos_x + offset_x
assert y[0] == case.turb_pos_y + offset_y
def test_map_to_faces_frame_with_tke(data_dir):
map_path = data_dir / "output" / "FlowFM_map.nc"
faces_frame = _map_to_faces_frame_with_tke(map_path, -1)
assert isinstance(faces_frame, pd.DataFrame)
assert len(faces_frame) == 18 * 4 * 7
assert faces_frame.columns.to_list() == ["x",
"y",
"z",
"sigma",
"time",
"depth",
"u",
"v",
"w",
"tke"]
assert np.isclose(faces_frame["x"].min(), 0.5)
assert np.isclose(faces_frame["x"].max(), 17.5)
assert np.isclose(faces_frame["y"].min(), 1.5)
assert np.isclose(faces_frame["y"].max(), 4.5)
assert -2.003 < faces_frame["z"].min() < -4 / 3
assert -2 / 3 < faces_frame["z"].max() <= 0
assert np.isclose(faces_frame["sigma"].unique(),
[-1.,
-0.83333333,
-0.66666667,
-0.5,
-0.33333333,
-0.16666667,
0.]).all()
assert set(faces_frame["time"]) == set([
pd.Timestamp('2001-01-01 01:00:00')])
assert faces_frame["depth"].min() > 2
assert faces_frame["depth"].max() < 2.003
assert faces_frame["u"].min() > 0.57
assert faces_frame["u"].max() < 0.9
assert faces_frame["v"].min() > -1e-15
assert faces_frame["v"].max() < 1e-15
assert faces_frame["w"].min() > -0.02
assert faces_frame["w"].max() < 0.02
assert faces_frame["tke"].min() > 0
assert faces_frame["tke"].max() < 0.0089
sigma_slice = _faces_frame_to_slice(faces_frame,
pd.Timestamp('2001-01-01 01:00:00'),
"sigma",
-0.75)
assert np.isclose(sigma_slice["$z$"].values.mean(), -1.5009617997833038)
assert round(sigma_slice["$k$"].values.mean(), 5) == 0.00627
def test_map_to_faces_frame_with_tke_none(data_dir):
map_path = data_dir / "output" / "FlowFM_map.nc"
faces_frame = _map_to_faces_frame_with_tke(map_path)
assert isinstance(faces_frame, pd.DataFrame)
assert len(faces_frame) == 18 * 4 * 7 * 2
assert faces_frame.columns.to_list() == ["x",
"y",
"z",
"sigma",
"time",
"depth",
"u",
"v",
"w",
"tke"]
assert np.isclose(faces_frame["x"].min(), 0.5)
assert np.isclose(faces_frame["x"].max(), 17.5)
assert np.isclose(faces_frame["y"].min(), 1.5)
assert np.isclose(faces_frame["y"].max(), 4.5)
assert -2.003 < faces_frame["z"].min() < -4 / 3
assert -2 / 3 < faces_frame["z"].max() <= 0
assert np.isclose(faces_frame["sigma"].unique(),
[-1.,
-0.83333333,
-0.66666667,
-0.5,
-0.33333333,
-0.16666667,
0.]).all()
assert set(faces_frame["time"]) == set([
pd.Timestamp('2001-01-01 00:00:00'),
pd.Timestamp('2001-01-01 01:00:00')])
assert faces_frame["depth"].min() > 1.998
assert faces_frame["depth"].max() < 2.003
assert faces_frame["u"].min() >= 0
assert faces_frame["u"].max() < 0.9
assert faces_frame["v"].min() > -1e-15
assert faces_frame["v"].max() < 1e-15
assert faces_frame["w"].min() > -0.02
assert faces_frame["w"].max() < 0.02
assert faces_frame["tke"].min() > 0
assert faces_frame["tke"].max() < 0.0089
def test_map_to_faces_frame(data_dir):
map_path = data_dir / "output" / "FlowFM_map.nc"
faces_frame = _map_to_faces_frame(map_path, -1)
assert isinstance(faces_frame, pd.DataFrame)
assert len(faces_frame) == 216
assert faces_frame.columns.to_list() == ["x",
"y",
"z",
"sigma",
"time",
"depth",
"u",
"v",
"w"]
assert np.isclose(faces_frame["x"].min(), 0.5)
assert np.isclose(faces_frame["x"].max(), 17.5)
assert np.isclose(faces_frame["y"].min(), 1.5)
assert np.isclose(faces_frame["y"].max(), 4.5)
assert -2 < faces_frame["z"].min() < -4 / 3
assert -2 / 3 < faces_frame["z"].max() < 0
assert (faces_frame["sigma"].unique() == (-0.8333333333333334,
-0.5,
-0.16666666666666669)).all()
assert set(faces_frame["time"]) == set([
pd.Timestamp('2001-01-01 01:00:00')])
assert faces_frame["depth"].min() > 2
assert faces_frame["depth"].max() < 2.003
assert faces_frame["u"].min() > 0.6
assert faces_frame["u"].max() < 0.9
assert faces_frame["v"].min() > -1e-15
assert faces_frame["v"].max() < 1e-15
assert faces_frame["w"].min() > -0.02
assert faces_frame["w"].max() < 0.02
sigma_slice = _faces_frame_to_slice(faces_frame,
pd.Timestamp('2001-01-01 01:00:00'),
"sigma",
-0.75)
assert np.isclose(sigma_slice["$z$"].values.mean(), -1.5009617997833038)
def test_map_to_faces_frame_none(data_dir):
map_path = data_dir / "output" / "FlowFM_map.nc"
faces_frame = _map_to_faces_frame(map_path)
assert isinstance(faces_frame, pd.DataFrame)
assert len(faces_frame) == 432
assert faces_frame.columns.to_list() == ["x",
"y",
"z",
"sigma",
"time",
"depth",
"u",
"v",
"w"]
assert np.isclose(faces_frame["x"].min(), 0.5)
assert np.isclose(faces_frame["x"].max(), 17.5)
assert np.isclose(faces_frame["y"].min(), 1.5)
assert np.isclose(faces_frame["y"].max(), 4.5)
assert -2 < faces_frame["z"].min() < -4 / 3
assert -2 / 3 < faces_frame["z"].max() < 0
assert (faces_frame["sigma"].unique() == (-0.8333333333333334,
-0.5,
-0.16666666666666669)).all()
assert set(faces_frame["time"]) == set([
pd.Timestamp('2001-01-01 00:00:00'),
pd.Timestamp('2001-01-01 01:00:00')])
assert faces_frame["depth"].min() >= 2
assert faces_frame["depth"].max() < 2.003
assert faces_frame["u"].min() >= 0.
assert faces_frame["u"].max() < 0.9
assert faces_frame["v"].min() > -1e-15
assert faces_frame["v"].max() < 1e-15
assert faces_frame["w"].min() > -0.02
assert faces_frame["w"].max() < 0.02
def test_get_quadrilateral_centre():
    # The centre value of a quadrilateral is the mean of its corner values.
    densities = np.array([0, 0, 1, 1])
    result = _get_quadrilateral_centre(densities)
    assert result == 0.5
def test_FMFaces(mocker):
    # _FMFaces must delegate frame loading to the FM map reader with the
    # requested path and time step.
    mock = mocker.patch(
        'snl_d3d_cec_verify.result.faces._map_to_faces_frame_with_tke',
        autospec=True)
    path = "mock"
    tstep = 0
    test = _FMFaces(path, 2, 18)
    test._get_faces_frame(tstep)
    mock.assert_called_with(path, tstep)
def test_trim_to_faces_frame(data_dir):
trim_path = data_dir / "output" / "trim-D3D.nc"
faces_frame = _trim_to_faces_frame(trim_path, -1)
assert isinstance(faces_frame, pd.DataFrame)
assert len(faces_frame) == 216
assert faces_frame.columns.to_list() == ["x",
"y",
"z",
"sigma",
"time",
"depth",
"u",
"v",
"w",
"tke"]
assert np.isclose(faces_frame["x"].min(), 0.5)
assert np.isclose(faces_frame["x"].max(), 17.5)
assert np.isclose(faces_frame["y"].min(), 1.5)
assert np.isclose(faces_frame["y"].max(), 4.5)
assert -2 < faces_frame["z"].min() < -4 / 3
assert -2 / 3 < faces_frame["z"].max() < 0
assert np.isclose(faces_frame["sigma"].unique(),
(-0.16666667, -0.5, -0.83333331)).all()
assert set(faces_frame["time"]) == set([
pd.Timestamp('2001-01-01 01:00:00')])
assert faces_frame["depth"].min() > 2
assert faces_frame["depth"].max() < 2.005
assert faces_frame["u"].min() > 0.6
assert faces_frame["u"].max() < 0.9
assert faces_frame["v"].min() > -1e-2
assert faces_frame["v"].max() < 1e-2
assert faces_frame["w"].min() > -0.03
assert faces_frame["w"].max() < 0.02
assert faces_frame["tke"].min() > 0
assert faces_frame["tke"].max() < 0.004
def test_trim_to_faces_frame_none(data_dir):
trim_path = data_dir / "output" / "trim-D3D.nc"
faces_frame = _trim_to_faces_frame(trim_path)
assert isinstance(faces_frame, pd.DataFrame)
assert len(faces_frame) == 432
assert faces_frame.columns.to_list() == ["x",
"y",
"z",
"sigma",
"time",
"depth",
"u",
"v",
"w",
"tke"]
assert np.isclose(faces_frame["x"].min(), 0.5)
assert np.isclose(faces_frame["x"].max(), 17.5)
assert np.isclose(faces_frame["y"].min(), 1.5)
assert np.isclose(faces_frame["y"].max(), 4.5)
assert -2 < faces_frame["z"].min() < -4 / 3
assert -2 / 3 < faces_frame["z"].max() < 0
assert np.isclose(faces_frame["sigma"].unique(),
(-0.16666667, -0.5, -0.83333331)).all()
assert set(faces_frame["time"]) == set([
pd.Timestamp('2001-01-01 00:00:00'),
pd.Timestamp('2001-01-01 01:00:00')])
assert faces_frame["depth"].min() >= 2
assert faces_frame["depth"].max() < 2.005
assert faces_frame["u"].min() >= 0.
assert faces_frame["u"].max() < 0.9
assert faces_frame["v"].min() > -1e-2
assert faces_frame["v"].max() < 1e-2
assert faces_frame["w"].min() > -0.03
assert faces_frame["w"].max() < 0.02
assert faces_frame["tke"].min() > 0
assert faces_frame["tke"].max() < 0.004
def test_StructuredFaces(mocker):
    # _StructuredFaces must delegate frame loading to the trim-file reader
    # with the requested path and time step.
    mock = mocker.patch('snl_d3d_cec_verify.result.faces._trim_to_faces_frame',
                        autospec=True)
    path = "mock"
    tstep = 0
    test = _StructuredFaces(path, 2, 18)
    test._get_faces_frame(tstep)
    mock.assert_called_with(path, tstep)
| 2.28125 | 2 |
2153/2153.py3.py | isac322/BOJ | 14 | 12760774 | st = input()
sum = 0
for s in st:
t = ord(s)
if t >= ord('a'):
sum += t - ord('a') + 1
else:
sum += t - ord('A') + 27
ii = 2
for i in range(2, sum + 1):
if sum % i == 0:
ii = i
break
if ii == sum or sum == 1 :
print('It is a prime word.')
else :
print('It is not a prime word.') | 3.640625 | 4 |
lacrm/__init__.py | HighMileage/lacrm | 1 | 12760775 | <gh_stars>1-10
"lacrm package"
from lacrm.api import Lacrm # noqa
| 0.882813 | 1 |
test_typeconverter.py | Hardtack/TypeConverter | 0 | 12760776 | <filename>test_typeconverter.py
# -*- encoding: utf-8 -*-
import sys
import pytest
import typeconverter
# Python 2/3 compatibility shims. The PY2/PY3 sentinels let tests branch
# with a plain `if PY2:` / `if PY3:`; string_basetype and to_str paper over
# the str/unicode split.
if sys.version_info < (3, 0, 0):
    PY3 = None
    PY2 = object()
    string_basetype = basestring
    def to_str(x):
        # Python 2: produce unicode text.
        return unicode(x)
else:
    PY3 = object()
    PY2 = None
    string_basetype = str
    def to_str(x):
        # Python 3: str is already unicode.
        return str(x)
def test_converter():
    # End-to-end check of a Converter targeting strings: per-type handlers,
    # a default fallback, and recursive conversion of container elements.
    converter = typeconverter.Converter(string_basetype)

    @converter.handle(list)
    def convert_list(li):
        return ', '.join(map(converter.convert, li))

    @converter.handle(tuple)
    def contert_tuple(tp):
        return '(' + ', '.join(map(converter.convert, tp)) + ')'

    # `long` only exists on Python 2, so the handler registration differs.
    if PY2:
        @converter.handle(int, float, long)
        def convert_number(n):
            return 'n' + str(n)
    else:
        @converter.handle(int, float)
        def convert_number(n):
            return 'n' + str(n)

    @converter.default
    def convert(obj):
        return str(obj)

    assert 'a, b, c' == converter.convert(['a', 'b', 'c'])
    assert '(a, b)' == converter.convert(('a', 'b'))
    assert 'n123' == converter.convert(123)
    if PY2:
        assert 'n1' == converter.convert(long(1))
    # Unhandled types fall through to the default str() conversion.
    assert '{}' == converter.convert({})
    # List elements are converted recursively through the number handler.
    assert 'n1, n2, n3' == converter.convert([1, 2, 3])
def test_multiple():
    # A single handler may be registered for several source types at once;
    # unregistered types must raise TypeError.
    converter = typeconverter.Converter(list)

    @converter.handle(tuple, set)
    def convert_iterable(i):
        return list(i)

    s = {1, 2, 3}
    converted = converter.convert(s)
    assert len(s) == len(converted)
    assert isinstance(converted, list)
    for i in s:
        assert i in converted
    with pytest.raises(TypeError):
        converter.convert('str')
def test_chain():
    # Handlers may return values that themselves need conversion: B -> A
    # chains into the A handler until a target type is produced.
    converter = typeconverter.Converter((list, dict, int, string_basetype))

    class A(object):
        def __init__(self, v):
            self.v = v

    @converter.handle(A)
    def convert_A(a):
        return a.v

    class B(object):
        def __init__(self, v):
            self.v = v

    @converter.handle(B)
    def convert_B(b):
        # Returns another convertible type, forcing a second hop.
        return A(b.v)

    assert 1 == converter.convert(A(1))
    assert 2 == converter.convert(B(2))
    assert '1' == converter.convert(A('1'))
    assert '2' == converter.convert(B('2'))
def test_assert():
    """Check recursive type assertion through a Converter subclass.

    BUG FIX: both dict branches called ``.iter_items()``, a method that
    does not exist on dict under any Python version (Python 2 has
    ``iteritems``/``items``, Python 3 has ``items``). The branches were
    never hit by this test's tuple/set input, but would have raised
    AttributeError the moment a dict was converted. ``items()`` works on
    both interpreters.
    """
    class DeepConverter(typeconverter.Converter):
        # Recursively validate containers, not just the outermost object.
        def assert_type(self, obj):
            super(DeepConverter, self).assert_type(obj)
            if isinstance(obj, list):
                for i in obj:
                    self.assert_type(i)
            elif isinstance(obj, dict):
                for k, v in obj.items():
                    self.assert_type(k)
                    self.assert_type(v)

    converter = DeepConverter((list, dict, string_basetype))

    @converter.handle(set, tuple)
    def convert_iterable(i):
        return list(i)

    @converter.handle(list)
    def convert_list(li):
        return [converter.convert(x) for x in li]

    @converter.handle(dict)
    def convert_dict(d):
        converted = {}
        for k, v in d.items():
            converted[converter.convert(k)] = converter.convert(v)
        return converted

    @converter.default
    def convert(obj):
        return to_str(obj)

    assert [['1', '2', '3'], 'b', 'c'] == converter.convert(
        ({1, 2, 3}, 'b', 'c')
    )
| 2.5 | 2 |
automl/analyze_bohb.py | automl/learning_environments | 11 | 12760777 | import colorsys
import math
import os
import random
from decimal import Decimal
import hpbandster.core.result as hpres
import matplotlib.pyplot as plt
import numpy as np
# Loss orientation: smallest value is best -> REVERSE_LOSS = True;
# largest value is best -> REVERSE_LOSS = False.
REVERSE_LOSS = True
# Loss exponent hook — not referenced in the code visible in this module
# section; confirm whether the plotting code still uses it.
EXP_LOSS = 1
# Fraction of worst / best runs dropped by remove_outliers().
OUTLIER_PERC_WORST = 0.5
OUTLIER_PERC_BEST = 0.0
def analyze_bohb(log_dir, title):
    """Summarise a BOHB run: print the incumbent, plot and save the figure.

    log_dir -- directory containing the hpbandster result/config logs
    title -- plot title; also lowercased/underscored into the png name
    """
    # load the run from the log files
    result = hpres.logged_results_to_HBS_result(log_dir)

    # get all executed runs (only used by the optional hpvis diagnostics)
    all_runs = result.get_all_runs()

    # get the 'dict' that translates config ids to the actual configurations
    id2conf = result.get_id2config_mapping()

    # incumbent = best configuration; grab its run on the highest budget
    inc_id = result.get_incumbent_id()
    inc_runs = result.get_runs_by_id(inc_id)
    inc_run = inc_runs[-1]

    # the config, the loss observed during optimization, and extra info
    inc_valid_score = inc_run.loss
    inc_config = id2conf[inc_id]['config']
    inc_info = inc_run['info']

    print('Best found configuration :' + str(inc_config))
    print('Score: ' + str(inc_valid_score))
    print('Info: ' + str(inc_info))

    # Further diagnostics (losses_over_time, concurrent_runs_over_time,
    # correlation_across_budgets, performance_histogram_model_vs_random,
    # ...) are available via hpbandster.visualization (hpvis) if needed.

    result = remove_outliers(result)
    # result = filter_values(result)

    # print_configs_sorted_by_loss(result)
    # print_stats_per_value(result)
    # plot_accuracy_over_budget(result)
    plot_parallel_scatter(result)
    plt.title(title)

    # BUG FIX: save *before* plt.show(). show() enters the GUI loop and
    # closes the figure on return, so saving afterwards wrote an empty
    # image to disk.
    file_name = str(title).strip().replace(' ', '_').lower()
    plt.savefig(os.path.join("../experiments/automl_plots/", file_name + ".png"))
    plt.show()
def print_configs_sorted_by_loss(result):
    """Print every (loss, config) pair, ordered by ascending loss."""
    pairs = [
        (run['loss'], datum.config)
        for datum in result.data.values()
        for run in datum.results.values()
    ]
    for pair in sorted(pairs, key=lambda item: item[0]):
        print(pair)
def print_stats_per_value(result):
    """For each config parameter, print mean/std of the loss at the
    smallest observed budget, grouped by distinct parameter value."""
    # collect (value, epoch, loss) triples per config parameter
    min_epoch = float('Inf')
    config_params = {}
    for value in result.data.values():
        for config_param, config_param_val in value.config.items():
            for epoch, epoch_result in value.results.items():
                try:
                    loss = epoch_result["loss"]
                    min_epoch = min(min_epoch, epoch)
                    if config_param in config_params.keys():
                        config_params[config_param].append((config_param_val, epoch, loss))
                    else:
                        config_params[config_param] = [(config_param_val, epoch, loss)]
                except:
                    # epoch_result may be None / missing 'loss' for aborted runs
                    print('Error in get_avg_per_value, continuing')

    for config_param, data in (dict(sorted(config_params.items()))).items():
        print(config_param)

        # all unique values observed for this config parameter
        values = set(elem[0] for elem in data)
        values = sorted(list(values))

        # skip high-cardinality (e.g. continuous) parameters
        if len(values) > 20:
            continue

        for value in values:
            losses = []
            for elem in data:
                val, epoch, loss = elem
                # only aggregate losses from the smallest budget
                if val == value and epoch == min_epoch:
                    losses.append(loss)
            print('{} {} {} {}'.format(value, np.mean(losses), np.std(losses), len(losses)))
def remove_outliers(result):
    """Mutate ``result`` in place: replace NaN/zero losses with a slightly
    worse-than-worst value, then drop the OUTLIER_PERC_WORST fraction of
    worst runs and the OUTLIER_PERC_BEST fraction of best runs."""
    # build a lookup of [loss, run_id] over every budget of every run
    lut = []
    for key, value1 in result.data.items():
        for value2 in value1.results.values():
            if value2 == None:
                loss = float('nan')
            else:
                loss = value2['loss']
            lut.append([loss, key])

    # worst finite loss, padded by 1% so it sorts strictly beyond the field
    filtered_lut = [x for x in lut if math.isfinite(x[0])]
    worst_loss = sorted(filtered_lut, reverse=REVERSE_LOSS)[0][0]
    if REVERSE_LOSS:
        worst_loss += 0.01 * abs(worst_loss)
    else:
        worst_loss -= 0.01 * abs(worst_loss)

    # remove NaN's (and exact zeros) by overwriting with the padded worst loss
    for i in range(len(lut)):
        if not math.isfinite(lut[i][0]) or lut[i][0] == 0:
            lut[i][0] = worst_loss
            for key in result.data[lut[i][1]].results.keys():
                # hacky but sometimes some budgets are missing (presumably when terminating ongoing runs)
                if result.data[lut[i][1]].results[key] is None:
                    continue
                else:
                    result.data[lut[i][1]].results[key]['loss'] = worst_loss
            # result.data.pop(elem[1], None)

    # sort so the worst runs come first, then trim both ends
    lut.sort(key=lambda x: x[0], reverse=REVERSE_LOSS)
    n_remove_worst = math.ceil(len(lut) * OUTLIER_PERC_WORST)
    n_remove_best = math.ceil(len(lut) * OUTLIER_PERC_BEST)

    # remove percentage of worst values
    for i in range(n_remove_worst):
        elem = lut.pop(0)
        result.data.pop(elem[1], None)

    # remove percentage of best values
    for i in range(n_remove_best):
        elem = lut.pop()
        result.data.pop(elem[1], None)

    return result
def filter_values(result):
    """Template for dropping runs by config value: append run ids to
    ``del_list`` inside the loop (the concrete filters are currently
    commented out), then pop those ids from result.data."""
    del_list = []
    for key, value1 in result.data.items():
        id = key
        config = value1.config
        # NOTE(review): these lookups raise KeyError if a run's config lacks
        # the keys — confirm all runs share the same config space.
        rep_env_num = config['rep_env_num']
        ddqn_dropout = config['ddqn_dropout']

        # if not ddqn_dropout == 0:
        #     del_list.append(id)
        # if not rep_env_num == 5:
        #     del_list.append(id)

    for id in del_list:
        result.data.pop(id, None)

    return result
def plot_accuracy_over_budget(result):
    """Plot each configuration's loss over budget (log-x), one colour per
    hyperband iteration."""
    fig, ax = plt.subplots()

    # plot hyperband plot: pick a new colour whenever the iteration index
    # (first component of the config id) changes.
    # NOTE(review): `is not` performs an identity compare on ints here —
    # works for the initial None but should probably be `!=`; confirm.
    index = None
    color = None
    for key, value1 in result.data.items():
        if key[0] is not index:
            index = key[0]
            color = get_bright_random_color()
        try:
            x = []
            y = []
            for key2, value2 in value1.results.items():
                x.append(key2)
                y.append(value2["loss"])
            plt.semilogx(x, y, color=color)
        except:
            # results entries may be None for aborted runs
            print('Error in plot_accuracy_over_budget, continuing')

    ax.set_title('Score for different configurations')
    ax.set_xlabel('epochs')
    ax.set_ylabel('score')
def plot_parallel_scatter(result):
    """Draw a parallel-scatter plot of all evaluated hyperparameters.

    One column per config parameter; every run becomes a jittered dot in
    that column.  Dot colour encodes the normalised loss (red=bad,
    green=good) and dot radius encodes the training budget.  Bool, str,
    int and float parameters each get a suitable axis layout
    (categorical, enumerated, or linear/log scaled).
    """
    plt.subplots(dpi=300, figsize=(8, 4))
    # Running min/max of budget (epoch) and loss across all runs, used
    # later for radius and colour normalisation.
    ep_m = 1e9
    ep_M = -1e9
    loss_m = 1e9
    loss_M = -1e9
    # get all possible keys
    config_params = {}
    for value in result.data.values():
        for config_param, config_param_val in value.config.items():
            for epoch, epoch_result in value.results.items():
                try:
                    loss = epoch_result["loss"]
                    ep_m = min(ep_m, epoch)
                    ep_M = max(ep_M, epoch)
                    loss_m = min(loss_m, loss)
                    loss_M = max(loss_M, loss)
                    # Collect one (value, budget, loss) triple per run per
                    # config parameter.
                    if config_param in config_params.keys():
                        config_params[config_param].append((config_param_val, epoch, loss))
                    else:
                        config_params[config_param] = [(config_param_val, epoch, loss)]
                except:
                    # epoch_result can be None for aborted runs.
                    print('Error in plot_parallel_scatter, continuing')
    # Layout constants: horizontal jitter, dot radius range, transparency
    # and label placement.
    x_dev = 0.2
    r_min = 3
    r_max = 4
    alpha = 0.4
    text_x_offset = -0.1
    text_y_offset = -0.1
    size_text = 6
    index = 0
    # One scatter column per config parameter, in alphabetical order.
    for config_param, data in (dict(sorted(config_params.items()))).items():
        # get all unique possible values for each config parameter
        values = set(elem[0] for elem in data)
        values = sorted(list(values))
        n = len(data)
        xs = np.zeros(n)
        ys = np.zeros(n)
        rads = np.zeros(n)
        colors = np.zeros([n, 3])
        # extract common features
        for i in range(len(values)):
            for k in range(len(data)):
                if data[k][0] == values[i]:
                    ep = data[k][1]
                    acc = map_to_zero_one_range(data[k][2], loss_m, loss_M)
                    # test:
                    # loss_b = -1233125.5410615604
                    # loss_a = -5233125.5410615604 #(we minimize the negative reward)
                    # print(loss_b, "->", map_to_zero_one_range(loss_b, loss_m, loss_M))
                    # print(loss_a, "->", map_to_zero_one_range(loss_a, loss_m, loss_M))
                    # Radius grows with log(budget); squared because
                    # scatter's "s" argument is an area.
                    rads[k] = linear_interpolation(np.log(ep), np.log(ep_m), np.log(ep_M), r_min, r_max) ** 2
                    colors[k, :] = get_color(acc)
        # check for type (categorical,int,float,log)
        if type(values[0]) is bool:
            y_dev = x_dev / 2
            for i in range(len(values)):
                plt.text(index + text_x_offset, values[i] + text_y_offset, str(values[i]), rotation=90,
                         size=size_text)
                for k in range(len(data)):
                    if data[k][0] == values[i]:
                        xs[k] = index + np.random.uniform(-x_dev, x_dev)
                        ys[k] = values[i] + np.random.uniform(-y_dev, y_dev)
        elif type(values[0]) is str:
            # Categorical strings: spread the categories evenly over [0, 1].
            y_dev = min(1 / len(values) / 2.5, x_dev / 2)
            for i in range(len(values)):
                plt.text(index + text_x_offset, i / (max(len(values) - 1, 1)) + text_y_offset, values[i],
                         rotation=90, size=size_text)
                for k in range(len(data)):
                    if data[k][0] == values[i]:
                        xs[k] = index + np.random.uniform(-x_dev, x_dev)
                        ys[k] = i / (max(len(values) - 1, 1)) + np.random.uniform(-y_dev, y_dev)
        elif type(values[0]) is int:
            y_dev = min(1 / len(values) / 2.5, x_dev / 2)
            # With many distinct ints, only label the min/max endpoints.
            plotAllStr = len(values) < 20
            if not plotAllStr:
                min_val = min(values)
                max_val = max(values)
                plt.text(index + text_x_offset, 0 + text_y_offset, str(f"{Decimal(min_val):.1E}"), rotation=90, size=size_text)
                plt.text(index + text_x_offset, 1 + text_y_offset, str(f"{Decimal(max_val):.1E}"), rotation=90, size=size_text)
            for i in range(len(values)):
                if plotAllStr:
                    plt.text(index + text_x_offset, i / (max(len(values) - 1, 1)), str(values[i]), rotation=90,
                             size=size_text)
                for k in range(len(data)):
                    if data[k][0] == values[i]:
                        xs[k] = index + np.random.uniform(-x_dev, x_dev)
                        ys[k] = i / (max(len(values) - 1, 1)) + np.random.uniform(-y_dev, y_dev)
        else:  # float
            min_val = min(values)
            max_val = max(values)
            # log scale if min/max value differs to much
            if max_val / min_val > 100:
                val050 = np.exp((np.log(min_val) + np.log(max_val)) / 2)
                for i in range(len(values)):
                    for k in range(len(data)):
                        if data[k][0] == values[i]:
                            xs[k] = index + np.random.uniform(-x_dev, x_dev)
                            ys[k] = linear_interpolation(np.log(data[k][0]), np.log(min_val), np.log(max_val), 0, 1)
            # linear scale
            else:
                val050 = linear_interpolation(0.50, 0, 1, min_val, max_val)
                for i in range(len(values)):
                    for k in range(len(data)):
                        if data[k][0] == values[i]:
                            xs[k] = index + np.random.uniform(-x_dev, x_dev)
                            ys[k] = linear_interpolation(data[k][0], min_val, max_val, 0, 1)
            # Endpoint and midpoint labels in scientific notation.
            plt.text(index + text_x_offset, 0 + text_y_offset, str(f"{Decimal(min_val):.1E}"), rotation=90, size=size_text)
            plt.text(index + text_x_offset, 0.5 + text_y_offset, str(f"{Decimal(val050):.1E}"), rotation=90, size=size_text)
            plt.text(index + text_x_offset, 1 + text_y_offset, str(f"{Decimal(max_val):.1E}"), rotation=90, size=size_text)
        plt.scatter(xs, ys, s=rads, c=colors, alpha=alpha, edgecolors='none')
        index += 1
    plt.yticks([], [])
    plt.xticks(np.arange(index), (tuple(sorted(config_params.keys()))), rotation=90, fontsize=size_text)
    plt.subplots_adjust(bottom=0.25)
def linear_interpolation(x, x0, x1, y0, y1):
    """Map *x* from the interval [x0, x1] onto [y0, y1] linearly.

    A tiny epsilon in the denominator keeps the division finite when
    x0 == x1.
    """
    fraction = (x - x0) / (x1 - x0 + 1e-9)
    return y0 + fraction * (y1 - y0)
def map_to_zero_one_range(loss, loss_m, loss_M):
    """Convert a raw loss into an accuracy-like score in [0, 1].

    Uses the module-level flags ``REVERSE_LOSS`` (True when lower loss is
    actually a negated reward) and ``EXP_LOSS`` (exponent applied after
    normalisation).  Two shortcut cases avoid renormalising losses that
    are already fractions.
    """
    already_fraction = loss_M < 1 and loss_m > 0 and not REVERSE_LOSS
    negated_fraction = loss_M < 0 and loss_m > -1 and REVERSE_LOSS
    if already_fraction:
        # Loss is already in (0, 1): use it directly.
        return loss
    if negated_fraction:
        # Loss is a negated fraction in (-1, 0): flip the sign.
        return -loss
    # General case: normalise to 0 (bad) .. 1 (good).
    acc = (loss - loss_m) / (loss_M - loss_m + 1e-9)
    if REVERSE_LOSS:
        acc = 1 - acc
    return acc ** EXP_LOSS
def get_color(acc):
    """Map a score in [0, 1] to an RGB colour on a red->yellow->green ramp.

    Scores at or below 0 clamp to pure red, scores above 1 clamp to pure
    green.  Returns a (1, 3) array suitable for matplotlib's scatter.
    """
    if acc <= 0:
        return np.array([[1, 0, 0]])
    if acc <= 0.5:
        # Ramp the green channel up: red -> yellow.
        return np.array([[1.0, 2 * acc, 0.0]])
    if acc <= 1:
        # Ramp the red channel down: yellow -> green.
        return np.array([[1.0 - 2 * (acc - 0.5), 1.0, 0.0]])
    return np.array([[0, 1, 0]])
def get_bright_random_color():
    """Return a fully saturated, medium-lightness RGB colour with a random hue."""
    hue = random.random()
    # lightness 0.5 + saturation 1.0 keeps every hue maximally vivid.
    return colorsys.hls_to_rgb(hue, 0.5, 1)
if __name__ == '__main__':
    # Entry point: analyse one BOHB/hpbandster log directory.  The
    # commented lines below are the history of previously analysed runs,
    # kept for quick re-use.
    # log_dir = '../results/bohb_params_DDQN_ICM_cartpole_2021-03-04-09'
    # log_dir = '../results/bohb_params_ql_cb_cliff_2021-03-04-16'
    # log_dir = '../results/bohb_params_td3_icm_cmc_2021-03-04-17'
    # log_dir = '../results/bohb_params_ql_cb_cliff_2021-03-04-20'
    # log_dir = '../results/bohb_params_DDQN_ICM_cartpole_2021-03-04-22'
    # log_dir = '../results/bohb_params_td3_icm_cmc_2021-03-04-22'
    # log_dir = '../results/bohb_params_td3_icm_cmc_2021-03-05-13'
    # log_dir = '../results/bohb_params_DDQN_ICM_cartpole_2021-03-05-13'
    # log_dir = '../results/bohb_params_DDQN_ICM_cartpole_2021-03-06-00'
    # log_dir = '../results/bohb_params_td3_icm_cmc_2021-03-06-00'
    # log_dir = '../results/bohb_params_td3_icm_cmc_2021-03-06-10'
    # log_dir = '../results/bohb_params_DDQN_ICM_cartpole_2021-03-06-10'
    # title = "DDQN ICM on CartPole"
    # log_dir = '../results/bohb_params_td3_icm_hc_2021-03-08-20'
    # title = "TD3 ICM on HC"
    # log_dir = '../results/bohb_params_td3_icm_cmc_2021-03-08-22'
    # title = "TD3 ICM on CMC"
    # log_dir = '../results/bohb_params_TD3_discrete_gumbel_temp_annealing_2021-03-11-14'
    # title = "Discrete TD3 with annealed temp on CartPole"
    # log_dir = '../results/bohb_params_TD3_discrete_gumbel_temp_annealing_on_syn_env_2_2021-03-11-23'
    # title = "Discrete TD3 with annealed temp on CartPole Syn Env Model 2"
    # log_dir = '../results/bohb_params_ppo_hc_2021-03-13-23'
    # title = "PPO on HC"
    # log_dir = '../results/bohb_params_td3_icm_cmc_max_reward_2021-03-16-00'
    # title = "TD3 ICM on CMC max. reward"
    # log_dir = '../results/bohb_params_td3_icm_hc_max_reward_2021-03-16-00'
    # title = "TD3 ICM on HC max. reward"
    # log_dir = '../results/bohb_params_ppo_hc_icm_1e-3_ent_coef_1e-1_action_std_2021-03-19-20'
    # title = "PPO ICM on HC max. reward"
    # log_dir = '../results/halfcheetah_td3_bohb_params_se_prep_2021-06-11-11'
    # title = "TD3 HC max. reward"
    # log_dir = '../results/halfcheetah_td3_bohb_params_se_prep_2021-06-13-17'
    # title = "TD3 HC max. reward"
    #
    # log_dir = '../results/SE_evaluate_cmc_se_params_2021-07-27-11'
    # title = "SE CMC HPO"
    # NOTE(review): absolute, machine-specific path -- adjust before running.
    log_dir = "/home/ferreira/Projects/learning_environments/results/SE_evaluate_cmc_se_params_2021-07-30-10"
    title = "SE CMC HPO"
    analyze_bohb(log_dir, title=title)
| 2.390625 | 2 |
frame/gui.py | Rosikobu/snake-reloaded | 0 | 12760778 | import pygame, sys
from typing import List, Tuple
import time
from pygame.math import Vector2
from .snake import Snake
from .eatable.mouse import Mouse
from .model import Model
from .eatable.sandglass import Sandglass
from .eatable.peel import Peel
from .components import sound_controller
from .eatable.saw import Saw
from .components.background import Background
from .config import FPS, xSize, ySize, SPEED
from .components.score import Score
from .noneatable.bush import Bush
class GUI(object):
    """Top-level pygame view/controller for the snake game.

    Owns the window, all drawable game objects and the ``Model`` that
    holds the game logic; ``update_display`` is the per-frame entry point.
    """

    def __init__(self) -> None:
        """Create the window, initialise pygame and build all game objects."""
        # Window setup
        self.screen = pygame.display.set_mode((xSize,ySize))
        #print(type(screen))
        # Custom event fired periodically to drive the game-logic tick.
        self.SCREEN_UPDATE = pygame.USEREVENT
        self.clock = pygame.time.Clock()
        # Initialize pygame
        pygame.mixer.pre_init(44100,-16,2,512)
        pygame.init()
        pygame.time.set_timer(self.SCREEN_UPDATE, SPEED)
        # Create the game objects
        self.snake = Snake(self.screen)
        # Background
        self.background = Background(self.screen)
        # Powerups
        self.sandglass = Sandglass(self.screen)
        self.saw = Saw(self.screen)
        self.peel = Peel(self.screen)
        # Score
        self.score = Score(self.screen)
        self.barrier_list = []
        self.food_list = []
        # Game logic
        self.model = Model(self.screen, self.snake, self.sandglass, self.saw, self.score, self.food_list, self.barrier_list, self.peel)
        # Speed Up
        self.speedup = False
    def update(self) -> None:
        """Advance the game logic by one tick."""
        self.model.update_snake() # advance the snake
        self.model.check_collision() # collision check: snake vs. food
        self.model.check_fail() # collision check: snake with itself
        self.model.update_barriers()
    def draw_elements(self) -> None:
        """Redraw background, snake, barriers, food and score."""
        self.background.draw_background()
        self.snake.draw_snake() # draw the snake
        for barrier in self.barrier_list:
            barrier.draw_barrier()
        for obj in self.food_list: # draw the mice
            obj.draw_food()
        self.score.draw_score()
    # Main loop
    def update_display(self) -> None:
        """Process one frame: handle events, tick the logic, redraw."""
        for event in pygame.event.get():
            if event.type == pygame.QUIT: # close the window
                pygame.quit()
                sys.exit()
            if event.type == self.SCREEN_UPDATE:
                self.update() # update game element logic
            if event.type == pygame.KEYDOWN: # handle snake steering input
                self.model.change_direction(event)
        self.draw_elements() # draw all static objects
        pygame.display.update() # refresh the display
src/data/image_validation.py | ehudbaumatz/enhance | 0 | 12760779 | <gh_stars>0
def is_before_after(img_path):
    """
    Check whether the image at ``img_path`` is a before/after composite.

    NOTE(review): unimplemented stub -- currently returns ``None`` for
    every input.

    :param img_path: path to the image file to inspect.
    :return: None (not yet implemented).
    """
substring.py | Samridhi-88/python-list | 0 | 12760780 | <reponame>Samridhi-88/python-list
# Replace every standalone word "over" with "on" in the sentence and print
# the rebuilt string.  Note: the result deliberately keeps a leading and a
# trailing space, matching the original accumulator-based implementation.
mainstr = "the quick brown fox jumped over the lazy dog. the dog slept over the verandah"
newstr = mainstr.split(" ")
# Idiomatic rewrite: a generator expression + str.join replaces the original
# quadratic while-loop string concatenation.
a = " " + " ".join("on" if word == "over" else word for word in newstr) + " "
print(a)
stockpile-wrapper/stockpile-wrapper.py | cloud-bulldozer/bohica | 0 | 12760781 | <gh_stars>0
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from elasticsearch_dsl import Search
import elasticsearch
import time
import subprocess
import sys
import os
import uuid
import base64
import json
import redis
import ssl
import hashlib
from transcribe.render import transcribe
from elasticsearch.helpers import parallel_bulk, BulkIndexError
def _connect_to_es(server, port, es_ssl):
    """Create an Elasticsearch client for ``server:port``.

    :param server: elastic host name or IP.
    :param port: elastic port.
    :param es_ssl: the string "true" enables SSL with certificate
        verification disabled (for self-signed clusters); any other value
        uses a plain connection.
    :return: a configured ``elasticsearch.Elasticsearch`` client.
    """
    _es_connection_string = str(server) + ':' + str(port)
    if es_ssl == "true":
        import urllib3
        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
        # Accept any certificate/hostname -- intended for self-signed setups.
        ssl_ctx = ssl.create_default_context()
        ssl_ctx.check_hostname = False
        ssl_ctx.verify_mode = ssl.CERT_NONE
        es = elasticsearch.Elasticsearch([_es_connection_string], send_get_body_as='POST',
                                         ssl_context=ssl_ctx, use_ssl=True)
    else:
        es = elasticsearch.Elasticsearch([_es_connection_string], send_get_body_as='POST')
    return es
def _index_result(es, my_uuid, my_node, my_pod, index_retries):
    """Locate the stockpile output file and push it to Elasticsearch.

    Uploads both the per-module transcribed documents and the raw
    base64-encoded dump.  Nothing is indexed if the output file is missing.
    """
    index = "stockpile-results-raw"
    timestamp = int(time.time())
    # NOTE(review): shells out to grep/awk to read the configured output
    # path -- assumes the script runs from the stockpile checkout directory.
    stockpile_file = os.popen('grep stockpile_output_path group_vars/all.yml | awk \'{printf $2}\'').read()
    if os.path.exists(stockpile_file):
        _upload_to_es(stockpile_file, my_uuid, timestamp, es, my_node, my_pod, index_retries)
        _upload_to_es_bulk(stockpile_file, my_uuid, timestamp, es, index, my_node, my_pod)
def _upload_to_es(payload_file, my_uuid, timestamp, es, my_node, my_pod, index_retries):
    """Transcribe the stockpile output and index each document into ES.

    Every document goes into the "<module>-metadata" index with a
    content-hash ``_id`` so repeated runs do not duplicate
    node-independent documents.  Indexing is retried up to
    ``index_retries`` times while failures (other than 409 conflicts)
    remain.
    """
    # Bug fix: the original dict literal repeated the "total" key instead
    # of initialising "failed", which is read after the retry loop.
    documents = {
        "total": 0,
        "existent": 0,
        "failed": 0
    }
    def doc_stream():
        # Yield one ES bulk "create" action per transcribed document.
        for scribed in transcribe(payload_file, 'stockpile'):
            doc = json.loads(scribed)
            es_index = "%s-metadata" % doc["module"]
            doc["uuid"] = my_uuid
            _id = hashlib.sha256(str(doc).encode()).hexdigest()
            # This information changes depending on the node and pod where stockpile-wrapper is executed
            # Don't include it in the _id calculation to avoid indexing several times documents not
            # specific to a node
            doc["node_name"] = my_node
            doc["pod_name"] = my_pod
            doc["timestamp"] = timestamp
            documents["total"] += 1
            yield {"_index": es_index,
                   "_source": doc,
                   "_id": _id,
                   "_op_type": "create"}
    failed_docs = []
    for r in range(index_retries):
        documents["failed"] = 0
        documents["existent"] = 0
        try:
            for ok, resp in parallel_bulk(es, doc_stream()):
                pass
        # Catch indexing exception
        except BulkIndexError as err:
            exception = err
            # An exception can refer to multiple documents
            for failed_doc in err.errors:
                # Document already exists in ES
                if failed_doc["create"]["status"] == 409:
                    documents["existent"] += 1
                    continue
                documents["failed"] += 1
                es_index = "%s-metadata" % failed_doc["create"]["data"]["module"]
                doc = {"_index": es_index,
                       "_source": failed_doc["create"]["data"],
                       "_id": failed_doc["create"]["_id"],
                       "_op_type": "create"}
                # NOTE(review): collected but never re-submitted; kept for
                # debugging / possible future targeted-retry logic.
                failed_docs.append(doc)
        except Exception as err:
            print("Unknown indexing error: %s" % err)
            return
        if not documents["failed"]:
            break
    if documents["total"] > documents["failed"] + documents["existent"]:
        print("%d documents successfully indexed" % (documents["total"] - documents["failed"] - documents["existent"]))
    if documents["failed"] > 0:
        print("%d documents couldn't be indexed" % documents["failed"])
        print("Indexing exception found %s" % exception)
    if documents["existent"] > 0:
        print("%d documents already exist in ES" % documents["existent"])
def _upload_to_es_bulk(payload_file, my_uuid, timestamp, es, index, my_node, my_pod):
payload = open(payload_file, "rb").read()
raw_stockpile = str(base64.urlsafe_b64encode(payload))
try:
_data = {"uuid": my_uuid,
"timestamp": timestamp,
"node_name": my_node,
"pod_name": my_pod,
"data": raw_stockpile}
es.index(index=index, body=_data)
except Exception as e:
print("Indexing exception found %s" % e)
def _run_stockpile(tags, skip_tags):
    """Run the stockpile ansible playbook with the given tag filters.

    :param tags: comma separated ansible tags to run.
    :param skip_tags: comma separated ansible tags to skip.
    :return: tuple ``(returncode, stdout, stderr)`` with both streams
        decoded as UTF-8 text.
    """
    cmd = ["ansible-playbook", "-i", "hosts", "stockpile.yml",
           "--tags", tags, "--skip-tags", skip_tags]
    completed = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    return (completed.returncode,
            completed.stdout.decode("utf-8"),
            completed.stderr.decode("utf-8"))
def _check_index(es, my_uuid, my_node):
    """Return True when cpuinfo metadata for (uuid, node) already exists in ES.

    cpuinfo-metadata is queried because it is a basic index that should
    regularly be present without any extended permissions.
    """
    query = Search(using=es, index="cpuinfo-metadata") \
        .query("match", uuid=my_uuid) \
        .query("match", node_name=my_node)
    hits = query.execute()
    return hits['hits']['total']['value'] > 0
def _mark_node(r, my_node, my_uuid, es, check_val):
    """Decide whether metadata collection should run on this node.

    Uses a Redis key (``check_val``) as a tiny state machine shared between
    pods so each node is only collected once per UUID.

    :param r: redis client.
    :param es: elasticsearch client, used to verify claimed metadata.
    :param check_val: redis key, "<uuid>-<node>".
    :return: "exists" if metadata is already indexed, "running" if another
        pod is currently collecting, or "run" if this pod should collect.
    """
    current_val = r.get(check_val)
    # If the metadata claims to exist check if it does. If it is unable to find data then run it again
    # If its running let it run
    # Else run the collection
    if current_val == "Metadata-Exists":
        if _check_index(es, my_uuid, my_node):
            return "exists"
        else:
            # Redis said done but ES has no data -> re-run the collection.
            r.set(check_val, "Metadata-Running")
            return "run"
    elif current_val == "Metadata-Running":
        return "running"
    else:
        r.set(check_val, "Metadata-Running")
        return "run"
def main():
    """CLI entry point: optionally run stockpile, then index its results.

    Coordinates three optional subsystems based on the flags:
      * Elasticsearch (--server/--port) for indexing collected metadata,
      * Redis (--redisip/--redisport) to avoid re-collecting a node,
      * ansible/stockpile itself for the actual metadata collection.

    Bug fix: ``es`` is now initialised to ``None`` so the post-run checks
    cannot raise NameError when no --server was supplied (previously
    ``if args.server != "none"`` and ``if es is not None`` both referenced
    an unbound name in that case).
    """
    parser = argparse.ArgumentParser(description="Stockpile Wrapper script",
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        '-s', '--server',
        help='Provide elastic server information')
    parser.add_argument(
        '-p', '--port',
        type=int,
        default=9200,
        help='Provide elastic port information')
    parser.add_argument(
        '--sslskipverify',
        help='If es is setup with ssl, but can disable tls cert verification',
        default=False)
    parser.add_argument(
        '--index-retries',
        type=int,
        default=3,
        help='Number of retries for indexing')
    parser.add_argument(
        '-u', '--uuid',
        help='UUID to provide to elastic')
    parser.add_argument(
        '-n', '--nodename',
        help='Node Name to provide to elastic')
    parser.add_argument(
        '-N', '--podname',
        help='Pod Name to provide to elastic')
    parser.add_argument(
        '--redisip',
        help='IP address for redis server')
    parser.add_argument(
        '--redisport',
        type=int,
        default=6379,
        help='Port for the redis server')
    parser.add_argument(
        '--force',
        help='Force metadata collection regardless of redis',
        action="store_true")
    parser.add_argument(
        '--tags',
        help='Comma separated tags to run stockpile with',
        default='all')
    parser.add_argument(
        '--skip-tags',
        help='Comma separated tags to skip in stockpile',
        default='None')
    args = parser.parse_args()

    my_uuid = args.uuid
    my_node = args.nodename
    my_pod = args.podname
    # Bug fix: always bind es so later "es is not None" checks are safe.
    es = None
    if args.server is not None and args.port is not None:
        es = _connect_to_es(args.server, args.port, args.sslskipverify)
    run = "run"
    if args.redisip is not None and args.redisport is not None and my_node is not None and my_uuid is not None:
        pool = redis.ConnectionPool(host=args.redisip, port=args.redisport, decode_responses=True)
        r = redis.Redis(connection_pool=pool, charset="utf-8")
        check_val = my_uuid + "-" + my_node
        run = _mark_node(r, my_node, my_uuid, es, check_val)
    if my_uuid is None:
        my_uuid = str(uuid.uuid4())
    if run == "run" or args.force:
        rc, stdout, stderr = _run_stockpile(args.tags, args.skip_tags)
        if rc != 0:
            print("Stockpile execution error: %s" % stderr)
            sys.exit(1)
    else:
        print("Metadata already collected on %s " % my_node)
    if my_node is None:
        my_node = "Null"
    if my_pod is None:
        my_pod = "Null"
    # Only index when a connection was actually created.
    if es is not None and args.server != "none":
        _index_result(es, my_uuid, my_node, my_pod, args.index_retries)
    if args.redisip is not None and args.redisport is not None and run == "run":
        r.set(check_val, "Metadata-Exists")
    if args.redisip is not None and args.redisport is not None and my_node is not None and my_uuid is not None:
        print("Closing Redis connection")
        # Name our connection so we can find and kill exactly it below.
        r.client_setname(my_pod)
        clients = r.client_list()
        for x in range(len(clients)):
            if clients[x]["name"] == my_pod:
                my_id = clients[x]["id"]
                break
        r.client_kill_filter(_id=my_id, skipme=False)
    if es is not None:
        print("Attempting to close ES connection")
        es.close()
    print("uuid: %s" % my_uuid)
| 2.09375 | 2 |
xnas/search_space/MB/proxyless_cnn.py | MAC-AutoML/XNAS | 9 | 12760782 | <reponame>MAC-AutoML/XNAS<filename>xnas/search_space/MB/proxyless_cnn.py<gh_stars>1-10
from xnas.search_space.MB.ops import *
from xnas.search_space.utils import profile, make_divisible
import torch
import pdb
import json
import xnas.core.logging as logging
import os
from xnas.core.config import cfg
logger = logging.get_logger(__name__)
class ProxylessNASNets(MyNetwork):
    """ProxylessNAS-style MobileNet supernet for differentiable NAS.

    Builds a MobileNetV2-like backbone in which every searchable block is
    a ``MixedEdge`` over candidate MBConv operations; an architecture is
    selected by passing one one-hot vector per edge to ``forward``.
    """
    def __init__(self, n_classes=1000, space_name='proxyless', width_mult=1.3, depth=4):
        """Construct the supernet.

        :param n_classes: number of classifier outputs.
        :param space_name: 'proxyless' or 'google' -- selects the base
            stage widths of the corresponding search space.
        :param width_mult: channel width multiplier (rounded to /8).
        :param depth: number of searchable blocks per stage.
        """
        super(ProxylessNASNets, self).__init__()
        self.width_mult = width_mult
        self.depth = depth
        # Candidate ops per edge; overridable via config.
        self.conv_candidates = [
            '3x3_MBConv3', '3x3_MBConv6',
            '5x5_MBConv3', '5x5_MBConv6',
            '7x7_MBConv3', '7x7_MBConv6',
        ] if len(cfg.MB.BASIC_OP) == 0 else cfg.MB.BASIC_OP
        if space_name == 'google':
            self.base_stage_width = [32, 16, 24, 32, 64, 96, 160, 320, 1280]
        elif space_name == 'proxyless':
            self.base_stage_width = [32, 16, 24, 40, 80, 96, 192, 320, 1280]
        input_channel = make_divisible(
            self.base_stage_width[0] * width_mult, 8)
        first_block_width = make_divisible(
            self.base_stage_width[1] * width_mult, 8)
        last_channel = make_divisible(
            self.base_stage_width[-1] * width_mult, 8)
        # first conv layer
        first_conv = ConvLayer(
            3, input_channel, kernel_size=3, stride=2, use_bn=True, act_func='relu6', ops_order='weight_bn_act'
        )
        # first block (fixed, not searched)
        first_block_conv = MBInvertedConvLayer(
            in_channels=input_channel, out_channels=first_block_width, kernel_size=3, stride=1,
            expand_ratio=1, act_func='relu6',
        )
        first_block = MobileInvertedResidualBlock(first_block_conv, None)
        input_channel = first_block_width
        # inverted residual blocks
        blocks = nn.ModuleList()
        blocks.append(first_block)
        self.stride_stages = [2, 2, 2, 1, 2, 1] if len(
            cfg.MB.STRIDE_STAGES) == 0 else cfg.MB.STRIDE_STAGES
        n_block_list = [self.depth] * 5 + [1]
        width_list = []
        for base_width in self.base_stage_width[2:-1]:
            width = make_divisible(base_width * self.width_mult, 8)
            width_list.append(width)
        feature_dim = input_channel
        self.candidate_ops = []
        for width, n_block, s in zip(width_list, n_block_list, self.stride_stages):
            for i in range(n_block):
                # Only the first block of a stage uses the stage stride.
                if i == 0:
                    stride = s
                else:
                    stride = 1
                # Residual-compatible edges additionally get a 'Zero' (skip)
                # candidate; the rest get a plain 3x3 MBConv1.
                if stride == 1 and feature_dim == width:
                    modified_conv_candidates = self.conv_candidates + ['Zero']
                else:
                    modified_conv_candidates = self.conv_candidates + \
                        ['3x3_MBConv1']
                self.candidate_ops.append(modified_conv_candidates)
                conv_op = MixedEdge(candidate_ops=build_candidate_ops(
                    modified_conv_candidates, feature_dim, width, stride, 'weight_bn_act',
                    act_func='relu6', use_se=False), )
                if stride == 1 and feature_dim == width:
                    shortcut = IdentityLayer(feature_dim, feature_dim)
                else:
                    shortcut = None
                mb_inverted_block = MobileInvertedResidualBlock(
                    conv_op, shortcut)
                blocks.append(mb_inverted_block)
                feature_dim = width
        # 1x1_conv before global average pooling
        feature_mix_layer = ConvLayer(
            feature_dim, last_channel, kernel_size=1, use_bn=True, act_func='relu6',
        )
        classifier = LinearLayer(last_channel, n_classes)
        self.first_conv = first_conv
        self.blocks = blocks
        self.feature_mix_layer = feature_mix_layer
        self.classifier = classifier
        self.global_avg_pooling = nn.AdaptiveAvgPool2d(1)
        # Searchable-edge bookkeeping (first block is fixed, hence -1).
        self.all_edges = len(self.blocks) - 1
        self.num_edges = len(self.blocks) - 1
        self.num_ops = len(self.conv_candidates) + 1
    """ MyNetwork required methods """
    @staticmethod
    def name():
        return 'OFAProxylessNASNets'
    def forward(self, x, sample):
        """Run the supernet with one candidate op active per edge.

        :param x: input image batch.
        :param sample: sequence of one-hot-like vectors, one per
            searchable edge (length must equal ``len(self.blocks) - 1``).
        :return: classifier logits.
        """
        # first conv
        x = self.first_conv(x)
        assert len(self.blocks) - 1 == len(sample)
        for i in range(len(self.blocks[1:])):
            this_block_conv = self.blocks[i+1].mobile_inverted_conv
            if isinstance(this_block_conv, MixedEdge):
                # one hot like vector
                this_block_conv.active_vector = sample[i]
            else:
                raise NotImplementedError
        for block in self.blocks:
            x = block(x)
        x = self.feature_mix_layer(x)
        x = self.global_avg_pooling(x)
        x = x.view(x.size(0), -1) # flatten
        x = self.classifier(x)
        return x
    def genotype(self, theta):
        """Decode an architecture: pick the argmax op name for each edge.

        :param theta: (num_edges, num_ops) array of op weights/probabilities.
        :return: list of candidate-op names, one per edge.
        """
        genotype = []
        for i in range(theta.shape[0]):
            genotype.append(self.candidate_ops[i][np.argmax(theta[i])])
        return genotype
def _ProxylessCNN():
    """Build the ProxylessNAS search-space network from the global config.

    Bug fix: the constructed network is now returned -- the original built
    the object and discarded it, implicitly returning None.
    """
    return ProxylessNASNets(
        n_classes=cfg.SEARCH.NUM_CLASSES,
        space_name='proxyless',
        width_mult=cfg.MB.WIDTH_MULTI,
        depth=cfg.MB.DEPTH)
def _Proxyless_Google_CNN():
    """Build the Google (MobileNetV2-width) search-space network from config.

    Bug fix: the constructed network is now returned -- the original built
    the object and discarded it, implicitly returning None.
    """
    return ProxylessNASNets(
        n_classes=cfg.SEARCH.NUM_CLASSES,
        space_name='google',
        width_mult=cfg.MB.WIDTH_MULTI,
        depth=cfg.MB.DEPTH)
| 1.835938 | 2 |
tests/all/fields/id.py | stuaxo/python-stdnet | 61 | 12760783 | <filename>tests/all/fields/id.py
'''AutoId, CompositeId and custom Id tests.'''
from uuid import uuid4
from random import randint
import pulsar
import stdnet
from stdnet import FieldError
from stdnet.utils import test
from examples.models import Task, WordBook, SimpleModel, Instrument
def genid():
    """Return a random 8-character lowercase hexadecimal id string."""
    return uuid4().hex[:8]
class Id(test.TestCase):
    '''Tests for models whose primary key is a custom (non-auto) field.

    Uses the manager for convenience; ids are random 8-char strings and
    the test methods are stdnet-style generators (``yield`` drives the
    async test runner).'''
    model = Task
    def make(self, name='pluto'):
        # Helper: create a Task with a fresh random id.
        return self.mapper.task.new(id=genid(), name=name)
    def test_create(self):
        # Two tasks get distinct ids and ordered timestamps.
        t1 = yield self.make()
        yield pulsar.async_sleep(0.5)
        t2 = yield self.make()
        self.assertNotEqual(t1.id, t2.id)
        self.assertTrue(t1.timestamp < t2.timestamp)
    def test_change_id(self):
        # Reassigning the pk and re-adding moves the instance to the new id.
        session = self.session()
        t1 = yield self.make()
        id1 = t1.id
        self.assertEqual(id1, t1._dbdata['id'])
        self.assertTrue(t1.get_state().persistent)
        id2 = genid()
        t1.id = id2
        # _dbdata keeps the old id until the session persists the change.
        self.assertEqual(id1, t1._dbdata['id'])
        self.assertNotEqual(id2, t1._dbdata['id'])
        yield session.add(t1)
        self.assertEqual(id2, t1.id)
        self.assertEqual(id2, t1._dbdata['id'])
        yield self.async.assertEqual(self.query().filter(id=(id1, id2)).count(), 1)
    def test_clone(self):
        # Cloning with a new id persists a second, independent row.
        t1 = yield self.make()
        session = t1.session
        yield pulsar.async_sleep(0.5)
        t2 = yield session.add(t1.clone(id=genid()))
        self.assertNotEqual(t1.id, t2.id)
        self.assertEqual(t1.name, t2.name)
        self.assertNotEqual(t1.timestamp, t2.timestamp)
        self.assertTrue(t1.timestamp < t2.timestamp)
        tasks = yield self.query().filter(id=(t1.id, t2.id)).all()
        self.assertEqual(len(tasks), 2)
        self.assertEqual(tasks[0].id, t2.id)
        self.assertEqual(tasks[1].id, t1.id)
        self.assertTrue(tasks[0].timestamp > tasks[1].timestamp)
    def test_delete_and_clone(self):
        # After deleting the original, only the clone remains queryable.
        t1 = yield self.make()
        session = t1.session
        res = yield session.delete(t1)
        tasks = yield self.query().filter(id=t1.id).all()
        self.assertEqual(len(tasks), 0)
        t2 = yield session.add(t1.clone(id=genid()))
        self.assertNotEqual(t1.id, t2.id)
        self.assertEqual(t1.name, t2.name)
        tasks = yield self.query().filter(id=(t1.id, t2.id)).all()
        self.assertEqual(len(tasks), 1)
        self.assertEqual(tasks[0].id, t2.id)
    def test_fail(self):
        # Adding without an explicit id must fail for non-auto pk models.
        session = self.session()
        t = Task(name='pluto')
        yield self.async.assertRaises(Exception, session.add, t)
class TestAutoId(test.TestCase):
    '''Tests for models with an automatically generated primary key.'''
    models = (SimpleModel, Instrument)
    def random_id(self, id=None):
        # Backend-appropriate explicit id: ObjectId for mongo, int otherwise.
        if self.backend.name == 'mongo':
            from bson.objectid import ObjectId
            return ObjectId()
        else:
            if id:
                return id+1
            else:
                return randint(1,1000)
    def testMeta(self):
        # The auto pk field exposes the expected metadata and cannot be
        # registered twice under another name.
        pk = self.model._meta.pk
        self.assertEqual(pk.name, 'id')
        self.assertEqual(pk.type, 'auto')
        self.assertEqual(pk.internal_type, None)
        self.assertEqual(pk.python_type, None)
        self.assertEqual(str(pk), 'examples.simplemodel.id')
        self.assertRaises(FieldError, pk.register_with_model,
                          'bla', SimpleModel)
    def testCreateWithValue(self):
        # create an instance with an id
        models = self.mapper
        id = self.random_id()
        m1 = yield models.simplemodel.new(id=id, code='bla')
        self.assertEqual(m1.id, id)
        self.assertEqual(m1.code, 'bla')
        # A subsequent instance without an explicit id continues the sequence.
        m2 = yield models.simplemodel.new(code='foo')
        id2 = self.random_id(id)
        self.assertEqualId(m2, id2)
        self.assertEqual(m2.code, 'foo')
        qs = yield models.simplemodel.query().all()
        self.assertEqual(len(qs), 2)
        self.assertEqual(set(qs), set((m1, m2)))
    def testCreateWithValue2(self):
        # Auto id first, explicit id second: both are persisted correctly.
        models = self.mapper
        id = self.random_id()
        m1 = yield models[Instrument].new(name='test1', type='bla', ccy='eur')
        m2 = yield models.instrument.new(id=id, name='test2', type='foo', ccy='eur')
        self.assertEqualId(m1, 1)
        self.assertEqual(m2.id, id)
        qs = yield models.instrument.query().all()
        self.assertEqual(len(qs), 2)
        self.assertEqual(set(qs), set((m1,m2)))
class CompositeId(test.TestCase):
    '''Tests for models with a composite primary key (word + book).'''
    model = WordBook
    def create(self, word, book):
        # Helper: persist a WordBook and verify it round-trips with its
        # composite id intact.
        session = self.session()
        m = yield session.add(self.model(word=word, book=book))
        self.assertEqual(m.pkvalue(), m.id)
        id = m.id
        m = yield session.query(self.model).get(word=word, book=book)
        self.assertEqual(m.word, word)
        self.assertEqual(m.book, book)
        self.assertEqual(m.id, id)
        yield m
    def testMeta(self):
        # The composite pk is built from the word and book fields, in order.
        id = self.model._meta.pk
        self.assertEqual(id.type, 'composite')
        fields = id.fields
        self.assertEqual(len(fields), 2)
        self.assertEqual(fields[0], self.model._meta.dfields['word'])
        self.assertEqual(fields[1], self.model._meta.dfields['book'])
    def test_value(self):
        # pkvalue is a hash of the field tuple and is order-sensitive.
        m = self.model(book='world', word='hello')
        self.assertFalse(m.id)
        value = m.pkvalue()
        self.assertTrue(value)
        self.assertEqual(value, hash(('hello', 'world')))
        m = self.model(book='hello', word='world')
        self.assertNotEqual(value, m.pkvalue())
    def test_create(self):
        return self.create('hello', 'world')
    def test_change(self):
        # Changing a component field changes the composite id on save.
        m = yield self.create('ciao', 'libro')
        session = m.session
        id = m.id
        m.word = 'beautiful'
        self.assertNotEqual(m.pkvalue(), id)
        yield session.add(m)
        self.assertNotEqual(m.id, id)
        self.assertEqual(m.word, 'beautiful')
        query = self.query()
        # The row under the old id is gone; only the new id resolves.
        yield self.async.assertEqual(query.filter(id=id).count(), 0)
        yield self.async.assertEqual(query.filter(id=m.id).count(), 1)
        yield self.async.assertEqual(query.filter(word='ciao', book='libro').count(), 0)
        m2 = yield query.get(word='beautiful', book='libro')
        self.assertEqual(m, m2)
| 2.609375 | 3 |
libs/physicalparameters.py | dpttw/SEMimage | 0 | 12760784 | <filename>libs/physicalparameters.py<gh_stars>0
def chemelements():
    """Return the list of chemical element symbols of the periodic table.

    The symbols are kept in the same (roughly alphabetical) order as the
    original hand-written table, including the systematic placeholder
    names (Uun, Uuu, ...) used for the heaviest elements.
    """
    symbols = (
        "Ac Al Ag Am Ar As At Au B Ba Bh Bi Be "
        "Bk Br C Ca Cd Ce Cf Cl Cm Co Cr Cs Cu "
        "Db Dy Er Es Eu F Fe Fm Fr Ga Gd Ge H "
        "He Hf Hg Ho Hs I In Ir K Kr La Li Lr "
        "Lu Md Mg Mn Mo Mt N Na Nb Nd Ne Ni No "
        "Np O Os P Pa Pb Pd Pm Po Pr Pt Pu Ra "
        "Rb Re Rf Rh Rn Ru S Sb Sc Se Sg Si Sm "
        "Sn Sr Ta Tb Tc Te Th Ti Tl Tm U Uun Uuu "
        "Uub Uut Uuq Uup Uuh Uus Uuo V W Xe Y Yb "
        "Zn Zr"
    )
    return symbols.split()
| 1.929688 | 2 |
src/jig/commands/__init__.py | Lightslayer/jig | 16 | 12760785 | <filename>src/jig/commands/__init__.py<gh_stars>10-100
from .base import get_command
from .base import list_commands
| 1.257813 | 1 |
doxy_dollar.py | atria-soft/dollar | 0 | 12760786 | #!/usr/bin/python
import os
import doxy.module as module
import doxy.debug as debug
import doxy.tools as tools
def create(target, module_name):
    """Build the doxygen documentation module description for ``module_name``.

    :param target: build target passed in by the doxy framework.
    :param module_name: name of the module being documented.
    :return: a configured ``doxy.module.Module`` instance.
    """
    my_module = module.Module(__file__, module_name)
    my_module.set_version("version.txt")
    my_module.set_title("$N gesture recognition")
    my_module.set_website("http://atria-soft.github.io/" + module_name)
    my_module.set_website_sources("http://github.com/atria-soft/" + module_name)
    # Source and documentation directories to scan.
    my_module.add_path([
        module_name,
        "doc"
        ])
    my_module.add_depend([
        'elog',
        'etk',
        'ejson',
        'esvg',
        'ememory',
        ])
    # Hide stream-operator overloads and internal debug headers from the docs.
    my_module.add_exclude_symbols([
        '*operator<<*',
        ])
    my_module.add_exclude_file([
        'debug.hpp',
        ])
    my_module.add_file_patterns([
        '*.hpp',
        '*.md',
        ])
    my_module.add_module_define([
        "PARSE_DOXYGEN",
        ])
    return my_module
tests/test_topn_precision.py | keener101/lkpy | 210 | 12760787 | import numpy as np
import pandas as pd
from pytest import approx
from lenskit.topn import precision
from lenskit.util.test import demo_recs
from lenskit import topn
def _test_prec(items, rel, **k):
    """Helper: score ``items`` against relevant items ``rel`` with precision.

    Builds the minimal recs/truth frames lenskit expects and forwards any
    extra keyword args (e.g. ``k=``) to the metric.
    """
    recs = pd.DataFrame({'item': items})
    truth = pd.DataFrame({'item': rel}).set_index('item')
    return precision(recs, truth, **k)
def test_precision_empty_none():
prec = _test_prec([], [1, 3])
assert prec is None
def test_precision_simple_cases():
prec = _test_prec([1, 3], [1, 3])
assert prec == approx(1.0)
prec = _test_prec([1], [1, 3])
assert prec == approx(1.0)
prec = _test_prec([1, 2, 3, 4], [1, 3])
assert prec == approx(0.5)
prec = _test_prec([1, 2, 3, 4], [1, 3, 5])
assert prec == approx(0.5)
prec = _test_prec([1, 2, 3, 4], range(5, 10))
assert prec == approx(0.0)
prec = _test_prec([1, 2, 3, 4], range(4, 10))
assert prec == approx(0.25)
def test_precision_series():
prec = _test_prec(pd.Series([1, 3]), pd.Series([1, 3]))
assert prec == approx(1.0)
prec = _test_prec(pd.Series([1, 2, 3, 4]), pd.Series([1, 3, 5]))
assert prec == approx(0.5)
prec = _test_prec(pd.Series([1, 2, 3, 4]), pd.Series(range(4, 10)))
assert prec == approx(0.25)
def test_precision_series_set():
prec = _test_prec(pd.Series([1, 2, 3, 4]), [1, 3, 5])
assert prec == approx(0.5)
prec = _test_prec(pd.Series([1, 2, 3, 4]), range(4, 10))
assert prec == approx(0.25)
def test_precision_series_index():
prec = _test_prec(pd.Series([1, 3]), pd.Index([1, 3]))
assert prec == approx(1.0)
prec = _test_prec(pd.Series([1, 2, 3, 4]), pd.Index([1, 3, 5]))
assert prec == approx(0.5)
prec = _test_prec(pd.Series([1, 2, 3, 4]), pd.Index(range(4, 10)))
assert prec == approx(0.25)
def test_precision_series_array():
    """Series recommendations against numpy array truth."""
    prec = _test_prec(pd.Series([1, 3]), np.array([1, 3]))
    assert prec == approx(1.0)
    prec = _test_prec(pd.Series([1, 2, 3, 4]), np.array([1, 3, 5]))
    assert prec == approx(0.5)
    prec = _test_prec(pd.Series([1, 2, 3, 4]), np.arange(4, 10, 1, 'u4'))
    assert prec == approx(0.25)
def test_precision_array():
    """numpy arrays on both sides, including an unsigned-int dtype."""
    prec = _test_prec(np.array([1, 3]), np.array([1, 3]))
    assert prec == approx(1.0)
    prec = _test_prec(np.array([1, 2, 3, 4]), np.array([1, 3, 5]))
    assert prec == approx(0.5)
    prec = _test_prec(np.array([1, 2, 3, 4]), np.arange(4, 10, 1, 'u4'))
    assert prec == approx(0.25)
def test_prec_long_rel():
    """With k=5 and 5 recs, precision is hits/5 (150 is the one miss)."""
    rel = np.arange(100)
    items = [1, 0, 150, 3, 10]
    r = _test_prec(items, rel, k=5)
    assert r == approx(0.8)
def test_prec_long_items():
    """More than k recommendations: only the first k=5 are scored."""
    rel = np.arange(100)
    items = [1, 0, 150, 3, 10, 30, 120, 4, 17]
    r = _test_prec(items, rel, k=5)
    assert r == approx(0.8)
def test_prec_short_items():
    """Fewer than k recs: denominator is the actual list length (3)."""
    rel = np.arange(100)
    items = [1, 0, 150]
    r = _test_prec(items, rel, k=5)
    assert r == approx(2 / 3)
def test_recall_bulk_k(demo_recs):
    """Bulk precision matches per-list precision, with and without k.

    NOTE(review): despite the name ("recall"), this exercises the
    `precision` metric — consider renaming for clarity.
    """
    train, test, recs = demo_recs
    assert test['user'].value_counts().max() > 5
    rla = topn.RecListAnalysis()
    rla.add_metric(precision, name='pk', k=5)
    rla.add_metric(precision)
    # metric without the bulk capabilities
    rla.add_metric(lambda *a, **k: precision(*a, **k), name='ind_pk', k=5)
    rla.add_metric(lambda *a: precision(*a), name='ind_p')
    res = rla.compute(recs, test)
    assert res.precision.values == approx(res.ind_p.values)
    assert res.pk.values == approx(res.ind_pk.values)
| 2.265625 | 2 |
gifspool/verification.py | DenysGurin/gifsta | 0 | 12760788 | import re
# Validation patterns: username is 3-20 word chars/dashes; password is any
# 3-20 chars; email is a loose non-whitespace "local@domain.tld" check.
USERNAME_RE = re.compile(r"^[a-zA-Z0-9_-]{3,20}$")
PASSWORD_RE = re.compile(r"^.{3,20}$")
EMAIL_RE = re.compile(r'^[\S]+@[\S]+\.[\S]+$')
# User-facing error messages keyed into the context by is_notVerifyed().
USERNAME_ERROR = "That's not a valid username."
PASSWORD_ERROR = "That wasn't a valid password."
V_PASSWORD_ERROR = "Your passwords didn't match."
EMAIL_ERROR = "That's not a valid email."
EXISTS_ERROR = "That user already exists"
LOGIN_ERROR = "Invalid login"
def chek_username(username):
    """Return a truthy match object when *username* passes USERNAME_RE,
    otherwise a falsy value (the empty/None username, or None on no match)."""
    if not username:
        return username
    return USERNAME_RE.match(username)
def chek_password(password):
    """Return a truthy match object when *password* passes PASSWORD_RE,
    otherwise a falsy value."""
    if not password:
        return password
    return PASSWORD_RE.match(password)
def chek_email(email):
    """Email is optional: empty is accepted (True); otherwise it must
    match EMAIL_RE (returns the match object, or None)."""
    if not email:
        return True
    return EMAIL_RE.match(email)
def is_notVerifyed(username, password, confirm, email, usermodel):
    """Validate signup fields against the user model.

    Returns a dict (echoing ``username``/``email`` plus one or more of the
    error keys ``un_error``, ``n_error``, ``p_error``, ``vp_error``,
    ``e_error``) when validation fails, or None (implicitly) when all
    fields are acceptable.
    """
    not_verifyed = False
    kwargs = {'username': username,
              'email': email}
    # Ask the database for this one username instead of materialising
    # every user row (the previous list comprehension was O(total users)).
    if usermodel.objects.filter(username=username).exists():
        not_verifyed = True
        kwargs["un_error"] = EXISTS_ERROR
    elif not chek_username(username):
        not_verifyed = True
        kwargs['n_error'] = USERNAME_ERROR
    if not chek_password(password):
        not_verifyed = True
        kwargs['p_error'] = PASSWORD_ERROR
    elif password != confirm:
        not_verifyed = True
        kwargs['vp_error'] = V_PASSWORD_ERROR
    if not chek_email(email):
        not_verifyed = True
        kwargs['e_error'] = EMAIL_ERROR
    if not_verifyed:
        return kwargs
# if not chek_username('111'):
# print "ne"
# if not chek_password('<PASSWORD>'):
# print 'pa'
# elif '111' != '111':
# print 'con'
# if not chek_email(''):
# print 'em'
# if USERNAME_RE.match('11'):
# print "re" | 3.15625 | 3 |
proyectos/2/MartinezNestor/main.py | Ricardo191998/sistop-2020-1 | 13 | 12760789 | from threading import Thread, Semaphore
from random import randint
from time import sleep
#global array to denote the airplanes in the air
planes = []                    # planes currently admitted to the airport
tracks = []                    # unused — NOTE(review): consider removing
planes_landed = -1             # index of the latest landed plane in `planes`
passengersDownloaded = 0       # passengers waiting for the shuttle bus
mutex = Semaphore(1)           # guards the shared counters/lists above
ops = Semaphore(0)             # signals the operator that a plane arrived
bus = Semaphore(0)             # signals the bus that 10 passengers wait
landingTrack = Semaphore(0)    # signals a track that a plane may land
airplane = Semaphore(1)        # serialises planes entering the airspace
busBarrier = Semaphore(0)      # unused — NOTE(review): consider removing
class Passenger():
    """A single passenger, identified by an integer id."""
    def __init__(self,id):
        self.id = id
class Generator():
    """Factory that builds lists of sequentially numbered passengers."""

    def __init__(self):
        pass

    def generatePassengers(self, n):
        """Return a list of *n* Passenger objects with ids 0..n-1."""
        return [Passenger(seat) for seat in range(n)]
class Plane():
    """A plane thread that repeatedly approaches the airport.

    Holds 1-8 randomly generated passengers and loops forever: acquire the
    airspace, register itself in `planes`, and signal the operator.
    """
    # Shared factory — every plane draws passengers from the same Generator.
    g = Generator()
    def __init__(self,id):
        self.id = id
        self.passengers = self.g.generatePassengers(randint(1,8))
        # Runs forever; __init__ never returns (intended for Thread target).
        self.fly()
    def fly(self):
        """Main loop: enter airspace, announce arrival, queue for landing."""
        global planes_landed,planes
        while True:
            airplane.acquire()
            print(self)
            sleep(5)
            with mutex:
                # NOTE(review): only appends while fewer than 6 planes have
                # landed, but ops.release() below fires unconditionally —
                # confirm the operator never indexes past the list.
                if planes_landed < 5:
                    planes.append(self)
            ops.release()
            sleep(2)
    def time_to_download(self):
        """Estimated disembark time: 0.6s per passenger (currently unused)."""
        time = 0
        for p in self.passengers:
            time_of_p = 0.4
            time += (0.2 + time_of_p)
        return time
    def __str__(self):
        return "Plane #" + str(self.id) + " is arriving with " + str(len(self.passengers)) + " passengers."
class Operator():
    """Tower operator thread: assigns arriving planes to landing tracks."""
    def __init__(self,id):
        self.id = id
        self.isAvailable = True   # NOTE(review): never read afterwards
        self.plane = None         # NOTE(review): never read afterwards
        # Runs forever; __init__ never returns (intended for Thread target).
        self.work()
    def work(self):
        """Wait for arrival signals and dispatch planes to tracks."""
        global planes,planes_landed
        while True:
            ops.acquire()
            with mutex:
                planes_landed += 1
                p = planes[planes_landed]
            # NOTE(review): planes_landed is read outside the mutex here —
            # another thread may change it between the block above and this
            # test; confirm this race is acceptable for the simulation.
            if planes_landed < 4:
                print("Operator is now attending plane #%d" % p.id)
                sleep(2)
                landingTrack.release()
            else:
                print("\tPlane %d must wait until a landing track is available..." % p.id)
                with mutex:
                    planes_landed -= 1
            with mutex:
                # Let the next plane into the airspace while capacity remains.
                if planes_landed < 5:
                    airplane.release()
class Track():
    """Landing-track thread: receives planes and disembarks passengers."""
    def __init__(self,id):
        self.id = id
        # Runs forever; __init__ never returns (intended for Thread target).
        self.receivePlane()
    def receivePlane(self):
        """Wait for the operator's signal, then service the latest plane."""
        global planes,planes_landed
        while True:
            landingTrack.acquire()
            sleep(2)
            with mutex:
                # NOTE(review): always services planes[planes_landed]; with
                # several tracks two could pick the same plane — confirm.
                plane = planes[planes_landed]
            print("\tLanding track %d ready for plane %d's landing" % (self.id,plane.id))
            self.attendPlane(plane)
    def attendPlane(self,plane):
        """Disembark every passenger; each 10th passenger dispatches the bus."""
        global planes_landed,passengersDownloaded
        print("\t\t\t\tPassengers are now dowloading from plane %d" % plane.id)
        for i in plane.passengers:
            print("\t\t\t\tPassenger %d is dowloading from plane %d" % (i.id,plane.id))
            sleep(1)
            with mutex:
                passengersDownloaded += 1
                # Bus leaves once exactly 10 passengers have accumulated.
                if passengersDownloaded == 10:
                    bus.release()
class Bus():
    """Shuttle-bus thread: departs whenever 10 passengers have gathered."""
    def __init__(self):
        # Runs forever; __init__ never returns (intended for Thread target).
        self.leaveWithPassengers()
    def leaveWithPassengers(self):
        """Wait for the 10-passenger signal, drive to the terminal, reset."""
        global passengersDownloaded
        while True:
            bus.acquire()
            print("\t\tBus is now leaving. We have %d passengers" % passengersDownloaded)
            sleep(5)
            print("\t\tArriving at terminal. %d passengers are going home." % passengersDownloaded)
            # NOTE(review): reset happens outside `mutex`; concurrent
            # increments during the trip may be lost — confirm intent.
            passengersDownloaded = 0
if __name__ == '__main__':
    # Spin up one operator, one bus, five planes and four landing tracks.
    # Each target's __init__ loops forever, so the threads never exit.
    num_planes = 5
    num_tracks = 4
    Thread(target=Operator,args=[0]).start()
    Thread(target=Bus,args=[]).start()
    for i in range(num_planes):
        Thread(target=Plane,args=[i]).start()
    for i in range(num_tracks):
        Thread(target=Track,args=[i]).start()
webapp/app.py | data-science-misis/rec-sys | 0 | 12760790 | <reponame>data-science-misis/rec-sys<filename>webapp/app.py
#!/usr/bin/env python
# encoding: utf-8
import os
import dash
from dash import dash_table
from dash import html
import json
from dash import dcc, Output, Input
from flask import request, jsonify
import prediction_engine
from data_provider import database
# Port comes from the environment (e.g. Heroku's $PORT), defaulting to 5000.
port = int(os.environ.get("PORT", 5000))
app = dash.Dash(__name__)
# Expose the underlying Flask server for WSGI deployment and @server.route.
server = app.server
# Static page layout: a user picker, a recommendations panel filled in by
# the update_output callback, and a browsable view of the full dataset.
app.layout = html.Div([
    html.H1("Wine Recommender"),
    html.H3("Please pick a user"),
    dcc.Dropdown(
        id='user-dropdown',
        options=[{'label': user, 'value': user} for user in prediction_engine.get_user_ids()] + [
            {'label': 'Unknown', 'value': 'Unknown'}],
        value='Unknown'
    ),
    html.Div(id='user-output-container'),
    html.H2("Recommendations"),
    html.Div(id='recommendations-datatable'),
    html.H2("Dataset"),
    dash_table.DataTable(
        id='datatable-row-ids',
        columns=[
            {'name': i, 'id': i, 'deletable': False, 'selectable': True} for i in database().columns
            # omit the id column
            if i != 'id'
        ],
        data=database().to_dict('records'),
        editable=False,
        filter_action="native",
        sort_action="native",
        sort_mode='multi',
        row_selectable='multi',
        row_deletable=False,
        selected_rows=[],
        page_action='native',
        page_current=0,
        page_size=10,
    ),
    html.Div(id='datatable-interactivity-container'),
])
@app.callback(
    Output('recommendations-datatable', 'children'),
    Input('user-dropdown', 'value')
)
def update_output(value, k=10):
    """Rebuild the recommendations panel when the user dropdown changes.

    'Unknown' maps to user_id None (popularity-based fallback). Dash only
    supplies `value`; `k` keeps its default of 10 recommendations.
    """
    user_id = value if value != 'Unknown' else None
    method, predictions = predict(user_id, k)
    # Empty DataFrame -> placeholder heading instead of an empty table.
    table = html.H6("No recommendations generated") if predictions.empty else dash_table.DataTable(
        columns=[
            {'name': i, 'id': i, 'deletable': False, 'selectable': True} for i in predictions.columns
            # omit the id column
            if i != 'id'
        ],
        data=predictions.to_dict('records'),
        editable=False,
        filter_action="native",
        sort_action="native",
        sort_mode='multi',
        row_selectable='multi',
        row_deletable=False,
        selected_rows=[],
        page_action='native',
        page_current=0,
        page_size=10,
    )
    return [
        html.H2(method),
        table
    ]
@app.callback(
    Output('datatable-interactivity', 'style_data_conditional'),
    Input('datatable-interactivity', 'selected_columns')
)
def update_styles(selected_columns):
    """Highlight every currently selected column in the data table."""
    highlighted = []
    for column_id in selected_columns:
        highlighted.append({
            'if': {'column_id': column_id},
            'background_color': '#D2F3FF'
        })
    return highlighted
@server.route("/health", methods=['GET'])
def hello():
    """Liveness probe: returns a static OK message."""
    return "<p>Service is up and running</p>"
def predictions_to_response(prediction_type, predictions):
    """Serialize a prediction payload into the API's JSON envelope:
    ``{"type": <strategy name>, "predictions": <records>}``."""
    payload = {
        'type': prediction_type,
        'predictions': predictions,
    }
    return json.dumps(payload)
@server.route('/api/users', methods=['GET'])
def get_users():
    """Return the known user ids as a JSON array."""
    return jsonify(prediction_engine.get_user_ids())
@server.route('/api/predictions', methods=['POST'])
def controller_predict():
    """Prediction API: body is JSON with optional 'user_id' and
    'predictions_count'; responds with the serialized envelope."""
    record = json.loads(request.data)
    user_id = record.get('user_id')
    k = record.get('predictions_count')
    method, predictions = predict(user_id, k)
    return predictions_to_response(method, predictions.to_dict(orient='records'))
def predict(user_id=None, k=None):
    """Pick a recommendation strategy.

    Known user -> collaborative filtering; no/unknown user -> top-k popular
    items. Returns (strategy name, predictions DataFrame).
    """
    if user_id:
        return 'collaborative-filtering', prediction_engine.predict_collaborative_filtering(user_id)
    return 'popularity based', prediction_engine.predict_popular(k)
if __name__ == '__main__':
    # Run the Flask development server directly (debug on, all interfaces).
    server.run(debug=True, host='0.0.0.0', port=port)
| 2.296875 | 2 |
casepro/msgs/views.py | rapidpro/ureport-partners | 0 | 12760791 | <reponame>rapidpro/ureport-partners<gh_stars>0
import logging
from collections import defaultdict
import iso639
from dash.orgs.views import OrgObjPermsMixin, OrgPermsMixin
from el_pagination.paginators import LazyPaginator
from smartmin.csv_imports.models import ImportTask
from smartmin.mixins import NonAtomicMixin
from smartmin.views import (
SmartCreateView,
SmartCRUDL,
SmartCSVImportView,
SmartDeleteView,
SmartListView,
SmartReadView,
SmartTemplateView,
SmartUpdateView,
)
from temba_client.utils import parse_iso8601
from django import forms
from django.core.validators import FileExtensionValidator
from django.http import HttpResponse, HttpResponseBadRequest, JsonResponse
from django.urls import reverse
from django.utils.timesince import timesince
from django.utils.timezone import now
from django.utils.translation import gettext_lazy as _
from casepro.rules.mixins import RuleFormMixin
from casepro.statistics.models import DailyCount
from casepro.utils import JSONEncoder, month_range, str_to_bool
from casepro.utils.export import BaseDownloadView
from .forms import FaqForm, LabelForm
from .models import FAQ, Label, Message, MessageExport, MessageFolder, Outgoing, OutgoingFolder, ReplyExport
from .tasks import message_export, reply_export
RESPONSE_DELAY_WARN_SECONDS = 24 * 60 * 60  # show response delays > 1 day as warning
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
# Override the ImportTask start method so we can use our self-defined task
def override_start(self, org):  # pragma: no cover
    """Replacement for ImportTask.start that queues our FAQ CSV import
    Celery task and records the resulting task id on the ImportTask."""
    from .tasks import faq_csv_import

    self.log("Queued import at %s" % now())
    self.save(update_fields=("import_log",))
    # trigger task
    result = faq_csv_import.delay(org.id, self.id)
    self.task_id = result.task_id
    self.save(update_fields=("task_id",))


# Monkey-patch smartmin's ImportTask globally at import time.
ImportTask.start = override_start
class LabelCRUDL(SmartCRUDL):
    """CRUD views for message labels, plus per-user watch/unwatch."""
    actions = ("create", "update", "read", "delete", "list", "watch", "unwatch")
    model = Label
    class Create(RuleFormMixin, OrgPermsMixin, SmartCreateView):
        form_class = LabelForm
        def get_form_kwargs(self):
            kwargs = super(LabelCRUDL.Create, self).get_form_kwargs()
            kwargs["org"] = self.request.org
            kwargs["is_create"] = True
            return kwargs
        def derive_initial(self):
            # label created manually in casepro aren't synced by default
            initial = super(LabelCRUDL.Create, self).derive_initial()
            initial["is_synced"] = False
            return initial
        def save(self, obj):
            # Build the label via the model factory instead of saving `obj`.
            data = self.form.cleaned_data
            org = self.request.org
            name = data["name"]
            description = data["description"]
            tests = self.construct_tests()
            is_synced = data["is_synced"]
            self.object = Label.create(org, name, description, tests, is_synced)
        def get_success_url(self):
            return reverse("msgs.label_read", args=[self.object.pk])
    class Update(RuleFormMixin, OrgObjPermsMixin, SmartUpdateView):
        form_class = LabelForm
        success_url = "id@msgs.label_read"
        def get_form_kwargs(self):
            kwargs = super(LabelCRUDL.Update, self).get_form_kwargs()
            kwargs["org"] = self.request.org
            kwargs["is_create"] = False
            return kwargs
        def post_save(self, obj):
            # Re-derive the rule tests from the submitted form after save.
            obj = super(LabelCRUDL.Update, self).post_save(obj)
            tests = self.construct_tests()
            obj.update_tests(tests)
            return obj
    class Read(OrgObjPermsMixin, SmartReadView):
        def get_queryset(self):
            return Label.get_all(self.request.org, self.request.user)
        def get_context_data(self, **kwargs):
            context = super(LabelCRUDL.Read, self).get_context_data(**kwargs)
            # augment usual label JSON
            label_json = self.object.as_json()
            label_json["watching"] = self.object.is_watched_by(self.request.user)
            # angular app requires context data in JSON format
            context["context_data_json"] = {"label": label_json}
            context["rule_tests"] = self.object.rule.get_tests_description() if self.object.rule else ""
            return context
    class Delete(OrgObjPermsMixin, SmartDeleteView):
        cancel_url = "@msgs.label_list"
        def post(self, request, *args, **kwargs):
            # Soft-delete via release() rather than a hard DB delete.
            label = self.get_object()
            label.release()
            return HttpResponse(status=204)
    class List(OrgPermsMixin, SmartListView):
        def get(self, request, *args, **kwargs):
            # JSON list of labels, optionally with this/last month activity.
            with_activity = str_to_bool(self.request.GET.get("with_activity", ""))
            labels = list(Label.get_all(self.request.org, self.request.user).order_by("name"))
            Label.bulk_cache_initialize(labels)
            if with_activity:
                # get message statistics
                this_month = DailyCount.get_by_label(labels, DailyCount.TYPE_INCOMING, *month_range(0)).scope_totals()
                last_month = DailyCount.get_by_label(labels, DailyCount.TYPE_INCOMING, *month_range(-1)).scope_totals()
            def as_json(label):
                obj = label.as_json()
                if with_activity:
                    obj["activity"] = {"this_month": this_month.get(label, 0), "last_month": last_month.get(label, 0)}
                return obj
            return JsonResponse({"results": [as_json(l) for l in labels]})
    class Watch(OrgObjPermsMixin, SmartReadView):
        """
        Endpoint for watching a label
        """
        permission = "msgs.label_read"
        def post(self, request, *args, **kwargs):
            self.get_object().watch(request.user)
            return HttpResponse(status=204)
    class Unwatch(OrgObjPermsMixin, SmartReadView):
        """
        Endpoint for unwatching a label
        """
        permission = "msgs.label_read"
        def post(self, request, *args, **kwargs):
            self.get_object().unwatch(request.user)
            return HttpResponse(status=204)
class MessageSearchMixin(object):
    def derive_search(self):
        """
        Collects and prepares message search parameters into JSON serializable dict
        """
        folder = MessageFolder[self.request.GET["folder"]]
        # Flagged folder with ?archived=1 widens to include archived messages.
        if folder == MessageFolder.flagged and str_to_bool(self.request.GET.get("archived", "")):
            folder = MessageFolder.flagged_with_archived
        label_id = self.request.GET.get("label", None)
        text = self.request.GET.get("text", None)
        contact_id = self.request.GET.get("contact", None)
        after = parse_iso8601(self.request.GET.get("after", None))
        before = parse_iso8601(self.request.GET.get("before", None))
        return {
            "folder": folder,
            "label": label_id,
            "text": text,
            "contact": contact_id,
            "after": after,
            "before": before,
        }
class MessageCRUDL(SmartCRUDL):
    """JSON endpoints for incoming messages: search, locking, bulk actions,
    labelling, bulk replies, forwarding and per-message history."""
    actions = ("search", "lock", "action", "label", "bulk_reply", "forward", "history")
    model = Message
    class Search(OrgPermsMixin, MessageSearchMixin, SmartTemplateView):
        """
        JSON endpoint for fetching incoming messages
        """
        page_size = 50
        def get_messages(self, search, last_refresh=None):
            org = self.request.org
            user = self.request.user
            queryset = Message.search(org, user, search, modified_after=last_refresh, all=False)
            return queryset.prefetch_related("contact", "labels", "case__assignee", "case__user_assignee")
        def get_context_data(self, **kwargs):
            context = super(MessageCRUDL.Search, self).get_context_data(**kwargs)
            page = int(self.request.GET.get("page", 1))
            last_refresh = self.request.GET.get("last_refresh")
            search = self.derive_search()
            # this is a refresh of new and modified messages
            if last_refresh:
                messages = self.get_messages(search, last_refresh)
                # don't use paging for these messages
                context["object_list"] = list(messages)
                context["has_more"] = False
            else:
                messages = self.get_messages(search)
                paginator = LazyPaginator(messages, per_page=self.page_size)
                context["object_list"] = paginator.page(page)
                context["has_more"] = paginator.num_pages > page
            return context
        def render_to_response(self, context, **response_kwargs):
            results = []
            for m in context["object_list"]:
                msg = m.as_json()
                # Include who (if anyone) currently holds a lock on the message.
                msg["lock"] = m.get_lock(self.request.user)
                results.append(msg)
            return JsonResponse({"results": results, "has_more": context["has_more"]}, encoder=JSONEncoder)
    class Lock(OrgPermsMixin, SmartTemplateView):
        """
        AJAX endpoint for updating messages with a date and user id.
        Takes a list of message ids.
        """
        @classmethod
        def derive_url_pattern(cls, path, action):
            return r"^message/lock/(?P<action>\w+)/$"
        def post(self, request, *args, **kwargs):
            org = request.org
            user = request.user
            action = kwargs["action"]
            message_ids = request.json["messages"]
            messages = org.incoming_messages.filter(org=org, backend_id__in=message_ids)
            lock_messages = []
            if action == "lock":
                # First pass: collect ids already locked by another user;
                # only lock the batch if none were contested.
                for message in messages:
                    if message.get_lock(request.user):
                        lock_messages.append(message.backend_id)
                if not lock_messages:
                    for message in messages:
                        message.user_lock(user)
            elif action == "unlock":
                for message in messages:
                    message.user_unlock()
            else:  # pragma: no cover
                return HttpResponseBadRequest("Invalid action: %s", action)
            return JsonResponse({"messages": lock_messages}, encoder=JSONEncoder)
    class Action(OrgPermsMixin, SmartTemplateView):
        """
        AJAX endpoint for bulk message actions. Takes a list of message ids.
        """
        @classmethod
        def derive_url_pattern(cls, path, action):
            return r"^message/action/(?P<action>\w+)/$"
        def post(self, request, *args, **kwargs):
            org = request.org
            user = request.user
            action = kwargs["action"]
            message_ids = request.json["messages"]
            messages = org.incoming_messages.filter(org=org, backend_id__in=message_ids)
            # Optional label for the label/unlabel actions.
            label_id = request.json.get("label")
            label = Label.get_all(org, user).get(pk=label_id) if label_id else None
            if action == "flag":
                Message.bulk_flag(org, user, messages)
            elif action == "unflag":
                Message.bulk_unflag(org, user, messages)
            elif action == "label":
                Message.bulk_label(org, user, messages, label)
            elif action == "unlabel":
                Message.bulk_unlabel(org, user, messages, label)
            elif action == "archive":
                Message.bulk_archive(org, user, messages)
            elif action == "restore":
                Message.bulk_restore(org, user, messages)
            else:  # pragma: no cover
                return HttpResponseBadRequest("Invalid action: %s", action)
            return HttpResponse(status=204)
    class Label(OrgPermsMixin, SmartTemplateView):
        """
        AJAX endpoint for labelling a message.
        """
        @classmethod
        def derive_url_pattern(cls, path, action):
            return r"^message/label/(?P<id>\d+)/$"
        def post(self, request, *args, **kwargs):
            org = request.org
            user = request.user
            # `Label` here resolves to the imported model, not this view class
            # (class bodies don't create an enclosing scope).
            # NOTE(review): other views use self.request.org / the local `org`
            # — confirm self.org is actually set by the permissions mixin.
            user_labels = Label.get_all(self.org, user)
            message_id = int(kwargs["id"])
            message = org.incoming_messages.filter(org=org, backend_id=message_id).first()
            label_ids = request.json["labels"]
            specified_labels = list(user_labels.filter(pk__in=label_ids))
            # user can't remove labels that they can't see
            unseen_labels = [l for l in message.labels.all() if l not in user_labels]
            message.update_labels(user, specified_labels + unseen_labels)
            return HttpResponse(status=204)
    class BulkReply(OrgPermsMixin, SmartTemplateView):
        """
        JSON endpoint for bulk messages replies
        """
        @classmethod
        def derive_url_pattern(cls, path, action):
            return r"^message/bulk_reply/$"
        def post(self, request, *args, **kwargs):
            text = request.json["text"]
            message_ids = request.json["messages"]
            messages = Message.objects.filter(org=request.org, backend_id__in=message_ids).select_related("contact")
            # organize messages by contact
            messages_by_contact = defaultdict(list)
            for msg in messages:
                messages_by_contact[msg.contact].append(msg)
            # NOTE(review): the original comment said the reply target is the
            # OLDEST selected message per contact, but sorting with
            # reverse=True and taking [0] selects the NEWEST — confirm which
            # behavior is intended.
            reply_tos = []
            for contact, contact_messages in messages_by_contact.items():
                contact_messages = sorted(contact_messages, key=lambda m: m.created_on, reverse=True)
                reply_tos.append(contact_messages[0])
            outgoing = Outgoing.create_bulk_replies(request.org, request.user, text, reply_tos)
            return JsonResponse({"messages": len(outgoing)})
    class Forward(OrgPermsMixin, SmartTemplateView):
        """
        JSON endpoint for forwarding a message to a URN
        """
        @classmethod
        def derive_url_pattern(cls, path, action):
            return r"^message/forward/(?P<id>\d+)/$"
        def post(self, request, *args, **kwargs):
            text = request.json["text"]
            message = Message.objects.get(org=request.org, backend_id=int(kwargs["id"]))
            urns = request.json["urns"]
            outgoing = Outgoing.create_forwards(request.org, request.user, text, urns, message)
            return JsonResponse({"messages": len(outgoing)})
    class History(OrgPermsMixin, SmartTemplateView):
        """
        JSON endpoint for fetching message history. Takes a message backend id
        """
        @classmethod
        def derive_url_pattern(cls, path, action):
            return r"^message/history/(?P<id>\d+)/$"
        def get(self, request, *args, **kwargs):
            message = Message.objects.get(org=request.org, backend_id=int(kwargs["id"]))
            actions = [a.as_json() for a in message.get_history()]
            return JsonResponse({"actions": actions}, encoder=JSONEncoder)
class MessageExportCRUDL(SmartCRUDL):
    """Create (async, via Celery) and download message exports."""
    model = MessageExport
    actions = ("create", "read")
    class Create(NonAtomicMixin, OrgPermsMixin, MessageSearchMixin, SmartCreateView):
        def post(self, request, *args, **kwargs):
            # Queue the export task and hand back its id for polling.
            search = self.derive_search()
            export = MessageExport.create(self.request.org, self.request.user, search)
            message_export.delay(export.pk)
            return JsonResponse({"export_id": export.pk})
    class Read(BaseDownloadView):
        title = _("Download Messages")
        filename = "message_export.xls"
class ReplySearchMixin(object):
    def derive_search(self):
        """
        Collects and prepares reply search parameters into JSON serializable dict
        """
        params = self.request.GET
        partner = params.get("partner")
        after = parse_iso8601(params.get("after"))
        before = parse_iso8601(params.get("before"))
        return {"partner": partner, "after": after, "before": before}
class OutgoingCRUDL(SmartCRUDL):
    """JSON endpoints for outgoing messages and user replies."""
    actions = ("search", "search_replies")
    model = Outgoing
    class Search(OrgPermsMixin, SmartTemplateView):
        """
        JSON endpoint for fetching outgoing messages
        """
        def derive_search(self):
            folder = OutgoingFolder[self.request.GET["folder"]]
            text = self.request.GET.get("text", None)
            contact = self.request.GET.get("contact", None)
            return {"folder": folder, "text": text, "contact": contact}
        def get_context_data(self, **kwargs):
            context = super(OutgoingCRUDL.Search, self).get_context_data(**kwargs)
            org = self.request.org
            user = self.request.user
            page = int(self.request.GET.get("page", 1))
            search = self.derive_search()
            messages = Outgoing.search(org, user, search)
            paginator = LazyPaginator(messages, per_page=50)
            context["object_list"] = paginator.page(page)
            context["has_more"] = paginator.num_pages > page
            return context
        def render_to_response(self, context, **response_kwargs):
            return JsonResponse(
                {"results": [m.as_json() for m in context["object_list"]], "has_more": context["has_more"]},
                encoder=JSONEncoder,
            )
    class SearchReplies(OrgPermsMixin, ReplySearchMixin, SmartTemplateView):
        """
        JSON endpoint to fetch replies made by users
        """
        def get(self, request, *args, **kwargs):
            org = self.request.org
            user = self.request.user
            page = int(self.request.GET.get("page", 1))
            search = self.derive_search()
            items = Outgoing.search_replies(org, user, search).exclude(reply_to=None)
            paginator = LazyPaginator(items, 50)
            outgoing = paginator.page(page)
            has_more = paginator.num_pages > page
            def as_json(msg):
                # Seconds between the incoming message and this reply; long
                # delays (> RESPONSE_DELAY_WARN_SECONDS) are flagged.
                delay = (msg.created_on - msg.reply_to.created_on).total_seconds()
                obj = msg.as_json()
                obj.update(
                    {
                        "reply_to": {
                            "text": msg.reply_to.text,
                            "flagged": msg.reply_to.is_flagged,
                            "labels": [l.as_json(full=False) for l in msg.reply_to.labels.all()],
                        },
                        "response": {
                            "delay": timesince(msg.reply_to.created_on, now=msg.created_on),
                            "warning": delay > RESPONSE_DELAY_WARN_SECONDS,
                        },
                    }
                )
                return obj
            return JsonResponse({"results": [as_json(o) for o in outgoing], "has_more": has_more}, encoder=JSONEncoder)
class ReplyExportCRUDL(SmartCRUDL):
    """Create (async, via Celery) and download reply exports."""
    model = ReplyExport
    actions = ("create", "read")
    class Create(NonAtomicMixin, OrgPermsMixin, ReplySearchMixin, SmartCreateView):
        def post(self, request, *args, **kwargs):
            # Queue the export task and hand back its id for polling.
            search = self.derive_search()
            export = self.model.create(self.request.org, self.request.user, search)
            reply_export.delay(export.pk)
            return JsonResponse({"export_id": export.pk})
    class Read(BaseDownloadView):
        title = _("Download Replies")
        filename = "reply_export.xls"
class FaqSearchMixin(object):
    def derive_search(self):
        """
        Collects and prepares FAQ search parameters into JSON serializable dict
        """
        label = self.request.GET.get("label", None)
        text = self.request.GET.get("text", None)
        language = self.request.GET.get("language", None)
        return {"label": label, "text": text, "language": language}
class FaqCRUDL(SmartCRUDL):
    """CRUD, search, CSV import and language listing for FAQs."""
    model = FAQ
    actions = ("list", "create", "read", "update", "delete", "search", "import", "languages")
    class List(OrgPermsMixin, SmartListView):
        fields = ("question", "answer", "language", "parent")
        default_order = ("-parent", "question")
        def derive_queryset(self, **kwargs):
            return FAQ.get_all(self.request.org)
    class Create(OrgPermsMixin, SmartCreateView):
        form_class = FaqForm
        def get_form_kwargs(self):
            kwargs = super(FaqCRUDL.Create, self).get_form_kwargs()
            # Get the data for post requests that didn't come through a form
            if self.request.method == "POST" and not self.request.POST and hasattr(self.request, "json"):
                kwargs["data"] = self.request.json
            kwargs["org"] = self.request.org
            return kwargs
        def save(self, obj):
            # Create via the model factory instead of saving the form object.
            data = self.form.cleaned_data
            org = self.request.org
            question = data["question"]
            answer = data["answer"]
            language = data["language"]
            parent = data["parent"]
            labels = data["labels"]
            faq = FAQ.create(org, question, answer, language, parent, labels)
            self.object = faq
    class Read(OrgPermsMixin, SmartReadView):
        fields = ["question", "answer", "language", "parent"]
        def derive_queryset(self, **kwargs):
            return FAQ.get_all(self.request.org)
        def get_context_data(self, **kwargs):
            context = super(FaqCRUDL.Read, self).get_context_data(**kwargs)
            edit_button_url = reverse("msgs.faq_update", args=[self.object.pk])
            context["context_data_json"] = {"faq": self.object.as_json()}
            context["edit_button_url"] = edit_button_url
            context["can_delete"] = True
            # Render the FAQ's labels as a comma-separated string.
            labels = []
            for label in self.object.labels.all():
                labels.append(label.name)
            context["labels"] = ", ".join(labels)
            return context
    class Update(OrgPermsMixin, SmartUpdateView):
        form_class = FaqForm
        def get_form_kwargs(self):
            kwargs = super(FaqCRUDL.Update, self).get_form_kwargs()
            # Get the data for post requests that didn't come through a form
            if self.request.method == "POST" and not self.request.POST and hasattr(self.request, "json"):
                kwargs["data"] = self.request.json
            kwargs["org"] = self.request.org
            return kwargs
        def derive_initial(self):
            initial = super(FaqCRUDL.Update, self).derive_initial()
            initial["labels"] = self.object.labels.all()
            return initial
        def derive_fields(self):
            # Only top-level FAQs (no parent) can have their labels edited.
            fields = ["question", "answer", "language", "parent"]
            if not self.object.parent:
                fields.append("labels")
            return tuple(fields)
    class Delete(OrgPermsMixin, SmartDeleteView):
        cancel_url = "@msgs.faq_list"
        def post(self, request, *args, **kwargs):
            # Soft-delete via release() rather than a hard DB delete.
            faq = self.get_object()
            faq.release()
            return HttpResponse(status=204)
    class Search(OrgPermsMixin, FaqSearchMixin, SmartTemplateView):
        """
        JSON endpoint for searching FAQs
        """
        def get_context_data(self, **kwargs):
            context = super(FaqCRUDL.Search, self).get_context_data(**kwargs)
            org = self.request.org
            user = self.request.user
            search = self.derive_search()
            faqs = FAQ.search(org, user, search)
            context["object_list"] = faqs
            return context
        def render_to_response(self, context, **response_kwargs):
            return JsonResponse({"results": [m.as_json() for m in context["object_list"]]}, encoder=JSONEncoder)
    class Import(OrgPermsMixin, SmartCSVImportView):
        class Form(forms.ModelForm):
            csv_file = forms.FileField(label=_("Import file"), validators=[FileExtensionValidator(["csv"])])
            class Meta:
                model = ImportTask
                fields = ("csv_file",)
        model = ImportTask
        success_message = "File uploaded successfully. If your FAQs don't appear here soon, something went wrong."
        success_url = "@msgs.faq_list"
        def get_form_class(self):
            return FaqCRUDL.Import.Form
        def post_save(self, task):
            # Starts the (monkey-patched) async import — see override_start.
            task.start(self.org)
            return task
    class Languages(OrgPermsMixin, SmartTemplateView):
        """
        JSON endpoint for getting a list of currently all available languages
        """
        def get_context_data(self, **kwargs):
            context = super(FaqCRUDL.Languages, self).get_context_data(**kwargs)
            org = self.request.org
            langs = FAQ.get_all_languages(org)
            lang_list = []
            for lang in langs:
                lang_list.append(FAQ.get_language_from_code(lang["language"]))
            context["language_list"] = lang_list
            # Full ISO-639 catalogue for the language picker.
            iso_list = [{"name": l.name, "code": l.part3} for l in iso639.languages]
            context["iso_list"] = iso_list
            return context
        def render_to_response(self, context, **response_kwargs):
            return JsonResponse(
                {"results": context["language_list"], "iso_list": context["iso_list"]}, encoder=JSONEncoder
            )
| 1.570313 | 2 |
test/test.py | gelm0/huffman_python | 0 | 12760792 | <reponame>gelm0/huffman_python
import os
import sys
import filecmp
import unittest
from compression import huffmantree, huffman
def test_compress(file_name):
    """Round-trip *file_name* through standard Huffman encode/decode.

    Returns (original bytes, decoded bytes) for comparison.
    """
    with open(file_name, 'rb') as fin:
        original = fin.read()
    encoded = huffman.encode_data(original)
    decoded = huffman.decode_data(encoded)
    return original, decoded
def test_compress_canon(file_name):
    """Round-trip *file_name* through canonical Huffman encode/decode.

    Returns (original bytes, decoded bytes) for comparison.
    """
    with open(file_name, 'rb') as fin:
        original = fin.read()
    encoded = huffman.encode_data(original, True)
    decoded = huffman.decode_data(encoded)
    return original, decoded
def get_symbol_tree(data):
    """Build a HuffmanTree over *data* and return its symbol tree.

    Previously the computed tree was discarded; returning it makes this
    helper usable (callers that ignored the None result are unaffected).
    """
    h = huffmantree.HuffmanTree(data=data)
    return h.get_symbol_tree_by_val()
class HuffmanTest(unittest.TestCase):
    """Round-trip and header tests for both standard and canonical Huffman."""
    # Fixture files live in <repo>/test/resources/.
    resources = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) \
            + '/test/resources/'
    test_file_1 = resources + 'short_text.txt'
    test_file_2 = resources + 'medium_text.txt'
    test_file_3 = resources + '84-h.txt'
    def test_shorter_string_huffman(self):
        """Short input round-trips losslessly (standard codes)."""
        data_read, decompressed_data = test_compress(self.test_file_1)
        assert data_read == decompressed_data
    def test_shorter_string_canon(self):
        """Short input round-trips losslessly (canonical codes)."""
        data_read, decompressed_data = test_compress_canon(self.test_file_1)
        assert data_read == decompressed_data
    def test_longer_string_huffman(self):
        """Medium input round-trips losslessly (standard codes)."""
        data_read, decompressed_data = test_compress(self.test_file_2)
        assert data_read == decompressed_data
    def test_longer_string_canon(self):
        """Medium input round-trips losslessly (canonical codes)."""
        data_read, decompressed_data = test_compress_canon(self.test_file_2)
        assert data_read == decompressed_data
    def test_frankenstein_book_huffman(self):
        """Full book round-trips losslessly (standard codes)."""
        data_read, decompressed_data = test_compress(self.test_file_3)
        assert data_read == decompressed_data
    def test_frankenstein_book_canon(self):
        """Full book round-trips losslessly (canonical codes)."""
        data_read, decompressed_data = test_compress_canon(self.test_file_3)
        assert data_read == decompressed_data
    def test_encode_decode_canon_header(self):
        """Canonical header survives serialize -> deserialize intact."""
        h = huffmantree.HuffmanTree(file_name=self.test_file_1)
        symbol_tree = h.get_canon_tree()
        header = huffman.construct_canonical_header(symbol_tree)
        # b'0\n0' appends a minimal dummy payload after the header.
        deserialized_header, _ = huffman.deconstruct_encoded_data(header
                                                                  + b'0\n0')
        expected_symbol_tree =\
            huffman.read_canonical_header(deserialized_header)
        assert symbol_tree == expected_symbol_tree
    def test_encode_decode_header(self):
        """Standard header survives serialize -> deserialize intact."""
        h = huffmantree.HuffmanTree(file_name=self.test_file_1)
        symbol_tree = h.get_symbol_tree_by_val()
        header = huffman.construct_header(symbol_tree)
        # b'0\n0' appends a minimal dummy payload after the header.
        deserialized_header, _ = huffman.deconstruct_encoded_data(header
                                                                  + b'0\n0')
        expected_symbol_tree = huffman.read_header(deserialized_header)
        assert symbol_tree == expected_symbol_tree
    def test_full_program_flow(self):
        """Drive the CLI end-to-end via sys.argv for both modes.

        NOTE(review): writes out/outd/outc/outdc into the CWD and never
        cleans them up — consider tempfile + addCleanup.
        """
        outfile = 'out'
        outfile_decomp = 'outd'
        outfile_canon = 'outc'
        outfile_canon_decomp = 'outdc'
        sys.argv = ['', '-i', self.test_file_3, '-o', outfile, '-e']
        huffman.main()
        sys.argv = ['', '-i', self.test_file_3, '-o',
                    outfile_canon, '-e', '-c']
        huffman.main()
        sys.argv = ['', '-o', outfile_decomp, '-i', outfile, '-d']
        huffman.main()
        sys.argv = ['', '-o', outfile_canon_decomp,
                    '-i', outfile_canon, '-d']
        huffman.main()
        assert(filecmp.cmp(self.test_file_3, outfile_decomp, shallow=False))
        assert(filecmp.cmp(self.test_file_3, outfile_canon_decomp, shallow=False))
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| 2.84375 | 3 |
evq/__init__.py | sylwekczmil/evq | 0 | 12760793 | <reponame>sylwekczmil/evq
"""Top-level package for evq."""
__author__ = """<NAME>"""
__email__ = '<EMAIL>'
__version__ = '0.0.2'
| 0.664063 | 1 |
models/discriminator.py | wspalding/hs_cards_3 | 0 | 12760794 | <gh_stars>0
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dropout, Dense, Flatten, \
Conv2DTranspose, Reshape, AveragePooling2D, UpSampling2D, LeakyReLU, \
BatchNormalization, Embedding, Concatenate, Input
from tensorflow.keras.losses import BinaryCrossentropy
from tensorflow.keras import initializers
from tensorflow.keras.optimizers import Adam
from tensorflow.python.keras import activations
from tensorflow.python.keras.engine.training import Model
from tensorflow.python.ops.gen_math_ops import Mod
def create_discriminator(config):
    """Build the GAN discriminator as a Keras functional model.

    The network is a stack of strided convolutions (each followed by
    dropout, batch-norm and LeakyReLU), flattened into three Dense layers
    and a final sigmoid real/fake score.

    config must provide ``image_shape`` and ``discriminator_dropout_rate``.
    """
    image = Input(shape=config.image_shape, name='image input')

    # First conv keeps spatial size (stride 1); the rest downsample by 2.
    features = Conv2D(8, (16, 16), strides=(1, 1), padding='same',
                      input_shape=config.image_shape)(image)
    features = Dropout(config.discriminator_dropout_rate)(features)
    features = BatchNormalization()(features)
    features = LeakyReLU(0.2)(features)

    for filters, kernel, strides in ((32, (8, 8), (2, 2)),
                                     (32, (4, 4), (2, 2)),
                                     (32, (4, 4), (2, 2))):
        features = Conv2D(filters, kernel, strides=strides, padding='same')(features)
        features = Dropout(config.discriminator_dropout_rate)(features)
        features = BatchNormalization()(features)
        features = LeakyReLU(0.2)(features)

    features = Flatten()(features)
    for _ in range(3):
        features = Dense(64)(features)
        features = LeakyReLU(0.2)(features)

    verdict = Dense(1, activation='sigmoid')(features)
    return Model(image, verdict, name='discriminator')
def discriminator_loss(real_output, fake_output):
    """Standard GAN discriminator loss.

    Binary cross-entropy of real samples against a target of ones plus
    fake samples against a target of zeros.
    """
    bce = BinaryCrossentropy(from_logits=True)
    loss_on_real = bce(tf.ones_like(real_output), real_output)
    loss_on_fake = bce(tf.zeros_like(fake_output), fake_output)
    return loss_on_real + loss_on_fake
src/helper/boto.py | neovasili/serverless-guru-code-challenge | 1 | 12760795 | <gh_stars>1-10
import boto3
class BotoHelper:
    """Thin wrapper holding a boto3 client for one AWS service namespace.

    NOTE(review): ``get_caller_identity`` only exists on the STS client, so
    ``who_am_i`` presumably assumes ``namespace == "sts"`` -- confirm callers.
    """

    def __init__(self, namespace: str, region_name="eu-west-1"):
        self.__namespace = namespace
        self.client = boto3.client(namespace, region_name)

    def change_credentials(self, credentials: dict, region: str = "eu-west-1"):
        """Rebuild the client using explicit (temporary) credentials."""
        self.client = boto3.client(
            self.__namespace,
            region_name=region,
            aws_access_key_id=credentials["AccessKeyId"],
            aws_secret_access_key=credentials["SecretAccessKey"],
            aws_session_token=credentials["SessionToken"],
        )

    def who_am_i(self):
        """Print the caller identity reported by the current client."""
        identity = self.client.get_caller_identity()
        print(identity)
class BotoResourceHelper(BotoHelper):
    """BotoHelper that additionally keeps a boto3 *resource* handle."""

    def __init__(self, namespace: str, region_name="eu-west-1"):
        super().__init__(namespace=namespace, region_name=region_name)
        # Name-mangled copy, distinct from the parent's private attribute.
        self.__namespace = namespace
        self.resource = boto3.resource(namespace, region_name)

    def change_credentials(self, credentials: dict, region: str = "eu-west-1"):
        """Rebuild only the resource handle with explicit credentials.

        NOTE(review): the inherited ``self.client`` is deliberately left
        untouched here, mirroring the original behavior -- confirm intent.
        """
        self.resource = boto3.resource(
            self.__namespace,
            region_name=region,
            aws_access_key_id=credentials["AccessKeyId"],
            aws_secret_access_key=credentials["SecretAccessKey"],
            aws_session_token=credentials["SessionToken"],
        )
| 2.265625 | 2 |
MagniPy/__init__.py | dangilman/MagniPy | 2 | 12760796 | # -*- coding: utf-8 -*-
"""Top-level package for MagniPy."""
__author__ = """<NAME>"""
__email__ = '<EMAIL>'
__version__ = '0.1.0'
| 0.949219 | 1 |
pyvalidator/is_md5.py | theteladras/py.validator | 15 | 12760797 | from .utils.Classes.RegEx import RegEx
from .utils.assert_string import assert_string
def is_md5(input: str) -> bool:
    """Return whether ``input`` looks like an MD5 digest: exactly 32
    lowercase hexadecimal characters."""
    candidate = assert_string(input)
    return RegEx("^[a-f0-9]{32}$").match(candidate)
| 2.9375 | 3 |
mrf/mrfStructures.py | luiskuhn/polysome-detection | 0 | 12760798 | <gh_stars>0
import networkx as nx
import numpy as np
from pytom.tools.ProgressBar import FixedProgBar
##sequential implementation
###########################
class MRF:
    """Pairwise Markov random field with min-sum (MAP) loopy belief propagation.

    Messages and beliefs are treated as *costs*: lower is better, and the
    MAP state of a node is the argmin of its belief vector.  Subclasses must
    supply the unary cost ``phiPotential(node, state)`` and the pairwise
    cost ``psiPotential(source, target, source_state, target_state)``.

    NOTE(review): node labels are assumed to be the integers 0..order()-1,
    since they double as state indices (see ``computeBeliefsMAP``) -- confirm.
    """

    def __init__(self, G):
        # Directed graph describing the MRF topology.
        self._G = G
        self._totalNodes = self._G.order()
        # beliefs[i, x] = unnormalized cost of node i being in state x.
        self._beliefs = np.ndarray((self._totalNodes, self._totalNodes))
        # _msgMap[n][r, x] = message arriving at node n over its r-th
        # incoming edge, evaluated at state x; initialised to all ones.
        self._msgMap = {}
        for n in self._G.nodes():
            self._msgMap[n] = np.ones((len(self._G.in_edges(n)), self._totalNodes))

    def msgMAP(self, source, target):
        """Compute and store the min-sum message from ``source`` to ``target``."""
        msgRow = self._G.out_edges(source).index((source, target))
        # Row of the reverse (target -> source) message, which must be
        # excluded from the neighbourhood sum, i.e. N(source) - {target}.
        target_msgRow = -1
        if (target, source) in self._G.in_edges(source):
            target_msgRow = self._G.in_edges(source).index((target, source))

        phi_vec = np.zeros(self._totalNodes)
        sum_n_vec = np.zeros(self._totalNodes)
        for x_j in range(self._totalNodes):
            phi_vec[x_j] = self.phiPotential(source, x_j)
            red_val = 0.0
            if target_msgRow >= 0:
                # BUGFIX: was ``self._msgInMap`` -- an attribute that is never
                # assigned anywhere in this class (only ``_msgMap`` exists in
                # __init__), so this line raised AttributeError at runtime.
                # The row layout (indexed by in_edges) matches ``_msgMap``.
                red_val = self._msgMap[source][target_msgRow, x_j]  # N(j)-{i}
            sum_n_vec[x_j] = np.sum(self._msgMap[source][:, x_j]) - red_val

        msg = np.zeros(self._totalNodes)
        for x_i in range(self._totalNodes):
            x_j_vec = np.zeros(self._totalNodes)
            for x_j in range(self._totalNodes):
                psi = self.psiPotential(source, target, x_j, x_i)
                x_j_vec[x_j] = phi_vec[x_j] + sum_n_vec[x_j] + psi
            # Min-sum: minimise over the source state for each target state.
            msg[x_i] = x_j_vec.min()

        self._msgMap[target][msgRow, :] = msg

    def lbpMAP(self, maxIterations):
        """Run ``maxIterations`` sweeps of loopy BP over every directed edge."""
        bar = FixedProgBar(0, int(maxIterations*self._totalNodes), 'LBP MAP')
        barCnt = 1
        it = 0
        while it < maxIterations:
            for s in self._G.nodes():
                out_edges = self._G.out_edges(s)
                for t_index in range(len(out_edges)):
                    t = out_edges[t_index][1]
                    self.msgMAP(s, t)
                bar.update(barCnt)
                barCnt = barCnt + 1
            it = it + 1

    def computeBeliefsMAP(self):
        """Fold unary costs and all incoming messages into per-node beliefs."""
        for i in self._G.nodes():
            for x_i in self._G.nodes():
                sum_x_i = np.sum(self._msgMap[i][:, x_i])
                self._beliefs[i, x_i] = self.phiPotential(i, x_i) + sum_x_i

    def getMAP(self, maxIterations):
        """Return the MAP (argmin-belief) state of every node as a vector."""
        self.lbpMAP(maxIterations)
        self.computeBeliefsMAP()
        mapVector = np.zeros(self._totalNodes)
        for i in self._G.nodes():
            mapVector[i] = self._beliefs[i, :].argmin()
        return mapVector

    def getBeliefs(self, n):
        """Return the (unnormalized) belief/cost vector of node ``n``."""
        return self._beliefs[n, :]
| 2.03125 | 2 |
tools/moduletests/unit/test_selinuxpermissive.py | stivesso/aws-ec2rescue-linux | 178 | 12760799 | <filename>tools/moduletests/unit/test_selinuxpermissive.py
# Copyright 2016-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""
Unit tests for the selinuxpermissive module
"""
import os
import sys
import unittest
import mock
import moduletests.src.selinuxpermissive
try:
# Python 2.x
from cStringIO import StringIO
except ImportError:
# Python 3.x
from io import StringIO
if sys.hexversion >= 0x3040000:
# contextlib.redirect_stdout was introduced in Python 3.4
import contextlib
else:
# contextlib2 is a backport of contextlib from Python 3.5 and is compatible with Python2/3
import contextlib2 as contextlib
class Testselinuxpermissive(unittest.TestCase):
    """Unit tests for moduletests.src.selinuxpermissive.

    All external effects (file existence checks, file reads/writes, backup,
    restore, config loading) are mocked, so only the module's control flow
    and its stdout messages are exercised.
    """
    # Path the module under test inspects for the SELINUX= setting.
    config_file_path = "/etc/selinux/config"
    def setUp(self):
        # Buffer used to capture stdout produced by the module under test.
        self.output = StringIO()
    def tearDown(self):
        self.output.close()
    # --- detect() ---
    @mock.patch("moduletests.src.selinuxpermissive.os.path.isfile", return_value=False)
    def test_detect_no_selinux(self, isfile_mock):
        # No config file at all -> nothing to detect.
        self.assertFalse(moduletests.src.selinuxpermissive.detect(self.config_file_path))
        self.assertTrue(isfile_mock.called)
    @mock.patch("moduletests.src.selinuxpermissive.os.path.isfile", return_value=True)
    @mock.patch("moduletests.src.selinuxpermissive.open", mock.mock_open(read_data="SELINUX=enforcing"))
    def test_detect_problem(self, isfile_mock):
        # "enforcing" is the condition the module remediates.
        self.assertTrue(moduletests.src.selinuxpermissive.detect(self.config_file_path))
        self.assertTrue(isfile_mock.called)
    @mock.patch("moduletests.src.selinuxpermissive.os.path.isfile", return_value=True)
    @mock.patch("moduletests.src.selinuxpermissive.open", mock.mock_open(read_data="SELINUX=permissive"))
    def test_detect_noproblem(self, isfile_mock):
        # Already permissive -> no problem reported.
        self.assertFalse(moduletests.src.selinuxpermissive.detect(self.config_file_path))
        self.assertTrue(isfile_mock.called)
    # --- fix() ---
    @mock.patch("moduletests.src.selinuxpermissive.open", mock.mock_open(read_data="SELINUX=enforcing"))
    def test_fix_success(self):
        self.assertTrue(moduletests.src.selinuxpermissive.fix(self.config_file_path))
    @mock.patch("moduletests.src.selinuxpermissive.open", side_effect=IOError)
    def test_fix_exception(self, open_mock):
        # An unwritable config file propagates IOError after a warning.
        with contextlib.redirect_stdout(self.output):
            self.assertRaises(IOError, moduletests.src.selinuxpermissive.fix, self.config_file_path)
        self.assertEqual(self.output.getvalue(), "[WARN] Unable to replace contents of /etc/selinux/config\n")
        self.assertTrue(open_mock.called)
    # --- run() ---
    @mock.patch("moduletests.src.selinuxpermissive.get_config_dict")
    @mock.patch("moduletests.src.selinuxpermissive.detect", side_effect=(True, False))
    @mock.patch("moduletests.src.selinuxpermissive.os.path.isfile", return_value=True)
    @mock.patch("moduletests.src.selinuxpermissive.backup", return_value=True)
    @mock.patch("moduletests.src.selinuxpermissive.fix", return_value=True)
    def test_run_success_fixed(self, fix_mock, backup_mock, isfile_mock, detect_mock, config_mock):
        # detect() first reports the problem, then reports it fixed.
        config_mock.return_value = {"BACKUP_DIR": "/var/tmp/ec2rl",
                                    "LOG_DIR": "/var/tmp/ec2rl",
                                    "BACKED_FILES": dict(),
                                    "REMEDIATE": True}
        with contextlib.redirect_stdout(self.output):
            self.assertTrue(moduletests.src.selinuxpermissive.run())
        self.assertTrue("[SUCCESS] selinux set to permissive" in self.output.getvalue())
        self.assertTrue(fix_mock.called)
        self.assertTrue(backup_mock.called)
        self.assertTrue(isfile_mock.called)
        self.assertTrue(detect_mock.called)
        self.assertTrue(config_mock.called)
    @mock.patch("moduletests.src.selinuxpermissive.get_config_dict", return_value=True)
    @mock.patch("moduletests.src.selinuxpermissive.detect", return_value=False)
    def test_run_success(self, detect_mock, config_mock):
        # Nothing to remediate -> immediate success.
        with contextlib.redirect_stdout(self.output):
            self.assertTrue(moduletests.src.selinuxpermissive.run())
        self.assertTrue("[SUCCESS] selinux is not set to enforcing" in self.output.getvalue())
        self.assertTrue(detect_mock.called)
        self.assertTrue(config_mock.called)
    @mock.patch("moduletests.src.selinuxpermissive.get_config_dict")
    @mock.patch("moduletests.src.selinuxpermissive.detect", return_value=True)
    @mock.patch("moduletests.src.selinuxpermissive.os.path.isfile", return_value=True)
    @mock.patch("moduletests.src.selinuxpermissive.backup", return_value=True)
    @mock.patch("moduletests.src.selinuxpermissive.fix", return_value=True)
    @mock.patch("moduletests.src.selinuxpermissive.restore", return_value=True)
    def test_run_failure_isfile(self,
                                restore_mock,
                                fix_mock,
                                backup_mock,
                                isfile_mock,
                                detect_mock,
                                config_mock):
        # detect() keeps returning True after the fix -> failure + restore.
        config_mock.return_value = {"BACKUP_DIR": "/var/tmp/ec2rl",
                                    "LOG_DIR": "/var/tmp/ec2rl",
                                    "BACKED_FILES": {self.config_file_path: "/some/path"},
                                    "REMEDIATE": True,
                                    "SUDO": True}
        with contextlib.redirect_stdout(self.output):
            self.assertFalse(moduletests.src.selinuxpermissive.run())
        self.assertTrue("[FAILURE] failed to set selinux set to permissive" in self.output.getvalue())
        self.assertTrue(restore_mock.called)
        self.assertTrue(fix_mock.called)
        self.assertTrue(backup_mock.called)
        self.assertTrue(isfile_mock.called)
        self.assertTrue(detect_mock.called)
        self.assertTrue(config_mock.called)
    @mock.patch("moduletests.src.selinuxpermissive.get_config_dict")
    @mock.patch("moduletests.src.selinuxpermissive.detect", return_value=True)
    @mock.patch("moduletests.src.selinuxpermissive.os.path.isfile", return_value=False)
    @mock.patch("moduletests.src.selinuxpermissive.fix", return_value=True)
    def test_run_failure(self, fix_mock, isfile_mock, detect_mock, config_mock):
        # Config file missing -> no backup taken, remediation still fails.
        config_mock.return_value = {"BACKUP_DIR": "/var/tmp/ec2rl",
                                    "LOG_DIR": "/var/tmp/ec2rl",
                                    "BACKED_FILES": dict(),
                                    "REMEDIATE": True,
                                    "SUDO": True}
        with contextlib.redirect_stdout(self.output):
            self.assertFalse(moduletests.src.selinuxpermissive.run())
        self.assertTrue("[FAILURE] failed to set selinux set to permissive" in self.output.getvalue())
        self.assertTrue(fix_mock.called)
        self.assertTrue(isfile_mock.called)
        self.assertTrue(detect_mock.called)
        self.assertTrue(config_mock.called)
    @mock.patch("moduletests.src.selinuxpermissive.get_config_dict")
    @mock.patch("moduletests.src.selinuxpermissive.detect", side_effect=IOError)
    @mock.patch("moduletests.src.selinuxpermissive.restore", return_value=True)
    def test_run_failure_exception(self, restore_mock, detect_mock, config_mock):
        # Unexpected exception during detect() -> backed-up file restored.
        config_mock.return_value = {"BACKUP_DIR": "/var/tmp/ec2rl",
                                    "LOG_DIR": "/var/tmp/ec2rl",
                                    "BACKED_FILES": {self.config_file_path: "/some/path"},
                                    "REMEDIATE": True}
        with contextlib.redirect_stdout(self.output):
            self.assertFalse(moduletests.src.selinuxpermissive.run())
        self.assertTrue(self.output.getvalue().endswith("Review the logs to determine the cause of the issue.\n"))
        self.assertTrue(restore_mock.called)
        self.assertTrue(detect_mock.called)
        self.assertTrue(config_mock.called)
    @mock.patch("moduletests.src.selinuxpermissive.get_config_dict", side_effect=IOError)
    def test_run_failure_config_exception(self, config_mock):
        # Failure to even load the config is handled gracefully.
        with contextlib.redirect_stdout(self.output):
            self.assertFalse(moduletests.src.selinuxpermissive.run())
        self.assertTrue(self.output.getvalue().endswith("Review the logs to determine the cause of the issue.\n"))
        self.assertTrue(config_mock.called)
| 2.125 | 2 |
src/api/migrations/0020_auto_20210322_2218.py | opnfv/laas | 2 | 12760800 | <gh_stars>1-10
# Generated by Django 2.2 on 2021-03-22 22:18
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration for the ``api`` app.

    Adds the nullable ``method`` column (max 4 chars, e.g. an HTTP verb) to
    ApiLog and redefines ``endpoint`` as a nullable 300-char CharField.
    Do not edit by hand beyond documentation.
    """
    dependencies = [
        ('api', '0019_auto_20210322_1823'),
    ]
    operations = [
        migrations.AddField(
            model_name='apilog',
            name='method',
            field=models.CharField(max_length=4, null=True),
        ),
        migrations.AlterField(
            model_name='apilog',
            name='endpoint',
            field=models.CharField(max_length=300, null=True),
        ),
    ]
| 1.585938 | 2 |
util/i18n.py | schocco/mds-web | 0 | 12760801 | import json
from django.core.serializers.json import DjangoJSONEncoder
from tastypie.serializers import Serializer
class CustomJSONSerializer(Serializer):
    """Tastypie serializer that renders JSON through Django's extended
    encoder (handles datetimes, Decimals, UUIDs, etc.)."""

    def to_json(self, data, options=None):
        """Serialize ``data`` to a JSON string."""
        opts = options or {}
        simplified = self.to_simple(data, opts)
        return json.dumps(simplified, cls=DjangoJSONEncoder)

    def from_json(self, content):
        """Parse a JSON string back into Python data."""
        return json.loads(content)
python/Strategy/Duck/Duck.py | eling22/Design-Pattern | 2 | 12760802 | from abc import ABC, abstractmethod
from .FlyBehavior import FlyBehavior, FlyNoWay, FlyWithWings
from .QuackBehavior import QuackBehavior, Quack, MuteQuack, SQuack
class Duck(ABC):
    """Strategy-pattern duck: flying and quacking are delegated to pluggable
    behavior objects, which can be swapped at runtime via the setters."""

    def __init__(self):
        # Concrete subclasses install their default behaviors.
        self.flyBehavior = None
        self.quackBehavior = None

    @abstractmethod
    def display(self):
        """Show a description of this duck (subclass responsibility)."""

    def performFly(self):
        # Delegate to whatever fly strategy is currently installed.
        self.flyBehavior.fly()

    def performQuack(self):
        # Delegate to whatever quack strategy is currently installed.
        self.quackBehavior.quack()

    def swim(self):
        print("All ducks float, even decoys!")

    def setFlyBehavior(self, flyBehavior):
        """Swap the fly strategy at runtime."""
        self.flyBehavior = flyBehavior

    def setQuackBehavior(self, quackBehavior):
        """Swap the quack strategy at runtime."""
        self.quackBehavior = quackBehavior
class MallardDuck(Duck):
    """A real mallard: airborne via wings, vocal via a standard quack."""

    def __init__(self):
        super().__init__()
        self.setFlyBehavior(FlyWithWings())
        self.setQuackBehavior(Quack())

    def display(self):
        print("I'm a real Mallard duck")
class ModelDuck(Duck):
    """A model duck: cannot fly, but still quacks normally."""

    def __init__(self):
        super().__init__()
        self.setFlyBehavior(FlyNoWay())
        self.setQuackBehavior(Quack())

    def display(self):
        print("I'm a model duck")
| 3.828125 | 4 |
weblogo-3.4_rd/weblogolib/_cli.py | go-bears/Final-Project | 0 | 12760803 | <reponame>go-bears/Final-Project<filename>weblogo-3.4_rd/weblogolib/_cli.py
#!/usr/bin/env python
# -------------------------------- WebLogo --------------------------------
# Copyright (c) 2003-2004 The Regents of the University of California.
# Copyright (c) 2005 <NAME>
# Copyright (c) 2006-2011, The Regents of the University of California, through
# Lawrence Berkeley National Laboratory (subject to receipt of any required
# approvals from the U.S. Dept. of Energy). All rights reserved.
# This software is distributed under the new BSD Open Source License.
# <http://www.opensource.org/licenses/bsd-license.html>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# (1) Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and or other materials provided with the distribution.
#
# (3) Neither the name of the University of California, Lawrence Berkeley
# National Laboratory, U.S. Dept. of Energy nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# WebLogo Command Line Interface
from __future__ import absolute_import, print_function
import os
import sys
from optparse import OptionGroup
from string import Template
from corebio import seq_io
from corebio.seq import Seq, SeqList, nucleic_alphabet
from corebio.utils import *
from corebio.utils.deoptparse import DeOptionParser
from corebio._py3k import iteritems, StringIO
from .color import *
from .colorscheme import ColorScheme, ColorGroup
from . import (LogoOptions, LogoData, LogoFormat,
parse_prior, description, release_description, formatters,
default_formatter,
std_alphabets, std_units, std_sizes, std_color_schemes,
read_seq_data)
# ====================== Main: Parse Command line =============================
def main():
    """Entry point for the WebLogo command line tool.

    Parses argv; either launches the bundled standalone web server
    (``--serve``) or builds the logo from the input sequence data and
    writes the formatted output.  Exits with status 2 on data errors.
    """
    parser = _build_option_parser()
    (opts, leftover) = parser.parse_args(sys.argv[1:])
    if leftover:
        parser.error("Unparsable arguments: %s " % leftover)

    if opts.serve:
        httpd_serve_forever(opts.port)  # serves until interrupted
        sys.exit(0)

    try:
        logodata = _build_logodata(opts)
        logoformat = _build_logoformat(logodata, opts)
        rendered = opts.formatter(logodata, logoformat)
        if sys.version_info[0] >= 3:
            # Formatters return bytes; write through the binary buffer on Py3.
            opts.fout.buffer.write(rendered)
        else:
            opts.fout.write(rendered)
    except ValueError as err:
        print('Error:', err, file=sys.stderr)
        sys.exit(2)
    except KeyboardInterrupt:
        sys.exit(0)
def httpd_serve_forever(port=8080) :
    """Start the WebLogo web interface on a local port and serve forever.

    Serves the bundled ``htdocs`` directory and runs ``create.cgi`` as a
    Python CGI script.  Blocks until interrupted (Ctrl-C exits cleanly).
    """
    # Pick the HTTP/CGI server modules for the running Python major version.
    # NOTE: on Python 3 both names alias http.server, which provides both
    # the HTTP server and the CGI request handler.
    if sys.version_info[0] >= 3:
        import http.server as server
        import http.server as cgiserver
    else:
        import BaseHTTPServer as server
        import CGIHTTPServer as cgiserver
    class __HTTPRequestHandler(cgiserver.CGIHTTPRequestHandler):
        # Modify CGIHTTPRequestHandler so that it will run the cgi script directly, instead of exec'ing
        # This bypasses the need for the cgi script to have execute permissions set,
        # since distutils install does not preserve permissions.
        def is_cgi(self) :
            self.have_fork = False # Prevent CGIHTTPRequestHandler from using fork
            # Only /create.cgi is treated as a CGI endpoint.
            if self.path == "/create.cgi":
                self.cgi_info = '', 'create.cgi'
                return True
            return False
        def is_python(self,path): # Let CGIHTTPRequestHandler know that cgi script is python
            return True
    # Add current directory to PYTHONPATH. This is
    # so that we can run the standalone server
    # without having to run the install script.
    pythonpath = os.getenv("PYTHONPATH", '')
    pythonpath += ":" + os.path.abspath(sys.path[0]).split()[0]
    os.environ["PYTHONPATH"] = pythonpath
    # Serve files from the package's bundled htdocs directory.
    htdocs = resource_filename(__name__, 'htdocs', __file__)
    os.chdir(htdocs)
    HandlerClass = __HTTPRequestHandler
    ServerClass = server.HTTPServer
    httpd = ServerClass(('', port), HandlerClass)
    print("WebLogo server running at http://localhost:%d/" % port)
    try :
        httpd.serve_forever()
    except KeyboardInterrupt:
        sys.exit(0)
# end httpd_serve_forever()
def _build_logodata(options) :
    """Read the input (stdin if no file given) and build a LogoData object.

    First attempts to parse the input as a transfac position weight matrix;
    if that fails with ValueError, falls back to reading it as a multiple
    sequence alignment.  Optional reverse/complement transformations are
    then applied before the LogoData is constructed.
    """
    motif_flag=False
    fin = options.fin
    if fin is None:
        # No input file: buffer all of stdin so it can be parsed twice.
        fin = StringIO(sys.stdin.read())
    try:
        # Try reading data in transfac format first.
        from corebio.matrix import Motif
        motif = Motif.read_transfac(fin, alphabet=options.alphabet)
        motif_flag = True
    except ValueError as motif_err :
        # Failed reading Motif, try reading as multiple sequence data.
        if options.input_parser == "transfac":
            raise motif_err # Adding transfac as str insted of parser is a bit of a ugly kludge
        seqs = read_seq_data(fin,
                             options.input_parser.read,
                             alphabet=options.alphabet,
                             ignore_lower_case = options.ignore_lower_case)
    if motif_flag :
        # Matrix input: per-symbol counts are already aggregated, so
        # lower-case filtering makes no sense here.
        if options.ignore_lower_case:
            raise ValueError("error: option --ignore-lower-case incompatible with matrix input")
        if options.reverse or options.revcomp: motif.reverse()
        if options.complement or options.revcomp: motif.complement()
        prior = parse_prior( options.composition,motif.alphabet, options.weight)
        data = LogoData.from_counts(motif.alphabet, motif, prior)
    else :
        if options.reverse or options.revcomp:
            seqs = SeqList([s.reverse() for s in seqs], seqs.alphabet)
        if options.complement or options.revcomp:
            if not nucleic_alphabet.alphabetic(seqs.alphabet):
                raise ValueError('non-nucleic sequence cannot be complemented')
            # Temporarily switch to the nucleic alphabet so complement()
            # is defined, then restore the original alphabet.
            aaa = seqs.alphabet
            seqs.alphabet = nucleic_alphabet
            seqs= SeqList( [Seq(s,seqs.alphabet).complement() for s in seqs], seqs.alphabet)
            seqs.alphabet = aaa
        prior = parse_prior( options.composition,seqs.alphabet, options.weight)
        data = LogoData.from_seqs(seqs, prior)
    return data
def _build_logoformat( logodata, opts) :
    """ Extract and process relevant option values and return a
    LogoFormat object."""
    args = {}
    # Options copied verbatim from the parsed command line onto LogoOptions.
    direct_from_opts = [
        "stacks_per_line",
        "logo_title",
        "yaxis_label",
        "show_xaxis",
        "show_yaxis",
        "xaxis_label",
        "show_ends",
        "fineprint",
        "show_errorbars",
        "show_boxes",
        "yaxis_tic_interval",
        "resolution",
        "alphabet",
        "debug",
        "show_ends",
        "default_color",
        #"show_color_key",
        "color_scheme",
        "unit_name",
        "logo_label",
        "yaxis_scale",
        "first_index",
        "logo_start",
        "logo_end",
        "scale_width",
        "annotate",
        "stack_width",
        "stack_aspect_ratio",
        "reverse_stacks"
        ]
    # NOTE(review): "show_ends" is listed twice above -- harmless, but
    # presumably a leftover from an edit.
    for k in direct_from_opts:
        args[k] = opts.__dict__[k]
    # logo_size = copy.copy(opts.__dict__['logo_size'])
    # size_from_opts = ["stack_width", "stack_height"]
    # for k in size_from_opts :
    #     length = getattr(opts, k)
    #     if length : setattr( logo_size, k, length )
    # args["size"] = logo_size
    if opts.colors:
        # Build a custom color scheme from repeated --color options.
        color_scheme = ColorScheme()
        for color, symbols, desc in opts.colors:
            try :
                #c = Color.from_string(color)
                color_scheme.groups.append( ColorGroup(symbols, color, desc) )
            except ValueError :
                # NOTE(review): the color-string parse that raised this is
                # commented out above, so this handler looks unreachable now.
                raise ValueError(
                    "error: option --color: invalid value: '%s'" % color )
        args["color_scheme"] = color_scheme
    if opts.annotate:
        # Custom per-stack x-axis annotations, comma separated.
        args["annotate"] = opts.annotate.split(',')
    logooptions = LogoOptions()
    for a, v in iteritems(args):
        setattr(logooptions, a, v)
    theformat = LogoFormat(logodata, logooptions )
    return theformat
# ========================== OPTIONS ==========================
def _build_option_parser() :
defaults = LogoOptions()
parser = DeOptionParser(usage="%prog [options] < sequence_data.fa > sequence_logo.eps",
description = description,
version = release_description,
add_verbose_options = False
)
io_grp = OptionGroup(parser, "Input/Output Options",)
data_grp = OptionGroup(parser, "Logo Data Options",)
trans_grp = OptionGroup(parser, "Transformations", "Optional transformations of the sequence data.")
format_grp = OptionGroup(parser, "Logo Format Options",
"These options control the format and display of the logo.")
color_grp = OptionGroup(parser, "Color Options",
"Colors can be specified using CSS2 syntax. e.g. 'red', '#FF0000', etc.")
advanced_grp = OptionGroup(parser, "Advanced Format Options",
"These options provide fine control over the display of the logo. ")
server_grp = OptionGroup(parser, "WebLogo Server",
"Run a standalone webserver on a local port.")
parser.add_option_group(io_grp)
parser.add_option_group(data_grp)
parser.add_option_group(trans_grp)
parser.add_option_group(format_grp)
parser.add_option_group(color_grp)
parser.add_option_group(advanced_grp)
parser.add_option_group(server_grp)
# ========================== IO OPTIONS ==========================
io_grp.add_option( "-f", "--fin",
dest="fin",
action="store",
type="file_in",
default=None,
help="Sequence input file (default: stdin)",
metavar="FILENAME")
# Add position weight matrix formats to input parsers by hand
fin_choices = dict(seq_io.format_names())
fin_choices['transfac'] = 'transfac'
io_grp.add_option("-D", "--datatype",
dest="input_parser",
action="store", type ="dict",
default = seq_io,
choices = fin_choices, # seq_io.format_names(),
help="Type of multiple sequence alignment or position weight matrix file: (%s, transfac)" %
', '.join([ f.names[0] for f in seq_io.formats]),
metavar="FORMAT")
io_grp.add_option("-o", "--fout", dest="fout",
type="file_out",
default=sys.stdout,
help="Output file (default: stdout)",
metavar="FILENAME")
io_grp.add_option( "-F", "--format",
dest="formatter",
action="store",
type="dict",
choices = formatters,
metavar= "FORMAT",
help="Format of output: eps (default), png, png_print, pdf, jpeg, svg, logodata",
default = default_formatter)
# ========================== Data OPTIONS ==========================
data_grp.add_option( "-A", "--sequence-type",
dest="alphabet",
action="store",
type="dict",
choices = std_alphabets,
help="The type of sequence data: 'protein', 'rna' or 'dna'.",
metavar="TYPE")
data_grp.add_option( "-a", "--alphabet",
dest="alphabet",
action="store",
help="The set of symbols to count, e.g. 'AGTC'. "
"All characters not in the alphabet are ignored. "
"If neither the alphabet nor sequence-type are specified then weblogo will examine the input data and make an educated guess. "
"See also --sequence-type, --ignore-lower-case" )
data_grp.add_option( "-U", "--units",
dest="unit_name",
action="store",
choices = list(std_units.keys()),
type="choice",
default = defaults.unit_name,
help="A unit of entropy ('bits' (default), 'nats', 'digits'), or a unit of free energy ('kT', 'kJ/mol', 'kcal/mol'), or 'probability' for probabilities",
metavar = "NUMBER")
data_grp.add_option( "", "--composition",
dest="composition",
action="store",
type="string",
default = "auto",
help="The expected composition of the sequences: 'auto' (default), 'equiprobable', 'none' (do not perform any compositional adjustment), a CG percentage, a species name (e.g. 'E. coli', 'H. sapiens'), or an explicit distribution (e.g. \"{'A':10, 'C':40, 'G':40, 'T':10}\"). The automatic option uses a typical distribution for proteins and equiprobable distribution for everything else. ",
metavar="COMP.")
data_grp.add_option( "", "--weight",
dest="weight",
action="store",
type="float",
default = None,
help="The weight of prior data. Default depends on alphabet length",
metavar="NUMBER")
data_grp.add_option( "-i", "--first-index",
dest="first_index",
action="store",
type="int",
default = 1,
help="Index of first position in sequence data (default: 1)",
metavar="INDEX")
data_grp.add_option( "-l", "--lower",
dest="logo_start",
action="store",
type="int",
help="Lower bound of sequence to display",
metavar="INDEX")
data_grp.add_option( "-u", "--upper",
dest="logo_end",
action="store",
type="int",
help="Upper bound of sequence to display",
metavar="INDEX")
# ========================== Transformation OPTIONS ==========================
# FIXME Add test?
trans_grp.add_option( "", "--ignore-lower-case",
dest="ignore_lower_case",
action="store_true",
default=False,
help="Disregard lower case letters and only count upper case letters in sequences."
)
trans_grp.add_option( "", "--reverse",
dest="reverse",
action="store_true",
default=False,
help="reverse sequences",
)
trans_grp.add_option( "", "--complement",
dest="complement",
action="store_true",
default=False,
help="complement nucleic sequences",
)
trans_grp.add_option( "", "--revcomp",
dest="revcomp",
action="store_true",
default=False,
help="reverse complement nucleic sequences",
)
# ========================== FORMAT OPTIONS ==========================
format_grp.add_option( "-s", "--size",
dest="stack_width",
action="store",
type ="dict",
choices = std_sizes,
metavar = "LOGOSIZE",
default = defaults.stack_width,
help="Specify a standard logo size (small, medium (default), large)" )
format_grp.add_option( "-n", "--stacks-per-line",
dest="stacks_per_line",
action="store",
type="int",
help="Maximum number of logo stacks per logo line. (default: %default)",
default = defaults.stacks_per_line,
metavar="COUNT")
format_grp.add_option( "-t", "--title",
dest="logo_title",
action="store",
type="string",
help="Logo title text.",
default = defaults.logo_title,
metavar="TEXT")
format_grp.add_option( "", "--label",
dest="logo_label",
action="store",
type="string",
help="A figure label, e.g. '2a'",
default = defaults.logo_label,
metavar="TEXT")
format_grp.add_option( "-X", "--show-xaxis",
action="store",
type = "boolean",
default= defaults.show_xaxis,
metavar = "YES/NO",
help="Display sequence numbers along x-axis? (default: %default)")
format_grp.add_option( "-x", "--xlabel",
dest="xaxis_label",
action="store",
type="string",
default = defaults.xaxis_label,
help="X-axis label",
metavar="TEXT")
format_grp.add_option( "", "--annotate",
dest="annotate",
action="store",
type="string",
default = None,
help="A comma separated list of custom stack annotations, e.g. '1,3,4,5,6,7'. Annotation list must be same length as sequences.",
metavar="TEXT")
format_grp.add_option( "-S", "--yaxis",
dest="yaxis_scale",
action="store",
type="float",
help="Height of yaxis in units. (Default: Maximum value with uninformative prior.)",
metavar = "UNIT")
format_grp.add_option( "-Y", "--show-yaxis",
action="store",
type = "boolean",
dest = "show_yaxis",
default= defaults.show_yaxis,
metavar = "YES/NO",
help="Display entropy scale along y-axis? (default: %default)")
format_grp.add_option( "-y", "--ylabel",
dest="yaxis_label",
action="store",
type="string",
help="Y-axis label (default depends on plot type and units)",
metavar="TEXT")
format_grp.add_option( "-E", "--show-ends",
action="store",
type = "boolean",
default= defaults.show_ends,
metavar = "YES/NO",
help="Label the ends of the sequence? (default: %default)")
format_grp.add_option( "-P", "--fineprint",
dest="fineprint",
action="store",
type="string",
default= defaults.fineprint,
help="The fine print (default: weblogo version)",
metavar="TEXT")
format_grp.add_option( "", "--ticmarks",
dest="yaxis_tic_interval",
action="store",
type="float",
default= defaults.yaxis_tic_interval,
help="Distance between ticmarks (default: %default)",
metavar = "NUMBER")
format_grp.add_option( "", "--errorbars",
dest = "show_errorbars",
action="store",
type = "boolean",
default= defaults.show_errorbars,
metavar = "YES/NO",
help="Display error bars? (default: %default)")
format_grp.add_option( "", "--reverse-stacks",
dest = "reverse_stacks",
action="store",
type = "boolean",
default= defaults.show_errorbars,
metavar = "YES/NO",
help="Draw stacks with largest letters on top? (default: %default)")
# ========================== Color OPTIONS ==========================
# TODO: Future Feature
# color_grp.add_option( "-K", "--color-key",
# dest= "show_color_key",
# action="store",
# type = "boolean",
# default= defaults.show_color_key,
# metavar = "YES/NO",
# help="Display a color key (default: %default)")
color_scheme_choices = list(std_color_schemes.keys())
color_scheme_choices.sort()
color_grp.add_option( "-c", "--color-scheme",
dest="color_scheme",
action="store",
type ="dict",
choices = std_color_schemes,
metavar = "SCHEME",
default = None, # Auto
help="Specify a standard color scheme (%s)" % \
", ".join(color_scheme_choices) )
color_grp.add_option( "-C", "--color",
dest="colors",
action="append",
metavar="COLOR SYMBOLS DESCRIPTION ",
nargs = 3,
default=[],
help="Specify symbol colors, e.g. --color black AG 'Purine' --color red TC 'Pyrimidine' ")
color_grp.add_option( "", "--default-color",
dest="default_color",
action="store",
metavar="COLOR",
default= defaults.default_color,
help="Symbol color if not otherwise specified.")
# ========================== Advanced options =========================
advanced_grp.add_option( "-W", "--stack-width",
dest="stack_width",
action="store",
type="float",
default= defaults.stack_width,
help="Width of a logo stack (default: %s)"% defaults.stack_width,
metavar="POINTS" )
advanced_grp.add_option( "", "--aspect-ratio",
dest="stack_aspect_ratio",
action="store",
type="float",
default= defaults.stack_aspect_ratio ,
help="Ratio of stack height to width (default: %s)"%defaults.stack_aspect_ratio,
metavar="POINTS" )
advanced_grp.add_option( "", "--box",
dest="show_boxes",
action="store",
type = "boolean",
default=False,
metavar = "YES/NO",
help="Draw boxes around symbols? (default: no)")
advanced_grp.add_option( "", "--resolution",
dest="resolution",
action="store",
type="float",
default=96,
help="Bitmap resolution in dots per inch (DPI). (Default: 96 DPI, except png_print, 600 DPI) Low resolution bitmaps (DPI<300) are antialiased.",
metavar="DPI")
advanced_grp.add_option( "", "--scale-width",
dest="scale_width",
action="store",
type = "boolean",
default= True,
metavar = "YES/NO",
help="Scale the visible stack width by the fraction of symbols in the column? (I.e. columns with many gaps of unknowns are narrow.) (Default: yes)")
advanced_grp.add_option( "", "--debug",
action="store",
type = "boolean",
default= defaults.debug,
metavar = "YES/NO",
help="Output additional diagnostic information. (Default: %default)")
# ========================== Server options =========================
server_grp.add_option( "", "--serve",
dest="serve",
action="store_true",
default= False,
help="Start a standalone WebLogo server for creating sequence logos.")
server_grp.add_option( "", "--port",
dest="port",
action="store",
type="int",
default= 8080,
help="Listen to this local port. (Default: %default)",
metavar="PORT")
return parser
# END _build_option_parser
##############################################################
| 1.179688 | 1 |
socfaker/__init__.py | atstpls/soc-faker | 0 | 12760804 | <filename>socfaker/__init__.py
from .socfaker import SocFaker
#from .vulnerability import Vulnerability
#from .application import Application
#from .computer import Computer
#from .employee import Employee
#from .file import File
#from .network import Network
#from .organization import Organization
#from .vulnerabilityhost import VulnerabilityHost
#from .vulnerabilityscan import VulnerabilityScan | 1.148438 | 1 |
packages/PIPS/validation/Demo/TutorialPPoPP2010.sub/pii.py | DVSR1966/par4all | 51 | 12760805 | <filename>packages/PIPS/validation/Demo/TutorialPPoPP2010.sub/pii.py
import pyps
from pipscc import pipscc
class Pii(pipscc):
    """pipscc compiler variant that inlines every very short module."""

    def changes(self, ws):
        """Inline all workspace modules whose code is under 3 lines long."""
        is_tiny = lambda module: len(module.code()) < 3
        ws.filter(is_tiny).inlining()
if __name__ == '__main__':
    # Build the compiler and run it immediately; no reference is kept.
    Pii().run()
| 2.421875 | 2 |
src/league_client_api/lockfile/api.py | pralphv/lol_15ff | 0 | 12760806 | from functools import lru_cache
from typing import Dict
import base64
try:
from ..league_process import find_path_of_league
except ValueError:
from src.league_client_api.league_process import find_path_of_league
def _read_lockfile() -> Dict:
    """Parse the League client lockfile into its port and password fields."""
    with open(find_path_of_league(), 'r') as handle:
        fields = handle.read().split(':')
    port, password = fields[2], fields[3]
    return {'port': port, 'password': password}
@lru_cache
def get_lockfile_content() -> Dict:
    """Return lockfile credentials plus the Basic-auth token derived from them.

    Keys: ``username`` (always ``'riot'``), ``port``, ``password`` and
    ``encrypted`` (base64 of ``username:password``).  The result is cached;
    call :func:`clear_cache` after the client restarts and rewrites the
    lockfile.
    """
    lock_file = _read_lockfile()
    username = 'riot'
    port = lock_file['port']
    password = lock_file['password']
    encrypted_auth = base64.b64encode(f'{username}:{password}'.encode('utf-8')).decode('utf-8')
    # Security fix: the original printed the plaintext password and auth
    # token to stdout on every cache miss; credentials must not be logged.
    return {'username': username, 'port': port, 'password': password, 'encrypted': encrypted_auth}
def clear_cache():
    # Drop the memoized lockfile credentials (e.g. after the League client
    # restarts and writes a new lockfile with a new port/password).
    get_lockfile_content.cache_clear()
| 2.53125 | 3 |
shared/logfileAnalysis/windows.py | infostreams/webindex | 1 | 12760807 | import _winreg
import os
import string
import IISLog, NCSALog
class logfileParser:
    """Locate and parse IIS/NCSA web-server logfiles on a Windows host.

    Log directories are discovered through the registry entry of the IIS
    W3SVC service; each logfile is dispatched to the matching parser
    (IISLog or NCSALog) based on its filename prefix.
    """

    def __init__(self, logfilename=None):
        """Optionally open `logfilename` immediately."""
        # Bug fix: the original called the bare name ``openLogfile(...)``,
        # which raises NameError; the method must be invoked on ``self``.
        if logfilename is not None:
            self.openLogfile(logfilename)

    def __getLogdirs(self):
        """Return every subdirectory of the IIS log root from the registry."""
        key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, "SYSTEM\\CurrentControlSet\\Services\\W3SVC\\Parameters\\")
        mainlog = _winreg.QueryValueEx(key, "LogFileDirectory")[0]
        dirs = []
        for subdir in os.listdir(mainlog):
            dirs.append(mainlog + "\\" + subdir)
        return dirs

    def __getLogfiles(self, dir):
        """Return parsable ``*.log`` files in `dir`.

        Files starting with ``ex`` are in the 'W3C Extended Log File
        Format', which this application cannot parse, and are skipped.
        """
        logs = []
        for file in os.listdir(dir):
            name = file.lower()
            if name[-3:] == "log" and name[:2] != "ex":
                logs.append(dir + "\\" + file)
        return logs

    def getLogfileNames(self, configfilename=None):
        """Return a list of ``{'server': None, 'log': path}`` dicts for all logs."""
        logfiles = []
        for dir in self.__getLogdirs():
            for log in self.__getLogfiles(dir):
                logfiles.append({'server': None, 'log': log})
        return logfiles

    def openLogfile(self, logfilename):
        """Open `logfilename` with the parser matching its filename prefix.

        ``in*`` -> IISLog, ``nc*`` -> NCSALog; ``ex*`` (W3C extended
        format) raises TypeError.
        """
        # TODO: add ODBC logging support
        filename = logfilename.split("\\")[-1].lower()
        if filename[:2] == "in":
            self.data = IISLog.IISLog(logfilename)
        if filename[:2] == "nc":
            self.data = NCSALog.NCSALog(logfilename)
        if filename[:2] == "ex":
            # Call-style raise is valid in both Python 2 and 3, unlike the
            # original Python-2-only ``raise TypeError, "..."`` statement.
            raise TypeError("The 'W3C Extended Log File Format', the format of the logfile you are trying to open, is unsuitable for this application")

    def closeLogfile(self):
        """Close the underlying logfile parser."""
        self.data.closeLogfile()

    def extractHTTPRequest(self, entry):
        """Delegate HTTP-request extraction to the active parser."""
        return self.data.extractHTTPRequest(entry)

    def getNextEntry(self):
        """Return the next entry from the active parser."""
        return self.data.getNextEntry()
if __name__=="__main__":
    # Demo: enumerate all discovered logfiles via the public API.
    # Bug fix: the original called the private, name-mangled methods
    # ``getLogdirs``/``getLogfiles`` (AttributeError from outside the
    # class) and printed an undefined name ``e`` (NameError).
    l = logfileParser()
    for entry in l.getLogfileNames():
        l.openLogfile(entry['log'])
        print("opened logfile: " + entry['log'])
| 2.640625 | 3 |
lcd128/demo_lcd128.py | mdinata/micropython | 4 | 12760808 | from st7920 import Screen
from gfx import GFX
from sysfont import sysfont
import machine
import time
machine.freq(160000000)
spi = machine.SPI(1, baudrate=80000000, polarity=1, phase=1)
# Bug fix: ``Pin`` was used unqualified but never imported; on MicroPython
# it lives in the ``machine`` module.
screen = Screen(slaveSelectPin=machine.Pin(15), resetDisplayPin=machine.Pin(5))
draw = GFX(128, 64, screen.plot)
# screen.set_rotation(2)
screen.clear()
def test1():
    """Time one Screen.redraw() and print the elapsed time in seconds."""
    started = time.ticks_ms()
    # screen.fill_rect(0,0,127,63)
    screen.redraw()
    elapsed_ms = time.ticks_diff(time.ticks_ms(), started)
    print(elapsed_ms / 1000)
def test2():
    """Time one Screen.redraw() (fill variant disabled) and print seconds."""
    started = time.ticks_ms()
    # draw.fill_rect(0,0,128,64,1)
    screen.redraw()
    elapsed_ms = time.ticks_diff(time.ticks_ms(), started)
    print(elapsed_ms / 1000)
# Run both redraw timing demos once when the script is executed.
test1()
test2()
nbiot/scan.py | pwitab/nbiot | 2 | 12760809 | import click
import tabulate
from .module import SaraN211Module, PingError
def connect_module(module: SaraN211Module, app_ctx):
    """Configure the module (APN, URCs, PSM) and attach it to the network.

    Settings (apn, psm flag, mno) are read from the shared CLI context
    object ``app_ctx``.
    """
    click.echo(click.style(f"Connecting to network...", fg="yellow", bold=True))
    module.read_module_status()
    if app_ctx.apn:
        # Only override the PDP context when an APN was supplied.
        module.set_pdp_context(apn=app_ctx.apn)
    module.enable_signaling_connection_urc()
    module.enable_network_registration()
    module.enable_radio_functions()
    if app_ctx.psm:
        module.enable_psm_mode()
    else:
        module.disable_psm_mode()
    module.connect(app_ctx.mno)
    click.echo(click.style(f"Connected!", fg="yellow", bold=True))
@click.command()
@click.pass_obj
def connect(app_ctx):
    """
    Connect to the network and get general info on module and network
    """
    module: SaraN211Module = app_ctx.module
    connect_module(module, app_ctx)
    # Render module identity and addressing info as a one-row table.
    columns = ["IMEI", "IMSI", "ICCID", "IP", "APN"]
    row = [module.imei, module.imsi, module.iccid, module.ip, module.apn]
    table = tabulate.tabulate(
        [row], columns, tablefmt="github", numalign="left", stralign="left"
    )
    click.echo(click.style(table, fg="red"))
@click.command()
@click.argument("ip")
@click.option("--runs", "-r", default=1, help="How many times should we ping")
@click.pass_obj
def ping(app_ctx, ip, runs):
    """
    Ping an IP address
    """
    module: SaraN211Module = app_ctx.module
    connect_module(module, app_ctx)
    click.echo(click.style(f"Pinging IP {ip}", fg="blue"))
    # Collect (rtt, ttl) pairs for the summary table at the end.
    results = []
    for i in range(0, runs):
        try:
            ttl, rtt = module.ping(ip)
            results.append((rtt, ttl))
            click.echo(click.style(f"Success: rtt: {rtt}, ttl: {ttl}", fg="red"))
        except PingError as e:
            # A failed ping is reported but does not abort the remaining runs.
            click.echo(click.style(f"**\t{e.args[0]}\t**", fg="red", bold=True))
    click.echo("\nResults:")
    click.echo(
        click.style(
            tabulate.tabulate(
                results,
                headers=["Round trip time (ms)", "Time to live (ms)"],
                tablefmt="github",
                numalign="left",
                stralign="left",
            ),
            fg="red",
        )
    )
@click.command()
@click.pass_obj
def stats(app_ctx):
    """
    Print statistics from the module.
    """
    module: SaraN211Module = app_ctx.module
    connect_module(module, app_ctx)
    click.echo(click.style(f"Collecting statistics...", fg="blue"))
    module.update_radio_statistics()
    # One (name, value) row per radio statistic.
    header = ["Stat", "Value"]
    data = list()
    data.append(("ECL", f"{module.radio_ecl}"))
    data.append(("Signal power", f"{module.radio_signal_power} dBm"))
    data.append(("Total power", f"{module.radio_total_power} dBm"))
    # Bug fix: a duplicate "Signal power" row was appended twice here.
    data.append(("Tx power", f"{module.radio_tx_power} dBm"))
    data.append(("Tx time", f"{module.radio_tx_time} ms"))
    data.append(("Rx time", f"{module.radio_rx_time} ms"))
    data.append(("Cell id", f"{module.radio_cell_id}"))
    data.append(("Physical cell id", f"{module.radio_pci}"))
    data.append(("SNR", f"{module.radio_snr}"))
    data.append(("RSRQ", f"{module.radio_rsrq} dBm"))
    click.echo(
        click.style(
            tabulate.tabulate(
                data, header, tablefmt="github", numalign="left", stralign="left"
            ),
            fg="red",
        )
    )
@click.command()
@click.pass_obj
def reboot(app_ctx):
    """
    Reboot the module
    """
    module: SaraN211Module = app_ctx.module
    # Announce, reboot, confirm — all in the same bold red style.
    click.echo(click.style(f"Rebooting module {module}...", fg="red", bold=True))
    module.reboot()
    click.echo(click.style("Module rebooted", fg="red", bold=True))
| 2.578125 | 3 |
http/server.py | luchosr/dht11Streams | 0 | 12760810 | #from sense_hat import SenseHat
from collections import OrderedDict
sense=SenseHat()
sense.clear()
import time
import config
import requests
import json
import RPi.GPIO as GPIO
import dht11
# Main sampling loop: read the sensors, build the iot2tangle payload and
# POST it to the configured endpoint every `config.relay` seconds.
while True:
    # Get Unix timestamp
    timestamp = int(time.time())

    # Get Temp/Press/Hum values
    temp = sense.get_temperature()
    press = sense.get_pressure()
    humidity = sense.get_humidity()

    #Get Gyroscope values (disabled template)
    """ o = sense.get_orientation()
    x_gyroscope = o["pitch"]
    y_gyroscope = o["roll"]
    z_gyroscope = o["yaw"] """

    #Get Accelerometer values (disabled template)
    """ a = sense.get_accelerometer_raw()
    x_accelerometer = a["x"]
    y_accelerometer = a["y"]
    z_accelerometer = a["z"] """

    #Get Magnetometer (Compass) values (disabled template)
    """ m = sense.get_compass_raw()
    x_compass = m["x"]
    y_compass = m["y"]
    z_compass = m["z"]
    """

    # Base payload; enabled sensor readings are appended below.
    build_json = {
        "iot2tangle": [],
        "device": str(config.device_id),
        "timestamp": str(timestamp)
    }

    """
    # If Enviromental
    if config.enviromental:
        build_json['iot2tangle'].append({
            "sensor": "Enviromental",
            "data": [{
                "Pressure": str(press),
                "Temp": str(temp)
            }, {
                "Humidity": str(humidity)
            }]
        })
    """

    if config.dht11:
        build_json['iot2tangle'].append({
            "sensor": "dht11",
            "data": [{
                "Pressure": str(press),
                "Temp": str(temp)
            }, {
                "Humidity": str(humidity)
            }]
        })

    """
    #If Accelerometer
    if config.accelerometer:
        build_json['iot2tangle'].append({
            "sensor": "Accel",
            "data": [{
                "x": str(x_accelerometer),
                "y": str(y_accelerometer),
                "z": str(z_accelerometer)
            }]
        })

    # If Gyroscope
    if config.gyroscope:
        build_json['iot2tangle'].append({
            "sensor": "Gyroscope",
            "data": [{
                "x": str(x_gyroscope),
                "y": str(y_gyroscope),
                "z": str(z_gyroscope)
            }]
        })

    # If Magonetometer
    if config.magnetometer:
        build_json['iot2tangle'].append({
            "sensor": "Magnetometer",
            "data": [{
                "x": str(x_compass),
                "y": str(y_compass),
                "z": str(z_compass)
            }]
        }) """

    # Set Json headers
    headers = {"Content-Type": "application/json"}

    # Best-effort delivery: a failed POST is logged and the loop continues.
    try:
        build_json = json.dumps(build_json)  # NOTE: rebinds the dict to its JSON string
        r = requests.post(config.endpoint, data=build_json, headers=headers)
        r.raise_for_status()
        print(":: Sending datasets ::")
        print("--------------------------------------------------------")
        print(build_json)
    # Bug fix: the original bare ``except:`` also swallowed SystemExit and
    # KeyboardInterrupt, making the loop impossible to stop with Ctrl-C.
    except Exception:
        print("No server listening at " + str(config.endpoint))

    # Interval between samples
    time.sleep(config.relay)
| 2.609375 | 3 |
ferris/core/forms/monkey.py | palladius/gae-ferris-ricc | 2 | 12760811 | import wtforms.ext.appengine.ndb as wtfndb
import wtforms
from google.appengine.ext import ndb
from . import fields
### Additional Converters
def add_convertor(property_type, converter_func):
    """Register converter_func as the handler for the given ndb property type."""
    attr_name = 'convert_%s' % property_type
    setattr(wtfndb.ModelConverter, attr_name, converter_func)
def convert_UserProperty(self, model, prop, kwargs):
    """Returns a form field for a ``ndb.UserProperty``."""
    if isinstance(prop, ndb.Property):
        # Auto-filled user properties get no editable form field.
        if prop._auto_current_user or prop._auto_current_user_add:
            return None
    validators = kwargs['validators']
    validators.append(wtforms.validators.email())
    validators.append(wtforms.validators.length(max=500))
    return fields.UserField(**kwargs)
def convert_KeyProperty(self, model, prop, kwargs):
    """Returns a form field for a ``ndb.KeyProperty``."""
    kwargs['kind'] = prop._kind
    kwargs.setdefault('allow_blank', not prop._required)
    if prop._repeated:
        # Repeated keys use a multi-select field, which takes no blank option.
        kwargs.pop('allow_blank')
        return fields.MultipleReferenceField(**kwargs)
    return fields.KeyPropertyField(**kwargs)
def convert_BlobKeyProperty(self, model, prop, kwargs):
    """Returns a form field for a ``ndb.BlobKeyProperty``.

    Delegates directly to :class:`fields.BlobKeyField`.
    """
    return fields.BlobKeyField(**kwargs)
def convert_GeoPtProperty(self, model, prop, kwargs):
    """Returns a form field for a ``ndb.GeoPtProperty``."""
    return fields.GeoPtPropertyField(**kwargs)
def fallback_converter(self, model, prop, kwargs):
    # Intentionally returns None: property types without a dedicated
    # converter simply get no form field.
    pass
# Install the fallback on the converter class, then register the custom
# converters above so they override wtforms-appengine's defaults.
setattr(wtfndb.ModelConverter, 'fallback_converter', fallback_converter)

# Monkey-patch wtf's converters
add_convertor('UserProperty', convert_UserProperty)
add_convertor('KeyProperty', convert_KeyProperty)
add_convertor('BlobKeyProperty', convert_BlobKeyProperty)
add_convertor('GeoPtProperty', convert_GeoPtProperty)
| 2.265625 | 2 |
aiogqlc/client.py | kousu/aiogqlc | 0 | 12760812 | import aiohttp
import json
import copy
from typing import Tuple
from aiogqlc.utils import (
is_file_like,
is_file_list_like,
contains_file_variable,
null_file_variables,
)
class GraphQLClient:
    """Minimal async GraphQL-over-HTTP client with file-upload support.

    Plain queries are sent as a JSON POST body; queries with file-valued
    variables are sent using the GraphQL multipart request convention
    (``operations`` + ``map`` + one part per file).
    """

    def __init__(self, endpoint: str, headers: dict = None) -> None:
        # endpoint: URL of the GraphQL HTTP endpoint.
        # headers: base headers merged into every request.
        self.endpoint = endpoint
        self.headers = headers or {}

    def prepare_headers(self):
        """Return a copy of the base headers with a JSON ``Accept`` default."""
        headers = copy.deepcopy(self.headers)
        if aiohttp.hdrs.ACCEPT not in headers:
            headers[aiohttp.hdrs.ACCEPT] = "application/json"
        return headers

    @classmethod
    def prepare_json_data(
        cls, query: str, variables: dict = None, operation: str = None
    ) -> dict:
        """Build the standard GraphQL JSON payload.

        File-valued variables are nulled here (via ``null_file_variables``);
        the actual file contents travel as separate multipart parts.
        """
        data = {"query": query}
        if variables:
            data["variables"] = null_file_variables(variables)
        if operation:
            data["operationName"] = operation
        return data

    @classmethod
    def prepare_files(cls, variables: dict) -> Tuple[dict, list]:
        """Map file variables to multipart part names.

        Returns ``(file_map, file_fields)``: ``file_map`` maps a part index
        (as string) to the variable path it fills; ``file_fields`` pairs each
        index with the file object itself.  Only top-level file variables and
        flat lists of files are handled here.
        """
        file_map = dict()
        file_fields = list()
        map_index = 0
        for key, value in variables.items():
            if is_file_like(value):
                file_map[str(map_index)] = ["variables.{}".format(key)]
                file_fields.append([str(map_index), value])
                map_index += 1
            elif is_file_list_like(value):
                file_list_index = 0
                for item in value:
                    file_map[str(map_index)] = [
                        "variables.{}.{}".format(key, file_list_index)
                    ]
                    file_fields.append([str(map_index), item])
                    file_list_index += 1
                    map_index += 1
        return file_map, file_fields

    @classmethod
    def prepare_multipart(
        cls, query: str, variables: dict, operation: str = None
    ) -> aiohttp.FormData:
        """Assemble a multipart form following the GraphQL upload convention.

        Part order is significant: ``operations`` precedes ``map``, which
        precedes the file parts.
        """
        data = aiohttp.FormData()
        operations_json = json.dumps(cls.prepare_json_data(query, variables, operation))
        file_map, file_fields = cls.prepare_files(variables)
        data.add_field("operations", operations_json, content_type="application/json")
        data.add_field("map", json.dumps(file_map), content_type="application/json")
        data.add_fields(*file_fields)
        return data

    async def execute(
        self, query: str, variables: dict = None, operation: str = None
    ) -> aiohttp.ClientResponse:
        """POST the query and return the fully-read response.

        Chooses multipart encoding when any variable is file-like,
        otherwise a plain JSON body.
        """
        async with aiohttp.ClientSession() as session:
            if variables and contains_file_variable(variables):
                data = self.prepare_multipart(query, variables, operation)
                headers = self.prepare_headers()
            else:
                data = json.dumps(self.prepare_json_data(query, variables, operation))
                headers = self.prepare_headers()
                headers[aiohttp.hdrs.CONTENT_TYPE] = "application/json"
            async with session.post(
                self.endpoint, data=data, headers=headers
            ) as response:
                # Read the body before the session closes so callers can
                # still inspect it after this method returns.
                await response.read()
                return response
| 2.421875 | 2 |
komapy/conf.py | bpptkg/komapy | 0 | 12760813 | import json
from .settings import app_settings
_cached_attrs = {}
class Settings:
    """
    A proxy to get or set app settings.

    Reads go through the module-level ``_cached_attrs`` dict first and
    fall back to ``app_settings``; writes are mirrored into both.
    """

    def __getattr__(self, attr):
        # Prefer explicitly-set (cached) values; otherwise fall back to
        # app_settings, returning None for unknown names.
        if attr in _cached_attrs:
            return _cached_attrs[attr]
        return getattr(app_settings, attr, None)

    def __setattr__(self, attr, value):
        # Only names already present on app_settings are written through;
        # the value is also cached for fast __getattr__ lookups.
        # NOTE(review): caching appears unconditional here — confirm whether
        # unknown names should be silently retained in the cache.
        if hasattr(app_settings, attr):
            setattr(app_settings, attr, value)
        _cached_attrs[attr] = value

    def from_dict(self, settings):
        """
        Set settings from dictionary object.
        """
        for attr, value in settings.items():
            setattr(self, attr, value)

    def from_json(self, settings):
        """
        Set settings from JSON object.
        """
        dict_settings = json.loads(settings)
        self.from_dict(dict_settings)

    def from_json_file(self, path):
        """
        Set settings from JSON file.
        """
        with open(path) as fp:
            dict_settings = json.load(fp)
        self.from_dict(dict_settings)

    def as_dict(self):
        """
        Export all settings as dictionary object.

        Cached values are collected first, then every key in
        ``app_settings.defaults`` is read back from app_settings.
        """
        dict_settings = {}
        for key, value in _cached_attrs.items():
            dict_settings[key] = value
        for key in app_settings.defaults:
            dict_settings.update({key: getattr(app_settings, key)})
        return dict_settings
settings = Settings()
| 2.9375 | 3 |
game.py | cterence/Explorers-game | 0 | 12760814 | #!/usr/bin/python3
# Auteur : <NAME>
from tkinter import *
import random, math, time, copy
### Variables globales ###
height, width = 300, 100
goal = (width/4, 5*height/6)
start = (3*width/4, height/6)
refreshRate = 1
population = 100
generations = 100
firstGen = True
goalReached = False
won = False
allDead = False
### Classe point ###
class Dot:  # Dot object: one explorer evolved by the genetic algorithm
    """A single explorer dot.

    Reads the module-level globals ``start``, ``goal``, ``width``,
    ``height`` and ``goalReached``.  A lower ``score`` means fitter.
    """

    def __init__(self):
        global width, height
        # Spawn at the start position, alive, with an empty move history.
        self.x = start[0]
        self.y = start[1]
        self.alive = True
        self.score = 0          # fitness score: lower is better
        self.moves = []         # list of (dx, dy) steps taken so far
        self.fittest = False    # True for the best dot of a generation

    def __str__(self):
        return "x : "+str(self.x)+", y : "+str(self.y)+", alive : "+str(self.alive)+", score : "+str(self.score)+", fittest ="+str(self.fittest)+", move number :"+str(len(self.moves))

    def isAlive(self):
        # Kill the dot when it leaves the board or enters the 20x20 box
        # around the goal.
        if self.x <= 0 or self.x >= width-5 or self.y <= 0 or self.y >= height-5 or (self.x < goal[0]+10 and self.x > goal[0]-10 and self.y < goal[1]+10 and self.y > goal[1]-10):
            self.alive = False

    def hasWon(self):
        # True when the dot is inside the 20x20 box around the goal.
        if self.x < goal[0]+10 and self.x > goal[0]-10 and self.y < goal[1]+10 and self.y > goal[1]-10 :
            return True
        return False

    def move(self):
        # Take one random 5-pixel step (first generation only), record it
        # in the move history and refresh the fitness score.
        self.isAlive()
        if (self.alive == True):
            newX, newY = self.x, self.y
            rand = random.random()
            if (rand <= 0.25):
                newX += 5
            elif (rand <= 0.5):
                newX -= 5
            elif (rand <= 0.75):
                newY += 5
            else:
                newY -= 5
            self.moves.append((newX-self.x, newY-self.y))
            self.x, self.y = newX, newY
            self.fitness()

    def fitness(self):
        # Before the goal is first reached the score is the straight-line
        # distance to the goal; afterwards it is weighted by the number of
        # moves so that shorter paths score better.
        global goal
        if goalReached == True:
            if abs(self.x-goal[0]) == 5 and abs(self.y-goal[1]) == 5 :  # case: dot arrives diagonally adjacent to the goal
                print("diag")
                self.score = len(self.moves)*(self.y-goal[1])  # zero out the x-difference between dot and goal
            else :
                self.score = len(self.moves)*math.sqrt((self.x-goal[0])**2+(self.y-goal[1])**2)
        else :
            self.score = math.sqrt((self.x-goal[0])**2+(self.y-goal[1])**2)

    def moveMutated(self, fittest):
        # Replay the fittest dot's recorded path, with a 10% chance of a
        # random step (mutation); the fittest dot itself never mutates.  A
        # random step is also forced once this dot's path is as long as
        # the fittest's (no recorded move left to copy).
        self.isAlive()
        if (self.alive == True):
            newX, newY = self.x, self.y
            if (random.random() <= 0.1 or len(self.moves) >= len(fittest.moves)) and self.fittest == False:
                rand = random.random()
                if (rand <= 0.25):
                    newX += 5
                elif (rand <= 0.5):
                    newX -= 5
                elif (rand <= 0.75):
                    newY += 5
                else:
                    newY -= 5
            else:
                newX += fittest.moves[len(self.moves)][0]
                newY += fittest.moves[len(self.moves)][1]
            self.moves.append((newX-self.x, newY-self.y))
            self.x, self.y = newX, newY
            self.fitness()
### Classe plateau ###
class Board:
    """Tkinter canvas hosting the dot population and the animation loop."""

    def __init__(self):
        global height, width
        self.canvas = Canvas(root, width=width, height=height, background='white')
        self.dots = []
        self.fittest = None   # best Dot of the previous generation
        self.job = None       # id of the pending canvas.after() callback
        self.createPopulation()

    def addDot(self, dot):
        # Register a dot and draw it immediately.
        self.dots.append(dot)
        self.canvas.create_oval(dot.x, dot.y, dot.x+5, dot.y+5, fill='black')

    def createPopulation(self):
        # Fill the board with `population` fresh dots at the start position.
        for i in range(population):
            self.dots.append(Dot())

    def update(self, firstGen):
        # Advance every dot one step: random moves for the first
        # generation, mutated replays of the fittest path afterwards.
        # Sets the globals `won` / `allDead` for play() to inspect.
        global won, allDead
        allDead = True
        for dot in self.dots:
            if firstGen == True:
                dot.move()
            else:
                dot.moveMutated(self.fittest)
            if dot.fittest == True:
                self.canvas.create_oval(dot.x, dot.y, dot.x+5, dot.y+5, fill='red')
            else :
                self.canvas.create_oval(dot.x, dot.y, dot.x+5, dot.y+5, fill='black')
            if dot.hasWon():
                won = True
                break
            if dot.alive :
                allDead = False

    def play(self):
        # One animation frame: redraw the goal, step the population and
        # schedule the next frame unless the round ended (win or all dead).
        global goal, won, firstGen, goalReached
        self.canvas.delete("all")
        self.canvas.create_oval(goal[0], goal[1], goal[0]+5, goal[1]+5, fill='green')
        self.update(firstGen)
        if won == False and allDead == False:
            self.job = self.canvas.after(refreshRate, self.play)
        else :
            if allDead == False :
                # A dot reached the goal: end the round and remember it.
                self.killAll()
                firstGen = False
                goalReached = True
            root.after(1000, root.quit)

    def cancel(self):
        # Cancel the pending animation callback, if any.
        if self.job is not None:
            self.canvas.after_cancel(self.job)
            self.job = None

    def selectFittest(self):
        # Pick the dot with the lowest (best) score and flag it.
        minFitness = sys.maxsize
        for dot in self.dots:
            if dot.score < minFitness:
                self.fittest = dot
                minFitness = dot.score
        self.fittest.fittest = True
        print("Score du fittest :"+str(self.fittest.score), self.fittest.x-goal[0], self.fittest.y-goal[1])

    def heritage(self):
        # Build the next generation: population-1 mutable clones of the
        # fittest dot plus the fittest itself, all reset to the start.
        self.dots = []
        fitDot = copy.deepcopy(self.fittest)
        # NOTE(review): Dot defines no 'win' attribute elsewhere; this
        # assignment creates it — confirm it is actually used.
        fitDot.alive, fitDot.x, fitDot.y, fitDot.moves, fitDot.win = True, start[0], start[1], [], False
        normalDot = copy.deepcopy(fitDot)
        normalDot.fittest = False
        for i in range(population-1):
            self.dots.append(copy.deepcopy(normalDot))
        self.dots.append(fitDot)

    def killAll(self):
        # Mark every dot dead so the current round terminates.
        global root
        for dot in self.dots :
            dot.alive = False
### Main ###
if __name__ == '__main__':
    # Generation 0: purely random moves, then pick the fittest dot.
    generation = 0
    root = Tk()
    board = Board()
    board.canvas.pack()
    root.title("Gen "+str(generation))
    board.play()
    root.mainloop()
    firstGen = False
    board.cancel()
    board.selectFittest()
    generation += 1
    # Subsequent generations: clone/mutate the fittest and replay.
    while generation <= generations:
        won = False
        root.title("Gen "+str(generation))
        board.heritage()
        board.play()
        root.mainloop()
        board.cancel()
        board.selectFittest()
        generation += 1
    # TODO: compare the gap between the previous and new fittest dots, and
    # show the fittest dot's move count in the window title.
src/skmultiflow/trees/nodes/random_learning_node_perceptron.py | lambertsbennett/scikit-multiflow | 1 | 12760815 | import numpy as np
from skmultiflow.trees.nodes import ActiveLearningNodePerceptron
from skmultiflow.trees.attribute_observer import NominalAttributeRegressionObserver
from skmultiflow.trees.attribute_observer import NumericAttributeRegressionObserver
from skmultiflow.utils import get_dimensions
class RandomLearningNodePerceptron(ActiveLearningNodePerceptron):
    """ Learning Node for regression tasks that always use a linear perceptron
    model to provide responses.

    Parameters
    ----------
    initial_class_observations: dict
        In regression tasks this dictionary carries the sufficient statistics
        to perform online variance calculation. They refer to the number of
        observations (key '0'), the sum of the target values (key '1'), and
        the sum of the squared target values (key '2').
    max_features: int
        Number of attributes per subset for each node split.
    parent_node: RandomLearningNodePerceptron (default=None)
        A node containing statistics about observed data.
    random_state: int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    """
    def __init__(self, initial_class_observations, max_features, parent_node=None,
                 random_state=None):
        super().__init__(initial_class_observations, parent_node, random_state)
        self.max_features = max_features
        # Feature subset for this node; lazily sampled on the first call to
        # learn_from_instance (stays an empty array until then).
        self.list_attributes = np.array([])

    def learn_from_instance(self, X, y, weight, rht):
        """Update the node with the provided instance.

        Parameters
        ----------
        X: numpy.ndarray of length equal to the number of features.
            Instance attributes for updating the node.
        y: float
            Instance target value.
        weight: float
            Instance weight.
        rht: HoeffdingTreeRegressor
            Regression Hoeffding Tree to update.
        """
        # In regression, the self._observed_class_distribution dictionary keeps three statistics:
        # [0] sum of sample seen by the node
        # [1] sum of target values
        # [2] sum of squared target values
        # These statistics are useful to calculate the mean and to calculate the variance reduction
        if self.perceptron_weight is None:
            # Lazily initialize the perceptron weights (one per feature + bias).
            self.perceptron_weight = self.random_state.uniform(-1, 1, len(X)+1)

        try:
            self._observed_class_distribution[0] += weight
            self._observed_class_distribution[1] += y * weight
            self._observed_class_distribution[2] += y * y * weight
        except KeyError:
            # First observation: create the three statistics.
            self._observed_class_distribution[0] = weight
            self._observed_class_distribution[1] = y * weight
            self._observed_class_distribution[2] = y * y * weight

        # Update perceptron
        self.samples_seen = self._observed_class_distribution[0]

        if rht.learning_ratio_const:
            learning_ratio = rht.learning_ratio_perceptron
        else:
            # Decay the learning rate with the number of samples seen.
            learning_ratio = rht.learning_ratio_perceptron / \
                             (1 + self.samples_seen * rht.learning_ratio_decay)

        # Loop for compatibility with bagging methods
        for i in range(int(weight)):
            self.update_weights(X, y, learning_ratio, rht)

        if self.list_attributes.size == 0:
            # Sample the random feature subset once, on first use.
            self.list_attributes = self._sample_features(get_dimensions(X)[1])

        for i in self.list_attributes:
            try:
                obs = self._attribute_observers[i]
            except KeyError:
                # Create the observer matching the attribute type.
                if rht.nominal_attributes is not None and i in rht.nominal_attributes:
                    obs = NominalAttributeRegressionObserver()
                else:
                    obs = NumericAttributeRegressionObserver()
                self._attribute_observers[i] = obs
            obs.observe_attribute_class(X[i], y, weight)

    def _sample_features(self, n_features):
        # Uniformly sample `max_features` distinct feature indices.
        return self.random_state.choice(
            n_features, size=self.max_features, replace=False
        )
| 2.8125 | 3 |
tests/testGaussQuad.py | A-CGray/FEMpy | 0 | 12760816 | <reponame>A-CGray/FEMpy
"""
==============================================================================
Gauss Quadrature unit tests
==============================================================================
@File : testGaussQuad.py
@Date : 2021/07/29
@Author : <NAME>
@Description : Unit tests for FEMpy's gauss quadrature integration scheme
"""
# ==============================================================================
# Standard Python modules
# ==============================================================================
import unittest
# ==============================================================================
# External Python modules
# ==============================================================================
import numpy as np
import scipy.integrate as integrate
# ==============================================================================
# Extension modules
# ==============================================================================
from FEMpy.GaussQuad import gaussQuad1d, gaussQuad2d, gaussQuad3d
def TestFunc(x):
    """Degree-9 test polynomial: 1 + x + x^2 + ... + x^9."""
    return 1.0 + sum(x**power for power in range(1, 10))
def TestFunc2d(x1, x2):
    """2-D degree-9 test polynomial: 1 + sum_i (x1^i - 3*x2^i)."""
    return 1.0 + sum(x1**power - 3.0 * x2**power for power in range(1, 10))
def TestFunc3d(x1, x2, x3):
    """3-D degree-9 test polynomial: 1 + sum_i (x1^i - 4*x2^i + 3*x3^i)."""
    return 1.0 + sum(
        x1**power - 4.0 * x2**power + 3.0 * x3**power for power in range(1, 10)
    )
def TestMatFunc3d(x1, x2, x3):
    """Return a stack of 3x3 matrices, one per sample.

    Off-diagonal entries are fixed; the diagonal of matrix i is
    (x1[i], x2[i], x3[i]).
    """
    A = np.empty((len(x1), 3, 3))
    # Broadcast the constant off-diagonal pattern to every matrix...
    A[:] = [[0.0, 2.0, 3.0], [1.0, 0.0, 3.0], [1.0, 2.0, 0.0]]
    # ...then fill the varying diagonal entries in one shot each.
    A[:, 0, 0] = x1
    A[:, 1, 1] = x2
    A[:, 2, 2] = x3
    return A
class GaussQuadUnitTest(unittest.TestCase):
    """Test FEMpy's Gauss quadrature integration against scipy's integration methods"""

    def setUp(self) -> None:
        # Number of decimal places the quadrature results must match scipy's.
        self.precision = 8

    def test_1d_gauss_quad(self):
        # 6-point Gauss rule over the default interval [-1, 1].
        gaussInt = gaussQuad1d(TestFunc, 6)
        scipyInt = integrate.quad(TestFunc, -1.0, 1.0)[0]
        self.assertAlmostEqual(gaussInt, scipyInt, places=self.precision)

    def test_1d_gauss_quad_nonStandard_limits(self):
        # Same rule with custom integration limits [a, b].
        gaussInt = gaussQuad1d(TestFunc, 6, a=-2.6, b=1.9)
        scipyInt = integrate.quad(TestFunc, -2.6, 1.9)[0]
        self.assertAlmostEqual(gaussInt, scipyInt, places=self.precision)

    def test_2d_gauss_quad(self):
        gaussInt = gaussQuad2d(TestFunc2d, 6)
        scipyInt = integrate.dblquad(TestFunc2d, -1.0, 1.0, -1.0, 1.0)[0]
        self.assertAlmostEqual(gaussInt, scipyInt, places=self.precision)

    def test_2d_gauss_quad_nonStandard_limits(self):
        # Per-dimension orders and limits; note scipy's dblquad takes the
        # outer-variable limits first, hence the reversed argument order.
        gaussInt = gaussQuad2d(TestFunc2d, [6, 6], a=[-0.3, -2.3], b=[0.7, 1.6])
        scipyInt = integrate.dblquad(TestFunc2d, -2.3, 1.6, -0.3, 0.7)[0]
        self.assertAlmostEqual(gaussInt, scipyInt, places=self.precision)

    def test_3d_gauss_quad(self):
        gaussInt = gaussQuad3d(TestFunc3d, 6)
        scipyInt = integrate.tplquad(TestFunc3d, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0)[0]
        self.assertAlmostEqual(gaussInt, scipyInt, places=self.precision)

    def test_3d_gauss_quad_nonStandard_limits(self):
        gaussInt = gaussQuad3d(TestFunc3d, [6, 6, 6], a=[-4.0, 2.0, 0.0], b=[1.0, 3.0, 4.0])
        scipyInt = integrate.tplquad(TestFunc3d, 0.0, 4.0, 2.0, 3.0, -4.0, 1.0)[0]
        self.assertAlmostEqual(gaussInt, scipyInt, places=self.precision)

    def test_3d_mat_gauss_quad(self):
        # Matrix-valued integrand: compare elementwise against the
        # hand-computed exact integral.
        gaussInt = gaussQuad3d(TestMatFunc3d, 6, a=[-4.0, 2.0, 0.0], b=[1.0, 3.0, 4.0])
        trueInt = np.array([[-30.0, 40.0, 60.0], [20.0, 50.0, 60.0], [20.0, 40.0, 40.0]])
        np.testing.assert_allclose(gaussInt, trueInt, atol=10**-self.precision, rtol=10**-self.precision)
# Allow running this test module directly: ``python testGaussQuad.py``.
if __name__ == "__main__":
    unittest.main()
| 2.125 | 2 |
records/views.py | devanshudave12/telustravel_finalproject | 0 | 12760817 | <filename>records/views.py
from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth.models import User, auth
from django.db import IntegrityError
# Create your views here.
# defined the variable login
def login(request):
    """Authenticate a user from posted credentials.

    GET renders the login form; POST attempts authentication and
    redirects to the home page on success or back to the login form
    (with an info message) on failure.
    """
    if request.method != 'POST':
        return render(request, 'login.html')

    # Use .get() so a malformed POST missing a field fails authentication
    # gracefully instead of raising KeyError (HTTP 500).
    username = request.POST.get('username', '')
    password = request.POST.get('password', '')
    # Delegate credential checking to Django's auth backend.
    user = auth.authenticate(request, username=username, password=password)
    if user is not None:
        auth.login(request, user)  # attach the session to this user
        return redirect('/')
    messages.info(request, "NOT VALID ")  # user-facing failure notice
    return redirect('login')
def register(request):
    """Create a new user account.

    GET renders the registration form; POST validates the submitted
    fields and either creates the user (redirecting home) or redirects
    back to the form with an explanatory message.
    """
    if request.method != 'POST':
        return render(request, 'register.html')

    # .get() avoids KeyError on malformed POSTs; empty values simply
    # fail validation below.
    first_name = request.POST.get('first_name', '')
    last_name = request.POST.get('last_name', '')
    username = request.POST.get('username', '')
    password1 = request.POST.get('password1', '')
    password2 = request.POST.get('password2', '')
    email = request.POST.get('email', '')

    # Guard clauses: same checks and same order of precedence as before.
    if password1 != password2:
        print('password not matching')
        return redirect('register')
    if User.objects.filter(username=username).exists():
        messages.info(request, 'User already taken')
        return redirect('register')
    if User.objects.filter(email=email).exists():
        messages.info(request, 'Email id already in the system')
        return redirect('register')

    # BUG FIX: the password kwarg previously referenced an undefined
    # placeholder name; use the validated password1.
    user = User.objects.create_user(username=username,
                                    password=password1,
                                    email=email,
                                    first_name=first_name,
                                    last_name=last_name)
    user.save()
    print("user created")
    return redirect('/')
def logout(request):
    """End the current session and send the user back to the home page."""
    auth.logout(request)  # flush the session / auth cookie
    return redirect('/')
| 2.6875 | 3 |
model_zoo/research/hpc/sponge/src/mdnn.py | Vincent34/mindspore | 2 | 12760818 | <gh_stars>1-10
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""mdnn class"""
import numpy as np
from mindspore import nn, Tensor
from mindspore.ops import operations as P
from mindspore.common.parameter import Parameter
import mindspore.common.dtype as mstype
class Mdnn(nn.Cell):
    """Four-layer fully connected network with tanh hidden activations.

    Maps a ``dim``-wide input through three 512-unit tanh layers to a
    129-wide linear output.
    """

    def __init__(self, dim=258, dr=0.5):
        super(Mdnn, self).__init__()
        self.dim = dim
        self.dr = dr  # dropout ratio (stored for API compatibility; not used below)
        self.fc1 = nn.Dense(dim, 512)
        self.fc2 = nn.Dense(512, 512)
        self.fc3 = nn.Dense(512, 512)
        self.fc4 = nn.Dense(512, 129)
        self.tanh = nn.Tanh()

    def construct(self, x):
        """Forward pass: three tanh-activated hidden layers, then a linear head."""
        hidden = self.tanh(self.fc1(x))
        hidden = self.tanh(self.fc2(hidden))
        hidden = self.tanh(self.fc3(hidden))
        return self.fc4(hidden)
class TransCrdToCV(nn.Cell):
    """Transforms raw atom coordinates into collective variables (CVs).

    The output is the concatenation of 129 radial and 129 angular
    descriptors produced by the TransferCrd primitive, shaped (1, 258).
    """
    def __init__(self, simulation):
        super(TransCrdToCV, self).__init__()
        self.atom_numbers = simulation.atom_numbers
        # TransferCrd(0, 129, 129, atom_numbers) -- presumably
        # (start, end, number, atom_numbers); TODO confirm against the
        # mindspore P.TransferCrd signature.
        self.transfercrd = P.TransferCrd(0, 129, 129, self.atom_numbers)
        self.box = Tensor(simulation.box_length)
        # Buffers for the per-atom descriptors and the stacked output.
        self.radial = Parameter(Tensor(np.zeros([129,]), mstype.float32))
        self.angular = Parameter(Tensor(np.zeros([129,]), mstype.float32))
        self.output = Parameter(Tensor(np.zeros([1, 258]), mstype.float32))
        self.charge = simulation.charge

    def updatecharge(self, t_charge):
        """Update the first 129 charges in the simulation from t_charge.

        The factor 18.2223 is presumably the AMBER charge-unit conversion
        -- TODO confirm.
        """
        self.charge[:129] = t_charge[0] * 18.2223
        return self.charge

    def construct(self, crd, last_crd):
        """Compute (1, 258) CVs = concat(radial, angular) for the given frames."""
        self.radial, self.angular, _, _ = self.transfercrd(crd, last_crd, self.box)
        self.output = P.Concat()((self.radial, self.angular))
        self.output = P.ExpandDims()(self.output, 0)
        return self.output
| 1.9375 | 2 |
pyelixys/web/database/populatedb.py | henryeherman/pyelixys | 0 | 12760819 | '''
Populates the SQLite database with
a user and a sequence with three components.
'''
import json
from pyelixys.web.database.model import session
from pyelixys.web.database.model import Roles
from pyelixys.web.database.model import User
from pyelixys.web.database.model import Sequence
from pyelixys.web.database.model import Component
from pyelixys.web.database.model import Reagents
from pyelixys.web.database.model import metadata
# Import hashing library for pw hash
import hashlib
def create_role():
    """Insert the Administrator role (access level 255) and return it."""
    admin_role = Roles('Administrator', 255)
    session.add(admin_role)
    session.commit()
    return admin_role
def get_default_user_client_state():
    """Return the default client-side UI state stored for a new user.

    Silly work around for the current webserver.
    """
    # TODO Remove client default state server dependency
    prompt_state = {
        "show": False, "screen": "", "text2": "", "text1": "",
        "edit2default": "", "buttons": [], "title": "",
        "edit1validation": "", "edit1": False, "edit2": False,
        "edit1default": "", "edit2validation": "",
        "type": "promptstate",
    }
    return {
        "sequenceid": 0,
        "runhistorysort": {"column": "date&time", "type": "sort", "mode": "down"},
        "lastselectscreen": "SAVED",
        "selectsequencesort": {"column": "name", "type": "sort", "mode": "down"},
        "prompt": prompt_state,
        "screen": "HOME",
        "type": "clientstate",
        "componentid": 0,
    }
def get_default_component_state(cassette, reactor_count):
    """Build the JSON-serializable details dict for a cassette component.

    Silly work around for the current webserver.
    """
    # TODO Remove Component state/details dependency
    return {
        'note': cassette.Note,
        'sequenceid': cassette.SequenceID,
        'reactor': reactor_count,
        'validationerror': False,
        'componenttype': cassette.Type,
        'type': 'component',
        'id': cassette.ComponentID,
        # Record the ids of every reagent attached to this cassette.
        'reagent': [reagent.ReagentID for reagent in cassette.reagents],
    }
def create_user(role_id):
    """Create and persist the default 'devel' user bound to *role_id*.

    Returns the committed User row.
    """
    new_user = User()
    new_user.Username = 'devel'
    # BUG FIX: hashlib requires bytes on Python 3, so encode before
    # hashing (encoding is harmless under Python 2 as well).
    # NOTE(review): md5 is a weak password hash; acceptable only because
    # this seeds a throwaway development database.
    new_user.Password = hashlib.md5('devel'.encode('utf-8')).hexdigest()
    new_user.FirstName = 'Sofiebio'
    new_user.LastName = 'Developer'
    new_user.Email = '<EMAIL>'
    new_user.RoleID = role_id
    new_user.ClientState = json.dumps(
        get_default_user_client_state())
    session.add(new_user)
    session.commit()
    return new_user
def create_sequence(user_id):
    """Create and commit a default saved sequence owned by *user_id*."""
    seq = Sequence()
    seq.Name = 'Sequence 1'
    seq.Component = 'Test Sequence'
    seq.Type = 'Saved'
    seq.UserID = user_id
    session.add(seq)
    session.commit()
    return seq
def create_cassette_components(sequence_id):
    """Create three empty CASSETTE components for the sequence.

    Returns the list of committed Component rows.
    """
    cassettes = []
    for _ in range(3):
        comp = Component()
        comp.SequenceID = sequence_id
        comp.Type = 'CASSETTE'
        comp.Note = ''
        comp.Details = ''  # filled in later by update_component_details
        session.add(comp)
        session.commit()
        cassettes.append(comp)
    return cassettes
def create_reagents(sequence_id, cassettes):
    """Create 12 positioned (but otherwise empty) reagents per cassette."""
    for cassette in cassettes:
        for position in range(1, 13):
            reagent = Reagents()
            reagent.SequenceID = sequence_id
            reagent.Position = position
            reagent.component = cassette
            reagent.ComponentID = cassette.ComponentID
            session.add(reagent)
    # Single commit for the whole batch of new reagents.
    session.commit()
def update_sequence_details(sequence):
    """Point the sequence at its first component and mark it valid."""
    first_component = session.query(Component).filter_by(
        SequenceID=sequence.SequenceID).first()
    sequence.FirstComponentID = first_component.ComponentID
    sequence.ComponentCount = 3  # matches the three cassettes created above
    sequence.Valid = 1
    session.commit()
def update_component_details(cassettes):
    """Serialize each cassette's default state into its Details field.

    Reactor numbers are assigned 1, 2, 3 in cassette order.
    """
    for reactor_number, cassette in enumerate(cassettes, start=1):
        cassette.Details = json.dumps(
            get_default_component_state(cassette, reactor_number))
        session.commit()
if __name__ == '__main__':
    '''
    Running this file as a script creates a new role and user,
    plus a default sequence with three cassettes, each holding
    twelve empty reagents.
    '''
    # Create tables if missing, then seed role -> user -> sequence ->
    # cassettes -> reagents, and finally backfill the JSON detail fields.
    metadata.create_all(checkfirst=True)
    role = create_role()
    user = create_user(role.RoleID)
    sequence = create_sequence(user.UserID)
    cassettes = create_cassette_components(sequence.SequenceID)
    create_reagents(sequence.SequenceID, cassettes)
    update_sequence_details(sequence)
    update_component_details(cassettes)
    # Drop into an interactive shell for manual inspection of the result.
    from IPython import embed
    embed()
| 2.4375 | 2 |
api/serializers.py | nhsuk/nhsuk-content-store | 24 | 12760820 | <reponame>nhsuk/nhsuk-content-store
from wagtail.api.v2.serializers import PageParentField as WagtailPageParentField
from wagtail.api.v2.serializers import PageSerializer as WagtailPageSerializer
from wagtail.api.v2.serializers import Field, StreamField, get_serializer_class
def get_page_serializer_class(value):
    """Build a PageSerializer subclass tailored to *value*'s concrete page type.

    Exposes the common identification fields plus type/URL metadata so that
    pages referenced from lists (children, siblings, parent) all share one shape.
    """
    return get_serializer_class(
        value.__class__,
        ['id', 'type', 'detail_url', 'html_url', 'title', 'slug'],
        meta_fields=['type', 'detail_url', 'html_url'],
        base=PageSerializer
    )
class PageListField(Field):
    """
    Serializes a list of Page objects.
    """
    def to_representation(self, value):
        # Empty or None -> empty list.
        if not value:
            return []
        # One serializer instance (built from the first page's type) is
        # reused for every element, exactly as before.
        serializer = get_page_serializer_class(value[0])(context=self.context)
        return [serializer.to_representation(page) for page in value]
class SiblingsField(PageListField):
    """Serializes the guide siblings of a page (via Page.get_guide_siblings)."""
    def get_attribute(self, instance):
        return instance.get_guide_siblings()
class ChildrenField(PageListField):
    """Serializes the live (published) children of a page."""
    def get_attribute(self, instance):
        return instance.get_live_children()
class PageParentField(WagtailPageParentField):
    """
    Like the Wagtail PageParentField but using a consistent page serializer.
    """
    def to_representation(self, value):
        # Serialize the parent with the same per-type serializer used for
        # child/sibling lists, so all page references share one shape.
        serializer_class = get_page_serializer_class(value)
        serializer = serializer_class(context=self.context)
        return serializer.to_representation(value)
class ContentField(Field):
    """
    Returns a dict of content fields so that they are namespaced and not
    among other model fields.

    ``fields`` is a list of (field name, serializer field class) tuples
    naming the content fields to serialize, e.g. producing::

        {
            "header": [{"value": "...", "type": "markdown"}],
            "main":   [{"value": "...", "type": "markdown"}]
        }
    """

    def __init__(self, *args, **kwargs):
        self.fields = kwargs.pop('fields')
        super().__init__(*args, **kwargs)

    def get_attribute(self, instance):
        # Hand the whole page to to_representation; it picks the fields itself.
        return instance

    def to_representation(self, page):
        if not page:
            return {}
        content = {}
        for name, field_cls in self.fields:
            # Silently skip fields the page type does not define.
            if not hasattr(page, name):
                continue
            field = field_cls()
            field.context = dict(self.context)
            content[name] = field.to_representation(getattr(page, name))
        return content
class PageSerializer(WagtailPageSerializer):
    """Wagtail page serializer extended with navigation and content fields."""
    parent = PageParentField(read_only=True)
    children = ChildrenField(read_only=True)
    siblings = SiblingsField(read_only=True)
    # Expose the 'header' and 'main' stream fields under a single
    # namespaced 'content' key (see ContentField).
    content = ContentField(
        fields=[
            ('header', StreamField),
            ('main', StreamField),
        ],
        read_only=True
    )
| 2.296875 | 2 |
scripts/visualize.py | TeamBrot/client | 1 | 12760821 | <filename>scripts/visualize.py<gh_stars>1-10
#!/usr/bin/python3
import sys
import math
import json
import tempfile
import os
import argparse
from PIL import Image, ImageDraw, ImageFont
import ffmpeg
from common import place
COLORS = ["#ffffff", "#f6a800", "#4c5b5c", "#98ce00",
"#a44a3f", "#ff751b", "#ff00ff", "#000000"]
HEADCOLORS = ["#ffffff", "#bb8000", "#313b3c", "#6d9300",
"#79372f", "#df5800", "#c400c4", "#000000"]
PLAYER_COLOR_INDEX = 1
# Video dimensions
WIDTH = 1920
HEIGHT = 1080
# Video fps
FPS = 30
# Frames per board position
FPB = 1
# Start and end lengths
START_SEC = 2
END_SEC = 1
# Outline width and color
OUTLINE = 1
OUTLINE_COLOR = 'black'
def json_basename(json_filename):
    """Base name of *json_filename* with its ``.json`` suffix stripped."""
    base = os.path.basename(json_filename)
    return base.removesuffix(".json")
def video_filename(json_filename):
    """Sibling ``.mp4`` path for a ``.json`` log path."""
    stem = json_filename.removesuffix(".json")
    return f"{stem}.mp4"
def image_filename(tmpdir, json_filename, index):
    """Path of frame *index* (zero-padded to 4 digits) inside *tmpdir*."""
    # json_basename inlined: basename without the .json suffix.
    base = os.path.basename(json_filename).removesuffix(".json")
    return os.path.join(tmpdir, f"{base}-{index:04d}.png")
def start_image(data, colors, font, width=WIDTH, height=HEIGHT):
    """Render the title card shown before the replay starts.

    Summarizes the game (server, client, board size, player count, final
    place) and shows a swatch of our player's color.
    """
    board_width = data["game"][0]["width"]
    board_height = data["game"][0]["height"]
    client_name = data["config"]["clientName"]
    game_url = data["config"]["gameURL"]
    start_time = data["start"][:19]  # trim to "YYYY-MM-DDTHH:MM:SS"
    numplayers_start = len(data["game"][0]["players"])
    text = (
        "time: " + start_time + "\n"
        "server: " + game_url + "\n"
        "client: " + client_name + "\n\n"
        "width: " + str(board_width) + "\n"
        "height: " + str(board_height) + "\n"
        "number of players: " + str(numplayers_start) + "\n\n"
        "place: " + str(place(data)) + "\n\n"
        "our color:"
    )
    im = Image.new("RGB", (width, height), "white")
    draw = ImageDraw.Draw(im)
    # Center the summary text in the frame.
    draw.text((width/2, height/2), text, anchor="mm",
              align="center", font=font, fill="black")
    # NOTE(review): the swatch below uses absolute pixel coordinates tuned
    # for 1920x1080; other --width/--height values will misplace it
    # relative to the centered text.
    draw.rectangle([1030, 685, 1062, 717],
                   colors[data["game"][0]["you"]])
    return im
def draw_square(draw, i, j, color):
    """Fill board cell (row i, col j), offset 40px from the top.

    NOTE(review): SCALING is not defined anywhere in this module, so this
    helper would raise NameError if called; it appears to be dead code
    superseded by board_image.
    """
    draw.rectangle([j*SCALING, i*SCALING+40, (j+1)*SCALING-1, (i+1)*SCALING-1+40], fill=color)
def create_image(width, height):
    """Create a blank board image with a 40px top strip.

    NOTE(review): relies on the undefined SCALING constant (see
    draw_square); apparently dead code superseded by board_image.
    """
    return Image.new("RGB", (width * SCALING, height * SCALING + 40))
def board_image(status, colors, headcolors, font, turn=None, width=WIDTH, height=HEIGHT, outline=OUTLINE, outline_color=OUTLINE_COLOR):
    """Render one board state (an entry of data["game"]) as a PIL image.

    turn: 1-based turn number drawn in the corner, or None to hide it.
    """
    board_width = status["width"]
    board_height = status["height"]
    if turn is not None:
        # Reserve a strip at the top for the turn counter.
        # NOTE(review): this also shrinks the created image by 30px, so
        # frames with a turn counter differ in size from start_image frames
        # -- TODO confirm ffmpeg handles the mixed sizes.
        height -= 30
    # Largest square cell size that fits both dimensions.
    size = math.floor(height / board_height)
    if size * board_width > width:
        size = math.floor(width / board_width)
    # Center the board within the frame.
    x_offset = (width-size*board_width)/2
    y_offset = (height-size*board_height)/2
    if turn is not None:
        y_offset += 30
    im = Image.new("RGB", (width, height), "white")
    draw = ImageDraw.Draw(im)
    # Draw every cell, then the active players' heads on top in their
    # darker head color.
    for i, y in enumerate(status["cells"]):
        for j, x in enumerate(y):
            draw.rectangle([j*size+x_offset, i*size+y_offset, (j+1)*size+x_offset-1, (i+1)*size+y_offset-1], fill=colors[x], outline=outline_color, width=outline)
    for n in status["players"]:
        player = status["players"][n]
        if player["active"]:
            draw.rectangle([player["x"]*size+x_offset, player["y"]*size+y_offset, (player["x"]+1)*size+x_offset-1, (player["y"]+1)*size+y_offset-1], fill=headcolors[int(n)], outline=outline_color, width=outline)
    if turn is not None:
        draw.text((0, 0), "Turn {}".format(turn), font=font, fill='black')
    return im
def make_video(json_filename, show_turn=False, width=WIDTH, height=HEIGHT, fps=FPS, fpb=FPB, start_frames=FPS*START_SEC, end_frames=FPS*END_SEC, outline=OUTLINE, outline_color=OUTLINE_COLOR):
    """Render a game log (JSON) into an mp4 next to the log file.

    Writes one PNG per frame into a temp dir (title card, then fpb frames
    per board state, then a hold on the final state) and lets ffmpeg
    assemble them.
    """
    with open(json_filename) as f:
        data = json.load(f)
    player_id = data["game"][0]["you"]
    # Adjust colors to current player so that current player is always
    # colors[PLAYER_COLOR_INDEX] (swap in copies; the globals stay intact).
    colors = [color for color in COLORS]
    colors[PLAYER_COLOR_INDEX], colors[player_id] = colors[player_id], colors[PLAYER_COLOR_INDEX]
    headcolors = [headcolor for headcolor in HEADCOLORS]
    headcolors[PLAYER_COLOR_INDEX], headcolors[player_id] = headcolors[player_id], headcolors[PLAYER_COLOR_INDEX]
    font = ImageFont.truetype("arial.ttf", size=30)
    index = 0
    with tempfile.TemporaryDirectory() as tmpdir:
        index = 0  # frame counter; drives the zero-padded PNG names
        # Title card, held for start_frames frames.
        im = start_image(data, colors, font, width=width, height=height)
        for _ in range(start_frames):
            im.save(image_filename(tmpdir, json_filename, index))
            index += 1
        # One image per board state, repeated fpb times.
        for turn, status in enumerate(data["game"]):
            im = board_image(status, colors, headcolors, font, turn=turn+1 if show_turn else None, width=width, height=height, outline=outline, outline_color=outline_color)
            for _ in range(fpb):
                im.save(image_filename(tmpdir, json_filename, index))
                index += 1
        # Hold the final board for end_frames frames.
        for _ in range(end_frames):
            im.save(image_filename(tmpdir, json_filename, index))
            index += 1
        # Stitch the numbered PNGs into an mp4 (-y: overwrite existing).
        (
            ffmpeg
            .input(os.path.join(tmpdir, "*.png"), pattern_type='glob', framerate=fps)
            .output(video_filename(json_filename))
            .global_args('-loglevel', 'error', '-y')
            .run()
        )
# CLI entry point: visualize one or more JSON game logs as mp4 videos.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Visualize spe_ed JSON game logs as videos')
    parser.add_argument('files', nargs='+', help='log files to visualize')
    parser.add_argument('--fps', type=int, default=FPS, help='frames per second')
    parser.add_argument('--fpb', type=int, default=FPB, help='frames per board position')
    parser.add_argument('--start', type=float, default=START_SEC, help='number of seconds the start image is shown')
    parser.add_argument('--end', type=float, default=END_SEC, help='number of seconds the end image is shown')
    parser.add_argument('--width', type=int, default=WIDTH, help='video width in pixels')
    parser.add_argument('--height', type=int, default=HEIGHT, help='video height in pixels')
    parser.add_argument('--outline', type=int, default=OUTLINE, help='outline width in pixels')
    parser.add_argument('--outline-color', default=OUTLINE_COLOR, help='outline color')
    parser.add_argument('--turn', action='store_true', help='show turn counter')
    parser.add_argument('--force', '-f', action='store_true', help='overwrite existing video file')
    args = parser.parse_args()
    for json_filename in args.files:
        output_filename = video_filename(json_filename)
        # Skip logs whose video already exists unless --force is given.
        if args.force or not os.path.exists(output_filename):
            print("processing " + json_filename + "...")
            make_video(json_filename, show_turn=args.turn, width=args.width, height=args.height, fps=args.fps, fpb=args.fpb, start_frames=args.fps*args.start, end_frames=args.fps*args.end, outline=args.outline, outline_color=args.outline_color)
            print("wrote to", output_filename)
        else:
            print("skipping", json_filename, "because output",
                  output_filename, "already exists...")
| 2.4375 | 2 |
src/DBA_setting/spiders/web_img.py | alanzhchou/Coursitter | 1 | 12760822 | <gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: Alan-11510
# Date: 2018/11/8
# version: 1.0
# python_version: 3.62
import re
import os
import requests
# Download a single avatar image and save it under the first unused
# numeric filename (0.png, 1.png, ...).
img = requests.get("https://sustc.io/assets/avatars/9anht3trbqaeoare.png")
# Extract the subtype from the Content-Type header, e.g. "image/png" -> "png".
type_pattern = re.compile(r".*/(\w*)$")
img_type = type_pattern.search(img.headers["Content-type"]).group(1)
# Find the first index whose "<index>.<type>" file does not exist yet.
index = 0
while os.path.exists(str(index) + "." + str(img_type)):
    index += 1
img_resource = str(index) + "." + str(img_type)
# Write the raw response body as a binary file.
with open(img_resource, mode="wb") as f:
    f.write(img.content)
| 2.8125 | 3 |
tests/test_ephemeral_operator.py | swtwsk/dbt-airflow-manifest-parser | 0 | 12760823 | from os import path
from dbt_airflow_factory.airflow_dag_factory import AirflowDagFactory
from dbt_airflow_factory.operator import EphemeralOperator
from tests.utils import task_group_prefix_builder, test_dag
def _get_ephemeral_name(model_name: str) -> str:
return f"{model_name}__ephemeral"
def test_ephemeral_dag_factory():
    """The factory should build run/test task groups for concrete models
    and exactly one EphemeralOperator task per ephemeral model."""
    # given
    factory = AirflowDagFactory(
        path.dirname(path.abspath(__file__)), "ephemeral_operator"
    )

    # when
    dag = factory.create()

    # then
    assert len(dag.tasks) == 15
    # Concrete models each yield a "run" and a "test" task.
    task_group_names = [
        el
        for node_name in ["model1", "model4", "model6"]
        for el in [
            task_group_prefix_builder(node_name, "test"),
            task_group_prefix_builder(node_name, "run"),
        ]
    ]
    # Ephemeral models are represented by a single placeholder task.
    ephemeral_task_names = [
        node_name + "__ephemeral"
        for node_name in [
            "model2",
            "model3",
            "model5",
            "model7",
            "model8",
            "model9",
            "model10",
        ]
    ]
    # The DAG should consist of exactly the seed, the end marker, and the
    # tasks enumerated above -- nothing more, nothing less.
    assert set(dag.task_ids) == set(
        ["dbt_seed", "end"] + task_group_names + ephemeral_task_names
    )
    # And every ephemeral placeholder must be an EphemeralOperator.
    for ephemeral_task_name in ephemeral_task_names:
        assert isinstance(dag.task_dict[ephemeral_task_name], EphemeralOperator)
def test_ephemeral_tasks():
    """Ephemeral placeholders must be wired into the dependency graph in
    the same places as the ephemeral dbt models they stand in for."""
    with test_dag():
        factory = AirflowDagFactory(
            path.dirname(path.abspath(__file__)), "ephemeral_operator"
        )
        tasks = factory._builder.parse_manifest_into_tasks(
            factory._manifest_file_path(factory.read_config())
        )

    # then
    # model1: run feeds its own test, and the test gates downstream model2.
    assert (
        task_group_prefix_builder("model1", "test")
        in tasks.get_task("model.dbt_test.model1").run_airflow_task.downstream_task_ids
    )
    assert (
        task_group_prefix_builder("model1", "run")
        in tasks.get_task("model.dbt_test.model1").test_airflow_task.upstream_task_ids
    )
    assert (
        task_group_prefix_builder("model1", "test")
        in tasks.get_task("model.dbt_test.model2").run_airflow_task.upstream_task_ids
    )
    # Ephemeral chain model2 -> model3 hangs off model1's test.
    assert (
        "model2__ephemeral"
        in tasks.get_task("model.dbt_test.model1").test_airflow_task.downstream_task_ids
    )
    assert (
        "model2__ephemeral"
        in tasks.get_task("model.dbt_test.model3").run_airflow_task.upstream_task_ids
    )
    assert (
        "model3__ephemeral"
        in tasks.get_task("model.dbt_test.model5").run_airflow_task.downstream_task_ids
    )
    # model10 depends on both the model3 and model9 ephemeral branches.
    assert (
        "model3__ephemeral"
        in tasks.get_task("model.dbt_test.model10").run_airflow_task.upstream_task_ids
    )
    assert (
        "model9__ephemeral"
        in tasks.get_task("model.dbt_test.model10").run_airflow_task.upstream_task_ids
    )
    assert (
        "model10__ephemeral"
        in tasks.get_task("model.dbt_test.model3").run_airflow_task.downstream_task_ids
    )
    assert (
        "model10__ephemeral"
        in tasks.get_task("model.dbt_test.model9").run_airflow_task.downstream_task_ids
    )
| 2.28125 | 2 |
benchmark/milliEgo/test_double.py | MAPS-Lab/OdomBydVision | 2 | 12760824 | <reponame>MAPS-Lab/OdomBydVision
"""
Test the model with DOUBLE sensor modalities using h5 files as the input
"""
from keras import backend as K
from utility.networks import build_model_cross_att
from utility.test_util import convert_rel_to_44matrix, iround
from utility.data_loader import load_data_multi_timestamp
from utility import plot_util
import math
import time
import json
import numpy as np
import argparse
import inspect
from os.path import join
from keras.backend.tensorflow_backend import set_session
import tensorflow as tf
import os
os.environ['KERAS_BACKEND'] = 'tensorflow'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.4
set_session(tf.Session(config=config))
# import sys
# sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
os.sys.path.insert(0, os.path.dirname(currentdir))
# keras
K.set_image_dim_ordering('tf')
K.set_session(K.tf.Session(config=K.tf.ConfigProto(allow_soft_placement=True, log_device_placement=False))) #
K.set_learning_phase(0) # Run testing mode
SCALER = 1.0 # scale label: 1, 100, 10000
RADIUS_2_DEGREE = 180.0 / math.pi
def main():
    """Evaluate a trained double-modality (mmWave/depth/RGB + IMU) odometry
    model on h5 test sequences.

    For every sequence: run the network frame pair by frame pair, compose
    the relative 6-DoF predictions into absolute poses, and save the
    predicted/ground-truth trajectories as CSVs plus a 2D plot.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--seqs', type=str, required=True, help='h5 file sequences, e.g, 1, 6, 13')
    parser.add_argument('--model', type=str, required=True, help='model architecture')
    parser.add_argument('--epoch', type=str, required=True, help='which trained epoch to load in')
    parser.add_argument('--data_dir', type=str, required=True,
                        help='specify the data dir of test data)')
    parser.add_argument('--out_pred', type=str, required=False,
                        help='specify the output of csv file for the prediction)')
    parser.add_argument('--out_gt', type=str, required=False,
                        help='specify the output of csv file for the ground truth)')
    args = parser.parse_args()

    # Derive the IMU window length from the data directory name.
    # NOTE(review): np.int is removed in NumPy >= 1.24; plain int() is safer.
    IMU_LENGTH = (np.int(os.path.dirname(args.data_dir)[-1]) - 1) * 5
    if IMU_LENGTH < 10:
        IMU_LENGTH = 10
    # NOTE(review): this hard-coded override makes the derivation above dead code.
    IMU_LENGTH = 25
    print('IMU LENGTH is {}'.format(IMU_LENGTH))

    # Define and construct model.
    # NOTE(review): only build_model_cross_att is imported at the top of this
    # file; the other build_model_* branches below would raise NameError if hit.
    print("Building network model ......")
    if 'sel-' in args.model:
        network_model = build_model_selective_imu(join('./models', args.model, args.epoch), istraining=False)
    if 'deepmio' in args.model:
        network_model = build_model_plus_imu(join('./models', args.model, args.epoch), istraining=False)
    if 'att-' in args.model:
        # Attention fusion variant: read the attention type from nn_opt.json.
        nn_opt_path = join('./models', args.model, 'nn_opt.json')
        with open(nn_opt_path) as handle:
            nn_opt = json.loads(handle.read())
        imu_att_type = nn_opt['imu_att_type']
        network_model = build_model_att_fusion(join('./models', args.model, args.epoch),
                                               imu_length=IMU_LENGTH, istraining=False, imu_att_type=imu_att_type)
    if 'cross-' in args.model:
        nn_opt_path = join('./models', args.model, 'nn_opt.json')
        with open(nn_opt_path) as handle:
            nn_opt = json.loads(handle.read())
        if 'only' in args.model:
            network_model = build_model_cross_fusion(join('./models', args.model, args.epoch),
                                                     imu_length=IMU_LENGTH, mask_att=nn_opt['cross_att_type'], istraining=False)
        else:
            print(join('./models', args.model, args.epoch))
            network_model = build_model_cross_att(join('./models', args.model, args.epoch),
                                                  imu_length=IMU_LENGTH, mask_att=nn_opt['cross_att_type'], istraining=False)
    if 'dio' in args.model:
        network_model = build_model_plus_imu(join('./models', args.model, args.epoch), input_shape=(1, 480, 640, 3),
                                             istraining=False)
    if 'vio' in args.model:
        network_model = build_model_plus_imu(join('./models', args.model, args.epoch), input_shape=(1, 480, 640, 3),
                                             istraining=False)
    network_model.summary(line_length=120)

    seqs = args.seqs.split(',')
    with K.get_session() as sess:
        for seq in seqs:
            test_file = join(args.data_dir, 'turtle_seq_' + seq + '.h5')
            # Pick the sensor stream matching the model type.
            if 'dio' in args.model:
                n_chunk, x_time, x_mm_t, x_imu_t, y_t = load_data_multi_timestamp(test_file, 'depth')
            elif 'vio' in args.model:
                n_chunk, x_time, x_mm_t, x_imu_t, y_t = load_data_multi_timestamp(test_file, 'rgb')
            else:
                n_chunk, x_time, x_mm_t, x_imu_t, y_t = load_data_multi_timestamp(test_file,
                                                                                  'mmwave_middle')  # y (1, 2142, 6)
            y_t = y_t[0]
            print('Data shape: ', np.shape(x_mm_t), np.shape(x_imu_t), np.shape(y_t))
            len_x_i = x_mm_t[0].shape[0]
            print(len_x_i)

            # Set initial pose (identity) for both GT and prediction chains.
            gt_transform_t_1 = np.array([[1, 0, 0, 0],
                                         [0, 1, 0, 0],
                                         [0, 0, 1, 0],
                                         [0, 0, 0, 1]])
            pred_transform_t_1 = np.array([[1, 0, 0, 0],
                                           [0, 1, 0, 0],
                                           [0, 0, 1, 0],
                                           [0, 0, 0, 1]])  # initial pose for prediction

            # Counters and accumulators: ls_time[0] = total inference time,
            # ls_time[1] = total pose-composition/plot time.
            count_img = 0
            ls_time = [0, 0, 0, 0]
            out_gt_array = []  # rows of the flattened 3x4 GT pose matrix
            out_pred_array = []  # rows of the flattened 3x4 predicted pose matrix

            print('Reading images and imu ....')
            for i in range(0, (len_x_i - 1)):
                # Build one consecutive frame pair as network input.
                st_cnn_time = time.time()
                x_mm_1 = x_mm_t[0][i]
                x_mm_2 = x_mm_t[0][i + 1]
                x_mm_1 = np.expand_dims(x_mm_1, axis=0)
                x_mm_2 = np.expand_dims(x_mm_2, axis=0)
                # These architectures expect 3-channel input: repeat channels.
                if any(x in args.model for x in ['deepmio', 'dio', 'sel-', 'att', 'cross', 'skip']):
                    x_mm_1 = np.repeat(x_mm_1, 3, axis=-1)
                    x_mm_2 = np.repeat(x_mm_2, 3, axis=-1)
                print('x_mm_2 shape is {}'.format(np.shape(x_mm_2)))
                # IMU window belonging to the *second* frame of the pair.
                x_imu = x_imu_t[0]
                x_imu = x_imu[i + 1, 0:IMU_LENGTH, :]
                x_imu = np.expand_dims(x_imu, axis=0)
                print('x_imu shape is {}'.format(np.shape(x_imu)))
                predicted = sess.run([network_model.outputs],
                                     feed_dict={network_model.inputs[0]: x_mm_1,
                                                network_model.inputs[1]: x_mm_2,
                                                network_model.inputs[2]: x_imu})
                pred_pose = predicted[0][0][0][0]  # relative 6-DoF pose
                prediction_time = time.time() - st_cnn_time
                ls_time[0] += prediction_time
                print('Running (Hz)', 1.0 / (prediction_time))

                st_plot_time = time.time()
                # Compose the relative prediction onto the accumulated pose.
                pred_transform_t = convert_rel_to_44matrix(0, 0, 0, pred_pose)
                abs_pred_transform = np.dot(pred_transform_t_1, pred_transform_t)
                print(abs_pred_transform)
                # Same composition for the ground truth.
                gt_transform_t = convert_rel_to_44matrix(0, 0, 0, y_t[i])
                abs_gt_transform = np.dot(gt_transform_t_1, gt_transform_t)
                print(abs_gt_transform)

                # Store the top 3x4 of each homogeneous matrix, row-major.
                out_gt_array.append(
                    [abs_gt_transform[0, 0], abs_gt_transform[0, 1], abs_gt_transform[0, 2], abs_gt_transform[0, 3],
                     abs_gt_transform[1, 0], abs_gt_transform[1, 1], abs_gt_transform[1, 2], abs_gt_transform[1, 3],
                     abs_gt_transform[2, 0], abs_gt_transform[2, 1], abs_gt_transform[2, 2], abs_gt_transform[2, 3]])
                out_pred_array.append(
                    [abs_pred_transform[0, 0], abs_pred_transform[0, 1], abs_pred_transform[0, 2], abs_pred_transform[0, 3],
                     abs_pred_transform[1, 0], abs_pred_transform[1, 1], abs_pred_transform[1, 2], abs_pred_transform[1, 3],
                     abs_pred_transform[2, 0], abs_pred_transform[2, 1], abs_pred_transform[2, 2],
                     abs_pred_transform[2, 3]])
                plot_time = time.time() - st_plot_time
                ls_time[1] += plot_time

                gt_transform_t_1 = abs_gt_transform
                pred_transform_t_1 = abs_pred_transform
                count_img += 1

            # Persist trajectories, timestamps, and a 2D trajectory plot.
            if not os.path.exists('./results'):
                os.makedirs('./results')
            np.savetxt(join('./results', args.model + '_ep' + args.epoch + '_seq' + seq),
                       out_pred_array, delimiter=",")
            np.savetxt(join('./results', 'gt_seq' + seq),
                       out_gt_array, delimiter=",")
            np.savetxt(join('./results', 'time_seq' + seq),
                       x_time, delimiter="\n")
            fig_dir = join('./figs', args.model, seq)
            if not os.path.exists(fig_dir):
                os.makedirs(fig_dir)
            out_pred_array, out_gt_array = np.array(out_pred_array), np.array(out_gt_array)
            plot_util.plot2d(out_pred_array, out_gt_array,
                             join(fig_dir, args.model + '_ep' + args.epoch + '_seq' + seq + '.png'))

            # Convert accumulated seconds to average milliseconds per frame.
            # NOTE(review): both branches of this if/else are identical.
            for a in range(2):
                if a == 0:
                    ls_time[a] = ls_time[a] / count_img
                    ls_time[a] = int(round(ls_time[a] * 1000, 0))
                else:
                    ls_time[a] = ls_time[a] / count_img
                    ls_time[a] = int(round(ls_time[a] * 1000, 0))
            print('Model Prediction: {0} ms. Plot: {1} ms.'.format(str(ls_time[0]), str(ls_time[1])))
            print("Seq {} Finished!".format(seq))
if __name__ == "__main__":
os.system("hostname")
main()
| 2.015625 | 2 |
checkGodiPlan.py | wulmer/churchtools-automation | 0 | 12760825 | import os
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from gottesdienstplan import GoDiPlanChecker
MAIL_TEXT_TEMPLATE = """
Hallo,
im Gottesdienstplan scheint es einen Fehler, bzw. einen fehlenden Eintrag zu geben.
Bitte ergänze die fehlende Information oder korrigiere den falschen Eintrag.
Um was es sich genau handelt:
{message}
Liebe Grüße
Dein freundlicher Gottesdienstplan Checker
"""
MAIL_HTML_TEMPLATE = """
<p>Hallo,</p>
<p>
im Gottesdienstplan scheint es einen Fehler, bzw. einen fehlenden Eintrag zu geben.
Bitte ergänze die fehlende Information oder korrigiere den falschen Eintrag.
</p>
<p>
Um was es sich genau handelt:
</p>
<p><em>
{message}
</em></p>
<p>
<p>Liebe Grüße</p>
<p>Dein freundlicher Gottesdienstplan Checker</p>
"""
class Mailer:
    """Context-managed SMTP client for mailing plan-check reports.

    Connects to the local SMTP server on __enter__ and quits on __exit__;
    use via ``with Mailer(addr) as m: ...``.
    """

    def __init__(self, my_addr, port: int = 25):
        self._server = None  # live SMTP connection, only inside the with-block
        self._my_addr = my_addr  # envelope/From address
        self._port = port

    def __enter__(self):
        self._server = smtplib.SMTP("localhost", port=self._port)
        return self

    def __exit__(self, type, value, traceback):
        self._server.quit()
        self._server = None

    def handle_check_report(self, data):
        """Mail ``data['message']`` to ``data['recipient']``, cc'ing the webmaster.

        Expects ``data`` to be a mapping with 'message' and 'recipient' keys.
        """
        msg = data["message"]
        recp = data["recipient"]
        # NOTE: MAIL_DOMAIN is a module-level global assigned in __main__.
        cc = f"webmaster@{MAIL_DOMAIN}"
        mail = MIMEMultipart("alternative")
        mail["Subject"] = "Gottesdienstplan Checker Nachricht"
        mail["From"] = self._my_addr
        mail["To"] = recp
        mail["Cc"] = cc
        mail.attach(MIMEText(MAIL_TEXT_TEMPLATE.format(message=msg), "plain"))
        mail.attach(MIMEText(MAIL_HTML_TEMPLATE.format(message=msg), "html"))
        # BUG FIX: passing to_addrs=recp overrode the message headers, so
        # the Cc address never actually received the mail. Deliver to both
        # addresses explicitly.
        self._server.send_message(
            msg=mail,
            from_addr=self._my_addr,
            to_addrs=[recp, cc],
        )
if __name__ == "__main__":
MAIL_DOMAIN = os.environ.get("MAIL_DOMAIN")
p = GoDiPlanChecker(mail_domain=MAIL_DOMAIN)
if MAIL_DOMAIN:
with Mailer(my_addr=f"godiplanchecker@{MAIL_DOMAIN}", port=25) as m:
p.check("1w", report=m.handle_check_report)
else:
p.check("1w", report=print)
| 2.875 | 3 |
migration/migration.py | Lambda-School-Labs/deprecated-labs31-family-promise-spokane-ds-a | 0 | 12760826 | <gh_stars>0
"""Approximate migration of historical data into a temp database. Kept
separate from actual web app database to avoid messing with web team as they
update the structure.
Use caution as this will overwrite any database you connect it to!
First put database url in .env as DATABASE_URL.
"""
from sqlalchemy.exc import DataError
import pandas as pd
from migrate_util import SessionLocal, Member, Family, EXIT_DICT
# JSON cannot store NaNs, so these columns must be singled out and filled with
# appropriate values.
# String-typed source columns: NaN is replaced with '' (empty string).
JSON_STR_COLS = [
    '3.917 Homeless Start Date', '4.4 Covered by Health Insurance',
    '4.11 Domestic Violence - Currently Fleeing DV?', '3.6 Gender',
    '3.15 Relationship to HoH', '3.4 Race', '3.5 Ethnicity',
    '4.10 Alcohol Abuse (Substance Abuse)', '4.06 Developmental Disability',
    '4.07 Chronic Health Condition', '4.10 Drug Abuse (Substance Abuse)',
    '4.08 HIV/AIDS', '4.09 Mental Health Problem', '4.05 Physical Disability',
    'R5 School Status', '3.12 Exit Destination'
]
# Numeric source columns: NaN is replaced with the -1 sentinel.
JSON_NUM_COLS = [
    '4.2 Income Total at Entry', '4.2 Income Total at Exit'
]
if __name__ == '__main__':
    print('reading csv...')
    df = pd.read_csv('All_data_with_exits.csv', parse_dates=['3.10 Enroll Date', '3.11 Exit Date'])
    print('wrangling...')
    # JSON columns cannot hold NaN; substitute sentinel values (see constants above).
    df[JSON_NUM_COLS] = df[JSON_NUM_COLS].fillna(-1)
    df[JSON_STR_COLS] = df[JSON_STR_COLS].fillna('')
    # Look only at HoHs for family data.
    heads = df[df['3.15 Relationship to HoH'] == 'Self']
    print('migrating families...')
    db = SessionLocal()
    # One Family row per head-of-household; JSON sub-documents hold the details.
    for idx in heads.index:
        head = heads.loc[idx]
        family = Family(
            id = int(head['5.9 Household ID']),
            homeless_info = {
                # JSON cannot hold datetime
                'homeless_start_date':head['3.917 Homeless Start Date']
            },
            insurance = {
                'has_insurance':head['4.4 Covered by Health Insurance']
            },
            domestic_violence_info = {
                'fleeing_dv':head['4.11 Domestic Violence - Currently Fleeing DV?']
            }
        )
        db.add(family)
    # All families are committed in a single transaction.
    db.commit()
    print('migrating members...')
    for idx in df.index:
        row = df.loc[idx]
        mem_id = int(row['5.8 Personal ID'])
        # Check if id already exists (there are id repeats in historical data).
        if not db.query(Member).filter(Member.id==mem_id).first():
            member = Member(
                id = mem_id,
                date_of_enrollment = row['3.10 Enroll Date'],
                household_type = row['Household Type'],
                length_of_stay = (row['3.11 Exit Date'] - row['3.10 Enroll Date']).days,
                demographics = {
                    'gender':row['3.6 Gender'],
                    'relationship':row['3.15 Relationship to HoH'],
                    'income':float(row['4.2 Income Total at Entry']),
                    'race':row['3.4 Race'],
                    'ethnicity':row['3.5 Ethnicity']
                },
                barriers = {
                    'alcohol_abuse':row['4.10 Alcohol Abuse (Substance Abuse)'],
                    'developmental_disabilities':row['4.06 Developmental Disability'],
                    'chronic_health_issues':row['4.07 Chronic Health Condition'],
                    'drug_abuse':row['4.10 Drug Abuse (Substance Abuse)'],
                    'HIV_AIDs':row['4.08 HIV/AIDS'],
                    'mental_illness':row['4.09 Mental Health Problem'],
                    'physical_disabilities':row['4.05 Physical Disability'],
                },
                schools = {
                    'enrolled_status':row['R5 School Status'],
                },
                case_members = int(row['CaseMembers']),
                date_of_exit = row['3.11 Exit Date'],
                income_at_exit = float(row['4.2 Income Total at Exit']),
                exit_destination = EXIT_DICT[row['3.12 Exit Destination']]
            )
            # Attach the member to its family; members without a known family are dropped.
            family = db.query(Family).filter(Family.id==int(row['5.9 Household ID'])).first()
            if family:
                family.members.append(member)
            # Postgres throws a weird error about integers being too big, even though
            # none of the integer values in this data are above 150,000. Since this is
            # only a test migration I simply ignored those member rows which threw the
            # error.
            try:
                db.commit()
            except DataError:
                db.rollback()
                print('DataError on', mem_id)
    print('done!')
    print(db.query(Family).count(), 'families.')
    print(db.query(Member).count(), 'members.')
    db.close()
Backend-Frontend/src/controllers/controller.py | DanielCamachoFonseca/Flask-app-demo | 0 | 12760827 | #En este modulo se almacenan todas las rutas del proyecto
from flask import request, render_template, redirect, flash
from flask.views import MethodView #Este modulo importa la logica de la clase MethodView
from src.db import mysql
class IndexController(MethodView):
    """Landing page: lists products and categories (GET), creates a product (POST)."""

    def get(self):
        with mysql.cursor() as cur:
            cur.execute("SELECT * FROM products")
            data = cur.fetchall()  # every row of the products table
            cur.execute("SELECT * FROM categories")
            categories = cur.fetchall()
            # Render the page with both result sets available to the template.
            return render_template('public/index.html', data=data, categories=categories)

    def post(self):
        code = request.form['code']
        name = request.form['name']
        stock = request.form['stock']
        value = request.form['value']
        category = request.form['category']
        # Persist the submitted form values into the products table.
        with mysql.cursor() as cur:
            try:
                # Parameterized insert keeps the query safe from SQL injection.
                cur.execute("INSERT INTO products VALUES(%s, %s, %s, %s, %s)", (code, name, stock, value, category))
                cur.connection.commit()
                flash("El producto ha sido agregado correctamente", "success")
            except Exception:
                # BUG FIX: narrowed from a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt and hid programming errors.
                flash("Un error ha ocurrido", "error")
        return redirect('/')
class DeleteProductController(MethodView):
    """Deletes the product identified by its code (POST only)."""

    def post(self, code):
        with mysql.cursor() as cur:
            try:
                # Parameterized delete; `code` comes straight from the URL.
                cur.execute("DELETE FROM products WHERE code = %s", (code, ))
                cur.connection.commit()
                flash("El producto ha sido eliminado correctamente", "success")
            except Exception:
                # BUG FIX: narrowed from a bare `except:`.
                flash("Un error ha ocurrido", "error")
        return redirect('/')
class UpdateProductController(MethodView):
    """Shows the edit form for a product (GET) and applies the changes (POST)."""

    def get(self, code):
        with mysql.cursor() as cur:
            cur.execute("SELECT * FROM products WHERE code = %s", (code, ))
            product = cur.fetchone()  # first matching row, or None if the code is unknown
        return render_template('public/update.html', product=product)

    def post(self, code):
        productCode = request.form['code']
        name = request.form['name']
        stock = request.form['stock']
        value = request.form['value']
        with mysql.cursor() as cur:
            try:
                cur.execute("UPDATE products SET code = %s, name = %s, stock = %s, value = %s WHERE code = %s", (productCode, name, stock, value, code))
                cur.connection.commit()
                flash("El producto ha sido actualizado correctamente", "success")
            except Exception:
                # BUG FIX: narrowed from a bare `except:`.
                flash("Un error ha ocurrido", "error")
        return redirect('/')
class CreateCategoriesController(MethodView):
    """Shows the category creation form (GET) and stores a new category (POST)."""

    def get(self):
        return render_template("public/categories.html")

    def post(self):
        category_id = request.form['id']  # renamed local: `id` shadowed the builtin
        name = request.form['name']
        description = request.form['description']
        with mysql.cursor() as cur:
            try:
                cur.execute("INSERT INTO categories VALUES(%s, %s, %s)", (category_id, name, description))
                cur.connection.commit()
                flash("La categoria se ha creado!", "success")
            except Exception:
                # BUG FIX: narrowed from a bare `except:`.
                flash("Un error ha ocurrido", "error")
        return redirect('/')
| 2.875 | 3 |
third_party/universal-ctags/ctags/Units/parser-python.r/python-fullqualified-tags.d/input.py | f110/wing | 4 | 12760828 | class Foo():
def g(self):
pass
class Bar():  # fixture input for the ctags Python parser tests; body intentionally trivial
    def f(self):
        pass
| 1.828125 | 2 |
examples/logreg/tfe-logreg.py | bendecoste/tf-encrypted | 0 | 12760829 | <filename>examples/logreg/tfe-logreg.py
import numpy as np
import tensorflow as tf
import tf_encrypted as tfe
from data import gen_training_input, gen_test_input
tf.set_random_seed(1)
# Parameters
learning_rate = 0.01
training_set_size = 2000
# NOTE(review): test_set_size is never used below; gen_test_input is called
# with training_set_size — confirm which size the test set should have.
test_set_size = 100
training_epochs = 10
batch_size = 100
nb_feats = 10
# Private (encrypted) training and test inputs supplied by the input provider.
xp, yp = tfe.define_private_input('input-provider', lambda: gen_training_input(training_set_size, nb_feats, batch_size))
xp_test, yp_test = tfe.define_private_input('input-provider', lambda: gen_test_input(training_set_size, nb_feats, batch_size))
# Model parameters: private weight vector and bias for logistic regression.
W = tfe.define_private_variable(tf.random_uniform([nb_feats, 1], -0.01, 0.01))
b = tfe.define_private_variable(tf.zeros([1]))
# Training model
out = tfe.matmul(xp, W) + b
pred = tfe.sigmoid(out)
# Due to missing log function approximation, we need to compute the cost in numpy
# cost = -tfe.sum(y * tfe.log(pred) + (1 - y) * tfe.log(1 - pred)) * (1/train_batch_size)
# Backprop
# Gradients of the cross-entropy loss w.r.t. W and b, averaged over the batch.
dc_dout = pred - yp
dW = tfe.matmul(tfe.transpose(xp), dc_dout) * (1 / batch_size)
db = tfe.reduce_sum(1. * dc_dout, axis=0) * (1 / batch_size)
# One SGD step: apply both parameter updates.
ops = [
    tfe.assign(W, W - dW * learning_rate),
    tfe.assign(b, b - db * learning_rate)
]
# Testing model
pred_test = tfe.sigmoid(tfe.matmul(xp_test, W) + b)
def print_accuracy(pred_test_tf, y_test_tf: tf.Tensor) -> tf.Operation:
    """Build a graph node printing mean accuracy of rounded predictions vs labels."""
    matches = tf.cast(tf.equal(tf.round(pred_test_tf), y_test_tf), tf.float32)
    mean_accuracy = tf.reduce_mean(matches)
    return tf.Print(mean_accuracy, data=[mean_accuracy], message="Accuracy: ")
print_acc_op = tfe.define_output('input-provider', [pred_test, yp_test], print_accuracy)
total_batch = training_set_size // batch_size
with tfe.Session() as sess:
sess.run(tfe.global_variables_initializer(), tag='init')
for epoch in range(training_epochs):
avg_cost = 0.
for i in range(total_batch):
_, y_out, p_out = sess.run([ops, yp.reveal(), pred.reveal()], tag='optimize')
# Our sigmoid function is an approximation
# it can have values outside of the range [0, 1], we remove them and add/substract an epsilon to compute the cost
p_out = p_out * (p_out > 0) + 0.001
p_out = p_out * (p_out < 1) + (p_out >= 1) * 0.999
c = -np.mean(y_out * np.log(p_out) + (1 - y_out) * np.log(1 - p_out))
avg_cost += c / total_batch
print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))
print("Optimization Finished!")
sess.run(print_acc_op)
| 2.375 | 2 |
sample_py/scripts/sample_py_subscriber.py | hiro-han/ros_packages | 0 | 12760830 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import rospy
from sample_py.msg import sample_message
def callback(msg):
    """Log every sample_message received on the subscribed topic."""
    rospy.loginfo("I heard: message = [%s], count = [%d]" % (msg.message, msg.count))
def subscriber():
    # Anonymous node: ROS appends a random suffix so several instances can coexist.
    rospy.init_node('sample_py_subscriber', anonymous=True)
    rospy.Subscriber('sample_topic', sample_message, callback)
    rospy.spin()  # block here, dispatching callbacks until shutdown
if __name__ == '__main__':
    # Entry point when run directly as a ROS node script.
    subscriber()
| 2.34375 | 2 |
app.py | wenqisun/WeiboLite | 0 | 12760831 | <gh_stars>0
from flask import Flask,render_template,request
import GstoreConnector
import json
import ast
import sys
import user
from DBop import DataBaseOp
# Module-level singletons shared by all request handlers.
app = Flask(__name__)
u = user.User()  # current user state (module-global, not request-scoped)
op = DataBaseOp()  # gStore database access helper
# NOTE(review): appending to sys.path after the imports above has no effect on them.
sys.path.append('../src')
@app.route('/')
def index():
    """Render the landing page.

    NOTE(review): the user id and weibo owner id are hard-coded debug values,
    and the query result is only printed/parsed for inspection — it is not
    passed to the template. Confirm whether this route is still in progress.
    """
    u.user_id="1775467263"
    op = DataBaseOp()  # shadows the module-level `op` within this handler
    user_name = "3708696074833794"
    results = op.recent_my_weibo(user_name)
    '''
    real_results = []
    for result in results:
        real_results.append(result.value())
    context = {
        'whatsnew': real_results,
        'user_name': user_name
    }
    '''
    print(results)
    print("11111111111111111111")
    # The gStore client returns the SPARQL result as a string; parse it into a dict.
    weibo_dict = ast.literal_eval(results)
    print(weibo_dict)
    print(weibo_dict['results']['bindings'][4]['o']['value'])
    return render_template('index.html')
@app.route('/new')
def whatsnew():
    '''
    Render the "what's new" feed for the current user.

    Intended template context (not yet wired up):
        whatsnew: list of dicts:
            { owner: weibo author,
              contexts: weibo content,
              comments: comment list (not implemented yet),
            }
        user_name: the user's name
    '''
    # user_name = "1775467263"
    # NOTE(review): this reads u.user_name while index() sets u.user_id —
    # confirm which attribute actually holds the id expected by the query.
    u_id=u.user_name
    results = op.recent_new_weibo(u_id)
    print(results)
    print("2222222222222")
    '''
    if request.method == "GET":
        op = DataBaseOp(True)
        user_name="<http://localhost:2020/weibo/3708696074833794>"
        results = op.recent_new_weibo(user_name)
        real_results = []
        for result in results:
            real_results.append(result.value())
        context = {
            'whatsnew': real_results,
            'user_name': user_name
        }
    '''
    return render_template('index.html')
@app.route('/login')
def login():
    """Run the login operation against the database, then show the landing page."""
    outcome = op.login()
    print(outcome)  # debug: dump the raw database response
    return render_template('index.html')
@app.route('/register')
def register():
    """Run the registration operation against the database, then show the landing page."""
    outcome = op.register()
    print(outcome)  # debug: dump the raw database response
    return render_template('index.html')
if __name__ == '__main__':
    # Flask development server; use a production WSGI server for deployment.
    app.run()
| 2.46875 | 2 |
archABM/aerosol_model_colorado.py | vishalbelsare/ArchABM | 8 | 12760832 | <reponame>vishalbelsare/ArchABM
import math
from typing import Tuple
from .aerosol_model import AerosolModel
from .parameters import Parameters
class AerosolModelColorado(AerosolModel):
    """Aerosol transmission estimator
    COVID-19 Airborne Transmission Estimator :cite:`doi:10.1021/acs.estlett.1c00183,https://doi.org/10.1111/ina.12751,Peng2021.04.21.21255898`
    The model combines two submodels:
    #. A standard atmospheric box model, which assumes that the emissions are completely mixed across a control volume quickly \
    (such as an indoor room or other space). \
    See for example Chapter 3 of the Jacob Atmos. Chem. textbook :cite:`10.2307/j.ctt7t8hg`, and Chapter 21 of the Cooper and \
    Alley Air Pollution Control Engineering Textbook :cite:`cooper2010air` for indoor applications. \
    This is an approximation that allows easy calculation, is approximately correct as long as near-field effects \
    are avoided by social distancing, and is commonly used in air quality modeling.
    #. A standard aerosol infection model (Wells-Riley model), as formulated in Miller et al. 2020 :cite:`https://doi.org/10.1111/ina.12751`,\
    and references therein :cite:`10.1093/oxfordjournals.aje.a112560,BUONANNO2020105794,BUONANNO2020106112`.
    .. important::
        The propagation of COVID-19 is only by aerosol transmission.
        The model is based on a standard model of aerosol disease transmission, the Wells-Riley model.
        It is calibrated to COVID-19 per recent literature on quanta emission rate.
    This is not an epidemiological model, and does not include droplet or contact / fomite transmission, and assumes that 6 ft / 2 m social distancing is respected. Otherwise higher transmission will result.
    """
    name: str = "Colorado"
    def __init__(self, params):
        super().__init__(params)
        # NOTE(review): the base class constructor probably already stores
        # params; confirm whether this reassignment is redundant.
        self.params = params
    def get_risk(self, inputs: Parameters) -> Tuple[float, float, float]:
        """Calculate the infection risk of an individual in a room
        and the CO\ :sub:`2` thrown into the air.
        Args:
            inputs (Parameters): model parameters
        Returns:
            Tuple[float, float, float]: CO\ :sub:`2` mixing ratio (ppm),
            quanta inhaled per person, and quanta concentration in the room.
            (The previous annotation of a 2-tuple was incorrect: three values
            are returned.)
        """
        params = self.params
        # length = 8
        # width = 6
        height = inputs.room_height
        area = inputs.room_area  # width * length
        volume = area * height
        pressure = params.pressure  # 0.95
        temperature = params.temperature  # 20
        # relative_humidity = params.relative_humidity # 50
        CO2_background = params.CO2_background  # 415
        event_duration = inputs.event_duration  # 50 / 60 # h
        ventilation = inputs.room_ventilation_rate  # 3
        decay_rate = params.decay_rate  # 0.62
        deposition_rate = params.deposition_rate  # 0.3
        hepa_flow_rate = params.hepa_flow_rate
        # NOTE(review): HEPA removal is flow * volume; an air-changes-per-hour
        # rate would normally be flow / volume — confirm the intended units.
        hepa_removal = hepa_flow_rate * volume
        recirculated_flow_rate = inputs.recirculated_flow_rate
        filter_efficiency = params.filter_efficiency
        ducts_removal = params.ducts_removal
        other_removal = params.other_removal
        ach_additional = recirculated_flow_rate / volume * min(1, filter_efficiency + ducts_removal + other_removal)
        additional_measures = hepa_removal + ach_additional
        # First-order loss rate of infectious quanta (1/h): ventilation +
        # viral decay + deposition + extra control measures.
        loss_rate = ventilation + decay_rate + deposition_rate + additional_measures
        # ventilation_person = volume * (ventilation + additional_measures) * 1000 / 3600 / num_people
        num_people = inputs.num_people
        infective_people = inputs.infective_people  # 1
        fraction_immune = params.fraction_immune  # 0
        susceptible_people = (num_people - infective_people) * (1 - fraction_immune)
        # density_area_person = area / num_people
        # density_people_area = num_people / area
        # density_volume_person = volume / num_people
        breathing_rate = params.breathing_rate  # 0.52
        breathing_rate_relative = breathing_rate / (0.0048 * 60)
        CO2_emission_person = params.CO2_emission_person  # 0.005
        CO2_emission = CO2_emission_person * num_people / pressure * (273.15 + temperature) / 273.15
        quanta_exhalation = params.quanta_exhalation  # 25
        quanta_enhancement = params.quanta_enhancement  # 1
        quanta_exhalation_relative = quanta_exhalation / 2
        mask_efficiency_exhalation = inputs.mask_efficiency  # 50 / 100
        mask_efficiency_inhalation = inputs.mask_efficiency  # 30 / 100
        people_with_masks = params.people_with_masks  # 100 / 100
        # probability_infective = 0.20 / 100
        # hospitalization_rate = 20 / 100
        # death_rate = 1 / 100
        net_emission_rate = quanta_exhalation * (1 - mask_efficiency_exhalation * people_with_masks) * infective_people * quanta_enhancement
        # NOTE(review): this value is immediately overwritten by the dynamic
        # formula below; kept only for reference.
        quanta_concentration = net_emission_rate / loss_rate / volume * (1 - (1 / loss_rate / event_duration) * (1 - math.exp(-loss_rate * event_duration)))
        # TODO: NEW FORMULA
        # TODO: infection risk dynamic
        # Dynamic version: carries over the pre-existing quanta level decayed
        # across the event duration.
        quanta_concentration = (
            net_emission_rate / loss_rate / volume * (1 - (1 / loss_rate / event_duration) * (1 - math.exp(-loss_rate * event_duration)))
            + math.exp(-loss_rate * event_duration) * (inputs.quanta_level - 0)
            + 0
        )
        quanta_inhaled_per_person = quanta_concentration * breathing_rate * event_duration * (1 - mask_efficiency_inhalation * people_with_masks)
        # probability_infection = 1 - math.exp(-quanta_inhaled_per_person)
        # probability_infection = probability_infection * susceptible_people
        # probability_hospitalization = probability_infection * hospitalization_rate
        # probability_death = probability_infection * death_rate
        # if susceptible_people == 0 or infective_people == 0:
        #     infection_risk = 0.0
        #     infection_risk_relative = 0.0
        # else:
        #     infection_risk = (
        #         breathing_rate_relative
        #         * quanta_exhalation_relative
        #         * (1 - mask_efficiency_exhalation * people_with_masks)
        #         * (1 - mask_efficiency_inhalation * people_with_masks)
        #         * event_duration
        #         * susceptible_people
        #         / (loss_rate * volume)
        #         * (1 - (1 - math.exp(-loss_rate * event_duration)) / (loss_rate * event_duration))
        #         + math.exp(-loss_rate * event_duration) * (inputs.infection_risk - 0)
        #         + 0
        #     )
        #     infection_risk_relative = infection_risk / susceptible_people
        #     infection_risk = (1 - math.exp(-infection_risk_relative))*susceptible_people  # TODO: review Taylor approximation
        # CO2 box model: steady-state emission plus decayed carry-over of the
        # previous level above background.
        CO2_mixing_ratio = (
            (CO2_emission * 3.6 / ventilation / volume * (1 - (1 / ventilation / event_duration) * (1 - math.exp(-ventilation * event_duration)))) * 1e6
            + math.exp(-ventilation * event_duration) * (inputs.CO2_level - CO2_background)
            + CO2_background
        )
        CO2_mixing_ratio_delta = CO2_mixing_ratio - inputs.CO2_level
        CO2_concentration = CO2_mixing_ratio_delta * 40.9 / 1e6 * 44 * 298 / (273.15 + temperature) * pressure
        CO2_reinhaled_grams = CO2_concentration * breathing_rate * event_duration
        CO2_reinhaled_ppm = CO2_mixing_ratio_delta * event_duration
        # CO2_probability_infection_= CO2_reinhaled_ppm / 1e4 / probability_infection
        # CO2_inhale_ppm = CO2_mixing_ratio_delta * event_duration * 0.01 / probability_infection + CO2_background
        return CO2_mixing_ratio, quanta_inhaled_per_person, quanta_concentration
aries_cloudagent/revocation/error.py | krgko/aries-cloudagent-python | 0 | 12760833 | """Revocation error classes."""
from ..core.error import BaseError
class RevocationError(BaseError):
    """Base exception for revocation-related errors; catch this to handle any of them."""
class RevocationNotSupportedError(RevocationError):
    """Attempted to create a registry for a non-revocable credential definition."""
class RevocationRegistryBadSizeError(RevocationError):
    """Attempted to create a registry whose maximum credential count is too large or too small."""
| 2.421875 | 2 |
test/matchers.py | madedotcom/ouroboros | 8 | 12760834 | <filename>test/matchers.py
import json
from expects.matchers import Matcher
from pprint import PrettyPrinter
import httpretty
pp = PrettyPrinter(indent=4)
class have_posted_to(Matcher):
    """Expects matcher asserting a captured httpretty request hit `path` with `method`."""

    _fail = "Expected {2} request to {0} but was {1}"

    def __init__(self, path, method=httpretty.POST):
        self._path = path
        self._method = method

    def _match(self, req):
        is_match = self._path == req.path and self._method == req.method
        return is_match, "does not match"

    def _failure_message(self, req):
        return self._fail.format(self._path, req.path, self._method)
class have_json(Matcher):
    """Expects matcher asserting a request body parses to the given JSON payload."""

    def __init__(self, body):
        self._body = body

    def _match(self, req):
        return json.loads(req.body.decode('Utf-8')) == self._body, "does not match"

    def _failure_message(self, req):
        # BUG FIX: expected and actual were swapped — the message claimed the
        # request body was "expected" and the configured payload was "actual".
        return "Expected request with body {0} but was {1}".format(
            pp.pformat(self._body),
            pp.pformat(json.loads(req.body.decode('Utf-8'))))
| 2.875 | 3 |
queue_services/entity-filer/src/entity_filer/filing_processors/filing_components/aliases.py | argush3/lear | 1 | 12760835 | <reponame>argush3/lear
# Copyright © 2020 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manages the names of a Business."""
from typing import Dict
from flask_babel import _ as babel # noqa: N813
from legal_api.models import Alias, Business
def update_aliases(business: Business, aliases) -> Dict:
    """Update the aliases (name translations) of the business.

    Creates aliases for entries without an id, updates existing aliases by id,
    and removes any current alias whose name is no longer in *aliases*.

    Returns an error dict when *business* is missing, otherwise None.
    """
    if not business:
        return {'error': babel('Business required before alternate names can be set.')}
    for alias in aliases:
        if alias_id := alias.get('id'):
            existing_alias = next((x for x in business.aliases.all() if str(x.id) == alias_id), None)
            # BUG FIX: guard against an unknown/stale id; previously this
            # raised AttributeError on `None.alias` when no match was found.
            if existing_alias:
                existing_alias.alias = alias['name'].upper()
        else:
            new_alias = Alias(alias=alias['name'].upper(), type=Alias.AliasType.TRANSLATION.value)
            business.aliases.append(new_alias)
    # Drop aliases that are no longer present in the submitted list.
    for current_alias in business.aliases.all():
        if not next((x for x in aliases if x['name'].upper() == current_alias.alias.upper()), None):
            business.aliases.remove(current_alias)
    return None
| 2.078125 | 2 |
network.py | galbiati/nns-for-mnk | 0 | 12760836 | import numpy as np
import theano
import lasagne
## ALIASES ##
# Short names for frequently used theano/lasagne entry points.
L = lasagne.layers
T = theano.tensor
get_output = L.get_output
get_all_params = L.get_all_params
cross_entropy = lasagne.objectives.categorical_crossentropy
get_layers = L.get_all_layers
class Network(object):
    """
    Wrapper for neural networks for MNK that automates network compilation and
    provides some convenience functions for freezing, saving, and loading params.

    Things to consider doing:
        mod save/load to use named layers
        add self.reinitialize(layers)
    """

    def __init__(self, architecture):
        self.architecture = architecture
        self.input_var = T.tensor4('inputs')
        self.target_var = T.ivector('targets')
        self.update_algo = lasagne.updates.adam  # just a default
        self.build()
        self.objectives()
        self.compile_functions()
        # Loss traces for plotting. BUG FIX: these were fixed-size np.zeros(500)
        # buffers that overflowed after 500 updates and were inconsistent with
        # the list-based traces the Autoencoder subclass uses.
        self.val_trace = []
        self.train_trace = []
        self.trace_loc = 0  # retained for backward compatibility; == len(val_trace)

    def build(self):
        """
        Generates network graph, grabs params and output symbols
        """
        self.net = self.architecture(self.input_var)
        self.prediction = get_output(self.net)
        self.test_prediction = get_output(self.net, deterministic=True)
        self.params = get_all_params(self.net, trainable=True)
        # The "value" output sits four layers back from the end of the graph.
        self.value_layer = get_layers(self.net)[-4]
        self.value_prediction = get_output(self.value_layer)
        return None

    def objectives(self):
        """
        Adds loss and accuracy nodes
        """
        self.loss = cross_entropy(self.prediction, self.target_var)
        self.loss = self.loss.mean()
        # Deterministic (dropout-free) loss, kept per-item for analysis.
        self.itemized_loss = cross_entropy(self.test_prediction, self.target_var)
        self.test_loss = self.itemized_loss.mean()
        self.test_acc = T.mean(
            T.eq(T.argmax(self.test_prediction, axis=1), self.target_var),
            dtype=theano.config.floatX
        )
        self.updates = self.update_algo(self.loss, self.params)
        return None

    def compile_functions(self):
        """
        Compiles theano functions for computing output, losses, etc
        """
        self.output_fn = theano.function([self.input_var], self.test_prediction)
        self.value_fn = theano.function([self.input_var], self.value_prediction)
        self.train_fn = theano.function(
            [self.input_var, self.target_var], self.loss,
            updates=self.updates
        )
        self.test_fn = theano.function(
            [self.input_var, self.target_var],
            [self.test_loss, self.test_acc]
        )
        self.itemized_test_fn = theano.function(
            [self.input_var, self.target_var],
            self.itemized_loss
        )
        return None

    def update_traces(self):
        """
        Saves validation/training error traces for plotting.

        Expects `self.val_err` and `self.train_err` to have been set by the
        training loop before each call.
        """
        self.val_trace.append(self.val_err)
        self.train_trace.append(self.train_err)
        self.trace_loc += 1
        return None

    def freeze_params(self, net=None, exclude=None):
        """
        Sets params to be untrainable.

        Excludes layers listed in optional arg `exclude` (tuple or list of
        layer indices; negative indices count from the end).
        """
        if net is None:
            net = self.net
        layers = get_layers(net)
        if exclude is not None:
            # BUG FIX: `exclude` used to be normalized *before* the None check,
            # so calling freeze_params() with the default raised a TypeError.
            num_layers = len(layers)
            exclude = [i if i >= 0 else num_layers + i for i in exclude]
            layers = [layer for l, layer in enumerate(layers) if l not in exclude]
        for layer in layers:
            for param in layer.params:
                layer.params[param].remove('trainable')
        self.params = get_all_params(net, trainable=True)  # CAUTION: needs rewritten to not throw errors as autoencoders develop
        return None

    def unfreeze_params(self):
        """
        Sets all parameters back to trainable
        """
        for layer in L.get_all_layers(self.net):
            for param in layer.params:
                layer.params[param].add('trainable')
        self.params = L.get_all_params(self.net, trainable=True)
        return None

    def save_params(self, param_file):
        """
        Save parameters for reuse later
        """
        all_params = L.get_all_param_values(self.net)
        np.savez(param_file, *all_params)
        return None

    def load_params(self, paramsfile):
        """
        Loads parameters from npz files
        """
        with np.load(paramsfile) as loaded:
            params_list = [(i[0], i[1]) for i in loaded.items()]
            # np.savez names arrays arr_0, arr_1, ...; recover numeric order.
            params_order = np.array([i[0][4:6] for i in params_list]).astype(int)
            params_list = [params_list[i] for i in params_order.argsort()]
            L.set_all_param_values(self.net, [i[1] for i in params_list])
        return None
class Autoencoder(Network):
    """
    Wrapper for training and testing transfer learning with an autoencoder.
    Almost as cool as it sounds.
    Later, use super() to cut down bloat inside functions
    """
    def __init__(self, architecture):
        # NOTE(review): does not call super().__init__() and never defines
        # trace_loc, so the inherited update_traces() will fail if used —
        # confirm whether traces are intended to work on this subclass.
        self.architecture = architecture
        self.input_var = T.tensor4('inputs')
        self.target_var = T.ivector('targets')
        self.ae_target_var = T.tensor4('ae inputs')  # reconstruction target images
        self.update_algo = lasagne.updates.adam
        self.val_trace = []
        self.train_trace = []
        self.build()
        self.objectives()
        self.compile_functions()
    def build(self):
        """Generates graph, caches params, output symbols"""
        # The architecture returns three heads: reconstruction (autoencoder),
        # value layer, and the classifier output.
        self.autoencoder, self.value_layer, self.net = self.architecture(self.input_var)
        self.prediction = get_output(self.net)
        self.test_prediction = get_output(self.net, deterministic=True)
        self.value_prediction = get_output(self.value_layer)
        self.image = get_output(self.autoencoder)
        self.test_image = get_output(self.autoencoder, deterministic=True)
        # NOTE(review): unlike Network.build, trainable=True is not passed
        # here — confirm whether frozen params should be included.
        self.params = get_all_params(self.net)
        self.ae_params = get_all_params(self.autoencoder)
        return None
    def objectives(self):
        """Loss functions, etc"""
        self.loss = cross_entropy(self.prediction, self.target_var).mean()
        self.itemized_test_loss = cross_entropy(self.test_prediction, self.target_var)
        self.test_loss = self.itemized_test_loss.mean()
        self.test_acc = T.mean(
            T.eq(T.argmax(self.test_prediction, axis=1), self.target_var),
            dtype=theano.config.floatX
        )
        self.updates = self.update_algo(self.loss, self.params)
        # Autoencoder branch: mean squared reconstruction error.
        self.ae_loss = T.mean((self.ae_target_var - self.image)**2, dtype=theano.config.floatX)
        self.ae_test_loss = T.mean((self.ae_target_var - self.test_image)**2, dtype=theano.config.floatX)
        self.ae_updates = self.update_algo(self.ae_loss, self.ae_params)
        return None
    def compile_functions(self):
        """Compile theano functions"""
        self.output_fn = theano.function([self.input_var], self.test_prediction)
        self.value_fn = theano.function([self.input_var], self.value_prediction)
        self.train_fn = theano.function(
            [self.input_var, self.target_var],
            self.loss,
            updates = self.updates
        )
        self.test_fn = theano.function(
            [self.input_var, self.target_var],
            [self.test_loss, self.test_acc]
        )
        self.itemized_test_fn = theano.function(
            [self.input_var, self.target_var],
            self.itemized_test_loss
        )
        # Autoencoder-specific functions: reconstruction output, training, testing.
        self.ae_output_fn = theano.function([self.input_var], self.test_image)
        self.ae_train_fn = theano.function(
            [self.input_var, self.ae_target_var],
            self.ae_loss,
            updates=self.ae_updates
        )
        self.ae_test_fn = theano.function(
            [self.input_var, self.ae_target_var],
            self.ae_test_loss
        )
        return None
| 2.609375 | 3 |
train_RFC_AHN3.py | MPa-TUDelft/Semantic-segmentation-of-the-AHN-dataset-with-the-Random-Forest-Classifier | 0 | 12760837 | <reponame>MPa-TUDelft/Semantic-segmentation-of-the-AHN-dataset-with-the-Random-Forest-Classifier
#
#
# 0=================================0
# | Random Forest Trainer |
# 0=================================0
#
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Callable script to start training a RFC with AHN3 dataset
#
# ----------------------------------------------------------------------------------------------------------------------
#
# <NAME> - 16/08/2021
# TU Delft - MSc Geomatics - GEO2020 MSc Thesis
# scikit-learn RFC version
# ----------------------------------------------------------------------------------------------------------------------
#
# Imports
# \**********************************/
#
from point_cloud_analysis import data_preparation, uniform_sampling, Z_values, spherical_neighborhoods, density, input_dataframe
import numpy as np
import time
import os
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
import pickle
# ----------------------------------------------------------------------------------------------------------------------
#
# train RFC model
# \******************/
#
def RFC(X_train, Y_train, name):
    """Train a RandomForestClassifier, persist it, and write a feature ranking.

    Args:
        X_train: 2-D feature matrix (samples x features).
        Y_train: per-sample class labels.
        name: suffix used for the saved model and ranking files under
            pre_trained_models/.
    """
    start = time.time()
    print(f"Started training RFC...")
    model = RandomForestClassifier( n_estimators = 100,
                                    criterion = 'gini',
                                    max_depth = 15,
                                    min_samples_split = 2,
                                    min_samples_leaf = 1,
                                    max_features = 'sqrt',
                                    bootstrap = True,
                                    oob_score = True)
    model.fit(X_train, Y_train)
    filename = 'pre_trained_models/model_' + name + '.sav'
    # BUG FIX: use context managers so both file handles are flushed and
    # closed reliably (the pickle handle and `ff` were never closed before).
    with open(filename, 'wb') as model_file:
        pickle.dump(model, model_file)
    # Rank features by Gini importance, most important first.
    importances = model.feature_importances_
    indices = np.argsort(importances)[::-1]
    with open("pre_trained_models/features_ranking_model_" + name + ".txt", "w") as ff:
        ff.write("Feature ranking: \n")
        for f in range(X_train.shape[1]):
            ff.write("%d. feature %d (%f) \n" % (f + 1, indices[f], importances[indices[f]]))
    end = time.time()
    print(f"Finished training RFC {round((end - start) / 60, 2)} minutes.")
    return
# ----------------------------------------------------------------------------------------------------------------------
#
# Main Call
# \***************/
#
if __name__ == '__main__':
    start = time.time()
    # training data folder
    path = "data/training/"
    # reading file names from the training data folder
    training_data = os.listdir(path)
    # declaring the training dataframe
    df_train = pd.DataFrame()
    train_lbs = []
    for file in training_data:
        print('Processing ' + file[:-4] + '...')
        print('##################################')
        # path to the training data file
        file_path = path + file
        # reading coordinates and labels of the points
        points, labels = data_preparation(file_path)
        # Subsample the cloud on a uniform grid to keep feature extraction tractable.
        grid_candidate_center, grid_candidate_center_lbs = uniform_sampling(points, labels)
        train_lbs += list(grid_candidate_center_lbs)
        del grid_candidate_center_lbs
        # creating the file dataframe
        df = pd.DataFrame(data=grid_candidate_center, columns=['X', 'Y', 'Z'])
        del grid_candidate_center
        # calculating the normalized Z and height below
        df = Z_values(df)
        # calculate neighborhoods (spherical, radii 2 m / 3 m / 4 m)
        n_train = spherical_neighborhoods(df[["X", "Y", "Z"]].values, [2, 3, 4])
        # calculating density
        df = density(df, n_train[2], '4m')
        # preparing input dataframe: one feature set per neighborhood radius
        df = input_dataframe(df[["X", "Y", "Z"]].values, n_train[0], "2m", df)
        df = input_dataframe(df[["X", "Y", "Z"]].values, n_train[1], "3m", df)
        df = input_dataframe(df[["X", "Y", "Z"]].values, n_train[2], "4m", df)
        # exporting the features in csv format
        features_path = "data/training_features/" + file[:-4] + '_features.csv'
        df.to_csv(features_path, index=False)
        # removing the point coordinates
        df = df.drop(df.columns[[0, 1, 2]], axis=1)
        # `del` frees the large intermediates before processing the next tile.
        del n_train
        df_train = pd.concat([df_train, df], axis=0)
        del df
        print('##################################\n')
    model_name = "RFC_AHN3"
    RFC(df_train.values, train_lbs, model_name)
    end = time.time()
    print(f"Script ended after {round((end - start) / 60, 2)} minutes.")
| 1.875 | 2 |
src/env_objects/enum/TileType.py | Mighstye/uqac_tp3IA_RobotSauveVie | 0 | 12760838 | <reponame>Mighstye/uqac_tp3IA_RobotSauveVie
"""
Enum for all the possible type of Tile and a String version of them to make them possible to print
Thus, our Expert system use these String versions
"""
from enum import Enum
class TileType(Enum):
    """Single-character codes for each tile state (see module docstring)."""
    DUST = 'D'
    FIRE = 'F'
    HUMAN = 'H'
    ROBOT = 'A'    # 'A' presumably stands for "Agent" -- TODO confirm
    RUINS = 'R'
    WARM = 'W'
    UNKNOWN = '?'  # tile whose content has not been determined
| 3.25 | 3 |
examples/04.StockMarket.py | wuqunfei/stream-app-python-training | 1 | 12760839 | import logging
import os
from datetime import timedelta
import alpaca_trade_api
import faust
from alpaca_trade_api.common import URL
from faust import Worker
# Faust application: talks to a local Kafka broker, serializes values as
# JSON and keeps table state in RocksDB.
app = faust.App(
    'stock-app',
    broker='kafka://localhost:9092',
    value_serializer='json',
    store='rocksdb://',
    version=1,
)
# Alpaca paper-trading endpoints; credentials come from the environment.
ALPACA_BASE_URL = URL('https://paper-api.alpaca.markets')
alpaca_ws_url = URL('wss://data.alpaca.markets')  # websocket URL; not referenced elsewhere in this file
ALPACA_API_KEY = os.getenv('alpaca_key_id')
ALPACA_SECRET_KEY = os.getenv('alpaca_secret_key')
# REST client used by get_ohlc() to poll the latest bars.
alpaca = alpaca_trade_api.REST(ALPACA_API_KEY, ALPACA_SECRET_KEY, ALPACA_BASE_URL, api_version='v2')
class OHLCRecord(faust.Record, serializer='json'):
    """One OHLC (open/high/low/close) price bar for a single symbol."""
    Name: str      # ticker symbol
    datetime: str  # bar timestamp as delivered by the data source
    Open: float
    High: float
    Low: float
    Volume: float
# Topic carrying OHLC bars; key and value are both OHLCRecord instances.
stock_market_topic = app.topic('stockmarket-topic', key_type=OHLCRecord, value_type=OHLCRecord, partitions=2)
# Tumbling-window table (10 s windows, expired after 60 s).
# NOTE(review): not referenced anywhere else in this file -- confirm it is
# still needed.
stock_market_table = app.Table(name='stockmarket-table', default=float, partitions=10).tumbling(
    size=timedelta(seconds=10), expires=timedelta(seconds=60))
# Topic for order instructions: key = stock name, value = action string.
stock_order_topic = app.topic('stock-order-topic', key_type=str, value_type=str, partitions=10)
@app.timer(interval=1)
async def get_ohlc():
    """Poll Alpaca once per second for the latest OHLC bar of each FAANG
    stock and publish every bar to ``stock_market_topic``.
    """
    # BUG FIX: Apple's ticker symbol is 'AAPL'; 'APPL' is not a valid symbol.
    FAANG_STOCKS = ['FB', 'AMZN', 'AAPL', 'NFLX', 'GOOG']
    df = alpaca.get_latest_bars(symbols=FAANG_STOCKS)
    for name, bar in df.items():
        record = OHLCRecord(
            Name=name,
            datetime=bar.t,
            Open=bar.o,
            Close=bar.c,
            High=bar.h,
            Low=bar.l,
            Volume=bar.v,
        )
        # Record is used both as key and value so consumers can partition
        # by the full bar.
        await stock_market_topic.send(key=record, value=record)
@app.timer(interval=10)
async def back_test():
    """Every 10 s emit a placeholder order onto ``stock_order_topic``.

    NOTE(review): key and action are hard-coded stubs -- presumably a
    skeleton for a real backtesting strategy; confirm before relying on it.
    """
    key = 'stock_name'
    action = 'buy'
    await stock_order_topic.send(key=key, value=action)
@app.agent(stock_order_topic, concurrency=10, isolated_partitions=False)
async def handle_order(orders):
    """Consume order messages and log each (key, action) pair."""
    async for key, order in orders.items():
        logging.info(f'Send Order {key} to {order}')
if __name__ == '__main__':
    # Run the Faust worker in-process instead of via the `faust` CLI.
    worker = Worker(app=app, loglevel=logging.INFO)
    worker.execute_from_commandline()
| 2.21875 | 2 |
datasets/voc/prepare.py | biyoml/PyTorch-DeepLabV3 | 0 | 12760840 | <reponame>biyoml/PyTorch-DeepLabV3<filename>datasets/voc/prepare.py
import os
import argparse
import glob
import shutil
import scipy.io as sio
import numpy as np
import pandas as pd
from tqdm import tqdm
from PIL import Image
def save_as_csv(filename, ids, image_dir, anno_dir):
    """Write a CSV pairing each image with its annotation, as absolute paths.

    For every id the image path is ``<image_dir>/<id>.jpg`` and the
    annotation path is ``<anno_dir>/<id>.png``.  The file is written next
    to this script, and the resulting table is echoed to stdout.
    """
    rows = [
        [os.path.abspath(os.path.join(image_dir, sample_id + '.jpg')),
         os.path.abspath(os.path.join(anno_dir, sample_id + '.png'))]
        for sample_id in ids
    ]
    table = pd.DataFrame(rows, columns=['image', 'annotation'])
    print(table)
    table.to_csv(os.path.join(os.path.dirname(__file__), filename), index=False)
def main():
    """Convert VOC2012 and SBD segmentation labels to palette-free PNGs
    under ./annotations and write train/val/trainaug CSV index files.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--voc', type=str, required=True,
                        help="path to VOCdevkit/VOC2012/")
    parser.add_argument('--sbd', type=str, required=True,
                        help="path to SBD root directory (benchmark_RELEASE/)")
    args = parser.parse_args()
    anno_dir = os.path.join(os.path.dirname(__file__), 'annotations/')
    image_dir = os.path.join(args.voc, 'JPEGImages/')
    # Start from a clean annotations directory each run.
    shutil.rmtree(anno_dir, ignore_errors=True)
    os.mkdir(anno_dir)
    train_ids, val_ids = [], []
    # Parse VOC2012 annotations
    # Refer to: https://github.com/tensorflow/models/blob/master/research/deeplab/datasets/remove_gt_colormap.py
    for split in ['train', 'val']:
        with open(os.path.join(args.voc, 'ImageSets/Segmentation/%s.txt' % split)) as f:
            ids = [line.strip() for line in f.readlines()]
        for id in tqdm(ids, desc="VOC2012 %s" % split):
            anno = Image.open(os.path.join(args.voc, 'SegmentationClass/%s.png' % id))
            anno = Image.fromarray(np.array(anno))  # remove palette
            anno.save(os.path.join(anno_dir, id + '.png'))
            if split == 'train':
                train_ids.append(id)
            else:
                val_ids.append(id)
    save_as_csv('train.csv', train_ids, image_dir, anno_dir)
    save_as_csv('val.csv', val_ids, image_dir, anno_dir)
    # Parse the SBD annotations
    # Refer to: https://pytorch.org/vision/stable/_modules/torchvision/datasets/sbd.html#SBDataset
    mat_files = glob.glob(os.path.join(args.sbd, 'dataset/cls/*.mat'))
    mat_files.sort()
    num_extra = 0
    for mat_path in tqdm(mat_files, desc="SBD"):
        id = os.path.basename(os.path.splitext(mat_path)[0])
        # Skip images already covered by the VOC2012 splits.
        if (id in train_ids) or (id in val_ids):
            continue
        num_extra += 1
        mat = sio.loadmat(mat_path)
        anno = Image.fromarray(mat['GTcls'][0]['Segmentation'][0])
        anno.save(os.path.join(anno_dir, id + '.png'))
        train_ids.append(id)
    print("Number of extra annotations:", num_extra)
    save_as_csv('trainaug.csv', train_ids, image_dir, anno_dir)
save_as_csv('trainaug.csv', train_ids, image_dir, anno_dir)
# Script entry point.
if __name__ == '__main__':
    main()
| 2.328125 | 2 |
code_examples/tensorflow/basic_nmt_example/data_gen/reader.py | Splendon/examples | 0 | 12760841 | # Copyright 2019 Graphcore Ltd.
"""
Dataset reader from Datalogue keras-attention tutorial.
References:
https://github.com/datalogue/keras-attention
https://medium.com/datalogue
"""
import json
import csv
import random
import numpy as np
# from keras.utils.np_utils import to_categorical
INPUT_PADDING = 50
OUTPUT_PADDING = 100
class Vocabulary(object):
    """Character-level vocabulary backed by a JSON token -> id mapping.

    The JSON file must contain at least the special tokens '<sot>',
    '<eot>' and '<unk>'.

    :param vocabulary_file: path to the JSON vocabulary file
    :param padding: if given, every encoded string is truncated or padded
        so its encoded length is exactly this many ids (truncated text is
        terminated with '<eot>'; short text is padded with '<unk>')
    """
    def __init__(self, vocabulary_file, padding=None):
        """
        Creates a vocabulary from a file
        :param vocabulary_file: the path to the vocabulary
        """
        self.vocabulary_file = vocabulary_file
        with open(vocabulary_file, 'r') as f:
            self.vocabulary = json.load(f)
        self.padding = padding
        # id -> token mapping, used for decoding
        self.reverse_vocabulary = {v: k for k, v in self.vocabulary.items()}

    def start_id(self):
        """Return the id of the start-of-text token."""
        return self.vocabulary['<sot>']

    def end_id(self):
        """Return the id of the end-of-text token."""
        return self.vocabulary['<eot>']

    def size(self):
        """
        Gets the size of the vocabulary
        """
        return len(self.vocabulary)

    def string_to_int(self, text):
        """
        Converts a string into its character-id representation.

        Unknown characters map to '<unk>'.  When ``self.padding`` is set
        the result is truncated (with a trailing '<eot>') or padded with
        '<unk>' ids to exactly ``self.padding`` entries.
        :param text: text to convert
        """
        characters = list(text)
        if self.padding and len(characters) >= self.padding:
            # truncate if too long, leaving room for the end-of-text marker
            characters = characters[:self.padding - 1]
            characters.append('<eot>')
        integers = [self.vocabulary.get(c, self.vocabulary['<unk>'])
                    for c in characters]
        # pad:
        if self.padding and len(integers) < self.padding:
            integers.extend(
                [self.vocabulary['<unk>']] * (self.padding - len(integers))
            )
        # BUG FIX: this sanity check previously also ran when padding was
        # None, so every unpadded encode raised AttributeError.
        if self.padding and len(integers) != self.padding:
            print(text)
            raise AttributeError('Length of text was not padding.')
        return integers

    def int_to_string(self, integers):
        """
        Decodes a list of integers into its character representation.
        """
        return [self.reverse_vocabulary[i] for i in integers]
class Data(object):
    def __init__(self, file_name, input_vocabulary, output_vocabulary):
        """
        Creates an object that reads (input, target) string pairs from a
        CSV file and encodes them with the given vocabularies.
        :param file_name: name of the CSV file to read from
        :param input_vocabulary: Vocabulary used to encode column 0 (inputs)
        :param output_vocabulary: Vocabulary used to encode column 1 (targets)
        """
        self.input_vocabulary = input_vocabulary
        self.output_vocabulary = output_vocabulary
        self.file_name = file_name
    def load(self):
        """
        Loads the input/target string pairs from the CSV file into
        self.inputs (column 0) and self.targets (column 1).
        """
        self.inputs = []
        self.targets = []
        with open(self.file_name, 'r') as f:
            reader = csv.reader(f)
            for row in reader:
                self.inputs.append(row[0])
                self.targets.append(row[1])
    def transform(self):
        """
        Encodes inputs and targets into 2-D integer arrays using the
        vocabularies' string_to_int (each row padded to a fixed length).
        """
        # @TODO: use `pool.map_async` here?
        self.inputs = np.array(list(
            map(self.input_vocabulary.string_to_int, self.inputs)))
        self.targets = np.array(list(
            map(self.output_vocabulary.string_to_int, self.targets)))
        assert len(self.inputs.shape) == 2, 'Inputs could not properly be encoded'
        assert len(self.targets.shape) == 2, 'Targets could not properly be encoded'
    def generator(self, batch_size):
        """
        Creates a generator that can be used in `model.fit_generator()`
        Batches are generated randomly (sampling without replacement
        within each batch).
        :param batch_size: the number of instances to include per batch
        """
        instance_id = range(len(self.inputs))
        while True:
            try:
                batch_ids = random.sample(instance_id, batch_size)
                yield (np.array(self.inputs[batch_ids], dtype=int),
                       np.array(self.targets[batch_ids]))
            except Exception as e:
                # NOTE(review): swallowing every exception and yielding
                # (None, None) hides real errors -- confirm this is intended.
                print('EXCEPTION OMG')
                print(e)
                yield None, None
if __name__ == '__main__':
    # Smoke test: encode the fake dataset and exercise the batch generator.
    # NOTE(review): relies on ../data/human_vocab.json, machine_vocab.json
    # and fake.csv existing relative to the working directory.
    input_vocab = Vocabulary('../data/human_vocab.json', padding=50)
    output_vocab = Vocabulary('../data/machine_vocab.json', padding=12)
    ds = Data('../data/fake.csv', input_vocab, output_vocab)
    ds.load()
    ds.transform()
    print(ds.inputs.shape)
    print(ds.targets.shape)
    g = ds.generator(32)
    print(ds.inputs[[5, 10, 12]].shape)
    print(ds.targets[[5, 10, 12]].shape)
| 3.25 | 3 |
python-package/setup.py | svenpeter42/LightGBM-CEGB | 12 | 12760842 | # coding: utf-8
# pylint: disable=invalid-name, exec-used
"""Setup lightgbm package."""
from __future__ import absolute_import
import os
import sys
from setuptools import find_packages, setup
sys.path.insert(0, '.')

CURRENT_DIR = os.path.dirname(__file__)
libpath_py = os.path.join(CURRENT_DIR, 'lightgbm/libpath.py')
# Execute libpath.py in an isolated namespace to obtain find_lib_path()
# without importing the not-yet-installed lightgbm package.
libpath = {'__file__': libpath_py}
with open(libpath_py, "rb") as f:  # FIX: close the file (was left open)
    exec(compile(f.read(), libpath_py, 'exec'), libpath, libpath)

LIB_PATH = [os.path.relpath(path, CURRENT_DIR) for path in libpath['find_lib_path']()]
print("Install lib_lightgbm from: %s" % LIB_PATH)

setup(name='lightgbm',
      version='0.2',  # FIX: setuptools expects version as a string, not a float
      description="LightGBM Python Package",
      install_requires=[
          'numpy',
          'scipy',
      ],
      maintainer='<NAME>',
      maintainer_email='<EMAIL>',
      zip_safe=False,
      packages=find_packages(),
      include_package_data=True,
      data_files=[('lightgbm', LIB_PATH)],
      url='https://github.com/Microsoft/LightGBM')
| 1.84375 | 2 |
migrations/versions/11848f9f4372_entries_table.py | EstebanMonge/flask-crud | 0 | 12760843 | """entries table
Revision ID: 11848f9f4372
Revises: e<PASSWORD>
Create Date: 2021-05-20 13:39:16.441005
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '11848f9f4372'
down_revision = 'eb<PASSWORD>d363477'
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``handover`` table and its secondary indexes."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('handover',
    sa.Column('ho_id', sa.Integer(), nullable=False),
    sa.Column('ticket', sa.String(length=64), nullable=False),
    sa.Column('ticket_type', sa.String(length=64), nullable=False),
    sa.Column('servers', sa.String(length=300), nullable=False),
    sa.Column('platform', sa.Integer(), nullable=True),
    sa.Column('steps', sa.String(length=300), nullable=False),
    sa.Column('next_steps', sa.String(length=300), nullable=False),
    sa.Column('chat_url', sa.String(length=300), nullable=False),
    sa.Column('owner', sa.Integer(), nullable=True),
    sa.Column('old_owners', sa.String(length=300), nullable=False),
    sa.Column('is_active', sa.Boolean(), nullable=True),
    sa.PrimaryKeyConstraint('ho_id')
    )
    # Non-unique indexes on the searchable string columns.
    op.create_index(op.f('ix_handover_chat_url'), 'handover', ['chat_url'], unique=False)
    op.create_index(op.f('ix_handover_next_steps'), 'handover', ['next_steps'], unique=False)
    op.create_index(op.f('ix_handover_old_owners'), 'handover', ['old_owners'], unique=False)
    op.create_index(op.f('ix_handover_servers'), 'handover', ['servers'], unique=False)
    op.create_index(op.f('ix_handover_steps'), 'handover', ['steps'], unique=False)
    op.create_index(op.f('ix_handover_ticket'), 'handover', ['ticket'], unique=False)
    op.create_index(op.f('ix_handover_ticket_type'), 'handover', ['ticket_type'], unique=False)
    # ### end Alembic commands ###
def downgrade():
    """Drop the ``handover`` table and its indexes (reverse of upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_handover_ticket_type'), table_name='handover')
    op.drop_index(op.f('ix_handover_ticket'), table_name='handover')
    op.drop_index(op.f('ix_handover_steps'), table_name='handover')
    op.drop_index(op.f('ix_handover_servers'), table_name='handover')
    op.drop_index(op.f('ix_handover_old_owners'), table_name='handover')
    op.drop_index(op.f('ix_handover_next_steps'), table_name='handover')
    op.drop_index(op.f('ix_handover_chat_url'), table_name='handover')
    op.drop_table('handover')
    # ### end Alembic commands ###
| 1.789063 | 2 |
s3tests/roundtrip.py | ilv-ludmila/s3-tests | 3 | 12760844 | <filename>s3tests/roundtrip.py
import gevent
import gevent.pool
import gevent.queue
import gevent.monkey; gevent.monkey.patch_all()
import itertools
import optparse
import os
import sys
import time
import traceback
import random
import yaml
import realistic
import common
NANOSECOND = int(1e9)
def writer(bucket, objname, fp, queue):
    """Upload *fp* to key *objname* in *bucket*, pushing a result dict
    onto *queue*.

    The dict always carries type/bucket/key plus start time, duration
    (nanoseconds) and the chunks written; on failure it additionally
    carries an 'error' entry with message and traceback.
    """
    key = bucket.new_key(objname)
    result = dict(
        type='w',
        bucket=bucket.name,
        key=key.name,
    )
    start = time.time()
    try:
        key.set_contents_from_file(fp)
    except gevent.GreenletExit:
        # let greenlet cancellation propagate untouched
        raise
    except Exception as e:
        # stop timer ASAP, even on errors
        end = time.time()
        result.update(
            error=dict(
                msg=str(e),
                traceback=traceback.format_exc(),
            ),
        )
        # certain kinds of programmer errors make this a busy
        # loop; let parent greenlet get some time too
        time.sleep(0)
    else:
        end = time.time()
    # timing info is recorded on both the success and the error path
    elapsed = end - start
    result.update(
        start=start,
        duration=int(round(elapsed * NANOSECOND)),
        chunks=fp.last_chunks,
    )
    queue.put(result)
def reader(bucket, objname, queue):
    """Download key *objname* from *bucket* through a FileVerifier and
    push a result dict onto *queue*.

    Mirrors writer(): the dict carries type/bucket/key, start, duration
    (nanoseconds) and chunks; an 'error' entry is added on exceptions or
    when the downloaded content fails the md5sum check.
    """
    key = bucket.new_key(objname)
    fp = realistic.FileVerifier()
    result = dict(
        type='r',
        bucket=bucket.name,
        key=key.name,
    )
    start = time.time()
    try:
        key.get_contents_to_file(fp)
    except gevent.GreenletExit:
        # let greenlet cancellation propagate untouched
        raise
    except Exception as e:
        # stop timer ASAP, even on errors
        end = time.time()
        result.update(
            error=dict(
                msg=str(e),
                traceback=traceback.format_exc(),
            ),
        )
        # certain kinds of programmer errors make this a busy
        # loop; let parent greenlet get some time too
        time.sleep(0)
    else:
        end = time.time()
        # download succeeded -- still flag content mismatches
        if not fp.valid():
            result.update(
                error=dict(
                    msg='md5sum check failed',
                ),
            )
    elapsed = end - start
    result.update(
        start=start,
        duration=int(round(elapsed * NANOSECOND)),
        chunks=fp.chunks,
    )
    queue.put(result)
def parse_options():
    """Parse command-line options for the roundtrip benchmark.

    Returns the ``(options, args)`` pair produced by optparse; the only
    option is ``--no-cleanup``, which flips ``options.cleanup`` (default
    True) to False.
    """
    opt_parser = optparse.OptionParser(usage='%prog [OPTS] <CONFIG_YAML')
    opt_parser.add_option(
        "--no-cleanup",
        dest="cleanup",
        action="store_false",
        default=True,
        help="skip cleaning up all created buckets",
    )
    return opt_parser.parse_args()
def main():
    """Roundtrip benchmark: write N random objects to a fresh S3 bucket,
    read them all back verifying content, and emit per-operation timing
    records as a YAML stream on the original stdout.

    Configuration (YAML) is read from stdin; regular prints go to stderr
    so stdout stays machine-readable.  (Python 2 code.)
    """
    # parse options
    (options, args) = parse_options()
    if os.isatty(sys.stdin.fileno()):
        raise RuntimeError('Need configuration in stdin.')
    config = common.read_config(sys.stdin)
    conn = common.connect(config.s3)
    bucket = None
    try:
        # setup: reserve stdout for the YAML result stream
        real_stdout = sys.stdout
        sys.stdout = sys.stderr
        # verify all required config items are present
        if 'roundtrip' not in config:
            raise RuntimeError('roundtrip section not found in config')
        for item in ['readers', 'writers', 'duration', 'files', 'bucket']:
            if item not in config.roundtrip:
                raise RuntimeError("Missing roundtrip config item: {item}".format(item=item))
        for item in ['num', 'size', 'stddev']:
            if item not in config.roundtrip.files:
                raise RuntimeError("Missing roundtrip config item: files.{item}".format(item=item))
        # seed every random stream separately so runs are reproducible
        seeds = dict(config.roundtrip.get('random_seed', {}))
        seeds.setdefault('main', random.randrange(2**32))
        rand = random.Random(seeds['main'])
        for name in ['names', 'contents', 'writer', 'reader']:
            seeds.setdefault(name, rand.randrange(2**32))
        print 'Using random seeds: {seeds}'.format(seeds=seeds)
        # setup bucket and other objects
        bucket_name = common.choose_bucket_prefix(config.roundtrip.bucket, max_len=30)
        bucket = conn.create_bucket(bucket_name)
        print "Created bucket: {name}".format(name=bucket.name)
        objnames = realistic.names(
            mean=15,
            stddev=4,
            seed=seeds['names'],
        )
        objnames = itertools.islice(objnames, config.roundtrip.files.num)
        objnames = list(objnames)
        files = realistic.files(
            mean=1024 * config.roundtrip.files.size,
            stddev=1024 * config.roundtrip.files.stddev,
            seed=seeds['contents'],
        )
        # results are streamed to real stdout as they arrive on the queue
        q = gevent.queue.Queue()
        logger_g = gevent.spawn_link_exception(yaml.safe_dump_all, q, stream=real_stdout)
        print "Writing {num} objects with {w} workers...".format(
            num=config.roundtrip.files.num,
            w=config.roundtrip.writers,
            )
        pool = gevent.pool.Pool(size=config.roundtrip.writers)
        start = time.time()
        for objname in objnames:
            fp = next(files)
            pool.spawn_link_exception(
                writer,
                bucket=bucket,
                objname=objname,
                fp=fp,
                queue=q,
                )
        pool.join()
        stop = time.time()
        elapsed = stop - start
        q.put(dict(
                type='write_done',
                duration=int(round(elapsed * NANOSECOND)),
                ))
        print "Reading {num} objects with {w} workers...".format(
            num=config.roundtrip.files.num,
            w=config.roundtrip.readers,
            )
        # avoid accessing them in the same order as the writing
        rand.shuffle(objnames)
        pool = gevent.pool.Pool(size=config.roundtrip.readers)
        start = time.time()
        for objname in objnames:
            pool.spawn_link_exception(
                reader,
                bucket=bucket,
                objname=objname,
                queue=q,
                )
        pool.join()
        stop = time.time()
        elapsed = stop - start
        q.put(dict(
                type='read_done',
                duration=int(round(elapsed * NANOSECOND)),
                ))
        # terminate the YAML dumper and wait for it to flush
        q.put(StopIteration)
        logger_g.get()
    finally:
        # cleanup
        if options.cleanup:
            if bucket is not None:
                common.nuke_bucket(bucket)
| 2.4375 | 2 |
float_display_dialog_ui.py | boeglinw/DFE | 0 | 12760845 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'float_display_dialog.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_FloatDisplayDialog(object):
    """Auto-generated (pyuic5) UI builder for the float display dialog.

    Regenerate from float_display_dialog.ui rather than editing the code.
    """
    def setupUi(self, FloatDisplayDialog):
        """Create and lay out all widgets on *FloatDisplayDialog*."""
        FloatDisplayDialog.setObjectName("FloatDisplayDialog")
        FloatDisplayDialog.setWindowModality(QtCore.Qt.WindowModal)
        FloatDisplayDialog.resize(398, 129)
        FloatDisplayDialog.setModal(True)
        self.layoutWidget = QtWidgets.QWidget(FloatDisplayDialog)
        self.layoutWidget.setGeometry(QtCore.QRect(17, 13, 371, 105))
        self.layoutWidget.setObjectName("layoutWidget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget)
        self.verticalLayout.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout.setObjectName("verticalLayout")
        # Form: precision spinner + fixed/scientific radio buttons
        self.formLayout = QtWidgets.QFormLayout()
        self.formLayout.setObjectName("formLayout")
        self.floatPrecisionLabel = QtWidgets.QLabel(self.layoutWidget)
        self.floatPrecisionLabel.setObjectName("floatPrecisionLabel")
        self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.floatPrecisionLabel)
        self.floatDisplayTypeLabel = QtWidgets.QLabel(self.layoutWidget)
        self.floatDisplayTypeLabel.setObjectName("floatDisplayTypeLabel")
        self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.floatDisplayTypeLabel)
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.fix_radioButton = QtWidgets.QRadioButton(self.layoutWidget)
        self.fix_radioButton.setChecked(True)
        self.fix_radioButton.setObjectName("fix_radioButton")
        self.horizontalLayout.addWidget(self.fix_radioButton)
        self.sci_radioButton = QtWidgets.QRadioButton(self.layoutWidget)
        self.sci_radioButton.setObjectName("sci_radioButton")
        self.horizontalLayout.addWidget(self.sci_radioButton)
        self.formLayout.setLayout(1, QtWidgets.QFormLayout.FieldRole, self.horizontalLayout)
        self.floatPrecision_spinBox = QtWidgets.QSpinBox(self.layoutWidget)
        self.floatPrecision_spinBox.setObjectName("floatPrecision_spinBox")
        self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.floatPrecision_spinBox)
        self.verticalLayout.addLayout(self.formLayout)
        # Standard OK/Cancel button box wired to accept/reject below
        self.buttonBox = QtWidgets.QDialogButtonBox(self.layoutWidget)
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName("buttonBox")
        self.verticalLayout.addWidget(self.buttonBox)
        self.retranslateUi(FloatDisplayDialog)
        self.buttonBox.accepted.connect(FloatDisplayDialog.accept)
        self.buttonBox.rejected.connect(FloatDisplayDialog.reject)
        QtCore.QMetaObject.connectSlotsByName(FloatDisplayDialog)
    def retranslateUi(self, FloatDisplayDialog):
        """Set all translatable text on the dialog's widgets."""
        _translate = QtCore.QCoreApplication.translate
        FloatDisplayDialog.setWindowTitle(_translate("FloatDisplayDialog", "Variable Name and Type"))
        self.floatPrecisionLabel.setText(_translate("FloatDisplayDialog", "Float Precision"))
        self.floatDisplayTypeLabel.setText(_translate("FloatDisplayDialog", "Float Display Type"))
        self.fix_radioButton.setText(_translate("FloatDisplayDialog", "Fixed"))
        self.sci_radioButton.setText(_translate("FloatDisplayDialog", "Scientific"))
| 2.046875 | 2 |
Lab_4/4.2_rsa_key.py | Saif-M-Dhrubo/Crypto-Lab | 2 | 12760846 | from Crypto.PublicKey import RSA
# RSA encrypt/decrypt demo (Python 2, legacy PyCrypto API).
# NOTE(review): key.encrypt/key.decrypt is raw ("textbook") RSA with no
# padding (OAEP) -- insecure for real use; confirm this is demo-only code.
# NOTE(review): the key files are opened without being closed.
rsa_private_key = open('rsa_key').read()
rsa_public_key = open('rsa_key.pub').read()
rsa_private_key = RSA.importKey(rsa_private_key)
rsa_public_key = RSA.importKey(rsa_public_key)
m = 'hello world'
print 'Plain Text :', m
# Second argument to encrypt() is ignored by RSA but required by the API.
c = rsa_public_key.encrypt(m, '')
print 'Encryption :', c
m = rsa_private_key.decrypt(c)
print 'Decrypted Text :', m
scripts/mksprite.py | gasman/kisskill | 2 | 12760847 | <reponame>gasman/kisskill<filename>scripts/mksprite.py
from PIL import Image
import sys
# Convert the input image (argv[1]) to 1-bit mode and dump its raw pixel
# buffer to the output path (argv[2]).
# NOTE(review): Python 2 / old-PIL APIs: file() and Image.tostring() were
# removed in Python 3 and modern Pillow (open() / Image.tobytes()).
im = Image.open(sys.argv[1])
im = im.convert('1')
out = file(sys.argv[2], 'wb')
out.write(im.tostring())
out.close()
| 2.390625 | 2 |
tests/test_equal_constraint.py | sbacheld/sudoku-solver | 0 | 12760848 | <filename>tests/test_equal_constraint.py
import unittest
from assignment import Assignment
from constraint import EqualConstraint
from domain import Domain
from variable import Variable
class TestEqualConstraint(unittest.TestCase):
    """Unit tests for EqualConstraint.is_violated()."""

    def test_not_violated_if_variable_not_set(self):
        """An unassigned variable never violates the constraint."""
        constraint = EqualConstraint(Variable('1', Domain([1])), 1)
        self.assertFalse(constraint.is_violated(Assignment()))

    def test_not_violated_if_variable_equal(self):
        """Assigning exactly the expected value satisfies the constraint."""
        value = 1
        var = Variable('1', Domain([value]))
        constraint = EqualConstraint(var, value)
        assignment = Assignment()
        assignment.set(var.name, value)
        self.assertFalse(constraint.is_violated(assignment))

    def test_violated_if_variable_not_equal(self):
        """Assigning a different value violates the constraint."""
        assigned, expected = 1, 2
        var = Variable('1', Domain([assigned]))
        constraint = EqualConstraint(var, expected)
        assignment = Assignment()
        assignment.set(var.name, assigned)
        self.assertTrue(constraint.is_violated(assignment))
| 3.5625 | 4 |
test/unit/test_verify_frontend_rule.py | KTH/aspen | 0 | 12760849 | <reponame>KTH/aspen
__author__ = '<EMAIL>'
import os
import unittest
from unittest import mock
from modules.steps.verify_frontend_rule import VerifyFrontendRule
from modules.steps.base_pipeline_step import BasePipelineStep
from modules.util import data_defs, environment, reporter_service
from modules.util.exceptions import DeploymentError
from test import mock_test_data
class TestVerifyFrontendRule(unittest.TestCase):
    """Unit tests for VerifyFrontendRule (traefik frontend-rule checks)."""
    def test_get_frontend_rule(self):
        """get_frontend_rule returns the label's value, or None if absent."""
        pipeline_data = mock_test_data.get_pipeline_data()
        label = 'traefik.frontend.rule'
        step = VerifyFrontendRule()
        result = step.get_frontend_rule(label, pipeline_data)
        self.assertEqual(result, 'PathPrefix:/kth-azure-app')
        # With no deploy labels at all, no rule is found.
        pipeline_data[data_defs.SERVICES][0][data_defs.S_DEPLOY_LABELS] = []
        result = step.get_frontend_rule(label, pipeline_data)
        self.assertIsNone(result)
    def test_run_step(self):
        """run_step accepts valid/absent rules and rejects a bare strip-root
        rule, except for excluded applications."""
        pipeline_data = mock_test_data.get_pipeline_data()
        try:
            # A PathPrefix rule must pass without raising.
            os.environ[environment.FRONT_END_RULE_LABEL] = 'traefik.frontend.rule'
            step = VerifyFrontendRule()
            step.run_step(pipeline_data)
            # A label name no service defines must also pass.
            os.environ[environment.FRONT_END_RULE_LABEL] = 'doesnt.exist'
            step.run_step(pipeline_data)
        except Exception:
            self.fail()
        # A bare 'PathPrefixStrip:/' rule is rejected with DeploymentError.
        os.environ[environment.FRONT_END_RULE_LABEL] = 'test.rule'
        pipeline_data[data_defs.SERVICES][0][data_defs.S_DEPLOY_LABELS] = [
            'test.rule=PathPrefixStrip:/'
        ]
        self.assertRaises(DeploymentError, step.run_step, pipeline_data)
        # Application exclusion test
        try:
            os.environ[environment.FRONT_END_RULE_LABEL] = 'traefik.frontend.rule'
            pipeline_data[data_defs.APPLICATION_NAME] = 'tamarack'
            pipeline_data[data_defs.SERVICES][0][data_defs.S_DEPLOY_LABELS] = [
                'traefik.frontend.rule=PathPrefixStrip:/'
            ]
            step.run_step(pipeline_data)
        except Exception:
            self.fail()
| 1.960938 | 2 |
PyGaBot/pysimbotlib/core/Scaler.py | huak95/PySimBot-extended | 1 | 12760850 | #!python
from kivy.uix.widget import Widget
from kivy.properties import NumericProperty, ObjectProperty
from kivy.base import EventLoop
from kivy.lang import Builder
class Scaler(Widget):
    """Widget that renders its children scaled by ``scale``.

    A canvas PushMatrix/Scale/PopMatrix pair scales the child container,
    and the widget registers itself as an input post-processing module so
    touch coordinates are divided by the same factor and still land on
    the right child.
    """
    # Zoom factor applied to the child container.
    scale = NumericProperty(2)
    # Inner FloatLayout that actually holds the children.
    container = ObjectProperty(None)
    def __init__(self, **kwargs):
        # NOTE(review): EventLoop/Builder are already imported at module
        # level; these local re-imports are redundant but harmless.
        from kivy.base import EventLoop
        from kivy.lang import Builder
        Builder.load_string('''
<Scaler>:
    container: container
    canvas.before:
        PushMatrix
        Scale:
            scale: root.scale
    canvas.after:
        PopMatrix
    FloatLayout:
        id: container
        size: root.width / root.scale, root.height / root.scale
''')
        super(Scaler, self).__init__(**kwargs)
        # Register self so process() sees every input event.
        EventLoop.add_postproc_module(self)
    def get_parent_window(self):
        """Make children treat the scaled container as their window."""
        return self.container
    def add_widget(self, widget):
        """Route *widget* into the scaled container once it exists."""
        if self.container is not None:
            return self.container.add_widget(widget)
        return super(Scaler, self).add_widget(widget)
    def remove_widget(self, widget):
        """Remove *widget* from the scaled container once it exists."""
        if self.container is not None:
            return self.container.remove_widget(widget)
        return super(Scaler, self).remove_widget(widget)
    def process_to_local(self, x, y, relative=False):
        """Divide coordinates by the scale factor; None passes through."""
        if x is None:
            return None, None
        s = float(self.scale)
        return x / s, y / s
    def process(self, events):
        """Input postproc hook: rescale touch coordinates for each event."""
        transform = self.process_to_local
        transformed = []
        for etype, event in events:
            # you might have a move and up event in the same process
            # then avoid the double-transformation
            if event in transformed:
                continue
            transformed.append(event)
            event.sx, event.sy = transform(event.sx, event.sy)
            if etype == 'begin':
                event.osx, event.osy = transform(event.osx, event.osy)
            else:
                # update the delta
                event.dsx = event.sx - event.psx
                event.dsy = event.sy - event.psy
        return events
| 2.25 | 2 |