hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
219ba636e42aee8cec43580d423fc62e4f5c5cf3 | 686 | py | Python | flaskr/models.py | ukeskin/cevrimici-kitap-galerisi | bea06dc417bb779e185b50d6f7f848a33e6f7bcb | [
"MIT"
] | null | null | null | flaskr/models.py | ukeskin/cevrimici-kitap-galerisi | bea06dc417bb779e185b50d6f7f848a33e6f7bcb | [
"MIT"
] | null | null | null | flaskr/models.py | ukeskin/cevrimici-kitap-galerisi | bea06dc417bb779e185b50d6f7f848a33e6f7bcb | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField
from wtforms.validators import DataRequired
from database import db
class User(object):
    """A user record persisted in the ``user`` collection of the app database."""

    def __init__(self, name, avatar, email, password):
        """Store the user's profile fields on the instance."""
        self.name = name
        self.avatar = avatar
        self.email = email
        self.password = password

    def insert(self):
        """Persist this user unless a user with the same email already exists."""
        existing = db.find_one('user', {'email': self.email})
        if not existing:
            db.insert(collection='user', data=self.json())

    def json(self):
        """Return the user's fields as a plain dictionary."""
        return {
            "name": self.name,
            "avatar": self.avatar,
            "email": self.email,
            "password": self.password,
        }
| 31.181818 | 58 | 0.586006 | 536 | 0.781341 | 0 | 0 | 0 | 0 | 0 | 0 | 50 | 0.072886 |
219c423bdd2f170bbacb2a4f7c4d40c610971bf4 | 4,807 | py | Python | project/interpreter_gql/interpreter_utils/set_operations.py | makar-pelogeiko/formal-lang-course | 8d0e1ffb081aaccf19ab69103509928ecccb46d9 | [
"Apache-2.0"
] | null | null | null | project/interpreter_gql/interpreter_utils/set_operations.py | makar-pelogeiko/formal-lang-course | 8d0e1ffb081aaccf19ab69103509928ecccb46d9 | [
"Apache-2.0"
] | 3 | 2021-10-14T14:20:02.000Z | 2022-01-22T23:51:11.000Z | project/interpreter_gql/interpreter_utils/set_operations.py | makar-pelogeiko/formal-lang-course | 8d0e1ffb081aaccf19ab69103509928ecccb46d9 | [
"Apache-2.0"
] | null | null | null | from pyformlang.regular_expression import Regex
from pyformlang.regular_expression.regex_objects import Symbol
from project.interpreter_gql.memory import MemBox
from project.interpreter_gql.interpreter_utils.type_utils import get_target_type
from project.interpreter_gql.interpreter_utils.interpreter_except import InterpError
def kleene_star(arg):
    """Apply the Kleene star to a dfa/regex/str operand, returning a MemBox.

    Plain strings are first boxed as "str" values; DFA operands produce a
    minimized deterministic DFA, everything else is coerced to a regex.
    """
    permitted = ("dfa", "regex", "str")
    box = MemBox(False, "str", arg) if isinstance(arg, str) else arg
    if not isinstance(box, MemBox):
        raise InterpError(["kleene func"], "Arg is not in correct internal type")
    if box.is_list or box.v_type not in permitted:
        raise InterpError(["kleene func"], "Arg is not in allowed type for operation")
    if box.v_type == "dfa":
        starred = box.value.kleene_star().to_deterministic().minimize()
        return MemBox(False, "dfa", starred)
    box = get_target_type(box, "regex")
    return MemBox(False, "regex", box.value.kleene_star())
def concatenate(first, second):
    """Concatenate two dfa/regex/str operands, returning a MemBox.

    If either operand is a DFA the result is a DFA; otherwise both sides are
    coerced to regexes and concatenated as regexes.
    """
    permitted = ("dfa", "regex", "str")
    left = MemBox(False, "str", first) if isinstance(first, str) else first
    right = MemBox(False, "str", second) if isinstance(second, str) else second
    if not (isinstance(left, MemBox) and isinstance(right, MemBox)):
        raise InterpError(["concatenate func"], "Args are not in correct internal type")
    if (left.v_type not in permitted or right.v_type not in permitted
            or left.is_list or right.is_list):
        raise InterpError(
            ["concatenate func"], "Args are not in allowed type for operation"
        )
    if "dfa" in (left.v_type, right.v_type):
        left = get_target_type(left, "dfa")
        right = get_target_type(right, "dfa")
        return MemBox(False, "dfa", left.value.concatenate(right.value))
    left = get_target_type(left, "regex")
    right = get_target_type(right, "regex")
    return MemBox(False, "regex", left.value.concatenate(right.value))
def union(first, second):
    """Union of two dfa/regex/str operands, returning a MemBox.

    If either operand is a DFA the result is a DFA; otherwise both sides are
    coerced to regexes and the regex union is returned.
    """
    permitted = ("dfa", "regex", "str")
    left = MemBox(False, "str", first) if isinstance(first, str) else first
    right = MemBox(False, "str", second) if isinstance(second, str) else second
    if not (isinstance(left, MemBox) and isinstance(right, MemBox)):
        raise InterpError(["union func"], "Args are not in correct internal type")
    if (left.v_type not in permitted or right.v_type not in permitted
            or left.is_list or right.is_list):
        raise InterpError(["union func"], "Args are not in allowed type for operation")
    if "dfa" in (left.v_type, right.v_type):
        left = get_target_type(left, "dfa")
        right = get_target_type(right, "dfa")
        return MemBox(False, "dfa", left.value.union(right.value))
    left = get_target_type(left, "regex")
    right = get_target_type(right, "regex")
    return MemBox(False, "regex", left.value.union(right.value))
def intersection(first, second):
    """Intersect two dfa/regex/str operands, returning a MemBox.

    Unlike the other set operations, string operands are parsed as regular
    expressions up front, since intersection is only defined on
    automata/regexes.

    Raises:
        InterpError: if an operand is not a MemBox/str, is a list, or has a
        type other than dfa/regex/str.
    """
    allow_types = ["dfa", "regex", "str"]
    f_worked = first
    s_worked = second
    if isinstance(first, str):
        f_worked = MemBox(False, "regex", Regex(first))
    if isinstance(second, str):
        s_worked = MemBox(False, "regex", Regex(second))
    if not isinstance(f_worked, MemBox) or not isinstance(s_worked, MemBox):
        raise InterpError(
            ["intersection func"], "Args are not in correct internal type"
        )
    if (
        f_worked.v_type not in allow_types
        or s_worked.v_type not in allow_types
        or f_worked.is_list
        or s_worked.is_list
    ):
        raise InterpError(
            ["intersection func"], "Args are not in allowed type for operation"
        )
    # Plain ``if`` rather than the original misleading ``elif``: the branch
    # above always raises, and the sibling operations use this same shape.
    if f_worked.v_type == "dfa" or s_worked.v_type == "dfa":
        f_worked = get_target_type(f_worked, "dfa")
        s_worked = get_target_type(s_worked, "dfa")
        result = MemBox(False, "dfa", f_worked.value.get_intersection(s_worked.value))
    else:
        f_worked = get_target_type(f_worked, "regex")
        s_worked = get_target_type(s_worked, "regex")
        # Regex intersection: go through epsilon-NFAs, take the product,
        # determinize/minimize, and convert back to a regex.
        f_enfa = f_worked.value.to_epsilon_nfa()
        s_enfa = s_worked.value.to_epsilon_nfa()
        res_dfa = f_enfa.get_intersection(s_enfa).to_deterministic().minimize()
        result = MemBox(False, "regex", res_dfa.to_regex())
    return result
| 34.582734 | 88 | 0.655918 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 721 | 0.14999 |
219c724863fb056a3439c9d977aff6892d9fe9e5 | 2,757 | py | Python | 2021/day-09/solve.py | alexandru-dinu/aoc-2020 | c7a5f648ea6fceb90ee3e2c1b9dd24bf206cf15f | [
"MIT"
] | 1 | 2021-12-03T11:56:56.000Z | 2021-12-03T11:56:56.000Z | 2021/day-09/solve.py | alexandru-dinu/aoc-2020 | c7a5f648ea6fceb90ee3e2c1b9dd24bf206cf15f | [
"MIT"
] | 9 | 2021-12-04T19:16:06.000Z | 2021-12-21T16:43:05.000Z | 2021/day-09/solve.py | alexandru-dinu/aoc-2020 | c7a5f648ea6fceb90ee3e2c1b9dd24bf206cf15f | [
"MIT"
] | null | null | null | from __future__ import annotations
from argparse import ArgumentParser
from collections import deque
import numpy as np
def count_lte(mat: np.ndarray) -> np.ndarray:
    """
    For every cell, count how many of its 4-neighbours are <= that cell.

    Cells outside the grid are treated as +inf, so border cells only
    compare against their in-grid neighbours.  A cell with count 0 is a
    strict local minimum (a "low point").
    """
    padded = np.pad(mat.astype(float), (1, 1), mode="constant", constant_values=np.inf)
    neighbours = (
        padded[1:-1, :-2],   # left
        padded[1:-1, 2:],    # right
        padded[:-2, 1:-1],   # top
        padded[2:, 1:-1],    # bottom
    )
    return sum(nb <= mat for nb in neighbours)
def part1(xs):
    """Sum the risk levels (height + 1) of all low points in the height map."""
    low_points = xs[count_lte(xs) == 0]
    return np.sum(low_points + 1)
def get_basin(xs: np.ndarray, row: int, col: int) -> list[tuple[int, int]]:
    """
    Return the indices of the locations flowing towards the low point `row, col`.

    Breadth-first flood fill outward from the low point; cells of height 9
    act as basin walls and are never included.
    """
    height, width = xs.shape
    seen = np.zeros_like(xs).astype(bool)
    seen[row, col] = True
    frontier = deque([(row, col)])
    basin = []
    while frontier:
        r, c = frontier.popleft()
        basin.append((r, c))
        # Same neighbour order as the original: left, right, up, down.
        for nr, nc in ((r, c - 1), (r, c + 1), (r - 1, c), (r + 1, c)):
            if not (0 <= nr < height) or not (0 <= nc < width):
                continue
            if seen[nr, nc] or xs[nr, nc] == 9:
                continue
            seen[nr, nc] = True
            frontier.append((nr, nc))
    return basin
def part2(xs):
    """Multiply the sizes of the three largest basins of the height map."""
    lte = count_lte(xs)
    basins = [get_basin(xs, row, col) for row, col in zip(*np.where(lte == 0))]
    sizes = sorted(map(len, basins), reverse=True)
    # np.prod: the ``np.product`` alias was deprecated and removed in NumPy 2.0.
    return np.prod(sizes[:3])
def visualize(xs):
    """Render the height map with low points and the three largest basins overlaid.

    Opens an interactive matplotlib window; intended for --visualize mode only.
    """
    import matplotlib.pyplot as plt
    from matplotlib import cm
    from matplotlib.colors import ListedColormap
    lte = count_lte(xs)
    # Blues colormap with the highest band (height 9, the basin walls)
    # forced to opaque black.
    cmap = cm.Blues_r(np.linspace(0, 1, 10))
    cmap[-1] = [0, 0, 0, 1]
    plt.imshow(xs, cmap=ListedColormap(cmap))
    basins = sorted(
        [get_basin(xs, row, col) for row, col in zip(*np.where(lte == 0))],
        key=len,
        reverse=True,
    )
    # Shade the three largest basins in distinct viridis tones
    # (every other entry of a 6-colour ramp).
    cmap = cm.viridis(np.linspace(0.8, 0.2, 6))
    for i in range(3):
        r, c = zip(*basins[i])
        plt.scatter(c, r, c=[cmap[i * 2]], marker="s")
    # Mark every low point with a red cross.
    r, c = np.where(lte == 0)
    plt.scatter(c, r, c="red", marker="x")
    plt.show()
def main():
    """Read the height map from ``args.file`` and run both puzzle parts.

    With ``--visualize`` the map is rendered instead of printing the answers.
    ``args`` is the module-level namespace parsed in the ``__main__`` guard.
    """
    with open(args.file) as fp:
        # Each line of the input is a row of single-digit heights.
        xs = np.array([[int(i) for i in x.strip()] for x in fp.readlines()])
    if args.visualize:
        visualize(xs)
        return
    print("Part 1:", part1(xs))
    print("Part 2:", part2(xs))
if __name__ == "__main__":
    # CLI: --file is the puzzle input; --visualize renders the basins
    # instead of printing the answers.
    parser = ArgumentParser()
    parser.add_argument("--file", type=str, required=True)
    parser.add_argument(
        "--visualize",
        action="store_true",
        help="Visualize the map with low points and basins",
    )
    args = parser.parse_args()
    main()
| 21.372093 | 84 | 0.52811 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 308 | 0.111716 |
219d913bf411991d2c2b4c9921337f7af16cc0c8 | 5,997 | py | Python | applications/tensorflow2/image_classification/data/data_transformer.py | payoto/graphcore_examples | 46d2b7687b829778369fc6328170a7b14761e5c6 | [
"MIT"
] | 260 | 2019-11-18T01:50:00.000Z | 2022-03-28T23:08:53.000Z | applications/tensorflow2/image_classification/data/data_transformer.py | payoto/graphcore_examples | 46d2b7687b829778369fc6328170a7b14761e5c6 | [
"MIT"
] | 27 | 2020-01-28T23:07:50.000Z | 2022-02-14T15:37:06.000Z | applications/tensorflow2/image_classification/data/data_transformer.py | payoto/graphcore_examples | 46d2b7687b829778369fc6328170a7b14761e5c6 | [
"MIT"
] | 56 | 2019-11-18T02:13:12.000Z | 2022-02-28T14:36:09.000Z | # Copyright (c) 2021 Graphcore Ltd. All rights reserved.
import tensorflow as tf
from tensorflow.python.ops import math_ops
import logging
from . import imagenet_processing
from custom_exceptions import UnsupportedFormat, DimensionError
class DataTransformer:
    """Static helpers that build tf.data preprocessing pipelines for CIFAR/ImageNet."""

    logger = logging.getLogger('data_transformer')

    @staticmethod
    def normalization(ds, scale=1 / 255.0, img_type=tf.float32):
        """Scale images by ``scale`` (cast to ``img_type``) and cast labels to int32.

        Raises:
            UnsupportedFormat: if ``ds`` is not a tf.data.Dataset.
            DimensionError: if the dataset does not yield (image, label) pairs.
        """
        # Applying normalization before `ds.cache()` to re-use it.
        # Note: Random transformations (e.g. images augmentations) should be applied
        # after both `ds.cache()` (to avoid caching randomness) and `ds.batch()`
        # (for vectorization https://www.tensorflow.org/guide/data_performance#vectorizing_mapping).
        if not isinstance(ds, tf.data.Dataset):
            raise UnsupportedFormat(
                f'Type of ds is not the one expected (tf.data.Dataset) {type(ds)}')
        if not hasattr(
                ds.element_spec, '__len__') or len(ds.element_spec) != 2:
            raise DimensionError(
                f'Data dimension is not the one supported (2) {ds.element_spec}')
        multiplier = tf.cast(scale, img_type)
        return ds.map(lambda x,
                      y: (multiplier * tf.cast(x, img_type), tf.cast(y, tf.int32)),
                      num_parallel_calls=tf.data.experimental.AUTOTUNE)

    @staticmethod
    def cache_shuffle(ds: tf.data.Dataset, buffer_size: int = 1, shuffle: bool = True, seed: int = 42):
        """Cache the dataset and, when ``shuffle`` is true, shuffle it with ``seed``."""
        if not isinstance(ds, tf.data.Dataset):
            raise UnsupportedFormat(
                f'Type of ds is not the one expected (tf.data.Dataset) {type(ds)}')
        ds = ds.cache()
        if shuffle:
            ds = ds.shuffle(buffer_size, seed=seed)
        return ds

    @staticmethod
    def cifar_preprocess(ds,
                         buffer_size,
                         img_type=tf.float32,
                         is_training=True,
                         accelerator_side_preprocess=False,
                         pipeline_num_parallel=48,
                         seed=42):
        """Build the CIFAR pipeline: cache/shuffle, normalise, pad-and-crop augment.

        Returns:
            (dataset, accelerator_fn): ``accelerator_fn`` is the normalisation
            function to run on the accelerator when
            ``accelerator_side_preprocess`` is set, else None (it is then
            applied host-side inside the map below).
        """
        if not isinstance(ds, tf.data.Dataset):
            raise UnsupportedFormat(
                f'Type of ds is not the one expected (tf.data.Dataset) {type(ds)}')
        if not hasattr(
                ds.element_spec, '__len__') or len(ds.element_spec) != 2:
            raise DimensionError(
                f'Data dimension is not the one supported (2) {ds.element_spec}')
        ds = DataTransformer.cache_shuffle(ds, buffer_size, is_training, seed)
        preprocess_fn = cifar_preprocess_training_fn if is_training else cifar_preprocess_inference_fn
        if accelerator_side_preprocess:
            host_side_preprocess_fn = None
            accelerator_side_preprocess_fn = preprocess_fn
        else:
            host_side_preprocess_fn = preprocess_fn
            accelerator_side_preprocess_fn = None

        def cifar_preprocess_map_func(x_image):
            # Pipeline assumes raw CIFAR images: 32x32 RGB.
            assert(x_image.shape == (32, 32, 3))
            if host_side_preprocess_fn is not None:
                x_image = tf.cast(x_image, tf.float32)
                x_image = host_side_preprocess_fn(x_image)
            x_image = tf.cast(x_image, img_type)
            if is_training:
                # Standard CIFAR augmentation: pad by 4 on each side, then
                # randomly crop back to the original shape.
                shape = x_image.get_shape().as_list()
                padding = 4
                x_image = tf.pad(x_image, [[padding, padding], [padding, padding], [0, 0]], "CONSTANT")
                x_image = tf.image.random_crop(x_image, shape, seed=seed)
            return x_image

        ds = ds.map(lambda x, y: (cifar_preprocess_map_func(x), tf.cast(y, tf.int32)),
                    num_parallel_calls=pipeline_num_parallel)
        # The original re-computed accelerator_side_preprocess_fn here with an
        # ``is True`` test, silently discarding the branch above for truthy
        # non-bool flags; the branch result is authoritative, so the
        # redundant re-assignment was removed.
        return ds, accelerator_side_preprocess_fn

    @staticmethod
    def imagenet_preprocessing(ds,
                               img_type,
                               is_training,
                               accelerator_side_preprocess=True,
                               pipeline_num_parallel=48,
                               seed=None):
        """Build the ImageNet pipeline via ``imagenet_processing.parse_record``.

        Returns:
            (dataset, accelerator_fn): as in ``cifar_preprocess`` —
            normalisation runs either host-side (inside parse_record) or is
            handed back to be applied on the accelerator.
        """
        preprocessing_fn = imagenet_preprocess_training_fn if is_training else imagenet_preprocess_inference_fn
        if accelerator_side_preprocess:
            host_side_preprocess_fn = None
            accelerator_side_preprocess_fn = preprocessing_fn
        else:
            host_side_preprocess_fn = preprocessing_fn
            accelerator_side_preprocess_fn = None

        def processing_fn(raw_record): return imagenet_processing.parse_record(
            raw_record, is_training, img_type, host_side_preprocess_fn, seed=seed)
        return ds.map(processing_fn, num_parallel_calls=pipeline_num_parallel), accelerator_side_preprocess_fn
def _image_normalisation(image, mean, std, scale=255):
    """Divide ``image`` by ``scale``, then standardise with ``mean`` and ``std``.

    ``mean``/``std`` are cast to the image dtype and broadcast to its shape.
    """
    mean_b = tf.broadcast_to(tf.cast(mean, dtype=image.dtype), tf.shape(image))
    std_b = tf.broadcast_to(tf.cast(std, dtype=image.dtype), tf.shape(image))
    return (image / scale - mean_b) / std_b
def _imagenet_normalize(image):
    """Normalise ``image`` with the standard ImageNet per-channel statistics."""
    # Per-channel RGB mean/std commonly used for ImageNet-trained models.
    imagenet_mean = [0.485, 0.456, 0.406]
    imagenet_std = [0.229, 0.224, 0.225]
    return _image_normalisation(image, imagenet_mean, imagenet_std)
def _cifar_normalize(image):
    """Standardise ``image`` with its own per-image mean and std (no 255 scaling)."""
    per_image_mean = math_ops.reduce_mean(image, axis=[-1, -2, -3], keepdims=True)
    per_image_std = math_ops.reduce_std(image, axis=[-1, -2, -3], keepdims=True)
    return _image_normalisation(image, per_image_mean, per_image_std, scale=1)
def imagenet_preprocess_training_fn(image):
    """Training-time ImageNet preprocessing: channel-wise normalisation only."""
    return _imagenet_normalize(image)
def imagenet_preprocess_inference_fn(image):
    """Inference-time ImageNet preprocessing: channel-wise normalisation only."""
    return _imagenet_normalize(image)
def cifar_preprocess_training_fn(image):
    """Training-time CIFAR preprocessing: random horizontal flip, then per-image normalisation."""
    image = tf.image.random_flip_left_right(image)
    return _cifar_normalize(image)
def cifar_preprocess_inference_fn(image):
    """Inference-time CIFAR preprocessing: per-image normalisation only."""
    return _cifar_normalize(image)
| 38.941558 | 111 | 0.638486 | 4,558 | 0.760047 | 0 | 0 | 4,460 | 0.743705 | 0 | 0 | 726 | 0.121061 |
219dec95a0e5d58334e741ed6c4d5f6ef28f50d0 | 273 | py | Python | social/urls.py | zhongmei57485/SwiperPro | b00dde5af05f158d7cd2c649e8a07a2c19623b69 | [
"Apache-2.0"
] | null | null | null | social/urls.py | zhongmei57485/SwiperPro | b00dde5af05f158d7cd2c649e8a07a2c19623b69 | [
"Apache-2.0"
] | 9 | 2019-12-04T23:48:54.000Z | 2021-06-10T18:31:57.000Z | social/urls.py | zhongmei57485/SwiperPro | b00dde5af05f158d7cd2c649e8a07a2c19623b69 | [
"Apache-2.0"
] | null | null | null | from django.urls import path
from social import apis
# URL routes for the social app; each path maps straight to a view in
# social.apis.  Patterns do not overlap, so order is not significant.
urlpatterns=[
    path('recommend',apis.recommend),
    path('like',apis.like),
    path('dislike',apis.dislike),
    path('superlike',apis.superlike),
    path('rewind',apis.rewind),
    path('like-me',apis.like_me),
]
219edc590375fdad03e3c0c586530e16a13fd443 | 3,657 | py | Python | GPIB_Control.py | TheHWcave/GPIB-to-USB | 2f2469900ecca459fcee24f550519abc78480886 | [
"MIT"
] | 7 | 2020-02-02T06:29:13.000Z | 2022-03-22T00:39:52.000Z | GPIB_Control.py | TheHWcave/GPIB-to-USB | 2f2469900ecca459fcee24f550519abc78480886 | [
"MIT"
] | null | null | null | GPIB_Control.py | TheHWcave/GPIB-to-USB | 2f2469900ecca459fcee24f550519abc78480886 | [
"MIT"
] | 1 | 2019-03-21T15:49:40.000Z | 2019-03-21T15:49:40.000Z | #MIT License
#
#Copyright (c) 2019 TheHWcave
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
#
# Description:
# ============
# GPIB_Control connects to the GPIBtoUSB interface via a serial port. It supports the
# follwing commandline parameters all of which are optional
#
# -p or --port : serial input device, default /dev/ttyUSB0
# -a or --addr : GPIB address, default 3
# -c or --cmd : GPIB command default = none, which means just polling
# -i or --ifcmd: sends a command to the GPIB interface itself (not the GPIB bus)
# -r or --read : if specified means read the response after sending the cmd
# -d or --debug: followed by an integer (default = 0) for debuging purposes
#-----------------------------
import serial,argparse
from time import sleep,time,localtime,strftime,perf_counter
# Command-line options (see the module header for a description of each).
parser = argparse.ArgumentParser()
parser.add_argument('--port','-p',help='port (default = /dev/ttyUSB0',
                    dest='port_dev',action='store',type=str,default='/dev/ttyUSB0')
parser.add_argument('--addr','-a',help='GPIB address (default = 3)',metavar=' 1..30',
                    dest='address',action='store',type=int,default=3,choices=range(1,31))
parser.add_argument('--cmd','-c',help='GPIB command (default = '')',
                    dest='cmd_msg',action='store',type=str,default='')
parser.add_argument('--ifcmd','-i',help='GPIB interface command (default = '')',
                    dest='ifcmd_msg',action='store',type=str,default='')
parser.add_argument('--read','-r',help='read from device ',
                    dest='read_resp',action='store_true')
parser.add_argument('--debug','-d',help='debug level 0.. (def=1)',
                    dest='debug',action='store',type=int,default=0)
arg = parser.parse_args()
do_read = arg.read_resp
# Open the serial link to the GPIB-to-USB adapter (115200 8N1, 1 s read
# timeout).  The 2 s sleep presumably lets the adapter finish resetting
# after the port opens — TODO confirm against the adapter firmware.
GPIB2USB = serial.Serial(
    port=arg.port_dev,
    baudrate=115200,
    timeout=1)
sleep(2)
def readdata():
    """Read lines from the GPIB-to-USB adapter until a data line arrives.

    Lines starting with '!' are adapter chatter and are skipped (echoed when
    --debug > 0).  An empty read (serial timeout) prints 'timeout' and
    retries, so this loops until a real line is received.
    """
    buf = ''
    n = 0  # NOTE(review): unused; kept to leave behaviour byte-identical
    while True:
        buf = GPIB2USB.readline(64).decode().strip()
        if len(buf) > 0:
            if buf.startswith('!'):
                if arg.debug > 0: print('ignored:'+buf)
            else:
                break
        else:
            print('timeout')
    return (buf)
# Build the poll message.  --ifcmd is sent verbatim to the adapter itself;
# otherwise the message is prefixed 'R'/'W' (read/write the addressed GPIB
# device, chosen by --read) or 'H' when no command was given at all.
# All messages are newline (\x0a) terminated.
if arg.ifcmd_msg >'':
    pollmsg = arg.ifcmd_msg+'\x0a'
    do_read = True
else:
    if arg.cmd_msg >'':
        if do_read:
            pollmsg = 'R'+str(arg.address)+','+arg.cmd_msg+'\x0a'
        else:
            pollmsg = 'W'+str(arg.address)+','+arg.cmd_msg+'\x0a'
    else:
        pollmsg = 'H'+str(arg.address)+',\x0a'
        do_read = True
m = pollmsg.encode('ascii')
# change timeout of GPIB-to-USB interface to 1 s to wait in case we
# poll and a value is not ready yet
GPIB2USB.write('T1000000\x0a'.encode('ascii'))
# Send the message once; readdata() blocks until a reply (Ctrl-C aborts).
Done = False
while not Done:
    try:
        GPIB2USB.write(m)
        if do_read:
            data = readdata()
            print(data)
        Done = True
    except KeyboardInterrupt:
        quit()
| 33.245455 | 86 | 0.699207 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,247 | 0.614438 |
219eff34d4867620514405299c53bb4d517aed92 | 3,224 | py | Python | scripts/check_spec.py | soasme/PeppaPEG | 3ad481674ba3bbed6d495a6ad3b1f8087e6fd02d | [
"MIT"
] | 30 | 2021-02-10T04:40:52.000Z | 2022-03-04T07:49:35.000Z | scripts/check_spec.py | soasme/PeppaPEG | 3ad481674ba3bbed6d495a6ad3b1f8087e6fd02d | [
"MIT"
] | 30 | 2021-02-16T09:24:44.000Z | 2022-01-09T02:45:17.000Z | scripts/check_spec.py | soasme/PeppaPEG | 3ad481674ba3bbed6d495a6ad3b1f8087e6fd02d | [
"MIT"
] | 4 | 2021-02-22T22:37:58.000Z | 2021-12-24T16:28:27.000Z | import os.path
import subprocess
import sys
import json
import yaml
import shlex
def test_spec():
    """Run every test case from a JSON/YAML spec file against a parser binary.

    Command line: argv[1] is the parser command, argv[2] the spec file path.
    Each spec provides a grammar (inline ``grammar`` or a ``grammar_file``)
    and test cases with input 'I' and either expected JSON output 'O' or
    expected stderr 'E'; cases with neither are counted as ignored.
    Exits with status 1 on any failure or an unreadable spec file.
    """
    executable = sys.argv[1]
    specs_file = sys.argv[2]
    if specs_file.endswith('.json'):
        with open(specs_file) as f:
            try:
                specs = json.load(f)
            except json.decoder.JSONDecodeError:
                print("invalid json spec")
                exit(1)
    elif specs_file.endswith('.yaml'):
        with open(specs_file) as f:
            try:
                # safe_load: specs are plain data — yaml.load with the full
                # Loader can instantiate arbitrary Python objects from tags.
                specs = yaml.safe_load(f)
            except yaml.YAMLError:
                print('invalid yaml spec')
                exit(1)
    failed, ignored, total = 0, 0, 0
    for spec in specs:
        for test in spec['tests']:
            total += 1
            cmd = shlex.split(executable) + [
                'parse',
                '--grammar-entry', spec['entry'],
            ]
            if 'grammar' in spec:
                cmd.extend(['--grammar-str', spec['grammar']])
            elif 'grammar_file' in spec:
                if spec['grammar_file'].startswith('/'):
                    cmd.extend(['--grammar-file', spec['grammar_file']])
                else:
                    # Relative grammar paths resolve against the spec file's
                    # directory.
                    cmd.extend(['--grammar-file', os.path.join(
                        os.path.dirname(os.path.abspath(specs_file)),
                        spec['grammar_file'])])
            else:
                raise ValueError('Missing grammar/grammar_file')
            proc = subprocess.run(
                cmd,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                input=test['I'].encode('utf-8'),
            )
            if 'O' in test:
                if proc.returncode == 0:
                    output = json.loads(proc.stdout.decode('utf-8'))
                    if output != test['O']:
                        print(
                            f"GRAMMAR:\n{spec.get('grammar') or spec.get('grammar_file')}\n"
                            f"INPUT:\n{test['I']}\n"
                            f"OUTPUT:\n{test['O']}\n"
                            f"GOT:\n{json.dumps(output)}{proc.stderr.decode('utf-8')}\n"
                        )
                        failed += 1
                else:
                    print(
                        f"GRAMMAR:\n{spec.get('grammar') or spec.get('grammar_file')}\n"
                        f"INPUT:\n{test['I']}\n"
                        f"OUTPUT:\n{test['O']}\n"
                        f"GOT:\n{proc.stderr.decode('utf-8')}{proc.stdout}\n"
                    )
                    failed += 1
            elif 'E' in test:
                if proc.stderr.decode('utf-8').strip() != test['E']:
                    print(
                        f"GRAMMAR:\n{spec.get('grammar') or spec.get('grammar_file')}\n"
                        f"INPUT:\n{test['I']}\n"
                        f"ERROR:\n{test['E']}\n"
                        f"GOT:\n{proc.stderr.decode('utf-8')}{proc.stdout.decode('utf-8')}"
                    )
                    failed += 1
            else:
                ignored += 1
    print("total: %d, failed: %d, ignored: %d" % (total, failed, ignored))
    if failed:
        exit(1)
if __name__ == '__main__':
    # Entry point: run the given parser against every case in the spec file.
    test_spec()
| 37.057471 | 125 | 0.433313 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 847 | 0.262717 |
219f1d9b10fd7858f91ccf44c96ea3fd2cc531d1 | 4,874 | py | Python | interlacer/utils.py | MedicalVisionGroup/interlacer | 60c14782729031a2af48c27fddb649d37cdca0e9 | [
"MIT"
] | null | null | null | interlacer/utils.py | MedicalVisionGroup/interlacer | 60c14782729031a2af48c27fddb649d37cdca0e9 | [
"MIT"
] | null | null | null | interlacer/utils.py | MedicalVisionGroup/interlacer | 60c14782729031a2af48c27fddb649d37cdca0e9 | [
"MIT"
] | null | null | null | import numpy as np
import tensorflow as tf
def split_reim(array):
    """Split a complex valued matrix into its real and imaginary parts.

    Args:
        array(complex): An array of shape (batch_size, N, N) or (batch_size, N, N, 1)

    Returns:
        split_array(float): An array of shape (batch_size, N, N, 2); channel 0
        holds the real part and channel 1 the imaginary part
    """
    real_part = np.real(array)
    imag_part = np.imag(array)
    return np.stack((real_part, imag_part), axis=3)
def split_reim_tensor(array):
    """Split a complex valued tensor into its real and imaginary parts.

    Args:
        array(complex): A tensor of shape (batch_size, N, N) or (batch_size, N, N, 1)

    Returns:
        split_array(float): A tensor of shape (batch_size, N, N, 2); channel 0
        holds the real part and channel 1 the imaginary part
    """
    real_part = tf.math.real(array)
    imag_part = tf.math.imag(array)
    return tf.stack((real_part, imag_part), axis=3)
def split_reim_channels(array):
    """Split a complex valued tensor into real and imaginary channel groups.

    Args:
        array(complex): A tensor of shape (batch_size, N, N, ch)

    Returns:
        split_array(float): A tensor of shape (batch_size, N, N, 2*ch) with the
        real channels first, followed by the imaginary channels
    """
    real = tf.math.real(array)
    imag = tf.math.imag(array)
    # Unlike split_reim, the parts are concatenated along the existing
    # channel axis rather than stacked onto a new one.
    return tf.concat((real, imag), axis=3)
def join_reim(array):
    """Join the real and imaginary channels of a matrix to a single complex-valued matrix.

    Args:
        array(float): An array of shape (batch_size, N, N, 2)

    Returns:
        joined_array(complex): A complex-valued array of shape (batch_size, N, N)
    """
    real_part = array[:, :, :, 0]
    imag_part = array[:, :, :, 1]
    return real_part + 1j * imag_part
def join_reim_tensor(array):
    """Join the real and imaginary channels of a matrix to a single complex-valued matrix.

    Args:
        array(float): A tensor of shape (batch_size, N, N, 2)

    Returns:
        joined_array(complex): A complex64 tensor of shape (batch_size, N, N)
    """
    real_part = tf.cast(array[:, :, :, 0], 'complex64')
    imag_part = tf.cast(array[:, :, :, 1], 'complex64')
    return real_part + 1j * imag_part
def join_reim_channels(array):
    """Join real and imaginary channel groups into a single complex-valued tensor.

    Args:
        array(float): A tensor of shape (batch_size, N, N, ch) whose first ch/2
        channels are real parts and last ch/2 channels the matching imaginary parts

    Returns:
        joined_array(complex): A complex64 tensor of shape (batch_size, N, N, ch/2)
    """
    ch = array.get_shape().as_list()[3]
    half = int(ch / 2)
    real_part = tf.cast(array[:, :, :, :half], dtype=tf.complex64)
    imag_part = tf.cast(array[:, :, :, half:], dtype=tf.complex64)
    return real_part + 1j * imag_part
def convert_to_frequency_domain(images):
    """Convert an array of images to their Fourier transforms.

    Args:
        images(float): An array of shape (batch_size, N, N, 2)

    Returns:
        spectra(float): An FFT-ed array of shape (batch_size, N, N, 2)
    """
    # FFT over the spatial axes only; batch and real/imag channels untouched.
    return split_reim(np.fft.fft2(join_reim(images), axes=(1, 2)))
def convert_tensor_to_frequency_domain(images):
    """Convert a tensor of images to their Fourier transforms.

    Args:
        images(float): A tensor of shape (batch_size, N, N, 2)

    Returns:
        spectra(float): An FFT-ed tensor of shape (batch_size, N, N, 2)
    """
    # tf.signal.fft2d transforms the two innermost axes of the complex tensor.
    return split_reim_tensor(tf.signal.fft2d(join_reim_tensor(images)))
def convert_to_image_domain(spectra):
    """Convert an array of Fourier spectra to the corresponding images.

    Args:
        spectra(float): An array of shape (batch_size, N, N, 2)

    Returns:
        images(float): An IFFT-ed array of shape (batch_size, N, N, 2)
    """
    # Inverse FFT over the spatial axes only.
    return split_reim(np.fft.ifft2(join_reim(spectra), axes=(1, 2)))
def convert_tensor_to_image_domain(spectra):
    """Convert a tensor of Fourier spectra to the corresponding images.

    Args:
        spectra(float): A tensor of shape (batch_size, N, N, 2)

    Returns:
        images(float): An IFFT-ed tensor of shape (batch_size, N, N, 2)
    """
    # tf.signal.ifft2d inverts the transform over the two innermost axes.
    return split_reim_tensor(tf.signal.ifft2d(join_reim_tensor(spectra)))
219f201f5364e4e40d6488c12a95af6558fcad59 | 233 | py | Python | workspace/src/barc/src/modify_cam_param.py | Cyphysecurity/darc | 2fe4f35d4ac7dc52606f30b86bf52464d6ca0ac3 | [
"MIT"
] | 1 | 2019-07-31T11:55:34.000Z | 2019-07-31T11:55:34.000Z | workspace/src/barc/src/modify_cam_param.py | Cyphysecurity/darc | 2fe4f35d4ac7dc52606f30b86bf52464d6ca0ac3 | [
"MIT"
] | 4 | 2020-02-12T00:54:30.000Z | 2021-06-10T20:26:26.000Z | workspace/src/barc/src/modify_cam_param.py | Cyphysecurity/darc | 2fe4f35d4ac7dc52606f30b86bf52464d6ca0ac3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
'''
modify camera parameters using v4l
'''
import os
# change /dev/video6 resolution
# Fixed shell command (no user input interpolated); the 640x480 variant is
# kept commented out as the alternative setting.
#os.system('v4l2-ctl -d /dev/video6 -v width=640,height=480')
os.system('v4l2-ctl -d /dev/video6 -v width=160,height=120')
| 19.416667 | 61 | 0.703863 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 204 | 0.875536 |
21a04335d89c7d0c5916d0d77c189c61e2cfb328 | 24,611 | py | Python | GenerateSyntheticData.py | dragonfly-asl/SyntheticDataGenerator | 368b8e6ba0489053e98abd7bc0b720b71d6cae99 | [
"Apache-2.0"
] | null | null | null | GenerateSyntheticData.py | dragonfly-asl/SyntheticDataGenerator | 368b8e6ba0489053e98abd7bc0b720b71d6cae99 | [
"Apache-2.0"
] | null | null | null | GenerateSyntheticData.py | dragonfly-asl/SyntheticDataGenerator | 368b8e6ba0489053e98abd7bc0b720b71d6cae99 | [
"Apache-2.0"
] | 1 | 2019-06-25T15:05:02.000Z | 2019-06-25T15:05:02.000Z | # /bin/env python
# coding: utf-8
from __future__ import print_function
import sys
import argparse
import logging
import os
import math
import cv2
import numpy as np
class GenerateSyntheticData:
import PythonMagick as Magick
def __init__(self, logger=None):
if logger == None:
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
self.logger = logging.getLogger()
else:
self.logger = logger
    @staticmethod
    def appendArgumentParser(argparser):
        """Register every image-transformation command-line option on ``argparser``.

        Options mirror the ImageMagick-style operations applied elsewhere in
        this class; the ``--auto*`` group controls randomized augmentation.
        """
        # Geometric transforms.
        argparser.add_argument('--shift-x', type=int, help='')
        argparser.add_argument('--shift-y', type=int, help='')
        argparser.add_argument('--skew-x', type=float, help='')
        argparser.add_argument('--skew-y', type=float, help='')
        argparser.add_argument('--rotate', type=float, help='rotates image clock- or counterclock-wise (angle in degrees)')
        argparser.add_argument('--horizontal_flip', action='store_true', help='horizontally flips image')
        argparser.add_argument('--zoom', type=str, help='resize image; argument given in percentage')
        # Color adjustments.
        argparser.add_argument('--contrast', type=int, help='default=0; 0~infinity (integer times contract is applided to image)')
        argparser.add_argument('--brightness', type=float, help='default=100')
        argparser.add_argument('--saturation', type=float, help='default=100')
        argparser.add_argument('--hue', type=float, help='default=100')
        # Filters and effects (each with its tuning parameters).
        argparser.add_argument('--blur', action='store_true', help='')
        argparser.add_argument('--blur_radius', type=float, default=10, help='')
        argparser.add_argument('--blur_sigma', type=float, default=1, help='')
        argparser.add_argument('--gaussianBlur', action='store_true', help='')
        argparser.add_argument('--gaussianBlur_width', type=float, default=5, help='')
        argparser.add_argument('--gaussianBlur_sigma', type=float, default=1, help='')
        argparser.add_argument('--despeckle', action='store_true', help='')
        argparser.add_argument('--enhance', action='store_true', help='')
        argparser.add_argument('--equalize', action='store_true', help='')
        argparser.add_argument('--gamma', type=float, help='0 ~ 2; 1 is default')
        argparser.add_argument('--implode', type=float, help='Implode factor 0~1; 0 (nothing) to 1 (full); 0.0 ~ 0.5 recommended.')
        argparser.add_argument('--negate', action='store_true', help='')
        argparser.add_argument('--normalize', action='store_true', help='')
        argparser.add_argument('--quantize', action='store_true', help='')
        argparser.add_argument('--reduceNoise', type=int, help='default=1')
        argparser.add_argument('--shade', action='store_true', help='')
        argparser.add_argument('--shade_azimuth', type=float, default=50, help='')
        argparser.add_argument('--shade_elevation', type=float, default=50, help='')
        argparser.add_argument('--sharpen', action='store_true', help='')
        argparser.add_argument('--sharpen_radius', type=float, default=1, help='')
        argparser.add_argument('--sharpen_sigma', type=float, default=0.5, help='')
        argparser.add_argument('--swirl', type=float, help='degree; default=10')
        argparser.add_argument('--wave', action='store_true', help='')
        argparser.add_argument('--wave_amplitude', type=float, default=5, help='')
        argparser.add_argument('--wave_wavelength', type=float, default=100, help='')
        # Randomized-augmentation mode and its parameter ranges.
        argparser.add_argument('--auto', action='store_true', help='')
        argparser.add_argument('--auto_ops', type=str, default='', help='')
        argparser.add_argument('--auto_rotate_min', type=float, default=0, help='')
        argparser.add_argument('--auto_rotate_max', type=float, default=0, help='')
        argparser.add_argument('--auto_zoom_min', type=float, default=0, help='')
        argparser.add_argument('--auto_zoom_max', type=float, default=0, help='')
def generateRandomOptions(self, cmdArg):
    """Randomly sample image-augmentation options into an argparse.Namespace.

    :param cmdArg: parsed CLI arguments; ``auto_ops`` (comma separated)
        selects which operations to randomize, and ``auto_rotate_min/max``
        and ``auto_zoom_min/max`` optionally bound the rotate/zoom ranges.
    :return: Namespace with one attribute per supported operation;
        None/False means "do not apply this operation".
    """
    # --- per-operation samplers; each one fills its fields on ``args`` ---
    def _generateRandomOptionsShift(args):
        args.shift_x = int(np.abs(np.random.normal(0, 3))) # -10 ~ +10
        args.shift_y = int(np.abs(np.random.normal(0, 1))) # -3 ~ +3
    def _generateRandomOptionsSkew(args):
        args.skew_x = int(np.random.normal(0, 3)) # -10 ~ +10
        args.skew_y = int(np.random.normal(0, 3)) # -10 ~ +10
    def _generateRandomOptionsRotate(args):
        # Use the CLI-provided range when one is given, else a small jitter.
        if cmdArg.auto_rotate_min != cmdArg.auto_rotate_max:
            args.rotate = int(np.random.uniform(cmdArg.auto_rotate_min, cmdArg.auto_rotate_max))
        else:
            args.rotate = int(np.random.normal(0, 3)) # -10 ~ +10
    def _generateRandomOptionsZoom(args):
        # Zoom is expressed as a percentage string, e.g. "103%".
        if cmdArg.auto_zoom_min != cmdArg.auto_zoom_max:
            args.zoom = str(int(np.random.uniform(cmdArg.auto_zoom_min, cmdArg.auto_zoom_max))) + '%'
        else:
            args.zoom = str(int(np.random.normal(100, 3))) + '%' # 90% ~ 110%
    def _generateRandomOptionsContrast(args):
        args.contrast = int(np.abs(np.random.normal(0, 1))) # 0 ~ +3
    def _generateRandomOptionsBrightness(args):
        args.brightness = np.random.normal(100, 5) # 85 ~ 115
    def _generateRandomOptionsSaturation(args):
        args.saturation = np.random.normal(100, 5) # 85 ~ 115
    def _generateRandomOptionsHue(args):
        args.hue = np.random.normal(100, 5) # 85 ~ 115
    def _generateRandomOptionsBlur(args):
        # ~10% chance of blurring at all; then pick plain vs. gaussian 50/50.
        if np.random.binomial(1,0.1): # do blur
            if np.random.binomial(1,0.5):
                args.blur = True
            else:
                args.gaussianBlur = True
        if args.blur:
            args.blur_radius = np.abs(np.random.normal(0, 3)) # 0 ~ 10
            args.blur_sigma = np.abs(np.random.normal(0, 0.7)) # 0 ~ 2
        if args.gaussianBlur:
            args.gaussianBlur_width = np.abs(np.random.normal(0, 3)) # 0 ~ 10
            args.gaussianBlur_sigma = np.abs(np.random.normal(0, 0.7)) # 0 ~ 2
    def _generateRandomOptionsHorizontalFlip(args):
        args.horizontal_flip = (np.random.binomial(1,0.1) > 0)
    def _generateRandomOptionsDespeckle(args):
        args.despeckle = (np.random.binomial(1,0.5) > 0)
    def _generateRandomOptionsEnhance(args):
        args.enhance = (np.random.binomial(1,0.5) > 0)
    def _generateRandomOptionsEqualize(args):
        args.equalize = (np.random.binomial(1,0.1) == 1)
    def _generateRandomOptionsNegate(args):
        args.negate = (np.random.binomial(1,0.1) == 1)
    def _generateRandomOptionsNormalize(args):
        args.normalize = (np.random.binomial(1,0.1) > 0)
    def _generateRandomOptionsQuantize(args):
        args.quantize = (np.random.binomial(1,0.1) > 0)
    def _generateRandomOptionsGamma(args):
        args.gamma = np.abs(np.random.normal(1, 0.03)) # 0 ~ 2
    def _generateRandomOptionsImplode(args):
        args.implode = 0
        if np.random.binomial(1,0.5) > 0:
            args.implode = np.random.normal(0, 0.15) # -0.5 ~ 0.5
    def _generateRandomOptionsReduceNoise(args):
        args.reduceNoise = int(np.abs(np.random.normal(0, 0.7))) # 0 ~ 2
    def _generateRandomOptionsShade(args):
        args.shade = (np.random.binomial(1,0.1) > 0)
        if args.shade:
            args.shade_azimuth = np.random.normal(50, 17) # 0 ~ 100
            args.shade_elevation = np.random.normal(50, 17) # 0 ~ 100
    def _generateRandomOptionsSharpen(args):
        args.sharpen = (np.random.binomial(1,0.1) > 0)
        if args.sharpen:
            args.sharpen_radius = np.abs(np.random.normal(0, 0.7)) # 0 ~ 2
            args.sharpen_sigma = np.abs(np.random.normal(0, 0.3)) # 0 ~ 1
    def _generateRandomOptionsSwirl(args):
        args.swirl = np.random.normal(0, 5) # -15 ~ +15
    def _generateRandomOptionsWave(args):
        args.wave = (np.random.binomial(1,0.3) > 0)
        if args.wave:
            args.wave_amplitude = np.abs(np.random.normal(5, 0.3)) # 0 ~ 10
            args.wave_wavelength = np.abs(np.random.normal(100, 10)) # 0 ~ 200
    # Start from an "everything disabled" namespace; samplers overwrite fields.
    args = argparse.Namespace()
    args.shift_x = args.shift_y = None
    args.skew_x = args.skew_y = None
    args.rotate = args.zoom = None
    args.contrast = args.brightness = args.saturation = args.hue = None
    args.blur = args.gaussianBlur = None
    args.horizontal_flip = None
    args.despeckle = args.enhance = args.reduceNoise = None
    args.equalize = args.negate = args.normalize = args.quantize = args.gamma = None
    args.shade = None
    args.sharpen = None
    args.implode = args.swirl = args.wave = None
    if len(cmdArg.auto_ops)>0:
        # Only randomize the explicitly requested operations.
        for op in cmdArg.auto_ops.split(","):
            if op == 'shift': _generateRandomOptionsShift(args)
            elif op == 'skew': _generateRandomOptionsSkew(args)
            elif op == 'rotate': _generateRandomOptionsRotate(args)
            elif op == 'zoom': _generateRandomOptionsZoom(args)
            elif op == 'contrast': _generateRandomOptionsContrast(args)
            elif op == 'brightness': _generateRandomOptionsBrightness(args)
            elif op == 'saturation': _generateRandomOptionsSaturation(args)
            elif op == 'hue': _generateRandomOptionsHue(args)
            elif op == 'blur': _generateRandomOptionsBlur(args)
            elif op == 'horizontal_flip': _generateRandomOptionsHorizontalFlip(args)
            elif op == 'despeckle': _generateRandomOptionsDespeckle(args)
            elif op == 'enhance': _generateRandomOptionsEnhance(args)
            elif op == 'equalize': _generateRandomOptionsEqualize(args)
            elif op == 'negate': _generateRandomOptionsNegate(args)
            elif op == 'normalize': _generateRandomOptionsNormalize(args)
            elif op == 'quantize': _generateRandomOptionsQuantize(args)
            elif op == 'gamma': _generateRandomOptionsGamma(args)
            elif op == 'implode': _generateRandomOptionsImplode(args)
            elif op == 'reduceNoise': _generateRandomOptionsReduceNoise(args)
            elif op == 'shade': _generateRandomOptionsShade(args)
            elif op == 'sharpen': _generateRandomOptionsSharpen(args)
            elif op == 'swirl': _generateRandomOptionsSwirl(args)
            elif op == 'wave': _generateRandomOptionsWave(args)
            else:
                self.logger.error('Unknown Operation Name ' + op)
    else: # apply all operations
        # Note: flip/equalize/negate/wave are deliberately left commented out.
        _generateRandomOptionsShift(args)
        _generateRandomOptionsSkew(args)
        _generateRandomOptionsRotate(args)
        _generateRandomOptionsZoom(args)
        _generateRandomOptionsContrast(args)
        _generateRandomOptionsBrightness(args)
        _generateRandomOptionsSaturation(args)
        _generateRandomOptionsHue(args)
        _generateRandomOptionsBlur(args)
        #_generateRandomOptionsHorizontalFlip(args)
        _generateRandomOptionsDespeckle(args)
        _generateRandomOptionsEnhance(args)
        #_generateRandomOptionsEqualize(args)
        #_generateRandomOptionsNegate(args)
        _generateRandomOptionsNormalize(args)
        _generateRandomOptionsQuantize(args)
        _generateRandomOptionsGamma(args)
        _generateRandomOptionsImplode(args)
        _generateRandomOptionsReduceNoise(args)
        _generateRandomOptionsShade(args)
        _generateRandomOptionsSharpen(args)
        _generateRandomOptionsSwirl(args)
        #_generateRandomOptionsWave(args)
    self.logger.debug('Randomly generated options: ')
    for key in vars(args):
        self.logger.debug(' -- %s: %s' % (key, getattr(args, key)))
    self.logger.debug('')
    return args
def isVideo(self, inputF):
    """Return True when *inputF* has a known video-file extension.

    The check is by filename suffix only (file content is not inspected).
    Fix: the original comparison was case-sensitive, so files such as
    'clip.MP4' were not recognized; the name is lower-cased first.
    """
    video_file_extensions = (
        '.264', '.3g2', '.3gp', '.3gp2', '.3gpp', '.3gpp2', '.3mm', '.3p2', '.60d', '.787', '.89', '.aaf', '.aec', '.aep', '.aepx',
        '.aet', '.aetx', '.ajp', '.ale', '.am', '.amc', '.amv', '.amx', '.anim', '.aqt', '.arcut', '.arf', '.asf', '.asx', '.avb',
        '.avc', '.avd', '.avi', '.avp', '.avs', '.avs', '.avv', '.axm', '.bdm', '.bdmv', '.bdt2', '.bdt3', '.bik', '.bin', '.bix',
        '.bmk', '.bnp', '.box', '.bs4', '.bsf', '.bvr', '.byu', '.camproj', '.camrec', '.camv', '.ced', '.cel', '.cine', '.cip',
        '.clpi', '.cmmp', '.cmmtpl', '.cmproj', '.cmrec', '.cpi', '.cst', '.cvc', '.cx3', '.d2v', '.d3v', '.dat', '.dav', '.dce',
        '.dck', '.dcr', '.dcr', '.ddat', '.dif', '.dir', '.divx', '.dlx', '.dmb', '.dmsd', '.dmsd3d', '.dmsm', '.dmsm3d', '.dmss',
        '.dmx', '.dnc', '.dpa', '.dpg', '.dream', '.dsy', '.dv', '.dv-avi', '.dv4', '.dvdmedia', '.dvr', '.dvr-ms', '.dvx', '.dxr',
        '.dzm', '.dzp', '.dzt', '.edl', '.evo', '.eye', '.ezt', '.f4p', '.f4v', '.fbr', '.fbr', '.fbz', '.fcp', '.fcproject',
        '.ffd', '.flc', '.flh', '.fli', '.flv', '.flx', '.gfp', '.gl', '.gom', '.grasp', '.gts', '.gvi', '.gvp', '.h264', '.hdmov',
        '.hkm', '.ifo', '.imovieproj', '.imovieproject', '.ircp', '.irf', '.ism', '.ismc', '.ismv', '.iva', '.ivf', '.ivr', '.ivs',
        '.izz', '.izzy', '.jss', '.jts', '.jtv', '.k3g', '.kmv', '.ktn', '.lrec', '.lsf', '.lsx', '.m15', '.m1pg', '.m1v', '.m21',
        '.m21', '.m2a', '.m2p', '.m2t', '.m2ts', '.m2v', '.m4e', '.m4u', '.m4v', '.m75', '.mani', '.meta', '.mgv', '.mj2', '.mjp',
        '.mjpg', '.mk3d', '.mkv', '.mmv', '.mnv', '.mob', '.mod', '.modd', '.moff', '.moi', '.moov', '.mov', '.movie', '.mp21',
        '.mp21', '.mp2v', '.mp4', '.mp4v', '.mpe', '.mpeg', '.mpeg1', '.mpeg4', '.mpf', '.mpg', '.mpg2', '.mpgindex', '.mpl',
        '.mpl', '.mpls', '.mpsub', '.mpv', '.mpv2', '.mqv', '.msdvd', '.mse', '.msh', '.mswmm', '.mts', '.mtv', '.mvb', '.mvc',
        '.mvd', '.mve', '.mvex', '.mvp', '.mvp', '.mvy', '.mxf', '.mxv', '.mys', '.ncor', '.nsv', '.nut', '.nuv', '.nvc', '.ogm',
        '.ogv', '.ogx', '.osp', '.otrkey', '.pac', '.par', '.pds', '.pgi', '.photoshow', '.piv', '.pjs', '.playlist', '.plproj',
        '.pmf', '.pmv', '.pns', '.ppj', '.prel', '.pro', '.prproj', '.prtl', '.psb', '.psh', '.pssd', '.pva', '.pvr', '.pxv',
        '.qt', '.qtch', '.qtindex', '.qtl', '.qtm', '.qtz', '.r3d', '.rcd', '.rcproject', '.rdb', '.rec', '.rm', '.rmd', '.rmd',
        '.rmp', '.rms', '.rmv', '.rmvb', '.roq', '.rp', '.rsx', '.rts', '.rts', '.rum', '.rv', '.rvid', '.rvl', '.sbk', '.sbt',
        '.scc', '.scm', '.scm', '.scn', '.screenflow', '.sec', '.sedprj', '.seq', '.sfd', '.sfvidcap', '.siv', '.smi', '.smi',
        '.smil', '.smk', '.sml', '.smv', '.spl', '.sqz', '.srt', '.ssf', '.ssm', '.stl', '.str', '.stx', '.svi', '.swf', '.swi',
        '.swt', '.tda3mt', '.tdx', '.thp', '.tivo', '.tix', '.tod', '.tp', '.tp0', '.tpd', '.tpr', '.trp', '.ts', '.tsp', '.ttxt',
        '.tvs', '.usf', '.usm', '.vc1', '.vcpf', '.vcr', '.vcv', '.vdo', '.vdr', '.vdx', '.veg', '.vem', '.vep', '.vf', '.vft',
        '.vfw', '.vfz', '.vgz', '.vid', '.video', '.viewlet', '.viv', '.vivo', '.vlab', '.vob', '.vp3', '.vp6', '.vp7', '.vpj',
        '.vro', '.vs4', '.vse', '.vsp', '.w32', '.wcp', '.webm', '.wlmp', '.wm', '.wmd', '.wmmp', '.wmv', '.wmx', '.wot', '.wp3',
        '.wpl', '.wtv', '.wve', '.wvx', '.xej', '.xel', '.xesc', '.xfl', '.xlmv', '.xmv', '.xvid', '.y4m', '.yog', '.yuv', '.zeg',
        '.zm1', '.zm2', '.zm3', '.zmv')
    # endswith() accepts a tuple of suffixes; lower-case first so that
    # upper/mixed-case extensions are recognized as well.
    return inputF.lower().endswith(video_file_extensions)
def getFPS(self, vF):
    """Return the frame rate of video file *vF* via OpenCV.

    Handles both the OpenCV 2.x (``cv2.cv.CV_CAP_PROP_FPS``) and
    >=3.x (``cv2.CAP_PROP_FPS``) property APIs.
    """
    video = cv2.VideoCapture(vF)
    # Fix: the original unpacked the version into exactly three parts,
    # which raises ValueError for 4-component versions such as the
    # '4.x.y.z' strings shipped by opencv-python wheels.  Only the
    # leading component is needed.
    major_ver = cv2.__version__.split('.')[0]
    if int(major_ver) < 3:
        fps = video.get(cv2.cv.CV_CAP_PROP_FPS)
    else:
        fps = video.get(cv2.CAP_PROP_FPS)
    video.release()
    return fps
def splitFromVideo(self, inputF, outputFPrefix):
    """Dump every frame of *inputF* to numbered PNG files.

    Files are named ``<outputFPrefix>_frame<N>.png``; the list of the
    written file names is returned in frame order.
    """
    capture = cv2.VideoCapture(inputF)
    frame_files = []
    frame_no = 0
    ok, frame = capture.read()
    while ok:
        frame_name = outputFPrefix + '_frame' + str(frame_no) + '.png'
        cv2.imwrite(frame_name, frame)
        frame_files.append(frame_name)
        frame_no += 1
        ok, frame = capture.read()
    return frame_files
def mergeIntoVideo(self, inFs, outputF, FPS):
    """Write the images listed in *inFs*, in order, into one mp4 at *FPS*.

    The output frame size is taken from the first image in the list.
    """
    first_frame = cv2.imread(inFs[0])
    frame_height, frame_width, _ = first_frame.shape
    writer = cv2.VideoWriter(outputF, cv2.VideoWriter_fourcc(*'mp4v'), FPS, (frame_width, frame_height))
    for frame_file in inFs:
        writer.write(cv2.imread(frame_file))
    writer.release()
def generate(self, inputF, outputF, args):
    """Apply the configured transformations to an image or video file.

    For a video the input is split into frames, each frame is transformed
    with :meth:`_generate`, the results are merged back into a video, and
    the temporary per-frame files are removed.  For a still image
    :meth:`_generate` is called directly.  Returns True on success.
    """
    if args.auto:
        auto_options = self.generateRandomOptions(args)
        # Fix: this previously used the module-global ``logger`` (only
        # defined when run as a script), which raised NameError when the
        # class was used as a library; use the instance logger instead.
        self.logger.info('Random options: ' + str(auto_options))
    if self.isVideo(inputF):
        FPS = self.getFPS(inputF)
        inputFs = self.splitFromVideo(inputF, outputF+'_input')
        outputFs = []
        for idx in range(0, len(inputFs)):
            iF = inputFs[idx]
            oF = outputF + '_output_frame' + str(idx) + '.png'
            if args.auto:
                self._generate(iF, oF, auto_options)
            else:
                self._generate(iF, oF, args)
            outputFs.append(oF)
        self.mergeIntoVideo(outputFs, outputF, FPS)
        # Clean up the intermediate per-frame files.
        for f in inputFs:
            os.remove(f)
        for f in outputFs:
            os.remove(f)
        return True
    else:
        if args.auto:
            return self._generate(inputF, outputF, auto_options)
        else:
            return self._generate(inputF, outputF, args)
def _generate(self, inputF, outputF, args):
    """Apply the transformations selected in ``args`` to a single image.

    Reads ``inputF`` with PythonMagick, applies each enabled operation in
    a fixed order (geometry first, then colour/filter effects), crops the
    result back to the original dimensions and writes it to ``outputF``.
    Always returns True.
    """
    inputImage = self.Magick.Image(inputF)
    input_width = inputImage.size().width()
    input_height = inputImage.size().height()
    self.logger.debug('Input width and height: %d x %d' % (input_width, input_height))
    # make image ready to be modified
    inputImage.modifyImage()
    inputImage.backgroundColor(self.Magick.Color('black'))
    # --- geometric operations ---
    if args.shift_x != None:
        inputImage.roll(args.shift_x, 0)
    if args.shift_y != None:
        inputImage.roll(0, args.shift_y)
    if args.skew_x != None and args.skew_y != None:
        inputImage.shear(args.skew_x, args.skew_y)
    elif args.skew_x != None:
        inputImage.shear(args.skew_x, 0)
    # NOTE(review): when both skews are set, skew_y is applied a second
    # time by the branch below (shear(x, y) then shear(0, y)) — confirm
    # whether that double application is intended.
    if args.skew_y != None:
        inputImage.shear(0, args.skew_y)
    if args.rotate != None:
        inputImage.rotate(args.rotate)
        # rotation grows the canvas; crop back to the original size
        inputImage.crop(self.Magick.Geometry(input_width, input_height, 0, 0))
    if args.horizontal_flip:
        inputImage.flop()
    if args.zoom != None:
        # args.zoom is a percentage string such as "103%"
        inputImage.sample(self.Magick.Geometry(args.zoom))
        if int(args.zoom.strip()[0:-1]) >= 100:
            # zoomed in: centre-crop back to the original dimensions
            inputImage.crop(self.Magick.Geometry(input_width,
                                                 input_height,
                                                 int((inputImage.size().width() - input_width) / 2),
                                                 int((inputImage.size().height() - input_height) / 2)))
        else:
            # zoomed out: pad back up to the original size by drawing
            # black rectangles around the shrunken image, then re-centre.
            # PythonMagick is missing extent() API
            # inputImage.exent(Magick.Geometry(input_width, input_height), Magick.GravityType.CenterGravity)
            smallWidth = inputImage.size().width()
            smallHeight = inputImage.size().height()
            inputImage.size(self.Magick.Geometry(input_width, input_height))
            inputImage.draw(self.Magick.DrawableRectangle(smallWidth, smallHeight, input_width, input_height))
            inputImage.draw(self.Magick.DrawableRectangle(smallWidth, 0, input_width, smallHeight))
            inputImage.draw(self.Magick.DrawableRectangle(0, smallHeight, smallWidth, input_height))
            inputImage.roll(int((input_width - smallWidth) / 2), int((input_height - smallHeight) / 2))
    # --- colour / filter operations ---
    if args.contrast != None:
        # apply the contrast operator repeatedly, once per contrast level
        for _ in range(0, args.contrast):
            inputImage.contrast(args.contrast)
    if args.brightness != None or args.saturation != None or args.hue != None:
        # modulate() needs all three values; 100 means "unchanged"
        if args.brightness is None:
            args.brightness = 100
        if args.saturation is None:
            args.saturation = 100
        if args.hue is None:
            args.hue = 100
        inputImage.modulate(args.brightness, args.saturation, args.hue)
    if args.blur:
        inputImage.blur(args.blur_radius, args.blur_sigma)
    if args.gaussianBlur:
        inputImage.gaussianBlur(args.gaussianBlur_width, args.gaussianBlur_sigma)
    if args.despeckle:
        inputImage.despeckle()
    if args.enhance:
        inputImage.enhance()
    if args.equalize:
        inputImage.equalize()
    if args.gamma != None:
        inputImage.gamma(args.gamma)
    if args.implode != None:
        inputImage.implode(args.implode)
    if args.negate:
        inputImage.negate()
    if args.normalize:
        inputImage.normalize()
    if args.quantize:
        inputImage.quantize()
    if args.reduceNoise != None:
        inputImage.reduceNoise(args.reduceNoise)
    if args.shade:
        inputImage.shade(args.shade_azimuth, args.shade_elevation)
    if args.sharpen:
        inputImage.sharpen(args.sharpen_radius, args.sharpen_sigma)
    if args.swirl != None:
        inputImage.swirl(args.swirl)
    if args.wave:
        inputImage.wave(args.wave_amplitude, args.wave_wavelength)
    # Final centre-crop so the output always has the input's dimensions.
    inputImage.crop(self.Magick.Geometry(input_width,
                                         input_height,
                                         int(math.fabs((inputImage.size().width() - input_width) / 2)),
                                         int(math.fabs((inputImage.size().height() - input_height) / 2))))
    inputImage.write(outputF)
    self.logger.debug('Output width and height: %d x %d' % (inputImage.size().width(), inputImage.size().height()))
    return True
if __name__ == "__main__":
    # CLI entry point: parse arguments, configure logging, validate the
    # input/output paths and run a single generation.
    argparser = argparse.ArgumentParser()
    argparser.add_argument('-l', '--log-level', default='INFO', help="log-level (INFO|WARN|DEBUG|FATAL|ERROR)")
    argparser.add_argument('-i', '--input', required=True, help='Input image file name')
    argparser.add_argument('-o', '--output', required=True, help='Output image file name')
    argparser.add_argument('-w', '--overwrite', action='store_true', help='If set, will overwrite the existing output file')
    GenerateSyntheticData.appendArgumentParser(argparser)
    args = argparser.parse_args()

    logging.basicConfig(stream=sys.stdout, level=args.log_level)
    logger = logging.getLogger("DragonFly-ASL-GSD")
    logger.debug('CLI arguments')
    for key in vars(args):
        logger.debug(' -- %s: %s' % (key, getattr(args, key)))
    logger.debug('')

    # The input file must exist.
    if not os.path.isfile(args.input):
        logger.error('Input file %s does not exist: ' % args.input)
        sys.exit(1)

    # Ask before clobbering an existing output file (unless --overwrite).
    if os.path.isfile(args.output) and not args.overwrite:
        try:
            input = raw_input  # Python 2 compatibility shim
        except NameError:
            pass
        yn = input('Do you wish to overwrite %s? (y/n) ' % args.output)
        if yn not in ('y', 'Y'):
            logger.error('Output file %s will not be overwritten.' % args.output)
            sys.exit(1)

    generator = GenerateSyntheticData(logger=logger)
    status = generator.generate(args.input, args.output, args)
    logger.debug('Generation status: %r' % status)
| 48.35167 | 135 | 0.557027 | 22,912 | 0.930966 | 0 | 0 | 3,635 | 0.147698 | 0 | 0 | 5,169 | 0.210028 |
21a24250ffe367e1f9da7b56a97743385de6126c | 10,065 | py | Python | front/services/ingest_matches_service.py | jimixjay/acestats | 015a26e084fda70ab5754b78ce2e5157fee29d10 | [
"Apache-2.0"
] | null | null | null | front/services/ingest_matches_service.py | jimixjay/acestats | 015a26e084fda70ab5754b78ce2e5157fee29d10 | [
"Apache-2.0"
] | null | null | null | front/services/ingest_matches_service.py | jimixjay/acestats | 015a26e084fda70ab5754b78ce2e5157fee29d10 | [
"Apache-2.0"
] | 1 | 2021-01-15T19:56:41.000Z | 2021-01-15T19:56:41.000Z | from service_objects import services
import numpy as np
import pandas as pd
from django.db import connection
import datetime
from front.models import Match, Match_Stats, Player, Tourney, Tourney_Level, Surface
class IngestMatchesService(services.Service):
def process(self):
cursor = connection.cursor()
errors = ''
total_matches_updated = 0
total_matches_inserted = 0
tourneys = {}
surfaces = {}
tourney_levels = {}
players = {}
for year in range(1990, 2021):
csv_file = pd.read_csv('https://raw.githubusercontent.com/JeffSackmann/tennis_atp/master/atp_matches_' + str(year) + '.csv', header=1, names=self.getColumns())
for row in csv_file.itertuples():
created_at = datetime.datetime.now()
updated_at = datetime.datetime.now()
#try:
id = str(row.tourney_id) + '-' + str(row.match_num)
match = Match.objects.filter(id=id)
if (not match):
match = Match()
match.id = id
match.year = row.tourney_id.split('-')[0]
match.match_num = row.match_num
match.result = row.score
match.best_of = row.best_of
match.minutes = None if np.isnan(row.minutes) else row.minutes
match.round = row.round
if not tourneys.get(str(row.tourney_id)):
tourney = Tourney.objects.filter(id=row.tourney_id)
if (not tourney):
tourney = Tourney()
tourney.id = row.tourney_id
tourney.name = row.tourney_name
tourney.date = datetime.datetime.strptime(str(int(row.tourney_date)), '%Y%m%d').date()
tourney.created_at = created_at
tourney.updated_at = updated_at
if not surfaces.get(str(row.surface)):
surfaces[str(row.surface)] = self.getSurface(str(row.surface))
tourney.surface = surfaces[str(row.surface)]
if not tourney_levels.get(str(row.tourney_level)):
tourney_levels[str(row.tourney_level)] = self.getTourneyLevel(str(row.tourney_level))
tourney.tourney_level = tourney_levels[str(row.tourney_level)]
tourney.created_at = created_at
tourney.updated_at = updated_at
tourney.save()
else:
tourney = tourney[0]
tourneys[str(row.tourney_id)] = tourney
match.tourney = tourneys[str(row.tourney_id)]
match.created_at = created_at
match.updated_at = updated_at
match.save()
total_matches_inserted += 1
else:
match[0].year = row.tourney_id.split('-')[0]
match[0].save()
total_matches_updated += 1
match = match[0]
match_stats_id = str(row.tourney_id) + '-' + str(row.match_num) + '-' + str(row.winner_id)
match_stats = Match_Stats.objects.filter(id=match_stats_id)
if (not match_stats):
seed = row.winner_seed
if pd.isnull(row.winner_seed) or not str(row.winner_seed).isnumeric():
seed = None
match_stats = Match_Stats()
match_stats.id = match_stats_id
match_stats.type = ""
match_stats.seed = seed
match_stats.aces = None if np.isnan(row.w_ace) else row.w_ace
match_stats.double_faults = None if np.isnan(row.w_df) else row.w_df
match_stats.service_points = None if np.isnan(row.w_svpt) else row.w_svpt
match_stats.first_services = None if np.isnan(row.w_1stIn) else row.w_1stIn
match_stats.first_services_won = None if np.isnan(row.w_1stWon) else row.w_1stWon
match_stats.second_services_won = None if np.isnan(row.w_2ndWon) else row.w_2ndWon
match_stats.service_game_won = None if np.isnan(row.w_SvGms) else row.w_SvGms
match_stats.break_points_saved = None if np.isnan(row.w_bpSaved) else row.w_bpSaved
match_stats.break_points_played = None if np.isnan(row.w_bpFaced) else row.w_bpFaced
match_stats.rank = None if np.isnan(row.winner_rank) else row.winner_rank
match_stats.rank_points = None if np.isnan(row.winner_rank_points) else row.winner_rank_points
match_stats.is_winner = True
match_stats.created_at = created_at
match_stats.updated_at = updated_at
players[row.winner_id] = self.getPlayer(str(row.winner_id))
match_stats.player = players[row.winner_id]
match_stats.match = match
match_stats.save()
match_stats_id = str(row.tourney_id) + '-' + str(row.match_num) + '-' + str(row.loser_id)
match_stats = Match_Stats.objects.filter(id=match_stats_id)
if (not match_stats):
seed = row.loser_seed
if pd.isnull(row.loser_seed) or not str(row.loser_seed).isnumeric():
seed = None
match_stats = Match_Stats()
match_stats.id = match_stats_id
match_stats.type = ""
match_stats.seed = seed
match_stats.aces = None if np.isnan(row.l_ace) else row.l_ace
match_stats.double_faults = None if np.isnan(row.l_df) else row.l_df
match_stats.service_points = None if np.isnan(row.l_svpt) else row.l_svpt
match_stats.first_services = None if np.isnan(row.l_1stIn) else row.l_1stIn
match_stats.first_services_won = None if np.isnan(row.l_1stWon) else row.l_1stWon
match_stats.second_services_won = None if np.isnan(row.l_2ndWon) else row.l_2ndWon
match_stats.service_game_won = None if np.isnan(row.l_SvGms) else row.l_SvGms
match_stats.break_points_saved = None if np.isnan(row.l_bpSaved) else row.l_bpSaved
match_stats.break_points_played = None if np.isnan(row.l_bpFaced) else row.l_bpFaced
match_stats.rank = None if np.isnan(row.loser_rank) else row.loser_rank
match_stats.rank_points = None if np.isnan(row.loser_rank_points) else row.loser_rank_points
match_stats.is_winner = False
match_stats.created_at = created_at
match_stats.updated_at = updated_at
players[row.loser_id] = self.getPlayer(str(row.loser_id))
match_stats.player = players[row.loser_id]
match_stats.match = match
match_stats.save()
#except:
# assert False, (row.tourney_date, )
#errors = errors + '|||' + str(row.tourney_id) + '-' + str(row.match_num)
return {'inserts': total_matches_inserted, 'updates': total_matches_updated}
def getColumns(self):
return ["tourney_id","tourney_name","surface","draw_size","tourney_level","tourney_date","match_num","winner_id","winner_seed","winner_entry","winner_name","winner_hand","winner_ht","winner_ioc","winner_age",
"loser_id","loser_seed","loser_entry","loser_name","loser_hand","loser_ht","loser_ioc","loser_age","score","best_of","round","minutes","w_ace","w_df","w_svpt","w_1stIn","w_1stWon","w_2ndWon","w_SvGms","w_bpSaved",
"w_bpFaced","l_ace","l_df","l_svpt","l_1stIn","l_1stWon","l_2ndWon","l_SvGms","l_bpSaved","l_bpFaced","winner_rank","winner_rank_points","loser_rank","loser_rank_points"]
def getPlayer(self, id):
player = Player.objects.filter(id=id)
if (not player):
return None
else:
player = player[0]
return player
def getSurface(self, name):
surface = Surface.objects.filter(name=name)
if (not surface):
surface = Surface()
surface.name = name
surface.created_at = datetime.datetime.now()
surface.updated_at = datetime.datetime.now()
surface.save()
else:
surface = surface[0]
return surface
def getTourneyLevel(self, code):
tourney_level = Tourney_Level.objects.filter(code=code)
if (not tourney_level):
tourney_level = Tourney_Level()
tourney_level.code = code
tourney_level.name = code
tourney_level.created_at = datetime.datetime.now()
tourney_level.updated_at = datetime.datetime.now()
tourney_level.save()
else:
tourney_level = tourney_level[0]
return tourney_level | 52.696335 | 221 | 0.525286 | 9,843 | 0.977943 | 0 | 0 | 0 | 0 | 0 | 0 | 796 | 0.079086 |
21a39959b787e7f048c3956b733c098a43568590 | 5,583 | py | Python | test/test_websocket.py | lmacken/binance-chain-python | 483e51394ebc9f9998f5248910ac7b7dff7198f9 | [
"MIT"
] | 22 | 2019-04-27T02:14:52.000Z | 2021-01-04T00:37:41.000Z | test/test_websocket.py | redquantum/binance-chain-python | 483e51394ebc9f9998f5248910ac7b7dff7198f9 | [
"MIT"
] | 7 | 2019-04-28T20:57:49.000Z | 2021-09-03T03:39:22.000Z | test/test_websocket.py | redquantum/binance-chain-python | 483e51394ebc9f9998f5248910ac7b7dff7198f9 | [
"MIT"
] | 9 | 2019-04-27T23:43:51.000Z | 2021-04-15T18:09:51.000Z | # Copyright 2019, Luke Macken, Kim Bui, and the binance-chain-python contributors
# SPDX-License-Identifier: MIT
"""
Binance DEX WebSocket Test Suite
"""
import asyncio
import pytest
from binancechain import HTTPClient, WebSocket
def on_error(msg):
    """Print a websocket error message to stdout."""
    print('Error: {}'.format(msg))
@pytest.fixture
async def client():
    """Yield a fresh testnet WebSocket and close it after the test."""
    # If we create fresh websockets too fast it may error?
    await asyncio.sleep(1)
    ws = WebSocket(testnet=True)
    yield ws
    ws.close()
@pytest.fixture
async def symbols():
    """Yield every trading-pair symbol listed on the testnet."""
    rest = HTTPClient(testnet=True)
    markets = await rest.get_markets()
    pairs = [
        f"{market['base_asset_symbol']}_{market['quote_asset_symbol']}"
        for market in markets
    ]
    yield pairs
    await rest.close()
@pytest.mark.asyncio
async def test_open_close(client):
    """Open then immediately close."""
    def handle_open():
        print('opened')
        client.close()
    await client.start_async(on_open=handle_open, on_error=on_error)
    print('closed')
@pytest.mark.asyncio
async def test_trades(client, symbols):
    """A trades subscription delivers a message on the 'trades' stream."""
    print(symbols)
    received = []
    def record(msg):
        received.append(msg)
        client.close()
    def handle_open():
        client.subscribe_trades(symbols=symbols, callback=record)
    await client.start_async(on_open=handle_open, on_error=on_error)
    first = received[0]
    assert first['stream'] == 'trades'
@pytest.mark.asyncio
async def test_market_diff(client, symbols):
    """A market-diff subscription delivers a 'marketDiff' message."""
    received = []
    def record(msg):
        received.append(msg)
        client.close()
    def handle_open():
        client.subscribe_market_diff(symbols=symbols, callback=record)
    await client.start_async(on_open=handle_open, on_error=on_error)
    first = received[0]
    assert first['stream'] == 'marketDiff'
@pytest.mark.asyncio
async def test_market_depth(client, symbols):
    """A market-depth subscription delivers a 'marketDepth' message."""
    received = []
    def record(msg):
        received.append(msg)
        client.close()
    def handle_open():
        client.subscribe_market_depth(symbols=symbols, callback=record)
    await client.start_async(on_open=handle_open, on_error=on_error)
    first = received[0]
    assert first['stream'] == 'marketDepth'
@pytest.mark.asyncio
async def test_kline(client, symbols):
    """A 1-minute kline subscription delivers a 'kline_1m' message."""
    received = []
    def record(msg):
        received.append(msg)
        client.close()
    def handle_open():
        client.subscribe_kline(interval='1m', symbols=symbols, callback=record)
    await client.start_async(on_open=handle_open, on_error=on_error)
    first = received[0]
    assert first['stream'] == 'kline_1m'
@pytest.mark.asyncio
async def test_tickers(client, symbols):
    """A ticker subscription delivers a 'ticker' message."""
    received = []
    def record(msg):
        received.append(msg)
        client.close()
    def handle_open():
        client.subscribe_ticker(symbols=symbols, callback=record)
    await client.start_async(on_open=handle_open, on_error=on_error)
    first = received[0]
    assert first['stream'] == 'ticker'
@pytest.mark.asyncio
async def test_all_tickers(client):
    """The all-tickers subscription delivers an 'allTickers' message."""
    received = []
    def record(msg):
        received.append(msg)
        client.close()
    def handle_open():
        client.subscribe_all_tickers(callback=record)
    await client.start_async(on_open=handle_open, on_error=on_error)
    first = received[0]
    assert first['stream'] == 'allTickers'
@pytest.mark.asyncio
async def test_mini_ticker(client, symbols):
    """A mini-ticker subscription delivers a 'miniTicker' message."""
    received = []
    def record(msg):
        received.append(msg)
        client.close()
    def handle_open():
        client.subscribe_mini_ticker(symbols=symbols, callback=record)
    await client.start_async(on_open=handle_open, on_error=on_error)
    first = received[0]
    assert first['stream'] == 'miniTicker'
@pytest.mark.asyncio
async def test_all_mini_ticker(client, symbols):
    """The all-mini-tickers subscription delivers 'allMiniTickers'."""
    received = []
    def record(msg):
        received.append(msg)
        client.close()
    def handle_open():
        client.subscribe_all_mini_tickers(callback=record)
    await client.start_async(on_open=handle_open, on_error=on_error)
    first = received[0]
    assert first['stream'] == 'allMiniTickers'
@pytest.mark.asyncio
async def test_blockheight(client):
    """A blockheight subscription delivers a message carrying 'stream'."""
    received = []
    def record(msg):
        received.append(msg)
        client.close()
    def handle_open():
        client.subscribe_blockheight(callback=record)
    await client.start_async(on_open=handle_open, on_error=on_error)
    first = received[0]
    assert 'stream' in first
@pytest.mark.asyncio
async def test_keepalive(client):
    """keepalive() can be sent on an open socket without error."""
    def handle_open():
        client.keepalive()
        client.close()
    await client.start_async(on_open=handle_open, on_error=on_error)
@pytest.mark.asyncio
async def test_unsubscribe(client):
    """At least one message arrives before unsubscribing from blockheight."""
    received = []
    def record(msg):
        received.append(msg)
        client.unsubscribe("blockheight")
        client.close()
    def handle_open():
        client.subscribe_blockheight(callback=record)
    await client.start_async(on_open=handle_open, on_error=on_error)
    assert received
@pytest.mark.asyncio
async def test_decorator(client):
    """A sync handler registered via @client.on('open') is invoked."""
    @client.on('open')
    def handle_open():
        client.close()
    await client.start_async()
@pytest.mark.asyncio
async def test_decorator_async(client):
    """An async handler registered via @client.on('open') is invoked."""
    @client.on('open')
    async def handle_open():
        client.close()
    await client.start_async()
@pytest.mark.asyncio
async def test_decorator_sub_queue(client):
    """@client.on with a stream name subscribes and delivers messages."""
    received = []
    @client.on("allTickers", symbols=["$all"])
    async def record(msg):
        received.append(msg)
        client.close()
    await client.start_async()
    assert received
| 21.980315 | 81 | 0.675981 | 0 | 0 | 470 | 0.084184 | 5,252 | 0.940713 | 4,905 | 0.87856 | 543 | 0.09726 |
21a3bdf657a4e6add202d0974b1f52333a1151c2 | 2,590 | py | Python | opentaxii/config.py | eclecticiq/OpenTAXII | d04d0fcc65809cf8fd7baf0c69019c45c4243080 | [
"BSD-3-Clause"
] | 84 | 2018-04-16T18:35:27.000Z | 2022-03-02T15:50:22.000Z | opentaxii/config.py | eclecticiq/OpenTAXII | d04d0fcc65809cf8fd7baf0c69019c45c4243080 | [
"BSD-3-Clause"
] | 90 | 2018-04-18T08:56:50.000Z | 2022-03-30T12:42:21.000Z | opentaxii/config.py | eclecticiq/OpenTAXII | d04d0fcc65809cf8fd7baf0c69019c45c4243080 | [
"BSD-3-Clause"
] | 54 | 2018-05-05T03:10:39.000Z | 2022-03-11T16:26:49.000Z | import os
from collections import defaultdict
import yaml
from libtaxii.constants import ST_TYPES_10, ST_TYPES_11
# Directory containing this module; used to locate the bundled defaults file.
current_dir = os.path.dirname(os.path.realpath(__file__))

# Environment variables starting with this prefix override individual options.
ENV_VAR_PREFIX = 'OPENTAXII_'
# This variable may point to a user-supplied YAML configuration file.
CONFIG_ENV_VAR = 'OPENTAXII_CONFIG'
# Default configuration shipped alongside this module.
DEFAULT_CONFIG_NAME = 'defaults.yml'
DEFAULT_CONFIG = os.path.join(current_dir, DEFAULT_CONFIG_NAME)
return defaultdict(_infinite_dict)
class ServerConfig(dict):
'''Class responsible for loading configuration files.
This class will load default configuration file (shipped with OpenTAXII)
and apply user specified configuration file on top of default one.
Users can specify custom configuration file (YAML formatted) using
enviromental variable. The variable should contain a full path to
a custom configuration file.
:param str optional_env_var: name of the enviromental variable
:param list extra_configs: list of additional config filenames
'''
def __init__(self, optional_env_var=CONFIG_ENV_VAR, extra_configs=None):
# 4. default config
configs = [DEFAULT_CONFIG]
# 3. explicit configs
configs.extend(extra_configs or [])
# 2. config from OPENTAXII_CONFIG env var path
env_var_path = os.environ.get(optional_env_var)
if env_var_path:
configs.append(env_var_path)
# 1. config built from env vars
configs.append(self._get_env_config())
options = self._load_configs(*configs)
if options['unauthorized_status'] not in ST_TYPES_10 + ST_TYPES_11:
raise ValueError('invalid value for unauthorized_status field')
super(ServerConfig, self).__init__(options)
@staticmethod
def _get_env_config(env=os.environ):
result = _infinite_dict()
for key, value in env.items():
if not key.startswith(ENV_VAR_PREFIX):
continue
key = key[len(ENV_VAR_PREFIX):].lstrip('_').lower()
value = yaml.safe_load(value)
container = result
parts = key.split('__')
for part in parts[:-1]:
container = container[part]
container[parts[-1]] = value
return dict(result)
@classmethod
def _load_configs(cls, *configs):
result = dict()
for config in configs:
# read content from path-like object
if not isinstance(config, dict):
with open(config) as stream:
config = yaml.safe_load(stream=stream)
result.update(config)
return result
| 32.375 | 76 | 0.666795 | 2,181 | 0.842085 | 0 | 0 | 886 | 0.342085 | 0 | 0 | 790 | 0.305019 |
21a5fba047b0e38c889d6a4e058f430fb4400ae9 | 56,108 | py | Python | classifier/quant_trees.py | bradysalz/MinVAD | 4d4a396b381bbb4714b434f60e09fb2fa7d3c474 | [
"MIT"
] | null | null | null | classifier/quant_trees.py | bradysalz/MinVAD | 4d4a396b381bbb4714b434f60e09fb2fa7d3c474 | [
"MIT"
] | 2 | 2016-12-09T21:16:28.000Z | 2016-12-09T21:29:10.000Z | classifier/quant_trees.py | bradysalz/MinVAD | 4d4a396b381bbb4714b434f60e09fb2fa7d3c474 | [
"MIT"
def tree_16b(features):
    """Auto-generated binary decision-tree classifier; returns 0 or 1.

    ``features`` must support integer indexing up to index 19.  Thresholds
    appear quantized; the ``_16b`` suffix presumably denotes the
    quantization bit width -- TODO confirm against the generator.
    """
    if features[12] <= 0.0026689696301218646:
        if features[2] <= 0.00825153129312639:
            if features[19] <= 0.005966400067336508:
                if features[19] <= 0.0029812112336458085:
                    if features[17] <= 0.001915214421615019:
                        return 0
                    else: # if features[17] > 0.001915214421615019
                        return 0
                else: # if features[19] > 0.0029812112336458085
                    if features[2] <= 0.0018615168210089905:
                        return 0
                    else: # if features[2] > 0.0018615168210089905
                        return 0
            else: # if features[19] > 0.005966400067336508
                if features[19] <= 0.00793332328953511:
                    if features[18] <= 0.005491076861972033:
                        return 1
                    else: # if features[18] > 0.005491076861972033
                        return 0
                else: # if features[19] > 0.00793332328953511
                    if features[6] <= 0.001075940812143017:
                        return 1
                    else: # if features[6] > 0.001075940812143017
                        return 1
        else: # if features[2] > 0.00825153129312639
            if features[2] <= 0.011165123326009052:
                if features[19] <= 0.0012947088146120223:
                    if features[9] <= 0.002559585628887362:
                        return 1
                    else: # if features[9] > 0.002559585628887362
                        return 0
                else: # if features[19] > 0.0012947088146120223
                    if features[10] <= 0.0028857488325684244:
                        return 1
                    else: # if features[10] > 0.0028857488325684244
                        return 0
            else: # if features[2] > 0.011165123326009052
                if features[1] <= 0.012951065746165114:
                    if features[6] <= 0.009407024106167228:
                        return 1
                    else: # if features[6] > 0.009407024106167228
                        return 0
                else: # if features[1] > 0.012951065746165114
                    return 1
    else: # if features[12] > 0.0026689696301218646
        if features[19] <= 0.017378134596128803:
            if features[2] <= 0.01920186421671133:
                if features[0] <= 0.0018734496211436635:
                    if features[10] <= 0.0055686400628474075:
                        return 0
                    else: # if features[10] > 0.0055686400628474075
                        return 1
                else: # if features[0] > 0.0018734496211436635
                    if features[3] <= 0.02158046904355615:
                        return 0
                    else: # if features[3] > 0.02158046904355615
                        return 1
            else: # if features[2] > 0.01920186421671133
                if features[3] <= 0.06516701033547179:
                    if features[15] <= 0.00476715365380187:
                        return 1
                    else: # if features[15] > 0.00476715365380187
                        return 1
                else: # if features[3] > 0.06516701033547179
                    if features[0] <= 0.034261057986668675:
                        return 1
                    else: # if features[0] > 0.034261057986668675
                        return 0
        else: # if features[19] > 0.017378134596128803
            if features[0] <= 0.0035281312398183218:
                if features[2] <= 0.0026570368299871916:
                    if features[14] <= 0.008929712100780307:
                        return 1
                    else: # if features[14] > 0.008929712100780307
                        return 0
                else: # if features[2] > 0.0026570368299871916
                    if features[19] <= 0.03522761479757719:
                        return 1
                    else: # if features[19] > 0.03522761479757719
                        return 0
            else: # if features[0] > 0.0035281312398183218
                if features[8] <= 0.0518500053851767:
                    if features[13] <= 0.010222432115369884:
                        return 1
                    else: # if features[13] > 0.010222432115369884
                        return 0
                else: # if features[8] > 0.0518500053851767
                    if features[0] <= 0.03477615719248206:
                        return 1
                    else: # if features[0] > 0.03477615719248206
                        return 0
##################################################
def tree_15b(features):
    """Auto-generated binary decision-tree classifier; returns 0 or 1.

    ``features`` must support integer indexing up to index 19.  Thresholds
    appear quantized; the ``_15b`` suffix presumably denotes the
    quantization bit width -- TODO confirm against the generator.
    """
    if features[12] <= 0.0026689696301218646:
        if features[2] <= 0.008249542493103945:
            if features[19] <= 0.005966400067336508:
                if features[19] <= 0.002979222433623363:
                    if features[17] <= 0.0019132256215925736:
                        return 0
                    else: # if features[17] > 0.0019132256215925736
                        return 0
                else: # if features[19] > 0.002979222433623363
                    if features[2] <= 0.0018615168210089905:
                        return 0
                    else: # if features[2] > 0.0018615168210089905
                        return 0
            else: # if features[19] > 0.005966400067336508
                if features[19] <= 0.007935312089557556:
                    if features[18] <= 0.005493065661994478:
                        return 1
                    else: # if features[18] > 0.005493065661994478
                        return 0
                else: # if features[19] > 0.007935312089557556
                    if features[6] <= 0.0010739520121205715:
                        return 1
                    else: # if features[6] > 0.0010739520121205715
                        return 1
        else: # if features[2] > 0.008249542493103945
            if features[2] <= 0.011165123326009052:
                if features[19] <= 0.0012927200145895767:
                    if features[9] <= 0.0025575968288649165:
                        return 1
                    else: # if features[9] > 0.0025575968288649165
                        return 0
                else: # if features[19] > 0.0012927200145895767
                    if features[10] <= 0.00288773763259087:
                        return 1
                    else: # if features[10] > 0.00288773763259087
                        return 0
            else: # if features[2] > 0.011165123326009052
                if features[1] <= 0.012951065746165114:
                    if features[6] <= 0.009407024106167228:
                        return 1
                    else: # if features[6] > 0.009407024106167228
                        return 0
                else: # if features[1] > 0.012951065746165114
                    return 1
    else: # if features[12] > 0.0026689696301218646
        if features[19] <= 0.017378134596128803:
            if features[2] <= 0.019199875416688883:
                if features[0] <= 0.0018734496211436635:
                    if features[10] <= 0.0055686400628474075:
                        return 0
                    else: # if features[10] > 0.0055686400628474075
                        return 1
                else: # if features[0] > 0.0018734496211436635
                    if features[3] <= 0.021582457843578595:
                        return 0
                    else: # if features[3] > 0.021582457843578595
                        return 1
            else: # if features[2] > 0.019199875416688883
                if features[3] <= 0.06516502153544934:
                    if features[15] <= 0.0047651648537794244:
                        return 1
                    else: # if features[15] > 0.0047651648537794244
                        return 1
                else: # if features[3] > 0.06516502153544934
                    if features[0] <= 0.03426304678669112:
                        return 1
                    else: # if features[0] > 0.03426304678669112
                        return 0
        else: # if features[19] > 0.017378134596128803
            if features[0] <= 0.0035281312398183218:
                if features[2] <= 0.0026570368299871916:
                    if features[14] <= 0.008929712100780307:
                        return 1
                    else: # if features[14] > 0.008929712100780307
                        return 0
                else: # if features[2] > 0.0026570368299871916
                    if features[19] <= 0.035225625997554744:
                        return 1
                    else: # if features[19] > 0.035225625997554744
                        return 0
            else: # if features[0] > 0.0035281312398183218
                if features[8] <= 0.051848016585154255:
                    if features[13] <= 0.010222432115369884:
                        return 1
                    else: # if features[13] > 0.010222432115369884
                        return 0
                else: # if features[8] > 0.051848016585154255
                    if features[0] <= 0.03477615719248206:
                        return 1
                    else: # if features[0] > 0.03477615719248206
                        return 0
##################################################
def tree_14b(features):
    """Auto-generated binary decision-tree classifier; returns 0 or 1.

    ``features`` must support integer indexing up to index 19.  Thresholds
    appear quantized; the ``_14b`` suffix presumably denotes the
    quantization bit width -- TODO confirm against the generator.
    """
    if features[12] <= 0.0026729472301667556:
        if features[2] <= 0.008249542493103945:
            if features[19] <= 0.005966400067336508:
                if features[19] <= 0.002983200033668254:
                    if features[17] <= 0.0019172032216374646:
                        return 0
                    else: # if features[17] > 0.0019172032216374646
                        return 0
                else: # if features[19] > 0.002983200033668254
                    if features[2] <= 0.0018615168210089905:
                        return 0
                    else: # if features[2] > 0.0018615168210089905
                        return 0
            else: # if features[19] > 0.005966400067336508
                if features[19] <= 0.007931334489512665:
                    if features[18] <= 0.005489088061949587:
                        return 1
                    else: # if features[18] > 0.005489088061949587
                        return 0
                else: # if features[19] > 0.007931334489512665
                    if features[6] <= 0.0010739520121205715:
                        return 1
                    else: # if features[6] > 0.0010739520121205715
                        return 1
        else: # if features[2] > 0.008249542493103945
            if features[2] <= 0.011161145725964161:
                if features[19] <= 0.0012966976146344678:
                    if features[9] <= 0.0025615744289098075:
                        return 1
                    else: # if features[9] > 0.0025615744289098075
                        return 0
                else: # if features[19] > 0.0012966976146344678
                    if features[10] <= 0.00288773763259087:
                        return 1
                    else: # if features[10] > 0.00288773763259087
                        return 0
            else: # if features[2] > 0.011161145725964161
                if features[1] <= 0.012951065746165114:
                    if features[6] <= 0.009411001706212119:
                        return 1
                    else: # if features[6] > 0.009411001706212119
                        return 0
                else: # if features[1] > 0.012951065746165114
                    return 1
    else: # if features[12] > 0.0026729472301667556
        if features[19] <= 0.01737415699608391:
            if features[2] <= 0.019203853016733774:
                if features[0] <= 0.0018774272211885545:
                    if features[10] <= 0.0055686400628474075:
                        return 0
                    else: # if features[10] > 0.0055686400628474075
                        return 1
                else: # if features[0] > 0.0018774272211885545
                    if features[3] <= 0.021582457843578595:
                        return 0
                    else: # if features[3] > 0.021582457843578595
                        return 1
            else: # if features[2] > 0.019203853016733774
                if features[3] <= 0.06516104393540445:
                    if features[15] <= 0.0047651648537794244:
                        return 1
                    else: # if features[15] > 0.0047651648537794244
                        return 1
                else: # if features[3] > 0.06516104393540445
                    if features[0] <= 0.03426304678669112:
                        return 1
                    else: # if features[0] > 0.03426304678669112
                        return 0
        else: # if features[19] > 0.01737415699608391
            if features[0] <= 0.0035321088398632128:
                if features[2] <= 0.0026570368299871916:
                    if features[14] <= 0.008925734500735416:
                        return 1
                    else: # if features[14] > 0.008925734500735416
                        return 0
                else: # if features[2] > 0.0026570368299871916
                    if features[19] <= 0.035225625997554744:
                        return 1
                    else: # if features[19] > 0.035225625997554744
                        return 0
            else: # if features[0] > 0.0035321088398632128
                if features[8] <= 0.051851994185199146:
                    if features[13] <= 0.010222432115369884:
                        return 1
                    else: # if features[13] > 0.010222432115369884
                        return 0
                else: # if features[8] > 0.051851994185199146
                    if features[0] <= 0.03477217959243717:
                        return 1
                    else: # if features[0] > 0.03477217959243717
                        return 0
##################################################
def tree_13b(features):
    """Auto-generated binary decision-tree classifier; returns 0 or 1.

    ``features`` must support integer indexing up to index 19.  Thresholds
    appear quantized; the ``_13b`` suffix presumably denotes the
    quantization bit width -- TODO confirm against the generator.
    """
    if features[12] <= 0.0026729472301667556:
        if features[2] <= 0.008257497693193727:
            if features[19] <= 0.005966400067336508:
                if features[19] <= 0.002975244833578472:
                    if features[17] <= 0.0019092480215476826:
                        return 0
                    else: # if features[17] > 0.0019092480215476826
                        return 0
                else: # if features[19] > 0.002975244833578472
                    if features[2] <= 0.0018615168210089905:
                        return 0
                    else: # if features[2] > 0.0018615168210089905
                        return 0
            else: # if features[19] > 0.005966400067336508
                if features[19] <= 0.007939289689602447:
                    if features[18] <= 0.005489088061949587:
                        return 1
                    else: # if features[18] > 0.005489088061949587
                        return 0
                else: # if features[19] > 0.007939289689602447
                    if features[6] <= 0.0010819072122103535:
                        return 1
                    else: # if features[6] > 0.0010819072122103535
                        return 1
        else: # if features[2] > 0.008257497693193727
            if features[2] <= 0.011169100926053943:
                if features[19] <= 0.0012887424145446857:
                    if features[9] <= 0.0025615744289098075:
                        return 1
                    else: # if features[9] > 0.0025615744289098075
                        return 0
                else: # if features[19] > 0.0012887424145446857
                    if features[10] <= 0.002879782432501088:
                        return 1
                    else: # if features[10] > 0.002879782432501088
                        return 0
            else: # if features[2] > 0.011169100926053943
                if features[1] <= 0.012951065746165114:
                    if features[6] <= 0.009403046506122337:
                        return 1
                    else: # if features[6] > 0.009403046506122337
                        return 0
                else: # if features[1] > 0.012951065746165114
                    return 1
    else: # if features[12] > 0.0026729472301667556
        if features[19] <= 0.01737415699608391:
            if features[2] <= 0.019203853016733774:
                if features[0] <= 0.0018774272211885545:
                    if features[10] <= 0.0055686400628474075:
                        return 0
                    else: # if features[10] > 0.0055686400628474075
                        return 1
                else: # if features[0] > 0.0018774272211885545
                    if features[3] <= 0.021574502643488813:
                        return 0
                    else: # if features[3] > 0.021574502643488813
                        return 1
            else: # if features[2] > 0.019203853016733774
                if features[3] <= 0.06515308873531467:
                    if features[15] <= 0.0047731200538692065:
                        return 1
                    else: # if features[15] > 0.0047731200538692065
                        return 1
                else: # if features[3] > 0.06515308873531467
                    if features[0] <= 0.03425509158660134:
                        return 1
                    else: # if features[0] > 0.03425509158660134
                        return 0
        else: # if features[19] > 0.01737415699608391
            if features[0] <= 0.0035321088398632128:
                if features[2] <= 0.0026570368299871916:
                    if features[14] <= 0.008925734500735416:
                        return 1
                    else: # if features[14] > 0.008925734500735416
                        return 0
                else: # if features[2] > 0.0026570368299871916
                    if features[19] <= 0.035225625997554744:
                        return 1
                    else: # if features[19] > 0.035225625997554744
                        return 0
            else: # if features[0] > 0.0035321088398632128
                if features[8] <= 0.051851994185199146:
                    if features[13] <= 0.010214476915280102:
                        return 1
                    else: # if features[13] > 0.010214476915280102
                        return 0
                else: # if features[8] > 0.051851994185199146
                    if features[0] <= 0.03478013479252695:
                        return 1
                    else: # if features[0] > 0.03478013479252695
                        return 0
##################################################
def tree_12b(features):
    """Auto-generated binary decision-tree classifier; returns 0 or 1.

    ``features`` must support integer indexing up to index 19.  Thresholds
    appear quantized; the ``_12b`` suffix presumably denotes the
    quantization bit width -- TODO confirm against the generator.
    """
    if features[12] <= 0.0026729472301667556:
        if features[2] <= 0.008241587293014163:
            if features[19] <= 0.005950489667156944:
                if features[19] <= 0.002991155233758036:
                    if features[17] <= 0.0019092480215476826:
                        return 0
                    else: # if features[17] > 0.0019092480215476826
                        return 0
                else: # if features[19] > 0.002991155233758036
                    if features[2] <= 0.0018456064208294265:
                        return 0
                    else: # if features[2] > 0.0018456064208294265
                        return 0
            else: # if features[19] > 0.005950489667156944
                if features[19] <= 0.007923379289422883:
                    if features[18] <= 0.0055049984621291514:
                        return 1
                    else: # if features[18] > 0.0055049984621291514
                        return 0
                else: # if features[19] > 0.007923379289422883
                    if features[6] <= 0.0010819072122103535:
                        return 1
                    else: # if features[6] > 0.0010819072122103535
                        return 1
        else: # if features[2] > 0.008241587293014163
            if features[2] <= 0.011169100926053943:
                if features[19] <= 0.0013046528147242498:
                    if features[9] <= 0.0025456640287302434:
                        return 1
                    else: # if features[9] > 0.0025456640287302434
                        return 0
                else: # if features[19] > 0.0013046528147242498
                    if features[10] <= 0.002895692832680652:
                        return 1
                    else: # if features[10] > 0.002895692832680652
                        return 0
            else: # if features[2] > 0.011169100926053943
                if features[1] <= 0.012951065746165114:
                    if features[6] <= 0.0094189569063019:
                        return 1
                    else: # if features[6] > 0.0094189569063019
                        return 0
                else: # if features[1] > 0.012951065746165114
                    return 1
    else: # if features[12] > 0.0026729472301667556
        if features[19] <= 0.01737415699608391:
            if features[2] <= 0.01918794261655421:
                if features[0] <= 0.0018774272211885545:
                    if features[10] <= 0.0055686400628474075:
                        return 0
                    else: # if features[10] > 0.0055686400628474075
                        return 1
                else: # if features[0] > 0.0018774272211885545
                    if features[3] <= 0.021574502643488813:
                        return 0
                    else: # if features[3] > 0.021574502643488813
                        return 1
            else: # if features[2] > 0.01918794261655421
                if features[3] <= 0.0651371783351351:
                    if features[15] <= 0.0047731200538692065:
                        return 1
                    else: # if features[15] > 0.0047731200538692065
                        return 1
                else: # if features[3] > 0.0651371783351351
                    if features[0] <= 0.0342710019867809:
                        return 1
                    else: # if features[0] > 0.0342710019867809
                        return 0
        else: # if features[19] > 0.01737415699608391
            if features[0] <= 0.0035321088398632128:
                if features[2] <= 0.0026411264298076276:
                    if features[14] <= 0.00894164490091498:
                        return 1
                    else: # if features[14] > 0.00894164490091498
                        return 0
                else: # if features[2] > 0.0026411264298076276
                    if features[19] <= 0.035225625997554744:
                        return 1
                    else: # if features[19] > 0.035225625997554744
                        return 0
            else: # if features[0] > 0.0035321088398632128
                if features[8] <= 0.05183608378501958:
                    if features[13] <= 0.010214476915280102:
                        return 1
                    else: # if features[13] > 0.010214476915280102
                        return 0
                else: # if features[8] > 0.05183608378501958
                    if features[0] <= 0.03478013479252695:
                        return 1
                    else: # if features[0] > 0.03478013479252695
                        return 0
##################################################
def tree_11b(features):
    """Auto-generated binary decision-tree classifier; returns 0 or 1.

    ``features`` must support integer indexing up to index 19.  Thresholds
    appear quantized; the ``_11b`` suffix presumably denotes the
    quantization bit width -- TODO confirm against the generator.
    """
    if features[12] <= 0.0026729472301667556:
        if features[2] <= 0.008273408093373291:
            if features[19] <= 0.005982310467516072:
                if features[19] <= 0.002991155233758036:
                    if features[17] <= 0.0019092480215476826:
                        return 0
                    else: # if features[17] > 0.0019092480215476826
                        return 0
                else: # if features[19] > 0.002991155233758036
                    if features[2] <= 0.0018456064208294265:
                        return 0
                    else: # if features[2] > 0.0018456064208294265
                        return 0
            else: # if features[19] > 0.005982310467516072
                if features[19] <= 0.00795520008978201:
                    if features[18] <= 0.005473177661770023:
                        return 1
                    else: # if features[18] > 0.005473177661770023
                        return 0
                else: # if features[19] > 0.00795520008978201
                    if features[6] <= 0.0010819072122103535:
                        return 1
                    else: # if features[6] > 0.0010819072122103535
                        return 1
        else: # if features[2] > 0.008273408093373291
            if features[2] <= 0.011137280125694815:
                if features[19] <= 0.0012728320143651217:
                    if features[9] <= 0.0025456640287302434:
                        return 1
                    else: # if features[9] > 0.0025456640287302434
                        return 0
                else: # if features[19] > 0.0012728320143651217
                    if features[10] <= 0.002863872032321524:
                        return 1
                    else: # if features[10] > 0.002863872032321524
                        return 0
            else: # if features[2] > 0.011137280125694815
                if features[1] <= 0.012919244945805985:
                    if features[6] <= 0.0094189569063019:
                        return 1
                    else: # if features[6] > 0.0094189569063019
                        return 0
                else: # if features[1] > 0.012919244945805985
                    return 1
    else: # if features[12] > 0.0026729472301667556
        if features[19] <= 0.01737415699608391:
            if features[2] <= 0.019219763416913338:
                if features[0] <= 0.0018456064208294265:
                    if features[10] <= 0.0055368192624882795:
                        return 0
                    else: # if features[10] > 0.0055368192624882795
                        return 1
                else: # if features[0] > 0.0018456064208294265
                    if features[3] <= 0.021574502643488813:
                        return 0
                    else: # if features[3] > 0.021574502643488813
                        return 1
            else: # if features[2] > 0.019219763416913338
                if features[3] <= 0.06510535753477598:
                    if features[15] <= 0.0047731200538692065:
                        return 1
                    else: # if features[15] > 0.0047731200538692065
                        return 1
                else: # if features[3] > 0.06510535753477598
                    if features[0] <= 0.034239181186421774:
                        return 1
                    else: # if features[0] > 0.034239181186421774
                        return 0
        else: # if features[19] > 0.01737415699608391
            if features[0] <= 0.0035002880395040847:
                if features[2] <= 0.0026729472301667556:
                    if features[14] <= 0.008909824100555852:
                        return 1
                    else: # if features[14] > 0.008909824100555852
                        return 0
                else: # if features[2] > 0.0026729472301667556
                    if features[19] <= 0.03525744679791387:
                        return 1
                    else: # if features[19] > 0.03525744679791387
                        return 0
            else: # if features[0] > 0.0035002880395040847
                if features[8] <= 0.05186790458537871:
                    if features[13] <= 0.01024629771563923:
                        return 1
                    else: # if features[13] > 0.01024629771563923
                        return 0
                else: # if features[8] > 0.05186790458537871
                    if features[0] <= 0.03474831399216782:
                        return 1
                    else: # if features[0] > 0.03474831399216782
                        return 0
##################################################
def tree_10b(features):
    """Auto-generated binary decision-tree classifier; returns 0 or 1.

    ``features`` must support integer indexing up to index 19.  Thresholds
    appear quantized; the ``_10b`` suffix presumably denotes the
    quantization bit width -- TODO confirm against the generator.
    """
    if features[12] <= 0.0026729472301667556:
        if features[2] <= 0.008273408093373291:
            if features[19] <= 0.005982310467516072:
                if features[19] <= 0.00292751363303978:
                    if features[17] <= 0.0019092480215476826:
                        return 0
                    else: # if features[17] > 0.0019092480215476826
                        return 0
                else: # if features[19] > 0.00292751363303978
                    if features[2] <= 0.0019092480215476826:
                        return 0
                    else: # if features[2] > 0.0019092480215476826
                        return 0
            else: # if features[19] > 0.005982310467516072
                if features[19] <= 0.007891558489063755:
                    if features[18] <= 0.005473177661770023:
                        return 1
                    else: # if features[18] > 0.005473177661770023
                        return 0
                else: # if features[19] > 0.007891558489063755
                    if features[6] <= 0.0010182656114920974:
                        return 1
                    else: # if features[6] > 0.0010182656114920974
                        return 1
        else: # if features[2] > 0.008273408093373291
            if features[2] <= 0.011200921726413071:
                if features[19] <= 0.0012728320143651217:
                    if features[9] <= 0.0025456640287302434:
                        return 1
                    else: # if features[9] > 0.0025456640287302434
                        return 0
                else: # if features[19] > 0.0012728320143651217
                    if features[10] <= 0.00292751363303978:
                        return 1
                    else: # if features[10] > 0.00292751363303978
                        return 0
            else: # if features[2] > 0.011200921726413071
                if features[1] <= 0.012982886546524242:
                    if features[6] <= 0.0094189569063019:
                        return 1
                    else: # if features[6] > 0.0094189569063019
                        return 0
                else: # if features[1] > 0.012982886546524242
                    return 1
    else: # if features[12] > 0.0026729472301667556
        if features[19] <= 0.017437798596802168:
            if features[2] <= 0.019219763416913338:
                if features[0] <= 0.0019092480215476826:
                    if features[10] <= 0.005600460863206536:
                        return 0
                    else: # if features[10] > 0.005600460863206536
                        return 1
                else: # if features[0] > 0.0019092480215476826
                    if features[3] <= 0.02163814424420707:
                        return 0
                    else: # if features[3] > 0.02163814424420707
                        return 1
            else: # if features[2] > 0.019219763416913338
                if features[3] <= 0.06504171593405772:
                    if features[15] <= 0.00470947845315095:
                        return 1
                    else: # if features[15] > 0.00470947845315095
                        return 1
                else: # if features[3] > 0.06504171593405772
                    if features[0] <= 0.034239181186421774:
                        return 1
                    else: # if features[0] > 0.034239181186421774
                        return 0
        else: # if features[19] > 0.017437798596802168
            if features[0] <= 0.003563929640222341:
                if features[2] <= 0.0026729472301667556:
                    if features[14] <= 0.008909824100555852:
                        return 1
                    else: # if features[14] > 0.008909824100555852
                        return 0
                else: # if features[2] > 0.0026729472301667556
                    if features[19] <= 0.03525744679791387:
                        return 1
                    else: # if features[19] > 0.03525744679791387
                        return 0
            else: # if features[0] > 0.003563929640222341
                if features[8] <= 0.051804262984660454:
                    if features[13] <= 0.010182656114920974:
                        return 1
                    else: # if features[13] > 0.010182656114920974
                        return 0
                else: # if features[8] > 0.051804262984660454
                    if features[0] <= 0.03474831399216782:
                        return 1
                    else: # if features[0] > 0.03474831399216782
                        return 0
##################################################
def tree_9b(features):
    """Auto-generated binary decision-tree classifier; returns 0 or 1.

    ``features`` must support integer indexing up to index 19.  Thresholds
    appear quantized; the ``_9b`` suffix presumably denotes the
    quantization bit width -- TODO confirm against the generator.
    """
    if features[12] <= 0.0025456640287302434:
        if features[2] <= 0.008146124891936779:
            if features[19] <= 0.00585502726607956:
                if features[19] <= 0.003054796834476292:
                    if features[17] <= 0.0020365312229841948:
                        return 0
                    else: # if features[17] > 0.0020365312229841948
                        return 0
                else: # if features[19] > 0.003054796834476292
                    if features[2] <= 0.0017819648201111704:
                        return 0
                    else: # if features[2] > 0.0017819648201111704
                        return 0
            else: # if features[19] > 0.00585502726607956
                if features[19] <= 0.007891558489063755:
                    if features[18] <= 0.005600460863206536:
                        return 1
                    else: # if features[18] > 0.005600460863206536
                        return 0
                else: # if features[19] > 0.007891558489063755
                    if features[6] <= 0.0010182656114920974:
                        return 1
                    else: # if features[6] > 0.0010182656114920974
                        return 1
        else: # if features[2] > 0.008146124891936779
            if features[2] <= 0.011200921726413071:
                if features[19] <= 0.0012728320143651217:
                    if features[9] <= 0.0025456640287302434:
                        return 1
                    else: # if features[9] > 0.0025456640287302434
                        return 0
                else: # if features[19] > 0.0012728320143651217
                    if features[10] <= 0.002800230431603268:
                        return 1
                    else: # if features[10] > 0.002800230431603268
                        return 0
            else: # if features[2] > 0.011200921726413071
                if features[1] <= 0.012982886546524242:
                    if features[6] <= 0.0094189569063019:
                        return 1
                    else: # if features[6] > 0.0094189569063019
                        return 0
                else: # if features[1] > 0.012982886546524242
                    return 1
    else: # if features[12] > 0.0025456640287302434
        if features[19] <= 0.017310515395365655:
            if features[2] <= 0.019092480215476826:
                if features[0] <= 0.0017819648201111704:
                    if features[10] <= 0.005600460863206536:
                        return 0
                    else: # if features[10] > 0.005600460863206536
                        return 1
                else: # if features[0] > 0.0017819648201111704
                    if features[3] <= 0.02163814424420707:
                        return 0
                    else: # if features[3] > 0.02163814424420707
                        return 1
            else: # if features[2] > 0.019092480215476826
                if features[3] <= 0.06491443273262121:
                    if features[15] <= 0.0048367616545874625:
                        return 1
                    else: # if features[15] > 0.0048367616545874625
                        return 1
                else: # if features[3] > 0.06491443273262121
                    if features[0] <= 0.034366464387858286:
                        return 1
                    else: # if features[0] > 0.034366464387858286
                        return 0
        else: # if features[19] > 0.017310515395365655
            if features[0] <= 0.003563929640222341:
                if features[2] <= 0.0025456640287302434:
                    if features[14] <= 0.008909824100555852:
                        return 1
                    else: # if features[14] > 0.008909824100555852
                        return 0
                else: # if features[2] > 0.0025456640287302434
                    if features[19] <= 0.03513016359647736:
                        return 1
                    else: # if features[19] > 0.03513016359647736
                        return 0
            else: # if features[0] > 0.003563929640222341
                if features[8] <= 0.051931546186096966:
                    if features[13] <= 0.010182656114920974:
                        return 1
                    else: # if features[13] > 0.010182656114920974
                        return 0
                else: # if features[8] > 0.051931546186096966
                    if features[0] <= 0.034875597193604335:
                        return 1
                    else: # if features[0] > 0.034875597193604335
                        return 0
##################################################
def tree_8b(features):
    """Auto-generated binary decision-tree classifier; returns 0 or 1.

    ``features`` must support integer indexing up to index 19.  Thresholds
    appear quantized; the ``_8b`` suffix presumably denotes the
    quantization bit width -- TODO confirm against the generator.
    """
    if features[12] <= 0.0025456640287302434:
        if features[2] <= 0.008146124891936779:
            if features[19] <= 0.006109593668952584:
                if features[19] <= 0.003054796834476292:
                    if features[17] <= 0.0020365312229841948:
                        return 0
                    else: # if features[17] > 0.0020365312229841948
                        return 0
                else: # if features[19] > 0.003054796834476292
                    if features[2] <= 0.0020365312229841948:
                        return 0
                    else: # if features[2] > 0.0020365312229841948
                        return 0
            else: # if features[19] > 0.006109593668952584
                if features[19] <= 0.008146124891936779:
                    if features[18] <= 0.005600460863206536:
                        return 1
                    else: # if features[18] > 0.005600460863206536
                        return 0
                else: # if features[19] > 0.008146124891936779
                    if features[6] <= 0.0010182656114920974:
                        return 1
                    else: # if features[6] > 0.0010182656114920974
                        return 1
        else: # if features[2] > 0.008146124891936779
            if features[2] <= 0.011200921726413071:
                if features[19] <= 0.001527398417238146:
                    if features[9] <= 0.0025456640287302434:
                        return 1
                    else: # if features[9] > 0.0025456640287302434
                        return 0
                else: # if features[19] > 0.001527398417238146
                    if features[10] <= 0.003054796834476292:
                        return 1
                    else: # if features[10] > 0.003054796834476292
                        return 0
            else: # if features[2] > 0.011200921726413071
                if features[1] <= 0.012728320143651217:
                    if features[6] <= 0.009164390503428876:
                        return 1
                    else: # if features[6] > 0.009164390503428876
                        return 0
                else: # if features[1] > 0.012728320143651217
                    return 1
    else: # if features[12] > 0.0025456640287302434
        if features[19] <= 0.017310515395365655:
            if features[2] <= 0.01934704661834985:
                if features[0] <= 0.0020365312229841948:
                    if features[10] <= 0.005600460863206536:
                        return 0
                    else: # if features[10] > 0.005600460863206536
                        return 1
                else: # if features[0] > 0.0020365312229841948
                    if features[3] <= 0.021383577841334045:
                        return 0
                    else: # if features[3] > 0.021383577841334045
                        return 1
            else: # if features[2] > 0.01934704661834985
                if features[3] <= 0.06465986632974818:
                    if features[15] <= 0.004582195251714438:
                        return 1
                    else: # if features[15] > 0.004582195251714438
                        return 1
                else: # if features[3] > 0.06465986632974818
                    if features[0] <= 0.03411189798498526:
                        return 1
                    else: # if features[0] > 0.03411189798498526
                        return 0
        else: # if features[19] > 0.017310515395365655
            if features[0] <= 0.003563929640222341:
                if features[2] <= 0.0025456640287302434:
                    if features[14] <= 0.009164390503428876:
                        return 1
                    else: # if features[14] > 0.009164390503428876
                        return 0
                else: # if features[2] > 0.0025456640287302434
                    if features[19] <= 0.03513016359647736:
                        return 1
                    else: # if features[19] > 0.03513016359647736
                        return 0
            else: # if features[0] > 0.003563929640222341
                if features[8] <= 0.051931546186096966:
                    if features[13] <= 0.010182656114920974:
                        return 1
                    else: # if features[13] > 0.010182656114920974
                        return 0
                else: # if features[8] > 0.051931546186096966
                    if features[0] <= 0.03462103079073131:
                        return 1
                    else: # if features[0] > 0.03462103079073131
                        return 0
##################################################
def tree_7b(features):
    """Auto-generated binary decision-tree classifier; returns 0 or 1.

    ``features`` must support integer indexing up to index 19.  Thresholds
    appear quantized; the ``_7b`` suffix presumably denotes the
    quantization bit width -- TODO confirm against the generator.
    """
    if features[12] <= 0.003054796834476292:
        if features[2] <= 0.008146124891936779:
            if features[19] <= 0.006109593668952584:
                if features[19] <= 0.003054796834476292:
                    if features[17] <= 0.0020365312229841948:
                        return 0
                    else: # if features[17] > 0.0020365312229841948
                        return 0
                else: # if features[19] > 0.003054796834476292
                    if features[2] <= 0.0020365312229841948:
                        return 0
                    else: # if features[2] > 0.0020365312229841948
                        return 0
            else: # if features[19] > 0.006109593668952584
                if features[19] <= 0.008146124891936779:
                    if features[18] <= 0.005091328057460487:
                        return 1
                    else: # if features[18] > 0.005091328057460487
                        return 0
                else: # if features[19] > 0.008146124891936779
                    if features[6] <= 0.0010182656114920974:
                        return 1
                    else: # if features[6] > 0.0010182656114920974
                        return 1
        else: # if features[2] > 0.008146124891936779
            if features[2] <= 0.011200921726413071:
                if features[19] <= 0.0010182656114920974:
                    if features[9] <= 0.003054796834476292:
                        return 1
                    else: # if features[9] > 0.003054796834476292
                        return 0
                else: # if features[19] > 0.0010182656114920974
                    if features[10] <= 0.003054796834476292:
                        return 1
                    else: # if features[10] > 0.003054796834476292
                        return 0
            else: # if features[2] > 0.011200921726413071
                if features[1] <= 0.013237452949397266:
                    if features[6] <= 0.009164390503428876:
                        return 1
                    else: # if features[6] > 0.009164390503428876
                        return 0
                else: # if features[1] > 0.013237452949397266
                    return 1
    else: # if features[12] > 0.003054796834476292
        if features[19] <= 0.017310515395365655:
            if features[2] <= 0.01934704661834985:
                if features[0] <= 0.0020365312229841948:
                    if features[10] <= 0.005091328057460487:
                        return 0
                    else: # if features[10] > 0.005091328057460487
                        return 1
                else: # if features[0] > 0.0020365312229841948
                    if features[3] <= 0.021383577841334045:
                        return 0
                    else: # if features[3] > 0.021383577841334045
                        return 1
            else: # if features[2] > 0.01934704661834985
                if features[3] <= 0.06415073352400213:
                    if features[15] <= 0.005091328057460487:
                        return 1
                    else: # if features[15] > 0.005091328057460487
                        return 1
                else: # if features[3] > 0.06415073352400213
                    if features[0] <= 0.03462103079073131:
                        return 1
                    else: # if features[0] > 0.03462103079073131
                        return 0
        else: # if features[19] > 0.017310515395365655
            if features[0] <= 0.003054796834476292:
                if features[2] <= 0.003054796834476292:
                    if features[14] <= 0.009164390503428876:
                        return 1
                    else: # if features[14] > 0.009164390503428876
                        return 0
                else: # if features[2] > 0.003054796834476292
                    if features[19] <= 0.03563929640222341:
                        return 1
                    else: # if features[19] > 0.03563929640222341
                        return 0
            else: # if features[0] > 0.003054796834476292
                if features[8] <= 0.051931546186096966:
                    if features[13] <= 0.010182656114920974:
                        return 1
                    else: # if features[13] > 0.010182656114920974
                        return 0
                else: # if features[8] > 0.051931546186096966
                    if features[0] <= 0.03462103079073131:
                        return 1
                    else: # if features[0] > 0.03462103079073131
                        return 0
##################################################
def tree_6b(features):
    """Auto-generated binary decision-tree classifier; returns 0 or 1.

    ``features`` must support integer indexing up to index 19.  Thresholds
    appear quantized; the ``_6b`` suffix presumably denotes the
    quantization bit width -- TODO confirm against the generator.
    """
    if features[12] <= 0.0020365312229841948:
        if features[2] <= 0.008146124891936779:
            if features[19] <= 0.006109593668952584:
                if features[19] <= 0.0020365312229841948:
                    if features[17] <= 0.0020365312229841948:
                        return 0
                    else: # if features[17] > 0.0020365312229841948
                        return 0
                else: # if features[19] > 0.0020365312229841948
                    if features[2] <= 0.0020365312229841948:
                        return 0
                    else: # if features[2] > 0.0020365312229841948
                        return 0
            else: # if features[19] > 0.006109593668952584
                if features[19] <= 0.008146124891936779:
                    if features[18] <= 0.006109593668952584:
                        return 1
                    else: # if features[18] > 0.006109593668952584
                        return 0
                else: # if features[19] > 0.008146124891936779
                    if features[6] <= 0.0020365312229841948:
                        return 1
                    else: # if features[6] > 0.0020365312229841948
                        return 1
        else: # if features[2] > 0.008146124891936779
            if features[2] <= 0.010182656114920974:
                if features[19] <= 0.0020365312229841948:
                    if features[9] <= 0.0020365312229841948:
                        return 1
                    else: # if features[9] > 0.0020365312229841948
                        return 0
                else: # if features[19] > 0.0020365312229841948
                    if features[10] <= 0.0020365312229841948:
                        return 1
                    else: # if features[10] > 0.0020365312229841948
                        return 0
            else: # if features[2] > 0.010182656114920974
                if features[1] <= 0.012219187337905169:
                    if features[6] <= 0.010182656114920974:
                        return 1
                    else: # if features[6] > 0.010182656114920974
                        return 0
                else: # if features[1] > 0.012219187337905169
                    return 1
    else: # if features[12] > 0.0020365312229841948
        if features[19] <= 0.018328781006857753:
            if features[2] <= 0.018328781006857753:
                if features[0] <= 0.0020365312229841948:
                    if features[10] <= 0.006109593668952584:
                        return 0
                    else: # if features[10] > 0.006109593668952584
                        return 1
                else: # if features[0] > 0.0020365312229841948
                    if features[3] <= 0.022401843452826142:
                        return 0
                    else: # if features[3] > 0.022401843452826142
                        return 1
            else: # if features[2] > 0.018328781006857753
                if features[3] <= 0.06313246791251004:
                    if features[15] <= 0.0040730624459683895:
                        return 1
                    else: # if features[15] > 0.0040730624459683895
                        return 1
                else: # if features[3] > 0.06313246791251004
                    if features[0] <= 0.03462103079073131:
                        return 1
                    else: # if features[0] > 0.03462103079073131
                        return 0
        else: # if features[19] > 0.018328781006857753
            if features[0] <= 0.0040730624459683895:
                if features[2] <= 0.0020365312229841948:
                    if features[14] <= 0.008146124891936779:
                        return 1
                    else: # if features[14] > 0.008146124891936779
                        return 0
                else: # if features[2] > 0.0020365312229841948
                    if features[19] <= 0.03462103079073131:
                        return 1
                    else: # if features[19] > 0.03462103079073131
                        return 0
            else: # if features[0] > 0.0040730624459683895
                if features[8] <= 0.05091328057460487:
                    if features[13] <= 0.010182656114920974:
                        return 1
                    else: # if features[13] > 0.010182656114920974
                        return 0
                else: # if features[8] > 0.05091328057460487
                    if features[0] <= 0.03462103079073131:
                        return 1
                    else: # if features[0] > 0.03462103079073131
                        return 0
##################################################
def tree_5b(features):
    """Decision tree classifier over a 20-element feature vector.

    Hand-flattened, behaviorally identical rewrite of the generated nested
    tree: sub-trees whose leaves all agree are collapsed to a single return,
    and two-leaf splits become conditional expressions. Returns 0 or 1.
    """
    f = features
    if f[12] <= 0.0040730624459683895:
        if f[2] <= 0.008146124891936779:
            if f[19] <= 0.0040730624459683895:
                return 0  # every leaf under this branch is 0
            if f[19] <= 0.008146124891936779:
                return 1 if f[18] <= 0.0040730624459683895 else 0
            return 1  # both leaves are 1
        if f[2] <= 0.012219187337905169:
            if f[19] <= 0.0:
                return 1 if f[9] <= 0.0040730624459683895 else 0
            return 1 if f[10] <= 0.0040730624459683895 else 0
        if f[1] <= 0.012219187337905169:
            return 1 if f[6] <= 0.008146124891936779 else 0
        return 1
    if f[19] <= 0.016292249783873558:
        if f[2] <= 0.020365312229841948:
            if f[0] <= 0.0:
                return 0 if f[10] <= 0.0040730624459683895 else 1
            return 0 if f[3] <= 0.020365312229841948 else 1
        if f[3] <= 0.06109593668952584:
            return 1  # both leaves are 1 (original split on f[15] was redundant)
        return 1 if f[0] <= 0.032584499567747116 else 0
    if f[0] <= 0.0040730624459683895:
        if f[2] <= 0.0040730624459683895:
            return 1 if f[14] <= 0.008146124891936779 else 0
        return 1 if f[19] <= 0.036657562013715506 else 0
    if f[8] <= 0.052949811797589064:
        return 1 if f[13] <= 0.012219187337905169 else 0
    return 1 if f[0] <= 0.036657562013715506 else 0
##################################################
def tree_4b(features):
    """Decision tree classifier over a 20-element feature vector.

    Behaviorally identical flattening of the generated nested tree:
    branches whose leaves all agree, and re-tests that are unreachable
    given the enclosing condition, are collapsed. Returns 0 or 1.
    """
    f = features
    if f[12] <= 0.0:
        if f[2] <= 0.008146124891936779:
            return 0 if f[19] <= 0.008146124891936779 else 1
        if f[1] <= 0.016292249783873558:
            return 1 if f[6] <= 0.008146124891936779 else 0
        return 1
    if f[19] <= 0.016292249783873558:
        if f[2] <= 0.016292249783873558:
            if f[0] <= 0.0:
                return 0 if f[10] <= 0.008146124891936779 else 1
            return 0 if f[3] <= 0.024438374675810337 else 1
        if f[3] <= 0.05702287424355745:
            return 1  # both leaves are 1 (split on f[15] was redundant)
        return 1 if f[0] <= 0.032584499567747116 else 0
    if f[0] <= 0.0:
        if f[2] <= 0.0:
            return 1 if f[14] <= 0.008146124891936779 else 0
        return 1 if f[19] <= 0.032584499567747116 else 0
    if f[8] <= 0.048876749351620674:
        return 1 if f[13] <= 0.008146124891936779 else 0
    return 1 if f[0] <= 0.032584499567747116 else 0
##################################################
def tree_3b(features):
    """Decision tree classifier over a 20-element feature vector.

    Behaviorally identical flattening of the generated nested tree
    (agreeing leaves and dead re-tests collapsed). Returns 0 or 1.
    """
    f = features
    if f[12] <= 0.0:
        if f[2] <= 0.016292249783873558:
            return 0 if f[19] <= 0.0 else 1
        if f[1] <= 0.016292249783873558:
            return 1 if f[6] <= 0.016292249783873558 else 0
        return 1
    if f[19] <= 0.016292249783873558:
        if f[2] <= 0.016292249783873558:
            if f[0] <= 0.0:
                return 0 if f[10] <= 0.0 else 1
            return 0 if f[3] <= 0.016292249783873558 else 1
        if f[3] <= 0.048876749351620674:
            return 1  # both leaves are 1 (split on f[15] was redundant)
        return 1 if f[0] <= 0.032584499567747116 else 0
    if f[0] <= 0.0:
        if f[2] <= 0.0:
            return 1 if f[14] <= 0.016292249783873558 else 0
        return 1 if f[19] <= 0.032584499567747116 else 0
    if f[8] <= 0.048876749351620674:
        return 1 if f[13] <= 0.016292249783873558 else 0
    return 1 if f[0] <= 0.032584499567747116 else 0
##################################################
def tree_2b(features):
    """Decision tree classifier over a 20-element feature vector.

    Behaviorally identical flattening of the generated nested tree.
    Returns 0 or 1.
    """
    f = features
    if f[12] <= 0.0:
        if f[2] <= 0.0:
            return 0 if f[19] <= 0.0 else 1
        if f[1] <= 0.0:
            return 1 if f[6] <= 0.0 else 0
        return 1
    if f[19] <= 0.032584499567747116:
        if f[2] <= 0.032584499567747116:
            if f[0] <= 0.0:
                return 0 if f[10] <= 0.0 else 1
            return 0 if f[3] <= 0.032584499567747116 else 1
        if f[3] <= 0.032584499567747116:
            return 1  # both leaves are 1 (split on f[15] was redundant)
        return 1 if f[0] <= 0.032584499567747116 else 0
    if f[0] <= 0.0:
        if f[2] <= 0.0:
            return 1 if f[14] <= 0.0 else 0
        # Original re-tested f[19] <= 0.03258..., which is always false
        # on this path (the enclosing branch requires f[19] above it).
        return 0
    if f[8] <= 0.032584499567747116:
        return 1 if f[13] <= 0.0 else 0
    return 1 if f[0] <= 0.032584499567747116 else 0
##################################################
| 40.191977 | 58 | 0.55247 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 17,633 | 0.314269 |
21a698ad6f8035ce96d6e79e8a6eb4d69be7b56f | 1,193 | py | Python | DS_Alog_Python/array_employeelist.py | abhigyan709/dsalgo | 868448834b22e06e572b4a0b4ba85cb1b0c6d7ee | [
"MIT"
] | 1 | 2021-06-03T10:20:50.000Z | 2021-06-03T10:20:50.000Z | DS_Alog_Python/array_employeelist.py | abhigyan709/dsalgo | 868448834b22e06e572b4a0b4ba85cb1b0c6d7ee | [
"MIT"
] | null | null | null | DS_Alog_Python/array_employeelist.py | abhigyan709/dsalgo | 868448834b22e06e572b4a0b4ba85cb1b0c6d7ee | [
"MIT"
] | null | null | null | class Employee:
def __init__(self, name, emp_id, email_id):
self.__name=name
self.__emp_id=emp_id
self.__email_id=email_id
    def get_name(self):
        """Return the employee's name."""
        return self.__name
    def get_emp_id(self):
        """Return the employee's numeric id."""
        return self.__emp_id
    def get_email_id(self):
        """Return the employee's email address."""
        return self.__email_id
class OrganizationDirectory:
    """Searchable directory over a list of Employee-like records.

    Fix: ``lookup`` was case-sensitive, so the demo query
    ``org_dir.lookup("KEVIN")`` in this file never matched "Kevin".
    Matching is now case-insensitive; every match found before is still
    found, so the change is backward-compatible.
    """
    def __init__(self,emp_list):
        self.__emp_list=emp_list
    def lookup(self,key_name):
        """Return (and print) all employees whose name contains key_name,
        ignoring case."""
        needle = key_name.lower()
        result_list = [emp for emp in self.__emp_list
                       if needle in emp.get_name().lower()]
        self.display(result_list)
        return result_list
    def display(self,result_list):
        """Print one line per employee in result_list."""
        print("Search results:")
        for emp in result_list:
            print(emp.get_name()," ", emp.get_emp_id()," ",emp.get_email_id())
# Demo data: four employees used to exercise the directory lookup below.
emp1=Employee("Kevin",24089, "Kevin_xyz@organization.com")
emp2=Employee("Jack",56789,"Jack_xyz@organization.com")
emp3=Employee("Jackson",67895,"Jackson_xyz@organization.com")
emp4=Employee("Henry Jack",23456,"Jacky_xyz@organization.com")
emp_list=[emp1,emp2,emp3,emp4]
org_dir=OrganizationDirectory(emp_list)
#Search for an employee
# NOTE(review): "KEVIN" is upper-case; with a case-sensitive lookup this
# query matches nothing — presumably the search is meant to be
# case-insensitive. Confirm intended behavior.
org_dir.lookup("KEVIN")
21a81c32453ea2e8cd44b51c042ee837402f31c0 | 11,290 | py | Python | build/lib/sshColab/code.py | libinruan/ssh_Colab | 3b014c76404137567ada4a67582ff8600e61e7b0 | [
"MIT"
] | 1 | 2021-03-21T16:28:16.000Z | 2021-03-21T16:28:16.000Z | sshColab/code.py | libinruan/ssh_Colab | 3b014c76404137567ada4a67582ff8600e61e7b0 | [
"MIT"
] | null | null | null | sshColab/code.py | libinruan/ssh_Colab | 3b014c76404137567ada4a67582ff8600e61e7b0 | [
"MIT"
] | 2 | 2021-07-08T07:26:52.000Z | 2021-10-05T10:23:47.000Z | import subprocess
import secrets
import getpass
import os
import requests
import urllib.parse
import time
from google.colab import files, drive, auth
from google.cloud import storage
import glob
def connect(LOG_DIR = '/log/fit'):
    """Set up SSH + TensorBoard ngrok tunnels into the current Colab VM.

    Installs and starts an OpenSSH server with a random root password,
    downloads the ngrok agent, prompts interactively for the user's ngrok
    authtoken, starts the tunnels and TensorBoard (logdir=LOG_DIR), and
    writes the connection details to
    /content/ngrok-ssh/ngrok-tunnel-info.txt (readable via info()).

    NOTE: intended to run inside Google Colab only — relies on
    get_ipython(), root access, and /content paths.
    """
    print('It may take a few seconds for processing. Please wait.')
    # Random root password for the temporary SSH login.
    root_password = secrets.token_urlsafe()
    subprocess.call('apt-get update -qq', shell=True)
    subprocess.call('apt-get install -qq -o=Dpkg::Use-Pty=0 openssh-server pwgen > /dev/null', shell=True)
    subprocess.call(f'echo root:{root_password} | chpasswd', shell=True)
    subprocess.call('mkdir -p /var/run/sshd', shell=True)
    subprocess.call('echo "PermitRootLogin yes" >> /etc/ssh/sshd_config', shell=True)
    subprocess.call('echo "PasswordAuthentication yes" >> /etc/ssh/sshd_config', shell=True)
    # Start sshd in the background (system_raw does not block the notebook).
    get_ipython().system_raw('/usr/sbin/sshd -D &')
    subprocess.call('mkdir -p /content/ngrok-ssh', shell=True)
    os.chdir('/content/ngrok-ssh')
    subprocess.call('wget https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-amd64.zip -O ngrok-stable-linux-amd64.zip', shell=True)
    subprocess.call('unzip -u ngrok-stable-linux-amd64.zip', shell=True)
    subprocess.call('cp /content/ngrok-ssh/ngrok /ngrok', shell=True)
    subprocess.call('chmod +x /ngrok', shell=True)
    print("Copy&paste your authtoken from https://dashboard.ngrok.com/auth")
    authtoken = getpass.getpass()
    get_ipython().system_raw(f'/ngrok authtoken {authtoken} &')
    _create_tunnels()
    get_ipython().system_raw(f'tensorboard --logdir {LOG_DIR} --host 0.0.0.0 --port 6006 &')
    time.sleep(3) # give ngrok a moment to register tunnels before querying its local API
    with open('/content/ngrok-ssh/ngrok-tunnel-info.txt', 'w') as f:
        url, port = urllib.parse.urlparse(_get_ngrok_url('ssh')).netloc.split(':')
        # f.write('Run the command below on local machines to SSH into the Colab instance:\n')
        f.write(f'ssh -p {port} root@{url}\n')
        f.write('Password:\n')
        f.write(f'{root_password}\n')
        if 'COLAB_TPU_ADDR' in os.environ:
            # On TPU runtimes, also record a ready-to-paste TF/TPU bootstrap snippet.
            tpu_address = 'grpc://' + os.environ['COLAB_TPU_ADDR']
            f.write(f"""Copy and paste the commands below to the beginning of your TPU program:
import tensorflow as tf
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='{tpu_address}')
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.experimental.TPUStrategy(resolver)""")
        url_tensorboard = _get_ngrok_url('tensorboard')
        # f.write(f'To view tensorboard, visit {url_tensorboard}')
        f.write(f'Tensorboard: {url_tensorboard}')
        # f.write('after running the following two commands on the Colab notebook:\n')
        # f.write(f'    %load_ext tensorboard')
        # f.write(f'    %tensorboard --logdir {LOG_DIR}')
        # f.write('Run kill() to close all the tunnels.\n')
    # print('SSH connection is successfully established. Run info() for connection configuration.')
def info():
    """Print the saved connection details written by connect().

    Fix: each stored line already ends with '\\n' and ``print(line)``
    appended a second newline, double-spacing the whole output; print
    with ``end=''`` instead.
    """
    with open('/content/ngrok-ssh/ngrok-tunnel-info.txt', 'r') as f:
        for line in f:
            print(line, end='')
def kill():
    """Terminate every running ngrok process (closes all tunnels).

    Fix: the old ``ps aux | grep ngrok | awk`` pipeline also matched its
    own short-lived ``grep ngrok`` process, handing ``kill`` a PID that
    no longer existed; ``pkill -f`` matches the full command line and
    avoids the self-match problem.
    """
    os.system('pkill -f ngrok')
    print('Done.')
def _create_tunnels():
    """Write per-tunnel ngrok config files and launch the ngrok agent.

    Creates one YAML file per tunnel (ssh/22, tensorboard/6006,
    http8080, tcp8080, and — on TPU runtimes — a tcp tunnel to the TPU
    gRPC address), plus a run_ngrok.sh launcher, then starts ngrok in
    the background via get_ipython().system_raw (Colab only).
    """
    with open('/content/ngrok-ssh/ssh.yml', 'w') as f:
        f.write('tunnels:\n')
        f.write('  ssh:\n')
        f.write('    proto: tcp\n')
        f.write('    addr: 22')
    with open('/content/ngrok-ssh/tensorboard.yml', 'w') as f:
        f.write('tunnels:\n')
        f.write('  tensorboard:\n')
        f.write('    proto: http\n')
        f.write('    addr: 6006\n')
        f.write('    inspect: false\n')
        f.write('    bind_tls: true')
    with open('/content/ngrok-ssh/http8080.yml', 'w') as f:
        f.write('tunnels:\n')
        f.write('  http8080:\n')
        f.write('    proto: http\n')
        f.write('    addr: 8080\n')
        f.write('    inspect: false\n')
        f.write('    bind_tls: true')
    with open('/content/ngrok-ssh/tcp8080.yml', 'w') as f:
        f.write('tunnels:\n')
        f.write('  tcp8080:\n')
        f.write('    proto: tcp\n')
        f.write('    addr: 8080')
    if 'COLAB_TPU_ADDR' in os.environ:
        # TPU runtime: tunnel the TPU's gRPC endpoint as well.
        with open('/content/ngrok-ssh/tpu.yml', 'w') as f:
            COLAB_TPU_ADDR = os.environ['COLAB_TPU_ADDR']
            f.write('tunnels:\n')
            f.write('  tpu:\n')
            f.write('    proto: tcp\n')
            f.write(f'    addr: {COLAB_TPU_ADDR}')
    with open('/content/ngrok-ssh/run_ngrok.sh', 'w') as f:
        f.write('#!/bin/sh\n')
        f.write('set -x\n')
        if 'COLAB_TPU_ADDR' in os.environ:
            f.write('/ngrok start --config ~/.ngrok2/ngrok.yml --config /content/ngrok-ssh/ssh.yml --log=stdout --config /content/ngrok-ssh/tensorboard.yml --config /content/ngrok-ssh/http8080.yml --config /content/ngrok-ssh/tcp8080.yml --config /content/ngrok-ssh/tpu.yml "$@"')
        else:
            f.write('/ngrok start --config ~/.ngrok2/ngrok.yml --config /content/ngrok-ssh/ssh.yml --log=stdout --config /content/ngrok-ssh/tensorboard.yml --config /content/ngrok-ssh/http8080.yml --config /content/ngrok-ssh/tcp8080.yml "$@"')
    # Note: http8080 config is written but (per the start arguments below)
    # only ssh/tensorboard/tcp8080[/tpu] tunnels are actually started.
    if 'COLAB_TPU_ADDR' in os.environ:
        get_ipython().system_raw('bash /content/ngrok-ssh/run_ngrok.sh ssh tensorboard tcp8080 tpu &')
    else:
        get_ipython().system_raw('bash /content/ngrok-ssh/run_ngrok.sh ssh tensorboard tcp8080 &')
def _get_ngrok_info():
    """Query the local ngrok agent API and return the tunnel inventory as a dict."""
    response = requests.get('http://localhost:4040/api/tunnels')
    return response.json()
def _get_ngrok_tunnels():
    """Yield (name, tunnel_dict) pairs for every tunnel reported by the agent."""
    tunnels = _get_ngrok_info()['tunnels']
    yield from ((tunnel['name'], tunnel) for tunnel in tunnels)
def _get_ngrok_tunnel(name):
    """Return the tunnel dict whose name matches *name*, or None if absent."""
    return next(
        (tunnel for tunnel_name, tunnel in _get_ngrok_tunnels()
         if tunnel_name == name),
        None,
    )
def _get_ngrok_url(name, local=False):
    """Return the tunnel's local address when *local* is true, else its public URL."""
    tunnel = _get_ngrok_tunnel(name)
    return tunnel['config']['addr'] if local else tunnel['public_url']
def kaggle(data='tabular-playground-series-mar-2021', output='/kaggle/input'):
    """Download and unpack a Kaggle competition dataset inside Colab.

    Installs the kaggle CLI, prompts for an upload of the user's
    kaggle.json API token if it is not already present, downloads
    competition *data* into *output*, extracts the zip there, and leaves
    the working directory at /kaggle/working. Colab-only (uses
    google.colab files.upload and root paths).
    """
    subprocess.call('sudo apt -q update', shell=True)
    subprocess.call('sudo apt -q install unar nano less p7zip', shell=True)
    subprocess.call('pip install -q --upgrade --force-reinstall --no-deps kaggle kaggle-cli', shell=True)
    subprocess.call('mkdir -p /root/.kaggle', shell=True)
    os.chdir('/root/.kaggle')
    if 'kaggle.json' not in os.listdir('/root/.kaggle'):
        # Interactive: asks the user to upload their API token file.
        print('Upload your kaggle API token')
        files.upload()
    subprocess.call('chmod 600 /root/.kaggle/kaggle.json', shell=True)
    subprocess.call(f'mkdir -p {output}', shell=True)
    os.chdir(f'{output}')
    subprocess.call(f'kaggle competitions download -c {data}', shell=True)
    subprocess.call(f'7z x {data}.zip -o{output}', shell=True)
    print(f'\nUnzipped {data}.zip to {output}.')
    subprocess.call('mkdir -p /kaggle/working', shell=True)
    os.chdir('/kaggle/working')
def google_drive(dir='/gdrive'):
    """Mount the user's Google Drive at *dir* (triggers the Colab auth flow)."""
    print('\nGoogle Drive authentication starts...')
    drive.mount(dir)
def GCSconnect(key_file=None):
    """Authenticate this Colab session against Google Cloud Storage.

    With *key_file* set, prompts for an upload of that service-account
    JSON key, stores it under /root/.kaggle/ and points
    GOOGLE_APPLICATION_CREDENTIALS at it. Without a key file, falls back
    to the interactive Colab user-auth flow.
    """
    if key_file:
        if not os.path.exists('/root/.kaggle/'):
            os.makedirs('/root/.kaggle/')
        print('Upload your Google Storage API token')
        os.chdir('/root/.kaggle/')
        files.upload()
        subprocess.call(f'chmod 600 /root/.kaggle/{key_file}', shell=True)
        # Standard env var read by google-cloud client libraries.
        os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = f'/root/.kaggle/{key_file}'
        subprocess.call('echo $GOOGLE_APPLICATION_CREDENTIALS', shell=True)
    else:
        print('\nGCS authentication starts...')
        auth.authenticate_user()
def _create_bucket(project, bucket_name):
    """Create a new US-located GCS bucket named *bucket_name* under *project*."""
    client = storage.Client(project=project)
    new_bucket = client.bucket(bucket_name)
    new_bucket.create(location='US')
    print(f'bucket {new_bucket.name} created.')
def _list_blobs(project, bucket_name):
    """Print the name of every blob in *bucket_name*, one per line."""
    client = storage.Client(project=project)
    names = [blob.name for blob in client.list_blobs(bucket_name)]
    if names:
        print('\n'.join(names))
    else:
        print('empty bucket!')
def create_bucket(project, bucket_name):
    """Best-effort wrapper around _create_bucket: report failures instead of raising."""
    try:
        _create_bucket(project, bucket_name)
    except Exception as err:
        print(f"create_bucket('{bucket_name}') fails. Code:", err)
def list_blobs(project, bucket_name):
    """Best-effort wrapper around _list_blobs: report failures instead of raising."""
    try:
        _list_blobs(project, bucket_name)
    except Exception as err:
        print(f"list_blobs('{bucket_name}') fails. Code:", err)
def upload_to_gcs(project, bucket_name, destination_blob, source_directory):
    """Upload one local file to a Google Cloud Storage bucket.

    Args:
        project: GCP project id, e.g. "strategic-howl-123".
        bucket_name: target bucket name, e.g. "gcs-station-16".
        destination_blob: blob path inside the bucket, e.g. 'temp8/a.pkl'.
            Do NOT put a leading slash here.
        source_directory: local path of the file to upload, e.g. '/a.pkl'.

    Example:
        upload_to_gcs("strategic-howl-123", "gcs-station-16",
                      'temp8/a.pkl', '/a.pkl')
    """
    # (The commented-out multi-file glob loop from an earlier revision
    # was removed; this function uploads exactly one file.)
    storage_client = storage.Client(project=project)
    bucket = storage_client.get_bucket(bucket_name)
    blob = bucket.blob(destination_blob)
    blob.upload_from_filename(source_directory)
def download_to_colab(project, bucket_name, destination_directory, remote_blob_path='', local_file_name=''):
    """Download file(s) from a Google Cloud Storage bucket to Colab.

    If both *remote_blob_path* and *local_file_name* are given, downloads
    that single blob into *destination_directory* under *local_file_name*.
    Otherwise downloads every blob in the bucket, recreating the bucket's
    directory structure below *destination_directory* (which becomes the
    current working directory as a side effect).

    Usage:
        download_to_colab(project, bucket_name, '/temp8')
        download_to_colab(project, bucket_name,
                          destination_directory='/temp9/fun',
                          remote_blob_path='tps-apr-2021-label/data_fare_age.pkl',
                          local_file_name='data_fare_age.pkl')
    """
    storage_client = storage.Client(project=project)
    os.makedirs(destination_directory, exist_ok = True)
    if local_file_name and remote_blob_path:
        # Single-file mode.
        bucket = storage_client.bucket(bucket_name)
        blob = bucket.blob(remote_blob_path)
        blob.download_to_filename(os.path.join(destination_directory, local_file_name))
        print('download finished.')
    else:
        # Whole-bucket mode: mirror the bucket layout under the destination.
        from pathlib import Path
        os.chdir(destination_directory)
        blobs = storage_client.list_blobs(bucket_name)
        count = 1
        for blob in blobs:
            if blob.name.endswith("/"): continue # skip "directory" placeholder blobs
            file_split = blob.name.split("/")
            directory = "/".join(file_split[0:-1])
            Path(directory).mkdir(parents=True, exist_ok=True) # recreate blob's parent dirs
            blob.download_to_filename(blob.name)
            des = os.path.join(destination_directory, directory)
            if count==1: print(f"Destination: {des}")
            print(f'{count}. {blob.name.split("/")[-1]:>50s}')
            count += 1
| 43.590734 | 279 | 0.651993 | 0 | 0 | 130 | 0.011515 | 0 | 0 | 0 | 0 | 5,540 | 0.4907 |
21ab9ff1c815e6c21057d32a64d3dded51ce4eb3 | 763 | py | Python | static/py/discussionNum.py | m1-llie/SCU_hotFollowing | 8cc29aadc7ac2e7b9e8a9502ea13971b8cd93abb | [
"BSD-3-Clause"
] | 1 | 2020-12-15T13:06:31.000Z | 2020-12-15T13:06:31.000Z | static/py/discussionNum.py | m1-llie/SCU_hotFollowing | 8cc29aadc7ac2e7b9e8a9502ea13971b8cd93abb | [
"BSD-3-Clause"
] | null | null | null | static/py/discussionNum.py | m1-llie/SCU_hotFollowing | 8cc29aadc7ac2e7b9e8a9502ea13971b8cd93abb | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import pymysql
import json
def countNum(table):
    """Return how many rows of *table* contain the keyword '川大' in their text column.

    Fix: the query was built as ``"...FROM" + table + "WHERE..."`` with no
    spaces around the table name, producing invalid SQL such as
    ``SELECT COUNT(*) FROMdate20200606WHERE ...``. The connection is now
    also closed in a ``finally`` block so a failed query cannot leak it.
    """
    # Open the database connection.
    db = pymysql.connect("cd-cdb-6sbfm2hw.sql.tencentcdb.com", "root", "Mrsnow@0", "spider")
    try:
        # NOTE(review): table names cannot be bound as SQL parameters, so the
        # name is interpolated directly; only pass trusted, hard-coded table
        # names (as the __main__ block below does).
        sql = "SELECT COUNT(*) FROM " + table + " WHERE text like '%川大%'"
        cursor = db.cursor()
        cursor.execute(sql)
        number_row = cursor.fetchone()[0]  # total rows containing the keyword
    finally:
        # Always release the connection, even if the query fails.
        db.close()
    return number_row
if __name__ == "__main__":
    # Collect one keyword count per daily table; the resulting list is
    # what gets fed back (as JSON) to the echarts chart.
    day_tables = ["date20200606", "date20200607", "date20200608",
                  "date20200609", "date20200610", "date20200611",
                  "date20200612"]
    numList = [countNum(day) for day in day_tables]
    print(numList)
    # json.dumps(numList)
| 26.310345 | 122 | 0.663172 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 535 | 0.583424 |
21abd949743f6366711da7e003fd265439edaff6 | 1,683 | py | Python | python/ray/tests/test_list_actors.py | jianoaix/ray | 1701b923bc83905f8961c06a6a173e3eba46a936 | [
"Apache-2.0"
] | null | null | null | python/ray/tests/test_list_actors.py | jianoaix/ray | 1701b923bc83905f8961c06a6a173e3eba46a936 | [
"Apache-2.0"
] | null | null | null | python/ray/tests/test_list_actors.py | jianoaix/ray | 1701b923bc83905f8961c06a6a173e3eba46a936 | [
"Apache-2.0"
] | null | null | null | import pytest
import sys
import ray
from ray._private.test_utils import wait_for_condition
def test_list_named_actors_basic(ray_start_regular):
    """list_named_actors reflects named-actor creation and destruction."""
    @ray.remote
    class A:
        pass
    # An unnamed actor must not show up in the named-actor listing.
    a = A.remote()
    assert not ray.util.list_named_actors()
    # Rebinding `a` drops the unnamed actor and creates a named one.
    a = A.options(name="hi").remote()
    assert len(ray.util.list_named_actors()) == 1
    assert "hi" in ray.util.list_named_actors()
    b = A.options(name="hi2").remote()
    assert len(ray.util.list_named_actors()) == 2
    assert "hi" in ray.util.list_named_actors()
    assert "hi2" in ray.util.list_named_actors()
    def one_actor():
        actors = ray.util.list_named_actors()
        return actors == ["hi2"]
    # Deleting the handle destroys the actor asynchronously, so poll
    # until the listing catches up.
    del a
    wait_for_condition(one_actor)
    del b
    wait_for_condition(lambda: not ray.util.list_named_actors())
@pytest.mark.parametrize("ray_start_regular", [{"local_mode": True}], indirect=True)
def test_list_named_actors_basic_local_mode(ray_start_regular):
    """Same creation-side checks as the basic test, but with Ray local mode.

    The teardown/wait_for_condition half is omitted here; only creation
    visibility is asserted in local mode.
    """
    @ray.remote
    class A:
        pass
    # Unnamed actors are not listed.
    a = A.remote()
    assert not ray.util.list_named_actors()
    a = A.options(name="hi").remote()  # noqa: F841
    assert len(ray.util.list_named_actors()) == 1
    assert "hi" in ray.util.list_named_actors()
    b = A.options(name="hi2").remote()  # noqa: F841
    assert len(ray.util.list_named_actors()) == 2
    assert "hi" in ray.util.list_named_actors()
    assert "hi2" in ray.util.list_named_actors()
if __name__ == "__main__":
    import os
    # Run tests in parallel, one worker process per test, when the CI
    # environment requests it; otherwise run serially.
    # NOTE(review): the original comment said "Disable on windows for now"
    # but the check is on PARALLEL_CI — confirm which is intended.
    if os.environ.get("PARALLEL_CI"):
        sys.exit(pytest.main(["-n", "auto", "--boxed", "-vs", __file__]))
    else:
        sys.exit(pytest.main(["-sv", __file__]))
| 26.714286 | 84 | 0.66429 | 42 | 0.024955 | 0 | 0 | 643 | 0.382056 | 0 | 0 | 211 | 0.125371 |
21ac305d52e17cf13665344430982d9de8fffdd1 | 218 | py | Python | python3/TableCloumnInfo.py | shengpli/LearnPython | 8e22afa9dc5b2a9e26c9f3e7ef9eb85196fd1559 | [
"Apache-2.0"
] | null | null | null | python3/TableCloumnInfo.py | shengpli/LearnPython | 8e22afa9dc5b2a9e26c9f3e7ef9eb85196fd1559 | [
"Apache-2.0"
] | null | null | null | python3/TableCloumnInfo.py | shengpli/LearnPython | 8e22afa9dc5b2a9e26c9f3e7ef9eb85196fd1559 | [
"Apache-2.0"
] | null | null | null | class tablecloumninfo:
col_name=""
data_type=""
comment=""
    def __init__(self,col_name,data_type,comment):
        """Record one table column: its name, SQL data type, and comment text."""
        self.col_name=col_name
        self.data_type=data_type
        self.comment=comment
| 18.166667 | 50 | 0.655963 | 215 | 0.986239 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0.027523 |
21ac52aabd54ed388edac1605b21259e6ba60313 | 2,620 | py | Python | pyjobserver/__main__.py | athewsey/pyjobserver | 1251a0f22182c8bc8b55a85ef45adc7c1e2b982e | [
"Apache-2.0"
] | null | null | null | pyjobserver/__main__.py | athewsey/pyjobserver | 1251a0f22182c8bc8b55a85ef45adc7c1e2b982e | [
"Apache-2.0"
] | null | null | null | pyjobserver/__main__.py | athewsey/pyjobserver | 1251a0f22182c8bc8b55a85ef45adc7c1e2b982e | [
"Apache-2.0"
] | null | null | null | """Main/example start-up script for the pyjobserver
Use this as a guide if importing pyjobserver into another app instead
"""
# Built-Ins:
import asyncio
from logging import getLogger, Logger
import os
from pathlib import Path
# External Dependencies:
from aiohttp import web
import click
from dotenv import load_dotenv
# Local Dependencies:
from .access_control import get_authentication_middleware
from .config import load as load_config, Config
from .jobs.example import example_job_fn
from .runner import JobRunner
# (Only entry point scripts should load dotenvs)
load_dotenv(os.getcwd() + "/.env")
async def alive_handler(request) -> web.Response:
    """Liveness probe: always responds with JSON ``{"ok": true}``."""
    payload = {"ok": True}
    return web.json_response(payload)
async def init_app(config: Config, LOGGER: Logger):
    """Create and wire up the aiohttp application instance.

    Mounts the liveness handler at ``/``, registers the example job
    handler on a JobRunner, and mounts the runner's sub-application
    (with authentication middleware, when configured) under ``/api``.

    :param config: loaded application configuration
    :param LOGGER: logger passed through to the aiohttp application
    :return: the configured aiohttp application instance
    """
    app = web.Application(logger=LOGGER)
    app.router.add_get("/", alive_handler)
    # May be None when the configuration disables authentication.
    authentication_middleware = get_authentication_middleware(config)
    runner = JobRunner(config)
    # ADD YOUR JOB TYPES LIKE THIS:
    # The job function must be conformant including the correct signature type annotations.
    runner.register_job_handler("example", example_job_fn)
    runner_app = await runner.webapp(middlewares=[authentication_middleware] if authentication_middleware else None)
    app.add_subapp("/api", runner_app)
    return app
# Note we need to separate out the main_coro from main() because click (our command line args processor) can't decorate
# async functions
async def main_coro(manifest: str):
    """Initialise the application and start serving it.

    Loads configuration (optionally from *manifest*), builds the app via
    init_app, and starts a TCP site on the configured port. The site is
    left running; the caller keeps the event loop alive.

    :param manifest: optional manifest file path relative to the CWD
        (empty string means "no manifest").
    """
    config = await load_config(Path(manifest) if manifest else None)
    LOGGER = getLogger(__name__)
    app = await init_app(config, LOGGER)
    runner = web.AppRunner(app, handle_signals=True)
    await runner.setup()
    site = web.TCPSite(runner, port=config.server.port)
    await site.start()
    LOGGER.info("Server running on port %i", config.server.port)
    # TODO: Are we supposed to expose the runner somehow to clean up on shutdown?
    #await runner.cleanup()
@click.command()
@click.option("--manifest", default="", help="Location of (optional) manifest file relative to current working dir")
def main(manifest: str):
    """Command-line entry point: start the server and block forever."""
    event_loop = asyncio.get_event_loop()
    event_loop.run_until_complete(main_coro(manifest))
    event_loop.run_forever()
if __name__ == "__main__":
    # PyLint flags a missing argument here because it does not understand
    # that the click decorator supplies `manifest` from the command line.
    main()  # pylint: disable=no-value-for-parameter
| 32.75 | 119 | 0.743511 | 0 | 0 | 0 | 0 | 266 | 0.101527 | 1,434 | 0.547328 | 1,072 | 0.40916 |
21acb4fa80b3916f001211cac88508c8d9ee7743 | 492 | py | Python | dowhy/graph_learner.py | leo-ware/dowhy | 3a2a79e2159a7f29456dd419a3c90395a384364e | [
"MIT"
] | 2,904 | 2019-05-07T08:09:33.000Z | 2022-03-31T18:28:41.000Z | dowhy/graph_learner.py | leo-ware/dowhy | 3a2a79e2159a7f29456dd419a3c90395a384364e | [
"MIT"
] | 238 | 2019-05-11T02:57:22.000Z | 2022-03-31T23:47:18.000Z | dowhy/graph_learner.py | leo-ware/dowhy | 3a2a79e2159a7f29456dd419a3c90395a384364e | [
"MIT"
] | 527 | 2019-05-08T16:23:45.000Z | 2022-03-30T21:02:41.000Z | class GraphLearner:
"""Base class for causal discovery methods.
Subclasses implement different discovery methods. All discovery methods are in the package "dowhy.causal_discoverers"
"""
def __init__(self, data, library_class, *args, **kwargs):
self._data = data
self._labels = list(self._data.columns)
self._adjacency_matrix = None
self._graph_dot = None
def learn_graph(self):
'''
Discover causal graph and the graph in DOT format.
'''
raise NotImplementedError
| 23.428571 | 118 | 0.739837 | 491 | 0.997967 | 0 | 0 | 0 | 0 | 0 | 0 | 232 | 0.471545 |
21acbe7b1a842e9cd72e943d539706693b47c59c | 16,227 | py | Python | ppci/wasm/_instantiate.py | jsdelivrbot/ppci-mirror | 67195d628275e2332ceaf44c9e13fc58d0877157 | [
"BSD-2-Clause"
] | null | null | null | ppci/wasm/_instantiate.py | jsdelivrbot/ppci-mirror | 67195d628275e2332ceaf44c9e13fc58d0877157 | [
"BSD-2-Clause"
] | null | null | null | ppci/wasm/_instantiate.py | jsdelivrbot/ppci-mirror | 67195d628275e2332ceaf44c9e13fc58d0877157 | [
"BSD-2-Clause"
] | null | null | null | """ Provide function to load a wasm module into the current process.
Note that for this to work, we require compiled wasm code and a runtime.
The wasm runtime contains the following:
- Implement function like sqrt, floor, bit rotations etc..
"""
import os
import abc
import shelve
import io
import struct
import logging
from types import ModuleType
from ..arch.arch_info import TypeInfo
from ..utils.codepage import load_obj, MemoryPage
from ..utils.reporting import DummyReportGenerator
from ..irutils import verify_module
from .. import ir
from . import wasm_to_ir
from .components import Export, Import
from .wasm2ppci import create_memories
from .util import PAGE_SIZE
from .runtime import create_runtime
__all__ = ('instantiate',)
logger = logging.getLogger('instantiate')
def instantiate(module, imports, target='native', reporter=None,
        cache_file=None):
    """ Instantiate a wasm module.

    Args:
        module (ppci.wasm.Module): The wasm-module to instantiate
        imports: A collection of functions available to the wasm module,
            as a nested mapping: {module_name: {object_name: object}}.
        target: Use 'native' to compile wasm to machine code.
            Use 'python' to generate python code. This option is slower
            but more reliable.
        reporter: A reporter which can record detailed compilation information.
        cache_file: a file to use as cache

    Raises:
        ValueError: when a required import is missing, when the caller
            supplies the reserved 'wasm_rt' import section, or when
            *target* is not 'native' or 'python'.
    """
    if reporter is None:
        reporter = DummyReportGenerator()

    reporter.heading(2, 'Wasm instantiation')

    # Check if all required imports are given:
    for definition in module:
        if isinstance(definition, Import):
            modname, name = definition.modname, definition.name
            if modname not in imports:
                raise ValueError(
                    'imported module "{}" not found'.format(modname))
            if name not in imports[modname]:
                raise ValueError(
                    'imported object "{}" not found in "{}"'.format(
                        name, modname))

    # Inject wasm runtime functions:
    if 'wasm_rt' in imports:
        raise ValueError('wasm_rt is a special import section')
    imports = imports.copy()  # otherwise we'd render the imports unusable
    imports['wasm_rt'] = create_runtime()

    imports = flatten_imports(imports)

    if target == 'native':
        instance = native_instantiate(module, imports, reporter, cache_file)
    elif target == 'python':
        instance = python_instantiate(module, imports, reporter, cache_file)
    else:
        raise ValueError('Unknown instantiation target {}'.format(target))

    # Call magic function _run_init which initializes tables and optionally
    # calls start function as defined by the wasm start section.
    instance._run_init()
    return instance
def native_instantiate(module, imports, reporter, cache_file):
    """ Load wasm module native.

    Compiles the wasm module to machine code for the current
    architecture, loads it into the running process, and wires up
    the exported functions, globals and memories.
    """
    from ..api import ir_to_object, get_current_arch
    logger.info('Instantiating wasm module as native code')
    arch = get_current_arch()
    # Intended cache key for the compiled object (not used yet, see below):
    key = (arch, module)
    # TODO: think of clever caching trickery:
    # NOTE: caching is deliberately disabled here (the cache_file argument
    # is overridden) until a proper hash of the key is implemented.
    cache_file = None
    if cache_file and os.path.exists(cache_file):
        logger.info('Using cached object from %s', cache_file)
        with shelve.open(cache_file) as s:
            obj = s['obj']
            ppci_module = s['ppci_module']
    else:
        # TODO: use cache here to short circuit re-compilation
        # hash(key)
        ppci_module = wasm_to_ir(
            module, arch.info.get_type_info('ptr'), reporter=reporter)
        verify_module(ppci_module)
        obj = ir_to_object([ppci_module], arch, debug=True, reporter=reporter)
        if cache_file:
            logger.info('Saving object to %s for later use', cache_file)
            with shelve.open(cache_file) as s:
                s['obj'] = obj
                s['ppci_module'] = ppci_module

    instance = NativeModuleInstance(obj, imports)
    instance.load_memory(module)

    # Export all exported functions
    for definition in module:
        if isinstance(definition, Export):
            if definition.kind == 'func':
                exported_name = ppci_module._wasm_function_names[definition.ref.index]
                instance.exports._function_map[definition.name] = \
                    getattr(instance._code_module, exported_name)
            elif definition.kind == 'global':
                global_name = ppci_module._wasm_globals[definition.ref.index]
                instance.exports._function_map[definition.name] = \
                    NativeWasmGlobal(global_name, instance._code_module)
                logger.debug('global exported')
            elif definition.kind == 'memory':
                memory = instance._memories[definition.ref.index]
                instance.exports._function_map[definition.name] = memory
                logger.debug('memory exported')
            else:
                raise NotImplementedError(definition.kind)

    return instance
def python_instantiate(module, imports, reporter, cache_file):
    """ Load wasm module as a PythonModuleInstance.

    Translates the wasm module into python source, executes it in a
    fresh module namespace and wires up the exported functions,
    globals and memories. Slower than native, but more portable.
    NOTE: cache_file is currently unused on this path.
    """
    from ..api import ir_to_python
    logger.info('Instantiating wasm module as python')
    # 32-bit pointers for the python-emulated memory:
    ptr_info = TypeInfo(4, 4)
    ppci_module = wasm_to_ir(module, ptr_info, reporter=reporter)
    verify_module(ppci_module)
    f = io.StringIO()
    ir_to_python([ppci_module], f, reporter=reporter)
    pysrc = f.getvalue()
    # Compile the generated source and execute it inside a throwaway module:
    pycode = compile(pysrc, '<string>', 'exec')
    _py_module = ModuleType('gen')
    exec(pycode, _py_module.__dict__)
    instance = PythonModuleInstance(_py_module, imports)

    # Initialize memory:
    instance.load_memory(module)

    # Export all exported functions
    for definition in module:
        if isinstance(definition, Import):
            pass
            # TODO: maybe validate imported functions?
        elif isinstance(definition, Export):
            if definition.kind == 'func':
                exported_name = ppci_module._wasm_function_names[definition.ref.index]
                instance.exports._function_map[definition.name] = \
                    getattr(instance._py_module, exported_name)
            elif definition.kind == 'global':
                global_name = ppci_module._wasm_globals[definition.ref.index]
                instance.exports._function_map[definition.name] = \
                    PythonWasmGlobal(global_name, instance)
                logger.debug('global exported')
            elif definition.kind == 'memory':
                memory = instance._memories[definition.ref.index]
                instance.exports._function_map[definition.name] = memory
                logger.debug('memory exported')
            else:
                raise NotImplementedError(definition.kind)

    return instance
def flatten_imports(imports):
    """ Flatten a two-level {module: {name: obj}} dict into a single
    level keyed as "module_name". """
    return {
        '{}_{}'.format(module, member): obj
        for module, members in imports.items()
        for member, obj in members.items()
    }
class ModuleInstance:
    """ Web assembly module instance.

    Base class holding the exported names and the instantiated
    memories; concrete subclasses back these with native machine code
    or generated python code.

    Note: the original code had a second bare string statement
    ("Instantiated module") after the docstring; only the first
    class-body string is the docstring, so it was dead and is merged here.
    """

    def __init__(self):
        # Exported functions/globals/memories, filled during instantiation.
        self.exports = Exports()
        # WasmMemory objects owned by this module (wasm allows one for now).
        self._memories = []

    def memory_size(self) -> int:
        """ return memory size in pages """
        # TODO: idea is to have multiple memories and query the memory:
        memory_index = 0
        memory = self._memories[memory_index]
        return memory.memory_size()
class NativeModuleInstance(ModuleInstance):
    """ Wasm module loaded as natively compiled code. """

    def __init__(self, obj, imports):
        super().__init__()
        # Expose the runtime memory hooks to the compiled code:
        imports['wasm_rt_memory_grow'] = self.memory_grow
        imports['wasm_rt_memory_size'] = self.memory_size
        self._code_module = load_obj(obj, imports=imports)

    def _run_init(self):
        """ Run the module's init code (tables / start function). """
        self._code_module._run_init()

    def memory_size(self) -> int:
        """ return memory size in pages """
        return self._data_page.size // PAGE_SIZE

    def memory_grow(self, amount: int) -> int:
        """ Grow memory and return the old size.

        Current strategy:
        - claim new memory
        - copy all data
        - free old memory
        - update wasm memory base pointer

        Returns -1 when the request exceeds the sanity/max bounds.
        """
        max_size = self._memories[0].max_size
        old_size = self.memory_size()
        new_size = old_size + amount

        # Keep memory within sensible bounds:
        if new_size >= 0x10000:
            return -1

        if max_size is not None and new_size > max_size:
            return -1

        # Read old data:
        self._data_page.seek(0)
        old_data = self._data_page.read()

        # Create new page and fill with old data:
        self._data_page = MemoryPage(new_size * PAGE_SIZE)
        self._data_page.write(old_data)

        # Update pointer:
        self.set_mem_base_ptr(self._data_page.addr)
        return old_size

    def load_memory(self, module):
        """ Allocate the wasm linear memory and publish its base address. """
        memories = create_memories(module)
        if memories:
            assert len(memories) == 1
            memory, min_size, max_size = memories[0]
            self._data_page = MemoryPage(len(memory))
            self._data_page.write(memory)
            mem0 = NativeWasmMemory(min_size, max_size)
            # The memory object delegates all access to this instance:
            mem0._instance = self
            self._memories.append(mem0)
            base_addr = self._data_page.addr
            self.set_mem_base_ptr(base_addr)

    def set_mem_base_ptr(self, base_addr):
        """ Set memory base address """
        baseptr = self._code_module.get_symbol_offset('wasm_mem0_address')
        # Fixed: removed a leftover debug print of baseptr.
        # TODO: major hack:
        # TODO: too many assumptions made here (64 bit little-endian 'Q') ...
        self._code_module._data_page.seek(baseptr)
        self._code_module._data_page.write(struct.pack('Q', base_addr))
class WasmGlobal(metaclass=abc.ABCMeta):
    """ Abstract handle to a wasm global variable. """

    def __init__(self, name):
        # Identifier of the global; subclasses index it as a
        # (type, symbol) pair — see their read/write implementations.
        self.name = name

    @abc.abstractmethod
    def read(self):
        """ Return the current value of the global. """
        raise NotImplementedError()

    @abc.abstractmethod
    def write(self, value):
        """ Overwrite the value of the global. """
        raise NotImplementedError()


# TODO: we might implement the descriptor protocol in some way?
class PythonWasmGlobal(WasmGlobal):
    """ Global variable of a wasm module instantiated as python code. """

    def __init__(self, name, memory):
        super().__init__(name)
        # Owning PythonModuleInstance. The parameter is named `memory`
        # but python_instantiate passes the instance itself.
        self.instance = memory

    def _get_ptr(self):
        # name is a (ir-type, symbol) pair; the symbol name resolves to
        # the global's address inside the generated python module.
        addr = getattr(self.instance._py_module, self.name[1].name)
        return addr

    def read(self):
        """ Return the current value of the global. """
        addr = self._get_ptr()
        mp = {
            ir.i32: self.instance._py_module.load_i32,
            ir.i64: self.instance._py_module.load_i64,
        }
        f = mp[self.name[0]]
        return f(addr)

    def write(self, value):
        """ Overwrite the value of the global.

        NOTE(review): the generated python module appears to expose
        store_i32/store_i64 (see PythonModuleInstance.load_memory),
        not write_i32/write_i64 — this path likely raises
        AttributeError. Also verify the argument order: store_i32 is
        called elsewhere as (value, address). TODO: confirm and fix.
        """
        addr = self._get_ptr()
        mp = {
            ir.i32: self.instance._py_module.write_i32,
            ir.i64: self.instance._py_module.write_i64,
        }
        f = mp[self.name[0]]
        f(addr, value)
class NativeWasmGlobal(WasmGlobal):
    """ Global variable of a natively compiled wasm module. """

    def __init__(self, name, memory):
        super().__init__(name)
        # Loaded code module that owns the pointer to this global.
        self._code_obj = memory

    def _get_ptr(self):
        """ Fetch the pointer object backing this global. """
        return getattr(self._code_obj, self.name[1].name)

    def read(self):
        """ Return the current value of the global. """
        return self._get_ptr().contents.value

    def write(self, value):
        """ Overwrite the value of the global. """
        self._get_ptr().contents.value = value
class WasmMemory(metaclass=abc.ABCMeta):
    """ Abstract linear memory of a wasm module instance.

    Supports slice-based byte access: ``mem[a:b]`` reads and
    ``mem[a:b] = data`` writes ``b - a`` bytes at offset ``a``.
    """

    def __init__(self, min_size, max_size):
        # Minimum / maximum size in wasm pages; max_size may be None
        # for an unbounded memory.
        self.min_size = min_size
        self.max_size = max_size

    @staticmethod
    def _parse_location(location):
        """ Turn a slice into an (address, size) pair.

        ``mem[a:b]`` addresses ``b - a`` bytes at ``a``; the degenerate
        ``mem[:b]`` form addresses a single byte at ``b`` (preserved
        from the original duplicated logic).
        """
        assert isinstance(location, slice)
        assert location.step is None
        if location.start is None:
            return location.stop, 1
        return location.start, location.stop - location.start

    def __setitem__(self, location, data):
        """ Write ``data`` into the given slice of memory. """
        address, size = self._parse_location(location)
        assert len(data) == size
        self.write(address, data)

    def __getitem__(self, location):
        """ Read and return the bytes in the given slice of memory. """
        address, size = self._parse_location(location)
        data = self.read(address, size)
        assert len(data) == size
        return data

    @abc.abstractmethod
    def write(self, address, data):
        """ Write ``data`` at ``address``. """
        raise NotImplementedError()

    @abc.abstractmethod
    def read(self, address, size):
        """ Read ``size`` bytes at ``address``. """
        raise NotImplementedError()
class NativeWasmMemory(WasmMemory):
    """ Native wasm memory emulation.

    All access is delegated to the owning NativeModuleInstance
    (``self._instance``, attached in load_memory), which holds the
    backing MemoryPage.
    """

    def memory_size(self) -> int:
        """ return memory size in pages """
        # Bugfix: the backing MemoryPage lives on the owning instance;
        # this object never gets a _data_page attribute of its own
        # (see NativeModuleInstance.load_memory), so the previous
        # `self._data_page` raised AttributeError.
        return self._instance._data_page.size // PAGE_SIZE

    def write(self, address, data):
        """ Write ``data`` at ``address`` into the memory page. """
        self._instance._data_page.seek(address)
        self._instance._data_page.write(data)

    def read(self, address, size):
        """ Read ``size`` bytes at ``address`` from the memory page. """
        self._instance._data_page.seek(address)
        data = self._instance._data_page.read(size)
        assert len(data) == size
        return data
class PythonWasmMemory(WasmMemory):
    """ Python wasm memory emulation. """
    # NOTE: self._module is attached externally in
    # PythonModuleInstance.load_memory (see the HACK comment there).

    def write(self, address, data):
        """ Write data into the python-emulated heap. """
        # Translate wasm address to an offset within the shared heap:
        address = self._module.mem0_start + address
        self._module._py_module.write_mem(address, data)

    def read(self, address, size):
        """ Read size bytes from the python-emulated heap. """
        address = self._module.mem0_start + address
        data = self._module._py_module.read_mem(address, size)
        assert len(data) == size
        return data
class PythonModuleInstance(ModuleInstance):
    """ Wasm module loaded as a generated python module. """

    def __init__(self, module, imports):
        super().__init__()
        self._py_module = module
        # Current end of the emulated heap:
        self.mem_end = self._py_module.heap_top()

        # Magical python memory interface, add it now:
        imports['wasm_rt_memory_grow'] = self.memory_grow
        imports['wasm_rt_memory_size'] = self.memory_size

        # Link all imports:
        for name, f in imports.items():
            # TODO: make a choice between those two options:
            # gen_rocket_wasm.externals[name] = f
            setattr(self._py_module, name, f)

    def _run_init(self):
        """ Run the module's init code (tables / start function). """
        self._py_module._run_init()

    def load_memory(self, module):
        """ Allocate the wasm linear memory on the emulated heap. """
        memories = create_memories(module)
        if memories:
            assert len(memories) == 1
            memory, min_size, max_size = memories[0]
            self.mem0_start = self._py_module.heap_top()
            self._py_module.heap.extend(memory)
            # Publish the memory base address into the generated module:
            mem0_ptr_ptr = self._py_module.wasm_mem0_address
            self._py_module.store_i32(self.mem0_start, mem0_ptr_ptr)
            mem0 = PythonWasmMemory(min_size, max_size)
            # TODO: HACK HACK HACK:
            mem0._module = self
            self._memories.append(mem0)

    def memory_grow(self, amount):
        """ Grow memory and return the old size (in pages);
        returns -1 when the request exceeds the bounds. """
        # Limit the bounds of memory somewhat:
        if amount >= 0x10000:
            return -1
        else:
            max_size = self._memories[0].max_size
            old_size = self.memory_size()
            new_size = old_size + amount
            if max_size is not None and new_size > max_size:
                return -1
            else:
                self._py_module.heap.extend(bytes(amount * PAGE_SIZE))
                return old_size

    def memory_size(self):
        """ return memory size in pages """
        size = (self._py_module.heap_top() - self.mem0_start) // PAGE_SIZE
        return size
class Exports:
    """ Container for exported functions.

    Exports are reachable both by indexing (``exports['name']``) and
    as attributes (``exports.name``).

    Note: this docstring was previously a dead string statement placed
    after __init__ (only the first class-body string is a docstring);
    it is now in the proper position.
    """

    def __init__(self):
        # Mapping of export name -> exported function/global/memory.
        self._function_map = {}

    def __getitem__(self, key):
        assert isinstance(key, str)
        return self._function_map[key]

    def __getattr__(self, name):
        # Only invoked for attributes not found via normal lookup, so
        # regular attributes like _function_map are unaffected.
        if name in self._function_map:
            return self._function_map[name]
        else:
            raise AttributeError('Name "{}" was not exported'.format(name))
| 33.596273 | 86 | 0.628397 | 9,023 | 0.556049 | 0 | 0 | 339 | 0.020891 | 0 | 0 | 3,484 | 0.214704 |
21acea2332e84f0ed16b289cb54ead0afbf1565f | 2,823 | py | Python | easytrader/utils/stock.py | chforest/easytrader | 7825efa90aa6af6a5f181a0736dc8c3e8ed852e5 | [
"MIT"
] | 6,829 | 2015-12-07T16:40:17.000Z | 2022-03-31T15:27:03.000Z | easytrader/utils/stock.py | chforest/easytrader | 7825efa90aa6af6a5f181a0736dc8c3e8ed852e5 | [
"MIT"
] | 350 | 2016-01-18T09:13:27.000Z | 2022-03-21T06:56:57.000Z | easytrader/utils/stock.py | chforest/easytrader | 7825efa90aa6af6a5f181a0736dc8c3e8ed852e5 | [
"MIT"
] | 2,599 | 2015-12-08T02:09:04.000Z | 2022-03-30T13:33:50.000Z | # coding:utf-8
import datetime
import json
import random
import requests
def get_stock_type(stock_code):
    """Determine which exchange a stock code belongs to.

    Codes already prefixed with 'sh' or 'sz' return that prefix
    directly; otherwise the numeric prefix decides. Codes starting
    with '5', '6' or '9' default to Shanghai, everything else to
    Shenzhen.

    :param stock_code: stock id, optionally prefixed with 'sh'/'sz'
    :return: 'sh' or 'sz'
    """
    code = str(stock_code)
    if code.startswith(("sh", "sz")):
        return code[:2]
    sh_prefixes = ("50", "51", "60", "73", "90", "110", "113", "132", "204", "78")
    sz_prefixes = ("00", "13", "18", "15", "16", "18", "20", "30", "39", "115", "1318")
    if code.startswith(sh_prefixes):
        return "sh"
    if code.startswith(sz_prefixes):
        return "sz"
    return "sh" if code.startswith(("5", "6", "9")) else "sz"
def get_30_date():
    """
    Return the default query date range as (start, end) strings in the
    usual query format, e.g. 20160211: end is today, start is 30 days
    earlier.
    """
    today = datetime.datetime.now().date()
    month_ago = today - datetime.timedelta(days=30)
    return month_ago.strftime("%Y%m%d"), today.strftime("%Y%m%d")
def get_today_ipo_data():
    """
    Query the new (IPO) stocks open for subscription today.

    :return: list of today's subscribable IPOs; each entry holds the
        stock code and name, the apply_code (subscription code) and
        the price (issue price)
    """
    agent = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:43.0) Gecko/20100101 Firefox/43.0"
    send_headers = {
        "Host": "xueqiu.com",
        "User-Agent": agent,
        "Accept": "application/json, text/javascript, */*; q=0.01",
        "Accept-Language": "zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3",
        "Accept-Encoding": "deflate",
        "Cache-Control": "no-cache",
        "X-Requested-With": "XMLHttpRequest",
        "Referer": "https://xueqiu.com/hq",
        "Connection": "keep-alive",
    }

    # Random timestamp-like token used as a cache buster in the query url:
    timestamp = random.randint(1000000000000, 9999999999999)
    home_page_url = "https://xueqiu.com"
    ipo_data_url = (
        "https://xueqiu.com/proipo/query.json?column=symbol,name,onl_subcode,onl_subbegdate,actissqty,onl"
        "_actissqty,onl_submaxqty,iss_price,onl_lotwiner_stpub_date,onl_lotwinrt,onl_lotwin_amount,stock_"
        "income&orderBy=onl_subbegdate&order=desc&stockType=&page=1&size=30&_=%s"
        % (str(timestamp))
    )

    session = requests.session()
    session.get(home_page_url, headers=send_headers)  # establish session cookies
    ipo_response = session.post(ipo_data_url, headers=send_headers)
    json_data = json.loads(ipo_response.text)
    today_ipo = []
    for line in json_data["data"]:
        # line[3] appears to be the subscription begin date string.
        # NOTE(review): strftime("%a %b %d") is locale dependent; this
        # comparison assumes an English locale — confirm.
        if datetime.datetime.now().strftime("%a %b %d") == line[3][:10]:
            today_ipo.append(
                {
                    "stock_code": line[0],
                    "stock_name": line[1],
                    "apply_code": line[2],
                    "price": line[7],
                }
            )

    return today_ipo
| 30.684783 | 106 | 0.575275 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,538 | 0.503108 |
21ad39e08cd9eb28d218baa876962eb7e1bf5352 | 13,370 | py | Python | IR analysis.py | jankulik/Transition-Line-Detection | 26775b7a3b6ab4f7a487c488cc1e97708277a18e | [
"MIT"
] | null | null | null | IR analysis.py | jankulik/Transition-Line-Detection | 26775b7a3b6ab4f7a487c488cc1e97708277a18e | [
"MIT"
] | null | null | null | IR analysis.py | jankulik/Transition-Line-Detection | 26775b7a3b6ab4f7a487c488cc1e97708277a18e | [
"MIT"
] | null | null | null | import numpy as np
import cv2
import os
from matplotlib import pyplot as plt
#### INPUT ####
# folder that contains datapoints
folderName = '2dIR'
#### SETTINGS ####
# settings listed below are suitable for 2D data
# intensity of noise filtering; higher values mean more blurring
medianKernel = 5
# blurring radius in x and y direction; higher values mean more blurring; note: these values need to be odd
gaussian_x = 9
gaussian_y = 181
# decay blurring strength; higher values mean blurring will be more focused on the center
gaussianSigma = 60
# number of pixels that are averaged on both sides when iterating over each pixel in a row
pixelsAveraged1 = 10
# number of pixels that are averaged on both sides when iterating over pixels closer to the leading edge; this number should be smaller than pixelsAveraged1 since higher precision is needed
pixelsAveraged2 = 6
# vertical range of pixels considered when determining transition line; range is selected so that noise at the root and the tip is disregarded
rangeVer = (40, 400)
# maximal fraction of standard deviation for the point to be included during filtering
maxStd = 0.9
# minimal fraction of the points left after filtering for the line to be considered as transition line
minFiltered = 0.5
# critical angle at which the line closest to the leading edge is considered to be the transition line
criticalAngle = 7.5
# margin of averaged pixels between the leading edge and detected transition points
margin = 2
# minimal average difference of the more aft lines to be considered as transition line
minDifference1 = 4.68
# minimal average difference of the more forward lines to be considered as transition line
minDifference2 = 3.1
# width of the cropped image
width = 360
# settings listed below are suitable for 3D data
# medianKernel = 5
# gaussian_x = 9
# gaussian_y = 181
# gaussianSigma = 60
# pixelsAveraged1 = 9
# pixelsAveraged2 = 6
# rangeVer = (40, 400)
# maxStd = 1.5
# minFiltered = 0.5
# criticalAngle = 9.5
# margin = 2
# minDifference1 = 3.84
# minDifference2 = 3.1
# width = 360
# processing image
def findTransition(data, angle):
    """Locate the laminar-turbulent transition line in one IR frame.

    :param data: 2D array of raw IR intensity values (one camera frame)
    :param angle: angle of attack of this measurement; used both for
        selecting between candidate lines and for annotating the image
    :return: chordwise transition location x/c

    Side effect: writes an annotated image into the 'Images' folder.
    """
    # removing NaN values from the array
    data = data[:, ~np.isnan(data).all(axis=0)]

    # normalising data
    data = ((data - np.amin(data)) / (np.amax(data) - np.amin(data)) * 255)

    # converting to pixel data
    data = data.astype(np.uint8)

    # processing data using median and gaussian blur
    blurred = cv2.medianBlur(data, medianKernel)
    blurred = cv2.GaussianBlur(blurred, (gaussian_x, gaussian_y), gaussianSigma)

    # creating empty arrays to store locations of edges and potential transitions
    edges = np.zeros((len(blurred), 2), dtype=int)
    edge = (0, 0)
    differencesVer = np.zeros((len(blurred), 3))
    transitions1 = np.zeros((len(blurred), 2), dtype=int)
    transitions2 = np.zeros((len(blurred), 2), dtype=int)

    # iterating over each row of pixels
    for i in range(len(blurred)):
        # iterating over each pixel in a row and calculating differences between pixels to the right and to the left
        differencesHor1 = np.zeros(len(blurred[i]))
        for j in range(len(blurred[i])):
            if j - pixelsAveraged1 >= 0 and j + pixelsAveraged1 <= len(blurred[i]):
                differencesHor1[j] = np.absolute(np.average(blurred[i, j - pixelsAveraged1:j]) - np.average(blurred[i, j:j + pixelsAveraged1]))

        # selecting two locations where differences are the highest
        edges[i, 0] = np.argmax(differencesHor1)
        for j in range(len(differencesHor1)):
            if differencesHor1[j] > differencesHor1[edges[i, 1]] and np.absolute(edges[i, 0] - j) > pixelsAveraged1:
                edges[i, 1] = j
        edges = np.sort(edges, axis=1)

        # averaging the detected locations to determine position of the edges
        edge = int(np.average(edges[rangeVer[0]:rangeVer[1], 0])), int(np.average([edges[rangeVer[0]:rangeVer[1], 1]]))

        # iterating over each pixel between edges and calculating differences between pixels to the right and to the left
        differencesHor1 = np.zeros(len(blurred[i]))
        for j in range(len(blurred[i])):
            if edges[i, 0] + 2 * pixelsAveraged1 <= j <= edges[i, 1] - margin * pixelsAveraged1:
                differencesHor1[j] = np.absolute(np.average(blurred[i, j - pixelsAveraged1:j]) - np.average(blurred[i, j:j + pixelsAveraged1]))

        # selecting two locations where differences are the highest
        transitions1[i, 0] = np.argmax(differencesHor1)
        for j in range(len(differencesHor1)):
            if differencesHor1[j] > differencesHor1[transitions1[i, 1]] and np.absolute(transitions1[i, 0] - j) > 3 * pixelsAveraged1:
                transitions1[i, 1] = j
        transitions1 = np.sort(transitions1, axis=1)

        # iterating over pixels closer to the leading edge and calculating differences between pixels to the right and to the left
        differencesHor2 = np.zeros(len(blurred[i]))
        for j in range(len(blurred[i])):
            if edges[i, 0] + 10 * pixelsAveraged2 <= j <= edges[i, 1] - pixelsAveraged2:
                differencesHor2[j] = np.absolute(np.average(blurred[i, j - pixelsAveraged2:j]) - np.average(blurred[i, j:j + pixelsAveraged2]))

        # selecting two locations where differences are the highest
        transitions2[i, 0] = np.argmax(differencesHor2)
        for j in range(len(differencesHor2)):
            if differencesHor2[j] > differencesHor2[transitions2[i, 1]] and np.absolute(transitions2[i, 0] - j) > pixelsAveraged2:
                transitions2[i, 1] = j
        transitions2 = np.sort(transitions2, axis=1)

        # saving maximal horizontal differences to calculate vertical differences
        differencesVer[i, 0] = differencesHor1[transitions1[i, 0]]
        differencesVer[i, 1] = differencesHor1[transitions1[i, 1]]
        differencesVer[i, 2] = differencesHor2[transitions2[i, 0]]

    # cropping locations of transitions and vertical differences
    transitions1 = transitions1[rangeVer[0]:rangeVer[1], :]
    transitions2 = transitions2[rangeVer[0]:rangeVer[1], :]
    differencesVer = differencesVer[rangeVer[0]:rangeVer[1], :]

    # calculating average and standard deviation of the first detected transition line
    transitions1Avg = np.average(transitions1[:, 0])
    transitions1Std = np.std(transitions1[:, 0])

    # filtering locations that are too far from the average
    transitions1Filtered = []
    for i in range(len(transitions1)):
        if round(transitions1Avg - maxStd * transitions1Std) <= transitions1[i, 0] <= round(transitions1Avg + maxStd * transitions1Std):
            transitions1Filtered.append(transitions1[i, 0])

    # calculating average and standard deviation of the second detected transition line
    transitions2Avg = np.average(transitions1[:, 1])
    transitions2Std = np.std(transitions1[:, 1])

    # filtering locations that are too far from the average
    transitions2Filtered = []
    for i in range(len(transitions1)):
        if round(transitions2Avg - maxStd * transitions2Std) <= transitions1[i, 1] <= round(transitions2Avg + maxStd * transitions2Std):
            transitions2Filtered.append(transitions1[i, 1])

    # calculating average and standard deviation of the third detected transition line
    transitions3Avg = [np.average(transitions2[:, 0]), np.average(transitions2[:, 1])]
    transitions3Std = [np.std(transitions2[:, 0]), np.std(transitions2[:, 1])]

    # filtering locations that are too far from the average
    transitions3Filtered = []
    for i in range(len(transitions2)):
        if round(transitions3Avg[0] - maxStd * transitions3Std[0]) <= transitions2[i, 0] <= round(transitions3Avg[0] + maxStd * transitions3Std[0]) \
                and round(transitions3Avg[1] - maxStd * transitions3Std[1]) <= transitions2[i, 1] <= round(transitions3Avg[1] + maxStd * transitions3Std[1]):
            transitions3Filtered.append(np.average(transitions2[i, :]))

    # calculating the average of vertical differences for each transition line
    differences = np.zeros(3)
    differences[0] = np.average(differencesVer[:, 0])
    differences[1] = np.average(differencesVer[:, 1])
    differences[2] = np.average(differencesVer[:, 2])

    # choosing one of the three detected lines
    if differences[0] >= minDifference1 and len(transitions1Filtered) > minFiltered * (rangeVer[1] - rangeVer[0]) and angle < criticalAngle:
        transition = round(np.average(transitions1Filtered))
    elif differences[1] >= minDifference1 and len(transitions2Filtered) > minFiltered * (rangeVer[1] - rangeVer[0]) and angle < criticalAngle:
        transition = round(np.average(transitions2Filtered))
    elif differences[2] >= minDifference2:
        transition = round(np.average(transitions3Filtered))
    else:
        # no candidate qualified: fall back to the second edge (x/c = 0)
        transition = edge[1]

    # printing parameters for debugging
    # print('Differences 1: ' + differences[0])
    # print('Differences 2: ' + differences[1])
    # print('Differences 3: ' + differences[2])
    # print('Length of filtered transitions 1:' + str(len(transitions1Filtered)))
    # print('Length of filtered transitions 1:' + str(len(transitions2Filtered)))
    # print('Length of filtered transitions 1:' + str(len(transitions3Filtered)))

    # calculating the location of transition as percentage of chord length
    XC = 1 - ((transition - edge[0]) / (edge[1] - edge[0]))

    # printing edges and transition line on the generated image
    for i in range(len(data)):
        data[i, edge[0] - 1:edge[0] + 1] = 0
        data[i, edge[1] - 1:edge[1] + 1] = 0
        data[i, transition - 1:transition + 1] = 0
        # data[i, edges[i, 0] - 1:edges[i, 0] + 1] = 0
        # data[i, edges[i, 1] - 1:edges[i, 1] + 1] = 0

    # printing detected lines on the generated image
    # for i in range(len(transitions1)):
    #     data[i + rangeVer[0], transitions1[i, 0] - 1:transitions1[i, 0] + 1] = 0
    #     data[i + rangeVer[0], transitions1[i, 1] - 1:transitions1[i, 1] + 1] = 0
    #     data[i + rangeVer[0], transitions2[i, 0] - 1:transitions2[i, 0] + 1] = 0
    #     data[i + rangeVer[0], transitions2[i, 1] - 1:transitions2[i, 1] + 1] = 0

    # calculating midpoint between edges and cropping the image
    midpoint = int((edge[1] - edge[0]) / 2 + edge[0])
    data = data[:, int(midpoint - width / 2):int(midpoint + width / 2)]
    blurred = blurred[:, int(midpoint - width / 2):int(midpoint + width / 2)]

    # converting data to contiguous array
    data = np.ascontiguousarray(data, dtype=np.uint8)

    # settings for placing AoA and transition location on the image
    text1 = 'AoA: ' + str(angle)
    text2 = 'x/c = ' + str(round(XC, 3))
    org1 = (60, 20)
    org2 = (60, 40)
    font = cv2.FONT_HERSHEY_SIMPLEX
    fontScale = 0.5
    color = (255, 0, 0)
    thickness = 1

    # inserting text to the image
    data = cv2.putText(data, text1, org1, font, fontScale, color, thickness, cv2.LINE_AA)
    data = cv2.putText(data, text2, org2, font, fontScale, color, thickness, cv2.LINE_AA)

    # showing generated images
    # cv2.imshow("data", data)
    # cv2.imshow("blurred", blurred)
    # cv2.waitKey(0)

    # saving generated images
    path = 'Images'
    fileName = 'AoA=' + str(angle) + ',XC=' + str(round(XC, 3)) + '.jpg'
    cv2.imwrite(os.path.join(path, fileName), data)
    # cv2.imwrite(os.path.join(path, 'blurred.jpg'), blurred)

    return XC
# detecting all folders in the selected directory; each folder is one
# angle-of-attack measurement series named after the angle
folders = os.listdir(folderName + '/.')

# creating empty array for results: one (angle, x/c) row per folder
results = np.zeros((len(folders), 2))

# iterating over each folder
for i, folder in enumerate(folders):
    # detecting all files in the selected folder
    folderPath = folderName + '/' + folder + '/.'
    files = os.listdir(folderPath)

    # creating empty array in the size of data
    dataPoints = np.zeros((480, 640))

    # monitoring progress of the program
    print('---------------------------------------')
    print('Progress: ' + str(round(i / len(folders) * 100, 2)) + '%')
    print('AoA: ' + folder)

    # iterating over detected files
    for file in files:
        # importing data into array
        filePath = folderName + '/' + folder + '/' + file
        dataPoint = np.genfromtxt(filePath, delimiter=';')

        # removing NaN values from the array
        dataPoint = dataPoint[:, ~np.isnan(dataPoint).all(axis=0)]

        # adding imported data to the array
        dataPoints += dataPoint
        # NOTE(review): this break means only the FIRST file per folder is
        # used; the averaging below is disabled accordingly — confirm this
        # is intended.
        break

    # calculating average of the data
    # dataPoints = dataPoints / len(files)

    # calculating location of transition and saving it into the results
    transitionXC = findTransition(dataPoints, float(folder))
    results[i] = [float(folder), transitionXC]

# saving results to text file, sorted by angle of attack
results = results[results[:, 0].argsort()]
np.savetxt('results.txt', results, delimiter=',')

# generating graph of location vs angle of attack
plt.plot(results[:, 0], results[:, 1])
plt.xlabel("Angle of attack [deg]")
plt.ylabel("Location of transition [x/c]")
plt.show()
| 43.550489 | 190 | 0.659013 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,445 | 0.407255 |
21ae2a24ae236e3d5b5a92a327d356b5c7ba6074 | 90 | py | Python | aiida_crystal_dft/__init__.py | tilde-lab/aiida-crystal-dft | 971fd13a3f414d6e80cc654dc92a8758f6e0365c | [
"MIT"
] | 2 | 2019-02-05T16:49:08.000Z | 2020-01-29T12:27:14.000Z | aiida_crystal_dft/__init__.py | tilde-lab/aiida-crystal-dft | 971fd13a3f414d6e80cc654dc92a8758f6e0365c | [
"MIT"
] | 36 | 2020-03-09T19:35:10.000Z | 2021-12-07T22:13:31.000Z | aiida_crystal_dft/__init__.py | tilde-lab/aiida-crystal-dft | 971fd13a3f414d6e80cc654dc92a8758f6e0365c | [
"MIT"
] | 1 | 2019-11-13T23:12:10.000Z | 2019-11-13T23:12:10.000Z | """
aiida_crystal_dft
AiiDA plugin for running the CRYSTAL code
"""
__version__ = "0.8"
| 11.25 | 41 | 0.722222 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 73 | 0.811111 |
21af95c3e6f5614235525e918b9f73b1e391d922 | 42 | py | Python | fzzzMaskBackend/users/serializers.py | FZZZMask/backend | 4f987e96a5ff42d89cf536c099b944f5f7254764 | [
"BSD-3-Clause"
] | null | null | null | fzzzMaskBackend/users/serializers.py | FZZZMask/backend | 4f987e96a5ff42d89cf536c099b944f5f7254764 | [
"BSD-3-Clause"
] | 3 | 2020-02-11T23:24:39.000Z | 2021-06-04T21:45:25.000Z | fzzzMaskBackend/users/serializers.py | FZZZMask/backend | 4f987e96a5ff42d89cf536c099b944f5f7254764 | [
"BSD-3-Clause"
] | null | null | null | from rest_framework import serializers
| 8.4 | 38 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
21b0edc91f5567ee1123bcbb0bfd919c0b28c903 | 2,938 | py | Python | src/python/pants/backend/terraform/target_gen_test.py | bastianwegge/pants | 43f0b90d41622bee0ed22249dbaffb3ff4ad2eb2 | [
"Apache-2.0"
] | null | null | null | src/python/pants/backend/terraform/target_gen_test.py | bastianwegge/pants | 43f0b90d41622bee0ed22249dbaffb3ff4ad2eb2 | [
"Apache-2.0"
] | 14 | 2020-09-26T02:01:56.000Z | 2022-03-30T10:19:28.000Z | src/python/pants/backend/terraform/target_gen_test.py | ryanking/pants | e45b00d2eb467b599966bca262405a5d74d27bdd | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import pytest
from pants.backend.terraform import target_gen
from pants.backend.terraform.target_types import (
TerraformModulesGeneratorTarget,
TerraformModuleSourcesField,
TerraformModuleTarget,
)
from pants.core.util_rules import external_tool
from pants.engine.addresses import Address
from pants.engine.internals.graph import _TargetParametrizations
from pants.engine.rules import QueryRule
from pants.testutil.rule_runner import RuleRunner
@pytest.fixture
def rule_runner() -> RuleRunner:
    # RuleRunner wired with the terraform target types, the target
    # generation rules under test, and a query rule so tests can request
    # the generated target parametrizations for an address.
    return RuleRunner(
        target_types=[TerraformModuleTarget, TerraformModulesGeneratorTarget],
        rules=[
            *external_tool.rules(),
            *target_gen.rules(),
            QueryRule(_TargetParametrizations, [Address]),
        ],
    )
def test_target_generation_at_build_root(rule_runner: RuleRunner) -> None:
    """A generator at the build root creates one `terraform_module` target per
    directory containing .tf files; non-terraform files are ignored."""
    rule_runner.write_files(
        {
            "BUILD": "terraform_modules(name='tf_mods')\n",
            "src/tf/versions.tf": "",
            "src/tf/outputs.tf": "",
            "src/tf/foo/versions.tf": "",
            "src/tf/not-terraform/README.md": "This should not trigger target generation.",
        }
    )

    generator_addr = Address("", target_name="tf_mods")
    generator = rule_runner.get_target(generator_addr)
    targets = rule_runner.request(_TargetParametrizations, [generator.address])
    # Source paths are relative to the build root, where the generator lives.
    assert set(targets.parametrizations.values()) == {
        TerraformModuleTarget(
            {TerraformModuleSourcesField.alias: ("src/tf/foo/versions.tf",)},
            generator_addr.create_generated("src/tf/foo"),
            residence_dir="src/tf/foo",
        ),
        TerraformModuleTarget(
            {TerraformModuleSourcesField.alias: ("src/tf/outputs.tf", "src/tf/versions.tf")},
            generator_addr.create_generated("src/tf"),
            residence_dir="src/tf",
        ),
    }
def test_target_generation_at_subdir(rule_runner: RuleRunner) -> None:
    """When the `terraform_modules` generator lives in a subdirectory, the
    generated targets' source globs are relative to that subdirectory."""
    rule_runner.write_files(
        {
            "src/tf/BUILD": "terraform_modules()\n",
            "src/tf/versions.tf": "",
            "src/tf/foo/versions.tf": "",
        }
    )
    generator_addr = Address("src/tf")
    generator = rule_runner.get_target(generator_addr)
    targets = rule_runner.request(_TargetParametrizations, [generator.address])
    assert set(targets.parametrizations.values()) == {
        TerraformModuleTarget(
            {TerraformModuleSourcesField.alias: ("foo/versions.tf",)},
            generator_addr.create_generated("foo"),
            residence_dir="src/tf/foo",
        ),
        # "." denotes the generator's own directory.
        TerraformModuleTarget(
            {TerraformModuleSourcesField.alias: ("versions.tf",)},
            generator_addr.create_generated("."),
            residence_dir="src/tf",
        ),
    }
| 34.564706 | 93 | 0.660994 | 0 | 0 | 0 | 0 | 311 | 0.105854 | 0 | 0 | 583 | 0.198434 |
21b1eb4686bf40669ec47b042269eff5341c4c0e | 377 | py | Python | tkinterLearning/graphinKivyExample.py | MertEfeSevim/ECar-ABUTeam | 4a37cbddff1609a1e1e8bd55fe6077b384471024 | [
"Apache-2.0"
] | null | null | null | tkinterLearning/graphinKivyExample.py | MertEfeSevim/ECar-ABUTeam | 4a37cbddff1609a1e1e8bd55fe6077b384471024 | [
"Apache-2.0"
] | null | null | null | tkinterLearning/graphinKivyExample.py | MertEfeSevim/ECar-ABUTeam | 4a37cbddff1609a1e1e8bd55fe6077b384471024 | [
"Apache-2.0"
] | null | null | null | from kivy.garden.matplotlib.backend_kivyagg import FigureCanvasKivyAgg
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
import matplotlib.pyplot as plt
# Plot sample data on matplotlib's current figure; the FigureCanvasKivyAgg
# widget created in MyApp.build() renders this same figure inside Kivy.
plt.plot([1, 23, 2, 4])
plt.ylabel('some numbers')
class MyApp(App):
    def build(self):
        # Root widget: a BoxLayout wrapping the current matplotlib figure
        # (plt.gcf() returns the figure configured above).
        box = BoxLayout()
        box.add_widget(FigureCanvasKivyAgg(plt.gcf()))
        return box
MyApp().run()
| 22.176471 | 70 | 0.71618 | 139 | 0.3687 | 0 | 0 | 0 | 0 | 0 | 0 | 14 | 0.037135 |
21b22ee8ebf7ebc9c5d48a409810e94e5629e56d | 5,240 | py | Python | data_genie/get_data.py | noveens/sampling_cf | e135819b1e7310ee58edbbd138f303e5240a2619 | [
"MIT"
] | 6 | 2022-01-14T13:38:03.000Z | 2022-03-01T17:57:09.000Z | data_genie/get_data.py | noveens/sampling_cf | e135819b1e7310ee58edbbd138f303e5240a2619 | [
"MIT"
] | null | null | null | data_genie/get_data.py | noveens/sampling_cf | e135819b1e7310ee58edbbd138f303e5240a2619 | [
"MIT"
] | null | null | null | import os
import random
from tqdm import tqdm
from collections import defaultdict
from data_genie.data_genie_config import *
from data_genie.data_genie_utils import TRAINING_DATA_PATH, CACHED_KENDALL_TAU_PATH, load_obj, save_obj
from data_genie.data_genie_utils import count_performance_retained, get_best_results
from utils import INF
def get_data_pointwise(dataset):
    """Return the pointwise training data for ``dataset``.

    Builds and caches the data via :func:`prep_data` when the pickle does
    not exist yet, then loads it with :func:`load_obj`.
    """
    path = TRAINING_DATA_PATH(dataset, "pointwise")
    if not os.path.exists(path + ".pkl"):
        prep_data(dataset)
    return load_obj(path)
def get_data_pairwise(dataset):
    """Return the pairwise training data for ``dataset``.

    Builds and caches the data via :func:`prep_data` when the pickle does
    not exist yet, then loads it with :func:`load_obj`.
    """
    path = TRAINING_DATA_PATH(dataset, "pairwise")
    if not os.path.exists(path + ".pkl"):
        prep_data(dataset)
    return load_obj(path)
def prep_data(dataset):
    """
    Build and persist the pointwise and pairwise training data for ``dataset``.

    Saves two pickles via :func:`save_obj`:

    - ``TRAINING_DATA_PATH(dataset, "pointwise")``: ``[train_pointwise, val, test]``
    - ``TRAINING_DATA_PATH(dataset, "pairwise")``: ``[train_pairwise, val, test]``

    Each container is a list of 5 parallel (column-oriented) lists.

    NOTE(review): ``scenarios``, ``percent_rns_options``, ``all_samplers``,
    ``task_map``, ``metric_map``, ``copy`` and ``get_embedding_id`` are not
    imported explicitly here -- presumably provided by the star-import of
    ``data_genie_config``; confirm.
    """
    # Get model runs
    results = get_results(dataset)
    # Build train, val, and test data (5 parallel columns each)
    val_data = [ [], [], [], [], [] ]
    test_data = copy.deepcopy(val_data)
    train_data_pointwise = copy.deepcopy(val_data)
    train_data_pairwise = copy.deepcopy(val_data)
    for task, metrics in scenarios:
        # Enumerate all (metric, sampling %) combinations, then shuffle and
        # split them into val/test/train index sets.
        all_options = []
        for m in metrics:
            for sampling_percent in percent_rns_options:
                all_options.append([ m, sampling_percent ])
        random.shuffle(all_options)
        val_indices = [ all_options[0] ]
        if len(metrics) == 1: test_indices, train_indices = [ all_options[1] ], all_options[2:]
        else: test_indices, train_indices = all_options[1:4], all_options[4:]
        # Validation/testing data
        for container, indices in [ (val_data, val_indices), (test_data, test_indices) ]:
            for m, sampling_percent in indices:
                for sampling in all_samplers:
                    container[0].append(get_embedding_id(task, 'complete_data', 0))
                    container[1].append(get_embedding_id(task, sampling, sampling_percent))
                    container[2].append(task_map[task])
                    container[3].append(metric_map[m])
                    container[4].append(
                        count_performance_retained(results[task][m][sampling_percent][sampling], m, scaled = False)
                    )
        # Training data
        for m, sampling_percent in train_indices:
            # Performance retained per sampler; INF/-INF mark invalid runs.
            y = [ count_performance_retained(
                results[task][m][sampling_percent][sampling], m, scaled = False
            ) for sampling in all_samplers ]
            # Pointwise: one row per valid sampler
            for at, sampling in enumerate(all_samplers):
                if y[at] in [ INF, -INF ]: continue
                train_data_pointwise[0].append(get_embedding_id(task, 'complete_data', 0))
                train_data_pointwise[1].append(get_embedding_id(task, sampling, sampling_percent))
                train_data_pointwise[2].append(task_map[task])
                train_data_pointwise[3].append(metric_map[m])
                train_data_pointwise[4].append(y[at])
            # Pairwise: one row per ordered (better, lower) sampler pair;
            # ties and invalid values are skipped.
            for i in range(len(all_samplers)):
                for j in range(i+1, len(all_samplers)):
                    if y[i] in [ INF, -INF ]: continue
                    if y[j] in [ INF, -INF ]: continue
                    if y[i] == y[j]: continue
                    if y[i] > y[j]: better, lower = i, j
                    else: better, lower = j, i
                    train_data_pairwise[0].append(get_embedding_id(task, 'complete_data', 0))
                    train_data_pairwise[1].append(get_embedding_id(task, all_samplers[better], sampling_percent))
                    train_data_pairwise[2].append(get_embedding_id(task, all_samplers[lower], sampling_percent))
                    train_data_pairwise[3].append(task_map[task])
                    train_data_pairwise[4].append(metric_map[m])
    save_obj([ train_data_pointwise, val_data, test_data ], TRAINING_DATA_PATH(dataset, "pointwise"))
    save_obj([ train_data_pairwise, val_data, test_data ], TRAINING_DATA_PATH(dataset, "pairwise"))
def get_results(dataset):
    """
    Collect the best model results for every sampling strategy of ``dataset``.

    Returns (and caches as a pickle at ``CACHED_KENDALL_TAU_PATH(dataset)``)
    a nested dict::

        y[task][metric][percent_rns][sampling_kind] = [
            [sampled_metric, complete_data_metric], ...  # one per method
        ]

    NOTE(review): ``scenarios``, ``svp_methods``, ``sampling_svp``,
    ``sampling_kinds``, ``methods_to_compare`` and ``percent_rns_options``
    are assumed to come from the ``data_genie_config`` star-import -- confirm.
    """
    PATH = CACHED_KENDALL_TAU_PATH(dataset)
    # Return the cached result if it was computed before.
    if os.path.exists(PATH + ".pkl"): return load_obj(PATH)
    loop = tqdm(
        total = len(scenarios) * ((len(svp_methods) * len(sampling_svp)) + len(sampling_kinds)) * \
                len(methods_to_compare) * len(percent_rns_options)
    )
    y = {}
    for task, metrics_to_return in scenarios:
        # Structure of `y`
        y[task] = {}
        for m in metrics_to_return:
            y[task][m] = {}
            for percent_rns in percent_rns_options:
                y[task][m][percent_rns] = defaultdict(list)
        # Random/graph-based sampling
        for sampling_kind in sampling_kinds:
            for method in methods_to_compare:
                # Baseline: the same method trained on the complete data.
                complete_data_metrics = get_best_results(
                    dataset, 0, 'complete_data', method, task, metrics_to_return
                )
                for percent_rns in percent_rns_options:
                    loop.update(1)
                    metrics = get_best_results(
                        dataset, percent_rns, sampling_kind, method, task, metrics_to_return
                    )
                    if metrics is None: continue
                    for at, m in enumerate(metrics_to_return):
                        y[task][m][percent_rns][sampling_kind].append([
                            metrics[at], complete_data_metrics[at]
                        ])
        # SVP sampling
        for svp_method in svp_methods:
            for sampling_kind in sampling_svp:
                for method in methods_to_compare:
                    complete_data_metrics = get_best_results(
                        dataset, 0, 'complete_data', method, task, metrics_to_return
                    )
                    for percent_rns in percent_rns_options:
                        loop.update(1)
                        metrics = get_best_results(
                            dataset, percent_rns, "svp_{}".format(svp_method), method, task, metrics_to_return,
                            sampling_svp = sampling_kind
                        )
                        if metrics is None: continue
                        for at, m in enumerate(metrics_to_return):
                            y[task][m][percent_rns]["svp_{}_{}".format(svp_method, sampling_kind)].append([
                                metrics[at], complete_data_metrics[at]
                            ])
    save_obj(y, PATH)
    return y
| 33.589744 | 103 | 0.716412 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 326 | 0.062214 |
21b3036dd9c7340de7952e841313f8a67214f250 | 3,934 | py | Python | benchmark.py | cmpute/EECS558-Project | d964059901c62773b475c5d4b40f018ee28a0c73 | [
"Unlicense"
] | null | null | null | benchmark.py | cmpute/EECS558-Project | d964059901c62773b475c5d4b40f018ee28a0c73 | [
"Unlicense"
] | null | null | null | benchmark.py | cmpute/EECS558-Project | d964059901c62773b475c5d4b40f018ee28a0c73 | [
"Unlicense"
] | null | null | null | import numpy as np
from matplotlib import pyplot as plt
from env import DrivingEnv
from solvers import GridSolver, SampleGraphSolver
def time_compare(seed=1234, min_sample=10, max_sample=50, count=10):
sample_count = np.linspace(min_sample, max_sample, count).astype(int)
grid_times = []
graph_times = []
for size in sample_count:
env = DrivingEnv(15, random_seed=seed)
solver = GridSolver(size)
grid_times.append(solver.solve(env, max_steps=500))
env = DrivingEnv(15, random_seed=seed)
solver = SampleGraphSolver(size*size)
graph_times.append(solver.solve(env, max_steps=500))
plt.figure()
plt.semilogy(sample_count, grid_times, label="Grid-based")
plt.semilogy(sample_count, graph_times, label="Graph-based")
plt.xlabel("Equivalent sample size")
plt.ylabel("Running time (s)")
plt.legend()
plt.show()
def grid_size_reward_compare(seed=1234, min_sample=10, max_sample=50, count=10, repeat=5):
env = DrivingEnv(15, random_seed=seed)
size_list = np.linspace(min_sample, max_sample, count).astype(int)
cost_list = []
for size in size_list:
cost_cases = []
for _ in range(repeat):
solver = SampleGraphSolver(size*size)
solver.solve(env, max_steps=200, early_stop=False)
states, cost = env.simulate(solver)
cost_cases.append(cost)
cost_list.append(cost_cases)
plt.figure()
plt.plot(size_list, np.mean(cost_list, axis=1))
plt.xlabel("Graph size")
plt.ylabel("Time and safety cost")
plt.title("Graph based policy performance versus graph size")
plt.show()
def grid_with_different_safety_cost(cost_type="linear"):
env = DrivingEnv(15, random_seed=1234)
def render_graph(solver, ax):
solution = solver.report_solution()
solution_set = set()
for i in range(len(solution) - 1):
solution_set.add((solution[i], solution[i+1]))
for n1, n2 in solver._connections:
if (n1, n2) in solution_set or (n2, n1) in solution_set:
color = "#1A090D"
lwidth = 5
else:
color = "#4A139488"
lwidth = 1
ax.plot([solver._samples[n1].x, solver._samples[n2].x], [solver._samples[n1].y, solver._samples[n2].y], lw=lwidth, c=color)
ax.scatter([p.x for p in solver._samples], [p.y for p in solver._samples], c=solver._safety_cost_cache)
solver = SampleGraphSolver(800)
solver.solve(env, max_steps=200, safety_weight=100, safety_type=cost_type)
fig, ax = plt.subplots(1)
env.render(ax)
render_graph(solver, ax)
plt.title("Graph-based solution with %s cost" % cost_type)
plt.show()
def graph_with_different_weight(seed=1234, ratio_count=7):
ratios = np.logspace(-3, 3, ratio_count)
fig, ax = plt.subplots(1)
DrivingEnv(15, random_seed=seed).render(ax)
handles = [None] * ratio_count
for rid, ratio in enumerate(ratios):
coeff = np.sqrt(ratio)
env = DrivingEnv(15, random_seed=seed)
solver = SampleGraphSolver(800)
solver.solve(env, max_steps=100, early_stop=False, safety_weight=coeff, time_weight=1/coeff, safety_type="linear")
solution = solver.report_solution()
solution_set = set()
for i in range(len(solution) - 1):
solution_set.add((solution[i], solution[i+1]))
for n1, n2 in solver._connections:
if (n1, n2) in solution_set or (n2, n1) in solution_set:
lwidth, color = 4, "C%d" % rid
handles[rid], = ax.plot([solver._samples[n1].x, solver._samples[n2].x], [solver._samples[n1].y, solver._samples[n2].y], lw=lwidth, c=color)
# fig.legend(handles, ["safety/time=%f" % ratio for ratio in ratios], loc=1)
plt.title("Difference path under different weights")
plt.show()
graph_with_different_weight()
| 37.826923 | 155 | 0.649466 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 344 | 0.087443 |
21b3aa32ee34e39e88f108b17ea530a57eb6e324 | 1,912 | py | Python | pandas_market_calendars/exchange_calendars_mirror.py | matbox/pandas_market_calendars | 942ad6de5f3e2700a4f8b2c2d44ccb65fa9fdab5 | [
"MIT"
] | null | null | null | pandas_market_calendars/exchange_calendars_mirror.py | matbox/pandas_market_calendars | 942ad6de5f3e2700a4f8b2c2d44ccb65fa9fdab5 | [
"MIT"
] | null | null | null | pandas_market_calendars/exchange_calendars_mirror.py | matbox/pandas_market_calendars | 942ad6de5f3e2700a4f8b2c2d44ccb65fa9fdab5 | [
"MIT"
] | null | null | null | """
Imported calendars from the exchange_calendars project
GitHub: https://github.com/gerrymanoim/exchange_calendars
"""
from datetime import time
from .market_calendar import MarketCalendar
import exchange_calendars
class TradingCalendar(MarketCalendar):
def __init__(self, open_time=None, close_time=None):
self._tc = self._tc_class() # noqa: _tc.class is defined in the class generator below
super().__init__(open_time, close_time)
@property
def name(self):
return self._tc.name
@property
def tz(self):
return self._tc.tz
@property
def open_time_default(self):
return self._tc.open_times[0][1].replace(tzinfo=self.tz)
@property
def close_time_default(self):
return self._tc.close_times[0][1].replace(tzinfo=self.tz)
@property
def break_start(self):
tc_time = self._tc.break_start_times
return tc_time[0][1] if tc_time else None
@property
def break_end(self):
tc_time = self._tc.break_end_times
return tc_time[0][1] if tc_time else None
@property
def regular_holidays(self):
return self._tc.regular_holidays
@property
def adhoc_holidays(self):
return self._tc.adhoc_holidays
@property
def special_opens(self):
return self._tc.special_opens
@property
def special_opens_adhoc(self):
return self._tc.special_opens_adhoc
@property
def special_closes(self):
return self._tc.special_closes
@property
def special_closes_adhoc(self):
return self._tc.special_closes_adhoc
calendars = exchange_calendars.calendar_utils._default_calendar_factories # noqa
for exchange in calendars:
locals()[exchange + 'ExchangeCalendar'] = type(exchange, (TradingCalendar, ),
{'_tc_class': calendars[exchange], 'alias': [exchange]})
| 26.191781 | 107 | 0.680962 | 1,388 | 0.725941 | 0 | 0 | 1,078 | 0.563808 | 0 | 0 | 220 | 0.115063 |
21b4b857672198b3794c4cd67434ee8e238bf40c | 164 | py | Python | util/prelude.py | sinsay/ds_define | 0ee89edfc3ad1ed37c5b88e13936229baf50a966 | [
"Apache-2.0"
] | null | null | null | util/prelude.py | sinsay/ds_define | 0ee89edfc3ad1ed37c5b88e13936229baf50a966 | [
"Apache-2.0"
] | null | null | null | util/prelude.py | sinsay/ds_define | 0ee89edfc3ad1ed37c5b88e13936229baf50a966 | [
"Apache-2.0"
] | null | null | null | from .enum import EnumBase
def is_builtin_type(obj) -> bool:
"""
检查 obj 是否基础类型
"""
return isinstance(obj, (int, str, float, bool)) or obj is None
| 18.222222 | 66 | 0.628049 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 45 | 0.25 |
21b5752cb9a0990564c49e3262c213225974ef34 | 1,647 | py | Python | tca_ng/server.py | wichovw/tca-gt | ad862286f153e5cd83db8a44ff0bb6ae7c4925ce | [
"MIT"
] | 1 | 2016-09-09T15:51:38.000Z | 2016-09-09T15:51:38.000Z | tca_ng/server.py | wichovw/tca-gt | ad862286f153e5cd83db8a44ff0bb6ae7c4925ce | [
"MIT"
] | null | null | null | tca_ng/server.py | wichovw/tca-gt | ad862286f153e5cd83db8a44ff0bb6ae7c4925ce | [
"MIT"
] | null | null | null | import cherrypy, cherrypy_cors, os
import tca_ng.example_maps
import tca_ng.models
import random
class TCAServer(object):
    """CherryPy web service exposing the traffic cellular automaton as JSON."""

    @cherrypy.expose
    @cherrypy.tools.json_out()
    def start(self):
        """Create a fresh automaton on a simple size-10 map and return its
        topology as JSON."""
        self.automaton = tca_ng.models.Automaton()
        self.automaton.topology = tca_ng.example_maps.simple_map(10)
        return self.automaton.topology.json_view()

    @cherrypy.expose
    @cherrypy.tools.json_out()
    def update(self):
        """Advance the automaton one step, randomly perturb a traffic light,
        and return the updated topology as JSON.

        NOTE(review): assumes ``/start`` was called first so that
        ``self.automaton`` exists -- confirm the client always does so.
        """
        self.automaton.update()
        print()
        print('total cars', len(self.automaton.topology.cars))
        # Debug output for every 10th car only, to keep the log readable.
        for car in self.automaton.topology.cars:
            if car.id % 10 == 0:
                print('car %3s %8s route: %s' % (
                    car.id,
                    tuple(car.cell.viewer_address),
                    car.route
                ))
        # modify a light: shift one random light's timing by -2..2 steps
        light = random.choice(self.automaton.topology.lights)
        change = random.randint(-2, 2)
        print(light, light.time, change)
        light.time += change
        print()
        return self.automaton.topology.json_view()
PATH = os.path.abspath(os.path.dirname(__file__))
def serve(ip, port):
    """Install CORS support and serve the TCA app plus static files on ip:port."""
    cherrypy_cors.install()
    # Serve this package's directory as static files, with index.html as the
    # default document, and expose CORS headers.
    static_config = {
        'tools.staticdir.on': True,
        'tools.staticdir.dir': PATH,
        'tools.staticdir.index': 'index.html',
        'cors.expose.on': True,
    }
    cherrypy.server.socket_host = ip
    cherrypy.server.socket_port = port
    cherrypy.quickstart(TCAServer(), '/', {'/': static_config})
if __name__ == '__main__':
    # Development entry point: serve the TCA UI on http://localhost:5555
    serve('localhost', 5555)
| 27 | 68 | 0.56527 | 1,026 | 0.622951 | 0 | 0 | 982 | 0.596236 | 0 | 0 | 170 | 0.103218 |
21b63b9f54674792f408a6f07e0262da28ca36a1 | 553 | py | Python | todo/api/views.py | devord/todo | 312c313589cec179d69bf64ca3e06382dc2df728 | [
"MIT"
] | null | null | null | todo/api/views.py | devord/todo | 312c313589cec179d69bf64ca3e06382dc2df728 | [
"MIT"
] | 36 | 2019-03-22T01:50:24.000Z | 2022-02-26T10:28:41.000Z | todo/api/views.py | devord/todo | 312c313589cec179d69bf64ca3e06382dc2df728 | [
"MIT"
] | null | null | null | from rest_framework import viewsets
from api.serializers import LabelSerializer, ItemSerializer
from api.models import Label, Item
class LabelViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows labels to be viewed or edited.

    Provides the standard ModelViewSet actions
    (list/retrieve/create/update/destroy) for ``Label`` objects.
    """
    # Alphabetical ordering gives deterministic list responses.
    queryset = Label.objects.all().order_by('name')
    serializer_class = LabelSerializer
class ItemViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows items to be viewed or edited.

    Provides the standard ModelViewSet actions
    (list/retrieve/create/update/destroy) for ``Item`` objects.
    """
    # Ordered by title for deterministic list responses.
    queryset = Item.objects.all().order_by('title')
    serializer_class = ItemSerializer
| 26.333333 | 59 | 0.734177 | 415 | 0.750452 | 0 | 0 | 0 | 0 | 0 | 0 | 154 | 0.278481 |
21b6bc874be363315a5d7728d2f9c90f4bee8e37 | 802 | py | Python | user.py | sylvestus/passwordLocker | 2dc949996c60eb02d55ac6d7426e2eb9f0cb9375 | [
"Unlicense"
] | null | null | null | user.py | sylvestus/passwordLocker | 2dc949996c60eb02d55ac6d7426e2eb9f0cb9375 | [
"Unlicense"
] | null | null | null | user.py | sylvestus/passwordLocker | 2dc949996c60eb02d55ac6d7426e2eb9f0cb9375 | [
"Unlicense"
] | null | null | null |
import string
import random
class User:
    """A user account with helpers for managing a shared in-memory registry."""

    def __init__(self, username, password):
        """
        Args:
            username: the account's user name.
            password: the account's password.
        """
        self.username = username
        self.password = password

    # Class-level registry shared by all instances.
    userList = []

    def addUser(self):
        '''
        method saves a new user object to the shared userList registry
        '''
        User.userList.append(self)

    def deleteUser(self):
        '''
        method deletes a saved user from userList
        '''
        User.userList.remove(self)

    @classmethod
    def displayUser(cls):
        '''return the list of all saved users'''
        return cls.userList

    def generate_password(self, length=8):
        '''
        generate random password consisting of letters

        Args:
            length: number of characters to generate. Defaults to 8,
                matching the previous fixed-length behaviour.
        '''
        letters = string.ascii_uppercase + string.ascii_lowercase
        return ''.join(random.choice(letters) for _ in range(length))
| 19.560976 | 67 | 0.583541 | 744 | 0.927681 | 0 | 0 | 66 | 0.082294 | 0 | 0 | 212 | 0.264339 |
21b737190d56432c7d4ca921f5d6f60d7150164a | 289 | py | Python | batch/batch/public_gcr_images.py | MariusDanner/hail | 5ca0305f8243b5888931b1afaa1fbfb617dee097 | [
"MIT"
] | null | null | null | batch/batch/public_gcr_images.py | MariusDanner/hail | 5ca0305f8243b5888931b1afaa1fbfb617dee097 | [
"MIT"
] | null | null | null | batch/batch/public_gcr_images.py | MariusDanner/hail | 5ca0305f8243b5888931b1afaa1fbfb617dee097 | [
"MIT"
] | null | null | null | from typing import List
def public_gcr_images(project: str) -> List[str]:
    """Return the fully qualified GCR image paths published under *project*.

    The image names are inlined here because the worker cannot import
    batch_configuration (it does not have all the environment variables).
    """
    images = []
    for name in ('query', 'hail', 'python-dill', 'batch-worker'):
        images.append(f'gcr.io/{project}/{name}')
    return images
| 36.125 | 100 | 0.709343 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 168 | 0.581315 |
21ba9fc19364859893264a2f210099d5d934cfe1 | 24,972 | py | Python | django_cradmin/uicontainer/container.py | appressoas/django_cradmin | 0f8715afdfe1ad32e46033f442e622aecf6a4dec | [
"BSD-3-Clause"
] | 11 | 2015-07-05T16:57:58.000Z | 2020-11-24T16:58:19.000Z | django_cradmin/uicontainer/container.py | appressoas/django_cradmin | 0f8715afdfe1ad32e46033f442e622aecf6a4dec | [
"BSD-3-Clause"
] | 91 | 2015-01-08T22:38:13.000Z | 2022-02-10T10:25:27.000Z | django_cradmin/uicontainer/container.py | appressoas/django_cradmin | 0f8715afdfe1ad32e46033f442e622aecf6a4dec | [
"BSD-3-Clause"
] | 3 | 2016-12-07T12:19:24.000Z | 2018-10-03T14:04:18.000Z | from django.conf import settings
from django.forms.utils import flatatt
from django_cradmin import renderable
class NotBootsrappedError(Exception):
    """
    Raised when trying to use features of
    :class:`.AbstractContainerRenderable`
    that require it to have been bootstrapped first.

    .. note:: The misspelled class name ("Bootsrapped") is kept as-is so
        existing ``except`` clauses keep working.
    """
class AlreadyBootsrappedError(Exception):
    """
    Raised when trying to :meth:`~.AbstractContainerRenderable.bootstrap`
    an already bootstrapped :class:`.AbstractContainerRenderable`.
    """
class NotAllowedToAddChildrenError(Exception):
    """
    Raised when trying to add children to a :class:`.AbstractContainerRenderable`
    where :meth:`~.AbstractContainerRenderable.html_tag_supports_children`
    returns ``False``. See also
    :meth:`~.AbstractContainerRenderable.can_have_children`.
    """
class UnsupportedHtmlTagError(ValueError):
    """
    Raised when providing an invalid ``html_tag`` kwarg to :class:`.AbstractContainerRenderable`.
    See :obj:`.AbstractContainerRenderable.supported_html_tags` (no validation
    is performed when that attribute is ``None``).
    """
class InvalidBemError(ValueError):
    """
    Raised when invalid BEM is supplied: both ``bem_block`` and
    ``bem_element`` at once, a ``bem_block`` containing ``__``, or a
    ``bem_element`` missing ``__``.
    """
class InvalidDomIdError(ValueError):
    """
    Raised when an invalid ``dom_id`` is supplied: it must start with
    ``id_``, be all lowercase, and not contain ``-``.
    """
class AbstractContainerRenderable(renderable.AbstractRenderableWithCss):
"""
Base class for all renderables in the uicontainer framework.
This can not be used directly. You extend it, and at least override
:meth:`.get_default_html_tag`, or use one of the subclasses.
The most basic subclass is :class:`django_cradmin.uicontainer.div.Div`.
.. attribute:: parent
The parent AbstractContainerRenderable. Set in :meth:`.bootstrap`.
The attribute does not exist if :meth:`.bootstrap` has not been
run. Is ``None`` if this is the root of the container tree.
.. attribute:: properties
A dict of properties. These properties is copied down to the
``properties`` attribute of children (with the update-method, not full replace)
in :meth:`.bootstrap`.
This means that you can add properties in ``__init__()``,
and make them available to any children recursively.
"""
template_name = 'django_cradmin/uicontainer/container.django.html'
#: You can override this to specify a set of supported HTML tags
#: for the ``html_tag`` attribute for :meth:`~.AbstractContainerRenderable.__init__`.
#: This is useful to avoid typing errors. It should not be a big problem if you
#: forget a tag that should be supported - developers can just create a subclass.
#:
#: If the value of this field is None, or any other value that is considered False by
#: ``bool()``, we do not validate the ``html_tag`` kwarg.
supported_html_tags = None
    def __init__(self, children=None,
                 bem_block=None, bem_element=None, bem_variant_list=None,
                 html_tag=None,
                 css_classes_list=None,
                 extra_css_classes_list=None,
                 test_css_class_suffixes_list=None,
                 role=False,
                 dom_id=False,
                 html_element_attributes=None,
                 **kwargs):
        """
        Args:
            children: List of children. Children must be objects of subclasses
                of :class:`.AbstractContainerRenderable`.
            bem_block (str): BEM block name for this container. Can not be
                combined with ``bem_element`` (see :meth:`.validate_bem`).
            bem_element (str): BEM element name (must contain ``__``). Can not
                be combined with ``bem_block`` (see :meth:`.validate_bem`).
            bem_variant_list (list): Override :meth:`.get_default_bem_variant_list`
                with your own list of BEM variants.
            html_tag (str): Override :meth:`.get_default_html_tag`. Validated
                against :obj:`.supported_html_tags` when that is set.
            css_classes_list (list): Override the :meth:`default css classes <.get_default_css_classes_list>`
                with your own list of css classes.
            extra_css_classes_list (list): Add extra css classes. This is appended to
                the css classes in the ``css_classes_list`` kwarg if that is specified,
                or appended to the css classes returned by
                :meth:`.get_default_css_classes_list`.
            test_css_class_suffixes_list (list): Override
                :meth:`.get_default_test_css_class_suffixes_list` with your
                own list of css class suffixes for unit tests.
            role (str): The value of the role attribute.
                If this is not specified, we fall back on the value returned
                by :meth:`.get_default_role`.
                If both is ``False``, we do not render the role attribute.
            dom_id (str): The value of the id attribute.
                If this is not specified, we fall back on the value returned
                by :meth:`.get_default_dom_id`.
                If both is ``False``, we do not render the id attribute.
            html_element_attributes (dict): HTML element attributes to add
                to the HTML element. This adds attributes returned
                by :meth:`.get_html_element_attributes`. If this dict includes
                attributes returned by :meth:`.get_html_element_attributes`,
                the attributes specified in this kwarg takes presedense.
                The format of the dict is specified in :meth:`.get_html_element_attributes`.
            **kwargs: Stored as ``self.kwargs`` for use by subclasses.
        """
        self.kwargs = kwargs
        # Validation runs before the corresponding attributes are assigned below.
        self.validate_dom_id(dom_id=dom_id)
        self.validate_bem(bem_block=bem_block,
                          bem_element=bem_element)
        self.validate_html_tag(html_tag=html_tag)
        self._childrenlist = []
        self._virtual_childrenlist = []
        self._is_bootstrapped = False
        self.properties = {}
        # An element is either a BEM block or a BEM element -- validate_bem()
        # guarantees at most one of the two kwargs is set.
        self._overridden_bem_block_or_element = bem_block or bem_element
        self._overridden_bem_variant_list = bem_variant_list
        self._overridden_role = role
        self._overridden_dom_id = dom_id
        self._overridden_html_tag = html_tag
        self._html_element_attributes = html_element_attributes
        self._overridden_css_classes_list = css_classes_list
        self._overridden_test_css_class_suffixes_list = test_css_class_suffixes_list
        self._extra_css_classes_list = extra_css_classes_list
        # The prepopulate_* hooks let subclasses inject children before any
        # caller-supplied ones.
        self.add_children(*self.prepopulate_children_list())
        self.add_virtual_children(*self.prepopulate_virtual_children_list())
        if children:
            self.add_children(*children)
    def should_validate_dom_id(self):
        """
        Should we raise an :class:`.InvalidDomIdError` exception
        when the ``dom_id`` kwarg is malformed?

        Returns the value of the :setting:`DJANGO_CRADMIN_UICONTAINER_VALIDATE_DOM_ID`
        setting, falling back to ``True`` if it is not defined.

        The validator requires the dom_id
        to start with ``id_``, be lowercase, and not contain ``-``.

        We recommend not overriding this, to ensure uniform DOM id naming.
        You should disable this validation in production using the
        :setting:`DJANGO_CRADMIN_UICONTAINER_VALIDATE_DOM_ID` setting.
        """
        return getattr(settings, 'DJANGO_CRADMIN_UICONTAINER_VALIDATE_DOM_ID', True)
    def should_validate_bem(self):
        """
        Should we raise an :class:`.InvalidBemError` exception
        when the ``bem_block`` or ``bem_element`` kwarg is malformed?

        Returns the value of the :setting:`DJANGO_CRADMIN_UICONTAINER_VALIDATE_BEM`
        setting, falling back to ``True`` if it is not defined.

        The validator requires the bem_block to not contain ``__``
        (double underscore), and the bem_element to contain ``__`` (double
        underscore).

        We recommend not changing this, to ensure BEM elements and
        blocks are used correctly.
        You should disable this validation in production using the
        :setting:`DJANGO_CRADMIN_UICONTAINER_VALIDATE_BEM` setting.
        """
        return getattr(settings, 'DJANGO_CRADMIN_UICONTAINER_VALIDATE_BEM', True)
def validate_dom_id(self, dom_id):
if dom_id is False:
return
if not self.should_validate_dom_id():
return
normalized_dom_id = dom_id.replace('-', '').lower()
if not dom_id.startswith('id_') or dom_id != normalized_dom_id:
raise InvalidDomIdError(
'dom_id must begin with "id_", be all lowercase, and can not contain "-". '
'{dom_id!r} does not match this requirement.'.format(
dom_id=dom_id))
    def validate_bem(self, bem_block, bem_element):
        """
        Validate the ``bem_block`` and ``bem_element`` kwargs.

        A no-op when :meth:`.should_validate_bem` returns ``False``.

        Raises:
            InvalidBemError: If both ``bem_block`` and ``bem_element`` are
                supplied, if ``bem_block`` contains ``__``, or if
                ``bem_element`` does not contain ``__``.
        """
        if not self.should_validate_bem():
            return
        if bem_block and bem_element:
            raise InvalidBemError(
                'Can not specify both bem_element or bem_block. An '
                'HTML element is eighter a BEM block or a BEM element.')
        if bem_block:
            if '__' in bem_block:
                raise InvalidBemError(
                    '{bem_block} is not a valid BEM block name. '
                    'BEM blocks do not contain "__". Are you sure you '
                    'did not mean to use the bem_element kwarg?'.format(
                        bem_block=bem_block
                    ))
        elif bem_element:
            if '__' not in bem_element:
                raise InvalidBemError(
                    '{bem_element} is not a valid BEM element name. '
                    'BEM elements must contain "__". Are you sure you '
                    'did not mean to use the bem_block kwarg?'.format(
                        bem_element=bem_element
                    ))
def get_full_class_path_as_string(self):
"""
Get full class path as string.
Useful for providing some extra information in exceptions.
Normally this will be in a traceback, but when dealing with
things rendered by a Django template, this information is not
always included.
"""
return '{}.{}'.format(self.__class__.__module__, self.__class__.__name__)
def validate_html_tag(self, html_tag):
if html_tag and self.supported_html_tags and html_tag not in self.supported_html_tags:
raise UnsupportedHtmlTagError('Unsupported HTML tag for {classpath}: {html_tag}'.format(
classpath=self.get_full_class_path_as_string(),
html_tag=self._overridden_html_tag
))
def get_default_html_tag(self):
"""
Get the default HTML tag to wrap renderable in.
Can be overriden by the ``html_tag`` kwarg for :meth:`.__init__`.
Returns ``"div"`` by default.
"""
return 'div'
@property
def html_tag(self):
"""
Get the HTML tag for this container.
"""
return self._overridden_html_tag or self.get_default_html_tag()
@property
def html_tag_supports_children(self):
"""
Does the html tag support children?
If this returns ``False``, we:
- Do not render an end tag for the wrapper element.
- Do not allow children to be added to the container.
Should be overridden to return ``False`` if the :meth:`.get_default_html_tag`
does not allow for children. Examples of this case is if the
wrapper html tag i ``input`` or ``hr``.
See also :meth:`.can_have_children`, which should be used if the HTML tag
should have and end tag, but not children.
Returns:
boolean: True by default.
"""
return True
@property
def can_have_children(self):
"""
Can this container have children?
If this returns ``False``, :meth:`.add_child` will raise
:class:`.NotAllowedToAddChildrenError`.
Returns:
boolean: The return value from :meth:`.html_tag_supports_children` by default.
"""
return self.html_tag_supports_children
def get_default_role(self):
"""
Get the default value for the role attribute of the html element.
Defaults to ``False``.
"""
return False
@property
def role(self):
"""
Get the value for the role attribute of the html element.
You should not override this. Override :meth:`.get_default_role` instead.
"""
return self._overridden_role or self.get_default_role()
def get_default_dom_id(self):
"""
Get the default value for the id attribute of the html element.
Defaults to ``False``.
"""
return False
@property
def dom_id(self):
"""
Get the value for the id attribute of the html element.
You should not override this. Override :meth:`.get_default_dom_id` instead.
"""
return self._overridden_dom_id or self.get_default_dom_id()
def get_html_element_attributes(self):
"""
Get HTML element attributes as a dict.
The dict is parsed by :func:`django.forms.utils.flatatt`,
so:
- ``{'myattribute': True}`` results in ``myattribute`` (no value).
- ``{'myattribute': False}`` results in the attribute beeing ignored (not included in the output).
- ``{'myattribute': 'Some value'}`` results in the ``myattribute="Some value"``.
If you override this method, *remember to call super* to get
the attributes set in the superclass.
"""
html_element_attributes = {
'role': self.role,
'id': self.dom_id,
'class': self.css_classes or False, # Fall back to false to avoid class=""
}
if self._html_element_attributes:
html_element_attributes.update(self._html_element_attributes)
return html_element_attributes
@property
def html_element_attributes_string(self):
"""
Get :meth:`.get_html_element_attributes` + any attributes in
the ``html_element_attributes`` kwarg for :meth:`.__init__`
encoded as a string using :func:`django.forms.utils.flatatt`.
"""
return flatatt(self.get_html_element_attributes())
def get_default_css_classes_list(self):
"""
Override this to provide a default list of css classes.
The css classes specified here can be overridden using
the ``css_classes_list`` kwarg for :meth:`.__init__`.
"""
return []
def get_default_bem_block_or_element(self):
"""
Get the default BEM block or element.
A HTML element is eighter a BEM block or a
BEM element, so we have joined this into
a single method.
"""
return None
def get_bem_block_or_element(self):
"""
Get the BEM block or element.
DO NOT OVERRIDE THIS METHOD.
Override :meth:`.get_default_bem_block_or_element` instead.
"""
return (self._overridden_bem_block_or_element or
self.get_default_bem_block_or_element())
def get_default_bem_variant_list(self):
"""
Get the default BEM variants.
The full CSS class of any variant in the list will
be :meth:`.get_bem_block_or_element` with ``--`` and
the variant appended, so if the bem block/element is
``"menu"``, and the variant is ``"expanded"``, the
resulting css class will be ``"menu--expanded"``.
"""
return []
def get_bem_variant_list(self):
"""
Get the list of BEM variants.
DO NOT OVERRIDE THIS METHOD.
Override :meth:`.get_default_bem_variant_list` instead.
"""
return self._overridden_bem_variant_list or self.get_default_bem_variant_list()
def get_bem_css_classes_list(self):
"""
Get the BEM css classes as list.
DO NOT OVERRIDE THIS METHOD.
Override :meth:`.get_default_bem_block_or_element`
and :meth:`.get_default_bem_variant_list` instead.
"""
bem_block_or_element = self.get_bem_block_or_element()
bem_css_classes = []
if bem_block_or_element:
bem_css_classes.append(bem_block_or_element)
for variant in self.get_bem_variant_list():
css_class = '{}--{}'.format(bem_block_or_element, variant)
bem_css_classes.append(css_class)
return bem_css_classes
def get_css_classes_list(self):
"""
DO NOT OVERRIDE THIS METHOD.
Unlike with :class:`django_cradmin.renderable.AbstractRenderableWithCss`,
you do not override this class to add your own css classes. Override
:meth:`.get_default_css_classes_list`.
This is because this method respects the ``css_classes_list`` kwarg
for :meth:`.__init__`, and just falls back to :meth:`.get_default_css_classes_list`.
So if you override this method, the ``css_classes_list`` kwarg will be useless.
"""
css_classes_list = self.get_bem_css_classes_list()
if self._overridden_css_classes_list:
css_classes_list.extend(self._overridden_css_classes_list)
else:
css_classes_list.extend(self.get_default_css_classes_list())
if self._extra_css_classes_list:
css_classes_list.extend(self._extra_css_classes_list)
return css_classes_list
def get_default_test_css_class_suffixes_list(self):
"""
Override this to provide a default list of css classes for unit tests.
The css classes specified here can be overridden using
the ``test_css_class_suffixes_list`` kwarg for :meth:`.__init__`.
"""
return ['uicontainer-{}'.format(self.__class__.__name__.lower())]
def get_test_css_class_suffixes_list(self):
"""
DO NOT OVERRIDE THIS METHOD.
Unlike with :class:`django_cradmin.renderable.AbstractRenderableWithCss`,
you do not override this class to add your own test css classes. Override
:meth:`.get_default_test_css_class_suffixes_list`.
This is because this method respects the ``test_css_class_suffixes_list`` kwarg
for :meth:`.__init__`, and just falls back to :meth:`.get_default_test_css_class_suffixes_list`.
So if you override this method, the ``test_css_class_suffixes_list`` kwarg will be useless.
"""
if self._overridden_test_css_class_suffixes_list:
test_css_class_suffixes_list = self._overridden_test_css_class_suffixes_list
else:
test_css_class_suffixes_list = self.get_default_test_css_class_suffixes_list()
return test_css_class_suffixes_list
def bootstrap(self, parent=None):
"""
Bootstrap the container.
Must be called once on the top-level container
in the tree of containers.
Sets the provided parent as :attr:`.parent`.
Updates the properties of all children (using dict update())
with :attr:`.properties`.
"""
if self._is_bootstrapped:
raise AlreadyBootsrappedError('The container is already bootstrapped. Can not bootstrap '
'the same container twice.')
self.parent = parent
if self.parent:
self.properties.update(self.parent.properties)
for child in self._virtual_childrenlist:
child.bootstrap(parent=self)
for child in self._childrenlist:
child.bootstrap(parent=self)
self._is_bootstrapped = True
return self
def prepopulate_children_list(self):
"""
Pre-polulate the children list.
This is called in :meth:`.__init__` before
any children from the kwargs is added.
Returns:
list: An empty list by default, but you can override this
in subclasses.
"""
return []
def prepopulate_virtual_children_list(self):
"""
Pre-polulate the virtual children list.
This is called in :meth:`.__init__` before
any children from the kwargs is added, and before any children
is :meth:`.prepopulate_children_list` is added.
Returns:
list: An empty list by default, but you can override this
in subclasses.
"""
return []
def add_child(self, childcontainer):
"""
Add a child to the container.
Args:
childcontainer: A :class:`.AbstractContainerRenderable` object.
Returns:
A reference to self. This means that you can chain calls to this method.
"""
if self.can_have_children:
self._childrenlist.append(childcontainer)
if self._is_bootstrapped and not childcontainer._is_bootstrapped:
childcontainer.bootstrap(parent=self)
else:
raise NotAllowedToAddChildrenError('{modulename}.{classname} can not have children'.format(
modulename=self.__class__.__module__,
classname=self.__class__.__name__
))
return self
def add_virtual_child(self, childcontainer):
"""
Add a "virtual" child to the container.
This child is not rendered as a child of the container automatically
(that is left to the template rendering the container). But it
inherits properties and is automatically bootstrapped just like a
regular child.
Args:
childcontainer: A :class:`.AbstractContainerRenderable` object.
Returns:
A reference to self. This means that you can chain calls to this method.
"""
if self.can_have_children:
self._virtual_childrenlist.append(childcontainer)
if self._is_bootstrapped and not childcontainer._is_bootstrapped:
childcontainer.bootstrap(parent=self)
return self
def add_children(self, *childcontainers):
"""
Add children to the container.
Args:
*childcontainers: Zero or more :class:`.AbstractContainerRenderable` objects.
Returns:
A reference to self. This means that you can chain calls to this method.
"""
for childcontainer in childcontainers:
self.add_child(childcontainer)
return self
def add_virtual_children(self, *childcontainers):
"""
Add virtual children to the container.
Args:
*childcontainers: Zero or more :class:`.AbstractContainerRenderable` objects.
Returns:
A reference to self. This means that you can chain calls to this method.
"""
for childcontainer in childcontainers:
self.add_virtual_child(childcontainer)
return self
def iter_children(self):
"""
Returns an iterator over the children of this container.
The yielded children will be objects of :class:`.AbstractContainerRenderable`
subclasses.
"""
return iter(self._childrenlist)
def iter_virtual_children(self):
"""
Returns an iterator over the virtual children of this container.
The yielded children will be objects of :class:`.AbstractContainerRenderable`
subclasses.
"""
return iter(self._virtual_childrenlist)
def get_childcount(self):
"""
Get the number of children in the container.
"""
return len(self._childrenlist)
def get_virtual_childcount(self):
"""
Get the number of virtual children in the container.
"""
return len(self._virtual_childrenlist)
@property
def should_render(self):
"""
Should we render anything?
Override this to make the :meth:`.render` to control
if the container is rendered. If this returns ``False``,
:meth:`.render` returns an empty string instead of
rendering the template.
Returns:
bool: ``True`` by default, but subclasses can override this behavior.
"""
return True
def render(self, **kwargs):
"""
Overrides :meth:`django_cradmin.renderable.AbstractRenderable.render`.
The only change is that we return an empty string if
:meth:`.should_render` returns ``False``. If it returns ``True``,
we call the overriden method and returns the result.
Args:
**kwargs: Forwarded to the overridden method if it is called.
"""
if not self._is_bootstrapped:
raise NotBootsrappedError(
'Can not render an AbstractContainerRenderable that has not been bootstrapped. '
'Ensure you call bootsrap() on the top-level container in the container '
'hierarchy before rendering. Class causing this issue: {classpath}'.format(
classpath=self.get_full_class_path_as_string()
))
if self.should_render:
return super(AbstractContainerRenderable, self).render(**kwargs)
else:
return ''
class Div(AbstractContainerRenderable):
    """
    Renders a ``<div>``.
    The only thing this class does is to override
    :meth:`django_cradmin.uicontainer.container.AbstractContainerRenderable.get_default_html_tag`
    and return ``"div"``.
    """
    def get_default_html_tag(self):
        # Plain block-level wrapper element.
        return 'div'
class NoWrapperElement(AbstractContainerRenderable):
    """
    Renders children, but no wrapper HTML element.
    """
    # Template that renders only the children, without any surrounding tag.
    template_name = 'django_cradmin/uicontainer/no_wrapper_element.django.html'
| 37.160714 | 109 | 0.639797 | 24,835 | 0.994514 | 0 | 0 | 2,566 | 0.102755 | 0 | 0 | 15,303 | 0.612806 |
21baa6263f7bce8a697dc4c1214c2f9cbd322393 | 5,982 | py | Python | country_settings.py | region-spotteR/conora_chronologies | 0ee6cadb61921f95f738425ef99a13ae07f262a7 | [
"CC0-1.0"
] | null | null | null | country_settings.py | region-spotteR/conora_chronologies | 0ee6cadb61921f95f738425ef99a13ae07f262a7 | [
"CC0-1.0"
] | null | null | null | country_settings.py | region-spotteR/conora_chronologies | 0ee6cadb61921f95f738425ef99a13ae07f262a7 | [
"CC0-1.0"
] | null | null | null | class attributes_de:
def __init__(self,threshold_list,range_for_r):
self.country_name = 'Germany'
self.population = 83190556
self.url = 'https://opendata.arcgis.com/datasets/dd4580c810204019a7b8eb3e0b329dd6_0.geojson'
self.contains_tests=False
self.csv=False # if the resources have csv format
self.CasesPer100k_thresholds=threshold_list
self.Range_for_R=range_for_r
self.color_sizes = dict(
colorEvenRows = '#FFE6D9',
colorOddRows = 'white',
colorHeaderBG= '#FFCE00',
sizeHeaderFont = 14,
colorHeaderFont='black',
colorCellFont = 'black',
sizeCellFont = 12,
colorTitle = 'black',
sizeTitleFont = 27,
colorPivotColumnText='#DD0000'
)
# https://www.data.gouv.fr/fr/datasets/synthese-des-indicateurs-de-suivi-de-lepidemie-covid-19/
class attributes_fr:
    """Country profile for France: population, data source and table styling."""

    def __init__(self, threshold_list, range_for_r):
        """
        Args:
            threshold_list: incidence thresholds (cases per 100k) to report on
            range_for_r: boundaries of interest for the reproduction number R
        """
        self.country_name = 'France'
        self.population = 67406000
        self.url = "https://www.data.gouv.fr/fr/datasets/r/f335f9ea-86e3-4ffa-9684-93c009d5e617"
        self.contains_tests = True
        self.csv_encoding = 'latin'
        self.csv_separator = ','
        # The French data source is published as CSV.
        self.csv = True
        self.CasesPer100k_thresholds = threshold_list
        self.Range_for_R = range_for_r
        # Table color scheme loosely based on the French flag colors.
        self.color_sizes = {
            'colorEvenRows': '#FFE6D9',
            'colorOddRows': 'white',
            'colorHeaderBG': '#001489',
            'sizeHeaderFont': 14,
            'colorHeaderFont': 'white',
            'colorCellFont': 'black',
            'sizeCellFont': 12,
            'colorTitle': '#001489',
            'sizeTitleFont': 27,
            'colorPivotColumnText': '#001489',
        }
class attributes_at:
    """Country profile for Austria: population, data source and table styling."""

    def __init__(self, threshold_list, range_for_r):
        """
        Args:
            threshold_list: incidence thresholds (cases per 100k) to report on
            range_for_r: boundaries of interest for the reproduction number R
        """
        self.country_name = 'Austria'
        self.population = 8901064
        self.url = "https://covid19-dashboard.ages.at/data/CovidFaelle_Timeline.csv"
        self.contains_tests = False
        self.csv_encoding = 'utf-8'
        self.csv_separator = ';'
        # The Austrian data source is published as CSV.
        self.csv = True
        self.CasesPer100k_thresholds = threshold_list
        self.Range_for_R = range_for_r
        # Table color scheme loosely based on the Austrian flag colors.
        self.color_sizes = {
            'colorEvenRows': '#F3EED9',
            'colorOddRows': 'white',
            'colorHeaderBG': '#ED2939',
            'sizeHeaderFont': 14,
            'colorHeaderFont': 'white',
            'colorCellFont': 'black',
            'sizeCellFont': 12,
            'colorTitle': '#ED2939',
            'sizeTitleFont': 27,
            'colorPivotColumnText': '#ED2939',
        }
# Austria: the way this data is published makes it unusually cumbersome to consume.
class attributes_be:
    """Country profile for Belgium: population, data source and table styling."""

    def __init__(self, threshold_list, range_for_r):
        """
        Args:
            threshold_list: incidence thresholds (cases per 100k) to report on
            range_for_r: boundaries of interest for the reproduction number R
        """
        self.country_name = 'Belgium'
        self.population = 11492641
        self.url = 'https://epistat.sciensano.be/Data/COVID19BE_tests.json'
        self.contains_tests = True
        # The Belgian data source is JSON, not CSV.
        self.csv = False
        self.CasesPer100k_thresholds = threshold_list
        self.Range_for_R = range_for_r
        # Table color scheme loosely based on the Belgian flag colors.
        self.color_sizes = {
            'colorEvenRows': '#FFE6D9',
            'colorOddRows': 'white',
            'colorHeaderBG': '#FDDA24',
            'sizeHeaderFont': 14,
            'colorHeaderFont': 'black',
            'colorCellFont': 'black',
            'sizeCellFont': 12,
            'colorTitle': 'black',
            'sizeTitleFont': 27,
            'colorPivotColumnText': '#EF3340',
        }
class attributes_lv:
    """Country profile for Latvia: population, data source and table styling."""

    def __init__(self, threshold_list, range_for_r):
        """
        Args:
            threshold_list: incidence thresholds (cases per 100k) to report on
            range_for_r: boundaries of interest for the reproduction number R
        """
        self.country_name = 'Latvia'
        self.population = 1907675
        self.url = 'https://data.gov.lv/dati/eng/api/3/action/datastore_search_sql?sql=SELECT%20*%20from%20%22d499d2f0-b1ea-4ba2-9600-2c701b03bd4a%22'
        self.contains_tests = True
        # The Latvian data source is a JSON API, not CSV.
        self.csv = False
        self.CasesPer100k_thresholds = threshold_list
        self.Range_for_R = range_for_r
        # Table color scheme loosely based on the Latvian flag colors.
        self.color_sizes = {
            'colorEvenRows': '#F3EED9',
            'colorOddRows': 'white',
            'colorHeaderBG': '#9E3039',
            'sizeHeaderFont': 14,
            'colorHeaderFont': 'white',
            'colorCellFont': 'black',
            'sizeCellFont': 12,
            'colorTitle': '#9E3039',
            'sizeTitleFont': 27,
            'colorPivotColumnText': '#9E3039',
        }
def get_attributes(country, threshold_list=None, range_for_r=None):
    """
    Gets the country specific attributes like Name, population, url etc.
    Parameters
    ----------
    country : str
        A two letter country code, e.g. 'de' for Germany
    threshold_list : list
        optional: A list of integers representing the threshold which R has to go above or below
    range_for_r : list
        optional: A list of floats representing the range of R
    Returns
    -------
    class
        Class with the country specific attributes. Also contains a color scheme class for this country.
        ``None`` is returned (after printing an error) for an unknown country code.
    """
    # Build the defaults per call: list defaults in the signature would be
    # shared between calls (mutable-default-argument pitfall).
    if threshold_list is None:
        threshold_list = [10, 20, 50, 100, 200, 400, 600, 800, 1000]
    if range_for_r is None:
        range_for_r = [0.8, 0.85, 0.9, 0.95, 1.05, 1.1, 1.15, 1.2]
    # Dispatch table instead of an if/elif chain.
    country_attributes = {
        'de': attributes_de,
        'fr': attributes_fr,
        'at': attributes_at,
        'be': attributes_be,
        'lv': attributes_lv,
    }
    attribute_class = country_attributes.get(country)
    if attribute_class is None:
        # Previously an unknown code printed this message and then raised
        # UnboundLocalError on `return attributes`, which the blanket
        # `except Exception` swallowed - the net effect was print + None.
        # Keep that print-and-return-None contract, but make it explicit.
        print("Error no such country attribute defined")
        return None
    return attribute_class(threshold_list, range_for_r)
# de -> Germany attributes
# lv -> Latvia attributes
# de | 37.860759 | 150 | 0.612671 | 4,366 | 0.729856 | 0 | 0 | 0 | 0 | 0 | 0 | 1,850 | 0.309261 |
21bc3e83174440b0d25cd071871ba1fe4765dc1b | 408 | py | Python | src/accounts/migrations/0009_alter_protection_description.py | NikolayTls/CarRental-Fullstack | e535976c25dd77896a355a2d30b5348be90ac040 | [
"MIT"
] | null | null | null | src/accounts/migrations/0009_alter_protection_description.py | NikolayTls/CarRental-Fullstack | e535976c25dd77896a355a2d30b5348be90ac040 | [
"MIT"
] | null | null | null | src/accounts/migrations/0009_alter_protection_description.py | NikolayTls/CarRental-Fullstack | e535976c25dd77896a355a2d30b5348be90ac040 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.5 on 2021-11-09 18:57
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: widens Protection.description to
    # max_length=999 and makes it nullable. Do not edit by hand - create a
    # follow-up migration for further changes.
    dependencies = [
        ('accounts', '0008_auto_20211108_1633'),
    ]
    operations = [
        migrations.AlterField(
            model_name='protection',
            name='description',
            field=models.CharField(max_length=999, null=True),
        ),
    ]
| 21.473684 | 62 | 0.612745 | 315 | 0.772059 | 0 | 0 | 0 | 0 | 0 | 0 | 107 | 0.262255 |
21bc625c93948c267439f48c065862fcfaf59846 | 899 | py | Python | builder.py | Delivery-Klad/chat_desktop | 1702996255d6fa5dbd6c5b480f2a3f4f19cbfdc6 | [
"Apache-2.0"
] | null | null | null | builder.py | Delivery-Klad/chat_desktop | 1702996255d6fa5dbd6c5b480f2a3f4f19cbfdc6 | [
"Apache-2.0"
] | 1 | 2021-12-28T01:51:37.000Z | 2021-12-28T01:51:37.000Z | builder.py | Delivery-Klad/chat_desktop | 1702996255d6fa5dbd6c5b480f2a3f4f19cbfdc6 | [
"Apache-2.0"
] | null | null | null | import sys
from cx_Freeze import setup, Executable
# Pick the GUI base on Windows so the frozen app opens without a console window.
# NOTE: sys.platform is "win32" on every Windows build, including 64-bit
# Python, so the old `elif sys.platform == "win64"` branch was dead code
# ("win64" is never a value of sys.platform).
base = None
if sys.platform == "win32":
    base = "Win32GUI"
# Modules excluded from the frozen build to keep it small.
excludes = ['PyQt5', 'colorama', 'pandas', 'sqlalchemy', 'numpy', 'notebook', 'Django', 'schedule']
# Packages cx_Freeze must bundle explicitly (implicit/dynamic imports).
packages = ["idna", "_cffi_backend", "bcrypt", "rsa", "os", "keyring", "keyring.backends",
            "win32ctypes", "shutil", "PIL", "qrcode", "pyminizip", "pathlib"]
zip_include_packages = ['collections', 'encodings', 'importlib']
options = {'build_exe': {
    'packages': packages,
    'excludes': excludes,
    'zip_include_packages': zip_include_packages, }
}
executables = [Executable("main.py", base=base)]
setup(name="Chat",  # bdist_msi, bdist_mac
      author="Delivery Klad",
      options=options,
      version="4.2",
      description='Encrypted chat',
      executables=executables)
| 29 | 99 | 0.630701 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 386 | 0.429366 |
21bc9eb14d61179cb27becc3805a37469b02b334 | 2,200 | py | Python | flaviabernardes/flaviabernardes/cms/migrations/0014_auto_20160717_1414.py | rogerhil/flaviabernardes | 30676c7e4b460f11ef9f09a33936ee3820b129da | [
"Apache-2.0"
] | null | null | null | flaviabernardes/flaviabernardes/cms/migrations/0014_auto_20160717_1414.py | rogerhil/flaviabernardes | 30676c7e4b460f11ef9f09a33936ee3820b129da | [
"Apache-2.0"
] | null | null | null | flaviabernardes/flaviabernardes/cms/migrations/0014_auto_20160717_1414.py | rogerhil/flaviabernardes | 30676c7e4b460f11ef9f09a33936ee3820b129da | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import image_cropping.fields
class Migration(migrations.Migration):
dependencies = [
('cms', '0013_auto_20151121_1602'),
]
operations = [
migrations.AddField(
model_name='page',
name='background_cover',
field=image_cropping.fields.ImageRatioField('background_cover_image', '1920x600', hide_image_field=False, adapt_rotation=False, size_warning=False, allow_fullsize=False, verbose_name='background cover', help_text=None, free_crop=False),
),
migrations.AddField(
model_name='page',
name='background_cover_image',
field=models.ImageField(blank=True, upload_to='uploads'),
),
migrations.AddField(
model_name='page',
name='show_footer',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='page',
name='show_header',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='pagedraft',
name='background_cover',
field=image_cropping.fields.ImageRatioField('background_cover_image', '1920x600', hide_image_field=False, adapt_rotation=False, size_warning=False, allow_fullsize=False, verbose_name='background cover', help_text=None, free_crop=False),
),
migrations.AddField(
model_name='pagedraft',
name='background_cover_image',
field=models.ImageField(blank=True, upload_to='uploads'),
),
migrations.AddField(
model_name='pagedraft',
name='show_footer',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='pagedraft',
name='show_header',
field=models.BooleanField(default=True),
),
migrations.AlterField(
model_name='page',
name='name',
field=models.CharField(max_length=128, editable=False, verbose_name='Slug', unique=True),
),
]
| 36.065574 | 248 | 0.616818 | 2,062 | 0.937273 | 0 | 0 | 0 | 0 | 0 | 0 | 397 | 0.180455 |
21bd217d40255d109408b19a7614470a41a98d8d | 5,674 | py | Python | backend/inst-selec/tree-match-burs-table/app/match_naive.py | obs145628/cle | 4a4a18b2ab5a6fbf26629f6845147541edabd7c9 | [
"MIT"
] | null | null | null | backend/inst-selec/tree-match-burs-table/app/match_naive.py | obs145628/cle | 4a4a18b2ab5a6fbf26629f6845147541edabd7c9 | [
"MIT"
] | null | null | null | backend/inst-selec/tree-match-burs-table/app/match_naive.py | obs145628/cle | 4a4a18b2ab5a6fbf26629f6845147541edabd7c9 | [
"MIT"
] | null | null | null | '''
Tree-Matching implementation
Based on BURS (Bottom-Up Rewrite System)
Similar to tree-match-burs1 project
Inspired from:
- Instruction Selection via Tree-Pattern Matching - Engineering a Compiler p610
- An Improvement to Bottom-up Tree Pattern Matching - David R. Chase
- Simple and Efficient BURS Table Generation - Todd A. Proebsting
'''
import os
import sys
import rules
import optree
from digraph import Digraph
import graphivz
# Sentinel cost meaning "no match found" - larger than any real match cost.
MAX_COST = 100000000
class MatchInfos:
    """Per-node match table: cheapest (rule, cost) pair for each nonterminal."""
    def __init__(self, node):
        self.node = node
        # maps nonterminal name -> (rule, minimum cost) matching this node
        self.rc = dict()
    def add_match(self, r, cost):
        """Record rule `r` matching at `cost`, keeping only the cheapest
        entry per nonterminal. Returns True when the entry was added or
        replaced, False when an existing match is already at least as cheap."""
        name = r.lhs
        # BUGFIX: compare against the stored *cost* (index 1) - the previous
        # code compared the stored rule object (index 0) with the new cost.
        if name in self.rc and self.rc[name][1] <= cost:
            return False
        self.rc[name] = (r, cost)
        return True
    def get_match(self, rule_name):
        """Return the best (rule, cost) pair for `rule_name`,
        or (None, MAX_COST) when there is no match."""
        if rule_name in self.rc:
            return self.rc[rule_name]
        return (None, MAX_COST)
class Matcher:
    """
    Bottom-up tree-pattern matcher (BURS style).

    First pass (:meth:`match`) annotates every tree node, children first,
    with the cheapest rule per nonterminal. Second pass (:meth:`apply`)
    walks the tree top-down and replays the selected rules on a runner
    object via its before()/after() callbacks.
    """
    def __init__(self, rules, t):
        self.rules = rules
        self.t = t
        # One MatchInfos entry per tree node, indexed by node.idx.
        self.infos = [MatchInfos(n) for n in self.t.nodes]
    def match(self):
        """Compute the match table for the whole tree."""
        # BUGFIX: use self.t - the previous code read the module-level
        # global `t`, which only happened to exist when run as a script.
        self.match_node(self.t.root)
    def match_node(self, node):
        """Compute all rule matches for `node`, bottom-up (children first)."""
        # Match children first
        for arg in node.succs:
            self.match_node(arg)
        for r in self.rules.rules:
            if r.is_nt():
                continue  # non-terminal (chain) rules are matched indirectly
            if r.get_op() != node.op or len(r.get_args()) != len(node.succs):
                continue  # operator or arity differs: rule doesn't match
            cost = r.cost
            # try to match all children; MAX_COST marks failure
            for (arg_i, arg) in enumerate(node.succs):
                arg_infos = self.infos[arg.idx]
                arg_rule = r.get_args()[arg_i]
                arg_cost = arg_infos.get_match(arg_rule)[1]
                if arg_cost == MAX_COST:
                    cost = MAX_COST
                    break
                cost += arg_cost
            if cost != MAX_COST:
                # match found
                self.add_match(node, r, cost)
    def add_match(self, node, r, cost):
        """Record rule `r` at `cost` on `node`, then propagate through chain rules."""
        infos = self.infos[node.idx]
        if not infos.add_match(r, cost):
            # An at-least-as-cheap match already exists: nothing to propagate.
            return
        # propagate infos to all non-terminal (chain) rules deriving r.lhs
        for ntr in self.rules.rules:
            if ntr.is_nt() and ntr.rhs == r.lhs:
                self.add_match(node, ntr, cost + ntr.cost)
    def apply(self, runner):
        """Replay the cheapest cover for the 'goal' nonterminal on `runner`."""
        self.apply_rec(runner, self.t.root, 'goal')
    def apply_rec(self, runner, node, rule):
        """Recursively apply the selected rules for `node` as nonterminal `rule`."""
        # Get node match rule
        match = self.infos[node.idx].get_match(rule)
        if match[0] is None:
            raise Exception('Matching tree failed')
        # Collect the non-terminal (chain) rules leading to the terminal rule
        nt_rules = []
        while match[0].is_nt():
            nt_rules.append(match[0])
            match = self.infos[node.idx].get_match(match[0].rhs)
            assert match[0] is not None
        # Open non-terminal rules, outermost first
        for r in nt_rules:
            runner.before(node, r)
        # Apply the terminal rule and recurse into all children
        rule = match[0]
        runner.before(node, rule)
        for (arg_i, arg) in enumerate(node.succs):
            arg_rule = rule.get_args()[arg_i]
            self.apply_rec(runner, arg, arg_rule)
        runner.after(node, rule)
        # Close non-terminal rules in reverse order
        for r in nt_rules[::-1]:
            runner.after(node, r)
    def all_matches(self):
        """Return a helper whose save_dot() renders every node with its match table."""
        def get_label(infos):
            res = "{} {{".format(infos.node.op)
            for rc in infos.rc.values():
                res += "({}#{}, {}) ".format(rc[0].lhs, rc[0].idx, rc[1])
            res += "}"
            return res
        class Helper:
            def __init__(self, obj):
                self.obj = obj
            def save_dot(self, dot_file):
                g = Digraph(len(self.obj.infos))
                for infos in self.obj.infos:
                    n = infos.node
                    g.set_vertex_label(n.idx, get_label(infos))
                    if n.pred is not None:
                        g.add_edge(n.pred.idx, n.idx)
                g.save_dot(dot_file)
        return Helper(self)
    def apply_matches(self):
        """Return a helper whose save_dot() renders the tree labeled with the
        rules selected by apply() (the helper doubles as the apply-runner)."""
        class Helper:
            def __init__(self, obj):
                self.obj = obj
                self.labels = ['' for _ in self.obj.infos]
            def save_dot(self, dot_file):
                g = Digraph(len(self.obj.infos))
                for infos in self.obj.infos:
                    n = infos.node
                    if n.pred is not None:
                        g.add_edge(n.pred.idx, n.idx)
                self.obj.apply(self)
                for (u, label) in enumerate(self.labels):
                    g.set_vertex_label(u, label)
                g.save_dot(dot_file)
            def before(self, node, rule):
                # Accumulate "op: rule#idx + rule#idx ..." labels per node.
                lbl = self.labels[node.idx]
                if len(lbl) == 0:
                    lbl = '{}: '.format(node.op)
                if not lbl.endswith(': '):
                    lbl += ' + '
                lbl += '{}#{}'.format(rule.lhs, rule.idx)
                self.labels[node.idx] = lbl
            def after(self, node, rule):
                pass
        return Helper(self)
if __name__ == '__main__':
    # Load the rewrite rules shipped with the project and the operator tree
    # given on the command line, run the matcher, then display the raw match
    # table and the selected covering as graphviz renderings.
    rs = rules.parse_file(os.path.join(os.path.dirname(__file__), '../config/rules.txt'))
    print(rs)
    t = optree.parse_file(sys.argv[1])
    matcher = Matcher(rs, t)
    matcher.match()
    graphivz.show_obj(matcher.all_matches())
    graphivz.show_obj(matcher.apply_matches())
| 27.278846 | 89 | 0.519739 | 4,899 | 0.863412 | 0 | 0 | 0 | 0 | 0 | 0 | 795 | 0.140113 |
21bdc2ccc7ab9e40f05cc42e706cde91619db6a2 | 95,650 | py | Python | gym_electric_motor/physical_systems/electric_motors.py | 54hanxiucao/gym-electric-motor | 911432388b00675e8a93f4a7937fdc575f106f22 | [
"MIT"
] | 1 | 2021-03-29T07:47:32.000Z | 2021-03-29T07:47:32.000Z | gym_electric_motor/physical_systems/electric_motors.py | 54hanxiucao/gym-electric-motor | 911432388b00675e8a93f4a7937fdc575f106f22 | [
"MIT"
] | null | null | null | gym_electric_motor/physical_systems/electric_motors.py | 54hanxiucao/gym-electric-motor | 911432388b00675e8a93f4a7937fdc575f106f22 | [
"MIT"
] | null | null | null | import numpy as np
import math
from scipy.stats import truncnorm
class ElectricMotor:
"""
Base class for all technical electrical motor models.
A motor consists of the ode-state. These are the dynamic quantities of its ODE.
For example:
ODE-State of a DC-shunt motor: `` [i_a, i_e ] ``
* i_a: Anchor circuit current
* i_e: Exciting circuit current
Each electric motor can be parametrized by a dictionary of motor parameters,
the nominal state dictionary and the limit dictionary.
Initialization is given by initializer(dict). Can be constant state value
or random value in given interval.
dict should be like:
{ 'states'(dict): with state names and initital values
'interval'(array like): boundaries for each state
(only for random init), shape(num states, 2)
'random_init'(str): 'uniform' or 'normal'
'random_params(tuple): mue(float), sigma(int)
Example initializer(dict) for constant initialization:
{ 'states': {'omega': 16.0}}
Example initializer(dict) for random initialization:
{ 'random_init': 'normal'}
"""
#: Parameter indicating if the class is implementing the optional jacobian function
HAS_JACOBIAN = False
#: CURRENTS_IDX(list(int)): Indices for accessing all motor currents.
CURRENTS_IDX = []
#: CURRENTS(list(str)): List of the motor currents names
CURRENTS = []
#: VOLTAGES(list(str)): List of the motor input voltages names
VOLTAGES = []
#: _default_motor_parameter(dict): Default parameter dictionary for the motor
_default_motor_parameter = {}
#: _default_nominal_values(dict(float)): Default nominal motor state array
_default_nominal_values = {}
#: _default_limits(dict(float)): Default motor limits (0 for unbounded limits)
_default_limits = {}
#: _default_initial_state(dict): Default initial motor-state values
#_default_initializer = {}
_default_initializer = {'states': {},
'interval': None,
'random_init': None,
'random_params': None}
#: _default_initial_limits(dict): Default limit for initialization
_default_initial_limits = {}
@property
def nominal_values(self):
"""
Readonly motors nominal values.
Returns:
dict(float): Current nominal values of the motor.
"""
return self._nominal_values
@property
def limits(self):
"""
Readonly motors limit state array. Entries are set to the maximum physical possible values
in case of unspecified limits.
Returns:
dict(float): Limits of the motor.
"""
return self._limits
@property
def motor_parameter(self):
"""
Returns:
dict(float): The motors parameter dictionary
"""
return self._motor_parameter
@property
def initializer(self):
"""
Returns:
dict: Motor initial state and additional initializer parameter
"""
return self._initializer
@property
def initial_limits(self):
"""
Returns:
dict: nominal motor limits for choosing initial values
"""
return self._initial_limits
def __init__(self, motor_parameter=None, nominal_values=None,
limit_values=None, motor_initializer=None, initial_limits=None,
**__):
"""
:param motor_parameter: Motor parameter dictionary. Contents specified
for each motor.
:param nominal_values: Nominal values for the motor quantities.
:param limit_values: Limits for the motor quantities.
:param motor_initializer: Initial motor states (currents)
('constant', 'uniform', 'gaussian' sampled from
given interval or out of nominal motor values)
:param initial_limits: limits for of the initial state-value
"""
motor_parameter = motor_parameter or {}
self._motor_parameter = self._default_motor_parameter.copy()
self._motor_parameter.update(motor_parameter)
limit_values = limit_values or {}
self._limits = self._default_limits.copy()
self._limits.update(limit_values)
nominal_values = nominal_values or {}
self._nominal_values = self._default_nominal_values.copy()
self._nominal_values.update(nominal_values)
motor_initializer = motor_initializer or {}
self._initializer = self._default_initializer.copy()
self._initializer.update(motor_initializer)
self._initial_states = {}
if self._initializer['states'] is not None:
self._initial_states.update(self._initializer['states'])
# intialize limits, in general they're not needed to be changed
# during training or episodes
initial_limits = initial_limits or {}
self._initial_limits = self._nominal_values.copy()
self._initial_limits.update(initial_limits)
# preventing wrong user input for the basic case
assert isinstance(self._initializer, dict), 'wrong initializer'
def electrical_ode(self, state, u_in, omega, *_):
"""
Calculation of the derivatives of each motor state variable for the given inputs / The motors ODE-System.
Args:
state(ndarray(float)): The motors state.
u_in(list(float)): The motors input voltages.
omega(float): Angular velocity of the motor
Returns:
ndarray(float): Derivatives of the motors ODE-system for the given inputs.
"""
raise NotImplementedError
def electrical_jacobian(self, state, u_in, omega, *_):
"""
Calculation of the jacobian of each motor ODE for the given inputs / The motors ODE-System.
Overriding this method is optional for each subclass. If it is overridden, the parameter HAS_JACOBIAN must also
be set to True. Otherwise, the jacobian will not be called.
Args:
state(ndarray(float)): The motors state.
u_in(list(float)): The motors input voltages.
omega(float): Angular velocity of the motor
Returns:
Tuple(ndarray, ndarray, ndarray):
[0]: Derivatives of all electrical motor states over all electrical motor states shape:(states x states)
[1]: Derivatives of all electrical motor states over omega shape:(states,)
[2]: Derivative of Torque over all motor states shape:(states,)
"""
pass
def initialize(self,
state_space,
state_positions,
**__):
"""
Initializes given state values. Values can be given as a constant or
sampled random out of a statistical distribution. Initial value is in
range of the nominal values or a given interval. Values are written in
initial_states attribute
Args:
state_space(gym.Box): normalized state space boundaries (given by
physical system)
state_positions(dict): indexes of system states (given by physical
system)
Returns:
"""
# for organization purposes
interval = self._initializer['interval']
random_dist = self._initializer['random_init']
random_params = self._initializer['random_params']
self._initial_states.update(self._default_initializer['states'])
if self._initializer['states'] is not None:
self._initial_states.update(self._initializer['states'])
# different limits for InductionMotor
if any(map(lambda state: state in self._initial_states.keys(),
['psi_ralpha', 'psi_rbeta'])):
nominal_values_ = [self._initial_limits[state]
for state in self._initial_states]
upper_bound = np.asarray(np.abs(nominal_values_), dtype=float)
# state space for Induction Envs based on documentation
# ['i_salpha', 'i_sbeta', 'psi_ralpha', 'psi_rbeta', 'epsilon']
# hardcoded for Inductionmotors currently given in the toolbox
state_space_low = np.array([-1, -1, -1, -1, -1])
lower_bound = upper_bound * state_space_low
else:
if isinstance(self._nominal_values, dict):
nominal_values_ = [self._nominal_values[state]
for state in self._initial_states.keys()]
nominal_values_ = np.asarray(nominal_values_)
else:
nominal_values_ = np.asarray(self._nominal_values)
state_space_idx = [state_positions[state] for state in
self._initial_states.keys()]
upper_bound = np.asarray(nominal_values_, dtype=float)
lower_bound = upper_bound * \
np.asarray(state_space.low, dtype=float)[state_space_idx]
# clip nominal boundaries to user defined
if interval is not None:
lower_bound = np.clip(lower_bound,
a_min=
np.asarray(interval, dtype=float).T[0],
a_max=None)
upper_bound = np.clip(upper_bound,
a_min=None,
a_max=
np.asarray(interval, dtype=float).T[1])
# random initialization for each motor state (current, epsilon)
if random_dist is not None:
if random_dist == 'uniform':
initial_value = (upper_bound - lower_bound) * \
np.random.random_sample(
len(self._initial_states.keys())) + \
lower_bound
# writing initial values in initial_states dict
random_states = \
{state: initial_value[idx]
for idx, state in enumerate(self._initial_states.keys())}
self._initial_states.update(random_states)
elif random_dist in ['normal', 'gaussian']:
# specific input or middle of interval
mue = random_params[0] or (upper_bound - lower_bound) / 2 + lower_bound
sigma = random_params[1] or 1
a, b = (lower_bound - mue) / sigma, (upper_bound - mue) / sigma
initial_value = truncnorm.rvs(a, b,
loc=mue,
scale=sigma,
size=(len(self._initial_states.keys())))
# writing initial values in initial_states dict
random_states = \
{state: initial_value[idx]
for idx, state in enumerate(self._initial_states.keys())}
self._initial_states.update(random_states)
else:
# todo implement other distribution
raise NotImplementedError
# constant initialization for each motor state (current, epsilon)
elif self._initial_states is not None:
initial_value = np.atleast_1d(list(self._initial_states.values()))
# check init_value meets interval boundaries
if ((lower_bound <= initial_value).all()
and (initial_value <= upper_bound).all()):
initial_states_ = \
{state: initial_value[idx]
for idx, state in enumerate(self._initial_states.keys())}
self._initial_states.update(initial_states_)
else:
raise Exception('Initialization value has to be within nominal boundaries')
else:
raise Exception('No matching Initialization Case')
def reset(self,
state_space,
state_positions,
**__):
"""
Reset the motors state to a new initial state. (Default 0)
Args:
state_space(gym.Box): normalized state space boundaries
state_positions(dict): indexes of system states
Returns:
numpy.ndarray(float): The initial motor states.
"""
# check for valid initializer
if self._initializer and self._initializer['states']:
self.initialize(state_space, state_positions)
return np.asarray(list(self._initial_states.values()))
else:
return np.zeros(len(self.CURRENTS))
    def i_in(self, state):
        """Interface method: currents flowing into the motor.

        The base class provides no implementation; every concrete motor
        subclass must override this method.

        Args:
            state(ndarray(float)): ODE state of the motor

        Returns:
            list(float): List of all currents flowing into the motor.

        Raises:
            NotImplementedError: Always, in this base class.
        """
        raise NotImplementedError
def _update_limits(self, limits_d={}, nominal_d={}):
"""Replace missing limits and nominal values with physical maximums.
Args:
limits_d(dict): Mapping: quantitity to its limit if not specified
"""
# omega is replaced the same way for all motor types
limits_d.update(dict(omega=self._default_limits['omega']))
for qty, lim in limits_d.items():
if self._limits.get(qty, 0) == 0:
self._limits[qty] = lim
for entry in self._limits.keys():
if self._nominal_values.get(entry, 0) == 0:
self._nominal_values[entry] = nominal_d.get(entry, None) or \
self._limits[entry]
def _update_initial_limits(self, nominal_new={}, **kwargs):
"""
Complete initial states with further state limits
Args:
nominal_new(dict): new/further state limits
"""
self._initial_limits.update(nominal_new)
class DcMotor(ElectricMotor):
    """
    The DcMotor and its subclasses implement the technical system of a dc motor.

    This includes the system equations, the motor parameters of the equivalent
    circuit diagram, as well as limits.

    ===================== ========== ============= ===========================================
    Motor Parameter       Unit       Default Value Description
    ===================== ========== ============= ===========================================
    r_a                   Ohm        0.78          Armature circuit resistance
    r_e                   Ohm        25            Exciting circuit resistance
    l_a                   H          6.3e-3        Armature circuit inductance
    l_e                   H          1.2           Exciting circuit inductance
    l_e_prime             H          0.0094        Effective excitation inductance
    j_rotor               kg/m^2     0.017         Moment of inertia of the rotor
    ===================== ========== ============= ===========================================

    =============== ====== =============================================
    Motor Currents  Unit   Description
    =============== ====== =============================================
    i_a             A      Armature circuit current
    i_e             A      Exciting circuit current
    =============== ====== =============================================

    =============== ====== =============================================
    Motor Voltages  Unit   Description
    =============== ====== =============================================
    u_a             V      Armature circuit voltage
    u_e             V      Exciting circuit voltage
    =============== ====== =============================================

    ======== ===========================================================
    Limits / Nominal Value Dictionary Entries:
    -------- -----------------------------------------------------------
    Entry    Description
    ======== ===========================================================
    i_a      Armature current
    i_e      Exciting current
    omega    Angular Velocity
    torque   Motor generated torque
    u_a      Armature Voltage
    u_e      Exciting Voltage
    ======== ===========================================================
    """

    # Indices for array accesses into the ODE state / current vectors
    I_A_IDX = 0
    I_E_IDX = 1
    CURRENTS_IDX = [0, 1]
    CURRENTS = ['i_a', 'i_e']
    VOLTAGES = ['u_a', 'u_e']

    _default_motor_parameter = {
        'r_a': 0.78, 'r_e': 25, 'l_a': 6.3e-3, 'l_e': 1.2, 'l_e_prime': 0.0094,
        'j_rotor': 0.017,
    }
    _default_nominal_values = {'omega': 368, 'torque': 0.0, 'i_a': 50,
                               'i_e': 1.2, 'u': 420}
    _default_limits = {'omega': 500, 'torque': 0.0, 'i_a': 75, 'i_e': 2,
                       'u': 420}
    _default_initializer = {'states': {'i_a': 0.0, 'i_e': 0.0},
                            'interval': None,
                            'random_init': None,
                            'random_params': (None, None)}

    def __init__(self, motor_parameter=None, nominal_values=None,
                 limit_values=None, motor_initializer=None, **__):
        # Docstring of superclass
        super().__init__(motor_parameter, nominal_values,
                         limit_values, motor_initializer)
        #: Matrix that contains the constant parameters of the systems equation for faster computation
        self._model_constants = None
        self._update_model()
        self._update_limits()

    def _update_model(self):
        """
        Update the motors model parameters with the motor parameters.

        Called internally when the motor parameters are changed or the motor is initialized.
        """
        mp = self._motor_parameter
        # columns: i_a, i_e, omega * i_e, u_a, u_e
        self._model_constants = np.array([
            [-mp['r_a'], 0, -mp['l_e_prime'], 1, 0],
            [0, -mp['r_e'], 0, 0, 1]
        ])
        # normalize each current row by its circuit inductance
        self._model_constants[self.I_A_IDX] = self._model_constants[
            self.I_A_IDX] / mp['l_a']
        self._model_constants[self.I_E_IDX] = self._model_constants[
            self.I_E_IDX] / mp['l_e']

    def torque(self, currents):
        # Docstring of superclass
        return self._motor_parameter['l_e_prime'] * currents[self.I_A_IDX] * \
            currents[self.I_E_IDX]

    def i_in(self, currents):
        # Docstring of superclass
        return list(currents)

    def electrical_ode(self, state, u_in, omega, *_):
        # Docstring of superclass
        return np.matmul(self._model_constants, np.array([
            state[self.I_A_IDX],
            state[self.I_E_IDX],
            omega * state[self.I_E_IDX],
            u_in[0],
            u_in[1],
        ]))

    def get_state_space(self, input_currents, input_voltages):
        """
        Calculate the possible normalized state space for the motor as a tuple of dictionaries "low" and "high".

        Args:
            input_currents: Tuple of the two converters possible output currents.
            input_voltages: Tuple of the two converters possible output voltages.

        Returns:
            tuple(dict,dict): Dictionaries defining if positive and negative values are possible for each motors state.
        """
        a_converter = 0
        e_converter = 1
        low = {
            'omega': -1 if input_voltages.low[a_converter] == -1
                     or input_voltages.low[e_converter] == -1 else 0,
            'torque': -1 if input_currents.low[a_converter] == -1
                      or input_currents.low[e_converter] == -1 else 0,
            'i_a': -1 if input_currents.low[a_converter] == -1 else 0,
            'i_e': -1 if input_currents.low[e_converter] == -1 else 0,
            'u_a': -1 if input_voltages.low[a_converter] == -1 else 0,
            'u_e': -1 if input_voltages.low[e_converter] == -1 else 0,
        }
        high = {
            'omega': 1,
            'torque': 1,
            'i_a': 1,
            'i_e': 1,
            'u_a': 1,
            'u_e': 1
        }
        return low, high

    def _update_limits(self, limits_d=None):
        # Docstring of superclass
        # Fresh dict per call: the previous mutable default ``limits_d={}``
        # was mutated below and therefore accumulated entries across calls
        # and instances.
        limits_d = {} if limits_d is None else limits_d
        # torque is replaced the same way for all DC motors
        limits_d.update(dict(torque=self.torque(
            [self._limits[state] for state in self.CURRENTS])))
        super()._update_limits(limits_d)
class DcShuntMotor(DcMotor):
    """
    The DcShuntMotor is a DC motor with parallel armature and exciting circuit connected to one input voltage.

    ===================== ========== ============= ===========================================
    Motor Parameter       Unit       Default Value Description
    ===================== ========== ============= ===========================================
    r_a                   Ohm        0.78          Armature circuit resistance
    r_e                   Ohm        25            Exciting circuit resistance
    l_a                   H          6.3e-3        Armature circuit inductance
    l_e                   H          1.2           Exciting circuit inductance
    l_e_prime             H          0.0094        Effective excitation inductance
    j_rotor               kg/m^2     0.017         Moment of inertia of the rotor
    ===================== ========== ============= ===========================================

    =============== ====== =============================================
    Motor Currents  Unit   Description
    =============== ====== =============================================
    i_a             A      Armature circuit current
    i_e             A      Exciting circuit current
    =============== ====== =============================================

    =============== ====== =============================================
    Motor Voltages  Unit   Description
    =============== ====== =============================================
    u               V      Voltage applied to both circuits
    =============== ====== =============================================

    ======== ===========================================================
    Limits / Nominal Value Dictionary Entries:
    -------- -----------------------------------------------------------
    Entry    Description
    ======== ===========================================================
    i_a      Armature current
    i_e      Exciting current
    omega    Angular Velocity
    torque   Motor generated torque
    u        Voltage
    ======== ===========================================================
    """
    HAS_JACOBIAN = True
    VOLTAGES = ['u']

    _default_nominal_values = {'omega': 368, 'torque': 0.0, 'i_a': 50,
                               'i_e': 1.2, 'u': 420}
    _default_limits = {'omega': 500, 'torque': 0.0, 'i_a': 75, 'i_e': 2,
                       'u': 420}
    _default_initializer = {'states': {'i_a': 0.0, 'i_e': 0.0},
                            'interval': None,
                            'random_init': None,
                            'random_params': (None, None)}

    def i_in(self, state):
        # Docstring of superclass
        # armature and exciting circuit are in parallel: input current is the sum
        return [state[self.I_A_IDX] + state[self.I_E_IDX]]

    def electrical_ode(self, state, u_in, omega, *_):
        # Docstring of superclass
        # both circuits see the same supply voltage
        return super().electrical_ode(state, (u_in[0], u_in[0]), omega)

    def electrical_jacobian(self, state, u_in, omega, *_):
        # Jacobians of the electrical ODE w.r.t. the state, omega and
        # of the torque w.r.t. the state.
        mp = self._motor_parameter
        return (
            np.array([
                [-mp['r_a'] / mp['l_a'], -mp['l_e_prime'] / mp['l_a'] * omega],
                [0, -mp['r_e'] / mp['l_e']]
            ]),
            np.array([-mp['l_e_prime'] * state[self.I_E_IDX] / mp['l_a'], 0]),
            np.array([mp['l_e_prime'] * state[self.I_E_IDX],
                      mp['l_e_prime'] * state[self.I_A_IDX]])
        )

    def get_state_space(self, input_currents, input_voltages):
        """
        Calculate the possible normalized state space for the motor as a tuple of dictionaries "low" and "high".

        Args:
            input_currents: The converters possible output currents.
            input_voltages: The converters possible output voltages.

        Returns:
            tuple(dict,dict): Dictionaries defining if positive and negative values are possible for each motors state.
        """
        low = {
            'omega': 0,
            'torque': -1 if input_currents.low[0] == -1 else 0,
            'i_a': -1 if input_currents.low[0] == -1 else 0,
            'i_e': -1 if input_currents.low[0] == -1 else 0,
            'u': -1 if input_voltages.low[0] == -1 else 0,
        }
        high = {
            'omega': 1,
            'torque': 1,
            'i_a': 1,
            'i_e': 1,
            'u': 1,
        }
        return low, high

    def _update_limits(self):
        # Docstring of superclass
        # R_a might be 0, protect against that
        r_a = 1 if self._motor_parameter['r_a'] == 0 else self._motor_parameter['r_a']
        limit_agenda = \
            {'u': self._default_limits['u'],
             'i_a': self._limits.get('i', None) or
                    self._limits['u'] / r_a,
             # consistently use the private attribute like the lines above
             'i_e': self._limits.get('i', None) or
                    self._limits['u'] / self._motor_parameter['r_e'],
             }
        super()._update_limits(limit_agenda)
class DcSeriesMotor(DcMotor):
    """
    The DcSeriesMotor is a DcMotor with an armature and exciting circuit connected in series to one input voltage.

    ===================== ========== ============= ===========================================
    Motor Parameter       Unit       Default Value Description
    ===================== ========== ============= ===========================================
    r_a                   Ohm        2.78          Armature circuit resistance
    r_e                   Ohm        1.0           Exciting circuit resistance
    l_a                   H          6.3e-3        Armature circuit inductance
    l_e                   H          1.6e-3        Exciting circuit inductance
    l_e_prime             H          0.05          Effective excitation inductance
    j_rotor               kg/m^2     0.017         Moment of inertia of the rotor
    ===================== ========== ============= ===========================================

    =============== ====== =============================================
    Motor Currents  Unit   Description
    =============== ====== =============================================
    i               A      Circuit current
    =============== ====== =============================================

    =============== ====== =============================================
    Motor Voltages  Unit   Description
    =============== ====== =============================================
    u               V      Circuit voltage
    =============== ====== =============================================

    ======== ===========================================================
    Limits / Nominal Value Dictionary Entries:
    -------- -----------------------------------------------------------
    Entry    Description
    ======== ===========================================================
    i        Circuit Current
    omega    Angular Velocity
    torque   Motor generated torque
    u        Circuit Voltage
    ======== ===========================================================
    """
    HAS_JACOBIAN = True
    I_IDX = 0
    CURRENTS_IDX = [0]
    CURRENTS = ['i']
    VOLTAGES = ['u']

    _default_motor_parameter = {
        'r_a': 2.78, 'r_e': 1.0, 'l_a': 6.3e-3, 'l_e': 1.6e-3,
        'l_e_prime': 0.05, 'j_rotor': 0.017,
    }
    _default_nominal_values = dict(omega=80, torque=0.0, i=50, u=420)
    _default_limits = dict(omega=100, torque=0.0, i=100, u=420)
    _default_initializer = {'states': {'i': 0.0},
                            'interval': None,
                            'random_init': None,
                            'random_params': (None, None)}

    def _update_model(self):
        # Docstring of superclass
        mp = self._motor_parameter
        # columns: i, omega * i, u
        self._model_constants = np.array([
            [-mp['r_a'] - mp['r_e'], -mp['l_e_prime'], 1]
        ])
        # armature and exciting circuit in series: inductances add up
        self._model_constants[self.I_IDX] = self._model_constants[
            self.I_IDX] / (
            mp['l_a'] + mp['l_e'])

    def torque(self, currents):
        # Docstring of superclass
        # the single series current flows through both circuits
        return super().torque([currents[self.I_IDX], currents[self.I_IDX]])

    def electrical_ode(self, state, u_in, omega, *_):
        # Docstring of superclass
        return np.matmul(
            self._model_constants,
            np.array([
                state[self.I_IDX],
                omega * state[self.I_IDX],
                u_in[0]
            ])
        )

    def i_in(self, state):
        # Docstring of superclass
        return state[self.CURRENTS_IDX]

    def _update_limits(self):
        # Docstring of superclass
        # R_a might be 0, protect against that
        r_a = 1 if self._motor_parameter['r_a'] == 0 else self._motor_parameter['r_a']
        limits_agenda = {
            'u': self._default_limits['u'],
            'i': self._limits['u'] / (r_a + self._motor_parameter['r_e']),
        }
        super()._update_limits(limits_agenda)

    def get_state_space(self, input_currents, input_voltages):
        # Docstring of superclass
        # (removed unused local `lower_limit`)
        low = {
            'omega': 0,
            'torque': 0,
            'i': -1 if input_currents.low[0] == -1 else 0,
            'u': -1 if input_voltages.low[0] == -1 else 0,
        }
        high = {
            'omega': 1,
            'torque': 1,
            'i': 1,
            'u': 1,
        }
        return low, high

    def electrical_jacobian(self, state, u_in, omega, *_):
        # Jacobians of the electrical ODE w.r.t. the state, omega and
        # of the torque w.r.t. the state.
        mp = self._motor_parameter
        return (
            np.array([[-(mp['r_a'] + mp['r_e'] + mp['l_e_prime'] * omega) / (
                mp['l_a'] + mp['l_e'])]]),
            np.array([-mp['l_e_prime'] * state[self.I_IDX] / (
                mp['l_a'] + mp['l_e'])]),
            np.array([2 * mp['l_e_prime'] * state[self.I_IDX]])
        )
class DcPermanentlyExcitedMotor(DcMotor):
    """
    The DcPermanentlyExcitedMotor is a DcMotor with a Permanent Magnet instead of the excitation circuit.

    ===================== ========== ============= ===========================================
    Motor Parameter       Unit       Default Value Description
    ===================== ========== ============= ===========================================
    r_a                   Ohm        25.0          Armature circuit resistance
    l_a                   H          3.438e-2      Armature circuit inductance
    psi_e                 Wb         18            Magnetic Flux of the permanent magnet
    j_rotor               kg/m^2     0.017         Moment of inertia of the rotor
    ===================== ========== ============= ===========================================

    =============== ====== =============================================
    Motor Currents  Unit   Description
    =============== ====== =============================================
    i               A      Circuit current
    =============== ====== =============================================

    =============== ====== =============================================
    Motor Voltages  Unit   Description
    =============== ====== =============================================
    u               V      Circuit voltage
    =============== ====== =============================================

    ======== ===========================================================
    Limits / Nominal Value Dictionary Entries:
    -------- -----------------------------------------------------------
    Entry    Description
    ======== ===========================================================
    i        Circuit Current
    omega    Angular Velocity
    torque   Motor generated torque
    u        Circuit Voltage
    ======== ===========================================================
    """
    I_IDX = 0
    CURRENTS_IDX = [0]
    CURRENTS = ['i']
    VOLTAGES = ['u']
    HAS_JACOBIAN = True

    _default_motor_parameter = {
        'r_a': 25.0, 'l_a': 3.438e-2, 'psi_e': 18, 'j_rotor': 0.017
    }
    _default_nominal_values = dict(omega=22, torque=0.0, i=16, u=400)
    _default_limits = dict(omega=50, torque=0.0, i=25, u=400)
    _default_initializer = {'states': {'i': 0.0},
                            'interval': None,
                            'random_init': None,
                            'random_params': (None, None)}

    # placeholder for omega, currents and u_in
    # NOTE(review): this buffer is a class attribute and therefore shared by
    # all instances; each electrical_ode call writes it before reading, so
    # single-threaded use is fine, but it is not thread-safe — confirm usage.
    _ode_placeholder = np.zeros(2 + len(CURRENTS_IDX), dtype=np.float64)

    def torque(self, state):
        # Docstring of superclass
        return self._motor_parameter['psi_e'] * state[self.I_IDX]

    def _update_model(self):
        # Docstring of superclass
        mp = self._motor_parameter
        # columns: omega, i, u
        self._model_constants = np.array([
            [-mp['psi_e'], -mp['r_a'], 1.0]
        ])
        self._model_constants[self.I_IDX] /= mp['l_a']

    def i_in(self, state):
        # Docstring of superclass
        return state[self.CURRENTS_IDX]

    def electrical_ode(self, state, u_in, omega, *_):
        # Docstring of superclass
        # fill the preallocated buffer with [omega, i, u] and evaluate the ODE
        self._ode_placeholder[:] = [omega] + np.atleast_1d(
            state[self.I_IDX]).tolist() \
            + [u_in[0]]
        return np.matmul(self._model_constants, self._ode_placeholder)

    def electrical_jacobian(self, state, u_in, omega, *_):
        # Jacobians of the electrical ODE w.r.t. the state, omega and
        # of the torque w.r.t. the state.
        mp = self._motor_parameter
        return (
            np.array([[-mp['r_a'] / mp['l_a']]]),
            np.array([-mp['psi_e'] / mp['l_a']]),
            np.array([mp['psi_e']])
        )

    def _update_limits(self):
        # Docstring of superclass
        # R_a might be 0, protect against that
        r_a = 1 if self._motor_parameter['r_a'] == 0 else self._motor_parameter['r_a']
        limits_agenda = {
            'u': self._default_limits['u'],
            'i': self._limits['u'] / r_a,
        }
        super()._update_limits(limits_agenda)

    def get_state_space(self, input_currents, input_voltages):
        # Docstring of superclass
        # (removed unused local `lower_limit`)
        low = {
            'omega': -1 if input_voltages.low[0] == -1 else 0,
            'torque': -1 if input_currents.low[0] == -1 else 0,
            'i': -1 if input_currents.low[0] == -1 else 0,
            'u': -1 if input_voltages.low[0] == -1 else 0,
        }
        high = {
            'omega': 1,
            'torque': 1,
            'i': 1,
            'u': 1,
        }
        return low, high
class DcExternallyExcitedMotor(DcMotor):
    """DC motor with separately supplied armature and excitation circuit.

    Equals the DC base motor; only the limit handling for the two separate
    supply voltages differs.
    """
    HAS_JACOBIAN = True

    def electrical_jacobian(self, state, u_in, omega, *_):
        # Jacobians of the electrical ODE w.r.t. the state, omega and
        # of the torque w.r.t. the state.
        mp = self._motor_parameter
        return (
            np.array([
                [-mp['r_a'] / mp['l_a'], -mp['l_e_prime'] / mp['l_a'] * omega],
                [0, -mp['r_e'] / mp['l_e']]
            ]),
            np.array([-mp['l_e_prime'] * state[self.I_E_IDX] / mp['l_a'], 0]),
            np.array([mp['l_e_prime'] * state[self.I_E_IDX],
                      mp['l_e_prime'] * state[self.I_A_IDX]])
        )

    def _update_limits(self):
        # Docstring of superclass
        # R_a might be 0, protect against that
        r_a = 1 if self._motor_parameter['r_a'] == 0 else self._motor_parameter['r_a']
        limit_agenda = \
            {'u_a': self._default_limits['u'],
             'u_e': self._default_limits['u'],
             'i_a': self._limits.get('i', None) or
                    self._limits['u'] / r_a,
             # consistently use the private attribute like the lines above
             'i_e': self._limits.get('i', None) or
                    self._limits['u'] / self._motor_parameter['r_e'],
             }
        super()._update_limits(limit_agenda)
class ThreePhaseMotor(ElectricMotor):
    """
    The ThreePhaseMotor and its subclasses implement the technical system of Three Phase Motors.

    This includes the system equations, the motor parameters of the equivalent
    circuit diagram, as well as limits and bandwidth.
    """
    # transformation matrix from abc to alpha-beta representation
    _t23 = 2 / 3 * np.array([
        [1, -0.5, -0.5],
        [0, 0.5 * np.sqrt(3), -0.5 * np.sqrt(3)]
    ])
    # transformation matrix from alpha-beta to abc representation
    _t32 = np.array([
        [1, 0],
        [-0.5, 0.5 * np.sqrt(3)],
        [-0.5, -0.5 * np.sqrt(3)]
    ])

    @staticmethod
    def t_23(quantities):
        """
        Transformation from abc representation to alpha-beta representation

        Args:
            quantities: The properties in the abc representation like ``[u_a, u_b, u_c]``

        Returns:
            The converted quantities in the alpha-beta representation like ``[u_alpha, u_beta]``
        """
        return np.matmul(ThreePhaseMotor._t23, quantities)

    @staticmethod
    def t_32(quantities):
        """
        Transformation from alpha-beta representation to abc representation

        Args:
            quantities: The properties in the alpha-beta representation like ``[u_alpha, u_beta]``

        Returns:
            The converted quantities in the abc representation like ``[u_a, u_b, u_c]``
        """
        return np.matmul(ThreePhaseMotor._t32, quantities)

    @staticmethod
    def q(quantities, epsilon):
        """
        Transformation of the dq-representation into alpha-beta using the electrical angle

        Args:
            quantities: Array of two quantities in dq-representation. Example [i_d, i_q]
            epsilon: Current electrical angle of the motor

        Returns:
            Array of the two quantities converted to alpha-beta-representation. Example [u_alpha, u_beta]
        """
        cos = math.cos(epsilon)
        sin = math.sin(epsilon)
        return cos * quantities[0] - sin * quantities[1], sin * quantities[
            0] + cos * quantities[1]

    @staticmethod
    def q_inv(quantities, epsilon):
        """
        Transformation of the alpha-beta-representation into dq using the electrical angle

        Args:
            quantities: Array of two quantities in alpha-beta-representation. Example [u_alpha, u_beta]
            epsilon: Current electrical angle of the motor

        Returns:
            Array of the two quantities converted to dq-representation. Example [u_d, u_q]

        Note:
            The transformation from alpha-beta to dq is just its inverse conversion with negated epsilon.
            So this method calls q(quantities, -epsilon).
        """
        # Reference this class directly; the previous code referenced the
        # subclass ``SynchronousMotor`` defined further down in the file,
        # which created a needless forward dependency (both resolve to the
        # same inherited function).
        return ThreePhaseMotor.q(quantities, -epsilon)

    def q_me(self, quantities, epsilon):
        """
        Transformation of the dq-representation into alpha-beta using the mechanical angle

        Args:
            quantities: Array of two quantities in dq-representation. Example [i_d, i_q]
            epsilon: Current mechanical angle of the motor

        Returns:
            Array of the two quantities converted to alpha-beta-representation. Example [u_alpha, u_beta]
        """
        # the electrical angle is the mechanical angle times the pole pair number p
        return self.q(quantities, epsilon * self._motor_parameter['p'])

    def q_inv_me(self, quantities, epsilon):
        """
        Transformation of the alpha-beta-representation into dq using the mechanical angle

        Args:
            quantities: Array of two quantities in alpha-beta-representation. Example [u_alpha, u_beta]
            epsilon: Current mechanical angle of the motor

        Returns:
            Array of the two quantities converted to dq-representation. Example [u_d, u_q]

        Note:
            The transformation from alpha-beta to dq is just its inverse conversion with negated epsilon.
            So this method calls q(quantities, -epsilon).
        """
        return self.q_me(quantities, -epsilon)

    def _torque_limit(self):
        """
        Returns:
            Maximal possible torque for the given limits in self._limits
        """
        raise NotImplementedError()

    def _update_limits(self, limits_d=None, nominal_d=None):
        # Docstring of superclass
        # Fresh dicts per call: the previous mutable default arguments were
        # mutated by the superclass implementation and thus shared state
        # between calls.
        limits_d = {} if limits_d is None else limits_d
        nominal_d = {} if nominal_d is None else nominal_d
        super()._update_limits(limits_d, nominal_d)
        super()._update_limits(dict(torque=self._torque_limit()))

    def _update_initial_limits(self, nominal_new=None, **kwargs):
        # Docstring of superclass
        nominal_new = {} if nominal_new is None else nominal_new
        super()._update_initial_limits(self._nominal_values)
        super()._update_initial_limits(nominal_new)
class SynchronousMotor(ThreePhaseMotor):
    r"""
    The SynchronousMotor and its subclasses implement the technical system of a three phase synchronous motor.

    This includes the system equations, the motor parameters of the equivalent
    circuit diagram, as well as limits and bandwidth.

    ===================== ========== ============= ===========================================
    Motor Parameter       Unit       Default Value Description
    ===================== ========== ============= ===========================================
    r_s                   Ohm        0.78          Stator resistance
    l_d                   H          1.2           Direct axis inductance
    l_q                   H          6.3e-3        Quadrature axis inductance
    psi_p                 Wb         0.0094        Effective excitation flux (PMSM only)
    p                     1          2             Pole pair number
    j_rotor               kg/m^2     0.017         Moment of inertia of the rotor
    ===================== ========== ============= ===========================================

    =============== ====== =============================================
    Motor Currents  Unit   Description
    =============== ====== =============================================
    i_sd            A      Direct axis current
    i_sq            A      Quadrature axis current
    i_a             A      Current through branch a
    i_b             A      Current through branch b
    i_c             A      Current through branch c
    i_alpha         A      Current in alpha axis
    i_beta          A      Current in beta axis
    =============== ====== =============================================

    =============== ====== =============================================
    Motor Voltages  Unit   Description
    =============== ====== =============================================
    u_sd            A      Direct axis voltage
    u_sq            A      Quadrature axis voltage
    u_a             A      Voltage through branch a
    u_b             A      Voltage through branch b
    u_c             A      Voltage through branch c
    u_alpha         A      Voltage in alpha axis
    u_beta          A      Voltage in beta axis
    =============== ====== =============================================

    ======== ===========================================================
    Limits / Nominal Value Dictionary Entries:
    -------- -----------------------------------------------------------
    Entry    Description
    ======== ===========================================================
    i        General current limit / nominal value
    i_a      Current in phase a
    i_b      Current in phase b
    i_c      Current in phase c
    i_alpha  Current in alpha axis
    i_beta   Current in beta axis
    i_sd     Current in direct axis
    i_sq     Current in quadrature axis
    omega    Mechanical angular Velocity
    epsilon  Electrical rotational angle
    torque   Motor generated torque
    u_a      Voltage in phase a
    u_b      Voltage in phase b
    u_c      Voltage in phase c
    u_alpha  Voltage in alpha axis
    u_beta   Voltage in beta axis
    u_sd     Voltage in direct axis
    u_sq     Voltage in quadrature axis
    ======== ===========================================================

    Note:
        The voltage limits should be the amplitude of the phase voltage (:math:`\hat{u}_S`).
        Typically the rms value for the line voltage (:math:`U_L`) is given.
        :math:`\hat{u}_S=\sqrt{2/3}~U_L`

        The current limits should be the amplitude of the phase current (:math:`\hat{i}_S`).
        Typically the rms value for the phase current (:math:`I_S`) is given.
        :math:`\hat{i}_S = \sqrt{2}~I_S`

        If not specified, nominal values are equal to their corresponding limit values.
        Furthermore, if specific limits/nominal values (e.g. i_a) are not specified they are inferred from
        the general limits/nominal values (e.g. i)
    """
    # indices within the ODE state vector [i_sd, i_sq, epsilon]
    I_SD_IDX = 0
    I_SQ_IDX = 1
    EPSILON_IDX = 2
    CURRENTS_IDX = [0, 1]
    CURRENTS = ['i_sd', 'i_sq']
    VOLTAGES = ['u_sd', 'u_sq']
    # set by the concrete subclass in _update_model()
    _model_constants = None
    _initializer = None

    def __init__(self, motor_parameter=None, nominal_values=None,
                 limit_values=None, motor_initializer=None, **kwargs):
        # Docstring of superclass
        nominal_values = nominal_values or {}
        limit_values = limit_values or {}
        super().__init__(motor_parameter, nominal_values,
                         limit_values, motor_initializer)
        self._update_model()
        self._update_limits()

    @property
    def motor_parameter(self):
        # Docstring of superclass
        return self._motor_parameter

    @property
    def initializer(self):
        # Docstring of superclass
        return self._initializer

    def reset(self, state_space,
              state_positions,
              **__):
        # Docstring of superclass
        # the synchronous motor state additionally carries the angle epsilon,
        # hence the "+ 1" in the default zero state below
        if self._initializer and self._initializer['states']:
            self.initialize(state_space, state_positions)
            return np.asarray(list(self._initial_states.values()))
        else:
            return np.zeros(len(self.CURRENTS) + 1)

    def torque(self, state):
        # Docstring of superclass
        # abstract here; the torque equation differs per synchronous motor type
        raise NotImplementedError

    def _update_model(self):
        """
        Set motor parameters into a matrix for faster computation
        """
        raise NotImplementedError

    def electrical_ode(self, state, u_dq, omega, *_):
        """
        The differential equation of the Synchronous Motor.

        Args:
            state: The current state of the motor. [i_sd, i_sq, epsilon]
            u_dq: The input voltages [u_sd, u_sq]
            omega: The mechanical angular velocity

        Returns:
            The derivatives of the state vector d/dt([i_sd, i_sq, epsilon])
        """
        return np.matmul(self._model_constants, np.array([
            omega,
            state[self.I_SD_IDX],
            state[self.I_SQ_IDX],
            u_dq[0],
            u_dq[1],
            omega * state[self.I_SD_IDX],
            omega * state[self.I_SQ_IDX],
        ]))

    def i_in(self, state):
        # Docstring of superclass
        return state[self.CURRENTS_IDX]

    def _update_limits(self):
        # Docstring of superclass
        # NOTE(review): the factor 0.5 presumably maps the supply voltage 'u'
        # to the maximal phase voltage amplitude — confirm against the
        # converter model.
        voltage_limit = 0.5 * self._limits['u']
        voltage_nominal = 0.5 * self._nominal_values['u']
        limits_agenda = {}
        nominal_agenda = {}
        for u, i in zip(self.IO_VOLTAGES, self.IO_CURRENTS):
            limits_agenda[u] = voltage_limit
            nominal_agenda[u] = voltage_nominal
            # missing current limits default to the stall current u / r_s
            limits_agenda[i] = self._limits.get('i', None) or \
                               self._limits[u] / self._motor_parameter['r_s']
            nominal_agenda[i] = self._nominal_values.get('i', None) or \
                                self._nominal_values[u] / \
                                self._motor_parameter['r_s']
        super()._update_limits(limits_agenda, nominal_agenda)
class SynchronousReluctanceMotor(SynchronousMotor):
"""
===================== ========== ============= ===========================================
Motor Parameter Unit Default Value Description
===================== ========== ============= ===========================================
r_s Ohm 0.78 Stator resistance
l_d H 1.2 Direct axis inductance
l_q H 6.3e-3 Quadrature axis inductance
p 1 2 Pole pair number
j_rotor kg/m^2 0.017 Moment of inertia of the rotor
===================== ========== ============= ===========================================
=============== ====== =============================================
Motor Currents Unit Description
=============== ====== =============================================
i_sd A Direct axis current
i_sq A Quadrature axis current
i_a A Current through branch a
i_b A Current through branch b
i_c A Current through branch c
i_alpha A Current in alpha axis
i_beta A Current in beta axis
=============== ====== =============================================
=============== ====== =============================================
Motor Voltages Unit Description
=============== ====== =============================================
u_sd V Direct axis voltage
u_sq V Quadrature axis voltage
u_a V Voltage through branch a
u_b V Voltage through branch b
u_c V Voltage through branch c
u_alpha V Voltage in alpha axis
u_beta V Voltage in beta axis
=============== ====== =============================================
======== ===========================================================
Limits / Nominal Value Dictionary Entries:
-------- -----------------------------------------------------------
Entry Description
======== ===========================================================
i General current limit / nominal value
i_a Current in phase a
i_b Current in phase b
i_c Current in phase c
i_alpha Current in alpha axis
i_beta Current in beta axis
i_sd Current in direct axis
i_sq Current in quadrature axis
omega Mechanical angular Velocity
epsilon Electrical rotational angle
torque Motor generated torque
u_a Voltage in phase a
u_b Voltage in phase b
u_c Voltage in phase c
u_alpha Voltage in alpha axis
u_beta Voltage in beta axis
u_sd Voltage in direct axis
u_sq Voltage in quadrature axis
======== ===========================================================
Note:
The voltage limits should be the amplitude of the phase voltage (:math:`\hat{u}_S`).
Typically the rms value for the line voltage (:math:`U_L`) is given.
:math:`\hat{u}_S=\sqrt{2/3}~U_L`
The current limits should be the amplitude of the phase current (:math:`\hat{i}_S`).
Typically the rms value for the phase current (:math:`I_S`) is given.
:math:`\hat{i}_S = \sqrt{2}~I_S`
If not specified, nominal values are equal to their corresponding limit values.
Furthermore, if specific limits/nominal values (e.g. i_a) are not specified they are inferred from
the general limits/nominal values (e.g. i)
"""
HAS_JACOBIAN = True
#### Parameters taken from DOI: 10.1109/AMC.2008.4516099 (K. Malekian, M. R. Sharif, J. Milimonfared)
_default_motor_parameter = {'p': 4,
'l_d': 10.1e-3,
'l_q': 4.1e-3,
'j_rotor': 0.8e-3,
'r_s': 0.57
}
_default_nominal_values = {'i': 10, 'torque': 0, 'omega': 3e3 * np.pi / 30, 'epsilon': np.pi, 'u': 100}
_default_limits = {'i': 13, 'torque': 0, 'omega': 4.3e3 * np.pi / 30, 'epsilon': np.pi, 'u': 100}
_default_initializer = {'states': {'i_sq': 0.0, 'i_sd': 0.0, 'epsilon': 0.0},
'interval': None,
'random_init': None,
'random_params': (None, None)}
IO_VOLTAGES = ['u_a', 'u_b', 'u_c', 'u_sd', 'u_sq']
IO_CURRENTS = ['i_a', 'i_b', 'i_c', 'i_sd', 'i_sq']
def _update_model(self):
# Docstring of superclass
mp = self._motor_parameter
self._model_constants = np.array([
# omega, i_sd, i_sq, u_sd, u_sq, omega * i_sd, omega * i_sq
[ 0, -mp['r_s'], 0, 1, 0, 0, mp['l_q'] * mp['p']],
[ 0, 0, -mp['r_s'], 0, 1, -mp['l_d'] * mp['p'], 0],
[mp['p'], 0, 0, 0, 0, 0, 0]
])
self._model_constants[self.I_SD_IDX] = self._model_constants[self.I_SD_IDX] / mp['l_d']
self._model_constants[self.I_SQ_IDX] = self._model_constants[self.I_SQ_IDX] / mp['l_q']
def _torque_limit(self):
# Docstring of superclass
return self.torque([self._limits['i_sd'] / np.sqrt(2), self._limits['i_sq'] / np.sqrt(2), 0])
def torque(self, currents):
# Docstring of superclass
mp = self._motor_parameter
return 1.5 * mp['p'] * (
(mp['l_d'] - mp['l_q']) * currents[self.I_SD_IDX]) * \
currents[self.I_SQ_IDX]
    def electrical_jacobian(self, state, u_in, omega, *_):
        """Return the Jacobians of the electrical subsystem.

        Returns:
            Tuple of three arrays:
              - d(dx/dt)/dx: derivative of the ODE w.r.t. the state
                [i_sd, i_sq, epsilon],
              - d(dx/dt)/d(omega): derivative of the ODE w.r.t. the
                mechanical angular velocity,
              - dT/dx: gradient of the torque w.r.t. the state.
        """
        mp = self._motor_parameter
        return (
            np.array([  # d(dx/dt)/dx
                [-mp['r_s'] / mp['l_d'], mp['l_q'] / mp['l_d'] * mp['p'] * omega, 0],
                [-mp['l_d'] / mp['l_q'] * mp['p'] * omega, -mp['r_s'] / mp['l_q'], 0],
                [0, 0, 0]
            ]),
            np.array([  # d(dx/dt)/d(omega)
                mp['p'] * mp['l_q'] / mp['l_d'] * state[self.I_SQ_IDX],
                - mp['p'] * mp['l_d'] / mp['l_q'] * state[self.I_SD_IDX],
                mp['p']
            ]),
            np.array([  # dT/dx (matches the torque equation T = 1.5*p*(l_d-l_q)*i_sd*i_sq)
                1.5 * mp['p'] * (mp['l_d'] - mp['l_q']) * state[self.I_SQ_IDX],
                1.5 * mp['p'] * (mp['l_d'] - mp['l_q']) * state[self.I_SD_IDX],
                0
            ])
        )
class PermanentMagnetSynchronousMotor(SynchronousMotor):
    """Permanent magnet synchronous motor (PMSM) model.

    ===================== ========== ============= ===========================================
    Motor Parameter       Unit       Default Value Description
    ===================== ========== ============= ===========================================
    r_s                   Ohm        18e-3         Stator resistance
    l_d                   H          0.37e-3       Direct axis inductance
    l_q                   H          1.2e-3        Quadrature axis inductance
    psi_p                 Vs         66e-3         Permanent magnet flux linkage
    p                     1          3             Pole pair number
    j_rotor               kg/m^2     0.3883        Moment of inertia of the rotor
    ===================== ========== ============= ===========================================

    =============== ====== =============================================
    Motor Currents  Unit   Description
    =============== ====== =============================================
    i_sd            A      Direct axis current
    i_sq            A      Quadrature axis current
    i_a             A      Current through branch a
    i_b             A      Current through branch b
    i_c             A      Current through branch c
    i_alpha         A      Current in alpha axis
    i_beta          A      Current in beta axis
    =============== ====== =============================================

    =============== ====== =============================================
    Motor Voltages  Unit   Description
    =============== ====== =============================================
    u_sd            V      Direct axis voltage
    u_sq            V      Quadrature axis voltage
    u_a             V      Voltage through branch a
    u_b             V      Voltage through branch b
    u_c             V      Voltage through branch c
    u_alpha         V      Voltage in alpha axis
    u_beta          V      Voltage in beta axis
    =============== ====== =============================================

    ======== ===========================================================
    Limits / Nominal Value Dictionary Entries:
    -------- -----------------------------------------------------------
    Entry    Description
    ======== ===========================================================
    i        General current limit / nominal value
    i_a      Current in phase a
    i_b      Current in phase b
    i_c      Current in phase c
    i_alpha  Current in alpha axis
    i_beta   Current in beta axis
    i_sd     Current in direct axis
    i_sq     Current in quadrature axis
    omega    Mechanical angular Velocity
    torque   Motor generated torque
    epsilon  Electrical rotational angle
    u_a      Voltage in phase a
    u_b      Voltage in phase b
    u_c      Voltage in phase c
    u_alpha  Voltage in alpha axis
    u_beta   Voltage in beta axis
    u_sd     Voltage in direct axis
    u_sq     Voltage in quadrature axis
    ======== ===========================================================

    Note:
        The voltage limits should be the amplitude of the phase voltage (:math:`\hat{u}_S`).
        Typically the rms value for the line voltage (:math:`U_L`) is given.
        :math:`\hat{u}_S=\sqrt{2/3}~U_L`

        The current limits should be the amplitude of the phase current (:math:`\hat{i}_S`).
        Typically the rms value for the phase current (:math:`I_S`) is given.
        :math:`\hat{i}_S = \sqrt{2}~I_S`

        If not specified, nominal values are equal to their corresponding limit values.
        Furthermore, if specific limits/nominal values (e.g. i_a) are not specified they are inferred from
        the general limits/nominal values (e.g. i)
    """

    #### Parameters taken from DOI: 10.1109/TPEL.2020.3006779 (A. Brosch, S. Hanke, O. Wallscheid, J. Boecker)
    #### and DOI: 10.1109/IEMDC.2019.8785122 (S. Hanke, O. Wallscheid, J. Boecker)
    _default_motor_parameter = {
        'p': 3,
        'l_d': 0.37e-3,
        'l_q': 1.2e-3,
        'j_rotor': 0.3883,
        'r_s': 18e-3,
        'psi_p': 66e-3,
    }
    # An analytical Jacobian of the electrical ODE is implemented.
    HAS_JACOBIAN = True
    _default_limits = dict(omega=12e3 * np.pi / 30, torque=0.0, i=260, epsilon=math.pi, u=300)
    _default_nominal_values = dict(omega=3e3 * np.pi / 30, torque=0.0, i=240, epsilon=math.pi, u=300)
    # Default initializer: zero currents and zero rotor angle, no randomization.
    _default_initializer = {'states': {'i_sq': 0.0, 'i_sd': 0.0, 'epsilon': 0.0},
                            'interval': None,
                            'random_init': None,
                            'random_params': (None, None)}
    IO_VOLTAGES = ['u_a', 'u_b', 'u_c', 'u_sd', 'u_sq']
    IO_CURRENTS = ['i_a', 'i_b', 'i_c', 'i_sd', 'i_sq']

    def _update_model(self):
        # Docstring of superclass
        mp = self._motor_parameter
        # Constant matrix of the linear electrical ODE in dq coordinates:
        # d/dt [i_sd, i_sq, epsilon]^T = model_constants @
        #     [omega, i_sd, i_sq, u_sd, u_sq, omega * i_sd, omega * i_sq]^T
        # The -psi_p * p entry in the omega column of the i_sq row is the
        # back-EMF induced by the permanent magnets.
        self._model_constants = np.array([
            # omega, i_d, i_q, u_d, u_q, omega * i_d, omega * i_q
            [0, -mp['r_s'], 0, 1, 0, 0, mp['l_q'] * mp['p']],
            [-mp['psi_p'] * mp['p'], 0, -mp['r_s'], 0, 1, -mp['l_d'] * mp['p'], 0],
            [mp['p'], 0, 0, 0, 0, 0, 0],
        ])
        # Normalize the two current rows by the respective inductance.
        self._model_constants[self.I_SD_IDX] = self._model_constants[self.I_SD_IDX] / mp['l_d']
        self._model_constants[self.I_SQ_IDX] = self._model_constants[self.I_SQ_IDX] / mp['l_q']

    def _torque_limit(self):
        # Docstring of superclass
        mp = self._motor_parameter
        if mp['l_d'] == mp['l_q']:
            # No reluctance torque: maximum torque is reached with pure q-current.
            return self.torque([0, self._limits['i_sq'], 0])
        else:
            # Optimal d-current at the nominal current amplitude: the root of a
            # quadratic i_sd^2 + _p * i_sd + _q = 0.
            # NOTE(review): this matches the maximum-torque-per-ampere (MTPA)
            # operating point for l_d != l_q -- confirm against motor literature.
            i_n = self.nominal_values['i']
            _p = mp['psi_p'] / (2 * (mp['l_d'] - mp['l_q']))
            _q = - i_n ** 2 / 2
            i_d_opt = - _p / 2 - np.sqrt((_p / 2) ** 2 - _q)
            # The q-current fills up the remaining current amplitude.
            i_q_opt = np.sqrt(i_n ** 2 - i_d_opt ** 2)
            return self.torque([i_d_opt, i_q_opt, 0])

    def torque(self, currents):
        # Docstring of superclass
        # T = 1.5 * p * (psi_p + (l_d - l_q) * i_sd) * i_sq
        # (permanent magnet torque plus reluctance torque)
        mp = self._motor_parameter
        return 1.5 * mp['p'] * (mp['psi_p'] + (mp['l_d'] - mp['l_q']) * currents[self.I_SD_IDX]) * currents[self.I_SQ_IDX]

    def electrical_jacobian(self, state, u_in, omega, *args):
        """Return (d(dx/dt)/dx, d(dx/dt)/d(omega), dT/dx) of the electrical subsystem."""
        mp = self._motor_parameter
        return (
            np.array([  # dx'/dx
                [-mp['r_s'] / mp['l_d'], mp['l_q'] / mp['l_d'] * omega * mp['p'], 0],
                [-mp['l_d'] / mp['l_q'] * omega * mp['p'], - mp['r_s'] / mp['l_q'], 0],
                [0, 0, 0]
            ]),
            np.array([  # dx'/dw
                mp['p'] * mp['l_q'] / mp['l_d'] * state[self.I_SQ_IDX],
                - mp['p'] * mp['l_d'] / mp['l_q'] * state[self.I_SD_IDX] - mp['p'] * mp['psi_p'] / mp['l_q'],
                mp['p']
            ]),
            np.array([  # dT/dx (gradient of the torque equation above)
                1.5 * mp['p'] * (mp['l_d'] - mp['l_q']) * state[self.I_SQ_IDX],
                1.5 * mp['p'] * (mp['psi_p'] + (mp['l_d'] - mp['l_q']) * state[self.I_SD_IDX]),
                0
            ])
        )
class InductionMotor(ThreePhaseMotor):
    """
    The InductionMotor and its subclasses implement the technical system of a three phase induction motor.

    This includes the system equations, the motor parameters of the equivalent circuit diagram,
    as well as limits and bandwidth.

    ===================== ========== ============= ===========================================
    Motor Parameter       Unit       Default Value Description
    ===================== ========== ============= ===========================================
    r_s                   Ohm        2.9338        Stator resistance
    r_r                   Ohm        1.355         Rotor resistance
    l_m                   H          143.75e-3     Main inductance
    l_sigs                H          5.87e-3       Stator-side stray inductance
    l_sigr                H          5.87e-3       Rotor-side stray inductance
    p                     1          2             Pole pair number
    j_rotor               kg/m^2     0.0011        Moment of inertia of the rotor
    ===================== ========== ============= ===========================================

    =============== ====== =============================================
    Motor Currents  Unit   Description
    =============== ====== =============================================
    i_sd            A      Direct axis current
    i_sq            A      Quadrature axis current
    i_sa            A      Current through branch a
    i_sb            A      Current through branch b
    i_sc            A      Current through branch c
    i_salpha        A      Current in alpha axis
    i_sbeta         A      Current in beta axis
    =============== ====== =============================================

    =============== ====== =============================================
    Motor Voltages  Unit   Description
    =============== ====== =============================================
    u_sd            V      Direct axis voltage
    u_sq            V      Quadrature axis voltage
    u_sa            V      Voltage through branch a
    u_sb            V      Voltage through branch b
    u_sc            V      Voltage through branch c
    u_salpha        V      Voltage in alpha axis
    u_sbeta         V      Voltage in beta axis
    =============== ====== =============================================

    ======== ===========================================================
    Limits / Nominal Value Dictionary Entries:
    -------- -----------------------------------------------------------
    Entry    Description
    ======== ===========================================================
    i        General current limit / nominal value
    i_sa     Current in phase a
    i_sb     Current in phase b
    i_sc     Current in phase c
    i_salpha Current in alpha axis
    i_sbeta  Current in beta axis
    i_sd     Current in direct axis
    i_sq     Current in quadrature axis
    omega    Mechanical angular Velocity
    torque   Motor generated torque
    u_sa     Voltage in phase a
    u_sb     Voltage in phase b
    u_sc     Voltage in phase c
    u_salpha Voltage in alpha axis
    u_sbeta  Voltage in beta axis
    u_sd     Voltage in direct axis
    u_sq     Voltage in quadrature axis
    ======== ===========================================================

    Note:
        The voltage limits should be the amplitude of the phase voltage (:math:`\hat{u}_S`).
        Typically the rms value for the line voltage (:math:`U_L`) is given.
        :math:`\hat{u}_S=\sqrt{2/3}~U_L`

        The current limits should be the amplitude of the phase current (:math:`\hat{i}_S`).
        Typically the rms value for the phase current (:math:`I_S`) is given.
        :math:`\hat{i}_S = \sqrt{2}~I_S`

        If not specified, nominal values are equal to their corresponding limit values.
        Furthermore, if specific limits/nominal values (e.g. i_a) are not specified they are inferred from
        the general limits/nominal values (e.g. i)
    """

    # Indices within the electrical state vector
    # [i_salpha, i_sbeta, psi_ralpha, psi_rbeta, epsilon].
    I_SALPHA_IDX = 0
    I_SBETA_IDX = 1
    PSI_RALPHA_IDX = 2
    PSI_RBETA_IDX = 3
    EPSILON_IDX = 4
    CURRENTS_IDX = [0, 1]
    FLUX_IDX = [2, 3]
    CURRENTS = ['i_salpha', 'i_sbeta']
    FLUXES = ['psi_ralpha', 'psi_rbeta']
    STATOR_VOLTAGES = ['u_salpha', 'u_sbeta']
    # Voltages and currents exposed as inputs/outputs of the motor model.
    IO_VOLTAGES = ['u_sa', 'u_sb', 'u_sc', 'u_salpha', 'u_sbeta', 'u_sd',
                   'u_sq']
    IO_CURRENTS = ['i_sa', 'i_sb', 'i_sc', 'i_salpha', 'i_sbeta', 'i_sd',
                   'i_sq']
    # An analytical Jacobian of the electrical ODE is implemented.
    HAS_JACOBIAN = True

    #### Parameters taken from DOI: 10.1109/EPEPEMC.2018.8522008 (O. Wallscheid, M. Schenke, J. Boecker)
    _default_motor_parameter = {
        'p': 2,
        'l_m': 143.75e-3,
        'l_sigs': 5.87e-3,
        'l_sigr': 5.87e-3,
        'j_rotor': 1.1e-3,
        'r_s': 2.9338,
        'r_r': 1.355,
    }
    _default_limits = dict(omega=4e3 * np.pi / 30, torque=0.0, i=5.5, epsilon=math.pi, u=560)
    _default_nominal_values = dict(omega=3e3 * np.pi / 30, torque=0.0, i=3.9, epsilon=math.pi, u=560)
    # Filled in by _update_model() during __init__.
    _model_constants = None
    # Default initializer: zero currents, zero rotor flux and zero angle.
    _default_initializer = {'states': {'i_salpha': 0.0, 'i_sbeta': 0.0,
                                       'psi_ralpha': 0.0, 'psi_rbeta': 0.0,
                                       'epsilon': 0.0},
                            'interval': None,
                            'random_init': None,
                            'random_params': (None, None)}
    _initializer = None

    @property
    def motor_parameter(self):
        # Docstring of superclass
        return self._motor_parameter

    @property
    def initializer(self):
        # Docstring of superclass
        return self._initializer

    def __init__(self, motor_parameter=None, nominal_values=None,
                 limit_values=None, motor_initializer=None, initial_limits=None,
                 **__):
        # Docstring of superclass
        # convert placeholder i and u to actual IO quantities
        _nominal_values = self._default_nominal_values.copy()
        _nominal_values.update({u: _nominal_values['u'] for u in self.IO_VOLTAGES})
        _nominal_values.update({i: _nominal_values['i'] for i in self.IO_CURRENTS})
        del _nominal_values['u'], _nominal_values['i']
        # User-supplied nominal values take precedence over the defaults.
        _nominal_values.update(nominal_values or {})
        # same for limits
        _limit_values = self._default_limits.copy()
        _limit_values.update({u: _limit_values['u'] for u in self.IO_VOLTAGES})
        _limit_values.update({i: _limit_values['i'] for i in self.IO_CURRENTS})
        del _limit_values['u'], _limit_values['i']
        _limit_values.update(limit_values or {})
        super().__init__(motor_parameter, nominal_values,
                         limit_values, motor_initializer, initial_limits)
        self._update_model()
        self._update_limits(_limit_values, _nominal_values)

    def reset(self,
              state_space,
              state_positions,
              omega=None):
        # Docstring of superclass
        if self._initializer and self._initializer['states']:
            # Recompute the flux limits for the given speed before sampling
            # an initial state.
            self._update_initial_limits(omega=omega)
            self.initialize(state_space, state_positions)
            return np.asarray(list(self._initial_states.values()))
        else:
            # No initializer configured: start from the all-zero state
            # (currents, fluxes and the rotor angle epsilon).
            return np.zeros(len(self.CURRENTS) + len(self.FLUXES) + 1)

    def electrical_ode(self, state, u_sr_alphabeta, omega, *args):
        """
        The differential equation of the Induction Motor.

        Args:
            state: The momentary state of the motor. [i_salpha, i_sbeta, psi_ralpha, psi_rbeta, epsilon]
            u_sr_alphabeta: The input voltages [[u_salpha, u_sbeta], [u_ralpha, u_rbeta]]
            omega: The mechanical angular velocity of the rotor.

        Returns:
            The derivatives of the state vector d/dt([i_salpha, i_sbeta, psi_ralpha, psi_rbeta, epsilon])
        """
        return np.matmul(self._model_constants, np.array([
            # omega, i_alpha, i_beta, psi_ralpha, psi_rbeta, omega * psi_ralpha, omega * psi_rbeta, u_salpha, u_sbeta, u_ralpha, u_rbeta,
            omega,
            state[self.I_SALPHA_IDX],
            state[self.I_SBETA_IDX],
            state[self.PSI_RALPHA_IDX],
            state[self.PSI_RBETA_IDX],
            omega * state[self.PSI_RALPHA_IDX],
            omega * state[self.PSI_RBETA_IDX],
            u_sr_alphabeta[0, 0],
            u_sr_alphabeta[0, 1],
            u_sr_alphabeta[1, 0],
            u_sr_alphabeta[1, 1],
        ]))

    def i_in(self, state):
        # Docstring of superclass
        return state[self.CURRENTS_IDX]

    def _torque_limit(self):
        # Docstring of superclass
        # Torque at the current limits, with the current split between the
        # d- and q-axis (hence the division by 2).
        mp = self._motor_parameter
        return 1.5 * mp['p'] * mp['l_m'] ** 2 / (mp['l_m'] + mp['l_sigr']) * self._limits['i_sd'] * self._limits['i_sq'] / 2

    def torque(self, states):
        # Docstring of superclass
        # T = 1.5 * p * l_m / l_r * (psi_ralpha * i_sbeta - psi_rbeta * i_salpha)
        # with the rotor inductance l_r = l_m + l_sigr.
        mp = self._motor_parameter
        return 1.5 * mp['p'] * mp['l_m'] / (mp['l_m'] + mp['l_sigr']) * (states[self.PSI_RALPHA_IDX] * states[self.I_SBETA_IDX] - states[self.PSI_RBETA_IDX] * states[self.I_SALPHA_IDX])

    def _flux_limit(self, omega=0, eps_mag=0, u_q_max=0.0, u_rq_max=0.0):
        """
        Calculate flux limits for the given current and magnetic-field angle.

        Args:
            omega(float): speed given by mechanical load
            eps_mag(float): magnetic field angle
            u_q_max(float): maximal stator voltage in the q-system
            u_rq_max(float): maximal rotor voltage in the q-system

        Returns:
            maximal flux values(list) in the alpha-beta-system
        """
        mp = self.motor_parameter
        l_s = mp['l_m'] + mp['l_sigs']  # stator inductance
        l_r = mp['l_m'] + mp['l_sigr']  # rotor inductance
        l_mr = mp['l_m'] / l_r          # stator-rotor coupling factor
        sigma = (l_s * l_r - mp['l_m'] ** 2) / (l_s * l_r)  # leakage coefficient
        # limiting flux for a low omega
        if omega == 0:
            psi_d_max = mp['l_m'] * self._nominal_values['i_sd']
        else:
            # Transform the sampled initial alpha-beta currents into the
            # field-oriented d-q system.
            i_d, i_q = self.q_inv([self._initial_states['i_salpha'],
                                   self._initial_states['i_sbeta']],
                                  eps_mag)

            # Steady-state voltage equation solved for the maximal d-flux that
            # keeps the q-voltage within its limit at the given speed.
            psi_d_max = mp['p'] * omega * sigma * l_s * i_d + \
                        (mp['r_s'] + mp['r_r'] * l_mr ** 2) * i_q + \
                        u_q_max + \
                        l_mr * u_rq_max
            psi_d_max /= - mp['p'] * omega * l_mr
            # clipping flux and setting nominal limit
            # (0.9 adds a safety margin below the physical maximum l_m * i_d)
            psi_d_max = 0.9 * np.clip(psi_d_max, a_min=0, a_max=np.abs(mp['l_m'] * i_d))
        # returning flux in alpha, beta system
        return self.q([psi_d_max, 0], eps_mag)

    def _update_model(self):
        # Docstring of superclass
        mp = self._motor_parameter
        l_s = mp['l_m'] + mp['l_sigs']  # stator inductance
        l_r = mp['l_m'] + mp['l_sigr']  # rotor inductance
        sigma = (l_s * l_r - mp['l_m'] ** 2) / (l_s * l_r)  # leakage coefficient
        tau_r = l_r / mp['r_r']  # rotor time constant
        tau_sig = sigma * l_s / (
            mp['r_s'] + mp['r_r'] * (mp['l_m'] ** 2) / (l_r ** 2))  # transient stator time constant
        # Constant matrix of the linear electrical ODE; column layout is given
        # in the comment below, one row per state derivative.
        self._model_constants = np.array([
            # omega, i_alpha, i_beta, psi_ralpha, psi_rbeta, omega * psi_ralpha, omega * psi_rbeta, u_salpha, u_sbeta, u_ralpha, u_rbeta,
            [0, -1 / tau_sig, 0, mp['l_m'] * mp['r_r'] / (sigma * l_s * l_r ** 2), 0, 0,
             +mp['l_m'] * mp['p'] / (sigma * l_r * l_s), 1 / (sigma * l_s), 0,
             -mp['l_m'] / (sigma * l_r * l_s), 0, ],  # i_salpha_dot
            [0, 0, -1 / tau_sig, 0,
             mp['l_m'] * mp['r_r'] / (sigma * l_s * l_r ** 2),
             -mp['l_m'] * mp['p'] / (sigma * l_r * l_s), 0, 0,
             1 / (sigma * l_s), 0, -mp['l_m'] / (sigma * l_r * l_s), ],  # i_sbeta_dot
            [0, mp['l_m'] / tau_r, 0, -1 / tau_r, 0, 0, -mp['p'], 0, 0, 1,
             0, ],  # psi_ralpha_dot
            [0, 0, mp['l_m'] / tau_r, 0, -1 / tau_r, mp['p'], 0, 0, 0, 0, 1, ],  # psi_rbeta_dot
            [mp['p'], 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ],  # epsilon_dot
        ])

    def electrical_jacobian(self, state, u_in, omega, *args):
        """Return (d(dx/dt)/dx, d(dx/dt)/d(omega), dT/dx) of the electrical subsystem."""
        mp = self._motor_parameter
        l_s = mp['l_m'] + mp['l_sigs']
        l_r = mp['l_m'] + mp['l_sigr']
        sigma = (l_s * l_r - mp['l_m'] ** 2) / (l_s * l_r)
        tau_r = l_r / mp['r_r']
        tau_sig = sigma * l_s / (
            mp['r_s'] + mp['r_r'] * (mp['l_m'] ** 2) / (l_r ** 2))
        return (
            np.array([  # dx'/dx
                # i_alpha i_beta psi_alpha psi_beta epsilon
                [-1 / tau_sig, 0,
                 mp['l_m'] * mp['r_r'] / (sigma * l_s * l_r ** 2),
                 omega * mp['l_m'] * mp['p'] / (sigma * l_r * l_s), 0],
                [0, - 1 / tau_sig,
                 - omega * mp['l_m'] * mp['p'] / (sigma * l_r * l_s),
                 mp['l_m'] * mp['r_r'] / (sigma * l_s * l_r ** 2), 0],
                [mp['l_m'] / tau_r, 0, - 1 / tau_r, - omega * mp['p'], 0],
                [0, mp['l_m'] / tau_r, omega * mp['p'], - 1 / tau_r, 0],
                [0, 0, 0, 0, 0]
            ]),
            np.array([  # dx'/dw
                mp['l_m'] * mp['p'] / (sigma * l_r * l_s) * state[
                    self.PSI_RBETA_IDX],
                - mp['l_m'] * mp['p'] / (sigma * l_r * l_s) * state[
                    self.PSI_RALPHA_IDX],
                - mp['p'] * state[self.PSI_RBETA_IDX],
                mp['p'] * state[self.PSI_RALPHA_IDX],
                mp['p']
            ]),
            np.array([  # dT/dx (gradient of the torque equation)
                - state[self.PSI_RBETA_IDX] * 3 / 2 * mp['p'] * mp[
                    'l_m'] / l_r,
                state[self.PSI_RALPHA_IDX] * 3 / 2 * mp['p'] * mp['l_m'] / l_r,
                state[self.I_SBETA_IDX] * 3 / 2 * mp['p'] * mp['l_m'] / l_r,
                - state[self.I_SALPHA_IDX] * 3 / 2 * mp['p'] * mp['l_m'] / l_r,
                0
            ])
        )
class SquirrelCageInductionMotor(InductionMotor):
    """Squirrel cage induction motor (SCIM) model with a short-circuited rotor cage.

    ===================== ========== ============= ===========================================
    Motor Parameter       Unit       Default Value Description
    ===================== ========== ============= ===========================================
    r_s                   Ohm        2.9338        Stator resistance
    r_r                   Ohm        1.355         Rotor resistance
    l_m                   H          143.75e-3     Main inductance
    l_sigs                H          5.87e-3       Stator-side stray inductance
    l_sigr                H          5.87e-3       Rotor-side stray inductance
    p                     1          2             Pole pair number
    j_rotor               kg/m^2     0.0011        Moment of inertia of the rotor
    ===================== ========== ============= ===========================================

    =============== ====== =============================================
    Motor Currents  Unit   Description
    =============== ====== =============================================
    i_sd            A      Direct axis current
    i_sq            A      Quadrature axis current
    i_sa            A      Stator current through branch a
    i_sb            A      Stator current through branch b
    i_sc            A      Stator current through branch c
    i_salpha        A      Stator current in alpha direction
    i_sbeta         A      Stator current in beta direction
    =============== ====== =============================================

    =============== ====== =============================================
    Rotor flux      Unit   Description
    =============== ====== =============================================
    psi_rd          Vs     Direct axis of the rotor oriented flux
    psi_rq          Vs     Quadrature axis of the rotor oriented flux
    psi_ra          Vs     Rotor oriented flux in branch a
    psi_rb          Vs     Rotor oriented flux in branch b
    psi_rc          Vs     Rotor oriented flux in branch c
    psi_ralpha      Vs     Rotor oriented flux in alpha direction
    psi_rbeta       Vs     Rotor oriented flux in beta direction
    =============== ====== =============================================

    =============== ====== =============================================
    Motor Voltages  Unit   Description
    =============== ====== =============================================
    u_sd            V      Direct axis voltage
    u_sq            V      Quadrature axis voltage
    u_sa            V      Stator voltage through branch a
    u_sb            V      Stator voltage through branch b
    u_sc            V      Stator voltage through branch c
    u_salpha        V      Stator voltage in alpha axis
    u_sbeta         V      Stator voltage in beta axis
    =============== ====== =============================================

    ======== ===========================================================
    Limits / Nominal Value Dictionary Entries:
    -------- -----------------------------------------------------------
    Entry    Description
    ======== ===========================================================
    i        General current limit / nominal value
    i_sa     Current in phase a
    i_sb     Current in phase b
    i_sc     Current in phase c
    i_salpha Current in alpha axis
    i_sbeta  Current in beta axis
    i_sd     Current in direct axis
    i_sq     Current in quadrature axis
    omega    Mechanical angular Velocity
    torque   Motor generated torque
    u_sa     Voltage in phase a
    u_sb     Voltage in phase b
    u_sc     Voltage in phase c
    u_salpha Voltage in alpha axis
    u_sbeta  Voltage in beta axis
    u_sd     Voltage in direct axis
    u_sq     Voltage in quadrature axis
    ======== ===========================================================

    Note:
        The voltage limits should be the amplitude of the phase voltage (:math:`\hat{u}_S`).
        Typically the rms value for the line voltage (:math:`U_L`) is given.
        :math:`\hat{u}_S=\sqrt{2/3}~U_L`

        The current limits should be the amplitude of the phase current (:math:`\hat{i}_S`).
        Typically the rms value for the phase current (:math:`I_S`) is given.
        :math:`\hat{i}_S = \sqrt{2}~I_S`

        If not specified, nominal values are equal to their corresponding limit values.
        Furthermore, if specific limits/nominal values (e.g. i_a) are not specified they are inferred from
        the general limits/nominal values (e.g. i)
    """

    #### Parameters taken from DOI: 10.1109/EPEPEMC.2018.8522008 (O. Wallscheid, M. Schenke, J. Boecker)
    _default_motor_parameter = {
        'p': 2,
        'l_m': 143.75e-3,
        'l_sigs': 5.87e-3,
        'l_sigr': 5.87e-3,
        'j_rotor': 1.1e-3,
        'r_s': 2.9338,
        'r_r': 1.355,
    }
    _default_limits = dict(omega=4e3 * np.pi / 30, torque=0.0, i=5.5, epsilon=math.pi, u=560)
    _default_nominal_values = dict(omega=3e3 * np.pi / 30, torque=0.0, i=3.9, epsilon=math.pi, u=560)
    _default_initializer = {'states': {'i_salpha': 0.0, 'i_sbeta': 0.0,
                                       'psi_ralpha': 0.0, 'psi_rbeta': 0.0,
                                       'epsilon': 0.0},
                            'interval': None,
                            'random_init': None,
                            'random_params': (None, None)}

    def electrical_ode(self, state, u_salphabeta, omega, *args):
        """The differential equation of the SCIM.

        The rotor cage is short-circuited, therefore the rotor voltages are
        forced to zero (u_ralpha = u_rbeta = 0) before calling the respective
        super function.

        Args:
            state: Electrical state [i_salpha, i_sbeta, psi_ralpha, psi_rbeta, epsilon].
            u_salphabeta: Stator input voltages [u_salpha, u_sbeta].
            omega: The mechanical angular velocity.

        Returns:
            The derivatives of the electrical state vector.
        """
        u_ralphabeta = np.zeros_like(u_salphabeta)
        # (local variable name fixed, was: u_sr_aphabeta)
        u_sr_alphabeta = np.array([u_salphabeta, u_ralphabeta])
        return super().electrical_ode(state, u_sr_alphabeta, omega, *args)

    def _update_limits(self, limit_values=None, nominal_values=None):
        # Docstring of superclass
        # ``None`` sentinels replace the former mutable ``{}`` defaults to avoid
        # the shared mutable-default-argument pitfall. The two parameters are
        # accepted for signature compatibility but (as before) not used here.
        limit_values = {} if limit_values is None else limit_values
        nominal_values = {} if nominal_values is None else nominal_values
        # Phase voltage limit is half of the general voltage limit 'u'
        # (presumably the supply/DC-link voltage -- TODO confirm with converter model).
        voltage_limit = 0.5 * self._limits['u']
        voltage_nominal = 0.5 * self._nominal_values['u']
        limits_agenda = {}
        nominal_agenda = {}
        for u, i in zip(self.IO_VOLTAGES, self.IO_CURRENTS):
            limits_agenda[u] = voltage_limit
            nominal_agenda[u] = voltage_nominal
            # Fall back to a stator-resistance based estimate if no general
            # current limit 'i' is available.
            limits_agenda[i] = self._limits.get('i', None) or \
                self._limits[u] / self._motor_parameter['r_s']
            nominal_agenda[i] = self._nominal_values.get('i', None) or \
                self._nominal_values[u] / self._motor_parameter['r_s']
        super()._update_limits(limits_agenda, nominal_agenda)

    def _update_initial_limits(self, nominal_new=None, omega=None):
        # Docstring of superclass
        # ``None`` sentinel instead of a mutable ``{}`` default; behavior unchanged.
        nominal_new = {} if nominal_new is None else nominal_new
        # draw a sample magnetic field angle from [-pi,pi]
        eps_mag = 2 * np.pi * np.random.random_sample() - np.pi
        flux_alphabeta_limits = self._flux_limit(
            omega=omega, eps_mag=eps_mag, u_q_max=self._nominal_values['u_sq'])
        # Use the absolute value because limits describe upper bounds; after the
        # abs-operator the norm of the alpha-beta flux is still equal to the
        # d-component of the flux.
        flux_alphabeta_limits = np.abs(flux_alphabeta_limits)
        flux_nominal_limits = dict(zip(self.FLUXES, flux_alphabeta_limits))
        flux_nominal_limits.update(nominal_new)
        super()._update_initial_limits(flux_nominal_limits)
class DoublyFedInductionMotor(InductionMotor):
    """Doubly fed induction motor (DFIM) model with accessible rotor windings.

    ===================== ========== ============= ===========================================
    Motor Parameter       Unit       Default Value Description
    ===================== ========== ============= ===========================================
    r_s                   Ohm        4.42          Stator resistance
    r_r                   Ohm        3.51          Rotor resistance
    l_m                   H          297.5e-3      Main inductance
    l_sigs                H          25.71e-3      Stator-side stray inductance
    l_sigr                H          25.71e-3      Rotor-side stray inductance
    p                     1          2             Pole pair number
    j_rotor               kg/m^2     13.695e-3     Moment of inertia of the rotor
    ===================== ========== ============= ===========================================

    =============== ====== =============================================
    Motor Currents  Unit   Description
    =============== ====== =============================================
    i_sd            A      Direct axis current
    i_sq            A      Quadrature axis current
    i_sa            A      Current through branch a
    i_sb            A      Current through branch b
    i_sc            A      Current through branch c
    i_salpha        A      Current in alpha axis
    i_sbeta         A      Current in beta axis
    =============== ====== =============================================

    =============== ====== =============================================
    Rotor flux      Unit   Description
    =============== ====== =============================================
    psi_rd          Vs     Direct axis of the rotor oriented flux
    psi_rq          Vs     Quadrature axis of the rotor oriented flux
    psi_ra          Vs     Rotor oriented flux in branch a
    psi_rb          Vs     Rotor oriented flux in branch b
    psi_rc          Vs     Rotor oriented flux in branch c
    psi_ralpha      Vs     Rotor oriented flux in alpha direction
    psi_rbeta       Vs     Rotor oriented flux in beta direction
    =============== ====== =============================================

    =============== ====== =============================================
    Motor Voltages  Unit   Description
    =============== ====== =============================================
    u_sd            V      Direct axis voltage
    u_sq            V      Quadrature axis voltage
    u_sa            V      Stator voltage through branch a
    u_sb            V      Stator voltage through branch b
    u_sc            V      Stator voltage through branch c
    u_salpha        V      Stator voltage in alpha axis
    u_sbeta         V      Stator voltage in beta axis
    u_ralpha        V      Rotor voltage in alpha axis
    u_rbeta         V      Rotor voltage in beta axis
    =============== ====== =============================================

    ======== ===========================================================
    Limits / Nominal Value Dictionary Entries:
    -------- -----------------------------------------------------------
    Entry    Description
    ======== ===========================================================
    i        General current limit / nominal value
    i_sa     Current in phase a
    i_sb     Current in phase b
    i_sc     Current in phase c
    i_salpha Current in alpha axis
    i_sbeta  Current in beta axis
    i_sd     Current in direct axis
    i_sq     Current in quadrature axis
    omega    Mechanical angular Velocity
    torque   Motor generated torque
    u_sa     Voltage in phase a
    u_sb     Voltage in phase b
    u_sc     Voltage in phase c
    u_salpha Voltage in alpha axis
    u_sbeta  Voltage in beta axis
    u_sd     Voltage in direct axis
    u_sq     Voltage in quadrature axis
    u_ralpha Rotor voltage in alpha axis
    u_rbeta  Rotor voltage in beta axis
    ======== ===========================================================

    Note:
        The voltage limits should be the amplitude of the phase voltage (:math:`\hat{u}_S`).
        Typically the rms value for the line voltage (:math:`U_L`) is given.
        :math:`\hat{u}_S=\sqrt{2/3}~U_L`

        The current limits should be the amplitude of the phase current (:math:`\hat{i}_S`).
        Typically the rms value for the phase current (:math:`I_S`) is given.
        :math:`\hat{i}_S = \sqrt{2}~I_S`

        If not specified, nominal values are equal to their corresponding limit values.
        Furthermore, if specific limits/nominal values (e.g. i_a) are not specified they are inferred from
        the general limits/nominal values (e.g. i)
    """

    ROTOR_VOLTAGES = ['u_ralpha', 'u_rbeta']
    ROTOR_CURRENTS = ['i_ralpha', 'i_rbeta']
    IO_ROTOR_VOLTAGES = ['u_ra', 'u_rb', 'u_rc', 'u_rd', 'u_rq']
    IO_ROTOR_CURRENTS = ['i_ra', 'i_rb', 'i_rc', 'i_rd', 'i_rq']

    #### Parameters taken from DOI: 10.1016/j.jestch.2016.01.015 (N. Kumar, T. R. Chelliah, S. P. Srivastava)
    _default_motor_parameter = {
        'p': 2,
        'l_m': 297.5e-3,
        'l_sigs': 25.71e-3,
        'l_sigr': 25.71e-3,
        'j_rotor': 13.695e-3,
        'r_s': 4.42,
        'r_r': 3.51,
    }
    _default_limits = dict(omega=1800 * np.pi / 30, torque=0.0, i=9, epsilon=math.pi, u=720)
    _default_nominal_values = dict(omega=1650 * np.pi / 30, torque=0.0, i=7.5, epsilon=math.pi, u=720)
    _default_initializer = {'states': {'i_salpha': 0.0, 'i_sbeta': 0.0,
                                       'psi_ralpha': 0.0, 'psi_rbeta': 0.0,
                                       'epsilon': 0.0},
                            'interval': None,
                            'random_init': None,
                            'random_params': (None, None)}

    def __init__(self, **kwargs):
        # Docstring of superclass
        # BUGFIX: create NEW per-instance lists instead of ``+=``. The former
        # augmented assignment extended the class-level IO_VOLTAGES/IO_CURRENTS
        # lists (inherited from InductionMotor) in place, so every DFIM
        # instantiation permanently grew the lists shared by all induction
        # motor classes.
        self.IO_VOLTAGES = self.IO_VOLTAGES + self.IO_ROTOR_VOLTAGES
        self.IO_CURRENTS = self.IO_CURRENTS + self.IO_ROTOR_CURRENTS
        super().__init__(**kwargs)

    def _update_limits(self, limit_values=None, nominal_values=None):
        # Docstring of superclass
        # ``None`` sentinels replace the former mutable ``{}`` defaults to avoid
        # the shared mutable-default-argument pitfall. The two parameters are
        # accepted for signature compatibility but (as before) not used here.
        limit_values = {} if limit_values is None else limit_values
        nominal_values = {} if nominal_values is None else nominal_values
        # Phase voltage limit is half of the general voltage limit 'u'
        # (presumably the supply/DC-link voltage -- TODO confirm with converter model).
        voltage_limit = 0.5 * self._limits['u']
        voltage_nominal = 0.5 * self._nominal_values['u']
        limits_agenda = {}
        nominal_agenda = {}
        for u, i in zip(self.IO_VOLTAGES + self.ROTOR_VOLTAGES,
                        self.IO_CURRENTS + self.ROTOR_CURRENTS):
            limits_agenda[u] = voltage_limit
            nominal_agenda[u] = voltage_nominal
            # Fall back to a rotor-resistance based estimate if no general
            # current limit 'i' is available.
            limits_agenda[i] = self._limits.get('i', None) or \
                self._limits[u] / self._motor_parameter['r_r']
            nominal_agenda[i] = self._nominal_values.get('i', None) or \
                self._nominal_values[u] / \
                self._motor_parameter['r_r']
        super()._update_limits(limits_agenda, nominal_agenda)

    def _update_initial_limits(self, nominal_new=None, omega=None):
        # Docstring of superclass
        # ``None`` sentinel instead of a mutable ``{}`` default.
        nominal_new = {} if nominal_new is None else nominal_new
        # draw a sample magnetic field angle from [-pi,pi]
        eps_mag = 2 * np.pi * np.random.random_sample() - np.pi
        flux_alphabeta_limits = self._flux_limit(
            omega=omega,
            eps_mag=eps_mag,
            u_q_max=self._nominal_values['u_sq'],
            u_rq_max=self._nominal_values['u_rq'])
        # NOTE(review): unlike SquirrelCageInductionMotor, no np.abs() is
        # applied to the flux limits here -- confirm whether that is intended.
        flux_nominal_limits = dict(zip(self.FLUXES, flux_alphabeta_limits))
        # CONSISTENCY FIX: merge caller-provided nominal values like the SCIM
        # override does; previously the ``nominal_new`` parameter was accepted
        # but silently ignored.
        flux_nominal_limits.update(nominal_new)
        super()._update_initial_limits(flux_nominal_limits)
21bdf99390c3df665d25199aea9fff225ef4b831 | 1,004 | py | Python | tests/pyrem_tests.py | sgdxbc/PyREM | e162efd5f95d1d335fb96d77cbe047def02c340e | [
"MIT"
] | 5 | 2016-01-20T22:34:41.000Z | 2020-12-19T15:24:33.000Z | tests/pyrem_tests.py | sgdxbc/PyREM | e162efd5f95d1d335fb96d77cbe047def02c340e | [
"MIT"
] | 12 | 2015-11-11T23:03:03.000Z | 2021-09-28T17:09:53.000Z | tests/pyrem_tests.py | sgdxbc/PyREM | e162efd5f95d1d335fb96d77cbe047def02c340e | [
"MIT"
] | 4 | 2015-12-10T05:14:30.000Z | 2021-08-14T02:48:05.000Z | from pyrem.task import Task, TaskStatus
class DummyTask(Task):
    """Minimal no-op Task subclass used by the tests below to exercise the
    Task status transitions without doing any real work."""

    def _start(self):
        # No-op: nothing to launch.
        pass

    def _wait(self):
        # No-op: nothing to wait for.
        pass

    def _stop(self):
        # No-op: nothing to tear down.
        pass
class TestDummyTask(object):
    """Unit tests for the status transitions of DummyTask."""

    @classmethod
    def setup_class(cls):
        """Run once for the class before any tests are run."""
        pass

    @classmethod
    def teardown_class(cls):
        """Run once for the class _after_ all tests are run."""
        pass

    def setup(self):
        """Create a fresh DummyTask before each test method."""
        self.task = DummyTask()

    def teardown(self):
        """Run once after _each_ test method is executed."""
        pass

    def test_status(self):
        """The task walks IDLE -> STARTED -> STOPPED via start() and wait()."""
        task = self.task
        assert task._status == TaskStatus.IDLE
        task.start()
        assert task._status == TaskStatus.STARTED
        task.wait()
        assert task._status == TaskStatus.STOPPED

    def test_status2(self):
        """start(wait=True) moves the task straight to STOPPED."""
        self.task.start(wait=True)
        assert self.task._status == TaskStatus.STOPPED
| 23.904762 | 78 | 0.625498 | 959 | 0.955179 | 0 | 0 | 266 | 0.26494 | 0 | 0 | 205 | 0.204183 |
21bead059ee3f22ec22b2bb48bbf62356bb305bf | 1,302 | py | Python | invenio_app_ils/records/resolver/jsonresolver/document_keyword.py | lauren-d/invenio-app-ils | 961e88ba144b1371b629dfbc0baaf388e46e667f | [
"MIT"
] | null | null | null | invenio_app_ils/records/resolver/jsonresolver/document_keyword.py | lauren-d/invenio-app-ils | 961e88ba144b1371b629dfbc0baaf388e46e667f | [
"MIT"
] | null | null | null | invenio_app_ils/records/resolver/jsonresolver/document_keyword.py | lauren-d/invenio-app-ils | 961e88ba144b1371b629dfbc0baaf388e46e667f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2019 CERN.
#
# invenio-app-ils is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Resolve the Keyword referenced in the Document."""
import jsonresolver
from werkzeug.routing import Rule
from ...api import Document, Keyword
from ..resolver import get_field_value_for_record as get_field_value
# Note: there must be only one resolver per file,
# otherwise only the last one is registered
@jsonresolver.hookimpl
def jsonresolver_loader(url_map):
    """Resolve the referred Keywords for a Document record."""
    from flask import current_app

    def keywords_resolver(document_pid):
        """Return the Keyword records referred to by the given Document or raise."""
        resolved = []
        for pid in get_field_value(Document, document_pid, "keyword_pids"):
            record = Keyword.get_record_by_pid(pid)
            # Strip the internal JSON-Schema marker before embedding the record.
            del record["$schema"]
            resolved.append(record)
        return resolved

    # Register the resolver endpoint on the configured JSON schemas host.
    route = Rule(
        "/api/resolver/documents/<document_pid>/keywords",
        endpoint=keywords_resolver,
        host=current_app.config.get("JSONSCHEMAS_HOST"),
    )
    url_map.add(route)
| 28.933333 | 78 | 0.689708 | 0 | 0 | 0 | 0 | 782 | 0.600614 | 0 | 0 | 556 | 0.427035 |
21beae082b613ebc189de03f874795adfa3f6a13 | 68 | py | Python | Other_AIMA_Scripts/planning.py | erensezener/aima-based-irl | fbbe28986cec0b5e58fef0f00338a180ed03759a | [
"MIT"
] | 12 | 2015-06-17T05:15:40.000Z | 2021-05-18T15:39:33.000Z | Other_AIMA_Scripts/planning.py | erensezener/aima-based-irl | fbbe28986cec0b5e58fef0f00338a180ed03759a | [
"MIT"
] | 1 | 2020-03-14T08:45:49.000Z | 2020-03-14T08:45:49.000Z | Other_AIMA_Scripts/planning.py | erensezener/aima-based-irl | fbbe28986cec0b5e58fef0f00338a180ed03759a | [
"MIT"
] | 5 | 2016-09-10T19:16:56.000Z | 2018-10-10T05:09:03.000Z | """Planning (Chapters 11-12)
"""
from __future__ import generators
| 13.6 | 33 | 0.735294 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 32 | 0.470588 |
21c005abab0af70acf7d0786eb0dee5f66346f8d | 15,025 | py | Python | Berkeley_pacman_project1/search.py | AndrewSpano/UC_Berkeley_AI_Projects | a695f7be1653e485fdb339f99e6a266a1b044ba4 | [
"MIT"
] | 1 | 2020-12-12T16:16:05.000Z | 2020-12-12T16:16:05.000Z | Berkeley_pacman_project1/search.py | AndrewSpano/University_AI_Projects | a695f7be1653e485fdb339f99e6a266a1b044ba4 | [
"MIT"
] | null | null | null | Berkeley_pacman_project1/search.py | AndrewSpano/University_AI_Projects | a695f7be1653e485fdb339f99e6a266a1b044ba4 | [
"MIT"
] | 1 | 2020-10-13T19:37:59.000Z | 2020-10-13T19:37:59.000Z | # search.py
# ---------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
"""
In search.py, you will implement generic search algorithms which are called by
Pacman agents (in searchAgents.py).
"""
import util
class SearchProblem:
    """
    Abstract base class describing the interface every search problem must
    provide.  Concrete problems implement all of the methods below; this
    class never implements any of them (calling one reports it as not
    defined via util.raiseNotDefined).

    You do not need to change anything in this class, ever.
    """

    def getStartState(self):
        """Return the initial state of the search problem."""
        util.raiseNotDefined()

    def isGoalState(self, state):
        """Return True if and only if ``state`` is a valid goal state."""
        util.raiseNotDefined()

    def getSuccessors(self, state):
        """
        Return a list of (successor, action, stepCost) triples for ``state``,
        where ``successor`` is a state reachable from ``state``, ``action``
        is the move required to get there, and ``stepCost`` is the
        incremental cost of expanding to that successor.
        """
        util.raiseNotDefined()

    def getCostOfActions(self, actions):
        """
        Return the total cost of the given sequence of actions.  The
        sequence must be composed of legal moves.
        """
        util.raiseNotDefined()
def tinyMazeSearch(problem):
    """
    Returns the hard-coded action sequence that solves tinyMaze.  The moves
    are only valid for that specific layout, so only use this for tinyMaze;
    any other maze will give an incorrect sequence.
    """
    from game import Directions
    south = Directions.SOUTH
    west = Directions.WEST
    return [south, south, west, south, west, west, south, west]
def depthFirstSearch(problem):
    """
    Search the deepest nodes in the search tree first (graph search).

    :param problem: a SearchProblem instance providing getStartState,
        isGoalState, and getSuccessors
    :return: a list of actions that reaches a goal state, [] if the start
        state is already a goal, or None if no goal is reachable
    """
    start = problem.getStartState()
    if problem.isGoalState(start):
        return []

    # LIFO frontier: the most recently discovered state is expanded first.
    from util import Stack
    frontier = Stack()
    frontier.push(start)

    # States that have been expanded (the start is seeded up front).
    visited = {start: True}
    # Action sequence used to reach each discovered state from the start.
    paths = {start: []}

    while not frontier.isEmpty():
        state = frontier.pop()
        visited[state] = True
        if problem.isGoalState(state):
            return paths[state]

        # Successor triples are (state, action, stepCost); the step cost is
        # irrelevant for depth first search.
        for successor, action, _cost in problem.getSuccessors(state):
            if successor not in visited:
                paths[successor] = paths[state] + [action]
                frontier.push(successor)

    # The frontier was exhausted without reaching a goal: it is unreachable.
    return None
def breadthFirstSearch(problem):
    """
    Search the shallowest nodes in the search tree first (graph search).

    :param problem: a SearchProblem instance
    :return: a list of actions reaching a goal state using the fewest
        actions, [] if the start state is already a goal, or None if no
        goal is reachable
    """
    start = problem.getStartState()
    if problem.isGoalState(start):
        return []

    # FIFO frontier: states are expanded in order of discovery.
    from util import Queue
    frontier = Queue()
    frontier.push(start)

    # States that have been discovered (marked when pushed, unlike DFS,
    # so no state is ever enqueued twice).
    visited = {start: True}
    # Action sequence used to reach each discovered state from the start.
    paths = {start: []}

    while not frontier.isEmpty():
        state = frontier.pop()
        if problem.isGoalState(state):
            return paths[state]

        # Successor triples are (state, action, stepCost); the step cost is
        # irrelevant for breadth first search.
        for successor, action, _cost in problem.getSuccessors(state):
            if successor not in visited:
                visited[successor] = True
                paths[successor] = paths[state] + [action]
                frontier.push(successor)

    # The frontier was exhausted without reaching a goal: it is unreachable.
    return None
def uniformCostSearch(problem):
    """
    Search the node of least total path cost first (Dijkstra-style).

    :param problem: a SearchProblem instance
    :return: a list of actions reaching a goal state with minimum total
        cost, [] if the start state is already a goal, or None if no goal
        is reachable
    """
    start = problem.getStartState()
    if problem.isGoalState(start):
        return []

    # Frontier ordered by the cheapest known cost to reach each state.
    from util import PriorityQueue
    frontier = PriorityQueue()
    frontier.push(start, 0)

    # States that have been discovered (marked when pushed).
    visited = {start: True}
    # Action sequence used to reach each discovered state from the start.
    paths = {start: []}
    # Cheapest known cost to reach each discovered state.
    costs = {start: 0}

    while not frontier.isEmpty():
        state = frontier.pop()
        if problem.isGoalState(state):
            return paths[state]

        for successor, action, step_cost in problem.getSuccessors(state):
            new_cost = costs[state] + step_cost
            if successor not in visited:
                visited[successor] = True
                costs[successor] = new_cost
                paths[successor] = paths[state] + [action]
                frontier.push(successor, new_cost)
            elif new_cost < costs[successor]:
                # Found a cheaper route to an already discovered state:
                # lower its recorded cost, rewrite its path, and
                # reprioritize it in the frontier.
                costs[successor] = new_cost
                paths[successor] = paths[state] + [action]
                frontier.update(successor, new_cost)

    # The frontier was exhausted without reaching a goal: it is unreachable.
    return None
def nullHeuristic(state, problem=None):
    """
    Trivial heuristic: estimates the cost from the current state to the
    nearest goal in the provided SearchProblem as zero for every state.
    """
    return 0
def aStarSearch(problem, heuristic=nullHeuristic):
    """
    Search the node that has the lowest combined path cost and heuristic
    estimate first.

    :param problem: a SearchProblem instance
    :param heuristic: callable (state, problem) -> estimated remaining cost;
        defaults to nullHeuristic, which reduces this to uniform cost search
    :return: a list of actions reaching a goal state, [] if the start state
        is already a goal, or None if no goal is reachable
    """
    start = problem.getStartState()
    if problem.isGoalState(start):
        return []

    # Frontier ordered by cost-so-far plus the heuristic estimate.
    from util import PriorityQueue
    frontier = PriorityQueue()
    frontier.push(start, heuristic(start, problem))

    # States that have been discovered (marked when pushed).
    visited = {start: True}
    # Action sequence used to reach each discovered state from the start.
    paths = {start: []}
    # Cheapest known path cost (without heuristic) to each discovered state.
    costs = {start: 0}

    while not frontier.isEmpty():
        state = frontier.pop()
        if problem.isGoalState(state):
            return paths[state]

        for successor, action, step_cost in problem.getSuccessors(state):
            new_cost = costs[state] + step_cost
            if successor not in visited:
                visited[successor] = True
                costs[successor] = new_cost
                paths[successor] = paths[state] + [action]
                frontier.push(successor, new_cost + heuristic(successor, problem))
            elif new_cost < costs[successor]:
                # Found a cheaper route to an already discovered state:
                # lower its recorded cost, rewrite its path, and
                # reprioritize it in the frontier.
                costs[successor] = new_cost
                paths[successor] = paths[state] + [action]
                frontier.update(successor, new_cost + heuristic(successor, problem))

    # The frontier was exhausted without reaching a goal: it is unreachable.
    return None
# Abbreviations
# Short aliases for the search functions defined above.
bfs = breadthFirstSearch
dfs = depthFirstSearch
astar = aStarSearch
ucs = uniformCostSearch
| 40.066667 | 123 | 0.647388 | 1,276 | 0.084925 | 0 | 0 | 0 | 0 | 0 | 0 | 8,262 | 0.549884 |
21c10c49fed1208784b8ed8d90ec7a93c4893c97 | 270 | py | Python | src/python/WMCore/WMBS/Oracle/Files/GetLocation.py | hufnagel/WMCore | b150cc725b68fc1cf8e6e0fa07c826226a4421fa | [
"Apache-2.0"
] | 21 | 2015-11-19T16:18:45.000Z | 2021-12-02T18:20:39.000Z | src/python/WMCore/WMBS/Oracle/Files/GetLocation.py | hufnagel/WMCore | b150cc725b68fc1cf8e6e0fa07c826226a4421fa | [
"Apache-2.0"
] | 5,671 | 2015-01-06T14:38:52.000Z | 2022-03-31T22:11:14.000Z | src/python/WMCore/WMBS/Oracle/Files/GetLocation.py | hufnagel/WMCore | b150cc725b68fc1cf8e6e0fa07c826226a4421fa | [
"Apache-2.0"
] | 67 | 2015-01-21T15:55:38.000Z | 2022-02-03T19:53:13.000Z | """
Oracle implementation of GetLocationFile
"""
from WMCore.WMBS.MySQL.Files.GetLocation import GetLocation \
as GetLocationFileMySQL
class GetLocation(GetLocationFileMySQL):
    """
    _GetLocation_

    Oracle variant of the file-location DAO.  The implementation is
    inherited unchanged from the MySQL version; Oracle specific:
    file is a reserved word.
    """
| 18 | 61 | 0.725926 | 128 | 0.474074 | 0 | 0 | 0 | 0 | 0 | 0 | 122 | 0.451852 |
21c279ad40c3cb9e53b7dcc853627dea0e3c47fa | 5,471 | py | Python | python/cohorte/vote/core.py | isandlaTech/cohorte-runtime | 686556cdde20beba77ae202de9969be46feed5e2 | [
"Apache-2.0"
] | 6 | 2015-04-28T16:51:08.000Z | 2017-07-12T11:29:00.000Z | python/cohorte/vote/core.py | isandlaTech/cohorte-runtime | 686556cdde20beba77ae202de9969be46feed5e2 | [
"Apache-2.0"
] | 29 | 2015-02-24T11:11:26.000Z | 2017-08-25T08:30:18.000Z | qualifier/deploy/cohorte-home/repo/cohorte/vote/core.py | isandlaTech/cohorte-devtools | 9ba9021369188d2f0ad5c845ef242fd5a7097b57 | [
"Apache-2.0"
] | 1 | 2015-08-24T13:23:43.000Z | 2015-08-24T13:23:43.000Z | #!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Voting system core service
:author: Thomas Calmant
:license: Apache Software License 2.0
:version: 1.1.0
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Standard library
import logging
# iPOPO Decorators
from pelix.ipopo.decorators import ComponentFactory, Provides, Requires, \
Instantiate
# Voting system
import cohorte.vote
import cohorte.vote.beans as beans
# ------------------------------------------------------------------------------
# Bundle version
import cohorte.version
__version__=cohorte.version.__version__
# ------------------------------------------------------------------------------
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
@ComponentFactory()
@Provides(cohorte.vote.SERVICE_VOTE_CORE)
@Requires('_store', cohorte.vote.SERVICE_VOTE_STORE)
@Requires('_engines', cohorte.vote.SERVICE_VOTE_ENGINE,
aggregate=True, optional=False)
@Instantiate('vote-core')
class VoteCore(object):
"""
Voting system core service
"""
def __init__(self):
"""
Sets up members
"""
# Vote engines
self._engines = []
# Vote results storage
self._store = None
# Votes counter
self._nb_votes = 0
def get_kinds(self):
"""
Returns the list of supported kinds of vote
"""
return [engine.get_kind() for engine in self._engines]
def vote(self, electors, candidates, subject=None, name=None,
kind=None, parameters=None):
"""
Runs a vote for the given
:param electors: List of electors
:param candidates: List of candidates
:param subject: Subject of the vote (optional)
:param name: Name of the vote
:param kind: Kind of vote
:param parameters: Parameters for the vote engine
:return: The result of the election (kind-dependent)
:raise NameError: Unknown kind of vote
"""
# 1. Select the engine
if kind is None:
if not self._engines:
# No engine available
raise NameError("No engine available")
# Use the first engine
engine = self._engines[0]
kind = engine.get_kind()
else:
# Engine given
for engine in self._engines:
if engine.get_kind() == kind:
break
else:
raise NameError("Unknown kind of vote: {0}".format(kind))
# 2. Normalize parameters
if not isinstance(parameters, dict):
# No valid parameters given
parameters = {}
else:
parameters = parameters.copy()
if not name:
# Generate a vote name
self._nb_votes += 1
name = "Vote {0} ({1})".format(self._nb_votes, kind)
# Do not try to shortcut the vote if there is only one candidate:
# it is possible that an elector has to be notified of the votes
# Prepare the results bean
vote_bean = beans.VoteResults(name, kind, candidates, electors,
subject, parameters)
# Vote until we have a result
vote_round = 1
result = None
while True:
try:
# 3. Vote
ballots = []
for elector in electors:
ballot = beans.Ballot(elector)
# TODO: add a "last resort" candidate
# (if no candidate works)
elector.vote(tuple(candidates), subject, ballot)
ballots.append(ballot)
# Store the ballots of this round
vote_bean.set_ballots(ballots)
# 4. Analyze votes
result = engine.analyze(vote_round, ballots, tuple(candidates),
parameters, vote_bean)
break
except beans.CoupdEtat as ex:
# Putch = Coup d'etat !
_logger.debug("A putch is perpetrated by [%s]", ex.claimant)
vote_bean.coup_d_etat = True
result = ex.claimant
break
except beans.NextRound as ex:
# Another round is necessary
candidates = ex.candidates
vote_round += 1
vote_bean.next_round(candidates)
if len(candidates) == 1:
# Tricky engine...
result = candidates[0]
break
else:
_logger.debug("New round required with: %s", candidates)
# Store the vote results
vote_bean.set_vote_results(result)
self._store.store_vote(vote_bean)
return result
| 31.085227 | 80 | 0.549443 | 3,880 | 0.709194 | 0 | 0 | 4,119 | 0.752879 | 0 | 0 | 2,405 | 0.439591 |
21c4e67bcec2a79afa2f1eebd700ab15449d0b2d | 4,665 | py | Python | aether-odk-module/aether/odk/api/serializers.py | lordmallam/aether | 7ceb71d2ef8b09d704d94dfcb243dbbdf8356135 | [
"Apache-2.0"
] | null | null | null | aether-odk-module/aether/odk/api/serializers.py | lordmallam/aether | 7ceb71d2ef8b09d704d94dfcb243dbbdf8356135 | [
"Apache-2.0"
] | null | null | null | aether-odk-module/aether/odk/api/serializers.py | lordmallam/aether | 7ceb71d2ef8b09d704d94dfcb243dbbdf8356135 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2018 by eHealth Africa : http://www.eHealthAfrica.org
#
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from django.contrib.auth import get_user_model
from django.contrib.auth.password_validation import validate_password as validate_pwd
from django.utils.translation import ugettext as _
from drf_dynamic_fields import DynamicFieldsMixin
from rest_framework import serializers
from .models import Project, XForm, MediaFile
from .xform_utils import parse_xform_file, validate_xform
from .surveyors_utils import get_surveyors, flag_as_surveyor
class MediaFileSerializer(DynamicFieldsMixin, serializers.ModelSerializer):
    """Serializer for MediaFile records; exposes all model fields."""

    # Optional display name for the media file; may be omitted or null.
    name = serializers.CharField(allow_null=True, default=None)

    class Meta:
        model = MediaFile
        fields = '__all__'
class XFormSerializer(DynamicFieldsMixin, serializers.ModelSerializer):
    """Serializer for XForm records, including upload of the form file."""

    # Hyperlink to this xform's detail endpoint.
    url = serializers.HyperlinkedIdentityField('xform-detail', read_only=True)
    # Hyperlink to the project this xform belongs to.
    project_url = serializers.HyperlinkedRelatedField(
        'project-detail',
        source='project',
        read_only=True
    )
    # NOTE(review): ``default=[]`` is a shared mutable default — appears safe
    # here as long as nothing mutates it in place, but verify.
    surveyors = serializers.PrimaryKeyRelatedField(
        label=_('Surveyors'),
        many=True,
        queryset=get_surveyors(),
        allow_null=True,
        default=[],
        help_text=_('If you do not specify any surveyors, EVERYONE will be able to access this xForm.'),
    )
    # Write-only upload field; ``validate`` parses it into ``xml_data`` and
    # removes the file itself from the validated data.
    xml_file = serializers.FileField(
        write_only=True,
        allow_null=True,
        default=None,
        label=_('XLS Form / XML File'),
        help_text=_('Upload an XLS Form or an XML File'),
    )

    # this will return all media files in one request call
    media_files = MediaFileSerializer(many=True, read_only=True)

    def validate(self, value):
        """
        Parse and validate the uploaded form file.

        On success the extracted form definition is stored in
        ``value['xml_data']``; parse or validation failures are reported
        against the ``xml_file`` field.  The upload itself is removed from
        the validated data before delegating to the parent validation.
        """
        if value['xml_file']:
            try:
                # extract data from file and put it on `xml_data`
                value['xml_data'] = parse_xform_file(
                    filename=str(value['xml_file']),
                    content=value['xml_file'],
                )
                # validate xml data and link the possible errors to this field
                validate_xform(value['xml_data'])
            except Exception as e:
                raise serializers.ValidationError({'xml_file': str(e)})

        value.pop('xml_file')
        return super(XFormSerializer, self).validate(value)

    class Meta:
        model = XForm
        fields = '__all__'
class SurveyorSerializer(DynamicFieldsMixin, serializers.ModelSerializer):
    """Serializer managing surveyor accounts (auth users flagged as surveyors)."""

    # Raw password input; rendered as a password widget in browsable forms.
    password = serializers.CharField(style={'input_type': 'password'})

    def validate_password(self, value):
        """Run the configured password validators over the raw password."""
        validate_pwd(value)
        return value

    def create(self, validated_data):
        """Create a user with a hashed password and flag it as a surveyor."""
        raw_password = validated_data.pop('password', None)
        surveyor = self.Meta.model(**validated_data)
        surveyor.set_password(raw_password)
        surveyor.save()
        flag_as_surveyor(surveyor)
        return surveyor

    def update(self, instance, validated_data):
        """Update user fields, re-hashing the password only when it changed."""
        for field_name, field_value in validated_data.items():
            if field_name != 'password':
                setattr(instance, field_name, field_value)
            elif field_value != instance.password:
                # Only re-hash when the submitted value differs from the
                # stored value (i.e. the client sent a new raw password).
                instance.set_password(field_value)
        instance.save()
        flag_as_surveyor(instance)
        return instance

    class Meta:
        model = get_user_model()
        fields = ('id', 'username', 'password', )
class ProjectSerializer(DynamicFieldsMixin, serializers.ModelSerializer):
    """Serializer for Project records along with their nested xforms."""

    # Hyperlink to this project's detail endpoint.
    url = serializers.HyperlinkedIdentityField('project-detail', read_only=True)
    # NOTE(review): ``default=[]`` is a shared mutable default — appears safe
    # here as long as nothing mutates it in place, but verify.
    surveyors = serializers.PrimaryKeyRelatedField(
        label=_('Surveyors'),
        many=True,
        queryset=get_surveyors(),
        allow_null=True,
        default=[],
        help_text=_('If you do not specify any surveyors, EVERYONE will be able to access this project xForms.'),
    )

    # this will return all linked xForms with media files in one request call
    xforms = XFormSerializer(read_only=True, many=True)

    class Meta:
        model = Project
        fields = '__all__'
| 33.321429 | 113 | 0.674384 | 3,484 | 0.746838 | 0 | 0 | 0 | 0 | 0 | 0 | 1,419 | 0.30418 |
21c5953806a590d303da60ce30af9e05c9ffcf7f | 1,046 | py | Python | client.py | Klark007/Selbstfahrendes-Auto-im-Modell | d7fe81392de2b29b7dbc7c9d929fa0031b89900b | [
"MIT"
] | null | null | null | client.py | Klark007/Selbstfahrendes-Auto-im-Modell | d7fe81392de2b29b7dbc7c9d929fa0031b89900b | [
"MIT"
] | null | null | null | client.py | Klark007/Selbstfahrendes-Auto-im-Modell | d7fe81392de2b29b7dbc7c9d929fa0031b89900b | [
"MIT"
] | null | null | null | import socket
from ast import literal_eval
import Yetiborg.Drive as Yetiborg
HEADERSIZE = 2
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("localhost", 12345)) # 192.168.0.11 / localhost / 192.168.0.108
# car always looks up at the beginning
car = Yetiborg.Yetiborg((0, 1))
"""
fs: finished
"""
def move(vec):
    """Forward a movement vector to the Yetiborg's motor controller.

    :param vec: the direction for the car (the server sends a tuple such as
        (0, 1), decoded by command_decoder)
    """
    print(vec)
    # movement command at motors
    car.calculate_movement(vec)
def stop():
    """Report the stop command and terminate the client process."""
    print("Stop")
    # ``exit()`` is injected by the ``site`` module and is intended for the
    # interactive interpreter; raising SystemExit directly is the reliable
    # equivalent for scripts.
    raise SystemExit
def command_decoder(command):
    """Decode a command string from the server and perform its action.

    The first two characters select the action:
      * ``mv`` - move; the remainder is a literal tuple with the direction.
      * ``en`` - end; stop the client.
    Anything else is ignored.

    :param command: raw command string received from the server
    """
    # Renamed the parameter from ``str``, which shadowed the builtin.
    cmd = command[:2]
    if cmd == "mv":
        # gets the direction (tuple) from the command
        move(literal_eval(command[2:]))
    elif cmd == "en":
        stop()
# Main client loop: read a fixed-size length header, read the command of
# that length, dispatch it, and acknowledge completion with "fs".
while True:
    full_cmd = ""
    # The first HEADERSIZE bytes carry the length of the command to follow.
    header = s.recv(HEADERSIZE).decode("utf-8")
    print("New message length:", header[:HEADERSIZE])
    cmd_len = int(header[:HEADERSIZE])
    full_cmd = s.recv(cmd_len).decode("utf-8")
    command_decoder(full_cmd)
    # send finished execution signal
    s.send(bytes("fs", "utf-8"))
| 18.350877 | 75 | 0.637667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 323 | 0.308795 |
21c5ac45e757ed2ed07376fd5edc3fa9749f63d7 | 9,785 | py | Python | python/DeepSeaScene/Convert/GLTFModel.py | akb825/DeepSea | fff790d0a472cf2f9f89de653e0b4470ce605d24 | [
"Apache-2.0"
] | 5 | 2018-11-17T23:13:22.000Z | 2021-09-30T13:37:04.000Z | python/DeepSeaScene/Convert/GLTFModel.py | akb825/DeepSea | fff790d0a472cf2f9f89de653e0b4470ce605d24 | [
"Apache-2.0"
] | null | null | null | python/DeepSeaScene/Convert/GLTFModel.py | akb825/DeepSea | fff790d0a472cf2f9f89de653e0b4470ce605d24 | [
"Apache-2.0"
] | 2 | 2019-09-23T12:23:35.000Z | 2020-04-07T05:31:06.000Z | # Copyright 2020 Aaron Barany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import json
import os
import struct
from .ModelNodeConvert import ModelNodeVertexStream, ModelNodeGeometryData, addModelType
from .SceneResourcesConvert import modelVertexAttribEnum
class Object:
	"""Empty class used as a simple bag for dynamically assigned attributes."""
	pass
# Maps glTF attribute semantic names to DeepSea vertex attribute indices.
# Fixed the TEXCOORD_1..TEXCOORD_7 keys, which previously used a backtick
# ('TEXCOORD`1') instead of an underscore and therefore could never match
# the semantics defined by the glTF specification.
gltfVertexAttribEnum = {
	'POSITION': modelVertexAttribEnum['Position'],
	'NORMAL': modelVertexAttribEnum['Normal'],
	'TANGENT': modelVertexAttribEnum['Tangent'],
	'TEXCOORD_0': modelVertexAttribEnum['TexCoord0'],
	'TEXCOORD_1': modelVertexAttribEnum['TexCoord1'],
	'TEXCOORD_2': modelVertexAttribEnum['TexCoord2'],
	'TEXCOORD_3': modelVertexAttribEnum['TexCoord3'],
	'TEXCOORD_4': modelVertexAttribEnum['TexCoord4'],
	'TEXCOORD_5': modelVertexAttribEnum['TexCoord5'],
	'TEXCOORD_6': modelVertexAttribEnum['TexCoord6'],
	'TEXCOORD_7': modelVertexAttribEnum['TexCoord7'],
	'COLOR_0': modelVertexAttribEnum['Color0'],
	'COLOR_1': modelVertexAttribEnum['Color1'],
	'JOINTS_0': modelVertexAttribEnum['BlendIndices'],
	'WEIGHTS_0': modelVertexAttribEnum['BlendWeights'],
}
# Maps (glTF accessor type, glTF componentType code) pairs to the DeepSea
# (vertex format, decorator) pair.  The component type codes are GL enums:
# 5120 BYTE, 5121 UNSIGNED_BYTE, 5122 SHORT, 5123 UNSIGNED_SHORT,
# 5125 UNSIGNED_INT, 5126 FLOAT.
gltfTypeMap = {
	('SCALAR', 5120): ('X8', 'Int'),
	('SCALAR', 5121): ('X8', 'UInt'),
	('SCALAR', 5122): ('X16', 'Int'),
	('SCALAR', 5123): ('X16', 'UInt'),
	('SCALAR', 5125): ('X32', 'UInt'),
	('SCALAR', 5126): ('X32', 'Float'),
	('VEC2', 5120): ('X8Y8', 'Int'),
	('VEC2', 5121): ('X8Y8', 'UInt'),
	('VEC2', 5122): ('X16Y16', 'Int'),
	('VEC2', 5123): ('X16Y16', 'UInt'),
	('VEC2', 5126): ('X32Y32', 'Float'),
	('VEC3', 5120): ('X8Y8Z8', 'Int'),
	('VEC3', 5121): ('X8Y8Z8', 'UInt'),
	('VEC3', 5122): ('X16Y16Z16', 'Int'),
	('VEC3', 5123): ('X16Y16Z16', 'UInt'),
	('VEC3', 5126): ('X32Y32Z32', 'Float'),
	('VEC4', 5120): ('X8Y8Z8W8', 'Int'),
	('VEC4', 5121): ('X8Y8Z8W8', 'UInt'),
	('VEC4', 5122): ('X16Y16Z16W16', 'Int'),
	('VEC4', 5123): ('X16Y16Z16W16', 'UInt'),
	('VEC4', 5126): ('X32Y32Z32W32', 'Float')
}
# Maps the glTF primitive "mode" code (list index) to the DeepSea primitive
# type name.  NOTE(review): glTF mode 2 is LINE_LOOP, which is mapped to
# 'LineStrip' here (dropping the closing segment) — presumably intentional
# because there is no line-loop primitive type, but verify.
gltfPrimitiveTypeMap = ['PointList', 'LineList', 'LineStrip', 'LineStrip', 'TriangleList',
	'TriangleStrip', 'TriangleFan']
def convertGLTFModel(convertContext, path):
	"""
	Converts a GLTF model for use with ModelNodeConvert.

	:param convertContext: the conversion context passed in by the model
		conversion framework. (Unused here, but required by the interface.)
	:param path: the path to the .gltf file to convert.
	:return: a list of ModelNodeGeometryData, one entry per mesh primitive.
	:raise Exception: if the file, a referenced buffer, or any element of the
		scene description is missing or malformed.

	If the "name" element is provided for a mesh, it will be used for the name of the model
	geometry. Otherwise, the name will be "mesh#", where # is the index of the mesh. If multiple
	sets of primitives are used, the index will be appended to the name, separated with '.'.

	Limitations:
	- Only meshes and dependent data (accessors, buffer views, and buffers) are extracted. All other
	  parts of the scene are ignored, including transforms.
	- Morph targets aren't supported.
	- Materials aren't read, and are instead provided in the DeepSea scene configuration.
	- Buffer data may either be embedded or a file path relative to the main model file. General
	  URIs are not supported.
	"""
	with open(path) as f:
		try:
			data = json.load(f)
		# Was a bare "except:", which would also swallow KeyboardInterrupt
		# and SystemExit.
		except Exception:
			raise Exception('Invalid GLTF file "' + path + '".')

	parentDir = os.path.dirname(path)
	try:
		# Read the buffers. Each buffer is either embedded base64 data or a
		# file path relative to the model file.
		buffers = []
		bufferInfos = data['buffers']
		dataPrefix = 'data:application/octet-stream;base64,'
		try:
			for bufferInfo in bufferInfos:
				uri = bufferInfo['uri']
				if uri.startswith(dataPrefix):
					try:
						buffers.append(base64.b64decode(uri[len(dataPrefix):]))
					# Was a bare "except:"; only decoding errors are expected.
					except Exception:
						raise Exception('Invalid buffer data for GLTF file "' + path + '".')
				else:
					with open(os.path.join(parentDir, uri), 'rb') as f:
						buffers.append(f.read())
		except (TypeError, ValueError):
			raise Exception('Buffers must be an array of objects for GLTF file "' + path + '".')
		except KeyError as e:
			raise Exception('Buffer doesn\'t contain element "' + str(e) +
				'" for GLTF file "' + path + '".')

		# Read the buffer views, slicing out the referenced range of each
		# buffer.
		bufferViews = []
		bufferViewInfos = data['bufferViews']
		try:
			for bufferViewInfo in bufferViewInfos:
				bufferView = Object()
				try:
					bufferData = buffers[bufferViewInfo['buffer']]
				except (IndexError, TypeError):
					raise Exception('Invalid buffer index for GLTF file "' + path + '".')

				# byteOffset is optional in glTF and defaults to 0; previously
				# files that omitted it were rejected with a KeyError message.
				offset = bufferViewInfo.get('byteOffset', 0)
				length = bufferViewInfo['byteLength']
				try:
					bufferView.buffer = bufferData[offset:offset + length]
				except (IndexError, TypeError):
					raise Exception('Invalid buffer view range for GLTF file "' + path + '".')
				bufferViews.append(bufferView)
		except (TypeError, ValueError):
			raise Exception(
				'Buffer views must be an array of objects for GLTF file "' + path + '".')
		except KeyError as e:
			raise Exception('Buffer view doesn\'t contain element "' + str(e) +
				'" for GLTF file "' + path + '".')

		# Read the accessors, resolving each to a buffer view and a DeepSea
		# vertex format/decorator pair.
		# NOTE(review): an accessor-level byteOffset is not handled here, so
		# accessors that don't start at the beginning of their buffer view
		# would be read incorrectly — verify against the supported exporters.
		accessors = []
		accessorInfos = data['accessors']
		try:
			for accessorInfo in accessorInfos:
				accessor = Object()
				try:
					accessor.bufferView = bufferViews[accessorInfo['bufferView']]
				except (IndexError, TypeError):
					raise Exception('Invalid buffer view index for GLTF file "' + path + '".')

				gltfType = accessorInfo['type']
				componentType = accessorInfo['componentType']
				try:
					accessorType, decorator = gltfTypeMap[(gltfType, componentType)]
				except (KeyError, TypeError):
					raise Exception('Invalid accessor type (' + str(gltfType) + ', ' +
						str(componentType) + ') for GLTF file "' + path + '".')

				accessor.type = accessorType
				accessor.decorator = decorator
				accessor.count = accessorInfo['count']
				accessors.append(accessor)
		except (TypeError, ValueError):
			raise Exception('Accessors must be an array of objects for GLTF file "' + path + '".')
		except KeyError as e:
			raise Exception('Accessor doesn\'t contain element "' + str(e) +
				'" for GLTF file "' + path + '".')

		# Read the meshes. Each primitive of a mesh becomes its own entry,
		# named "<mesh>.<index>" when there is more than one primitive.
		meshes = []
		meshInfos = data['meshes']
		try:
			meshIndex = 0
			for meshInfo in meshInfos:
				meshName = meshInfo.get('name', 'mesh' + str(meshIndex))
				primitiveInfos = meshInfo['primitives']
				try:
					primitiveIndex = 0
					for primitiveInfo in primitiveInfos:
						mesh = Object()
						mesh.attributes = []
						mesh.name = meshName
						if len(primitiveInfos) > 1:
							mesh.name += '.' + str(primitiveIndex)
						primitiveIndex += 1
						try:
							for attrib, index in primitiveInfo['attributes'].items():
								if attrib not in gltfVertexAttribEnum:
									raise Exception('Unsupported attribute "' + str(attrib) +
										'" for GLTF file "' + path + '".')
								try:
									mesh.attributes.append((gltfVertexAttribEnum[attrib],
										accessors[index]))
								except (IndexError, TypeError):
									raise Exception('Invalid accessor index for GLTF file "' +
										path + '".')
						except (TypeError, ValueError):
							raise Exception(
								'Mesh primitives attributes must be an object containing attribute '
								'mappings for GLTF file "' + path + '".')

						# Indices are optional; meshes without them are
						# treated as non-indexed geometry.
						if 'indices' in primitiveInfo:
							try:
								mesh.indices = accessors[primitiveInfo['indices']]
							except (IndexError, TypeError):
								raise Exception(
									'Invalid accessor index for GLTF file "' + path + '".')
						else:
							mesh.indices = None

						# Mode 4 (triangles) is the glTF default.
						mode = primitiveInfo.get('mode', 4)
						try:
							mesh.primitiveType = gltfPrimitiveTypeMap[mode]
						except (IndexError, TypeError):
							raise Exception('Unsupported primitive mode for GLTF file "' + path + '".')
						meshes.append(mesh)
				except (TypeError, ValueError):
					raise Exception(
						'Mesh primitives must be an array of objects for GLTF file "' + path + '".')
				except KeyError as e:
					raise Exception('Mesh primitives doesn\'t contain element "' + str(e) +
						'" for GLTF file "' + path + '".')
				meshIndex += 1
		except (TypeError, ValueError):
			raise Exception('Meshes must be an array of objects for GLTF file "' + path + '".')
		except KeyError as e:
			raise Exception('Mesh doesn\'t contain element "' + str(e) + '" for GLTF file "' +
				path + '".')
	except (TypeError, ValueError):
		raise Exception('Root value in GLTF file "' + path + '" must be an object.')
	except KeyError as e:
		raise Exception('GLTF file "' + path + '" doesn\'t contain element "' + str(e) + '".')

	# Convert meshes to geometry list. GLTF uses separate vertex streams rather than interleved
	# vertices, so the index buffer will need to be separate for each. This will have some
	# data duplication during processing, but isn't expected to be a large amount in practice.
	geometry = []
	for mesh in meshes:
		if mesh.indices:
			indexData = mesh.indices.bufferView.buffer
			if mesh.indices.type == 'X16':
				indexSize = 2
			elif mesh.indices.type == 'X32':
				indexSize = 4
			else:
				raise Exception('Unsupported index type "' + mesh.indices.type +
					'" for GLTF file "' + path + '".')
		else:
			indexData = None
			indexSize = 0

		vertexStreams = []
		for attrib, accessor in mesh.attributes:
			vertexFormat = [(attrib, accessor.type, accessor.decorator)]
			vertexStreams.append(ModelNodeVertexStream(vertexFormat, accessor.bufferView.buffer,
				indexSize, indexData))
		geometry.append(ModelNodeGeometryData(mesh.name, vertexStreams, mesh.primitiveType))
	return geometry
def registerGLTFModelType(convertContext):
    """
    Registers the GLTF model type under the name "gltf".

    :param convertContext: the conversion context to register with.  After
        registration, models declared with type "gltf" are converted by
        convertGLTFModel.
    """
    addModelType(convertContext, 'gltf', convertGLTFModel)
| 35.711679 | 97 | 0.66745 | 19 | 0.001942 | 0 | 0 | 0 | 0 | 0 | 0 | 4,089 | 0.417885 |
21c9e3f18e9ff9713871cd9e59f532296cc7c00f | 8,500 | py | Python | python/sparktk/models/classification/naive_bayes.py | aayushidwivedi01/spark-tk-old | fcf25f86498ac416cce77de0db4cf0aa503d20ac | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2017-05-17T07:09:59.000Z | 2017-05-17T07:09:59.000Z | python/sparktk/models/classification/naive_bayes.py | aayushidwivedi01/spark-tk-old | fcf25f86498ac416cce77de0db4cf0aa503d20ac | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | python/sparktk/models/classification/naive_bayes.py | aayushidwivedi01/spark-tk-old | fcf25f86498ac416cce77de0db4cf0aa503d20ac | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from sparktk.loggers import log_load; log_load(__name__); del log_load
from sparktk.propobj import PropertiesObject
from sparktk.frame.ops.classification_metrics_value import ClassificationMetricsValue
from sparktk import TkContext
__all__ = ["train", "load", "NaiveBayesModel"]
def train(frame, label_column, observation_columns, lambda_parameter = 1.0):
    """
    Creates a Naive Bayes model by training on the given frame.

    :param frame: (Frame) frame of training data
    :param label_column: (str) Column containing the label for each observation
    :param observation_columns: (List[str]) Column(s) containing the observations
    :param lambda_parameter: (float) Additive smoothing parameter.  Default is 1.0
    :return: (NaiveBayesModel) Trained Naive Bayes model
    """
    if frame is None:
        raise ValueError("frame cannot be None")
    context = frame._tc
    # Column names must be handed to the JVM side as a scala List[String].
    scala_observations = context.jutils.convert.to_scala_list_string(observation_columns)
    trained_scala_model = get_scala_obj(context).train(frame._scala,
                                                       label_column,
                                                       scala_observations,
                                                       lambda_parameter)
    return NaiveBayesModel(context, trained_scala_model)
def load(path, tc=TkContext.implicit):
    """Loads a previously saved NaiveBayesModel from the given path."""
    TkContext.validate(tc)
    restored = tc.load(path, NaiveBayesModel)
    return restored
def get_scala_obj(tc):
    """Returns a reference to the companion scala NaiveBayesModel object in the JVM."""
    jvm = tc.sc._jvm
    return jvm.org.trustedanalytics.sparktk.models.classification.naive_bayes.NaiveBayesModel
class NaiveBayesModel(PropertiesObject):
    """
    A trained Naive Bayes model

    Example
    -------

        >>> frame = tc.frame.create([[1,19.8446136104,2.2985856384],
        ... [1,16.8973559126,2.6933495054],
        ... [1,5.5548729596, 2.7777687995],
        ... [0,46.1810010826,3.1611961917],
        ... [0,44.3117586448,3.3458963222],
        ... [0,34.6334526911,3.6429838715]],
        ... [('Class', int), ('Dim_1', float), ('Dim_2', float)])

        >>> model = tc.models.classification.naive_bayes.train(frame, 'Class', ['Dim_1', 'Dim_2'], 0.9)

        >>> model.label_column
        u'Class'

        >>> model.observation_columns
        [u'Dim_1', u'Dim_2']

        >>> model.lambda_parameter
        0.9

        >>> predicted_frame = model.predict(frame, ['Dim_1', 'Dim_2'])

        >>> predicted_frame.inspect()
        [#] Class Dim_1 Dim_2 predicted_class
        ========================================================
        [0] 1 19.8446136104 2.2985856384 0.0
        [1] 1 16.8973559126 2.6933495054 1.0
        [2] 1 5.5548729596 2.7777687995 1.0
        [3] 0 46.1810010826 3.1611961917 0.0
        [4] 0 44.3117586448 3.3458963222 0.0
        [5] 0 34.6334526911 3.6429838715 0.0

        >>> model.save("sandbox/naivebayes")

        >>> restored = tc.load("sandbox/naivebayes")

        >>> restored.label_column == model.label_column
        True

        >>> restored.lambda_parameter == model.lambda_parameter
        True

        >>> set(restored.observation_columns) == set(model.observation_columns)
        True

        >>> metrics = model.test(frame)

        >>> metrics.precision
        1.0

        >>> predicted_frame2 = restored.predict(frame, ['Dim_1', 'Dim_2'])

        >>> predicted_frame2.inspect()
        [#] Class Dim_1 Dim_2 predicted_class
        ========================================================
        [0] 1 19.8446136104 2.2985856384 0.0
        [1] 1 16.8973559126 2.6933495054 1.0
        [2] 1 5.5548729596 2.7777687995 1.0
        [3] 0 46.1810010826 3.1611961917 0.0
        [4] 0 44.3117586448 3.3458963222 0.0
        [5] 0 34.6334526911 3.6429838715 0.0

        >>> canonical_path = model.export_to_mar("sandbox/naivebayes.mar")

        <hide>
        >>> import os
        >>> os.path.exists(canonical_path)
        True
        </hide>
    """

    def __init__(self, tc, scala_model):
        self._tc = tc
        # Guard against being handed a JVM object of the wrong class.
        tc.jutils.validate_is_jvm_instance_of(scala_model, get_scala_obj(tc))
        self._scala = scala_model

    @staticmethod
    def _from_scala(tc, scala_model):
        """Creates a python NaiveBayesModel wrapping the given scala model (used on load)."""
        return NaiveBayesModel(tc, scala_model)

    @property
    def label_column(self):
        """(str) Name of the label column used during training."""
        return self._scala.labelColumn()

    @property
    def observation_columns(self):
        """(List[str]) Names of the observation columns used during training."""
        return self._tc.jutils.convert.from_scala_seq(self._scala.observationColumns())

    @property
    def lambda_parameter(self):
        """(float) Additive smoothing parameter used during training."""
        return self._scala.lambdaParameter()

    # NOTE: an unrelated ARIMA-style predict(future_periods, ts) method that was
    # copy-pasted here (and immediately shadowed by the definition below) has
    # been removed; it was dead code with a misleading docstring.
    def predict(self, frame, columns=None):
        """
        Predicts the labels for the observation columns in the given input frame. Creates a new frame
        with the existing columns and a new predicted column.

        Parameters
        ----------

        :param frame: (Frame) Frame used for predicting the values
        :param columns: (Optional[List[str]]) Names of the observation columns.  Defaults to the
            columns used during training.
        :return: (Frame) A new frame containing the original frame's columns and a prediction column
        """
        c = self.__columns_to_option(columns)
        from sparktk.frame.frame import Frame
        return Frame(self._tc, self._scala.predict(frame._scala, c))

    def test(self, frame, columns=None):
        """
        Tests the model against the given labeled frame.

        :param frame: (Frame) Frame of test data containing the label column
        :param columns: (Optional[List[str]]) Names of the observation columns.  Defaults to the
            columns used during training.
        :return: (ClassificationMetricsValue) Binary classification metrics
        """
        c = self.__columns_to_option(columns)
        return ClassificationMetricsValue(self._tc, self._scala.test(frame._scala, c))

    def __columns_to_option(self, c):
        # Wraps an optional python list of column names as a scala Option[List[String]].
        if c is not None:
            c = self._tc.jutils.convert.to_scala_list_string(c)
        return self._tc.jutils.convert.to_scala_option(c)

    def save(self, path):
        """Saves the trained model to the given path."""
        self._scala.save(self._tc._scala_sc, path)

    def export_to_mar(self, path):
        """
        Exports the trained model as a model archive (.mar) to the specified path

        Parameters
        ----------

        :param path: (str) Path to save the trained model
        :return: (str) Full path to the saved .mar file
        """
        if not isinstance(path, basestring):
            # Previously a non-string path silently returned None; fail loudly instead.
            raise ValueError("path must be a string, but received %s" % type(path))
        return self._scala.exportToMar(self._tc._scala_sc, path)
del PropertiesObject
| 36.170213 | 116 | 0.609529 | 6,320 | 0.736768 | 0 | 0 | 395 | 0.046048 | 0 | 0 | 5,657 | 0.659478 |
21cac0e1969856e708db0ab52d143bf0ce25b967 | 10,769 | py | Python | matplotlib_venn/_venn2.py | TRuikes/matplotlib-venn | 64fdba46a61a4a19d2f192c84f02068af08f9e73 | [
"MIT"
] | 306 | 2015-01-01T20:48:41.000Z | 2022-03-28T03:12:18.000Z | matplotlib_venn/_venn2.py | TRuikes/matplotlib-venn | 64fdba46a61a4a19d2f192c84f02068af08f9e73 | [
"MIT"
] | 55 | 2015-01-07T14:06:36.000Z | 2022-03-07T16:18:48.000Z | matplotlib_venn/_venn2.py | TRuikes/matplotlib-venn | 64fdba46a61a4a19d2f192c84f02068af08f9e73 | [
"MIT"
] | 46 | 2015-05-08T04:55:24.000Z | 2022-02-08T08:38:11.000Z | '''
Venn diagram plotting routines.
Two-circle venn plotter.
Copyright 2012, Konstantin Tretyakov.
http://kt.era.ee/
Licensed under MIT license.
'''
# Make sure we don't try to do GUI stuff when running tests
import sys, os
if 'py.test' in os.path.basename(sys.argv[0]): # (XXX: Ugly hack)
import matplotlib
matplotlib.use('Agg')
import numpy as np
import warnings
from collections import Counter
from matplotlib.patches import Circle
from matplotlib.colors import ColorConverter
from matplotlib.pyplot import gca
from matplotlib_venn._math import *
from matplotlib_venn._common import *
from matplotlib_venn._region import VennCircleRegion
def compute_venn2_areas(diagram_areas, normalize_to=1.0):
    '''
    The list of venn areas is given as 3 values, corresponding to venn diagram areas in the following order:
     (Ab, aB, AB)  (i.e. last element corresponds to the size of intersection A&B&C).
    The return value is a list of areas (A, B, AB), such that the total area is normalized
    to normalize_to. If total area was 0, returns (1e-06, 1e-06, 0.0)

    Assumes all input values are nonnegative (to be more precise, all areas are passed through an abs() function)

    >>> compute_venn2_areas((1, 1, 0))
    (0.5, 0.5, 0.0)
    >>> compute_venn2_areas((0, 0, 0))
    (1e-06, 1e-06, 0.0)
    >>> compute_venn2_areas((1, 1, 1), normalize_to=3)
    (2.0, 2.0, 1.0)
    >>> compute_venn2_areas((1, 2, 3), normalize_to=6)
    (4.0, 5.0, 3.0)
    '''
    region_sizes = np.array(np.abs(diagram_areas), float)
    total = np.sum(region_sizes)
    if np.abs(total) < tol:
        # Degenerate diagram: give both circles a tiny nonzero area.
        warnings.warn("Both circles have zero area")
        return (1e-06, 1e-06, 0.0)
    scaled = region_sizes / total * normalize_to
    # Circle areas are the exclusive regions plus the shared intersection.
    return (scaled[0] + scaled[2], scaled[1] + scaled[2], scaled[2])
def solve_venn2_circles(venn_areas):
    '''
    Given the list of "venn areas" (as output from compute_venn2_areas, i.e. [A, B, AB]),
    finds the positions and radii of the two circles.
    The return value is a tuple (coords, radii), where coords is a 2x2 array of coordinates and
    radii is a 2x1 array of circle radii.

    Assumes the input values to be nonnegative and not all zero.
    In particular, the first two values must be positive.

    >>> c, r = solve_venn2_circles((1, 1, 0))
    >>> np.round(r, 3).tolist()
    [0.564, 0.564]
    >>> c, r = solve_venn2_circles(compute_venn2_areas((1, 2, 3)))
    >>> np.round(r, 3).tolist()
    [0.461, 0.515]
    '''
    area_a, area_b, area_ab = list(map(float, venn_areas))
    radii = np.array([np.sqrt(area_a / np.pi), np.sqrt(area_b / np.pi)])
    coords = np.zeros((2, 2))
    if area_ab > tol:
        # Overlapping circles: choose the center distance so that the lens
        # (intersection) area equals area_ab.
        coords[1][0] = find_distance_by_area(radii[0], radii[1], area_ab)
    else:
        # Disjoint circles: leave a gap proportional to the mean radius.  The
        # max with 0.2 also covers the degenerate case r_a = r_b = 0.
        coords[1][0] = radii[0] + radii[1] + max(np.mean(radii) * 1.1, 0.2)
    return (normalize_by_center_of_mass(coords, radii), radii)
def compute_venn2_regions(centers, radii):
    '''
    Returns a triple of VennRegion objects, describing the three regions of the diagram, corresponding to sets
    (Ab, aB, AB)

    >>> centers, radii = solve_venn2_circles((1, 1, 0.5))
    >>> regions = compute_venn2_regions(centers, radii)
    '''
    circle_a = VennCircleRegion(centers[0], radii[0])
    circle_b = VennCircleRegion(centers[1], radii[1])
    # Splitting A by B yields both the exclusive part and the intersection.
    only_a, both = circle_a.subtract_and_intersect_circle(circle_b.center, circle_b.radius)
    only_b = circle_b.subtract_and_intersect_circle(circle_a.center, circle_a.radius)[0]
    return (only_a, only_b, both)
def compute_venn2_colors(set_colors):
    '''
    Given two base colors, computes combinations of colors corresponding to all regions of the venn diagram.
    returns a list of 3 elements, providing colors for regions (10, 01, 11).

    >>> str(compute_venn2_colors(('r', 'g'))).replace(' ', '')
    '(array([1.,0.,0.]),array([0.,0.5,0.]),array([0.7,0.35,0.]))'
    '''
    converter = ColorConverter()
    color_a, color_b = [np.array(converter.to_rgb(color)) for color in set_colors]
    # The intersection region is shown as a blend of the two base colors.
    return (color_a, color_b, mix_colors(color_a, color_b))
def compute_venn2_subsets(a, b):
    '''
    Given two set or Counter objects, computes the sizes of (a & ~b, b & ~a, a & b).
    Returns the result as a tuple.

    >>> compute_venn2_subsets(set([1,2,3,4]), set([2,3,4,5,6]))
    (1, 2, 3)
    >>> compute_venn2_subsets(Counter([1,2,3,4]), Counter([2,3,4,5,6]))
    (1, 2, 3)
    >>> compute_venn2_subsets(Counter([]), Counter([]))
    (0, 0, 0)
    >>> compute_venn2_subsets(set([]), set([]))
    (0, 0, 0)
    >>> compute_venn2_subsets(set([1]), set([]))
    (1, 0, 0)
    >>> compute_venn2_subsets(set([1]), set([1]))
    (0, 0, 1)
    >>> compute_venn2_subsets(Counter([1]), Counter([1]))
    (0, 0, 1)
    >>> compute_venn2_subsets(set([1,2]), set([1]))
    (1, 0, 1)
    >>> compute_venn2_subsets(Counter([1,1,2,2,2]), Counter([1,2,3,3]))
    (3, 2, 2)
    >>> compute_venn2_subsets(Counter([1,1,2]), Counter([1,2,2]))
    (1, 1, 2)
    >>> compute_venn2_subsets(Counter([1,1]), set([]))
    Traceback (most recent call last):
    ...
    ValueError: Both arguments must be of the same type
    '''
    if type(a) != type(b):
        raise ValueError("Both arguments must be of the same type")
    # len() does not give the multiset cardinality of a Counter, so sum the
    # counts instead in that case.
    if type(a) == Counter:
        def measure(c):
            return sum(c.values())
    else:
        measure = len
    return (measure(a - b), measure(b - a), measure(a & b))
def venn2_circles(subsets, normalize_to=1.0, alpha=1.0, color='black', linestyle='solid', linewidth=2.0, ax=None, **kwargs):
    '''
    Plots only the two circles for the corresponding Venn diagram.
    Useful for debugging or enhancing the basic venn diagram.
    parameters ``subsets``, ``normalize_to`` and ``ax`` are the same as in venn2()
    ``kwargs`` are passed as-is to matplotlib.patches.Circle.
    returns a list of three Circle patches.

    >>> c = venn2_circles((1, 2, 3))
    >>> c = venn2_circles({'10': 1, '01': 2, '11': 3}) # Same effect
    >>> c = venn2_circles([set([1,2,3,4]), set([2,3,4,5,6])]) # Also same effect
    '''
    # Normalize the subsets argument to a (10, 01, 11) size triple.
    if isinstance(subsets, dict):
        subsets = [subsets.get(t, 0) for t in ['10', '01', '11']]
    elif len(subsets) == 2:
        subsets = compute_venn2_subsets(*subsets)
    centers, radii = solve_venn2_circles(compute_venn2_areas(subsets, normalize_to))
    if ax is None:
        ax = gca()
    prepare_venn_axes(ax, centers, radii)
    patches = []
    for center, radius in zip(centers, radii):
        circle = Circle(center, radius, alpha=alpha, edgecolor=color, facecolor='none',
                        linestyle=linestyle, linewidth=linewidth, **kwargs)
        ax.add_patch(circle)
        patches.append(circle)
    return patches
def venn2(subsets, set_labels=('A', 'B'), set_colors=('r', 'g'), alpha=0.4, normalize_to=1.0, ax=None, subset_label_formatter=None):
    '''Plots a 2-set area-weighted Venn diagram.
    The subsets parameter can be one of the following:
     - A list (or a tuple) containing two set objects.
     - A dict, providing sizes of three diagram regions.
       The regions are identified via two-letter binary codes ('10', '01', and '11'), hence a valid set could look like:
       {'10': 10, '01': 20, '11': 40}. Unmentioned codes are considered to map to 0.
     - A list (or a tuple) with three numbers, denoting the sizes of the regions in the following order:
       (10, 01, 11)
    ``set_labels`` parameter is a list of two strings - set labels. Set it to None to disable set labels.
    The ``set_colors`` parameter should be a list of two elements, specifying the "base colors" of the two circles.
    The color of circle intersection will be computed based on those.
    The ``normalize_to`` parameter specifies the total (on-axes) area of the circles to be drawn. Sometimes tuning it (together
    with the overall figure size) may be useful to fit the text labels better.
    The return value is a ``VennDiagram`` object, that keeps references to the ``Text`` and ``Patch`` objects used on the plot
    and lets you know the centers and radii of the circles, if you need it.
    The ``ax`` parameter specifies the axes on which the plot will be drawn (None means current axes).
    The ``subset_label_formatter`` parameter is a function that can be passed to format the labels
    that describe the size of each subset.

    >>> from matplotlib_venn import *
    >>> v = venn2(subsets={'10': 1, '01': 1, '11': 1}, set_labels = ('A', 'B'))
    >>> c = venn2_circles(subsets=(1, 1, 1), linestyle='dashed')
    >>> v.get_patch_by_id('10').set_alpha(1.0)
    >>> v.get_patch_by_id('10').set_color('white')
    >>> v.get_label_by_id('10').set_text('Unknown')
    >>> v.get_label_by_id('A').set_text('Set A')

    You can provide sets themselves rather than subset sizes:
    >>> v = venn2(subsets=[set([1,2]), set([2,3,4,5])], set_labels = ('A', 'B'))
    >>> c = venn2_circles(subsets=[set([1,2]), set([2,3,4,5])], linestyle='dashed')
    >>> print("%0.2f" % (v.get_circle_radius(1)/v.get_circle_radius(0)))
    1.41
    '''
    # Normalize the subsets argument to a (10, 01, 11) size triple.
    if isinstance(subsets, dict):
        subsets = [subsets.get(t, 0) for t in ['10', '01', '11']]
    elif len(subsets) == 2:
        subsets = compute_venn2_subsets(*subsets)
    if subset_label_formatter is None:
        subset_label_formatter = str
    # Lay out the circles and derive regions and region colors.
    areas = compute_venn2_areas(subsets, normalize_to)
    centers, radii = solve_venn2_circles(areas)
    regions = compute_venn2_regions(centers, radii)
    colors = compute_venn2_colors(set_colors)
    if ax is None:
        ax = gca()
    prepare_venn_axes(ax, centers, radii)
    # Create and add patches and subset labels
    patches = [r.make_patch() for r in regions]
    for (p, c) in zip(patches, colors):
        if p is not None:
            p.set_facecolor(c)
            p.set_edgecolor('none')
            p.set_alpha(alpha)
            ax.add_patch(p)
    # A region may have no sensible label position (e.g. empty region) -> None.
    label_positions = [r.label_position() for r in regions]
    subset_labels = [ax.text(lbl[0], lbl[1], subset_label_formatter(s), va='center', ha='center') if lbl is not None else None for (lbl, s) in zip(label_positions, subsets)]
    # Position set labels
    if set_labels is not None:
        # Set labels are placed just below each circle, offset by 10% of the mean radius.
        padding = np.mean([r * 0.1 for r in radii])
        label_positions = [centers[0] + np.array([0.0, - radii[0] - padding]),
                           centers[1] + np.array([0.0, - radii[1] - padding])]
        labels = [ax.text(pos[0], pos[1], txt, size='large', ha='right', va='top') for (pos, txt) in zip(label_positions, set_labels)]
        labels[1].set_ha('left')
    else:
        labels = None
    return VennDiagram(patches, subset_labels, labels, centers, radii)
| 41.579151 | 173 | 0.642121 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,361 | 0.590677 |
21cae347bb461a852e4405ed6113ca1a0a2ae37e | 370 | py | Python | examples/htmltopdf/lambda_function.py | dgilmanAIDENTIFIED/juniper | 81452cb86863340e9f7dd57ccd1cf69881b6e9a9 | [
"Apache-2.0"
] | 65 | 2019-02-01T19:49:49.000Z | 2022-01-17T10:43:50.000Z | examples/htmltopdf/lambda_function.py | dgilmanAIDENTIFIED/juniper | 81452cb86863340e9f7dd57ccd1cf69881b6e9a9 | [
"Apache-2.0"
] | 28 | 2019-02-12T18:57:13.000Z | 2021-09-21T00:00:50.000Z | examples/htmltopdf/lambda_function.py | dgilmanAIDENTIFIED/juniper | 81452cb86863340e9f7dd57ccd1cf69881b6e9a9 | [
"Apache-2.0"
] | 9 | 2019-03-02T02:30:50.000Z | 2022-01-12T21:34:54.000Z | import pdfkit
import boto3
s3 = boto3.client('s3')
def lambda_handler(event, context):
    """Render http://google.com to a PDF and upload it to S3.

    :param event: Lambda event payload (unused).
    :param context: Lambda context object (unused).
    :return: dict with the S3 put_object response under the 'response' key.
    """
    output_path = '/tmp/out.pdf'
    pdfkit.from_url('http://google.com', output_path)
    with open(output_path, 'rb') as pdf_file:
        upload_response = s3.put_object(
            Bucket='temp-awseabsgddev',
            Key='juni/google.pdf',
            Body=pdf_file.read()
        )
    return {'response': upload_response}
| 20.555556 | 56 | 0.578378 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 101 | 0.272973 |
21cb54bbd6f23cf907190a946e745efc50dd9bc4 | 4,997 | py | Python | test/espnet2/enh/separator/test_dc_crn_separator.py | roshansh-cmu/espnet | 5fa6dcc4e649dc66397c629d0030d09ecef36b80 | [
"Apache-2.0"
] | null | null | null | test/espnet2/enh/separator/test_dc_crn_separator.py | roshansh-cmu/espnet | 5fa6dcc4e649dc66397c629d0030d09ecef36b80 | [
"Apache-2.0"
] | null | null | null | test/espnet2/enh/separator/test_dc_crn_separator.py | roshansh-cmu/espnet | 5fa6dcc4e649dc66397c629d0030d09ecef36b80 | [
"Apache-2.0"
] | null | null | null | import pytest
import torch
from packaging.version import parse as V
from torch_complex import ComplexTensor
from espnet2.enh.layers.complex_utils import is_complex
from espnet2.enh.separator.dc_crn_separator import DC_CRNSeparator
is_torch_1_9_plus = V(torch.__version__) >= V("1.9.0")
@pytest.mark.parametrize("input_dim", [33, 65])
@pytest.mark.parametrize("num_spk", [1, 2])
@pytest.mark.parametrize("input_channels", [[2, 4], [2, 4, 4]])
@pytest.mark.parametrize("enc_hid_channels", [2, 5])
@pytest.mark.parametrize("enc_layers", [2])
@pytest.mark.parametrize("glstm_groups", [2])
@pytest.mark.parametrize("glstm_layers", [1, 2])
@pytest.mark.parametrize("glstm_bidirectional", [True, False])
@pytest.mark.parametrize("glstm_rearrange", [True, False])
@pytest.mark.parametrize("mode", ["mapping", "masking"])
def test_dc_crn_separator_forward_backward_complex(
    input_dim,
    num_spk,
    input_channels,
    enc_hid_channels,
    enc_layers,
    glstm_groups,
    glstm_layers,
    glstm_bidirectional,
    glstm_rearrange,
    mode,
):
    """Forward + backward smoke test over the hyperparameter grid with complex input."""
    model = DC_CRNSeparator(
        input_dim=input_dim,
        num_spk=num_spk,
        input_channels=input_channels,
        enc_hid_channels=enc_hid_channels,
        enc_kernel_size=(1, 3),
        enc_padding=(0, 1),
        enc_last_kernel_size=(1, 3),
        enc_last_stride=(1, 2),
        enc_last_padding=(0, 1),
        enc_layers=enc_layers,
        skip_last_kernel_size=(1, 3),
        skip_last_stride=(1, 1),
        skip_last_padding=(0, 1),
        glstm_groups=glstm_groups,
        glstm_layers=glstm_layers,
        glstm_bidirectional=glstm_bidirectional,
        glstm_rearrange=glstm_rearrange,
        mode=mode,
    )
    model.train()
    # Random complex spectrogram-like input: (batch=2, frames=10, freq=input_dim).
    real = torch.rand(2, 10, input_dim)
    imag = torch.rand(2, 10, input_dim)
    # torch.complex is only available from torch 1.9; fall back to ComplexTensor.
    x = torch.complex(real, imag) if is_torch_1_9_plus else ComplexTensor(real, imag)
    x_lens = torch.tensor([10, 8], dtype=torch.long)
    masked, flens, others = model(x, ilens=x_lens)
    assert is_complex(masked[0])
    assert len(masked) == num_spk
    # Backprop through a scalar to verify gradients flow end to end.
    masked[0].abs().mean().backward()
@pytest.mark.parametrize("num_spk", [1, 2])
@pytest.mark.parametrize("input_channels", [[4, 4], [6, 4, 4]])
@pytest.mark.parametrize(
    "enc_kernel_size, enc_padding", [((1, 3), (0, 1)), ((1, 5), (0, 2))]
)
@pytest.mark.parametrize("enc_last_stride", [(1, 2)])
@pytest.mark.parametrize(
    "enc_last_kernel_size, enc_last_padding",
    [((1, 4), (0, 1)), ((1, 5), (0, 2))],
)
@pytest.mark.parametrize("skip_last_stride", [(1, 1)])
@pytest.mark.parametrize(
    "skip_last_kernel_size, skip_last_padding",
    [((1, 3), (0, 1)), ((1, 5), (0, 2))],
)
def test_dc_crn_separator_multich_input(
    num_spk,
    input_channels,
    enc_kernel_size,
    enc_padding,
    enc_last_kernel_size,
    enc_last_stride,
    enc_last_padding,
    skip_last_kernel_size,
    skip_last_stride,
    skip_last_padding,
):
    """Forward + backward smoke test for multi-channel (mic-array) complex input."""
    model = DC_CRNSeparator(
        input_dim=33,
        num_spk=num_spk,
        input_channels=input_channels,
        enc_hid_channels=2,
        enc_kernel_size=enc_kernel_size,
        enc_padding=enc_padding,
        enc_last_kernel_size=enc_last_kernel_size,
        enc_last_stride=enc_last_stride,
        enc_last_padding=enc_last_padding,
        enc_layers=3,
        skip_last_kernel_size=skip_last_kernel_size,
        skip_last_stride=skip_last_stride,
        skip_last_padding=skip_last_padding,
    )
    model.train()
    # input_channels[0] counts real+imag planes, so the number of microphone
    # channels in the complex input is input_channels[0] // 2.
    real = torch.rand(2, 10, input_channels[0] // 2, 33)
    imag = torch.rand(2, 10, input_channels[0] // 2, 33)
    # torch.complex is only available from torch 1.9; fall back to ComplexTensor.
    x = torch.complex(real, imag) if is_torch_1_9_plus else ComplexTensor(real, imag)
    x_lens = torch.tensor([10, 8], dtype=torch.long)
    masked, flens, others = model(x, ilens=x_lens)
    assert is_complex(masked[0])
    assert len(masked) == num_spk
    # Backprop through a scalar to verify gradients flow end to end.
    masked[0].abs().mean().backward()
def test_dc_crn_separator_invalid_enc_layer():
    """Constructing a separator with fewer than two encoder layers must fail."""
    with pytest.raises(AssertionError):
        DC_CRNSeparator(input_dim=17, input_channels=[2, 2, 4], enc_layers=1)
def test_dc_crn_separator_invalid_type():
    """An unknown ``mode`` string must be rejected with ValueError."""
    with pytest.raises(ValueError):
        DC_CRNSeparator(input_dim=17, input_channels=[2, 2, 4], mode="xxx")
def test_dc_crn_separator_output():
    """Check output container types and per-speaker mask shapes for 1 and 2 speakers."""
    re_part = torch.rand(2, 10, 17)
    im_part = torch.rand(2, 10, 17)
    # torch.complex is only available from torch 1.9; fall back to ComplexTensor.
    if is_torch_1_9_plus:
        mixture = torch.complex(re_part, im_part)
    else:
        mixture = ComplexTensor(re_part, im_part)
    lengths = torch.tensor([10, 8], dtype=torch.long)
    for speakers in (1, 2):
        separator = DC_CRNSeparator(
            input_dim=17,
            num_spk=speakers,
            input_channels=[2, 2, 4],
        )
        separator.eval()
        specs, _, others = separator(mixture, lengths)
        assert isinstance(specs, list)
        assert isinstance(others, dict)
        for idx in range(speakers):
            mask_key = "mask_spk{}".format(idx + 1)
            assert mask_key in others
            assert specs[idx].shape == others[mask_key].shape
| 30.656442 | 85 | 0.655193 | 0 | 0 | 0 | 0 | 3,563 | 0.713028 | 0 | 0 | 364 | 0.072844 |
21cb6726d5d75e89f98527599cbd57bd6acc970a | 75 | py | Python | custom_model_runner/datarobot_drum/drum/description.py | cartertroy/datarobot-user-models | d2c2b47e0d46a0ce8d07f1baa8d57155a829d2fc | [
"Apache-2.0"
] | null | null | null | custom_model_runner/datarobot_drum/drum/description.py | cartertroy/datarobot-user-models | d2c2b47e0d46a0ce8d07f1baa8d57155a829d2fc | [
"Apache-2.0"
] | null | null | null | custom_model_runner/datarobot_drum/drum/description.py | cartertroy/datarobot-user-models | d2c2b47e0d46a0ce8d07f1baa8d57155a829d2fc | [
"Apache-2.0"
] | null | null | null | version = "1.1.5rc1"
# Conventional dunder alias so tooling can read the package version.
__version__ = version
# Canonical project/package name.
project_name = "datarobot-drum"
| 18.75 | 31 | 0.746667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 0.346667 |
21cbd02e2c85ee526f62d570e35aa6c6f628b43f | 661 | py | Python | usaspending_api/download/v2/urls.py | truthiswill/usaspending-api | bd7d915442e2ec94cc830c480ceeffd4479be6c0 | [
"CC0-1.0"
] | null | null | null | usaspending_api/download/v2/urls.py | truthiswill/usaspending-api | bd7d915442e2ec94cc830c480ceeffd4479be6c0 | [
"CC0-1.0"
] | 1 | 2021-11-15T17:53:27.000Z | 2021-11-15T17:53:27.000Z | usaspending_api/download/v2/urls.py | truthiswill/usaspending-api | bd7d915442e2ec94cc830c480ceeffd4479be6c0 | [
"CC0-1.0"
] | null | null | null | from django.conf.urls import url
from usaspending_api.download.v2 import views
# URL routes for the download API.  Note the patterns have no trailing '$'
# anchor, so each is a prefix match mapped to its class-based view.
urlpatterns = [
    url(r'^awards', views.RowLimitedAwardDownloadViewSet.as_view()),
    url(r'^accounts', views.AccountDownloadViewSet.as_view()),
    # url(r'^columns', views.DownloadColumnsViewSet.as_view()),
    url(r'^status', views.DownloadStatusViewSet.as_view()),
    url(r'^transactions', views.RowLimitedTransactionDownloadViewSet.as_view()),
    # Note: This is commented out for now as it may be used in the near future
    # url(r'^subawards', views.RowLimitedSubawardDownloadViewSet.as_view()),
    url(r'^count', views.DownloadTransactionCountViewSet.as_view())
]
| 41.3125 | 80 | 0.747352 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 262 | 0.396369 |
21cbd2f48de433cddb1eeae935153e4c69f319de | 208 | bzl | Python | bazel_versions.bzl | cgrindel/buildifier-prebuilt | 79244e93755af8db1dcbe8d005f024901a7918dc | [
"MIT"
] | 8 | 2021-12-03T19:58:36.000Z | 2022-02-03T00:41:59.000Z | bazel_versions.bzl | cgrindel/buildifier-prebuilt | 79244e93755af8db1dcbe8d005f024901a7918dc | [
"MIT"
] | 13 | 2022-01-18T22:31:04.000Z | 2022-03-21T17:19:49.000Z | bazel_versions.bzl | cgrindel/buildifier-prebuilt | 79244e93755af8db1dcbe8d005f024901a7918dc | [
"MIT"
] | 2 | 2022-01-24T20:28:29.000Z | 2022-03-20T18:12:46.000Z | """
Common bazel version requirements for tests
"""
# The primary bazel version used by the tests.
CURRENT_BAZEL_VERSION = "5.0.0"
# Additional bazel versions the tests are also run against.
OTHER_BAZEL_VERSIONS = [
    "4.2.2",
]
# All supported versions: the current one plus the others.
SUPPORTED_BAZEL_VERSIONS = [
    CURRENT_BAZEL_VERSION,
] + OTHER_BAZEL_VERSIONS
| 14.857143 | 43 | 0.721154 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 65 | 0.3125 |
21cc04c7b836d418c1a118414f1038a5933f8f0d | 600 | py | Python | notebooks/bqml/track_meta.py | roannav/learntools | 355a5df6a66562de62254b723da1a9389b9acc49 | [
"Apache-2.0"
] | 359 | 2018-03-23T15:57:52.000Z | 2022-03-25T21:56:28.000Z | notebooks/bqml/track_meta.py | roannav/learntools | 355a5df6a66562de62254b723da1a9389b9acc49 | [
"Apache-2.0"
] | 84 | 2018-06-14T00:06:52.000Z | 2022-02-08T17:25:54.000Z | notebooks/bqml/track_meta.py | roannav/learntools | 355a5df6a66562de62254b723da1a9389b9acc49 | [
"Apache-2.0"
] | 213 | 2018-05-02T19:06:31.000Z | 2022-03-20T15:40:34.000Z | # See also examples/example_track/track_meta.py for a longer, commented example
track = dict(
author_username='dansbecker',
course_name='Machine Learning',
course_url='https://www.kaggle.com/learn/intro-to-machine-learning'
)
lessons = [
dict(
topic='Your First BiqQuery ML Model',
),
]
notebooks = [
dict(
filename='tut1.ipynb',
lesson_idx=0,
type='tutorial',
scriptid=4076893,
),
dict(
filename='ex1.ipynb',
lesson_idx=0,
type='exercise',
scriptid=4077160,
),
]
| 20 | 79 | 0.576667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 238 | 0.396667 |
21cf42098efd959d6275b16af7c4990f494ec81b | 859 | py | Python | inv/migrations/0001_subinterface_managed_object.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 84 | 2017-10-22T11:01:39.000Z | 2022-02-27T03:43:48.000Z | inv/migrations/0001_subinterface_managed_object.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 22 | 2017-12-11T07:21:56.000Z | 2021-09-23T02:53:50.000Z | inv/migrations/0001_subinterface_managed_object.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 23 | 2017-12-06T06:59:52.000Z | 2022-02-24T00:02:25.000Z | # ---------------------------------------------------------------------
# Initialize SubInterface.managed_object
# ---------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC modules
from noc.core.migration.base import BaseMigration
class Migration(BaseMigration):
    """Copies each interface's managed_object id onto its subinterfaces."""
    def migrate(self):
        db = self.mongo_db
        # interface oid -> managed object id
        imo = {
            r["_id"]: r["managed_object"]
            for r in db.noc.interfaces.find({}, {"id": 1, "managed_object": 1})
        }
        # Update subinterface managed object id
        c = db.noc.subinterfaces
        for i_oid in imo:
            # NOTE(review): pymongo's legacy update() modifies only the first
            # matching document unless multi=True is passed; if one interface
            # can own several subinterfaces, confirm this is intended.
            c.update({"interface": i_oid}, {"$set": {"managed_object": imo[i_oid]}})
| 35.791667 | 84 | 0.463329 | 466 | 0.542491 | 0 | 0 | 0 | 0 | 0 | 0 | 481 | 0.559953 |
21cfa74a8ebaf5d372efcc3cd78d24a3dd36b224 | 651 | py | Python | Python/1013. PartitionArrayIntoThreePartsWithEqualSum.py | nizD/LeetCode-Solutions | 7f4ca37bab795e0d6f9bfd9148a8fe3b62aa5349 | [
"MIT"
] | 263 | 2020-10-05T18:47:29.000Z | 2022-03-31T19:44:46.000Z | Python/1013. PartitionArrayIntoThreePartsWithEqualSum.py | nizD/LeetCode-Solutions | 7f4ca37bab795e0d6f9bfd9148a8fe3b62aa5349 | [
"MIT"
] | 1,264 | 2020-10-05T18:13:05.000Z | 2022-03-31T23:16:35.000Z | Python/1013. PartitionArrayIntoThreePartsWithEqualSum.py | nizD/LeetCode-Solutions | 7f4ca37bab795e0d6f9bfd9148a8fe3b62aa5349 | [
"MIT"
] | 760 | 2020-10-05T18:22:51.000Z | 2022-03-29T06:06:20.000Z | class Solution:
def canThreePartsEqualSum(self, A: List[int]) -> bool:
# Since all the three parts are equal, if we sum all element of arrary it should be a multiplication of 3
# so the sum of each part must be equal to sum of all element divided by 3
quotient, remainder = divmod(sum(A), 3)
if remainder != 0:
return False
subarray = 0
partitions = 0
for num in A:
subarray += num
if subarray == quotient:
partitions += 1
subarray = 0
# Check if it consist at least 3 partitions
return partitions >= 3 | 34.263158 | 113 | 0.569892 | 651 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 222 | 0.341014 |
21d04b45422de71cb56cbdb189ae8ae7a0615c7b | 14,224 | py | Python | utils/etrm_stochastic_grid_search/residual_analysis.py | NMTHydro/Recharge | bbc1a05add92064acffeffb19f04e370b99a7918 | [
"Apache-2.0"
] | 7 | 2016-08-30T15:18:11.000Z | 2021-08-22T00:28:10.000Z | utils/etrm_stochastic_grid_search/residual_analysis.py | NMTHydro/Recharge | bbc1a05add92064acffeffb19f04e370b99a7918 | [
"Apache-2.0"
] | 2 | 2016-06-08T06:41:45.000Z | 2016-06-23T20:47:26.000Z | utils/etrm_stochastic_grid_search/residual_analysis.py | NMTHydro/Recharge | bbc1a05add92064acffeffb19f04e370b99a7918 | [
"Apache-2.0"
] | 1 | 2018-09-18T10:38:08.000Z | 2018-09-18T10:38:08.000Z | # ===============================================================================
# Copyright 2019 Jan Hendrickx and Gabriel Parrish
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import os
import yaml
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from datetime import datetime
# ============= standard library imports ========================
from utils.TAW_optimization_subroutine.timeseries_processor import accumulator
# --- Run configuration -------------------------------------------------------
# NOTE(review): this is a Python 2 script (`print` statements below, and the
# integer division inside range() in the TAW loop would TypeError on Python 3).
# Ameriflux site code used to build the calibration-output file names.
sitename = 'Wjs'
# if applicable
cum_days = '7'
# triggers a specific daterange for plotting specified lower down in script
date_range = True
# root = '/Users/dcadol/Desktop/academic_docs_II/calibration_approach/mini_model_outputs/mpj/calibration_output_II/mpj_7day_eta_cum'
# root = '/Users/dcadol/Desktop/academic_docs_II/calibration_approach/mini_model_outputs/mpj/calibration_output_II/mpj_non_cum_rzsm'
# root = '/Users/dcadol/Desktop/academic_docs_II/calibration_approach/mini_model_outputs/seg/calibration_output_II/seg_cum_eta_1day'
# root = '/Users/dcadol/Desktop/academic_docs_II/calibration_approach/mini_model_outputs/seg/calibration_output_II/seg_7day_eta_cum'
# root = '/Users/dcadol/Desktop/academic_docs_II/calibration_approach/mini_model_outputs/seg/calibration_output_II/seg_non_cum_rzsm'
# root = '/Users/dcadol/Desktop/academic_docs_II/calibration_approach/mini_model_outputs/ses/calibration_output_II/ses_7day_eta_cum'
# root = '/Users/dcadol/Desktop/academic_docs_II/calibration_approach/mini_model_outputs/ses/calibration_output_II/ses_non_cum_rzsm'
# root = '/Users/dcadol/Desktop/academic_docs_II/calibration_approach/mini_model_outputs/wjs/calibration_output_II/wjs_1day_eta_cum'
root = '/Users/dcadol/Desktop/academic_docs_II/calibration_approach/mini_model_outputs/wjs/calibration_output_II/wjs_cum_eta_7day'#wjs_cum_eta_7day
# root = '/Users/dcadol/Desktop/academic_docs_II/calibration_approach/mini_model_outputs/wjs/calibration_output_II/wjs_non_cum_rzsm'
# root = '/Users/dcadol/Desktop/academic_docs_II/calibration_approach/mini_model_outputs_II/seg/calibration_output/seg_rzsm'
# root = '/Users/dcadol/Desktop/academic_docs_II/calibration_approach/mini_model_outputs_II/mpj/calibration_output/mpj_rzsm'
# root = '/Users/dcadol/Desktop/academic_docs_II/calibration_approach/mini_model_outputs_II/vcp/calibration_output/vcp_rzsm'
# root = '/Users/dcadol/Desktop/academic_docs_II/calibration_approach/mini_model_outputs_III/wjs/calibration_output/wjs_rzsm'
# YAML files holding the chi-minimum summary and the per-date residuals.
chimin_path = os.path.join(root, 'US-{}_chimin_cum_eta_{}.yml'.format(sitename, cum_days))
resid_path = os.path.join(root, 'US-{}_resid_cum_eta_{}.yml'.format(sitename, cum_days))
combined_timeseries_file = 'cum_eta_model_df_{}_cum7.csv'
# chimin_path = os.path.join(root, 'US-{}_chimin_non_cum_rzsm.yml'.format(sitename))
# resid_path = os.path.join(root, 'US-{}_resid_non_cum_rzsm.yml'.format(sitename))
# combined_timeseries_file = 'rzsm_model_df_{}.csv'
var = 'ETa'#'ETa' # 'RZSM'
taw = '50'
# starting TAW value
begin_taw = 25
# ending TAW value
end_taw = 925
# grid search step size. Each ETRM run will increase the uniform TAW of the RZSW holding capacity by this many mm.
taw_step = 25
# NOTE(review): taw_list is built below but never read afterwards, and
# optimization_dict is never populated -- presumably leftovers from an earlier
# version of the grid search; confirm before deleting.
taw_list = []
optimization_dict = {}
for i in range(0, ((end_taw - begin_taw) / taw_step)):
    if i == 0:
        current_taw = begin_taw
    else:
        current_taw += taw_step
    taw_list.append(current_taw)
# Load the two calibration YAMLs (chimin_dict is loaded but unused below).
with open(chimin_path, 'r') as rfile:
    chimin_dict = yaml.load(rfile)
with open(resid_path, 'r') as rfile:
    resid_dict = yaml.load(rfile)
print 'residual dict \n', resid_dict
# resid_dict[taw] is a (timestamps, residual_values) pair for the chosen TAW.
resid_tseries = resid_dict[taw][0]
resid_vals = resid_dict[taw][1]
resid_tseries = [datetime.strptime(str(i)[0:10], '%Y-%m-%d') for i in resid_tseries]
# sort t series greatest to least and keep timeseries there.
resid_sorted = sorted(zip(resid_vals, resid_tseries))
# TODO - Grab the PRISM data for the large values on either end of resid_sorted for the pixel...
combined_timeseries_file = combined_timeseries_file.format(taw)
combined_timeseries_path = os.path.join(root, combined_timeseries_file)
combined_df = pd.read_csv(combined_timeseries_path, parse_dates=True, index_col=0, header=0)
print combined_df.iloc[:, 0]
# Optional hard-coded plotting window (enabled via date_range above).
start_date = datetime(2013, 6, 16)
end_date = datetime(2013, 10, 20)
if date_range:
    combined_df = combined_df.loc[(combined_df.index >= start_date) & (combined_df.index <= end_date)]
prism = combined_df['prism_values']
# Four most-negative and four most-positive residuals = "largest" outliers.
resid_large = resid_sorted[0:4] + resid_sorted[-4:]
# plt.plot(combined_df.index.values, prism)
# plt.plot(combined_df.index.values, combined_df['amf_eta_values'])
# plt.plot(combined_df.index.values, combined_df['average_vals_eta'])
# plt.show()
resid_dates = []
resid_vals = []
for resid_tup in resid_large:
    val, dt = resid_tup
    # print 'dt {}'.format(dt)
    # datetime.datetime(dt)
    resid_dates.append(dt)
    resid_vals.append(val)
df_datelist = [i for i in combined_df.index]
# Match outlier residual dates to dataframe row positions by calendar day
# (timestamps may differ in time-of-day, so compare (year, month, day)).
high_outlier_indices = []
for i, d in enumerate(df_datelist):
    # print d.year, d.month, d.day
    for res_tup in resid_large:
        res_val, res_d = res_tup
        if (res_d.year, res_d.month, res_d.day) == (d.year, d.month, d.day):
            # print 'resday', (res_d.year, res_d.month, res_d.day), 'dday', (d.year, d.month, d.day)
            # if res_d == d:
            high_outlier_indices.append(i)
print high_outlier_indices
# Pull the plotting series out of the dataframe as plain lists.
prism = combined_df['prism_values'].tolist()
site_precip = combined_df['amf_precip_values'].tolist()
# site_precip_dates = pd.to_datetime(combined_df['amf_precip_dates']).tolist()
etrm_et = combined_df['average_vals_eta'].tolist()
amf_et = combined_df['amf_eta_values'].tolist()
if var == 'RZSM':
    amf_rzsm = combined_df['nrml_depth_avg_sm'].tolist()
    etrm_rzsm = combined_df['average_vals_rzsm'].tolist()
etrm_ro = combined_df['average_vals_ro'].tolist()
data_date = df_datelist
# print 'site precip dates \n', site_precip_dates
data_date = [d.to_pydatetime() for d in data_date]
# Collect the precip/ET values at the outlier dates (kept for later analysis).
high_outlier_prism = []
high_outlier_etrm = []
high_outlier_amf = []
high_outlier_dates = []
for oi in high_outlier_indices:
    precip_outlier = prism[oi]
    etrm_et_outlier = etrm_et[oi]
    amf_et_outlier = amf_et[oi]
    outlier_date = data_date[oi]
    high_outlier_prism.append(precip_outlier)
    high_outlier_etrm.append(etrm_et_outlier)
    high_outlier_amf.append(amf_et_outlier)
    high_outlier_dates.append(outlier_date)
##### ================ RESIDUALS PLOT ========================
# Figure 1: 4-row figure -- residual scatter, modelled-vs-observed series,
# precipitation, and (for RZSM) an extra ETa panel. For var == 'ETa' only
# rows 1-3 are populated and row 4 stays empty.
ax1 = plt.subplot(411)
ax1.set_title('Largest Normalized Residuals in Timeseires')
ax1.set_xlabel('Date')
ax1.set_ylabel('Residual {}'.format(var))
plt.scatter(resid_dates, resid_vals)
plt.grid()
# plt.setp(ax1.get_xticklabels(), fontsize=6)
if var == 'ETa':
    ax2 = plt.subplot(412, sharex=ax1)
    ax2.set_title('Ameriflux {} and ETRM {}'.format(sitename, var))
    ax2.set_xlabel('Date')
    ax2.set_ylabel('ETa in mm')
    # plot + plot_date pairs draw a connecting line plus open-circle markers.
    plt.plot(data_date, etrm_et, color='black', label='ETRM')
    plt.plot_date(data_date, etrm_et, color='black', fillstyle='none')
    plt.plot(data_date, amf_et, color='green', label='AMF')
    plt.plot_date(data_date, amf_et, color='green', fillstyle='none')
    plt.grid()
    plt.legend(loc=(1.01, 0.5))
    # # make these tick labels invisible
    # plt.setp(ax2.get_xticklabels(), visible=False)
elif var == 'RZSM':
    ax2 = plt.subplot(412, sharex=ax1)
    ax2.set_title('Ameriflux {} and ETRM {}'.format(sitename, var))
    ax2.set_xlabel('Date')
    ax2.set_ylabel('RZSM Fraction')
    plt.plot(data_date, etrm_rzsm, color='red', label='ETRM')
    plt.plot_date(data_date, etrm_rzsm, color='red', fillstyle='none', label=None)
    plt.plot(data_date, amf_rzsm, color='purple', label='AMF')
    plt.plot_date(data_date, amf_rzsm, color='purple', fillstyle='none', label=None)
    plt.grid()
    plt.legend(loc=(1.01, 0.5))
# share x and y
ax3 = plt.subplot(413, sharex=ax1)
ax3.set_title('PRISM and Site {} Precipitation'.format(sitename))
ax3.set_xlabel('Date')
ax3.set_ylabel(('Precipitation in mm'))
plt.plot(data_date, prism, color='blue', label='PRISM')
plt.plot_date(data_date, prism, color='blue', fillstyle='none')
plt.plot(data_date, site_precip, color='orange', label='AMF')
plt.plot_date(data_date, site_precip, color='orange', fillstyle='none')
plt.grid()
plt.legend(loc=(1.01, 0.5))
if var == 'RZSM':
    # ax4 = plt.subplot(414, sharex=ax1)
    # ax4.set_title('ETRM {} Runoff'.format(sitename))
    # ax4.set_xlabel('Date')
    # ax4.set_ylabel('ETRM Runoff in mm')
    # plt.plot(data_date, etrm_ro, color='brown', label='Runoff')
    # plt.plot_date(data_date, etrm_ro, color='brown', fillstyle='none')
    # plt.grid()
    # plt.legend(loc=(1.01, 0.5))
    # =====
    ax4 = plt.subplot(414, sharex=ax1)
    ax4.set_title('Ameriflux {} and ETRM {}'.format(sitename, var))
    ax4.set_xlabel('Date')
    ax4.set_ylabel('ETa in mm')
    plt.plot(data_date, etrm_et, color='black', label='ETRM')
    plt.plot_date(data_date, etrm_et, color='black', fillstyle='none')
    plt.plot(data_date, amf_et, color='green', label='AMF')
    plt.plot_date(data_date, amf_et, color='green', fillstyle='none')
    plt.grid()
    plt.legend(loc=(1.01, 0.5))
plt.subplots_adjust(hspace=.75) # left, right, bottom, top, wspace, hspace
plt.show()
# ================== PLOTTING INFILTRATION ==================
# Figure 2: ETRM infiltration, modelled vs observed ETa, and precipitation.
etrm_infil = combined_df['average_vals_infil'].tolist()
ax1 = plt.subplot(311)
ax1.set_title('infil_timeseries')
ax1.set_xlabel('Date')
# NOTE(review): this format string has one placeholder but is given two
# arguments (var, taw) -- `taw` is silently ignored; the label was probably
# meant to be 'infiltration {} TAW {}'. Confirm before changing.
ax1.set_ylabel('infiltration {} TAW'.format(var, taw))
plt.plot(data_date, etrm_infil, color='black', label='ETRM')
plt.plot_date(data_date, etrm_infil, color='black', fillstyle='none')
plt.grid()
ax2 = plt.subplot(312, sharex=ax1)
ax2.set_title('Ameriflux {} and ETRM {}'.format(sitename, var))
ax2.set_xlabel('Date')
ax2.set_ylabel('ETa in mm')
plt.plot(data_date, etrm_et, color='black', label='ETRM')
plt.plot_date(data_date, etrm_et, color='black', fillstyle='none')
plt.plot(data_date, amf_et, color='green', label='AMF')
plt.plot_date(data_date, amf_et, color='green', fillstyle='none')
plt.grid()
plt.legend(loc=(1.01, 0.5))
ax3 = plt.subplot(313, sharex=ax1)
ax3.set_title('PRISM and Site {} Precipitation'.format(sitename))
ax3.set_xlabel('Date')
ax3.set_ylabel(('Precipitation in mm'))
plt.plot(data_date, prism, color='blue', label='PRISM')
plt.plot_date(data_date, prism, color='blue', fillstyle='none')
plt.plot(data_date, site_precip, color='orange', label='AMF')
plt.plot_date(data_date, site_precip, color='orange', fillstyle='none')
plt.grid()
plt.legend(loc=(1.01, 0.5))
plt.subplots_adjust(hspace=.75)
plt.show()
# ========================== Plotting Sans Residuals =======================
# Figure 3: same panels as Figure 1 minus the residual scatter; the bottom
# panel shows ETa for RZSM runs and ETRM runoff otherwise.
# plt.setp(ax1.get_xticklabels(), fontsize=6)
if var == 'ETa':
    ax2 = plt.subplot(311)
    ax2.set_title('Ameriflux {} and ETRM {}'.format(sitename, var))
    ax2.set_xlabel('Date')
    ax2.set_ylabel('ETa in mm')
    plt.plot(data_date, etrm_et, color='black', label='ETRM')
    plt.plot_date(data_date, etrm_et, color='black', fillstyle='none')
    plt.plot(data_date, amf_et, color='green', label='AMF')
    plt.plot_date(data_date, amf_et, color='green', fillstyle='none')
    plt.grid()
    plt.legend(loc=(1.01, 0.5))
    # # make these tick labels invisible
    # plt.setp(ax2.get_xticklabels(), visible=False)
elif var == 'RZSM':
    ax2 = plt.subplot(311)
    ax2.set_title('Ameriflux {} and ETRM {}'.format(sitename, var))
    ax2.set_xlabel('Date')
    ax2.set_ylabel('RZSM Fraction')
    plt.plot(data_date, etrm_rzsm, color='red', label='ETRM')
    plt.plot_date(data_date, etrm_rzsm, color='red', fillstyle='none', label=None)
    plt.plot(data_date, amf_rzsm, color='purple', label='AMF')
    plt.plot_date(data_date, amf_rzsm, color='purple', fillstyle='none', label=None)
    plt.grid()
    plt.legend(loc=(1.01, 0.5))
# share x and y
ax3 = plt.subplot(312, sharex=ax2)
ax3.set_title('PRISM and Site {} Precipitation'.format(sitename))
ax3.set_xlabel('Date')
ax3.set_ylabel(('Precipitation in mm'))
plt.plot(data_date, prism, color='blue', label='PRISM')
plt.plot_date(data_date, prism, color='blue', fillstyle='none')
plt.plot(data_date, site_precip, color='orange', label='AMF')
plt.plot_date(data_date, site_precip, color='orange', fillstyle='none')
plt.grid()
plt.legend(loc=(1.01, 0.5))
if var == 'RZSM':
    # ax4 = plt.subplot(313, sharex=ax2)
    # ax4.set_title('ETRM {} Runoff'.format(sitename))
    # ax4.set_xlabel('Date')
    # ax4.set_ylabel('ETRM Runoff in mm')
    # plt.plot(data_date, etrm_ro, color='brown', label='Runoff')
    # plt.plot_date(data_date, etrm_ro, color='brown', fillstyle='none')
    # plt.grid()
    # plt.legend(loc=(1.01, 0.5))
    # =====
    ax4 = plt.subplot(313, sharex=ax2)
    ax4.set_title('Ameriflux {} and ETRM {}'.format(sitename, var))
    ax4.set_xlabel('Date')
    ax4.set_ylabel('ETa in mm')
    plt.plot(data_date, etrm_et, color='black', label='ETRM')
    plt.plot_date(data_date, etrm_et, color='black', fillstyle='none')
    plt.plot(data_date, amf_et, color='green', label='AMF')
    plt.plot_date(data_date, amf_et, color='green', fillstyle='none')
    plt.grid()
    plt.legend(loc=(1.01, 0.5))
else:
    ax4 = plt.subplot(313, sharex=ax2)
    ax4.set_title('ETRM {} Runoff'.format(sitename))
    ax4.set_xlabel('Date')
    ax4.set_ylabel('ETRM Runoff in mm')
    plt.plot(data_date, etrm_ro, color='brown', label='Runoff')
    plt.plot_date(data_date, etrm_ro, color='brown', fillstyle='none')
    plt.grid()
    plt.legend(loc=(1.01, 0.5))
plt.subplots_adjust(hspace=.5) # left, right, bottom, top, wspace, hspace
plt.show() | 37.729443 | 147 | 0.710911 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,517 | 0.458169 |
21d44708b07bab2beabd646a214bd70c0233465b | 2,076 | py | Python | lib/data_utils/insta_utils_imgs.py | ziniuwan/maed | 9e1f1c37eba81da86c8d9c62dc9be41a01abff5b | [
"MIT"
] | 145 | 2021-08-15T13:22:08.000Z | 2022-03-29T13:37:19.000Z | lib/data_utils/insta_utils_imgs.py | vkirilenko/maed | 9e1f1c37eba81da86c8d9c62dc9be41a01abff5b | [
"MIT"
] | 9 | 2021-09-17T14:58:15.000Z | 2022-03-29T07:43:08.000Z | lib/data_utils/insta_utils_imgs.py | vkirilenko/maed | 9e1f1c37eba81da86c8d9c62dc9be41a01abff5b | [
"MIT"
] | 17 | 2021-08-15T13:22:10.000Z | 2022-01-17T02:34:14.000Z | import os
import sys
sys.path.append('.')
import argparse
import numpy as np
import os.path as osp
from multiprocessing import Process, Pool
from glob import glob
from tqdm import tqdm
import tensorflow as tf
from PIL import Image
from lib.core.config import INSTA_DIR, INSTA_IMG_DIR
def process_single_record(fname, outdir, split):
    """Decode one TFRecord of encoded video frames and dump every frame as a JPEG.

    Each serialized example in `fname` is one video: 'meta/N' holds the frame
    count and 'image/encoded' the JPEG bytes. Frames are written to
    <outdir>/<split>/<record_name>/<vid_idx>/<i>.jpg.

    Parameters:
        fname (str): path to the .tfrecord file.
        outdir (str): root output directory for the extracted images.
        split (str): dataset split name ('train'/'test'), used as a subdirectory.
    """
    record_name = fname.split('/')[-1]
    # Build the decode op ONCE and feed bytes through a placeholder: calling
    # tf.image.decode_jpeg inside the frame loop (as before) adds a new graph
    # node per frame, so the default graph -- and memory -- grew without bound.
    encoded_ph = tf.placeholder(tf.string)
    decode_op = tf.image.decode_jpeg(encoded_ph, channels=3)
    # Context manager guarantees the session is closed; the original created a
    # bare tf.Session() per call and never released it.
    with tf.Session() as sess:
        for vid_idx, serialized_ex in enumerate(tf.python_io.tf_record_iterator(fname)):
            out_dirname = osp.join(outdir, split, record_name, str(vid_idx))
            os.makedirs(out_dirname, exist_ok=True)
            example = tf.train.Example()
            example.ParseFromString(serialized_ex)
            n_frames = int(example.features.feature['meta/N'].int64_list.value[0])
            images_data = example.features.feature['image/encoded'].bytes_list.value
            for i in range(n_frames):
                # decode_jpeg already yields an HxWx3 uint8 array; the old
                # expand_dims/squeeze round-trip was a no-op and is dropped.
                frame = sess.run(decode_op, feed_dict={encoded_ph: images_data[i]})
                Image.fromarray(frame).save(osp.join(out_dirname, str(i) + '.jpg'))
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--inp_dir', type=str, help='tfrecords file path', default=INSTA_DIR)
    # required=True: omitting --n/--i/--split previously crashed later with an
    # opaque TypeError in the shard arithmetic (None * int); fail fast with a
    # clear argparse error instead.
    parser.add_argument('--n', type=int, required=True, help='total num of workers')
    parser.add_argument('--i', type=int, required=True, help='current index of worker (from 0 to n-1)')
    parser.add_argument('--split', type=str, required=True, help='train or test')
    parser.add_argument('--out_dir', type=str, help='output images path', default=INSTA_IMG_DIR)
    args = parser.parse_args()
    # Deterministic sharding: worker i of n takes a contiguous slice of the
    # sorted record list, so the n workers together cover every record once.
    fpaths = sorted(glob(f'{args.inp_dir}/{args.split}/*.tfrecord'))
    total = len(fpaths)
    fpaths = fpaths[args.i * total // args.n : (args.i + 1) * total // args.n]
    os.makedirs(args.out_dir, exist_ok=True)
    for fp in fpaths:
        process_single_record(fp, args.out_dir, args.split)
21d487a575334245b8424e08a0ec1c4d3a7ff96b | 672 | py | Python | src/person/migrations/0004_actors_moved.py | Little-Pogchamp-Team/kinopoisk_on_django | 06e1b5ee14c7e77dd5b69140732461a02bf44566 | [
"MIT"
] | 10 | 2021-01-10T09:39:16.000Z | 2022-02-05T06:40:47.000Z | src/person/migrations/0004_actors_moved.py | Little-Pogchamp-Team/kinopoisk_on_django | 06e1b5ee14c7e77dd5b69140732461a02bf44566 | [
"MIT"
] | null | null | null | src/person/migrations/0004_actors_moved.py | Little-Pogchamp-Team/kinopoisk_on_django | 06e1b5ee14c7e77dd5b69140732461a02bf44566 | [
"MIT"
] | 1 | 2021-01-11T17:04:06.000Z | 2021-01-11T17:04:06.000Z | # Generated by Django 3.1.5 on 2021-03-22 17:30
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add Person.movies (M2M through PersonRole) and PersonRole.role_name."""
    dependencies = [
        ('movies', '0010_actors_moved'),
        ('person', '0003_refactoring_movie_person_m2m_rels'),
    ]
    operations = [
        # Expose the movies of a person through the existing PersonRole
        # intermediate model; reverse accessor is movie.persons.
        migrations.AddField(
            model_name='person',
            name='movies',
            field=models.ManyToManyField(related_name='persons', through='person.PersonRole', to='movies.Movie'),
        ),
        # Free-text role label; nullable so pre-existing rows stay valid.
        migrations.AddField(
            model_name='personrole',
            name='role_name',
            field=models.CharField(max_length=100, null=True),
        ),
    ]
| 26.88 | 113 | 0.605655 | 579 | 0.861607 | 0 | 0 | 0 | 0 | 0 | 0 | 203 | 0.302083 |
21d4a525edd40a1d975bbe0a24d86588eb396b49 | 1,653 | py | Python | tests/test_oxide.py | codepainters/edalize | 220952c38864735238212ab92405167cbf16c528 | [
"BSD-2-Clause"
] | 1 | 2022-03-17T23:30:32.000Z | 2022-03-17T23:30:32.000Z | tests/test_oxide.py | codepainters/edalize | 220952c38864735238212ab92405167cbf16c528 | [
"BSD-2-Clause"
] | null | null | null | tests/test_oxide.py | codepainters/edalize | 220952c38864735238212ab92405167cbf16c528 | [
"BSD-2-Clause"
] | null | null | null | import os
import pytest
from edalize_common import make_edalize_test
def run_oxide_test(tf):
    """Drive a prepared edalize fixture through the full oxide flow.

    `tf` is the test-fixture object from make_edalize_test (not TensorFlow):
    configure() is expected to emit the Makefile and the yosys TCL helpers,
    and build() to record the yosys / nextpnr-nexus / prjoxide command lines;
    both sets of artifacts are diffed against the reference files.
    """
    tf.backend.configure()
    tf.compare_files(
        ["Makefile", "edalize_yosys_procs.tcl", "edalize_yosys_template.tcl"]
    )
    tf.backend.build()
    tf.compare_files(["yosys.cmd", "nextpnr-nexus.cmd", "prjoxide.cmd"])
def test_oxide(make_edalize_test):
    """Oxide flow with explicit device, yosys synth and nextpnr options."""
    tool_options = {
        "device": "LIFCL-40-9BG400CES",
        "yosys_synth_options": ["some", "yosys_synth_options"],
        "nextpnr_options": ["a", "few", "nextpnr_options"],
    }
    tf = make_edalize_test(
        "oxide", param_types=["vlogdefine", "vlogparam"], tool_options=tool_options
    )
    run_oxide_test(tf)
def test_oxide_minimal(make_edalize_test):
    """Oxide flow with only the mandatory device option, no files or params;
    output is compared against the 'minimal' reference directory."""
    tool_options = {
        "device": "LIFCL-40-9BG400CES",
    }
    tf = make_edalize_test(
        "oxide", param_types=[], files=[], tool_options=tool_options, ref_dir="minimal"
    )
    run_oxide_test(tf)
def test_oxide_multiple_pdc(make_edalize_test):
    """configure() must reject an EDAM that provides more than one PDC file."""
    files = [
        {"name": "pdc_file.pdc", "file_type": "PDC"},
        {"name": "pdc_file2.pdc", "file_type": "PDC"},
    ]
    tf = make_edalize_test("oxide", param_types=[], files=files)
    with pytest.raises(RuntimeError) as e:
        tf.backend.configure()
    assert (
        "Nextpnr only supports one PDC file. Found pdc_file.pdc and pdc_file2.pdc"
        in str(e.value)
    )
def test_oxide_no_device(make_edalize_test):
    """configure() must fail with a clear message when 'device' is omitted."""
    tf = make_edalize_test("oxide", param_types=[])
    with pytest.raises(RuntimeError) as e:
        tf.backend.configure()
    assert "Missing required option 'device' for nextpnr-nexus" in str(e.value)
| 26.66129 | 87 | 0.658197 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 512 | 0.30974 |
21d580ac0342437490dbebbfefc2c13b2463ec74 | 4,265 | py | Python | ds5-scripts/aosp_8_1/arm/time.py | rewhy/happer | 3b48894e2d91f150f1aee0ce75291b9ca2a29bbe | [
"Apache-2.0"
] | 32 | 2021-04-08T05:39:51.000Z | 2022-03-31T03:49:35.000Z | ds5-scripts/aosp_8_1/arm/time.py | rewhy/happer | 3b48894e2d91f150f1aee0ce75291b9ca2a29bbe | [
"Apache-2.0"
] | 2 | 2021-04-14T08:31:30.000Z | 2021-08-29T19:12:09.000Z | ds5-scripts/aosp_8_1/arm/time.py | rewhy/happer | 3b48894e2d91f150f1aee0ce75291b9ca2a29bbe | [
"Apache-2.0"
] | 3 | 2021-06-08T08:52:56.000Z | 2021-06-23T17:28:51.000Z | # time.py
import gc
import os
import sys
from arm_ds.debugger_v1 import Debugger
from arm_ds.debugger_v1 import DebugException
import config
import memory
import mmu
# obtain current execution state
# (module-level shared state: this script is loaded into the Arm DS-5
# debugger, and every handler below drives this one execution context)
debugger = Debugger()
execution_state = debugger.getCurrentExecutionContext()
def cleanup():
	"""Drop the cached MMU page table (if any) and force a GC pass."""
	# NOTE(review): `del` removes the attribute from the mmu module itself, so
	# a later mmu.page_table access raises AttributeError unless mmu recreates
	# it -- presumably the table is rebuilt on demand; confirm.
	if mmu.page_table is not None:
		del mmu.page_table
	gc.collect()
def start_prolog():
	"""Disable the breakpoint at config.brk_time so execution can run past it
	(re-enabled by end_prolog)."""
	# disable the time breakpoint
	for idx in range(0, execution_state.getBreakpointService().getBreakpointCount()):
		brk_object = execution_state.getBreakpointService().getBreakpoint(idx)
		if (int(brk_object.getAddresses()[0]) & 0xffffffff) == config.brk_time:
			brk_object.disable()
def end_prolog():
	"""Re-enable the breakpoint at config.brk_time disabled by start_prolog."""
	# enable the time breakpoint
	for idx in range(0, execution_state.getBreakpointService().getBreakpointCount()):
		brk_object = execution_state.getBreakpointService().getBreakpoint(idx)
		if (int(brk_object.getAddresses()[0]) & 0xffffffff) == config.brk_time:
			brk_object.enable()
TIME_INTERVAL = 1000000L # usec -- cap on how far the reported clock may advance between observed calls
def time():
	"""Breakpoint handler for libc time() in the debugged target.

	Reads the value the target obtained, forces it to be monotonic and clamps
	its forward progress to TIME_INTERVAL (anti timing-based anti-debugging),
	writes the possibly adjusted value back into R0, then resumes the target.
	NOTE(review): the name intentionally shadows libc's time() concept, not
	the stdlib `time` module (which is never imported here).
	"""
	# -- HEAD -- #
	start_prolog()
	# -- BODY -- #
	pid = int(execution_state.getVariableService().readValue("$AARCH64::$System::$Memory::$CONTEXTIDR_EL1.PROCID")) & 0xffffffff
	# only focus on the invocation from app -> gettimeofday
	lr = int(execution_state.getRegisterService().getValue("LR")) & 0xffffffff
	if not config.in_app_range(lr):
		# -- TAIL -- #
		end_prolog()
		# continue the execution of the target application
		execution_state.getExecutionService().resume()
		cleanup()
		return
	# get timeval pointer
	# NOTE(review): time_t_ptr is only logged, never dereferenced -- the result
	# is patched via R0 below rather than through this pointer; confirm intended.
	time_t_ptr = int(execution_state.getRegisterService().getValue("R0")) & 0xffffffff
	if config.debug:
		print "[time] pid = %#x, lr = %0#10x, time_t_ptr = %0#10x" % (pid, lr, time_t_ptr)
	config.log_print("[time] pid = %#x, lr = %0#10x, time_t_ptr = %0#10x" % (pid, lr, time_t_ptr))
	# Run to the end of libc time() so R0 holds the returned seconds value.
	brk_time = config.libc_base + config.time_end - config.libc_file_offset + config.libc_memory_offset
	execution_state.getExecutionService().resumeTo(brk_time)
	try:
		execution_state.getExecutionService().waitForStop(60000) # wait for 60s
	except DebugException:
		raise RuntimeError("wtf !!!")
	# obtain the obtained value
	tv_sec = int(execution_state.getRegisterService().getValue("R0")) & 0xffffffff
	tv_usec = 0x0
	if config.debug:
		print "[time] (origin) pid = %#x, tv_sec = %0#10x, tv_usec = %0#10x" % (pid, tv_sec, tv_usec)
	# config.log_print("[time] (origin) pid = %#x, tv_sec = %0#10x, tv_usec = %0#10x" % (pid, tv_sec, tv_usec))
	# anti time checking
	tv_sec_old, tv_usec_old = config.load_time_info()
	if tv_sec <= tv_sec_old:
		# Clock stalled or went backwards: force strict monotonic progress.
		# NOTE(review): the forced value is not written back to R0 in this
		# branch -- confirm whether that is intended.
		tv_sec = tv_sec_old + 0x1
		if tv_sec < tv_sec_old:
			# TODO: should raise an exception, but we just ignore it at this time
			assert False
	else:
		if tv_sec_old != 0:
			time_interval = (tv_sec * 1000000L) - (tv_sec_old * 1000000L)
			if time_interval > TIME_INTERVAL:
				# Clamp the advance to exactly TIME_INTERVAL past the last
				# observed timestamp and patch R0 with the clamped seconds.
				tv_sec_new = int(((tv_sec_old * 1000000L) + TIME_INTERVAL) / 1000000L)
				tv_usec_new = int(((tv_sec_old * 1000000L) + TIME_INTERVAL) - (tv_sec_new * 1000000L))
				assert tv_usec_new == 0
				# verification
				time_old = tv_sec_old * 1000000L + tv_usec_old
				time_new = tv_sec_new * 1000000L + tv_usec_new
				assert time_new == (time_old + TIME_INTERVAL)
				config.save_time_info(tv_sec_new, tv_usec_new)
				execution_state.getRegisterService().setValue("R0", tv_sec_new)
				# obtain the adjusted value
				tv_sec = int(execution_state.getRegisterService().getValue("R0")) & 0xffffffff
				tv_usec = 0x0
				if config.debug:
					print "[time] (adjust) pid = %#x, tv_sec = %0#10x, tv_usec = %0#10x" % (pid, tv_sec, tv_usec)
				# config.log_print("[time] (adjust) pid = %#x, tv_sec = %0#10x, tv_usec = %0#10x" % (pid, tv_sec, tv_usec))
			else:
				config.save_time_info(tv_sec, tv_usec)
		elif tv_sec_old == 0 and tv_usec_old == 0:
			# First observation: just record it.
			config.save_time_info(tv_sec, tv_usec)
		else:
			raise RuntimeError("invalid timeval valus !!!")
	# -- TAIL -- #
	end_prolog()
	# continue the execution of the target application
	execution_state.getExecutionService().resume()
	cleanup()
	return
if __name__ == '__main__':
	time()
	sys.exit()
| 32.557252 | 126 | 0.687222 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,088 | 0.2551 |
21d74dd97abbafbab41d2b79624c65a5587f6d58 | 3,134 | py | Python | unsupervised_learning/kmeans.py | toorajtaraz/computational_intelligence_mini_projects | 79d1782c3b61ee15ac01dcf377bdc369962adb18 | [
"MIT"
] | 3 | 2022-02-09T21:35:14.000Z | 2022-02-10T15:31:43.000Z | unsupervised_learning/kmeans.py | toorajtaraz/computational_intelligence_mini_projects | 79d1782c3b61ee15ac01dcf377bdc369962adb18 | [
"MIT"
] | null | null | null | unsupervised_learning/kmeans.py | toorajtaraz/computational_intelligence_mini_projects | 79d1782c3b61ee15ac01dcf377bdc369962adb18 | [
"MIT"
] | null | null | null | from pathlib import Path
import sys
path = str(Path(Path(__file__).parent.absolute()).parent.absolute())
sys.path.insert(0, path)
from mnist_utils.util import _x, _y_int
from sklearn.cluster import MiniBatchKMeans
from sklearn.metrics import accuracy_score, adjusted_rand_score
import numpy as np
from fast_pytorch_kmeans import KMeans
import torch
from tabulate import tabulate
# Module-level state shared between the init_* and test_accuracy_* helpers:
#   kmeans_main   -- fitted sklearn MiniBatchKMeans (set by init_clustring_scikit)
#   cluster_ids_x -- torch tensor of cluster ids (set by init_clustring_torch)
kmeans_main = None
cluster_ids_x = None
def classify_clusters(l1, l2):
    """Replace each cluster id in `l1` with the majority true label from `l2`.

    Parameters:
        l1: array-like of int, per-sample cluster assignments (from k-means).
        l2: array-like of int, per-sample ground-truth labels.

    Returns:
        np.ndarray of float: per-sample predicted labels, each cluster id
        mapped to the most frequent ground-truth label inside that cluster.
    """
    l1 = np.asarray(l1)
    l2 = np.asarray(l2)
    # Iterate over the cluster ids actually present instead of assuming the
    # contiguous range 0..k-1; the original raised KeyError (or argmax'ed an
    # empty bincount) when ids were non-contiguous.
    ref_labels = {}
    for cluster_id in np.unique(l1):
        member_mask = l1 == cluster_id
        ref_labels[cluster_id] = np.bincount(l2[member_mask]).argmax()
    decimal_labels = np.zeros(len(l1))
    for i, cluster_id in enumerate(l1):
        decimal_labels[i] = ref_labels[cluster_id]
    return decimal_labels
def init_clustring_scikit(cluster_count=10):
    """Fit a MiniBatchKMeans with `cluster_count` clusters on the data matrix.

    Side effect: rebinds the module-level `kmeans_main` estimator that
    test_accuracy_scikit() consumes.
    """
    global kmeans_main
    # sklearn estimators return self from fit(), so fit-and-bind in one step.
    kmeans_main = MiniBatchKMeans(n_clusters=cluster_count, verbose=False).fit(_x)
def test_accuracy_scikit():
    """Score the fitted scikit clustering against the integer labels.

    Maps each cluster to its majority true label, prints a short preview of
    predicted vs true labels plus both scores, and returns the pair
    (purity accuracy, adjusted Rand index).
    """
    global kmeans_main
    predicted = classify_clusters(kmeans_main.labels_, _y_int)
    print("predicted labels:\t", predicted[:16].astype('int'))
    print("true labels:\t\t", _y_int[:16])
    print(60 * '_')
    purity = accuracy_score(predicted, _y_int)
    rand_index = adjusted_rand_score(predicted, _y_int)
    print("Accuracy (PURITY):", purity)
    print("Accuracy (RAND INDEX):", rand_index)
    return purity, rand_index
def init_clustring_torch(cluster_count=10):
    """Fit fast_pytorch_kmeans with `cluster_count` clusters on the data matrix.

    Side effect: rebinds the module-level `cluster_ids_x` tensor that
    test_accuracy_torch() consumes.
    """
    # Only cluster_ids_x is assigned here; the stale `clusters_from_label`
    # name was removed from the global declaration (it was never defined or
    # used anywhere in this module).
    global cluster_ids_x
    _kmeans = KMeans(n_clusters=cluster_count, mode='euclidean', verbose=1)
    x = torch.from_numpy(_x)
    cluster_ids_x = _kmeans.fit_predict(x)
def test_accuracy_torch():
    """Score the fitted torch clustering against the integer labels.

    Moves the cluster ids to host memory, maps each cluster to its majority
    true label, prints a short preview plus both scores, and returns the pair
    (purity accuracy, adjusted Rand index).
    """
    global cluster_ids_x
    predicted = classify_clusters(cluster_ids_x.cpu().detach().numpy(), _y_int)
    print("predicted labels:\t", predicted[:16].astype('int'))
    print("true labels:\t\t", _y_int[:16])
    print(60 * '_')
    purity = accuracy_score(predicted, _y_int)
    rand_index = adjusted_rand_score(predicted, _y_int)
    print("Accuracy (PURITY):", purity)
    print("Accuracy (RAND INDEX):", rand_index)
    return purity, rand_index
def pipeline(lib="torch", cluster_count_max=300, coefficient=2):
    """Sweep cluster counts geometrically and tabulate clustering accuracy.

    Starting from K = number of distinct ground-truth labels, repeatedly fit
    k-means with the chosen backend, score it, and multiply K by `coefficient`
    until K exceeds `cluster_count_max`. A summary table is printed at the
    end (empty when `lib` is unsupported, matching the original behavior).

    Parameters:
        lib (str): "torch" (fast_pytorch_kmeans) or "scikit" (MiniBatchKMeans);
            anything else prints "LIB NOT SUPPORTED".
        cluster_count_max (int): inclusive upper bound on the sweep.
        coefficient (int or float): geometric growth factor for K.
    """
    # Dispatch table removes the sweep loop the two branches used to duplicate.
    backends = {
        "torch": (init_clustring_torch, test_accuracy_torch),
        "scikit": (init_clustring_scikit, test_accuracy_scikit),
    }
    cluster_count = len(np.unique(_y_int))
    result = []
    if lib in backends:
        init_fn, score_fn = backends[lib]
        while cluster_count <= cluster_count_max:
            print(10 * "*" + "TRYING WITH " + str(cluster_count) + 10 * "*")
            init_fn(cluster_count)
            AP, RI = score_fn()
            result.append([cluster_count, AP, RI])
            # int() keeps the count integral when the growth factor is fractional.
            cluster_count = int(cluster_count * coefficient)
    else:
        print("LIB NOT SUPPORTED")
    print(tabulate(result, headers=['K', 'AP', 'RI']))
pipeline(cluster_count_max=200, coefficient=3, lib="scikit")
| 35.613636 | 84 | 0.678685 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 305 | 0.09732 |
21d75135a125fe9f66fd6dd283f68fba32f5dd33 | 6,224 | py | Python | cbbc/qapackage/OnlineCLTrainer.py | Robert-xiaoqiang/Model-Capability-Assessment | 3cb8673ea66bfeded9d6421e15b288b485ccc53b | [
"Unlicense"
] | null | null | null | cbbc/qapackage/OnlineCLTrainer.py | Robert-xiaoqiang/Model-Capability-Assessment | 3cb8673ea66bfeded9d6421e15b288b485ccc53b | [
"Unlicense"
] | null | null | null | cbbc/qapackage/OnlineCLTrainer.py | Robert-xiaoqiang/Model-Capability-Assessment | 3cb8673ea66bfeded9d6421e15b288b485ccc53b | [
"Unlicense"
] | null | null | null | import os
import json
import random
random.seed(32767)
import shutil
import numpy as np
np.random.seed(32767)
import torch
from torch import nn
from torch.nn import init
from torch.nn import functional as F
from torch.optim import Adam, SGD, lr_scheduler
import torch.backends.cudnn as cudnn
from tensorboardX import SummaryWriter
from tqdm import tqdm
from transformers import (
WEIGHTS_NAME,
AdamW,
AlbertConfig,
AlbertForQuestionAnswering,
AlbertTokenizer,
BertConfig,
BertForQuestionAnswering,
BertTokenizer,
CamembertConfig,
CamembertForQuestionAnswering,
CamembertTokenizer,
DistilBertConfig,
DistilBertForQuestionAnswering,
DistilBertTokenizer,
RobertaConfig,
RobertaForQuestionAnswering,
RobertaTokenizer,
XLMConfig,
XLMForQuestionAnswering,
XLMTokenizer,
XLNetConfig,
XLNetForQuestionAnswering,
XLNetTokenizer,
get_linear_schedule_with_warmup
)
from .TrainHelper import AverageMeter, LoggerPather, DeviceWrapper
from .CLTrainerV2 import CLTrainerV2
class OnlineCLTrainer(CLTrainerV2):
def __init__(self, model, tokenizer, train_examples, train_features, train_dataset, train_dataloader,
dev_examples, dev_features, dev_dataloader, config):
super().__init__(model, tokenizer, train_examples, train_features, train_dataset, train_dataloader,
dev_examples, dev_features, dev_dataloader, config)
self.cur_percent = None # it is incompatible with online CL setting
start_limit = np.int(np.ceil(self.N * self.config.CURRICULUM.START_PERCENT))
self.cur_data_index = self.full_data_index[:start_limit]
self.cur_data_index_set = set(self.cur_data_index)
def on_dev_stage(self, iteration):
em, f1 = self.validate(iteration)
self.save_checkpoint(iteration)
# slow training time ????
factor_threshold = self.get_factor_threshold(iteration)
if self.best_result is None or f1 > self.best_result:
self.save_checkpoint(iteration, 'best')
self.best_result = f1
self.writer.add_scalar('val/em', em, iteration)
self.writer.add_scalar('val/f1', f1, iteration)
for factor_key, factor_score in factor_threshold.items():
self.writer.add_scalar('val/{}'.format(factor_key.lower()), factor_score, iteration)
self.model.train()
def get_factor_threshold(self, iteration):
dev_prediction_dirname = os.path.join(self.prediction_path, 'model_iteration_{}'.format(iteration))
dev_prediction_file = os.path.join(dev_prediction_dirname, 'prediction.json')
with open(dev_prediction_file) as f:
prediction_dict = json.load(f)
understood_dict = { k: v for k, v in prediction_dict.items() if v['f1_score'] >= 0.8001 and v['em_score'] >= 0.8001 }
threshold_dict = { }
for factor_entry in self.config.CURRICULUM.DEV_FACTORS:
factor_key, factor_keyed_difficulty_filename = list(factor_entry.items())[0]
factor_scores = 0.0
with open(factor_keyed_difficulty_filename) as f:
factor_keyed_difficulty_dict = json.load(f)
for qid in understood_dict.keys():
factor_scores += factor_keyed_difficulty_dict[qid]
if understood_dict:
factor_scores /= float(len(understood_dict))
threshold_dict[factor_key] = factor_scores
dev_factor_threshold_file = os.path.join(dev_prediction_dirname, 'factor_threshold.json')
with open(dev_factor_threshold_file, 'w') as f:
json.dump(threshold_dict, f, indent = 4)
backup_dirname = os.path.join(self.prediction_path, 'model_latest')
os.makedirs(backup_dirname, exist_ok = True)
# for use of sampling strategy
shutil.copy(dev_factor_threshold_file, backup_dirname)
return threshold_dict
def enlarge_data_index(self):
latest_factor_threshold_file = os.path.join(self.prediction_path, 'model_latest', 'factor_threshold.json')
with open(latest_factor_threshold_file) as f:
threshold_dict = json.load(f)
candidates = set()
# for each factor -> filter all samples -> add it into set
for factor_entry in self.config.CURRICULUM.TRAIN_FACTORS:
factor_key, factor_keyed_difficulty_filename = list(factor_entry.items())[0]
factor_threshold_score = threshold_dict[factor_key]
increased_score = factor_threshold_score * self.config.CURRICULUM.FACTOR_INCREASE_FACTOR
with open(factor_keyed_difficulty_filename) as f:
factor_keyed_difficulty_dict = json.load(f)
for qid, factor_score in factor_keyed_difficulty_dict.items():
if factor_score < increased_score:
# remove duplicates automatically
candidates.add(qid)
new_data_index = [ ]
new_data_index_set = set()
# enlarge 2 times
enlarge_size = len(self.cur_data_index)
# note that candidates is example-based counting.
# we cannot use np.sample.choice() because dataset is feature-based counting !!!
for feature_index, feature in enumerate(self.train_features):
example = self.train_examples[feature.example_index]
qid = example.qas_id
if qid in candidates and feature_index not in self.cur_data_index_set:
new_data_index.append(feature_index)
new_data_index_set.add(feature_index)
if len(new_data_index) == enlarge_size:
break
self.cur_data_index.extend(new_data_index) # +=
self.cur_data_index_set.update(new_data_index_set) # |=, union_update
def sample_batch_index(self, batch_index):
    """Draw one batch of feature indices from the current curriculum pool.

    Every INCREASE_INTERVAL batches (but never at batch 0) the pool is
    enlarged first and its new size is logged to the summary writer.
    """
    interval = self.config.CURRICULUM.INCREASE_INTERVAL
    if batch_index != 0 and batch_index % interval == 0:
        self.enlarge_data_index()
        self.writer.add_scalar('cl/n_data', len(self.cur_data_index_set), batch_index + 1)
    # sample without replacement so a batch never repeats an index
    return np.random.choice(self.cur_data_index, self.config.TRAIN.BATCH_SIZE, replace=False)
21d75b727cf9afea002e2b219228eabb6225a62d | 462 | py | Python | fluids/consts.py | BerkeleyAutomation/FLUIDS | 728da0d0fec5028ca4506aa9cc8e37a5b072e7a9 | [
"MIT"
] | 26 | 2017-12-28T18:15:36.000Z | 2022-01-21T13:00:27.000Z | fluids/consts.py | BerkeleyAutomation/FLUIDS | 728da0d0fec5028ca4506aa9cc8e37a5b072e7a9 | [
"MIT"
] | 61 | 2018-01-30T05:18:42.000Z | 2021-05-19T15:00:05.000Z | fluids/consts.py | BerkeleyAutomation/FLUIDS | 728da0d0fec5028ca4506aa9cc8e37a5b072e7a9 | [
"MIT"
] | 14 | 2017-12-11T04:59:21.000Z | 2021-05-19T12:21:31.000Z | STATE_CITY = "fluids_state_city"
OBS_QLIDAR = "fluids_obs_qlidar"
OBS_GRID = "fluids_obs_grid"
OBS_BIRDSEYE = "fluids_obs_birdseye"
OBS_NONE = "fluids_obs_none"
BACKGROUND_CSP = "fluids_background_csp"
BACKGROUND_NULL = "fluids_background_null"
REWARD_PATH = "fluids_reward_path"
REWARD_NONE = "fluids_reward_none"
RIGHT = "RIGHT"
LEFT = "LEFT"
STRAIGHT = "STRAIGHT"
RED = (0xf6, 0x11, 0x46)
YELLOW = (0xfc, 0xef, 0x5e),
GREEN = (0, 0xc6, 0x44)
| 22 | 42 | 0.74026 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 203 | 0.439394 |
21d800daddf76d02f8c5063d12c46fb52f08fcb4 | 47 | py | Python | src/melbviz/wsgi.py | ned2/footviz | 4940882469df76b6af19282cf4fc4f3c81a7b410 | [
"MIT"
] | 1 | 2020-02-01T20:35:39.000Z | 2020-02-01T20:35:39.000Z | src/melbviz/wsgi.py | ned2/footviz | 4940882469df76b6af19282cf4fc4f3c81a7b410 | [
"MIT"
] | 2 | 2020-03-31T10:43:57.000Z | 2020-07-19T02:56:08.000Z | src/melbviz/wsgi.py | ned2/footviz | 4940882469df76b6af19282cf4fc4f3c81a7b410 | [
"MIT"
] | null | null | null | from .app import app
# WSGI entry point: servers import this module and call ``application``.
# ``app.server`` is assumed to be the underlying Flask/WSGI server object of
# ``app`` (e.g. a Dash app) -- confirm against melbviz.app.
application = app.server
| 11.75 | 24 | 0.765957 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
21d8495b0fdeb7179e8f4818df4634dea3eb06dd | 312 | py | Python | 0118.Pascal's_Triangle/solution.py | WZMJ/Algorithms | 07f648541d38e24df38bda469665c12df6a50637 | [
"MIT"
] | 5 | 2020-05-23T02:18:26.000Z | 2021-07-05T05:36:01.000Z | 0118.Pascal's_Triangle/solution.py | WZMJ/Algorithms | 07f648541d38e24df38bda469665c12df6a50637 | [
"MIT"
] | 1 | 2020-06-10T07:17:24.000Z | 2020-07-20T02:21:24.000Z | 0118.Pascal's_Triangle/solution.py | WZMJ/Algorithms | 07f648541d38e24df38bda469665c12df6a50637 | [
"MIT"
] | 1 | 2019-04-23T13:01:50.000Z | 2019-04-23T13:01:50.000Z | class Solution:
def generate(self, num_rows):
    """Return the first ``num_rows`` rows of Pascal's triangle."""
    triangle = []
    row = [1]
    for _ in range(num_rows):
        triangle.append(row)
        # each interior entry is the sum of the two entries above it
        row = [1] + [row[i] + row[i + 1] for i in range(len(row) - 1)] + [1]
    return triangle
| 28.363636 | 81 | 0.464744 | 311 | 0.996795 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
21dbf379b220ade4794e4ad2d117ca0df3cac919 | 472 | py | Python | ozzmeister00/AdventOfCode2021/Scripts/Python/utils/constants.py | techartorg/Advent_of_code_2021 | 0de46418e86743a2f3dee62c34f35e3007973c77 | [
"MIT"
] | null | null | null | ozzmeister00/AdventOfCode2021/Scripts/Python/utils/constants.py | techartorg/Advent_of_code_2021 | 0de46418e86743a2f3dee62c34f35e3007973c77 | [
"MIT"
] | null | null | null | ozzmeister00/AdventOfCode2021/Scripts/Python/utils/constants.py | techartorg/Advent_of_code_2021 | 0de46418e86743a2f3dee62c34f35e3007973c77 | [
"MIT"
] | 2 | 2021-12-12T06:42:02.000Z | 2021-12-26T01:41:28.000Z | """
Constants and constant generators
"""
import os
INPUTS_FOLDER_NAME = "inputData"


def getInputsFolder():
    """
    :return str: absolute path to the inputData folder, which lives one
        level above the directory containing this module
    """
    this_dir = os.path.abspath(os.path.dirname(__file__))
    # the inputs folder sits next to this module's parent package
    parent_dir, _tail = os.path.split(this_dir)
    return os.path.join(parent_dir, INPUTS_FOLDER_NAME)
| 21.454545 | 119 | 0.707627 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 227 | 0.480932 |
21dc1958ff8c27f13a30ce7881d7c3c522568e75 | 4,738 | py | Python | bin/ADFRsuite/CCSBpckgs/Volume/Operators/trilinterp.py | AngelRuizMoreno/Jupyter_Dock_devel | 6d23bc174d5294d1e9909a0a1f9da0713042339e | [
"MIT"
] | null | null | null | bin/ADFRsuite/CCSBpckgs/Volume/Operators/trilinterp.py | AngelRuizMoreno/Jupyter_Dock_devel | 6d23bc174d5294d1e9909a0a1f9da0713042339e | [
"MIT"
] | null | null | null | bin/ADFRsuite/CCSBpckgs/Volume/Operators/trilinterp.py | AngelRuizMoreno/Jupyter_Dock_devel | 6d23bc174d5294d1e9909a0a1f9da0713042339e | [
"MIT"
] | 1 | 2021-11-04T21:48:14.000Z | 2021-11-04T21:48:14.000Z | ################################################################################
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public
## License as published by the Free Software Foundation; either
## version 2.1 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public
## License along with this library; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
##
## (C) Copyrights Dr. Michel F. Sanner and TSRI 2016
##
################################################################################
def trilinterp(pts, map, inv_spacing, origin, output_8pts=0):
    """Trilinearly interpolate grid values at a set of 3-D points.

    (Python 2 module: uses ``print`` statements.)

    INPUT:
        pts          sequence of (x, y, z) coordinates to look up
        map          3-D grid data (has to be a Numeric array)
        inv_spacing  1. / grid spacing along each axis (3-tuple)
        origin       minimum grid coordinates in x, y and z
        output_8pts  when true, print the 8 corner weights and values used
                     for each point (debugging aid)

    OUTPUT:
        values       list of interpolated values, one per point

    Points outside the grid are clamped to the nearest boundary voxel.
    """
    ##
    ##
    ##  Authors: Garrett M. Morris, TSRI, Accelerated C version 2.2 (C++ code)
    ##           David Goodsell, UCLA, Original FORTRAN version 1.0 (C code)
    ##           Michel Sanner (python port)
    ##  Date: 10/06/94, march 26 03
    values = []
    invx, invy, invz = inv_spacing
    xlo, ylo, zlo = origin
    maxx = map.shape[0] - 1
    maxy = map.shape[1] - 1
    maxz = map.shape[2] - 1
    for x,y,z in pts:
        # --- X axis: voxel indices u0/u1 and interpolation weights p0u/p1u
        u = (x-xlo) * invx
        u0 = max(0, int(u)) # clamp at lower bound of volume
        u0 = min(maxx, u0)
        u1 = min(maxx, u0 + 1) # clamp at upper bounds of volume
        u1 = max(0, u1)
        if u0>=maxx: # outside on X+ axis
            p0u = 1.0
            p1u = 0.0
        elif u0<=0: # outside on X- axis
            p0u = 0.0
            p1u = 1.0
        else:
            p0u = u - u0
            p1u = 1. - p0u
        # --- Y axis: voxel indices v0/v1 and weights p0v/p1v
        v = (y-ylo) * invy
        v0 = max(0, int(v)) # clamp at lower bound of volume
        v0 = min(maxy, v0)
        v1 = min(maxy, v0 + 1) # clamp at upper bounds of volume
        v1 = max(0, v1)
        if v0>=maxy: # outside on Y+ axis
            p0v = 1.0
            p1v = 0.0
        elif v0<=0: # outside on Y- axis
            p0v = 0.0
            p1v = 1.0
        else:
            p0v = v - v0
            p1v = 1. - p0v
        # --- Z axis: voxel indices w0/w1 and weights p0w/p1w
        w = (z-zlo) * invz
        w0 = max(0, int(w)) # clamp at lower bound of volume
        w0 = min(maxz, w0)
        w1 = min(maxz, w0 + 1) # clamp at upper bounds of volume
        w1 = max(0, w1)
        if w0>=maxz: # outside on Z+ axis
            p0w = 1.0
            p1w = 0.0
        elif w0<=0: # outside on Z- axis
            p0w = 0.0
            p1w = 1.0
        else:
            p0w = w - w0
            p1w = 1. - p0w
        # Accumulate the weighted contribution of the 8 surrounding corners.
        # (note: 'plw=' in debug string '2:' is a typo for 'p1w='; the
        # runtime strings are left untouched here)
        m = 0.0
        if output_8pts:
            print '0:', m," + p1u=", p1u, "*p1v=", p1v, "*p1w=", p1w, "*map[ ", u0, "][", v0,"][", w0,"]"
        m = m + p1u * p1v * p1w * map[ u0 ][ v0 ][ w0 ]
        if output_8pts:
            print '1:', m," + p1u=", p1u, " p1v=", p1v, " p0w=", p0w, " map[ ", u0, "][", v0,"][", w1,"]"
        m = m + p1u * p1v * p0w * map[ u0 ][ v0 ][ w1 ]
        if output_8pts:
            print '2:', m," + p1u=", p1u, " p0v=", p0v, " plw=", p1w, " map[ ", u0, "][", v1,"][", w0,"]"
        m = m + p1u * p0v * p1w * map[ u0 ][ v1 ][ w0 ]
        if output_8pts:
            print '3:', m," + p1u=", p1u, " p0v=", p0v, " p0w=", p0w, " map[ ", u0, "][", v1,"][", w1,"]"
        m = m + p1u * p0v * p0w * map[ u0 ][ v1 ][ w1 ]
        if output_8pts:
            print '4:', m," + p0u=", p0u, " p1v=", p1v, " p1w=", p1w, " map[ ", u1, "][", v0,"][", w0,"]"
        m = m + p0u * p1v * p1w * map[ u1 ][ v0 ][ w0 ]
        if output_8pts:
            print '5:', m," + p0u=", p0u, " p1v=", p1v, " p0w=", p0w, " map[ ", u1, "][", v0,"][", w1,"]"
        m = m + p0u * p1v * p0w * map[ u1 ][ v0 ][ w1 ]
        if output_8pts:
            print '6:', m," + p0u=", p0u, " p0v=", p0v, " p1w=", p1w, " map[ ", u1, "][", v1,"][", w0,"]"
        m = m + p0u * p0v * p1w * map[ u1 ][ v1 ][ w0 ]
        if output_8pts:
            print '7:', m," + p0u=", p0u, " p0v=", p0v, " p0w=", p0w, " map[ ", u1, "][", v1,"][", w1,"]"
        m = m + p0u * p0v * p0w * map[ u1 ][ v1 ][ w1 ]
        if output_8pts:
            print 'end: m=', m
        values.append(m)
    return values
| 37.904 | 105 | 0.47003 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,196 | 0.463487 |
21dd78aa13bdce09f718bbaedda95f0b779a6c8a | 11,683 | py | Python | telegrampy/ext/commands/help.py | Fyssion/telegram.py | 41d94b9386cd1812dfe544a7f86ca4e0787a4dee | [
"MIT"
] | null | null | null | telegrampy/ext/commands/help.py | Fyssion/telegram.py | 41d94b9386cd1812dfe544a7f86ca4e0787a4dee | [
"MIT"
] | null | null | null | telegrampy/ext/commands/help.py | Fyssion/telegram.py | 41d94b9386cd1812dfe544a7f86ca4e0787a4dee | [
"MIT"
] | null | null | null | """
MIT License
Copyright (c) 2020-2021 ilovetocode
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import annotations
import html
import itertools
from typing import TYPE_CHECKING, Any, Dict, List, Optional, TypeVar
from .cog import Cog
from .core import Command
from .errors import CommandError
if TYPE_CHECKING:
from .bot import Bot
CommandT = TypeVar("CommandT", bound="Command")
class _HelpCommandImplementation(Command):
    """Class that interfaces with :class:`telegrampy.ext.commands.Command`.

    Wraps a :class:`HelpCommand` instance so it can be registered on the bot
    like any other command; the help command object itself is passed to
    ``Command.__init__`` as the callback.
    """

    def __init__(self, help_cmd: HelpCommand, bot: Bot, command_attrs: Dict[str, Any]):
        # keep a reference to the wrapped HelpCommand and the owning bot
        self.help_cmd: HelpCommand = help_cmd
        super().__init__(help_cmd, **command_attrs)
        self.bot: Bot = bot
class HelpCommand:
    """Help command template.

    Subclasses implement the ``send_*`` hooks; the query-dispatch logic in
    :meth:`help_callback` is shared.

    Attributes
    ----------
    ctx: :class:`telegrampy.ext.commands.Context`
        The :class:`telegrampy.ext.commands.Context` for the command
    bot: :class:`telegrampy.ext.commands.Bot`
        The :class:`telegrampy.ext.commands.Bot` from the Context
    """

    def __init__(self, **options: Any) -> None:
        # Attributes forwarded to the wrapping Command implementation.
        self.command_attrs: Dict[str, Any] = options.pop('command_attrs', {})
        self.command_attrs.setdefault("name", "help")
        self.command_attrs.setdefault("description", "The help command")
        self.command_attrs.setdefault("aliases", ["start"])
        self._implementation: Optional[_HelpCommandImplementation] = None

    def _add_to_bot(self, bot: Bot) -> None:
        # Wrap this object in a real Command and register it on the bot.
        implementation = _HelpCommandImplementation(self, bot, self.command_attrs)
        bot.add_command(implementation)
        self._implementation = implementation

    def _remove_from_bot(self, bot: Bot) -> None:
        if self._implementation is None:
            raise RuntimeError("Help command is not implemented.")

        bot.remove_command(self._implementation.name)
        self._implementation = None

    async def get_command_signature(self, command: Command) -> str:
        """|coro|

        The method that gets a formatted command signature
        (name and signature are HTML-escaped).

        Example:
        /help [command]
        """
        name = html.escape(command.name)
        sig = html.escape(command.signature)
        return f"/{name} {sig}"

    async def send_bot_help(self) -> None:
        """|coro|

        The method that sends help for the bot.
        This is called when no query is provided.

        This method should handle the sending of the help message.
        """
        raise NotImplementedError("Subclasses must implement this.")

    async def send_cog_help(self, cog: Cog) -> None:
        """|coro|

        The method that sends help for a cog.
        This is called when a cog matches the query.

        This method should handle the sending of the help message.

        Parameters
        ----------
        cog: :class:`telegrampy.ext.commands.Cog`
            The cog that matched the query
        """
        raise NotImplementedError("Subclasses must implement this.")

    async def send_command_help(self, command: Command) -> None:
        """|coro|

        The method that sends help for a command.
        This is called when a command matches the query.

        This method should handle the sending of the help message.

        Parameters
        ----------
        command: :class:`telegrampy.ext.commands.Command`
            The command that matched the query
        """
        raise NotImplementedError("Subclasses must implement this.")

    async def send_not_found(self, query: str) -> None:
        """|coro|

        The method that sends a 'not found' message or similar.
        This method is called when no match is found for the query.

        Parameters
        ----------
        query: :class:`str`
            The user's query
        """
        await self.ctx.send(f"A command or cog named '{query}' was not found.")

    async def help_callback(self, query: Optional[str]) -> None:
        """|coro|

        The callback that searches for a matching command or cog.
        This should not be overridden unless it is necessary.

        Parameters
        ----------
        query: Optional[:class:`str`]
            The user's query. Defaults to ``None``.
        """

        bot = self.bot

        # Send the bot help if there is no query
        if query is None:
            await self.send_bot_help()
            return

        # Check if the query matches a cog (idiomatic single dict lookup
        # instead of a membership test followed by a subscript)
        cog = bot.cogs.get(query)
        if cog is not None:
            await self.send_cog_help(cog)
            return

        # If not, check if the query matches a command
        command = bot.get_command(query)
        if command is not None:
            await self.send_command_help(command)
            return

        # If neither, send the not found message
        await self.send_not_found(query)

    async def __call__(self, ctx, *, command=None):
        # Entry point invoked by the wrapping Command implementation.
        self.ctx = ctx
        self.bot = ctx.bot
        await self.help_callback(command)
class DefaultHelpCommand(HelpCommand):
    """The default help command.

    This help command mimics BotFather's help command look.

    Parameters
    ----------
    no_category: Optional[:class:`str`]
        The heading for commands without a category.
        Defaults to "No Category".
    sort_commands: Optional[:class:`bool`]
        Whether to sort the commands.
        Defaults to ``True``.
    """

    if TYPE_CHECKING:
        no_category: str
        sort_commands: bool

    def __init__(self, **options: Any):
        self.no_category: str = options.pop("no_category", "No Category")
        self.sort_commands: bool = options.pop("sort_commands", True)
        super().__init__(**options)

    def get_ending_note(self) -> str:
        """Returns the command's ending note."""
        if self._implementation is None:
            raise RuntimeError("Help command is not implemented.")

        name = self._implementation.name
        return (
            f"Type /{name} [command] for more info on a command.\n"
            f"You can also type /{name} [category] for more info on a category."
        )

    async def format_commands(self, commands: List[Command], *, heading: str) -> List[str]:
        """|coro|

        The method that formats a given list of commands under a bold,
        HTML-escaped heading. Hidden commands are skipped.

        Parameters
        ----------
        commands: List[:class`telegrampy.ext.commands.Command`]
            The list of commands to format.
        heading: :class:`str`
            The heading to display.
        """
        if not commands:
            return []

        formatted = []
        formatted.append(f"<b>{html.escape(heading)}:</b>")

        def make_entry(sig, doc, *, alias_for=None):
            # one line per command: "/name sig - description"
            alias = f"[Alias for {alias_for}] " if alias_for else ""
            if doc:
                return f"{sig} - {alias}{html.escape(doc)}"
            else:
                entry = f"{sig}"
                if alias:
                    entry += f" {alias}"
                return entry

        for command in commands:
            if command.hidden:
                continue
            sig = await self.get_command_signature(command)
            doc = command.description
            formatted.append(make_entry(sig, doc))

        return formatted

    async def format_command(self, command: Command) -> List[str]:
        """|coro|

        The method that formats an individual command:
        signature, description and aliases (when present).

        Parameters
        ------------
        command: :class:`Command`
            The command to format.
        """
        help_text = [await self.get_command_signature(command)]
        if command.description:
            help_text.append(html.escape(command.description))
        if command.aliases:
            help_text.append(f"Aliases: {', '.join(command.aliases)}")
        return help_text

    async def filter_commands(self, commands: List[CommandT]) -> List[CommandT]:
        """|coro|

        Takes a list of commands and filters out hidden commands and
        commands the current context is not allowed to run.

        Parameters
        ----------
        commands: List[:class:`telegrampy.ext.commands.Command`]
            The commands to filter.

        Returns
        -------
        List[:class:`telegrampy.ext.commands.Command`]
            The filtered commands.
        """
        filtered_commands = []

        async def predicate(command):
            # a failing check is treated as "not runnable", not an error
            try:
                return await command.can_run(self.ctx)
            except CommandError:
                return False

        for command in commands:
            if not command.hidden and await predicate(command):
                filtered_commands.append(command)

        return filtered_commands

    async def send_help_text(self, help_text: List[str]) -> None:
        # Join the accumulated lines and send them as one HTML message.
        message = "\n".join(help_text)
        await self.ctx.send(message, parse_mode="HTML")

    async def send_bot_help(self) -> None:
        bot = self.bot
        help_text = []

        if bot.description:
            # <description> portion
            help_text.append(html.escape(bot.description))
            help_text.append("")  # blank line

        no_category = self.no_category

        def get_category(command, *, no_category=no_category):
            cog = command.cog
            return cog.qualified_name if cog is not None else no_category

        # BUG FIX: itertools.groupby only groups *consecutive* items, so the
        # commands must be sorted by the same key first; otherwise a category
        # whose commands are interleaved with others is emitted as several
        # duplicate headings.
        to_iterate = itertools.groupby(sorted(bot.commands, key=get_category), key=get_category)

        # Now we can add the commands to the page.
        for category, commands in to_iterate:
            commands = await self.filter_commands(sorted(commands, key=lambda c: c.name) if self.sort_commands else list(commands))
            if not commands:
                continue
            added = await self.format_commands(commands, heading=category)

            if added:
                help_text.extend(added)
                help_text.append("")  # blank line

        note = self.get_ending_note()
        if note:
            # help_text.append("")  # blank line
            help_text.append(html.escape(note))

        await self.send_help_text(help_text)

    async def send_cog_help(self, cog: Cog) -> None:
        help_text = []

        if cog.description:
            help_text.append(html.escape(cog.description))
            help_text.append("")  # blank line

        commands = await self.filter_commands(cog.commands)
        help_text.extend(await self.format_commands(commands, heading="Commands"))

        note = self.get_ending_note()
        if note:
            help_text.append("")  # blank line
            help_text.append(html.escape(note))

        await self.send_help_text(help_text)

    async def send_command_help(self, command: Command) -> None:
        await self.send_help_text(await self.format_command(command))
| 31.321716 | 131 | 0.620303 | 10,280 | 0.879911 | 0 | 0 | 0 | 0 | 7,566 | 0.647608 | 5,145 | 0.440383 |
21dea9e689cc2f2311728c0f3580dc22bc6df8bd | 3,011 | py | Python | backend/farming/graphql/types/natura2000.py | PwC-FaST/fast-webapp | 8c5640c04fcf0b200d5408a8354b4ab2263cd37a | [
"MIT"
] | 7 | 2019-08-30T05:19:27.000Z | 2021-12-22T14:56:00.000Z | backend/farming/graphql/types/natura2000.py | PwC-FaST/fast-webapp | 8c5640c04fcf0b200d5408a8354b4ab2263cd37a | [
"MIT"
] | 10 | 2020-06-05T19:45:05.000Z | 2022-02-17T19:15:37.000Z | backend/farming/graphql/types/natura2000.py | PwC-FaST/fast-webapp | 8c5640c04fcf0b200d5408a8354b4ab2263cd37a | [
"MIT"
] | 5 | 2020-03-05T10:23:02.000Z | 2020-12-06T10:53:07.000Z | import graphene
import os
from promise import Promise
from datetime import datetime
from promise.dataloader import DataLoader
import requests
from core.graphql.types import CountryType
from core.models import Country
# GraphQL object type describing a Natura 2000 protected site: identifier,
# site code/name, owning country, data-release date, geometry kind and site
# type labels. (Comments rather than a docstring on purpose: graphene uses
# class docstrings as the GraphQL type description.)
class Natura2000FeatureType(graphene.ObjectType):
    id = graphene.String()
    site_code = graphene.String()
    site_name = graphene.String()
    country = graphene.Field(CountryType)  # resolved to a core Country model by the loader below
    released_at = graphene.DateTime()
    wkt_type = graphene.String()  # presumably the WKT geometry kind -- confirm against the API
    site_types = graphene.List(graphene.String)
# GraphQL object type describing how one LPIS parcel relates to one Natura
# 2000 site: whether they intersect, the minimum distance, the intersection
# measure (units defined by the upstream FaST API -- confirm), plus the site
# itself. id is built as "<lpis_parcel_id>.<site_id>" by the loader below.
class Natura2000IntersectionType(graphene.ObjectType):
    id = graphene.String()
    intersects = graphene.Boolean()
    minimum_distance = graphene.Float()
    intersection = graphene.Float()
    natura2000_feature = graphene.Field(Natura2000FeatureType)
class Natura2000IntersectionLoader(DataLoader):
    # Promise-based DataLoader: resolves the Natura 2000 intersections for a
    # whole batch of LPIS parcel ids with a single HTTP call to the FaST API.
    def batch_load_fn(self, lpis_parcel_ids):
        # Endpoint of the parcel/Natura2000 service, taken from the environment.
        url = os.getenv('FAST_API_PARCEL_NATURA2000_URL')
        # 'search' is a lookup radius/limit parameter -- assumption; confirm
        # against the FaST API documentation.
        data = requests.post(url, params={'search': '10000'}, json=lpis_parcel_ids).json()

        # Sort the results in the same order as the request
        # (DataLoader requires results aligned with the requested keys).
        sorting = {lpis_parcel_id: index for index, lpis_parcel_id in enumerate(lpis_parcel_ids)}
        data = sorted(data, key=lambda x: sorting[x['_id']])

        results = []
        for lpis_parcel_id, d in zip(lpis_parcel_ids, data):
            # one list of intersection objects per requested parcel
            result = []
            for n in d['natura2000']:
                if n is None:
                    continue

                # Create a real Country vertex
                country = Country.objects.filter(pk=n.get('country').upper()).get()
                released_at = datetime.strptime(n.get('releaseDate'), '%Y-%m-%d')

                # The feature that is intersecting
                natura2000_feature = Natura2000FeatureType(id=n.get('_id'),
                                                           site_code=n.get('siteCode'),
                                                           site_name=n.get('siteName'),
                                                           wkt_type=n.get('wktType'),
                                                           country=country,
                                                           released_at=released_at,
                                                           site_types=n.get('siteTypes'))

                # The intersection itself
                intersection = Natura2000IntersectionType(id=lpis_parcel_id + '.' + n.get('_id'),
                                                          intersects=n.get('intersects'),
                                                          minimum_distance=n.get('minDistance'),
                                                          intersection=n.get('intersection'),
                                                          natura2000_feature=natura2000_feature)
                result += [intersection]

            results += [result]

        return Promise.resolve(results)
natura2000_intersections_loader = Natura2000IntersectionLoader() | 40.146667 | 97 | 0.548655 | 2,719 | 0.903022 | 0 | 0 | 0 | 0 | 0 | 0 | 328 | 0.108934 |
21df65a1c8478ea8f6b221397a365d1de4254a1f | 3,548 | py | Python | Lib/test/test_cmath_jy.py | weimingtom/j2mepython-midp | 472333ebc6a7f06d92c5ede85c8ed55e4ad66c6d | [
"CNRI-Jython",
"PSF-2.0",
"Apache-2.0"
] | 1 | 2015-11-07T12:22:17.000Z | 2015-11-07T12:22:17.000Z | Lib/test/test_cmath_jy.py | weimingtom/j2mepython-midp | 472333ebc6a7f06d92c5ede85c8ed55e4ad66c6d | [
"CNRI-Jython",
"PSF-2.0",
"Apache-2.0"
] | null | null | null | Lib/test/test_cmath_jy.py | weimingtom/j2mepython-midp | 472333ebc6a7f06d92c5ede85c8ed55e4ad66c6d | [
"CNRI-Jython",
"PSF-2.0",
"Apache-2.0"
] | null | null | null | #! /usr/bin/env python
""" Simple test script for cmathmodule.c
Roger E. Masse
"""
import cmath
import unittest
from test import test_support
from test.test_support import verbose
# Module-level shorthands for the constants exercised below.
# (Python 2 test module: note the print statements.)
p = cmath.pi
e = cmath.e

if verbose:
    print 'PI = ', abs(p)
    print 'E = ', abs(e)
class CmathTestCase(unittest.TestCase):
    """Check cmath functions against precomputed reference values."""

    def assertAlmostEqual(self, x, y, places=5, msg=None):
        # Compare complex numbers component-wise (real, then imaginary).
        for got, want in ((x.real, y.real), (x.imag, y.imag)):
            unittest.TestCase.assertAlmostEqual(self, got, want, places, msg)

    def _check(self, func, expected, arg=complex(3, 4)):
        # Evaluate func(arg) and compare against the reference value.
        self.assertAlmostEqual(expected, func(arg))

    def test_acos(self):
        self._check(cmath.acos, complex(0.936812, -2.30551))

    def test_acosh(self):
        self._check(cmath.acosh, complex(2.30551, 0.93681))

    def test_asin(self):
        self._check(cmath.asin, complex(0.633984, 2.30551))

    def test_asinh(self):
        self._check(cmath.asinh, complex(2.29991, 0.917617))

    def test_atan(self):
        self._check(cmath.atan, complex(1.44831, 0.158997))

    def test_atanh(self):
        self._check(cmath.atanh, complex(0.11750, 1.40992))

    def test_cos(self):
        self._check(cmath.cos, complex(-27.03495, -3.851153))

    def test_cosh(self):
        self._check(cmath.cosh, complex(-6.58066, -7.58155))

    def test_exp(self):
        self._check(cmath.exp, complex(-13.12878, -15.20078))

    def test_log(self):
        self._check(cmath.log, complex(1.60944, 0.927295))

    def test_log10(self):
        self._check(cmath.log10, complex(0.69897, 0.40272))

    def test_sin(self):
        self._check(cmath.sin, complex(3.853738, -27.01681))

    def test_sinh(self):
        self._check(cmath.sinh, complex(-6.54812, -7.61923))

    def test_sqrt_real_positive(self):
        self._check(cmath.sqrt, complex(2, 1))

    def test_sqrt_real_zero(self):
        self._check(cmath.sqrt, complex(1.41421, 1.41421), complex(0, 4))

    def test_sqrt_real_negative(self):
        self._check(cmath.sqrt, complex(1, 2), complex(-3, 4))

    def test_sqrt_imaginary_zero(self):
        self._check(cmath.sqrt, complex(0.0, 1.73205), complex(-3, 0))

    def test_sqrt_imaginary_negative(self):
        self._check(cmath.sqrt, complex(1.0, -2.0), complex(-3, -4))

    def test_tan(self):
        self._check(cmath.tan, complex(-0.000187346, 0.999356))

    def test_tanh(self):
        self._check(cmath.tanh, complex(1.00071, 0.00490826))
def test_main():
    # Run the suite through the Jython/CPython regrtest helper so it
    # integrates with the standard test harness.
    test_support.run_unittest(CmathTestCase)

if __name__ == "__main__":
    test_main()
| 33.158879 | 78 | 0.55947 | 3,165 | 0.892052 | 0 | 0 | 0 | 0 | 0 | 0 | 108 | 0.03044 |
21df81c4df3e4f5c8e3d15ec62909fed82741b3f | 6,633 | py | Python | python/coffer/coins/impl/_segwittx.py | Steve132/wallet_standard | 09c909b24dc17cf6a0a433644d8f1912e886ab1c | [
"MIT"
] | null | null | null | python/coffer/coins/impl/_segwittx.py | Steve132/wallet_standard | 09c909b24dc17cf6a0a433644d8f1912e886ab1c | [
"MIT"
] | null | null | null | python/coffer/coins/impl/_segwittx.py | Steve132/wallet_standard | 09c909b24dc17cf6a0a433644d8f1912e886ab1c | [
"MIT"
] | null | null | null | from _satoshitx import *
import struct
#https://bitcoincore.org/en/segwit_wallet_dev/
class SWitnessTransaction(STransaction):
    """A BIP141 segregated-witness transaction: legacy fields plus the
    0x00 marker, a flag byte and one witness stack per input, serialized
    in the BIP144 format (https://bitcoincore.org/en/segwit_wallet_dev/).
    """

    # BUG FIX: ``self`` was missing from the parameter list, so calling the
    # constructor with six arguments raised TypeError and ``self`` inside
    # the body was an unresolved name.
    def __init__(self, version, flag, ins, outs, witness, locktime):
        super(SWitnessTransaction, self).__init__(version, ins, outs, locktime)
        self.flag = flag
        self.witness = witness  # one witness stack (list of byte items) per input

    def serialize(self):
        """Serialize in the BIP144 witness format (marker + flag +
        ins/outs + witness stacks + locktime)."""
        txo = self
        #if(not isinstance(txo,SWitnessTransaction) and isinstance(txo,STransaction)):
        #	return STransaction._sc_serialize(txo)
        out = bytearray()
        out += struct.pack('<L', txo.version)
        out += b'\x00'  # marker byte: distinguishes from a legacy tx
        out += struct.pack('B', txo.flag)
        out += SVarInt(len(txo.ins)).serialize()
        for inv in txo.ins:
            out += inv.serialize()
        out += SVarInt(len(txo.outs)).serialize()
        for ot in txo.outs:
            out += ot.serialize()
        if len(txo.witness) != len(txo.ins):
            raise Exception("Witness data not the same length as number of inputs")
        for wit in txo.witness:  # load witness data
            out += SVarInt(len(wit)).serialize()
            for wititem in wit:
                out += SVarInt(len(wititem)).serialize()
                out += wititem  # TODO: .serialize()
        out += struct.pack('<L', txo.locktime)
        return out

    @staticmethod
    def _sc_deserialize(sio):
        """Parse a witness transaction; falls back to legacy parsing when
        the marker (an input count of 0) is absent."""
        version = struct.unpack('<L', sio.read(4))[0]
        num_ins = SVarInt._sc_deserialize(sio)
        if num_ins != 0:  # this is not a witness transaction
            return STransaction._sc_deserialize(StringIO(sio.getvalue()))
        flag = ord(sio.read(1))
        num_ins = SVarInt._sc_deserialize(sio)
        ins = [SInput._sc_deserialize(sio) for k in range(num_ins)]
        num_outs = SVarInt._sc_deserialize(sio)
        outs = [SOutput._sc_deserialize(sio) for k in range(num_outs)]
        witness = []
        for _ in range(num_ins):
            num_wititems = SVarInt._sc_deserialize(sio)
            wititems = []
            for _ in range(num_wititems):
                witsize = SVarInt._sc_deserialize(sio)
                # BUG FIX: was ``wititmes.append(...)`` (typo), which raised
                # NameError as soon as a non-empty witness stack was parsed.
                wititems.append(sio.read(witsize))
            witness.append(wititems)
        locktime = struct.unpack('<L', sio.read(4))[0]
        return SWitnessTransaction(version, flag, ins, outs, witness, locktime)

    #TODO: from tx that calls coin.signature
    def txid_hash(self):
        # The txid excludes witness data: hash the legacy serialization
        # produced by the STransaction base class.
        return dblsha256(super(SWitnessTransaction, self).serialize())

    def wtxid_hash(self):
        # The wtxid commits to the full witness serialization.
        return dblsha256(self.serialize())
def segwit_get_prevouthash(stxo):
    """Double-SHA256 over the concatenated input outpoints (BIP143 hashPrevouts)."""
    buf = bytearray()
    for txin in stxo.ins:
        buf.extend(txin.outpoint.serialize())
    return dblsha256(buf)
"""template <class T>
uint256 GetPrevoutHash(const T& txTo)
{
CHashWriter ss(SER_GETHASH, 0);
for (const auto& txin : txTo.vin) {
ss << txin.prevout;
}
return ss.GetHash();
}"""
def segwit_get_sequencehash(stxo):
    """Double-SHA256 over all little-endian input nSequence fields (BIP143 hashSequence)."""
    buf = bytearray()
    for txin in stxo.ins:
        buf.extend(struct.pack('<L', txin.sequence))
    return dblsha256(buf)
"""template <class T>
uint256 GetSequenceHash(const T& txTo)
{
CHashWriter ss(SER_GETHASH, 0);
for (const auto& txin : txTo.vin) {
ss << txin.nSequence;
}
return ss.GetHash();
}"""
def segwit_get_outputshash(stxo):
    """Double-SHA256 over all serialized outputs (BIP143 hashOutputs)."""
    buf = bytearray()
    for txout in stxo.outs:
        buf.extend(txout.serialize())
    return dblsha256(buf)
"""template <class T>
uint256 GetOutputsHash(const T& txTo)
{
CHashWriter ss(SER_GETHASH, 0);
for (const auto& txout : txTo.vout) {
ss << txout;
}
return ss.GetHash();
}
"""
#TODO: segwit needs the right thing provided in script (redeemscript for p2sh or witness script or scriptPubKey for p2pkh)
#https://bitcoin.stackexchange.com/questions/57994/what-is-scriptcode
def segwit_preimage(stxo, script, input_index, nhashtype, amount=None):
    """Build the BIP143 (segwit v0) signature-hash preimage for one input.

    ``script`` is the scriptCode (the scriptPubKey for P2WPKH-style spends,
    the redeem/witness script for P2SH/P2WSH -- see the TODO above);
    ``amount`` defaults to the value of the output being spent.
    """
    hashPrevouts = b'\x00'*32
    hashSequence = b'\x00'*32
    hashOutputs = b'\x00'*32
    nhashtype = int(nhashtype)
    sho = SigHashOptions(nhashtype)
    # Bitcoin Core reference implementation, kept verbatim for comparison:
    """if (sigversion == SigVersion::WITNESS_V0) {
        uint256 hashPrevouts;
        uint256 hashSequence;
        uint256 hashOutputs;
        const bool cacheready = cache && cache->ready;

        if (!(nHashType & SIGHASH_ANYONECANPAY)) {
            hashPrevouts = cacheready ? cache->hashPrevouts : GetPrevoutHash(txTo);
        }

        if (!(nHashType & SIGHASH_ANYONECANPAY) && (nHashType & 0x1f) != SIGHASH_SINGLE && (nHashType & 0x1f) != SIGHASH_NONE) {
            hashSequence = cacheready ? cache->hashSequence : GetSequenceHash(txTo);
        }

        if ((nHashType & 0x1f) != SIGHASH_SINGLE && (nHashType & 0x1f) != SIGHASH_NONE) {
            hashOutputs = cacheready ? cache->hashOutputs : GetOutputsHash(txTo);
        } else if ((nHashType & 0x1f) == SIGHASH_SINGLE && nIn < txTo.vout.size()) {
            CHashWriter ss(SER_GETHASH, 0);
            ss << txTo.vout[nIn];
            hashOutputs = ss.GetHash();
        }"""
    if not sho.anyonecanpay:
        hashPrevouts = segwit_get_prevouthash(stxo)
    if not sho.anyonecanpay and sho.mode != SIGHASH_NONE and sho.mode != SIGHASH_SINGLE:
        hashSequence = segwit_get_sequencehash(stxo)
    if sho.mode != SIGHASH_SINGLE and sho.mode != SIGHASH_NONE:
        hashOutputs = segwit_get_outputshash(stxo)
    # BUG FIX: the Core reference above checks ``nIn < txTo.vout.size()``
    # (the *output* count); the original compared input_index against
    # len(stxo.ins), which could index past the end of stxo.outs.
    elif sho.mode == SIGHASH_SINGLE and input_index < len(stxo.outs):
        hashOutputs = dblsha256(stxo.outs[input_index].serialize())
    """
    CHashWriter ss(SER_GETHASH, 0);
    // Version
    ss << txTo.nVersion;
    // Input prevouts/nSequence (none/all, depending on flags)
    ss << hashPrevouts;
    ss << hashSequence;
    // The input being signed (replacing the scriptSig with scriptCode + amount)
    // The prevout may already be contained in hashPrevout, and the nSequence
    // may already be contain in hashSequence.
    ss << txTo.vin[nIn].prevout;
    ss << scriptCode;
    ss << amount;
    ss << txTo.vin[nIn].nSequence;
    // Outputs (none/one/all, depending on flags)
    ss << hashOutputs;
    // Locktime
    ss << txTo.nLockTime;
    // Sighash type
    ss << nHashType;
    return ss.GetHash();"""
    out = bytearray()
    out += struct.pack('<L', stxo.version)
    out += hashPrevouts
    out += hashSequence
    out += stxo.ins[input_index].outpoint.serialize()
    out += SVarInt(len(script)).serialize()
    out += script
    if amount is None:
        a = stxo.ins[input_index].prevout.value
    else:
        a = int(amount)
    out += struct.pack('<Q', a)
    out += struct.pack('<L', stxo.ins[input_index].sequence)
    out += hashOutputs
    out += struct.pack('<L', stxo.locktime)
    out += struct.pack('<L', sho.nhashtype)
    return out
def segwit_sighash(stxo, input_index, nhashtype, script=None, amount=None):
    """Double-SHA256 of the BIP143 preimage for one input.

    When ``script`` is omitted, the previous output's scriptPubKey is used
    as the scriptCode (the P2PKH/P2WPKH case).  NOTE(review): for P2SH
    spends whose redeemScript contains CHECKSIG, callers should pass the
    redeemScript from the input's scriptSig instead -- see the TODO above
    ``segwit_preimage``.
    """
    if script is None:
        script = stxo.ins[input_index].prevout.scriptPubKey
    return dblsha256(segwit_preimage(stxo, script, input_index, nhashtype, amount))
| 30.013575 | 192 | 0.692748 | 2,026 | 0.305442 | 0 | 0 | 831 | 0.125283 | 0 | 0 | 3,144 | 0.473994 |
21e120b1fbe5f797e5a435a8ef7fbefb53f97408 | 506 | py | Python | tests/buffered_recorder_atexit.py | peterdemin/awsme | 13a2566171ee0849973fabc6e1d45ba2cc8d496d | [
"MIT"
] | 15 | 2019-01-25T09:45:45.000Z | 2020-08-27T08:47:27.000Z | tests/buffered_recorder_atexit.py | peterdemin/awsme | 13a2566171ee0849973fabc6e1d45ba2cc8d496d | [
"MIT"
] | 62 | 2019-03-06T16:36:45.000Z | 2020-11-19T00:21:00.000Z | tests/buffered_recorder_atexit.py | peterdemin/awsme | 13a2566171ee0849973fabc6e1d45ba2cc8d496d | [
"MIT"
] | 2 | 2019-03-05T18:28:55.000Z | 2020-07-27T23:27:27.000Z | from __future__ import print_function
import datetime
from awsme.metric import Metric
from awsme.buffered_recorder import BufferedRecorder
from typing import List, Dict, Any # noqa
class StdoutRecorder:
    """Test double that "records" metric batches by printing them to stdout."""

    def put_metric_data(self, metric_data: List[Dict[str, Any]]) -> None:
        """Print the batch of metric dicts instead of sending it anywhere."""
        print(metric_data)
# Demo/regression script: buffer a single metric in a BufferedRecorder that
# wraps StdoutRecorder.  NOTE(review): the module name suggests the buffered
# metric is flushed (printed) by an atexit hook after "Exiting" is printed --
# confirm in awsme.buffered_recorder.
recorder = BufferedRecorder(recorder=StdoutRecorder())
recorder.put_metric(
    Metric(
        event_time=datetime.datetime.min,  # fixed timestamp keeps output deterministic
        name="1",
        dimensions={},
    )
)
print("Exiting")
| 22 | 73 | 0.72332 | 123 | 0.243083 | 0 | 0 | 0 | 0 | 0 | 0 | 18 | 0.035573 |
21e1875ee0a248959a24b6c0e30c5fce3d5f3121 | 11,309 | py | Python | src/unity/python/turicreate/toolkits/_feature_engineering/_transformer_chain.py | shreyasvj25/turicreate | 32e84ca16aef8d04aff3d49ae9984bd49326bffd | [
"BSD-3-Clause"
] | 2 | 2019-02-08T08:45:27.000Z | 2020-09-07T05:55:18.000Z | src/unity/python/turicreate/toolkits/_feature_engineering/_transformer_chain.py | shreyasvj25/turicreate | 32e84ca16aef8d04aff3d49ae9984bd49326bffd | [
"BSD-3-Clause"
] | 3 | 2022-02-15T04:42:24.000Z | 2022-03-12T01:05:15.000Z | src/unity/python/turicreate/toolkits/_feature_engineering/_transformer_chain.py | ZeroInfinite/turicreate | dd210c2563930881abd51fd69cb73007955b33fd | [
"BSD-3-Clause"
] | 1 | 2019-06-01T18:49:28.000Z | 2019-06-01T18:49:28.000Z | # -*- coding: utf-8 -*-
# Copyright © 2017 Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can
# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from __future__ import print_function as _
from __future__ import division as _
from __future__ import absolute_import as _
import turicreate as _tc
# GLC Utils.
from turicreate.toolkits._internal_utils import _toolkit_repr_print
from turicreate.toolkits._internal_utils import _precomputed_field
from turicreate.util import _raise_error_if_not_of_type
# Feature engineering utils.
from . import _internal_utils
from ._feature_engineering import TransformerBase as _TransformerBase
from ._feature_engineering import Transformer as _Transformer
from copy import copy as _copy
import inspect as _inspect
import sys as _sys
class TransformerChain(_TransformerBase):
    """
    Sequentially apply a list of transforms.
    Each of the individual steps in the chain must be transformers (i.e a child
    class of `TransformerBase`) which can be one of the following:
    - Native transformer modules in Turi Create (e.g.
      :py:class:`~turicreate.toolkits.feature_engineering._feature_hasher.FeatureHasher`).
    - User-created modules (defined by inheriting
      :py:class:`~turicreate.toolkits.feature_engineering._feature_engineering.TransformerBase`).

    Parameters
    ----------
    steps: list[Transformer]
        The list of transformers to be chained. A step in the chain can be
        another chain.

    See Also
    --------
    turicreate.toolkits.feature_engineering.create

    Examples
    --------
    .. sourcecode:: python

        # Create data.
        >>> sf = turicreate.SFrame({'a': [1,2,3], 'b' : [2,3,4]})

        # Create a chain of transformers.
        >>> from turicreate.feature_engineering import *
        >>> chain = turicreate.feature_engineering.create(sf,[
                        QuadraticFeatures(),
                        FeatureHasher()
                    ])

        # Create a chain of transformers with names for each of the steps.
        >>> chain = turicreate.feature_engineering.create(sf, [
                        ('quadratic', QuadraticFeatures()),
                        ('hasher', FeatureHasher())
                    ])

        # Transform the data.
        >>> transformed_sf = chain.transform(sf)

        # Save the transformer.
        >>> chain.save('save-path')

        # Access each of the steps in the transformer by name or index
        >>> steps = chain['steps']
        >>> steps = chain['steps_by_name']
    """
    # Serialization format version (see _get_version / _load_version).
    _TRANSFORMER_CHAIN_VERSION = 0
    def __init__(self, steps):
        """
        Parameters
        ----------
        steps: list[Transformer] | list[tuple(name, Transformer)]
            List of Transformers or (name, Transformer) tuples. These are
            chained in the order in which they are provided in the list.
        """
        # Basic type checking.
        _raise_error_if_not_of_type(steps, [list])
        # Split into (name, transformer) pairs. If the name is not present
        # then use the index as name.
        transformers = []
        index = 0
        for step in steps:
            if isinstance(step, tuple):
                name, tr = step
            else:
                tr = step
                name = index
            if isinstance(tr, list):
                # A nested list becomes a nested chain.
                tr = TransformerChain(tr)
            if not issubclass(tr.__class__, _TransformerBase):
                raise TypeError("Each step in the chain must be a Transformer.")
            transformers.append((name, tr))
            index = index + 1
        # Save into a dictionary for lookups by name and index.
        self._state = {}
        self._state["steps"] = steps
        self._state["steps_by_name"] = {}
        index = 0
        for name, tr in transformers:
            self._state["steps_by_name"][name] = tr
            index = index + 1
        # The transformers as (name, obj) tuple (used here for fitting
        # and transforming).
        self._transformers = transformers
    @staticmethod
    def _compact_class_repr(obj):
        """ A compact version of __repr__ for each of the steps.
        """
        dict_str_list = []
        post_repr_string = ""
        # If features are present, then shorten it.
        init_func = obj.__init__
        if _sys.version_info.major == 2:
            # Python 2 bound methods wrap the underlying function in __func__.
            init_func = init_func.__func__
        # NOTE(review): inspect.getargspec is deprecated (removed in newer
        # Python 3); getfullargspec would be needed there -- confirm the
        # supported Python versions.
        fields = _inspect.getargspec(init_func).args
        fields = fields[1:] # remove self
        if 'features' in fields:
            fields.remove('features')
            features = obj.get("features")
            if features is not None:
                post_repr_string = ' on %s feature(s)' % len(features)
        if 'excluded_features' in fields:
            fields.remove('excluded_features')
        # GLC transformers.
        if issubclass(obj.__class__, _Transformer):
            for attr in fields:
                dict_str_list.append("%s=%s" % (attr, obj.get(attr).__repr__()))
        # Chains
        elif obj.__class__ == TransformerChain:
            _step_classes = list(map(lambda x: x.__class__.__name__, obj.get('steps')))
            _steps = _internal_utils.pretty_print_list(
                _step_classes, 'steps', False)
            dict_str_list.append(_steps)
        # For user defined transformers.
        else:
            for attr in fields:
                dict_str_list.append("%s=%s" % (attr, obj.__dict__[attr]))
        return "%s(%s)%s" % (obj.__class__.__name__, ", ".join(dict_str_list),
                             post_repr_string)
    def _get_struct_summary(self):
        # Build the (sections, section_titles) pair consumed by
        # _toolkit_repr_print: one (name, compact repr) row per step.
        model_fields = []
        for name, tr in self._transformers:
            model_fields.append((name,
                                 _precomputed_field(self._compact_class_repr(tr))))
        sections = [model_fields]
        section_titles = ['Steps']
        return (sections, section_titles)
    def __repr__(self):
        (sections, section_titles) = self._get_struct_summary()
        return _toolkit_repr_print(self, sections, section_titles, width=8)
    @staticmethod
    def __get_steps_repr__(steps):
        # NOTE(review): the nested __repr__ below references `self` and
        # `model_fields`, neither of which is defined in its scope, so
        # calling the returned function would raise NameError.  This helper
        # appears to be dead/broken code -- confirm before relying on it.
        def __repr__(steps):
            for name, tr in self._transformers:
                model_fields.append((name,
                                     _precomputed_field(self._compact_class_repr(tr))))
            return _toolkit_repr_print(steps, [model_fields], width=8,
                                       section_titles = ['Steps'])
        return __repr__
    def _preprocess(self, data):
        """
        Internal function to perform fit_transform() on all but last step.
        """
        transformed_data = _copy(data)
        for name, step in self._transformers[:-1]:
            transformed_data = step.fit_transform(transformed_data)
            if type(transformed_data) != _tc.SFrame:
                raise RuntimeError("The transform function in step '%s' did not"
                                   " return an SFrame (got %s instead)." % (name,
                                                                            type(transformed_data).__name__))
        return transformed_data
    def fit(self, data):
        """
        Fits a transformer using the SFrame `data`.

        Parameters
        ----------
        data : SFrame
            The data used to fit the transformer.

        Returns
        -------
        None.  (NOTE(review): the original docstring claimed `self` is
        returned, but no return statement does so.)

        See Also
        --------
        transform, fit_transform

        Examples
        --------
        .. sourcecode:: python

            >> chain = chain.fit(sf)
        """
        if not self._transformers:
            return
        # fit_transform() everything but the last step, then fit the last.
        transformed_data = self._preprocess(data)
        final_step = self._transformers[-1]
        final_step[1].fit(transformed_data)
    def fit_transform(self, data):
        """
        First fit a transformer using the SFrame `data` and then return a transformed
        version of `data`.

        Parameters
        ----------
        data : SFrame
            The data used to fit the transformer. The same data is then also
            transformed.

        Returns
        -------
        Transformed SFrame.

        See Also
        --------
        transform, fit_transform

        Notes
        -----
        - The default implementation calls fit() and then calls transform().
          You may override this function with a more efficient implementation.

        Examples
        --------
        .. sourcecode:: python

            >> transformed_sf = chain.fit_transform(sf)
        """
        if not self._transformers:
            return self._preprocess(data)
        transformed_data = self._preprocess(data)
        final_step = self._transformers[-1]
        return final_step[1].fit_transform(transformed_data)
    def transform(self, data):
        """
        Transform the SFrame `data` using a fitted model.

        Parameters
        ----------
        data : SFrame
            The data to be transformed.

        Returns
        -------
        out: SFrame
            A transformed SFrame.

        See Also
        --------
        fit, fit_transform

        Examples
        --------
        .. sourcecode:: python

            >> my_tr = turicreate.feature_engineering.create(train_data, MyTransformer())
            >> transformed_sf = my_tr.transform(sf)
        """
        transformed_data = _copy(data)
        for name, step in self._transformers:
            transformed_data = step.transform(transformed_data)
            if type(transformed_data) != _tc.SFrame:
                raise TypeError("The transform function in step '%s' did not return"
                                " an SFrame." % name)
        return transformed_data
    def _list_fields(self):
        """
        List the model's queryable fields.

        Returns
        -------
        out : list
            Each element in the returned list can be queried with the ``get``
            method.
        """
        return list(self._state.keys())
    def _get(self, field):
        """
        Return the value contained in the model's ``field``.

        Parameters
        ----------
        field : string
            Name of the field to be retrieved.

        Returns
        -------
        out
            Value of the requested field.
        """
        try:
            return self._state[field]
        except:
            raise ValueError("There is no model field called {}.".format(field))
    def __getitem__(self, key):
        # NOTE(review): delegates to self.get(), which is not defined in this
        # class (only _get is) -- presumably inherited from _TransformerBase;
        # confirm, otherwise indexing would raise AttributeError.
        return self.get(key)
    def _get_version(self):
        # Version number stored alongside serialized chains.
        return self._TRANSFORMER_CHAIN_VERSION
    @classmethod
    def _load_version(cls, unpickler, version):
        """
        An function to load an object with a specific version of the class.

        Parameters
        ----------
        pickler : file
            A GLUnpickler file handle.
        version : int
            A version number as maintained by the class writer.
        """
        obj = unpickler.load()
        return TransformerChain(obj._state["steps"])
| 31.240331 | 97 | 0.571934 | 10,448 | 0.923784 | 0 | 0 | 2,412 | 0.213263 | 0 | 0 | 5,819 | 0.5145 |
21e32b8735c2ff78ed3df5a83ce3ab8fa9c5647e | 729 | py | Python | FndngTeam.py | aveepsit/SnackDown19-Qualifier | c6037caca4ba38b9ab98076160118a999c1cc84b | [
"MIT"
] | null | null | null | FndngTeam.py | aveepsit/SnackDown19-Qualifier | c6037caca4ba38b9ab98076160118a999c1cc84b | [
"MIT"
] | null | null | null | FndngTeam.py | aveepsit/SnackDown19-Qualifier | c6037caca4ba38b9ab98076160118a999c1cc84b | [
"MIT"
] | null | null | null | for testcase in range(int(input())):
n = int(input())
dict = {}
comb = 1
m = (10**9)+7
for x in input().split():
no = int(x)
try:
dict[no] = dict[no] + 1
except:
dict[no] = 1
dict = list(dict.items())
dict.sort(key=lambda x: x[0], reverse=True)
dict = [x[1] for x in dict]
for ind in range(len(dict)):
if dict[ind]==0:
continue
if (dict[ind]%2==0):
for j in range(dict[ind]-1,2,-2):
comb = (comb*j) % m
else:
for j in range(dict[ind],2,-2):
comb = (comb*j) % m
comb = (comb*dict[ind+1]) % m
dict[ind+1] -= 1
print(comb)
| 22.78125 | 47 | 0.429355 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
21e55d19a6492e7090b8c9255be1b0cd0bb51197 | 709 | py | Python | ccal/read_correlate_copynumber_vs_mrnaseq.py | kberkey/ccal | 92aa8372997dccec2908928f71a11b6c8327d7aa | [
"MIT"
] | 9 | 2017-10-09T16:54:58.000Z | 2018-12-14T19:49:03.000Z | ccal/read_correlate_copynumber_vs_mrnaseq.py | kberkey/ccal | 92aa8372997dccec2908928f71a11b6c8327d7aa | [
"MIT"
] | 8 | 2017-03-11T04:43:04.000Z | 2018-12-10T09:47:14.000Z | ccal/read_correlate_copynumber_vs_mrnaseq.py | kberkey/ccal | 92aa8372997dccec2908928f71a11b6c8327d7aa | [
"MIT"
] | 4 | 2017-03-10T19:12:28.000Z | 2022-01-02T21:11:40.000Z | from tarfile import open as tarfile_open
from pandas import read_csv
def read_correlate_copynumber_vs_mrnaseq(tar_gz_file_path, genes):
    """Read a copynumber-vs-mRNAseq correlation result archive.

    From the tar.gz archive, the member ending in "qa.txt" provides the
    number of common samples (row 'sample', column 'comm') and the member
    ending in "cors.txt" provides per-gene correlations (column 'cor',
    indexed by the second column).

    Returns the pair (n_samples, {gene: correlation}) restricted to *genes*.
    Raises IndexError if either member is missing from the archive.
    """
    with tarfile_open(tar_gz_file_path) as archive:
        qa_member = [m for m in archive if m.name.endswith("qa.txt")][0]
        qa = read_csv(archive.extractfile(qa_member), sep="\t", index_col=0)
        n = qa.loc["sample", "comm"]
        cors_member = [m for m in archive if m.name.endswith("cors.txt")][0]
        cors = read_csv(archive.extractfile(cors_member), sep="\t", index_col=1)
    return n, cors.loc[genes, "cor"].to_dict()
| 26.259259 | 88 | 0.568406 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 45 | 0.06347 |
21e57118b1cdc8c15de5498103799297ecf434fd | 2,525 | py | Python | get_ships.py | ndujar/vessel-locator | 5feff371935e40c2aa22d95c50b9b458ab954dea | [
"MIT"
] | null | null | null | get_ships.py | ndujar/vessel-locator | 5feff371935e40c2aa22d95c50b9b458ab954dea | [
"MIT"
] | null | null | null | get_ships.py | ndujar/vessel-locator | 5feff371935e40c2aa22d95c50b9b458ab954dea | [
"MIT"
] | null | null | null | #module import
import urllib.request
from bs4 import BeautifulSoup
from datetime import datetime
def get_ships(imo_list):
    """Scrape vesselfinder.com for every IMO number in *imo_list*.

    Returns a list of (lat, lng, name, time) tuples, where lat/lng are the
    text of the page's coordinate divs and name/time come from the detail
    table.  Requires network access to vesselfinder.com.
    NOTE(review): `name` and `time` are only bound when the matching table
    rows exist on the page -- a page without them raises NameError/UnboundLocalError.
    """
    # Browser-like headers; sites commonly reject the default urllib user agent.
    hdr = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
       'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
       'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
       'Accept-Encoding': 'none',
       'Accept-Language': 'en-US,en;q=0.8',
       'Connection': 'keep-alive'}
    items = []
    for IMO in imo_list:
        print(IMO)
        # NOTE(review): the 'VOS-TRAVELLER' slug is hard-coded for every IMO;
        # presumably the site resolves the vessel by the IMO suffix -- confirm.
        url = r'https://www.vesselfinder.com/en/vessels/VOS-TRAVELLER-IMO-' + str(IMO)
        req = urllib.request.Request(url, None, hdr)
        with urllib.request.urlopen(req) as response:
            the_page = response.read()
        parsed_html = BeautifulSoup(the_page, features="lxml")
        tables = parsed_html.findAll("table")
        coords = "0,0"
        # Walk only top-level tables, pulling out the labelled rows.
        for table in tables:
            if table.findParent("table") is None:
                for row in table.findAll('tr'):
                    aux = row.findAll('td')
                    try:
                        if aux[0].string == "Coordinates":
                            coords = aux[1].string
                        if aux[0].string == "Vessel Name":
                            name = aux[1].string
                        if aux[0].string == "Position received":
                            print(aux[1].get("data-title"))
                            time = datetime.strptime(aux[1].get("data-title"), '%b %d, %Y %H:%M %Z')
                            print(time)
                    except:
                        # Rows without two <td> cells (or unparsable dates) land here.
                        print("strange table found")
        lat = parsed_html.find('div', class_ = "coordinate lat").string
        lng = parsed_html.find('div', class_ = "coordinate lon").string
        # name = parsed_html.find('td', class_ = "title").string
        # time = parsed_html.find('td', class_ = 'v3 ttt1 valm0').string
        coordsSplit = coords.split("/")  # leftover input for the commented-out DMS parsing below; currently unused
        # def dms2dd(degrees,direction):
        #     dd = float(degrees) ;
        #     if direction == 'S' or direction == 'W':
        #         dd *= -1
        #     return dd
        # def parse_dms(dms):
        #     parts = re.split(' ', dms)
        #     lat = dms2dd(parts[0], parts[1])
        #     return lat
        # lat = parse_dms(coordsSplit[0])
        # lng = parse_dms(coordsSplit[1])
        items.append((lat, lng, name, time))
    return items
| 42.083333 | 132 | 0.513267 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,013 | 0.401188 |
21e723f45f87fc926e72e1075bacd22832327764 | 715 | py | Python | scripts/convert_0.0_to_0.1.py | codecraftingtools/hildegard | cc658ab4972dfaf67e995c797d0493a5d82a611f | [
"MIT"
] | null | null | null | scripts/convert_0.0_to_0.1.py | codecraftingtools/hildegard | cc658ab4972dfaf67e995c797d0493a5d82a611f | [
"MIT"
] | null | null | null | scripts/convert_0.0_to_0.1.py | codecraftingtools/hildegard | cc658ab4972dfaf67e995c797d0493a5d82a611f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2020 Jeffrey A. Webb
import sys
f = open(sys.argv[1])
for line in f:
s = line.strip()
if s.startswith("source:"):
id = s.split(":")[-1].strip()
indent = ' '*line.index("source")
sys.stdout.write(f"{indent}source:\n")
sys.stdout.write(f"{indent} - Endpoint:\n")
sys.stdout.write(f"{indent} connector: {id}\n")
elif s.startswith("sink:"):
id = s.split(":")[-1].strip()
indent = ' '*line.index("sink")
sys.stdout.write(f"{indent}sink:\n")
sys.stdout.write(f"{indent} - Endpoint:\n")
sys.stdout.write(f"{indent} connector: {id}\n")
else:
sys.stdout.write(line)
| 29.791667 | 60 | 0.548252 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 258 | 0.360839 |
21e735d47835a51bb81e229f9598da175bfa76cf | 10,093 | py | Python | bin/v0eval.py | m-takeuchi/ilislife_wxp | f243431da2852a6e8dc5fd0e1d68bc9220944f96 | [
"MIT"
] | null | null | null | bin/v0eval.py | m-takeuchi/ilislife_wxp | f243431da2852a6e8dc5fd0e1d68bc9220944f96 | [
"MIT"
] | null | null | null | bin/v0eval.py | m-takeuchi/ilislife_wxp | f243431da2852a6e8dc5fd0e1d68bc9220944f96 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# coding: utf-8
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import gridspec
import datetime, time
from scipy.signal import savgol_filter
from scipy.interpolate import interp1d, Akima1DInterpolator, PchipInterpolator
from sys import platform
import warnings
warnings.filterwarnings(action="ignore", module="scipy", message="^internal gelsd")  # silence scipy's benign 'internal gelsd' LAPACK warning
# %matplotlib inline
# Rprotect = 10e6 #ohm
Rs = 100e3 #ohm -- shunt resistor; measured Ig voltages are divided by Rs to obtain currents (see V0estimate)
def Ve_correct(Ve, Ig, Rprotect):
    """Return the voltage actually applied to the emitter.

    The supply voltage *Ve* is reduced by the drop Ig*Rprotect across the
    protection resistor.
    """
    return Ve - Ig * Rprotect
def mydate(str_date):
    """Convert a 'yymmdd-HH:MM:SS[.frac]' timestamp string to epoch seconds.

    Anything after the first '.' (fractional seconds) is discarded; the
    conversion uses the local timezone via time.mktime.
    """
    stamp = str_date.split('.')[0]
    parsed = datetime.datetime.strptime(stamp, "%y%m%d-%H:%M:%S")
    return time.mktime(parsed.timetuple())
def timeh(sec):
    """Convert a duration in seconds to hours."""
    hours = sec / 3600.0
    return hours
def get_data_old(datafile):
    """Load an old-format measurement file (emitter no.6 and before).

    The file is tab-separated with columns date/Ve/Ig/Ic/P/IVno and '#'
    comment lines.  A 'time' column (seconds elapsed since the first row,
    derived from the 'date' strings via mydate()) is inserted right after
    'date'.

    Parameters
    ----------
    datafile : str
        Path to the tab-separated data file.

    Returns
    -------
    pandas.DataFrame
        Columns ordered as ['date', 'time', 'Ve', 'Ig', 'Ic', 'P', 'IVno'].
    """
    # BUGFIX: the original opened the misspelled name `datafiel` (NameError)
    # and referenced `f.readline` without calling it.
    with open(datafile, 'r') as f:
        header = f.readline()  # first line read and discarded
    data = pd.read_csv(datafile, delimiter='\t', comment='#',
                       names=['date', 'Ve', 'Ig', 'Ic', 'P', 'IVno'],
                       dtype={'date': 'object', 'Ve': 'float64', 'Ig': 'float64',
                              'Ic': 'float64', 'P': 'float64'})
    # Convert the date strings into elapsed time (seconds) since the first row.
    tmpdate = data['date'].apply(lambda x: x.split('.')[0])
    t0 = mydate(tmpdate[0])
    data['time'] = tmpdate.apply(lambda x: mydate(x) - t0)
    # Reorder columns so 'time' sits right after 'date'.
    cols = data.columns.tolist()
    cols = cols[0:1] + cols[-1:] + cols[1:-1]
    return data[cols]
def get_hdf(datafile):
    """Read a pandas object previously stored in an HDF5 store at *datafile*."""
    frame = pd.read_hdf(datafile)
    return frame
def prepare_data(datafile, oldtype=False):
    """Load a measurement file (.dat or .hdf5/.h5) and drop abnormal rows.

    Rows where |Ig| or |Ic| exceeds 5 are discarded.  Returns the pair
    (filtered DataFrame, comment/header string).
    NOTE(review): the oldtype .dat branch never assigns `cmt`, so returning
    it would raise NameError -- confirm intended usage.
    """
    extension = datafile.rsplit('.')[-1]
    basename = datafile.rsplit('.')[0]  # kept for parity with the original (unused)
    if extension == 'dat':
        if not oldtype:
            # New format: first line is a comment header, the rest is a
            # tab-separated table that already contains the 'time' column.
            with open(datafile, 'r') as handle:
                cmt = handle.readline()
            data = pd.read_csv(
                datafile, delimiter='\t', comment='#',
                names=['date', 'time', 'Ve', 'Ig', 'Ic', 'P', 'IVno'],
                dtype={'Ve': 'float64', 'Ig': 'float64',
                       'Ic': 'float64', 'P': 'float64'})
        else:
            data = get_data_old(datafile)
    elif extension in ('hdf5', 'h5'):
        import h5py
        with h5py.File(datafile, 'r') as hf:
            cmt = hf.get('comment').value.decode('utf-8')
            data = pd.read_hdf(datafile, key='data')
    # Discard rows whose gate/collector readings are clearly abnormal.
    abnormal = (data['Ig'].abs() > 5e+0) | (data['Ic'].abs() > 5e+0)
    data = data[~abnormal]
    return data, cmt
def V0estimate(DataFrame, Rprotect, IVno=1, NoiseLevel=1e-4):
    """Estimate the threshold (onset) voltage V0 of one I-V sweep.

    DataFrame  : full measurement table (needs date/IVno/Ve/Ig/Ic columns).
    Rprotect   : protection resistor (ohm); its voltage drop is subtracted from Ve.
    IVno       : which I-V sweep to analyse.
    NoiseLevel : current-signal threshold (shunt voltage, V) that defines V0.

    Returns (per-sweep DataFrame, V0, elapsed hours since the first sample,
    interpolated [sqrt(V), ln(I)] array).
    """
    import scipy.optimize as so
    # function to fit
    def func(x, a, b):
        return a*x + b
    i=IVno
    df = DataFrame[DataFrame['IVno']== i ][['date','IVno','Ve','Ig','Ic']].drop_duplicates()
    ix_ini = df[df['Ve'] == 0].index[0] # first row with IVno == i and Ve == 0 marks the sweep start
    # NOTE(review): DataFrame.ix is removed in pandas >= 1.0; .loc would be needed there.
    df = df.ix[ix_ini:] # drop stray rows recorded before the sweep start
    # print(df)
    V = Ve_correct(df['Ve'], df['Ig']/Rs, Rprotect) # subtract the drop across Rprotect (Ig is a shunt voltage, hence /Rs)
    df['V'] = V
    df['I_raw'] = df['Ig']+df['Ic'] # total current (signed)
    df['I'] = np.abs(df['Ig']+df['Ic']) # absolute value of the total current
    # print(df)
    # print(DataFrame['date'][0])
    # print(df['date'].iloc[0])
    hour = timeh( mydate(df['date'].iloc[0])- mydate(DataFrame['date'][0]) )
    # ### Legacy method (kept for reference): V0 from a ln(I) vs V**0.5 straight-line fit
    # Vlow = 1000 # lower V bound used for the V0 determination
    # Ilow = 2e-5 # lower I (shunt resistor voltage) bound used for the V0 determination
    # xdata = df[(df['I'] >= Ilow) & (df['V'] >= Vlow)]['Ve'].values**0.5
    # ydata = np.log(df[(df['I'] >= Ilow) & (df['V'] >= Vlow)]['I'])
    # ### initial guess for the parameters
    # parameter_initial = np.array([0.0, 0.0]) #a, b
    # parameter_optimal, covariance = so.curve_fit(func, xdata, ydata, p0=parameter_initial)
    # y = func(xdata,parameter_optimal[0],parameter_optimal[1])
    # ### with ln(current) vs sqrt(voltage), intersect y = NoiseLevel with y = a*x+b
    # a = parameter_optimal[0]
    # b = parameter_optimal[1]
    # c = np.log(NoiseLevel)
    # A = np.array([[a, -1], [0, 1]]) # LHS coefficients of a*x - y = -b and 0*x + y = c
    # P = np.array([-b,c]) # RHS coefficients
    # X = np.linalg.solve(A,P) # solve the linear system
    # V0= X[0]**2
    ### Current method: smoothing -> interpolation -> NoiseLevel threshold
    window = 3
    df['I_savgol'] = savgol_filter(df['I'], window, polyorder=1) # smooth with a Savitzky-Golay filter
    ## convert to ln(y) vs. (V**0.5)
    df['x'] = df['V'].values**0.5
    df['y'] = np.log(df['I_savgol'].values)
    df=df.dropna()
    f = interp1d(df['x'].values, df['y'].values, kind='linear') # interpolation of ln(I) over sqrt(V)
    x_new = np.linspace(df['x'].min(), df['x'].max(), num=1001) # 1000 subdivisions between the min and max voltage
    xy_new = np.c_[x_new, f(x_new)] # array combining x_new and f(x_new)
    # print(df['x'])
    V0 = xy_new[xy_new[:,1] <= np.log(NoiseLevel)][-1,0]**2 # last point still at/below the noise level
    # print(V0**0.5, V0)
    return df, V0, hour, xy_new#, a, b,
def Jsc(V, M, d):
    """Child-Langmuir space-charge-limited current density.

    V : gap voltage (V); M : ion mass in atomic mass units; d : gap length (m).
    """
    import scipy.constants as sc
    ion_mass = M * sc.atomic_mass
    prefactor = (4.0/9.0) * sc.epsilon_0 * (2 * sc.elementary_charge / ion_mass) ** 0.5
    return prefactor * V ** (3.0/2) / d ** 2
def V0batch(DataFrame, Rprotect, IVno=1, NoiseLevel = 1e-4, window=0):
    """Run V0estimate over one sweep or all sweeps.

    With IVno == 0 every sweep is processed and a list of
    [sweep no., elapsed hours, V0] rows is returned; otherwise only the
    requested sweep is evaluated and diagnostic plots are shown (returns None).
    NOTE(review): the `window` parameter is accepted but never used here.
    """
    if IVno == 0: # IVno == 0: output V0 (and elapsed time) for every I-V measurement
        IVno = DataFrame['IVno'].max()
        output = []
        for i in range(1,IVno+1):
            # df, V0, hour, xy_new, a, b = V0estimate(DataFrame, Rprotect, i, NoiseLevel)
            df, V0, hour, xy_new = V0estimate(DataFrame, Rprotect, i, NoiseLevel)
            # print("{0:d}\t{1:f}".format(i,V0))
            print("{0:d}\t{1:f}\t{2:f}".format(i,hour,V0))
            output.append([i, hour, V0])
        return output
    else: # otherwise: compute V0 for the requested sweep and plot it
        i=IVno
        # df, V0, hour, xy_new, a, b = V0estimate(DataFrame, Rprotect, i, NoiseLevel)
        df, V0, hour, xy_new = V0estimate(DataFrame, Rprotect, i, NoiseLevel)
        print("{0:d}\t{1:f}".format(i,V0))
        fig = plt.figure(figsize=(10,5))
        # plt.plot(df['V'], df['I'], 'b-')
        # plt.vlines(V0,ymin=0,ymax=df['I'].max(), linestyles='dashed')
        # plt.hlines(NoiseLevel,xmin=0,xmax=df['V'].max(), linestyles='dashed')
        ax1 = fig.add_subplot(121)
        ax2 = fig.add_subplot(122)
        # plt.yscale("log")
        # plt.plot((df['V'])**0.5, df['I'], 'bs')
        # plt.plot(xy_new[:,0], np.e**xy_new[:,1], 'g-')
        # plt.hlines(NoiseLevel,xmin=0,xmax=(df['V'].max())**0.5, linestyles='dashed')
        # plt.vlines(V0**0.5, ymin=df['I'].min(), ymax=df['I'].max(), linestyles='dashed')
        # plt.xlabel(r"Squre root voltage (V$^{0.5}$)")
        # plt.ylabel("Log10 for shunt voltage")
        # Left panel: ln-scaled current vs sqrt(V) with the noise level and V0 marked.
        ax1.set_aspect('1.0')
        ax1.set_yscale("log")
        ax1.plot((df['V'])**0.5, df['I'], 'bs')
        ax1.plot(xy_new[:,0], np.e**xy_new[:,1], 'g-')
        ax1.hlines(NoiseLevel,xmin=0,xmax=(df['V'].max())**0.5, linestyles='dashed')
        ax1.vlines(V0**0.5, ymin=df['I'].min(), ymax=df['I'].max(), linestyles='dashed')
        ax1.set_xlabel(r"Squre root voltage (V$^{0.5}$)")
        ax1.set_ylabel("Log10 for shunt voltage (V)")
        # Right panel: log-log I-V for V >= 100 V.
        ax2.set_aspect('equal')
        ax2.set_xscale("log")
        ax2.set_yscale("log")
        ax2.plot(df[df['V']>=100]['V'], df[df['V']>=100]['I'], 'bs')
        ax2.set_xlabel("Log10 for voltage (V)")
        ax2.set_ylabel("Log10 for shunt voltage (V)")
        # plt.plot((df['V'])**0.5, Jsc(df['V'], 66, 0.5e-3)*1e-0/100e3, 'm-')
        if platform == "linux" or platform == "linux2":
            plt.show(block=True)
            plt.show()
        else:
            plt.draw()
            plt.pause(1)
            input("<Hit Enter To Close>")
        plt.close(fig)
__doc__ = """{f}
Usage:
{f} [ -o | --oldtype] [-i | --ivno=<num>] [-w | --window=<odd>] [-n | --noiselevel=<volt>] [-r | --rprotect=<ohm>] DATFILE
{f} -h | --help
Options:
-h --help Show this screen and exit.
-o --oldtype Spesify dat file is formated with old type
-i --ivno=<num> Specify no. of i-v. Default=None
-n --noiselevel=<volt> Specify noise level for Ig in (V). Default=2e-5
-r --rprotect=<ohm> Specify resistor Rprotect in (ohm). Default=10e6
""".format(f=__file__)
def main():
    """Command-line entry point: parse docopt arguments, load the data file
    and run the V0 batch evaluation; for IVno == 0 also plot and save the
    V0-vs-time results next to the input file.
    """
    # start = time.time()
    from docopt import docopt
    args = docopt(__doc__)
    oldtype = args["--oldtype"]
    IVno = 0 if args["--ivno"] == [] else int(args["--ivno"][0])
    noise = 1e-4 if args["--noiselevel"] == [] else float(args["--noiselevel"][0])
    Rprotect = 10e6 if args["--rprotect"] == [] else float(args["--rprotect"][0])
    datafile = args["DATFILE"]
    start = time.time()
    data,cmt = prepare_data(datafile, oldtype) # load the data file as a pandas DataFrame
    # elapsed_time = time.time() - start
    # print("elapsed_time:{0}".format(elapsed_time) + "[sec]")
    output = V0batch(data, Rprotect, IVno, noise) # run V0batch and keep the result in output
    if IVno == 0:
        # Derive the output file names from the input file name.
        ext = datafile.rsplit('.')[-1]
        base = datafile.rsplit('.')[0]
        outfile = base+'_v0.dat'
        pdffile = base+'_v0.pdf'
        svgfile = base+'_v0.svgz'
        head = "".join(cmt)+str(args)+'\nIVno\tt(hour)\tVth(V)'
        a = np.array(output)
        # Plot V0 (threshold voltage) versus elapsed time and save as PDF.
        plt.title(cmt)
        plt.xlabel('Time (h)')
        plt.ylabel(r'V$_{th}$ (V)')
        plt.plot(a[:,1], a[:,2], 'bo-')
        plt.show(block=False)
        plt.savefig(pdffile)
        # plt.savefig(svgfile)
        input("<Hit Enter To Close>")
        # with open(outfile, 'w') as f:
        #     f.write("".join(cmt))
        #     f.writelines(output)
        # np.savetxt(outfile, a, fmt=['%i','%.2f','%.2e'], header=head, delimiter='\t')
        np.savetxt(outfile, a, fmt=['%i','%.2f', '%.2f'], header=head, delimiter='\t')
    # print(V0out)
    # print("{0} is created.".format(IVno, V0Out))
    # print("Total charge (C): {0:.3e}".format(tf))
# Script entry point.
if __name__ == '__main__':
    # start = time.time()
    main()
    # elapsed_time = time.time() - start
    # print("Elapsed_time:{0}".format(elapsed_time) + "[sec]")
| 36.046429 | 197 | 0.563064 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,068 | 0.474576 |
21e7aa187114df23f75c99353a77c5e6edd5021a | 1,067 | py | Python | CoTeTo/CoTeTo/import_file.py | EnEff-BIM/EnEffBIM-Framework | 6328d39b498dc4065a60b5cc9370b8c2a9a1cddf | [
"MIT"
] | 3 | 2016-05-30T15:12:16.000Z | 2022-03-22T08:11:13.000Z | CoTeTo/CoTeTo/import_file.py | EnEff-BIM/EnEffBIM-Framework | 6328d39b498dc4065a60b5cc9370b8c2a9a1cddf | [
"MIT"
] | 21 | 2016-06-13T11:33:45.000Z | 2017-05-23T09:46:52.000Z | CoTeTo/CoTeTo/import_file.py | EnEff-BIM/EnEffBIM-Framework | 6328d39b498dc4065a60b5cc9370b8c2a9a1cddf | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import sys
if sys.version_info >= (3, 3):
import importlib
def import_file(module_path='', module=''):
importlib.invalidate_caches()
if module in sys.modules:
del sys.modules[module]
sys.path.insert(0, module_path)
loader = importlib.find_loader(module)
del sys.path[0]
m = loader.load_module(module)
return m
elif sys.version_info >= (2, 7):
def import_file(module_path='', module=''):
if module in sys.modules:
del sys.modules[module]
sys.path.insert(0, module_path)
m = __import__(module)
del sys.path[0]
return m
else:
raise NotImplementedError('This modules functions are not implemented for python <2.7')
if __name__ == '__main__':
# test this module with path and module name as arguments
# will print modules namespace (without __builtins__)
import pprint
p, n = sys.argv[1:]
m = import_file(p, n)
d = m.__dict__
del d['__builtins__']
pprint.pprint(d)
| 23.711111 | 91 | 0.622306 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 224 | 0.209934 |
21e807b163ce5ea32bc9f1fcfc02d9c78081538f | 3,411 | py | Python | sudeste/solicitacao/views.py | danielcamilo13/sudeste | be877c8dec07cfca84ebdc15b275a58aa29af98a | [
"bzip2-1.0.6"
] | 1 | 2019-10-15T19:43:22.000Z | 2019-10-15T19:43:22.000Z | sudeste/solicitacao/views.py | danielcamilo13/sudeste | be877c8dec07cfca84ebdc15b275a58aa29af98a | [
"bzip2-1.0.6"
] | 6 | 2020-06-05T23:48:29.000Z | 2022-02-10T09:32:01.000Z | sudeste/solicitacao/views.py | danielcamilo13/sudeste | be877c8dec07cfca84ebdc15b275a58aa29af98a | [
"bzip2-1.0.6"
] | null | null | null | from django.shortcuts import render,get_object_or_404
from django.http import HttpResponseRedirect,HttpResponse
from cadastro.models import tipocacamba
from solicitacao.models import ordemServico
from .forms import pedidosForm,opcoesForm,textoForm,statusForm
import time
from django.utils import timezone
from datetime import datetime
def index(request):
return render(request,'solicitacao/index.html',{})
def pedidos(request):
return render(request,'solicitacao/pedidos.html',{})
def pedidosDetalhe(request):
#neworder = get_object_or_404(orderInstance,pk=pk)
neworder = ordemServico.objects.all()
usr = request.user.pk
ses = request.session
print(usr)
print(ses)
if request.method=='POST':
selecionado = request.POST['selecionar_opcoes']
localizado=''
dia = timezone.now
if selecionado == 'cacamba':
localizado ={'retorno':'retorno de cacamba'}
nros = time.time()
form = opcoesForm(initial={'nrOS':nros,'nmCliente':request.user,'dia':dia,'tpsolicitacao':'cacamba'})
return render(request,'solicitacao/pedidos.html',{'localizado':localizado,'form':form})
elif selecionado == 'retirar':
form = textoForm(initial={'dia':dia,'tpsolicitacao':'retirar'})
return render(request,'solicitacao/pedidos.html',{'localizado':localizado,'form':form})
else:
form = statusForm(initial={'dataInicio':dia,'dataFim':dia,'tpsolicitacao':'retirar'})
return render(request,'solicitacao/pedidos.html',{'localizado':localizado,'form':form})
else:
form = {'chave vazio':'valor vazio'}
return render(request,'solicitacao/pedidos.html',{'localizado':localizado,'form':form})
def confirmacao(request):
if request.method=='POST':
d = request.POST['dia_day']
m = request.POST['dia_month']
a = request.POST['dia_year']
dia_join = str(a)+'/'+str(m)+'/'+str(d)
dia_valor = datetime.strptime(dia_join,'%Y/%m/%d')
if request.POST['tpsolicitacao']=='cacamba':
os = request.POST['nrOS']
cli = request.POST['nmCliente']
loc = request.POST['localizacao']
contexto={'os':os,'cli':cli,'loc':loc,'dia_valor':dia_valor}
ordemServico.objects.create(nrOS=os,dtSaida=dia_valor,nmCliente=cli)
ordemServico.save
elif request.POST['tpsolicitacao']=='retirar':
contexto = {'dia_valor':dia_valor}
else:
contexto = {'dia_valor':dia_valor}
return render(request,'solicitacao/confirmacao.html',{'request':request,'contexto':contexto})
def opcoes(request):
selecionado = ''; pedidos=''
if 'select_opcoes' in request.POST:
selecionado = request.POST['select_opcoes']
if selecionado=='cacamba':
pedidos = tipocacamba.objects.values('tpCacamba')
pedidos = list(pedidos)
pedidos+=[{'quantidade':'quantidade'}]
elif selecionado=='retirar':
pedidos = ({'tpCacamba':'valor1'},{'tpCacamba':'valor2'},{'tpCacamba':'valor3'})
elif selecionado=='estado':
pedidos = ({'tpCacamba':'estado1'},{'tpCacamba':'estado2'},{'tpCacamba':'estado3'})
return render(request,'solicitacao/index.html',context={'pedidos':pedidos,'selecionado':selecionado})
def gravar(request):
return HttpResponse('Gravado')
| 43.730769 | 113 | 0.652301 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 989 | 0.289944 |
21e82100cdf7b246d3b2ede27f4e43fbcde2d2b1 | 8,879 | py | Python | flows.py | Privacy-Police/Differential-Privacy | c9ac91bf478c00af2ac732bc815ba1ee2fa7e6e5 | [
"MIT"
] | null | null | null | flows.py | Privacy-Police/Differential-Privacy | c9ac91bf478c00af2ac732bc815ba1ee2fa7e6e5 | [
"MIT"
] | null | null | null | flows.py | Privacy-Police/Differential-Privacy | c9ac91bf478c00af2ac732bc815ba1ee2fa7e6e5 | [
"MIT"
] | null | null | null | import math
import types
import numpy as np
import scipy as sp
import scipy.linalg
import torch
import torch.nn as nn
import torch.nn.functional as F
# The following code is adapted from the following repository
# https://github.com/ikostrikov/pytorch-flows/blob/master/flows.py
def get_mask(in_features, out_features, in_flow_features, mask_type=None):
    """Build a MADE connectivity mask.

    mask_type is 'input', 'output' or None (hidden layer).  The returned
    float tensor has shape (out_features, in_features), with 1 where a
    connection is allowed.  See Figure 1 of
    https://arxiv.org/pdf/1502.03509.pdf for an illustration.
    """
    if mask_type == 'input':
        in_deg = torch.arange(in_features) % in_flow_features
    else:
        in_deg = torch.arange(in_features) % (in_flow_features - 1)
    if mask_type == 'output':
        out_deg = torch.arange(out_features) % in_flow_features - 1
    else:
        out_deg = torch.arange(out_features) % (in_flow_features - 1)
    # Unit i may receive from unit j only when its degree is >= j's degree.
    allowed = out_deg.unsqueeze(-1) >= in_deg.unsqueeze(0)
    return allowed.float()
class MaskedLinear(nn.Module):
    """Linear layer whose weight matrix is elementwise-gated by a fixed 0/1
    mask (registered as a buffer so it follows the module's device/dtype).

    An optional second, bias-free projection mixes in conditioning inputs.
    NOTE: the `bias` argument is accepted but not forwarded to nn.Linear,
    matching the original behaviour.
    """

    def __init__(self, in_features, out_features, mask,
                 cond_in_features=None, bias=True):
        super(MaskedLinear, self).__init__()
        self.linear = nn.Linear(in_features, out_features)
        if cond_in_features is not None:
            # Bias-free projection for the conditioning variables.
            self.cond_linear = nn.Linear(cond_in_features, out_features,
                                         bias=False)
        self.register_buffer('mask', mask)

    def forward(self, inputs, cond_inputs=None):
        masked_weight = self.linear.weight * self.mask
        out = F.linear(inputs, masked_weight, self.linear.bias)
        if cond_inputs is not None:
            out = out + self.cond_linear(cond_inputs)
        return out


nn.MaskedLinear = MaskedLinear  # expose under torch.nn, as downstream code expects
class MADE(nn.Module):
    """ An implementation of MADE
    (https://arxiv.org/abs/1502.03509), used as an autoregressive flow
    layer: the masked network outputs a per-dimension shift m and
    log-scale a, and 'direct' mode maps x -> u = (x - m) * exp(-a),
    returning the summed -a as the log-determinant term.

    NOTE(review): `pre_exp_tanh` is accepted but never used in this class.
    """
    def __init__(self,
                 num_inputs,
                 num_hidden,
                 num_cond_inputs=None,
                 act='relu',
                 pre_exp_tanh=False):
        super(MADE, self).__init__()
        activations = {'relu': nn.ReLU, 'sigmoid': nn.Sigmoid, 'tanh': nn.Tanh}
        act_func = activations[act]
        # Degree-based masks (get_mask) enforce the autoregressive property.
        input_mask = get_mask(
            num_inputs, num_hidden, num_inputs, mask_type='input')
        hidden_mask = get_mask(num_hidden, num_hidden, num_inputs)
        output_mask = get_mask(
            num_hidden, num_inputs * 2, num_inputs, mask_type='output')
        # joiner also mixes in the (optional) conditioning inputs.
        self.joiner = nn.MaskedLinear(num_inputs, num_hidden, input_mask,
                                      num_cond_inputs)
        # trunk emits 2*num_inputs values: shift m and log-scale a.
        self.trunk = nn.Sequential(act_func(),
                                   nn.MaskedLinear(num_hidden, num_hidden,
                                                   hidden_mask), act_func(),
                                   nn.MaskedLinear(num_hidden, num_inputs * 2,
                                                   output_mask))
    def forward(self, inputs, cond_inputs=None, mode='direct'):
        # Returns (transformed tensor, per-sample summed -a with keepdim).
        if mode == 'direct':
            h = self.joiner(inputs, cond_inputs)
            m, a = self.trunk(h).chunk(2, 1)
            u = (inputs - m) * torch.exp(-a)
            return u, -a.sum(-1, keepdim=True)
        else:
            # Inverse is sequential: column i depends only on columns < i,
            # so one full network pass per input dimension is required.
            x = torch.zeros_like(inputs)
            for i_col in range(inputs.shape[1]):
                h = self.joiner(x, cond_inputs)
                m, a = self.trunk(h).chunk(2, 1)
                x[:, i_col] = inputs[:, i_col] * torch.exp(a[:, i_col]) + m[:, i_col]
            return x, -a.sum(-1, keepdim=True)
class Sigmoid(nn.Module):
def __init__(self):
super(Sigmoid, self).__init__()
def forward(self, inputs, cond_inputs=None, mode='direct'):
if mode == 'direct':
s = torch.sigmoid
return s(inputs), torch.log(s(inputs) * (1 - s(inputs))).sum(
-1, keepdim=True)
else:
return torch.log(inputs /
(1 - inputs)), -torch.log(inputs - inputs**2).sum(
-1, keepdim=True)
class Logit(Sigmoid):
def __init__(self):
super(Logit, self).__init__()
def forward(self, inputs, cond_inputs=None, mode='direct'):
if mode == 'direct':
return super(Logit, self).forward(inputs, 'inverse')
else:
return super(Logit, self).forward(inputs, 'direct')
class BatchNormFlow(nn.Module):
""" An implementation of a batch normalization layer from
Density estimation using Real NVP
(https://arxiv.org/abs/1605.08803).
"""
def __init__(self, num_inputs, momentum=0.0, eps=1e-5):
super(BatchNormFlow, self).__init__()
self.log_gamma = nn.Parameter(torch.zeros(num_inputs))
self.beta = nn.Parameter(torch.zeros(num_inputs))
self.momentum = momentum
self.eps = eps
self.register_buffer('running_mean', torch.zeros(num_inputs))
self.register_buffer('running_var', torch.ones(num_inputs))
def forward(self, inputs, cond_inputs=None, mode='direct'):
if mode == 'direct':
if self.training:
self.batch_mean = inputs.mean(0)
self.batch_var = (
inputs - self.batch_mean).pow(2).mean(0) + self.eps
self.running_mean.mul_(self.momentum)
self.running_var.mul_(self.momentum)
self.running_mean.add_(self.batch_mean.data *
(1 - self.momentum))
self.running_var.add_(self.batch_var.data *
(1 - self.momentum))
mean = self.batch_mean
var = self.batch_var
else:
mean = self.running_mean
var = self.running_var
x_hat = (inputs - mean) / var.sqrt()
y = torch.exp(self.log_gamma) * x_hat + self.beta
log_det = (self.log_gamma - 0.5 * torch.log(var)).sum(-1, keepdim=True)
return y, log_det
else:
if self.training:
mean = self.batch_mean
var = self.batch_var
else:
mean = self.running_mean
var = self.running_var
x_hat = (inputs - self.beta) / torch.exp(self.log_gamma)
y = x_hat * var.sqrt() + mean
log_det = (self.log_gamma - 0.5 * torch.log(var)).sum(-1, keepdim=True)
return y, log_det
class Reverse(nn.Module):
""" An implementation of a reversing layer from
Density estimation using Real NVP
(https://arxiv.org/abs/1605.08803).
"""
def __init__(self, num_inputs):
super(Reverse, self).__init__()
self.perm = np.array(np.arange(0, num_inputs)[::-1])
self.inv_perm = np.argsort(self.perm)
def forward(self, inputs, cond_inputs=None, mode='direct'):
if mode == 'direct':
return inputs[:, self.perm], torch.zeros(
inputs.size(0), 1, device=inputs.device)
else:
return inputs[:, self.inv_perm], torch.zeros(
inputs.size(0), 1, device=inputs.device)
class FlowSequential(nn.Sequential):
""" A sequential container for flows.
In addition to a forward pass it implements a backward pass and
computes log jacobians.
"""
def forward(self, inputs, cond_inputs=None, mode='direct', logdets=None):
""" Performs a forward or backward pass for flow modules.
Args:
inputs: a tuple of inputs and logdets
mode: to run direct computation or inverse
"""
self.num_inputs = inputs.size(-1)
if logdets is None:
logdets = torch.zeros(inputs.size(0), 1, device=inputs.device)
assert mode in ['direct', 'inverse']
if mode == 'direct':
for module in self._modules.values():
inputs, logdet = module(inputs, cond_inputs, mode)
logdets += logdet
else:
for module in reversed(self._modules.values()):
inputs, logdet = module(inputs, cond_inputs, mode)
logdets += logdet
return inputs, logdets
def log_probs(self, inputs, cond_inputs = None):
u, log_jacob = self(inputs, cond_inputs)
log_probs = (-0.5 * u.pow(2) - 0.5 * math.log(2 * math.pi)).sum(
-1, keepdim=True)
return (log_probs + log_jacob).sum(-1, keepdim=True)
def sample(self, num_samples=None, noise=None, cond_inputs=None):
if noise is None:
noise = torch.Tensor(num_samples, self.num_inputs).normal_()
device = next(self.parameters()).device
noise = noise.to(device)
if cond_inputs is not None:
cond_inputs = cond_inputs.to(device)
samples = self.forward(noise, cond_inputs, mode='inverse')[0]
return samples
| 34.819608 | 85 | 0.56887 | 7,889 | 0.888501 | 0 | 0 | 0 | 0 | 0 | 0 | 1,172 | 0.131997 |
21e88c91163adb73844077bcc39fe14e4bf1e166 | 1,477 | py | Python | render.py | danieltes/tp_solver | 898354aa931c420dc1bf53fdd744885c4c6386d1 | [
"BSD-3-Clause"
] | null | null | null | render.py | danieltes/tp_solver | 898354aa931c420dc1bf53fdd744885c4c6386d1 | [
"BSD-3-Clause"
] | null | null | null | render.py | danieltes/tp_solver | 898354aa931c420dc1bf53fdd744885c4c6386d1 | [
"BSD-3-Clause"
] | null | null | null | import uuid
from PIL import Image
import graphviz as gv
styles = {
'graph': {
'label': 'Discreta - Representación de AST',
'fontsize': '16',
'fontcolor': 'white',
'bgcolor': '#333333',
},
'nodes': {
'fontname': 'Helvetica',
'shape': 'hexagon',
'fontcolor': 'white',
'color': 'white',
'style': 'filled',
'fillcolor': '#006699',
},
'edges': {
'style': 'dashed',
'color': 'white',
'arrowhead': 'open',
'fontname': 'Courier',
'fontsize': '12',
'fontcolor': 'white',
}
}
def _render_children(g, n, parent=None):
id = str(uuid.uuid1())
if n.op is not None:
g.node(id, n.op)
if parent is not None:
g.edge(parent, id)
for each in n.children:
_render_children(g, each, id)
else:
g.node(id, n.value)
g.edge(parent, id)
def _set_styles(graph):
graph.graph_attr.update(
('graph' in styles and styles['graph']) or {}
)
graph.node_attr.update(
('nodes' in styles and styles['nodes']) or {}
)
graph.edge_attr.update(
('edges' in styles and styles['edges']) or {}
)
return graph
def render_tree(tree):
graph = gv.Digraph(
format='jpg', comment="Arbol de representación semántico")
_set_styles(graph)
_render_children(graph, tree)
filename = graph.render("ast", view=True)
| 21.720588 | 70 | 0.531483 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 403 | 0.272297 |
21edb5cf4127cb401dec53951ecbfc0d5dad821a | 1,557 | py | Python | flask/flask_r_interpolaton/app.py | andreipreda/py-r-interpolation | d7be5799b9cf1da95ce728c00eb0ce1c73bf4c02 | [
"MIT"
] | null | null | null | flask/flask_r_interpolaton/app.py | andreipreda/py-r-interpolation | d7be5799b9cf1da95ce728c00eb0ce1c73bf4c02 | [
"MIT"
] | 13 | 2019-12-26T17:31:05.000Z | 2022-02-26T10:36:46.000Z | flask/flask_r_interpolaton/app.py | andreipreda/py-r-interpolation | d7be5799b9cf1da95ce728c00eb0ce1c73bf4c02 | [
"MIT"
] | null | null | null | import os
from pathlib import Path
from flask import Flask, current_app, jsonify, request
from flask_cors import CORS
from mongoengine import connect, MongoEngineConnectionError
import namesgenerator
from model import Doc
from app_logic import random_df, call_r
def create_app(config=None):
app = Flask(__name__)
CORS(app)
app.config.from_object(config)
mongo_host = os.environ.get('MONGO_HOST', default='mongodb://127.0.0.1:27017')
try:
connect(db='pyr',
host=mongo_host)
except MongoEngineConnectionError as exc:
raise exc
@app.route('/api/python')
def test():
"""Random pandas df"""
df = random_df()
return jsonify({'py': df.to_json()}), 200
@app.route('/api/r')
def from_r():
"""Dataframe from an R tibble using rpy2"""
df = call_r(Path(current_app.config['R_LOCATION'], 'rapp.r'))
return jsonify({'r': df.to_json()}), 200
"""MONGO IO API SIMULATION"""
@app.route('/api/add', methods=['POST'])
def add_doc():
try:
d = Doc(title=namesgenerator.get_random_name())
d.save()
return d.to_json(), 201
except Exception as ex:
raise ex
@app.route('/api/remove', methods=['DELETE'])
def remove_doc():
id = request.args.get('id')
try:
d = Doc.objects.get(id=id)
if d:
d.delete()
return jsonify({'ok': 1}), 200
except Exception as ex:
raise ex
return app
| 25.112903 | 82 | 0.587669 | 0 | 0 | 0 | 0 | 898 | 0.57675 | 0 | 0 | 231 | 0.148362 |
21ee59da3e9f824ace6a440137a55162daab5528 | 200 | py | Python | Timers.py | elegenstein-tgm/astrosim | 1b09a32f543f5cc810621f8beaff20d57d0add22 | [
"MIT"
] | null | null | null | Timers.py | elegenstein-tgm/astrosim | 1b09a32f543f5cc810621f8beaff20d57d0add22 | [
"MIT"
] | null | null | null | Timers.py | elegenstein-tgm/astrosim | 1b09a32f543f5cc810621f8beaff20d57d0add22 | [
"MIT"
] | null | null | null | class Timer:
def __init__(self, duration, ticks):
self.duration = duration
self.ticks = ticks
self.thread = None
def start(self):
pass
# start Thread here
| 20 | 40 | 0.585 | 199 | 0.995 | 0 | 0 | 0 | 0 | 0 | 0 | 19 | 0.095 |
21f0be377a2b1d1f473cdf9e342362b4dfa9908f | 345 | py | Python | config_mypy_django_plugin.py | fj-fj-fj/tech-store | e07214354a51490df53acceec2091812ffd31360 | [
"MIT"
] | null | null | null | config_mypy_django_plugin.py | fj-fj-fj/tech-store | e07214354a51490df53acceec2091812ffd31360 | [
"MIT"
] | null | null | null | config_mypy_django_plugin.py | fj-fj-fj/tech-store | e07214354a51490df53acceec2091812ffd31360 | [
"MIT"
] | null | null | null | import os
from configurations.importer import install
from mypy.version import __version__ # noqa: F401
from mypy_django_plugin import main
def plugin(version):
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'core.settings')
os.environ.setdefault('DJANGO_CONFIGURATION', 'Development')
install()
return main.plugin(version)
| 26.538462 | 68 | 0.776812 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 86 | 0.249275 |
21f0f9312500016def30ca87e22275fe478c9678 | 9,866 | py | Python | Florence/FunctionSpace/JacobiPolynomials/NormalisedJacobi_Deprecated.py | romeric/florence | 6af96f2590adb776f74efc6fed96737a4edc4582 | [
"MIT"
] | 65 | 2017-08-04T10:21:13.000Z | 2022-02-21T21:45:09.000Z | Florence/FunctionSpace/JacobiPolynomials/NormalisedJacobi_Deprecated.py | romeric/florence | 6af96f2590adb776f74efc6fed96737a4edc4582 | [
"MIT"
] | 6 | 2018-06-03T02:29:20.000Z | 2022-01-18T02:30:22.000Z | Florence/FunctionSpace/JacobiPolynomials/NormalisedJacobi_Deprecated.py | romeric/florence | 6af96f2590adb776f74efc6fed96737a4edc4582 | [
"MIT"
] | 10 | 2018-05-30T09:44:10.000Z | 2021-05-18T08:06:51.000Z | import numpy as np
from JacobiPolynomials import *
import math
# 1D - LINE
#------------------------------------------------------------------------------------------------------------------#
#------------------------------------------------------------------------------------------------------------------#
#------------------------------------------------------------------------------------------------------------------#
def NormalisedJacobi1D(C,x):
p = np.zeros(C+2)
for i in range(0,C+2):
p[i] = JacobiPolynomials(i,x,0,0)[-1]*np.sqrt((2.*i+1.)/2.)
return p
# 2D - TRI
#------------------------------------------------------------------------------------------------------------------#
#------------------------------------------------------------------------------------------------------------------#
#------------------------------------------------------------------------------------------------------------------#
def NormalisedJacobi2D(C,x):
""" Computes the orthogonal base of 2D polynomials of degree less
or equal to C+1 at the point x=(r,s) in [-1,1]^2 (i.e. on the reference quad)
"""
N = int( (C+2.)*(C+3.)/2. )
p = np.zeros(N)
r = x[0]; s = x[1]
# Ordering: 1st increasing the degree and 2nd lexicogafic order
ncount = 0 # counter for the polynomials order
# Loop on degree
for nDeg in range(0,C+2):
# Loop by increasing i
for i in range(0,nDeg+1):
if i==0:
p_i = 1.; q_i = 1.
else:
p_i = JacobiPolynomials(i,r,0.,0.)[-1]; q_i = q_i*(1.-s)/2.
# Value for j
j = nDeg-i
if j==0:
p_j = 1.
else:
p_j = JacobiPolynomials(j,s,2.*i+1.,0.)[-1]
# factor = np.sqrt( (2.*i+1.)*(i+j+1.)/2. )
factor = math.sqrt( (2.*i+1.)*(i+j+1.)/2. )
p[ncount] = ( p_i*q_i*p_j )*factor
ncount += 1
return p
def NormalisedJacobiTri(C,x):
""" Computes the orthogonal base of 2D polynomials of degree less
or equal to n at the point x=(xi,eta) in the reference triangle
"""
xi = x[0]; eta = x[1]
if eta==1:
r = -1.; s=1.;
else:
r = 2.*(1+xi)/(1.-eta)-1.
s = eta
return NormalisedJacobi2D(C,np.array([r,s]))
def GradNormalisedJacobiTri(C,x,EvalOpt=0):
""" Computes the orthogonal base of 2D polynomials of degree less
or equal to n at the point x=(xi,eta) in the reference triangle
"""
N = int((C+2.)*(C+3.)/2.)
p = np.zeros(N);
dp_dxi = np.zeros(N)
dp_deta = np.zeros(N)
r = x[0]; s = x[1]
# THIS MAY RUIN THE CONVERGENCE, BUT FOR POST PROCESSING ITS FINE
if EvalOpt==1:
if s==1:
s=0.99999999999999
xi = (1.+r)*(1.-s)/2.-1
eta = s
dr_dxi = 2./(1.-eta)
dr_deta = 2.*(1.+xi)/(1.-eta)**2
# Derivative of s is not needed because s=eta
# Ordering: 1st increasing the degree and 2nd lexicogafic order
ncount = 0
# Loop on degree
for nDeg in range(0,C+2):
# Loop increasing i
for i in range(0,nDeg+1):
if i==0:
p_i = 1; q_i = 1; dp_i = 0; dq_i = 0
else:
p_i = JacobiPolynomials(i,r,0.,0.)[-1]; dp_i = JacobiPolynomials(i-1,r,1.,1.)[-1]*(i+1.)/2.
q_i = q_i*(1.-s)/2.; dq_i = 1.*q_i*(-i)/(1-s)
# Value for j
j = nDeg-i
if j==0:
p_j = 1; dp_j = 0
else:
p_j = JacobiPolynomials(j,s,2.*i+1.,0.)[-1]; dp_j = JacobiPolynomials(j-1,s,2.*i+2.,1.)[-1]*(j+2.*i+2.)/2.
factor = math.sqrt( (2.*i+1.)*(i+j+1.)/2. )
# Normalized polynomial
p[ncount] = ( p_i*q_i*p_j )*factor
# Derivatives with respect to (r,s)
dp_dr = ( (dp_i)*q_i*p_j )*factor
dp_ds = ( p_i*(dq_i*p_j+q_i*dp_j) )*factor
# Derivatives with respect to (xi,eta)
dp_dxi[ncount] = dp_dr*dr_dxi
dp_deta[ncount] = dp_dr*dr_deta + dp_ds
ncount += 1
return p,dp_dxi,dp_deta
# 3D - TET
#------------------------------------------------------------------------------------------------------------------#
#------------------------------------------------------------------------------------------------------------------#
#------------------------------------------------------------------------------------------------------------------#
def NormalisedJacobi3D(C,x):
"""Computes the orthogonal base of 3D polynomials of degree less
or equal to n at the point x=(r,s,t) in [-1,1]^3
"""
N = int((C+2)*(C+3)*(C+4)/6.)
p = np.zeros(N)
r = x[0]; s = x[1]; t = x[2]
# Ordering: 1st incresing the degree and 2nd lexicogafic order
ncount = 0
# Loop on degree
for nDeg in range(0,C+2):
# Loop increasing i
for i in range(0,nDeg+1):
if i==0:
p_i = 1; q_i = 1
else:
p_i = JacobiPolynomials(i,r,0.,0.)[-1]; q_i = q_i*(1.-s)/2.
# Loop increasing j
for j in range(0,nDeg-i+1):
if j==0:
p_j = 1; q_j = ((1.-t)/2.)**i
else:
p_j = JacobiPolynomials(j,s,2.*i+1.,0.)[-1]; q_j = q_j*(1.-t)/2.
# Value for k
k = nDeg-(i+j)
if k==0:
p_k = 1.
else:
p_k = JacobiPolynomials(k,t,2.*(i+j)+2.,0.)[-1]
factor = math.sqrt( (2.*i+1.)*(i+j+1.)*(2.*(i+j+k)+3.)/4. )
p[ncount] = ( p_i*q_i*p_j*q_j*p_k )*factor
ncount += 1
return p
def NormalisedJacobiTet(C,x):
"""Computes the orthogonal base of 3D polynomials of degree less
or equal to n at the point x=(r,s,t) in [-1,1]^3
"""
xi = x[0]; eta = x[1]; zeta = x[2]
if (eta+zeta)==0:
r = -1; s=1
elif zeta==1:
r = -1; s=1 # or s=-1 (check that nothing changes)
else:
r = -2.*(1+xi)/(eta+zeta)-1.;
s = 2.*(1+eta)/(1-zeta)-1.;
t = zeta
return NormalisedJacobi3D(C,[r,s,t])
# return NormalisedJacobi3D_Native(C,[r,s,t])
def GradNormalisedJacobiTet(C,x,EvalOpt=0):
"""Computes the orthogonal base of 3D polynomials of degree less
or equal to n at the point x=(r,s,t) in [-1,1]^3
"""
N = int((C+2)*(C+3)*(C+4)/6.)
p = np.zeros(N)
dp_dxi = np.zeros(N)
dp_deta = np.zeros(N)
dp_dzeta = np.zeros(N)
r = x[0]; s = x[1]; t = x[2]
# THIS MAY RUIN THE CONVERGENCE, BUT FOR POST PROCESSING ITS FINE
if EvalOpt==1:
if t==1.:
t=0.999999999999
if np.isclose(s,1.):
s=0.999999999999
if np.isclose(s,1.):
s=0.99999999999999
eta = (1./2.)*(s-s*t-1.-t)
xi = -(1./2.)*(r+1)*(eta+t)-1.
zeta = 1.0*t
# THIS MAY RUIN THE CONVERGENCE, BUT FOR POST PROCESSING ITS FINE
if eta == 0. and zeta == 0.:
eta = 1.0e-14
zeta = 1e-14
eta_zeta = eta+zeta
if np.isclose(eta_zeta,0.):
eta_zeta = 0.000000001
dr_dxi = -2./eta_zeta
dr_deta = 2.*(1.+xi)/eta_zeta**2
dr_dzeta = dr_deta
ds_deta = 2./(1.-zeta)
ds_dzeta = 2.*(1.+eta)/(1.-zeta)**2
# Derivative of t is not needed because t=zeta
#--------------------------------------------------------
# if np.allclose(eta+zeta,0):
# dr_dxi = -2./(0.001)**2
# dr_deta = 2.*(1.+xi)/(0.001)**2
# else:
# dr_dxi = -2./(eta+zeta)
# dr_deta = 2.*(1.+xi)/(eta+zeta)**2
# dr_dzeta = dr_deta
# if np.allclose(eta+zeta,0):
# ds_deta = 2./(0.001)
# ds_dzeta = 2.*(1.+eta)/(0.001)**2
# else:
# ds_deta = 2./(1.-zeta)
# ds_dzeta = 2.*(1.+eta)/(1.-zeta)**2
#--------------------------------------------------------
# Ordering: 1st increasing the degree and 2nd lexicogafic order
ncount = 0
# Loop on degree
for nDeg in range(0,C+2):
# Loop increasing i
for i in range(0,nDeg+1):
if i==0:
p_i = 1.; q_i = 1.; dp_i = 0.; dq_i = 0.
else:
p_i = JacobiPolynomials(i,r,0.,0.)[-1]; dp_i = JacobiPolynomials(i-1,r,1.,1.)[-1]*(i+1.)/2.
q_i = q_i*(1.-s)/2.; dq_i = q_i*(-i)/(1.-s)
# Loop increasing j
for j in range(0,nDeg-i+1):
if j==0:
p_j = 1; q_j = ((1.-t)/2.)**i; dp_j = 0; dq_j = q_j*(-(i+j))/(1.-t);
else:
p_j = JacobiPolynomials(j,s,2.*i+1.,0.)[-1]; dp_j = JacobiPolynomials(j-1,s,2.*i+2.,1.)[-1]*(j+2.*i+2.)/2.
q_j = q_j*(1.-t)/2.; dq_j = q_j*(-(i+j))/(1.-t)
# Value for k
k = nDeg-(i+j);
if k==0:
p_k = 1.; dp_k = 0.;
else:
p_k = JacobiPolynomials(k,t,2.*(i+j)+2.,0.)[-1]; dp_k = JacobiPolynomials(k-1,t,2.*(i+j)+3.,1.)[-1]*(k+2.*i+2.*j+3.)/2.
factor = math.sqrt( (2.*i+1.)*(i+j+1.)*(2.*(i+j+k)+3.)/4. )
# Normalized polynomial
p[ncount] = ( p_i*q_i*p_j*q_j*p_k )*factor
# Derivatives with respect to (r,s,t)
dp_dr = ( (dp_i)*q_i*p_j*q_j*p_k )*factor
dp_ds = ( p_i*(dq_i*p_j+q_i*dp_j)*q_j*p_k )*factor
dp_dt = ( p_i*q_i*p_j*(dq_j*p_k+q_j*dp_k) )*factor
# Derivatives with respect to (xi,eta,zeta)
dp_dxi[ncount] = dp_dr*dr_dxi
dp_deta[ncount] = dp_dr*dr_deta + dp_ds*ds_deta
dp_dzeta[ncount] = dp_dr*dr_dzeta + dp_ds*ds_dzeta + dp_dt
ncount += 1
return p,dp_dxi,dp_deta,dp_dzeta | 32.668874 | 140 | 0.420738 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,524 | 0.357186 |
21f110499fa4d164d3ecef0734601aa0553b4aba | 24,849 | py | Python | Homework1.py | nicolac1999/Homework-ADM | 3ab9f4afaa7fce4a1ffc38a45dbd3a199dba3737 | [
"MIT"
] | null | null | null | Homework1.py | nicolac1999/Homework-ADM | 3ab9f4afaa7fce4a1ffc38a45dbd3a199dba3737 | [
"MIT"
] | null | null | null | Homework1.py | nicolac1999/Homework-ADM | 3ab9f4afaa7fce4a1ffc38a45dbd3a199dba3737 | [
"MIT"
] | null | null | null | #Exercises of the Problem 1 (77/91)
#Say "Hello, World!" With Python
print ("Hello, World!")
#Python If-Else
import math
import os
import random
import re
import sys
if __name__ == '__main__':
n = int(raw_input().strip())
if n%2==1:
print("Weird")
else:
if n>2 and n<5 :
print("Not Weird")
if n>=6 and n<=20:
print("Weird")
if n>20 :
print ("Not Weird")
#Arithmetic Operators
if __name__ == '__main__':
a = int(raw_input())
b = int(raw_input())
print(a+b)
print(a-b)
print(a*b)
#Python: Division
from __future__ import division
if __name__ == '__main__':
a = int(raw_input())
b = int(raw_input())
print(a//b)
print(a/b)
#Loops
if __name__ == '__main__':
n = int(raw_input())
for i in range (0,n):
print(i*i)
#Write a function
def is_leap(year):
    """Return True if `year` is a leap year in the Gregorian calendar."""
    # Divisible by 4, except century years, unless also divisible by 400.
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
year = int(raw_input())
print is_leap(year)
year = int(raw_input())
print is_leap(year)
#Print Function
from __future__ import print_function
if __name__ == '__main__':
n = int(raw_input())
a=''
for i in range(1,n+1):
a+=str(i)
print(a)
#List Comprehensions
if __name__ == '__main__':
x = int(raw_input())
y = int(raw_input())
z = int(raw_input())
n = int(raw_input())
l=[[i,j,k] for i in range(0,x+1) for j in range(0,y+1) for k in range(0,z+1)]
s=[l[i] for i in range(0,len(l)) if sum(l[i])!=n]
print s
#Find the Runner-Up Score!
if __name__ == '__main__':
n = int(raw_input())
arr = map(int, raw_input().split())
arr2=[arr[i] for i in range(0,len(arr)) if arr[i]!=max(arr)]
print max(arr2)
#Nested Lists
if __name__ == '__main__':
l=[]
punteggi=[]
for _ in range(int(raw_input())):
name = raw_input()
score = float(raw_input())
l=l+[[name,score]]
punteggi+=[score]
punteggi2=[punteggi[i] for i in range(0,len(punteggi)) if punteggi[i]!=min(punteggi)]
minimo=min(punteggi2)
nomi=[l[i][0] for i in range (0,len(l)) if l[i][1]==minimo]
nomi.sort()
for n in nomi:
print (n)
#Finding the percentage
if __name__ == '__main__':
n = int(raw_input())
student_marks = {}
for _ in range(n):
line = raw_input().split()
name, scores = line[0], line[1:]
scores = map(float, scores)
student_marks[name] = scores
query_name = raw_input()
punteggio=student_marks[query_name]
print "%.2f"%(sum(punteggio)/len(punteggio))
#Lists
if __name__ == '__main__':
b=[]
N = int(input())
for _ in range(N):
a=input().split()
if a[0]=="insert":
b.insert(int(a[1]),int(a[2]))
elif a[0]=='print':
print(b)
elif a[0]=='remove':
b.remove(int(a[1]))
elif a[0]=='append':
b.append(int(a[1]))
elif a[0]=='sort':
b.sort()
elif a[0]=='pop':
b.pop()
elif a[0]=='reverse':
b.reverse()
#Tuples
if __name__ == '__main__':
n = int(input())
integer_list = map(int, input().split())
t=tuple(integer_list)
print(hash(t))
#sWAP cASE
def swap_case(s):
    """Return `s` with the case of every alphabetic character inverted.

    Mirrors the original branch logic: lower-case characters are raised,
    everything else is lowered (a no-op for non-alphabetic characters).
    """
    return ''.join(ch.upper() if ch.islower() else ch.lower() for ch in s)
if __name__ == '__main__':
s = raw_input()
result = swap_case(s)
print result
#String Split and Join
def split_and_join(line):
    """Replace the space separators in `line` with hyphens."""
    return '-'.join(line.split(' '))
if __name__ == '__main__':
line = raw_input()
result = split_and_join(line)
print result
#What's Your Name?
def print_full_name(a, b):
    """Print the exercise's fixed greeting for first name `a` and last name `b`."""
    print ('Hello '+a+' '+b+'! You just delved into python.')
if __name__ == '__main__':
first_name = raw_input()
last_name = raw_input()
print_full_name(first_name, last_name)
#Mutations
def print_full_name(a, b):
print ('Hello '+a+' '+b+'! You just delved into python.')
if __name__ == '__main__':
first_name = raw_input()
last_name = raw_input()
print_full_name(first_name, last_name)
#Find a string
def count_substring(string, sub_string):
    """Count (possibly overlapping) occurrences of `sub_string` in `string`."""
    occurrences = 0
    # Slide over every viable start position; startswith with an offset
    # avoids building a new slice for each comparison.
    for start in range(len(string) - len(sub_string) + 1):
        if string.startswith(sub_string, start):
            occurrences += 1
    return occurrences
if __name__ == '__main__':
string = raw_input().strip()
sub_string = raw_input().strip()
count = count_substring(string, sub_string)
print count
#String Validators
if __name__ == '__main__':
    s = raw_input()
    # String Validators: for each predicate report whether ANY character of s
    # satisfies it. This collapses five byte-identical counting loops into a
    # single any() pass per predicate; print(True) emits the same "True"/"False"
    # text the original printed as string literals.
    for method_name in ('isalnum', 'isalpha', 'isdigit', 'islower', 'isupper'):
        print(any(getattr(char, method_name)() for char in s))
#Text Alignment
a = int(input())
b = 'H'
for i in range(a):
print((b*i).rjust(a-1)+b+(b*i).ljust(a-1))
for i in range(a+1):
print((b*a).center(a*2)+(b*a).center(a*6))
for i in range((a+1)//2):
print((b*a*5).center(a*6))
for i in range(a+1):
print((b*a).center(a*2)+(b*a).center(a*6))
for i in range(a):
print(((b*(a-i-1)).rjust(a)+b+(b*(a-i-1)).ljust(a)).rjust(a*6))
#Text Wrap
import textwrap
def wrap(string, max_width):
    """Print `string` in chunks of `max_width` characters, one per line.

    Matches the original exactly: the final remainder line is always printed
    (even when it is empty, i.e. when len(string) is a multiple of
    max_width), and the empty string is returned so the caller's
    `print(result)` emits only a blank line.
    """
    full_chunks = len(string) // max_width
    for index in range(full_chunks):
        print(string[index * max_width:(index + 1) * max_width])
    # Trailing remainder — possibly empty, mirroring the original final print.
    print(string[full_chunks * max_width:])
    return ''
if __name__ == '__main__':
string, max_width = raw_input(), int(raw_input())
result = wrap(string, max_width)
print result
#Designer Door Mat
# Read N (rows) and M (columns; the problem guarantees M = 3*N).
l=list(map(int,input().split()))
n=l[0]
m=l[1]
# Top half: 1, 3, 5, ... copies of '.|.' centred in a field of '-'.
for i in range(1,n,2):
    print((i*'.|.').center(m,'-'))
print('WELCOME'.center(m,'-'))
# Bottom half mirrors the top half in reverse.
for i in range(n-2,-1,-2):
    print((i*'.|.').center(m, '-'))
#String Formatting
def print_formatted(number):
    """Print 1..number as decimal, octal, uppercase hex and binary columns.

    Every field is right-justified to the width of the binary representation
    of `number`, exactly like the original rjust-based version.
    """
    width = len(bin(number)) - 2  # digits of the widest (binary) column
    for value in range(1, number + 1):
        print("{0:{w}d} {0:{w}o} {0:{w}X} {0:{w}b}".format(value, w=width))
if __name__ == '__main__':
n = int(input())
print_formatted(n)
#Alphabet Rangoli
def print_rangoli(size):
    """Print an alphabet rangoli of the given size.

    Each row is (4*size - 3) characters wide, filled with '-', with letters
    placed symmetrically around the centre column at index 2*(size-1).
    The letter at distance j from the centre on row i is lettere[i+j].
    """
    lettere = 'abcdefghijklmnopqrstuvwxyz'
    # Top half (rows above the middle), from the outermost row inward.
    for i in range (size-1,0,-1):
        riga=['-']*(4*size-3)
        for j in range(0, size - i):
            riga[2*(size-1+j)] = lettere[i+j]  # right of centre
            riga[2*(size-1-j)] = lettere[i+j]  # mirrored left of centre
        print("".join(riga))
    # Middle row plus the bottom half (mirror of the top).
    for i in range(0,size):
        riga=['-']*(4*size-3)
        for j in range(0,size-i):
            riga[2*(size-1+j)] = lettere[i+j]
            riga[2*(size-1-j)] = lettere[i+j]
        print("".join(riga))
if __name__ == '__main__':
n = int(input())
print_rangoli(n)
#Capitalize!
import math
import os
import random
import re
import sys
def solve(s):
    """Capitalize the first letter of each space-separated word in `s`.

    split(' ') — not split() — preserves runs of spaces, so the original
    spacing of the input is kept intact in the result.
    """
    return ' '.join(word.capitalize() for word in s.split(' '))
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
s = raw_input()
result = solve(s)
fptr.write(result + '\n')
fptr.close()
#The Minion Game
def minion_game(string):
s=0
k=0
vocali='AEIOU'
for i in range(len(string)):
if string[i] in vocali:
k+=len(string)-i#oppure len(string[i:])
else:
s+=len(string)-i
if s>k:
print('Stuart',s)
elif s<k:
print('Kevin',k)
else:
print('Draw')
if __name__ == '__main__':
s = input()
minion_game(s)
#Merge the Tools!
def merge_the_tools(string, k):
    """Split `string` into len/k segments of size k; print each segment with
    duplicate characters removed, keeping only the first occurrence."""
    for start in range(0, len(string), k):
        segment = string[start:start + k]
        # dict.fromkeys deduplicates while preserving first-seen order.
        print(''.join(dict.fromkeys(segment)))
if __name__ == '__main__':
string, k = input(), int(input())
merge_the_tools(string, k)
#collections.Counter()
from collections import Counter
n=int(input())
l=list(map(int,input().split()))
nclient=int(input())
p=0
c=Counter(l)
for _ in range (nclient):
client=list(map(int,input().split()))
if client[0] in c and c[client[0]]>0:
p+=client[1]
c[client[0]]-=1
print(p)
#Introduction to Sets
def average(array):
    """Return the mean of the distinct values in `array`."""
    distinct = set(array)
    return sum(distinct) / len(distinct)
if __name__ == '__main__':
n = int(input())
arr = list(map(int, input().split()))
result = average(arr)
print(result)
#DefaultDict Tutorial
from collections import defaultdict
A=defaultdict(list)
n,m=map(int,input().split())
for i in range (n):
A[input()].append(i+1)
for i in range(m):
e=input()
if e in A:
print(' '.join(map(str,A[e])))
else :
print (-1)
#Calendar Module
import calendar
a=input().split(' ')
giorno=calendar.weekday(int(a[2]),int(a[0]),int(a[1]))
g=calendar.day_name[giorno]
print(g.upper())
#Exceptions
n=int(input())
for i in range(n):
try:
a,b=map(int,input().split())
print (a//b)
except ZeroDivisionError as e:
print('Error Code:',e)
except ValueError as v:
print('Error Code:',v)
#Collections.namedtuple()
from collections import namedtuple
n=int(input())
somma=0
l=input().split()
stud=namedtuple('stud',l)
for _ in range (n):
l1,l2,l3,l4=input().split()
s=stud(l1,l2,l3,l4)
somma+=int(s.MARKS)
print(somma/n)
#Time Delta
import math
import os
import random
import re
import sys
from datetime import datetime
def time_delta(t1, t2):
    """Return, as a string, the absolute difference in whole seconds between
    two timestamps of the form 'Sun 10 May 2015 13:54:36 -0700'."""
    fmt = '%a %d %b %Y %H:%M:%S %z'
    moment_a = datetime.strptime(t1, fmt)
    moment_b = datetime.strptime(t2, fmt)
    # Subtracting two aware datetimes accounts for the UTC offsets.
    return str(int(abs((moment_a - moment_b).total_seconds())))
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
t = int(input())
for t_itr in range(t):
t1 = input()
t2 = input()
delta = time_delta(t1, t2)
fptr.write(delta + '\n')
fptr.close()
#No Idea!
n,m=map(int,input().split())
arr=list(map(int,input().split()))
A=set(map(int,input().split()))
B=set(map(int,input().split()))
happiness=0
for i in arr:
if i in A:
happiness+=1
if i in B:
happiness-=1
print(happiness)
#Collections.OrderedDict()
from collections import OrderedDict
n=int(input())
d=OrderedDict()
for _ in range (n):
i=input().split()
if len(i)==2:
if i[0] not in d:
d[i[0]]=int(i[1])
else:
d[i[0]]+=int(i[1])
else:
if i[0]+' '+i[1] not in d:
d[i[0]+' '+i[1]]=int(i[2])
else:
d[i[0]+' '+i[1]]+=int(i[2])
for e in d:
print(e,d[e])
#Symmetric Difference
n1=input()
a=input().split(' ')
n2=input()
b=input().split(' ')
a1=list(map(int,a))
b1=list(map(int,b))
s1=set(a1)
s2=set(b1)
s3=s1.symmetric_difference(s2)
l=list(s3)
l.sort()
for elem in l :
print (elem)
#Set .add()
n=int(input())
s=set()
for i in range(0,n):
s.add(input())
print(len(s))
#Word Order
from collections import OrderedDict
n=int(input())
d=OrderedDict()
for i in range(n):
s=input()
if s not in d:
d[s]=1
else:
d[s]+=1
print (len(d))
for e in d :
print(d[e],end=' ')
#Set .discard(), .remove() & .pop()
n = int(input())
s = set(map(int, input().split()))
comandi=int(input())
for i in range (0,comandi):
a=input().split(' ')
if a[0]=='pop':
s.pop()
if a[0] =='discard':
s.discard(int(a[1]))
if a[0] == 'remove':
s.remove(int(a[1]))
print(sum(s))
#Collections.deque()
from collections import deque
d=deque()
for _ in range(int(input())):
metodo,*valore=input().split()
getattr(d, metodo)(*valore)
for elem in d:
print(elem,end=' ')
#Company Logo
import math
import os
import random
import re
import sys
from collections import Counter
if __name__ == '__main__':
s = sorted(input())
c=Counter(s)
l=c.most_common(3)
for e in l :
print(e[0]+' '+str(e[1]))
#Set .union() Operation
n1=int(input())
s1=set(map(int,input().split(' ')))
n2=int(input())
s2=set(map(int,input().split(' ')))
s3=s1.union(s2)
print(len(s3))
#Set .intersection() Operation
n1=input()
s1=set(input().split(' '))
n2=input()
s2=set(input().split(' '))
print(len(s1.intersection(s2)))
#Set .difference() Operation
n1,s1=input(),set(input().split())
n2,s2=input(),set(input().split())
print(len(s1.difference(s2)))
#Set .symmetric_difference() Operation
n1,s1= input(),set(input().split())
n2,s2= input(),set(input().split())
print(len(s1.symmetric_difference(s2)))
#Set Mutations
n,s=input(),set(map(int,input().split()))
for _ in range(int(input())):
l,i=input().split(),set(map(int,input().split()))
if l[0]=='update':
s.update(i)
if l[0]=='intersection_update':
s.intersection_update(i)
if l[0]=='symmetric_difference_update':
s.symmetric_difference_update(i)
if l[0]=='difference_update':
s.difference_update(i)
print(sum(s))
#The Captain's Room
n=int(input())
l=input().split()
s1=set()
s2=set()
for i in l:
if i not in s1:
s1.add(i)
else:
s2.add(i)
s1.difference_update(s2)
print(list(s1)[0])
#Check Subset
# Check Subset: the original snippet never read n here — it silently reused
# the n left over from the previous exercise. Read the test-case count
# explicitly so the snippet stands on its own.
n = int(input())
for _ in range(n):
    _size_a, s1 = input(), set(map(int, input().split()))
    _size_b, s2 = input(), set(map(int, input().split()))
    # s1 <= s2 is the idiomatic subset test, equivalent to the original
    # intersection comparison; print() renders the bool as True/False.
    print(s1 <= s2)
#Check Strict Superset
# Check Strict Superset: s must strictly contain every set read below.
s=set(map(int,input().split()))
n=int(input())
sup=True  # assume strict superset until a counterexample is found
for _ in range(n):
    s1=set(map(int,input().split()))
    for e in s1:
        if e not in s:
            sup=False
            exit  # NOTE(review): bare `exit` is a no-op name reference, not a call — the loop keeps running, but sup stays False so the result is still correct
    if s==s1:
        sup=False
        exit  # NOTE(review): same no-op as above
print(sup)
#Zipped!
n,x=map(int,input().split())
l=[]
for i in range (x):
l.append(list(map(float,input().split())))
for i in (zip(*l)):
media=sum(i)/len(i)
print (media)
#Athlete Sort
import math
import os
import random
import re
import sys
if __name__ == '__main__':
nm = input().split()
n = int(nm[0])
m = int(nm[1])
arr = []
for _ in range(n):
arr.append(list(map(int, input().rstrip().split())))
k = int(input())
colonna=[]
for i in range(n):
colonna.append(arr[i][k])
colonna.sort()
for i in range(n):
for j in range(n):
if colonna[i]==arr[j][k]:
print(*arr[j])
arr.remove(arr[j])
break
#ginortS
# Reorder a string so lowercase letters come first, then uppercase letters,
# then odd digits, then even digits — each group sorted ascending.
s=input()
p=[]    # even digits
d=[]    # odd digits
m=[]    # lowercase letters
M=[]    # uppercase letters
for i in range(len(s)):
    if s[i].isupper():
        M.append(s[i])
    elif s[i].islower():
        m.append(s[i])
    elif int(s[i])%2==0:
        p.append(s[i])
    else:
        d.append(s[i])
M.sort()
m.sort()
p.sort()
d.sort()
# Concatenation order defines the required sort: lower, upper, odd, even.
print(''.join(m+M+d+p))
#Detect Floating Point Number
import re
n=int(input())
for i in range (n):
numero=input()
if re.match(r"^[-+]?[0-9]*\.[0-9]+$",numero):
print(True)
else :
print(False)
#Map and Lambda Function
def cube(x):
    """Return x raised to the third power."""
    return x ** 3
def fibonacci(n):
    """Return the first n Fibonacci numbers as a list, starting 0, 1."""
    seq = []
    a, b = 0, 1
    while len(seq) < n:
        seq.append(a)
        a, b = b, a + b
    return seq
if __name__ == '__main__':
n = int(input())
print(list(map(cube, fibonacci(n))))
#Re.split()
regex_pattern = r"[,.]"
import re
print("\n".join(re.split(regex_pattern, input())))
#Validating phone numbers
import re
n=int(input())
for i in range(n):
if re.match(r'[789]\d{9}$',input()):
print('YES')
else:
print('NO')
#Validating and Parsing Email Addresses
import re
import email.utils
n=int(input())
for i in range(n):
e=email.utils.parseaddr(input())
if re.match(r'[a-z][-a-z._0-9]+@[a-z]+\.[a-z]{1,3}$',e[1]):
print(email.utils.formataddr(e))
#Hex Color Code
import re
n=int(input())
for _ in range(n):
color=re.findall(r':?.(#[0-9a-fA-F]{6}|#[0-9a-fA-F]{3})',input())
for c in color:
print(c)
#XML 1 - Find the Score
import sys
import xml.etree.ElementTree as etree
def get_attr_number(node):
    """Count the attributes on node and on every element below it."""
    return sum(len(elem.attrib) for elem in node.iter())
if __name__ == '__main__':
sys.stdin.readline()
xml = sys.stdin.read()
tree = etree.ElementTree(etree.fromstring(xml))
root = tree.getroot()
print(get_attr_number(root))
#Validating UID
import re
for i in range(int(input())):
carta=input()
if re.match(r'^(?!.*(.).*\1)(?=(?:.*[A-Z]){2,})(?=(?:.*\d){3,})[a-zA-Z0-9]{10}$',carta):
print('Valid')
else:
print('Invalid')
#XML2 - Find the Maximum Depth
import xml.etree.ElementTree as etree
maxdepth = 0
def depth(elem, level):
    """Recurse through the XML tree, recording the deepest level seen.

    The running maximum lives in the module-level ``maxdepth`` variable
    (recursion is needed because every child's children must be visited,
    incrementing the level by one per generation), so nothing has to be
    returned from the recursion.
    """
    global maxdepth
    current = level + 1
    maxdepth = max(maxdepth, current)
    for child in elem:
        depth(child, current)
if __name__ == '__main__':
n = int(input())
xml = ""
for i in range(n):
xml = xml + input() + "\n"
tree = etree.ElementTree(etree.fromstring(xml))
depth(tree.getroot(), -1)
print(maxdepth)
#Arrays
import numpy
def arrays(arr):
    """Convert a sequence of number strings to a float array, reversed."""
    return numpy.asarray(arr, dtype=float)[::-1]
arr = input().strip().split(' ')
result = arrays(arr)
print(result)
#Shape and Reshape
import numpy
#l=list(map(int,input().split()))
#a=numpy.array(l)
#print (numpy.reshape(a,(3,3)))
l=input().split()
a=numpy.array(l,int)
print (numpy.reshape(a,(3,3)))
#Transpose and Flatten
import numpy
n,m=map(int,input().split())
l=[]
for i in range(n):
l.append(input().split())
a=numpy.array(l,int)
print (numpy.transpose(a))
print (a.flatten())
#Concatenate
import numpy
n,m,p=map(int,input().split())
l1=[]
l2=[]
for i in range(n):
l1.append(input().split())
for i in range (m):
l2.append(input().split())
a=numpy.array(l1,int)
b=numpy.array(l2,int)
print(numpy.concatenate((a,b),axis=0))
#Zeros and Ones
import numpy
# The whole shape arrives on one line; a and b are the leading dims, any
# remaining numbers become trailing dims of the same shape tuple.
a,b,*c=map(int,input().split())
# numpy.int was deprecated in NumPy 1.20 and removed in 1.24 (it was only
# an alias for the builtin); passing int yields the default integer dtype.
print (numpy.zeros((a,b,*c),dtype=int))
print (numpy.ones((a,b,*c),dtype=int))
#Eye and Identity
import numpy
r,c=map(int,input().split())
numpy.set_printoptions(sign=' ')
print (numpy.eye(r,c,k=0))
#Array Mathematics
import numpy
n,m=map(int,input().split())
l1=[]
l2=[]
for _ in range(n):
l1.append(input().split())
for _ in range(n):
l2.append(input().split())
a=numpy.array(l1,int)
b=numpy.array(l2,int)
print(a+b)
print(a-b)
print(a*b)
print(a//b)
print(a%b)
print(a**b)
#Floor, Ceil and Rint
import numpy
a=numpy.array(input().split(),float)
numpy.set_printoptions(sign=' ')
print(numpy.floor(a))
print(numpy.ceil(a))
print(numpy.rint(a))
#Sum and Prod
import numpy
n,m=map(int,input().split())
l=[]
for _ in range(n):
l.append(input().split())
a=numpy.array(l,int)
s= numpy.sum(a,axis=0)
print (numpy.prod(s))
#Min and Max
import numpy
n,m=map(int,input().split())
l=[]
for _ in range(n):
l.append(input().split())
a=numpy.array(l,int)
m=numpy.min(a,axis=1)
print(numpy.max(m))
#Mean, Var, and Std
import numpy
n,m=map(int,input().split())
l=[]
for _ in range(n):
l.append(input().split())
a=numpy.array(l,int)
numpy.set_printoptions(legacy='1.13')
print(numpy.mean(a,axis=1))
print(numpy.var(a,0))
print(numpy.std(a))
#Dot and Cross
import numpy
n=int(input())
l1=[]
l2=[]
for _ in range(n):
l1.append(input().split())
a=numpy.array(l1,int)
for _ in range(n):
l2.append(input().split())
b=numpy.array(l2,int)
print(numpy.dot(a,b))
#Inner and Outer
import numpy
a=numpy.array(input().split(),int)
b=numpy.array(input().split(),int)
print(numpy.inner(a,b))
print(numpy.outer(a,b))
#Polynomials
import numpy
a=numpy.array(input().split(),float)
val=int(input())
print(numpy.polyval(a,val))
#Linear Algebra
import numpy
n=int(input())
l=[]
for _ in range(n):
l.append(input().split())
a=numpy.array(l,float)
print(round(numpy.linalg.det(a),2))
#Exercises of the Problem 2 (6/6)
#Birthday Cake Candles
import math
import os
import random
import re
import sys
def birthdayCakeCandles(candles):
    """Return how many candles share the maximum height."""
    tallest = max(candles)
    return sum(1 for height in candles if height == tallest)
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
candles_count = int(input().strip())
candles = list(map(int, input().rstrip().split()))
result = birthdayCakeCandles(candles)
fptr.write(str(result) + '\n')
fptr.close()
#Number Line Jumps
import math
import os
import random
import re
import sys
def kangaroo(x1, v1, x2, v2):
    """Decide whether two kangaroos ever land on the same spot.

    Kangaroo 1 starts at x1 and advances v1 per jump; kangaroo 2 starts at
    x2 and advances v2.  They meet iff x1 + t*v1 == x2 + t*v2 for some
    integer t >= 0, i.e. t = (x2 - x1) / (v1 - v2) is a whole non-negative
    number of jumps.  Returns 'YES' or 'NO'.

    Fixes a ZeroDivisionError in the original when v1 == v2 and the two
    starting positions coincide (they meet immediately at t = 0).
    """
    if v1 == v2:
        # Equal speeds: the gap never changes.
        return 'YES' if x1 == x2 else 'NO'
    jumps, rem = divmod(x2 - x1, v1 - v2)
    # Meeting time must divide exactly and not lie in the past.
    return 'YES' if rem == 0 and jumps >= 0 else 'NO'
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
x1V1X2V2 = input().split()
x1 = int(x1V1X2V2[0])
v1 = int(x1V1X2V2[1])
x2 = int(x1V1X2V2[2])
v2 = int(x1V1X2V2[3])
result = kangaroo(x1, v1, x2, v2)
fptr.write(result + '\n')
fptr.close()
#Viral Advertising
import math
import os
import random
import re
import sys
def viralAdvertising(n):
    """Return cumulative likes after n days of the viral campaign.

    Day 1 yields 2 likes (5 people shown, half like it, floored); each
    following day's likes are floor(previous_likes * 3 / 2), because every
    liker reshares with 3 friends and half of those like it.
    """
    likes_per_day = [2]
    for _ in range(n - 1):
        likes_per_day.append(likes_per_day[-1] * 3 // 2)
    return sum(likes_per_day)
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input())
result = viralAdvertising(n)
fptr.write(str(result) + '\n')
fptr.close()
#Recursive Digit Sum
import math
import os
import random
import re
import sys
def superDigit(n, k):
    """Return the super digit of the number formed by writing the digit
    string n k times in a row.

    The super digit repeatedly sums the digits until one digit remains.
    Summing n's digits once and scaling by k is equivalent, because the
    digit sum preserves the value of the super digit.

    Fixes the original's early return for len(n) == 1, which ignored k
    entirely (e.g. superDigit('5', 2) must be super_digit('55') == 1,
    not 5).
    """
    total = sum(int(d) for d in n) * k
    while total >= 10:
        total = sum(map(int, str(total)))
    return total
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
nk = input().split()
n = nk[0]
k = int(nk[1])
result = superDigit(n, k)
fptr.write(str(result) + '\n')
fptr.close()
#Insertion Sort - Part 1
import math
import os
import random
import re
import sys
def insertionSort1(n, arr):
    """Insert arr[-1] into the already-sorted first n-1 elements,
    printing the whole array after every write (HackerRank
    "Insertion Sort - Part 1" output format).
    """
    # Value being inserted; its slot at the end is reused while shifting.
    num=arr[-1]
    # Scan the sorted prefix right-to-left: n-i runs n-2, n-3, ..., 0.
    for i in range(2,n+1):
        if arr[n-i]>num:
            # Larger element: shift it one slot to the right.
            arr[n-i+1]=arr[n-i]
            print(*arr)
        else:
            # First element <= num: num belongs just to its right.
            arr[n-i+1]=num
            print(*arr)
            break
    # If the loop never broke, num is the smallest element: the last
    # iteration already copied arr[0] into arr[1], so arr[0]>num still
    # holds here; the arr[1]=arr[0] line repeats that copy (a no-op) and
    # num is finally written into slot 0.
    if arr[0]>num:
        arr[1]=arr[0]
        arr[0]=num
        print(*arr)
if __name__ == '__main__':
n = int(input())
arr = list(map(int, input().rstrip().split()))
insertionSort1(n, arr)
#Insertion Sort - Part 2
import math
import os
import random
import re
import sys
def insertionSort2(n, arr):
    """Insertion-sort arr in place, printing the array after each of the
    n-1 insertion passes (HackerRank "Insertion Sort - Part 2" format).

    Fixes the original, which ran arr.remove(arr[i]) followed by
    arr.insert(j, arr[i]): remove() deletes the first element *equal* to
    arr[i] (the wrong slot when duplicates exist), and the second arr[i]
    re-reads the already-shifted list, inserting a different element and
    corrupting the array.
    """
    for i in range(1, n):
        value = arr[i]
        j = i - 1
        # Shift the sorted prefix right until value's slot is free.
        while j >= 0 and arr[j] > value:
            arr[j + 1] = arr[j]
            j -= 1
        arr[j + 1] = value
        print(*arr)
if __name__ == '__main__':
n = int(input())
arr = list(map(int, input().rstrip().split()))
insertionSort2(n, arr)
| 19.026799 | 156 | 0.540706 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,317 | 0.133476 |
21f17d842da515de7fc906d22c723ce681702761 | 2,046 | py | Python | oms/test/test_order.py | alphamatic/amp | 5018137097159415c10eaa659a2e0de8c4e403d4 | [
"BSD-3-Clause"
] | 5 | 2021-08-10T23:16:44.000Z | 2022-03-17T17:27:00.000Z | oms/test/test_order.py | alphamatic/amp | 5018137097159415c10eaa659a2e0de8c4e403d4 | [
"BSD-3-Clause"
] | 330 | 2021-06-10T17:28:22.000Z | 2022-03-31T00:55:48.000Z | oms/test/test_order.py | alphamatic/amp | 5018137097159415c10eaa659a2e0de8c4e403d4 | [
"BSD-3-Clause"
] | 6 | 2021-06-10T17:20:32.000Z | 2022-03-28T08:08:03.000Z | import logging
import helpers.hunit_test as hunitest
import oms.order as omorder
import oms.order_example as oordexam
_LOG = logging.getLogger(__name__)
class TestOrder1(hunitest.TestCase):
def test1(self) -> None:
"""
Test building and serializing an Order.
"""
order = oordexam.get_order_example1()
# Check.
act = str(order)
exp = r"""Order: order_id=0
creation_timestamp=2000-01-01 09:30:00-05:00
asset_id=101
type_=price@twap
start_timestamp=2000-01-01 09:35:00-05:00
end_timestamp=2000-01-01 09:40:00-05:00
curr_num_shares=0.0
diff_num_shares=100.0
tz=America/New_York"""
exp = exp.replace("\n", " ")
self.assert_equal(act, exp, fuzzy_match=True)
# Deserialize from string.
order2 = omorder.Order.from_string(act)
# Check.
act = str(order2)
self.assert_equal(act, exp, fuzzy_match=True)
class TestOrders1(hunitest.TestCase):
def test1(self) -> None:
"""
Test building and serializing a list of Orders.
"""
orders = [oordexam.get_order_example1(), oordexam.get_order_example1()]
act = omorder.orders_to_string(orders)
exp = r"""
Order: order_id=0 creation_timestamp=2000-01-01 09:30:00-05:00 asset_id=101 type_=price@twap start_timestamp=2000-01-01 09:35:00-05:00 end_timestamp=2000-01-01 09:40:00-05:00 curr_num_shares=0.0 diff_num_shares=100.0 tz=America/New_York
Order: order_id=0 creation_timestamp=2000-01-01 09:30:00-05:00 asset_id=101 type_=price@twap start_timestamp=2000-01-01 09:35:00-05:00 end_timestamp=2000-01-01 09:40:00-05:00 curr_num_shares=0.0 diff_num_shares=100.0 tz=America/New_York
"""
# exp = exp.replace("\n", " ")
self.assert_equal(act, exp, fuzzy_match=True)
# Deserialize from string.
orders2 = omorder.orders_from_string(act)
# Check.
act = omorder.orders_to_string(orders2)
self.assert_equal(act, exp, fuzzy_match=True)
| 37.888889 | 236 | 0.663245 | 1,885 | 0.92131 | 0 | 0 | 0 | 0 | 0 | 0 | 1,036 | 0.506354 |
21f180b857dbd23c3f25d5d18d9b868a2c717d34 | 1,147 | py | Python | mindhome_alpha/erpnext/hr/doctype/leave_type/leave_type.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | 1 | 2021-04-29T14:55:29.000Z | 2021-04-29T14:55:29.000Z | mindhome_alpha/erpnext/hr/doctype/leave_type/leave_type.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | null | null | null | mindhome_alpha/erpnext/hr/doctype/leave_type/leave_type.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | 1 | 2021-04-29T14:39:01.000Z | 2021-04-29T14:39:01.000Z | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import calendar
import frappe
from datetime import datetime
from frappe.utils import today
from frappe import _
from frappe.model.document import Document
class LeaveType(Document):
def validate(self):
if self.is_lwp:
leave_allocation = frappe.get_all("Leave Allocation", filters={
'leave_type': self.name,
'from_date': ("<=", today()),
'to_date': (">=", today())
}, fields=['name'])
leave_allocation = [l['name'] for l in leave_allocation]
if leave_allocation:
frappe.throw(_('Leave application is linked with leave allocations {0}. Leave application cannot be set as leave without pay').format(", ".join(leave_allocation))) #nosec
if self.is_lwp and self.is_ppl:
frappe.throw(_("Leave Type can be either without pay or partial pay"))
if self.is_ppl and (self.fraction_of_daily_salary_per_leave < 0 or self.fraction_of_daily_salary_per_leave > 1):
frappe.throw(_("The fraction of Daily Salary per Leave should be between 0 and 1"))
| 38.233333 | 174 | 0.741935 | 821 | 0.71578 | 0 | 0 | 0 | 0 | 0 | 0 | 434 | 0.378378 |
21f35dbbfba3587292969ac6f42df8409ca16d0e | 223 | py | Python | data/python/pattern_10/code.py | MKAbuMattar/grammind-api | ccf6e9898f50f9e4c7671abecf65029198e2dc72 | [
"MIT"
] | 3 | 2021-12-29T13:03:27.000Z | 2021-12-31T20:27:17.000Z | data/python/pattern_10/code.py | MKAbuMattar/grammind-api | ccf6e9898f50f9e4c7671abecf65029198e2dc72 | [
"MIT"
] | 2 | 2022-01-15T13:08:13.000Z | 2022-01-18T19:41:07.000Z | data/python/pattern_10/code.py | MKAbuMattar/grammind-api | ccf6e9898f50f9e4c7671abecf65029198e2dc72 | [
"MIT"
] | null | null | null | #MAIN PROGRAM STARTS HERE:
num = int(input('Enter the number of rows and columns for the square: '))
for x in range(1, num + 1):
for y in range(1, num - 2 + 1):
print ('{} {} '.format(x, y), end='')
print() | 31.857143 | 73 | 0.573991 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 91 | 0.408072 |
21f37da3a047adbe8267c14542444fce93f2f143 | 628 | py | Python | vocoder.py | tapsoft/autovc | b89183b4f02facbeaee73c2c91ef05615e7985c0 | [
"MIT"
] | 1 | 2021-05-18T19:09:05.000Z | 2021-05-18T19:09:05.000Z | vocoder.py | tapsoft/autovc | b89183b4f02facbeaee73c2c91ef05615e7985c0 | [
"MIT"
] | null | null | null | vocoder.py | tapsoft/autovc | b89183b4f02facbeaee73c2c91ef05615e7985c0 | [
"MIT"
] | null | null | null | import os
import torch
import librosa
import pickle
import soundfile as sf
from synthesis import build_model
from synthesis import wavegen
spect_vc = pickle.load(open('results.pkl', 'rb'))
device = torch.device("cuda")
model = build_model().to(device)
checkpoint = torch.load("checkpoint_step001000000_ema.pth")
model.load_state_dict(checkpoint["state_dict"])
outputDir = './wavs'
for spect in spect_vc:
name = spect[0]
c = spect[1]
print(name)
waveform = wavegen(model, c=c)
#librosa.output.write_wav(name+'.wav', waveform, sr=16000)
sf.write(os.path.join(outputDir, name+'.wav'), waveform, 16000) | 27.304348 | 67 | 0.727707 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 141 | 0.224522 |
21f395f0029b2866265b7a849d224eea97a12f20 | 2,067 | py | Python | my_drawing/bouncing_ball.py | YuanMaSa/stancode-projects | d4b8d07650786bdd25fb00c5bada6914cc18b5f4 | [
"MIT"
] | null | null | null | my_drawing/bouncing_ball.py | YuanMaSa/stancode-projects | d4b8d07650786bdd25fb00c5bada6914cc18b5f4 | [
"MIT"
] | null | null | null | my_drawing/bouncing_ball.py | YuanMaSa/stancode-projects | d4b8d07650786bdd25fb00c5bada6914cc18b5f4 | [
"MIT"
] | null | null | null | """
File: bouncing_ball.py
Name: Jonathan Ma
-------------------------
TODO:
"""
from campy.graphics.gobjects import GOval
from campy.graphics.gwindow import GWindow
from campy.gui.events.timer import pause
from campy.gui.events.mouse import onmouseclicked
VX = 3
DELAY = 10
GRAVITY = 1
SIZE = 20
REDUCE = 0.9
START_X = 30
START_Y = 40
window = GWindow(800, 500, title='bouncing_ball.py')
# ball creation
ball = GOval(SIZE, SIZE, x=START_X, y=START_Y)
ball.filled = True
ball.fill_color = "#000000"
# the number of bouncing
bouncing_count = 0
# check if the ball has been clicked
usr_clicked = False
# the number of clicks
count_clicks = 0
def main():
"""
This program simulates a bouncing ball at (START_X, START_Y)
that has VX as x velocity and 0 as y velocity. Each bounce reduces
y velocity to REDUCE of itself.
"""
window.add(ball)
onmouseclicked(click_event)
def click_event(mouse):
"""
:param mouse:
:return: None
"""
global usr_clicked
global bouncing_count
global count_clicks
vy = 0
if usr_clicked is False:
count_clicks += 1
while True:
usr_clicked = True
ball.move(VX, vy)
if ball.y + ball.height >= window.height:
# check if the ball hit the ground
print("hit!!!")
vy = (-vy + GRAVITY) * REDUCE
bouncing_count += 1
else:
# ball still not reach to the ground
vy += GRAVITY
if ball.x + ball.width >= window.width:
# check if ball move out of the scene
usr_clicked = False
break
print(f"ball position: {ball.y + ball.height}")
print(f"VY: {str(vy)}")
pause(DELAY)
if count_clicks == 3:
usr_clicked = True
window.remove(ball)
window.add(ball, START_X, START_Y)
if __name__ == "__main__":
main()
| 23.488636 | 71 | 0.562651 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 634 | 0.306725 |
21f45310f90a55cefdff3888526c635be854305a | 373 | py | Python | test/py/RunClientServer.py | KirinDave/powerset_thrift | 283603cce87e6da4117af1d3c91570e7466846c2 | [
"BSL-1.0"
] | 1 | 2016-05-08T06:27:22.000Z | 2016-05-08T06:27:22.000Z | test/py/RunClientServer.py | wmorgan/thrift | d9ba3d7a3e25f0f88766c344b2e937422858320b | [
"BSL-1.0"
] | null | null | null | test/py/RunClientServer.py | wmorgan/thrift | d9ba3d7a3e25f0f88766c344b2e937422858320b | [
"BSL-1.0"
] | 1 | 2021-02-09T10:25:34.000Z | 2021-02-09T10:25:34.000Z | #!/usr/bin/env python
import subprocess
import sys
import os
import signal
serverproc = subprocess.Popen([sys.executable, "TestServer.py"])
try:
ret = subprocess.call([sys.executable, "TestClient.py"])
if ret != 0:
raise Exception("subprocess failed")
finally:
# fixme: should check that server didn't die
os.kill(serverproc.pid, signal.SIGKILL)
| 21.941176 | 64 | 0.707775 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 114 | 0.30563 |