hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6978d12598c10eff9db5a49764ac11fad521f00e | 3,785 | py | Python | pyrsa/io/meadows.py | PeerHerholz/pyrsa | 994007086c59de93d86b982f1fff73fe6a8ea929 | [
"MIT"
] | 4 | 2015-08-10T18:34:21.000Z | 2018-05-15T20:43:15.000Z | pyrsa/io/meadows.py | PeerHerholz/pyrsa | 994007086c59de93d86b982f1fff73fe6a8ea929 | [
"MIT"
] | null | null | null | pyrsa/io/meadows.py | PeerHerholz/pyrsa | 994007086c59de93d86b982f1fff73fe6a8ea929 | [
"MIT"
] | 2 | 2018-03-26T03:02:07.000Z | 2021-11-10T21:09:48.000Z | """Covers import of data downloaded from the
`Meadows online behavior platform <https://meadows-research.com/>`_.
For information on available file types see the meadows
`documentation on downloads <https://meadows-research.com/documentation\
/researcher/downloads/>`_.
"""
from os.path import basename
import numpy
from scipy.io import loadmat
from pyrsa.rdm.rdms import RDMs
def load_rdms(fpath, sort=True):
    """Read a Meadows results file and return any RDMs as a pyrsa object

    Args:
        fpath (str): path to .mat Meadows results file
        sort (bool): whether to sort the RDM based on the stimulus names

    Raises:
        ValueError: raised when the file lacks an expected variable,
            which can happen if it does not contain MA task data.

    Returns:
        RDMs: all RDMs found in the data file as one RDMs object
    """
    meta = extract_filename_segments(fpath)
    contents = loadmat(fpath)
    if meta['participant_scope'] == 'single':
        # single-participant files store one utv matrix plus stimuli names
        missing = [v for v in ('stimuli', 'rdmutv') if v not in contents]
        if missing:
            raise ValueError(f'File missing variable: {missing[0]}')
        utvs = contents['rdmutv']
        stimuli_fnames = contents['stimuli']
        pnames = [meta['participant']]
    else:
        # multi-participant files have one stimuli_<name> / rdmutv_<name>
        # variable pair per participant
        stim_vars = [k for k in contents.keys() if k.startswith('stimuli')]
        stimuli_fnames = contents[stim_vars[0]]
        pnames = ['-'.join(k.split('_')[1:]) for k in stim_vars]
        utvs = numpy.squeeze(numpy.stack(
            [contents['rdmutv_' + p.replace('-', '_')] for p in pnames]
        ))
    descriptors = {
        key: meta[key]
        for key in ('participant', 'task_index', 'task_name', 'experiment_name')
        if key in meta
    }
    # condition names are the stimulus file names without extension
    conds = [fname.split('.')[0] for fname in stimuli_fnames]
    rdms = RDMs(
        utvs,
        dissimilarity_measure='euclidean',
        descriptors=descriptors,
        rdm_descriptors=dict(participants=pnames),
        pattern_descriptors=dict(conds=conds),
    )
    if sort:
        rdms.sort_by(conds='alpha')
    return rdms
def extract_filename_segments(fpath):
    """Get information from the name of a downloaded results file

    Will determine:

    * participant_scope: 'single' or 'multiple', how many participant
        sessions this file covers.
    * task_scope: 'single' or 'multiple', how many experiment tasks this
        file covers.
    * participant: the Meadows nickname of the participant, if this is a
        single participation file.
    * task_index: the 1-based index of the task in the experiment, if
        this is a single participant file.
    * task_name: the name of the task in the experiment, if
        this is not a single participant file.
    * version: the experiment version as a string.
    * experiment_name: name of the experiment on Meadows.
    * structure: the structure of the data contained, one of 'tree',
        'events', '1D', '2D', etc.
    * filetype: the file extension and file format used to serialize the
        data.

    Args:
        fpath (str): File system path to downloaded file

    Returns:
        dict: Dictionary with the fields described above.
    """
    # Split on the LAST dot so stray dots earlier in the file name do not
    # break the (name, extension) unpacking; split('.') raised ValueError
    # for names containing more than one dot.
    fname, ext = basename(fpath).rsplit('.', 1)
    segments = fname.split('_')
    info = dict(
        task_scope='single',
        version=segments[3].replace('v', ''),
        experiment_name=segments[1],
        structure=segments[-1],
        filetype=ext
    )
    # A digit in the second-to-last segment is the 1-based task index,
    # which only appears in single-participant downloads; otherwise that
    # segment holds the task name of a multi-participant download.
    if segments[-2].isdigit():
        info['participant_scope'] = 'single'
        info['participant'] = segments[-3]
        info['task_index'] = int(segments[-2])
    else:
        info['participant_scope'] = 'multiple'
        info['task_name'] = segments[-2]
    return info
| 34.724771 | 76 | 0.624306 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,190 | 0.5786 |
697c06dc9db41cd68206db686a97122825b501db | 2,406 | py | Python | wqxlib-python/wqxlib/wqx_v3_0/AttachedBinaryObject.py | FlippingBinary/wqxlib | 77cb9d98fca8872dedc7dfc93c7ada2a5193a8e9 | [
"MIT"
] | null | null | null | wqxlib-python/wqxlib/wqx_v3_0/AttachedBinaryObject.py | FlippingBinary/wqxlib | 77cb9d98fca8872dedc7dfc93c7ada2a5193a8e9 | [
"MIT"
] | null | null | null | wqxlib-python/wqxlib/wqx_v3_0/AttachedBinaryObject.py | FlippingBinary/wqxlib | 77cb9d98fca8872dedc7dfc93c7ada2a5193a8e9 | [
"MIT"
] | null | null | null | from ..common import WQXException
from .SimpleContent import (
BinaryObjectFileName,
BinaryObjectFileTypeCode
)
from yattag import Doc
class AttachedBinaryObject:
  """Reference document, image, photo, GIS data layer, laboratory material or other electronic object attached within a data exchange, as well as information used to describe the object."""

  # Name of the attached file (required for XML generation)
  __binaryObjectFileName: BinaryObjectFileName
  # File type code of the attached file (required for XML generation)
  __binaryObjectFileTypeCode: BinaryObjectFileTypeCode

  def __init__(self, o=None, *,
    binaryObjectFileName:BinaryObjectFileName = None,
    binaryObjectFileTypeCode:BinaryObjectFileTypeCode = None
  ):
    if isinstance(o, AttachedBinaryObject):
      # Assign attributes from object without typechecking
      self.__binaryObjectFileName = o.binaryObjectFileName
      self.__binaryObjectTypeCode = o.binaryObjectFileTypeCode
    elif isinstance(o, dict):
      # Assign attributes from dictionary with typechecking.
      # BUG FIX: dict.get() does not accept a 'default' keyword argument;
      # the previous o.get(key, default=None) raised a TypeError.
      self.binaryObjectFileName = o.get('binaryObjectFileName')
      self.binaryObjectFileTypeCode = o.get('binaryObjectFileTypeCode')
    else:
      # Assign attributes from named keywords with typechecking
      self.binaryObjectFileName = binaryObjectFileName
      self.binaryObjectFileTypeCode = binaryObjectFileTypeCode

  @property
  def binaryObjectFileName(self) -> BinaryObjectFileName:
    return self.__binaryObjectFileName
  @binaryObjectFileName.setter
  def binaryObjectFileName(self, val:BinaryObjectFileName) -> None:
    self.__binaryObjectFileName = BinaryObjectFileName(val)

  @property
  def binaryObjectFileTypeCode(self) -> BinaryObjectFileTypeCode:
    return self.__binaryObjectFileTypeCode
  @binaryObjectFileTypeCode.setter
  def binaryObjectFileTypeCode(self, val:BinaryObjectFileTypeCode) -> None:
    self.__binaryObjectFileTypeCode = BinaryObjectFileTypeCode(val)

  def generateXML(self, name:str = 'AttachedBinaryObject') -> str:
    """Serialize this object to a WQX XML fragment wrapped in tag *name*.

    Raises:
      WQXException: when a required attribute is missing.
    """
    doc, tag, text, line = Doc().ttl()
    with tag(name):
      if self.__binaryObjectFileName is None:
        raise WQXException("Attribute 'binaryObjectFileName' is required.")
      line('BinaryObjectFileName', self.__binaryObjectFileName)
      if self.__binaryObjectFileTypeCode is None:
        raise WQXException("Attribute 'binaryObjectFileTypeCode' is required.")
      line('BinaryObjectFileTypeCode', self.__binaryObjectFileTypeCode)
    return doc.getvalue()
| 42.210526 | 189 | 0.778055 | 2,265 | 0.941397 | 0 | 0 | 556 | 0.231089 | 0 | 0 | 565 | 0.23483 |
697c55e84fe6b0046982d7cd4e08ffb0f7ad65ef | 1,185 | py | Python | instrument.py | silvergl/spyder | b23698626a4b9e50af83aef390af14a01df7e540 | [
"MIT"
] | null | null | null | instrument.py | silvergl/spyder | b23698626a4b9e50af83aef390af14a01df7e540 | [
"MIT"
] | null | null | null | instrument.py | silvergl/spyder | b23698626a4b9e50af83aef390af14a01df7e540 | [
"MIT"
] | null | null | null | import os
import importlib
import pathlib
from tools.Aspect import ModuleAspectizer
# Debug output: show the current working directory contents.
path = os.getcwd()
dir_list = os.listdir(path)
print(dir_list)
# Aspect weaver that will wrap every module collected below.
aspect = ModuleAspectizer()
print('new')
# Modules that must NOT be instrumented (entry points, generated files, ...).
exceptions = ['pyplot.py', 'setup.py', 'bootstrap.py', 'instrument.py',
        'windows.py','pybloom.py', 'switcher.py','mainwindow.py']
def load_and_instrument(item):
    """Unused placeholder; loading/instrumenting is done inline below."""
    pass
# Walk the 'spyder' package tree, import every eligible module and register
# it with the aspectizer.
for root, dirs, files in os.walk('spyder'):
    print(files)
    # Skip test and config sub-packages entirely.
    if 'tests' in dirs:
        dirs.remove('tests')
    if 'config' in dirs:
        dirs.remove('config')
    for f in files:
        # Only plain .py modules, not excluded and without underscores.
        if (f not in exceptions
            and '_' not in f
            #and 'py' in f
            and f[-3:]=='.py'):
            print(f)
            filename = os.path.basename(f)[:-3]
            filedir = os.path.join(root,f)
            spec = importlib.util.spec_from_file_location(
                filename,
                filedir)
            spec.__name__
            # NOTE(review): the line above is a no-op attribute access with
            # no effect; it can likely be removed.
            module = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(module)
            #print(module)
            aspect.add_module(module)
# Weave aspects into all registered modules, then start the application.
aspect.instrumentize()
import bootstrap | 28.214286 | 71 | 0.566245 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 181 | 0.152743 |
697cb4df0fee5fb2e4b8167c3cfa2dce9ba8fde7 | 3,564 | py | Python | tests/test_puls_util.py | xiaohan2012/capitalization-restoration-train | 24f9236a553ac91f4e291625e5616d8558f80d3e | [
"MIT"
] | 1 | 2020-03-07T01:25:21.000Z | 2020-03-07T01:25:21.000Z | tests/test_puls_util.py | xiaohan2012/capitalization-restoration-train | 24f9236a553ac91f4e291625e5616d8558f80d3e | [
"MIT"
] | null | null | null | tests/test_puls_util.py | xiaohan2012/capitalization-restoration-train | 24f9236a553ac91f4e291625e5616d8558f80d3e | [
"MIT"
] | null | null | null | import json
import os
import codecs
from capitalization_train.puls_util import (separate_title_from_body,
extract_and_capitalize_headlines_from_corpus,
get_input_example,
get_doc_ids_from_file,
convert_sentence_auxil_to_request)
from nose.tools import assert_equal
# Directory containing this test module; fixture files live in ./data.
CURDIR = os.path.dirname(os.path.realpath(__file__))
# Fixture: the (uncapitalized) title sentence of the sample document.
with codecs.open(CURDIR + '/data/001BBB8BFFE6841FA498FCE88C43B63A.title.json') as f:
    title_sent = json.loads(f.read())
# Fixture: the expected capitalized version of the same title sentence.
with codecs.open(CURDIR + '/data/001BBB8BFFE6841FA498FCE88C43B63A.title-cap.json') as f:
    cap_title_sent = json.loads(f.read())
# Fixture: the body sentences of the sample document.
with codecs.open(CURDIR + '/data/001BBB8BFFE6841FA498FCE88C43B63A.body.json') as f:
    body_sents = json.loads(f.read())
def test_separate_title_from_body():
    """The sample doc splits into one title and twenty body sentences."""
    assert_equal.__self__.maxDiff = None
    doc_path = CURDIR + '/data/docs_okformed/001BBB8BFFE6841FA498FCE88C43B63A'
    titles, bodies = separate_title_from_body(
        doc_path + ".auxil", doc_path + ".paf")
    assert_equal(len(titles), 1)
    assert_equal(len(bodies), 20)
    assert_equal(titles[0], title_sent)
def test_extract_and_capitalize_headlines_from_corpus():
    """Headlines are extracted and capitalized for both corpus documents."""
    doc_ids = ['EEBADC60811702C931B0F6CB61CE9054',
               '4271571E96D5C726ECFDDDAACA74A264']
    corpus_dir = '/cs/fs/home/hxiao/code/capitalization_train/test_data/puls_format_raw/'
    result = list(extract_and_capitalize_headlines_from_corpus(
        corpus_dir, doc_ids)
    )
    assert_equal(len(result), 2)
    assert_equal(result[0][0], None)
    assert_equal(len(result[0][1][1]), 1)
    assert_equal(result[0][1][0], 'EEBADC60811702C931B0F6CB61CE9054')
    assert_equal(result[0][1][1],
                 [[u'Microsoft', u'Gives', u'New', u'Brand', u'Identity',
                   u'to', u'Nokia', u'Retail', u'Stores']])
    # BUG FIX: the original filter() used Python-2-only tuple parameter
    # unpacking in its lambda ('lambda (_, (docid, __)): ...'), which is a
    # SyntaxError on Python 3. A list comprehension is equivalent and stays
    # indexable on both versions.
    result1 = [item for item in result
               if item[1][0] == '4271571E96D5C726ECFDDDAACA74A264']
    assert_equal(len(result1[0][1][1]), 2)
def test_input_example():
    """get_input_example pairs the capitalized title with the raw body."""
    actual = get_input_example(
        CURDIR + '/data/docs_okformed/',
        CURDIR + '/data/docs_malformed/',
        '001BBB8BFFE6841FA498FCE88C43B63A'
    )
    # BUG FIX: the expected value previously used map(), which returns a
    # lazy iterator on Python 3 and never compares equal to a list; a list
    # comprehension behaves identically on Python 2 and 3. Debug prints of
    # the fixtures were also removed.
    expected = {
        "capitalizedSentences": [
            convert_sentence_auxil_to_request(cap_title_sent)],
        "otherSentences": [
            convert_sentence_auxil_to_request(s) for s in body_sents],
    }
    assert_equal(actual, expected)
def test_convert_sentence_auxil_to_request():
    """An auxil sentence is reduced to its number, tokens and POS tags."""
    tokens = ['Nanobiotix', 'Gets', 'Early', 'Positive', 'Safety', 'Results']
    lemmas = ['nanobiotix', 'get', 'early', 'positive', 'safety', 'result']
    pos_tags = ['name_oov', 'tv', 'd', 'adj', 'n', 'n']
    auxil_sentence = {
        "sentno": 0,
        "start": 51,
        "end": 128,
        "features": [
            {"lemma": lemma, "pos": pos, "token": token}
            for lemma, pos, token in zip(lemmas, pos_tags, tokens)
        ],
    }
    converted = convert_sentence_auxil_to_request(auxil_sentence)
    assert_equal(converted, {'no': 0, 'tokens': tokens, 'pos': pos_tags})
def test_get_doc_ids_from_file():
    """The sample docids fixture lists exactly four document ids."""
    doc_ids = get_doc_ids_from_file(CURDIR + '/data/docids.txt')
    assert_equal(len(doc_ids), 4)
| 40.044944 | 355 | 0.62486 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,014 | 0.284512 |
697cf530cd2b74e55b1269aa017262e1f824f075 | 906 | py | Python | python/app.py | ncolesummers/microservices-calculator | 15ec48ef8eb7278f6d25854afe1ee79b0f6fae0c | [
"MIT"
] | null | null | null | python/app.py | ncolesummers/microservices-calculator | 15ec48ef8eb7278f6d25854afe1ee79b0f6fae0c | [
"MIT"
] | null | null | null | python/app.py | ncolesummers/microservices-calculator | 15ec48ef8eb7278f6d25854afe1ee79b0f6fae0c | [
"MIT"
] | null | null | null | import flask
from flask import request, jsonify
from flask_cors import CORS
import math
from wasmer import engine, Store, Module, Instance
app = flask.Flask(__name__)
# Allow cross-origin requests so the calculator frontend can call this service.
CORS(app)
@app.route('/add', methods=['POST'])
def add():
    """Add two numbers inside a WebAssembly module.

    Expects a JSON body with 'operandOne' and 'operandTwo'; returns
    JSON of the form {"result": <sum>}.
    """
    store = Store()
    # Let's compile the module to be able to execute it!
    # The WebAssembly text below exports a single 'sum' (f32, f32) -> f32.
    module = Module(store, """
    (module
    (type (func (param f32 f32) (result f32)))
    (func (export "sum") (type 0) (param f32) (param f32) (result f32)
        local.get 0
        local.get 1
        f32.add))
    """)
    # Now the module is compiled, we can instantiate it.
    instance = Instance(module)
    content = request.json
    [operand_one, operand_two] = [float(content['operandOne']), float(content['operandTwo'])]
    print(f"Calculating {operand_one} + {operand_two}", flush=True)
    out = jsonify({"result": instance.exports.sum(operand_one, operand_two)})
    return out
# Bind to all interfaces (container deployment); Flask's default port 5000.
app.run(host="0.0.0.0")
| 24.486486 | 91 | 0.674393 | 0 | 0 | 0 | 0 | 699 | 0.771523 | 0 | 0 | 390 | 0.430464 |
697d4986488b41c5f319419410ec5864bd44270b | 1,815 | py | Python | src/tools/nuscenes-devkit/prediction/tests/test_backbone.py | jie311/TraDeS | 896491a159abe65f61c6ad05662cda6e28d137a6 | [
"MIT"
] | 475 | 2021-03-13T16:33:36.000Z | 2022-03-30T06:00:39.000Z | src/tools/nuscenes-devkit/prediction/tests/test_backbone.py | jie311/TraDeS | 896491a159abe65f61c6ad05662cda6e28d137a6 | [
"MIT"
] | 50 | 2021-03-17T04:48:20.000Z | 2022-03-08T13:55:32.000Z | src/tools/nuscenes-devkit/prediction/tests/test_backbone.py | jie311/TraDeS | 896491a159abe65f61c6ad05662cda6e28d137a6 | [
"MIT"
] | 98 | 2021-03-14T12:12:49.000Z | 2022-03-19T16:19:13.000Z | import unittest
import torch
from torchvision.models.resnet import BasicBlock, Bottleneck
from nuscenes.prediction.models.backbone import ResNetBackbone, MobileNetBackbone
class TestBackBones(unittest.TestCase):
    """Sanity checks for the prediction-model backbone factories."""
    def count_layers(self, model):
        """Count the weighted layers of a ResNet given as list(backbone.children()).

        Children 4-7 are the residual stages; a BasicBlock has 2 convs and
        a Bottleneck 3, and the +2 accounts for the remaining two weighted
        layers (stem conv and final fc in the standard torchvision layout).
        """
        if isinstance(model[4][0], BasicBlock):
            n_convs = 2
        elif isinstance(model[4][0], Bottleneck):
            n_convs = 3
        else:
            raise ValueError("Backbone layer block not supported!")
        return sum([len(model[i]) for i in range(4, 8)]) * n_convs + 2
    def test_resnet(self):
        """Each ResNet variant has the expected feature width and depth;
        an unsupported variant name raises ValueError."""
        rn_18 = ResNetBackbone('resnet18')
        rn_34 = ResNetBackbone('resnet34')
        rn_50 = ResNetBackbone('resnet50')
        rn_101 = ResNetBackbone('resnet101')
        rn_152 = ResNetBackbone('resnet152')
        tensor = torch.ones((1, 3, 100, 100))
        self.assertEqual(rn_18(tensor).shape[1], 512)
        self.assertEqual(rn_34(tensor).shape[1], 512)
        self.assertEqual(rn_50(tensor).shape[1], 2048)
        self.assertEqual(rn_101(tensor).shape[1], 2048)
        self.assertAlmostEqual(rn_152(tensor).shape[1], 2048)
        self.assertEqual(self.count_layers(list(rn_18.backbone.children())), 18)
        self.assertEqual(self.count_layers(list(rn_34.backbone.children())), 34)
        self.assertEqual(self.count_layers(list(rn_50.backbone.children())), 50)
        self.assertEqual(self.count_layers(list(rn_101.backbone.children())), 101)
        self.assertEqual(self.count_layers(list(rn_152.backbone.children())), 152)
        with self.assertRaises(ValueError):
            ResNetBackbone('resnet51')
    def test_mobilenet(self):
        """The MobileNetV2 backbone outputs 1280 feature channels."""
        mobilenet = MobileNetBackbone('mobilenet_v2')
        tensor = torch.ones((1, 3, 100, 100))
        self.assertEqual(mobilenet(tensor).shape[1], 1280) | 34.903846 | 82 | 0.665014 | 1,639 | 0.90303 | 0 | 0 | 0 | 0 | 0 | 0 | 113 | 0.062259 |
697e7c1a9cd37680897c57f1ed904545d8a6557a | 379 | py | Python | py_code/Qrbar_test.py | xiaofu98/cv_projects | 15ccfab4f965247716057feb9149168ea2ee2adc | [
"Apache-2.0"
] | null | null | null | py_code/Qrbar_test.py | xiaofu98/cv_projects | 15ccfab4f965247716057feb9149168ea2ee2adc | [
"Apache-2.0"
] | null | null | null | py_code/Qrbar_test.py | xiaofu98/cv_projects | 15ccfab4f965247716057feb9149168ea2ee2adc | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# @File : Qrbar_test.py
import cv2
import numpy as np
from pyzbar.pyzbar import decode
# Load the test image and print every barcode/QR code pyzbar detects in it.
img = cv2.imread('qrcode.png')
for barcode in decode(img):
    # Decoded payload, both as text and as raw bytes.
    print(barcode.data.decode('utf-8'))
    print(barcode.data)
    # Polygon corners reshaped to the (N, 1, 2) contour format OpenCV uses.
    pts = np.array([barcode.polygon], np.int32)
    pts = pts.reshape((-1, 1, 2))
    print(pts)
    # Axis-aligned bounding rectangle of the detected code.
    print(barcode.rect)
| 22.294118 | 47 | 0.649077 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 86 | 0.226913 |
697eaf7d0770ae77e9ee86fbc0c32df38b9e8710 | 1,336 | py | Python | jupyterlab-mlops/jupyter_server_config.py | ddebowczyk92/jupyter-images | 388b8b3f741d3d1b44c3f4609c70636c7c3cbc5e | [
"MIT"
] | null | null | null | jupyterlab-mlops/jupyter_server_config.py | ddebowczyk92/jupyter-images | 388b8b3f741d3d1b44c3f4609c70636c7c3cbc5e | [
"MIT"
] | null | null | null | jupyterlab-mlops/jupyter_server_config.py | ddebowczyk92/jupyter-images | 388b8b3f741d3d1b44c3f4609c70636c7c3cbc5e | [
"MIT"
] | null | null | null | import os
import requests
import time
# 'get_config' is injected by Jupyter when this config file is loaded.
c = get_config()  # noqa: F821
# Listen on all interfaces (container use), default port 8888, no browser.
c.ServerApp.ip = "0.0.0.0"
c.ServerApp.port = 8888
c.ServerApp.open_browser = False
def get_gooogle_instance_attribute(attribute_name):
    """Fetch a custom instance attribute from the GCE metadata server.

    Args:
        attribute_name: name of the instance attribute to read.

    Returns:
        The attribute value as text, or None when the attribute does not
        exist or the metadata server cannot be reached.
    """
    try:
        response = requests.get(
            f'http://metadata.google.internal/computeMetadata/v1/instance/attributes/{attribute_name}',
            headers={'Metadata-Flavor': 'Google'},
            # Bounded wait so a missing metadata server cannot hang startup.
            timeout=5)
        if response.status_code == 200:
            return response.text
        return None
    except requests.RequestException:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # are no longer swallowed; any network failure just yields None.
        return None
try:
    # Detect a Vertex AI managed notebook: its instance metadata advertises
    # the 'Container' framework and a browser proxy URL.
    maybe_vertex_framework = get_gooogle_instance_attribute('framework')
    assert maybe_vertex_framework == 'Container' # Vertex AI Notebook
    # The proxy URL may appear a little after boot; poll for up to ~60s.
    for _ in range(60):
        proxy_url = get_gooogle_instance_attribute('proxy-url')
        if proxy_url is not None:
            break
        time.sleep(1)
    assert proxy_url.endswith('notebooks.googleusercontent.com') # Proxy was set
    # Allow cross-origin requests from the proxy and use Vertex's port.
    c.ServerApp.allow_origin_pat = 'https://' + proxy_url
    c.ServerApp.port = 8080
except Exception: # not running on Vertex AI
    # Any failure above simply means we are not on Vertex AI; keep defaults.
    pass
# https://github.com/jupyter/notebook/issues/3130
c.FileContentsManager.delete_to_trash = False
# Change default umask for all subprocesses of the notebook server if set in
# the environment
if "NB_UMASK" in os.environ:
    os.umask(int(os.environ["NB_UMASK"], 8))
| 31.809524 | 107 | 0.694611 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 435 | 0.325599 |
697eefb12f54063cf9dd510696fac402496e0d04 | 1,346 | py | Python | src/loss/mixup.py | jiangtaoo2333/StaticGestureRecognition | 9d554b137f217f3bcb046b2c6978b9487685de2a | [
"MIT"
] | null | null | null | src/loss/mixup.py | jiangtaoo2333/StaticGestureRecognition | 9d554b137f217f3bcb046b2c6978b9487685de2a | [
"MIT"
] | null | null | null | src/loss/mixup.py | jiangtaoo2333/StaticGestureRecognition | 9d554b137f217f3bcb046b2c6978b9487685de2a | [
"MIT"
] | null | null | null | '''
@Author: Jiangtao
@Date: 2020-02-25 16:13:42
@LastEditors: Jiangtao
@LastEditTime: 2020-07-06 14:01:11
@Description:
'''
import numpy as np
np.set_printoptions(threshold=np.inf)
import torch
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
def mixup_data(x, y, device, alpha=1.0):
    """Return mixed inputs, pairs of targets, and the mixing coefficient.

    Implements mixup: each sample in the batch is blended with a randomly
    chosen other sample; alpha parametrizes the Beta distribution the
    mixing coefficient is drawn from (alpha <= 0 disables mixing).
    """
    lam = np.random.beta(alpha, alpha) if alpha > 0 else 1
    permutation = torch.randperm(x.size()[0]).to(device)
    blended = lam * x + (1 - lam) * x[permutation, :]
    return blended, y, y[permutation], lam
def randomMask(imgData, device, patch_size=32):
    """Overwrite one random square patch per image with uniform noise, in place.

    Generalized from the original hard-coded 32x32 patch on a 4x4 grid
    (i.e. 128x128 inputs): the grid is now derived from the image size, so
    any H and W that are multiples of patch_size work. Behavior for
    128x128 inputs with the default patch_size is unchanged.

    Args:
        imgData: tensor of shape (batch, channels, H, W); only channel 0
            is masked, matching the original single-channel use case.
        device: torch device for the generated noise tensor.
        patch_size: side length of the square noise patch (default 32).

    Returns:
        The same tensor, modified in place.
    """
    batch_size = imgData.shape[0]
    grid_h = imgData.shape[2] // patch_size
    grid_w = imgData.shape[3] // patch_size
    for i in range(batch_size):
        noise = np.random.uniform(low=0.0, high=1.0,
                                  size=(patch_size, patch_size))
        noise = torch.from_numpy(noise).to(device)
        # Pick a random cell of the patch grid for this image.
        x = np.random.randint(0, grid_h)
        y = np.random.randint(0, grid_w)
        imgData[i][0][x * patch_size:(x + 1) * patch_size,
                      y * patch_size:(y + 1) * patch_size] = noise
    return imgData
def mixup_criterion(pred, y_a, y_b, lam):
    """Mixup loss: the lam-weighted sum of NLL losses against both targets.

    Args:
        pred: log-probabilities, shape (batch, classes).
        y_a, y_b: the two target label tensors being mixed.
        lam: mixing coefficient in [0, 1].
    """
    # BUG FIX: 'F' was never imported at module level, so this function
    # raised NameError when called; import it locally.
    import torch.nn.functional as F
    return lam * F.nll_loss(pred, y_a) + (1 - lam) * F.nll_loss(pred, y_b)
if __name__ == '__main__':
    # Smoke test: mask one random patch of a random image and show the result.
    imgData1 = torch.from_numpy(
        np.random.uniform(low=0.0, high=1.0, size=(1, 1, 128, 128)))
    print(imgData1[0][0])
    # BUG FIX: randomMask requires a device argument; it was previously
    # called without one, which raised a TypeError.
    imgData2 = randomMask(imgData1, torch.device('cpu'))
    print(imgData2[0][0])
| 24.035714 | 76 | 0.627043 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 194 | 0.144131 |
697efc5cbbfa3ced1db0b2aa6c18af2b642f623b | 376 | py | Python | task2.py | gor-dimm/prog_lr5 | 5fea4920a3b6677dc57ec8358c879d0e7d0cdd02 | [
"MIT"
] | null | null | null | task2.py | gor-dimm/prog_lr5 | 5fea4920a3b6677dc57ec8358c879d0e7d0cdd02 | [
"MIT"
] | null | null | null | task2.py | gor-dimm/prog_lr5 | 5fea4920a3b6677dc57ec8358c879d0e7d0cdd02 | [
"MIT"
] | null | null | null | #Среднее гармоническое
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
def harmid(*args):
    """Harmonic mean of the arguments.

    Returns None for an empty argument list or when any argument is zero,
    since the harmonic mean is undefined in both cases.
    """
    if not args or 0 in args:
        return None
    reciprocal_sum = sum(1 / value for value in args)
    return len(args) / reciprocal_sum
if __name__ == "__main__":
print(harmid())
print(harmid(1, 3, 5, 7, 9))
print(harmid(2, 4, 6, 8, 10, 12)) | 20.888889 | 37 | 0.507979 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 100 | 0.252525 |
6981e639a7cb29d73552df37a160915f52bb7650 | 1,187 | py | Python | login.py | josip8/EwilTwin-attack | d9bd8444af635177b87e48d7400e73aaf9a17f23 | [
"MIT"
] | null | null | null | login.py | josip8/EwilTwin-attack | d9bd8444af635177b87e48d7400e73aaf9a17f23 | [
"MIT"
] | null | null | null | login.py | josip8/EwilTwin-attack | d9bd8444af635177b87e48d7400e73aaf9a17f23 | [
"MIT"
] | null | null | null | import argparse
import sys
import datetime
import json
import logging
import re
import random
import requests
import shutil
from pyquery import PyQuery as pq
def main(username, password):
    """Log the given account in over a fresh requests session.

    Side effects only: configures logging and performs the login request.
    """
    logging.basicConfig(filename='logging.log', level=logging.DEBUG)
    session = requests.session()
    # BUG FIX: login() has no return statement (returns None), so the former
    # 'uid, dtsg = login(...)' always raised a TypeError when unpacking.
    login(session, username, password)
def login(session, username, password):
    """Submit the Facebook login form for *username* within *session*.

    Mutates the session's cookie jar as a side effect; returns None.
    """
    # Navigate to the Facebook homepage
    response = session.get('https://facebook.com')
    # Construct the DOM
    dom = pq(response.text)
    # Get the lsd value from the HTML. This is required to make the login request
    lsd = dom('[name="lsd"]').val()
    # Perform the login request
    response = session.post('https://www.facebook.com/login.php?login_attempt=1', data={
        'lsd': lsd,
        'email': username,
        'pass': password,
        'default_persistent': '0',
        'timezone': '-60',
        'lgndim': '',
        'lgnrnd': '',
        'lgnjs': '',
        'locale':'en_GB',
        'qsstamp': ''
    })
    # BUG FIX: 'print len(...)' is Python-2-only statement syntax; the call
    # form below is valid (and prints the same thing) on Python 2 and 3.
    print(len(response.text))
    sys.stdout.flush()
try:
    main(username=sys.argv[1], password=sys.argv[2])
except Exception as e:
    # BUG FIX: 'except Exception, e' is Python-2-only syntax; 'as e' is
    # valid on Python 2.6+ and Python 3.
    logging.exception(e)
| 21.981481 | 88 | 0.6369 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 370 | 0.31171 |
6982204821afb20223d10e2d3df47326e8eb7d6c | 1,347 | py | Python | piptegrator/piptegrator.py | MartinFalatic/piptegrator | 3c7efa76e8581afdbb5595232dea4ba6d6da2803 | [
"MIT"
] | 1 | 2020-07-23T22:19:07.000Z | 2020-07-23T22:19:07.000Z | piptegrator/piptegrator.py | MartinFalatic/piptegrator | 3c7efa76e8581afdbb5595232dea4ba6d6da2803 | [
"MIT"
] | 3 | 2019-11-05T23:31:03.000Z | 2020-05-17T03:03:11.000Z | piptegrator/piptegrator.py | MartinFalatic/piptegrator | 3c7efa76e8581afdbb5595232dea4ba6d6da2803 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
"""
from __future__ import print_function
import argparse
import sys
from . import common
from . import helper
from . import vcs_tool
# Shared parameters; 'this_script' is the display name derived from this file.
PARAMS = {}
PARAMS['this_script'] = common.get_script_name_from_filename(__file__)
def setup_and_dispatch():
    """Parse the top-level command line and dispatch to the chosen tool.

    At most one of --compile (compile/scrub requirements) or --commit
    (commit to the configured VCS) may be given; unrecognized arguments
    are forwarded to the dispatched tool. With no option, print help.
    """
    parser = argparse.ArgumentParser(
        description=common.format_title(PARAMS['this_script']),
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument('--compile', action='store_true',
                        help='Compile and scrub requirements')
    parser.add_argument('--commit', action='store_true',
                        help='Commit to configured VCS')
    # parse_known_args keeps unrecognized options for the dispatched tool.
    # (A former try/except around this call only re-raised the exception
    # unchanged, so it was removed.)
    args, extra_args = parser.parse_known_args()

    print(common.format_title(PARAMS['this_script']))
    print()

    if sum(map(bool, [args.compile, args.commit])) > 1:
        common.exit_with_error('Error: Only one top-level option may be specified', parser=parser)

    if args.compile:
        helper.main(scriptname=PARAMS['this_script'], args=extra_args)
    elif args.commit:
        vcs_tool.main(scriptname=PARAMS['this_script'], args=extra_args)
    else:
        parser.print_help(sys.stderr)
def main():
    """Console entry point: dispatch the CLI and exit with status 0."""
    setup_and_dispatch()
    sys.exit(0)
if __name__ == "__main__":
    main()
| 24.490909 | 98 | 0.672606 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 258 | 0.191537 |
69826f25e21c7ee10d4ed46dec1e788f5a40e5c2 | 411 | py | Python | seller/models.py | apt-developer/SoloMall | 40cdce6829910aad9955e49a90e386bf5d1b1a5f | [
"MIT"
] | null | null | null | seller/models.py | apt-developer/SoloMall | 40cdce6829910aad9955e49a90e386bf5d1b1a5f | [
"MIT"
] | null | null | null | seller/models.py | apt-developer/SoloMall | 40cdce6829910aad9955e49a90e386bf5d1b1a5f | [
"MIT"
] | null | null | null | from django.db import models
# from themall.models import Customer
# Create your models here.
class Seller(models.Model):
    """A seller's storefront, linked one-to-one to a customer account."""
    # Keyed on the customer's unique email; deleting the customer cascades
    # to this seller record.
    email = models.OneToOneField('themall.Customer', on_delete=models.CASCADE, to_field='email')
    store_name = models.CharField(max_length=100)
    # URL-friendly identifier for the store.
    slug = models.SlugField(max_length=100)
    description = models.TextField(max_length=1000)
    def __str__(self):
        # Represent the seller as its linked customer's email.
        return self.email.__str__() | 25.6875 | 94 | 0.77129 | 312 | 0.759124 | 0 | 0 | 0 | 0 | 0 | 0 | 88 | 0.214112 |
698282c465446336db3bddcf6e550097754582e0 | 2,392 | py | Python | test/pytest/service-bluetooth/test_pairing_hmi_perspective.py | bitigchi/MuditaOS | 425d23e454e09fd6ae274b00f8d19c57a577aa94 | [
"BSL-1.0"
] | 369 | 2021-11-10T09:20:29.000Z | 2022-03-30T06:36:58.000Z | test/pytest/service-bluetooth/test_pairing_hmi_perspective.py | bitigchi/MuditaOS | 425d23e454e09fd6ae274b00f8d19c57a577aa94 | [
"BSL-1.0"
] | 149 | 2021-11-10T08:38:35.000Z | 2022-03-31T23:01:52.000Z | test/pytest/service-bluetooth/test_pairing_hmi_perspective.py | bitigchi/MuditaOS | 425d23e454e09fd6ae274b00f8d19c57a577aa94 | [
"BSL-1.0"
] | 41 | 2021-11-10T08:30:37.000Z | 2022-03-29T08:12:46.000Z | # Copyright (c) 2017-2021, Mudita Sp. z.o.o. All rights reserved.
# For licensing, see https://github.com/mudita/MuditaOS/LICENSE.md
import time
import pytest
from harness import log
from harness.dom_parser_utils import *
from harness.interface.defs import key_codes
from bt_fixtures import *
@pytest.mark.rt1051
@pytest.mark.usefixtures("bt_all_devices")
@pytest.mark.usefixtures("bt_reset")
@pytest.mark.usefixtures("bt_main_window")
@pytest.mark.usefixtures("phone_in_desktop")
@pytest.mark.usefixtures("phone_unlocked")
@pytest.mark.skipif("not config.getvalue('--bt_device')", reason='--bt_device was not specified')
def test_bt_pairing_hmi(harness, bt_device):
    """Pair with the given Bluetooth device through the phone's HMI.

    If the device is not already in the all-devices history, trigger a
    scan and poll for it; then navigate the device list downwards and
    verify the target entry gains focus.
    """
    if not bt_device:
        return
    bt_device_name = bt_device
    current_window_content = get_window_content(harness, 1)
    is_device_in_history = item_contains_recursively(
        current_window_content, 'TextValue', bt_device_name)
    if not is_device_in_history:
        log.info("Device {} not in all devices history, scanning...".format(bt_device_name))
        harness.connection.send_key_code(key_codes["left"])
        max_try_count = 5
        for _ in range(max_try_count):
            time.sleep(2)
            current_window_content = get_window_content(harness, 1)
            is_device_in_history = item_contains_recursively(
                current_window_content, 'TextValue', bt_device_name)
            if is_device_in_history:
                break
            log.info("Device {} not found, retrying...".format(bt_device_name))
        # BUG FIX: the previous 'assert max_try_count' was always truthy, so
        # the test continued even when scanning never found the device; fail
        # here when the device did not show up.
        assert is_device_in_history
    current_window_content = get_window_content(harness, 1)
    parent_of_list_items = find_parent(current_window_content, 'ListItem')
    steps_to_navigate_down = get_child_number_that_contains_recursively(
        parent_of_list_items, [('TextValue', bt_device_name)])
    assert steps_to_navigate_down > -1
    log.info("Navigating to the {} device, {} down".format(bt_device_name, steps_to_navigate_down))
    for _ in range(steps_to_navigate_down):
        harness.connection.send_key_code(key_codes["down"])
    log.info("Checking if device {} is focused...".format(bt_device_name))
    current_window_content = get_window_content(harness, 1)
    parent_of_list_items = find_parent(current_window_content, 'ListItem')
    assert item_has_child_that_contains_recursively(
        parent_of_list_items, [('TextValue', bt_device_name), ('Focus', True)])
| 44.296296 | 126 | 0.743729 | 0 | 0 | 0 | 0 | 2,095 | 0.875836 | 0 | 0 | 517 | 0.216137 |
6982d7079fdcf3a58b79ac8ac6683d28485634e2 | 1,147 | py | Python | server/src/utils/mailer.py | ocskier/TutorDashboard | 4dedfee7676418660cca0043ee71db720f915cca | [
"Apache-2.0"
] | 1 | 2020-10-28T21:36:13.000Z | 2020-10-28T21:36:13.000Z | server/src/utils/mailer.py | ocskier/TutorDashboard | 4dedfee7676418660cca0043ee71db720f915cca | [
"Apache-2.0"
] | 36 | 2020-10-14T15:12:21.000Z | 2021-07-15T21:33:39.000Z | server/src/utils/mailer.py | ocskier/TutorDashboard | 4dedfee7676418660cca0043ee71db720f915cca | [
"Apache-2.0"
] | 1 | 2020-10-22T07:50:59.000Z | 2020-10-22T07:50:59.000Z | import smtplib, ssl, os
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from .html_template import emailHtml
from .text_template import emailText
port = 465
context = ssl.create_default_context()
def sendEmail(emailData):
adminUser = os.getenv("ADMIN_USERNAME")
password = os.getenv("ADMIN_PASSWORD")
sender = emailData["tutor"]
receivers = emailData["recipient"]
message = MIMEMultipart("alternative")
message["Subject"] = "Tutor Confirmation"
message["From"] = adminUser
message["To"] = receivers
message["Cc"] = sender, "centraltutor@bcs.com"
text = emailText(emailData)
html = emailHtml(emailData)
part1 = MIMEText(text, "plain")
part2 = MIMEText(html, "html")
message.attach(part1)
message.attach(part2)
try:
with smtplib.SMTP_SSL('smtp.gmail.com',port,context=context) as server:
server.login(adminUser,password)
server.sendmail(adminUser, receivers, message.as_string())
print("Successfully sent email")
except smtplib.SMTPException:
print("Error: unable to send email") | 29.410256 | 79 | 0.691369 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 211 | 0.183958 |
6982db3c2b0bfe6506ab6f5f2f11222df69cf2f3 | 2,606 | py | Python | otcextensions/sdk/dis/v2/records.py | zsoltn/python-otcextensions | 4c0fa22f095ebd5f9636ae72acbae5048096822c | [
"Apache-2.0"
] | null | null | null | otcextensions/sdk/dis/v2/records.py | zsoltn/python-otcextensions | 4c0fa22f095ebd5f9636ae72acbae5048096822c | [
"Apache-2.0"
] | null | null | null | otcextensions/sdk/dis/v2/records.py | zsoltn/python-otcextensions | 4c0fa22f095ebd5f9636ae72acbae5048096822c | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# import six
from openstack import exceptions
from openstack import resource
# Helper class to parse the returned object
class RecordsSpec(resource.Resource):
    """One record entry as returned by the DIS records API."""

    #: Record payload (string form; encoding not shown here -- presumably
    #: base64, verify against the DIS API docs).
    data = resource.Body('data', type=str)
    #: Explicit hash key supplied for the record, if any.
    explicit_hash_key = resource.Body('explicit_hash_key', type=str)
    #: Id of the partition the record belongs to.
    partition_id = resource.Body('partition_id', type=str)
    #: Partition key the record was written with.
    partition_key = resource.Body('partition_key', type=str)
    #: Sequence number of the record within its partition.
    sequence_number = resource.Body('sequence_number', type=str)
    #: Record timestamp (integer epoch value).
    timestamp = resource.Body('timestamp', type=int)
    #: Meaning of the timestamp field (e.g. when it was recorded).
    timestamp_type = resource.Body('timestamp_type', type=str)
class Records(resource.Resource):
    """DIS stream records resource (upload and download of stream data)."""

    base_path = '/records'

    # Capabilities supported by this resource.
    allow_create = True
    allow_list = True
    allow_commit = True
    allow_delete = True
    allow_fetch = True
    allow_patch = True

    # Properties
    #: Name of the stream the records belong to.
    stream_name = resource.Body('stream_name', type=str)
    #: Records returned by a fetch, parsed into RecordsSpec objects.
    records = resource.Body('records', type=list, list_type=RecordsSpec)
    #: Number of records that failed to upload.
    failed_record_count = resource.Body('failed_record_count', type=int)
    #: Cursor to use for fetching the next batch of records.
    next_partition_cursor = resource.Body('next_partition_cursor', type=str)
    #: Cursor identifying the position to start reading from.
    partition_cursor = resource.Body('partition_cursor', type=str)

    @classmethod
    def list(cls, session, ignore_missing=True, base_path=None, **params):
        """Fetch records at the partition cursor given in ``params``.

        :param session: session (or adapter-bearing object) to use.
        :param ignore_missing: when True, return None instead of raising if
            nothing is found.
        :param base_path: ignored; the path is rebuilt from the cursor.
        :returns: decoded JSON response body, or None.
        :raises exceptions.ResourceNotFound: when nothing is found and
            *ignore_missing* is False.
        """
        session = cls._get_session(session)
        # The records endpoint takes the cursor as a query parameter rather
        # than a path element, so the path is built explicitly here.
        base_path = f'records?partition-cursor={params["partition_cursor"]}'
        microversion = cls._get_microversion_for_list(session)
        data = session.get(
            base_path,
            headers={"Accept": "application/json"},
            params=None,
            microversion=microversion)
        exceptions.raise_from_response(data)
        # FIX: removed a leftover debug print of the full response body.
        result = data.json()
        if result is not None:
            return result
        if ignore_missing:
            return None
        raise exceptions.ResourceNotFound(
            "No %s found for %s" % (cls.__name__, params['name_or_id']))
| 35.69863 | 76 | 0.699156 | 1,934 | 0.742134 | 0 | 0 | 762 | 0.292402 | 0 | 0 | 1,036 | 0.397544 |
698463ef4506f64508769bee2b45a19e5d85e6f5 | 664 | py | Python | ad-insertion/frontend/main.py | dahanhan/Ad-Insertion-Sample | 12019c70a95f1d83d792e7e03d1dd5f732630558 | [
"BSD-3-Clause"
] | 82 | 2019-04-07T04:27:47.000Z | 2022-02-04T07:35:58.000Z | ad-insertion/frontend/main.py | dahanhan/Ad-Insertion-Sample | 12019c70a95f1d83d792e7e03d1dd5f732630558 | [
"BSD-3-Clause"
] | 43 | 2019-04-04T22:03:02.000Z | 2020-08-25T10:11:44.000Z | ad-insertion/frontend/main.py | dahanhan/Ad-Insertion-Sample | 12019c70a95f1d83d792e7e03d1dd5f732630558 | [
"BSD-3-Clause"
] | 54 | 2019-04-04T23:27:05.000Z | 2022-01-30T14:27:16.000Z | #!/usr/bin/python3
from tornado import ioloop, web
from tornado.options import define, options, parse_command_line
from manifest import ManifestHandler
from segment import SegmentHandler
# Route segment and manifest requests to their respective handlers; any
# sub-path under each prefix is handled by the same handler.
app = web.Application([
    (r'/segment/.*',SegmentHandler),
    (r'/manifest/.*',ManifestHandler),
])

if __name__ == "__main__":
    # Command-line options controlling the listen address.
    define("port", default=2222, help="the binding port", type=int)
    define("ip", default="127.0.0.1", help="the binding ip")
    parse_command_line()
    print("ad-insertion: frontend: Listening to " + options.ip + ":" + str(options.port), flush = True)
    app.listen(options.port, address=options.ip)
    # Blocks here, serving requests until the process is stopped.
    ioloop.IOLoop.instance().start()
| 33.2 | 103 | 0.704819 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 154 | 0.231928 |
69862f6c28259ef99ed2c352bbd7e6f2c8b73e19 | 488 | py | Python | cofile/output-filename.py | yikeke/python-side-projects | 13c49a6863eb7fc6b97c8727116ca737e29f9ea0 | [
"MIT"
] | 3 | 2020-08-10T02:48:48.000Z | 2021-09-28T16:04:05.000Z | cofile/output-filename.py | yikeke/python-side-projects | 13c49a6863eb7fc6b97c8727116ca737e29f9ea0 | [
"MIT"
] | null | null | null | cofile/output-filename.py | yikeke/python-side-projects | 13c49a6863eb7fc6b97c8727116ca737e29f9ea0 | [
"MIT"
] | null | null | null | import os
# Root of the docs checkout to scan for markdown files; adjust as needed.
DOCS_ROOT = "/Users/coco/Documents/GitHub/pingcap-upstream/docs-cn"

for root, dirs, files in os.walk(DOCS_ROOT, topdown=True):
    for name in files:
        # FIX: endswith() is stricter than the original `'.md' in name`,
        # which also matched names like "foo.md.bak" or "a.mdx".
        if name.endswith(".md"):
            filepath = os.path.join(root, name)
            print(filepath)
| 44.363636 | 108 | 0.670082 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 293 | 0.60041 |
6986ba8ae10943bbe0a19ad1d63111e25988f309 | 499 | py | Python | dbml_from_api.py | ioatzim/getbigschema | 7ec9cde9099f6f7a9a45232d598b93f55357397b | [
"Apache-2.0"
] | null | null | null | dbml_from_api.py | ioatzim/getbigschema | 7ec9cde9099f6f7a9a45232d598b93f55357397b | [
"Apache-2.0"
] | null | null | null | dbml_from_api.py | ioatzim/getbigschema | 7ec9cde9099f6f7a9a45232d598b93f55357397b | [
"Apache-2.0"
] | null | null | null | import requests
import os
import json
import datetime
'''
Pulls a dbml file from the API. User must manually add the file id, found in
the 'response_ids.json' file generated from dbml_post_to_api.py.
'''
url='http://ec2-54-167-67-34.compute-1.amazonaws.com/api/dbmls' #url of the API
id = '6192b1f31c2a512293fea940' #id of the file, taken from 'response_ids.json' generated by dbml_post_to_api.py
res = requests.get(f'{url}/{id}')
# The response JSON carries the file body in its 'contents' field as a
# JSON-encoded string; decode that inner payload here.
dbml_file = json.loads(res.json()['contents'])
| 35.642857 | 141 | 0.735471 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 362 | 0.725451 |
698812bb45241671016a8d814e5ddf45839c4060 | 400 | py | Python | fabfile.py | undertherain/dagen | f4815127bb7b660c4ffadf5f01ad4c5c0f504ddc | [
"Apache-2.0"
] | 2 | 2017-10-21T02:29:21.000Z | 2017-10-21T02:35:50.000Z | fabfile.py | undertherain/dagen | f4815127bb7b660c4ffadf5f01ad4c5c0f504ddc | [
"Apache-2.0"
] | null | null | null | fabfile.py | undertherain/dagen | f4815127bb7b660c4ffadf5f01ad4c5c0f504ddc | [
"Apache-2.0"
] | null | null | null | import os
from fabric.api import local, lcd
def clean():
    """Remove build artifacts and Python bytecode caches."""
    # Run from the directory containing this fabfile.
    with lcd(os.path.dirname(__file__)):
        local("python3.6 setup.py clean --all")
        # Delete __pycache__ directories and stray .pyc files.
        local("find . | grep -E \"(__pycache__|\.pyc$)\" | xargs rm -rf")
def make():
    """Build a wheel distribution into ./dist."""
    local("python3.6 setup.py bdist_wheel")
def deploy():
    """Run the tests, build the wheel, then upload it to PyPI via twine."""
    test()
    make()
    local("twine upload dist/*")
def test():
    """Run the unit-test suite."""
    local("python3.6 -m unittest")
| 17.391304 | 73 | 0.6 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 166 | 0.415 |
69883b20a029aa25992e0951d51a93d66e81c5a0 | 544 | py | Python | Package1/Util.py | mgi2792/WebTest | c3441c213c97dbd290b948e162fd1560da33bdd6 | [
"MIT"
] | null | null | null | Package1/Util.py | mgi2792/WebTest | c3441c213c97dbd290b948e162fd1560da33bdd6 | [
"MIT"
] | null | null | null | Package1/Util.py | mgi2792/WebTest | c3441c213c97dbd290b948e162fd1560da33bdd6 | [
"MIT"
] | null | null | null | from openpyxl import load_workbook
def getRowCount(file):
    """Return the number of used rows in the active sheet of *file*."""
    workbook = load_workbook(file)
    return workbook.active.max_row
def getColumnCount(file):
    """Return the number of used columns in the active sheet of *file*."""
    workbook = load_workbook(file)
    return workbook.active.max_column
def getCellData(file, cell):
    """Return the value at *cell*, a 1-based (row, column) pair."""
    sheet = load_workbook(file).active
    return sheet.cell(row=cell[0], column=cell[1]).value
def setCellData(file, cell, data):
    """Write *data* at *cell*, a 1-based (row, column) pair, and save *file*."""
    workbook = load_workbook(file)
    workbook.active.cell(row=cell[0], column=cell[1]).value = data
    workbook.save(file)
| 20.923077 | 56 | 0.681985 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
6988900c33f8027bb264778efef7f83d1aa37d8a | 200 | py | Python | clancy_database/__init__.py | arthurian/visualizing_russian_tools | 65fd37839dc0650bb25d1f98904da5b79ae1a754 | [
"BSD-3-Clause"
] | 2 | 2020-07-10T14:17:03.000Z | 2020-11-17T09:18:26.000Z | clancy_database/__init__.py | eelegiap/visualizing_russian_tools | 9c36baebc384133c7c27d7a7c4e0cedc8cb84e74 | [
"BSD-3-Clause"
] | 13 | 2019-03-17T13:27:31.000Z | 2022-01-18T17:03:14.000Z | clancy_database/__init__.py | eelegiap/visualizing_russian_tools | 9c36baebc384133c7c27d7a7c4e0cedc8cb84e74 | [
"BSD-3-Clause"
] | 2 | 2019-10-19T16:37:44.000Z | 2020-06-22T13:30:20.000Z | # List of tables that should be routed to this app.
# Note that this is not intended to be a complete list of the available tables.
TABLE_NAMES = (
'lemma',
'inflection',
'aspect_pair',
)
| 25 | 79 | 0.69 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 162 | 0.81 |
698a0fd866792d3cb9602f07e37599bb1277b5c0 | 399 | py | Python | differential/plugins/gazelle.py | funqc/Differential | 738ebf9a2a54ea04498b3394f80d980aad083ea7 | [
"MIT"
] | 52 | 2021-10-12T11:23:45.000Z | 2022-03-18T04:15:03.000Z | differential/plugins/gazelle.py | funqc/Differential | 738ebf9a2a54ea04498b3394f80d980aad083ea7 | [
"MIT"
] | 4 | 2021-10-15T13:58:42.000Z | 2022-03-15T12:42:35.000Z | differential/plugins/gazelle.py | funqc/Differential | 738ebf9a2a54ea04498b3394f80d980aad083ea7 | [
"MIT"
] | 5 | 2021-11-18T05:41:23.000Z | 2022-03-09T03:13:15.000Z | import argparse
from differential.plugins.base import Base
class Gazelle(Base):
    """Generic plugin for stock (structurally unmodified) Gazelle trackers."""

    @classmethod
    def get_aliases(cls):
        # Short command-line alias for selecting this plugin.
        return ("gz",)

    @classmethod
    def get_help(cls):
        # Help text shown to the user (kept verbatim).
        return "Gazelle插件,适用于未经过大规模结构改动的Gazelle站点"

    @classmethod
    def add_parser(cls, parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
        # No extra options beyond the base plugin's; delegate then return.
        super().add_parser(parser)
        return parser
| 21 | 84 | 0.691729 | 374 | 0.855835 | 0 | 0 | 337 | 0.771167 | 0 | 0 | 77 | 0.176201 |
698b7be724cf94857043bb74abc7ca1f3ac92685 | 2,398 | py | Python | tests/snapshots/snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[tr_TR-2014] 1.py | gour/holidata | 89c7323f9c5345a3ecbf5cd5a835b0e08cfebc13 | [
"MIT"
] | 32 | 2019-04-12T08:01:34.000Z | 2022-02-28T04:41:50.000Z | tests/snapshots/snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[tr_TR-2014] 1.py | gour/holidata | 89c7323f9c5345a3ecbf5cd5a835b0e08cfebc13 | [
"MIT"
] | 74 | 2019-07-09T16:35:20.000Z | 2022-03-09T16:41:34.000Z | tests/snapshots/snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[tr_TR-2014] 1.py | gour/holidata | 89c7323f9c5345a3ecbf5cd5a835b0e08cfebc13 | [
"MIT"
] | 20 | 2019-01-28T07:41:02.000Z | 2022-02-16T02:38:57.000Z | [
{
'date': '2014-01-01',
'description': 'Yılbaşı',
'locale': 'tr-TR',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2014-04-23',
'description': 'Ulusal Egemenlik ve Çocuk Bayramı',
'locale': 'tr-TR',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2014-05-01',
'description': 'Emek ve Dayanışma Günü',
'locale': 'tr-TR',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2014-05-19',
'description': "Atatürk'ü Anma, Gençlik ve Spor Bayramı",
'locale': 'tr-TR',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2014-07-28',
'description': 'Ramazan Bayramı (1. Gün)',
'locale': 'tr-TR',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2014-07-29',
'description': 'Ramazan Bayramı (2. Gün)',
'locale': 'tr-TR',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2014-07-30',
'description': 'Ramazan Bayramı (3. Gün)',
'locale': 'tr-TR',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2014-08-30',
'description': 'Zafer Bayramı',
'locale': 'tr-TR',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2014-10-04',
'description': 'Kurban Bayramı (1. Gün)',
'locale': 'tr-TR',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2014-10-05',
'description': 'Kurban Bayramı (2. Gün)',
'locale': 'tr-TR',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2014-10-06',
'description': 'Kurban Bayramı (3. Gün)',
'locale': 'tr-TR',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2014-10-07',
'description': 'Kurban Bayramı (4. Gün)',
'locale': 'tr-TR',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2014-10-29',
'description': 'Cumhuriyet Bayramı',
'locale': 'tr-TR',
'notes': '',
'region': '',
'type': 'NF'
}
] | 22.622642 | 65 | 0.373228 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,333 | 0.549238 |
698cd08ed49ed9b0de22135fa30dfe66e162d6d7 | 2,512 | py | Python | pommerman/agents/TensorFlowAgent/pit.py | IshchenkoRoman/pommerman | 117824dca6974822d90e8fc3345da32eeb43cb43 | [
"Apache-2.0"
] | null | null | null | pommerman/agents/TensorFlowAgent/pit.py | IshchenkoRoman/pommerman | 117824dca6974822d90e8fc3345da32eeb43cb43 | [
"Apache-2.0"
] | 7 | 2021-03-18T21:23:29.000Z | 2022-03-11T23:34:05.000Z | pommerman/agents/TensorFlowAgent/pit.py | IshchenkoRoman/pommerman | 117824dca6974822d90e8fc3345da32eeb43cb43 | [
"Apache-2.0"
] | null | null | null | # pommerman/cli/run_battle.py
# pommerman/agents/TensorFlowAgent/pit.py
import atexit
from datetime import datetime
import os
import random
import sys
import time
import argparse
import numpy as np
from pommerman import helpers, make
from TensorFlowAgent import TensorFlowAgent
from pommerman import utility
import tensorflow as tf
class Pit(object):
    """Plays evaluation games with a restored TensorFlow agent."""

    def __init__(self, tfa, saver, game_nums=2):
        self.tfa = tfa              # TensorFlowAgent under evaluation
        self.saver = saver          # tf.train.Saver used to restore weights
        self.game_nums = game_nums  # number of games to play

    def launch_games(self, sess, render=True):
        """Run self.game_nums episodes and return per-agent reward totals.

        :param sess: active TF session; variables are (re)initialized and the
            agent weights restored into it.
        :param render: when True, render the environment every step.
        :returns: (1, 4) numpy array of accumulated per-agent rewards
            (previously discarded; returning it is backward compatible).
        """
        sess.run(tf.global_variables_initializer())
        self.tfa.restore_weigths(sess, self.saver)
        env = self.tfa.getEnv()
        reward_board = np.zeros((1, 4))
        for i in range(self.game_nums):
            curr_state = env.reset()
            while True:
                if render:
                    env.render()
                all_actions = env.act(curr_state)
                next_state, reward, terminal, _ = env.step(all_actions)
                # BUG FIX: advance the observation; the original never updated
                # curr_state, so every step acted on the episode's initial state.
                curr_state = next_state
                if terminal:
                    reward_board += np.array(reward)
                    print("Game #{0}, rewards = {1}, reward agent = {2}".format(
                        i, "".join(str(r) + " " for r in reward),
                        reward[self.tfa.agent_id]))
                    break
        return reward_board
def main(args):
    """Build a fresh TF graph, restore the agent, and run evaluation games."""
    tf.reset_default_graph()
    with tf.Session() as sess:
        agent = TensorFlowAgent(name="TFA", args=args, sess=sess)
        # allow_empty avoids failing when the graph has no saveable variables.
        saver = tf.train.Saver(allow_empty=True)
        Pit(agent, saver, game_nums=2).launch_games(sess)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--environment", type=str, default="pommerman")
    parser.add_argument("--policy", type=str, default="MlpPolicy")
    parser.add_argument("--checkpoint_dir", type=str, default="./save_model")
    parser.add_argument("--a_learning_rate", type=float, default=0.0001)
    parser.add_argument("--c_learning_rate", type=float, default=0.0002)
    parser.add_argument('--summary_dir', type=str, default='./summary_log')
    parser.add_argument("--cliprange", type=float, default=0.2)
    parser.add_argument("--batch_size", type=int, default=32)
    parser.add_argument("--training_step", type=int, default=10)
    parser.add_argument("--gamma", type=float, default=0.9)
    parser.add_argument("--train", type=str, default="False", choices=["False"])
    # BUG FIX: choices was the single string "Simple, CNN", which made both
    # intended values unselectable from the command line.
    parser.add_argument("--type", type=str, default="Simple", choices=["Simple", "CNN"])
    args = parser.parse_args()
    main(args)
698cd0b8fccf1402595d508e1fedc902f75ee5a1 | 4,645 | py | Python | test/test_mean_average_precision.py | JuanchoWang/xcenternet | 1b6784bb3ff8bc44704a60fc6fd0b56dea190e29 | [
"Apache-2.0",
"MIT"
] | 171 | 2020-07-23T08:05:35.000Z | 2022-03-15T02:55:51.000Z | test/test_mean_average_precision.py | JuanchoWang/xcenternet | 1b6784bb3ff8bc44704a60fc6fd0b56dea190e29 | [
"Apache-2.0",
"MIT"
] | 5 | 2020-08-10T11:49:50.000Z | 2021-03-30T11:44:02.000Z | test/test_mean_average_precision.py | JuanchoWang/xcenternet | 1b6784bb3ff8bc44704a60fc6fd0b56dea190e29 | [
"Apache-2.0",
"MIT"
] | 22 | 2020-08-04T06:39:30.000Z | 2021-08-20T20:14:36.000Z | import numpy as np
import tensorflow as tf
import unittest
from xcenternet.model.evaluation.overlap import compute_overlap
from xcenternet.model.evaluation.mean_average_precision import MAP
class TestMeanAveragePrecision(unittest.TestCase):
    """Unit tests for compute_overlap() and the MAP metric."""

    def setUp(self):
        # Two images, three ground-truth boxes each.
        self.map_bboxes = np.array(
            [
                [[20, 10, 80, 60], [10, 40, 40, 90], [0, 0, 100, 100]],
                [[0, 0, 10, 10], [20, 20, 40, 90], [80, 20, 100, 50]],
            ],
            dtype=np.float64,
        )
        # Class label of each ground-truth box, per image.
        self.map_labels = np.array([[0, 0, 1], [0, 0, 0]])
        # Predictions per image: [box coords..., score, class].
        self.map_predictions = np.array(
            [
                [
                    [10, 40, 40, 90, 0.1, 0],  # overlap 1.00 with bbox #2, low prob
                    [60, 10, 90, 60, 0.5, 0],  # overlap 0.29 with bbox #1
                    [10, 30, 50, 90, 0.7, 0],  # overlap 0.625 with bbox #2
                    [0, 0, 100, 90, 0.7, 1],  # overlap 0.9 with bbox #3
                    [0, 0, 100, 80, 0.7, 1],  # overlap 0.8 with bbox #3
                ],
                [
                    [20, 20, 30, 50, 0.6, 0],  # 0.21 overlap with #2
                    [2, 0, 10, 11, 0.8, 0],  # overlap with #1
                    [0, 2, 14, 10, 0.9, 0],  # overlap with #1
                    [0, 0, 10, 10, 0.7, 1],  # no ground truth for 1
                    [80, 20, 100, 50, 0.1, 1],  # no ground truth for 1
                ],
            ],
            dtype=np.float32,
        )
        # All ground-truth boxes valid (mask == 1) in both images.
        self.map_masks = np.array([[1, 1, 1], [1, 1, 1]], dtype=np.float32)
        # Expected metric values after processing image 0 only / both images.
        self.result_1 = {"overall": 3 / 4, "weighted": 2 / 3, "per_class": {0: (0.5, 2), 1: (1.0, 1)}}
        self.result_both = {"overall": 2 / 3, "weighted": 4 / 9, "per_class": {0: (1 / 3, 5), 1: (1.0, 1)}}

    def test_compute_overlap(self):
        """IoU matrix has shape (len(boxes1), len(boxes2)) with exact values."""
        boxes1 = np.array([[10, 10, 30, 50], [10, 10, 30, 30]], dtype=np.float64)
        boxes2 = np.array([[10, 10, 30, 50], [10, 10, 40, 40], [100, 70, 110, 90]], dtype=np.float64)
        overlap = compute_overlap(boxes1, boxes2)
        self.assertAlmostEqual(1.0, overlap[0][0])
        self.assertAlmostEqual(6 / 11, overlap[0][1])
        self.assertAlmostEqual(0.0, overlap[0][2])
        self.assertAlmostEqual(0.5, overlap[1][0])
        self.assertAlmostEqual(4 / 9, overlap[1][1])
        self.assertAlmostEqual(0.0, overlap[1][2])

    def test_map_update_one(self):
        """Single (unbatched) update with image 0 yields result_1."""
        mean_average_precision = MAP(2, iou_threshold=0.5, score_threshold=0.3)
        mean_average_precision.update_state(self.map_predictions[0], self.map_bboxes[0], self.map_labels[0])
        result = mean_average_precision.result()
        self._assert_map(result, self.result_1)

    def test_map_update_both(self):
        """Two successive updates accumulate into result_both."""
        mean_average_precision = MAP(2, iou_threshold=0.5, score_threshold=0.3)
        mean_average_precision.update_state(self.map_predictions[0], self.map_bboxes[0], self.map_labels[0])
        mean_average_precision.update_state(self.map_predictions[1], self.map_bboxes[1], self.map_labels[1])
        result = mean_average_precision.result()
        self._assert_map(result, self.result_both)

    def test_map_update_batch_one(self):
        """Batched update with a batch of one matches the unbatched result."""
        mean_average_precision = MAP(2, iou_threshold=0.5, score_threshold=0.3)
        mean_average_precision.update_state_batch(
            tf.constant([self.map_predictions[0]]),
            tf.constant([self.map_bboxes[0]]),
            tf.constant([self.map_labels[0]]),
            tf.constant([self.map_masks[0]]),
        )
        result = mean_average_precision.result()
        self._assert_map(result, self.result_1)

    def test_map_update_batch_both(self):
        """Batched update with both images matches two unbatched updates."""
        mean_average_precision = MAP(2, iou_threshold=0.5, score_threshold=0.3)
        mean_average_precision.update_state_batch(
            tf.constant(self.map_predictions),
            tf.constant(self.map_bboxes),
            tf.constant(self.map_labels),
            tf.constant(self.map_masks),
        )
        result = mean_average_precision.result()
        self._assert_map(result, self.result_both)

    def _assert_map(self, first, second):
        # Compare the full result structure of two MAP results.
        self.assertAlmostEqual(first["overall"], second["overall"])
        self.assertAlmostEqual(first["weighted"], second["weighted"])
        self.assertAlmostEqual(first["per_class"][0][0], second["per_class"][0][0])  # mAP
        self.assertAlmostEqual(first["per_class"][0][1], second["per_class"][0][1])  # num objects
        self.assertAlmostEqual(first["per_class"][1][0], second["per_class"][1][0])  # mAP
        self.assertAlmostEqual(first["per_class"][1][1], second["per_class"][1][1])  # num objects
if __name__ == "__main__":
    # Run the test suite when executed directly.
    unittest.main()
| 44.238095 | 108 | 0.579763 | 4,402 | 0.947686 | 0 | 0 | 0 | 0 | 0 | 0 | 478 | 0.102906 |
698d47d5d2d82e65bd49fd87180ffd6403f3b50a | 3,063 | py | Python | sonnet/examples/rmc_learn_to_execute_test.py | ankitshah009/sonnet | a07676192c6d0f2ed5967d6bc367d62e55835baf | [
"Apache-2.0"
] | 3 | 2019-07-31T12:36:26.000Z | 2020-12-16T14:37:19.000Z | sonnet/examples/rmc_learn_to_execute_test.py | ankitshah009/sonnet | a07676192c6d0f2ed5967d6bc367d62e55835baf | [
"Apache-2.0"
] | null | null | null | sonnet/examples/rmc_learn_to_execute_test.py | ankitshah009/sonnet | a07676192c6d0f2ed5967d6bc367d62e55835baf | [
"Apache-2.0"
] | 3 | 2019-07-29T08:55:20.000Z | 2019-07-30T06:36:56.000Z | # Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for sonnet.examples.rmc_nth_farthest."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sonnet as snt
from sonnet.examples import learn_to_execute
from sonnet.examples import rmc_learn_to_execute
import tensorflow as tf
class RMCLearnTest(tf.test.TestCase):
    """Smoke tests for the RMC learn-to-execute example."""

    def setUp(self):
        # Common dimensions shared by the tests below.
        self._batch_size = 2
        self._seq_sz_in = 10
        self._seq_sz_out = 3
        self._feature_size = 8
        self._nesting = 2
        self._literal_length = 3

    def test_object_sequence_model(self):
        """Test the model class."""
        core = snt.RelationalMemory(
            mem_slots=2, head_size=4, num_heads=1, num_blocks=1, gate_style="unit")
        final_mlp = snt.nets.MLP(
            output_sizes=(5,), activate_final=True)
        model = rmc_learn_to_execute.SequenceModel(
            core=core,
            target_size=self._feature_size,
            final_mlp=final_mlp)
        dummy_in = tf.zeros(
            (self._seq_sz_in, self._batch_size, self._feature_size))
        dummy_out = tf.zeros(
            (self._seq_sz_out, self._batch_size, self._feature_size))
        sizes = tf.ones((self._batch_size))
        logits = model(dummy_in, dummy_out, sizes, sizes)
        # Output keeps the (time, batch, feature) layout of the target sequence.
        self.assertAllEqual(
            logits.shape, (self._seq_sz_out, self._batch_size, self._feature_size))

    def test_build_and_train(self):
        """Test the example TF graph build."""
        # test=True keeps the run tiny; two iterations, report every one.
        total_iterations = 2
        reporting_interval = 1
        rmc_learn_to_execute.build_and_train(
            total_iterations, reporting_interval, test=True)

    def test_learn_to_execute_datset(self):
        """Test the dataset class."""
        dataset = learn_to_execute.LearnToExecute(
            self._batch_size, self._literal_length, self._nesting)
        dataset_iter = dataset.make_one_shot_iterator().get_next()
        logit_size = dataset.state.vocab_size
        seq_sz_in = dataset.state.num_steps
        seq_sz_out = dataset.state.num_steps_out
        # Only tensor shapes are checked; elements 0-2 are (time, batch, vocab)
        # tensors, elements 3-4 are per-example length vectors.
        self.assertAllEqual(
            dataset_iter[0].shape, (seq_sz_in, self._batch_size, logit_size))
        self.assertAllEqual(
            dataset_iter[1].shape, (seq_sz_out, self._batch_size, logit_size))
        self.assertAllEqual(
            dataset_iter[2].shape, (seq_sz_out, self._batch_size, logit_size))
        self.assertAllEqual(dataset_iter[3].shape, (self._batch_size,))
        self.assertAllEqual(dataset_iter[4].shape, (self._batch_size,))
if __name__ == "__main__":
    # Run the TF test suite when executed directly.
    tf.test.main()
| 37.353659 | 79 | 0.713353 | 2,032 | 0.663402 | 0 | 0 | 0 | 0 | 0 | 0 | 828 | 0.270323 |
699310c68f6ee0233724f50d1b8ed775e875d6af | 714 | py | Python | reflex/src/reflex/reflex_polling.py | EnricoSartori/reflex_ros_pkg | 960373a48a0d9095025763400a00c1b30fe4ede5 | [
"Apache-2.0"
] | null | null | null | reflex/src/reflex/reflex_polling.py | EnricoSartori/reflex_ros_pkg | 960373a48a0d9095025763400a00c1b30fe4ede5 | [
"Apache-2.0"
] | null | null | null | reflex/src/reflex/reflex_polling.py | EnricoSartori/reflex_ros_pkg | 960373a48a0d9095025763400a00c1b30fe4ede5 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import rospy
from reflex_msgs.msg import HandCommand
from time import sleep
from reflex_base_services import *
class ReFlex_Polling(ReFlex):
    # Hand controller driven by HandCommand messages on "reflex_commander".
    def __init__(self):
        super(ReFlex_Polling, self).__init__()

        # NOTE(review): callback takes only `data` and uses `self`, so it is a
        # closure defined inside __init__ -- nesting reconstructed; confirm
        # against the original file's indentation.
        def callback(data):
            # data is a HandCommand variable
            self.move_finger(0, data.angles[0])
            self.move_finger(1, data.angles[1])
            #self.move_finger(2, data.angles[2])

        rospy.Subscriber("reflex_commander", HandCommand, callback)
        # spin: blocks here, keeping the subscription (polling) alive.
        rospy.spin()
if __name__ == '__main__':
    # Register the ROS node, then construct the controller (which blocks
    # in rospy.spin() inside its __init__).
    rospy.init_node('ReflexPollingNode')
    reflex_hand = ReFlex_Polling()
| 25.5 | 67 | 0.658263 | 479 | 0.670868 | 0 | 0 | 0 | 0 | 0 | 0 | 178 | 0.2493 |
699348aba420825cc6ea4bff1f7b57cdbe433c5f | 7,630 | py | Python | kf_d3m_primitives/natural_language_processing/sent2vec/sent2vec.py | cdbethune/d3m-primitives | 5530da1b8efba7de8cec6890401c5d4091acd45a | [
"MIT"
] | null | null | null | kf_d3m_primitives/natural_language_processing/sent2vec/sent2vec.py | cdbethune/d3m-primitives | 5530da1b8efba7de8cec6890401c5d4091acd45a | [
"MIT"
] | null | null | null | kf_d3m_primitives/natural_language_processing/sent2vec/sent2vec.py | cdbethune/d3m-primitives | 5530da1b8efba7de8cec6890401c5d4091acd45a | [
"MIT"
] | null | null | null | import os.path
from typing import Sequence, Optional, Dict
import numpy as np
import pandas as pd
from nk_sent2vec import Sent2Vec as _Sent2Vec
from d3m import container, utils
from d3m.primitive_interfaces.transformer import TransformerPrimitiveBase
from d3m.primitive_interfaces.base import CallResult
from d3m.container import DataFrame as d3m_DataFrame
from d3m.metadata import hyperparams, base as metadata_base, params
__author__ = "Distil"
__version__ = "1.3.0"
__contact__ = "mailto:jeffrey.gleason@kungfu.ai"
Inputs = container.pandas.DataFrame
Outputs = container.pandas.DataFrame
class Hyperparams(hyperparams.Hyperparams):
    """Hyperparameters controlling which columns the primitive embeds."""

    #: Column indices to force the primitive to operate on; empty means
    #: auto-detect text columns via semantic types.
    use_columns = hyperparams.Set(
        elements=hyperparams.Hyperparameter[int](-1),
        default=(),
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
        description="A set of column indices to force primitive to operate on. If any specified column cannot be parsed, it is skipped.",
    )
class Sent2VecPrimitive(TransformerPrimitiveBase[Inputs, Outputs, Hyperparams]):
    """
    Produce numerical representations (features) for short texts or sentences.

    Parameters
    ----------
    inputs : Input pandas dataframe

    Returns
    -------
    Outputs
        The output is a pandas dataframe
    """

    metadata = metadata_base.PrimitiveMetadata(
        {
            # Simply an UUID generated once and fixed forever. Generated using "uuid.uuid4()".
            "id": "cf450079-9333-4a3f-aed4-b77a4e8c7be7",
            "version": __version__,
            "name": "sent2vec_wrapper",
            # Keywords do not have a controlled vocabulary. Authors can put here whatever they find suitable.
            "keywords": ["Sent2Vec", "Embedding", "NLP", "Natural Language Processing"],
            "source": {
                "name": __author__,
                "contact": __contact__,
                "uris": [
                    # Unstructured URIs.
                    "https://github.com/kungfuai/d3m-primitives"
                ],
            },
            # A list of dependencies in order. These can be Python packages, system packages, or Docker images.
            # Of course Python packages can also have their own dependencies, but sometimes it is necessary to
            # install a Python package first to be even able to run setup.py of another package. Or you have
            # a dependency which is not on PyPi.
            "installation": [
                {"type": "PIP", "package": "cython", "version": "0.29.16"},
                {
                    "type": metadata_base.PrimitiveInstallationType.PIP,
                    "package_uri": "git+https://github.com/kungfuai/d3m-primitives.git@{git_commit}#egg=kf-d3m-primitives".format(
                        git_commit=utils.current_git_commit(os.path.dirname(__file__)),
                    ),
                },
                {
                    # Pretrained sent2vec model file fetched as a static volume.
                    "type": "FILE",
                    "key": "sent2vec_model",
                    "file_uri": "http://public.datadrivendiscovery.org/twitter_bigrams.bin",
                    "file_digest": "9e8ccfea2aaa4435ca61b05b11b60e1a096648d56fff76df984709339f423dd6",
                },
            ],
            # The same path the primitive is registered with entry points in setup.py.
            "python_path": "d3m.primitives.feature_extraction.nk_sent2vec.Sent2Vec",
            # Choose these from a controlled vocabulary in the schema. If anything is missing which would
            # best describe the primitive, make a merge request.
            "algorithm_types": [metadata_base.PrimitiveAlgorithmType.VECTORIZATION],
            "primitive_family": metadata_base.PrimitiveFamily.FEATURE_EXTRACTION,
        }
    )

    # class instance to avoid unnecessary re-init on subsequent produce calls
    _vectorizer: Optional[_Sent2Vec] = None

    def __init__(
        self,
        *,
        hyperparams: Hyperparams,
        random_seed: int = 0,
        volumes: Dict[str, str] = None
    ) -> None:
        super().__init__(
            hyperparams=hyperparams, random_seed=random_seed, volumes=volumes
        )
        # volumes maps volume keys (e.g. "sent2vec_model") to local file paths.
        self.volumes = volumes

    def produce(
        self, *, inputs: Inputs, timeout: float = None, iterations: int = None
    ) -> CallResult[Outputs]:
        """
        Produce numerical representations (features) for short texts or sentences.

        Embeds every detected (or user-selected) text column and appends one
        float column per embedding dimension to a copy of the input frame.

        Parameters
        ----------
        inputs : Input pandas dataframe

        Returns
        -------
        Outputs
            The output is a pandas dataframe
        """
        # figure out columns to operate on (text-typed attribute columns,
        # optionally restricted by the use_columns hyperparameter)
        cols = self._get_operating_columns(inputs, self.hyperparams['use_columns'], ('http://schema.org/Text',))

        frame = inputs.iloc[:, cols]
        outputs = inputs.copy()

        try:
            # lazy load the model and keep it around for subsequent produce calls
            if Sent2VecPrimitive._vectorizer is None:
                Sent2VecPrimitive._vectorizer = _Sent2Vec(path=self.volumes["sent2vec_model"])
            output_vectors = []
            for col in range(frame.shape[1]):
                text = frame.iloc[:, col].tolist()
                embedded_sentences = Sent2VecPrimitive._vectorizer.embed_sentences(sentences=text)
                output_vectors.append(embedded_sentences)
            # One row per input row; columns from all embedded text columns
            # are concatenated side by side.
            embedded_df = pd.DataFrame(np.array(output_vectors).reshape(len(embedded_sentences), -1))
        except ValueError:
            # just return inputs with file names deleted if vectorizing fails
            return CallResult(outputs)

        # create df with vectorized columns and append to input df
        embedded_df = d3m_DataFrame(embedded_df)
        for col in range(embedded_df.shape[1]):
            # Tag each embedding column as a float attribute named vector_<i>.
            col_dict = dict(embedded_df.metadata.query((metadata_base.ALL_ELEMENTS, col)))
            col_dict['structural_type'] = type(1.0)
            col_dict['name'] = "vector_" + str(col)
            col_dict["semantic_types"] = (
                "http://schema.org/Float",
                "https://metadata.datadrivendiscovery.org/types/Attribute",
            )
            embedded_df.metadata = embedded_df.metadata.update(
                (metadata_base.ALL_ELEMENTS, col), col_dict
            )
        # Record the table-level dimension metadata (number of columns).
        df_dict = dict(embedded_df.metadata.query((metadata_base.ALL_ELEMENTS, )))
        df_dict_1 = dict(embedded_df.metadata.query((metadata_base.ALL_ELEMENTS, )))
        df_dict['dimension'] = df_dict_1
        df_dict_1['name'] = 'columns'
        df_dict_1['semantic_types'] = ('https://metadata.datadrivendiscovery.org/types/TabularColumn',)
        df_dict_1['length'] = embedded_df.shape[1]
        embedded_df.metadata = embedded_df.metadata.update((metadata_base.ALL_ELEMENTS,), df_dict)

        return CallResult(outputs.append_columns(embedded_df))

    @classmethod
    def _get_operating_columns(cls, inputs: container.DataFrame, use_columns: Sequence[int],
                               semantic_types: Sequence[str], require_attribute: bool = True) -> Sequence[int]:
        """Return indices of columns matching *semantic_types* (and, when
        require_attribute is True, also tagged as Attribute), intersected
        with *use_columns* when the latter is non-empty."""
        # use caller supplied columns if supplied
        cols = set(use_columns)
        type_cols = set(inputs.metadata.list_columns_with_semantic_types(semantic_types))
        if require_attribute:
            attributes = set(inputs.metadata.list_columns_with_semantic_types(('https://metadata.datadrivendiscovery.org/types/Attribute',)))
            type_cols = type_cols & attributes
        if len(cols) > 0:
            cols = type_cols & cols
        else:
            cols = type_cols
        # NOTE(review): sets are unordered, so the returned column order is
        # not guaranteed to follow the frame's column order.
        return list(cols)
6993bc13615580474978d13c5f5a83a136a5e9f1 | 1,173 | py | Python | tests/integration/test_user_defined_object_persistence/test.py | pdv-ru/ClickHouse | 0ff975bcf3008fa6c6373cbdfed16328e3863ec5 | [
"Apache-2.0"
] | 15,577 | 2019-09-23T11:57:53.000Z | 2022-03-31T18:21:48.000Z | tests/integration/test_user_defined_object_persistence/test.py | pdv-ru/ClickHouse | 0ff975bcf3008fa6c6373cbdfed16328e3863ec5 | [
"Apache-2.0"
] | 16,476 | 2019-09-23T11:47:00.000Z | 2022-03-31T23:06:01.000Z | tests/integration/test_user_defined_object_persistence/test.py | pdv-ru/ClickHouse | 0ff975bcf3008fa6c6373cbdfed16328e3863ec5 | [
"Apache-2.0"
] | 3,633 | 2019-09-23T12:18:28.000Z | 2022-03-31T15:55:48.000Z | import pytest
from helpers.cluster import ClickHouseCluster
cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance('instance', stay_alive=True)
@pytest.fixture(scope="module", autouse=True)
def started_cluster():
try:
cluster.start()
yield cluster
finally:
cluster.shutdown()
def test_persistence():
create_function_query1 = "CREATE FUNCTION MySum1 AS (a, b) -> a + b"
create_function_query2 = "CREATE FUNCTION MySum2 AS (a, b) -> MySum1(a, b) + b"
instance.query(create_function_query1)
instance.query(create_function_query2)
assert instance.query("SELECT MySum1(1,2)") == "3\n"
assert instance.query("SELECT MySum2(1,2)") == "5\n"
instance.restart_clickhouse()
assert instance.query("SELECT MySum1(1,2)") == "3\n"
assert instance.query("SELECT MySum2(1,2)") == "5\n"
instance.query("DROP FUNCTION MySum2")
instance.query("DROP FUNCTION MySum1")
instance.restart_clickhouse()
assert "Unknown function MySum1" in instance.query_and_get_error("SELECT MySum1(1, 2)")
assert "Unknown function MySum2" in instance.query_and_get_error("SELECT MySum2(1, 2)")
| 29.325 | 91 | 0.700767 | 0 | 0 | 118 | 0.100597 | 164 | 0.139812 | 0 | 0 | 351 | 0.299233 |
6994f7160676cfbd47ee328ec88c4bf6782a75dc | 7,694 | py | Python | python/brainvisa/maker/brainvisa_clients.py | brainvisa/brainvisa-cmake | 2b4c4c6aae45e036a54d655b064f4d1a2b7b2061 | [
"CECILL-B"
] | null | null | null | python/brainvisa/maker/brainvisa_clients.py | brainvisa/brainvisa-cmake | 2b4c4c6aae45e036a54d655b064f4d1a2b7b2061 | [
"CECILL-B"
] | 77 | 2018-10-30T11:28:16.000Z | 2022-02-28T14:21:40.000Z | python/brainvisa/maker/brainvisa_clients.py | brainvisa/brainvisa-cmake | 2b4c4c6aae45e036a54d655b064f4d1a2b7b2061 | [
"CECILL-B"
] | 1 | 2019-07-17T14:08:22.000Z | 2019-07-17T14:08:22.000Z | # -*- coding: utf-8 -*-
# This software and supporting documentation are distributed by
# Institut Federatif de Recherche 49
# CEA/NeuroSpin, Batiment 145,
# 91191 Gif-sur-Yvette cedex
# France
#
# This software is governed by the CeCILL-B license under
# French law and abiding by the rules of distribution of free software.
# You can use, modify and/or redistribute the software under the
# terms of the CeCILL-B license as circulated by CEA, CNRS
# and INRIA at the following URL "http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided only
# with a limited warranty and the software's author, the holder of the
# economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards their
# requirements in conditions enabling the security of their systems and/or
# data to be ensured and, more generally, to use and operate it in the
# same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL-B license and that you accept its terms.
from __future__ import absolute_import, print_function
import sys
import posixpath
from subprocess import Popen, PIPE, STDOUT
from six.moves.urllib.parse import urlparse, urlunparse
from brainvisa.maker.version_number import VersionNumber, \
VersionFormat, \
version_format_unconstrained
def system( command,
simulate = False,
verbose = False ):
"""Execute a system command.
If the code returned by the executed command is not 0,
a SystemError is raised.
@type command: list
@param command: The list that contains a command and its parameters.
@type verbose: bool
@param verbose: Specify that the command must be printed to standard output.
[Default: False].
@type simulate: bool
@param simulate: Specify that the command must not be executed
[Default: False].
@rtype: string
@return: The standard output of the command.
"""
if verbose:
print(' '.join( ('"' + i + '"' for i in command) ))
if simulate :
return command
else :
cmd = Popen( command,
stdout = PIPE,
stderr = STDOUT )
output = cmd.stdout.read()
cmd.wait()
if cmd.returncode != 0:
if verbose:
print(output)
sys.stdout.flush()
raise SystemError( 'System command exited with error code '
+ repr( cmd.returncode ) + ': '
+ ' '.join( ('"' + i + '"' for i in command) ) )
return output
def normurl( url ):
"""Normalizes URL in order that URLs that point
to the same resource will return the same string.
@type url: string
@param url: The URL to normalize
@return: A normalized URL, i.e. without '..' or '.' elements.
"""
parsed = urlparse(url)
return urlunparse(
( parsed.scheme,
parsed.netloc,
posixpath.normpath(parsed.path),
parsed.params,
parsed.query,
parsed.fragment ) )
def find_remote_project_info( client,
url ):
"""Find a project_info.cmake or the info.py file
in subdirectories of the specified url.
Files are searched using the patterns :
1) <url>/project_info.cmake
2) <url>/python/*/info.py
3) <url>/*/info.py
4) <url>/info.py
@type client: Client
@param client: The Client instance to get access to files.
@type url: string
@param url: The url to search project_info.cmake or info.py
@rtype: string
@return: The url of the found file containing project information
"""
project_info_patterns = ( posixpath.join( url,
'project_info.cmake' ),
posixpath.join( url,
'python',
'*',
'info.py' ),
posixpath.join( url,
'*',
'info.py' ),
posixpath.join( url,
'info.py' ))
# Searches for project_info.cmake and info.py file
for pattern in project_info_patterns:
project_info_url = client.vcs_glob( pattern )
if project_info_url:
return project_info_url[0]
return None
def read_remote_project_info( client,
url,
version_format = version_format_unconstrained ):
"""Search a project_info.cmake or a info.py file
in subdirectories of the specified url and parses its content.
Files are searched using the patterns :
1) <url>/project_info.cmake
2) <url>/python/*/info.py
3) <url>/*/info.py
@type client: Client
@param client: The Client instance to get access to files.
@type url: string
@param url: The url to search project_info.cmake or info.py
@type version_format: VersionFormat
@param version_format: The format to use to return version.
@rtype: list
@return: a list that contains project name, component name and version
"""
import os, tempfile
from brainvisa.maker.brainvisa_projects import parse_project_info_cmake, \
parse_project_info_python
project_info_url = find_remote_project_info( client, url )
if project_info_url is not None:
fd, path = tempfile.mkstemp()
os.close(fd)
os.unlink(path)
project_info = None
if project_info_url.endswith( '.cmake' ):
# Read the content of project_info.cmake file
client.vcs_export( project_info_url, path )
project_info = parse_project_info_cmake(
path,
version_format
)
os.unlink( path )
elif project_info_url.endswith( '.py' ):
# Read the content of info.py file
client.vcs_export( project_info_url, path )
project_info = parse_project_info_python(
path,
version_format
)
os.unlink( path )
else:
raise RuntimeError( 'Url ' + project_info_url + ' has unknown '
+ 'extension for project info file.' )
return project_info
else:
return None
| 36.990385 | 80 | 0.564856 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,033 | 0.524175 |
6996e841b5afa44da1500734468b8fd4e49b092a | 1,000 | py | Python | test/test_del_contact.py | rokimaru/python_training | 8ac7e2bbe964ab998c3ec27d4d360043e92bdd56 | [
"Apache-2.0"
] | null | null | null | test/test_del_contact.py | rokimaru/python_training | 8ac7e2bbe964ab998c3ec27d4d360043e92bdd56 | [
"Apache-2.0"
] | null | null | null | test/test_del_contact.py | rokimaru/python_training | 8ac7e2bbe964ab998c3ec27d4d360043e92bdd56 | [
"Apache-2.0"
] | null | null | null | import random
import pytest
from model.contact import Contact
def test_delete_some_contact(app, db, check_ui):
if len(db.get_group_list()) == 0:
app.contact.create(Contact(firstname="del_test"))
with pytest.allure.step('Get contact list'):
old_contacts = db.get_contact_list()
with pytest.allure.step('Choice random contact in contact list'):
contact = random.choice(old_contacts)
with pytest.allure.step('Delete contact from addressbook'):
app.contact.del_contact_by_id(contact.id)
with pytest.allure.step('Get new contact list and compare to old contact list without removed contact'):
new_contacts = db.get_contact_list()
old_contacts.remove(contact)
assert old_contacts == new_contacts
if check_ui:
assert sorted(db.get_contact_list(), key=Contact.id_or_max) == sorted(app.contact.get_contact_list(),
key=Contact.id_or_max)
| 38.461538 | 109 | 0.665 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 178 | 0.178 |
699795829a131e6b9aa0af884cefeacb0e7cc459 | 3,513 | py | Python | slate/integrations/backtesting_py.py | EmersonDove/slate-python | fefd3e1275596d67e5d18e9e864657b4c67ce95e | [
"MIT"
] | 1 | 2022-03-05T17:08:24.000Z | 2022-03-05T17:08:24.000Z | slate/integrations/backtesting_py.py | blankly-finance/slate-python | fefd3e1275596d67e5d18e9e864657b4c67ce95e | [
"MIT"
] | null | null | null | slate/integrations/backtesting_py.py | blankly-finance/slate-python | fefd3e1275596d67e5d18e9e864657b4c67ce95e | [
"MIT"
] | null | null | null | from operator import itemgetter
import pandas as pd
import slate
from slate.integrations.common import b_id, DUMMY_METRICS, DUMMY_INDICATORS
try:
import backtesting
except ImportError:
pass
class BacktestingPy:
slate: 'slate.Slate'
api: 'slate.api.API'
def __init__(self, slate, api):
self.slate = slate
self.api = api
def post_backtest(self, result: 'backtesting.Backtest', symbol: str = None):
self.slate.model.add_symbol(symbol)
symbol = symbol or 'Unknown'
quote = symbol.split('-')[1] if '-' in symbol else 'USD'
trades = result['_trades'] \
.apply(map_trades, axis=1, result_type='expand', symbol=symbol) \
.unstack() \
.reset_index(drop=True) \
.apply(pd.Series) \
.sort_values('time', ascending=True) \
.to_dict('records')
equity = result['_equity_curve']['Equity']
account_values = equity.loc[equity.shift() != equity] \
.reset_index() \
.rename(columns={'index': 'time', 'Equity': 'value'})
account_values['time'] = account_values['time'].map(lambda t: t.timestamp())
account_values = account_values.to_dict('records')
id = b_id()
self.slate.backtest.result(symbols=[symbol],
quote_asset=quote,
start_time=result['Start'].timestamp(),
stop_time=result['End'].timestamp(),
account_values=account_values,
trades=trades,
backtest_id=id,
metrics=DUMMY_INDICATORS,
indicators=DUMMY_METRICS)
self.slate.backtest.status(backtest_id=id,
successful=True,
status_summary='Completed',
status_details='',
time_elapsed=0)
def map_trades(row: pd.Series, symbol: str) -> list:
common = {'symbol': symbol,
'size': abs(row['Size']),
'type': 'market'}
entry = {**common,
'time': row['EntryTime'].timestamp(),
'side': 'buy' if row['Size'] > 0 else 'sell',
'id': b_id(),
'price': row['EntryPrice']}
exit = {**common,
'time': row['ExitTime'].timestamp(),
'side': 'sell' if row['Size'] > 0 else 'buy',
'id': b_id(),
'price': row['ExitPrice']}
return [entry, exit]
if __name__ == '__main__':
# run backtesting.py backtest
from backtesting import Backtest, Strategy
from backtesting.lib import crossover
from backtesting.test import SMA, GOOG
class SmaCross(Strategy):
n1 = 10
n2 = 20
def init(self):
close = self.data.Close
self.sma1 = self.I(SMA, close, self.n1)
self.sma2 = self.I(SMA, close, self.n2)
def next(self):
if crossover(self.sma1, self.sma2):
self.buy()
elif crossover(self.sma2, self.sma1):
self.sell()
bt = Backtest(GOOG, SmaCross,
cash=10000, commission=.002,
exclusive_orders=True)
result = bt.run()
# post to slate
slate = slate.Slate()
slate.integrations.backtesting.post_backtest(result, 'GOOG')
| 32.527778 | 84 | 0.519784 | 2,270 | 0.646171 | 0 | 0 | 0 | 0 | 0 | 0 | 417 | 0.118702 |
6997a5dd5ddb99540a69c42d6cd9bb74efb247be | 1,029 | py | Python | python exercicios/Listas/lista6.py | gabrielqoliveiraa/bomdia | b5e0fe6aa347a0e31b5960a69fbd6f32df352094 | [
"MIT"
] | null | null | null | python exercicios/Listas/lista6.py | gabrielqoliveiraa/bomdia | b5e0fe6aa347a0e31b5960a69fbd6f32df352094 | [
"MIT"
] | null | null | null | python exercicios/Listas/lista6.py | gabrielqoliveiraa/bomdia | b5e0fe6aa347a0e31b5960a69fbd6f32df352094 | [
"MIT"
] | null | null | null | nome = []
temp = []
pesoMaior = pesoMenor = 0
count = 1
while True:
temp.append(str(input('Nome: ')))
temp.append(float(input('Peso: ')))
if count == 1:
pesoMaior = pesoMenor = temp[1]
else:
if temp[1] >= pesoMaior:
pesoMaior = temp[1]
elif temp[1] <= pesoMenor:
pesoMenor = temp[1]
nome.append(temp[:])
temp.clear()
usuario = 'O'
while usuario != 'S' or usuario != 'N':
usuario = str(input('Deseja Continuar ? S/N: ')).upper().strip()[0]
if usuario == 'S':
break
elif usuario == 'N':
break
if usuario == 'N':
break
count += 1
print(f'Foram cadastradas {len(nome)} pessoas')
print(f'O menor peso foi {pesoMenor}.', end=' ')
for c in nome:
if c[1] == pesoMenor:
print(c[0], end=' ')
print()
print(f'O maior peso foi {pesoMaior}.')
for c in nome:
if c[1] == pesoMaior:
print(c[0])
| 17.15 | 75 | 0.483965 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 170 | 0.165209 |
69982bd6d471edd0b6269e3319bea4f90a9b9ecf | 8,685 | py | Python | beetles/scripts/inference_runner.py | ESA-PhiLab/hypernet | b33f7893d3dfcbbc2c10076fb61b2b1f1316402a | [
"MIT"
] | 34 | 2018-11-14T09:38:00.000Z | 2022-01-31T17:44:51.000Z | beetles/scripts/inference_runner.py | ESA-PhiLab/hypernet | b33f7893d3dfcbbc2c10076fb61b2b1f1316402a | [
"MIT"
] | 5 | 2018-09-11T14:52:35.000Z | 2022-03-24T09:32:01.000Z | beetles/scripts/inference_runner.py | ESA-PhiLab/hypernet | b33f7893d3dfcbbc2c10076fb61b2b1f1316402a | [
"MIT"
] | 11 | 2018-10-24T12:42:59.000Z | 2022-03-12T03:50:50.000Z | """
Run inference N times on the provided model given set of hyperparameters. Has
the option to inject noise into the test set.
"""
import os
import shutil
import re
import clize
import mlflow
import tensorflow as tf
from clize.parameters import multi
from ml_intuition.data.io import load_processed_h5
from ml_intuition.data.loggers import log_params_to_mlflow, log_tags_to_mlflow
from ml_intuition.data.utils import get_mlflow_artifacts_path, parse_train_size
from ml_intuition.enums import Splits, Experiment
from scripts import evaluate_model, prepare_data, artifacts_reporter
def run_experiments(*,
data_file_path: str = None,
ground_truth_path: str = None,
dataset_path: str = None,
train_size: ('train_size', multi(min=0)),
val_size: float = 0.1,
stratified: bool = True,
background_label: int = 0,
channels_idx: int = 0,
neighborhood_size: int = None,
save_data: bool = False,
n_runs: int,
dest_path: str,
models_path: str,
model_name: str = 'model_2d',
n_classes: int,
use_ensemble: bool = False,
ensemble_copies: int = None,
voting: str = 'hard',
batch_size: int = 1024,
post_noise_sets: ('spost', multi(min=0)),
post_noise: ('post', multi(min=0)),
noise_params: str = None,
use_mlflow: bool = False,
experiment_name: str = None,
model_exp_name: str = None,
run_name: str = None):
"""
Run inference on the provided model given set of hyperparameters.
:param data_file_path: Path to the data file. Supported types are: .npy
:param ground_truth_path: Path to the ground-truth data file.
:param dataset_path: Path to the already extracted .h5 dataset
:param train_size: If float, should be between 0.0 and 1.0.
If stratified = True, it represents percentage of each class to be extracted,
If float and stratified = False, it represents percentage of the whole
dataset to be extracted with samples drawn randomly, regardless of their class.
If int and stratified = True, it represents number of samples to be
drawn from each class.
If int and stratified = False, it represents overall number of samples
to be drawn regardless of their class, randomly.
Defaults to 0.8
:type train_size: Union[int, float]
:param val_size: Should be between 0.0 and 1.0. Represents the percentage of
each class from the training set to be extracted as a
validation set.
Defaults to 0.1.
:param stratified: Indicated whether the extracted training set should be
stratified.
Defaults to True.
:param background_label: Label indicating the background in GT file.
:param channels_idx: Index specifying the channels position in the provided
data.
:param neighborhood_size: Size of the neighbourhood for the model.
:param save_data: Whether to save the prepared dataset
:param n_runs: Number of total experiment runs.
:param dest_path: Path to where all experiment runs will be saved as
subfolders in this directory.
:param models_path: Name of the model, it serves as a key in the
dictionary holding all functions returning models.
:param model_name: The name of model for the inference.
:param n_classes: Number of classes.
:param use_ensemble: Use ensemble for prediction.
:param ensemble_copies: Number of model copies for the ensemble.
:param voting: Method of ensemble voting. If ‘hard’, uses predicted class
labels for majority rule voting. Else if ‘soft’, predicts the class
label based on the argmax of the sums of the predicted probabilities.
:param batch_size: Size of the batch for the inference
:param post_noise_sets: The list of sets to which the noise will be
injected. One element can either be "train", "val" or "test".
:type post_noise_sets: list[str]
:param post_noise: The list of names of noise injection methods after
the normalization transformations.
:type post_noise: list[str]
:param noise_params: JSON containing the parameter setting of injection methods.
Exemplary value for this parameter: "{"mean": 0, "std": 1, "pa": 0.1}".
This JSON should include all parameters for noise injection
functions that are specified in pre_noise and post_noise arguments.
For the accurate description of each parameter, please
refer to the ml_intuition/data/noise.py module.
:param use_mlflow: Whether to log metrics and artifacts to mlflow.
:param experiment_name: Name of the experiment. Used only if
use_mlflow = True.
:param run_name: Name of the run. Used only if use_mlflow = True.
"""
train_size = parse_train_size(train_size)
if use_mlflow:
args = locals()
mlflow.set_tracking_uri("http://beetle.mlflow.kplabs.pl")
mlflow.set_experiment(experiment_name)
mlflow.start_run(run_name=run_name)
log_params_to_mlflow(args)
log_tags_to_mlflow(args['run_name'])
models_path = get_mlflow_artifacts_path(models_path, model_exp_name)
for experiment_id in range(n_runs):
experiment_dest_path = os.path.join(
dest_path, 'experiment_' + str(experiment_id))
model_name_regex = re.compile('model_.*')
model_dir = os.path.join(models_path, f'experiment_{experiment_id}')
model_name = list(filter(model_name_regex.match, os.listdir(model_dir)))[0]
model_path = os.path.join(model_dir, model_name)
if dataset_path is None:
data_source = os.path.join(models_path,
'experiment_' + str(experiment_id),
'data.h5')
else:
data_source = dataset_path
os.makedirs(experiment_dest_path, exist_ok=True)
if data_file_path.endswith('.h5') and ground_truth_path is None and 'patches' not in data_file_path:
data_source = load_processed_h5(data_file_path=data_file_path)
elif not os.path.exists(data_source):
data_source = prepare_data.main(data_file_path=data_file_path,
ground_truth_path=ground_truth_path,
output_path=data_source,
train_size=train_size,
val_size=val_size,
stratified=stratified,
background_label=background_label,
channels_idx=channels_idx,
neighborhood_size=neighborhood_size,
save_data=save_data,
seed=experiment_id)
evaluate_model.evaluate(
model_path=model_path,
data=data_source,
dest_path=experiment_dest_path,
n_classes=n_classes,
use_ensemble=use_ensemble,
ensemble_copies=ensemble_copies,
voting=voting,
noise=post_noise,
noise_sets=post_noise_sets,
noise_params=noise_params,
batch_size=batch_size,
seed=experiment_id)
tf.keras.backend.clear_session()
artifacts_reporter.collect_artifacts_report(experiments_path=dest_path,
dest_path=dest_path,
use_mlflow=use_mlflow)
if Splits.GRIDS in data_file_path:
fair_report_path = os.path.join(dest_path, Experiment.REPORT_FAIR)
artifacts_reporter.collect_artifacts_report(experiments_path=dest_path,
dest_path=fair_report_path,
filename=Experiment.INFERENCE_FAIR_METRICS,
use_mlflow=use_mlflow)
if use_mlflow:
mlflow.set_experiment(experiment_name)
mlflow.log_artifacts(dest_path, artifact_path=dest_path)
shutil.rmtree(dest_path)
if __name__ == '__main__':
clize.run(run_experiments)
| 47.983425 | 108 | 0.617041 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,643 | 0.419073 |
699880915260268a701c6708794dc8488d47a30e | 3,629 | py | Python | statutes_pipeline_steps/de_authority_edgelist.py | QuantLaw/legal-data-preprocessing | c5462ba946e858d5e33a4698e9da350402903bca | [
"BSD-2-Clause"
] | 5 | 2021-01-06T10:59:53.000Z | 2022-03-18T19:44:11.000Z | statutes_pipeline_steps/de_authority_edgelist.py | QuantLaw/legal-data-preprocessing | c5462ba946e858d5e33a4698e9da350402903bca | [
"BSD-2-Clause"
] | 1 | 2021-01-03T18:54:47.000Z | 2021-01-03T18:54:47.000Z | statutes_pipeline_steps/de_authority_edgelist.py | QuantLaw/legal-data-preprocessing | c5462ba946e858d5e33a4698e9da350402903bca | [
"BSD-2-Clause"
] | null | null | null | import json
import os
import numpy
import pandas as pd
from quantlaw.utils.beautiful_soup import create_soup
from quantlaw.utils.files import ensure_exists
from quantlaw.utils.pipeline import PipelineStep
from statics import (
DE_REFERENCE_PARSED_PATH,
DE_REG_AUTHORITY_EDGELIST_PATH,
DE_REG_CROSSREFERENCE_LOOKUP_PATH,
DE_REG_REFERENCE_PARSED_PATH,
)
from utils.common import get_snapshot_law_list
def get_filename(date):
return f"{date}.csv"
class DeAuthorityEdgelist(PipelineStep):
def __init__(self, law_names_data, *args, **kwargs):
self.law_names_data = law_names_data
super().__init__(*args, **kwargs)
def get_items(self, overwrite, snapshots) -> list:
ensure_exists(DE_REG_AUTHORITY_EDGELIST_PATH)
if not overwrite:
existing_files = os.listdir(DE_REG_AUTHORITY_EDGELIST_PATH)
snapshots = list(
filter(lambda f: get_filename(f) not in existing_files, snapshots)
)
return snapshots
def execute_item(self, item):
files = get_snapshot_law_list(item, self.law_names_data)
source_folder = DE_REG_CROSSREFERENCE_LOOKUP_PATH
target_folder = DE_REG_AUTHORITY_EDGELIST_PATH
key_df = (
pd.read_csv(f"{source_folder}/{item}.csv").dropna().set_index("citekey")
)
law_citekeys_dict = {
citekey.split("_")[0]: "_".join(row["key"].split("_")[:-1]) + "_000001"
for citekey, row in key_df.iterrows()
}
df = None
for file in files:
edge_df = make_edge_list(file, key_df, law_citekeys_dict, regulations=True)
df = edge_df if df is None else df.append(edge_df, ignore_index=True)
df.to_csv(f"{target_folder}/{item}.csv", index=False)
def make_edge_list(file, key_df, law_citekeys_dict, regulations):
soup = create_soup(
os.path.join(
DE_REG_REFERENCE_PARSED_PATH if regulations else DE_REFERENCE_PARSED_PATH,
file,
)
)
edges = []
# FOR DEBUG
problem_matches = set()
problem_keys = set()
for item in soup.find_all(["document", "seqitem"], attrs={"parsed": True}):
item_parsed_ref_str = item.attrs["parsed"]
if not item_parsed_ref_str or item_parsed_ref_str == "[]":
continue
node_out = item.get("key")
refs = json.loads(item_parsed_ref_str)
for ref in refs:
# TODO multiple laws with the same bnormabk
if len(ref) > 1: # Ref to seqitem at least
try:
key = "_".join(ref[:2])
matches = key_df.at[key, "key"]
if type(matches) == numpy.ndarray:
print(f"Multiple matches for {key}")
matches = matches[0]
if type(matches) is not str:
problem_matches.add(tuple(matches))
node_in = matches if type(matches) == str else matches[0]
edges.append((node_out, node_in))
except KeyError:
problem_keys.add(key)
else: # ref to document only
node_in = law_citekeys_dict.get(ref[0])
if node_in:
edges.append((node_out, node_in))
# FOR DEBUG
# if len(problem_matches) > 0:
# print(f"{file} Problem Matches:\n", sorted(list(problem_matches)))
# if len(problem_keys) > 0:
# print(f"{file} Problem Matches:\n", sorted(list(problem_keys)))
return pd.DataFrame(edges, columns=["out_node", "in_node"])
| 34.894231 | 87 | 0.610361 | 1,325 | 0.365114 | 0 | 0 | 0 | 0 | 0 | 0 | 513 | 0.141361 |
6998bcb96c84eee08055afeb394e0c10dfe55e4a | 248 | py | Python | test-crates/hello-world/check_installed/check_installed.py | pattonw/pyo3-pack | 675d92819faf56e972d1553ea8199425cb7f7e94 | [
"Apache-2.0",
"MIT"
] | null | null | null | test-crates/hello-world/check_installed/check_installed.py | pattonw/pyo3-pack | 675d92819faf56e972d1553ea8199425cb7f7e94 | [
"Apache-2.0",
"MIT"
] | null | null | null | test-crates/hello-world/check_installed/check_installed.py | pattonw/pyo3-pack | 675d92819faf56e972d1553ea8199425cb7f7e94 | [
"Apache-2.0",
"MIT"
] | null | null | null | from subprocess import check_output
def main():
output = check_output(["hello-world"]).decode("utf-8").strip()
if not output == "Hello, world!":
raise Exception(output)
print("SUCCESS")
if __name__ == '__main__':
main()
| 19.076923 | 66 | 0.633065 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 54 | 0.217742 |
69990cfc9621ec7083d9cf51ca5e92ac2a42610b | 5,567 | py | Python | opp/baseline_lrrf_upper.py | heeryoncho/sensors2018cnnhar | 2c0ae84b83a95bd5b5ab13df0fb3f5e8529df91f | [
"MIT"
] | 10 | 2018-09-25T07:55:30.000Z | 2020-05-08T15:01:56.000Z | opp/baseline_lrrf_upper.py | heeryoncho/sensors2018cnnhar | 2c0ae84b83a95bd5b5ab13df0fb3f5e8529df91f | [
"MIT"
] | null | null | null | opp/baseline_lrrf_upper.py | heeryoncho/sensors2018cnnhar | 2c0ae84b83a95bd5b5ab13df0fb3f5e8529df91f | [
"MIT"
] | 5 | 2018-12-12T16:40:26.000Z | 2020-10-29T01:24:07.000Z | from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
import select_data as sd
'''
See paper: Sensors 2018, 18(4), 1055; https://doi.org/10.3390/s18041055
"Divide and Conquer-Based 1D CNN Human Activity Recognition Using Test Data Sharpening"
by Heeryon Cho & Sang Min Yoon
This code outputs the UPPER body sensors data HAR performance using
other baseline machine learning techniques, such as
logistic regression and random forest,
given in the bar graph of Figure 15 (blue bars indicating Upper Body Sensors).
(Sensors 2018, 18(4), 1055, page 17 of 24)
'''
print "========================================================="
print " Outputs performance of other ML techniques, namely,"
print " Logistic Regression & Random Forest"
print " Using UPPER body sensors data."
print "========================================================="
print "\n==========================="
print " [UPPER] 4-Class"
print "===========================\n"
X_train, y_train, X_valid, y_valid, X_test, y_test = sd.load_data("upper", "end2end")
clf_lr = LogisticRegression(random_state=2018)
clf_lr.fit(X_train, y_train)
pred_lr = clf_lr.predict(X_test)
print "--- Logistic Regression ---"
print "Test Acc: ", accuracy_score(y_test, pred_lr)
print confusion_matrix(y_test, pred_lr), '\n'
clf_dt = RandomForestClassifier(random_state=2018, max_depth=5, n_estimators=10, max_features=1)
clf_dt.fit(X_train, y_train)
pred_dt = clf_dt.predict(X_test)
print "\n------ Random Forest ------"
print "Test Acc: ", accuracy_score(y_test, pred_dt)
print confusion_matrix(y_test, pred_dt)
#---------------------------------------------
print "\n==========================="
print " [UPPER] Abstract Class"
print "===========================\n"
X_train, y_train, X_valid, y_valid, X_test, y_test = sd.load_data("upper", "abst")
clf_lr = LogisticRegression(random_state=2018)
clf_lr.fit(X_train, y_train)
pred_lr = clf_lr.predict(X_test)
print "--- Logistic Regression ---"
print "Test ACC: ", accuracy_score(y_test, pred_lr)
print confusion_matrix(y_test, pred_lr), '\n'
clf_dt = RandomForestClassifier(random_state=2018, max_depth=5, n_estimators=10, max_features=1)
clf_dt.fit(X_train, y_train)
pred_dt = clf_dt.predict(X_test)
print "------ Random Forest ------"
print "Test Acc: ", accuracy_score(y_test, pred_dt)
print confusion_matrix(y_test, pred_dt)
#---------------------------------------------
print "\n==========================="
print " [UPPER] UP Class"
print "===========================\n"
X_train, y_train, X_valid, y_valid, X_test, y_test = sd.load_data("upper", "up")
clf_lr = LogisticRegression(random_state=2018)
clf_lr.fit(X_train, y_train)
pred_lr = clf_lr.predict(X_test)
print "--- Logistic Regression ---"
print "Test Acc: ", accuracy_score(y_test, pred_lr)
print confusion_matrix(y_test, pred_lr), '\n'
clf_dt = RandomForestClassifier(random_state=2018, max_depth=5, n_estimators=10, max_features=1)
clf_dt.fit(X_train, y_train)
pred_dt = clf_dt.predict(X_test)
print "------ Random Forest ------"
print "Test Acc: ", accuracy_score(y_test, pred_dt)
print confusion_matrix(y_test, pred_dt)
#---------------------------------------------
print "\n==========================="
print " [UPPER] DOWN Class"
print "===========================\n"
X_train, y_train, X_valid, y_valid, X_test, y_test = sd.load_data("upper", "down")
clf_lr = LogisticRegression(random_state=2018)
clf_lr.fit(X_train, y_train)
pred_lr = clf_lr.predict(X_test)
print "--- Logistic Regression ---"
print "Test Acc: ", accuracy_score(y_test, pred_lr)
print confusion_matrix(y_test, pred_lr), '\n'
clf_dt = RandomForestClassifier(random_state=2018, max_depth=5, n_estimators=10, max_features=1)
clf_dt.fit(X_train, y_train)
pred_dt = clf_dt.predict(X_test)
print "------ Random Forest ------"
print "Test Acc: ", accuracy_score(y_test, pred_dt)
print confusion_matrix(y_test, pred_dt)
print "\n--- End Output ---"
'''
/usr/bin/python2.7 /home/hcilab/Documents/OSS/sensors2018cnnhar/opp/baseline_lrrf_upper.py
=========================================================
Outputs performance of other ML techniques, namely,
Logistic Regression & Random Forest
Using UPPER body sensors data.
=========================================================
===========================
[UPPER] 4-Class
===========================
--- Logistic Regression ---
Test Acc: 0.833184789067
[[4860 333 133 0]
[1379 2497 9 0]
[ 316 76 3068 0]
[ 0 0 0 793]]
------ Random Forest ------
Test Acc: 0.80830362448
[[4959 218 149 0]
[1620 2199 66 0]
[ 32 12 3416 0]
[ 9 0 475 309]]
===========================
[UPPER] Abstract Class
===========================
--- Logistic Regression ---
Test ACC: 0.973336304219
[[9131 80]
[ 279 3974]]
------ Random Forest ------
Test Acc: 0.982174688057
[[9176 35]
[ 205 4048]]
===========================
[UPPER] UP Class
===========================
--- Logistic Regression ---
Test Acc: 0.812289653675
[[4875 451]
[1278 2607]]
------ Random Forest ------
Test Acc: 0.809358375855
[[5064 262]
[1494 2391]]
===========================
[UPPER] DOWN Class
===========================
--- Logistic Regression ---
Test Acc: 1.0
[[3460 0]
[ 0 793]]
------ Random Forest ------
Test Acc: 0.981189748413
[[3460 0]
[ 80 713]]
--- End Output ---
Process finished with exit code 0
''' | 29.146597 | 96 | 0.60679 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,137 | 0.563499 |
699a6e09d5177bd3605891a0241caa9a9c07185e | 22,060 | py | Python | core/validators/admin.py | M-Spencer-94/configNOW | 56828587253202089e77cfdfcf5329f2a7f09b3f | [
"PSF-2.0",
"Apache-2.0",
"MIT"
] | 3 | 2019-07-09T20:02:48.000Z | 2021-11-21T20:00:37.000Z | core/validators/admin.py | M-Spencer-94/configNOW | 56828587253202089e77cfdfcf5329f2a7f09b3f | [
"PSF-2.0",
"Apache-2.0",
"MIT"
] | null | null | null | core/validators/admin.py | M-Spencer-94/configNOW | 56828587253202089e77cfdfcf5329f2a7f09b3f | [
"PSF-2.0",
"Apache-2.0",
"MIT"
] | null | null | null | # ============================================================================
#
# Copyright (c) 2007-2010 Integral Technology Solutions Pty Ltd,
# All Rights Reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS NOTICE BE
# LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# FOR FURTHER INFORMATION PLEASE SEE THE INTEGRAL TECHNOLOGY SOLUTIONS
# END USER LICENSE AGREEMENT (ELUA).
#
# ============================================================================
import validation_helper as helper
from java.io import File
def run(config):
    """Entry point used by the validation framework.

    Returns True when all admin-server properties in *config* are valid,
    False otherwise (validateAdminServerProperty returns a truthy value on
    failure).
    """
    return not validateAdminServerProperty(config)
# --- helpers for validateAdminServerProperty --------------------------------
# Every helper treats a missing/empty property as "not configured" and returns
# the number of validation errors found (0 or 1) so the caller can simply
# accumulate results.  `log` is provided by the surrounding scripting
# environment (WLST), as elsewhere in this module.

# Severity levels accepted by the broadcast and memory-buffer log settings.
_LOG_SEVERITIES = ['Trace', 'Debug', 'Info', 'Notice', 'Warning', 'Error',
                   'Critical', 'Alert', 'Emergency', 'Off']

def _checkIntProperty(domainProperties, key, label, low, high):
    """Validate that property *key*, when set, is an integer in the inclusive
    range [low, high].  Pass high=None for "no upper bound"."""
    value = domainProperties.getProperty(key)
    if value is None or len(value) == 0:
        return 0
    try:
        number = int(value)
    except ValueError:
        log.error('Please verify ' + key + ' [' + str(value) + '] property.')
        return 1
    if high is None:
        rangeText = '[>=' + str(low) + ']'
        outOfRange = number < low
    else:
        rangeText = '[' + str(low) + '-' + str(high) + ']'
        outOfRange = number < low or number > high
    if outOfRange:
        log.error('Please verify ' + key +
                  ' property, number is not in valid range ' + rangeText + '.')
        return 1
    log.debug(label + ' [' + str(value) + '] is valid.')
    return 0

def _checkBooleanProperty(domainProperties, key, label):
    """Validate an optional true/false property.

    Returns a (errors, enabled) pair; `enabled` is 1 only when the property
    is present, valid and set to true (case-insensitive, as before)."""
    value = domainProperties.getProperty(key)
    if value is None or len(value) == 0:
        return 0, 0
    if value.upper() != 'TRUE' and value.upper() != 'FALSE':
        log.error('The ' + key + ' property supports only [true,false].')
        return 1, 0
    log.debug(label + ' property [' + str(value) + '] is valid.')
    if value.upper() == 'TRUE':
        return 0, 1
    return 0, 0

def _checkEnumProperty(domainProperties, key, label, allowed):
    """Validate that property *key*, when set, is one of *allowed*
    (case-sensitive, matching the original comparisons)."""
    value = domainProperties.getProperty(key)
    if value is None or len(value) == 0:
        return 0
    if value not in allowed:
        log.error('The ' + key + ' property supports only [' +
                  ','.join(allowed) + '].')
        return 1
    log.debug(label + ' property [' + str(value) + '] is valid.')
    return 0

def _checkTimeProperty(domainProperties, key, label):
    """Validate an optional HH:MM rotation-time property."""
    value = domainProperties.getProperty(key)
    if value is None or len(value) == 0:
        return 0
    if value.find(':') == -1 or len(value) < 4 or len(value) > 5:
        log.error('Please verify ' + key + ' [' + str(value) +
                  '] property, the property supports time format [HH:MM].')
        return 1
    log.debug(label + ' [' + str(value) + '] is valid.')
    return 0

def _notePathProperty(domainProperties, key):
    """Emit the (debug-level, as before) note when an absolute path that is
    configured does not exist yet."""
    value = domainProperties.getProperty(key)
    if value is None or len(value) == 0:
        return
    path = File(value)
    if path.isAbsolute() and not path.exists():
        log.debug('[NOTE] Please make sure the user running this script has ' +
                  'permission to create directory and file [' + str(value) + '].')

def _validateLogSection(domainProperties, prefix, label):
    """Validate the rotation/retention settings shared by the server log and
    the http access log (*prefix* is 'wls.admin.log' or 'wls.admin.httplog')."""
    error = 0
    _notePathProperty(domainProperties, prefix + '.filename')
    result = _checkBooleanProperty(domainProperties, prefix + '.limitNumOfFile',
                                   label + ' limit number of file')
    error = error + result[0]
    error = error + _checkIntProperty(domainProperties, prefix + '.fileToRetain',
                                      label + ' file to retain', 1, 99999)
    result = _checkBooleanProperty(domainProperties, prefix + '.rotateLogOnStartup',
                                   label + ' rotate on startup')
    error = error + result[0]
    rotationType = domainProperties.getProperty(prefix + '.rotationType')
    error = error + _checkEnumProperty(domainProperties, prefix + '.rotationType',
                                       label + ' rotation type',
                                       ['bySize', 'byTime'])
    if rotationType == 'bySize':
        error = error + _checkIntProperty(domainProperties, prefix + '.fileMinSize',
                                          label + ' file min size', 0, 65535)
    if rotationType == 'byTime':
        error = error + _checkTimeProperty(domainProperties, prefix + '.rotationTime',
                                           label + ' rotation time')
        error = error + _checkIntProperty(domainProperties, prefix + '.fileTimeSpan',
                                          label + ' file timespan', 1, None)
    _notePathProperty(domainProperties, prefix + '.rotationDir')
    return error

def validateAdminServerProperty(domainProperties):
    """Validate all optional 'wls.admin.*' properties.

    Returns 0 when every configured property is valid and 1 otherwise (the
    same contract as before: run() only checks truthiness).

    Fixes over the previous version:
      * non-numeric or out-of-range ports/counters now actually flag an
        error (previously only an error message was logged),
      * wls.admin.log.memoryBufferSeverity is validated against its own
        value instead of (copy/paste) the logFileSeverity value.
    """
    error = 0
    helper.printHeader('[VALIDATING] admin server properties')
    error = error + _checkIntProperty(domainProperties, 'wls.admin.listener.port',
                                      'Admin server port', 0, 65535)
    result = _checkBooleanProperty(domainProperties, 'wls.admin.listener.enableSSL',
                                   'Admin server ssl enable')
    error = error + result[0]
    if result[1]:
        # The SSL port is only meaningful when SSL is switched on.
        error = error + _checkIntProperty(domainProperties,
                                          'wls.admin.listener.sslPort',
                                          'Admin server ssl port', 0, 65535)
    error = error + _checkEnumProperty(
        domainProperties, 'wls.admin.channel.protocol', 'Admin channel protocol',
        ['t3', 't3s', 'http', 'https', 'iiop', 'iiops', 'ldap', 'ldaps', 'admin'])
    error = error + _checkIntProperty(domainProperties,
                                      'wls.admin.channel.listener.port',
                                      'Admin channel port', 0, 65535)
    error = error + _checkIntProperty(domainProperties,
                                      'wls.admin.channel.listener.publicPort',
                                      'Admin channel public port', 0, 65535)
    result = _checkBooleanProperty(domainProperties, 'wls.admin.channel.httpEnable',
                                   'Admin http channel enable')
    error = error + result[0]
    result = _checkBooleanProperty(domainProperties, 'wls.admin.enableTunneling',
                                   'Admin tunnelling enable')
    error = error + result[0]
    result = _checkBooleanProperty(domainProperties, 'wls.admin.log.custom',
                                   'Admin custom log enable')
    error = error + result[0]
    if result[1]:
        error = error + _validateLogSection(domainProperties, 'wls.admin.log',
                                            'Admin log')
        # Severity settings exist only for the server log, not the http log.
        error = error + _checkEnumProperty(domainProperties,
                                           'wls.admin.log.logFileSeverity',
                                           'Admin log file severity',
                                           ['Debug', 'Info', 'Warning'])
        error = error + _checkEnumProperty(domainProperties,
                                           'wls.admin.log.broadcastSeverity',
                                           'Admin broadcast severity',
                                           _LOG_SEVERITIES)
        error = error + _checkEnumProperty(domainProperties,
                                           'wls.admin.log.memoryBufferSeverity',
                                           'Admin memory buffer severity',
                                           _LOG_SEVERITIES)
    result = _checkBooleanProperty(domainProperties, 'wls.admin.httplog.enable',
                                   'Admin http custom log enable')
    error = error + result[0]
    if result[1]:
        error = error + _validateLogSection(domainProperties, 'wls.admin.httplog',
                                            'Admin http log')
    if error > 0:
        return 1
    return 0
699b6cce7ca9f407c75af01305648159edf5193e | 836 | py | Python | doc/plots/stats/moments_expw.py | breisfeld/pandas | f1fd50bb8e7603042fe93e01e862766673e33450 | [
"BSD-3-Clause"
] | 10 | 2015-07-21T06:35:13.000Z | 2021-10-30T00:15:05.000Z | doc/plots/stats/moments_expw.py | breisfeld/pandas | f1fd50bb8e7603042fe93e01e862766673e33450 | [
"BSD-3-Clause"
] | null | null | null | doc/plots/stats/moments_expw.py | breisfeld/pandas | f1fd50bb8e7603042fe93e01e862766673e33450 | [
"BSD-3-Clause"
] | 5 | 2017-05-28T05:31:12.000Z | 2020-09-01T03:08:01.000Z | from moment_plots import *
# Demo plot comparing exponentially-weighted and equal-weighted rolling
# moments.  NOTE(review): `np`, `plt`, `m` and `test_series` are assumed to be
# provided by the star import from `moment_plots` above -- confirm against
# that module.
np.random.seed(1)
ts = test_series(500) * 10
# ts[::100] = 20
s = ts.cumsum()
# Three vertically stacked panels sharing the time axis.
fig, axes = plt.subplots(3, 1, figsize=(8, 10), sharex=True)
ax0, ax1, ax2 = axes
# Panel 1: the cumulative series itself.
ax0.plot(s.index, s.values)
ax0.set_title('time series')
# Panel 2: exponentially-weighted mean (blue) vs equal-weighted rolling mean (red).
ax1.plot(s.index, m.ewma(s, span=50, min_periods=1).values, color='b')
ax1.plot(s.index, m.rolling_mean(s, 50, min_periods=1).values, color='r')
ax1.set_title('rolling_mean vs. ewma')
# Panel 3: the corresponding standard deviations.
line1 = ax2.plot(s.index, m.ewmstd(s, span=50, min_periods=1).values, color='b')
line2 = ax2.plot(s.index, m.rolling_std(s, 50, min_periods=1).values, color='r')
ax2.set_title('rolling_std vs. ewmstd')
fig.legend((line1, line2),
           ('Exp-weighted', 'Equal-weighted'),
           loc='upper right')
fig.autofmt_xdate()
fig.subplots_adjust(bottom=0.10, top=0.95)
plt.show()
plt.close('all')
| 24.588235 | 80 | 0.673445 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 136 | 0.162679 |
699cbcc2ace017ff27f255949e80dc0e9759b091 | 3,564 | py | Python | RunMatch.py | fredboe/monte-carlo-tree-search | ac43bed9a3d1b62e12d2853d1e59839fe7400572 | [
"MIT"
] | 1 | 2020-07-17T09:40:11.000Z | 2020-07-17T09:40:11.000Z | RunMatch.py | fredboe/monte-carlo-tree-search | ac43bed9a3d1b62e12d2853d1e59839fe7400572 | [
"MIT"
] | null | null | null | RunMatch.py | fredboe/monte-carlo-tree-search | ac43bed9a3d1b62e12d2853d1e59839fe7400572 | [
"MIT"
] | null | null | null | import sys
# import the GameState of the game
from Game.GameStateConnect4 import GameState
# import all agents
from Agents.MCTS import MCTSTree
from Agents.Random import RandomAgent
from Agents.AlphaBeta import AlphaBetaAgent
# creates the board string for connect4 (full of zeros)
# Seven columns of "000000 " (six empty cells plus a column separator),
# joined into the flat board string used by GameState.
start_board_list = ["000000 " for _ in range(7)]
start_board = "".join(start_board_list)
class Match:
    """Plays one game of connect-4 between two agents.

    Agent names are strings and are dispatched (via getattr) to the
    identically named methods below: "MCTS", "RANDOM", "ALPHABETA" and
    "REALWORLD".  Constructing a Match immediately plays the game.
    """
    def __init__(self, agents, start_board):
        """agents: pair of agent-name strings; start_board: board string."""
        # names of agents: string
        self.agent1, self.agent2 = agents
        # board: string
        self.start_board = start_board
        # initial GameState; created for player id 1, who moves first
        self.initialState = GameState(start_board, 1)
        # resolve agent names to the bound methods that pick an action (int)
        self.func_agent1 = getattr(self, self.agent1)
        self.func_agent2 = getattr(self, self.agent2)
        # runs the match (kept in the constructor for backward compatibility)
        self.run_Match(self.initialState)
    def MCTS(self, state):
        """Return the action (int) chosen by the Monte-Carlo tree search agent."""
        return MCTSTree(state).runMCTS(state.player_id)
    def RANDOM(self, state):
        """Return the action (int) chosen by the random agent."""
        return RandomAgent().get_action(state)
    def ALPHABETA(self, state):
        """Return the action (int) chosen by the alpha-beta agent."""
        return AlphaBetaAgent().get_action(state, state.player_id)
    def REALWORLD(self, state):
        """Ask a human player for an action.

        Returns the chosen column (1-7).  Entering 0 aborts the program.
        (Fixed: the docstring previously said 1-8, and the prompt was
        missing a space between sentences.)
        """
        while True:
            try:
                action = int(input("Please type in your action. "
                                   + "It has to be a number between 1 and 7. "
                                   + "Type in 0 to stop the game!"))
            except ValueError:
                print("Please type in a number between 1 and 7.")
                continue
            if action == 0:
                print("\nSomebody gave up!")
                sys.exit()
            return action
    def run_Match(self, state):
        """Alternate between the two agents until the game is over, then
        print the winner (or a draw message)."""
        while not state.terminal_state():
            if state.player_id == 1:
                action = self.func_agent1(state)
                print(self.agent1 + " plays action: " + str(action))
            else:
                action = self.func_agent2(state)
                print(self.agent2 + " plays action: " + str(action))
            # Reject illegal moves and ask the same player again.
            if action not in state.actions:
                print("\n\nSorry, but this action isn't AVAILABLE.\n\n")
                continue
            state = state.result(action)
            print(str(state))
        winner = state.winner
        print("Player " + str(winner) + " has won." if winner else "Nobody won!")
if __name__ == "__main__":
    """ Create Match Object
    first parameter = agents (choose between "MCTS", "ALPHABETA",
                              "RANDOM", "REALWORLD")
    second parameter = start_board (string)
    """
    # Constructing a Match immediately plays the game (see Match.__init__).
    match = Match(("MCTS", "ALPHABETA"), start_board)
| 34.941176 | 78 | 0.527217 | 2,850 | 0.799663 | 0 | 0 | 0 | 0 | 0 | 0 | 1,379 | 0.386925 |
699d582f893b5deba41c301c34bab99d402a5311 | 990 | py | Python | elizabot.py | batisteo/elizabot | 3329d0e263a86496b06d70046f70fda2550edfb4 | [
"WTFPL"
] | null | null | null | elizabot.py | batisteo/elizabot | 3329d0e263a86496b06d70046f70fda2550edfb4 | [
"WTFPL"
] | 1 | 2017-02-20T20:18:24.000Z | 2017-02-21T09:59:05.000Z | elizabot.py | batisteo/elizabot | 3329d0e263a86496b06d70046f70fda2550edfb4 | [
"WTFPL"
] | null | null | null | import os
import asyncio
from time import sleep

import aiohttp
from aiotg import Bot
# Cleverbot REST endpoint; `key` and `input` are filled in per request.
CLEVERBOT = "https://www.cleverbot.com/getreply?key={key}&input={q}"
# Apertium-style machine-translation endpoint; `langpair` is e.g. 'epo|eng'.
APERTIUM = 'http://batisteo.eu:2737/translate?markUnknown=no&q={q}&langpair={pair}'
# Both tokens must be present in the environment; a missing key raises KeyError.
CLEVERBOT_TOKEN = os.environ['CLEVERBOT_TOKEN']
bot = Bot(api_token=os.environ['BOT_TOKEN'])
@bot.command(r"(.+)")
async def babili(chat, match):
    """Reply to an (Esperanto) message via Cleverbot.

    Pipeline: Esperanto -> English -> Cleverbot -> English -> Esperanto.
    A short "typing" pause proportional to the reply length is inserted
    before answering.
    """
    q = match.group(1) if match.group(1) else ''
    in_en = await trans(q, 'epo|eng')
    url = CLEVERBOT.format(key=CLEVERBOT_TOKEN, q=in_en)
    async with aiohttp.get(url) as s:
        response = await s.json()
    out = response["output"]
    await chat.send_chat_action('typing')
    # BUG FIX: time.sleep() here blocked the whole event loop (every other
    # chat stalled during the fake-typing pause); asyncio.sleep yields
    # control instead.
    await asyncio.sleep(len(out) / 10)
    in_eo = await trans(out, 'eng|epo')
    await chat.send_text(in_eo)
async def trans(q, pair):
    """Translate *q* through the Apertium endpoint for the given language
    *pair* (e.g. 'epo|eng') and return the translated text."""
    request_url = APERTIUM.format(q=q, pair=pair)
    async with aiohttp.get(request_url) as resp:
        payload = await resp.json()
    return payload['responseData']['translatedText']
# Start the bot.
bot.run()
| 27.5 | 83 | 0.659596 | 0 | 0 | 0 | 0 | 456 | 0.460606 | 631 | 0.637374 | 229 | 0.231313 |
699dbfba09e4b2780e57672a2d8ecc9e67db0fe3 | 702 | py | Python | resilient-circuits/resilient_circuits/__init__.py | COLDTURNIP/resilient-python-api | 14423f1dec32af67f7203c8d4d36d0a9e2651802 | [
"MIT"
] | 28 | 2017-12-22T00:26:59.000Z | 2022-01-22T14:51:33.000Z | resilient-circuits/resilient_circuits/__init__.py | COLDTURNIP/resilient-python-api | 14423f1dec32af67f7203c8d4d36d0a9e2651802 | [
"MIT"
] | 18 | 2018-03-06T19:04:20.000Z | 2022-03-21T15:06:30.000Z | resilient-circuits/resilient_circuits/__init__.py | COLDTURNIP/resilient-python-api | 14423f1dec32af67f7203c8d4d36d0a9e2651802 | [
"MIT"
] | 28 | 2018-05-01T17:53:22.000Z | 2022-03-28T09:56:59.000Z | # (c) Copyright IBM Corp. 2010, 2018. All Rights Reserved.
import pkg_resources
# Resolve the installed distribution's version string; when the package is
# not installed (DistributionNotFound, e.g. running from a source checkout),
# fall back to None.
try:
    __version__ = pkg_resources.get_distribution(__name__).version
except pkg_resources.DistributionNotFound:
    __version__ = None
from .actions_component import ResilientComponent
from .action_message import ActionMessageBase, ActionMessage, \
FunctionMessage, FunctionResult, FunctionError, \
StatusMessage, BaseFunctionError
from .decorators import function, inbound_app, app_function, handler, required_field, required_action_field, defer, debounce
from .actions_test_component import SubmitTestAction, SubmitTestFunction, SubmitTestInboundApp
from .app_function_component import AppFunctionComponent
| 43.875 | 124 | 0.837607 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 58 | 0.082621 |
69a1b349167847ac7ceea431928ed62a147ebb5e | 2,838 | py | Python | pastas/transform.py | pgraafstra/pastas | c065059e1df5b6c8e4afeb5278de2ef70fdf726c | [
"MIT"
] | null | null | null | pastas/transform.py | pgraafstra/pastas | c065059e1df5b6c8e4afeb5278de2ef70fdf726c | [
"MIT"
] | null | null | null | pastas/transform.py | pgraafstra/pastas | c065059e1df5b6c8e4afeb5278de2ef70fdf726c | [
"MIT"
] | null | null | null | """The transforms module contains all the transforms that can be added to the
simulation of a model. These transforms are applied after the simulation,
to incorporate nonlinear effects.
"""
import numpy as np
from pandas import DataFrame
class ThresholdTransform:
    """ThresholdTransform lowers the simulation when it exceeds a certain value

    In geohydrology this transform can for example be used in a situation where
    the groundwater level reaches the surface level and forms a lake. Because
    of the larger storage of the lake, the (groundwater) level then rises
    slower when it rains.

    Parameters
    ----------
    value : float
        The starting value above which the simulation is lowered
    vmin : float
        The minimum value above which the simulation is lowered
    vmax : float
        The maximum value above which the simulation is lowered
    name: str
        Name of the transform
    nparam : int
        The number of parameters. Default is nparam=2. The first parameter
        then is the threshold, and the second parameter is the factor with
        which the simulation is lowered.

    Notes
    -----
    The docstring previously listed ``vmin`` twice; the second entry is the
    maximum and is documented as ``vmax`` above.
    """
    _name = "ThresholdTransform"

    def __init__(self, value=np.nan, vmin=np.nan, vmax=np.nan,
                 name='ThresholdTransform', nparam=2):
        self.value = value
        self.vmin = vmin
        self.vmax = vmax
        self.name = name
        self.nparam = nparam
        # One row per parameter is added later by set_init_parameters().
        self.parameters = DataFrame(
            columns=['initial', 'pmin', 'pmax', 'vary', 'name'])

    def set_model(self, ml):
        """Derive default threshold bounds from the model's observations and
        initialize the parameter table.

        Any of value/vmin/vmax left at NaN is replaced by a position within
        the observed range (75%, 50% and the maximum respectively).
        """
        obs = ml.observations()
        if np.isnan(self.value):
            self.value = obs.min() + 0.75 * (obs.max() - obs.min())
        if np.isnan(self.vmin):
            self.vmin = obs.min() + 0.5 * (obs.max() - obs.min())
        if np.isnan(self.vmax):
            self.vmax = obs.max()
        self.set_init_parameters()

    def set_init_parameters(self):
        """(Re)build the parameter rows: the threshold itself and, when
        nparam == 2, the scale factor (initial 0.5, bounded to [0, 1])."""
        self.parameters.loc[self.name + '_1'] = (
            self.value, self.vmin, self.vmax, 1, self.name)
        if self.nparam == 2:
            self.parameters.loc[self.name + '_2'] = (0.5, 0., 1., 1, self.name)

    def simulate(self, h, p):
        """Apply the threshold to the simulated values *h*.

        Parameters
        ----------
        h : array-like
            Simulated values; modified in place.
        p : sequence
            Parameter values: p[0] is the threshold and, when nparam == 2,
            p[1] is the factor applied above it.

        Returns
        -------
        The (in-place modified) *h*.
        """
        if self.nparam == 1:
            # Values above the threshold p[0] are clipped to the threshold.
            h[h > p[0]] = p[0]
        elif self.nparam == 2:
            # Values above the threshold p[0] are scaled towards it by p[1].
            mask = h > p[0]
            h[mask] = p[0] + p[1] * (h[mask] - p[0])
        else:
            # Fixed message typo (was 'Not yet implemented yet').
            raise ValueError('Not yet implemented')
        return h

    def to_dict(self):
        """Return a serializable description of this transform."""
        data = {
            "transform": self._name,
            "value": self.value,
            "vmin": self.vmin,
            "vmax": self.vmax,
            "name": self.name,
            'nparam': self.nparam
        }
        return data
| 33.785714 | 79 | 0.588795 | 2,594 | 0.914024 | 0 | 0 | 0 | 0 | 0 | 0 | 1,320 | 0.465116 |
69a221dbea88cda21e34ea6b75e9fa9d44aaad95 | 382 | py | Python | pymockdata/datasets/default.py | vladcalin/py-mock-data-generator | 747a22b1a26b2db3cd3f9d2c4a35b5ba79d943c8 | [
"MIT"
] | 2 | 2016-08-04T13:40:39.000Z | 2016-10-07T20:30:07.000Z | pymockdata/datasets/default.py | vladcalin/py-mock-data-generator | 747a22b1a26b2db3cd3f9d2c4a35b5ba79d943c8 | [
"MIT"
] | 2 | 2016-08-18T08:02:13.000Z | 2016-08-18T08:09:18.000Z | pymockdata/datasets/default.py | vladcalin/pymockdata | 747a22b1a26b2db3cd3f9d2c4a35b5ba79d943c8 | [
"MIT"
] | null | null | null | import string
from ..datasets import Dataset
# Character-class datasets backed by the constants from the `string` module.
# (The second Dataset argument is passed as None throughout this module; see
# pymockdata.datasets.Dataset for its meaning.)
uppercase_ascii_letters = Dataset("uppercase_letter", None, string.ascii_uppercase)
lowercase_ascii_letters = Dataset("lowercase_letter", None, string.ascii_lowercase)
ascii_letters = Dataset("letter", None, string.ascii_letters)
digits = Dataset("digit", None, string.digits)
hex_digit = Dataset("hex_digit", None, string.hexdigits)
| 34.727273 | 83 | 0.801047 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 62 | 0.162304 |
69a258f2db5f02076d0ecb02bb0169664304388a | 1,732 | py | Python | Day 23/ViralAdvertising.py | sandeep-krishna/100DaysOfCode | af4594fb6933e4281d298fa921311ccc07295a7c | [
"MIT"
] | null | null | null | Day 23/ViralAdvertising.py | sandeep-krishna/100DaysOfCode | af4594fb6933e4281d298fa921311ccc07295a7c | [
"MIT"
] | null | null | null | Day 23/ViralAdvertising.py | sandeep-krishna/100DaysOfCode | af4594fb6933e4281d298fa921311ccc07295a7c | [
"MIT"
] | null | null | null | '''
HackerLand Enterprise is adopting a new viral advertising strategy. When they launch a new product, they advertise it to exactly 5 people on social media.
On the first day, half of those people (i.e., floor(5/2) = 2) like the advertisement and each shares it with 3 of their friends. At the beginning of the second day, 2 x 3 = 6 people receive the advertisement.
Each day, floor(recipients/2) of the recipients like the advertisement and will share it with 3 friends on the following day. Assuming nobody receives the advertisement twice, determine how many people have liked the ad by the end of a given day, beginning with launch day as day 1.
For example, assume you want to know how many have liked the ad by the end of the 5th day.
Day Shared Liked Cumulative
1 5 2 2
2 6 3 5
3 9 4 9
4 12 6 15
5 18 9 24
The cumulative number of likes by the end of day 5 is 24.
Function Description
Complete the viralAdvertising function in the editor below. It should return the cumulative number of people who have liked the ad at a given time.
viralAdvertising has the following parameter(s):
n: the integer number of days
Input Format
A single integer, n, denoting a number of days
Output Format
Print the number of people who liked the advertisement during the first n days.
Sample Input
3
Sample Output
9
'''
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the viralAdvertising function below.
def viralAdvertising(n):
    """Return the cumulative number of people who liked the ad after n days.

    Day 1 starts with 2 likes; each day's likers share with 3 friends and
    half of those recipients (floor division) like it the following day.
    """
    liked = 2
    cumulative = 2
    for _ in range(n - 1):
        liked = liked * 3 // 2
        cumulative += liked
    return cumulative
if __name__ == '__main__':
    # HackerRank harness: read the day count from stdin and write the answer
    # to the file named by the OUTPUT_PATH environment variable.
    n = int(input())
    result = viralAdvertising(n)
    # Use a context manager so the file is closed even if the write raises
    # (the previous open()/close() pair leaked the handle on error).
    with open(os.environ['OUTPUT_PATH'], 'w') as fptr:
        fptr.write(str(result) + '\n')
| 28.866667 | 261 | 0.714781 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,406 | 0.811778 |
69a520f1866db12311093e17ff6200b4515c5bbf | 33,413 | py | Python | zss_debug_pb2.py | StopPointTeam/APF-RRT | 2c68432d888b0886138c169e9fdcdfe0e41ca974 | [
"MIT"
] | null | null | null | zss_debug_pb2.py | StopPointTeam/APF-RRT | 2c68432d888b0886138c169e9fdcdfe0e41ca974 | [
"MIT"
] | null | null | null | zss_debug_pb2.py | StopPointTeam/APF-RRT | 2c68432d888b0886138c169e9fdcdfe0e41ca974 | [
"MIT"
] | null | null | null | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: zss_debug.proto
from google.protobuf import descriptor_pb2
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import reflection as _reflection
from google.protobuf import message as _message
from google.protobuf import descriptor as _descriptor
import sys
# NOTE(review): this file is protoc-generated ("DO NOT EDIT"); comments
# added here will be lost when the .proto is recompiled.
# Py2/Py3 shim emitted by protoc: on Python 2 byte-string literals pass
# through unchanged, on Python 3 they are encoded to latin-1 bytes.
_b = sys.version_info[0] < 3 and (
    lambda x: x) or (lambda x: x.encode('latin1'))
# @@protoc_insertion_point(imports)
# Default symbol database in which the generated descriptors are registered.
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='zss_debug.proto',
package='ZSS.Protocol',
syntax='proto2',
serialized_pb=_b('\n\x0fzss_debug.proto\x12\x0cZSS.Protocol\"\x1d\n\x05Point\x12\t\n\x01x\x18\x01 \x02(\x02\x12\t\n\x01y\x18\x02 \x02(\x02\"U\n\tRectangle\x12#\n\x06point1\x18\x01 \x02(\x0b\x32\x13.ZSS.Protocol.Point\x12#\n\x06point2\x18\x02 \x02(\x0b\x32\x13.ZSS.Protocol.Point\"<\n\x0b\x44\x65\x62ug_Robot\x12 \n\x03pos\x18\x01 \x02(\x0b\x32\x13.ZSS.Protocol.Point\x12\x0b\n\x03\x64ir\x18\x02 \x02(\x02\"q\n\nDebug_Line\x12\"\n\x05start\x18\x01 \x02(\x0b\x32\x13.ZSS.Protocol.Point\x12 \n\x03\x65nd\x18\x02 \x02(\x0b\x32\x13.ZSS.Protocol.Point\x12\x0f\n\x07\x46ORWARD\x18\x03 \x02(\x08\x12\x0c\n\x04\x42\x41\x43K\x18\x04 \x02(\x08\"a\n\tDebug_Arc\x12*\n\trectangle\x18\x01 \x02(\x0b\x32\x17.ZSS.Protocol.Rectangle\x12\r\n\x05start\x18\x02 \x02(\x02\x12\x0b\n\x03\x65nd\x18\x03 \x02(\x02\x12\x0c\n\x04\x46ILL\x18\x04 \x02(\x08\"B\n\rDebug_Polygon\x12#\n\x06vertex\x18\x01 \x03(\x0b\x32\x13.ZSS.Protocol.Point\x12\x0c\n\x04\x46ILL\x18\x02 \x02(\x08\"<\n\nDebug_Text\x12 \n\x03pos\x18\x01 \x02(\x0b\x32\x13.ZSS.Protocol.Point\x12\x0c\n\x04text\x18\x02 \x02(\t\"?\n\x0c\x44\x65\x62ug_Curve_\x12\x0b\n\x03num\x18\x01 \x02(\x02\x12\x10\n\x08maxLimit\x18\x02 \x02(\x02\x12\x10\n\x08minLimit\x18\x03 \x02(\x02\"\x95\x01\n\x0b\x44\x65\x62ug_Curve\x12\"\n\x05start\x18\x01 \x02(\x0b\x32\x13.ZSS.Protocol.Point\x12\x1f\n\x02p1\x18\x02 \x02(\x0b\x32\x13.ZSS.Protocol.Point\x12\x1f\n\x02p2\x18\x03 \x02(\x0b\x32\x13.ZSS.Protocol.Point\x12 \n\x03\x65nd\x18\x04 \x02(\x0b\x32\x13.ZSS.Protocol.Point\"2\n\x0c\x44\x65\x62ug_Points\x12\"\n\x05point\x18\x01 \x03(\x0b\x32\x13.ZSS.Protocol.Point\"\xdf\x04\n\tDebug_Msg\x12\x30\n\x04type\x18\x01 \x02(\x0e\x32\".ZSS.Protocol.Debug_Msg.Debug_Type\x12,\n\x05\x63olor\x18\x02 \x02(\x0e\x32\x1d.ZSS.Protocol.Debug_Msg.Color\x12$\n\x03\x61rc\x18\x03 \x01(\x0b\x32\x17.ZSS.Protocol.Debug_Arc\x12&\n\x04line\x18\x04 \x01(\x0b\x32\x18.ZSS.Protocol.Debug_Line\x12&\n\x04text\x18\x05 \x01(\x0b\x32\x18.ZSS.Protocol.Debug_Text\x12(\n\x05robot\x18\x06 
\x01(\x0b\x32\x19.ZSS.Protocol.Debug_Robot\x12)\n\x05\x63urve\x18\x07 \x01(\x0b\x32\x1a.ZSS.Protocol.Debug_Curve_\x12,\n\x07polygon\x18\x08 \x01(\x0b\x32\x1b.ZSS.Protocol.Debug_Polygon\x12*\n\x06points\x18\t \x01(\x0b\x32\x1a.ZSS.Protocol.Debug_Points\"X\n\nDebug_Type\x12\x07\n\x03\x41RC\x10\x00\x12\x08\n\x04LINE\x10\x01\x12\x08\n\x04TEXT\x10\x02\x12\t\n\x05ROBOT\x10\x03\x12\t\n\x05\x43URVE\x10\x04\x12\x0b\n\x07POLYGON\x10\x05\x12\n\n\x06Points\x10\x06\"s\n\x05\x43olor\x12\t\n\x05WHITE\x10\x00\x12\x07\n\x03RED\x10\x01\x12\n\n\x06ORANGE\x10\x02\x12\n\n\x06YELLOW\x10\x03\x12\t\n\x05GREEN\x10\x04\x12\x08\n\x04\x43YAN\x10\x05\x12\x08\n\x04\x42LUE\x10\x06\x12\n\n\x06PURPLE\x10\x07\x12\x08\n\x04GRAY\x10\x08\x12\t\n\x05\x42LACK\x10\t\"3\n\nDebug_Msgs\x12%\n\x04msgs\x18\x01 \x03(\x0b\x32\x17.ZSS.Protocol.Debug_Msg\"<\n\x0b\x44\x65\x62ug_Score\x12\r\n\x05\x63olor\x18\x01 \x02(\x05\x12\x1e\n\x01p\x18\x02 \x03(\x0b\x32\x13.ZSS.Protocol.Point\"9\n\x0c\x44\x65\x62ug_Scores\x12)\n\x06scores\x18\x01 \x03(\x0b\x32\x19.ZSS.Protocol.Debug_Score')
)
_DEBUG_MSG_DEBUG_TYPE = _descriptor.EnumDescriptor(
name='Debug_Type',
full_name='ZSS.Protocol.Debug_Msg.Debug_Type',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='ARC', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LINE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TEXT', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ROBOT', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CURVE', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='POLYGON', index=5, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Points', index=6, number=6,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1229,
serialized_end=1317,
)
_sym_db.RegisterEnumDescriptor(_DEBUG_MSG_DEBUG_TYPE)
_DEBUG_MSG_COLOR = _descriptor.EnumDescriptor(
name='Color',
full_name='ZSS.Protocol.Debug_Msg.Color',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='WHITE', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RED', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ORANGE', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='YELLOW', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GREEN', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CYAN', index=5, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BLUE', index=6, number=6,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PURPLE', index=7, number=7,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GRAY', index=8, number=8,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BLACK', index=9, number=9,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1319,
serialized_end=1434,
)
_sym_db.RegisterEnumDescriptor(_DEBUG_MSG_COLOR)
_POINT = _descriptor.Descriptor(
name='Point',
full_name='ZSS.Protocol.Point',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='x', full_name='ZSS.Protocol.Point.x', index=0,
number=1, type=2, cpp_type=6, label=2,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='y', full_name='ZSS.Protocol.Point.y', index=1,
number=2, type=2, cpp_type=6, label=2,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=33,
serialized_end=62,
)
_RECTANGLE = _descriptor.Descriptor(
name='Rectangle',
full_name='ZSS.Protocol.Rectangle',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='point1', full_name='ZSS.Protocol.Rectangle.point1', index=0,
number=1, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='point2', full_name='ZSS.Protocol.Rectangle.point2', index=1,
number=2, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=64,
serialized_end=149,
)
_DEBUG_ROBOT = _descriptor.Descriptor(
name='Debug_Robot',
full_name='ZSS.Protocol.Debug_Robot',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='pos', full_name='ZSS.Protocol.Debug_Robot.pos', index=0,
number=1, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dir', full_name='ZSS.Protocol.Debug_Robot.dir', index=1,
number=2, type=2, cpp_type=6, label=2,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=151,
serialized_end=211,
)
_DEBUG_LINE = _descriptor.Descriptor(
name='Debug_Line',
full_name='ZSS.Protocol.Debug_Line',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='start', full_name='ZSS.Protocol.Debug_Line.start', index=0,
number=1, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='end', full_name='ZSS.Protocol.Debug_Line.end', index=1,
number=2, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='FORWARD', full_name='ZSS.Protocol.Debug_Line.FORWARD', index=2,
number=3, type=8, cpp_type=7, label=2,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='BACK', full_name='ZSS.Protocol.Debug_Line.BACK', index=3,
number=4, type=8, cpp_type=7, label=2,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=213,
serialized_end=326,
)
_DEBUG_ARC = _descriptor.Descriptor(
name='Debug_Arc',
full_name='ZSS.Protocol.Debug_Arc',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='rectangle', full_name='ZSS.Protocol.Debug_Arc.rectangle', index=0,
number=1, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='start', full_name='ZSS.Protocol.Debug_Arc.start', index=1,
number=2, type=2, cpp_type=6, label=2,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='end', full_name='ZSS.Protocol.Debug_Arc.end', index=2,
number=3, type=2, cpp_type=6, label=2,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='FILL', full_name='ZSS.Protocol.Debug_Arc.FILL', index=3,
number=4, type=8, cpp_type=7, label=2,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=328,
serialized_end=425,
)
_DEBUG_POLYGON = _descriptor.Descriptor(
name='Debug_Polygon',
full_name='ZSS.Protocol.Debug_Polygon',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='vertex', full_name='ZSS.Protocol.Debug_Polygon.vertex', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='FILL', full_name='ZSS.Protocol.Debug_Polygon.FILL', index=1,
number=2, type=8, cpp_type=7, label=2,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=427,
serialized_end=493,
)
_DEBUG_TEXT = _descriptor.Descriptor(
name='Debug_Text',
full_name='ZSS.Protocol.Debug_Text',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='pos', full_name='ZSS.Protocol.Debug_Text.pos', index=0,
number=1, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='text', full_name='ZSS.Protocol.Debug_Text.text', index=1,
number=2, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=495,
serialized_end=555,
)
_DEBUG_CURVE_ = _descriptor.Descriptor(
name='Debug_Curve_',
full_name='ZSS.Protocol.Debug_Curve_',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='num', full_name='ZSS.Protocol.Debug_Curve_.num', index=0,
number=1, type=2, cpp_type=6, label=2,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='maxLimit', full_name='ZSS.Protocol.Debug_Curve_.maxLimit', index=1,
number=2, type=2, cpp_type=6, label=2,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='minLimit', full_name='ZSS.Protocol.Debug_Curve_.minLimit', index=2,
number=3, type=2, cpp_type=6, label=2,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=557,
serialized_end=620,
)
_DEBUG_CURVE = _descriptor.Descriptor(
name='Debug_Curve',
full_name='ZSS.Protocol.Debug_Curve',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='start', full_name='ZSS.Protocol.Debug_Curve.start', index=0,
number=1, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='p1', full_name='ZSS.Protocol.Debug_Curve.p1', index=1,
number=2, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='p2', full_name='ZSS.Protocol.Debug_Curve.p2', index=2,
number=3, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='end', full_name='ZSS.Protocol.Debug_Curve.end', index=3,
number=4, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=623,
serialized_end=772,
)
_DEBUG_POINTS = _descriptor.Descriptor(
name='Debug_Points',
full_name='ZSS.Protocol.Debug_Points',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='point', full_name='ZSS.Protocol.Debug_Points.point', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=774,
serialized_end=824,
)
_DEBUG_MSG = _descriptor.Descriptor(
name='Debug_Msg',
full_name='ZSS.Protocol.Debug_Msg',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='ZSS.Protocol.Debug_Msg.type', index=0,
number=1, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='color', full_name='ZSS.Protocol.Debug_Msg.color', index=1,
number=2, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='arc', full_name='ZSS.Protocol.Debug_Msg.arc', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='line', full_name='ZSS.Protocol.Debug_Msg.line', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='text', full_name='ZSS.Protocol.Debug_Msg.text', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='robot', full_name='ZSS.Protocol.Debug_Msg.robot', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='curve', full_name='ZSS.Protocol.Debug_Msg.curve', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='polygon', full_name='ZSS.Protocol.Debug_Msg.polygon', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='points', full_name='ZSS.Protocol.Debug_Msg.points', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_DEBUG_MSG_DEBUG_TYPE,
_DEBUG_MSG_COLOR,
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=827,
serialized_end=1434,
)
_DEBUG_MSGS = _descriptor.Descriptor(
name='Debug_Msgs',
full_name='ZSS.Protocol.Debug_Msgs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='msgs', full_name='ZSS.Protocol.Debug_Msgs.msgs', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1436,
serialized_end=1487,
)
_DEBUG_SCORE = _descriptor.Descriptor(
name='Debug_Score',
full_name='ZSS.Protocol.Debug_Score',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='color', full_name='ZSS.Protocol.Debug_Score.color', index=0,
number=1, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='p', full_name='ZSS.Protocol.Debug_Score.p', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1489,
serialized_end=1549,
)
_DEBUG_SCORES = _descriptor.Descriptor(
name='Debug_Scores',
full_name='ZSS.Protocol.Debug_Scores',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='scores', full_name='ZSS.Protocol.Debug_Scores.scores', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1551,
serialized_end=1608,
)
_RECTANGLE.fields_by_name['point1'].message_type = _POINT
_RECTANGLE.fields_by_name['point2'].message_type = _POINT
_DEBUG_ROBOT.fields_by_name['pos'].message_type = _POINT
_DEBUG_LINE.fields_by_name['start'].message_type = _POINT
_DEBUG_LINE.fields_by_name['end'].message_type = _POINT
_DEBUG_ARC.fields_by_name['rectangle'].message_type = _RECTANGLE
_DEBUG_POLYGON.fields_by_name['vertex'].message_type = _POINT
_DEBUG_TEXT.fields_by_name['pos'].message_type = _POINT
_DEBUG_CURVE.fields_by_name['start'].message_type = _POINT
_DEBUG_CURVE.fields_by_name['p1'].message_type = _POINT
_DEBUG_CURVE.fields_by_name['p2'].message_type = _POINT
_DEBUG_CURVE.fields_by_name['end'].message_type = _POINT
_DEBUG_POINTS.fields_by_name['point'].message_type = _POINT
_DEBUG_MSG.fields_by_name['type'].enum_type = _DEBUG_MSG_DEBUG_TYPE
_DEBUG_MSG.fields_by_name['color'].enum_type = _DEBUG_MSG_COLOR
_DEBUG_MSG.fields_by_name['arc'].message_type = _DEBUG_ARC
_DEBUG_MSG.fields_by_name['line'].message_type = _DEBUG_LINE
_DEBUG_MSG.fields_by_name['text'].message_type = _DEBUG_TEXT
_DEBUG_MSG.fields_by_name['robot'].message_type = _DEBUG_ROBOT
_DEBUG_MSG.fields_by_name['curve'].message_type = _DEBUG_CURVE_
_DEBUG_MSG.fields_by_name['polygon'].message_type = _DEBUG_POLYGON
_DEBUG_MSG.fields_by_name['points'].message_type = _DEBUG_POINTS
_DEBUG_MSG_DEBUG_TYPE.containing_type = _DEBUG_MSG
_DEBUG_MSG_COLOR.containing_type = _DEBUG_MSG
_DEBUG_MSGS.fields_by_name['msgs'].message_type = _DEBUG_MSG
_DEBUG_SCORE.fields_by_name['p'].message_type = _POINT
_DEBUG_SCORES.fields_by_name['scores'].message_type = _DEBUG_SCORE
DESCRIPTOR.message_types_by_name['Point'] = _POINT
DESCRIPTOR.message_types_by_name['Rectangle'] = _RECTANGLE
DESCRIPTOR.message_types_by_name['Debug_Robot'] = _DEBUG_ROBOT
DESCRIPTOR.message_types_by_name['Debug_Line'] = _DEBUG_LINE
DESCRIPTOR.message_types_by_name['Debug_Arc'] = _DEBUG_ARC
DESCRIPTOR.message_types_by_name['Debug_Polygon'] = _DEBUG_POLYGON
DESCRIPTOR.message_types_by_name['Debug_Text'] = _DEBUG_TEXT
DESCRIPTOR.message_types_by_name['Debug_Curve_'] = _DEBUG_CURVE_
DESCRIPTOR.message_types_by_name['Debug_Curve'] = _DEBUG_CURVE
DESCRIPTOR.message_types_by_name['Debug_Points'] = _DEBUG_POINTS
DESCRIPTOR.message_types_by_name['Debug_Msg'] = _DEBUG_MSG
DESCRIPTOR.message_types_by_name['Debug_Msgs'] = _DEBUG_MSGS
DESCRIPTOR.message_types_by_name['Debug_Score'] = _DEBUG_SCORE
DESCRIPTOR.message_types_by_name['Debug_Scores'] = _DEBUG_SCORES
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Point = _reflection.GeneratedProtocolMessageType('Point', (_message.Message,), dict(
DESCRIPTOR=_POINT,
__module__='zss_debug_pb2'
# @@protoc_insertion_point(class_scope:ZSS.Protocol.Point)
))
_sym_db.RegisterMessage(Point)
Rectangle = _reflection.GeneratedProtocolMessageType('Rectangle', (_message.Message,), dict(
DESCRIPTOR=_RECTANGLE,
__module__='zss_debug_pb2'
# @@protoc_insertion_point(class_scope:ZSS.Protocol.Rectangle)
))
_sym_db.RegisterMessage(Rectangle)
Debug_Robot = _reflection.GeneratedProtocolMessageType('Debug_Robot', (_message.Message,), dict(
DESCRIPTOR=_DEBUG_ROBOT,
__module__='zss_debug_pb2'
# @@protoc_insertion_point(class_scope:ZSS.Protocol.Debug_Robot)
))
_sym_db.RegisterMessage(Debug_Robot)
Debug_Line = _reflection.GeneratedProtocolMessageType('Debug_Line', (_message.Message,), dict(
DESCRIPTOR=_DEBUG_LINE,
__module__='zss_debug_pb2'
# @@protoc_insertion_point(class_scope:ZSS.Protocol.Debug_Line)
))
_sym_db.RegisterMessage(Debug_Line)
Debug_Arc = _reflection.GeneratedProtocolMessageType('Debug_Arc', (_message.Message,), dict(
DESCRIPTOR=_DEBUG_ARC,
__module__='zss_debug_pb2'
# @@protoc_insertion_point(class_scope:ZSS.Protocol.Debug_Arc)
))
_sym_db.RegisterMessage(Debug_Arc)
Debug_Polygon = _reflection.GeneratedProtocolMessageType('Debug_Polygon', (_message.Message,), dict(
DESCRIPTOR=_DEBUG_POLYGON,
__module__='zss_debug_pb2'
# @@protoc_insertion_point(class_scope:ZSS.Protocol.Debug_Polygon)
))
_sym_db.RegisterMessage(Debug_Polygon)
Debug_Text = _reflection.GeneratedProtocolMessageType('Debug_Text', (_message.Message,), dict(
DESCRIPTOR=_DEBUG_TEXT,
__module__='zss_debug_pb2'
# @@protoc_insertion_point(class_scope:ZSS.Protocol.Debug_Text)
))
_sym_db.RegisterMessage(Debug_Text)
Debug_Curve_ = _reflection.GeneratedProtocolMessageType('Debug_Curve_', (_message.Message,), dict(
DESCRIPTOR=_DEBUG_CURVE_,
__module__='zss_debug_pb2'
# @@protoc_insertion_point(class_scope:ZSS.Protocol.Debug_Curve_)
))
_sym_db.RegisterMessage(Debug_Curve_)
Debug_Curve = _reflection.GeneratedProtocolMessageType('Debug_Curve', (_message.Message,), dict(
DESCRIPTOR=_DEBUG_CURVE,
__module__='zss_debug_pb2'
# @@protoc_insertion_point(class_scope:ZSS.Protocol.Debug_Curve)
))
_sym_db.RegisterMessage(Debug_Curve)
Debug_Points = _reflection.GeneratedProtocolMessageType('Debug_Points', (_message.Message,), dict(
DESCRIPTOR=_DEBUG_POINTS,
__module__='zss_debug_pb2'
# @@protoc_insertion_point(class_scope:ZSS.Protocol.Debug_Points)
))
_sym_db.RegisterMessage(Debug_Points)
Debug_Msg = _reflection.GeneratedProtocolMessageType('Debug_Msg', (_message.Message,), dict(
DESCRIPTOR=_DEBUG_MSG,
__module__='zss_debug_pb2'
# @@protoc_insertion_point(class_scope:ZSS.Protocol.Debug_Msg)
))
_sym_db.RegisterMessage(Debug_Msg)
Debug_Msgs = _reflection.GeneratedProtocolMessageType('Debug_Msgs', (_message.Message,), dict(
DESCRIPTOR=_DEBUG_MSGS,
__module__='zss_debug_pb2'
# @@protoc_insertion_point(class_scope:ZSS.Protocol.Debug_Msgs)
))
_sym_db.RegisterMessage(Debug_Msgs)
Debug_Score = _reflection.GeneratedProtocolMessageType('Debug_Score', (_message.Message,), dict(
DESCRIPTOR=_DEBUG_SCORE,
__module__='zss_debug_pb2'
# @@protoc_insertion_point(class_scope:ZSS.Protocol.Debug_Score)
))
_sym_db.RegisterMessage(Debug_Score)
Debug_Scores = _reflection.GeneratedProtocolMessageType('Debug_Scores', (_message.Message,), dict(
DESCRIPTOR=_DEBUG_SCORES,
__module__='zss_debug_pb2'
# @@protoc_insertion_point(class_scope:ZSS.Protocol.Debug_Scores)
))
_sym_db.RegisterMessage(Debug_Scores)
# @@protoc_insertion_point(module_scope)
| 38.186286 | 3,019 | 0.671864 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7,086 | 0.212073 |
69a62a85476caabb3a971fbe3016b560f4044dd1 | 1,055 | py | Python | python2.7libs/hammer_tools/material_library/image.py | anvdev/Hammer-Tools | 0211ec837da6754e537c98624ecd07c23abab28e | [
"Apache-2.0"
] | 19 | 2019-10-09T13:48:11.000Z | 2021-06-14T01:25:23.000Z | python2.7libs/hammer_tools/material_library/image.py | anvdev/Hammer-Tools | 0211ec837da6754e537c98624ecd07c23abab28e | [
"Apache-2.0"
] | 219 | 2019-10-08T14:44:48.000Z | 2021-06-19T06:27:46.000Z | python2.7libs/hammer_tools/material_library/image.py | anvdev/Hammer-Tools | 0211ec837da6754e537c98624ecd07c23abab28e | [
"Apache-2.0"
] | 3 | 2020-02-14T06:18:06.000Z | 2020-11-25T20:47:06.000Z | import os
import subprocess
import tempfile
try:
from PyQt5.QtCore import QBuffer, QIODevice, Qt
from PyQt5.QtGui import QImage
except ImportError:
from PySide2.QtCore import QBuffer, QIODevice, Qt
from PySide2.QtGui import QImage
from .texture_format import TextureFormat
def imageToBytes(image, image_format='png'):
    """Serialize a QImage to an in-memory byte array.

    Parameters
    ----------
    image : QImage
        Image to serialize.
    image_format : str
        Encoder format name passed to ``QImage.save`` (default: ``'png'``),
        generalizing the previously hard-coded PNG encoding.

    Returns
    -------
    QByteArray
        The encoded image bytes.
    """
    buffer = QBuffer()
    buffer.open(QIODevice.ReadWrite)
    # QImage.save writes the encoded stream into the QBuffer's array.
    image.save(buffer, image_format)
    data = buffer.data()
    buffer.close()
    return data
def loadImage(path):
    """Load an image file as a QImage.

    Formats Qt decodes natively are loaded directly; any other texture
    format is first converted to a temporary PNG with the ``iconvert``
    command-line tool and loaded from there.

    Parameters
    ----------
    path : str
        Path of the image file to load.

    Returns
    -------
    QImage or None
        The loaded image, or None when loading failed or the conversion
        produced no file.
    """
    tex_format = TextureFormat(path)
    if tex_format in {'png', 'bmp', 'tga', 'tif', 'tiff', 'jpg', 'jpeg'}:
        image = QImage(path)
        if not image.isNull():
            return image
        # Qt failed on a format it should support: give up here rather
        # than falling through to iconvert (original behavior preserved).
        return None
    temp_path = os.path.join(tempfile.gettempdir(),
                             str(os.getpid()) + 'hammer_temp_image.png')
    temp_path = temp_path.replace('\\', '/')
    # Pass arguments as a list (no shell parsing): works on POSIX as well
    # as Windows, and paths containing spaces or quotes can neither break
    # nor inject into the command line.
    subprocess.call(['iconvert', '-g', 'off', path, temp_path])
    if os.path.exists(temp_path):
        image = QImage(temp_path)
        os.remove(temp_path)
        return image
    return None
| 26.375 | 95 | 0.650237 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 101 | 0.095735 |
69a7b9c751b9a5826861e0006bc21e2f11a6d258 | 4,324 | py | Python | pypipe/apps/utils/fill_treectrl.py | AGrigis/pypipe | a77fc2c81cb469535b650c79718f811c5c056238 | [
"CECILL-B"
] | null | null | null | pypipe/apps/utils/fill_treectrl.py | AGrigis/pypipe | a77fc2c81cb469535b650c79718f811c5c056238 | [
"CECILL-B"
] | null | null | null | pypipe/apps/utils/fill_treectrl.py | AGrigis/pypipe | a77fc2c81cb469535b650c79718f811c5c056238 | [
"CECILL-B"
] | null | null | null | ##########################################################################
# PyPipe - Copyright (C) AGrigis, 2017
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
# Soma import
from PySide2 import QtWidgets
from PySide2 import QtGui
# Global parameters
# Bold 9pt font (default family) used by add_tree_nodes to highlight
# loadable leaf functions in the tree control.
font = QtGui.QFont("", 9, QtGui.QFont.Bold)
def fill_treectrl(treectrl, menu, match=""):
    """ Fill a tree control with the different menu items.

    This procedure is able to filter the menu items; filtering is
    case-insensitive. Loadable functions appear in bold in the tree
    control.

    Insert four elements per node (current name, function module path,
    function input parameters, function output parameters). When the
    function module path is not None we have reached a leaf that
    contains a function description.

    Parameters
    ----------
    treectrl: QTreeControl (mandatory)
        the tree control where we want to insert the menu
    menu: hierachic dict (mandatory)
        each key is a sub module of the module. Leafs contain a list with
        the url to the documentation.
    match: str (optional)
        the string used to filter the menu items
    """
    treectrl.headerItem().setText(0, "Functions")
    # Lower-case the filter once: the matching below compares against
    # lower-cased module names, so this makes the documented
    # case-insensitivity hold for any caller-supplied cast.
    add_tree_nodes(treectrl, menu, match.lower())
def add_tree_nodes(parent_item, menu, match, parent_module=""):
    """ Add the menu to tree control if match in current module name or
    child modules.

    The match is insensitive to the cast.

    Parameters
    ----------
    parent_item: QTreeWidgetItem (mandatory)
        a tree control item where we want to insert the menu
    menu: hierachic dict (mandatory)
        each key is a sub module of the module. Leafs contain a list with
        the url to the documentation.
    match: str (mandatory)
        the string used to filter the menu items
    parent_module: str (optional)
        the parent module string description ('module.sub_module')
    """
    # Lower-case the filter so comparisons against lower-cased module
    # names are truly case-insensitive (idempotent across recursion).
    match = match.lower()
    # Go through the current module sub modules
    for module_name, child_modules in menu.items():
        # Filtering: check if we need to add this module in the tree
        if (match == "" or match in module_name.lower() or
                search_in_menu(child_modules, match)):
            # Add the module name to the tree control
            if isinstance(child_modules, dict):
                # Intermediate node: no function description attached.
                tree_item = QtWidgets.QTreeWidgetItem(
                    parent_item, [module_name, "None", "None", "None"])
                if parent_module:
                    current_module = parent_module + "." + module_name
                else:
                    current_module = module_name
                add_tree_nodes(tree_item, child_modules, match, current_module)
            else:
                # Leaf node: attach the function description and render
                # it in bold to mark it as loadable.
                tree_item = QtWidgets.QTreeWidgetItem(
                    parent_item, [
                        module_name, child_modules[0], str(child_modules[1]),
                        str(child_modules[2])])
                tree_item.setFont(0, font)
def search_in_menu(menu, match):
    """ Recursively check whether `match` occurs in any module name of
    the menu tree.

    The search is case-insensitive (module names are lower-cased before
    comparison).

    Parameters
    ----------
    menu: hierarchic dict (mandatory)
        sub-module name -> child dict, or a leaf list describing a
        function (the leaf itself carries no searchable name)
    match: str (mandatory)
        the string used to filter the menu items

    Returns
    -------
    is_included: bool
        True if we found match in the tree, False otherwise.
    """
    # A leaf (per-function description list) has no module name to match.
    if isinstance(menu, list):
        return False
    for module_name, child_modules in menu.items():
        if match in module_name.lower():
            return True
        # BUGFIX: the previous version returned as soon as it reached a
        # leaf child, skipping every later sibling; now every sibling is
        # examined before giving up.
        if not isinstance(child_modules, list) and \
                search_in_menu(child_modules, match):
            return True
    return False
| 34.592 | 79 | 0.623034 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,599 | 0.601064 |
69a8de3030cd088d725760a1b68e9101a4f8a5ca | 7,945 | py | Python | lib/crossmatching.py | leejjoon/pystilts | 33980bf9e47d17bb981cce6b2861c6c750f245ba | [
"MIT"
] | 4 | 2020-01-05T22:30:54.000Z | 2022-02-12T11:47:07.000Z | lib/crossmatching.py | leejjoon/pystilts | 33980bf9e47d17bb981cce6b2861c6c750f245ba | [
"MIT"
] | 1 | 2019-07-03T21:41:03.000Z | 2019-07-05T18:08:01.000Z | lib/crossmatching.py | leejjoon/pystilts | 33980bf9e47d17bb981cce6b2861c6c750f245ba | [
"MIT"
] | 4 | 2019-07-12T10:08:13.000Z | 2021-05-27T15:57:48.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on 02/06/17 at 2:24 PM
@author: neil
Program description here
Version 0.0.0
"""
from . import constants
from . import utils
from astropy import units as u
# =============================================================================
# Define variables
# =============================================================================
runcommand = utils.runcommand
command_arguments = utils.command_arguments
STILTS = constants.STILTS
# =============================================================================
# Define functions
# =============================================================================
def tapskymatch(**kwargs):
    """Run the STILTS ``tapskymatch`` task (positional crossmatch of a
    local table against a remote TAP table).

    Keyword arguments are validated and translated into command-line
    arguments by ``command_arguments``; the ``spec`` mapping below lists
    the accepted argument names with their aliases (``v``), whether they
    are required (``r``), default values (``d``) and expected astropy
    units (``u``).
    """
    # Argument specification: v = aliases, r = required, d = default,
    # u = expected unit.
    spec = {
        'tapurl': dict(v=['tapurl'], r=True),
        'taptable': dict(v=['taptable'], r=True),
        'taplon': dict(v=['taplon'], r=True),
        'taplat': dict(v=['taplat'], r=True),
        'inlon': dict(v=['inlon'], r=True),
        'inlat': dict(v=['inlat'], r=True),
        'icmd': dict(v=['icmd'], r=False),
        'ocmd': dict(v=['ocmd'], r=False),
        'sr': dict(v=['radius', 'error', 'sr'], r=True, u=u.deg),
        'in': dict(v=['infile', 'in'], r=True),
        'out': dict(v=['outfile', 'out'], r=True),
        'fixcols': dict(v=['fixcols'], r=False, d='dups'),
        'suffixin': dict(v=['suffixin'], r=False),
        'suffixremote': dict(v=['suffixremote'], r=False),
    }
    commandargs = command_arguments(spec, kwargs, 'tapskymatch')
    # Assemble the full shell command and execute it.
    command = STILTS + ' tapskymatch ' + ''.join(
        commandargs[key] for key in commandargs)
    runcommand(command)
def tmatch2(**kwargs):
    """Run the STILTS ``tmatch2`` task (pairwise crossmatch of two tables).

    Accepted keywords (see the ``spec`` mapping below for aliases,
    required flags and defaults):

    in1, in2 : str
        Locations of the two input tables (a filename or a URL).
    matcher : str
        Nature of the matching to perform; e.g. 'sky' (great-circle
        separation with a fixed error radius in arcsec), 'skyerr',
        'skyellipse', 'sky3d', 'exact', '1d', '2d', '2d_anisotropic',
        '2d_cuboid', '1d_err'/'2d_err', '2d_ellipse'. The chosen matcher
        determines which coordinate/error values must be supplied.
    values1, values2 : str
        Per-table values (typically RA/Dec plus optional per-row errors)
        used by the matcher to decide whether two rows match.
    join : str
        Which rows appear in the output: '1and2' (INNER), '1or2'
        (FULL OUTER), 'all1'/'all2' (LEFT/RIGHT OUTER), '1not2',
        '2not1', '1xor2'.
    params : str
        Fixed match parameters (typically an error radius, arcsec);
        space-separated if more than one value is required.
    icmd1, icmd2, ocmd, out, fixcols, suffix1, suffix2
        Pre/post-processing filter commands, output location and
        duplicate-column-name handling options, passed through verbatim.
    """
    # Argument specification: v = aliases, r = required, d = default,
    # u = expected unit.
    spec = {
        'in1': dict(v=['in1'], r=True),
        'in2': dict(v=['in2'], r=True),
        'matcher': dict(v=['matcher'], r=False, d='sky'),
        'values1': dict(v=['values1'], r=True),
        'values2': dict(v=['values2'], r=True),
        'join': dict(v=['join'], r=False, d='1and2'),
        'icmd1': dict(v=['icmd1'], r=False),
        'icmd2': dict(v=['icmd2'], r=False),
        'ocmd': dict(v=['ocmd'], r=False),
        'params': dict(v=['radius', 'params'], r=False, u=u.arcsec),
        'out': dict(v=['outfile', 'out'], r=True),
        'fixcols': dict(v=['fixcols'], r=False, d='dups'),
        'suffix1': dict(v=['suffix1'], r=False),
        'suffix2': dict(v=['suffix2'], r=False),
    }
    commandargs = command_arguments(spec, kwargs, 'tmatch2')
    # Assemble the full shell command and execute it.
    command = STILTS + ' tmatch2 ' + ''.join(
        commandargs[key] for key in commandargs)
    runcommand(command)
| 40.126263 | 81 | 0.524984 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,272 | 0.789427 |
69a8f7469bd91e8fef4f395d57b80e477d69a8db | 71 | py | Python | gather/glasgowpicarro/__init__.py | openghg/gather | 0096cfe66b0093cdd294fa2a67c060d7fc28d2fa | [
"Apache-2.0"
] | null | null | null | gather/glasgowpicarro/__init__.py | openghg/gather | 0096cfe66b0093cdd294fa2a67c060d7fc28d2fa | [
"Apache-2.0"
] | null | null | null | gather/glasgowpicarro/__init__.py | openghg/gather | 0096cfe66b0093cdd294fa2a67c060d7fc28d2fa | [
"Apache-2.0"
] | null | null | null | from ._process import process_pipeline
__all__ = ["process_pipeline"]
| 17.75 | 38 | 0.802817 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 18 | 0.253521 |
69a9022124889eb67322e46007049fa8b6adf0de | 7,626 | py | Python | underworld/conditions/_conditions.py | longgangfan/underworld2 | 5c8acc17fa4d97e86a62b13b8bfb2af6e81a8ee4 | [
"CC-BY-4.0"
] | 116 | 2015-09-28T10:30:55.000Z | 2022-03-22T04:12:38.000Z | underworld/conditions/_conditions.py | longgangfan/underworld2 | 5c8acc17fa4d97e86a62b13b8bfb2af6e81a8ee4 | [
"CC-BY-4.0"
] | 561 | 2015-09-29T06:05:50.000Z | 2022-03-22T23:37:29.000Z | underworld/conditions/_conditions.py | longgangfan/underworld2 | 5c8acc17fa4d97e86a62b13b8bfb2af6e81a8ee4 | [
"CC-BY-4.0"
] | 68 | 2015-12-14T21:57:46.000Z | 2021-08-25T04:54:26.000Z | ##~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~##
## ##
## This file forms part of the Underworld geophysics modelling application. ##
## ##
## For full license and copyright information, please refer to the LICENSE.md file ##
## located at the project root, or contact the authors. ##
## ##
##~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~##
"""
This module contains conditions used for applying constraints on model dynamics.
"""
import underworld as uw
import underworld._stgermain as _stgermain
import underworld.libUnderworld as libUnderworld
import abc
class SystemCondition(_stgermain.StgCompoundComponent, metaclass = abc.ABCMeta):
    """
    Abstract base class for system conditions (Dirichlet/Neumann).

    Validates the target mesh variable and its per-DOF index sets, then
    registers the index sets with the wrapped C-level condition object.
    """

    def _add_to_stg_dict(self,componentDict):
        # No extra entries are needed in the StGermain component dictionary.
        pass

    def __init__(self, variable, indexSetsPerDof):
        # variable : underworld.mesh.MeshVariable the condition applies to.
        # indexSetsPerDof : IndexSet, or list/tuple of IndexSet/None, one
        #     entry per degree of freedom of `variable`.
        if not isinstance( variable, uw.mesh.MeshVariable ):
            raise TypeError("Provided variable must be of class 'MeshVariable'.")
        self._variable = variable
        # A bare IndexSet is accepted as shorthand for a 1-tuple.
        if isinstance( indexSetsPerDof, uw.container.IndexSet ):
            indexSets = ( indexSetsPerDof, )
        elif isinstance( indexSetsPerDof, (list,tuple)):
            indexSets = indexSetsPerDof
        else:
            raise TypeError("You must provide the required 'indexSetsPerDof' item\n"+
                            "as a list or tuple of 'IndexSet' items.")
        for guy in indexSets:
            if not isinstance( guy, (uw.container.IndexSet,type(None)) ):
                raise TypeError("Provided list must only contain objects of 'NoneType' or type 'IndexSet'.")
        self._indexSets = indexSets
        # Exactly one index set (possibly None) is required per DOF.
        if variable.nodeDofCount != len(self._indexSets):
            raise ValueError("Provided variable has a nodeDofCount of {}, however you have ".format(variable.nodeDofCount)+
                             "provided {} index set(s). You must provide an index set for each degree ".format(len(self._indexSets))+
                             "of freedom of your variable, but no more.")
        # ok, lets setup the c array
        libUnderworld.StGermain._PythonVC_SetupIndexSetArray(self._cself,len(self._indexSets))
        # now, lets add the indexSet objects
        for position,set in enumerate(self._indexSets):
            if set:
                libUnderworld.StGermain._PythonVC_SetIndexSetAtArrayPosition( self._cself, set._cself, position );

    @property
    def indexSetsPerDof(self):
        """ See class constructor for details. """
        return self._indexSets

    @property
    def variable(self):
        """ See class constructor for details. """
        return self._variable
class DirichletCondition(SystemCondition):
    """
    The DirichletCondition class provides the required functionality to impose Dirichlet
    conditions on your differential equation system.

    The user is simply required to flag which nodes/DOFs should be considered by the system
    to be a Dirichlet condition. The values at the Dirichlet nodes/DOFs is then left
    untouched by the system.

    Parameters
    ----------
    variable : underworld.mesh.MeshVariable
        This is the variable for which the Dirichlet condition applies.
    indexSetsPerDof : list, tuple, IndexSet
        The index set(s) which flag nodes/DOFs as Dirichlet conditions.
        Note that the user must provide an index set for each degree of
        freedom of the variable. So for a vector variable of rank 2 (say Vx & Vy),
        two index sets must be provided (say VxDofSet, VyDofSet).

    Notes
    -----
    Note that it is necessary for the user to set the required value on the variable, possibly
    via the numpy interface.

    Constructor must be called collectively by all processes.

    Example
    -------
    Basic setup and usage of Dirichlet conditions:

    >>> linearMesh = uw.mesh.FeMesh_Cartesian( elementType='Q1/dQ0', elementRes=(4,4), minCoord=(0.,0.), maxCoord=(1.,1.) )
    >>> velocityField = uw.mesh.MeshVariable( linearMesh, 2 )
    >>> velocityField.data[:] = [0.,0.]  # set velocity zero everywhere, which will of course include the boundaries.
    >>> IWalls = linearMesh.specialSets["MinI_VertexSet"] + linearMesh.specialSets["MaxI_VertexSet"]  # get some wall index sets
    >>> JWalls = linearMesh.specialSets["MinJ_VertexSet"] + linearMesh.specialSets["MaxJ_VertexSet"]
    >>> freeSlipBC = uw.conditions.DirichletCondition(velocityField, (IWalls,JWalls) )  # this will give free slip sides
    >>> noSlipBC = uw.conditions.DirichletCondition(velocityField, (IWalls+JWalls,IWalls+JWalls) )  # this will give no slip sides

    """
    # StGermain component wiring: this condition is backed by a PythonVC.
    _objectsDict = { "_pyvc": "PythonVC" }
    _selfObjectName = "_pyvc"

    def __init__(self, variable, indexSetsPerDof):
        # All validation and C-level registration happens in the base class.
        super(DirichletCondition,self).__init__(variable, indexSetsPerDof)
class NeumannCondition(SystemCondition):
    """
    This class defines Neumann conditions for a differential equation.
    Neumann conditions specify a field's flux along a boundary.

    As such the user specifies the field's flux as a uw.Function and the nodes where this flux
    is to be applied - similar to uw.conditions.DirichletCondition

    Parameters
    ----------
    fn_flux : underworld.function.Function
        Function which determines flux values.
    variable : underworld.mesh.MeshVariable
        The variable that describes the discretisation (mesh & DOFs) for 'indexSetsPerDof'
    indexSetsPerDof : list, tuple, IndexSet
        The index set(s) which flag nodes/DOFs as Neumann conditions.
        Note that the user must provide an index set for each degree of
        freedom of the variable above. So for a vector variable of rank 2 (say Vx & Vy),
        two index sets must be provided (say VxDofSet, VyDofSet).

    Example
    -------
    Basic setup and usage of Neumann conditions:

    >>> linearMesh = uw.mesh.FeMesh_Cartesian( elementType='Q1/dQ0', elementRes=(4,4), minCoord=(0.,0.), maxCoord=(1.,1.) )
    >>> velocityField = uw.mesh.MeshVariable( linearMesh, 2 )
    >>> velocityField.data[:] = [0.,0.]  # set velocity zero everywhere, which will of course include the boundaries.
    >>> myFunc = (uw.function.coord()[1],0.0)
    >>> bottomWall = linearMesh.specialSets["MinJ_VertexSet"]
    >>> tractionBC = uw.conditions.NeumannCondition(variable=velocityField, fn_flux=myFunc, indexSetsPerDof=(None,bottomWall) )
    """
    # StGermain component wiring: this condition is backed by a PythonVC.
    _objectsDict = { "_pyvc": "PythonVC" }
    _selfObjectName = "_pyvc"

    def __init__(self, variable, indexSetsPerDof=None, fn_flux=None ):
        # call parent
        super(NeumannCondition,self).__init__(variable, indexSetsPerDof)

        _fn_flux = uw.function.Function.convert(fn_flux)
        if not isinstance( _fn_flux, uw.function.Function):
            raise TypeError( "Provided 'fn_flux' must be of or convertible to 'Function' class." )
        self.fn_flux=_fn_flux

    @property
    def fn_flux(self):
        """ Get the underworld.Function that defines the flux """
        return self._fn_flux

    @fn_flux.setter
    def fn_flux(self, fn):
        """ Set the underworld.Function that defines the flux """
        _fn = uw.function.Function.convert(fn)
        if not isinstance( _fn, uw.function.Function):
            # NOTE(review): the constructor raises TypeError for the same
            # failure mode; consider unifying the exception types.
            raise ValueError( "Provided '_fn' must be of or convertible to 'Function' class." )
        self._fn_flux = _fn
| 46.5 | 133 | 0.642014 | 6,697 | 0.87818 | 0 | 0 | 697 | 0.091398 | 0 | 0 | 5,079 | 0.666011 |
69a9b216e3287800556dfe1beff0b79e23f28b95 | 589 | py | Python | tests/integration/cli/test_test.py | Ninjagod1251/ape | 9b40ef15f25362ddb83cb6d571d60cab041fce4a | [
"Apache-2.0"
] | null | null | null | tests/integration/cli/test_test.py | Ninjagod1251/ape | 9b40ef15f25362ddb83cb6d571d60cab041fce4a | [
"Apache-2.0"
] | null | null | null | tests/integration/cli/test_test.py | Ninjagod1251/ape | 9b40ef15f25362ddb83cb6d571d60cab041fce4a | [
"Apache-2.0"
] | null | null | null | from .utils import skip_projects_except
@skip_projects_except(["test"])
def test_test(ape_cli, runner):
    """`ape test` exits cleanly on the 'test' project fixture."""
    # test cases implicitly test built-in isolation
    result = runner.invoke(ape_cli, ["test"])
    assert result.exit_code == 0, result.output
@skip_projects_except(["test"])
def test_test_isolation_disabled(ape_cli, runner):
    """`--disable-isolation` must turn off the built-in isolation fixture,
    which makes the isolation-dependent test suite fail (exit code 1)."""
    # check the disable isolation option actually disables built-in isolation
    result = runner.invoke(ape_cli, ["test", "--disable-isolation", "--setup-show"])
    assert result.exit_code == 1
    # the isolation fixture must not show up in the --setup-show output
    assert "F _function_isolation" not in result.output
| 34.647059 | 84 | 0.73854 | 0 | 0 | 0 | 0 | 543 | 0.921902 | 0 | 0 | 202 | 0.342954 |
69aa25aac3f98986c9dae15f4594f5b7600a1d64 | 302 | py | Python | binary-list-generator.py | rj011/Hacktoberfest2021-4 | 0aa981d4ba5e71c86cc162d34fe57814050064c2 | [
"MIT"
] | 41 | 2021-10-03T16:03:52.000Z | 2021-11-14T18:15:33.000Z | binary-list-generator.py | rj011/Hacktoberfest2021-4 | 0aa981d4ba5e71c86cc162d34fe57814050064c2 | [
"MIT"
] | 175 | 2021-10-03T10:47:31.000Z | 2021-10-20T11:55:32.000Z | binary-list-generator.py | rj011/Hacktoberfest2021-4 | 0aa981d4ba5e71c86cc162d34fe57814050064c2 | [
"MIT"
] | 208 | 2021-10-03T11:24:04.000Z | 2021-10-31T17:27:59.000Z | # Necro(ネクロ)
# sidmishra94540@gmail.com
def binaryGenerator(n):
    """Return all 2**n binary representations of length n, in ascending
    numeric order, each as a list of 0/1 ints (e.g. n=2 ->
    [[0, 0], [0, 1], [1, 0], [1, 1]])."""
    # format(v, 'b') gives the minimal binary string; zfill pads it with
    # leading zeros up to the requested width.
    return [[int(bit) for bit in format(value, 'b').zfill(n)]
            for value in range(2 ** n)]
if __name__ == '__main__':
print(binaryGenerator(int(input()))) | 23.230769 | 40 | 0.566225 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 54 | 0.175325 |
69aa3b2782a1e4f6d886bd6026cd8fd3d7967980 | 403 | py | Python | inferlo/testing/experiment_runner_test.py | InferLO/inferlo | a65efce721d7f99d2f274dd94a1aaf7ca159e944 | [
"Apache-2.0"
] | 1 | 2022-01-27T18:44:07.000Z | 2022-01-27T18:44:07.000Z | inferlo/testing/experiment_runner_test.py | InferLO/inferlo | a65efce721d7f99d2f274dd94a1aaf7ca159e944 | [
"Apache-2.0"
] | 3 | 2022-01-23T18:02:30.000Z | 2022-01-27T23:10:51.000Z | inferlo/testing/experiment_runner_test.py | InferLO/inferlo | a65efce721d7f99d2f274dd94a1aaf7ca159e944 | [
"Apache-2.0"
] | 1 | 2021-09-03T06:12:57.000Z | 2021-09-03T06:12:57.000Z | # Copyright (c) The InferLO authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 - see LICENSE.
from inferlo.testing import ExperimentRunner
def test_run_experiment():
    """run_experiment should call the experiment function with the given
    kwargs and return the dict the function produced."""
    def my_experiment(x=0):
        return {"square": x * x, "cube": x * x * x}
    runner = ExperimentRunner()
    result = runner.run_experiment(my_experiment, {'x': 2})
    assert result['square'] == 4
| 28.785714 | 63 | 0.682382 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 145 | 0.359801 |
69aa3ee89e74fa93b0d496cda02d89ca7460fa6f | 36,688 | py | Python | py2sqlite/py2sql.py | ehorTL/py2sqlite | ff22f7475ff12182a0b976cfc321263d9eade1e5 | [
"MIT"
] | null | null | null | py2sqlite/py2sql.py | ehorTL/py2sqlite | ff22f7475ff12182a0b976cfc321263d9eade1e5 | [
"MIT"
] | null | null | null | py2sqlite/py2sql.py | ehorTL/py2sqlite | ff22f7475ff12182a0b976cfc321263d9eade1e5 | [
"MIT"
] | null | null | null | """
Simple ORM (object-relational mapper) for SQLite.
Many-to-many and one-to-many relationships are not supported.
Saving (updating) an already-stored object with a different aggregated
object overwrites the previously stored aggregated object!
"""
import os
import sqlite3
from array import array
from inspect import *
import builtins
import sys
import logging
from .util import *
from .demo_classes import *
class Py2SQL:
def __init__(self, logs_enabled=False, log_file=""):
    # NOTE(review): logs_enabled/log_file are accepted but currently
    # unused -- __setup_logger is never invoked; confirm intended wiring.
    self.filename = None    # path of the connected database file
    self.connection = None  # sqlite3.Connection once connected
    self.cursor = None      # sqlite3.Cursor once connected
def __setup_logger(self, logs_enabled: bool, log_file: str):
    """
    Creates and returns logger.

    NOTE(review): this helper is never called anywhere in the visible
    class; the constructor accepts logging options but does not use it.

    :param logs_enabled: True to enable, False to disable
    :param log_file: absolute path with file name of file for logging to
    :return: logger instance from 'logging' module
    """
    # basicConfig mutates the *global* root logging configuration.
    logging.basicConfig(level=logging.DEBUG,
                        filename=log_file, filemode="a")
    logger = logging.getLogger("main_logger")
    # The filter captures logs_enabled once; later changes have no effect.
    logger.addFilter(lambda r: bool(logs_enabled))
    return logger
def db_connect(self, db_filepath: str) -> None:
    """
    Connect to the database in given path

    Stores the file path and keeps an open connection plus a cursor on
    the instance for all subsequent queries.

    :type db_filepath: str
    :param db_filepath: path to the database file
    :return: None
    """
    self.filename = db_filepath
    self.connection = sqlite3.connect(db_filepath)
    self.cursor = self.connection.cursor()
def db_disconnect(self) -> None:
    """
    Disconnect from the current database

    Closes the sqlite3 connection and resets all connection-related
    state so the instance can be reconnected later.

    :return: None
    """
    self.connection.close()
    self.filename = None
    self.connection = None
    self.cursor = None
def db_engine(self) -> tuple:
    """
    Retrieve database name and version

    :rtype: tuple
    :return: (database name, SQLite version string) tuple
    """
    self.cursor.execute('SELECT sqlite_version();')
    version = self.cursor.fetchone()[0]
    name = self.db_name()
    return name, version
def db_name(self) -> str:
    """
    Retrieve the logical name of the main attached database (column 1 of
    ``PRAGMA database_list``), or an empty string when no row is returned.

    :rtype: str
    :return: logical database name or ""
    """
    query = "PRAGMA database_list;"
    self.cursor.execute(query)
    db_info = self.cursor.fetchone()
    if db_info:
        return db_info[1]
    return ""
def db_size(self) -> float:
    """
    Retrieve connected database size in Mb

    Computed from the size of the database file on disk.

    :rtype: float
    :return: database size in Mb
    """
    return os.path.getsize(self.filename) / (1024 * 1024.0)
def db_tables(self):
    """
    Retrieve the names of all tables present in the connected database.

    :return: list of database table names (tbl_name of every
        sqlite_master row)
    """
    rows = self.cursor.execute(
        "SELECT tbl_name FROM sqlite_master;").fetchall()
    return [row[0] for row in rows]
def db_table_structure(self, table_name: str) -> list:
    """
    Retrieve ordered list of tuples of form (id, name, type) which
    describe the given table's columns.

    :type table_name: str
    :param table_name: name of the table to retrieve structure of
    :return: ordered list of tuples of form (id, name, type)
    """
    # PRAGMA table_info yields (cid, name, type, notnull, dflt_value, pk);
    # keep only the first three fields of every row.
    rows = self.cursor.execute(
        'PRAGMA table_info(' + table_name + ');').fetchall()
    return [row[:3] for row in rows]
def db_table_size(self, table_name: str) -> float:
    """
    Dynamically calculates data size stored in the table with table name
    provided, in Mb.

    Integer-valued cells (and the bookkeeping id columns) are counted as
    8 bytes, text cells as 2 bytes per character; cells of other types
    are ignored.

    :param table_name: table name to get size of
    :raises ValueError: if table_name is not a str
    :raises Exception: if no table with the given name exists
    :rtype: float
    :return: size of table in Mb
    """
    if not type(table_name) == str:
        raise ValueError(
            "str type expected as table_name. Got " + str(type(table_name)))
    q = "SELECT * FROM {}".format(table_name)
    try:
        self.cursor.execute(q)
    except Exception as exc:
        # BUGFIX: the original message read 'No table<name> found' (missing
        # space); also chain the underlying error for easier debugging.
        raise Exception('No table ' + table_name + ' found') from exc
    rows = self.cursor.fetchall()
    col_names = list(
        map(lambda descr_tuple: descr_tuple[0], self.cursor.description))
    int_size = 8        # bytes assumed per INTEGER value
    text_charsize = 2   # bytes assumed per TEXT character
    bytes_size = 0
    for r in rows:
        for i in range(len(r)):
            if r[i] is None:
                continue
            elif (col_names[i] == PY2SQL_COLUMN_ID_NAME) or (col_names[i] == PY2SQL_OBJECT_PYTHON_ID_COLUMN_NAME):
                bytes_size += int_size
            elif type(r[i]) == int:
                bytes_size += int_size
            elif type(r[i]) == str:
                bytes_size += len(r[i]) * text_charsize
            else:
                continue
    return float(bytes_size / 1024 / 1024)
# Python -> SQLite
def save_object(self, obj) -> int:
    """
    Save representation of given object instance into database or update it if it already exists

    Non-primitive objects get one column per attribute; primitive objects
    are stored in a single value column. Objects are identified across
    calls by their Python id() kept in a dedicated column, so re-saving
    the same live object updates its existing row.

    :param obj: object instance to be saved
    :rtype: int
    :return: id of object instance that was saved
    """
    table_name = Py2SQL.__get_object_table_name(obj)
    # print('saving', obj, 'to', table_name, 'id:', id(obj))
    if not self.__table_exists(table_name):
        self.__create_table(type(obj))
    else:
        self.__update_table(type(obj))
    if not Py2SQL.__is_of_primitive_type(obj):  # object
        values = []
        self.__add_object_attrs_columns(obj, table_name)
        columns = self.__get_object_bound_columns(table_name).split(', ')
        # Iterate over a copy so stale columns can be dropped in place.
        for col in columns[:]:
            if not Py2SQL.__has_attr_for_column(obj, col):
                columns.remove(col)
                continue
            if col == PY2SQL_OBJECT_PYTHON_ID_COLUMN_NAME:
                values.append(id(obj))
                continue
            attr_value = Py2SQL.__get_attr_for_column(obj, col)
            # Class-valued attributes are not persisted.
            if isclass(attr_value):
                continue
            values.append(self.__get_sqlite_repr(attr_value))
    else:
        columns = self.__get_object_bound_columns(table_name).split(', ')
        values = (id(obj), self.__get_sqlite_repr(obj))
    obj_pk = self.__get_pk_if_exists(obj)
    if obj_pk:
        # Row already present: update it in place and keep its pk.
        query = 'UPDATE {} SET {} WHERE {} = ?'.format(
            table_name,
            ', '.join(['{} = ?'.format(c) for c in columns]),
            PY2SQL_COLUMN_ID_NAME
        )
        params = (*values, obj_pk)
        # print(query, params)
        self.cursor.execute(query, params)
        self.connection.commit()
        return obj_pk
    query = 'INSERT INTO {}({}) VALUES ({});'.format(
        table_name,
        ', '.join(columns),
        ('?,' * len(values))[:-1]
    )
    # print(query, values)
    try:
        self.cursor.execute(query, values)
    except sqlite3.OperationalError:
        # The value column may be missing for primitive types: add it,
        # refresh the column list and retry the insert once.
        self.cursor.execute(
            'ALTER TABLE {} ADD COLUMN {} TEXT'.format(
                table_name, PY2SQL_PRIMITIVE_TYPES_VALUE_COLUMN_NAME)
        )
        columns = self.__get_object_bound_columns(table_name)
        query = 'INSERT INTO {}({}) VALUES ({});'.format(
            table_name,
            columns,
            ('?,' * len(values))[:-1]
        )
        self.cursor.execute(query, values)
    self.connection.commit()
    return self.__get_last_inserted_id()
@staticmethod
def __get_attr_for_column(obj, column_name):
    """
    Retrieve attribute of an object corresponding to the given column name

    Primitive objects are stored whole, so the value column simply maps
    to str(obj); otherwise the column name is translated back to an
    attribute name and looked up on the object.

    :param obj: object to get attribute of
    :param column_name: column name corresponding to desired attribute
    :return: attribute of an object corresponding to the given column name
    """
    if column_name == PY2SQL_PRIMITIVE_TYPES_VALUE_COLUMN_NAME and Py2SQL.__is_of_primitive_type(obj):
        return str(obj)
    return getattr(obj, Py2SQL.__object_column_name_to_attr_name(column_name))
@staticmethod
def __has_attr_for_column(obj, column_name):
    """
    Check if object still has attribute corresponding to given column name

    :param obj: object to check for
    :param column_name: column name to check for
    :return: True if object has attribute corresponding to given column name, False otherwise
    """
    # The value column of a primitive object and the python-id column are
    # always backed by data.
    if column_name == PY2SQL_PRIMITIVE_TYPES_VALUE_COLUMN_NAME and Py2SQL.__is_of_primitive_type(obj):
        return True
    if column_name == PY2SQL_OBJECT_PYTHON_ID_COLUMN_NAME:
        return True
    # Class-valued attributes are not persisted; `type` as getattr default
    # deliberately makes a *missing* attribute look class-valued too.
    if isclass(getattr(obj, Py2SQL.__object_column_name_to_attr_name(column_name), type)):
        return False
    return hasattr(obj, Py2SQL.__object_column_name_to_attr_name(column_name))
@staticmethod
def __object_column_name_to_attr_name(column_name):
    """
    Retrieve name of object's attribute corresponding to given column name

    Strips the separator and the attribute/method prefixes that were
    added when the column was created.

    :param column_name: column name to get attribute name for
    :return: name of object's attribute corresponding to given column name
    """
    attr_name = column_name.replace(PY2SQL_SEPARATOR, '').replace(PY2SQL_OBJECT_ATTR_PREFIX, '') \
        .replace(PY2SQL_OBJECT_METHOD_PREFIX, '')
    return attr_name
def __get_pk_if_exists(self, obj):
    """
    Retrieve primary key of given object from corresponding table

    The lookup key is the live object's Python id(), stored when the
    object was first saved.

    :param obj: obj to get primary key of if it exists in corresponding table
    :rtype: int or None
    :return: primary key of object if it is in the table, otherwise None
    """
    table_name = Py2SQL.__get_object_table_name(obj)
    existed_id = self.cursor.execute(
        'SELECT {} FROM {} WHERE {} = ?'.format(
            PY2SQL_COLUMN_ID_NAME, table_name, PY2SQL_OBJECT_PYTHON_ID_COLUMN_NAME
        ),
        (str(id(obj)),)
    ).fetchone()
    if existed_id:
        return existed_id[0]
    return None
def __get_last_inserted_id(self):
    """
    Retrieve last id inserted into the database

    :rtype: int
    :return: rowid produced by this connection's most recent INSERT
    """
    return self.cursor.execute('SELECT last_insert_rowid()').fetchone()[0]
@staticmethod
def __get_object_column_name(attr_name: str, attr_value):
    """
    Retrieve name of the column responsible for storing given object instance attribute

    :type attr_name: str
    :param attr_name: name of the object instance attribute to get the column name
    :param attr_value: attribute value; callables get the method prefix,
        anything else the attribute prefix
    :return: name of the column responsible for storing given attribute
    """
    if isfunction(attr_value) or ismethod(attr_value):
        return PY2SQL_OBJECT_METHOD_PREFIX + PY2SQL_SEPARATOR + attr_name
    return PY2SQL_OBJECT_ATTR_PREFIX + PY2SQL_SEPARATOR + attr_name
@staticmethod
def __get_class_column_name(attr_name: str, attr_value) -> str:
    """
    Retrieve name of the column responsible for storing given class instance attribute

    :type attr_name: str
    :param attr_name: name of the class instance attribute to get the column name
    :param attr_value: value of the class instance attribute to get the column name
    :rtype: str
    :return: name of the column responsible for storing given attribute
    """
    # Callables get the class-method prefix, data attributes the
    # class-attribute prefix.
    if isfunction(attr_value) or ismethod(attr_value):
        return PY2SQL_CLASS_METHOD_PREFIX + PY2SQL_SEPARATOR + attr_name
    return PY2SQL_CLASS_ATTR_PREFIX + PY2SQL_SEPARATOR + attr_name
@staticmethod
def __get_association_reference(obj, ref_id):
    """
    Retrieve association reference string for a given object instance and its primary key i.e. a string
    that represents association relationship between two objects

    :param obj: object instance to get the association reference for
    :param ref_id: primary key of object instance to be referenced in the corresponding table
    :rtype: str
    :return: association reference string of form
        '<prefix><sep><table_name><sep><ref_id>'
    """
    return PY2SQL_ASSOCIATION_REFERENCE_PREFIX + PY2SQL_SEPARATOR + Py2SQL.__get_object_table_name(obj) + \
           PY2SQL_SEPARATOR + str(ref_id)
@staticmethod
def __get_base_class_table_reference_name(cls) -> str:
    """
    Retrieve base class reference string for a given class instance i.e. a string
    that represents inheritance relationship between two classes

    :param cls: class instance to get base class table reference for
    :rtype: str
    :return: base class table reference string
        ('<prefix><sep><base_class_table_name>')
    """
    return PY2SQL_BASE_CLASS_REFERENCE_PREFIX + PY2SQL_SEPARATOR + Py2SQL.__get_class_table_name(cls)
@staticmethod
def __is_magic_attr(attr_name: str) -> bool:
    """
    Check whether the given attribute name is a built-in "magic" (dunder)
    attribute name, e.g. '__init__'.

    :param attr_name: attribute name to test
    :return: bool
    """
    return attr_name.startswith("__") and attr_name.endswith("__")
def __get_sqlite_repr(self, obj) -> str or None:
    """
    Retrieve SQLite representation of given object

    All primitives are represented by respective type copy constructor call string with the actual value passed,
    so that object instances of primitive types can be easily recreated from the database via eval() function.
    Composite objects are represented by association reference strings, whereas functions are represented with
    their source code.

    :param obj: object to be represented in SQLite database
    :rtype: str or None
    :return: sqlite representation of an object to be stored in the respective database table
    """
    if obj is None:
        result = None
    elif type(obj) == array:
        # array needs both the typecode and the element list to rebuild.
        result = '{}("{}", {})'.format(
            type(obj).__name__, obj.typecode, list(obj))
    elif type(obj) == frozenset:
        # str(frozenset(...)) is already a valid constructor expression.
        result = str(obj)
    elif type(obj) == str:
        result = '{}("{}")'.format(type(obj).__name__, obj)
    elif Py2SQL.__is_of_primitive_type(obj):
        result = '{}({})'.format(type(obj).__name__, obj)
    elif isfunction(obj) or ismethod(obj):
        result = getsource(obj)
    else:  # object
        if obj.__dict__:
            # Composite object: persist it first, then store a reference.
            result = Py2SQL.__get_association_reference(
                obj, self.save_object(obj))
        else:
            result = str(obj)
    if result is not None:
        # NOTE(review): swapping single for double quotes keeps the text
        # SQL-safe but also mutates stored values that legitimately
        # contain apostrophes -- confirm this is acceptable.
        return result.replace("'", '"')
@staticmethod
def __is_of_primitive_type(obj) -> bool:
    """
    Check whether given object is of primitive type i.e. is represented by a single field in SQLite database, thus
    can be embedded into 'composite' objects

    Objects without a __dict__ are treated as primitive as well.

    :param obj: object instance to be type-checked
    :rtype: bool
    :return: True if object is of primitive type, False otherwise
    """
    return Py2SQL.__is_primitive_type(type(obj)) or not hasattr(obj, '__dict__')
@staticmethod
def __is_primitive_type(cls):
    """
    Checks if input class object belongs to primitive built-in types

    :param cls: class instance to check
    :rtype: bool
    :return: True if class is primitive type, False otherwise
    """
    # NOTE(review): isbuiltin() is True for built-in *functions/methods*,
    # not built-in classes -- verify the intent of this extra check.
    return cls in (int, float, str, bool, dict, tuple, list, set, frozenset, array) or isbuiltin(cls)
@staticmethod
def __get_object_table_name(obj) -> str:
    """
    Retrieve name of the table which should store objects of the same type as given one

    Delegates to __get_class_table_name on the object's type.

    :param obj: object to build respective table name from
    :rtype: str
    :return: name of table to store object in
    """
    return Py2SQL.__get_class_table_name(type(obj))
@staticmethod
def __get_class_name_by_table_name(table_name: str) -> tuple:
    """
    Split a table name back into its module path and class name.

    Table names encode the dotted module path with '$' separators,
    e.g. 'pkg$mod$Cls' -> ('pkg.mod', 'Cls').

    :param table_name: table name of class to get name of
    :return: tuple (<full_module_name>, <class_name>)
    """
    sep = '$'
    last = table_name.rfind(sep)
    # Everything before the last separator is the module path; the rest
    # is the bare class name.
    return table_name[:last].replace(sep, "."), table_name[last + 1:]
    @staticmethod
    def __get_attribute_name(self, tbl_name, col_name) -> str:
        """
        DO NOT USE: unfinished stub.

        Intended to map a database column name back to the original Python
        attribute name. NOTE(review): declared @staticmethod yet takes a
        ``self`` parameter, and both branches below are empty — the method
        unconditionally returns '' in its current state.
        :param tbl_name: table the column taken from
        :param col_name: column name
        :return: always '' until the stub is implemented
        """
        cls = Py2SQL.__get_class_object_by_table_name(tbl_name)
        attr_name = ""
        if Py2SQL.__is_primitive_type(cls):
            pass
        else:
            pass
        # Constants presumably needed by the eventual implementation:
        # PY2SQL_PRIMITIVE_TYPES_VALUE_COLUMN_NAME
        # PY2SQL_OBJECT_ATTR_PREFIX + PY2SQL_SEPARATOR
        # todo
        return attr_name
@staticmethod
def __get_class_object_by_table_name(tbl_name):
"""
Returns class object of corresponding tbl name or raise an Exception
:param tbl_name: table name to get corresponding class object of
:return: class object
"""
module_nm, cls_nm = Py2SQL.__get_class_name_by_table_name(tbl_name)
cls_obj = None
try:
cls_obj = getattr(sys.modules[module_nm], cls_nm)
except (AttributeError, KeyError) as e:
msg = 'No such class: ' + module_nm + "." + cls_nm
raise Exception(msg)
except Exception:
raise Exception('Unpredictable error')
return cls_obj
@staticmethod
def __get_class_table_name(cls) -> str:
"""
Retrieve name of the database table used to represent given class
:param cls: class instance to get table name for
:rtype: str
:return: name of the table that represents given class
"""
prefix = cls.__module__.replace(".", "$") + "$"
if Py2SQL.__is_of_primitive_type(cls):
return prefix + cls.__name__
return prefix + cls.__name__
def __table_exists(self, table_name):
"""
Check if table with table name exists in database
:param table_name: table name
:return: bool, exists or not
"""
for tbl_name in self.db_tables():
if tbl_name == table_name:
return True
return False
def __add_object_attrs_columns(self, obj, table_name):
"""
Add columns representing attributes of given object instance to the table with given name
:param obj: object to add attributes of to the table
:param table_name: name of the table to add columns into
:return: None
"""
for attr_name, attr_value in obj.__dict__.items():
if isclass(attr_value):
continue
try:
self.cursor.execute(
'ALTER TABLE {} ADD COLUMN {} TEXT'.format(
table_name,
Py2SQL.__get_object_column_name(attr_name, attr_value)
)
)
except sqlite3.OperationalError: # column already exists
pass
@staticmethod
def __get_data_fields(cls_obj):
"""
Retrieves from class object data field names.
Not includes magic attributes and functions (methods)
:param cls_obj:
:return: list of two-element tuples containing data field name and value respectively
"""
return [(k, v) for k, v in cls_obj.__dict__.items() if not Py2SQL.__is_magic_attr(k) and PY2SQL_ID_NAME != k]
def __table_is_empty(self, table_name) -> bool:
"""
Check if table is empty
:param table_name: name of the table to check
:rtype: bool
:return: True if table is empty, False otherwise
"""
return self.cursor.execute('SELECT count(*) FROM {}'.format(table_name)).fetchone()[0] == 0
def __get_object_bound_columns(self, table_name) -> str:
"""
Retrieve comma separated list of object bound column names as string
:param table_name: name of the table to get columns bound to object instances from
:rtype: str
:return: comma separated list of object bound column names
"""
columns = ', '.join([column_name for _, column_name, _ in self.db_table_structure(table_name) if
Py2SQL.__is_object_bound_column(column_name)])
return columns
@staticmethod
def __is_object_bound_column(column_name):
"""
Check if column is object bound attribute or method
:param column_name: column name to be checked
:return: True if column is object bound, False otherwise
"""
return column_name.startswith(PY2SQL_OBJECT_ATTR_PREFIX) or \
column_name.startswith(PY2SQL_OBJECT_METHOD_PREFIX) or \
column_name == PY2SQL_PRIMITIVE_TYPES_VALUE_COLUMN_NAME or \
column_name == PY2SQL_OBJECT_PYTHON_ID_COLUMN_NAME
@staticmethod
def __get_columns_to_be_modified(old_columns, new_columns):
"""
Retrieve columns to be deleted from the table during update, as well as columns to be added
:param old_columns: columns that were stored in the table prior to the class update call
:param new_columns: class columns to be added through class update call
:return: two-element tuple: column names to be deleted, column names to be added
"""
old_columns = [col for col in old_columns if not Py2SQL.__is_object_bound_column(col)
or col == PY2SQL_OBJECT_PYTHON_ID_COLUMN_NAME]
to_be_deleted = set(old_columns) - set(new_columns)
to_be_added = set(new_columns) - set(old_columns)
return to_be_deleted, to_be_added
    def __get_class_bound_columns_queries(self, cls, columns=None):
        """
        Retrieve the list of class-bound column DDL fragments, in order:
        base-class reference columns, then class data-field columns, then
        any object-bound columns carried over via *columns*.
        :param cls: class to retrieve column queries for
        :param columns: columns list which optionally both filters the
            class-bound columns and extends the result with existing
            object-bound columns (used during table migration)
        :return: list of class bound column queries
        """
        data_fields = Py2SQL.__get_data_fields(cls)
        # One REFERENCES column per non-object base class.
        base_ref_columns = ['{} REFERENCES {}(ID) DEFAULT {}'.format(
            Py2SQL.__get_base_class_table_reference_name(b),
            Py2SQL.__get_class_table_name(b),
            PY2SQL_DEFAULT_CLASS_BOUND_ROW_ID
        ) for b in cls.__bases__ if b != object and (columns is None or
                                                     Py2SQL.__get_base_class_table_reference_name(b) in columns)]
        # One TEXT column per data field, with the class value as default.
        class_bound_columns = ['{} TEXT DEFAULT \'{}\''.format(
            Py2SQL.__get_class_column_name(k, v),
            self.__get_sqlite_repr(v)
        ) for k, v in data_fields if not type(v) == cls  # prevent undesired recursion
            and (columns is None or Py2SQL.__get_class_column_name(k, v) in columns)]
        if not columns:
            columns = []
        # Existing object-bound columns are re-created as plain TEXT; the
        # stored python id column is created separately by __create_table.
        object_bound_columns = ['{} TEXT'.format(c) for c in columns if Py2SQL.__is_object_bound_column(c) and not
                                c == PY2SQL_OBJECT_PYTHON_ID_COLUMN_NAME]
        return base_ref_columns + class_bound_columns + object_bound_columns
@staticmethod
def __get_class_bound_columns(cls) -> list:
"""
Retrieve list of class bound column names
:param cls: class to retrieve column names for
:return: list of class bound column names
"""
data_fields = Py2SQL.__get_data_fields(cls)
base_ref_columns = [Py2SQL.__get_base_class_table_reference_name(
b) for b in cls.__bases__ if b != object]
# prevent undesired recursion
attr_columns = [Py2SQL.__get_class_column_name(
k, v) for k, v in data_fields if not type(v) == cls]
return [PY2SQL_OBJECT_PYTHON_ID_COLUMN_NAME] + base_ref_columns + attr_columns
def __get_columns(self, table_name):
return [column_name for _, column_name, _ in self.db_table_structure(table_name)
if not column_name == PY2SQL_COLUMN_ID_NAME]
    def __update_table(self, cls):
        """
        Migrate the table of class *cls* to its current column layout.

        The migration renames the old table to a ``$backup`` copy, creates
        a fresh table with the merged column set, copies surviving columns
        for every row except the class-bound default row, and finally drops
        the backup. The statement order is essential and must not change.
        :param cls: class whose table should be brought up to date
        :return: None
        """
        table_name = Py2SQL.__get_class_table_name(cls)
        old_columns = self.__get_columns(table_name)
        new_columns = self.__get_class_bound_columns(cls)
        to_be_deleted, to_be_added = Py2SQL.__get_columns_to_be_modified(
            old_columns, new_columns)
        if not to_be_deleted and not to_be_added:
            return  # layout unchanged; nothing to migrate
        columns = (set(old_columns) - set(to_be_deleted)) | set(to_be_added)
        self.cursor.execute(
            'ALTER TABLE {} RENAME TO {}$backup;'.format(table_name, table_name))
        self.__create_table(cls, columns)
        # Only columns that existed before can be copied from the backup.
        columns_query = ', '.join(columns - set(to_be_added))
        query = 'INSERT INTO {}({}) SELECT {} FROM {}$backup WHERE {} <> ?;'.format(
            table_name, columns_query, columns_query, table_name, PY2SQL_OBJECT_PYTHON_ID_COLUMN_NAME)
        # print(query)
        self.cursor.execute(
            query, (PY2SQL_DEFAULT_CLASS_BOUND_ROW_ID,)
        )
        self.cursor.execute('DROP TABLE {}$backup;'.format(table_name))
        self.connection.commit()
    def __create_table(self, cls, columns=None) -> str:
        """
        Create the SQLite table representation for the given class.

        Every table gets an autoincrement row id plus the stored python id
        column. Primitive types additionally get a single value column;
        composite classes get their class-bound columns and a default row
        holding the class-level attribute values.
        :param cls: class instance to create SQLite table representation for
        :param columns: optional explicit column set, used by table migration
        :rtype: str
        :return: name of the table created
        """
        table_name = self.__get_class_table_name(cls)
        query_start = 'CREATE TABLE IF NOT EXISTS {} ({} INTEGER PRIMARY KEY AUTOINCREMENT, {} {}' \
            .format(table_name,
                    PY2SQL_COLUMN_ID_NAME,
                    PY2SQL_OBJECT_PYTHON_ID_COLUMN_NAME,
                    PY2SQL_OBJECT_PYTHON_ID_COLUMN_TYPE
                    )
        if self.__is_primitive_type(cls):
            # Primitive values live in one dedicated TEXT column.
            query = query_start + \
                ', {} TEXT)'.format(PY2SQL_PRIMITIVE_TYPES_VALUE_COLUMN_NAME)
        else:
            columns = self.__get_class_bound_columns_queries(cls, columns)
            columns_query = ', '.join(columns)
            if columns_query:
                columns_query = ', ' + columns_query
            query = query_start + ' ' + columns_query + ')'
        # print(query)
        self.cursor.execute(query)
        if not self.__is_primitive_type(cls):
            if self.__table_is_empty(table_name):
                # Insert the class-bound default row (all column defaults).
                self.cursor.execute(
                    'INSERT INTO {} DEFAULT VALUES'.format(table_name))
        self.connection.commit()
        return table_name
def save_class(self, cls) -> None:
"""
Save given class instance's representation into database or update it if it already exists
Creates or updates tables structure to represent class object
:param cls: class instance to be saved
:return: None
"""
table_name = Py2SQL.__get_class_table_name(cls)
if not self.__table_exists(table_name):
self.__create_table(cls)
for base in cls.__bases__:
if not base == object:
self.__create_table(base)
if not self.__is_primitive_type(cls):
self.__update_table(cls)
self.connection.commit()
def save_hierarchy(self, root_class) -> None:
"""
Saves all classes derived from root_class and classes these classes depends on
:param root_class: Base class to save with all derived classes
:return: None
"""
self.save_class(root_class)
subclasses = root_class.__subclasses__()
if len(subclasses) == 0:
return
for c in subclasses:
self.save_hierarchy(c)
    def delete_object(self, obj) -> None:
        """
        Delete the given object instance's representation from the database
        if it existed. Rows are matched by the object's python id().
        :param obj: object instance to be deleted
        :return: None
        """
        table_name = Py2SQL.__get_object_table_name(obj)
        self.cursor.execute(
            'DELETE FROM {} WHERE {} = ?;'.format(
                table_name, PY2SQL_OBJECT_PYTHON_ID_COLUMN_NAME), (id(obj),)
        )
        if not Py2SQL.__is_of_primitive_type(obj):  # object
            for value in obj.__dict__.values():
                # NOTE(review): the cascade only recurses into attribute
                # values that are classes (isclass); plain composite
                # instances are not cascaded here — confirm against how
                # save_object stores associations.
                if not Py2SQL.__is_of_primitive_type(value) and isclass(value):
                    self.delete_object(value)  # cascade delete
        self.connection.commit()
def delete_class(self, cls) -> None:
"""
Delete given class instance's representation from database if it already existed.
Drops corresponding table.
:param cls: object instance to be delete
:return: None
"""
tbl_name = Py2SQL.__get_class_table_name(cls)
query = "DROP TABLE IF EXISTS {}".format(tbl_name)
self.cursor.execute(query)
self.connection.commit()
def delete_hierarchy(self, root_class) -> None:
"""
Deletes root_class representation from database with all derived classes.
Drops class corresponding table and all derived classes corresponding tables.
:param root_class: Class which representation to be deleted with all derived classes
:return: None
"""
# consider foreign key constraints! todo
self.delete_class(root_class)
subclasses = root_class.__subclasses__()
if len(subclasses) == 0:
return
for c in subclasses:
self.delete_hierarchy(c)
    def __redefine_id_function(self, my_id):
        """
        Replace the module-global id() function so that every call returns
        my_id. Used to force a previously stored python id while re-saving.
        To cancel the effect of this func call __reset_id_function() method.
        Use carefully. Reflection used.
        :param my_id: value to be returned after id() call
        :return: None (note: there is no return statement; the old docstring
            incorrectly claimed my_id was returned)
        """
        def id(ob):
            # Deliberately shadows the builtin; the argument is ignored.
            return my_id
        globals()['id'] = id
    def __reset_id_function(self) -> None:
        """
        Restore the module-global 'id' attribute to the built-in python
        id() function, undoing __redefine_id_function().
        Use carefully. Reflection used.
        """
        globals()['id'] = builtins.id
    def __redefine_pyid_col_name(self) -> None:
        """
        Temporarily alias the stored-python-id column name to the primary
        key column name, so subsequent saves address rows by database ID.
        To cancel effect of func call use __reset_pyid_col_name.
        Use carefully. Reflection used.
        """
        global PY2SQL_OBJECT_PYTHON_ID_COLUMN_NAME
        PY2SQL_OBJECT_PYTHON_ID_COLUMN_NAME = str(PY2SQL_COLUMN_ID_NAME)
    def __reset_pyid_col_name(self) -> None:
        """
        Cancels the effect of the __redefine_pyid_col_name method by
        re-reading the original constant from the 'util' module.
        Use carefully. Reflection used.
        """
        global PY2SQL_OBJECT_PYTHON_ID_COLUMN_NAME
        PY2SQL_OBJECT_PYTHON_ID_COLUMN_NAME = getattr(
            sys.modules['util'], 'PY2SQL_OBJECT_PYTHON_ID_COLUMN_NAME')
def save_object_with_update(self, obj):
"""
Inserts or updates obj related data by ID provided.
Obj expected to be ModelPy2SQL instance object.
If so, row is updated if provided ID exists, and fails otherwise.
If not - object will be inserted or updated as provided
:param obj: object to be saved or updated in db
:return: object of type util.ModelPy2SQL
"""
w = None
if type(obj) != ModelPy2SQL:
new_id = self.save_object(obj)
w = ModelPy2SQL(obj, new_id)
else:
tbl_nm = Py2SQL.__get_object_table_name(obj.obj)
q = "SELECT * FROM {} WHERE {}={}" \
.format(tbl_nm, PY2SQL_COLUMN_ID_NAME, obj.get_id())
self.cursor.execute(q)
rows = self.cursor.fetchall()
if len(rows) == 0:
mes = "No " + str(obj.obj.__class__.__name__) + " instance objects in " + tbl_nm + " with id: " + str(
obj.get_id())
raise Exception(mes)
self.__redefine_id_function(obj.get_id())
self.__redefine_pyid_col_name()
self.save_object(obj.obj)
self.__reset_pyid_col_name()
self.__reset_id_function()
w = obj
return w
def __get_columns_names(self, table_name) -> list:
"""
Retrieves from database table columns name for table with name provided
:param table_name: table name
:rtype: list
:return: columns names
"""
self.cursor.execute('PRAGMA table_info({})'.format(table_name))
rows = self.cursor.fetchall()
return list(map(lambda t: t[1], list(rows)))
@staticmethod
def __get_tbl_nm_and_id_assoc(association_ref_value: str) -> tuple:
"""
Retrieves from given string table name find references on and row id
:param association_ref_value:
:return: table name, id
:rtype: tuple
"""
tbl_name = association_ref_value[
association_ref_value.find(PY2SQL_SEPARATOR) + 1: association_ref_value.rfind(PY2SQL_SEPARATOR)]
id_ = int(
association_ref_value[association_ref_value.rfind(PY2SQL_SEPARATOR) + 1:])
return tbl_name, id_
def get_object_by_id(self, table_name: str, id_: int, parent_obj=None) -> tuple:
"""
Retrieves the object related data from table with table name and converts it into the object.
:param table_name: table name tp represent object
:param id_: row id was given to the object as it was inserted
:param parent_obj: do not use this param externally
"""
ob = None
py_id, db_id = -1, -1
try:
cls_o = Py2SQL.__get_class_object_by_table_name(table_name)
obj = cls_o.__new__(cls_o)
cols_names = self.__get_columns_names(table_name)
q = "SELECT * FROM {} WHERE {}={}".format(
table_name, PY2SQL_COLUMN_ID_NAME, id_)
self.cursor.execute(q)
row = self.cursor.fetchone()
if Py2SQL.__is_primitive_type(cls_o):
for i in range(len(row)):
if cols_names[i] == PY2SQL_COLUMN_ID_NAME:
db_id = row[i]
elif cols_names[i] == PY2SQL_OBJECT_PYTHON_ID_COLUMN_NAME:
py_id = row[i]
elif cols_names[i] == PY2SQL_PRIMITIVE_TYPES_VALUE_COLUMN_NAME:
ob = cls_o(eval(row[i]))
else:
if parent_obj is not None:
obj = parent_obj
for i in range(len(row)):
if cols_names[i] == PY2SQL_COLUMN_ID_NAME:
db_id = row[i]
elif cols_names[i] == PY2SQL_OBJECT_PYTHON_ID_COLUMN_NAME:
py_id = row[i]
elif cols_names[i].startswith(PY2SQL_BASE_CLASS_REFERENCE_PREFIX):
ref_tbl_name = cols_names[i][cols_names[i].rfind(
PY2SQL_SEPARATOR) + 1:]
ref_id = int(row[i])
self.get_object_by_id(ref_tbl_name, ref_id, obj)
elif cols_names[i].startswith(PY2SQL_OBJECT_ATTR_PREFIX):
attr_real_name = cols_names[cols_names.rfind(
PY2SQL_SEPARATOR) + 1:]
if row[i].startswith(PY2SQL_ASSOCIATION_REFERENCE_PREFIX):
tbl_nm, prm_id = Py2SQL.__get_tbl_nm_and_id_assoc(
row[i])
if attr_real_name.startswith("__"):
attr_mdf = "_" + cls_o.__name__ + attr_real_name
setattr(obj, attr_mdf, self.get_object_by_id(
tbl_nm, prm_id)[0])
else:
setattr(obj, attr_real_name, self.get_object_by_id(
tbl_nm, prm_id)[0])
else:
if attr_real_name.startswith("__"):
attr_mdf = "_" + cls_o.__name__ + attr_real_name
setattr(obj, attr_mdf, row[i])
else:
setattr(obj, attr_real_name, row[i])
ob = obj
except Exception:
print("exc")
return ob, db_id, py_id
| 37.474974 | 118 | 0.605811 | 36,310 | 0.989697 | 0 | 0 | 11,039 | 0.300889 | 0 | 0 | 13,775 | 0.375463 |
69aa6a9832bfa5efcd1c75e435948454112b6d04 | 4,480 | py | Python | azure-iot-device/azure/iot/device/provisioning/security/sk_security_client.py | olivakar/azure-iot-sdk-python | d8f2403030cf94510d381d8d5ac37af6e8d306f8 | [
"MIT"
] | 35 | 2018-12-01T05:42:30.000Z | 2021-03-10T12:23:41.000Z | azure-iot-device/azure/iot/device/provisioning/security/sk_security_client.py | olivakar/azure-iot-sdk-python | d8f2403030cf94510d381d8d5ac37af6e8d306f8 | [
"MIT"
] | 81 | 2018-11-20T20:01:43.000Z | 2019-09-06T23:57:17.000Z | azure-iot-device/azure/iot/device/provisioning/security/sk_security_client.py | olivakar/azure-iot-sdk-python | d8f2403030cf94510d381d8d5ac37af6e8d306f8 | [
"MIT"
] | 18 | 2019-03-19T18:53:43.000Z | 2021-01-10T09:47:24.000Z | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""This module contains a client that is responsible for providing shared access tokens that will eventually establish
the authenticity of devices to Device Provisioning Service.
"""
from azure.iot.device.common.sastoken import SasToken
class SymmetricKeySecurityClient(object):
    """
    A client that is responsible for providing shared access tokens that will eventually establish
    the authenticity of devices to Device Provisioning Service.
    :ivar provisioning_host: Host running the Device Provisioning Service.
    :ivar registration_id: The registration ID is used to uniquely identify a device in the Device Provisioning Service.
    :ivar id_scope: The ID scope is used to uniquely identify the specific provisioning service the device will
    register through.
    """
    def __init__(self, provisioning_host, registration_id, id_scope, symmetric_key):
        """
        Initialize the symmetric key security client.
        :param provisioning_host: Host running the Device Provisioning Service. Can be found in the Azure portal in the
        Overview tab as the string Global device endpoint
        :param registration_id: The registration ID is used to uniquely identify a device in the Device Provisioning Service.
        The registration ID is alphanumeric, lowercase string and may contain hyphens.
        :param id_scope: The ID scope is used to uniquely identify the specific provisioning service the device will
        register through. The ID scope is assigned to a Device Provisioning Service when it is created by the user and
        is generated by the service and is immutable, guaranteeing uniqueness.
        :param symmetric_key: The key which will be used to create the shared access signature token to authenticate
        the device with the Device Provisioning Service. By default, the Device Provisioning Service creates
        new symmetric keys with a default length of 32 bytes when new enrollments are saved with the Auto-generate keys
        option enabled. Users can provide their own symmetric keys for enrollments by disabling this option within
        16 bytes and 64 bytes and in valid Base64 format.
        """
        self._provisioning_host = provisioning_host
        self._registration_id = registration_id
        self._id_scope = id_scope
        self._symmetric_key = symmetric_key
        # Lazily created on first get_current_sas_token() call.
        self._sas_token = None
    @property
    def provisioning_host(self):
        """
        :return: Host running the Device Provisioning Service.
        (The previous docstring wrongly described the registration ID.)
        """
        return self._provisioning_host
    @property
    def registration_id(self):
        """
        :return: The registration ID is used to uniquely identify a device in the Device Provisioning Service.
        The registration ID is alphanumeric, lowercase string and may contain hyphens.
        """
        return self._registration_id
    @property
    def id_scope(self):
        """
        :return: The ID scope used to uniquely identify the specific provisioning service.
        (The previous docstring wrongly described the provisioning host.)
        """
        return self._id_scope
    def _create_shared_access_signature(self):
        """
        Construct SAS tokens that have a hashed signature formed using the symmetric key of this security client.
        This signature is recreated by the Device Provisioning Service to verify whether a security token presented
        during attestation is authentic or not.
        :return: A string representation of the shared access signature which is of the form
        SharedAccessSignature sig={signature}&se={expiry}&skn={policyName}&sr={URL-encoded-resourceURI}
        """
        uri = self._id_scope + "/registrations/" + self._registration_id
        key = self._symmetric_key
        # Token lifetime in seconds (1 hour).
        time_to_live = 3600
        keyname = "registration"
        return SasToken(uri, key, keyname, time_to_live)
    def get_current_sas_token(self):
        """Return a valid SAS token string, creating or refreshing as needed."""
        if self._sas_token is None:
            self._sas_token = self._create_shared_access_signature()
        else:
            self._sas_token.refresh()
        return str(self._sas_token)
| 50.909091 | 125 | 0.697321 | 3,928 | 0.876786 | 0 | 0 | 752 | 0.167857 | 0 | 0 | 3,318 | 0.740625 |
69aae4b96943444731c2e46c740aea18d36b17e4 | 3,833 | py | Python | scripts/geodata/phrases/extraction.py | Fillr/libpostal | bce153188aff9fbe65aef12c3c639d8069e707fc | [
"MIT"
] | 3,489 | 2015-03-03T00:21:38.000Z | 2022-03-29T09:03:05.000Z | scripts/geodata/phrases/extraction.py | StephenHildebrand/libpostal | d8c9847c5686a1b66056e65128e1774f060ff36f | [
"MIT"
] | 488 | 2015-05-29T23:04:28.000Z | 2022-03-29T11:20:24.000Z | scripts/geodata/phrases/extraction.py | StephenHildebrand/libpostal | d8c9847c5686a1b66056e65128e1774f060ff36f | [
"MIT"
] | 419 | 2015-11-24T16:53:07.000Z | 2022-03-27T06:51:28.000Z | import csv
import six
from collections import defaultdict, Counter
from itertools import izip, islice
from geodata.text.tokenize import tokenize, token_types
from geodata.encoding import safe_encode
class FrequentPhraseExtractor(object):
    '''
    Extract common multi-word phrases from a file/iterator using the
    frequent itemsets method to keep memory usage low: only n-grams whose
    (n-1)-gram prefix already survived pruning are ever counted.
    '''
    # Token categories that may take part in a phrase.
    WORD_TOKEN_TYPES = (token_types.WORD,
                        token_types.IDEOGRAPHIC_CHAR,
                        token_types.ABBREVIATION,
                        token_types.HANGUL_SYLLABLE,
                        token_types.ACRONYM)

    def __init__(self, min_count=5):
        # Minimum frequency for an n-gram to be kept in the vocabulary.
        self.min_count = min_count
        self.vocab = defaultdict(int)
        self.frequencies = defaultdict(int)
        self.train_words = 0

    def ngrams(self, words, n=2):
        """Yield successive n-gram tuples from a token sequence."""
        for t in izip(*(islice(words, i, None) for i in xrange(n))):
            yield t

    def add_tokens(self, s):
        """Count each lowercased word token of *s* as a unigram."""
        for t, c in tokenize(s):
            if c in self.WORD_TOKEN_TYPES:
                self.vocab[((t.lower(), c), )] += 1
                self.train_words += 1

    def create_vocab(self, f):
        """Build and prune the unigram vocabulary from lines of *f*."""
        for line in f:
            line = line.rstrip()
            if not line:
                continue
            self.add_tokens(line)
        self.prune_vocab()

    def prune_vocab(self):
        """Drop vocabulary entries seen fewer than min_count times."""
        # Materialize the key list so entries can be deleted while iterating
        # (required on Python 3, harmless on Python 2).
        for k in list(self.vocab.keys()):
            if self.vocab[k] < self.min_count:
                del self.vocab[k]

    def add_ngrams(self, s, n=2):
        """Count n-grams of *s* whose (n-1)-gram prefix is in the vocab."""
        sequences = []
        seq = []
        # Split the token stream into runs of word-like tokens.
        for t, c in tokenize(s):
            if c in self.WORD_TOKEN_TYPES:
                seq.append((t, c))
            elif seq:
                sequences.append(seq)
                seq = []
        if seq:
            sequences.append(seq)
        for seq in sequences:
            for gram in self.ngrams(seq, n=n):
                prev_tokens = tuple([(t.lower(), c) for t, c in gram[:-1]])
                # Only extend prefixes that survived earlier pruning.
                if prev_tokens in self.vocab:
                    t, c = gram[-1]
                    current_token = (t.lower(), c)
                    self.frequencies[(prev_tokens, current_token)] += 1

    def add_frequent_ngrams_to_vocab(self):
        """Promote n-grams meeting min_count into the vocabulary."""
        for k, v in six.iteritems(self.frequencies):
            if v < self.min_count:
                continue
            prev, current = k
            self.vocab[prev + (current,)] = v

    def find_ngram_phrases(self, f, n=2):
        """One pass over *f*: count n-grams, then fold them into the vocab."""
        self.frequencies = defaultdict(int)
        for line in f:
            line = line.rstrip()
            if not line:
                continue
            self.add_ngrams(line, n=n)
        self.add_frequent_ngrams_to_vocab()
        self.frequencies = defaultdict(int)

    @classmethod
    def from_file(cls, f, max_phrase_len=5, min_count=5):
        """Build an extractor from a seekable file-like object.

        :param f: seekable file-like object of input lines
        :param max_phrase_len: longest phrase (in tokens) to extract
        :param min_count: minimum frequency threshold for phrases
        :return: populated FrequentPhraseExtractor
        """
        # BUGFIX: min_count was previously ignored (cls() used the default)
        # and the progress messages referenced an undefined name `filename`.
        phrases = cls(min_count=min_count)
        filename = getattr(f, 'name', repr(f))
        print('Doing frequent words for {}'.format(filename))
        f.seek(0)
        phrases.create_vocab(f)
        for n in xrange(2, max_phrase_len + 1):
            print('Doing frequent ngrams, n={} for {}'.format(n, filename))
            f.seek(0)
            phrases.find_ngram_phrases(f, n=n)
        print('Done with {}'.format(filename))
        return phrases

    def to_tsv(self, filename, mode='w', max_rows=None):
        """Write (phrase, length, count) rows, most frequent first.

        :param filename: output path
        :param mode: file open mode, default 'w'
        :param max_rows: optional cap on the number of rows written
        """
        # BUGFIX: the output file was previously never closed; the with
        # block guarantees it is.
        with open(filename, mode) as f:
            writer = csv.writer(f, delimiter='\t')
            for i, (k, v) in enumerate(Counter(self.vocab).most_common()):
                if max_rows is not None and i == max_rows:
                    break
                gram = []
                for t, c in k:
                    gram.append(t)
                    # Ideographic characters are joined without spaces.
                    if c != token_types.IDEOGRAPHIC_CHAR:
                        gram.append(six.text_type(' '))
                phrase = six.text_type('').join(gram)
                writer.writerow((safe_encode(phrase), safe_encode(len(k)), safe_encode(v)))
| 30.664 | 87 | 0.541612 | 3,629 | 0.946778 | 118 | 0.030785 | 473 | 0.123402 | 0 | 0 | 226 | 0.058962 |
69ab300f18ef0da610d86dd3cc10be0de5d8ac1c | 10,093 | py | Python | mestopy/mestopy.py | pyfar-seminar/mestopy | 5eed12b12bb58965fa70be591d774f149fcbf6e8 | [
"MIT"
] | null | null | null | mestopy/mestopy.py | pyfar-seminar/mestopy | 5eed12b12bb58965fa70be591d774f149fcbf6e8 | [
"MIT"
] | null | null | null | mestopy/mestopy.py | pyfar-seminar/mestopy | 5eed12b12bb58965fa70be591d774f149fcbf6e8 | [
"MIT"
] | null | null | null | from scipy.signal import oaconvolve
from pyfar import Signal
# Class to generate ref-Objects, that can bei part of the MeasurementChain
class Device(object):
    """Class for device in MeasurementChain.
    This class holds methods and properties of a device in the
    'MeasurementChain' class. A device can be e.g., a sound card or a
    pre-amplifier, described by a frequency response and/or sensitivity.
    """
    def __init__(self, name, data=None, sens=1, unit=None):
        """Init Device with data.
        Attributes
        ----------
        name : str
            Name of the device.
        data : Signal, None, optional
            Signal data that reprensets the inversed frequency response of
            the device. The default is None, in this case a perfect flat
            frequency response is assumed and only sensitivity as a factor
            is applied.
            Caution: Avoid large gains in the frequency responses because
            they will boost measurement noise and might cause numerical
            instabilities. One possibility to avoid this is to use
            regularized inversion.
        sens : float, optional
            Sensitivity of the device as a factor. If neither device_data nor
            sens is given, add_device generates a device that has no effect to
            the measurement chain as it has no frequency response and a
            sesitivity (factor) default of 1.
        unit : str, optional
            The phyiscal unit of the device, e.g., mV/Pa.
        """
        # Assignments go through the property setters below, so all
        # validation happens here as well.
        self.name = name
        self.data = data
        self.sens = sens
        self.unit = unit
    @property
    def name(self):
        """The name of the device."""
        return self._name
    @name.setter
    def name(self, name):
        # NOTE(review): raises ValueError for a type error while the data
        # setter raises TypeError — inconsistent, but kept for backward
        # compatibility with existing callers.
        if not isinstance(name, str):
            raise ValueError('Device name must be string.')
        else:
            self._name = name
    @property
    def data(self):
        """The freqeuncy dependent data, representing the device."""
        return self._data
    @data.setter
    def data(self, data):
        if not isinstance(data, (Signal, type(None))):
            raise TypeError('Input data must be type Signal or None.')
        else:
            self._data = data
    @property
    def sens(self):
        """The sensitivity of the device."""
        return self._sens
    @sens.setter
    def sens(self, sens):
        if not isinstance(sens, (int, float)):
            raise ValueError('Sensitivity must be a number (int or float).')
        else:
            self._sens = sens
    @property
    def unit(self):
        """The unit of the sensitivity."""
        return self._unit
    @unit.setter
    def unit(self, unit):
        if not (isinstance(unit, str) or unit is None):
            raise ValueError('Unit of sensitivity must be string or None.')
        else:
            self._unit = unit
    @property
    def freq(self):
        """Return the inverted frequency multiplied by the sensitivity as a signal,
        or the sensitivity as scalar, when the device has no frequency
        response.
        """
        if self.data is not None:
            return self.data * self.sens
        else:
            return self.sens
    def __repr__(self):
        """String representation of Device class."""
        if self.data is None:
            repr_string = (
                f"{self.name} defined by "
                f"sensitivity={self.sens} unit={self.unit}\n")
        else:
            repr_string = (
                f"{self.name} defined by {self.data.n_bins} freq-bins, "
                f"sensitivity={self.sens} unit={self.unit}\n")
        return repr_string
# Class for MeasurementChain as frame for Devices
class MeasurementChain(object):
    """Class for complete measurement chain.
    This class holds methods and properties of all devices in the
    measurement chain. It can include a single or multiple objects of
    the Device class.
    """
    def __init__(self,
                 sampling_rate,
                 sound_device=None,
                 devices=None,
                 comment=None):
        """Init measurement chain with sampling rate.
        Attributes
        ----------
        sampling_rate : double
            Sampling rate in Hertz.
        sound_device : int
            Number to identify the sound device used. The default is None.
        devices : list
            A list of Device objects. The default is an empty list.
        comment : str
            A comment related to the measurement chain. The default is None.
        """
        self.sampling_rate = sampling_rate
        self.sound_device = sound_device
        self.comment = comment
        if devices is None:
            self.devices = []
        else:
            for dev in devices:
                if not isinstance(dev, Device):
                    raise TypeError('Input data must be type Device.')
                if dev.data is None:
                    # Devices without a frequency response carry no
                    # sampling rate to check.
                    continue
                if not dev.data.sampling_rate == self.sampling_rate:
                    raise ValueError("Sampling rate of device does not agree "
                                     "with the measurement chain.")
            self.devices = devices
        self._freq()
    def _find_device_index(self, name):
        """Private method to find the index of a given device name."""
        for i, dev in enumerate(self.devices):
            if dev.name == name:
                return i
        raise ValueError(f"device {name} not found")
    def _freq(self):
        """Private method to calculate the frequency response of the complete
        measurement chain and save it to the private attribute _resp."""
        if self.devices == []:
            # No devices: the chain is a plain unity factor.
            resp = 1.0
        else:
            resp = [[1.0]]
            # Cascade the devices by convolving their time-domain data.
            for dev in self.devices:
                if isinstance(dev.freq, Signal):
                    resp = oaconvolve(resp, dev.freq.time)
                else:
                    resp = oaconvolve(resp, [[dev.freq]])
            resp = Signal(resp, self.sampling_rate, domain='time')
            resp.domain = 'freq'
        self._resp = resp
    def add_device(self,
                   name,
                   data=None,
                   sens=1,
                   unit=None
                   ):
        """Adds a new device to the measurement chain.
        Refer to the documentation of Device class.
        Attributes
        ----------
        name : str
        data : pyfar.Signal, optional
        sens : float, optional
        unit : str, optional
        """
        if not isinstance(data, (Signal, type(None))):
            raise TypeError('Input data must be type Signal or None.')
        # BUGFIX: the sampling-rate check is now performed regardless of
        # whether the chain already holds devices; previously the first
        # device added to an empty chain was never validated.
        if data is not None:
            if not self.sampling_rate == data.sampling_rate:
                raise ValueError("Sampling rate of the new device does"
                                 "not agree with the measurement chain.")
        new_device = Device(name, data=data,
                            sens=sens, unit=unit)
        self.devices.append(new_device)
        self._freq()
    def list_devices(self):
        """Returns a list of names of all devices in the measurement chain.
        """
        return [dev.name for dev in self.devices]
    def remove_device(self, num):
        """Removes a single device from the measurement chain,
        by name or number.
        Attributes
        ----------
        num : int or str
            Identifier for device to remove. Device can be found by name as
            string or by number in device list as int.
        """
        if isinstance(num, int):
            self.devices.pop(num)
        elif isinstance(num, str):
            # Resolve the name to an index and recurse into the int branch.
            self.remove_device(self._find_device_index(num))
        else:
            raise TypeError("device to remove must be int or str")
        self._freq()
    def reset_devices(self):
        """Resets the list of devices in the measurement chain.
        Other global parameters such as sampling rate or sound device of the
        measurement chain remain unchanged.
        """
        self.devices = []
        self._freq()
    def device_freq(self, num):
        """Returns the frequency response of a single device from the
        measurement chain, by name or number.
        Attributes
        ----------
        num : int or str
            Identifier for device, can be name as string or by number
            in device list as int.
        """
        if isinstance(num, int):
            return self.devices[num].freq
        elif isinstance(num, str):
            return self.device_freq(self._find_device_index(num))
        else:
            raise TypeError("Device must be called by int or str.")
    @property
    def freq(self):
        """Returns the frequency response of the complete measurement chain.
        All devices (frequency response and sensitivity) are considered.
        """
        return self._resp
    def __repr__(self):
        """String representation of MeasurementChain class.
        """
        repr_string = (
            f"measurement chain with {len(self.devices)} devices "
            f"@ {self.sampling_rate} Hz sampling rate.\n")
        for i, dev in enumerate(self.devices):
            repr_string = f"{repr_string}# {i:{2}}: {dev}"
        return repr_string
| 34.803448 | 83 | 0.574458 | 9,901 | 0.980977 | 0 | 0 | 1,779 | 0.176261 | 0 | 0 | 5,267 | 0.521847 |
69ab8661fbcc312d7db1662206a4daeec8008df9 | 482 | py | Python | setup.py | Chichilele/algorithms | acc7470631b3ced2a8e126011af1e6ff1ff62394 | [
"MIT"
] | null | null | null | setup.py | Chichilele/algorithms | acc7470631b3ced2a8e126011af1e6ff1ff62394 | [
"MIT"
] | null | null | null | setup.py | Chichilele/algorithms | acc7470631b3ced2a8e126011af1e6ff1ff62394 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
# Read the README so PyPI can render it as the long description.
with open("README.md", "r") as fh:
    long_description = fh.read()
# Packaging metadata; `root_finding` is installed as a console script that
# dispatches to algorithms.root_finding:cli.
setup(
    name="algorithms",
    version="0.1",
    description="Implements a few optimisation algorithms",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/chichilele/algorithms",
    packages=find_packages(),
    entry_points={"console_scripts": ["root_finding=algorithms.root_finding:cli"]},
)
| 28.352941 | 83 | 0.728216 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 189 | 0.392116 |
69abcd377de6101c4d2a16cd4c46eea6090dd3cc | 3,980 | py | Python | bifurcation.py | lisah298/PathReducer | 15f692c3a6712f26d64865d566ce4df5574c4a09 | [
"MIT"
] | null | null | null | bifurcation.py | lisah298/PathReducer | 15f692c3a6712f26d64865d566ce4df5574c4a09 | [
"MIT"
] | 1 | 2021-11-01T09:24:13.000Z | 2021-11-01T09:24:13.000Z | bifurcation.py | lisah298/PathReducer | 15f692c3a6712f26d64865d566ce4df5574c4a09 | [
"MIT"
] | null | null | null | import pandas as pd
import dimensionality_reduction_functions as dim_red
from plotting_functions import colored_line_plot, colored_line_and_scatter_plot, colored_line_plot_projected_data
# Number of PCA components
ndim = 3
####################################### EXAMPLE 4: CYCLOPROPYLIDENE BIFURCATION ########################################
# Inputs
file = './examples/bifurcation/bifur_IRC.xyz'
stereo_atoms_B = [3, 4, 5, 7]
# "New Files" to test transforming trajectories into already generated reduced dimensional space
new_file1 = './examples/bifurcation/bifur_traj1.xyz'
new_file2 = './examples/bifurcation/bifur_traj2.xyz'
new_file3 = './examples/bifurcation/bifur_traj3.xyz'
new_file4 = './examples/bifurcation/bifur_traj4.xyz'
# DISTANCES INPUT
system_name1, direc1, D_pca, D_pca_fit, D_pca_components, D_mean, D_values, traj_lengths1, aligned_original_coords = \
dim_red.pathreducer(
file, ndim, stereo_atoms=stereo_atoms_B, input_type="Distances")
# Transforming new data into RD space
new_data_df1 = dim_red.transform_new_data(new_file1, direc1 + "/new_data", ndim, D_pca_fit, D_pca_components, D_mean,
aligned_original_coords, stereo_atoms=stereo_atoms_B, input_type="Distances")[1]
# new_data_df2 = dim_red.transform_new_data(new_file2, direc1 + "/new_data", ndim, D_pca_fit, D_pca_components, D_mean,
# aligned_original_coords, stereo_atoms=stereo_atoms_B, input_type="Distances")[1]
# new_data_df3 = dim_red.transform_new_data(new_file3, direc1 + "/new_data", ndim, D_pca_fit, D_pca_components, D_mean,
# aligned_original_coords, stereo_atoms=stereo_atoms_B, input_type="Distances")[1]
# new_data_df4 = dim_red.transform_new_data(new_file4, direc1 + "/new_data", ndim, D_pca_fit, D_pca_components, D_mean,
# aligned_original_coords, stereo_atoms=stereo_atoms_B, input_type="Distances")[1]
# Plotting
# DISTANCES INPUT
D_pca_df = pd.DataFrame(D_pca)
# Split the projected path into the two bifurcation branches: rows 0-182
# for branch one; rows 0-105 plus 184-end for branch two (106-183 dropped).
# TODO(review): confirm the split points against the IRC data.
D_pca_df1 = D_pca_df[0:183]
D_pca_df2 = D_pca_df.drop(D_pca_df.index[106:184], axis=0)
# Figure 14
colored_line_and_scatter_plot(D_pca_df1[0], y=D_pca_df1[1], y1=D_pca_df1[2], x2=D_pca_df2[0], y2=D_pca_df2[1], y12=D_pca_df2[2],
                              output_directory=direc1, imgname=(system_name1 + "_Distances_noMW"))
# figures 15 A-D but without the MD trajectory
colored_line_plot_projected_data(D_pca_df1[0], y=D_pca_df1[1], z=D_pca_df1[2], x2=D_pca_df2[0], y2=D_pca_df2[1], z2=D_pca_df2[2],
                                 same_axis=False, new_data_x=new_data_df1[0], new_data_y=new_data_df1[1], new_data_z=new_data_df1[2], output_directory=direc1 + "/new_data",
                                 imgname=(system_name1 + "_Distances_noMW_traj1_D"))
# colored_line_plot_projected_data(D_pca_df1[0], y=D_pca_df1[1], z=D_pca_df1[2], x2=D_pca_df2[0], y2=D_pca_df2[1], z2=D_pca_df2[2],
# same_axis=False, new_data_x=new_data_df2[0], new_data_y=new_data_df2[1], new_data_z=new_data_df2[2], output_directory=direc1 + "/new_data",
# imgname=(system_name1 + "_Distances_noMW_traj2_A"))
# colored_line_plot_projected_data(D_pca_df1[0], y=D_pca_df1[1], z=D_pca_df1[2], x2=D_pca_df2[0], y2=D_pca_df2[1], z2=D_pca_df2[2],
# same_axis=False, new_data_x=new_data_df3[0], new_data_y=new_data_df3[1], new_data_z=new_data_df3[2], output_directory=direc1 + "/new_data",
# imgname=(system_name1 + "_Distances_noMW_traj3_B"))
# colored_line_plot_projected_data(D_pca_df1[0], y=D_pca_df1[1], z=D_pca_df1[2], x2=D_pca_df2[0], y2=D_pca_df2[1], z2=D_pca_df2[2],
# same_axis=False, new_data_x=new_data_df4[0], new_data_y=new_data_df4[1], new_data_z=new_data_df4[2], output_directory=direc1 + "/new_data",
# imgname=(system_name1 + "_Distances_noMW_traj4_C"))
| 71.071429 | 173 | 0.687437 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,556 | 0.642211 |
69ac02c93958a2c5e75638d8d378d755f3ed4ffb | 2,381 | py | Python | test/hummingbot/connector/exchange/bitfinex/test_bitfinex_api_order_book_data_source.py | BGTCapital/hummingbot | 2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242 | [
"Apache-2.0"
] | 3,027 | 2019-04-04T18:52:17.000Z | 2022-03-30T09:38:34.000Z | test/hummingbot/connector/exchange/bitfinex/test_bitfinex_api_order_book_data_source.py | BGTCapital/hummingbot | 2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242 | [
"Apache-2.0"
] | 4,080 | 2019-04-04T19:51:11.000Z | 2022-03-31T23:45:21.000Z | test/hummingbot/connector/exchange/bitfinex/test_bitfinex_api_order_book_data_source.py | BGTCapital/hummingbot | 2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242 | [
"Apache-2.0"
] | 1,342 | 2019-04-04T20:50:53.000Z | 2022-03-31T15:22:36.000Z | import asyncio
import json
from unittest import TestCase
from aioresponses import aioresponses
import hummingbot.connector.exchange.bitfinex.bitfinex_utils as utils
from hummingbot.connector.exchange.bitfinex import BITFINEX_REST_URL
from hummingbot.connector.exchange.bitfinex.bitfinex_api_order_book_data_source import BitfinexAPIOrderBookDataSource
class BitfinexAPIOrderBookDataSourceTests(TestCase):
# the level is required to receive logs from the data source logger
level = 0
def setUp(self) -> None:
super().setUp()
self.log_records = []
BitfinexAPIOrderBookDataSource.logger().setLevel(1)
BitfinexAPIOrderBookDataSource.logger().addHandler(self)
def handle(self, record):
self.log_records.append(record)
def _is_logged(self, log_level: str, message: str) -> bool:
return any(record.levelname == log_level and record.getMessage() == message for record in self.log_records)
@aioresponses()
def test_get_last_traded_price(self, api_mock):
response = [
10645,
73.93854271,
10647,
75.22266119,
731.60645389,
0.0738,
10644.00645389,
14480.89849423,
10766,
9889.1449809]
api_mock.get(f"{BITFINEX_REST_URL}/ticker/{utils.convert_to_exchange_trading_pair('BTC-USDT')}",
body=json.dumps(response))
last_price = asyncio.get_event_loop().run_until_complete(
BitfinexAPIOrderBookDataSource.get_last_traded_price("BTC-USDT"))
self.assertEqual(response[6], last_price)
@aioresponses()
def test_get_last_traded_price_returns_zero_when_an_error_happens(self, api_mock):
response = {"error": "ERR_RATE_LIMIT"}
api_mock.get(f"{BITFINEX_REST_URL}/ticker/{utils.convert_to_exchange_trading_pair('BTC-USDT')}",
body=json.dumps(response))
last_price = asyncio.get_event_loop().run_until_complete(
BitfinexAPIOrderBookDataSource.get_last_traded_price("BTC-USDT"))
self.assertEqual(0, last_price)
self.assertTrue(self._is_logged(
"ERROR",
f"Error encountered requesting ticker information. The response was: {response} "
f"(There was an error requesting ticker information BTC-USDT ({response}))"
))
| 38.403226 | 117 | 0.684586 | 2,024 | 0.850063 | 0 | 0 | 1,413 | 0.593448 | 0 | 0 | 437 | 0.183536 |
69ac26e95480ef9ba55d71661068918c6ae8a979 | 2,445 | py | Python | pandas_loc_iloc.py | tseth92/pandas_experiments | ab26e0c6004546bea1ebdbe8807a6d4014189e64 | [
"MIT"
] | null | null | null | pandas_loc_iloc.py | tseth92/pandas_experiments | ab26e0c6004546bea1ebdbe8807a6d4014189e64 | [
"MIT"
] | null | null | null | pandas_loc_iloc.py | tseth92/pandas_experiments | ab26e0c6004546bea1ebdbe8807a6d4014189e64 | [
"MIT"
] | null | null | null | '''
This code compares the loc and iloc in pandas dataframe
'''
__author__ = "Tushar SEth"
__email__ = "tusharseth93@gmail.com"
import pandas as pd
import timeit
# Build a small demo frame: column A holds 0..49, column B holds 5..54.
df_test = pd.DataFrame()
df_test['A'] = list(range(50))
df_test['B'] = [value + 5 for value in range(50)]

print('Original Dataframe:')
print(df_test.head(5))
print("-----------------")

# --- iloc: purely positional indexing; the end of a slice is EXCLUDED ---
print('iloc dataframe: 3rd row and 1st to 2nd column:')
print(df_test.iloc[2:3, 0:2])
print("-----------------")

# --- loc: label based indexing; the end of a slice is INCLUDED,
# --- and columns are addressed by name rather than by position ---
print('loc dataframe: 3rd row and 1st to 2nd column:')
print(df_test.loc[2:3, ['A', 'B']])
print("-----------------")

# Re-label the index so loc and iloc stop agreeing: the first five labels
# become 63..67, while every later label is shifted down by five. iloc
# still counts positions from the start; loc looks the label up wherever
# it happens to sit.
as_list = df_test.index.tolist()
print(as_list[3:7])
df_test.index = [63, 64, 65, 66, 67] + [label - 5 for label in as_list[5:]]

print('-----------------Dataframe after index updated -------------- ')
print(df_test.head(10))
print('-------------- iloc dataframe with updated index-------------')
print(df_test.iloc[:7])  # first seven positions, labels ignored
print('-------------- loc dataframe with updated index-------------')
print(df_test.loc[:7])  # everything up to (and including) label 7
| 33.958333 | 73 | 0.521472 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,807 | 0.739059 |
69ac2cf3f9092bced76304a2eff481f5a2f1681a | 900 | py | Python | lesson_6/task_2.py | ok-git/py_training | 76ac3a48c41ed0f7fe308a64aae6b8e447041f70 | [
"Apache-2.0"
] | null | null | null | lesson_6/task_2.py | ok-git/py_training | 76ac3a48c41ed0f7fe308a64aae6b8e447041f70 | [
"Apache-2.0"
] | null | null | null | lesson_6/task_2.py | ok-git/py_training | 76ac3a48c41ed0f7fe308a64aae6b8e447041f70 | [
"Apache-2.0"
] | null | null | null | """
Реализовать класс Road (дорога), в котором определить атрибуты: length (длина), width (ширина). Значения данных
атрибутов должны передаваться при создании экземпляра класса. Атрибуты сделать защищенными. Определить метод расчета
массы асфальта, необходимого для покрытия всего дорожного полотна. Использовать формулу: длина*ширина*масса асфальта
для покрытия одного кв метра дороги асфальтом, толщиной в 1 см*число см толщины полотна. Проверить работу метода.
Например: 20м*5000м*25кг*5см = 12500 т
"""
class Road:
    """A rectangular stretch of road with protected geometry attributes."""

    # kilograms of asphalt per square metre per centimetre of thickness
    asphalt_per_sqmeter = 25

    def __init__(self, road_length, road_width, thickness=5):
        self._road_length = road_length
        self._road_width = road_width
        self._thickness = thickness

    def calc(self):
        """Return the asphalt mass (kg) needed to pave the whole road."""
        area = self._road_length * self._road_width
        return area * self._thickness * self.asphalt_per_sqmeter


a = Road(5000, 20)
print(f'Масса асфальта {a.calc()} кг.')
| 37.5 | 116 | 0.761111 | 329 | 0.253662 | 0 | 0 | 0 | 0 | 0 | 0 | 935 | 0.720894 |
69ac8493ca58590ab03c7e2d5c8b17f4c7d44722 | 2,897 | py | Python | cmsc_210/examples/lecture_07/war.py | mazelife/cmsc-210 | dbaa1604ef49bcfe5a70e09c17fbd243a8b80220 | [
"MIT"
] | null | null | null | cmsc_210/examples/lecture_07/war.py | mazelife/cmsc-210 | dbaa1604ef49bcfe5a70e09c17fbd243a8b80220 | [
"MIT"
] | 5 | 2022-01-16T23:30:12.000Z | 2022-01-30T23:03:21.000Z | cmsc_210/examples/lecture_07/war.py | mazelife/cmsc-210 | dbaa1604ef49bcfe5a70e09c17fbd243a8b80220 | [
"MIT"
] | null | null | null | from functools import total_ordering
from random import shuffle
class Player:
    """A participant in the card game, holding an ordered hand of cards."""

    def __init__(self, name):
        self.name = name
        self.hand = []

    def __str__(self):
        return self.name

    def play(self):
        """Remove and return the card at the top (end) of the hand."""
        return self.hand.pop()

    def receive(self, cards):
        """Insert *cards* one by one at the bottom (front) of the hand."""
        # equivalent to calling hand.insert(0, card) for each card in order
        self.hand[:0] = reversed(list(cards))

    def is_hand_empty(self):
        """Return True when the player has no cards left."""
        return len(self.hand) == 0
# Face-card ranks in ascending strength; index + 11 gives the card value.
FACE = ("Jack", "Queen", "King", "Ace")
# The four suits; suit has no effect on card comparisons in this game.
SUIT = ("Club", "Spade", "Diamond", "Heart")
@total_ordering
class Card:
    """A playing card ordered and compared only by its numeric value.

    Suit is ignored for comparisons; ``functools.total_ordering`` fills in
    the remaining rich comparisons from ``__eq__`` and ``__lt__``.
    """

    def __init__(self, suit, value):
        self.suit = suit
        self.value = value

    def __str__(self):
        return f"{self.value} of {self.suit}"

    def __lt__(self, other):
        # Return NotImplemented for non-cards so Python raises a proper
        # TypeError instead of an AttributeError on other.value.
        if not isinstance(other, Card):
            return NotImplemented
        return self.value < other.value

    def __eq__(self, other):
        if not isinstance(other, Card):
            return NotImplemented
        return self.value == other.value

    def __hash__(self):
        # Defining __eq__ alone would make Card unhashable; hash on the
        # same key that equality uses.
        return hash(self.value)
class FaceCard(Card):
    """A Jack, Queen, King or Ace whose value derives from its rank name."""

    def __init__(self, suit, face):
        # Jack=11, Queen=12, King=13, Ace=14
        super().__init__(suit, FACE.index(face) + 11)
        self.face = face

    def __str__(self):
        return f"{self.face} of {self.suit}"
class Deck:
    """A standard 52-card deck, shuffled on construction."""

    def __init__(self):
        self.cards = []
        for suit in SUIT:
            # number cards 2-10, then the four face cards of this suit
            self.cards.extend(Card(suit, value) for value in range(2, 11))
            self.cards.extend(FaceCard(suit, face) for face in FACE)
        shuffle(self.cards)

    def deal(self, players):
        """Distribute the whole deck round-robin, one card per player."""
        while self.cards:
            for player in players:
                player.receive([self.cards.pop()])
                if not self.cards:
                    return
class Game:
    """A two-player game of War: higher card wins the trick; ties go to war."""

    def __init__(self, name_1, name_2):
        """Create both players and deal them the full shuffled deck."""
        self.player_1 = Player(name_1)
        self.player_2 = Player(name_2)
        deck = Deck()
        deck.deal([self.player_1, self.player_2])
    def is_game_over(self):
        """The game ends as soon as either player runs out of cards."""
        return self.player_1.is_hand_empty() or self.player_2.is_hand_empty()
    def play(self):
        """Play tricks until one hand is empty, then announce the winner.

        ``previous_hands`` accumulates the cards at stake during ties
        ("wars"); the next decided trick awards them all to its winner.
        """
        previous_hands = []
        total_hands = 0
        while not self.is_game_over():
            c1 = self.player_1.play()
            c2 = self.player_2.play()
            if c1 < c2: # player 2 is the winner
                self.player_2.receive([c1, c2] + previous_hands)
                previous_hands = []
            elif c1 > c2:
                self.player_1.receive([c1, c2] + previous_hands)
                previous_hands = []
            else:
                # Tie: both cards stay on the table, and each player commits
                # up to three more face-down cards before the next compare.
                previous_hands.extend([c1, c2])
                for i in range(3):
                    if not self.is_game_over():
                        previous_hands.append(self.player_1.play())
                        previous_hands.append(self.player_2.play())
            total_hands += 1
        if self.player_1.is_hand_empty():
            print(f"Player {self.player_2} is the winner in {total_hands} hands.")
        else:
            print(f"Player {self.player_1} is the winner in {total_hands} hands.")
| 26.099099 | 82 | 0.548498 | 2,715 | 0.937176 | 0 | 0 | 328 | 0.113221 | 0 | 0 | 262 | 0.090438 |
69acb4ac84d0426898b7a3cffed434b1a66dce6c | 1,191 | py | Python | paper/curve_context_acc.py | INK-USC/procedural-extraction | 6b53d8a03bdd24560e96960fd0eddeee9ff8bc6f | [
"Apache-2.0"
] | 5 | 2019-09-11T20:29:35.000Z | 2022-03-27T13:16:51.000Z | paper/curve_context_acc.py | INK-USC/procedural-extraction | 6b53d8a03bdd24560e96960fd0eddeee9ff8bc6f | [
"Apache-2.0"
] | null | null | null | paper/curve_context_acc.py | INK-USC/procedural-extraction | 6b53d8a03bdd24560e96960fd0eddeee9ff8bc6f | [
"Apache-2.0"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
cz2 = (0.7, 0.7, 0.7)
cz = (0.3, 0.3, 0.3)
cy = (0.7, 0.4, 0.12)
ci = (0.1, 0.3, 0.5)
ct = (0.7, 0.2, 0.1)
ax = plt.figure(figsize=(5,4)).gca()
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
ax.yaxis.grid(True)
ax.set_ylim([40,75])
plt.yticks(list(range(40,80,10)),[str(i) for i in range(40,80,10)])
ax.set_title('Test')
ax.set_xlabel('Context Level $K$')
ax.set_ylabel('Micro F$_1$ Score (%)')
y=[62.1,58.5,70.1,68.4]
x=[0,1,2,3]
bt, = ax.plot(x,y, '--', label='BERT Test', marker='^')
y=[54.0,64.0,72.2,66.9]
x=[0,1,2,3]
cat, = ax.plot(x,y, '-.', label='C. Attn. Test', marker='^')
y=[69.3, 66.4, 72.7, 68.8]
x=[0,1,2,3]
cet, = ax.plot(x,y, '-.', label='C. Emb. Test', marker='^')
y=[54.6,62.1,69.0,69.9]
x=[0,1,2,3]
mat, = ax.plot(x,y, '-', label='Mask$_{AVG}$ Test', marker='o')
y=[62.0,64.0,72.6,71.1]
x=[0,1,2,3]
mmt, = ax.plot(x,y, '-', label='Mask$_{MAX}$ Test', marker='o')
y=[49.2, 55.4, 67.4, 58.3]
x=[0,1, 2, 3]
ht, = ax.plot(x,y, ':', label='HBMP Test', marker='s')
plt.legend(handles=[bt, cat, cet, mat, mmt, ht])
#plt.show()
plt.savefig('curvetest.png', dpi=1500) | 24.306122 | 67 | 0.583543 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 202 | 0.169605 |
69ad4f7f5bd628e678130664a2787d7ddc169bf0 | 3,079 | py | Python | Cartwheel/lib/Python26/Lib/site-packages/OpenGL/raw/GL/ATI/pn_triangles.py | MontyThibault/centre-of-mass-awareness | 58778f148e65749e1dfc443043e9fc054ca3ff4d | [
"MIT"
] | null | null | null | Cartwheel/lib/Python26/Lib/site-packages/OpenGL/raw/GL/ATI/pn_triangles.py | MontyThibault/centre-of-mass-awareness | 58778f148e65749e1dfc443043e9fc054ca3ff4d | [
"MIT"
] | null | null | null | Cartwheel/lib/Python26/Lib/site-packages/OpenGL/raw/GL/ATI/pn_triangles.py | MontyThibault/centre-of-mass-awareness | 58778f148e65749e1dfc443043e9fc054ca3ff4d | [
"MIT"
] | null | null | null | '''OpenGL extension ATI.pn_triangles
Overview (from the spec)
ATI_pn_triangles provides a path for enabling the GL to internally
tessellate input geometry into curved patches. The extension allows the
user to tune the amount of tessellation to be performed on each triangle as
a global state value. The intent of PN Triangle tessellation is
typically to produce geometry with a smoother silhouette and more organic
shape.
The tessellated patch will replace the triangles input into the GL.
The GL will generate new vertices in object-space, prior to geometry
transformation. Only the vertices and normals are required to produce
proper results, and the rest of the information per vertex is interpolated
linearly across the patch.
The official definition of this extension is available here:
http://oss.sgi.com/projects/ogl-sample/registry/ATI/pn_triangles.txt
Automatically generated by the get_gl_extensions script, do not edit!
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions
from OpenGL.GL import glget
import ctypes
EXTENSION_NAME = 'GL_ATI_pn_triangles'
GL_PN_TRIANGLES_ATI = constant.Constant( 'GL_PN_TRIANGLES_ATI', 0x87F0 )
GL_MAX_PN_TRIANGLES_TESSELATION_LEVEL_ATI = constant.Constant( 'GL_MAX_PN_TRIANGLES_TESSELATION_LEVEL_ATI', 0x87F1 )
glget.addGLGetConstant( GL_MAX_PN_TRIANGLES_TESSELATION_LEVEL_ATI, (1,) )
GL_PN_TRIANGLES_POINT_MODE_ATI = constant.Constant( 'GL_PN_TRIANGLES_POINT_MODE_ATI', 0x87F2 )
glget.addGLGetConstant( GL_PN_TRIANGLES_POINT_MODE_ATI, (1,) )
GL_PN_TRIANGLES_NORMAL_MODE_ATI = constant.Constant( 'GL_PN_TRIANGLES_NORMAL_MODE_ATI', 0x87F3 )
glget.addGLGetConstant( GL_PN_TRIANGLES_NORMAL_MODE_ATI, (1,) )
GL_PN_TRIANGLES_TESSELATION_LEVEL_ATI = constant.Constant( 'GL_PN_TRIANGLES_TESSELATION_LEVEL_ATI', 0x87F4 )
glget.addGLGetConstant( GL_PN_TRIANGLES_TESSELATION_LEVEL_ATI, (1,) )
GL_PN_TRIANGLES_POINT_MODE_LINEAR_ATI = constant.Constant( 'GL_PN_TRIANGLES_POINT_MODE_LINEAR_ATI', 0x87F5 )
GL_PN_TRIANGLES_POINT_MODE_CUBIC_ATI = constant.Constant( 'GL_PN_TRIANGLES_POINT_MODE_CUBIC_ATI', 0x87F6 )
GL_PN_TRIANGLES_NORMAL_MODE_LINEAR_ATI = constant.Constant( 'GL_PN_TRIANGLES_NORMAL_MODE_LINEAR_ATI', 0x87F7 )
GL_PN_TRIANGLES_NORMAL_MODE_QUADRATIC_ATI = constant.Constant( 'GL_PN_TRIANGLES_NORMAL_MODE_QUADRATIC_ATI', 0x87F8 )
glPNTrianglesiATI = platform.createExtensionFunction(
'glPNTrianglesiATI', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLint,),
doc = 'glPNTrianglesiATI( GLenum(pname), GLint(param) ) -> None',
argNames = ('pname', 'param',),
)
glPNTrianglesfATI = platform.createExtensionFunction(
'glPNTrianglesfATI', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLfloat,),
doc = 'glPNTrianglesfATI( GLenum(pname), GLfloat(param) ) -> None',
argNames = ('pname', 'param',),
)
def glInitPnTrianglesATI():
    '''Return boolean indicating whether this extension is available'''
    # Checks the current GL context's extension string for GL_ATI_pn_triangles.
    return extensions.hasGLExtension( EXTENSION_NAME )
| 48.873016 | 116 | 0.815525 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,568 | 0.509256 |
69ad7163f8d258608d0f58bb9ccc2e396ca4ee6f | 2,325 | py | Python | setup.py | Wi11iamDing/toad | 3b22cc9a5d83255d394da483ec47b0de5f862c07 | [
"MIT"
] | 1 | 2021-04-29T08:59:26.000Z | 2021-04-29T08:59:26.000Z | setup.py | lijihong111/toad | 3b22cc9a5d83255d394da483ec47b0de5f862c07 | [
"MIT"
] | null | null | null | setup.py | lijihong111/toad | 3b22cc9a5d83255d394da483ec47b0de5f862c07 | [
"MIT"
] | null | null | null | import os
import numpy as np
from setuptools import setup, find_packages, Extension
NAME = 'toad'
CURRENT_PATH = os.path.abspath(os.path.dirname(__file__))
VERSION_FILE = os.path.join(CURRENT_PATH, NAME, 'version.py')
def get_version():
    """Read ``__version__`` out of the package's version module.

    The file is executed in an isolated namespace, so the package does
    not need to be importable at build time.
    """
    namespace = {}
    with open(VERSION_FILE) as fh:
        exec(fh.read(), namespace)
    return namespace['__version__']
def get_ext_modules():
    """Cythonize the package's extension modules (built against NumPy headers)."""
    from Cython.Build import cythonize

    pyx_sources = ['toad/c_utils.pyx', 'toad/merge.pyx']
    extensions = [
        # 'toad/c_utils.pyx' -> module name 'toad.c_utils'
        Extension(path.replace('/', '.')[:-4],
                  sources=[path],
                  include_dirs=[np.get_include()])
        for path in pyx_sources
    ]
    return cythonize(extensions)
def get_requirements(stage = None):
    """Collect dependency specifiers from a requirements file.

    Parameters
    ----------
    stage : str, optional
        When given, read ``requirements-<stage>.txt`` instead of the
        base ``requirements.txt``.

    Returns
    -------
    list of str
        Requirement lines, with blank lines, ``#`` comments and pip
        option lines (``-r``, ``-e``, ...) filtered out.
    """
    file_name = 'requirements'
    if stage is not None:
        file_name = f"{file_name}-{stage}"

    requirements = []
    with open(f"{file_name}.txt", 'r') as f:
        for line in f:
            line = line.strip()
            # skip blanks, pip options (lines starting with '-') and
            # comments -- previously '#' comment lines leaked through
            # and were passed to setuptools as requirement specifiers
            if not line or line.startswith(('-', '#')):
                continue
            requirements.append(line)
    return requirements
# Packaging metadata. NOTE(review): open('README.md') leaks its file handle
# (no with-block); harmless for a one-shot build but worth tightening.
setup(
    name = NAME,
    version = get_version(),
    description = 'Toad is dedicated to facilitating model development process, especially for a scorecard.',
    long_description = open('README.md', encoding = 'utf-8').read(),
    long_description_content_type = 'text/markdown',
    url = 'https://github.com/amphibian-dev/toad',
    author = 'ESC Team',
    author_email = 'secbone@gmail.com',
    packages = find_packages(exclude = ['tests']),
    include_dirs = [np.get_include()],
    ext_modules = get_ext_modules(),
    include_package_data = True,
    python_requires = '>=3.6',
    install_requires = get_requirements(),
    # optional neural-network extra: pip install toad[nn]
    extras_require = {
        'nn': get_requirements('nn')
    },
    tests_require = get_requirements('test'),
    license = 'MIT',
    classifiers = [
        'Operating System :: POSIX',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: MacOS :: MacOS X',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
    ],
    # command line entry point: `toad` runs toad.cli:main
    entry_points = {
        'console_scripts': [
            'toad = toad.cli:main',
        ],
    },
)
| 28.012048 | 109 | 0.610753 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 677 | 0.291183 |
69adc16bfc161b3aeeeb29f573076a958b052631 | 9,071 | py | Python | tvm_benchmark/test_resnet_inference.py | adwwsd/HAWQ | a8e40be0edd336b2554d88691b18ed51e7d32bf0 | [
"MIT"
] | null | null | null | tvm_benchmark/test_resnet_inference.py | adwwsd/HAWQ | a8e40be0edd336b2554d88691b18ed51e7d32bf0 | [
"MIT"
] | null | null | null | tvm_benchmark/test_resnet_inference.py | adwwsd/HAWQ | a8e40be0edd336b2554d88691b18ed51e7d32bf0 | [
"MIT"
] | null | null | null | import torch
import tvm
from tvm import autotvm
from tvm import relay
from tvm.contrib import download
from tvm.contrib.debugger import debug_runtime
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import argparse
import os
from os.path import join, isfile
import sys
import json, requests
from io import BytesIO
import re
import mixed_precision_models.quantized_resnet_v1 as quantized_resnet_v1
from mixed_precision_models.layers import QConfig, QuantizeContext
import hawq_utils_resnet
import torch.cuda.profiler as profiler
import pyprof
pyprof.init()
import logging
logging.basicConfig(level=logging.CRITICAL)
# Command line interface for the accuracy/debug run.
parser = argparse.ArgumentParser(description='Resnet accuracy test',
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--model-dir', required=True,
                    help='Model data directory')
parser.add_argument('--debug-unit', default=None,
                    help='Debug specific unit input, compare the unit input to the pytorch result (stage1_unit1, stage1_unit2 ...)')
parser.add_argument('--rounding', default='TONEAREST',
                    help='Round scheme (TONEAREST, TRUNCATE)')
parser.add_argument('--num-classes', type=int, default=1000,
                    help='Total number of classes')
parser.add_argument('--arch', default='resnet50',
                    help='resnet architecture')
args = parser.parse_args()
###############################################################################
# Set target device
# -----------------
TARGET_NAME = 'cuda'
CTX = tvm.context(TARGET_NAME, 0)
#CTX = tvm.gpu(0)
###############################################################################
# Load params
# -----------------
if args.arch == 'resnet50':
isRes18 = False
if args.num_classes == 10: # Cifar 10
num_stages = 3
units = [3, 4, 6]
print("Use Cifar 10")
else:
num_stages = 4
units = [3, 4, 6, 3]
elif args.arch == 'resnet18':
isRes18 = True
num_stages = 4
units = [2, 2, 2, 2]
else:
assert 0
weights = np.load(os.path.join(args.model_dir, "weights.npy"), allow_pickle=True)[()]
bias = np.load(os.path.join(args.model_dir, "bias.npy"), allow_pickle=True)[()]
hawq_utils_resnet.load_qconfig("uint4", "int4", num_stages, units, file_name=os.path.join(args.model_dir, "quantized_checkpoint.pth.tar"), isRes18=isRes18)
#hawq_utils_resnet50.load_qconfig("int8", "int8", num_stages, units, file_name=os.path.join(args.model_dir, "quantized_checkpoint.pth.tar"))
input_image = np.load(os.path.join(args.model_dir, "input_image_batch_1.npy"))
input_image = input_image / QuantizeContext.qconfig_dict["conv0_qconfig"].input_scale
input_image = np.clip(input_image, -128, 127)
if args.rounding == "TONEAREST":
input_image = np.round(input_image)
elif args.rounding == "TRUNCATE":
input_image = np.trunc(input_image)
input_image = input_image.astype("int8")
params = {**weights, **bias}
###############################################################################
# Load model
# -----------------
batch_size = 8
shape = list(input_image.shape)
image_shape = (shape[3], shape[1], shape[2])
input_dtype = 'int8'
model_type = "int4"
num_layers = 18 if isRes18 else 50
data_layout = "NHWC"
kernel_layout = "HWOI"
func, _ = quantized_resnet_v1.get_workload(batch_size=batch_size,
image_shape=image_shape,
num_classes=args.num_classes,
num_layers=num_layers,
dtype=input_dtype,
data_layout=data_layout,
kernel_layout=kernel_layout,
with_bn=False,
debug_unit=args.debug_unit,
rounding=args.rounding)
# Download ImageNet categories
categ_url = "https://github.com/uwsaml/web-data/raw/main/vta/models/"
categ_fn = "synset.txt"
download.download(join(categ_url, categ_fn), categ_fn)
synset = eval(open(categ_fn).read())
image = input_image
input_data = np.repeat(image, batch_size, axis=0)
###############################################################################
# Run the model
# -----------------
log_filename = "/home/zach_zheng/hawq_tvm/mixed_precision_models/tuning_logs/resnet%d_%s_%s_batch_%d.log" % (num_layers, data_layout, model_type, batch_size)
if not os.path.exists(log_filename):
log_filename = None
else:
print("Apply tuning log " + log_filename)
with autotvm.apply_history_best(log_filename):
with relay.build_config(opt_level=3):
print("building relay")
graph, lib, params = relay.build(func, target=TARGET_NAME, params=params)
if args.debug_unit is not None:
m = tvm.contrib.graph_runtime.create(graph, lib, CTX)
#m = tvm.contrib.graph_executor.create(graph, lib, CTX)
# Set the network parameters and inputs
m.set_input(**params)
m.set_input('data', input_data)
m.run()
np.set_printoptions(threshold=sys.maxsize)
out = m.get_output(0).asnumpy()
if not os.path.exists(os.path.join(args.model_dir, "tvm_result")):
os.mkdir(os.path.join(args.model_dir, "tvm_result"))
unit_str_regex = re.search('stage(\d)_unit(\d)', args.debug_unit)
if unit_str_regex is not None:
unit_str = unit_str_regex.group(0)
else:
unit_str = ""
if args.debug_unit == "fc_input":
actual_result = out
np.save(os.path.join(args.model_dir, "tvm_result/fc_input_int8.npy"), actual_result[0])
golden_result = np.load(os.path.join(args.model_dir, "pytorch_result/fc_input_int8.npy")).astype("int8")
elif args.debug_unit == "fc_output":
golden_result = np.load(os.path.join(args.model_dir, "pytorch_result/fc_output_int32.npy"))
actual_result = out
np.save(os.path.join(args.model_dir, "tvm_result/fc_output_int32.npy"), actual_result[0])
# golden_result = np.load(os.path.join(args.model_dir, "pytorch_result/fc_output_float32.npy"))#.astype("int32")
elif args.debug_unit == "avg_pool":
actual_result = out
np.save(os.path.join(args.model_dir, "tvm_result/avg_pool_int32.npy"), actual_result[0])
golden_result = np.load(os.path.join(args.model_dir, "pytorch_result/avg_pool_int32.npy")).astype("int32")
elif args.debug_unit == "softmax":
actual_result = out
np.save(os.path.join(args.model_dir, "tvm_result/avg_pool_int32.npy"), actual_result[0])
golden_result = np.load(os.path.join(args.model_dir, "pytorch_result/avg_pool_int32.npy")).astype("int32")
elif args.debug_unit == unit_str + "_output":
actual_result = out * QuantizeContext.qconfig_dict["%s_qconfig_add" % unit_str].output_scale
# actual_result = out
np.save(os.path.join(args.model_dir, "tvm_result/%s_output_int32.npy" % unit_str), actual_result[0])
golden_result = np.load(os.path.join(args.model_dir, "pytorch_result/%s_output_float32.npy" % unit_str))
elif args.debug_unit == unit_str + "_input":
actual_result = hawq_utils_resnet.unpack_int4_to_int32(out)
np.save(os.path.join(args.model_dir, "tvm_result/%s_input_int4.npy" % unit_str), actual_result[0])
golden_result = np.load(os.path.join(args.model_dir, "pytorch_result/%s_input_int4.npy" % unit_str)).astype("int32")
else:
print("Error: Unsupported debug unit.")
print("Above is Pytorch result, under is TVM result")
tvm.testing.assert_allclose(golden_result, actual_result[0])
print(args.debug_unit + " is 100% matched !")
else:
module = tvm.contrib.graph_runtime.create(graph, lib, ctx=CTX)
#module = tvm.contrib.graph_executor.create(graph, lib, ctx=CTX)
module.set_input(**params)
module.set_input('data', input_data)
module.run()
tvm_output = module.get_output(0)
print(tvm_output.shape)
for b in range(batch_size):
top_categories = np.argsort(tvm_output.asnumpy()[b])
# Report top-5 classification results
print("\n prediction for sample {}".format(b))
print("\t#1:", synset[top_categories[-1]])
print("\t#2:", synset[top_categories[-2]])
print("\t#3:", synset[top_categories[-3]])
print("\t#4:", synset[top_categories[-4]])
print("\t#5:", synset[top_categories[-5]])
| 40.86036 | 157 | 0.601698 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,400 | 0.264579 |
69ae65d2b4f7488006dab65f0a909611be5333f5 | 2,061 | py | Python | felapps/apps/dataworkshop/dataworkshop.py | archman/felapps | 89532a592070d2a0cf07f0f2b4c723cbf1c1bd33 | [
"MIT"
] | 2 | 2018-04-01T14:37:39.000Z | 2021-03-12T04:16:12.000Z | felapps/apps/dataworkshop/dataworkshop.py | Archman/felapps | 89532a592070d2a0cf07f0f2b4c723cbf1c1bd33 | [
"MIT"
] | null | null | null | felapps/apps/dataworkshop/dataworkshop.py | Archman/felapps | 89532a592070d2a0cf07f0f2b4c723cbf1c1bd33 | [
"MIT"
] | 2 | 2016-07-10T11:14:33.000Z | 2019-07-06T05:42:10.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
DataWorkshop: application to handle data, e.g. generated from imageviewer
Author: Tong Zhang
Created: Sep. 23rd, 2015
"""
from ...utils import datautils
from ...utils import miscutils
from ...utils import funutils
from ...utils import resutils
import wx
import wx.lib.mixins.inspection as wit
import os
__version__ = miscutils.AppVersions().getVersion('dataworkshop')
__author__ = "Tong Zhang"
class InspectApp(wx.App, wit.InspectionMixin):
    """wx application with the widget-inspection tool enabled (CTRL+ALT+I)."""

    def OnInit(self):
        """Create and show the main DataWorkshop frame; always reports success."""
        self.Init()  # initialise the InspectionMixin machinery
        frame = datautils.DataWorkshop(
            None,
            config=None,
            title=u'DataWorkshop \u2014 Data Analysis Framwork (debug mode, CTRL+ALT+I)',
            appversion=__version__,
            style=wx.DEFAULT_FRAME_STYLE,
        )
        frame.Show()
        frame.SetIcon(resutils.dicon_s.GetIcon())
        self.SetTopWindow(frame)
        return True
def run(maximize=True, logon=False, debug=True):
    """Launch the DataWorkshop application.

    :param maximize: if True (default), create a resizable/maximizable frame;
        otherwise lock the frame size (no resize border, no maximize box).
    :param logon: passed to ``wx.App(redirect=...)``; when True, stdout/stderr
        are redirected to the file named 'log'.
    :param debug: if True (default), run through :class:`InspectApp`, which
        enables the wx inspection tool (CTRL+ALT+I).
    """
    if debug:
        app = InspectApp()
        app.MainLoop()
    else:
        app = wx.App(redirect=logon, filename='log')
        # Build the frame style once instead of duplicating the whole frame
        # construction in both branches (as the original did).
        style = wx.DEFAULT_FRAME_STYLE
        if not maximize:
            # Fixed-size window: strip the resize border and maximize box.
            style &= ~(wx.RESIZE_BORDER | wx.MAXIMIZE_BOX)
        # NOTE: the 'Framwork' typo is preserved from the original UI title.
        myframe = datautils.DataWorkshop(
            None,
            config=None,
            title=u'DataWorkshop \u2014 Data Analysis Framwork',
            appversion=__version__,
            style=style)
        myframe.Show()
        myframe.SetIcon(resutils.dicon_s.GetIcon())
        app.MainLoop()
if __name__ == '__main__':
run()
| 34.35 | 212 | 0.675885 | 606 | 0.294032 | 0 | 0 | 0 | 0 | 0 | 0 | 751 | 0.364386 |
69aeb1d75e0847f7d192e1d5f127d4458a56ea39 | 2,953 | py | Python | bayesian/auth.py | ameenfarooqi/fabric8-analytics-server | 6e34d8199d33223a25e33511c194679865a712ca | [
"Apache-2.0"
] | null | null | null | bayesian/auth.py | ameenfarooqi/fabric8-analytics-server | 6e34d8199d33223a25e33511c194679865a712ca | [
"Apache-2.0"
] | 14 | 2020-10-11T12:56:38.000Z | 2020-10-28T06:36:45.000Z | bayesian/auth.py | ameenfarooqi/fabric8-analytics-server | 6e34d8199d33223a25e33511c194679865a712ca | [
"Apache-2.0"
] | 19 | 2020-10-12T05:14:23.000Z | 2020-10-19T13:25:29.000Z | """Authorization token handling."""
import logging
from functools import wraps
from flask import g, request
from requests import get
from pydantic.error_wrappers import ValidationError
from bayesian.utility.user_utils import get_user, UserException, UserNotFoundException
from bayesian.utility.v2.sa_models import HeaderData
from bayesian.exceptions import HTTPError
from f8a_utils.user_token_utils import UserStatus
from .default_config import AUTH_URL
logger = logging.getLogger(__name__)
def get_access_token(service_name):
    """Return an access token for *service_name* from the auth service.

    Forwards the caller's ``Authorization`` header to ``AUTH_URL`` and asks
    for a token scoped to the given service.

    :param service_name: key of the target service (currently only 'github'
        is mapped; unknown names yield ``for=None`` in the request URL).
    :return: ``{"access_token": <token or None>}``.  The token is None when
        the auth service answers with a non-200 status or cannot be reached.
    """
    services = {'github': 'https://github.com'}
    url = '{auth_url}/api/token?for={service}'.format(
        auth_url=AUTH_URL, service=services.get(service_name))
    token = request.headers.get('Authorization')
    headers = {"Authorization": token}
    try:
        _response = get(url, headers=headers)
    except Exception:
        # Fix: the original fell through here and implicitly returned None,
        # while every other path returns a dict -- keep the return type stable.
        logger.error('Unable to connect to Auth service')
        return {"access_token": None}
    if _response.status_code == 200:
        return {"access_token": _response.json().get('access_token')}
    return {"access_token": None}
def validate_user(view):
    """Decorator: resolve the caller's user status from the request UUID.

    Stores the outcome in ``flask.g`` (``g.uuid``, ``g.user_status``) before
    invoking the wrapped view.  Raises :class:`HTTPError` (400) when the
    header carries a malformed UUID.
    """
    @wraps(view)
    def wrapper(*args, **kwargs):
        """Read uuid and decides user type based on its validity."""
        # Rule of UUID validation and setting user status ::
        # ==============================================================
        # UUID in request | UUID in RDS | RDS User State | User Status
        # ==============================================================
        # MISSING         |  -- NA --   |  -- NA --      | FREE
        # PRESENT         |  MISSING    |  -- NA --      | FREE
        # PRESENT         |  PRESENT    |  REGISTERED    | REGISTERED
        # PRESENT         |  PRESENT    |  !REGISTERED   | FREE
        # ==============================================================
        # By default set this to 'freetier' and uuid to None
        g.user_status = UserStatus.FREETIER
        g.uuid = None
        try:
            # HeaderData (pydantic) validates the UUID format; a bad value
            # raises ValidationError below.
            header_data = HeaderData(uuid=request.headers.get('uuid', None))
            if header_data.uuid:
                g.uuid = str(header_data.uuid)
                user = get_user(g.uuid)
                # Maps the stored status string onto the UserStatus enum.
                g.user_status = UserStatus[user.status]
        except ValidationError as e:
            # Malformed UUID in the header -> reject the request outright.
            raise HTTPError(400, "Not a valid uuid") from e
        except UserNotFoundException:
            # Unknown UUID: fall back to the FREETIER default set above.
            logger.warning("No User Found corresponding to UUID {}".format(header_data.uuid))
        except UserException:
            # Lookup failed for another reason: also keep the FREETIER default.
            logger.warning("Unable to get user status for uuid '{}'".format(header_data.uuid))
        logger.debug('For UUID: %s, got user type: %s final uuid: %s',
                     header_data.uuid, g.user_status, g.uuid)
        return view(*args, **kwargs)
    return wrapper
| 40.452055 | 94 | 0.585168 | 0 | 0 | 0 | 0 | 1,660 | 0.56214 | 0 | 0 | 1,122 | 0.379953 |
69af023e0e9453a8bc99e8621f0b707e6285701a | 96 | py | Python | FLOW007.py | ankitpipalia/codechef-solutions | d10e7f15b74a11655b0e53953a8e2bc7efbf7377 | [
"MIT"
] | 1 | 2022-01-23T08:13:17.000Z | 2022-01-23T08:13:17.000Z | FLOW007.py | ankitpipalia/codechef-solutions | d10e7f15b74a11655b0e53953a8e2bc7efbf7377 | [
"MIT"
] | null | null | null | FLOW007.py | ankitpipalia/codechef-solutions | d10e7f15b74a11655b0e53953a8e2bc7efbf7377 | [
"MIT"
] | null | null | null | tcase = int(input())
# CodeChef FLOW007: for each of the `tcase` test cases (counter read above),
# print the reverse of the given number; int() drops any leading zeros that
# the reversal produces.
while tcase:
    reversed_number = input()[::-1]  # renamed: original shadowed builtin `str`
    print(int(reversed_number))
    tcase -= 1
69b2168377003bbbd37472a46ea76cd577d96277 | 1,082 | py | Python | setup.py | ysatapathy23/TomoEncoders | 6f3f8c6dd088e4df968337e33a034a42d1f6c799 | [
"BSD-3-Clause"
] | null | null | null | setup.py | ysatapathy23/TomoEncoders | 6f3f8c6dd088e4df968337e33a034a42d1f6c799 | [
"BSD-3-Clause"
] | null | null | null | setup.py | ysatapathy23/TomoEncoders | 6f3f8c6dd088e4df968337e33a034a42d1f6c799 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Setuptools configuration for the tomo_encoders package.

@author: atekawade
"""
from setuptools import setup, find_packages
# NOTE(review): find_packages is imported but the package list below is
# maintained by hand -- confirm whether the explicit list is intentional.
setup(
    # Needed to silence warnings (and to be a worthwhile package)
    name='tomo_encoders',
    url='https://github.com/aniketkt/TomoEncoders',
    author='Aniket Tekawade',
    author_email='atekawade@anl.gov',
    # Needed to actually package something
    packages= ['tomo_encoders', 'tomo_encoders.neural_nets', 'tomo_encoders.misc', 'tomo_encoders.structures', 'tomo_encoders.tasks', 'tomo_encoders.rw_utils', 'tomo_encoders.reconstruction', 'tomo_encoders.labeling','tomo_encoders.mesh_processing'],
    # Needed for dependencies
    install_requires=['numpy', 'pandas', 'scipy', 'h5py', 'matplotlib', \
                      'opencv-python', 'scikit-image',\
                      'ConfigArgParse', 'tqdm', 'ipython', 'seaborn'],
    # Version string is read from the VERSION file at build time.
    # NOTE(review): the file handle from open('VERSION') is never closed.
    version=open('VERSION').read().strip(),
    license='BSD',
    description='Representation learning for latent encoding of morphology in 3D tomographic images',
    # long_description=open('README.md').read(),
)
| 38.642857 | 250 | 0.682994 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 761 | 0.703327 |
69b25733b76fd553592c2532c0ede532a6527a95 | 17,328 | py | Python | reports/executive_fullfilment_requests/entrypoint.py | ireneperezddc/connect-reports | 9a7d56255f3ea4989d28e45a759e04315eef5504 | [
"Apache-2.0"
] | 7 | 2021-03-31T13:45:10.000Z | 2022-02-08T05:48:21.000Z | reports/executive_fullfilment_requests/entrypoint.py | ireneperezddc/connect-reports | 9a7d56255f3ea4989d28e45a759e04315eef5504 | [
"Apache-2.0"
] | null | null | null | reports/executive_fullfilment_requests/entrypoint.py | ireneperezddc/connect-reports | 9a7d56255f3ea4989d28e45a759e04315eef5504 | [
"Apache-2.0"
] | 8 | 2021-01-14T13:57:52.000Z | 2022-02-18T09:12:52.000Z | # -*- coding: utf-8 -*-
#
# Copyright (c) 2021, CloudBlue
# All rights reserved.
#
import pathlib
from datetime import date
from tempfile import NamedTemporaryFile
from collections import namedtuple
import math
import copy
from connect.client import R
from plotly import graph_objects as go
from ..utils import (
convert_to_datetime,
get_dict_element,
get_value,
)
from .constants import (
COUNTRIES,
ELEMENTS_PER_CHART,
THRESHOLD,
)
Part = namedtuple('Part', ('start_index', 'end_index', 'part', 'total'))
def _get_requests(client, parameters):
final_status = ('approved', 'failed', 'revoked')
query = R()
query &= R().created.ge(parameters['date']['after'])
query &= R().created.le(parameters['date']['before'])
query &= R().status.oneof(final_status)
if parameters.get('product') and parameters['product']['all'] is False:
query &= R().asset.product.id.oneof(parameters['product']['choices'])
if parameters.get('rr_type') and parameters['rr_type']['all'] is False:
query &= R().type.oneof(parameters['rr_type']['choices'])
return client.requests.filter(query)
def _get_request_count_group_by_type(client, parameters):
final_status = (
'approved', 'failed', 'revoked',
)
rr_types = ('adjustment', 'purchase', 'change', 'suspend', 'resume', 'cancel')
filters = R().created.ge(parameters['date']['after'])
filters &= R().created.le(parameters['date']['before'])
filters &= R().status.oneof(final_status)
if parameters.get('product') and parameters['product']['all'] is False:
filters &= R().asset.product.id.oneof(parameters['product']['choices'])
if parameters.get('rr_type') and parameters['rr_type']['all'] is False:
rr_types = parameters['rr_type']['choices']
result = {}
for rtype in rr_types:
result[rtype] = client.requests.filter(filters & R().type.eq(rtype)).count()
return result, client.requests.filter(filters).count()
def _calculate_the_average_and_sort(report_data):
for key, value in report_data.items():
report_data[key]['avg'] = round(
sum(value['provision_times']) / len(value['provision_times']),
2,
)
return dict(sorted(report_data.items(), key=lambda d: d[1]['avg'], reverse=True))
def _generate_pie_chart(
labels,
values,
title=None,
portions_colors=None,
show_legend=False,
show_values=False,
):
"""
Function that creates a PIE graph using the plotly library. The styling has been preconfigured
but could be changed.
The styling could be customized, this means moving elements position, the font and their
colors, personalize background colors, etc. Take a look at the official documentation.
:param labels: the labels that you want to display per value. In our case we are going to use
the request types.
:param values: the values of each label element. In our case we are going to use the amount of
request of each type.
:param title: the graph title displayed in the top.
:param portion_colors: the customized colors for each portion.
:param show_legend: if you want to display the legend of each portion.
:param show_values: instead of % display values for each portion.
"""
layout = None
if title:
title_element = go.layout.Title(
text=title,
x=0.5,
font=go.layout.title.Font(
size=30,
family='Arial',
color='#797979',
),
)
layout = go.Layout(title=title_element)
pie = {
'labels': labels,
'values': values,
'marker': {'line': {'color': '#000000', 'width': 2}},
'textfont': go.pie.Textfont(size=25, family='Arial'),
'textposition': 'inside',
'sort': False,
'textinfo': 'percent+value',
}
if portions_colors:
pie['marker'].update({'colors': portions_colors})
if show_values:
pie.update({'textinfo': 'value'})
f = go.Figure(
data=go.Pie(**pie),
layout=layout,
)
f.update_layout(
autosize=False,
width=1200,
height=800,
showlegend=show_legend,
)
if show_legend:
f.update_layout(
legend={
'font': {'size': 25},
'orientation': 'h',
'yanchor': 'top',
'xanchor': 'center',
'x': 0.5,
'y': -0.3,
},
)
with NamedTemporaryFile(delete=False) as file:
f.write_image(file)
return pathlib.Path(file.name).as_uri()
def _generate_bar_chart(x, y, x_title, y_title):
"""
Function that generates a BAR chart using the plotly library. The styling has been preconfigured
but could be changed.
:param x: the x axis values, usually names. In our case will be product names.
:param y: the y axis values, usually numbers. In our case will be product provision time avg.
:param x_title: the x axis title. Products
:param y_title: the y axis title. <b>Processing time (days)</b>
"""
f = go.Figure()
f.add_trace(
go.Bar(
x=x,
y=y,
marker_color='rgb(158,202,225)',
marker_line_color='rgb(8,48,107)',
marker_line_width=1.5,
),
)
f.update_layout(
bargap=0,
showlegend=False,
width=1200,
height=800,
)
f.update_xaxes(title_text=x_title, tickangle=-90)
max_value = max(y) if y else 0
m = max_value * 1.25 if max_value > 0 else 1
f.update_yaxes(
title_text=y_title,
range=[0, m],
)
with NamedTemporaryFile(delete=False) as file:
f.write_image(file)
return pathlib.Path(file.name).as_uri()
def _generate_vertical_bar_chart_by_type(x, traces, x_title=None, y_title=None, showlegend=True):
"""
Function that generates a BAR chart using the plotly library. The styling has been preconfigured
but could be changed.
Each bar contains inside the amounts for each type. Each trace is a type.
:param x: the x axis values, usually names. In our case will be product names.
:param traces: the traces dict that must contain per each dict the values, the name and the
desired color.
:param x_title: the text that will be displayed in the x axis.
:param y_title: the text that will be displayed in the y axis.
:param showlegend: if we want to display the legend (true by default).
"""
f = go.Figure()
for trace in traces.values():
f.add_trace(
go.Bar(
y=trace['values'],
x=x,
name=trace['name'],
orientation='v',
marker={
'color': trace['color'],
},
),
)
f.update_layout(
barmode='stack',
bargap=0.5,
showlegend=showlegend,
)
f.update_xaxes(title_text=x_title if x_title else 'Products')
f.update_yaxes(title_text=y_title if y_title else 'Requests')
with NamedTemporaryFile(delete=False) as file:
f.write_image(file)
return pathlib.Path(file.name).as_uri()
def _generate_map_chart(countries, values):
"""
Function that generates a Choropleth Map chart using the plotly library. The styling has been
preconfigured but could be changed.
The color scale has 3 colors where red is the max, yellow -20% and green the lower.
:param countries: a list with all relevant countries to show.
:param values: a list with all values per each country.
"""
simple_colorscale = [
[0, 'rgb(173,255,47)'],
[0.8, 'rgb(255,255,0)'],
[1, 'rgb(255,10,10)'],
]
f = go.Figure()
f.add_trace(
go.Choropleth(
locationmode='country names',
locations=countries,
colorscale=simple_colorscale,
z=values,
),
)
f.update_layout(
width=1200,
height=800,
)
f.update_geos(
resolution=110,
showcoastlines=True,
showcountries=True,
showlakes=False,
showland=True,
landcolor='royalblue',
showocean=True,
oceancolor='white',
)
with NamedTemporaryFile(delete=False) as file:
f.write_image(file)
return pathlib.Path(file.name).as_uri()
def _get_main_account(client):
accounts = client.accounts.all()
main_account = accounts[0]
return main_account['name'], main_account['id']
def _split_chart_data(data_length):
expected_charts = math.ceil(data_length / ELEMENTS_PER_CHART)
if data_length == 0:
yield Part(0, 0, 0, 0)
else:
for n in range(0, expected_charts):
start_range = n * ELEMENTS_PER_CHART
end_range = min(data_length, (n + 1) * ELEMENTS_PER_CHART)
yield Part(start_range, end_range, int(n + 1), expected_charts)
def _generate_pie_chart_from_datat(client, parameters):
r, total = _get_request_count_group_by_type(client, parameters)
return _generate_pie_chart(
labels=list(r.keys()),
values=list(r.values()),
show_legend=True,
), total
def _generate_bar_charts_from_data(report_data, x_title):
final_result = _calculate_the_average_and_sort(report_data)
x = []
y = []
for value in final_result.values():
if value['avg'] >= THRESHOLD:
x.append(value['name'])
y.append(value['avg'])
charts = []
parts = _split_chart_data(len(x))
for part in parts:
charts.append(
_generate_bar_chart(
x=x[part.start_index:part.end_index],
y=y[part.start_index:part.end_index],
x_title=f'{x_title} (chart {part.part} of {part.total})',
y_title='Processing time (days)',
),
)
return charts
def _generate_vertical_bar_charts_per_type_from_data(report_data):
x = []
traces = {
'cancel': {'values': [], 'name': 'Cancel', 'color': 'red'},
'adjustment': {'values': [], 'name': 'Adjustment', 'color': 'yellow'},
'purchase': {'values': [], 'name': 'Purchase', 'color': 'purple'},
'change': {'values': [], 'name': 'Change', 'color': 'blue'},
'suspend': {'values': [], 'name': 'Suspend', 'color': 'green'},
'resume': {'values': [], 'name': 'Resume', 'color': 'gray'},
}
charts = []
data_length = len(list(report_data['product'].keys()))
parts = _split_chart_data(data_length)
ordered_report_data = dict(
sorted(
report_data['product'].items(),
key=lambda d: d[1]['amount_per_type']['total'],
reverse=True,
),
)
for part in parts:
x = []
partial_traces = copy.deepcopy(traces)
for product in list(ordered_report_data.values())[part.start_index:part.end_index]:
x.append(product['name'])
for t in ('cancel', 'adjustment', 'purchase', 'change', 'suspend', 'resume'):
partial_traces[t]['values'].append(product['amount_per_type'][t])
charts.append(
_generate_vertical_bar_chart_by_type(
x=x,
traces=partial_traces,
x_title=f'Products (chart {part.part} of {part.total})',
),
)
return charts
def _generate_choropleth_map_and_table_from_data(report_data):
countries = list(report_data['country'].keys())
values = [element['amount'] for element in list(report_data['country'].values())]
chart = _generate_map_chart(countries, values)
result = {}
for row in zip(countries, values):
result[row[0]] = row[1]
ordered_result = dict(sorted(result.items(), key=lambda d: d[1], reverse=True))
table = []
n = 1
for country, amount in ordered_result.items():
table.append({'number': n, 'country': country.capitalize(), 'amount': amount})
n += 1
return chart, table
def _process_vendor_data(report_data, request):
vendor_id = get_value(request['asset']['connection'], 'vendor', 'id')
vendor = report_data['vendor'].get(
vendor_id,
{
'name': get_value(request['asset']['connection'], 'vendor', 'name'),
'data': [],
'provision_times': [],
'amount_per_type': {
'cancel': 0,
'adjustment': 0,
'purchase': 0,
'change': 0,
'suspend': 0,
'resume': 0,
'total': 0,
},
},
)
vendor['amount_per_type'][request['type']] += 1
vendor['amount_per_type']['total'] += 1
vendor['provision_times'].append(request['provision_time'])
report_data['vendor'][vendor_id] = vendor
def _process_product_data(report_data, request):
product_id = request['asset']['product']['id']
product = report_data['product'].get(
product_id,
{
'name': request['asset']['product']['name'],
'data': [],
'provision_times': [],
'amount_per_type': {
'cancel': 0,
'adjustment': 0,
'purchase': 0,
'change': 0,
'suspend': 0,
'resume': 0,
'total': 0,
},
},
)
product['amount_per_type'][request['type']] += 1
product['amount_per_type']['total'] += 1
product['provision_times'].append(request['provision_time'])
report_data['product'][product_id] = product
def _process_country_data(report_data, request):
country = get_dict_element(request, 'asset', 'tiers', 'customer', 'contact_info', 'country')
if country:
country_name = COUNTRIES[country.upper()]
country_data = report_data['country'].get(
country_name,
{'amount': 0},
)
country_data['amount'] += 1
report_data['country'][country_name] = country_data
def generate(
client=None,
parameters=None,
progress_callback=None,
renderer_type=None,
extra_context=None,
):
requests = _get_requests(client, parameters)
report_data = {
'product': {},
'vendor': {},
'country': {},
}
progress = 0
total = requests.count()
for request in requests:
request['provision_time'] = (
convert_to_datetime(request.get('updated'))
- convert_to_datetime(request.get('created'))
).days
_process_vendor_data(report_data, request)
_process_product_data(report_data, request)
_process_country_data(report_data, request)
progress += 1
progress_callback(progress, total)
pdf_reports = {'charts': []}
chart, total = _generate_pie_chart_from_datat(client, parameters)
pdf_reports['charts'].append(
{
'title': '1. Distribution of requests per type',
'description':
'Total amount of requests within the period from '
f"{parameters['date']['after'].split('T')[0]} to "
f"{parameters['date']['before'].split('T')[0]} : "
f"<b>{total}<b>.",
'images': [chart],
},
)
chart, table = _generate_choropleth_map_and_table_from_data(report_data)
pdf_reports['charts'].append(
{
'title': '2. Requests per country',
'description':
'Following charts represents the request amount per country.'
' The countries that have more than the 20% are near red.',
'table': table,
'images': [chart],
},
)
charts = _generate_vertical_bar_charts_per_type_from_data(report_data)
pdf_reports['charts'].append(
{
'title': '3. Requests per product per type',
'description':
'Following charts represents the request amount per product.'
' Bar contains the distribution of requests per type.',
'images': charts,
},
)
charts = _generate_bar_charts_from_data(report_data['vendor'], 'Vendors')
pdf_reports['charts'].append(
{
'title': '4. Averge Request Processing time (per vendor)',
'description':
'Following charts represents the average processing time of requests per vendor.',
'images': charts,
},
)
charts = _generate_bar_charts_from_data(report_data['product'], 'Products')
pdf_reports['charts'].append(
{
'title': '5. Averge Request Processing time (per product)',
'description':
'Following charts represents the average processing time of requests per product.'
' Bar contains the distribution of requests per type.',
'images': charts,
},
)
account_name, account_id = _get_main_account(client)
pdf_reports['range'] = {
'start': parameters['date']['after'].split('T')[0],
'end': parameters['date']['before'].split('T')[0],
}
pdf_reports['generation_date'] = date.today().strftime('%B %d, %Y')
return pdf_reports
| 32.029575 | 100 | 0.590028 | 0 | 0 | 407 | 0.023488 | 0 | 0 | 0 | 0 | 5,549 | 0.320233 |
69b4c48501471d49ec2d4fd4b79c4ecc8adb3282 | 204 | py | Python | util/data/gen/BloonsTD6.exe.py | 56kyle/bloons_auto | 419d55b51d1cddc49099593970adf1c67985b389 | [
"MIT"
] | null | null | null | util/data/gen/BloonsTD6.exe.py | 56kyle/bloons_auto | 419d55b51d1cddc49099593970adf1c67985b389 | [
"MIT"
] | null | null | null | util/data/gen/BloonsTD6.exe.py | 56kyle/bloons_auto | 419d55b51d1cddc49099593970adf1c67985b389 | [
"MIT"
] | null | null | null | symbols = []
# Exported symbols extracted from BloonsTD6.exe.  The two names appear to be
# the well-known hybrid-graphics opt-in flags (AMD PowerXpress / NVIDIA
# Optimus) -- TODO confirm; 'address' holds the hex address reported by the
# extraction tool as a string.
exports = [{'type': 'function', 'name': 'AmdPowerXpressRequestHighPerformance', 'address': '0x7ff66dd14004'}, {'type': 'function', 'name': 'NvOptimusEnablement', 'address': '0x7ff66dd14000'}]
69b54d1f2fc387e83bb92bd862115b2d1c6f2876 | 7,728 | py | Python | ROS/src/spiderplan_proxy/src/spiderplan_proxy.py | uwe-koeckemann/SpiderPlan | ae8666967ee9e4d3563c43934823f65e72f1d9ce | [
"MIT"
] | null | null | null | ROS/src/spiderplan_proxy/src/spiderplan_proxy.py | uwe-koeckemann/SpiderPlan | ae8666967ee9e4d3563c43934823f65e72f1d9ce | [
"MIT"
] | null | null | null | ROS/src/spiderplan_proxy/src/spiderplan_proxy.py | uwe-koeckemann/SpiderPlan | ae8666967ee9e4d3563c43934823f65e72f1d9ce | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#Copyright (c) 2015 Uwe Köckemann <uwe.kockemann@oru.se>
#Permission is hereby granted, free of charge, to any person obtaining
#a copy of this software and associated documentation files (the
#"Software"), to deal in the Software without restriction, including
#without limitation the rights to use, copy, modify, merge, publish,
#distribute, sublicense, and/or sell copies of the Software, and to
#permit persons to whom the Software is furnished to do so, subject to
#the following conditions:
#The above copyright notice and this permission notice shall be
#included in all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
#EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
#MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
#NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
#LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
#WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import fileinput
import socket
import os
import signal
import sys
import time
import rospy
import actionlib
from std_msgs.msg import *
from geometry_msgs.msg import *
from actionlib_tutorials.msg import *
import ROSMessageConversion
import ROSMessageConversion as msg_conv
# --- Module-level state shared between the TCP request loop and callbacks ---
currentTicket = 0        # NOTE(review): appears unused in this file -- confirm
nextFreeTicket = 0       # ticket counter (see give_back_ticket)
lastMessage = {}         # latest data per key: topic name, or (requestID, phase)
publishers = {}          # topic name -> rospy.Publisher
publisherMsg = {}        # topic name -> message type name registered for it
subscriberVar = {}       # topic name -> variable name used when serializing msgs
nextRequestID = 0        # next action-goal request ID (see send_goal)
actionClientMap = {}     # (server_name, action_name) -> SimpleActionClient
someone_writing = False  # NOTE(review): appears unused in this file -- confirm
def give_back_ticket():
    """Return one previously handed-out ticket to the pool.

    Decrements the module-level ``nextFreeTicket`` counter, never going
    below zero.

    Fix: the original body assigned to ``nextFreeTicket`` without a
    ``global`` declaration, which made the name local and raised
    UnboundLocalError on the very first read (``nextFreeTicket > 0``).
    """
    global nextFreeTicket
    if nextFreeTicket > 0:
        nextFreeTicket -= 1
# Provide callbacks with an ID:
class CallbackProvider:
    """actionlib callback bundle tied to one request ID.

    Each callback records its payload in the module-level ``lastMessage``
    dict under ``(requestID, phase)`` so the TCP request loop can poll it.
    """
    def __init__(self,requestID):
        # ID under which this goal's progress is stored in lastMessage.
        self.requestID = requestID
    def done_cb(self,state,data):
        # Goal finished: store the serialized result message.
        lastMessage[(self.requestID,"done")] = msg_conv.get_str_from_ros_msg("done", data)
    def active_cb(self):
        # Goal became active on the action server.
        lastMessage[(self.requestID,"active")] = True
    def feedback_cb(self,data):
        # Periodic feedback: store the serialized feedback message.
        lastMessage[(self.requestID,"feedback")] = msg_conv.get_str_from_ros_msg("feedback", data)
# Provide callbacks that know their topic name:
class SubscriberCallbackProvider:
    """Subscriber callback bound to one topic.

    Stores each incoming message, serialized under the variable name that
    was registered for the topic in ``subscriberVar``, into ``lastMessage``.
    """
    def __init__(self,topicName):
        # Topic this provider was created for; also the lastMessage key.
        self.topicName = topicName
    def callback(self,data):
        # Keep only the most recent message per topic.
        lastMessage[self.topicName] = msg_conv.get_str_from_ros_msg(subscriberVar[self.topicName],data)
def reg_simple_action_client(server_name,action_name):
    """Create and cache a SimpleActionClient for an action server.

    Blocks until the action server is available, then stores the client in
    the module-level ``actionClientMap`` under ``(server_name, action_name)``.
    """
    print "Registering action", action_name, " at ", server_name
    print rospy.get_name()
    # The action class is looked up by name via the conversion module's map.
    client = actionlib.SimpleActionClient(server_name, msg_conv.rosClassMap[action_name])
    client.wait_for_server()
    actionClientMap[(server_name,action_name)] = client
def send_goal(server_name,action_name,goal_msg_str):
    """Send a goal to a previously registered action client.

    :param server_name: action server name used at registration time.
    :param action_name: action type name used at registration time.
    :param goal_msg_str: serialized goal understood by
        ``ROSMessageConversion.create_ros_msg_from_str``.
    :return: the integer request ID under which the CallbackProvider records
        this goal's progress in ``lastMessage``.
    """
    global nextRequestID
    cbp = CallbackProvider(nextRequestID)
    # Fix: the original incremented nextRequestID a second time after
    # send_goal(), so every other ID was skipped.  One increment per goal
    # is enough to keep request IDs unique.
    nextRequestID += 1
    print(goal_msg_str)
    goal = ROSMessageConversion.create_ros_msg_from_str(goal_msg_str)[1]
    client = actionClientMap[(server_name, action_name)]
    client.send_goal(goal, feedback_cb=cbp.feedback_cb, done_cb=cbp.done_cb, active_cb=cbp.active_cb)
    return cbp.requestID
def subscribe(topicName,msgType,varName):
    """Subscribe to *topicName* and remember messages under *varName*.

    Registers the variable name in ``subscriberVar`` (used by the callback
    when serializing) and attaches a per-topic callback provider.
    """
    print "SUBSCRIBE_TO:", topicName, msgType, varName
    subscriberVar[topicName] = varName
    #rospy.Subscriber(topicName.replace("/",""), msg_conv.rosClassMap.get(msgType), callback)
    cbp = SubscriberCallbackProvider(topicName)
    # Message class is resolved by name; .get returns None for unknown types.
    rospy.Subscriber(topicName, msg_conv.rosClassMap.get(msgType), cbp.callback)
def publish(topicName,msgType):
    """Create a publisher for *topicName* with message type *msgType*.

    The publisher and its message type name are cached in the module-level
    ``publishers`` / ``publisherMsg`` dicts for later use by send_msg().
    """
    #publishers[topicName] = rospy.Publisher(topicName.replace("/",""), msg_conv.rosClassMap.get(msgType), queue_size=10)
    publishers[topicName] = rospy.Publisher(topicName, msg_conv.rosClassMap.get(msgType), queue_size=10)
    publisherMsg[topicName] = msgType
def send_msg(topicName,msg):
    """Deserialize *msg* and publish it on the previously registered topic."""
    publishers[topicName].publish(msg_conv.create_ros_msg_from_str(msg)[1])
def signal_handler(signal, frame):
    """SIGINT handler: close the open connection and listening socket, exit.

    NOTE(review): relies on the module-level ``conn``/``s`` sockets; ``conn``
    only exists after the first client has connected, so a Ctrl+C before any
    connection would raise NameError here -- confirm whether that matters.
    The parameter also shadows the imported ``signal`` module inside this
    function.
    """
    print('Caught Ctrl+C. Closing socket...')
    conn.close()
    s.close()
    sys.exit(0)
def ros_service_call(arg_msgs):
    """Perform a ROS service call described by the serialized *arg_msgs*.

    The service name is parsed from the message string (first whitespace
    token after the leading character).  Returns the serialized response
    string, or None implicitly when the call raises ServiceException.
    """
    request = msg_conv.create_ros_msg_from_str(arg_msgs)[1]
    # arg_msgs starts with a delimiter character; the service name follows.
    service_name = arg_msgs[1:].split(" ")[0]
    # Blocks until the service is advertised.
    rospy.wait_for_service(service_name)
    try:
        serviceProxy = rospy.ServiceProxy(service_name, ROSMessageConversion.rosServiceMap[service_name])
        print "Request:\n", request
        response = serviceProxy.call(request)
        responseStr = ROSMessageConversion.get_str_from_ros_msg("response",response)
        #responseStr = ROSMessageConversion.split(responseStr[1:len(responseStr)-1])[2]
        print "Response:\n",responseStr
        return responseStr
    except rospy.ServiceException, e:
        # NOTE: failure path returns None; the caller sends the return value
        # over the socket, so callers should guard against None.
        print "Service call failed: %s"%e
# Install Ctrl+C handler so the sockets are closed cleanly on exit.
signal.signal(signal.SIGINT, signal_handler)
# TCP endpoint on which SpiderPlan connects to this proxy.
TCP_IP = '127.0.0.1'
TCP_PORT = 6790
BUFFER_SIZE = 1024
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# SO_REUSEADDR lets the proxy rebind immediately after a restart.
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((TCP_IP, TCP_PORT))
s.listen(1)
rospy.init_node("SpiderPlanROSProxy", anonymous=True)
# Topic names arriving in requests are resolved relative to this namespace.
ros_namespace = rospy.get_namespace()
print ros_namespace
# Numeric request-type codes of the proxy's wire protocol (first field of
# every request line).
SUBSCRIBE_TO = 0
PUBLISH_TO = 1
READ_MSG = 2
SEND_MSG = 3
SERVICE_CALL = 4
REGISTER_ACTION = 5
SEND_GOAL = 6
HAS_STARTED = 7
HAS_FINISHED = 8
# Field separator used in request strings sent by SpiderPlan.
splitStr = "<//>"
while 1:
#print "Waiting..."
conn, addr = s.accept()
startAll = time.time()
#print 'Connection address:', addr
data = ""
while not "\n" in data:
data += conn.recv(BUFFER_SIZE)
data = data.replace("\n","")
print 'Request:', data.replace(splitStr,"|")
reqType = int(data.split(splitStr)[0])
returnMessage = ""
if reqType == SUBSCRIBE_TO:
topicName = ros_namespace + data.split(splitStr)[1]
topicName = topicName.replace("//", "/")
msgType = data.split(splitStr)[2]
varName = data.split(splitStr)[3]
subscribe(topicName,msgType,varName)
returnMessage = "<OK>"
elif reqType == PUBLISH_TO:
topicName = ros_namespace + "/"+data.split(splitStr)[1]
topicName = topicName.replace("//", "/")
msgType = data.split(splitStr)[2]
publish(topicName,msgType)
returnMessage = "<OK>"
elif reqType == READ_MSG:
topicName = ros_namespace + data.split(splitStr)[1]
if topicName in lastMessage.keys():
returnMessage = lastMessage[topicName]
lastMessage[topicName] = "<NONE>"
else:
returnMessage = "<NONE>"
elif reqType == SEND_MSG:
topicName = ros_namespace + data.split(splitStr)[1]
#topicName = topicName.replace("//", "/")
msg = data.split(splitStr)[2]
send_msg(topicName,msg)
returnMessage = "<OK>"
elif reqType == SERVICE_CALL:
request = data.split(splitStr)[1]
ros_service_call(request)
returnMessage = ros_service_call(request)
elif reqType == REGISTER_ACTION:
server_name = data.split(splitStr)[1]
action_name = data.split(splitStr)[2]
reg_simple_action_client(server_name,action_name)
returnMessage = "<OK>"
elif reqType == SEND_GOAL:
server_name = data.split(splitStr)[1]
action_name = data.split(splitStr)[2]
goal_msg_str = data.split(splitStr)[3]
requestID = send_goal(server_name,action_name,goal_msg_str)
returnMessage = str(requestID)
elif reqType == HAS_STARTED:
requestID = int(data.split(splitStr)[1])
if (requestID,"active") in lastMessage.keys():
returnMessage = "true"
else:
returnMessage = "false"
elif reqType == HAS_FINISHED:
requestID = int(data.split(splitStr)[1])
if (requestID,"done") in lastMessage.keys():
returnMessage = lastMessage[(requestID,"done")]
else:
returnMessage = "false"
elif reqType == "get_feedback":
requestID = int(data.split(splitStr)[1])
if (requestID,"feedback") in lastMessage.keys():
returnMessage = lastMessage[(requestID,"feedback")]
else:
returnMessage = "<NONE>"
conn.send(returnMessage)
conn.close()
endAll = time.time()
print 'Response: %s (took %.2fs)' % (returnMessage,endAll-startAll)
| 29.38403 | 118 | 0.749482 | 618 | 0.079959 | 0 | 0 | 0 | 0 | 0 | 0 | 1,987 | 0.257084 |
69b6e34070f0bb19eb726399767000b584136a44 | 6,979 | py | Python | components/gpio_control/GPIODevices/simple_button.py | steffakasid/RPi-Jukebox-RFID | 33520f81837710d88fa849c033676f274ebf4b59 | [
"MIT"
] | 1,010 | 2017-03-09T10:36:41.000Z | 2022-03-31T01:23:47.000Z | components/gpio_control/GPIODevices/simple_button.py | steffakasid/RPi-Jukebox-RFID | 33520f81837710d88fa849c033676f274ebf4b59 | [
"MIT"
] | 1,205 | 2017-06-08T11:12:47.000Z | 2022-03-27T19:02:06.000Z | components/gpio_control/GPIODevices/simple_button.py | steffakasid/RPi-Jukebox-RFID | 33520f81837710d88fa849c033676f274ebf4b59 | [
"MIT"
] | 421 | 2017-05-13T19:39:57.000Z | 2022-03-27T21:18:03.000Z | import time
from signal import pause
import logging
import RPi.GPIO as GPIO
# Pins are addressed by Broadcom (BCM) GPIO numbers throughout this module.
GPIO.setmode(GPIO.BCM)
logger = logging.getLogger(__name__)
# Translation tables between the string spellings used in configuration and
# the RPi.GPIO constants -- and back again, for logging/printing.
map_edge_parse = {'falling':GPIO.FALLING, 'rising':GPIO.RISING, 'both':GPIO.BOTH}
map_pull_parse = {'pull_up':GPIO.PUD_UP, 'pull_down':GPIO.PUD_DOWN, 'pull_off':GPIO.PUD_OFF}
map_edge_print = {GPIO.FALLING: 'falling', GPIO.RISING: 'rising', GPIO.BOTH: 'both'}
map_pull_print = {GPIO.PUD_UP:'pull_up', GPIO.PUD_DOWN: 'pull_down', GPIO.PUD_OFF: 'pull_off'}
def parse_edge_key(edge):
    """Normalize *edge* to an RPi.GPIO edge constant.

    Accepts either an RPi.GPIO constant (returned unchanged) or one of the
    strings 'falling'/'rising'/'both' (case-insensitive).

    :raises KeyError: if *edge* is neither a known constant nor a known string.
    """
    if edge in [GPIO.FALLING, GPIO.RISING, GPIO.BOTH]:
        return edge
    try:
        return map_edge_parse[edge.lower()]
    except KeyError:
        # Fix: the original assigned ``result = edge`` immediately before
        # raising (dead store); raise the descriptive error directly.
        raise KeyError('Unknown Edge type {edge}'.format(edge=edge))
def parse_pull_up_down(pull_up_down):
    """Normalize *pull_up_down* to an RPi.GPIO pull-up/down constant.

    Accepts either an RPi.GPIO constant (returned unchanged) or one of the
    strings 'pull_up'/'pull_down'/'pull_off' (case-sensitive, matching the
    keys of ``map_pull_parse``).

    :raises KeyError: if the value is neither a known constant nor a known key.
    """
    if pull_up_down in [GPIO.PUD_UP, GPIO.PUD_DOWN, GPIO.PUD_OFF]:
        return pull_up_down
    try:
        return map_pull_parse[pull_up_down]
    except KeyError:
        # Fix: dead ``result = pull_up_down`` assignment before the raise
        # removed; raise the descriptive error directly.
        raise KeyError('Unknown Pull Up/Down type {pull_up_down}'.format(pull_up_down=pull_up_down))
def print_edge_key(edge):
    """Return the readable name for a GPIO edge constant, echoing unknown values."""
    # dict.get with a default replaces the try/except KeyError dance.
    return map_edge_print.get(edge, edge)
def print_pull_up_down(pull_up_down):
    """Return the readable name for a GPIO pull constant, echoing unknown values."""
    # dict.get with a default replaces the try/except KeyError dance.
    return map_pull_print.get(pull_up_down, pull_up_down)
# This function takes a holding time (fractional seconds), a channel, a GPIO state and an action reference (function).
# It checks if the GPIO is in the state since the function was called. If the state
# changes it return False. If the time is over the function returns True.
def checkGpioStaysInState(holdingTime, gpioChannel, gpioHoldingState):
    """Return True iff the pin stays in gpioHoldingState for holdingTime seconds.

    Polls the pin every 0.1s and returns False as soon as a read deviates
    from the expected state before the holding time has elapsed.
    """
    # Get a reference start time (https://docs.python.org/3/library/time.html#time.perf_counter)
    startTime = time.perf_counter()
    # Continously check if time is not over
    while True:
        time.sleep(0.1)
        currentState = GPIO.input(gpioChannel)
        if holdingTime < (time.perf_counter() - startTime):
            break
        # Return if state does not match holding state
        if (gpioHoldingState != currentState):
            return False
        # Else: Wait
    # The read taken on the final iteration was never checked inside the loop
    # (the time check breaks first), so it must be tested once more here.
    if (gpioHoldingState != currentState):
        return False
    return True
class SimpleButton:
    """A debounced GPIO push button with optional long-press ("hold") modes.

    hold_mode values (anything else means: fire `action` on every edge):
      'Repeat'           - fire `action` immediately, then again after every
                           `hold_time` seconds while the button stays pressed.
      'Postpone'         - fire `action` once, only after `hold_time` seconds
                           of continuous holding.
      'SecondFunc'       - fire `action` immediately and `action2` once after
                           `hold_time` seconds of continuous holding.
      'SecondFuncRepeat' - fire `action` immediately and `action2` repeatedly
                           every `hold_time` seconds while held.
    """

    def __init__(self, pin, action=lambda *args: None, action2=lambda *args: None, name=None,
                 bouncetime=500, antibouncehack=False, edge='falling', hold_time=.3, hold_mode=None, pull_up_down='pull_up'):
        self.edge = parse_edge_key(edge)
        self.hold_time = hold_time
        self.hold_mode = hold_mode
        # NOTE(review): pull_up is hard-coded True even when pull_up_down is
        # 'pull_down', which would invert is_pressed for pull-down wiring.
        # Left unchanged to preserve existing behavior - confirm before fixing.
        self.pull_up = True
        self.pull_up_down = parse_pull_up_down(pull_up_down)
        self.pin = pin
        self.name = name
        self.bouncetime = bouncetime
        self.antibouncehack = antibouncehack
        GPIO.setup(self.pin, GPIO.IN, pull_up_down=self.pull_up_down)
        self._action = action
        self._action2 = action2
        GPIO.add_event_detect(self.pin, edge=self.edge, callback=self.callbackFunctionHandler,
                              bouncetime=self.bouncetime)
        self.callback_with_pin_argument = False

    def callbackFunctionHandler(self, *args):
        """Entry point invoked by RPi.GPIO on an edge event.

        Strips the channel argument RPi.GPIO prepends (unless configured
        otherwise), applies the optional software debounce, then dispatches
        either to the plain callback or to the long-press handler.
        """
        if len(args) > 0 and args[0] == self.pin and not self.callback_with_pin_argument:
            logger.debug('Remove pin argument by callbackFunctionHandler - args before: {}'.format(args))
            args = args[1:]
            logger.debug('args after: {}'.format(args))
        if self.antibouncehack:
            # Extra software debounce: re-sample after 100ms and bail out if
            # the pin is no longer low.
            time.sleep(0.1)
            inval = GPIO.input(self.pin)
            if inval != GPIO.LOW:
                return None
        if self.hold_mode in ('Repeat', 'Postpone', 'SecondFunc', 'SecondFuncRepeat'):
            return self.longPressHandler(*args)
        else:
            logger.info('{}: execute callback'.format(self.name))
            return self.when_pressed(*args)

    @property
    def when_pressed(self):
        """Primary short-press callback."""
        logger.info('{}: action'.format(self.name))
        return self._action

    @property
    def when_held(self):
        """Secondary callback used by the 'SecondFunc*' hold modes."""
        logger.info('{}: action2'.format(self.name))
        return self._action2

    @when_pressed.setter
    def when_pressed(self, func):
        # Fixed: the '{}' placeholder was never filled in the original
        # ("logger.info('{}: set when_pressed')" logged the literal braces).
        logger.info('{}: set when_pressed'.format(self.name))
        self._action = func
        # Re-register event detection so the new callback takes effect.
        GPIO.remove_event_detect(self.pin)
        logger.info('add new action')
        GPIO.add_event_detect(self.pin, edge=self.edge, callback=self.callbackFunctionHandler, bouncetime=self.bouncetime)

    def set_callbackFunction(self, callbackFunction):
        """Compatibility wrapper around the when_pressed setter."""
        self.when_pressed = callbackFunction

    def longPressHandler(self, *args):
        """Dispatch according to hold_mode once an edge event arrives."""
        logger.info('{}: longPressHandler, mode: {}'.format(self.name, self.hold_mode))
        # instant action (except Postpone mode)
        if self.hold_mode != "Postpone":
            self.when_pressed(*args)
        # action(s) after hold_time
        if self.hold_mode == "Repeat":
            # Repeated call of main action (multiple times if button is held long enough)
            while checkGpioStaysInState(self.hold_time, self.pin, GPIO.LOW):
                self.when_pressed(*args)
        elif self.hold_mode == "Postpone":
            # Postponed call of main action (once)
            if checkGpioStaysInState(self.hold_time, self.pin, GPIO.LOW):
                self.when_pressed(*args)
                # Swallow the rest of the hold so the action does not re-fire.
                while checkGpioStaysInState(self.hold_time, self.pin, GPIO.LOW):
                    pass
        elif self.hold_mode == "SecondFunc":
            # Call of secondary action (once)
            if checkGpioStaysInState(self.hold_time, self.pin, GPIO.LOW):
                self.when_held(*args)
                while checkGpioStaysInState(self.hold_time, self.pin, GPIO.LOW):
                    pass
        elif self.hold_mode == "SecondFuncRepeat":
            # Repeated call of secondary action (multiple times if button is held long enough)
            while checkGpioStaysInState(self.hold_time, self.pin, GPIO.LOW):
                self.when_held(*args)

    def __del__(self):
        logger.debug('remove event detection')
        GPIO.remove_event_detect(self.pin)

    @property
    def is_pressed(self):
        """True when the button is pressed (input low with a pull-up)."""
        if self.pull_up:
            return not GPIO.input(self.pin)
        return GPIO.input(self.pin)

    def __repr__(self):
        return '<SimpleButton-{}(pin={},edge={},hold_mode={},hold_time={},bouncetime={},antibouncehack={},pull_up_down={})>'.format(
            self.name, self.pin, print_edge_key(self.edge), self.hold_mode, self.hold_time, self.bouncetime,self.antibouncehack,print_pull_up_down(self.pull_up_down)
        )
if __name__ == "__main__":
    # Manual smoke test: wire a button to the entered BCM pin and watch the
    # callback fire (with 'Repeat' hold mode) until the process is interrupted.
    print('please enter pin no to test')
    pin = int(input())
    func = lambda *args: print('FunctionCall with {}'.format(args))
    btn = SimpleButton(pin=pin, action=func, hold_mode='Repeat')
    pause()
| 38.136612 | 165 | 0.649233 | 4,381 | 0.62774 | 0 | 0 | 704 | 0.100874 | 0 | 0 | 1,458 | 0.208912 |
69b843ce0465649e724dde4ea3055810a4e4c8f5 | 10,308 | py | Python | pyatdllib/ui/immaculater.py | lisagorewitdecker/immaculater | fe46d282ae1d6325d67ebcf8f2b3d3b95580d5e7 | [
"Apache-2.0"
] | null | null | null | pyatdllib/ui/immaculater.py | lisagorewitdecker/immaculater | fe46d282ae1d6325d67ebcf8f2b3d3b95580d5e7 | [
"Apache-2.0"
] | null | null | null | pyatdllib/ui/immaculater.py | lisagorewitdecker/immaculater | fe46d282ae1d6325d67ebcf8f2b3d3b95580d5e7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
"""A command-line interface to pyatdl, yet another to-do list.
The most important command is 'help'.
The key notions are Action, Context, Folder, and Project. The commands use the
filesystem idiom. Actions and Contexts are like regular files. Projects and
Folders are like directories.
This was the first UI used in development. Having a command line makes for
readable functional tests. Instead of a bunch of python, you can specify lines
of commands at the 'immaculater>' prompt.
All future user interfaces are expected to translate things into this
command-line interface, adding new commands if necessary.
After importing this file, you must call RegisterUICmds.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import base64
import hashlib
import os
import random
import six
from six.moves import input
from six.moves import xrange
import tempfile
import gflags as flags # https://code.google.com/p/python-gflags/ now called abseil-py
from third_party.google.apputils.google.apputils import app
from third_party.google.apputils.google.apputils import appcommands
from google.protobuf import text_format
from . import serialization
from . import state
from . import uicmd
FLAGS = flags.FLAGS
def _SingletonDatabasePath(): # pylint: disable=missing-docstring
# If there are two versions of pyatdl installed, this must vary between the two:
pyatld_installation_path = os.path.dirname(os.path.abspath(__file__))
try:
return os.path.join(
tempfile.gettempdir(),
hashlib.md5(pyatld_installation_path.encode('utf-8')).hexdigest(),
'saves',
'pyatdl_ToDoList_singleton.protobuf')
except NotImplementedError: # google_appengine only provides TemporaryFile
return None
# Where the serialized ToDoList is persisted between runs.
flags.DEFINE_string(
    'database_filename',
    _SingletonDatabasePath(),
    'Path of the save file, currently a serialized protobuf (see '
    'https://developers.google.com/protocol-buffers/docs/overview) '
    'of type pyatdl.ToDoList. If this file\'s '
    'directory does not exist, it will be created. If the file does not exist, '
    'it will be created.')
flags.DEFINE_string(
    'pyatdl_prompt',
    'immaculater> ',
    'During interactive use, what text do you want to appear as the command line prompt (like bash\'s $PS1)?')
# Re-export the key flags of our dependencies so they appear in --help here.
flags.ADOPT_module_key_flags(state)
flags.ADOPT_module_key_flags(uicmd)
class Error(Exception):
  """Base class for this module's exceptions; catch this to handle them all."""
class NoCommandByThatNameExistsError(Error):
  """No such command found."""
class BadArgsForCommandError(Error):
  """Invalid arguments given to an otherwise valid command."""
def _Print(s):
  """Prints *s*; this indirection exists so the unittest can mock output.

  Python 2 gets an explicit str() coercion before printing.
  """
  print(str(s) if six.PY2 else s)
def _Input(prompt):
  """For easy mocking in the unittest; reads one line of user input."""
  return input(prompt)
def Base64RandomSlug(num_bits):
  """Returns a URL-safe slug encoding the given pseudorandom number.

  Args:
    num_bits: int  # divisible by 8

  Returns:
    str

  Raises:
    ValueError: if num_bits is not a multiple of 8.
  """
  if num_bits % 8:
    raise ValueError("The sole argument needs to be a multiple of 8")
  # One random byte per 8 bits. bytes() accepts a bytearray directly on both
  # Python 2 and 3, and range() works on both, so the six.binary_type/xrange
  # shims were unnecessary here.
  array = bytearray(random.getrandbits(8) for _ in range(num_bits // 8))
  b = base64.urlsafe_b64encode(bytes(array))
  # Drop base64 padding; slug length is recoverable from num_bits.
  return b.decode('utf-8').rstrip('=')
def MutateToDoListLoop(lst, printer=None, writer=None, html_escaper=None):
  """Loops forever (until EOFError) calling _Input for the User's input and mutating lst.

  The list is re-serialized after every successful command (autosave).

  Args:
    lst: tdl.ToDoList
    printer: lambda unicode: None
    writer: object with 'write(bytes)' method  # used when --database_filename is unset
    html_escaper: lambda unicode: unicode
  Returns:
    None
  """
  printer = printer if printer else _Print
  the_state = state.State(printer, lst, uicmd.APP_NAMESPACE, html_escaper)
  try:
    while True:
      ri = _Input(FLAGS.pyatdl_prompt)
      ri = ri.strip()
      if not ri:
        continue
      else:
        try:
          uicmd.ParsePyatdlPromptAndExecute(the_state, ri)
        except uicmd.BadArgsError as e:
          # Bad arguments are reported but do not end the session (and skip
          # the autosave below).
          printer(six.text_type(e))
          continue
      # Autosave after each command, either to the writer or to the file.
      try:
        if FLAGS.database_filename is None:
          serialization.SerializeToDoList2(the_state.ToDoList(), writer)
        else:
          serialization.SerializeToDoList(
            the_state.ToDoList(), FLAGS.database_filename)
      except AssertionError as e:
        # Include the offending command in the assertion for debuggability.
        raise AssertionError('With ri=%s, %s' % (ri, str(e)))
  except EOFError:
    # Ctrl-D (end of input) ends the session cleanly.
    pass
def LoopInteractively(reader=None, writer=None):
  """Loads the to-do list from the save file, loops indefinitely, saving the file periodically.

  Args:
    reader: None|readable object  # used when --database_filename is unset
    writer: None|object with 'write(bytes)' method
  """
  if FLAGS.database_filename is None:
    todolist = serialization.DeserializeToDoList2(
      reader, tdl_factory=uicmd.NewToDoList)
  else:
    todolist = serialization.DeserializeToDoList(
      FLAGS.database_filename, tdl_factory=uicmd.NewToDoList)
  _Print('Welcome to Immaculater!')
  _Print('')
  _Print('Autosave is ON. File: %s' % FLAGS.database_filename)
  _Print('')
  _Print('Type "help" to get started.')
  # Runs until EOF; saves after every command.
  MutateToDoListLoop(todolist, _Print, writer)
  if writer:
    _Print('')
    _Print('To-do list saved -- it is in fact saved after each command.')
  else:
    _Print('')
    _Print('File saved -- it is in fact saved after each command.')
    _Print('The file is %s' % FLAGS.database_filename)
class Cmd(appcommands.Cmd):  # pylint: disable=too-few-public-methods
  """Superclass for all our Cmds."""

  def Run(self, argv):
    """Override; subclasses call super().Run(argv) then do the real work."""
class Interactive(Cmd):  # pylint: disable=too-few-public-methods
  """Run interactively, reading from stdin and printing to stdout."""

  def Run(self, argv):
    """Override. Returns 1 on deserialization failure, None otherwise."""
    super().Run(argv)
    if len(argv) != 1:
      raise app.UsageError('Too many args: %s' % repr(argv))
    try:
      LoopInteractively()
    except serialization.DeserializationError as e:
      _Print(e)
      _Print('Aborting.')
      return 1
def ApplyBatchOfCommands(input_file, printer=None, reader=None, writer=None,
                         html_escaper=None):
  """Reads commands, one per line, from the named file, and performs them.

  The to-do list is deserialized once up front and serialized once at the end.

  Args:
    input_file: file
    printer: None|lambda unicode: None  # defaults to _Print
    reader: None|readable object  # used when --database_filename is unset
    writer: None|object with 'write(bytes)' method
    html_escaper: lambda unicode: unicode
  Returns:
    {'view': str,  # e.g., 'default'
     'cwc': str,  # current working Container
     'cwc_uid': int}  # current working Container's UID
  Raises:
    Error
  """
  if not printer:
    printer = _Print
  if FLAGS.database_filename is None:
    tdl = serialization.DeserializeToDoList2(reader,
                                             tdl_factory=uicmd.NewToDoList)
  else:
    tdl = serialization.DeserializeToDoList(FLAGS.database_filename,
                                            tdl_factory=uicmd.NewToDoList)
  the_state = state.State(
    printer,
    tdl,
    uicmd.APP_NAMESPACE,
    html_escaper)
  for line in input_file:
    line = line.strip()
    if not line:
      continue
    try:
      uicmd.ParsePyatdlPromptAndExecute(the_state, line)
    except uicmd.BadArgsError as e:
      # A bad command either aborts the whole batch or is merely printed,
      # depending on --pyatdl_allow_exceptions_in_batch_mode.
      printer(str(e))
      if not FLAGS.pyatdl_allow_exceptions_in_batch_mode:
        raise BadArgsForCommandError(str(e))
      continue
  # Sanity-check invariants before persisting the whole batch's effects.
  the_state.ToDoList().CheckIsWellFormed()
  if FLAGS.database_filename is None:
    serialization.SerializeToDoList2(the_state.ToDoList(), writer)
  else:
    serialization.SerializeToDoList(
      the_state.ToDoList(), FLAGS.database_filename)
  return {'view': the_state.ViewFilter().ViewFilterUINames()[0],
          'cwc': the_state.CurrentWorkingContainerString(),
          'cwc_uid': the_state.CurrentWorkingContainer().uid}
class Batch(Cmd):  # pylint: disable=too-few-public-methods
  """Run in batch mode, reading lines of commands from a file and printing to stdout.

  The filename '-' is special; it means to read lines of commands from stdin.

  The database affected is specified by --database_filename, but the
  'load'/'save' commands are available to you.
  """

  def Run(self, argv):
    """Override. Returns 1 on deserialization failure, None otherwise."""
    super().Run(argv)
    if len(argv) != 2:
      raise app.UsageError('Needs one argument, the filename of the file '
                           'where each line is a command.')
    # '-' is shorthand for reading the batch of commands from stdin.
    if argv[-1] == '-':
      argv[-1] = '/dev/stdin'
    if not os.path.exists(argv[-1]):
      raise app.UsageError('File specified does not exist: %s' % argv[-1])
    try:
      with open(argv[-1]) as input_file:
        ApplyBatchOfCommands(input_file)
    except serialization.DeserializationError as e:
      _Print(e)
      _Print('Aborting.')
      return 1
class ResetDatabase(Cmd):  # pylint: disable=too-few-public-methods
  """Erase the current database and replace it with a brand-new one.

  Uses the flag --database_filename.

  This *should* be functionally the same thing as using the 'interactive' shell
  and giving it the 'reset' command.
  """

  def Run(self, argv):
    """Override. Deletes the save file if present; a fresh one is created lazily."""
    super().Run(argv)
    if len(argv) != 1:
      raise app.UsageError('Too many args: %s' % repr(argv))
    if os.path.exists(FLAGS.database_filename):
      os.remove(FLAGS.database_filename)
    # Printed even when no file existed: either way the database is now empty.
    print('Database successfully reset.')
class DumpRawProtobuf(Cmd):  # pylint: disable=too-few-public-methods
  """Partially deserializes the to-do list but stops as soon as a protobuf is
  available. Prints that protobuf.

  Uses the flag --database_filename.
  """

  def Run(self, argv):
    """Override. Prints the protobuf in text format for debugging."""
    super().Run(argv)
    if len(argv) != 1:
      raise app.UsageError('Too many args: %s' % repr(argv))
    pb = serialization.GetRawProtobuf(FLAGS.database_filename)
    print(text_format.MessageToString(pb))
def main(_):
  """Register the commands (called by appcommands with parsed argv)."""
  appcommands.AddCmd('interactive', Interactive,
                     command_aliases=['shell', 'sh'])
  appcommands.AddCmd('batch', Batch)
  appcommands.AddCmd('reset_database', ResetDatabase)
  appcommands.AddCmd('dump_raw_protobuf', DumpRawProtobuf)
def RegisterUICmds(cloud_only):
  """Registers all UICmds unless cloud_only is True, in which case a subset are
  registered.

  Args:
    cloud_only: bool  # Registers only the subset making sense with a cloud
                      # backend
  """
  uicmd.RegisterAppcommands(cloud_only, uicmd.APP_NAMESPACE)
def InitFlags():
  """If not running as __main__, use this to initialize the FLAGS module."""
  # Parsing an empty argv leaves every flag at its declared default.
  FLAGS([])
if __name__ == '__main__':
  # Full (non-cloud) command set when run directly from the command line.
  RegisterUICmds(cloud_only=False)
  appcommands.Run()
| 31.048193 | 110 | 0.695479 | 2,708 | 0.262709 | 0 | 0 | 0 | 0 | 0 | 0 | 4,366 | 0.423555 |
69b88545c388c1a97049f68a3c5994f40ec7a709 | 426 | py | Python | setup.py | raghavsub/gtkpass | 1361e1d3204cfc8d51e6027a4f76d038a1ee5d43 | [
"MIT"
] | 1 | 2017-10-30T21:37:06.000Z | 2017-10-30T21:37:06.000Z | setup.py | raghavsub/gtkpass | 1361e1d3204cfc8d51e6027a4f76d038a1ee5d43 | [
"MIT"
] | 10 | 2017-08-07T17:51:54.000Z | 2017-11-07T17:17:39.000Z | setup.py | raghavsub/gtkpass | 1361e1d3204cfc8d51e6027a4f76d038a1ee5d43 | [
"MIT"
] | null | null | null | from setuptools import setup
# Packaging metadata for gtkpass, a GTK+ 3 front-end to the standard unix
# password manager; installs a 'gtkpass' console entry point.
setup(name='gtkpass',
      version='0.2.7',
      description='A GTK+ 3 program for the standard unix password manager',
      url='http://github.com/raghavsub/gtkpass',
      author='Raghav Subramaniam',
      author_email='raghavs511@gmail.com',
      license='MIT',
      packages=['gtkpass'],
      entry_points={'console_scripts': ['gtkpass=gtkpass.main:main']},
      install_requires=[])
| 32.769231 | 76 | 0.661972 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 210 | 0.492958 |
69b8d80a3ae8551bbbe6e3727e00151e698e746d | 2,556 | py | Python | restApi/resources/rides.py | Kitingu/restplus | f9f5d36f376b08bed4305020259f2be7d689705a | [
"MIT"
] | null | null | null | restApi/resources/rides.py | Kitingu/restplus | f9f5d36f376b08bed4305020259f2be7d689705a | [
"MIT"
] | 5 | 2019-10-21T17:05:46.000Z | 2021-06-01T22:35:47.000Z | restApi/resources/rides.py | Kitingu/restplus | f9f5d36f376b08bed4305020259f2be7d689705a | [
"MIT"
] | 1 | 2018-09-04T14:17:43.000Z | 2018-09-04T14:17:43.000Z | from flask_restplus import Resource, fields, Namespace
from restApi.models.rides import Rides
from restApi.helpers.ride_helpers import RideParser
from .auth import token_required
# Shared data-access object and REST namespace for the ride endpoints.
Ride_object = Rides()
ride_api = Namespace("rides", description="this are routes that allow users to create get or delete a ride")
# Swagger/request model describing the payload of a ride offer.
ride_offer = ride_api.model('Rides', {'start_point': fields.String("nairobi"),
                                      'destination': fields.String("kiambu"),
                                      'seats_available': fields.String,
                                      'date': fields.String("10/02/2018"),
                                      'time': fields.String("10:21")
                                      })
class Ride(Resource):
    """Collection endpoint: list all rides, or create a new ride offer."""

    def get(self):
        """Return every stored ride offer."""
        all_rides = Ride_object.get_all_rides()
        return all_rides, 200

    @token_required
    @ride_api.doc(security='apikey')
    @ride_api.expect(ride_offer)
    def post(self):
        """Create a ride offer from the parsed payload; blank fields are rejected."""
        payload = RideParser.parser.parse_args()
        if any(value == "" for value in payload.values()):
            return "Fields must not be blank", 400
        Ride_object.create_rides(payload['start_point'], payload['destination'],
                                 payload['seats_available'], payload['date'],
                                 payload['time'])
        return "Ride created successfully", 201
class Riide(Resource):
    """Single-ride endpoint: fetch, update, or delete one ride by id.

    (Class name keeps its historical spelling; it is referenced below.)
    """

    @token_required
    @ride_api.doc(security='apikey')
    @ride_api.expect(ride_offer)
    def put(self, ride_id):
        """Update an existing ride offer; blank fields are rejected."""
        payload = RideParser.parser.parse_args()
        existing = Ride_object.get_single_ride(ride_id)
        if any(value == "" for value in payload.values()):
            return "Fields must not be blank", 400
        if not existing:
            return {"message": "Ride does not exist"}, 404
        Ride_object.update(ride_id, payload['start_point'], payload['destination'],
                           payload['seats_available'], str(payload['date']),
                           str(payload['time']))
        return "Ride updated successfully", 200

    def delete(self, ride_id):
        """Delete the ride if it exists."""
        if Ride_object.get_single_ride(ride_id):
            Ride_object.delete_ride(ride_id)
            return "Ride deleted successfully", 200
        return "Ride does not exist", 404

    def get(self, ride_id):
        """Fetch a single ride by id."""
        existing = Ride_object.get_single_ride(ride_id)
        if not existing:
            return "Ride does not exist", 404
        return existing, 200
# Route registration: collection endpoint and per-ride endpoint.
ride_api.add_resource(Ride, '/rides')
ride_api.add_resource(Riide, '/rides/<int:ride_id>')
| 36 | 117 | 0.586072 | 1,727 | 0.675665 | 0 | 0 | 1,149 | 0.449531 | 0 | 0 | 535 | 0.209311 |
69b8ea15180398bc6ee7cd290eed58c81d9257b6 | 27,827 | py | Python | services/ui_backend_service/tests/integration_tests/tasks_test.py | runsascoded/metaflow-service | ac7770dfeae17fd060129d408fa3bb472fc00b86 | [
"Apache-2.0"
] | 103 | 2019-12-04T04:41:08.000Z | 2022-03-29T16:20:45.000Z | services/ui_backend_service/tests/integration_tests/tasks_test.py | runsascoded/metaflow-service | ac7770dfeae17fd060129d408fa3bb472fc00b86 | [
"Apache-2.0"
] | 42 | 2019-12-16T23:15:44.000Z | 2022-02-18T17:33:32.000Z | services/ui_backend_service/tests/integration_tests/tasks_test.py | valayDave/metaflow-service | 65e19aef268e9e707522ee0695fd4ebaee42aa69 | [
"Apache-2.0"
] | 36 | 2019-12-12T17:46:46.000Z | 2022-01-21T04:53:24.000Z | import pytest
import time
from .utils import (
init_app, init_db, clean_db,
add_flow, add_run, add_step, add_task, add_artifact,
_test_list_resources, _test_single_resource, add_metadata, get_heartbeat_ts
)
# Mark every test in this module as an integration test.
pytestmark = [pytest.mark.integration_tests]
# Fixtures begin
@pytest.fixture
def cli(loop, aiohttp_client):
    """Provide an aiohttp test client for the initialized app."""
    return init_app(loop, aiohttp_client)
@pytest.fixture
async def db(cli):
    """Yield an initialized async database handle and clean it up after the test."""
    async_db = await init_db(cli)
    yield async_db
    await clean_db(async_db)
# Fixtures end
async def test_list_tasks(cli, db):
    """Task listings are empty until a task exists; a fresh task is 'pending'."""
    _flow = (await add_flow(db, flow_id="HelloFlow")).body
    _run = (await add_run(db, flow_id=_flow.get("flow_id"))).body
    _step = (await add_step(db, flow_id=_run.get("flow_id"), step_name="step", run_number=_run.get("run_number"), run_id=_run.get("run_id"))).body
    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/tasks".format(**_step), 200, [])
    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks".format(**_step), 200, [])
    _task = await create_task(db, step=_step)
    # A task with no metadata/artifacts has no duration and is 'pending'.
    _task['duration'] = None
    _task['status'] = 'pending'
    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/tasks".format(**_task), 200, [_task])
    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks".format(**_task), 200, [_task])
async def test_list_tasks_non_numerical(cli, db):
    """A task created with a task_name is listed with task_id distinct from the name."""
    _flow = (await add_flow(db, flow_id="HelloFlow")).body
    _run = (await add_run(db, flow_id=_flow.get("flow_id"))).body
    _step = (await add_step(db, flow_id=_run.get("flow_id"), step_name="step", run_number=_run.get("run_number"), run_id=_run.get("run_id"))).body
    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/tasks".format(**_step), 200, [])
    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks".format(**_step), 200, [])
    _task = await create_task(db, step=_step, task_name="bar")
    _, data = await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/tasks".format(**_task), 200, None)
    _, data = await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks".format(**_task), 200, None)
    assert len(data) == 1
    assert data[0]['task_name'] == 'bar'
    assert data[0]['task_id'] != 'bar'
async def test_single_task(cli, db):
    """A missing task returns 404; a fresh task is returned as 'pending'."""
    await _test_single_resource(cli, db, "/flows/HelloFlow/runs/404/steps/none/tasks/5", 404, {})
    _task = await create_task(db)
    _task['duration'] = None
    _task['status'] = 'pending'
    await _test_single_resource(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}".format(**_task), 200, _task)
async def test_single_task_non_numerical(cli, db):
    """A task can be fetched by its task_name; task_id stays distinct from the name."""
    _task = await create_task(db, task_name="bar")
    _, data = await _test_single_resource(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/bar".format(**_task), 200, None)
    assert data['task_name'] == 'bar'
    assert data['task_id'] != 'bar'
async def test_list_old_metadata_task_attempts(cli, db):
    """Attempts inferred from _task_ok artifacts alone (old-style metadata)."""
    # Test tasks with old (missing attempt) metadata
    _task = await create_task(db)
    _task['duration'] = None
    _task['status'] = 'pending'
    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task])
    _artifact_first = await create_ok_artifact_for_task(db, _task)
    _artifact_second = await create_ok_artifact_for_task(db, _task, attempt=1)
    # Without attempt metadata the status cannot be resolved from the DB.
    _task['status'] = 'unknown'
    _task['task_ok'] = 'location'
    _task_first_attempt = dict(_task)
    _task_second_attempt = dict(_task)
    _task_first_attempt['attempt_id'] = 0
    _task_first_attempt['finished_at'] = _artifact_first['ts_epoch']
    _task_first_attempt['duration'] = _artifact_first['ts_epoch'] - \
        _task_first_attempt['ts_epoch']
    _task_second_attempt['attempt_id'] = 1
    _task_second_attempt['finished_at'] = _artifact_second['ts_epoch']
    _task_second_attempt['duration'] = _artifact_second['ts_epoch'] - \
        _task_second_attempt['ts_epoch']
    # Attempts are listed newest first.
    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks?task_id={task_id}".format(**_task), 200, [_task_second_attempt, _task_first_attempt])
    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task_second_attempt, _task_first_attempt])
async def test_old_metadata_task_with_multiple_attempts(cli, db):
    """The single-task endpoint reports the latest attempt under old-style metadata."""
    # Test tasks with old (missing attempt) metadata
    _task = await create_task(db)
    _task['duration'] = None
    _task['status'] = 'pending'
    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task])
    _artifact_first = await create_ok_artifact_for_task(db, _task)
    _artifact_second = await create_ok_artifact_for_task(db, _task, attempt=1)
    # Expect the latest attempt's data (attempt 1) on the single-task route.
    _task['status'] = 'unknown'
    _task['task_ok'] = 'location'
    _task['attempt_id'] = 1
    _task['finished_at'] = _artifact_second['ts_epoch']
    _task['duration'] = _artifact_second['ts_epoch'] - \
        _task['ts_epoch']
    await _test_single_resource(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}".format(**_task), 200, _task)
async def test_task_with_attempt_metadata(cli, db):
    """Status/timestamps evolve as attempt, attempt-done and attempt_ok metadata arrive."""
    _task = await create_task(db)
    _task['duration'] = None
    _task['status'] = 'pending'
    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task])
    _attempt_first = await create_task_attempt_metadata(db, _task)
    _artifact_first = await create_ok_artifact_for_task(db, _task)
    # attempt metadata sets started_at; the _task_ok artifact sets finished_at.
    _task['started_at'] = _attempt_first['ts_epoch']
    _task['finished_at'] = _artifact_first['ts_epoch']
    _task['duration'] = _task['finished_at'] - _task['started_at']
    _task['status'] = 'unknown'
    _task['task_ok'] = 'location'
    await _test_single_resource(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}".format(**_task), 200, _task)
    _attempt_done_first = await create_task_attempt_done_metadata(db, _task)
    # attempt-done moves finished_at but the status is still unresolved.
    _task['status'] = 'unknown'
    _task['finished_at'] = _attempt_done_first['ts_epoch']
    _task['duration'] = _attempt_done_first['ts_epoch'] - _task['started_at']
    await _test_single_resource(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}".format(**_task), 200, _task)
    _attempt_ok_first = await create_task_attempt_ok_metadata(db, _task, 0, True) # status 'completed'
    _task['status'] = 'completed'
    _task['finished_at'] = _attempt_ok_first['ts_epoch']
    _task['duration'] = _attempt_ok_first['ts_epoch'] - _task['started_at']
    _task['task_ok'] = None # intended behavior, status refinement location field should remain empty when metadata exists.
    await _test_single_resource(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}".format(**_task), 200, _task)
async def test_task_failed_status_with_heartbeat(cli, db):
    """A failed task with only a heartbeat uses the heartbeat for finished_at/duration."""
    _task = await create_task(db, last_heartbeat_ts=1, status="failed")
    _task['finished_at'] = 1000 # should be last heartbeat in this case, due to every other timestamp missing.
    _task['duration'] = _task['last_heartbeat_ts'] * 1000 - _task['ts_epoch']
    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task])
async def test_task_running_status_with_heartbeat(cli, db):
    """A running task has no finished_at; duration is measured up to the heartbeat."""
    hb_freeze = get_heartbeat_ts()
    _task = await create_task(db, last_heartbeat_ts=hb_freeze)
    _task['finished_at'] = None # should not have a finished at for running tasks.
    _task['duration'] = hb_freeze * 1000 - _task['ts_epoch']
    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task])
async def test_list_task_attempts(cli, db):
    """Two attempts with attempt/done metadata are listed newest first as 'unknown'."""
    _task = await create_task(db)
    _task['duration'] = None
    _task['status'] = 'pending'
    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task])
    _attempt_first = await create_task_attempt_metadata(db, _task)
    _artifact_first = await create_ok_artifact_for_task(db, _task)
    _attempt_done_first = await create_task_attempt_done_metadata(db, _task)
    _attempt_second = await create_task_attempt_metadata(db, _task, attempt=1)
    _artifact_second = await create_ok_artifact_for_task(db, _task, attempt=1)
    _task_first_attempt = dict(_task)
    _task_second_attempt = dict(_task)
    # First attempt: started by attempt metadata, finished by attempt-done.
    _task_first_attempt['attempt_id'] = 0
    _task_first_attempt['status'] = 'unknown'
    _task_first_attempt['task_ok'] = 'location'
    _task_first_attempt['started_at'] = _attempt_first['ts_epoch']
    _task_first_attempt['finished_at'] = _attempt_done_first['ts_epoch']
    _task_first_attempt['duration'] = _task_first_attempt['finished_at'] \
        - _task_first_attempt['started_at']
    # Second attempt counts as completed as well due to the _task_ok existing.
    _task_second_attempt['attempt_id'] = 1
    _task_second_attempt['status'] = 'unknown'
    _task_second_attempt['task_ok'] = 'location'
    _task_second_attempt['started_at'] = _attempt_second['ts_epoch']
    _task_second_attempt['finished_at'] = _artifact_second['ts_epoch']
    _task_second_attempt['duration'] = _task_second_attempt['finished_at'] \
        - _task_second_attempt['started_at']
    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks?task_id={task_id}".format(**_task), 200, [_task_second_attempt, _task_first_attempt])
    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task_second_attempt, _task_first_attempt])
async def test_task_with_attempt_ok_completed(cli, db):
    """attempt_ok=True metadata resolves the task status to 'completed'."""
    _task = await create_task(db)
    _attempt_first = await create_task_attempt_metadata(db, _task)
    _artifact_first = await create_ok_artifact_for_task(db, _task)
    _attempt_ok = await create_task_attempt_ok_metadata(db, _task, 0, True) # status = 'completed'
    _task['started_at'] = _attempt_first['ts_epoch']
    _task['finished_at'] = _attempt_ok['ts_epoch']
    _task['duration'] = _attempt_ok['ts_epoch'] - _task['started_at']
    _task['status'] = 'completed'
    await _test_single_resource(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}".format(**_task), 200, _task)
async def test_task_with_attempt_ok_failed(cli, db):
    """attempt_ok=False metadata resolves the task status to 'failed'."""
    _task = await create_task(db)
    _attempt_first = await create_task_attempt_metadata(db, _task)
    _artifact_first = await create_ok_artifact_for_task(db, _task)
    _task['started_at'] = _attempt_first['ts_epoch']
    _task['finished_at'] = _artifact_first['ts_epoch']
    _task['duration'] = _task['finished_at'] - _task['started_at']
    _task['status'] = 'failed'
    _attempt_ok = await create_task_attempt_ok_metadata(db, _task, 0, False) # status = 'failed'
    # attempt_ok metadata supersedes the artifact timestamp for finished_at.
    _task['finished_at'] = _attempt_ok['ts_epoch']
    _task['duration'] = _attempt_ok['ts_epoch'] - _task['started_at']
    await _test_single_resource(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}".format(**_task), 200, _task)
async def test_list_task_multiple_attempts_failure(cli, db):
    """Per-attempt attempt_ok metadata yields 'failed' then 'completed' statuses."""
    _task = await create_task(db)
    _attempt_first = await create_task_attempt_metadata(db, _task)
    _artifact_first = await create_ok_artifact_for_task(db, _task)
    _attempt_done_first = await create_task_attempt_done_metadata(db, _task)
    _attempt_second = await create_task_attempt_metadata(db, _task, attempt=1)
    _artifact_second = await create_ok_artifact_for_task(db, _task, attempt=1)
    # Mark first attempt as 'failure' and second as 'completed'
    _attempt_ok_first = await create_task_attempt_ok_metadata(db, _task, 0, False) # status = 'failed'
    _attempt_ok_second = await create_task_attempt_ok_metadata(db, _task, 1, True) # status = 'completed'
    _task_first_attempt = dict(_task)
    _task_second_attempt = dict(_task)
    _task_first_attempt['attempt_id'] = 0
    _task_first_attempt['status'] = 'failed'
    _task_first_attempt['started_at'] = _attempt_first['ts_epoch']
    _task_first_attempt['finished_at'] = _attempt_done_first['ts_epoch']
    _task_first_attempt['duration'] = _task_first_attempt['finished_at'] \
        - _task_first_attempt['started_at']
    # attempt_ok metadata takes precedence for the finish timestamp.
    _task_first_attempt['finished_at'] = _attempt_ok_first['ts_epoch']
    _task_first_attempt['duration'] = _attempt_ok_first['ts_epoch'] - _task_first_attempt['started_at']
    # Second attempt counts as completed as well due to the _task_ok existing.
    _task_second_attempt['attempt_id'] = 1
    _task_second_attempt['status'] = 'completed'
    _task_second_attempt['started_at'] = _attempt_second['ts_epoch']
    _task_second_attempt['finished_at'] = _artifact_second['ts_epoch']
    _task_second_attempt['duration'] = _task_second_attempt['finished_at'] \
        - _task_second_attempt['started_at']
    _task_second_attempt['finished_at'] = _attempt_ok_second['ts_epoch']
    _task_second_attempt['duration'] = _attempt_ok_second['ts_epoch'] - _task_second_attempt['started_at']
    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks?task_id={task_id}".format(**_task), 200, [_task_second_attempt, _task_first_attempt])
    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task_second_attempt, _task_first_attempt])
async def test_task_attempts_with_attempt_metadata(cli, db):
    """A second attempt must appear as soon as its 'attempt' metadata is
    written, and statuses must be refined once attempt_ok arrives."""
    task = await create_task(db)
    task['duration'] = None
    task['status'] = 'pending'
    attempts_url = "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**task)
    tasks_url = "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks?task_id={task_id}".format(**task)
    await _test_list_resources(cli, db, attempts_url, 200, [task])

    first_meta = await create_task_attempt_metadata(db, task)
    await create_ok_artifact_for_task(db, task)
    first_done = await create_task_attempt_done_metadata(db, task)
    # Second-attempt 'attempt' metadata alone (no artifacts yet) should
    # already surface a second attempt row in the listings.
    second_meta = await create_task_attempt_metadata(db, task, attempt=1)

    first = {
        **task,
        'attempt_id': 0,
        'task_ok': 'location',  # location of the status artifact
        'status': 'unknown',    # attempt_ok missing -> status undeterminable from DB
        'started_at': first_meta['ts_epoch'],
        'finished_at': first_done['ts_epoch'],
    }
    first['duration'] = first['finished_at'] - first['started_at']
    second = {
        **task,
        'attempt_id': 1,
        'status': 'running',
        'started_at': second_meta['ts_epoch'],
    }
    second['duration'] = int(round(time.time() * 1000)) - second['started_at']

    await _test_list_resources(cli, db, tasks_url, 200, [second, first], approx_keys=["duration"])
    await _test_list_resources(cli, db, attempts_url, 200, [second, first], approx_keys=["duration"])

    # attempt_ok=False flips the first attempt to 'failed' and (in the
    # current implementation) overrides attempt-done as finished_at.
    first_ok = await create_task_attempt_ok_metadata(db, task, 0, False)
    first['finished_at'] = first_ok['ts_epoch']
    first['duration'] = first['finished_at'] - first['started_at']
    first['task_ok'] = None       # status is now resolvable straight from the DB
    first['status'] = 'failed'

    await _test_list_resources(cli, db, tasks_url, 200, [second, first], approx_keys=["duration"])
    await _test_list_resources(cli, db, attempts_url, 200, [second, first], approx_keys=["duration"])
async def test_task_attempt_statuses_with_attempt_ok_failed(cli, db):
    """A failed first attempt (attempt_ok=False) and a completed second
    attempt (attempt_ok=True) must be listed with matching statuses."""
    task = await create_task(db)
    task['duration'] = None
    task['status'] = 'pending'
    attempts_url = "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**task)
    tasks_url = "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks?task_id={task_id}".format(**task)
    await _test_list_resources(cli, db, attempts_url, 200, [task])

    first_meta = await create_task_attempt_metadata(db, task)
    await create_ok_artifact_for_task(db, task)
    await create_task_attempt_done_metadata(db, task)
    first_ok = await create_task_attempt_ok_metadata(db, task, 0, False)   # -> 'failed'
    second_meta = await create_task_attempt_metadata(db, task, attempt=1)
    await create_task_attempt_done_metadata(db, task, attempt=1)
    second_ok = await create_task_attempt_ok_metadata(db, task, 1, True)   # -> 'completed'

    # In the current implementation attempt_ok overrides attempt-done
    # ts_epoch as the more accurate finished_at timestamp.
    first = {**task, 'attempt_id': 0, 'status': 'failed',
             'started_at': first_meta['ts_epoch'],
             'finished_at': first_ok['ts_epoch']}
    first['duration'] = first['finished_at'] - first['started_at']
    second = {**task, 'attempt_id': 1, 'status': 'completed',
              'started_at': second_meta['ts_epoch'],
              'finished_at': second_ok['ts_epoch']}
    second['duration'] = second['finished_at'] - second['started_at']

    await _test_list_resources(cli, db, tasks_url, 200, [second, first])
    await _test_list_resources(cli, db, attempts_url, 200, [second, first])
# Test cases from the google docs table.
# status 'completed' tests
#
# STATUS: attempt_ok in task metadata for the attempt is set to True
# STARTED_AT: created_at property for attempt attribute for the attempt in task metadata
# FINISHED_AT: created_at property for attempt_ok attribute for the attempt in task metadata
# NOTE: for a more accurate finished_at timestamp, use the greatest timestamp out of task_ok / attempt_ok / attempt-done
# as this is the latest write_timestamp for the task
async def test_task_attempt_status_completed(cli, db):
    """attempt_ok=True marks the attempt 'completed'; started_at comes from
    the 'attempt' record and finished_at from 'attempt-done'."""
    task = await create_task(db)
    task['duration'] = None
    task['status'] = 'pending'
    url = "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**task)
    await _test_list_resources(cli, db, url, 200, [task])

    attempt_meta = await create_task_attempt_metadata(db, task, 0)
    await create_task_attempt_ok_metadata(db, task, 0, True)
    attempt_done = await create_task_attempt_done_metadata(db, task, 0)

    task['status'] = 'completed'
    task['started_at'] = attempt_meta['ts_epoch']
    task['finished_at'] = attempt_done['ts_epoch']
    task['duration'] = task['finished_at'] - task['started_at']
    await _test_list_resources(cli, db, url, 200, [task])
# status 'running' tests
#
# STATUS 'running':
# Has all of
# Has a start time (NOTE: this requires 'attempt' metadata to be present)
# attempt_ok does not exist in the task metadata
# Has logged a heartbeat in the last x minutes (NOTE: we actually rely on heartbeat for running status.)
# No subsequent attempt exists
# STARTED_AT: created_at property for attempt attribute for the attempt in task metadata
# FINISHED_AT: does not apply (NULL)
async def test_task_attempt_status_running(cli, db):
    """A heartbeating task with 'attempt' metadata but no attempt_ok is
    'running': finished_at stays unset and duration tracks the heartbeat."""
    task = await create_task(db, last_heartbeat_ts=get_heartbeat_ts())  # default status: 'running'
    url = "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**task)
    task['duration'] = task['last_heartbeat_ts'] * 1000 - task['ts_epoch']
    await _test_list_resources(cli, db, url, 200, [task])

    attempt_meta = await create_task_attempt_metadata(db, task, 0)
    task['started_at'] = attempt_meta['ts_epoch']
    task['finished_at'] = None
    task['duration'] = task['last_heartbeat_ts'] * 1000 - task['started_at']
    await _test_list_resources(cli, db, url, 200, [task])
# status 'failed' tests
#
# STATUS:
# Either of
# attempt_ok in task metadata for the attempt is set to False
# No heartbeat has been logged for the task in the last x minutes and no new attempt has started
# A newer attempt exists
# STARTED_AT: created_at property for attempt attribute for the attempt in task metadata
# FINISHED_AT:
# Either of (in priority)
# created_at property for attempt_ok attribute for the attempt in task metadata
# The timestamp in the heartbeat column for the task if no subsequent attempt is detected
# If a subsequent attempt exists, use the start time of the subsequent attempt
async def test_task_attempt_status_failed_with_existing_subsequent_attempt(cli, db):
    """With no completion metadata at all, attempt 0 becomes 'failed' as soon
    as attempt 1 exists; its finished_at is attempt 1's start time."""
    task = await create_task(db, last_heartbeat_ts=get_heartbeat_ts())
    url = "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**task)
    task['duration'] = task['last_heartbeat_ts'] * 1000 - task['ts_epoch']
    await _test_list_resources(cli, db, url, 200, [task])

    first = dict(task)
    second = dict(task)

    # ATTEMPT-0: completion metadata deliberately omitted so the failure
    # must be inferred purely from the existence of attempt 1.
    first_meta = await create_task_attempt_metadata(db, task, 0)
    first['started_at'] = first_meta['ts_epoch']
    first['duration'] = first['last_heartbeat_ts'] * 1000 - first['started_at']
    await _test_list_resources(cli, db, url, 200, [first])

    # ATTEMPT-1
    second_meta = await create_task_attempt_metadata(db, task, 1)
    second['attempt_id'] = 1
    second['started_at'] = second_meta['ts_epoch']
    second['duration'] = second['last_heartbeat_ts'] * 1000 - second['started_at']

    first['status'] = 'failed'
    first['finished_at'] = second['started_at']
    first['duration'] = first['finished_at'] - first['started_at']
    await _test_list_resources(cli, db, url, 200, [second, first])
# Resource Helpers / factories
async def create_ok_artifact_for_task(db, task, attempt=0):
    """Create and return a '_task_ok' artifact record for the given task."""
    artifact_payload = {
        "name": "_task_ok",
        "location": "location",
        "ds_type": "ds_type",
        "sha": "sha",
        "type": "type",
        "content_type": "content_type",
        "attempt_id": attempt,
    }
    response = await add_artifact(
        db,
        flow_id=task.get("flow_id"),
        run_number=task.get("run_number"),
        run_id=task.get("run_id"),
        step_name=task.get("step_name"),
        task_id=task.get("task_id"),
        task_name=task.get("task_name"),
        artifact=artifact_payload)
    return response.body
async def create_task(db, step=None, status="running", task_id=None, task_name=None, last_heartbeat_ts=None):
    """Create and return a task with the requested status.

    When no step is supplied, a fresh HelloFlow flow, a run and a step named
    'step' are created first to own the task.
    """
    if not step:
        flow = (await add_flow(db, flow_id="HelloFlow")).body
        run = (await add_run(db, flow_id=flow.get("flow_id"))).body
        step = (await add_step(
            db,
            flow_id=run.get("flow_id"),
            run_number=run.get("run_number"),
            step_name="step")).body
    created = (await add_task(
        db,
        flow_id=step.get("flow_id"),
        run_number=step.get("run_number"),
        step_name=step.get("step_name"),
        task_id=task_id,
        task_name=task_name,
        last_heartbeat_ts=last_heartbeat_ts)).body
    # The status is attached client-side only; it is not persisted here.
    created['status'] = status
    return created
async def create_metadata_for_task(db, task, metadata=None, tags=None):
    """Create a metadata record for a task.

    Args:
        db: database handle passed through to ``add_metadata``.
        task: task dict whose identifying fields are copied onto the record.
        metadata: metadata payload dict; ``None`` means an empty payload.
        tags: optional list of tags to attach to the record.

    Returns:
        The created metadata record (response body).
    """
    # Fix: the previous signature used a mutable default argument
    # (``metadata={}``), which is shared between calls. Use a None
    # sentinel and build a fresh dict per call instead.
    if metadata is None:
        metadata = {}
    _meta = (await add_metadata(db,
                                flow_id=task.get("flow_id"),
                                run_number=task.get("run_number"),
                                run_id=task.get("run_id"),
                                step_name=task.get("step_name"),
                                task_id=task.get("task_id"),
                                task_name=task.get("task_name"),
                                tags=tags,
                                metadata=metadata)
             ).body
    return _meta
async def create_task_attempt_metadata(db, task, attempt=0):
    """Write 'attempt' metadata marking the start of the given attempt."""
    payload = {
        "type": "attempt",
        "field_name": "attempt",
        "value": str(attempt),
    }
    return await create_metadata_for_task(db, task, metadata=payload)
async def create_task_attempt_done_metadata(db, task, attempt: int = 0):
    """Write 'attempt-done' metadata marking the end of the given attempt."""
    payload = {
        "type": "attempt-done",
        "field_name": "attempt-done",
        "value": str(attempt),
    }
    return await create_metadata_for_task(db, task, metadata=payload)
async def create_task_attempt_ok_metadata(db, task, attempt_id: int, attempt_ok: bool = False):
    """Write 'attempt_ok' metadata recording an attempt's success flag.

    The attempt is identified through an ``attempt_id:<n>`` tag, matching
    how the service resolves attempt_ok records.
    """
    attempt_tag = "attempt_id:{attempt_id}".format(attempt_id=attempt_id)
    payload = {
        "type": "internal_attempt_status",
        "field_name": "attempt_ok",
        "value": str(attempt_ok),
    }
    return await create_metadata_for_task(db, task, tags=[attempt_tag], metadata=payload)
| 47.084602 | 212 | 0.71215 | 0 | 0 | 100 | 0.003594 | 204 | 0.007331 | 25,716 | 0.924138 | 10,198 | 0.366479 |
69bc563b00c3a0dca8969daf6eb573ab788fb25a | 2,033 | py | Python | apps/my_app/handlers.py | opaniagu/fastapi_mongodb | 2a58c89a16efca8f656cc64d923e1eecb4120a80 | [
"MIT"
] | null | null | null | apps/my_app/handlers.py | opaniagu/fastapi_mongodb | 2a58c89a16efca8f656cc64d923e1eecb4120a80 | [
"MIT"
] | null | null | null | apps/my_app/handlers.py | opaniagu/fastapi_mongodb | 2a58c89a16efca8f656cc64d923e1eecb4120a80 | [
"MIT"
] | null | null | null | import datetime
import fastapi
import pymongo
import pymongo.errors
import pymongo.results
from apps.common.enums import CodeAudiences
from apps.common.handlers import PasswordsHandler, TokensHandler
from fastapi_mongodb.exceptions import HandlerException, RepositoryException
from fastapi_mongodb.handlers import BaseHandler, mongo_duplicate_key_error_handler
from fastapi_mongodb.pagination import Paginator
from fastapi_mongodb.projectors import BaseProjector
from fastapi_mongodb.repositories import BaseRepositoryConfig
from fastapi_mongodb.sorting import SortBuilder
from fastapi_mongodb.my_types import OID
from apps.users.models import UserModel
from apps.users.repositories import UserRepository
from apps.users.schemas import JWTPayloadSchema, JWTRefreshSchema, UserCreateSchema, UserLoginSchema, UserUpdateSchema
from apps.my_app.models import DeviceModel
from apps.my_app.repositories import DeviceRepository
from apps.my_app.schemas import DeviceCreateSchema
__all__ = ["DeviceHandler"]
class DeviceHandler(BaseHandler):
    """Request handler for device resources backed by DeviceRepository."""

    def __init__(self, request: fastapi.Request):
        super().__init__(request=request)
        self.device_repository = DeviceRepository()

    async def create_device(self, request: fastapi.Request, device: DeviceCreateSchema) -> dict:
        """Create new device"""
        model = DeviceModel(**device.dict(exclude_unset=True))
        try:
            insert_result: pymongo.results.InsertOneResult = await self.device_repository.insert_one(
                document=model.to_db(),
                session=request.state.db_session,
            )
        except pymongo.errors.DuplicateKeyError as error:
            # Translate the Mongo duplicate-key error into an API error
            # mentioning the unique 'name' field.
            mongo_duplicate_key_error_handler(model_name="Device", fields=["name"], error=error)
        else:
            return {"acknowledged": insert_result.acknowledged, "inserted_id": insert_result.inserted_id}
69bda50be48c89c9d6e09da95c86cfb87230b936 | 25,971 | py | Python | trainer.py | icrdr/3D-UNet-Renal-Anatomy-Extraction | 50b16151730ec7868b3d3482e4db31e4c1e25412 | [
"MIT"
] | null | null | null | trainer.py | icrdr/3D-UNet-Renal-Anatomy-Extraction | 50b16151730ec7868b3d3482e4db31e4c1e25412 | [
"MIT"
] | null | null | null | trainer.py | icrdr/3D-UNet-Renal-Anatomy-Extraction | 50b16151730ec7868b3d3482e4db31e4c1e25412 | [
"MIT"
] | null | null | null | import torch
from torch.optim import lr_scheduler
from tqdm import tqdm
from torchsummary import summary
from torch.utils.tensorboard import SummaryWriter
from apex import amp
from loss import dice
from pathlib import Path
from data import CaseDataset, load_case, save_pred, \
orient_crop_case, regions_crop_case, resample_normalize_case
import nibabel as nib
import numpy as np
import scipy.special as spe
from transform import pad, crop_pad, to_numpy, to_tensor, resize
def predict_per_patch(input,
                      model,
                      num_classes=3,
                      patch_size=(96, 96, 96),
                      step_per_patch=4,
                      verbose=True,
                      one_hot=False):
    """Sliding-window (patch-wise) inference over a whole volume.

    Overlapping patches of ``patch_size`` are pushed through ``model`` and
    the per-voxel class scores are averaged over every patch that covers
    the voxel.

    Args:
        input: volume array, channels last (spatial shape = input.shape[:3]).
        model: trained torch model; its parameters' device decides where
            inference runs.
        num_classes: number of output channels; 1 uses sigmoid, >1 softmax.
        step_per_patch: window steps per patch length along each axis
            (higher -> more overlap, smoother averaging, slower).
        verbose: print progress info and show a tqdm bar.
        one_hot: if True, return the averaged per-class scores as a
            channels-last float32 array; otherwise collapse to a uint8
            label volume (round for binary, argmax for multi-class).

    Returns:
        Prediction volume cropped/padded back to the input's spatial shape.
    """
    device = next(model.parameters()).device
    # add padding if patch is larger than input shape
    origial_shape = input.shape[:3]  # (sic) spatial shape, restored at the end
    input = pad(input, patch_size)
    padding_shape = input.shape[:3]
    # Patch centers sweep from half a patch in, to half a patch from the end.
    coord_start = np.array([i // 2 for i in patch_size])
    coord_end = np.array([padding_shape[i] - patch_size[i] // 2
                          for i in range(len(patch_size))])
    num_steps = np.ceil([(coord_end[i] - coord_start[i]) / (patch_size[i] / step_per_patch)
                         for i in range(3)])
    step_size = np.array([(coord_end[i] - coord_start[i]) / (num_steps[i] + 1e-8)
                          for i in range(3)])
    # Axis fits in a single patch: use a huge step so only one center is taken.
    step_size[step_size == 0] = 9999999
    # NOTE(review): np.int is removed in NumPy >= 1.24; would need np.int_ there.
    x_steps = np.arange(coord_start[0], coord_end[0] + 1e-8, step_size[0], dtype=np.int)
    y_steps = np.arange(coord_start[1], coord_end[1] + 1e-8, step_size[1], dtype=np.int)
    z_steps = np.arange(coord_start[2], coord_end[2] + 1e-8, step_size[2], dtype=np.int)
    # Accumulators: summed class scores and per-voxel patch coverage counts.
    result = torch.zeros([num_classes] + list(padding_shape)).to(device)
    result_n = torch.zeros_like(result).to(device)
    if verbose:
        print('Image Shape: {} Patch Size: {}'.format(padding_shape, patch_size))
        print('X step: %d Y step: %d Z step: %d' %
              (len(x_steps), len(y_steps), len(z_steps)))
    # W H D C => C W H D => N C W H D for model input
    input = torch.from_numpy(to_tensor(input)[None]).to(device)
    # Precompute the spatial slice triple of every patch window.
    patchs_slices = []
    for x in x_steps:
        x_mix = x - patch_size[0] // 2
        x_max = x + patch_size[0] // 2
        for y in y_steps:
            y_min = y - patch_size[1] // 2
            y_max = y + patch_size[1] // 2
            for z in z_steps:
                z_min = z - patch_size[2] // 2
                z_max = z + patch_size[2] // 2
                patchs_slices.append([slice(x_mix, x_max),
                                      slice(y_min, y_max),
                                      slice(z_min, z_max)])
    # predict loop
    predict_loop = tqdm(patchs_slices) if verbose else patchs_slices
    model.eval()
    with torch.no_grad():
        for slices in predict_loop:
            # Leading [slice(None), slice(None)] keeps the N and C axes whole.
            output = model(input[[slice(None), slice(None)]+slices])
            if num_classes == 1:
                output = torch.sigmoid(output)
            else:
                output = torch.softmax(output, dim=1)
            result[[slice(None)]+slices] += output[0]
            result_n[[slice(None)]+slices] += 1
    # merge all patchs
    if verbose:
        print('Merging all patchs...')
    result = result / result_n  # average over overlapping patches
    if one_hot:
        # Channels-first tensor -> channels-last numpy probabilities.
        result = to_numpy(result.cpu().numpy()).astype(np.float32)
    else:
        if num_classes == 1:
            result = torch.squeeze(result, dim=0)
        else:
            # NOTE(review): softmax over already-averaged softmax outputs -
            # presumably an intentional renormalisation before argmax; confirm.
            result = torch.softmax(result, dim=0)
            result = torch.argmax(result, axis=0)
        result = np.round(result.cpu().numpy()).astype(np.uint8)
    return crop_pad(result, origial_shape)
def predict_case(case,
                 model,
                 target_spacing,
                 normalize_stats,
                 num_classes=3,
                 patch_size=(96, 96, 96),
                 step_per_patch=4,
                 verbose=True,
                 one_hot=False):
    """Predict one case: resample/normalise, run patch-wise inference,
    then resize the prediction back to the case's original shape.

    Mutates and returns ``case`` with ``case['pred']`` set and the original
    affine restored.
    """
    def log(message):
        if verbose:
            print(message)

    original_shape = case['image'].shape[:-1]
    original_affine = case['affine']
    log('Resampling the case for prediction...')
    resampled = resample_normalize_case(case, target_spacing, normalize_stats)
    log('Predicting the case...')
    prediction = predict_per_patch(resampled['image'],
                                   model,
                                   num_classes,
                                   patch_size,
                                   step_per_patch,
                                   verbose,
                                   one_hot)
    log('Resizing the case to origial shape...')
    # Label maps need nearest-neighbour style resizing; one-hot scores don't.
    case['pred'] = resize(prediction, original_shape, is_label=one_hot is False)
    case['affine'] = original_affine
    log('All done!')
    return case
def batch_predict_case(load_dir,
                       save_dir,
                       model,
                       target_spacing,
                       normalize_stats,
                       num_classes=3,
                       patch_size=(240, 240, 80),
                       step_per_patch=4,
                       data_range=None):
    """Predict every case of a CaseDataset directory and save each result.

    ``data_range`` optionally restricts the processed case indices.
    """
    dataset = CaseDataset(Path(load_dir), load_meta=True)
    indices = range(len(dataset)) if data_range is None else data_range
    for idx in tqdm(indices):
        predicted = predict_case(dataset[idx],
                                 model,
                                 target_spacing,
                                 normalize_stats,
                                 num_classes,
                                 patch_size,
                                 step_per_patch,
                                 False)
        save_pred(predicted, save_dir)
def cascade_predict_case(case,
                         coarse_model,
                         coarse_target_spacing,
                         coarse_normalize_stats,
                         coarse_patch_size,
                         detail_model,
                         detail_target_spacing,
                         detail_normalize_stats,
                         detail_patch_size,
                         num_classes=3,
                         step_per_patch=4,
                         region_threshold=10000,
                         crop_padding=20,
                         verbose=True):
    """Coarse-to-fine cascade prediction for one case.

    Stage 1 predicts a rough single-channel mask with ``coarse_model``;
    regions above ``region_threshold`` are cropped with ``crop_padding``
    and stage 2 re-predicts each crop with ``detail_model`` (one-hot).
    Per-class scores from overlapping crops are averaged back into the
    full volume and collapsed to a label map in ``case['pred']``.

    NOTE(review): the ``num_classes`` parameter is immediately overwritten
    by ``detail_model.out_channels`` below, so the argument is effectively
    ignored.
    """
    if verbose:
        print('Predicting the rough shape for further prediction...')
    # Stage 1: binary (single-channel) coarse localisation.
    case = predict_case(case,
                        coarse_model,
                        coarse_target_spacing,
                        coarse_normalize_stats,
                        1,
                        coarse_patch_size,
                        step_per_patch,
                        verbose=verbose)
    regions = regions_crop_case(case, region_threshold, crop_padding, 'pred')
    num_classes = detail_model.out_channels
    orig_shape = case['image'].shape[:-1]
    # Full-volume accumulators: summed class scores and per-voxel count of
    # covering region crops.
    result = np.zeros(list(orig_shape)+[num_classes])
    result_n = np.zeros_like(result)
    if verbose:
        print('Cropping regions (%d)...' % len(regions))
    for idx, region in enumerate(regions):
        bbox = region['bbox']
        shape = region['image'].shape[:-1]
        if verbose:
            print('Region {} {} predicting...'.format(idx, shape))
        # Stage 2: detailed prediction of the crop, kept one-hot so scores
        # can be averaged across overlapping crops.
        region = predict_case(region,
                              detail_model,
                              detail_target_spacing,
                              detail_normalize_stats,
                              num_classes,
                              detail_patch_size,
                              step_per_patch,
                              verbose=verbose,
                              one_hot=True)
        # Build matching slice lists for the crop and for the destination
        # volume, clipping the (possibly out-of-bounds, padded) bbox.
        region_slices = []
        result_slices = []
        for i in range(len(bbox)):
            region_slice_min = 0 + max(0 - bbox[i][0], 0)
            region_slice_max = shape[i] - max(bbox[i][1] - orig_shape[i], 0)
            region_slices.append(slice(region_slice_min, region_slice_max))
            origin_slice_min = max(bbox[i][0], 0)
            origin_slice_max = min(bbox[i][1], orig_shape[i])
            result_slices.append(slice(origin_slice_min, origin_slice_max))
        region_slices.append(slice(None))  # keep the class channel intact
        result_slices.append(slice(None))
        result[result_slices] += region['pred'][region_slices]
        result_n[result_slices] += 1
    if verbose:
        print('Merging all regions...')
    # avoid orig_pred_n = 0 (voxels covered by no region keep score 0)
    mask = np.array(result_n > 0)
    result[mask] = result[mask] / result_n[mask]
    if num_classes == 1:
        result = np.squeeze(result, axis=-1)
        result = np.around(result)
    else:
        result = spe.softmax(result, axis=-1)
        result = np.argmax(result, axis=-1)
    case['pred'] = result.astype(np.uint8)
    if verbose:
        print('All done!')
    return case
def cascade_predict(image_file,
                    coarse_model,
                    coarse_target_spacing,
                    coarse_normalize_stats,
                    coarse_patch_size,
                    detail_model,
                    detail_target_spacing,
                    detail_normalize_stats,
                    detail_patch_size,
                    air=-200,
                    num_classes=3,
                    step_per_patch=4,
                    region_threshold=10000,
                    crop_padding=20,
                    label_file=None,
                    verbose=True):
    """Cascade-predict one NIfTI image file end to end.

    Loads the case, reorients and crops it with ``orient_crop_case`` (using
    the ``air`` intensity threshold), runs the coarse->detail cascade, then
    pastes the prediction back into the original image geometry (undoing
    both the crop and the orientation change).

    Returns the loaded case dict with ``case['pred']`` aligned to the
    original voxel grid.
    """
    orig_case = load_case(image_file, label_file)
    case = orient_crop_case(orig_case, air)
    case = cascade_predict_case(case,
                                coarse_model,
                                coarse_target_spacing,
                                coarse_normalize_stats,
                                coarse_patch_size,
                                detail_model,
                                detail_target_spacing,
                                detail_normalize_stats,
                                detail_patch_size,
                                num_classes,
                                step_per_patch,
                                region_threshold,
                                crop_padding,
                                verbose)
    # io_orientation gives the axis permutation/flip applied when the case
    # was reoriented; use it to express the original shape in those axes.
    # NOTE(review): np.int is removed in NumPy >= 1.24 (np.int_ there).
    orient = nib.orientations.io_orientation(orig_case['affine'])
    indices = orient[:, 0].astype(np.int)
    orig_shape = np.array(orig_case['image'].shape[:3])
    orig_shape = np.take(orig_shape, indices)
    bbox = case['bbox']
    orig_pred = np.zeros(orig_shape, dtype=np.uint8)
    # Paste the cropped prediction into the full-size (reoriented) volume,
    # clipping the bbox to the volume bounds.
    result_slices = []
    for i in range(len(bbox)):
        orig_slice_min = max(bbox[i][0], 0)
        orig_slice_max = min(bbox[i][1], orig_shape[i])
        result_slices.append(slice(orig_slice_min, orig_slice_max))
    orig_pred[result_slices] = case['pred']
    # orient
    # NOTE(review): apply_orientation with the forward orientation is used
    # here to map back to the original axis order - confirm against
    # orient_crop_case's convention.
    orig_case['pred'] = nib.orientations.apply_orientation(orig_pred, orient)
    if len(orig_case['image'].shape) == 3:
        # guarantee a trailing channel axis on the image
        orig_case['image'] = np.expand_dims(orig_case['image'], -1)
    return orig_case
def batch_cascade_predict(image_dir,
                          save_dir,
                          coarse_model,
                          coarse_target_spacing,
                          coarse_normalize_stats,
                          coarse_patch_size,
                          detail_model,
                          detail_target_spacing,
                          detail_normalize_stats,
                          detail_patch_size,
                          air=-200,
                          num_classes=3,
                          step_per_patch=4,
                          region_threshold=10000,
                          crop_padding=20,
                          data_range=None):
    """Run cascade_predict over every image file in ``image_dir`` and save
    each prediction to ``save_dir``.

    ``data_range`` optionally restricts the processed file indices.
    """
    files = [p for p in sorted(Path(image_dir).iterdir()) if p.is_file()]
    indices = range(len(files)) if data_range is None else data_range
    for idx in tqdm(indices):
        predicted = cascade_predict(files[idx],
                                    coarse_model,
                                    coarse_target_spacing,
                                    coarse_normalize_stats,
                                    coarse_patch_size,
                                    detail_model,
                                    detail_target_spacing,
                                    detail_normalize_stats,
                                    detail_patch_size,
                                    air,
                                    num_classes,
                                    step_per_patch,
                                    region_threshold,
                                    crop_padding,
                                    None,
                                    False)
        save_pred(predicted, save_dir)
def evaluate_case(case):
    """Return per-class Dice scores between case['pred'] and case['label'].

    Class count is taken from the label's maximum value; class 0
    (background) is skipped.
    """
    class_count = case['label'].max()
    scores = []
    for cls in range(1, class_count + 1):
        pred_mask = np.array(case['pred'] == cls).astype(np.float32)
        label_mask = np.array(case['label'] == cls).astype(np.float32)
        scores.append(dice(torch.tensor(pred_mask), torch.tensor(label_mask)).item())
    return scores
def evaluate(label_file, pred_file):
    """Load a label/prediction NIfTI pair and return per-class Dice scores."""
    case = {
        'label': nib.load(str(label_file)).get_fdata().astype(np.uint8),
        'pred': nib.load(str(pred_file)).get_fdata().astype(np.uint8),
    }
    return evaluate_case(case)
def batch_evaluate(label_dir, pred_dir, data_range=None):
    """Evaluate every label/prediction NIfTI pair from two directories.

    Shows per-case Dice scores as a tqdm postfix, prints the mean Dice per
    label at the end, and returns the list of per-case score lists.
    """
    label_files = sorted(Path(label_dir).glob('*.nii.gz'))
    pred_files = sorted(Path(pred_dir).glob('*.nii.gz'))
    indices = range(len(label_files)) if data_range is None else data_range
    all_scores = []
    progress = tqdm(indices)
    for idx in progress:
        scores = evaluate(label_files[idx], pred_files[idx])
        all_scores.append(scores)
        progress.set_description("Case %d" % idx)
        progress.set_postfix({"label_%d" % (k + 1): s for k, s in enumerate(scores)})
    print('\nThe mean dsc of each label:')
    for k, mean in enumerate(np.array(all_scores).mean(axis=0)):
        print("label_%d: %f" % (k + 1, mean))
    return all_scores
class Subset(torch.utils.data.Subset):
    """A torch Subset that applies an optional transform on item access."""

    def __init__(self, dataset, indices, transform):
        super().__init__(dataset, indices)
        self.transform = transform

    def __getitem__(self, idx):
        sample = self.dataset[self.indices[idx]]
        return self.transform(sample) if self.transform else sample
class Trainer():
    """Training harness: train/validation epochs, LR control, TensorBoard
    metric logging, optional NVIDIA apex mixed precision, checkpointing.

    The dataset indices are shuffled once in __init__ and split into
    train/validation lists according to ``valid_split``.
    """

    def __init__(self,
                 model,
                 optimizer,
                 loss,
                 dataset,
                 batch_size=10,
                 dataloader_kwargs={'num_workers': 2,
                                    'pin_memory': True},
                 valid_split=0.2,
                 num_samples=None,
                 metrics=None,
                 scheduler=None,
                 train_transform=None,
                 valid_transform=None):
        # NOTE(review): mutable default for ``dataloader_kwargs`` - it is
        # only read (spread into a new dict below) so this is harmless
        # today, but a None default would be more robust.
        self.model = model
        self.optimizer = optimizer
        self.loss = loss
        self.dataset = dataset
        self.metrics = metrics            # dict: name -> metric_fn(y_pred, y)
        self.scheduler = scheduler
        self.train_transform = train_transform
        self.valid_transform = valid_transform
        # Random train/validation split of dataset indices.
        dataset_size = len(self.dataset)
        indices = list(range(dataset_size))
        split = int(np.floor(valid_split * dataset_size))
        np.random.shuffle(indices)
        self.train_indices = indices[split:]
        self.valid_indices = indices[:split]
        self.dataloader_kwargs = {'batch_size': batch_size, **dataloader_kwargs}
        self.num_samples = num_samples    # optional cap on samples per epoch
        self.valid_split = valid_split
        self.device = next(model.parameters()).device
        self.best_result = {'loss': float('inf')}
        self.current_epoch = 0
        self.patience_counter = 0         # unused by fit(); kept for callers
        self.amp_state_dict = None        # restored amp state, applied in fit()

    def get_lr(self, idx=0):
        """Return the learning rate of optimizer param group ``idx``."""
        return self.optimizer.param_groups[idx]['lr']

    def set_lr(self, lr, idx=0):
        """Set the learning rate of optimizer param group ``idx``."""
        self.optimizer.param_groups[idx]['lr'] = lr

    def summary(self, input_shape):
        """Print a torchsummary model summary for the given input shape."""
        return summary(self.model, input_shape)

    def batch_loop(self, data_loader, is_train=True):
        """Run one epoch over ``data_loader``.

        Performs forward (plus backward/step when ``is_train``) per batch,
        collects loss and metric values, logs the epoch means to
        TensorBoard when ``self.save_dir`` is set, and returns the mean
        result dict. Requires fit() to have set progress_bar/num_epochs/
        use_amp/save_dir.
        """
        results = []
        self.progress_bar.reset(len(data_loader))
        desc = "Epoch %d/%d (LR %.2g)" % (self.current_epoch+1,
                                          self.num_epochs,
                                          self.get_lr())
        self.progress_bar.set_description(desc)
        for batch_idx, batch in enumerate(data_loader):
            x = batch['image'].to(self.device)
            y = batch['label'].to(self.device)
            # forward
            if is_train:
                self.model.train()
                y_pred = self.model(x)
            else:
                self.model.eval()
                with torch.no_grad():
                    y_pred = self.model(x)
            loss = self.loss(y_pred, y)
            # backward
            if is_train:
                self.optimizer.zero_grad()
                if self.use_amp:
                    # apex amp scales the loss to avoid fp16 underflow
                    with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                        scaled_loss.backward()
                else:
                    loss.backward()
                self.optimizer.step()
            result = {'loss': loss.item()}
            # calc the other metrics
            if self.metrics is not None:
                for key, metric_fn in self.metrics.items():
                    result[key] = metric_fn(y_pred, y).item()
            # NaN-loss batches are shown but excluded from the epoch mean.
            if not torch.isnan(loss):
                results.append(result)
            self.progress_bar.set_postfix(result)
            self.progress_bar.update()
        # Mean of each recorded metric over the epoch.
        mean_result = {}
        for key in results[0].keys():
            mean_result[key] = np.mean(np.array([x[key] for x in results]))
        name = 'train' if is_train else 'valid'
        if self.save_dir is not None:
            writer = SummaryWriter(self.save_dir)
            for key in mean_result.keys():
                writer.add_scalar('%s/%s' % (key, name),
                                  mean_result[key],
                                  self.current_epoch)
            writer.close()
        return mean_result

    def fit(self,
            num_epochs=10,
            save_dir=None,
            use_amp=False,
            opt_level='O1'):
        """Train for ``num_epochs`` epochs, resuming from ``current_epoch``.

        save_dir: basename for TensorBoard logs and the
            '<save_dir>-best.pt' / '<save_dir>-last.pt' checkpoints;
            None disables all saving/logging.
        use_amp / opt_level: enable apex automatic mixed precision.
        """
        # ----------------------
        # initialize
        # ----------------------
        self.num_epochs = num_epochs
        self.use_amp = use_amp
        self.save_dir = save_dir
        if use_amp:
            self.model, self.optimizer = amp.initialize(
                self.model, self.optimizer, opt_level=opt_level)
            # Re-apply amp state stashed by load_checkpoint(), if any.
            if self.amp_state_dict is not None:
                amp.load_state_dict(self.amp_state_dict)
        self.progress_bar = tqdm(total=0)
        # ----------------------
        # prepare data
        # ----------------------
        train_set = Subset(self.dataset, self.train_indices, self.train_transform)
        if self.num_samples is not None:
            # Cap epoch length by sampling with replacement.
            sampler = torch.utils.data.RandomSampler(train_set, True, self.num_samples)
            train_loader = torch.utils.data.DataLoader(train_set,
                                                       sampler=sampler,
                                                       **self.dataloader_kwargs)
        else:
            train_loader = torch.utils.data.DataLoader(train_set,
                                                       shuffle=True,
                                                       **self.dataloader_kwargs)
        if len(self.valid_indices) > 0:
            valid_set = Subset(self.dataset, self.valid_indices, self.valid_transform)
            if self.num_samples is not None:
                # Validation gets a proportional share of the sample budget.
                num_samples = round(self.num_samples * self.valid_split)
                sampler = torch.utils.data.RandomSampler(valid_set, True, num_samples)
                valid_loader = torch.utils.data.DataLoader(valid_set,
                                                           sampler=sampler,
                                                           **self.dataloader_kwargs)
            else:
                valid_loader = torch.utils.data.DataLoader(valid_set,
                                                           **self.dataloader_kwargs)
        else:
            valid_loader = None
        # ----------------------
        # main loop
        # ----------------------
        for epoch in range(self.current_epoch, num_epochs):
            self.current_epoch = epoch
            # train loop
            result = self.batch_loop(train_loader, is_train=True)
            # vaild loop (its result, when present, drives best/scheduler)
            if valid_loader is not None:
                result = self.batch_loop(valid_loader, is_train=False)
            # build-in fn: lr_scheduler
            if self.scheduler is not None:
                if isinstance(self.scheduler, lr_scheduler.ReduceLROnPlateau):
                    self.scheduler.step(result['loss'])
                else:
                    self.scheduler.step()
            # save best (1e-3 tolerance avoids checkpointing on noise)
            if result['loss'] < self.best_result['loss']-1e-3:
                self.best_result = result
                if save_dir is not None:
                    self.save_checkpoint(save_dir+'-best.pt')
            if save_dir is not None:
                self.save_checkpoint(save_dir+'-last.pt')
        self.progress_bar.close()

    def save_checkpoint(self, file_path):
        """Serialise model/optimizer (plus scheduler/amp) state together
        with the bookkeeping needed to resume via load_checkpoint()."""
        checkpoint = {'model_state_dict': self.model.state_dict(),
                      'optimizer_state_dict': self.optimizer.state_dict(),
                      'current_epoch': self.current_epoch,
                      'train_indices': self.train_indices,
                      'valid_indices': self.valid_indices,
                      'best_result': self.best_result}
        if self.scheduler is not None:
            checkpoint['scheduler_state_dict'] = self.scheduler.state_dict()
        if self.use_amp:
            checkpoint['amp_state_dict'] = amp.state_dict()
        torch.save(checkpoint, file_path)

    def load_checkpoint(self, file_path):
        """Restore a checkpoint written by save_checkpoint().

        Training resumes from the epoch after the saved one; amp state is
        stashed and re-applied inside fit() once amp is initialised.
        """
        checkpoint = torch.load(file_path)
        self.model.load_state_dict(checkpoint['model_state_dict'])
        self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        self.current_epoch = checkpoint['current_epoch']+1
        self.train_indices = checkpoint['train_indices']
        self.valid_indices = checkpoint['valid_indices']
        self.best_result = checkpoint['best_result']
        if 'amp_state_dict' in checkpoint:
            self.amp_state_dict = checkpoint['amp_state_dict']
        if 'scheduler_state_dict' in checkpoint and self.scheduler is not None:
            self.scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
# cross valid
# elif num_folds > 1:
# # split the dataset into k-fold
# fold_len = len(dataset) // num_folds
# fold_len_list = []
# for i in range(num_folds-1):
# fold_len_list.append(fold_len)
# fold_len_list.append(len(dataset)-fold_len * (num_folds-1))
# fold_subsets = torch.utils.data.random_split(dataset, fold_len_list)
# fold_metrics = []
# avg_metrics = {}
# self.save('init.pt')
# for i, fold_subset in enumerate(fold_subsets):
# train_subsets = fold_subsets.copy()
# train_subsets.remove(fold_subset)
# train_subset = torch.utils.data.ConcatDataset(train_subsets)
# train_set = DatasetFromSubset(train_subset, tr_transform)
# valid_set = DatasetFromSubset(fold_subset, vd_transform)
# print('Fold %d/%d:' % (i+1, num_folds))
# self.load('init.pt')
# train_kwargs['log_dir'] = '%s_%d' % (log_dir, i)
# metrics = self.train(train_set, valid_set, **train_kwargs)
# fold_metrics.append(metrics)
# # calc the avg
# for name in fold_metrics[0].keys():
# sum_metric = 0
# for fold_metric in fold_metrics:
# sum_metric += fold_metric[name]
# avg_metrics[name] = sum_metric / num_folds
# for i, fold_metric in enumerate(fold_metrics):
# print('Fold %d metrics:\t%s' %
# (i+1, self.metrics_stringify(fold_metric)))
# print('Avg metrics:\t%s' % self.metrics_stringify(avg_metrics))
# manual ctrl @lr_factor @min_lr @patience
# if metrics['Loss'] < best_metrics['Loss']-1e-4:
# if save_dir and save_best:
# self.save(save_dir+'-best.pt')
# best_metrics = metrics
# patience_counter = 0
# elif patience > 0:
# patience_counter += 1
# if patience_counter > patience:
# print("│\n├Loss stopped improving for %d num_epochs." %
# patience_counter)
# patience_counter = 0
# lr = self.get_lr() * lr_factor
# if min_lr and lr < min_lr:
# print("│LR below the min LR, stop training.")
# break
# else:
# print('│Reduce LR to %.3g' % lr)
# self.set_lr(lr)
# def get_lr(self):
# for param_group in self.optimizer.param_groups:
# return param_group['lr']
# def set_lr(self, lr):
# for param_group in self.optimizer.param_groups:
# param_group['lr'] = lr
# # save best & early_stop_patience counter
# if result['loss'] < self.best_result['loss']-1e-3:
# self.best_result = result
# self.patience_counter = 0
# if save_dir and save_best:
# self.save_checkpoint(save_dir+'-best.pt')
# elif early_stop_patience > 0:
# self.patience_counter += 1
# if self.patience_counter > early_stop_patience:
# print(("\nLoss stopped improving for %d num_epochs. "
# "stop training.") % self.patience_counter)
# self.patience_counter = 0
# break
| 36.020804 | 91 | 0.534558 | 8,713 | 0.335386 | 0 | 0 | 0 | 0 | 0 | 0 | 4,438 | 0.17083 |
69bf0e6bb6725641c106635237f21305eb007c18 | 8,285 | py | Python | workers/standard_methods.py | mahmoudthabit/augur | 0370dc983279ad80bff3f731a1a65ca6c8d27245 | [
"MIT"
] | null | null | null | workers/standard_methods.py | mahmoudthabit/augur | 0370dc983279ad80bff3f731a1a65ca6c8d27245 | [
"MIT"
] | null | null | null | workers/standard_methods.py | mahmoudthabit/augur | 0370dc983279ad80bff3f731a1a65ca6c8d27245 | [
"MIT"
] | null | null | null | """ Helper methods constant across all workers """
import requests, datetime, time
import sqlalchemy as s
import pandas as pd
def connect_to_broker(self, logging):
    """Register this worker with the Augur broker, retrying on failure.

    Posts the worker's ``self.specs`` to the broker's
    ``/api/unstable/workers`` endpoint.  Up to 5 attempts are made, sleeping
    10 seconds between retries.

    Args:
        logging: logger-like object used for progress/error messages.

    Raises:
        SystemExit: if the broker is still unreachable after 5 attempts.
    """
    connected = False
    for attempt in range(5):
        try:
            logging.info("attempt {}".format(attempt))
            if attempt > 0:
                # Give the broker a moment to come up before retrying.
                time.sleep(10)
            requests.post('http://{}:{}/api/unstable/workers'.format(
                self.config['broker_host'], self.config['broker_port']), json=self.specs)
            logging.info("Connection to the broker was successful")
            connected = True
            break
        except requests.exceptions.ConnectionError:
            logging.error('Cannot connect to the broker. Trying again...')
    if not connected:
        # BUGFIX: the original called sys.exit() but this module never imports
        # sys, so exhausting the retries raised NameError instead of exiting.
        # raise SystemExit is equivalent and needs no extra import.
        raise SystemExit('Could not connect to the broker after 5 attempts! Quitting...')
def record_model_process(self, logging, repo_id, model):
    """Write an in-progress ('Stopped') row to the worker history table.

    When this worker is finishing a previously started task, the existing
    history row is updated in place; otherwise a fresh row is inserted.
    ``self.history_id`` is refreshed from the primary key of the row that
    was written.
    """
    history_row = {
        "repo_id": repo_id,
        "worker": self.config['id'],
        "job_model": model,
        "oauth_id": self.oauths[0]['oauth_id'],
        "timestamp": datetime.datetime.now(),
        "status": "Stopped",
        "total_results": self.results_counter
    }
    # Update the current row when finishing a task, insert a new one otherwise.
    if self.finishing_task:
        statement = self.history_table.update().where(
            self.history_table.c.history_id == self.history_id).values(history_row)
    else:
        statement = self.history_table.insert().values(history_row)
    result = self.helper_db.execute(statement)
    logging.info("Record incomplete history tuple: {}".format(result.inserted_primary_key))
    self.history_id = int(result.inserted_primary_key[0])
def register_task_completion(self, logging, entry_info, repo_id, model):
    """Record a successfully finished task and notify the broker.

    Marks the current history row as 'Success', refreshes the job-process
    row for *model*, posts the completion message back to the broker and
    finally resets ``self.results_counter`` for the next task.
    """
    # Message sent back to the broker once bookkeeping is done.
    completion_message = {
        'worker_id': self.config['id'],
        'job_type': self.working_on,
        'repo_id': repo_id,
        'github_url': entry_info['given']['github_url'],
        'job_model': model
    }
    # Close out the history row opened by record_model_process().
    history_row = {
        "repo_id": repo_id,
        "worker": self.config['id'],
        "job_model": model,
        "oauth_id": self.oauths[0]['oauth_id'],
        "timestamp": datetime.datetime.now(),
        "status": "Success",
        "total_results": self.results_counter
    }
    self.helper_db.execute(self.history_table.update().where(
        self.history_table.c.history_id==self.history_id).values(history_row))
    logging.info("Recorded job completion for: " + str(completion_message) + "\n")
    # Refresh the job-process bookkeeping for this model.
    job_row = {
        "since_id_str": repo_id,
        "last_count": self.results_counter,
        "last_run": datetime.datetime.now(),
        "analysis_state": 0
    }
    self.helper_db.execute(self.job_table.update().where(
        self.job_table.c.job_model==model).values(job_row))
    logging.info("Updated job process for model: " + model + "\n")
    # Tell the broker this task is done.
    logging.info("Telling broker we completed task: " + str(completion_message) + "\n\n" +
        "This task inserted: " + str(self.results_counter) + " tuples.\n\n")
    requests.post('http://{}:{}/api/unstable/completed_task'.format(
        self.config['broker_host'],self.config['broker_port']), json=completion_message)
    # Start the next task from a clean tuple count.
    self.results_counter = 0
def register_task_failure(self, logging, task, repo_id, e):
    """Log a failed task, notify the broker and reset worker bookkeeping.

    Records the failure in the worker history table (status 'Error'),
    refreshes the job-process row and posts the failed task back to the
    broker.

    Args:
        logging: logger-like object for progress/error messages.
        task (dict): the task definition that failed.  Mutated: its
            'worker_id' key is set to this worker's id before posting.
        repo_id (int): repository id of the failed task.
            NOTE(review): this parameter is immediately shadowed by the
            database lookup below — confirm whether it can be dropped.
        e (Exception): the exception that caused the failure.
    """
    logging.info("Worker ran into an error for task: {}".format(task))
    logging.info("Error encountered: " + repr(e))
    logging.info(f'This task inserted {self.results_counter} tuples before failure.')
    logging.info("Notifying broker and logging task failure in database...\n")
    github_url = task['given']['github_url']
    """ Query all repos with repo url of given task """
    # NOTE(review): the repo URL is interpolated with str.format rather than
    # a bound parameter — a URL containing a quote would break/inject this
    # query; consider binding it via the text() parameters instead.
    repoUrlSQL = s.sql.text("""
        SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}'
        """.format(github_url))
    repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id'])
    task['worker_id'] = self.config['id']
    # Best-effort broker notification: a dead broker must not mask the
    # original task failure, so errors here are logged and swallowed.
    try:
        requests.post("http://{}:{}/api/unstable/task_error".format(
            self.config['broker_host'],self.config['broker_port']), json=task)
    except requests.exceptions.ConnectionError:
        logging.error('Could not send task failure message to the broker')
    except Exception:
        logging.exception('An error occured while informing broker about task failure')
    # Add to history table
    task_history = {
        "repo_id": repo_id,
        "worker": self.config['id'],
        "job_model": task['models'][0],
        "oauth_id": self.oauths[0]['oauth_id'],
        "timestamp": datetime.datetime.now(),
        "status": "Error",
        "total_results": self.results_counter
    }
    self.helper_db.execute(self.history_table.update().where(self.history_table.c.history_id==self.history_id).values(task_history))
    logging.info("Recorded job error in the history table for: " + str(task) + "\n")
    # Update job process table
    updated_job = {
        "since_id_str": repo_id,
        "last_count": self.results_counter,
        "last_run": datetime.datetime.now(),
        "analysis_state": 0
    }
    self.helper_db.execute(self.job_table.update().where(self.job_table.c.job_model==task['models'][0]).values(updated_job))
    logging.info("Updated job process for model: " + task['models'][0] + "\n")
    # Reset results counter for next task
    self.results_counter = 0
def update_gh_rate_limit(self, logging, response):
    """Track the remaining GitHub API quota and rotate OAuth keys as needed.

    Updates the rate limit of the key currently in use (``self.oauths[0]``)
    from *response*'s headers.  When that key is exhausted, every known key
    is probed; the key with the most remaining requests (ties broken by the
    shortest time until reset) is promoted to index 0 and installed in
    ``self.headers``.  If every key is exhausted, sleep until the best one
    resets.

    Args:
        logging: logger-like object for progress messages.
        response: a ``requests`` response whose headers may carry
            ``X-RateLimit-Remaining``.
    """
    # Try to read the rate limit from the response headers; sometimes the
    # header is missing or malformed (GitHub issue), in which case we just
    # decrement the last known count.
    try:
        self.oauths[0]['rate_limit'] = int(response.headers['X-RateLimit-Remaining'])
        logging.info("Recieved rate limit from headers\n")
    except (KeyError, ValueError):
        # BUGFIX: was a bare `except:`, which swallowed every exception type
        # (even KeyboardInterrupt); only a missing header (KeyError) or an
        # unparseable value (ValueError) should trigger the fallback.
        self.oauths[0]['rate_limit'] -= 1
        logging.info("Headers did not work, had to decrement\n")
    logging.info("Updated rate limit, you have: " +
        str(self.oauths[0]['rate_limit']) + " requests remaining.\n")
    if self.oauths[0]['rate_limit'] <= 0:
        logging.info("Rate limit exceeded, checking for other available keys to use.\n")
        # We will be finding oauth with the highest rate limit left out of our list of oauths
        new_oauth = self.oauths[0]
        # Endpoint to hit solely to retrieve rate limit information from headers of the response
        url = "https://api.github.com/users/gabe-heim"
        for oauth in self.oauths:
            logging.info("Inspecting rate limit info for oauth: {}\n".format(oauth))
            self.headers = {'Authorization': 'token %s' % oauth['access_token']}
            response = requests.get(url=url, headers=self.headers)
            oauth['rate_limit'] = int(response.headers['X-RateLimit-Remaining'])
            oauth['seconds_to_reset'] = (datetime.datetime.fromtimestamp(int(response.headers['X-RateLimit-Reset'])) - datetime.datetime.now()).total_seconds()
            # Update oauth to switch to if a higher limit (or the same limit
            # with a shorter wait) is found.
            if oauth['rate_limit'] > new_oauth['rate_limit']:
                logging.info("Higher rate limit found in oauth: {}".format(oauth))
                new_oauth = oauth
            elif oauth['rate_limit'] == new_oauth['rate_limit'] and oauth['seconds_to_reset'] < new_oauth['seconds_to_reset']:
                logging.info("Lower wait time found in oauth with same rate limit: {}".format(oauth))
                new_oauth = oauth
        if new_oauth['rate_limit'] <= 0:
            logging.info("No oauths with >0 rate limit were found, waiting for oauth with smallest wait time: {}".format(new_oauth))
            # BUGFIX: clamp at 0 — a reset timestamp already in the past
            # yields a negative wait, and time.sleep() raises ValueError on
            # negative arguments.
            time.sleep(max(new_oauth['seconds_to_reset'], 0))
        # Change headers to be using the new oauth's key
        self.headers = {'Authorization': 'token %s' % new_oauth['access_token']}
        # Make new oauth the 0th element in self.oauths so we know which one is in use
        index = self.oauths.index(new_oauth)
        self.oauths[0], self.oauths[index] = self.oauths[index], self.oauths[0]
        logging.info("Using oauth: {}".format(self.oauths[0]))
| 44.783784 | 159 | 0.646952 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,216 | 0.388171 |
69c0d64af0daa3c05b8a8e97e012bb20be7a3134 | 147 | py | Python | study/python-brasil/exercises/sequential-structure/sequential-structure - 006.py | gustavomarquezinho/python | e36779aa5c4bfaf88c587f05db5bd447fd41e4a2 | [
"MIT"
] | null | null | null | study/python-brasil/exercises/sequential-structure/sequential-structure - 006.py | gustavomarquezinho/python | e36779aa5c4bfaf88c587f05db5bd447fd41e4a2 | [
"MIT"
] | null | null | null | study/python-brasil/exercises/sequential-structure/sequential-structure - 006.py | gustavomarquezinho/python | e36779aa5c4bfaf88c587f05db5bd447fd41e4a2 | [
"MIT"
] | null | null | null | # 006 - Faça um programa que peça o raio de um círculo, calcule e mostre sua área.
# Read the circle radius from stdin, then compute and show its area
# (using the 3.14 approximation of pi required by the exercise).
raio = float(input("Raio círculo: "))
area = 3.14 * raio ** 2
print(f'Área: {area}')
69c0ea03d21d79afcf9d113b44197d452321d747 | 16,034 | py | Python | Createmodele_V13_1.1.py | pad-awan/domotiquesante | c91d97065bfcf9816367263266c608cfdc0c8009 | [
"CNRI-Python"
] | null | null | null | Createmodele_V13_1.1.py | pad-awan/domotiquesante | c91d97065bfcf9816367263266c608cfdc0c8009 | [
"CNRI-Python"
] | null | null | null | Createmodele_V13_1.1.py | pad-awan/domotiquesante | c91d97065bfcf9816367263266c608cfdc0c8009 | [
"CNRI-Python"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 09 22:25:07 2019
@author: arnaudhub
"""
#import pandas as pd
from sqlalchemy import create_engine
from sqlalchemy.sql import text
import configparser,os
from urllib import parse
#import sql.connector
config = configparser.ConfigParser()
config.read_file(open(os.path.expanduser("~/Bureau/OBJDOMO.cnf")))
DB = "OBJETDOMO_V13_1.1?charset=utf8"
CNF="OBJDOMO"
engine = create_engine("mysql://%s:%s@%s/%s" % (config[CNF]['user'], parse.quote_plus(config[CNF]['password']), config[CNF]['host'], DB))
user = config['OBJDOMO']['user']
password=config['OBJDOMO']['password']
import mysql.connector
from mysql.connector import Error
try:
connection = mysql.connector.connect(host="127.0.0.1",
database="OBJETDOMO_V13_1.1",
user=user,
password=password)
cursor = connection.cursor()
cursor.execute("""SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0;""")
cursor.execute("""SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0;""")
cursor.execute("""SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='TRADITIONAL,ALLOW_INVALID_DATES';""")
cursor.execute("""DROP SCHEMA IF EXISTS `OBJETDOMO_V13_1.1`;""")
print("DROP SCHEMA")
cursor.execute("""CREATE SCHEMA IF NOT EXISTS `OBJETDOMO_V13_1.1` DEFAULT CHARACTER SET utf8 ;""")
cursor.execute("""USE `OBJETDOMO_V13_1.1`;""")
cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_A_TYPE_ADRESSE_TAD` ;""")
cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_A_TYPE_ADRESSE_TAD` (
`TAD_ID` INT NOT NULL AUTO_INCREMENT,
`TAD_LIBELLE` VARCHAR(45) NOT NULL,
PRIMARY KEY (`TAD_ID`))
ENGINE = InnoDB;""")
print("T_A_TYPE_ADRESSE_TAD Table created successfully ")
cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_R_GENRE_GEN` ;""")
cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_R_GENRE_GEN` (
`GEN_ID` INT NOT NULL AUTO_INCREMENT,
`GEN_LIBELLE` VARCHAR(16) NOT NULL,
PRIMARY KEY (`GEN_ID`))
ENGINE = InnoDB;""")
print("T_R_GENRE_GEN Table created successfully ")
cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_A_STATUT_STT` ;""")
cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_A_STATUT_STT` (
`STT_ID` INT NOT NULL AUTO_INCREMENT,
`STT_LIBELLE` VARCHAR(45) NOT NULL,
`STT_TYPE` VARCHAR(45) NOT NULL,
PRIMARY KEY (`STT_ID`))
ENGINE = InnoDB;""")
print("T_A_STATUT_STT Table created successfully ")
cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_E_PERSONNEPHYSIQUE_PRS` ;""")
cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_E_PERSONNEPHYSIQUE_PRS` (
`PRS_ID` INT NOT NULL AUTO_INCREMENT,
`PRS_NOM` VARCHAR(40) NOT NULL,
`PRS_PRENOM` VARCHAR(40) NOT NULL,
`GEN_ID` INT NOT NULL,
`PRS_NOTES` VARCHAR(300) NULL,
`STT_ID` INT NOT NULL,
PRIMARY KEY (`PRS_ID`),
INDEX `fk_TE_PERSONNE_PRS_1_idx` (`GEN_ID` ASC),
INDEX `fk_TE_PERSONNE_PRS_2_idx` (`STT_ID` ASC),
INDEX `index4` (`PRS_NOM` ASC, `PRS_PRENOM` ASC),
CONSTRAINT `fk_TE_PERSONNE_PRS_1`
FOREIGN KEY (`GEN_ID`)
REFERENCES `OBJETDOMO_V13_1.1`.`T_R_GENRE_GEN` (`GEN_ID`)
ON DELETE NO ACTION
ON UPDATE NO ACTION,
CONSTRAINT `fk_TE_PERSONNE_PRS_2`
FOREIGN KEY (`STT_ID`)
REFERENCES `OBJETDOMO_V13_1.1`.`T_A_STATUT_STT` (`STT_ID`)
ON DELETE NO ACTION
ON UPDATE NO ACTION)
ENGINE = InnoDB;""")
print('T_E_PERSONNEPHYSIQUE_PRS Table created successfully')
cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_A_VILLE_CITY` ;""")
cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_A_VILLE_CITY` (
`CITY_ID` INT NOT NULL AUTO_INCREMENT,
`CITY_CODEPOSTAL` CHAR(5) NOT NULL,
`CITY_COMMUNE` VARCHAR(60) NOT NULL,
PRIMARY KEY (`CITY_ID`),
INDEX `index2` (`CITY_CODEPOSTAL` ASC, `CITY_COMMUNE` ASC))
ENGINE = InnoDB;""")
print("T_A_VILLE_CITY Table created successfully ")
cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_E_ADRESSEPOSTALE_ADR` ;""")
cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_E_ADRESSEPOSTALE_ADR` (
`ADR_ID` INT NOT NULL AUTO_INCREMENT,
`ADR_VOIEPRINCIPALE` VARCHAR(38) NOT NULL,
`ADR_COMPLEMENTIDENTIFICATION` VARCHAR(38) NOT NULL,
`CITY_ID` INT NOT NULL,
`TAD_ID` INT NOT NULL COMMENT ' ',
PRIMARY KEY (`ADR_ID`),
INDEX `fk_TE_ADRESSE_ADR_1_idx` (`TAD_ID` ASC),
INDEX `fk_TE_ADRESSEPOSTALE_ADR_1_idx` (`CITY_ID` ASC),
CONSTRAINT `fk_TE_ADRESSE_ADR_1`
FOREIGN KEY (`TAD_ID`)
REFERENCES `OBJETDOMO_V13_1.1`.`T_A_TYPE_ADRESSE_TAD` (`TAD_ID`)
ON DELETE NO ACTION
ON UPDATE NO ACTION,
CONSTRAINT `fk_TE_ADRESSEPOSTALE_ADR_1`
FOREIGN KEY (`CITY_ID`)
REFERENCES `OBJETDOMO_V13_1.1`.`T_A_VILLE_CITY` (`CITY_ID`)
ON DELETE NO ACTION
ON UPDATE NO ACTION)
ENGINE = InnoDB;""")
print('T_E_ADRESSEPOSTALE_ADR Table created successfully')
cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_R_TYPEPRODUIT_TPDT` ;""")
cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_R_TYPEPRODUIT_TPDT` (
`TPDT_ID` INT NOT NULL AUTO_INCREMENT,
`TPDT_CATEGORIE` VARCHAR(60) NULL,
PRIMARY KEY (`TPDT_ID`))
ENGINE = InnoDB;""")
print('T_R_TYPEPRODUIT_TPDT Table created successfully')
cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_E_PRODUIT_PDT` ;""")
cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_E_PRODUIT_PDT` (
`PDT_SERIALNUMBER` INT NOT NULL AUTO_INCREMENT,
`PDT_NOM` VARCHAR(45) NOT NULL,
`PDT_MARQUE` VARCHAR(45) NOT NULL,
`PDT_VALEUR` VARCHAR(45) NOT NULL,
`PDT_HEURE` VARCHAR(45) NOT NULL,
`PDT_DUREE` VARCHAR(45) NOT NULL,
`PDT_SOURCE` VARCHAR(45) NOT NULL,
`PDT_REGLE` VARCHAR(45) NOT NULL,
`TPDT_ID` INT NOT NULL,
PRIMARY KEY (`PDT_SERIALNUMBER`),
INDEX `index2` (`PDT_NOM` ASC, `PDT_MARQUE` ASC),
INDEX `fk_TE_PRODUIT_PDT_1_idx` (`TPDT_ID` ASC),
CONSTRAINT `fk_TE_PRODUIT_PDT_1`
FOREIGN KEY (`TPDT_ID`)
REFERENCES `OBJETDOMO_V13_1.1`.`T_R_TYPEPRODUIT_TPDT` (`TPDT_ID`)
ON DELETE NO ACTION
ON UPDATE NO ACTION)
ENGINE = InnoDB;""")
print('T_E_PRODUIT_PDT Table created successfully')
cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_R_AUTHENTIFICATION_AUTH` ;""")
cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_R_AUTHENTIFICATION_AUTH` (
`AUTH_ID` INT NOT NULL AUTO_INCREMENT,
`AUTH_USERNAME` VARCHAR(45) NOT NULL,
`AUTH_PASSWORD` VARCHAR(45) NOT NULL,
`PRS_ID` INT NOT NULL,
PRIMARY KEY (`AUTH_ID`),
INDEX `index2` (`AUTH_USERNAME` ASC, `AUTH_PASSWORD` ASC),
INDEX `fk_TR_AUTHENTIFICATION_AUTH_1_idx` (`PRS_ID` ASC),
CONSTRAINT `fk_TR_AUTHENTIFICATION_AUTH_1`
FOREIGN KEY (`PRS_ID`)
REFERENCES `OBJETDOMO_V13_1.1`.`T_E_PERSONNEPHYSIQUE_PRS` (`PRS_ID`)
ON DELETE NO ACTION
ON UPDATE NO ACTION)
ENGINE = InnoDB;""")
print('T_R_AUTHENTIFICATION_AUTH Table created successfully')
cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_E_LOCALISATIONPRODUIT_LOC` ;""")
cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_E_LOCALISATIONPRODUIT_LOC` (
`LOC_ID` INT NOT NULL AUTO_INCREMENT,
`LOC_LIBELLE` VARCHAR(45) NOT NULL,
`LOC_TYPE` VARCHAR(45) NOT NULL,
`LOC_NOTES` VARCHAR(300) NULL,
PRIMARY KEY (`LOC_ID`),
INDEX `index2` (`LOC_LIBELLE` ASC, `LOC_TYPE` ASC))
ENGINE = InnoDB;""")
print('T_E_LOCALISATIONPRODUIT_LOC Table created successfully')
cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_R_TYPEINTERVENTION_TPI` ;""")
cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_R_TYPEINTERVENTION_TPI` (
`TPI_ID` INT NOT NULL AUTO_INCREMENT,
`TPI_LIBELLE` VARCHAR(45) NOT NULL,
`TPI_TYPE` VARCHAR(45) NOT NULL,
PRIMARY KEY (`TPI_ID`),
INDEX `index2` (`TPI_LIBELLE` ASC, `TPI_TYPE` ASC))
ENGINE = InnoDB;""")
print('T_R_TYPEINTERVENTION_TPI Table created successfully')
cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_A_AUTONOMIE_AUT` ;""")
cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_A_AUTONOMIE_AUT` (
`AUT_ID` INT NOT NULL AUTO_INCREMENT,
`AUT_DEPENDANCE` VARCHAR(5) NOT NULL,
`AUT_DEFINITION` VARCHAR(105) NOT NULL,
PRIMARY KEY (`AUT_ID`),
INDEX `index2` (`AUT_DEPENDANCE` ASC, `AUT_DEFINITION` ASC))
ENGINE = InnoDB;""")
print('T_A_AUTONOMIE_AUT Table created successfully')
cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_R_BENEFICIAIRE_CTT` ;""")
cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_R_BENEFICIAIRE_CTT` (
`CTT_ID` INT NOT NULL AUTO_INCREMENT,
`CTT_INTITULECONTRAT` VARCHAR(45) NOT NULL,
`CTT_REFCONTRAT` VARCHAR(45) NOT NULL,
`AUT_ID` INT NOT NULL,
`CTT_DEBUTCONTRAT` DATE NOT NULL,
`CTT_DATENAISSANCEBENEFICIAIRE` DATE NOT NULL,
`CTT_TEL` VARCHAR(45) NULL,
`PRS_ID` INT NOT NULL,
PRIMARY KEY (`CTT_ID`),
INDEX `fk_TR_CONTRAT_CTT_1_idx` (`AUT_ID` ASC),
INDEX `fk_TR_CONTRATBENEFICIAIRE_CTT_TE_PERSONNE_PRS1_idx` (`PRS_ID` ASC),
CONSTRAINT `fk_TR_CONTRAT_CTT_1`
FOREIGN KEY (`AUT_ID`)
REFERENCES `OBJETDOMO_V13_1.1`.`T_A_AUTONOMIE_AUT` (`AUT_ID`)
ON DELETE NO ACTION
ON UPDATE NO ACTION,
CONSTRAINT `fk_TE_PERSONNE_PRS1`
FOREIGN KEY (`PRS_ID`)
REFERENCES `OBJETDOMO_V13_1.1`.`T_E_PERSONNEPHYSIQUE_PRS` (`PRS_ID`)
ON DELETE NO ACTION
ON UPDATE NO ACTION)
ENGINE = InnoDB;""")
print('T_R_BENEFICIAIRE_CTT Table created successfully')
cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_E_INTERVENTION_INT` ;""")
cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_E_INTERVENTION_INT` (
`INT_ID` INT NOT NULL AUTO_INCREMENT,
`ADR_ID` INT NOT NULL,
`INT_DATEINTERVENTION` DATE NOT NULL,
`INT_PRESENCEANIMALMOYEN` TINYINT(1) NOT NULL DEFAULT 0,
`NOTES` VARCHAR(300) NULL,
`CTT_ID` INT NOT NULL,
`TPI_ID` INT NOT NULL,
PRIMARY KEY (`INT_ID`),
INDEX `fk_TR_INTERVENTION_INT_1_idx` (`TPI_ID` ASC),
INDEX `fk_TR_INTERVENTION_INT_2_idx` (`CTT_ID` ASC),
CONSTRAINT `fk_TR_INTERVENTION_INT_1`
FOREIGN KEY (`TPI_ID`)
REFERENCES `OBJETDOMO_V13_1.1`.`TR_TYPEINTERVENTION_TPI` (`TPI_ID`)
ON DELETE NO ACTION
ON UPDATE NO ACTION,
CONSTRAINT `fk_TR_INTERVENTION_INT_2`
FOREIGN KEY (`CTT_ID`)
REFERENCES `OBJETDOMO_V13_1.1`.`T_R_BENEFICIAIRE_CTT` (`CTT_ID`)
ON DELETE NO ACTION
ON UPDATE NO ACTION)
ENGINE = InnoDB;""")
print('T_E_INTERVENTION_INT Table created successfully')
##############
cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_R_INTERCONNEXION_INTCO` ;""")
cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_R_INTERCONNEXION_INTCO` (
`INTCO_ID` INT NOT NULL AUTO_INCREMENT,
`DATEEVENEMENT` DATETIME(6) NOT NULL,
`VALEUR` VARCHAR(45) NOT NULL,
`PDT_ID` INT NOT NULL,
`INTCO_ADRESSEIP` VARCHAR(20) NOT NULL,
PRIMARY KEY (`INTCO_ID`),
INDEX `fk_TR_COMMUNICATION_COM_1_idx` (`PDT_ID` ASC),
CONSTRAINT `fk_TR_COMMUNICATION_COM_1`
FOREIGN KEY (`PDT_ID`)
REFERENCES `OBJETDOMO_V13_1.1`.`T_E_PRODUIT_PDT` (`PDT_SERIALNUMBER`)
ON DELETE NO ACTION
ON UPDATE NO ACTION)
ENGINE = InnoDB;""")
print('T_R_INTERCONNEXION_INTCO Table created successfully')
cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_J_CTT_ADR_PDT_INT` ;""")
cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_J_CTT_ADR_PDT_INT` (
`PDT_SERIALNUMBER` INT NOT NULL,
`INT_ID` INT NOT NULL,
`NOTES` VARCHAR(300) NULL,
`LOC_ID` INT NOT NULL,
`CTT_ID` INT NOT NULL,
`ADR_ID` INT NOT NULL,
INDEX `fk_TJ_CTT_ADR_PDT_INT_2_idx` (`LOC_ID` ASC),
INDEX `fk_TJ_CTT_ADR_PDT_INT_3_idx` (`PDT_SERIALNUMBER` ASC),
INDEX `fk_TJ_CTT_ADR_PDT_INT_4_idx` (`INT_ID` ASC),
INDEX `fk_TJ_CTT_ADR_PDT_INT_5_idx` (`CTT_ID` ASC),
INDEX `fk_TJ_CTT_ADR_PDT_INT_1_idx` (`ADR_ID` ASC),
CONSTRAINT `fk_TJ_CTT_ADR_PDT_INT_2`
FOREIGN KEY (`LOC_ID`)
REFERENCES `OBJETDOMO_V13_1.1`.`TE_LOCALISATIONPRODUIT_LOC` (`LOC_ID`)
ON DELETE NO ACTION
ON UPDATE NO ACTION,
CONSTRAINT `fk_TJ_CTT_ADR_PDT_INT_3`
FOREIGN KEY (`PDT_SERIALNUMBER`)
REFERENCES `OBJETDOMO_V13_1.1`.`T_E_PRODUIT_PDT` (`PDT_SERIALNUMBER`)
ON DELETE NO ACTION
ON UPDATE NO ACTION,
CONSTRAINT `fk_TJ_CTT_ADR_PDT_INT_4`
FOREIGN KEY (`INT_ID`)
REFERENCES `OBJETDOMO_V13_1.1`.`T_E_INTERVENTION_INT` (`INT_ID`)
ON DELETE NO ACTION
ON UPDATE NO ACTION,
CONSTRAINT `fk_TJ_CTT_ADR_PDT_INT_5`
FOREIGN KEY (`CTT_ID`)
REFERENCES `OBJETDOMO_V13_1.1`.`T_R_BENEFICIAIRE_CTT` (`CTT_ID`)
ON DELETE NO ACTION
ON UPDATE NO ACTION,
CONSTRAINT `fk_TJ_CTT_ADR_PDT_INT_1`
FOREIGN KEY (`ADR_ID`)
REFERENCES `OBJETDOMO_V13_1.1`.`T_E_ADRESSEPOSTALE_ADR` (`ADR_ID`)
ON DELETE NO ACTION
ON UPDATE NO ACTION)
ENGINE = InnoDB;""")
print("table jointure T_J_CTT_ADR_PDT_INT créée ")
cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_E_PERSONNEMORALE_PEM` ;""")
cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_E_PERSONNEMORALE_PEM` (
`PEM_NUMEROSIREN` INT NOT NULL,
`PEM_RAISONSOCIALE` VARCHAR(45) NOT NULL,
`PEM_TYPEACTIVITE` VARCHAR(60) NOT NULL,
`PEM_SIRET` VARCHAR(45) NULL,
PRIMARY KEY (`PEM_NUMEROSIREN`),
INDEX `index2` (`PEM_RAISONSOCIALE` ASC))
ENGINE = InnoDB;""")
print('T_E_PERSONNEMORALE_PEM créée')
cursor.execute("""DROP TABLE IF EXISTS `OBJETDOMO_V13_1.1`.`T_J_EMPLOYE_EMP` ;""")
cursor.execute("""CREATE TABLE IF NOT EXISTS `OBJETDOMO_V13_1.1`.`T_J_EMPLOYE_EMP` (
`EMP_ID` INT NOT NULL,
`PEM_ID` INT NOT NULL,
`INT_ID` INT NOT NULL,
`EMP_TELEPHONE` CHAR(15) NOT NULL,
`EMP_EMAIL` VARCHAR(45) NOT NULL,
INDEX `fk_TE_PRESTATAIRE_PREST_2_idx` (`PEM_ID` ASC),
INDEX `fk_TE_PRESTATAIRE_PREST_3_idx` (`INT_ID` ASC),
CONSTRAINT `fk_TE_PRESTATAIRE_PREST_1`
FOREIGN KEY (`EMP_ID`)
REFERENCES `OBJETDOMO_V13_1.1`.`T_E_PERSONNEPHYSIQUE_PRS` (`PRS_ID`)
ON DELETE NO ACTION
ON UPDATE NO ACTION,
CONSTRAINT `fk_TE_PRESTATAIRE_PREST_2`
FOREIGN KEY (`PEM_ID`)
REFERENCES `OBJETDOMO_V13_1.1`.`T_E_PERSONNEMORALE_PEM` (`PEM_NUMEROSIREN`)
ON DELETE NO ACTION
ON UPDATE NO ACTION,
CONSTRAINT `fk_TE_PRESTATAIRE_PREST_3`
FOREIGN KEY (`INT_ID`)
REFERENCES `OBJETDOMO_V13_1.1`.`T_E_INTERVENTION_INT` (`INT_ID`)
ON DELETE NO ACTION
ON UPDATE NO ACTION)
ENGINE = InnoDB;""")
print('T_J_EMPLOYE_EMP Table created successfully')
cursor.execute("""SET SQL_MODE=@OLD_SQL_MODE;""")
cursor.execute("""SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS;""")
cursor.execute("""SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS;""")
except mysql.connector.Error as error:
print("Failed to create table in MySQL: {}".format(error))
finally:
if (connection.is_connected()):
cursor.close()
connection.close()
print("MySQL connection is closed")
| 43.102151 | 137 | 0.674192 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 13,998 | 0.872802 |
69c5657dfbcc05e24b0f6da32143538ee6f1886c | 10,002 | py | Python | multiml/agent/pytorch/pytorch_asngnas.py | UTokyo-ICEPP/multiml | 3dce96492c90bb2bc9c2d4ccfd66eb13d652a520 | [
"Apache-2.0"
] | 7 | 2021-04-16T03:05:25.000Z | 2021-12-17T06:04:13.000Z | multiml/agent/pytorch/pytorch_asngnas.py | UTokyo-ICEPP/multiml | 3dce96492c90bb2bc9c2d4ccfd66eb13d652a520 | [
"Apache-2.0"
] | 6 | 2021-04-21T10:17:14.000Z | 2021-06-30T06:18:41.000Z | multiml/agent/pytorch/pytorch_asngnas.py | UTokyo-ICEPP/multiml | 3dce96492c90bb2bc9c2d4ccfd66eb13d652a520 | [
"Apache-2.0"
] | 5 | 2021-04-15T06:38:04.000Z | 2021-09-05T14:30:05.000Z | from multiml import logger
from multiml.task.pytorch import PytorchASNGNASTask
from multiml.task.pytorch import PytorchASNGNASBlockTask
from . import PytorchConnectionRandomSearchAgent
from multiml.task.pytorch.datasets import StoreGateDataset, NumpyDataset
import numpy as np
class PytorchASNGNASAgent(PytorchConnectionRandomSearchAgent):
"""Agent packing subtasks using Pytorch ASNG-NAS Model."""
def __init__(
self,
verbose=1,
num_epochs=1000,
max_patience=5,
batch_size={
'type': 'equal_length',
'length': 500,
'test': 100
},
asng_args={
'lam': 2,
'delta': 0.0,
'alpha': 1.5,
'clipping_value': None,
'range_restriction': True
},
#lam=2, delta_init_factor=1, alpha = 1.5, clipping_value = None,
optimizer=None,
optimizer_args=None,
scheduler=None,
scheduler_args=None,
**kwargs):
"""
Args:
training_choiceblock_model (bool): Training choiceblock model after connecting submodels
**kwargs: Arbitrary keyword arguments
"""
super().__init__(**kwargs)
self.do_pretraining = kwargs['do_pretraining']
self._verbose = verbose
self._num_epochs = num_epochs
self.asng_args = asng_args
self.batch_size = batch_size
self._max_patience = max_patience
self._optimizer = optimizer
self._optimizer_args = optimizer_args
self._scheduler = scheduler
self._scheduler_args = scheduler_args
# this variable will be set in _build_block funciton
self._loss_weights = {}
    @logger.logging
    def execute(self):
        """Run the ASNG-NAS search, retrain the best model and collect results.

        Currently, only categorical ASNG-NAS is implemented.  The flow is:
        build one ASNG block per task, train the combined ASNG task, pick the
        most likely submodel per block, retrain that best combination as
        'ASNG-NAS-Final' and record metrics/thetas in ``self.results_json``.
        """
        asng_block_list, task_ids = self._build_disconnected_task_block_list()
        asng_task = PytorchASNGNASTask(
            asng_args=self.asng_args,
            subtasks=asng_block_list,
            variable_mapping=self._connectiontask_args["variable_mapping"],
            saver=self._saver,
            device=self._connectiontask_args['device'],
            gpu_ids=None,
            amp=False,  # expert option
            metrics=self._connectiontask_args["metrics"],
            verbose=self._verbose,
            num_epochs=self._num_epochs,
            batch_size=self.batch_size,
            max_patience=self._max_patience,
            loss_weights=self._loss_weights,
            optimizer=self._optimizer,
            optimizer_args=self._optimizer_args,
            scheduler=self._scheduler,
            scheduler_args=self._scheduler_args,
        )
        # Register the combined task outside the DAG and fetch it back.
        self._task_scheduler.add_task(task_id='ASNG-NAS', add_to_dag=False)
        self._task_scheduler.add_subtask('ASNG-NAS', 'main-task', env=asng_task)
        asng_subtask = self._task_scheduler.get_subtask('ASNG-NAS', 'main-task')
        if not self._connectiontask_args["load_weights"]:
            unique_id = asng_task.get_unique_id()
            self.saver.dump_ml(unique_id, ml_type='pytorch', model=asng_task.ml.model)
        # Save model ordering (model index)
        submodel_names = asng_subtask.env.get_submodel_names()
        self._saver.add(f'ASNG-NAS_{submodel_names}', submodel_names)
        asng_subtask.env.verbose = self._verbose
        self._execute_subtask(asng_subtask, is_pretraining=False)
        # check best model
        asng_task.set_most_likely()
        # re-train
        # NOTE(review): best_task_ids is never used below — confirm it can
        # be dropped from the unpacking.
        best_task_ids, best_subtask_ids = asng_task.best_model()
        best_subtasks = [
            self._task_scheduler.get_subtask(task_id, subtask_id)
            for task_id, subtask_id in zip(task_ids, best_subtask_ids)
        ]
        best_combination_task = self._build_connected_models(
            subtasks=[t.env for t in best_subtasks],
            job_id='ASNG-NAS-Final',
            use_task_scheduler=True)
        best_comb = '+'.join(s for s in best_subtask_ids)
        self._execute_subtask(best_combination_task, is_pretraining=False)
        self._metric.storegate = self._storegate
        # NOTE(review): this 'metric' value is never used — test_metric below
        # is recomputed after predictions are written back; confirm this call
        # (and its storegate assignment) is intentional.
        metric = self._metric.calculate()
        ### evaluate
        # make results for json output
        # seed, nevents, walltime will be set at outside
        results_json = {'agent': 'ASNG-NAS', 'tasks': {}}
        c_cat, c_int = asng_task.get_most_likely()
        theta_cat, theta_int = asng_task.get_thetas()
        cat_idx = c_cat.argmax(axis=1)
        # Write predictions of the final model back to the storegate, then
        # evaluate the metric on them.
        pred_result = best_combination_task.env.predict(label=True)
        best_combination_task.env._storegate.update_data(
            data=pred_result['pred'],
            var_names=best_combination_task.env._output_var_names,
            phase='auto')
        self._metric._storegate = best_combination_task.env._storegate
        test_metric = self._metric.calculate()
        self.result = dict(task_ids=['ASNG-NAS-Final'],
                           subtask_ids=best_subtask_ids,
                           subtask_hps=[None],
                           metric_value=test_metric)
        test_result = dict(model_name='ASNG-NAS-Final', cat_idx=cat_idx, metric=test_metric)
        self._saver.add(f"results.ASNG-NAS-Final", test_result)
        results_json['loss_test'] = pred_result['loss']
        results_json['subloss_test'] = pred_result['subloss']
        results_json['metric_test'] = test_metric
        # Per-task summary: loss weight, candidate models and their thetas.
        for task_idx, task_id in enumerate(task_ids):
            results_json['tasks'][task_id] = {}
            results_json['tasks'][task_id]['weight'] = best_combination_task.env.ml.loss_weights[
                task_idx]
            results_json['tasks'][task_id]['models'] = []
            results_json['tasks'][task_id]['theta_cat'] = []
            subtasktuples = self._task_scheduler.get_subtasks_with_hps(task_id)
            for subtask_idx, subtask in enumerate(subtasktuples):
                this_id = subtask.subtask_id.split('-')[-1]  # FIXME : hard coded
                theta = theta_cat[task_idx, subtask_idx]
                results_json['tasks'][task_id]['models'].append(this_id)
                results_json['tasks'][task_id]['theta_cat'].append(theta)
                if subtask_idx == cat_idx[task_idx]:
                    results_json['tasks'][task_id]['model_selected'] = this_id
                if theta_cat is not None:
                    logger.info(f' theta_cat is {this_id: >20} : {theta:.3e}')
                else:
                    logger.info(f'theta_cat is None')
        if theta_int is not None:
            # NOTE(review): zip() is called with a single iterable plus a
            # trailing comma, so each element is a 1-tuple and the two-name
            # unpacking below raises ValueError when this branch runs —
            # confirm the intended second iterable (job ids?).
            for theta, job_id in zip(theta_int.tolist(), ):
                for t, j in zip(theta, job_id):
                    logger.info(f' theta_cat is {j: >20} : {t:.3e}')
        else:
            logger.info(f'theta_int is None')
        logger.info(f'best cat_idx is {cat_idx}')
        logger.info(f'best combination is {best_comb}')
        self.results_json = results_json
def _build_disconnected_task_block_list(self):
task_ids = []
asng_block_list = []
for task_idx, task_id in enumerate(self._task_scheduler.get_sorted_task_ids()):
subtasktuples = self._task_scheduler.get_subtasks_with_hps(task_id)
for subtask_idx, subtask in enumerate(subtasktuples):
subtask_env = subtask.env
subtask_hps = subtask.hps
subtask_env.set_hps(subtask_hps)
if self.do_pretraining:
logger.info(f'pretraining of {subtask_env.subtask_id} is starting...')
self._execute_subtask(subtask, is_pretraining=True)
else:
subtask.env.storegate = self._storegate
subtask.env.saver = self._saver
subtask.env.compile()
if '_model_fit' in dir(subtask_env):
if self._freeze_model_weights:
self._set_trainable_flags(subtask_env._model_fit, False)
l = ', '.join(subtask.env.subtask_id for subtask in subtasktuples)
logger.info(f'{l}')
params_list = [v.hps for v in subtasktuples]
self._saver.add(f'asng_block_{task_id}_submodel_params', params_list)
# build asng task block
subtasks = [v.env for v in subtasktuples]
asng_block_subtask = self._build_block_task(subtasks, task_id, is_pretraining=False)
asng_block_list.append(asng_block_subtask.env)
task_ids.append(task_id)
return asng_block_list, task_ids
def _build_block_task(self, subtasks, task_id, is_pretraining):
asng_block = PytorchASNGNASBlockTask(
subtasks=subtasks,
job_id=f'ASNG-NAS-Block-{task_id}',
saver=self._saver,
load_weights=self._connectiontask_args['load_weights'],
)
asng_task_id = 'ASNG-NAS-' + task_id
self._loss_weights[asng_task_id] = self._connectiontask_args['loss_weights'][task_id]
self._task_scheduler.add_task(task_id=asng_task_id)
self._task_scheduler.add_subtask(asng_task_id, 'BlockTask', env=asng_block)
asng_block_subtask = self._task_scheduler.get_subtask(asng_task_id, 'BlockTask')
if is_pretraining:
self._execute_subtask(asng_block_subtask, is_pretraining=True)
else:
asng_block_subtask.env.storegate = self._storegate
asng_block_subtask.env.saver = self._saver
asng_block_subtask.env.compile()
if not self._connectiontask_args['load_weights']:
unique_id = asng_block.get_unique_id()
self.saver.dump_ml(unique_id, ml_type='pytorch', model=asng_block.ml.model)
submodel_names = asng_block_subtask.env.get_submodel_names()
self._saver.add(f'asng_block_{task_id}_submodel_names', submodel_names)
return asng_block_subtask
| 40.008 | 100 | 0.623075 | 9,721 | 0.971906 | 0 | 0 | 5,225 | 0.522396 | 0 | 0 | 1,604 | 0.160368 |
69c56874d74c15c70229584ec7332f3dc283f7c2 | 393 | py | Python | tests/integration/controller/fixtures.py | jlamoso/petisco | bd71d28a5c0ba6ea789fa7c1529e7a2d108da53f | [
"MIT"
] | null | null | null | tests/integration/controller/fixtures.py | jlamoso/petisco | bd71d28a5c0ba6ea789fa7c1529e7a2d108da53f | [
"MIT"
] | null | null | null | tests/integration/controller/fixtures.py | jlamoso/petisco | bd71d28a5c0ba6ea789fa7c1529e7a2d108da53f | [
"MIT"
] | null | null | null | import os
import pytest
from petisco import FlaskApplication
SWAGGER_DIR = os.path.dirname(os.path.abspath(__file__)) + "/application/"
app = FlaskApplication(application_name="petisco", swagger_dir=SWAGGER_DIR).get_app()
@pytest.fixture
def client():
with app.app.test_client() as c:
yield c
@pytest.fixture
def given_any_apikey():
apikey = "apikey"
return apikey
| 17.863636 | 85 | 0.732824 | 0 | 0 | 66 | 0.167939 | 161 | 0.409669 | 0 | 0 | 32 | 0.081425 |
69c5c7a7d8c414585a90c6896cbf35b9061ae450 | 6,136 | py | Python | authors/apps/authentication/tests/test_register.py | andela/ah-code-titans | 4f1fc77c2ecdf8ca15c24327d39fe661eac85785 | [
"BSD-3-Clause"
] | null | null | null | authors/apps/authentication/tests/test_register.py | andela/ah-code-titans | 4f1fc77c2ecdf8ca15c24327d39fe661eac85785 | [
"BSD-3-Clause"
] | 20 | 2018-11-26T16:22:46.000Z | 2018-12-21T10:08:25.000Z | authors/apps/authentication/tests/test_register.py | andela/ah-code-titans | 4f1fc77c2ecdf8ca15c24327d39fe661eac85785 | [
"BSD-3-Clause"
] | 3 | 2019-01-24T15:39:42.000Z | 2019-09-25T17:57:08.000Z | from rest_framework import status
from django.urls import reverse
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode
from ..models import User
from ..token import account_activation_token
# local import
from authors.base_test_config import TestConfiguration
class TestRegister(TestConfiguration):
""" Test suite for user registration """
def register_user(self, data):
""" function register a new user """
return self.client.post(
reverse("create_user"),
data,
content_type='application/json'
)
def test_registration_email_verification(self):
response_details = self.register_user(self.new_user)
user_details = User.objects.get(username=self.new_user['user']['username'])
pk = urlsafe_base64_encode(force_bytes(user_details.id)).decode()
token = account_activation_token.make_token(self.new_user)
activate_url = 'http://localhost:8000/api/activate/account/{pk}/{token}'.format(pk=pk, token=token)
response = self.client.get(
activate_url,
content_type='application/json'
)
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
def test_empty_username(self):
""" test empty username """
self.new_user["user"]["username"] = ''
response = self.register_user(self.new_user)
self.assertEqual(
response.status_code,
status.HTTP_400_BAD_REQUEST
)
self.assertEqual(
response.data["errors"]["username"][0],
"This field may not be blank."
)
def test_invalid_email(self):
""" test invalid email """
self.new_user["user"]["email"] = 'kimameß'
response = self.register_user(self.new_user)
self.assertEqual(
response.status_code,
status.HTTP_400_BAD_REQUEST
)
self.assertEqual(
response.data["errors"]["email"][0],
"Enter a valid email address."
)
def test_empty_email(self):
""" test invalid email """
self.new_user["user"]["email"] = ''
response = self.register_user(self.new_user)
self.assertEqual(
response.status_code,
status.HTTP_400_BAD_REQUEST
)
self.assertEqual(
response.data["errors"]["email"][0],
"This field may not be blank."
)
def test_invalid_password(self):
""" test invalid password """
self.new_user["user"]["password"] = 'rtryyr'
response = self.register_user(self.new_user)
self.assertEqual(
response.status_code,
status.HTTP_400_BAD_REQUEST
)
self.assertIn(
"password with at least 8 characters",
response.data["errors"]["password"][0]
)
def test_empty_password(self):
""" test invalid password """
self.new_user["user"]["password"] = ''
response = self.register_user(self.new_user)
self.assertEqual(
response.status_code,
status.HTTP_400_BAD_REQUEST
)
self.assertEqual(
response.data["errors"]["password"][0],
"This field may not be blank."
)
def test_uppercase_password(self):
""" test that the password contains an uppercase letter """
self.new_user["user"]["password"] = 'codetitans32'
response = self.register_user(self.new_user)
self.assertEqual(
response.status_code,
status.HTTP_400_BAD_REQUEST
)
self.assertIn(
"at least one number, an uppercase or lowercase letter",
response.data["errors"]["password"][0]
)
def test_lowercase_password(self):
""" test that the password contains an lowercase letter """
self.new_user["user"]["password"] = 'CODETITANS32'
response = self.register_user(self.new_user)
self.assertEqual(
response.status_code,
status.HTTP_400_BAD_REQUEST
)
self.assertIn(
"at least one number, an uppercase or lowercase letter",
response.data["errors"]["password"][0]
)
def test_special_character_password(self):
""" test that the password contains a special character """
self.new_user["user"]["password"] = 'Codetitans32'
response = self.register_user(self.new_user)
self.assertEqual(
response.status_code,
status.HTTP_400_BAD_REQUEST
)
self.assertIn(
"lowercase letter or one special character",
response.data["errors"]["password"][0]
)
def test_number_in_password(self):
""" test that the password contains a number """
self.new_user["user"]["password"] = 'Codetitans@!'
response = self.register_user(self.new_user)
self.assertEqual(
response.status_code,
status.HTTP_400_BAD_REQUEST
)
self.assertIn(
"Password should have at least one number",
response.data["errors"]["password"][0]
)
def test_register_user(self):
""" test register user """
response = self.register_user(self.new_user)
self.assertEqual(
response.status_code,
status.HTTP_201_CREATED
)
def test_existing_email(self):
""" test register with existing user email """
self.new_user["user"]["email"] = self.user["user"]["email"]
response = self.register_user(self.new_user)
self.assertEqual(
response.status_code,
status.HTTP_400_BAD_REQUEST
)
def test_existing_username(self):
""" test register with existing username """
self.new_user["user"]["username"] = self.user["user"]["username"]
response = self.register_user(self.new_user)
self.assertEqual(
response.status_code,
status.HTTP_400_BAD_REQUEST
)
| 31.306122 | 107 | 0.602347 | 5,828 | 0.94965 | 0 | 0 | 0 | 0 | 0 | 0 | 1,474 | 0.240182 |
69c74da7d507a228c1bd9d4078bd51d786cb3f7d | 2,192 | py | Python | web_app/routes/stats_routes.py | jae-finger/twitoff | 73a42c343dc5fbbe08c4cc470b7705e9ff8bb34c | [
"MIT"
] | null | null | null | web_app/routes/stats_routes.py | jae-finger/twitoff | 73a42c343dc5fbbe08c4cc470b7705e9ff8bb34c | [
"MIT"
] | 3 | 2021-06-08T21:32:20.000Z | 2022-03-12T00:32:35.000Z | web_app/routes/stats_routes.py | jae-finger/twitoff | 73a42c343dc5fbbe08c4cc470b7705e9ff8bb34c | [
"MIT"
] | null | null | null | from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from flask import Blueprint, jsonify, request, flash, redirect, render_template
from web_app.models import User
from web_app.statsmodels import load_model
from web_app.services.basilica_service import connection as basilica_connection
stats_routes = Blueprint("stats_routes", __name__)
@stats_routes.route("/stats/iris")
def iris():
X, y = load_iris(return_X_y=True)
clf = load_model() # make sure to pre-train the model first!
result = str(clf.predict(X[:2, :]))
print("PREDICTION", result)
return result # maybe return as JSON?
@stats_routes.route("/stats/predict", methods=["POST"])
def twitoff_predict():
# 0. Grab data
print("PREDICT ROUTE...")
print("FORM DATA:", dict(request.form)) # {'screen_name_a': 'elonmusk', 'example: 'j_a_e_f', 'tweet_text': 'Example tweet text here'}
screen_name_a = request.form["screen_name_a"]
screen_name_b = request.form["screen_name_b"]
tweet_text = request.form["tweet_text"]
print(screen_name_a, screen_name_b, tweet_text)
# 1. Train model
tweet_embeddings = []
tweet_labels = []
user_a = User.query.filter(User.screen_name == screen_name_a).one()
user_b = User.query.filter(User.screen_name == screen_name_b).one()
tweets_a = user_a.tweets
tweets_b = user_b.tweets
all_tweets = tweets_a + tweets_b
for tweet in all_tweets:
tweet_embeddings.append(tweet.embedding)
tweet_labels.append(tweet.user.screen_name)
print("Embeddings:", len(tweet_embeddings), "Lables:", len(tweet_labels))
classifier = LogisticRegression(random_state=0, solver="lbfgs", multi_class="multinomial")
classifier.fit(tweet_embeddings, tweet_labels)
# 2. Make prediction
example_tweet_embedding = basilica_connection.embed_sentence(tweet_text, model="twitter")
result = classifier.predict([example_tweet_embedding])
print("Result:", result[0])
return render_template("prediction_results.html",
screen_name_a=screen_name_a,
screen_name_b=screen_name_b,
tweet_text=tweet_text,
screen_name_most_likely=result[0]
) | 37.793103 | 137 | 0.728102 | 0 | 0 | 0 | 0 | 1,809 | 0.825274 | 0 | 0 | 425 | 0.193887 |
69c769830c272d7a9f4d246a127dbb4b3989b330 | 1,399 | py | Python | tests/unit/test_parameters/test_geometric_parameters.py | manjunathnilugal/PyBaMM | 65d5cba534b4f163670e753714964aaa75d6a2d2 | [
"BSD-3-Clause"
] | 330 | 2019-04-17T11:36:57.000Z | 2022-03-28T16:49:55.000Z | tests/unit/test_parameters/test_geometric_parameters.py | masoodtamaddon/PyBaMM | a31e2095600bb92e913598ac4d02b2b6b77b31c1 | [
"BSD-3-Clause"
] | 1,530 | 2019-03-26T18:13:03.000Z | 2022-03-31T16:12:53.000Z | tests/unit/test_parameters/test_geometric_parameters.py | masoodtamaddon/PyBaMM | a31e2095600bb92e913598ac4d02b2b6b77b31c1 | [
"BSD-3-Clause"
] | 178 | 2019-03-27T13:48:04.000Z | 2022-03-31T09:30:11.000Z | #
# Tests for the standard parameters
#
import pybamm
import unittest
class TestGeometricParameters(unittest.TestCase):
def test_macroscale_parameters(self):
geo = pybamm.geometric_parameters
L_n = geo.L_n
L_s = geo.L_s
L_p = geo.L_p
L_x = geo.L_x
l_n = geo.l_n
l_s = geo.l_s
l_p = geo.l_p
parameter_values = pybamm.ParameterValues(
values={
"Negative electrode thickness [m]": 0.05,
"Separator thickness [m]": 0.02,
"Positive electrode thickness [m]": 0.21,
}
)
L_n_eval = parameter_values.process_symbol(L_n)
L_s_eval = parameter_values.process_symbol(L_s)
L_p_eval = parameter_values.process_symbol(L_p)
L_x_eval = parameter_values.process_symbol(L_x)
self.assertEqual(
(L_n_eval + L_s_eval + L_p_eval).evaluate(), L_x_eval.evaluate()
)
l_n_eval = parameter_values.process_symbol(l_n)
l_s_eval = parameter_values.process_symbol(l_s)
l_p_eval = parameter_values.process_symbol(l_p)
self.assertAlmostEqual((l_n_eval + l_s_eval + l_p_eval).evaluate(), 1)
if __name__ == "__main__":
print("Add -v for more debug output")
import sys
if "-v" in sys.argv:
debug = True
pybamm.settings.debug_mode = True
unittest.main()
| 28.55102 | 78 | 0.620443 | 1,134 | 0.810579 | 0 | 0 | 0 | 0 | 0 | 0 | 174 | 0.124375 |
69c79c1acedc30f9a5022e2647ed20f40fd5e207 | 1,860 | py | Python | src/preprocessing1/convert_ctrees_to_dtrees_rstdt.py | norikinishida/discourse-parsing | 7377a78cc32ad6430d256694e31ed9426e7c6340 | [
"Apache-2.0"
] | 2 | 2022-02-16T20:41:22.000Z | 2022-03-11T18:28:24.000Z | src/preprocessing1/convert_ctrees_to_dtrees_rstdt.py | norikinishida/discourse-parsing | 7377a78cc32ad6430d256694e31ed9426e7c6340 | [
"Apache-2.0"
] | null | null | null | src/preprocessing1/convert_ctrees_to_dtrees_rstdt.py | norikinishida/discourse-parsing | 7377a78cc32ad6430d256694e31ed9426e7c6340 | [
"Apache-2.0"
] | null | null | null | import argparse
import os
import pyprind
import utils
import treetk
import treetk.rstdt
def main(args):
"""
We use n-ary ctrees (ie., *.labeled.nary.ctree) to generate dtrees.
Morey et al. (2018) demonstrate that scores evaluated on these dtrees are superficially lower than those on right-heavy binarized trees (ie., *.labeled.bin.ctree).
"""
path = args.path
filenames = os.listdir(path)
filenames = [n for n in filenames if n.endswith(".labeled.nary.ctree")]
filenames.sort()
def func_label_rule(node, i, j):
relations = node.relation_label.split("/")
if len(relations) == 1:
return relations[0] # Left-most node is head.
else:
if i > j:
return relations[j]
else:
return relations[j-1]
for filename in pyprind.prog_bar(filenames):
sexp = utils.read_lines(
os.path.join(path, filename),
process=lambda line: line.split())
assert len(sexp) == 1
sexp = sexp[0]
# Constituency
ctree = treetk.rstdt.postprocess(treetk.sexp2tree(sexp, with_nonterminal_labels=True, with_terminal_labels=False))
# Dependency
# Assign heads
ctree = treetk.rstdt.assign_heads(ctree)
# Conversion
dtree = treetk.ctree2dtree(ctree, func_label_rule=func_label_rule)
arcs = dtree.tolist(labeled=True)
# Write
with open(os.path.join(
path,
filename.replace(".labeled.nary.ctree", ".arcs")), "w") as f:
f.write("%s\n" % " ".join(["%d-%d-%s" % (h,d,l) for h,d,l in arcs]))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--path", type=str, required=True)
args = parser.parse_args()
main(args=args)
| 30 | 167 | 0.597849 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 427 | 0.22957 |
69c88ceb18f630f200621f0a183a63c5b4bebc25 | 4,507 | py | Python | vinyl.py | jsonmorberg/Vinyl-Bot | 84bae5eb8a9184cf7dbc5e652dd7f428d9a8295e | [
"MIT"
] | 1 | 2021-11-10T04:29:35.000Z | 2021-11-10T04:29:35.000Z | vinyl.py | jsonmorberg/Vinyl-Bot | 84bae5eb8a9184cf7dbc5e652dd7f428d9a8295e | [
"MIT"
] | null | null | null | vinyl.py | jsonmorberg/Vinyl-Bot | 84bae5eb8a9184cf7dbc5e652dd7f428d9a8295e | [
"MIT"
] | null | null | null | # vinyl.py
import os
import discord
from discord import voice_client
from audio_source import AudioSource
from audio_controller import AudioController
import yt_dlp
from dotenv import load_dotenv
from discord.ext import commands
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
# Suppress unnecessary bug reports
yt_dlp.utils.bug_reports_message = lambda: ''
# Bot class for commands
class Vinyl(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.audio_players = {}
def get_audio_player(self, ctx):
audio_player = self.audio_players.get(ctx.guild.id)
if not audio_player:
audio_player = AudioController(self.bot, ctx)
self.audio_players[ctx.guild.id] = audio_player
return audio_player
async def cog_before_invoke(self, ctx):
ctx.audio_player = self.get_audio_player(ctx)
async def cog_command_error(self, ctx, error):
await ctx.send('An error occurred: {}'.format(str(error)))
@commands.command(name='join', aliases=['j'], help='Ask Vinyl to join your voice channel')
async def _join(self, ctx, *, channel: discord.VoiceChannel=None):
if not channel and not ctx.author.voice.channel:
await ctx.send("{} is not connected to a voice channel".format(ctx.author.name))
return
channel = channel or ctx.author.voice.channel
if ctx.audio_player.voice_client:
await ctx.audio_player.voice_client.move_to(channel)
return
await ctx.message.add_reaction('☑️')
ctx.audio_player.voice_client = await channel.connect()
@commands.command(name='leave', aliases=['l'], help='Make Vinyl leave your voice channel')
async def _leave(self, ctx):
if not ctx.audio_player.voice_client:
await ctx.send("Vinyl is not connected to a voice channel")
else:
await ctx.message.add_reaction('☑️')
await ctx.audio_player.stop()
del self.audio_players[ctx.guild.id]
@commands.command(name='play', aliases=['p'], help="Play")
async def _play(self, ctx, *, search):
if not ctx.audio_player.voice_client:
await ctx.invoke(self._join)
async with ctx.typing():
try:
source = await AudioSource.generate_source(ctx, search, loop=self.bot.loop)
except:
await ctx.send("An error occured while trying to play")
else:
if(ctx.audio_player.voice_client.is_playing()):
await ctx.send('**Queued:** {}'.format(source.title))
await ctx.audio_player.queue.put(source)
await ctx.message.add_reaction('▶️')
@commands.command(name='skip', aliases=['s'], help="Skip the song currently playing")
async def _skip(self, ctx):
voice_client = ctx.audio_player.voice_client
if voice_client is None:
await ctx.send("Vinyl is not connected to a voice channel currently")
elif voice_client.is_playing():
await ctx.message.add_reaction('⏭️')
ctx.audio_player.skip()
else:
await ctx.send("Vinyl isn't playing anything")
@commands.command(name='pause', help='Pause any song Vinyl is currently playing')
async def _pause(self, ctx):
voice_client = ctx.audio_player.voice_client
if voice_client is None:
await ctx.send("Vinyl is not connected to a voice channel currently")
elif voice_client.is_playing():
await ctx.message.add_reaction('⏸️')
voice_client.pause()
else:
await ctx.send("Vinyl isn't playing anything")
@commands.command(name='resume', help='Resume a paused song')
async def _resume(self, ctx):
voice_client = ctx.audio_player.voice_client
if voice_client is None:
await ctx.sent("Vinyl is not connected to a voice channel currently")
elif voice_client.is_paused():
await ctx.message.add_reaction('▶️')
voice_client.resume()
else:
await ctx.send("Vinyl isn't paused currently")
# Set discord intents to all for now
intents = discord.Intents().all()
bot = commands.Bot(command_prefix='-', intents=intents)
bot.add_cog(Vinyl(bot))
@bot.event
async def on_ready():
print(f'Logged in as {bot.user} (ID: {bot.user.id})')
print('------')
bot.run(TOKEN) | 36.056 | 94 | 0.633903 | 3,864 | 0.852792 | 0 | 0 | 3,300 | 0.728316 | 3,000 | 0.662105 | 868 | 0.191569 |
69c8df43a27c1af9bf7464c6393251ec9ff02443 | 1,106 | py | Python | tests/test_components/test_lenses/test_AchromLens.py | spacesys-finch/payload-designer | f21dc70f7301f166558a8f61bcbbccce83770343 | [
"Unlicense"
] | 2 | 2022-01-01T23:52:08.000Z | 2022-01-18T06:39:58.000Z | tests/test_components/test_lenses/test_AchromLens.py | spacesys-finch/payload-designer | f21dc70f7301f166558a8f61bcbbccce83770343 | [
"Unlicense"
] | 55 | 2021-12-30T18:15:38.000Z | 2022-03-06T16:02:57.000Z | tests/test_components/test_lenses/test_AchromLens.py | spacesys-finch/payload-designer | f21dc70f7301f166558a8f61bcbbccce83770343 | [
"Unlicense"
] | 2 | 2022-01-20T01:43:59.000Z | 2022-01-20T01:45:50.000Z | """Tests for AchromLens component."""
# stdlib
import logging
# external
import pytest
# project
from payload_designer.components import lenses
LOG = logging.getLogger(__name__)
def test_focal_length_1():
"""Test AchromLens.focal_length_1()"""
f_eq = 50
V_1 = 0.016
V_2 = 0.028
doublet = lenses.AchromLens(f_eq=f_eq, V_1=V_1, V_2=V_2)
fl1 = doublet.focal_length_1()
LOG.info(f"Focal length 1: {fl1}")
assert fl1 == pytest.approx(-37.5)
def test_focal_length_2():
"""Test AchromLens.focal_length_2()"""
f_eq = 50
V_1 = 0.016
V_2 = 0.028
doublet = lenses.AchromLens(f_eq=f_eq, V_1=V_1, V_2=V_2)
fl2 = doublet.focal_length_2()
LOG.info(f"Focal length 2: {fl2}")
assert fl2 == pytest.approx(350 / 3)
def test_effective_focal_length():
"""Test AchromLens.effective_focal_length()"""
f_1 = 50
V_1 = 0.016
V_2 = 0.028
doublet = lenses.AchromLens(f_1=f_1, V_1=V_1, V_2=V_2)
fleq = doublet.effective_focal_length()
LOG.info(f"Effective focal length: {fleq}")
assert fleq == pytest.approx(-200 / 3)
| 19.403509 | 60 | 0.658228 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 267 | 0.24141 |
69c94e463ba20a6268b57098d9cdaac3fe21a484 | 486 | py | Python | python/demos/mcEstimatePi.py | qyxiao/pmt | 87513794fc43f8aa1f4f3d7588fa45ffc75d1a44 | [
"MIT"
] | null | null | null | python/demos/mcEstimatePi.py | qyxiao/pmt | 87513794fc43f8aa1f4f3d7588fa45ffc75d1a44 | [
"MIT"
] | null | null | null | python/demos/mcEstimatePi.py | qyxiao/pmt | 87513794fc43f8aa1f4f3d7588fa45ffc75d1a44 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import matplotlib.pyplot as pl
import numpy as np
p = np.random.rand(5000, 2) * 4 - 2
inner = np.sum(p ** 2, axis=1) <= 4
pl.figure(figsize=(10, 10))
pl.plot(p[inner, 0], p[inner, 1], 'bo')
pl.plot(p[~inner, 0], p[~inner, 1], 'rD')
pi_estimate = np.sum(inner) / 5000 * 4
print('the estimated pi = %f' % pi_estimate)
print('the standard pi = %f' % np.pi)
err = np.abs(np.pi - pi_estimate) / np.pi
print('err = %f' % err)
pl.savefig('mcEstimatePi.png')
pl.show()
| 25.578947 | 44 | 0.62963 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 102 | 0.209877 |
69c954f1be24efef0a745d933498ace36a82ac1a | 1,949 | py | Python | src/utils/hdf5_helper/h5_util.py | SteffenMauceri/OWLS-Autonomy | e676282a87e17030887b0174f3b8b38aab170d15 | [
"RSA-MD"
] | null | null | null | src/utils/hdf5_helper/h5_util.py | SteffenMauceri/OWLS-Autonomy | e676282a87e17030887b0174f3b8b38aab170d15 | [
"RSA-MD"
] | null | null | null | src/utils/hdf5_helper/h5_util.py | SteffenMauceri/OWLS-Autonomy | e676282a87e17030887b0174f3b8b38aab170d15 | [
"RSA-MD"
] | null | null | null | from contextlib import closing
import h5py
import numpy as np
def save_h5(outfile, dictionary):
""" Saves passed dictionary to an h5 file
Parameters
----------
outfile : string
Name of output h5 file
dictionary : dictionary
Dictionary that will be saved
"""
def save_layer(f, seed, dictionary):
for key, value in dictionary.items():
fullKey = f"{seed}/{key}"
if type(dictionary[key]) == dict:
f = save_layer(f, fullKey, value)
else:
f[fullKey] = dictionary[key]
return f
with closing(h5py.File(outfile, 'w')) as f:
for key, value in dictionary.items():
if type(dictionary[key]) == dict:
f = save_layer(f, key, value)
else:
f[key] = dictionary[key]
def load_h5(feature_file):
"""
Loads h5 contents to dictionary.
Single level dictionary with keys being full h5 paths.
Parameters
----------
feature_file : string
Name of input h5 file
Returns
-------
dictionary : dictionary
Dictionary of h5 contents
"""
def load_layer(f, seed, dictionary):
for key in f[seed].keys():
fullKey = f"{seed}/{key}"
if isinstance(f[fullKey], h5py.Dataset):
if (seed in dictionary.keys()):
dictionary[seed][key] = np.asarray(f[fullKey])
else:
dictionary[seed] = {key: np.asarray(f[fullKey])}
else:
dictionary = load_layer(f, fullKey, dictionary)
return dictionary
with h5py.File(feature_file, 'r') as f:
dictionary = {}
for key in f.keys():
if isinstance(f[key], h5py.Dataset):
dictionary[key] = np.asarray(f[key])
else:
dictionary = load_layer(f, key, dictionary)
return dictionary
| 26.69863 | 68 | 0.542329 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 513 | 0.263212 |
69ca8a62d7144693f84bd439c77728f7b5f93e62 | 526 | py | Python | solutions/problem-007/python.py | kmchmk/projecteuler-solutions | 0900f4ef031e360fb80414420fb61522be1dd66a | [
"MIT"
] | 2 | 2021-09-05T11:50:30.000Z | 2021-09-10T15:46:05.000Z | solutions/problem-007/python.py | kmchmk/projecteuler-solutions | 0900f4ef031e360fb80414420fb61522be1dd66a | [
"MIT"
] | 2 | 2021-09-06T14:42:54.000Z | 2021-09-10T14:52:59.000Z | solutions/problem-007/python.py | kmchmk/projecteuler-solutions | 0900f4ef031e360fb80414420fb61522be1dd66a | [
"MIT"
] | 1 | 2021-09-05T14:40:14.000Z | 2021-09-05T14:40:14.000Z | from math import sqrt
def is_prime_number(number):
# we only need to loop from 2 to square root of number
# https://stackoverflow.com/a/5811176/4388776
for i in range(2, int(sqrt(number)) + 1):
if number % i == 0:
return False
return True
def get_nth_prime(n):
count = 0
number = 2
while True:
if is_prime_number(number):
count = count + 1
if count == n:
return number
number = number + 1
print(get_nth_prime(10001)) | 21.916667 | 58 | 0.579848 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 99 | 0.188213 |
69cab18673618de1d90978d44eee2864143ab833 | 11,582 | py | Python | model/methods.py | MokkeMeguru/seq2bseq | 4e66f2d4738751cf4552a5b61728ad3b4967feb6 | [
"MIT"
] | null | null | null | model/methods.py | MokkeMeguru/seq2bseq | 4e66f2d4738751cf4552a5b61728ad3b4967feb6 | [
"MIT"
] | null | null | null | model/methods.py | MokkeMeguru/seq2bseq | 4e66f2d4738751cf4552a5b61728ad3b4967feb6 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
""" Helper functions for VariationalModel class """
from __future__ import print_function
from __future__ import division
import math
import random
import tensorflow as tf
from tensorflow.contrib.legacy_seq2seq.python.ops import seq2seq as s2s
def linearOutcomePrediction(zs, params_pred, scope=None):
    """Predict outcomes from latent representations via an affine map.

    Args:
        zs: batch of latent z-vectors (encoder states), a matrix of shape
            (batch_size, latent_dim).
        params_pred: pair ``(coefficients, bias)`` defining the linear model.
        scope: optional variable scope name; defaults to ``"outcomepred"``.

    Returns:
        Tensor of predicted outcomes, one row per z-vector.
    """
    with s2s.variable_scope.variable_scope(scope or "outcomepred", reuse=True):
        weight_matrix, offset = params_pred
        return tf.add(tf.matmul(zs, weight_matrix), offset)
def flexibleOutcomePrediction(zs, params_pred, use_sigmoid=False, scope=None):
    """Nonlinearly predict outcomes from latent representations.

    Uses a single tanh hidden layer of pre-specified size (by default the
    size ``d`` of the RNN hidden state).

    Args:
        zs: batch of latent z-vectors (encoder states), a matrix.
        params_pred: pair ``(weights, biases)`` of dicts with keys
            ``'W1'``/``'W2'`` and ``'B1'``/``'B2'`` respectively.
        use_sigmoid: if True, predictions are squashed into [0, 1].
        scope: optional variable scope name; defaults to ``"outcomepred"``.

    Returns:
        Tensor of predicted outcomes, one row per z-vector.
    """
    with s2s.variable_scope.variable_scope(scope or "outcomepred", reuse=True):
        weights, biases = params_pred
        hidden = tf.nn.tanh(tf.add(tf.matmul(zs, weights['W1']), biases['B1']))
        preds = tf.add(tf.matmul(hidden, weights['W2']), biases['B2'])
        return tf.sigmoid(preds) if use_sigmoid else preds
def outcomePrediction(zs, params_pred, which_outcomeprediction, use_sigmoid=False, scope=None):
    """Dispatch outcome prediction to the linear or flexible model.

    Args:
        zs: batch of latent z-vectors (encoder states), a matrix.
        params_pred: parameters for the selected prediction model.
        which_outcomeprediction: ``'linear'`` selects the affine model;
            any other value selects the nonlinear (flexible) model.
        use_sigmoid: if True, flexible predictions are squashed into [0, 1]
            (has no effect on the linear model, which takes no such flag).
        scope: optional variable scope name forwarded to the model.

    Returns:
        Tensor of predicted outcomes, one row per z-vector.
    """
    if which_outcomeprediction == 'linear':
        return linearOutcomePrediction(zs, params_pred, scope=scope)
    # Bug fix: ``use_sigmoid`` was previously accepted but never forwarded,
    # so the [0, 1] constraint was silently dropped for the flexible model.
    return flexibleOutcomePrediction(zs, params_pred, use_sigmoid=use_sigmoid, scope=scope)
def getEncoding(inputs, cell, num_symbols, embedding_size, dtype=s2s.dtypes.float32, scope=None):
    """Encode an input sequence x into its latent representation z.

    Args:
        inputs: list of input-symbol tensors (one per time step).
        cell: the RNN cell to wrap with an embedding layer.
        num_symbols: vocabulary size (number of embedding classes).
        embedding_size: dimensionality of each symbol embedding.
        dtype: dtype of the RNN state.
        scope: optional variable scope name; defaults to ``'seq2seq'``.

    Returns:
        The final encoder state, shape (batch_size, cell.state_size).
    """
    with s2s.variable_scope.variable_scope(scope or 'seq2seq', reuse=True):
        embedded_cell = s2s.core_rnn_cell.EmbeddingWrapper(
            cell,
            embedding_classes=num_symbols,
            embedding_size=embedding_size)
        _, final_state = s2s.rnn.static_rnn(embedded_cell, inputs, dtype=dtype)
        return final_state
def variationalEncoding(inputs, cell, num_symbols, embedding_size,
                        variational_params, dtypes=s2s.dtypes.float32, scope=None):
    """Produce the posterior mean and std of the latent code z for inputs x.

    The encoder's final state is mapped linearly to ``mu``.  The per-dimension
    posterior standard deviation ``sigma`` is produced by a ReLU hidden layer
    (same size as the encoder state) followed by ``exp(-|.|)``, then clipped
    to ``[1e-6, 1.0]``.

    Args:
        inputs: list of input-symbol tensors (one per time step).
        cell: the RNN cell used by the encoder.
        num_symbols: vocabulary size.
        embedding_size: dimensionality of each symbol embedding.
        variational_params: pair ``(mu_params, sigma_params)`` of dicts;
            ``mu_params`` has keys ``'weights'``/``'biases'`` and
            ``sigma_params`` has ``'weights1'``/``'biases1'``/``'weights2'``/``'biases2'``.
        dtypes: dtype of the RNN state (name kept for interface compatibility).
        scope: optional variable scope name.

    Returns:
        Tuple ``(mu, sigma)`` of posterior mean and std tensors.
    """
    min_sigma = 1e-6  # smallest allowable posterior standard deviation
    final_state = getEncoding(inputs, cell, num_symbols, embedding_size,
                              dtype=dtypes, scope=scope)
    with s2s.variable_scope.variable_scope(scope or 'variational', reuse=True):
        mu_params, sigma_params = variational_params
        mu = tf.add(tf.matmul(final_state, mu_params['weights']),
                    mu_params['biases'])
        # ReLU layer of the same size as the encoder state.
        sigma_hidden = tf.nn.relu(
            tf.add(tf.matmul(final_state, sigma_params['weights1']),
                   sigma_params['biases1']))
        raw_sigma = tf.exp(-tf.abs(
            tf.add(tf.matmul(sigma_hidden, sigma_params['weights2']),
                   sigma_params['biases2'])))
        sigma = tf.clip_by_value(raw_sigma, min_sigma, 1.0)
    return mu, sigma
def getDecoding(encoder_state, inputs, cell,
                num_symbols, embedding_size,
                feed_previous=True, output_prejection=None,
                dtype=s2s.dtypes.float32, scope=None):
    """Compute decoder output probabilities over x given the encoding z.

    Args:
        encoder_state: latent representation z used to initialize the decoder.
        inputs: list of decoder input tensors (one per time step).
        cell: the RNN cell used by the decoder.
        num_symbols: vocabulary size.
        embedding_size: dimensionality of each symbol embedding.
        feed_previous: if True, feed the previous prediction as the next input.
        output_prejection: optional external output projection.
            NOTE(review): keyword is spelled ``output_prejection`` (sic);
            kept as-is for backward compatibility with existing callers.
        dtype: unused; kept for interface compatibility.
        scope: optional variable scope name; defaults to ``'seq2seq'``.

    Returns:
        The decoder output tensors (per-step vocabulary scores).
    """
    with s2s.variable_scope.variable_scope(scope or 'seq2seq', reuse=True):
        decoder_cell = cell
        if output_prejection is None:
            # Without an external projection, project RNN outputs onto the
            # vocabulary directly.
            decoder_cell = s2s.core_rnn_cell.OutputProjectionWrapper(cell, num_symbols)
        decode_probs, _ = s2s.embedding_rnn_decoder(
            inputs, encoder_state, decoder_cell, num_symbols,
            embedding_size, output_projection=output_prejection,
            feed_previous=feed_previous)
    return decode_probs
def createVariationalVar(inputs, cell, num_symbols, embedding_size,
                         feed_previous=False, output_projection=None,
                         dtype=s2s.dtypes.float32, scope=None):
    """Instantiate the reusable TensorFlow variables for the model.

    Builds the encoder and decoder graphs once inside the ``'seq2seq'``
    variable scope (without ``reuse``) purely so that their variables get
    created; later calls with ``reuse=True`` can then share them.

    Returns:
        None (the side effect of variable creation is the purpose).
    """
    with s2s.variable_scope.variable_scope(scope or 'seq2seq'):
        embedded_cell = s2s.core_rnn_cell.EmbeddingWrapper(
            cell, embedding_classes=num_symbols, embedding_size=embedding_size)
        # Final state has shape (batch_size, cell.state_size).
        _, encoder_state = s2s.rnn.static_rnn(embedded_cell, inputs, dtype=dtype)
        if output_projection is None:
            cell = s2s.core_rnn_cell.OutputProjectionWrapper(cell, num_symbols)
        # Decoder outputs are discarded; only variable creation matters here.
        s2s.embedding_rnn_decoder(
            inputs, encoder_state, cell, num_symbols,
            embedding_size, output_projection=output_projection,
            feed_previous=feed_previous)
    return None
def createDeterministicVar(inputs, cell, num_symbols, embedding_size,
                           feed_previous=False, output_projection=None,
                           dtype=s2s.dtypes.float32, scope=None):
    """Instantiate the reusable TensorFlow variables (deterministic model).

    Identical in effect to ``createVariationalVar``: runs the encoder and
    decoder graphs once inside the ``'seq2seq'`` variable scope (without
    ``reuse``) so that their variables are created for later sharing.

    Returns:
        None (variable creation is the only purpose).
    """
    with s2s.variable_scope.variable_scope(scope or 'seq2seq'):
        wrapped = s2s.core_rnn_cell.EmbeddingWrapper(
            cell,
            embedding_classes=num_symbols,
            embedding_size=embedding_size)
        # Final state has shape (batch_size, cell.state_size).
        _, state = s2s.rnn.static_rnn(wrapped, inputs, dtype=dtype)
        if output_projection is None:
            cell = s2s.core_rnn_cell.OutputProjectionWrapper(cell, num_symbols)
        # Outputs are not needed; the call exists to create the variables.
        s2s.embedding_rnn_decoder(
            inputs, state, cell, num_symbols,
            embedding_size, output_projection=output_projection,
            feed_previous=feed_previous)
    return None
def levenshtein(seq1, seq2):
    """Word-level Levenshtein (edit) distance between two sequences.

    '<PAD>' entries are stripped first, so padded and unpadded copies of
    the same sequence compare as equal. Uses the classic single-row
    dynamic-programming scheme, keeping the shorter sequence as the row
    to minimise memory.
    """
    a = [tok for tok in seq1 if tok != '<PAD>']
    b = [tok for tok in seq2 if tok != '<PAD>']
    if len(a) > len(b):
        a, b = b, a
    # previous[j] = distance between a[:j] and the rows of b seen so far.
    previous = list(range(len(a) + 1))
    for row, tok_b in enumerate(b, start=1):
        current = [row]
        for col, tok_a in enumerate(a, start=1):
            if tok_a == tok_b:
                # Match: no extra cost beyond the diagonal cell.
                current.append(previous[col - 1])
            else:
                # 1 + min(substitution, deletion, insertion).
                current.append(
                    1 + min(previous[col - 1], previous[col], current[-1]))
        previous = current
    return previous[-1]
"""
Info
for i1, c1 in enumerate(['a', 'b', 'c']):
print('{} : {}'.format(i1, c1))
=>
0 : a
1 : b
2 : c
"""
def mutate_lengthconstrained(init_seq, num_edits, vocab, length_range=(10, 20)):
    """
    English:
    Performs random edits of sequences, respecting min/max sequence-length
    constraints. At each edit, possible operations (equally likely) are:
    (1) Do nothing (2) Substitution (3) Deletion (4) Insertion
    Each operation is uniform over possible symbols and possible positions.
    Insertions are skipped once the sequence is at the maximum length, and
    deletions are skipped once it is at the minimum length.

    Returns (new_seq, edit_dist) where edit_dist is the Levenshtein
    distance between the mutated and original sequences (always <= num_edits).

    Japanese:
    最小/最大のシーケンスの長さに制約をかけながら、シーケンスのランダムな編集を行います。
    """
    min_seq_length, max_seq_length = length_range
    # Work on a copy so the caller's sequence is never mutated in place.
    new_seq = init_seq[:]
    for i in range(num_edits):
        operation = random.randint(1, 4)
        # 1 = Do nothing, 2 = Substitution, 3 = Deletion, 4 = Insertion
        if operation > 1:
            # Draw a replacement/insertion symbol; it must never be the
            # padding token, so redraw until something else comes up.
            char = '<PAD>'
            while char == '<PAD>':
                char = vocab[random.randint(0, len(vocab) - 1)]
            # Position for substitution/deletion. Insertion redraws below
            # because it may also target the slot past the last element.
            position = random.randint(0, len(new_seq) - 1)
            if (operation == 4) and (len(new_seq) < max_seq_length):
                position = random.randint(0, len(new_seq))
                new_seq.insert(position, char)
            elif (operation == 3) and (len(new_seq) > min_seq_length):
                _ = new_seq.pop(position)
            elif operation == 2:
                new_seq[position] = char
    # Sanity check: n random edits can never move the sequence further than
    # edit distance n from the original.
    edit_dist = levenshtein(new_seq, init_seq)
    if edit_dist > num_edits:
        raise ValueError('edit distance invalid')
    return new_seq, edit_dist
def mutate(init_seq, num_edits, vocab):
    """Apply ``num_edits`` random edits to a copy of ``init_seq``.

    Each edit is one of, with equal probability: do nothing, substitution,
    deletion, or insertion. Symbols are drawn uniformly from ``vocab``
    (the '<PAD>' token is never chosen) and positions uniformly over the
    sequence; a deletion is skipped when it would leave fewer than one
    element. Returns ``(mutated_seq, edit_dist)`` and raises ValueError if
    the resulting Levenshtein distance exceeds ``num_edits``.
    """
    mutated = list(init_seq)
    for _step in range(num_edits):
        op = random.randint(1, 4)
        # 1 = do nothing, 2 = substitution, 3 = deletion, 4 = insertion
        if op == 1:
            continue
        # Pick a non-padding symbol for substitution/insertion.
        symbol = '<PAD>'
        while symbol == '<PAD>':
            symbol = vocab[random.randint(0, len(vocab) - 1)]
        idx = random.randint(0, len(mutated) - 1)
        if op == 4:
            # Insertion may also target the slot past the last element.
            idx = random.randint(0, len(mutated))
            mutated.insert(idx, symbol)
        elif op == 3 and len(mutated) > 1:
            mutated.pop(idx)
        elif op == 2:
            mutated[idx] = symbol
    dist = levenshtein(mutated, init_seq)
    if dist > num_edits:
        raise ValueError("edit distance invalid")
    return mutated, dist
def sigmoid(x):
    """Numerically stable logistic sigmoid: 1 / (1 + e**(-x)).

    The naive form ``1 / (1 + math.exp(-x))`` raises OverflowError in
    ``math.exp`` for large negative x (roughly x < -710). Evaluating via
    an exponential of a non-positive argument avoids that and returns the
    correct limits (0.0 and 1.0) at the extremes.
    """
    if x >= 0:
        return 1 / (1 + math.exp(-x))
    # For x < 0 use the algebraically equivalent e**x / (1 + e**x),
    # whose exponent is negative and therefore cannot overflow.
    z = math.exp(x)
    return z / (1 + z)
def smoothedsigmoid(x, b=1):
    """Sigmoid with adjustable steepness: 1 / (1 + e**(-b*x)).

    English:
    b controls smoothness, lower = smoother.

    Evaluated in a numerically stable way: the naive form raises
    OverflowError in ``math.exp`` once b*x is a large negative number
    (roughly b*x < -710); branching on the sign keeps the exponent
    non-positive so the limits 0.0 and 1.0 are returned instead.
    """
    z = b * x
    if z >= 0:
        return 1 / (1 + math.exp(-z))
    # Equivalent form e**z / (1 + e**z) with a non-positive exponent.
    e = math.exp(z)
    return e / (1 + e)
| 39.128378 | 104 | 0.625108 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,599 | 0.424424 |
69ce8902e8aaaf0b89cf2ee81714c5532b36a81f | 417 | py | Python | src/Factory/Abstract/SongFactory.py | jundoll/discordbot-BSPlaylistManager | 38e80a7fc2ab779b4ab9fec33a40c0a7b14ad622 | [
"MIT"
] | null | null | null | src/Factory/Abstract/SongFactory.py | jundoll/discordbot-BSPlaylistManager | 38e80a7fc2ab779b4ab9fec33a40c0a7b14ad622 | [
"MIT"
] | 8 | 2020-10-25T06:07:41.000Z | 2020-12-30T10:03:54.000Z | src/Factory/Abstract/SongFactory.py | jundoll/discordbot-BSPlaylistManager | 38e80a7fc2ab779b4ab9fec33a40c0a7b14ad622 | [
"MIT"
] | null | null | null |
# load modules
from abc import ABCMeta, abstractmethod
from typing import List
from src.Domain.Song import Song, SongHash, Url
# definition
class ISongFactory(metaclass=ABCMeta):
    """Abstract factory interface for building Song domain objects."""

    @abstractmethod
    def generateByUrl(self, url: Url) -> List[Song]:
        """Build the list of Song instances referenced by the given URL."""
        ...

    @abstractmethod
    def generateByHash(self, hash: SongHash) -> Song:
        """Build the Song instance identified by the given hash value."""
        ...
| 20.85 | 53 | 0.717026 | 335 | 0.699374 | 0 | 0 | 163 | 0.340292 | 0 | 0 | 138 | 0.2881 |
69cf73dbd38061a833e0ed5dd8261d357ce7829e | 1,164 | py | Python | migrations/versions/b9ab1a9a2113_.py | a-sanders/currency-exchange-calc | f8ce357b2cb958b32782d2e812e51d22b5f04d3a | [
"MIT"
] | null | null | null | migrations/versions/b9ab1a9a2113_.py | a-sanders/currency-exchange-calc | f8ce357b2cb958b32782d2e812e51d22b5f04d3a | [
"MIT"
] | null | null | null | migrations/versions/b9ab1a9a2113_.py | a-sanders/currency-exchange-calc | f8ce357b2cb958b32782d2e812e51d22b5f04d3a | [
"MIT"
] | null | null | null | """empty message
Revision ID: b9ab1a9a2113
Revises:
Create Date: 2021-11-28 22:41:01.160642
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b9ab1a9a2113'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('curpairs',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('base_code', sa.String(length=3), nullable=True),
sa.Column('target_code', sa.String(length=3), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('currates',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('date', sa.Date(), nullable=True),
sa.Column('rate', sa.Float(), nullable=True),
sa.Column('pair_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['pair_id'], ['curpairs.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('currates')
op.drop_table('curpairs')
# ### end Alembic commands ###
| 27.069767 | 65 | 0.664948 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 457 | 0.392612 |