hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ace6abf19251141125b4ba86aee2181150535596 | 2,396 | py | Python | src/chapters/wall/hyperspace_helper/AssetLibrary.py | scottalmond/EscapeRoom | fc669fffebd29d7fe51c356cda7b52f86e9bae73 | [
"Apache-2.0",
"CC-BY-4.0"
] | 1 | 2017-12-01T23:37:36.000Z | 2017-12-01T23:37:36.000Z | src/chapters/wall/hyperspace_helper/AssetLibrary.py | scottalmond/EscapeRoom | fc669fffebd29d7fe51c356cda7b52f86e9bae73 | [
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | src/chapters/wall/hyperspace_helper/AssetLibrary.py | scottalmond/EscapeRoom | fc669fffebd29d7fe51c356cda7b52f86e9bae73 | [
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | import sys
#sys.path.insert(1,'/home/pi/pi3d') # to use local 'develop' branch version
#import pi3d
import numpy as np
import math
import random
import time
#quick accessors for all 3d models
# allows for cloning to minimize GPU usage
class AssetLibrary:
    """Loads and caches the pi3d models used by the hyperspace chapter.

    All .obj assets are read once here so render code can clone/reuse them
    instead of re-reading files and duplicating GPU resources.
    """
    # Root directory of the chapter's 3D assets.
    MODEL_PATH = 'chapters/wall/assets/hyperspace/3D/'

    def __init__(self,pi3d):
        """Load every model with the shared 'uv_light' shader.

        pi3d: the pi3d module/handle used to construct Shader/Model objects.
        """
        #init
        self.pi3d=pi3d
        MODEL_PATH=AssetLibrary.MODEL_PATH
        shader = self.pi3d.Shader('uv_light')
        # Degenerate (near-zero) triangle used as an invisible anchor node.
        self.invisible = self.pi3d.Triangle(corners=((0,0),(.001,.001),(-.001,.001)))
        #pod
        self.pod_frame=self.invisible.shallow_clone()
        #the frame is the central location, but the pod may be rotated about the local axes for an animatic rotation effect during translation
        self.pod_frame.children=[]
        self.pod=self.pi3d.Model(file_string=MODEL_PATH+'pod_2.obj', z=0.0)
        pod_scale=0.33
        self.pod.scale(pod_scale,pod_scale,pod_scale)
        self.laser_base = self.pi3d.Model(file_string=MODEL_PATH+'laser_base_2.obj', y=3.15)
        self.laser_gun = self.pi3d.Model(file_string=MODEL_PATH+'laser_gun_2.obj', y=0.4, z=-0.4)
        # Hierarchy: pod_frame -> pod -> laser_base -> laser_gun.
        self.laser_base.add_child(self.laser_gun)
        self.pod.add_child(self.laser_base)
        self.pod_frame.add_child(self.pod)
        self.pod.set_shader(shader)
        self.laser_gun.set_shader(shader)
        self.laser_base.set_shader(shader)
        #asteroids
        self.asteroids=[]
        asteroid_filename=['asteroid_large_1.obj']
        # NOTE(review): the loop loads 'asteroid_large_1.obj' directly instead
        # of asteroid_filename[asteroid_id] -- intentional while there is only
        # one filename, but confirm before adding more entries.
        for asteroid_id in range(len(asteroid_filename)):
            asteroid_large_scale=0.55
            print("AssetLibrary.__init__: temporary asteroid scale: ",asteroid_large_scale)
            asteroid=self.pi3d.Model(file_string=MODEL_PATH+'asteroid_large_1.obj',sx=asteroid_large_scale,sy=asteroid_large_scale,sz=asteroid_large_scale)
            asteroid.set_shader(shader)
            self.__setFog(asteroid)
            self.asteroids.append(asteroid)
        #asteroid_large_scale=0.55
        #self.asteroid_large=self.pi3d.Model(file_string=MODEL_PATH+'asteroid_large_1.obj',sx=asteroid_large_scale,sy=asteroid_large_scale,sz=asteroid_large_scale)
        #self.asteroid_large.set_shader(shader)
        #self.__setFog(self.asteroid_large)
        #rings
        self.rings=[]
        ring_filename=['branched_ring_1.obj','straight_ring_1.obj']
        for ring_id in range(len(ring_filename)):
            ring = self.pi3d.Model(file_string=MODEL_PATH+ring_filename[ring_id])
            ring.set_shader(shader)
            self.__setFog(ring)
            self.rings.append(ring)

    def __setFog(self,model):
        # Fade models to black starting at distance 110.8 (fully transparent fog colour).
        model.set_fog((0.0, 0.0, 0.0, 0.0), 110.8)
        #pass
| 36.861538 | 157 | 0.767529 |
ace6ac1264b22b9863e0408dfcc44da7200e96dc | 1,615 | py | Python | calabiyau/ui/app.py | Vuader/calabiyau | 71751bc674b16a59d019918af9e1bb717661c2ac | [
"BSD-3-Clause"
] | null | null | null | calabiyau/ui/app.py | Vuader/calabiyau | 71751bc674b16a59d019918af9e1bb717661c2ac | [
"BSD-3-Clause"
] | null | null | null | calabiyau/ui/app.py | Vuader/calabiyau | 71751bc674b16a59d019918af9e1bb717661c2ac | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2018-2019 Christiaan Frans Rademan.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
from calabiyau.ui import views
| 50.46875 | 79 | 0.780805 |
ace6ac9098bed0c2d27e5487b665f0ee61f52698 | 915 | py | Python | utils/switch.py | lxchtan/TEGTOK | 13f8722de4dcd44ce75d9a85acfe914bcb1588b0 | [
"Apache-2.0"
] | 1 | 2022-03-17T11:30:16.000Z | 2022-03-17T11:30:16.000Z | utils/switch.py | lxchtan/TEGTOK | 13f8722de4dcd44ce75d9a85acfe914bcb1588b0 | [
"Apache-2.0"
] | null | null | null | utils/switch.py | lxchtan/TEGTOK | 13f8722de4dcd44ce75d9a85acfe914bcb1588b0 | [
"Apache-2.0"
] | null | null | null | import dataloaders
import helpers
# import models
import importlib
def import_from_path(module_name, module_file_path):
    """Load and execute a Python module from an explicit file path.

    Args:
        module_name: name to give the loaded module object.
        module_file_path: filesystem path of the ``.py`` file to execute.

    Returns:
        The freshly executed module object.

    Raises:
        ImportError: if no import spec/loader could be built for the path.
    """
    # `import importlib` alone does not guarantee the `importlib.util`
    # submodule attribute exists; import it explicitly.
    import importlib.util

    module_spec = importlib.util.spec_from_file_location(module_name, module_file_path)
    if module_spec is None or module_spec.loader is None:
        # spec_from_file_location returns None for unloadable paths; fail
        # with a clear error instead of an AttributeError below.
        raise ImportError("cannot build an import spec for %r" % (module_file_path,))
    module = importlib.util.module_from_spec(module_spec)
    module_spec.loader.exec_module(module)
    return module
def get_modules(args):
    """Dynamically load the dataloader/helper/model modules named in `args`.

    Each of ``args.dataloader``, ``args.helper`` and ``args.model`` is the
    stem of a .py file under the corresponding package directory, so
    experiments can swap implementations via configuration.

    Returns:
        Tuple of (dataloader module, model module, helper module).
    """
    config = args
    dataloader = import_from_path("dataloader", f"dataloaders/{config.dataloader}.py")
    helper = import_from_path("helper", f"helpers/{config.helper}.py")
    model = import_from_path("model", f"models/{config.model}.py")
    output = (dataloader, model, helper)
    return output
def get_train_aux(args, model):
    """Build the optimizer and LR scheduler selected in `args`.

    ``args.optimizer`` / ``args.lr_scheduler`` name attributes on the factory
    objects returned by the `helpers` package.

    Returns:
        Tuple of (optimizer, lr_scheduler).
    """
    optimizers = helpers.optimizer(args, model)
    optimizer = getattr(optimizers, args.optimizer)
    lr_schedulers = helpers.lr_scheduler(args, optimizer)
    lr_scheduler = getattr(lr_schedulers, args.lr_scheduler)
    return optimizer, lr_scheduler
ace6ad0172b118e956c45fcd1b914f6916acdb34 | 8,497 | py | Python | keras-onnx/onnx_keras/frontend.py | WeiChe-Huang/ONNX_Convertor | 7ba4fe3fd9f606d39cf61b46080c3dc244dfe207 | [
"MIT"
] | 2 | 2020-07-15T09:11:42.000Z | 2021-01-11T23:49:34.000Z | keras-onnx/onnx_keras/frontend.py | WeiChe-Huang/ONNX_Convertor | 7ba4fe3fd9f606d39cf61b46080c3dc244dfe207 | [
"MIT"
] | null | null | null | keras-onnx/onnx_keras/frontend.py | WeiChe-Huang/ONNX_Convertor | 7ba4fe3fd9f606d39cf61b46080c3dc244dfe207 | [
"MIT"
] | 4 | 2020-08-26T07:08:20.000Z | 2020-09-03T09:33:59.000Z | """Frontend for exporting Keras graph to ONNX graph
"""
import os.path
import logging
import importlib
import numpy as np
import keras.models as Kmodels
import keras.utils as Kutils
import onnx as O
from onnx import shape_inference
import keras as K
import tensorflow as tf
from . import helper
from . import preprocess
from . import layers
from .exceptions import FeatureNotImplemented, OnnxNotSupport
# Please check the README.md for assumptions and modifications.
class KerasFrontend(object):
"""Keras frontend for Keras
"""
    def __init__(self):
        """Initialise logging and empty conversion state."""
        self.logger = logging.getLogger("onnx-keras")
        self.logger.setLevel(logging.DEBUG)
        # Keras and Onnx model
        self.kmodel = None
        self.omodel = None
        # Helper model attributes
        self.ops = set()  # Keras layer type names encountered during conversion
        # True model attributes
        self.values_in = []   # ONNX graph input value infos
        self.values_out = []  # ONNX graph output value infos
        self.node_list = []   # converted ONNX nodes, in emission order
        self.value_info = []  # intermediate tensor value infos
    def loadFromFile(self, kpath):
        """Read the keras model file and prepare the path for output Onnx model.

        # Arguments:
            kpath: path to the Keras model(.hdf5) file.
        """
        kpath = os.path.abspath(kpath)
        (dirname, basename) = os.path.split(kpath)
        # Output .onnx file sits next to the source model, same stem.
        self.name = basename.rpartition('.')[0]
        self.opath = os.path.join(dirname, self.name+".onnx")
        # custom_objects makes Lambda layers that reference `tf` and the
        # relu6 activation loadable.
        self.kmodel = Kmodels.load_model(kpath, custom_objects={ 'tf': tf, 'relu6': helper.relu6 })
    def loadFromModel(self, model):
        """Read the keras model directly and prepare the path for output Onnx model.

        # Arguments:
            model: the Keras model.
        """
        self.kmodel = model
        # With no source file, default the output to ./converted_model.onnx.
        self.name = 'converted_model'
        self.opath = self.name + '.onnx'
    def saveToFile(self, path=None):
        """Save the converted ONNX model to a file.

        Argument:
            path: the path to save the output file. Default path depends on how you
                loaded the model. If you load the model using loadFromFile, the
                converted file will be under the same folder of the source model,
                with the same name but different suffix. If you load the model
                using loadKerasModule, the path will be the current folder with the
                file name as model name with '.onnx'.
        """
        if path is not None:
            self.opath = path
        O.save(self.omodel, self.opath)
    def convertToOnnx(self, optimize=0, input_shape=None):
        """Convert the Keras model to onnx model.

        # Arguments:
            optimize: optimisation level forwarded to the preprocessor.
            input_shape: optional override for the (single) input shape.
        # Return:
            It will return an Onnx model
        """
        ## 1. Prepare the environment and the model
        # Check the keras model
        if self.kmodel is None:
            raise ValueError("Need to load a keras model before conversion")
        # Load all layers' converters
        converters = layers
        ## 2. Construct and process the graph
        # Preprocess and generate a layer tree
        self.logger.info("Preprocessing the graph")
        self.tree, self.input_nodes, self.output_tensors = preprocess.preprocess(self.kmodel, optimize=optimize)
        # Check all the layers in Keras
        self.logger.info("Start processing the graph")
        # Check the data format
        if helper.data_format is None:
            self.logger.warning("There is no data format specified in the model. Assume it is channels last.")
            helper.data_format = 'channels_last'
        if input_shape is not None:
            self.logger.warning("Currently, custom input size is only available for single input size. Mystery node may generate wrong size.")
            input_shape = list(map(int, input_shape))
        ## 3. Use worklist to process all the layers.
        # Initialize some variables for the worklist
        worklist = list(self.tree)
        converted_tree_tensors = dict()
        last_count = len(worklist)
        cur = 0
        while len(worklist) != 0:
            # If we are reaching the end, check whether there is any change in worklist.
            # If changes exist, back to the beginning. Otherwise, exit.
            # NOTE(review): last_count is never reassigned inside the loop in
            # this source -- confirm against upstream that the no-progress
            # detection actually terminates when nodes remain blocked.
            if cur == len(worklist):
                if cur == last_count:
                    self.logger.warning("The nodes listed below are not reachable:")
                    for bad_node in worklist:
                        self.logger.warning(bad_node.name)
                    break
                cur = 0
            node = worklist[cur]
            # If the current layer is still not ready to be processed, process the next one.
            if not node.check_input_ready(converted_tree_tensors):
                cur += 1
                continue
            # Current layer can be processed.
            # Prepare some debug information
            self.logger.debug("Processing layer %s(%s)", node.name, node.type)
            self.ops.add(node.type)
            # Check if the current node has only one output.
            if len(node.outputs) != 1:
                raise FeatureNotImplemented('Operator with more than one outputs')
            ###############
            # Input layer #
            ###############
            # Input layer is special since it has no input nodes.
            # And onnx do not have the input operator but input value info instead.
            if (node.type == "InputLayer"):
                if node.klayer is not None:
                    config = node.klayer.get_config()
                    helper.dtype = helper.convertKerasType(np.dtype(config['dtype']))
                    tree_tensor = node.outputs[0]
                    if input_shape is not None:
                        if len(input_shape) != len(node.klayer.input_shape):
                            raise RuntimeError("Unmatch input shape: expected {}, got {}".format(
                                node.klayer.input_shape, input_shape))
                        tree_tensor.set_shape(input_shape)
                    else:
                        tree_tensor.set_shape(node.klayer.input_shape)
                else:
                    # Synthetic input node (no Keras layer): take the shape
                    # from the first real layer in the tree.
                    tree_tensor = node.outputs[0]
                    if input_shape is not None:
                        if len(input_shape) != len(self.tree[1].klayer.input_shape):
                            raise RuntimeError("Unmatch input shape: expected {}, got {}".format(
                                self.tree[1].klayer.input_shape, input_shape))
                        tree_tensor.set_shape(input_shape)
                    else:
                        tree_tensor.set_shape(self.tree[1].klayer.input_shape)
                in_var = O.helper.make_tensor_value_info(
                    name=node.outputs[0].name,
                    elem_type=helper.dtype,
                    shape=tree_tensor.shape
                )
                converted_tree_tensors[tree_tensor.name] = tree_tensor
                self.values_in.append(in_var)
            #################
            # General cases #
            #################
            else:
                # Set up converter by layer name
                try:
                    Converter = getattr(converters, node.type)
                    converter = Converter(node)
                except AttributeError:
                    helper.warning_once("OP " + node.type + " is an unknown layer. Using CustomOP layer instead.")
                    converter = layers.Lambda(node)
                # Infer the output shape
                node_output_value = converter.setOutputValue()
                self.value_info.append(node_output_value)
                tree_tensor = node.outputs[0]
                converted_tree_tensors[tree_tensor.name] = tree_tensor
                # Convert and append to finished list
                nodes, value_infos = converter.generate()
                self.node_list += nodes
                self.value_info += value_infos
                self.logger.debug("Output shape: %s", str(node.outputs[0].shape))
            # Delete current layer from the worklist. And start from the beginning.
            del worklist[cur]
            cur = 0
        # Construct output tensors
        for output in self.output_tensors:
            # Compare output shape with the shape from the value infos
            shape = output.shape
            if output.name not in converted_tree_tensors:
                raise ValueError("Unknown output tensor: ", output.name)
            if converted_tree_tensors[output.name].shape != shape:
                # NOTE(review): as written, the raise below always fires on a
                # mismatch, making the final_output_change branch dead code --
                # an `else:` may have been lost; confirm against upstream.
                if output.name in helper.final_output_change:
                    shape = converted_tree_tensors[output.name].shape
                    self.logger.debug("Ignore " + output.name + " for output shape check")
                raise ValueError("Unmatched output shape: ", converted_tree_tensors[output.name].shape, shape)
            # Generate outputs
            out_var = O.helper.make_tensor_value_info(
                name=output.name,
                elem_type=helper.dtype,
                shape=shape)
            self.values_out.append(out_var)
        # Now construct the graph
        self.logger.debug("Nodes:")
        for node in self.node_list:
            self.logger.debug(node.name)
        graph_def = O.helper.make_graph(
            self.node_list,
            self.name + '_onnx',
            self.values_in,
            self.values_out,
            value_info=self.value_info
        )
        # Create the model (ModelProto)
        self.omodel = O.helper.make_model(graph_def, producer_name='Kneron')
        # O.checker.check_model(self.omodel)
        self.logger.debug("Conversion Finished. With op: " + str(self.ops))
        return self.omodel
| 37.431718 | 136 | 0.654937 |
ace6ade92fadb8db26cb2e054cc902747cbe7e43 | 1,132 | py | Python | src/main.py | jorgegus/autotext | d146c718cdb01873af0a1dde4faa04094dfaca69 | [
"BSD-2-Clause"
] | 5 | 2019-06-25T04:24:39.000Z | 2021-01-29T18:57:17.000Z | src/main.py | jorgegus/autotext | d146c718cdb01873af0a1dde4faa04094dfaca69 | [
"BSD-2-Clause"
] | null | null | null | src/main.py | jorgegus/autotext | d146c718cdb01873af0a1dde4faa04094dfaca69 | [
"BSD-2-Clause"
] | 1 | 2021-06-26T00:42:16.000Z | 2021-06-26T00:42:16.000Z | from autotext import Autotext
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.feature_extraction.text import TfidfVectorizer
import csv
root = '../datasets/'
train_sets = ['20_newsgroups/']
test_sets = ['20_newsgroups/']
skip_sets = ['20news-18828']
autotext = Autotext( strategy = 'classif', limit_memory = True)
#try:
autotext.train(root+train_sets[0], skip = skip_sets[0])
predictions = autotext.predict(root+test_sets[0])
'''
except Exception as e:
predictions = []
print('ERROR: ')
print(e)
'''
a = accuracy_score(predictions,autotext.y_test)
f = f1_score(predictions,autotext.y_test,average='macro')
p = precision_score(predictions,autotext.y_test,average='macro')
r = recall_score(predictions,autotext.y_test,average='macro')
with open('../results/performance.csv', 'w+') as f_results:
rwriter = csv.writer(f_results, delimiter=',')
rwriter.writerow(['Dataset', 'Accuracy', 'F1', 'Precission', 'Recall'])
rwriter.writerow([test_sets[0], a, f, p, r])
print(train_sets[0]+ ': '+str(a))
| 32.342857 | 72 | 0.75 |
ace6ae013d1d31fd5f95f75c62c30a9b3b775cf5 | 35,752 | py | Python | weasyl/define.py | hyena/weasyl | a43ad885eb07ae89d6639f289a5b95f3a177439c | [
"Apache-2.0"
] | null | null | null | weasyl/define.py | hyena/weasyl | a43ad885eb07ae89d6639f289a5b95f3a177439c | [
"Apache-2.0"
] | null | null | null | weasyl/define.py | hyena/weasyl | a43ad885eb07ae89d6639f289a5b95f3a177439c | [
"Apache-2.0"
] | null | null | null | # define.py
import os
import re
import time
import random
import urllib
import hashlib
import logging
import numbers
import datetime
import urlparse
import functools
import traceback
import string
import subprocess
import unicodedata
import anyjson as json
import arrow
import requests
import web
import sqlalchemy as sa
import sqlalchemy.orm
from psycopg2cffi.extensions import QuotedString
import pytz
import macro
import errorcode
from error import WeasylError
from libweasyl.legacy import UNIXTIME_OFFSET as _UNIXTIME_OFFSET
from libweasyl.models.tables import metadata as meta
from libweasyl import html, text, ratings, security, staff
from weasyl.compat import FakePyramidRequest
from weasyl.config import config_obj, config_read, config_read_setting, config_read_bool
from weasyl.cache import region
from weasyl import config
_shush_pyflakes = [sqlalchemy.orm, config_read]
# Development toggles: when set in the environment, templates are re-parsed
# and the asset manifest re-read on every use instead of being cached.
reload_templates = bool(os.environ.get('WEASYL_RELOAD_TEMPLATES'))
reload_assets = bool(os.environ.get('WEASYL_RELOAD_ASSETS'))
def _load_resources():
    """(Re)load the asset manifest mapping logical names to fingerprinted paths."""
    global resource_paths
    # rev-manifest.json is produced by the front-end asset build; templates
    # look resources up through it (see the `resource_path` template global).
    with open(os.path.join(macro.MACRO_SYS_BASE_PATH, 'build/rev-manifest.json'), 'r') as f:
        resource_paths = json.loads(f.read())
_load_resources()
# XXX: eventually figure out how to include this in libweasyl.
def record_timing(func):
    """Decorator: report each call's wall-clock duration as a timing metric.

    The metric key is derived from the wrapped function's module and name.
    """
    key = 'timing.{0.__module__}.{0.__name__}'.format(func)
    @functools.wraps(func)
    def wrapper(*a, **kw):
        start = time.time()
        try:
            return func(*a, **kw)
        finally:
            # Record even when the call raises.
            # NOTE(review): `metric` is not defined or imported in the visible
            # portion of this module -- confirm it is provided elsewhere.
            delta = time.time() - start
            metric('timing', key, delta)
    return wrapper
_sqlalchemy_url = config_obj.get('sqlalchemy', 'url')
if config._in_test:
    # Point the test suite at a separate database.
    _sqlalchemy_url += '_test'
engine = meta.bind = sa.create_engine(_sqlalchemy_url, max_overflow=25, pool_size=10)
# Thread-scoped autocommit sessions; one is cached per request in connect().
sessionmaker = sa.orm.scoped_session(sa.orm.sessionmaker(bind=engine, autocommit=True))
def connect():
    """Return the request-scoped SQLAlchemy session, creating it on first use.

    The session is cached on ``web.ctx`` so repeated calls within a request
    share it. Raises a 503 HTTPError when the database is unreachable.
    """
    if 'pg_connection' not in web.ctx:
        web.ctx.pg_connection = db = sessionmaker()
        try:
            # Make sure postgres is still there before issuing any further queries.
            db.execute('SELECT 1')
        except sa.exc.OperationalError:
            # Report via raven/sentry when available, else print the traceback.
            log_exc = web.ctx.env.get('raven.captureException', traceback.print_exc)
            log_exc()
            raise web.webapi.HTTPError('503 Service Unavailable', data='database error')
    return web.ctx.pg_connection
def execute(statement, argv=None, options=None):
    """
    Executes an SQL statement; if `statement` represents a SELECT or RETURNING
    statement, the query results will be returned. Note that 'argv' and `options`
    need not be lists if they would have contained only one element.

    Supported options (applied to SELECT/RETURNING results):
        "list"    -- rows as mutable lists instead of row tuples
        "zero"    -- like "list", plus None cells replaced with 0
        "bool"    -- truthiness of the first cell of the first row
        "within"  -- flat list of the first column
        "single"  -- first row only, or [] when empty
        "element" -- first cell only, or [] when empty
    """
    db = connect()
    if argv is None:
        argv = list()
    if options is None:
        options = list()
    if argv and not isinstance(argv, list):
        argv = [argv]
    if options and not isinstance(options, list):
        options = [options]
    if argv:
        # Legacy style: arguments are escaped with sql_escape and interpolated
        # into the statement text, not passed as driver parameters.
        statement %= tuple([sql_escape(i) for i in argv])
    query = db.connection().execute(statement)
    if statement.lstrip()[:6] == "SELECT" or " RETURNING " in statement:
        query = query.fetchall()
        if "list" in options or "zero" in options:
            query = [list(i) for i in query]
        if "zero" in options:
            for i in range(len(query)):
                for j in range(len(query[i])):
                    if query[i][j] is None:
                        query[i][j] = 0
        if "bool" in options:
            return query and query[0][0]
        elif "within" in options:
            return [x[0] for x in query]
        elif "single" in options:
            return query[0] if query else list()
        elif "element" in options:
            return query[0][0] if query else list()
        return query
    else:
        # Non-returning statements: just release the cursor.
        query.close()
def quote_string(s):
    """
    Return `s` escaped for inclusion between single quotes in an SQL
    statement. The quotes added by psycopg2's QuotedString are stripped,
    and '%' is doubled so the result survives later %-interpolation.
    """
    quoted = QuotedString(s).getquoted()
    assert quoted[0] == quoted[-1] == "'"
    return quoted[1:-1].replace('%', '%%')
def sql_escape(target):
    """
    SQL-escape `target` for direct interpolation into a statement.

    Byte and unicode strings are quoted via `quote_string`; anything else is
    coerced to an integer, falling back to 0 when coercion fails.
    """
    if isinstance(target, str):
        # Escape ASCII string
        return quote_string(target)
    elif isinstance(target, unicode):
        # Escape Unicode string
        return quote_string(target.encode("utf-8"))
    else:
        # Coerce to integer; catch only conversion errors instead of a bare
        # `except`, which also swallowed KeyboardInterrupt/SystemExit.
        try:
            return int(target)
        except (TypeError, ValueError):
            return 0
def sql_number_list(target):
    """
    Render integers as the body of an SQL IN clause, e.g. "(1, 2, 3)".

    A bare number is treated as a one-element list; a falsy/empty value
    raises ValueError.
    """
    if not target:
        raise ValueError
    if not isinstance(target, list):
        target = [target]
    joined = ", ".join("%d" % (item,) for item in target)
    return "(" + joined + ")"
# Short git revision of the running code; exposed to templates as SHA.
CURRENT_SHA = subprocess.check_output(["git", "rev-parse", "--short", "HEAD"]).strip()

# Shared stand-in request object exposed to templates as `request`.
the_fake_request = FakePyramidRequest()

# Caching all templates. Parsing templates is slow; we don't need to do it all
# the time and there's plenty of memory for storing the compiled templates.
_template_cache = {}
def compile(template_name):
    """
    Compiles a template file and returns the result.

    Compiled templates are cached in `_template_cache`; the reload_templates
    development flag forces a re-parse on every call.
    """
    template = _template_cache.get(template_name)
    if template is None or reload_templates:
        template_path = os.path.join(macro.MACRO_SYS_BASE_PATH, 'templates', template_name)
        # The globals below form the helper API available inside templates.
        # NOTE(review): user_type, summarize, thumb_for_sub, query_string,
        # cdnify_url and get_resource_path are not defined in the visible
        # portion of this module -- presumably defined later in the file.
        _template_cache[template_name] = template = web.template.frender(
            template_path,
            globals={
                "INT": int,
                "STR": str,
                "SUM": sum,
                "LOGIN": get_sysname,
                "TOKEN": get_token,
                "CSRF": get_csrf_token,
                "USER_TYPE": user_type,
                "DATE": convert_date,
                "TIME": convert_time,
                "PRICE": text_price_amount,
                "SYMBOL": text_price_symbol,
                "TITLE": titlebar,
                "RENDER": render,
                "COMPILE": compile,
                "CAPTCHA": captcha_public,
                "MARKDOWN": text.markdown,
                "SUMMARIZE": summarize,
                "CONFIG": config_read_setting,
                "SHA": CURRENT_SHA,
                "NOW": get_time,
                "THUMB": thumb_for_sub,
                "M": macro,
                "R": ratings,
                "SLUG": text.slug_for,
                "QUERY_STRING": query_string,
                "INLINE_JSON": html.inline_json,
                "CDNIFY": cdnify_url,
                "PATH": get_path,
                "arrow": arrow,
                "getattr": getattr,
                "sorted": sorted,
                "staff": staff,
                "request": the_fake_request,
                "resource_path": get_resource_path,
            })
    return template
def render(template_name, argv=()):
    """
    Renders a template and returns the resulting HTML.
    """
    template = compile(template_name)
    # frender templates return a storage-like object; coerce to text.
    return unicode(template(*argv))
def titlebar(title, backtext=None, backlink=None):
    # Render the page-title bar, optionally with a labelled "back" link.
    return render("common/stage_title.html", [title, backtext, backlink])
def errorpage(userid, code=None, links=None,
              unexpected=None, request_id=None, **extras):
    """Render a full error page.

    `code` is the user-facing message (markdown; defaults to the generic
    unexpected-error text); `unexpected` appends an error-code sentence;
    `links` is a list of extra navigation links.
    """
    if links is None:
        links = []
    if code is None:
        code = errorcode.unexpected
    if unexpected:
        code = "".join([code, " The error code associated with this condition "
                        "is '", unexpected, "'."])
    code = text.markdown(code)
    return webpage(userid, "error/error.html", [code, links, request_id], **extras)
def webpage(userid=0, template=None, argv=None, options=None, **extras):
    """Assemble a complete page around the rendered `template`.

    With no template, a generic error page is used (different text for
    signed-in users vs. guests).
    """
    if argv is None:
        argv = []
    if options is None:
        options = []
    if template is None:
        if userid:
            template, argv = "error/error.html", [errorcode.signed]
        else:
            template, argv = "error/error.html", [errorcode.unsigned]
    page = common_page_start(userid, options=options, **extras)
    page.append(render(template, argv))
    return common_page_end(userid, page, options=options)
def plaintext(target):
    """
    Strip `target` down to its ASCII characters only.
    """
    ascii_only = (ch for ch in target if ord(ch) < 128)
    return "".join(ascii_only)
def _captcha_section():
    # Per-host config section name (e.g. "recaptcha-www.example.com"), so
    # different hostnames can use different reCAPTCHA key pairs.
    host = web.ctx.env.get('HTTP_HOST', '').partition(':')[0]
    return 'recaptcha-' + host
def captcha_public():
    """
    Returns the reCAPTCHA public key, or None if CAPTCHA verification
    is disabled.
    """
    if config_read_bool("captcha_disable_verification", value=False):
        return None
    # Key is looked up in the per-host recaptcha config section.
    return config_obj.get(_captcha_section(), 'public_key')
def captcha_verify(form):
    """Validate a submitted reCAPTCHA response with Google's siteverify API.

    Always True when verification is disabled in config; False when the form
    carried no response.
    """
    if config_read_bool("captcha_disable_verification", value=False):
        return True
    if not form.g_recaptcha_response:
        return False
    data = dict(
        secret=config_obj.get(_captcha_section(), 'private_key'),
        response=form.g_recaptcha_response['g-recaptcha-response'],
        remoteip=get_address())
    # NOTE(review): `http_post` is not defined in the visible portion of this
    # module -- confirm it is provided elsewhere.
    response = http_post('https://www.google.com/recaptcha/api/siteverify', data=data)
    captcha_validation_result = response.json()
    return captcha_validation_result['success']
def get_userid(sessionid=None):
    """
    Returns the userid for the current request's credentials; 0 when the
    request is unauthenticated.

    Credential precedence: X-Weasyl-API-Key header, then an OAuth2 bearer
    Authorization header, then the session cookie. Invalid API keys or
    bearer tokens raise 401 Unauthorized with a WWW-Authenticate header.

    NOTE(review): the `sessionid` parameter is never used in this body --
    confirm whether callers still pass it.
    """
    api_token = web.ctx.env.get('HTTP_X_WEASYL_API_KEY')
    authorization = web.ctx.env.get('HTTP_AUTHORIZATION')
    if api_token is not None:
        userid = engine.execute("SELECT userid FROM api_tokens WHERE token = %(token)s", token=api_token).scalar()
        if not userid:
            web.header('WWW-Authenticate', 'Weasyl-API-Key realm="Weasyl"')
            raise web.webapi.Unauthorized()
        return userid
    elif authorization:
        # Imported lazily to avoid a circular import.
        from weasyl.oauth2 import get_userid_from_authorization
        userid = get_userid_from_authorization()
        if not userid:
            web.header('WWW-Authenticate', 'Bearer realm="Weasyl" error="invalid_token"')
            raise web.webapi.Unauthorized()
        return userid
    else:
        userid = web.ctx.weasyl_session.userid
        return 0 if userid is None else userid
def get_token():
    """Return the session's CSRF token, generating and saving it lazily.

    API requests carry no session CSRF state, so they get ''.
    """
    import api
    if api.is_api_user():
        return ''
    sess = web.ctx.weasyl_session
    if sess.csrf_token is None:
        sess.csrf_token = security.generate_key(64)
        # Mark the session dirty so the new token is persisted.
        sess.save = True
    return sess.csrf_token
def get_csrf_token():
    # Hidden form field carrying the session CSRF token.
    return '<input type="hidden" name="token" value="%s" />' % (get_token(),)
# Characters permitted in a "sysname" (canonical login name):
# ASCII lowercase letters and digits only.
SYSNAME_CHARACTERS = (
    set(unicode(string.ascii_lowercase)) |
    set(unicode(string.digits)))
def get_sysname(target):
    """
    Return `target` stripped of all non-alphanumeric characters and lowercased.
    """
    if isinstance(target, unicode):
        # NFD-normalize so accented letters decompose and their base ASCII
        # letter survives the SYSNAME_CHARACTERS filter.
        normalized = unicodedata.normalize("NFD", target.lower())
        return "".join(i for i in normalized if i in SYSNAME_CHARACTERS).encode("ascii")
    else:
        # Byte strings: keep alphanumerics only.
        return "".join(i for i in target if i.isalnum()).lower()
@region.cache_on_arguments()
@record_timing
def _get_config(userid):
    # Cached raw settings-flag string from the profile table.
    return engine.execute("SELECT config FROM profile WHERE userid = %(user)s", user=userid).scalar()
def get_config(userid):
    """Return the user's settings-flag string; '' for guests (userid 0)."""
    if not userid:
        return ""
    return _get_config(userid)
@region.cache_on_arguments()
@record_timing
def get_login_settings(userid):
    # Cached raw settings string from the login table.
    return engine.execute("SELECT settings FROM login WHERE userid = %(user)s", user=userid).scalar()
@region.cache_on_arguments()
@record_timing
def _get_profile_settings(userid):
    """
    This helper function is required because we want to return
    the ProfileSettings object, which by itself is not serializable
    by our caching library, so the raw jsonb dict is cached instead.

    :param userid: user to fetch settings for (None yields {})
    :return: json representation of profile settings
    """
    if userid is None:
        return {}
    jsonb = engine.execute("SELECT jsonb_settings FROM profile WHERE userid = %(user)s",
                           user=userid).scalar()
    if jsonb is None:
        jsonb = {}
    return jsonb
def get_profile_settings(userid):
    """Wrap the (cached) jsonb settings dict in a ProfileSettings object."""
    # Imported lazily to avoid a circular import.
    from weasyl.profile import ProfileSettings
    return ProfileSettings(_get_profile_settings(userid))
def get_rating(userid):
    """Return the maximum content rating code the user may currently view.

    Guests get GENERAL; SFW mode uses the user's chosen max SFW rating.
    """
    if not userid:
        return ratings.GENERAL.code
    profile_settings = get_profile_settings(userid)
    if is_sfw_mode():
        # if no explicit max SFW rating picked assume general as a safe default
        return profile_settings.max_sfw_rating
    config = get_config(userid)
    # Settings-string flags: 'p' = explicit, 'a' = mature, 'm' = moderate.
    if 'p' in config:
        return ratings.EXPLICIT.code
    elif 'a' in config:
        return ratings.MATURE.code
    elif 'm' in config:
        return ratings.MODERATE.code
    else:
        return ratings.GENERAL.code
# this method is used specifically for the settings page, where
# the max sfw/nsfw rating need to be displayed separately
def get_config_rating(userid):
    """
    Retrieve the sfw-mode and regular-mode ratings separately
    :param userid: the user to retrieve ratings for
    :return: a tuple of (max_rating, max_sfw_rating)
    """
    config = get_config(userid)
    # Same flag letters as get_rating: 'p' explicit, 'a' mature, 'm' moderate.
    max_rating = ratings.GENERAL.code
    if 'p' in config:
        max_rating = ratings.EXPLICIT.code
    elif 'a' in config:
        max_rating = ratings.MATURE.code
    elif 'm' in config:
        max_rating = ratings.MODERATE.code
    profile_settings = get_profile_settings(userid)
    sfw_rating = profile_settings.max_sfw_rating
    return max_rating, sfw_rating
def is_sfw_mode():
    """
    determine whether the current session is in SFW mode
    :return: TRUE if sfw or FALSE if nsfw
    """
    # Driven by the "sfwmode" cookie; any value other than "sfw" means NSFW.
    return web.cookies(sfwmode="nsfw").sfwmode == "sfw"
def get_premium(userid):
    """Return True when the user's settings string carries the 'd' (premium) flag."""
    if not userid:
        return False
    config = get_config(userid)
    return "d" in config
@region.cache_on_arguments()
@record_timing
def _get_display_name(userid):
    """
    Return the display name associated with `userid`; if no such user exists,
    return None.
    """
    return engine.execute("SELECT username FROM profile WHERE userid = %(user)s", user=userid).scalar()
def get_display_name(userid):
    """Cached display-name lookup; None for guests or unknown users."""
    if not userid:
        return None
    return _get_display_name(userid)
def get_int(target):
    """
    Coerce `target` to an integer.

    Numbers are truncated with int(); for anything else, the digit
    characters are extracted and concatenated. Returns 0 when there are no
    digits or the value cannot be interpreted at all.
    """
    if isinstance(target, numbers.Number):
        return int(target)
    try:
        return int("".join(i for i in target if i.isdigit()))
    except (TypeError, ValueError, AttributeError):
        # Non-iterable/non-string input or no digit characters; narrowed
        # from a bare `except`, which also swallowed KeyboardInterrupt.
        return 0
def get_targetid(*argv):
    """Return the first truthy argument, or None when there is none."""
    return next((candidate for candidate in argv if candidate), None)
def get_search_tag(target):
    """Normalize a raw tag string to canonical form.

    ASCII only, spaces become underscores, other non-alphanumerics are
    dropped, underscore runs are collapsed, and the result is lowercased.
    """
    target = plaintext(target)
    target = target.replace(" ", "_")
    target = "".join(i for i in target if i.isalnum() or i in "_")
    target = target.strip("_")
    # Collapse consecutive underscores left by removed characters.
    target = "_".join(i for i in target.split("_") if i)
    return target.lower()
def get_time():
    """
    Returns the current unixtime, shifted by the legacy Weasyl epoch offset
    (_UNIXTIME_OFFSET) used by stored timestamps.
    """
    return int(time.time()) + _UNIXTIME_OFFSET
def get_timestamp():
    """
    Returns the current date in the format YYYY-MM.
    """
    # Based on the offset clock from get_time() so it matches stored times.
    return time.strftime("%Y-%m", time.localtime(get_time()))
# Base directory (as a one-element list, for easy concatenation in
# get_hash_path) for each content type's hash-bucketed storage tree.
_hash_path_roots = {
    "user": [macro.MACRO_SYS_USER_PATH],
    "save": [macro.MACRO_SYS_SAVE_PATH],
    "submit": [macro.MACRO_SYS_SUBMIT_PATH],
    "char": [macro.MACRO_SYS_CHAR_PATH],
    "journal": [macro.MACRO_SYS_JOURNAL_PATH],
    None: [],
}
def get_hash_path(target_id, content_type=None):
    """Return the bucketed storage directory for `target_id`.

    The id's SHA-1 hex digest is split into six two-character path segments
    (12 hex chars total) beneath the content type's root, ending with '/'.
    """
    path_hash = hashlib.sha1(str(target_id)).hexdigest()
    path_hash = "/".join([path_hash[i:i + 2] for i in range(0, 11, 2)])
    root = _hash_path_roots[content_type]
    return "".join(root + [path_hash, "/"])
def get_userid_list(target):
    """Resolve a ';'-separated list of usernames to the userids that exist."""
    query = engine.execute(
        "SELECT userid FROM login WHERE login_name = ANY (%(usernames)s)",
        usernames=[get_sysname(i) for i in target.split(";")])
    return [userid for (userid,) in query]
def get_ownerid(submitid=None, charid=None, journalid=None, commishid=None):
    """Return the owning userid of the first id argument supplied, else None."""
    if submitid:
        return engine.execute("SELECT userid FROM submission WHERE submitid = %(id)s", id=submitid).scalar()
    if charid:
        return engine.execute("SELECT userid FROM character WHERE charid = %(id)s", id=charid).scalar()
    if journalid:
        return engine.execute("SELECT userid FROM journal WHERE journalid = %(id)s", id=journalid).scalar()
    if commishid:
        return engine.execute("SELECT userid FROM commission WHERE commishid = %(id)s", id=commishid).scalar()
def get_random_set(target, count=None):
    """
    Pick random items from `target`.

    With a `count`, return up to that many unique items (the whole list, in
    random order, when `count` exceeds its length). Without one, return a
    single randomly chosen item.
    """
    if not count:
        return random.choice(target)
    sample_size = min(count, len(target))
    return random.sample(target, sample_size)
def get_address():
    """Return the client's address, preferring the X-Forwarded-For header over the socket IP."""
    return web.ctx.env.get("HTTP_X_FORWARDED_FOR", web.ctx.ip)
def get_path():
    """Return the full request path including the application mount point and query string."""
    return web.ctx.homepath + web.ctx.fullpath
def text_price_amount(target):
    """Format an amount in cents as a dollar string, e.g. 1234 -> "12.34"."""
    whole = target / 100
    frac = target % 100
    # Zero-pad single-digit cent values so "5" renders as ".05".
    pad = "0" if frac <= 9 else ""
    return "%i.%s%i" % (whole, pad, frac)
def text_price_symbol(target):
    """
    Return the currency symbol for the first currency-code letter found in
    `target`; defaults to "$" when none of the known letters is present.
    """
    # Order matters: letters are tested in the same sequence as before.
    for letter, symbol in (
        ("e", "€"),
        ("p", "£"),
        ("y", "¥"),
        ("c", "C$"),
        ("u", "A$"),
        ("m", "M$"),
    ):
        if letter in target:
            return symbol
    return "$"
def text_first_line(target, strip=False):
    """
    Return the first line of text; if `strip` is True, return all but the
    first line of text.
    """
    head, _sep, tail = target.partition("\n")
    return tail if strip else head
def text_fix_url(target):
    """Prepend "http://" to `target` unless it already carries an http(s) scheme."""
    if not target.startswith(("http://", "https://")):
        target = "http://" + target
    return target
def text_bool(target, default=False):
    """
    Parse `target` as a boolean: "true" (case/whitespace-insensitive) is
    True; an empty string falls back to `default`; anything else is False.
    """
    normalized = target.lower().strip()
    if normalized == "true":
        return True
    return default and target == ""
def convert_to_localtime(target):
    """
    Convert `target` to the session user's local time.

    `target` may be an arrow.Arrow, a site unixtime (int, offset by
    _UNIXTIME_OFFSET), or None meaning "now".
    """
    tz = web.ctx.weasyl_session.timezone
    if isinstance(target, arrow.Arrow):
        return tz.localtime(target.datetime)
    else:
        # Strip the site offset back off to recover a real POSIX timestamp.
        target = int(get_time() if target is None else target) - _UNIXTIME_OFFSET
        return tz.localtime_from_timestamp(target)
def convert_date(target=None):
    """
    Returns the date in the format 1 January 1970. If no target is passed, the
    current date is returned.
    """
    dt = convert_to_localtime(target)
    result = dt.strftime("%d %B %Y")
    # %d zero-pads the day of month; drop the leading zero for days 1-9.
    return result[1:] if result and result[0] == "0" else result
def convert_time(target=None):
    """
    Returns the time in the format 16:00:00. If no target is passed, the
    current time is returned.
    """
    dt = convert_to_localtime(target)
    config = get_config(get_userid())
    # The '2' config flag selects 12-hour clock display for this user.
    if '2' in config:
        return dt.strftime("%I:%M:%S %p %Z")
    else:
        return dt.strftime("%H:%M:%S %Z")
def convert_unixdate(day, month, year, escape=True):
    """
    Returns the unixtime corresponding to the beginning of the specified date; if
    the date is not valid, None is returned.

    When `escape` is True the inputs are first coerced with get_int().
    """
    if escape:
        day, month, year = (get_int(i) for i in [day, month, year])
    try:
        ret = int(time.mktime(datetime.date(year, month, day).timetuple()))
    except (ValueError, TypeError, OverflowError, OSError):
        # datetime.date rejects impossible dates (ValueError/TypeError);
        # time.mktime raises OverflowError/OSError for out-of-range values.
        # The previous bare `except:` also swallowed KeyboardInterrupt etc.
        return None
    # range of a postgres integer
    if ret > 2147483647 or ret < -2147483648:
        return None
    return ret
def convert_inputdate(target):
    """
    Parse a free-form date string into a site unixtime via convert_unixdate().

    Accepts "1 January 1990", "January 1 1990" and "1/1/1990" styles.
    Returns None for empty or unrecognized input.
    """
    def _month(target):
        # Map a (possibly partial) English month name to its 1-based number.
        # Falls through returning None when nothing matches, which
        # convert_unixdate() then treats as an invalid date.
        target = "".join(i for i in target if i in "abcdefghijklmnopqrstuvwxyz")
        for i, j in enumerate(["ja", "f", "mar", "ap", "may", "jun", "jul", "au",
                               "s", "o", "n", "d"]):
            if target.startswith(j):
                return i + 1
    target = target.strip().lower()
    if not target:
        return
    if re.match(r"[0-9]+ [a-z]+,? [0-9]+", target):
        # 1 January 1990
        target = target.split()
        target[0] = get_int(target[0])
        target[2] = get_int(target[2])
        # If the first number looks like a year, treat it as "1990 January 1".
        if 1933 <= target[0] <= 2037:
            return convert_unixdate(target[2], _month(target[1]), target[0])
        else:
            return convert_unixdate(target[0], _month(target[1]), target[2])
    elif re.match("[a-z]+ [0-9]+,? [0-9]+", target):
        # January 1 1990
        target = target.split()
        target[1] = get_int(target[1])
        target[2] = get_int(target[2])
        return convert_unixdate(target[1], _month(target[0]), target[2])
    elif re.match("[0-9]+ ?/ ?[0-9]+ ?/ ?[0-9]+", target):
        # 1/1/1990
        target = target.split("/")
        target[0] = get_int(target[0])
        target[1] = get_int(target[1])
        target[2] = get_int(target[2])
        # A first field greater than 12 must be the day (DD/MM/YYYY);
        # otherwise assume MM/DD/YYYY.
        if target[0] > 12:
            return convert_unixdate(target[0], target[1], target[2])
        else:
            return convert_unixdate(target[1], target[0], target[2])
def convert_age(target):
    """
    Return years elapsed since site unixtime `target` (31556926 s per year).
    NOTE(review): on Python 2 this is integer division (whole years); under
    Python 3 it would return a float -- confirm before porting.
    """
    return (get_time() - target) / 31556926
def age_in_years(birthdate):
    """
    Determines an age in years based off of the given arrow.Arrow birthdate
    and the current date.
    """
    now = arrow.now()
    # Subtract one year if this year's birthday hasn't happened yet.
    is_upcoming = (now.month, now.day) < (birthdate.month, birthdate.day)
    return now.year - birthdate.year - int(is_upcoming)
def user_type(userid):
    """
    Return the staff role name for `userid`, or None for regular users.

    Roles are checked in decreasing order of privilege, so a user present
    in multiple staff sets reports the highest one.
    """
    if userid in staff.DIRECTORS:
        return "director"
    if userid in staff.TECHNICAL:
        return "tech"
    if userid in staff.ADMINS:
        return "admin"
    if userid in staff.MODS:
        return "mod"
    if userid in staff.DEVELOPERS:
        return "dev"
    return None
@region.cache_on_arguments(expiration_time=180)
@record_timing
def _page_header_info(userid):
    """
    Build header notification counts for `userid` (cached for 180 seconds).

    Returns a 5-element list: index 0 is the unread message count; the
    remaining slots hold welcome (notification) counts keyed by
    `type / 1000`, stored at index `5 - group`. Submission notifications
    (types 2000-2999) are filtered by the viewer's content rating.
    """
    messages = engine.execute(
        "SELECT COUNT(*) FROM message WHERE otherid = %(user)s AND settings ~ 'u'", user=userid).scalar()
    result = [messages, 0, 0, 0, 0]
    counts = engine.execute(
        """
        SELECT type / 1000 AS group, COUNT(*) AS count
        FROM welcome
        LEFT JOIN submission
        ON welcome.targetid = submission.submitid
        AND welcome.type BETWEEN 2000 AND 2999
        WHERE
        welcome.userid = %(user)s
        AND (
        submission.rating IS NULL
        OR submission.rating <= %(rating)s
        )
        GROUP BY "group"
        """, user=userid, rating=get_rating(userid))
    for group, count in counts:
        result[5 - group] = count
    return result
def page_header_info(userid):
    """Assemble the page-header template context: notifications, display name, media and SFW mode."""
    from weasyl import media
    # sfw/nsfw display mode is persisted in a cookie; defaults to "nsfw".
    sfw = web.cookies(sfwmode="nsfw").sfwmode
    return {
        "welcome": _page_header_info(userid),
        "userid": userid,
        "username": get_display_name(userid),
        "user_media": media.get_user_media(userid),
        "sfw": sfw,
    }
def common_page_start(userid, options=None, **extended_options):
    """Render the opening page fragment; header data is only included when logged in."""
    if options is None:
        options = []
    userdata = None
    if userid:
        userdata = page_header_info(userid)
    data = render(
        "common/page_start.html", [userdata, options, extended_options])
    return [data]
def _active_users(seconds):
    """
    Ask the configured usercount service how many users were active in the
    last `seconds` seconds.

    Returns None when the service is unconfigured, unreachable, or replies
    with a non-200 status.
    """
    usercount_url_template = config_read_setting('url_template', section='usercount')
    if not usercount_url_template:
        return
    try:
        resp = http_get(usercount_url_template % (seconds,))
    except WeasylError:
        # http_get wraps request failures in WeasylError; treat as unknown.
        return
    if resp.status_code != 200:
        return
    return resp.json()['users']
@region.cache_on_arguments(expiration_time=600)
@record_timing
def active_users():
    """Return a display string of hourly/daily active-user counts (cached for 600 seconds)."""
    active_users = []
    for span, seconds in [('hour', 60 * 60), ('day', 60 * 60 * 24)]:
        users = _active_users(seconds)
        if users:
            active_users.append((span, users))
    return '; '.join(
        '%d users active in the last %s' % (users, span)
        for span, users in active_users)
def common_page_end(userid, page, rating=None, config=None,
                    now=None, options=None):
    """
    Append the closing page fragment to `page` and return the joined HTML.

    `userid`, `rating`, `config` and `now` are accepted for call-site
    compatibility but are unused here.
    """
    active_users_string = active_users()
    data = render("common/page_end.html", [options, active_users_string])
    page.append(data)
    return "".join(page)
def common_status_check(userid):
    """
    Returns the name of the script to which the user should be redirected
    if required.
    """
    if not userid:
        return None
    # Login settings flags, checked most urgent first.
    settings = get_login_settings(userid)
    if "p" in settings:
        return "resetpassword"
    if "i" in settings:
        return "resetbirthday"
    if "e" in settings:
        return "resetemail"
    if "b" in settings:
        return "banned"
    if "s" in settings:
        return "suspended"
    return None
def common_status_page(userid, status):
    """
    Raise the redirect to the script returned by common_status_check() or render
    the appropriate site status error page.
    """
    if status == "admin":
        return errorpage(0, errorcode.admin_mode)
    elif status == "local":
        return errorpage(0, errorcode.local_mode)
    elif status == "offline":
        return errorpage(0, errorcode.offline_mode)
    elif status == "address":
        return "IP ADDRESS TEMPORARILY REJECTED"
    elif status == "resetpassword":
        return webpage(userid, "force/resetpassword.html")
    elif status == "resetbirthday":
        return webpage(userid, "force/resetbirthday.html")
    elif status == "resetemail":
        return "reset email"  # todo
    elif status in ('banned', 'suspended'):
        from weasyl import moderation, login
        # Force the user out before showing the ban/suspension page.
        login.signout(userid)
        if status == 'banned':
            reason = moderation.get_ban_reason(userid)
            return errorpage(
                userid,
                "Your account has been permanently banned and you are no longer allowed "
                "to sign in.\n\n%s\n\nIf you believe this ban is in error, please "
                "contact support@weasyl.com for assistance." % (reason,))
        elif status == 'suspended':
            suspension = moderation.get_suspension(userid)
            return errorpage(
                userid,
                "Your account has been temporarily suspended and you are not allowed to "
                "be logged in at this time.\n\n%s\n\nThis suspension will be lifted on "
                "%s.\n\nIf you believe this suspension is in error, please contact "
                "support@weasyl.com for assistance." % (suspension.reason, convert_date(suspension.release)))
# Numeric view-type codes recorded in the `views` table for each feature.
_content_types = {
    'submit': 110,
    'char': 120,
    'journal': 130,
    'profile': 210,
}
def common_view_content(userid, targetid, feature):
    """
    Return True if a record was successfully inserted into the contentview table
    and the page view statistic incremented, else False.

    NOTE(review): viewing one's own profile returns None (falsy) rather than
    False; callers testing `is False` would misbehave -- confirm intended.
    """
    if feature == "profile" and targetid == userid:
        return
    # Unknown features fall back to view-type code 0.
    typeid = _content_types.get(feature, 0)
    if userid:
        viewer = 'user:%d' % (userid,)
    else:
        viewer = get_address()
    # ON CONFLICT DO NOTHING makes a repeat view by the same viewer a no-op.
    result = engine.execute(
        'INSERT INTO views (viewer, targetid, type) VALUES (%(viewer)s, %(targetid)s, %(type)s) ON CONFLICT DO NOTHING',
        viewer=viewer, targetid=targetid, type=typeid)
    if result.rowcount == 0:
        return False
    if feature == "submit":
        engine.execute("UPDATE submission SET page_views = page_views + 1 WHERE submitid = %(id)s", id=targetid)
    elif feature == "char":
        engine.execute("UPDATE character SET page_views = page_views + 1 WHERE charid = %(id)s", id=targetid)
    elif feature == "journal":
        engine.execute("UPDATE journal SET page_views = page_views + 1 WHERE journalid = %(id)s", id=targetid)
    elif feature == "profile":
        engine.execute("UPDATE profile SET page_views = page_views + 1 WHERE userid = %(id)s", id=targetid)
    return True
def append_to_log(logname, **parameters):
    """Append `parameters` (plus a 'when' timestamp) as one JSON line to this month's log file."""
    parameters['when'] = datetime.datetime.now().isoformat()
    log_path = '%s%s.%s.log' % (macro.MACRO_SYS_LOG_PATH, logname, get_timestamp())
    with open(log_path, 'a') as outfile:
        outfile.write(json.dumps(parameters))
        outfile.write('\n')
# Marker character stored in a character's settings string for each
# asset feature; the character immediately after the marker is the
# file-type code looked up below.
_CHARACTER_SETTINGS_FEATURE_SYMBOLS = {
    "char/thumb": "-",
    "char/cover": "~",
    "char/submit": "=",
}
# File-type code -> file extension.
_CHARACTER_SETTINGS_TYPE_EXTENSIONS = {
    "J": ".jpg",
    "P": ".png",
    "G": ".gif",
    "T": ".txt",
    "H": ".htm",
    "M": ".mp3",
    "F": ".swf",
    "A": ".pdf",
}


def url_type(settings, feature):
    """
    Return the file extension specified in `settings` for the passed feature.
    """
    marker = _CHARACTER_SETTINGS_FEATURE_SYMBOLS[feature]
    type_code = settings[settings.index(marker) + 1]
    return _CHARACTER_SETTINGS_TYPE_EXTENSIONS[type_code]
def url_make(targetid, feature, query=None, root=False, file_prefix=None):
    """
    Return the URL to a resource; if `root` is True, the path will start from
    the root.

    `query` lets the caller pass an already-fetched settings row to avoid a
    database round trip. Returns None when the requested asset does not
    exist (except missing thumbnails, which fall back to the blank thumb).
    """
    result = [] if root else ["/"]
    if root:
        result.append(macro.MACRO_SYS_BASE_PATH)
    if "char/" in feature:
        result.extend([macro.MACRO_URL_CHAR_PATH, get_hash_path(targetid)])
    if file_prefix is not None:
        result.append("%s-" % (file_prefix,))
    # Character file
    if feature == "char/submit":
        if query is None:
            query = engine.execute("SELECT userid, settings FROM character WHERE charid = %(id)s", id=targetid).first()
        # "=" in settings marks the presence of a submission file.
        if query and "=" in query[1]:
            result.append("%i.submit.%i%s" % (targetid, query[0], url_type(query[1], feature)))
        else:
            return None
    # Character cover
    elif feature == "char/cover":
        if query is None:
            query = engine.execute("SELECT settings FROM character WHERE charid = %(id)s", id=targetid).first()
        if query and "~" in query[0]:
            result.append("%i.cover%s" % (targetid, url_type(query[0], feature)))
        else:
            return None
    # Character thumbnail
    elif feature == "char/thumb":
        if query is None:
            query = engine.execute("SELECT settings FROM character WHERE charid = %(id)s", id=targetid).first()
        if query and "-" in query[0]:
            result.append("%i.thumb%s" % (targetid, url_type(query[0], feature)))
        else:
            return None if root else macro.MACRO_BLANK_THUMB
    # Character thumbnail selection
    elif feature == "char/.thumb":
        result.append("%i.new.thumb" % (targetid,))
    return "".join(result)
def cdnify_url(url):
    """Rebase `url` onto the configured CDN root, or return it unchanged when no CDN is set."""
    cdn_root = config_read_setting("cdn_root")
    if not cdn_root:
        return url
    return urlparse.urljoin(cdn_root, url)
def get_resource_path(resource):
    """Return the (possibly CDN-rebased) URL of a static asset, re-reading the manifest when reload_assets is set."""
    if reload_assets:
        _load_resources()
    return cdnify_url('/' + resource_paths[resource])
def absolutify_url(url):
    """Return an absolute URL for `url`; URLs already on the CDN root are left untouched."""
    cdn_root = config_read_setting("cdn_root")
    if cdn_root and url.startswith(cdn_root):
        return url
    return urlparse.urljoin(web.ctx.realhome, url)
def user_is_twitterbot():
    """Return True when the request's User-Agent identifies Twitter's link-preview crawler."""
    return web.ctx.env.get('HTTP_USER_AGENT', '').startswith('Twitterbot')
def summarize(s, max_length=200):
    """Truncate `s` to at most `max_length` characters, appending an ellipsis when shortened."""
    if len(s) <= max_length:
        return s
    # Reserve one character for the ellipsis; strip trailing whitespace
    # so the ellipsis never follows a space.
    truncated = s[:max_length - 1].rstrip()
    return truncated + u'\N{HORIZONTAL ELLIPSIS}'
def clamp(val, lower_bound, upper_bound):
    """Restrict `val` to the interval [lower_bound, upper_bound]."""
    # Floor first, then cap -- preserving the original's result even when
    # lower_bound > upper_bound (the upper bound wins).
    floored = max(val, lower_bound)
    return min(floored, upper_bound)
def timezones():
    """
    Build a grouped timezone choice list: a "- Country -" header row (value
    None) per country, followed by "[UTC+HHMM] Zone Name" entries.

    NOTE: relies on dict.iteritems(), i.e. this module targets Python 2.
    """
    ct = datetime.datetime.now(pytz.utc)
    timezones_by_country = [
        (pytz.country_names[cc], [
            # %z yields e.g. "+0800"; parsed as an int for sorting/display.
            (int(ct.astimezone(pytz.timezone(tzname)).strftime("%z")), tzname)
            for tzname in timezones
        ])
        for cc, timezones in pytz.country_timezones.iteritems()]
    timezones_by_country.sort()
    ret = []
    for country, timezones in timezones_by_country:
        ret.append(('- %s -' % (country,), None))
        ret.extend(
            ("[UTC%+05d] %s" % (offset, tzname.replace('_', ' ')), tzname)
            for offset, tzname in timezones)
    return ret
def query_string(query):
    """
    URL-encode a mapping into a query string.

    Sequence values contribute one pair per element; unicode values are
    UTF-8 encoded first. Falsy scalar values are dropped entirely (falsy
    members inside a sequence are kept).
    """
    pairs = []
    for key, value in query.items():
        if isinstance(value, (tuple, list, set)):
            for subvalue in value:
                if isinstance(subvalue, unicode):
                    pairs.append((key, subvalue.encode("utf-8")))
                else:
                    pairs.append((key, subvalue))
        elif isinstance(value, unicode):
            pairs.append((key, value.encode("utf-8")))
        elif value:
            pairs.append((key, value))
    return urllib.urlencode(pairs)
def _requests_wrapper(func_name):
    """Wrap a `requests` function so that any failure raises WeasylError('httpError')."""
    func = getattr(requests, func_name)
    def wrapper(*a, **kw):
        try:
            return func(*a, **kw)
        except Exception as e:
            # Log the original traceback, then surface a site-level error.
            web.ctx.log_exc(level=logging.DEBUG)
            w = WeasylError('httpError')
            w.error_suffix = 'The original error was: %s' % (e,)
            raise w
    return wrapper
# Error-wrapped HTTP helpers used throughout the module.
http_get = _requests_wrapper('get')
http_post = _requests_wrapper('post')
def metric(*a, **kw):
    """Forward a stats metric to the application's stats factory (imported lazily to avoid a cycle)."""
    from weasyl.wsgi import app
    app.statsFactory.metric(*a, **kw)
def iso8601(unixtime):
    """Format an arrow.Arrow or site unixtime as an ISO-8601 UTC string ending in 'Z'."""
    if isinstance(unixtime, arrow.Arrow):
        # Drop fractional seconds for consistency with the unixtime branch.
        return unixtime.isoformat().partition('.')[0] + 'Z'
    else:
        return datetime.datetime.utcfromtimestamp(unixtime - _UNIXTIME_OFFSET).isoformat() + 'Z'
def parse_iso8601(s):
    """Inverse of iso8601(): parse a '...Z' timestamp into a site unixtime."""
    return arrow.Arrow.strptime(s, '%Y-%m-%dT%H:%M:%SZ').timestamp + _UNIXTIME_OFFSET
def paginate(results, backid, nextid, limit, key):
    """
    Trim `results` *in place* to at most `limit` rows and compute pagination
    cursors.

    `backid`/`nextid` indicate the direction the user is paging; `key`
    indexes each row for its cursor value. Returns (back_cursor,
    next_cursor), each None at the corresponding end of the result set.
    """
    moving_backward = bool(backid)
    # With neither cursor supplied we're definitely at the start.
    at_start = not backid and not nextid
    at_end = False
    if len(results) <= limit:
        # The query came up short: we've hit whichever end we were moving toward.
        if moving_backward:
            at_start = True
        else:
            at_end = True
    elif moving_backward:
        # Moving backward: drop surplus rows from the front, in place.
        del results[:-limit]
    else:
        # Moving forward: drop surplus rows from the back, in place.
        del results[limit:]
    back_cursor = None if at_start or not results else results[0][key]
    next_cursor = None if at_end or not results else results[-1][key]
    return (back_cursor, next_cursor)
def token_checked(handler):
    """Decorator: reject form posts whose CSRF token doesn't match; API users are exempt."""
    from weasyl import api
    def wrapper(self, *args, **kwargs):
        form = web.input(token="")
        if not api.is_api_user() and form.token != get_token():
            return errorpage(self.user_id, errorcode.token)
        return handler(self, *args, **kwargs)
    return wrapper
def supports_json(handler):
    """Decorator: when ?format=json is requested, serialize the handler's result (or WeasylError) as JSON."""
    def wrapper(*args, **kwargs):
        form = web.input(format="")
        if form.format == "json":
            web.header("Content-Type", "application/json")
            try:
                result = handler(*args, **kwargs)
            except WeasylError as e:
                result = {"error": e.value, "message": errorcode.error_messages.get(e.value)}
            return json.dumps(result)
        return handler(*args, **kwargs)
    return wrapper
def thumb_for_sub(submission):
    """
    Given a submission dict containing sub_media, sub_type and userid,
    returns the appropriate media item to use as a thumbnail.
    Params:
        submission: The submission.
    Returns:
        The sub media to use as a thumb.
    """
    user_id = get_userid()
    profile_settings = get_profile_settings(user_id)
    # Visual artwork has subtype < 2000; honor the viewer's preference to
    # hide custom thumbnails on other users' artwork.
    if (profile_settings.disable_custom_thumbs and
            submission.get('subtype', 9999) < 2000 and
            submission['userid'] != user_id):
        thumb_key = 'thumbnail-generated'
    else:
        thumb_key = 'thumbnail-custom' if 'thumbnail-custom' in submission['sub_media'] else 'thumbnail-generated'
    return submission['sub_media'][thumb_key][0]
| 28.855529 | 120 | 0.625755 |
ace6ae206ad72b84b34f7ac844bab3dbc0d28497 | 1,308 | py | Python | gossip_ds/mixing_manager.py | aparna-aketi/Low_Precision_DL | 5a2489cac5da8f43dd8490a9d871f1ce17f8e7f8 | [
"MIT"
] | null | null | null | gossip_ds/mixing_manager.py | aparna-aketi/Low_Precision_DL | 5a2489cac5da8f43dd8490a9d871f1ce17f8e7f8 | [
"MIT"
] | null | null | null | gossip_ds/mixing_manager.py | aparna-aketi/Low_Precision_DL | 5a2489cac5da8f43dd8490a9d871f1ce17f8e7f8 | [
"MIT"
] | null | null | null |
"""
Mixing Manager Class
:description: Class provides an API for dynamically selecting mixing weights
for gossip
"""
import torch
class MixingManager(object):
    """Interface for dynamically selecting gossip mixing weights."""

    def __init__(self, graph, device):
        self.graph_manager = graph
        self.device = device

    def is_regular(self):
        """
        Whether there is bias accumulated in local entry of stationary
        distribution of mixing matrix
        """
        return self.graph_manager.is_regular_graph() and self.is_uniform()

    def is_uniform(self):
        """ Whether mixing weights are distributed uniformly over peers """
        raise NotImplementedError

    def get_mixing_weights(self, residual_adjusted=True):
        """ Create mixing weight dictionary using uniform allocation """
        raise NotImplementedError


class UniformMixing(MixingManager):
    """Mixing manager giving every out-peer (and self) an equal weight."""

    def get_mixing_weights(self):
        """ Create mixing weight dictionary using uniform allocation """
        peers_out, _ = self.graph_manager.get_peers()
        # Weight 1/(p+1): p out-peers plus the local node itself.
        share = 1. / (len(peers_out) + 1)
        return {'uniform': torch.tensor([share], device=self.device)}

    def is_uniform(self):
        return True
ace6ae28b3a538306651ff0314318565fd45bb53 | 2,770 | py | Python | covid19bot.py | yazneswarramcharan/covid19bot | d875d782c23e153f9e922be2d2410090cb76ff9b | [
"MIT"
] | null | null | null | covid19bot.py | yazneswarramcharan/covid19bot | d875d782c23e153f9e922be2d2410090cb76ff9b | [
"MIT"
] | null | null | null | covid19bot.py | yazneswarramcharan/covid19bot | d875d782c23e153f9e922be2d2410090cb76ff9b | [
"MIT"
] | null | null | null | import telegram
from telegram import InlineKeyboardButton, InlineKeyboardMarkup, Update
from telegram.ext import Updater, CommandHandler, CallbackContext, CallbackQueryHandler
import requests
# Telegram bot token read from the environment; the empty default avoids an
# import-time crash, but the bot cannot connect without a real token.
BOT_TOKEN = os.environ.get("BOT_API_KEY","")
bot = telegram.Bot(BOT_TOKEN)
def start(update: Update, context: CallbackContext) -> None:
    """Reply to /start with an inline keyboard offering worldwide or Indian data."""
    keyboard = [
        [
            InlineKeyboardButton("WORLDWIDE", callback_data='World'),
            InlineKeyboardButton("INDIA", callback_data='india'),
        ],
    ]
    reply_markup = InlineKeyboardMarkup(keyboard)
    update.message.reply_text('Please choose which data u want:', reply_markup=reply_markup)
def button(update: Update, context: CallbackContext) -> None:
    """
    Handle inline-keyboard callbacks.

    Callback data is one of:
      * "World"       -- send a worldwide summary,
      * "+<state>"    -- send details for one Indian state,
      * anything else -- send the India summary plus a keyboard of states.
    """
    query = update.callback_query
    if query.data == "World":
        # BUG FIX: the original had unbalanced parentheses and wrapped the
        # already-resolved URL in a second os.environ.get() call.
        world_url = os.environ.get("WORLD_API_KEY", "")
        world = requests.get(world_url).json()
        record = world['data'][0]
        text = ""
        for field in record:
            if field != "updated_at":
                text += str(field) + " - " + str(record[field]) + "\n"
        bot.send_message(update.effective_user.id, text)
    elif query.data[0] == "+":
        # Per-state detail; callback data is "+<state name>".
        state_url = os.environ.get("STATE_API_KEY", "")
        states = requests.get(state_url).json()
        for entry in states:
            if entry['state'] == query.data[1:]:
                text = ""
                for field in entry:
                    if field != "districtData" and field != "id":
                        text += str(field) + " - " + str(entry[field]) + "\n"
                bot.send_message(update.effective_user.id, text)
                break
    else:
        # National summary followed by a keyboard listing every state.
        # BUG FIX: state_url was previously only bound inside the elif
        # branch, so this path raised a NameError at runtime.
        state_url = os.environ.get("STATE_API_KEY", "")
        states = requests.get(state_url).json()
        india_url = os.environ.get("INDIA_API_KEY", "")
        india = requests.get(india_url).json()
        summary = india['data']['timeline'][0]
        text = ""
        for field in summary:
            if field != "updated_at":
                text += str(field) + " - " + str(summary[field]) + "\n"
        bot.send_message(update.effective_user.id, text)
        keyboard = [
            [InlineKeyboardButton(entry['state'], callback_data="+" + str(entry['state']))]
            for entry in states
        ]
        reply_markup = InlineKeyboardMarkup(keyboard)
        bot.send_message(update.effective_user.id, 'India States', reply_markup=reply_markup)
    query.answer()
def main():
    """Wire up the /start command and callback handler, then poll until interrupted."""
    updater = Updater(BOT_TOKEN, use_context=True)
    dispatcher = updater.dispatcher
    dispatcher.add_handler(CommandHandler("start", start))
    updater.dispatcher.add_handler(CallbackQueryHandler(button))
    updater.start_polling()
    updater.idle()
| 33.780488 | 97 | 0.566426 |
ace6af283656fb6b7ac5b5b62a453fef70ff3245 | 25,081 | py | Python | vnpy/gateway/sinopac/sinopac_gateway.py | fakecoinbase/ChenYuHoslashvnpy | 928c83b5cf970f31554fe72da4fb89f4afc4221b | [
"MIT"
] | null | null | null | vnpy/gateway/sinopac/sinopac_gateway.py | fakecoinbase/ChenYuHoslashvnpy | 928c83b5cf970f31554fe72da4fb89f4afc4221b | [
"MIT"
] | null | null | null | vnpy/gateway/sinopac/sinopac_gateway.py | fakecoinbase/ChenYuHoslashvnpy | 928c83b5cf970f31554fe72da4fb89f4afc4221b | [
"MIT"
] | null | null | null | """
Gateway for Sinopac securities.
Author: ypochien@gmail.com
"""
import os
import sys
from collections import OrderedDict
from copy import copy
from datetime import datetime
from threading import Thread
from time import sleep
import shioaji as sj
from shioaji import constant
from shioaji.account import StockAccount, FutureAccount
from shioaji.order import Status as SinopacStatus
from vnpy.trader.constant import (
Direction,
Exchange,
Product,
OptionType,
Status,
Offset,
)
from vnpy.trader.event import EVENT_TIMER
from vnpy.trader.gateway import BaseGateway
from vnpy.trader.object import (
TickData,
OrderData,
TradeData,
ContractData,
PositionData,
SubscribeRequest,
OrderRequest,
CancelRequest,
)
# Exchange identifiers mapped between VN Trader and Shioaji (Sinopac).
EXCHANGE_VT2SINOPAC = {Exchange.TSE: "TSE", Exchange.TFE: "TFE"}
EXCHANGE_SINOPAC2VT = {v: k for k, v in EXCHANGE_VT2SINOPAC.items()}
# Order status mapping from Shioaji to VN Trader.
STATUS_SINOPAC2VT = {
    SinopacStatus.Cancelled: Status.CANCELLED,
    SinopacStatus.Failed: Status.REJECTED,
    SinopacStatus.Filled: Status.ALLTRADED,
    SinopacStatus.PartFilled: Status.PARTTRADED,
    SinopacStatus.PreSubmitted: Status.SUBMITTING,
    SinopacStatus.Submitted: Status.NOTTRADED,
    SinopacStatus.PendingSubmit: Status.SUBMITTING,
    SinopacStatus.Inactive: Status.SUBMITTING,
}
class SinopacGateway(BaseGateway):
    """
    VN Trader Gateway for Sinopac connection
    """
    # Connection settings shown in the VN Trader UI. The Chinese labels are
    # runtime dict keys (ID number, password, CA file path, CA password,
    # environment live/simulation, default stock/futures account index) --
    # do not translate them.
    default_setting = {
        "身份證字號": "",
        "密碼": "",
        "憑證檔案路徑": "",
        "憑證密碼": "",
        "環境": ["正式", "模擬"],
        "預設現貨帳號": "0",
        "預設期貨帳號": "0",
    }
    exchanges = list(EXCHANGE_SINOPAC2VT.values())
    def __init__(self, event_engine):
        """Constructor"""
        super(SinopacGateway, self).__init__(event_engine, "Sinopac")
        # vt symbols already subscribed for quotes
        self.subscribed = set()
        self.userid = ""
        self.password = ""
        # vt_symbol -> latest TickData
        self.ticks = {}
        # vt_symbol -> shioaji contract object
        self.code2contract = {}
        # trade ids already reported, to deduplicate TradeData events
        self.trades = set()
        # orderid (str(id(trade))) -> shioaji Trade object
        self.orders = OrderedDict()
        # timer tick counter and polling interval for process_timer_event
        self.count = 0
        self.interval = 20
        self.thread = Thread(target=self.query_data)
        self.query_funcs = [self.query_position, self.query_trade]
        self.api = None
    def proc_trade(self, trades):
        """
        Refresh the full trade list from the API and publish events.

        Filled trades become TradeData events (deduplicated via self.trades);
        everything else is reported as an OrderData status update. The
        `trades` argument is ignored -- the list is re-fetched from the API.
        NOTE(review): filled trades report the *ordered* price/quantity, not
        the actual deal price/quantity -- confirm this is intended.
        """
        trades = self.api.list_trades()
        for item in trades:
            # Keyed by the Python object's id(); used as the VN orderid.
            self.orders[str(id(item))] = item
            if item.status.status in [SinopacStatus.Filled]: # filled
                tradeid = item.status.id
                if tradeid in self.trades:
                    continue
                self.trades.add(tradeid)
                trade = TradeData(
                    symbol=item.contract.code,
                    exchange=EXCHANGE_SINOPAC2VT.get(
                        item.contract.exchange, Exchange.TSE
                    ),
                    direction=Direction.LONG
                    if item.order.action == "Buy"
                    else Direction.SHORT,
                    tradeid=tradeid,
                    orderid=str(id(item)),
                    price=float(item.order.price),
                    volume=float(item.order.quantity),
                    datetime=item.status.order_datetime,
                    gateway_name=self.gateway_name,
                )
                self.on_trade(trade)
            else:
                # Remaining (unfilled, uncancelled) volume.
                unVol = float(
                    item.order.quantity
                    - (item.status.deal_quantity + item.status.cancel_quantity)
                )
                order = OrderData(
                    symbol=item.contract.code,
                    exchange=EXCHANGE_SINOPAC2VT.get(
                        item.contract.exchange, Exchange.TSE
                    ),
                    orderid=str(id(item)),
                    direction=Direction.LONG
                    if item.order.action == "Buy"
                    else Direction.SHORT,
                    price=float(item.order.price),
                    volume=unVol,
                    traded=float(item.status.deal_quantity),
                    status=STATUS_SINOPAC2VT[item.status.status],
                    datetime=item.status.order_datetime,
                    gateway_name=self.gateway_name,
                )
                self.on_order(order)
    def query_trade(self):
        """Asynchronously refresh order statuses; proc_trade handles the callback."""
        self.api.update_status(timeout=0, cb=self.proc_trade)
    def query_data(self):
        """
        Query all data necessary.
        """
        sleep(2.0) # Wait 2 seconds until connection completed.
        self.query_position()
        self.query_trade()
        # Start fixed interval query.
        self.event_engine.register(EVENT_TIMER, self.process_timer_event)
    def connect(self, setting: dict):
        """
        Log in to Shioaji, select default accounts, optionally activate the
        CA certificate (live mode only), register quote/order callbacks and
        start the background query thread.
        """
        # "模擬" selects the simulation platform; anything else is live.
        simulation = True if setting["環境"] == "模擬" else False
        self.write_log(f"使用永豐金證券 {setting['環境']} 平臺")
        if self.api:
            self.api.logout()
        self.api = sj.Shioaji(simulation=simulation)
        userid = setting["身份證字號"]
        password = setting["密碼"]
        try:
            self.api.login(userid, password, contracts_cb=self.query_contract)
        except Exception as exc:
            self.write_log(f"登入失敗. [{exc}]")
            return
        self.write_log(f"登入成功. [{userid}]")
        self.select_default_account(setting.get("預設現貨帳號", 0), setting.get("預設期貨帳號", 0))
        self.query_position()
        self.write_log("庫存部位查詢")
        if not simulation and setting["憑證檔案路徑"] != "":
            # Activate the CA certificate (required for live order placement).
            self.api.activate_ca(setting["憑證檔案路徑"], setting["憑證密碼"], setting["身份證字號"])
            self.write_log(f"{setting['身份證字號']} 憑證 已啟用.")
        self.api.quote.set_callback(self.cb_quote)
        self.api.set_order_callback(self.cb_pushorder)
        self.write_log("交易行情 - 連線成功")
        self.thread.start()
    def cb_pushorder(self, stat, msg):
        """Order/deal push callback from Shioaji; currently only logs -- the per-state handlers are stubs."""
        self.write_log(f"{stat},{msg}")
        # TODO: handle futures order/deal and stock order/deal pushes.
        if stat == constant.OrderState.FOrder:
            pass
        elif stat == constant.OrderState.FDeal:
            pass
        elif stat == constant.OrderState.TFTOrder:
            pass
        elif stat == constant.OrderState.TFTDeal:
            pass
    def select_default_account(self, select_stock_number, select_futures_number):
        """
        List the user's stock/futures accounts and, when more than one of a
        kind exists, set the account at the configured index as default.

        NOTE(review): the configured index addresses the *combined*
        list_accounts() list, not the per-type ordinal printed in the log
        above -- confirm this is the intended behavior.
        """
        stock_account_count = 0
        futures_account_count = 0
        for acc in self.api.list_accounts():
            if isinstance(acc, StockAccount):
                self.write_log(
                    f"股票帳號: [{stock_account_count}] - {acc.broker_id}-{acc.account_id} {acc.username}"
                )
                stock_account_count += 1
            if isinstance(acc, FutureAccount):
                self.write_log(
                    f"期貨帳號: [{futures_account_count}] - {acc.broker_id}-{acc.account_id} {acc.username}"
                )
                futures_account_count += 1
        if stock_account_count >= 2:
            acc = self.api.list_accounts()[int(select_stock_number)]
            self.api.set_default_account(acc)
            self.write_log(
                f"***預設 現貨下單帳號 - [{select_stock_number}] {acc.broker_id}-{acc.account_id} {acc.username}"
            )
        if futures_account_count >= 2:
            acc = self.api.list_accounts()[int(select_futures_number)]
            self.api.set_default_account(acc)
            self.write_log(
                f"***預設 期貨下單帳號 - [{select_futures_number}] {acc.broker_id}-{acc.account_id} {acc.username}"
            )
    def proc_account(self, data):
        """Account-data callback placeholder; not implemented."""
        pass
    def process_timer_event(self, event):
        """Every `interval` timer ticks, run the next query function in round-robin order."""
        self.count += 1
        if self.count < self.interval:
            return
        self.count = 0
        # Rotate: run the oldest query and push it to the back of the queue.
        func = self.query_funcs.pop(0)
        func()
        self.query_funcs.append(func)
    def query_contract(self, securities_type=None):
        """
        Contract-download callback: convert Shioaji futures/options/stocks
        contracts into vnpy ContractData events and cache them in
        code2contract keyed by "code.EXCHANGE".
        """
        self.write_log(f"Download 商品檔 {securities_type} 完成")
        if securities_type == constant.SecurityType.Future:
            for category in self.api.Contracts.Futures:
                for contract in category:
                    data = ContractData(
                        symbol=contract.code,
                        exchange=Exchange.TFE,
                        name=contract.name + contract.delivery_month,
                        product=Product.FUTURES,
                        size=200,
                        pricetick=0.01,
                        stop_supported=False,
                        net_position=True,
                        min_volume=1,
                        gateway_name=self.gateway_name,
                    )
                    self.on_contract(data)
                    symbol = f"{contract.code}.{Exchange.TFE.value}"
                    self.code2contract[symbol] = contract
        if securities_type == constant.SecurityType.Option:
            for category in self.api.Contracts.Options:
                for contract in category:
                    data = ContractData(
                        symbol=contract.code,
                        exchange=Exchange.TFE,
                        name=f"{contract.name} {contract.delivery_month} {contract.strike_price}{contract.option_right}",
                        product=Product.OPTION,
                        size=50,
                        net_position=True,
                        pricetick=0.01,
                        min_volume=1,
                        gateway_name=self.gateway_name,
                        option_strike=contract.strike_price,
                        option_underlying=contract.underlying_code,
                        option_type=OptionType.CALL
                        if contract.option_right == constant.OptionRight.Call
                        else OptionType.PUT,
                        option_expiry=None,
                    )
                    self.on_contract(data)
                    symbol = f"{contract.code}.{Exchange.TFE.value}"
                    self.code2contract[symbol] = contract
        if securities_type == constant.SecurityType.Stock:
            for category in self.api.Contracts.Stocks:
                for contract in category:
                    # Tick-size ladder chosen from limit_down; presumably
                    # mirrors TWSE price-band tick rules -- confirm.
                    pricetick = 5
                    if contract.limit_down < 10:
                        pricetick = 0.01
                    elif contract.limit_down < 50:
                        pricetick = 0.05
                    elif contract.limit_down < 100:
                        pricetick = 0.1
                    elif contract.limit_down < 500:
                        pricetick = 0.5
                    elif contract.limit_down < 1000:
                        pricetick = 1
                    data = ContractData(
                        symbol=contract.code,
                        exchange=Exchange.TSE,
                        name=f"{contract.name} (不可當沖)"
                        if contract.day_trade != constant.DayTrade.Yes
                        else contract.name,
                        product=Product.EQUITY,
                        size=1,
                        net_position=False,
                        pricetick=pricetick,
                        min_volume=1,
                        gateway_name=self.gateway_name,
                    )
                    self.on_contract(data)
                    symbol = f"{contract.code}.{Exchange.TSE.value}"
                    self.code2contract[symbol] = contract
    def getContractSnapshot(self, contract):
        """
        Fetch a market snapshot for `contract`, seed a TickData in
        self.ticks (if not present) and publish it via on_tick.
        """
        snapshot = self.api.quote.snapshots([contract])[0]
        code = snapshot.code
        exchange = Exchange.TSE if snapshot.exchange in ["TSE", "OTC"] else Exchange.TFE
        symbol = f"{code}.{exchange.value}"
        tick = self.ticks.get(symbol, None)
        if tick is None:
            self.code2contract[symbol] = contract
            if exchange == Exchange.TFE:
                name = f"{contract['name']}{contract['delivery_month']}"
            else:
                name = f"{contract['name']}"
            tick = TickData(
                symbol=code,
                exchange=exchange,
                name=name,
                # snapshot.ts is in nanoseconds; the 8-hour subtraction
                # presumably shifts UTC+8 exchange time -- confirm.
                datetime=datetime.fromtimestamp(snapshot.ts / 1000000000 - 8 * 60 * 60),
                gateway_name=self.gateway_name,
            )
        tick.volume = snapshot.total_volume
        tick.last_price = snapshot.close
        tick.limit_up = contract.limit_up
        tick.open_interest = 0
        tick.limit_down = contract.limit_down
        tick.open_price = snapshot.open
        tick.high_price = snapshot.high
        tick.low_price = snapshot.low
        tick.pre_close = contract.reference
        tick.bid_price_1 = snapshot.buy_price
        tick.bid_volume_1 = snapshot.buy_volume
        tick.ask_price_1 = snapshot.sell_price
        tick.ask_volume_1 = snapshot.sell_volume
        self.ticks[symbol] = tick
        self.on_tick(copy(tick))
    def subscribe(self, req: SubscribeRequest):
        """Subscribe to tick and bid/ask quotes for a contract, seeding an initial snapshot first."""
        symbol = f"{req.symbol}.{req.exchange.value}"
        if symbol in self.subscribed:
            return
        contract = self.code2contract.get(symbol, None)
        if contract:
            # Publish a snapshot immediately so the UI has data before
            # the first streamed quote arrives.
            self.getContractSnapshot(contract)
            self.api.quote.subscribe(contract, quote_type="tick")
            self.api.quote.subscribe(contract, quote_type="bidask")
            msg = f"訂閱 [{symbol}] {contract.name}"
            if req.exchange == Exchange.TFE:
                msg = f"訂閱 [{symbol}] {contract.name}{contract.delivery_month}"
            self.write_log(msg)
            self.subscribed.add(symbol)
        else:
            self.write_log(f"無此訂閱商品[{symbol}]")
    def send_order(self, req: OrderRequest):
        """
        Place a limit ROD (rest-of-day) order for futures (TFE) or stocks
        (TSE) and return the vt_orderid.

        The VN orderid is str(id(trade)) of the Shioaji Trade object, which
        is also stored in self.orders for later status lookups/cancels.
        """
        if req.exchange == Exchange.TFE:
            action = (
                constant.ACTION_BUY
                if req.direction == Direction.LONG
                else constant.ACTION_SELL
            )
            price_type = constant.FuturesPriceType.LMT
            order_type = constant.FuturesOrderType.ROD
            order = self.api.Order(
                price=req.price,
                quantity=int(req.volume),
                action=action,
                price_type=price_type,
                order_type=order_type,
            )
        elif req.exchange == Exchange.TSE:
            action = (
                constant.ACTION_BUY
                if req.direction == Direction.LONG
                else constant.ACTION_SELL
            )
            price_type = constant.STOCK_PRICE_TYPE_LIMITPRICE
            order_type = constant.STOCK_ORDER_TYPE_COMMON
            # CLOSETODAY maps to a day-trade "first sell" stock order.
            first_sell = (
                constant.STOCK_FIRST_SELL_YES
                if req.offset == Offset.CLOSETODAY
                else constant.STOCK_FIRST_SELL_NO
            )
            order = self.api.Order(
                price=req.price,
                quantity=int(req.volume),
                action=action,
                price_type=price_type,
                order_type=order_type,
                first_sell=first_sell,
            )
        symbol = f"{req.symbol}.{req.exchange.value}"
        trade = self.api.place_order(
            self.code2contract[symbol], order, 0, self.cb_placeorder
        )
        orderdata = req.create_order_data(str(id(trade)), self.gateway_name)
        self.orders[orderdata.orderid] = trade
        self.on_order(orderdata)
        return orderdata.vt_orderid
    def cb_placeorder(self, trade: sj.order.Trade):
        """
        place_order callback: record the trade and publish either a
        TradeData (filled) or an OrderData status update.

        NOTE(review): unlike proc_trade, the filled branch here does not
        deduplicate via self.trades, and self.orders is keyed with the raw
        int id(trade) rather than str(id(trade)) -- confirm both.
        """
        self.orders[id(trade)] = trade
        if trade.status.status in [SinopacStatus.Filled]: # filled
            tradeid = trade.status.id
            trade = TradeData(
                symbol=trade.contract.code,
                exchange=EXCHANGE_SINOPAC2VT.get(trade.contract.exchange, Exchange.TSE),
                direction=Direction.LONG
                if trade.order.action == "Buy"
                else Direction.SHORT,
                tradeid=tradeid,
                orderid=str(id(trade)),
                price=float(trade.order.price),
                volume=float(trade.order.quantity),
                datetime=trade.status.order_datetime,
                gateway_name=self.gateway_name,
            )
            self.on_trade(trade)
        else:
            order = OrderData(
                symbol=trade.contract.code,
                exchange=EXCHANGE_SINOPAC2VT.get(trade.contract.exchange, Exchange.TSE),
                orderid=str(id(trade)),
                direction=Direction.LONG
                if trade.order.action == "Buy"
                else Direction.SHORT,
                price=float(trade.order.price),
                # Remaining (unfilled, uncancelled) volume.
                volume=float(
                    trade.order.quantity
                    - (trade.status.deal_quantity + trade.status.cancel_quantity)
                ),
                traded=float(trade.status.deal_quantity),
                status=STATUS_SINOPAC2VT[trade.status.status],
                datetime=trade.status.order_datetime,
                gateway_name=self.gateway_name,
            )
            self.on_order(order)
    def cancel_order(self, req: CancelRequest):
        """Cancel a live order.

        ``req.orderid`` is the ``str(id(trade))`` key stored when the
        order was placed, so the original Shioaji trade object is looked
        up and handed back to the API for cancellation.
        """
        self.write_log("***cancel_order")
        self.write_log(str(req))
        self.write_log(str(self.orders[req.orderid]))
        self.api.cancel_order(self.orders[req.orderid])
    def query_account(self):
        """Query account balance (not implemented — only logs the call)."""
        self.write_log("***query_account")
    def query_position(self):
        """Fetch unrealized P&L rows from Shioaji and emit PositionData events.

        NOTE(review): quantities are divided by 1000 — presumably converting
        shares into TWSE board lots; confirm against the Shioaji payload.
        """
        # ``update()`` refreshes the remote snapshot before reading data().
        self.api.get_stock_account_unreal_profitloss().update()
        data = self.api.get_stock_account_unreal_profitloss().data()["summary"]
        for item in data:
            volume = float(item["real_qty"]) / 1000
            total_qty = float(item["real_qty"]) / 1000
            yd_qty = float(item["qty"]) / 1000
            pos = PositionData(
                symbol=item["stock"],
                exchange=EXCHANGE_SINOPAC2VT.get("TSE", Exchange.TSE),
                direction=Direction.LONG if volume >= 0 else Direction.SHORT,
                volume=volume,
                frozen=total_qty - yd_qty,
                price=float(item["avgprice"]),
                pnl=float(item["unreal"]),
                yd_volume=yd_qty,
                gateway_name=self.gateway_name,
            )
            self.on_position(pos)
    def close(self):
        """Log out of the Shioaji API if a session was created."""
        if self.api:
            self.api.logout()
    def cb_quote(self, topic, data):
        """
        Realtime quote callback: dispatch ``data`` by the first segment of
        ``topic`` to the matching parser and emit the resulting tick.
        Sample payloads:

        # L/TFE/TXFF9
        {'Amount': [21088.0], 'AmountSum': [1028165646.0], 'AvgPrice': [10562.513699263414],
        'Close': [10544.0], 'Code': 'TXFF9', 'Date': '2019/05/16', 'DiffPrice': [-37.0],
        'DiffRate': [-0.34968339476419996], 'DiffType': [4], 'High': [10574.0],
        'Low': [10488.0], 'Open': 10537.0, 'TargetKindPrice': 10548.47, 'TickType': [2],
        'Time': '11:15:11.911000', 'TradeAskVolSum': 52599, 'TradeBidVolSum': 53721,
        'VolSum': [97341], 'Volume': [2]}
        # Q/TFE/TXFF9
        {'AskPrice': [10545.0, 10546.0, 10547.0, 10548.0, 10549.0], 'AskVolSum': 262,
        'AskVolume': [17, 99, 59, 45, 42], 'BidPrice': [10544.0, 10543.0, 10542.0, 10541.0, 10540.0],
        'BidVolSum': 289, 'BidVolume': [16, 41, 32, 123, 77], 'Code': 'TXFF9', 'Date': '2019/05/16',
        'DiffAskVol': [0, 0, 0, -1, 0], 'DiffAskVolSum': -1, 'DiffBidVol': [0, 0, 0, 0, 0], 'DiffBidVolSum': 0,
        'FirstDerivedAskPrice': 10547.0, 'FirstDerivedAskVolume': 1, 'FirstDerivedBidPrice': 10542.0,
        'FirstDerivedBidVolume': 1, 'TargetKindPrice': 10548.47, 'Time': '11:15:11.911000'}
        # QUT/idcdmzpcr01/TSE/2330
        {'AskPrice': [248.0, 248.5, 249.0, 249.5, 250.0], 'AskVolume': [355, 632, 630, 301, 429],
        'BidPrice': [247.5, 247.0, 246.5, 246.0, 245.5], 'BidVolume': [397, 389, 509, 703, 434],
        'Date': '2019/05/17', 'Time': '09:53:00.706928'}
        """
        try:
            topics = topic.split("/")
            # First topic segment selects the payload format (see samples).
            realtime_type = topics[0]
            tick = None
            if realtime_type == "L":
                tick = self.qutote_futures_L(data)
            elif realtime_type == "Q":
                tick = self.quote_futures_Q(data)
            elif realtime_type == "MKT":
                tick = self.quote_stock_MKT(topics[3], data)
            elif realtime_type == "QUT":
                tick = self.qute_stock_QUT(topics[3], data)
            if tick:
                tick.open_interest = 0
                # Emit a copy so the cached tick can keep mutating safely.
                self.on_tick(copy(tick))
        except Exception as e:
            # Log file name and line number of the failure.
            # NOTE(review): relies on module-level ``sys`` and ``os`` imports.
            exc_type, _, exc_tb = sys.exc_info()
            filename = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            self.write_log(
                "except: [{}][{}][{}][{}]".format(
                    exc_type, filename, exc_tb.tb_lineno, str(e)
                )
            )
            self.write_log(data)
    def qutote_futures_L(self, data):
        """Update the cached futures tick from an "L" (last-trade) payload.

        NOTE(review): the name keeps the original "qutote" typo because
        ``cb_quote`` dispatches to it by this exact name.  If the contract
        snapshot fails to create the tick, ``tick`` stays None and the
        attribute assignments below would raise — TODO confirm intended.
        """
        code = data.get("Code", None)
        if code is None:
            return
        symbol = f"{code}.TFE"
        tick = self.ticks.get(symbol, None)
        if tick is None:
            # First message for this symbol: snapshot the contract so the
            # tick entry gets created, then fetch it again.
            contract = self.code2contract.get(symbol, None)
            self.getContractSnapshot(contract)
            tick = self.ticks.get(symbol, None)
        tick.datetime = datetime.strptime(
            "{} {}".format(data["Date"], data["Time"]), "%Y/%m/%d %H:%M:%S.%f"
        )
        tick.volume = int(data["VolSum"][0])
        tick.last_price = data["Close"][0]
        tick.limit_up = 0
        tick.open_interest = 0
        tick.limit_down = 0
        tick.open_price = data["Open"]  # "Open" is a scalar in this feed
        tick.high_price = data["High"][0]
        tick.low_price = data["Low"][0]
        tick.pre_close = data["Close"][0] - data["DiffPrice"][0]
        return tick
    def quote_stock_MKT(self, code, data):
        """
        Update the cached stock tick from an "MKT" (market-trade) payload.
        Open/high/low are maintained incrementally from successive closes.

        QUT/idcdmzpcr01/TSE/2330
        {'AskPrice': [248.0, 248.5, 249.0, 249.5, 250.0], 'AskVolume': [355, 632, 630, 301, 429],
        'BidPrice': [247.5, 247.0, 246.5, 246.0, 245.5], 'BidVolume': [397, 389, 509, 703, 434],
        'Date': '2019/05/17', 'Time': '09:53:00.706928'}
        MKT/idcdmzpcr01/TSE/2330
        {'Close': [248.0], 'Time': '09:53:00.706928',
        'VolSum': [7023], 'Volume': [1]}
        """
        symbol = f"{code}.TSE"
        tick = self.ticks.get(symbol, None)
        if tick is None:
            # First message for this symbol: snapshot the contract so the
            # tick entry gets created, then fetch it again.
            contract = self.code2contract[symbol]
            self.getContractSnapshot(contract)
            tick = self.ticks.get(symbol, None)
        # The MKT payload carries only a time-of-day; combine with today.
        tick.datetime = datetime.combine(
            datetime.today(),
            datetime.strptime("{}".format(data["Time"]), "%H:%M:%S.%f").time(),
        )
        tick.volume = int(data["VolSum"][0])
        tick.last_price = data["Close"][0]
        tick.open_price = data["Close"][0] if tick.open_price == 0 else tick.open_price
        tick.high_price = (
            data["Close"][0] if data["Close"][0] > tick.high_price else tick.high_price
        )
        tick.low_price = (
            data["Close"][0] if data["Close"][0] < tick.low_price else tick.low_price
        )
        return tick
    def quote_futures_Q(self, data):
        """Update order-book levels from a futures "Q" (quote) payload."""
        code = data.get("Code", None)
        if code is None:
            return
        symbol = f"{code}.TFE"
        return self.update_orderbook_tick(data, symbol)
    def qute_stock_QUT(self, code, data):
        """Update order-book levels from a stock "QUT" (quote) payload.

        NOTE(review): the "qute" typo is kept because ``cb_quote``
        dispatches to this exact name.
        """
        symbol = f"{code}.TSE"
        return self.update_orderbook_tick(data, symbol)
def update_orderbook_tick(self, data, symbol):
tick = self.ticks.get(symbol, None)
if tick is None:
contract = self.code2contract[symbol]
self.getContractSnapshot(contract)
tick = self.ticks.get(symbol, None)
tick.bid_price_1 = data["BidPrice"][0]
tick.bid_price_2 = data["BidPrice"][1]
tick.bid_price_3 = data["BidPrice"][2]
tick.bid_price_4 = data["BidPrice"][3]
tick.bid_price_5 = data["BidPrice"][4]
tick.ask_price_1 = data["AskPrice"][0]
tick.ask_price_2 = data["AskPrice"][1]
tick.ask_price_3 = data["AskPrice"][2]
tick.ask_price_4 = data["AskPrice"][3]
tick.ask_price_5 = data["AskPrice"][4]
tick.bid_volume_1 = data["BidVolume"][0]
tick.bid_volume_2 = data["BidVolume"][1]
tick.bid_volume_3 = data["BidVolume"][2]
tick.bid_volume_4 = data["BidVolume"][3]
tick.bid_volume_5 = data["BidVolume"][4]
tick.ask_volume_1 = data["AskVolume"][0]
tick.ask_volume_2 = data["AskVolume"][1]
tick.ask_volume_3 = data["AskVolume"][2]
tick.ask_volume_4 = data["AskVolume"][3]
tick.ask_volume_5 = data["AskVolume"][4]
return tick
| 39.748019 | 122 | 0.531159 |
ace6b1ce9f973785f32799aba3edf3c2b01d67ea | 2,333 | py | Python | src/fire/test_tree.py | mikespub-org/mar10-clouddav | dabfe2832438667ca5ff960e9273fceae9280f15 | [
"MIT"
] | 2 | 2021-03-01T10:27:41.000Z | 2021-11-04T21:27:12.000Z | src/fire/test_tree.py | mikespub-org/mar10-clouddav | dabfe2832438667ca5ff960e9273fceae9280f15 | [
"MIT"
] | 1 | 2022-03-07T09:09:21.000Z | 2022-03-07T09:09:21.000Z | src/fire/test_tree.py | mikespub-org/mar10-clouddav | dabfe2832438667ca5ff960e9273fceae9280f15 | [
"MIT"
] | 1 | 2020-10-17T07:35:37.000Z | 2020-10-17T07:35:37.000Z | #
# Copyright (c) 2019-2020 Mike's Pub, see https://github.com/mikespub-org
# Licensed under the MIT license: https://opensource.org/licenses/mit-license.php
#
import json
import logging
from .db import get_client, to_dict
from .tree import get_structure
logging.getLogger().setLevel(logging.DEBUG)
def test_my_fs(my_fs):
    """Exercise a filesystem-structure object: build the canned /test tree,
    read one file back, and close the structure.  The commented lines are
    kept as a menu of past manual experiments.
    """
    # Try creating a dir and a file
    # my_fs.make_dir("/my_dir")
    # my_fs.make_file("/my_file.txt", b"Hello world")
    # my_fs.make_file("/my_dir/your_file.txt", b"=" * 1024)
    # result = my_fs.get_file_data("/my_dir/your_file.txt")
    # my_fs.clean_file("/my_dir/your_file.txt")
    # my_fs.clean_dir("/my_dir")
    # my_fs.clean_file("/my_file.txt")
    #
    # Verify correct ref_to_path
    # result = my_fs.make_file("/test/dir.9/dir.9.9/file.9.9.1.txt")
    # ref_path = result.reference.path
    # print(ref_path, my_fs.convert_ref_to_path(ref_path))
    # result = my_fs.get_file_ref("/test/dir.9/dir.9.9/file.9.9.1.txt")
    #
    # Create/delete a test filesystem
    # my_fs.make_dir("/test")
    my_fs.make_tree("/test", 0)
    # my_fs.clean_tree("/test", 0)
    # my_fs.make_tree("/test", 0)
    # result = my_fs.count
    # and now do it again
    # my_fs.make_tree("/test", 0)
    #
    # Compare speed of looking up by ref or by doc
    # result = my_fs.list_dir_refs("/test")
    # result = my_fs.list_dir_refs("/test", True)
    # result = my_fs.list_dir_docs("/test")
    # result = my_fs.list_dir_docs("/test", True)
    #
    data = my_fs.get_file_data("/test/dir.7/dir.7.3/file.7.3.6.txt")
    print(len(data))
    #
    my_fs.close()
def main():
    """Run test_my_fs against every registered structure type.

    Always returns None (``result`` is never reassigned); the commented
    lines are alternative manual setups.
    """
    result = None
    client = get_client()
    # client = None
    #
    # my_fs = BaseStructure(client)
    # my_fs = FileStructure(client)
    # my_fs = TestStructure(client)
    # my_fs = TreeStructure(client)
    # my_fs = FlatStructure(client)
    # my_fs = get_structure("test", client)
    #
    # test_my_fs(my_fs)
    #
    for struct in ["test", "tree", "flat", "hash"]:
        my_fs = get_structure(struct, client)
        test_my_fs(my_fs)
    client = None
    return result
# Script entry point: run main() and dump its (JSON-serializable) result.
if __name__ == "__main__":
    result = main()
    # print(json.dumps(result, indent=2, default=lambda o: repr(o)))
    print("Result: %s" % type(result).__name__)
    print(json.dumps(result, indent=2, default=lambda o: to_dict(o)))
| 30.298701 | 81 | 0.642092 |
ace6b23ec114ca327943dcbaa02428fbbd22eb4c | 1,160 | py | Python | model.py | sibalex/ml_prod_heroku | 314f117188e62c8b805ff9e23a6509961aac6c23 | [
"MIT"
] | 2 | 2020-07-22T07:34:11.000Z | 2020-07-26T16:46:40.000Z | model.py | sibalex/ml_prod_heroku | 314f117188e62c8b805ff9e23a6509961aac6c23 | [
"MIT"
] | null | null | null | model.py | sibalex/ml_prod_heroku | 314f117188e62c8b805ff9e23a6509961aac6c23 | [
"MIT"
] | 1 | 2020-07-26T16:46:54.000Z | 2020-07-26T16:46:54.000Z |
# pip freeze > requirements.txt
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import pickle
# Load the hiring dataset and impute missing values: missing experience
# counts as zero years; missing test scores take the column mean.
dataset = pd.read_csv('hiring.csv')
dataset['experience'].fillna(0, inplace=True)
dataset['test_score'].fillna(dataset['test_score'].mean(), inplace=True)
# Features are the first three columns (experience, test_score, interview_score).
X = dataset.iloc[:, :3]
# Converting words to integer values
def convert_to_int(word):
    """Map a spelled-out number ('zero'..'twelve') to its integer value.

    Numeric inputs (e.g. the 0 produced by ``fillna(0)``) are passed
    through unchanged; this generalizes the original table, which only
    special-cased the literal 0 and raised KeyError for any other number.

    Raises:
        KeyError: for an unrecognized word.
    """
    if isinstance(word, (int, float)) and not isinstance(word, bool):
        return int(word)
    word_dict = {'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5,
                 'six': 6, 'seven': 7, 'eight': 8, 'nine': 9, 'ten': 10,
                 'eleven': 11, 'twelve': 12, 'zero': 0}
    return word_dict[word]
# Normalize the experience column to integers via the word table above.
X['experience'] = X['experience'].apply(lambda x: convert_to_int(x))
# Target is the last column (salary).
y = dataset.iloc[:, -1]
# Splitting Training and Test Set
# Since we have a very small dataset, we will train our model with all available data.
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
# Fitting model with training data
regressor.fit(X, y)
# Saving model to disk
# pickle.dump(regressor, open('model.pkl', 'wb'))
# Loading model to compare the results
model = pickle.load(open('model.pkl', 'rb'))
print(model.predict([[2, 9, 6]]))
| 25.217391 | 104 | 0.682759 |
ace6b2db49e006079d543f89426809183c38ad01 | 5,626 | py | Python | tests/test_repo.py | moremoban/fs.gitpython | cb471612dc7bd4dd692c0dbb7976bc85988c8161 | [
"MIT"
] | 7 | 2019-10-12T17:47:14.000Z | 2021-11-05T00:43:21.000Z | tests/test_repo.py | moremoban/fs.gitpython | cb471612dc7bd4dd692c0dbb7976bc85988c8161 | [
"MIT"
] | 11 | 2019-08-17T23:13:12.000Z | 2020-09-14T20:01:51.000Z | tests/test_repo.py | moremoban/fs.gitpython | cb471612dc7bd4dd692c0dbb7976bc85988c8161 | [
"MIT"
] | 1 | 2019-12-07T14:52:20.000Z | 2019-12-07T14:52:20.000Z | import fs.path
import fs.errors
from nose.tools import eq_, raises
from gitfs2.repo import (
GitRequire,
git_clone,
get_app_home,
get_repo_name,
make_sure_git_is_available,
)
try:
from mock import MagicMock, patch
except ImportError:
from unittest.mock import MagicMock, patch
@patch("appdirs.user_cache_dir", return_value="root")
@patch("gitfs2.repo.mkdir_p")
@patch("fs.open_fs")
@patch("git.Repo", autospec=True)
class TestGitFunctions:
    """Unit tests for gitfs2.repo.git_clone.

    The four ``@patch`` decorators apply bottom-up, so each test receives
    mocks in the order: git.Repo, fs.open_fs, mkdir_p, user_cache_dir
    (the unused trailing ones are collected into ``*_``).  Making
    ``fs.open_fs`` raise CreateFailed simulates "local clone does not
    exist yet"; letting it succeed simulates an existing clone to update.
    """
    def setUp(self):
        # Fresh GitRequire variants and the expected on-disk clone path
        # (user_cache_dir is patched to "root" above).
        self.repo_name = "repoA"
        self.repo = "https://github.com/my/" + self.repo_name
        self.require = GitRequire(git_url=self.repo)
        self.require_with_submodule = GitRequire(
            git_url=self.repo, submodule="True"
        )
        self.require_with_branch = GitRequire(
            git_url=self.repo, branch="ghpages"
        )
        self.require_with_reference = GitRequire(
            git_url=self.repo, reference="a-commit-reference"
        )
        self.expected_local_repo_path = fs.path.join(
            "root", "repos", self.repo_name
        )
    def test_checkout_new(self, fake_repo, local_folder_exists, *_):
        local_folder_exists.side_effect = [fs.errors.CreateFailed]
        git_clone(self.require)
        fake_repo.clone_from.assert_called_with(
            self.repo,
            self.expected_local_repo_path,
            single_branch=True,
            depth=2,
        )
        repo = fake_repo.return_value
        eq_(repo.git.submodule.called, False)
    def test_checkout_new_with_submodules(
        self, fake_repo, local_folder_exists, *_
    ):
        local_folder_exists.side_effect = [fs.errors.CreateFailed]
        git_clone(self.require_with_submodule)
        fake_repo.clone_from.assert_called_with(
            self.repo,
            self.expected_local_repo_path,
            single_branch=True,
            depth=2,
        )
        repo = fake_repo.clone_from.return_value
        repo.git.submodule.assert_called_with("update", "--init")
    def test_git_update(self, fake_repo, local_folder_exists, *_):
        git_clone(self.require)
        fake_repo.assert_called_with(self.expected_local_repo_path)
        repo = fake_repo.return_value
        repo.git.pull.assert_called()
    def test_git_update_with_submodules(
        self, fake_repo, local_folder_exists, *_
    ):
        git_clone(self.require_with_submodule)
        fake_repo.assert_called_with(self.expected_local_repo_path)
        repo = fake_repo.return_value
        repo.git.submodule.assert_called_with("update")
    def test_checkout_new_with_branch(
        self, fake_repo, local_folder_exists, *_
    ):
        local_folder_exists.side_effect = [fs.errors.CreateFailed]
        git_clone(self.require_with_branch)
        fake_repo.clone_from.assert_called_with(
            self.repo,
            self.expected_local_repo_path,
            branch="ghpages",
            single_branch=True,
            depth=2,
        )
        repo = fake_repo.return_value
        eq_(repo.git.submodule.called, False)
    def test_update_existing_with_branch_parameter(
        self, fake_repo, local_folder_exists, *_
    ):
        git_clone(self.require_with_branch)
        repo = fake_repo.return_value
        repo.git.checkout.assert_called_with("ghpages")
    def test_checkout_new_with_reference(
        self, fake_repo, local_folder_exists, *_
    ):
        local_folder_exists.side_effect = [fs.errors.CreateFailed]
        git_clone(self.require_with_reference)
        fake_repo.clone_from.assert_called_with(
            self.repo,
            self.expected_local_repo_path,
            reference="a-commit-reference",
            single_branch=True,
            depth=2,
        )
        repo = fake_repo.return_value
        eq_(repo.git.submodule.called, False)
    def test_update_existing_with_reference_parameter(
        self, fake_repo, local_folder_exists, *_
    ):
        git_clone(self.require_with_reference)
        repo = fake_repo.return_value
        repo.git.checkout.assert_called_with("a-commit-reference")
    @patch("gitfs2.repo.reporter.warn")
    def test_update_failed_because_offline(
        self, fake_warn, fake_repo, local_folder_exists, *_
    ):
        from git.exc import GitCommandError
        repo = MagicMock(autospec=True)
        fake_repo.return_value = repo
        repo.git.pull.side_effect = [GitCommandError("a", "b")]
        git_clone(self.require_with_reference)
        fake_warn.assert_called_with("Unable to run git commands. Offline?")
def test_get_repo_name():
    """Every supported git URL form must yield the bare repo name."""
    repos = [
        "https://github.com/repo-abc-def/repo",
        "https://github.com/abc/repo",
        "https://github.com/abc/repo.git",
        "https://github.com/abc/repo/",
        "git@github.com:abc/repo.git",
        "git@bitbucket.org:abc/repo.git",
        "git://github.com/abc/repo.git",
    ]
    expected = ["repo"] * len(repos)
    actual = list(map(get_repo_name, repos))
    eq_(expected, actual)
@patch("gitfs2.reporter.error")
def test_get_repo_name_can_handle_invalid_url(fake_reporter):
    """An invalid URL must be reported via reporter.error.

    Bug fix: the original only asserted inside the ``except`` block, so
    the test silently passed whenever get_repo_name did NOT raise.  The
    assertion now runs unconditionally.
    """
    invalid_repo = "invalid"
    try:
        get_repo_name(invalid_repo)
    except Exception:
        pass  # the exception itself is incidental; the report is the contract
    fake_reporter.assert_called_with(
        'An invalid git url: "invalid" in mobanfile'
    )
@patch("appdirs.user_cache_dir", return_value="root")
def test_get_app_home(_):
    """The app home is ``<user_cache_dir>/repos``."""
    actual = get_app_home()
    eq_(fs.path.join("root", "repos"), actual)
@raises(Exception)
@patch("subprocess.check_output", side_effect=Exception)
def test_make_git_is_available(_):
    """make_sure_git_is_available must propagate when ``git`` cannot run."""
    make_sure_git_is_available()
| 32.148571 | 76 | 0.663704 |
ace6b308774e18260b5b715fa9386dc9eaf26327 | 1,897 | py | Python | Fancy_progressbar/family.py | Plawn/Fancy_progressbar | cda9b59475c8ce2786a46a41d06b33b4cf217f7d | [
"Apache-2.0"
] | 2 | 2018-11-26T16:09:15.000Z | 2021-01-09T14:25:35.000Z | Fancy_progressbar/family.py | Plawn/Fancy_progressbar | cda9b59475c8ce2786a46a41d06b33b4cf217f7d | [
"Apache-2.0"
] | null | null | null | Fancy_progressbar/family.py | Plawn/Fancy_progressbar | cda9b59475c8ce2786a46a41d06b33b4cf217f7d | [
"Apache-2.0"
] | null | null | null | from .bar import ProgressBar
class ProgressBarFamily:
    """A nestable group of progress bars summarized by one parent bar.

    The family's ``top_bar`` tracks the arithmetic mean of its children's
    progress; a family can itself be registered as a child of another
    family, indenting its label by nesting depth.
    """
    def __init__(self, *args, **kwargs):
        self.bars: "list[ProgressBar]" = []
        self.is_child = False
        self.progress = 0
        self.coeff = 0
        self.is_set = False
        self.level = 0
        self.append(*args, **kwargs)
        self.family_set = False
        self._task_name: str = kwargs.get('taskname', '')
        self.task_name = ''
        self.set_taskname()
        self.top_bar = ProgressBar(**kwargs.get('bar_options', {}))
        self.set_childs()
        self.finished = False
    def set_taskname(self):
        # Indent the label by nesting depth and append the separator.
        self.task_name = '{}{} :'.format(' ' * self.level, self._task_name)
    def __iter__(self):
        yield from self.bars
    def __repr__(self):
        return f'Family: {self.task_name}'
    def finish(self):
        self.finished = True
        self.top_bar.finish()
    def set_childs(self, childs=None):
        """Propagate level/label/child flags to the given bars (all by default)."""
        if self.is_child:
            self.top_bar.level = self.level
            self.top_bar.set_taskname()
        targets = self.bars if childs is None else childs
        for child in targets:
            child.level = self.level + 1
            child.set_taskname()
            child.set_child()
            if isinstance(child, ProgressBarFamily):
                child.set_childs()
    def set_child(self):
        self.is_child = True
    def current(self, string):
        self.top_bar.current(string)
    def append(self, *args, **kwargs):
        """Add bars (positional, or via the ``list`` keyword) to the family."""
        added = list(kwargs.get('list', [])) + list(args)
        for child in added:
            self.bars.append(child)
            child.to_suppr = self.bars
        self.set_childs(added)
    def update(self, u=None):
        # Mean of the children's progress (0 when the family is empty);
        # ``u`` is accepted for interface compatibility but unused.
        n = len(self.bars)
        self.progress = sum(child.progress / n for child in self.bars)
        self.top_bar.update(self.progress)
ace6b361817f4bd2d8ac7eaf4dbe1f6af0447971 | 24,625 | py | Python | test/feature/test_laf.py | wangg12/kornia | 65777029ff430f841f19aa5cd08ddb3c4ca36338 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | test/feature/test_laf.py | wangg12/kornia | 65777029ff430f841f19aa5cd08ddb3c4ca36338 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | test/feature/test_laf.py | wangg12/kornia | 65777029ff430f841f19aa5cd08ddb3c4ca36338 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | import pytest
import kornia as kornia
import kornia.geometry.transform.imgwarp
import kornia.testing as utils # test utils
import torch
from torch.testing import assert_allclose
from torch.autograd import gradcheck
class TestAngleToRotationMatrix:
    """Tests for angle_to_rotation_matrix (degrees -> 2x2 rotation)."""
    def test_shape(self, device):
        inp = torch.ones(1, 3, 4, 4).to(device)
        rotmat = kornia.geometry.transform.imgwarp.angle_to_rotation_matrix(inp)
        assert rotmat.shape == (1, 3, 4, 4, 2, 2)
    def test_angles(self, device):
        ang_deg = torch.tensor([0, 90.], device=device)
        expected = torch.tensor([[[1.0, 0.], [0., 1.0]],
                                 [[0, 1.0], [-1.0, 0]]], device=device)
        rotmat = kornia.geometry.transform.imgwarp.angle_to_rotation_matrix(ang_deg)
        assert_allclose(rotmat, expected)
    def test_gradcheck(self, device):
        batch_size, channels, height, width = 1, 2, 5, 4
        img = torch.rand(batch_size, channels, height, width, device=device)
        img = utils.tensor_to_gradcheck_var(img)  # to var
        assert gradcheck(kornia.geometry.transform.imgwarp.angle_to_rotation_matrix,
                         (img,),
                         raise_exception=True)
    @pytest.mark.jit
    @pytest.mark.skip("Problems with kornia.pi")
    def test_jit(self, device, dtype):
        B, C, H, W = 2, 1, 32, 32
        patches = torch.rand(B, C, H, W, device=device, dtype=dtype)
        model = kornia.geometry.transform.imgwarp.angle_to_rotation_matrix
        model_jit = torch.jit.script(kornia.geometry.transform.imgwarp.angle_to_rotation_matrix)
        assert_allclose(model(patches), model_jit(patches))
class TestGetLAFScale:
    """Tests for kornia.feature.get_laf_scale (LAF -> isotropic scale)."""
    def test_shape(self, device):
        inp = torch.ones(1, 3, 2, 3, device=device)
        rotmat = kornia.feature.get_laf_scale(inp)
        assert rotmat.shape == (1, 3, 1, 1)
    def test_scale(self, device):
        inp = torch.tensor([[5., 1, 0], [1, 1, 0]], device=device).float()
        inp = inp.view(1, 1, 2, 3)
        expected = torch.tensor([[[[2]]]], device=device).float()
        rotmat = kornia.feature.get_laf_scale(inp)
        assert_allclose(rotmat, expected)
    def test_gradcheck(self, device):
        batch_size, channels, height, width = 1, 2, 2, 3
        img = torch.rand(batch_size, channels, height, width, device=device)
        img = utils.tensor_to_gradcheck_var(img)  # to var
        assert gradcheck(kornia.feature.get_laf_scale,
                         (img,),
                         raise_exception=True)
    @pytest.mark.jit
    def test_jit(self, device, dtype):
        batch_size, channels, height, width = 1, 2, 2, 3
        img = torch.rand(batch_size, channels, height, width, device=device)
        model = kornia.feature.get_laf_scale
        model_jit = torch.jit.script(kornia.feature.get_laf_scale)
        assert_allclose(model(img), model_jit(img))
class TestGetLAFCenter:
    """Tests for kornia.feature.get_laf_center (LAF -> keypoint xy)."""
    def test_shape(self, device):
        inp = torch.ones(1, 3, 2, 3, device=device)
        xy = kornia.feature.get_laf_center(inp)
        assert xy.shape == (1, 3, 2)
    def test_center(self, device):
        inp = torch.tensor([[5., 1, 2], [1, 1, 3]], device=device).float()
        inp = inp.view(1, 1, 2, 3)
        expected = torch.tensor([[[2, 3]]], device=device).float()
        xy = kornia.feature.get_laf_center(inp)
        assert_allclose(xy, expected)
    def test_gradcheck(self, device):
        batch_size, channels, height, width = 1, 2, 2, 3
        # Consistency fix: allocate on the fixture device (the original
        # created CPU tensors here, unlike every sibling test class).
        img = torch.rand(batch_size, channels, height, width, device=device)
        img = utils.tensor_to_gradcheck_var(img)  # to var
        assert gradcheck(kornia.feature.get_laf_center,
                         (img,),
                         raise_exception=True)
    @pytest.mark.jit
    def test_jit(self, device, dtype):
        batch_size, channels, height, width = 1, 2, 2, 3
        img = torch.rand(batch_size, channels, height, width, device=device)
        model = kornia.feature.get_laf_center
        model_jit = torch.jit.script(kornia.feature.get_laf_center)
        assert_allclose(model(img), model_jit(img))
class TestGetLAFOri:
    """Tests for kornia.feature.get_laf_orientation (LAF -> angle in degrees)."""
    def test_shape(self, device):
        inp = torch.ones(1, 3, 2, 3, device=device)
        ori = kornia.feature.get_laf_orientation(inp)
        assert ori.shape == (1, 3, 1)
    def test_ori(self, device):
        inp = torch.tensor([[1, 1, 2], [1, 1, 3]], device=device).float()
        inp = inp.view(1, 1, 2, 3)
        expected = torch.tensor([[[45.]]], device=device).float()
        angle = kornia.feature.get_laf_orientation(inp)
        assert_allclose(angle, expected)
    def test_gradcheck(self, device):
        batch_size, channels, height, width = 1, 2, 2, 3
        img = torch.rand(batch_size, channels, height, width, device=device)
        img = utils.tensor_to_gradcheck_var(img)  # to var
        assert gradcheck(kornia.feature.get_laf_orientation,
                         (img,),
                         raise_exception=True)
    @pytest.mark.jit
    @pytest.mark.skip("Union")
    def test_jit(self, device, dtype):
        batch_size, channels, height, width = 1, 2, 2, 3
        img = torch.rand(batch_size, channels, height, width, device=device)
        model = kornia.feature.get_laf_orientation
        model_jit = torch.jit.script(kornia.feature.get_laf_orientation)
        assert_allclose(model(img), model_jit(img))
class TestScaleLAF:
    """Tests for kornia.feature.scale_laf (scale the 2x2 part, keep center)."""
    def test_shape_float(self, device):
        inp = torch.ones(7, 3, 2, 3, device=device).float()
        scale = 23.
        assert kornia.feature.scale_laf(inp, scale).shape == inp.shape
    def test_shape_tensor(self, device):
        inp = torch.ones(7, 3, 2, 3, device=device).float()
        scale = torch.zeros(7, 1, 1, 1, device=device).float()
        assert kornia.feature.scale_laf(inp, scale).shape == inp.shape
    def test_scale(self, device):
        inp = torch.tensor([[5., 1, 0.8], [1, 1, -4.]], device=device).float()
        inp = inp.view(1, 1, 2, 3)
        scale = torch.tensor([[[[2.]]]], device=device).float()
        out = kornia.feature.scale_laf(inp, scale)
        expected = torch.tensor([[[[10., 2, 0.8], [2, 2, -4.]]]], device=device).float()
        assert_allclose(out, expected)
    def test_gradcheck(self, device):
        batch_size, channels, height, width = 1, 2, 2, 3
        laf = torch.rand(batch_size, channels, height, width, device=device)
        scale = torch.rand(batch_size, device=device)
        scale = utils.tensor_to_gradcheck_var(scale)  # to var
        laf = utils.tensor_to_gradcheck_var(laf)  # to var
        assert gradcheck(kornia.feature.scale_laf,
                         (laf, scale),
                         raise_exception=True, atol=1e-4)
    @pytest.mark.jit
    @pytest.mark.skip("Union")
    def test_jit(self, device, dtype):
        batch_size, channels, height, width = 1, 2, 2, 3
        laf = torch.rand(batch_size, channels, height, width, device=device)
        scale = torch.rand(batch_size, device=device)
        model = kornia.feature.scale_laf
        model_jit = torch.jit.script(kornia.feature.scale_laf)
        assert_allclose(model(laf, scale), model_jit(laf, scale))
class TestMakeUpright:
    """Tests for kornia.feature.make_upright (zero the upper-right shear)."""
    def test_shape(self, device):
        inp = torch.ones(5, 3, 2, 3, device=device)
        rotmat = kornia.feature.make_upright(inp)
        assert rotmat.shape == (5, 3, 2, 3)
    def test_do_nothing(self, device):
        inp = torch.tensor([[1, 0, 0], [0, 1, 0]], device=device).float()
        inp = inp.view(1, 1, 2, 3)
        expected = torch.tensor([[1, 0, 0], [0, 1, 0]], device=device).float()
        laf = kornia.feature.make_upright(inp)
        assert_allclose(laf, expected)
    def test_do_nothing_with_scalea(self, device):
        inp = torch.tensor([[2, 0, 0], [0, 2, 0]], device=device).float()
        inp = inp.view(1, 1, 2, 3)
        expected = torch.tensor([[2, 0, 0], [0, 2, 0]], device=device).float()
        laf = kornia.feature.make_upright(inp)
        assert_allclose(laf, expected)
    def test_check_zeros(self, device):
        inp = torch.rand(4, 5, 2, 3, device=device)
        laf = kornia.feature.make_upright(inp)
        must_be_zeros = laf[:, :, 0, 1]
        assert_allclose(must_be_zeros, torch.zeros_like(must_be_zeros))
    def test_gradcheck(self, device):
        batch_size, channels, height, width = 14, 2, 2, 3
        img = torch.rand(batch_size, channels, height, width, device=device)
        img = utils.tensor_to_gradcheck_var(img)  # to var
        assert gradcheck(kornia.feature.make_upright,
                         (img,),
                         raise_exception=True)
    @pytest.mark.jit
    @pytest.mark.skip("Union")
    def test_jit(self, device, dtype):
        batch_size, channels, height, width = 1, 2, 2, 3
        img = torch.rand(batch_size, channels, height, width, device=device)
        model = kornia.feature.make_upright
        model_jit = torch.jit.script(kornia.feature.make_upright)
        assert_allclose(model(img), model_jit(img))
class TestELL2LAF:
    """Tests for kornia.feature.ellipse_to_laf (xy + ellipse coeffs -> LAF)."""
    def test_shape(self, device):
        inp = torch.ones(5, 3, 5, device=device)
        inp[:, :, 3] = 0
        rotmat = kornia.feature.ellipse_to_laf(inp)
        assert rotmat.shape == (5, 3, 2, 3)
    def test_conversion(self, device):
        inp = torch.tensor([[10, -20, 0.01, 0, 0.01]], device=device).float()
        inp = inp.view(1, 1, 5)
        expected = torch.tensor([[10, 0, 10.], [0, 10, -20]], device=device).float()
        expected = expected.view(1, 1, 2, 3)
        laf = kornia.feature.ellipse_to_laf(inp)
        assert_allclose(laf, expected)
    def test_gradcheck(self, device):
        batch_size, channels, height = 1, 2, 5
        img = torch.rand(batch_size, channels, height, device=device).abs()
        img[:, :, 2] = img[:, :, 3].abs() + 0.3
        img[:, :, 4] += 1.
        # assure it is positive definite
        img = utils.tensor_to_gradcheck_var(img)  # to var
        assert gradcheck(kornia.feature.ellipse_to_laf,
                         (img,),
                         raise_exception=True)
    @pytest.mark.jit
    def test_jit(self, device, dtype):
        batch_size, channels, height = 1, 2, 5
        img = torch.rand(batch_size, channels, height, device=device).abs()
        img[:, :, 2] = img[:, :, 3].abs() + 0.3
        img[:, :, 4] += 1.
        model = kornia.feature.ellipse_to_laf
        model_jit = torch.jit.script(kornia.feature.ellipse_to_laf)
        assert_allclose(model(img), model_jit(img))
class TestNormalizeLAF:
    """Tests for kornia.feature.normalize_laf (pixel LAF -> [0, 1] coords).

    Consistency fix: every tensor is now created on the fixture ``device``
    (the original class ignored the argument throughout, so these tests
    always ran on CPU even under a CUDA device fixture).
    """
    def test_shape(self, device):
        inp = torch.rand(5, 3, 2, 3, device=device)
        img = torch.rand(5, 3, 10, 10, device=device)
        assert inp.shape == kornia.feature.normalize_laf(inp, img).shape
    def test_conversion(self, device):
        w, h = 10, 5
        laf = torch.tensor([[1, 0, 1], [0, 1, 1]], device=device).float()
        laf = laf.view(1, 1, 2, 3)
        img = torch.rand(1, 3, h, w, device=device)
        expected = torch.tensor([[0.2, 0, 0.1], [0, 0.2, 0.2]], device=device).float()
        lafn = kornia.feature.normalize_laf(laf, img)
        assert_allclose(lafn, expected)
    def test_gradcheck(self, device):
        batch_size, channels, height, width = 1, 2, 2, 3
        laf = torch.rand(batch_size, channels, height, width, device=device)
        img = torch.rand(batch_size, 3, 10, 32, device=device)
        img = utils.tensor_to_gradcheck_var(img)  # to var
        laf = utils.tensor_to_gradcheck_var(laf)  # to var
        assert gradcheck(kornia.feature.normalize_laf,
                         (laf, img,),
                         raise_exception=True)
    @pytest.mark.jit
    def test_jit(self, device, dtype):
        batch_size, channels, height, width = 1, 2, 2, 3
        laf = torch.rand(batch_size, channels, height, width, device=device)
        img = torch.rand(batch_size, 3, 10, 32, device=device)
        model = kornia.feature.normalize_laf
        model_jit = torch.jit.script(kornia.feature.normalize_laf)
        assert_allclose(model(laf, img), model_jit(laf, img))
class TestLAF2pts:
    """Tests for kornia.feature.laf_to_boundary_points (LAF -> ellipse contour)."""
    def test_shape(self, device):
        inp = torch.rand(5, 3, 2, 3, device=device)
        n_pts = 13
        assert kornia.feature.laf_to_boundary_points(inp, n_pts).shape == (5, 3, n_pts, 2)
    def test_conversion(self, device):
        laf = torch.tensor([[1, 0, 1], [0, 1, 1]], device=device).float()
        laf = laf.view(1, 1, 2, 3)
        n_pts = 6
        expected = torch.tensor([[[[1, 1],
                                   [1, 2],
                                   [2, 1],
                                   [1, 0],
                                   [0, 1],
                                   [1, 2]]]], device=device).float()
        pts = kornia.feature.laf_to_boundary_points(laf, n_pts)
        assert_allclose(pts, expected)
    def test_gradcheck(self, device):
        batch_size, channels, height, width = 3, 2, 2, 3
        laf = torch.rand(batch_size, channels, height, width, device=device)
        laf = utils.tensor_to_gradcheck_var(laf)  # to var
        assert gradcheck(kornia.feature.laf_to_boundary_points,
                         (laf),
                         raise_exception=True)
    @pytest.mark.jit
    def test_jit(self, device, dtype):
        batch_size, channels, height, width = 3, 2, 2, 3
        laf = torch.rand(batch_size, channels, height, width, device=device)
        model = kornia.feature.laf_to_boundary_points
        model_jit = torch.jit.script(kornia.feature.laf_to_boundary_points)
        assert_allclose(model(laf), model_jit(laf))
class TestDenormalizeLAF:
    """Tests for kornia.feature.denormalize_laf ([0, 1] LAF -> pixel coords)."""
    def test_shape(self, device):
        inp = torch.rand(5, 3, 2, 3, device=device)
        img = torch.rand(5, 3, 10, 10, device=device)
        assert inp.shape == kornia.feature.denormalize_laf(inp, img).shape
    def test_conversion(self, device):
        w, h = 10, 5
        expected = torch.tensor([[1, 0, 1], [0, 1, 1]], device=device).float()
        expected = expected.view(1, 1, 2, 3)
        img = torch.rand(1, 3, h, w, device=device)
        lafn = torch.tensor([[0.2, 0, 0.1], [0, 0.2, 0.2]], device=device).float()
        laf = kornia.feature.denormalize_laf(lafn.view(1, 1, 2, 3), img)
        assert_allclose(laf, expected)
    def test_gradcheck(self, device):
        batch_size, channels, height, width = 1, 2, 2, 3
        laf = torch.rand(batch_size, channels, height, width, device=device)
        img = torch.rand(batch_size, 3, 10, 32, device=device)
        img = utils.tensor_to_gradcheck_var(img)  # to var
        laf = utils.tensor_to_gradcheck_var(laf)  # to var
        assert gradcheck(kornia.feature.denormalize_laf,
                         (laf, img,),
                         raise_exception=True)
    @pytest.mark.jit
    def test_jit(self, device, dtype):
        batch_size, channels, height, width = 1, 2, 2, 3
        # Consistency fix: allocate on the fixture device (the original
        # created CPU tensors here, unlike the rest of this class).
        laf = torch.rand(batch_size, channels, height, width, device=device)
        img = torch.rand(batch_size, 3, 10, 32, device=device)
        model = kornia.feature.denormalize_laf
        model_jit = torch.jit.script(kornia.feature.denormalize_laf)
        assert_allclose(model(laf, img), model_jit(laf, img))
class TestGenPatchGrid:
    """Tests for generate_patch_grid_from_normalized_LAF (sampling grid shape/grads)."""
    def test_shape(self, device):
        laf = torch.rand(5, 3, 2, 3, device=device)
        img = torch.rand(5, 3, 10, 10, device=device)
        PS = 3
        from kornia.feature.laf import generate_patch_grid_from_normalized_LAF
        grid = generate_patch_grid_from_normalized_LAF(img, laf, PS)
        assert grid.shape == (15, 3, 3, 2)
    def test_gradcheck(self, device):
        laf = torch.rand(5, 3, 2, 3, device=device)
        img = torch.rand(5, 3, 10, 10, device=device)
        PS = 3
        from kornia.feature.laf import generate_patch_grid_from_normalized_LAF
        img = utils.tensor_to_gradcheck_var(img)  # to var
        laf = utils.tensor_to_gradcheck_var(laf)  # to var
        assert gradcheck(generate_patch_grid_from_normalized_LAF,
                         (img, laf, PS,),
                         raise_exception=True)
class TestExtractPatchesSimple:
    """Tests for kornia.feature.extract_patches_simple (single-scale patch extraction)."""
    def test_shape(self, device):
        laf = torch.rand(5, 4, 2, 3, device=device)
        img = torch.rand(5, 3, 100, 30, device=device)
        PS = 10
        patches = kornia.feature.extract_patches_simple(img, laf, PS)
        assert patches.shape == (5, 4, 3, PS, PS)
    # TODO: check what to do to improve timing
    # @pytest.mark.skip("The test takes too long to finish.")
    def test_gradcheck(self, device):
        nlaf = torch.tensor([[0.1, 0.001, 0.5], [0, 0.1, 0.5]], device=device).float()
        nlaf = nlaf.view(1, 1, 2, 3)
        img = torch.rand(1, 3, 20, 30, device=device)
        PS = 11
        img = utils.tensor_to_gradcheck_var(img)  # to var
        nlaf = utils.tensor_to_gradcheck_var(nlaf)  # to var
        assert gradcheck(kornia.feature.extract_patches_simple,
                         (img, nlaf, PS, False),
                         raise_exception=True)
class TestExtractPatchesPyr:
    """Tests for kornia.feature.extract_patches_from_pyramid (scale-pyramid extraction)."""
    def test_shape(self, device):
        laf = torch.rand(5, 4, 2, 3, device=device)
        img = torch.rand(5, 3, 100, 30, device=device)
        PS = 10
        patches = kornia.feature.extract_patches_from_pyramid(img, laf, PS)
        assert patches.shape == (5, 4, 3, PS, PS)
    # TODO: check what to do to improve timing
    # @pytest.mark.skip("The test takes too long to finish.")
    def test_gradcheck(self, device):
        nlaf = torch.tensor([[0.1, 0.001, 0.5], [0, 0.1, 0.5]], device=device).float()
        nlaf = nlaf.view(1, 1, 2, 3)
        img = torch.rand(1, 3, 20, 30, device=device)
        PS = 11
        img = utils.tensor_to_gradcheck_var(img)  # to var
        nlaf = utils.tensor_to_gradcheck_var(nlaf)  # to var
        assert gradcheck(kornia.feature.extract_patches_from_pyramid,
                         (img, nlaf, PS, False),
                         raise_exception=True)
class TestLAFIsTouchingBoundary:
    """Tests for kornia.feature.laf_is_inside_image."""

    def test_shape(self, device):
        lafs = torch.rand(5, 3, 2, 3, device=device)
        image = torch.rand(5, 3, 10, 10, device=device)
        assert kornia.feature.laf_is_inside_image(lafs, image).shape == (5, 3)

    def test_touch(self, device):
        """First LAF pokes outside the 10x5 image, second stays inside."""
        width, height = 10, 5
        image = torch.rand(1, 3, height, width, device=device)
        lafs = torch.tensor([[[[10, 0, 3], [0, 10, 3]],
                              [[1, 0, 5], [0, 1, 2]]]], device=device).float()
        expected = torch.tensor([[False, True]], device=device)
        inside = kornia.feature.laf_is_inside_image(lafs, image)
        assert torch.all(inside == expected).item()

    @pytest.mark.jit
    def test_jit(self, device, dtype):
        """Scripted laf_is_inside_image must match the eager version."""
        width, height = 10, 5
        image = torch.rand(1, 3, height, width, device=device)
        lafs = torch.tensor([[[[10, 0, 3], [0, 10, 3]],
                              [[1, 0, 5], [0, 1, 2]]]], device=device).float()
        eager_op = kornia.feature.laf_is_inside_image
        scripted_op = torch.jit.script(kornia.feature.laf_is_inside_image)
        assert_allclose(eager_op(lafs, image), scripted_op(lafs, image))
class TestGetCreateLAF:
    """Tests for kornia.feature.laf_from_center_scale_ori."""

    def test_shape(self, device):
        xy = torch.ones(1, 3, 2, device=device)
        ori = torch.ones(1, 3, 1, device=device)
        scale = torch.ones(1, 3, 1, 1, device=device)
        laf = kornia.feature.laf_from_center_scale_ori(xy, scale, ori)
        assert laf.shape == (1, 3, 2, 3)

    def test_laf(self, device):
        """Zero orientation and scale 5 gives diag(5) plus the center column."""
        xy = torch.ones(1, 1, 2, device=device)
        ori = torch.zeros(1, 1, 1, device=device)
        scale = 5 * torch.ones(1, 1, 1, 1, device=device)
        expected = torch.tensor([[[[5, 0, 1], [0, 5, 1]]]], device=device).float()
        laf = kornia.feature.laf_from_center_scale_ori(xy, scale, ori)
        assert_allclose(laf, expected)

    def test_cross_consistency(self, device):
        """Center, scale and orientation survive a round trip through the LAF."""
        B, N = 3, 2
        xy = torch.rand(B, N, 2, device=device)
        ori = torch.rand(B, N, 1, device=device)
        scale = torch.abs(torch.rand(B, N, 1, 1, device=device))
        laf = kornia.feature.laf_from_center_scale_ori(xy, scale, ori)
        scale_rt = kornia.feature.get_laf_scale(laf)
        assert_allclose(scale, scale_rt)
        xy_rt = kornia.feature.get_laf_center(laf)
        assert_allclose(xy_rt, xy)
        ori_rt = kornia.feature.get_laf_orientation(laf)
        assert_allclose(ori_rt, ori)

    def test_gradcheck(self, device):
        B, N = 3, 2
        xy = utils.tensor_to_gradcheck_var(torch.rand(B, N, 2, device=device))
        ori = utils.tensor_to_gradcheck_var(torch.rand(B, N, 1, device=device))
        scale = utils.tensor_to_gradcheck_var(
            torch.abs(torch.rand(B, N, 1, 1, device=device)))
        assert gradcheck(kornia.feature.laf_from_center_scale_ori,
                         (xy, scale, ori),
                         raise_exception=True)

    @pytest.mark.skip("Depends on angle-to-rotation-matric")
    @pytest.mark.jit
    def test_jit(self, device, dtype):
        B, N = 3, 2
        xy = torch.rand(B, N, 2, device=device)
        ori = torch.rand(B, N, 1, device=device)
        scale = torch.abs(torch.rand(B, N, 1, 1, device=device))
        eager_op = kornia.feature.laf_from_center_scale_ori
        scripted_op = torch.jit.script(kornia.feature.laf_from_center_scale_ori)
        assert_allclose(eager_op(xy, scale, ori), scripted_op(xy, scale, ori))
class TestGetLAF3pts:
    """Tests for kornia.feature.laf_to_three_points."""

    def test_shape(self, device):
        lafs = torch.ones(1, 3, 2, 3, device=device)
        assert kornia.feature.laf_to_three_points(lafs).shape == lafs.shape

    def test_batch_shape(self, device):
        lafs = torch.ones(5, 3, 2, 3, device=device)
        assert kornia.feature.laf_to_three_points(lafs).shape == lafs.shape

    def test_conversion(self, device):
        """An identity LAF centered at (2, 3) maps to known three points."""
        laf = torch.tensor([[1, 0, 2], [0, 1, 3]], device=device).float().view(1, 1, 2, 3)
        expected = torch.tensor([[3, 2, 2], [3, 4, 3]], device=device).float().view(1, 1, 2, 3)
        assert_allclose(kornia.feature.laf_to_three_points(laf), expected)

    def test_gradcheck(self, device):
        lafs = utils.tensor_to_gradcheck_var(torch.rand(3, 2, 2, 3, device=device))
        assert gradcheck(kornia.feature.laf_to_three_points,
                         (lafs,),
                         raise_exception=True)

    @pytest.mark.jit
    def test_jit(self, device, dtype):
        lafs = torch.rand(3, 2, 2, 3, device=device)
        eager_op = kornia.feature.laf_to_three_points
        scripted_op = torch.jit.script(kornia.feature.laf_to_three_points)
        assert_allclose(eager_op(lafs), scripted_op(lafs))
class TestGetLAFFrom3pts:
    """Tests for kornia.feature.laf_from_three_points."""

    def test_shape(self, device):
        pts = torch.ones(1, 3, 2, 3, device=device)
        assert kornia.feature.laf_from_three_points(pts).shape == pts.shape

    def test_batch_shape(self, device):
        pts = torch.ones(5, 3, 2, 3, device=device)
        assert kornia.feature.laf_from_three_points(pts).shape == pts.shape

    def test_conversion(self, device):
        """Known three points map back to the identity LAF at (2, 3)."""
        pts = torch.tensor([[3, 2, 2], [3, 4, 3]], device=device).float().view(1, 1, 2, 3)
        expected = torch.tensor([[1, 0, 2], [0, 1, 3]], device=device).float().view(1, 1, 2, 3)
        laf = kornia.feature.laf_from_three_points(pts)
        assert_allclose(laf, expected)

    def test_cross_consistency(self, device):
        """three_points -> LAF -> three_points is the identity."""
        pts = torch.rand(3, 2, 2, 3, device=device)
        round_trip = kornia.feature.laf_to_three_points(
            kornia.feature.laf_from_three_points(pts))
        assert_allclose(round_trip, pts)

    def test_gradcheck(self, device):
        pts = utils.tensor_to_gradcheck_var(torch.rand(3, 2, 2, 3, device=device))
        assert gradcheck(kornia.feature.laf_from_three_points,
                         (pts,),
                         raise_exception=True)

    @pytest.mark.jit
    def test_jit(self, device, dtype):
        pts = torch.rand(3, 2, 2, 3, device=device)
        eager_op = kornia.feature.laf_from_three_points
        scripted_op = torch.jit.script(kornia.feature.laf_from_three_points)
        assert_allclose(eager_op(pts), scripted_op(pts))
| 42.603806 | 111 | 0.609706 |
ace6b3c1b02a184f9efa40a6bb1aacc876d63839 | 337 | py | Python | canteen_tests/test_http/test_cookies.py | dbl0null/canteen | 3bef22a2059ef6ac5df178324fbc1dba45316e22 | [
"MIT"
] | 2 | 2016-08-24T18:42:41.000Z | 2017-12-08T00:41:02.000Z | canteen_tests/test_http/test_cookies.py | dbl0null/canteen | 3bef22a2059ef6ac5df178324fbc1dba45316e22 | [
"MIT"
] | null | null | null | canteen_tests/test_http/test_cookies.py | dbl0null/canteen | 3bef22a2059ef6ac5df178324fbc1dba45316e22 | [
"MIT"
] | 2 | 2015-09-22T05:36:27.000Z | 2017-12-08T00:41:21.000Z | # -*- coding: utf-8 -*-
"""
HTTP cookie logic tests
~~~~~~~~~~~~~~~~~~~~~~~
:author: Sam Gammon <sg@samgammon.com>
:copyright: (c) Sam Gammon, 2014
:license: This software makes use of the MIT Open Source License.
A copy of this license is included as ``LICENSE.md`` in
the root of the project.
"""
| 22.466667 | 67 | 0.575668 |
ace6b559828b8aad25753da813d283d4539fd7e3 | 6,241 | py | Python | mmdeploy/backend/tensorrt/utils.py | grimoire/mmdeploy | e84bc30f4a036dd19cb3af854203922a91098e84 | [
"Apache-2.0"
] | 746 | 2021-12-27T10:50:28.000Z | 2022-03-31T13:34:14.000Z | mmdeploy/backend/tensorrt/utils.py | grimoire/mmdeploy | e84bc30f4a036dd19cb3af854203922a91098e84 | [
"Apache-2.0"
] | 253 | 2021-12-28T05:59:13.000Z | 2022-03-31T18:22:25.000Z | mmdeploy/backend/tensorrt/utils.py | grimoire/mmdeploy | e84bc30f4a036dd19cb3af854203922a91098e84 | [
"Apache-2.0"
] | 147 | 2021-12-27T10:50:33.000Z | 2022-03-30T10:44:20.000Z | # Copyright (c) OpenMMLab. All rights reserved.
import logging
from typing import Dict, Optional, Sequence, Union
import onnx
import tensorrt as trt
from packaging import version
from mmdeploy.utils import get_root_logger
from .init_plugins import load_tensorrt_plugin
def save(engine: trt.ICudaEngine, path: str) -> None:
    """Serialize a TensorRT engine to disk.

    Args:
        engine (tensorrt.ICudaEngine): TensorRT engine to be serialized.
        path (str): The absolute disk path to write the engine.
    """
    serialized = bytearray(engine.serialize())
    with open(path, mode='wb') as f:
        f.write(serialized)
def load(path: str) -> trt.ICudaEngine:
    """Deserialize a TensorRT engine from disk.

    Args:
        path (str): The disk path to read the engine from.

    Returns:
        tensorrt.ICudaEngine: The TensorRT engine loaded from disk.
    """
    # Custom plugins must be registered before deserialization so that any
    # plugin layers baked into the engine can be resolved.
    load_tensorrt_plugin()
    with trt.Logger() as logger, trt.Runtime(logger) as runtime:
        with open(path, mode='rb') as f:
            serialized_engine = f.read()
        engine = runtime.deserialize_cuda_engine(serialized_engine)
    return engine
def from_onnx(onnx_model: Union[str, onnx.ModelProto],
              output_file_prefix: str,
              input_shapes: Dict[str, Sequence[int]],
              max_workspace_size: int = 0,
              fp16_mode: bool = False,
              int8_mode: bool = False,
              int8_param: Optional[dict] = None,
              device_id: int = 0,
              log_level: trt.Logger.Severity = trt.Logger.ERROR,
              **kwargs) -> trt.ICudaEngine:
    """Create a TensorRT engine from an ONNX model and save it to disk.

    The engine is written to ``output_file_prefix + '.engine'`` and also
    returned.

    Args:
        onnx_model (str or onnx.ModelProto): Input onnx model to convert from.
        output_file_prefix (str): Path prefix for the serialized engine file.
        input_shapes (Dict[str, Sequence[int]]): The min/opt/max shape of
            each input (keys: ``min_shape``, ``opt_shape``, ``max_shape``).
        max_workspace_size (int): Max workspace size of the TensorRT engine;
            some tactics and layers need a large workspace. Defaults to `0`.
        fp16_mode (bool): Specifying whether to enable fp16 mode.
            Defaults to `False`.
        int8_mode (bool): Specifying whether to enable int8 mode.
            Defaults to `False`.
        int8_param (dict): A dict of parameters for int8 mode (calibration
            file, model type, optional algorithm). Defaults to `None`.
        device_id (int): Choice of the device to create the engine on.
            Defaults to `0`.
        log_level (trt.Logger.Severity): The log level of TensorRT. Defaults
            to `trt.Logger.ERROR`.

    Returns:
        tensorrt.ICudaEngine: The TensorRT engine created from onnx_model.

    Example:
        >>> from mmdeploy.apis.tensorrt import from_onnx
        >>> engine = from_onnx(
        >>>             "onnx_model.onnx",
        >>>             {'input': {"min_shape" : [1, 3, 160, 160],
        >>>                        "opt_shape" : [1, 3, 320, 320],
        >>>                        "max_shape" : [1, 3, 640, 640]}},
        >>>             log_level=trt.Logger.WARNING,
        >>>             fp16_mode=True,
        >>>             max_workspace_size=1 << 30,
        >>>             device_id=0)
        >>>             })
    """
    import os
    # pycuda.autoinit creates the CUDA context at import time; CUDA_DEVICE
    # must be set *before* that import so the context lands on the requested
    # GPU, and the previous value is restored immediately afterwards.
    old_cuda_device = os.environ.get('CUDA_DEVICE', None)
    os.environ['CUDA_DEVICE'] = str(device_id)
    import pycuda.autoinit  # noqa:F401
    if old_cuda_device is not None:
        os.environ['CUDA_DEVICE'] = old_cuda_device
    else:
        os.environ.pop('CUDA_DEVICE')
    # Register custom plugins before building so plugin ops in the ONNX
    # graph can be resolved by the parser.
    load_tensorrt_plugin()
    # create builder and network
    logger = trt.Logger(log_level)
    builder = trt.Builder(logger)
    EXPLICIT_BATCH = 1 << (int)(
        trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
    network = builder.create_network(EXPLICIT_BATCH)
    # parse onnx
    parser = trt.OnnxParser(network, logger)
    if isinstance(onnx_model, str):
        onnx_model = onnx.load(onnx_model)
    if not parser.parse(onnx_model.SerializeToString()):
        error_msgs = ''
        for error in range(parser.num_errors):
            error_msgs += f'{parser.get_error(error)}\n'
        raise RuntimeError(f'Failed to parse onnx, {error_msgs}')
    # config builder
    # TensorRT < 8 reads workspace size / precision flags from the builder;
    # newer versions read them from the builder config, so both are set.
    if version.parse(trt.__version__) < version.parse('8'):
        builder.max_workspace_size = max_workspace_size
    config = builder.create_builder_config()
    config.max_workspace_size = max_workspace_size
    profile = builder.create_optimization_profile()
    for input_name, param in input_shapes.items():
        min_shape = param['min_shape']
        opt_shape = param['opt_shape']
        max_shape = param['max_shape']
        profile.set_shape(input_name, min_shape, opt_shape, max_shape)
    config.add_optimization_profile(profile)
    if fp16_mode:
        if version.parse(trt.__version__) < version.parse('8'):
            builder.fp16_mode = fp16_mode
        config.set_flag(trt.BuilderFlag.FP16)
    if int8_mode:
        from .calib_utils import HDF5Calibrator
        config.set_flag(trt.BuilderFlag.INT8)
        assert int8_param is not None
        config.int8_calibrator = HDF5Calibrator(
            int8_param['calib_file'],
            input_shapes,
            model_type=int8_param['model_type'],
            device_id=device_id,
            algorithm=int8_param.get(
                'algorithm', trt.CalibrationAlgoType.ENTROPY_CALIBRATION_2))
        if version.parse(trt.__version__) < version.parse('8'):
            builder.int8_mode = int8_mode
            builder.int8_calibrator = config.int8_calibrator
    # create engine
    engine = builder.build_engine(network, config)
    assert engine is not None, 'Failed to create TensorRT engine'
    save(engine, output_file_prefix + '.engine')
    return engine
def get_trt_log_level() -> trt.Logger.Severity:
    """Map the root logger's level to a TensorRT logging severity.

    Returns:
        tensorrt.Logger.Severity: ERROR/WARNING/VERBOSE for the matching
        Python logging level; INFO for any other level.
    """
    level_map = {
        logging.ERROR: trt.Logger.ERROR,
        logging.WARNING: trt.Logger.WARNING,
        logging.DEBUG: trt.Logger.VERBOSE,
    }
    logger = get_root_logger()
    return level_map.get(logger.level, trt.Logger.INFO)
| 35.460227 | 79 | 0.633713 |
ace6b5719a06c1a1f3aba09ef31352cbb984227b | 8,912 | py | Python | restle/fields.py | consbio/restle | 1235d3da4926e78465302560eb6b5a389156a472 | [
"BSD-3-Clause"
] | 3 | 2015-04-11T16:55:40.000Z | 2021-02-24T23:10:09.000Z | restle/fields.py | consbio/restle | 1235d3da4926e78465302560eb6b5a389156a472 | [
"BSD-3-Clause"
] | 4 | 2021-07-01T19:57:03.000Z | 2021-10-14T18:49:00.000Z | restle/fields.py | consbio/restle | 1235d3da4926e78465302560eb6b5a389156a472 | [
"BSD-3-Clause"
] | null | null | null | import six
class Field(object):
    """Base class for resource fields.

    A field remembers the attribute name it is bound to and registers itself
    on the owning resource's ``_meta.fields`` list via
    ``contribute_to_class``. Subclasses override ``to_python``/``to_value``
    to convert between serialized values and Python objects.
    """

    def __init__(self, name=None, required=True, default=None):
        self._attr_name = None
        self._resource = None
        self.name = name
        self.required = required
        self.default = default

    def contribute_to_class(self, cls, name):
        """Bind this field to attribute *name* on the resource class *cls*."""
        self._attr_name = name
        self._resource = cls
        if not self.name:
            self.name = name
        cls._meta.fields.append(self)

    def to_python(self, value, resource):
        """Return the deserialized value as a Python object (identity here)."""
        return value

    def to_value(self, obj, resource):
        """Return the Python object as a serializable value (identity here)."""
        return obj
class DictField(Field):
    """Field holding a plain dictionary.

    Behaviorally identical to :class:`Field`; the subclass exists only to
    make resource declarations self-documenting.
    """
    pass
class ListField(Field):
    """Field whose deserialized value must be a list-like collection."""

    def to_python(self, value, resource):
        """Coerce *value* to a plain list; reject non-sequence input."""
        if isinstance(value, (list, tuple, set)):
            return list(value)
        raise ValueError("Expected a list, got '{0}'".format(value.__class__.__name__))
class TextField(Field):
    """Field holding text, optionally normalized on the way in."""

    def __init__(self, encoding='utf-8', strip=False, lower=False, *args, **kwargs):
        """
        :param encoding: Assume this encoding when decoding byte strings; pass
            ``None`` to leave byte strings untouched.
        :param strip: If True, will remove leading and trailing whitespace from value
        :param lower: If True, will convert value to lower case
        """
        super(TextField, self).__init__(*args, **kwargs)
        self.encoding = encoding
        self.strip = strip
        self.lower = lower

    def _transform(self, value):
        """Apply the configured strip/lower normalization to *value*."""
        if self.strip:
            value = value.strip()
        if self.lower:
            value = value.lower()
        return value

    def to_python(self, value, resource):
        """Convert to unicode if `self.encoding != None`, otherwise return the
        input without attempting to decode. ``None`` passes through unchanged.

        Fix: ``None`` was previously fed into ``_transform``, which raised
        ``AttributeError`` whenever ``strip`` or ``lower`` was enabled.
        """
        if value is None:
            return None
        if isinstance(value, six.text_type):
            return self._transform(value)
        if self.encoding is None and isinstance(value, (six.text_type, six.binary_type)):
            return self._transform(value)
        if self.encoding is not None and isinstance(value, six.binary_type):
            return self._transform(value.decode(self.encoding))
        # Anything else (numbers, objects) is stringified before normalizing.
        return self._transform(six.text_type(value))
class BooleanField(Field):
    """Field representing a boolean value."""

    def to_python(self, value, resource):
        """Coerce to bool, passing ``None`` through untouched."""
        return None if value is None else bool(value)
class NumberField(Field):
    """Field that parses numeric input into an int or float."""

    def to_python(self, value, resource):
        """Return existing numbers (and None) unchanged; otherwise parse via
        float, collapsing whole numbers to int."""
        if value is None or isinstance(value, (int, float)):
            return value
        parsed = float(value)
        if parsed.is_integer():
            return int(parsed)
        return parsed
class IntegerField(NumberField):
    """Field coercing values to int in both directions."""

    def to_python(self, value, resource):
        """Parse *value* as a number and truncate to int; None passes through."""
        if value is None or isinstance(value, int):
            return value
        return int(super(IntegerField, self).to_python(value, resource))

    def to_value(self, obj, resource):
        """Serialize as int; None passes through."""
        return None if obj is None else int(obj)
class FloatField(NumberField):
    """Field coercing values to float in both directions."""

    def to_python(self, value, resource):
        """Parse *value* as a number and widen to float; None passes through."""
        if value is None:
            return None
        return float(super(FloatField, self).to_python(value, resource))

    def to_value(self, obj, resource):
        """Serialize as float; None passes through."""
        return None if obj is None else float(obj)
class ObjectField(Field):
    """Represents a dictionary as a Python object (lists, too)."""

    def __init__(self, class_name='AnonymousObject', aliases=None, *args, **kwargs):
        """
        :param class_name: Name given to the anonymous classes built by ``to_python``.
        :param aliases: Mapping of serialized key -> Python attribute name.
        """
        self.class_name = class_name
        # Fix: 'aliases' previously defaulted to a shared mutable dict.
        self.aliases = aliases or {}
        self.reverse_aliases = {v: k for k, v in six.iteritems(self.aliases)}
        super(ObjectField, self).__init__(*args, **kwargs)

    def to_python(self, value, resource):
        """Dictionary (or list) to an anonymous Python object."""
        if isinstance(value, dict):
            d = {
                self.aliases.get(k, k): self.to_python(v, resource) if isinstance(v, (dict, list)) else v
                for k, v in six.iteritems(value)
            }
            return type(self.class_name, (), d)
        elif isinstance(value, list):
            return [self.to_python(x, resource) if isinstance(x, (dict, list)) else x for x in value]
        else:
            return value

    def to_value(self, obj, resource, visited=None):
        """Python object to dictionary.

        :raises ValueError: if *obj* directly or indirectly contains itself.

        Fix: ``visited`` previously defaulted to a shared set that was never
        populated and never passed down the recursion, so circular references
        were never detected and caused infinite recursion instead.
        """
        visited = set() if visited is None else visited
        if id(obj) in visited:
            raise ValueError('Circular reference detected when attempting to serialize object')
        # Track this object only along the current path (copy, not mutate),
        # so sibling references to the same object remain legal.
        visited = visited | {id(obj)}
        if isinstance(obj, (list, tuple, set)):
            return [
                self.to_value(x, resource, visited) if hasattr(x, '__dict__') else x
                for x in obj
            ]
        elif hasattr(obj, '__dict__'):
            # Drop private ("_"-prefixed) attributes before serializing.
            attrs = obj.__dict__.copy()
            for key in six.iterkeys(obj.__dict__):
                if key.startswith('_'):
                    del attrs[key]
            return {
                self.reverse_aliases.get(k, k):
                    self.to_value(v, resource, visited)
                    if hasattr(v, '__dict__') or isinstance(v, (list, tuple, set)) else v
                for k, v in six.iteritems(attrs)
            }
        else:
            return obj
class NestedResourceField(Field):
    """Base class for nested resource fields.

    Depending on ``nest_type`` the serialized value is either a bare id
    (``ID_ONLY``), a dict containing at least the id (``PARTIAL_OBJECT``),
    or a fully expanded dict (``FULL_OBJECT``).
    """

    ID_ONLY = 'id'
    PARTIAL_OBJECT = 'partial'
    FULL_OBJECT = 'full'

    def __init__(self, resource_class, nest_type, id_field='id', relative_path=None, *args, **kwargs):
        """
        :param resource_class: Resource class used to materialize the nested value.
        :param str nest_type: One of 'id', 'partial', 'full' depending on whether the resource is expanded or needs to
            be loaded separately.
        :param id_field: For types 'id' and 'partial', specifies which field will be used as the nested resource id
            when constructing the URI.
        :param relative_path: The relative path (from this resource) to the nested resource. May contain {id} which
            will be replaced with the resource id. E.g. '/nested-resource/{id}/'
        """
        super(NestedResourceField, self).__init__(*args, **kwargs)
        if nest_type == self.ID_ONLY and not relative_path:
            raise ValueError("Nested resources of type 'uri' must provide a relative_path argument.")
        if nest_type == self.PARTIAL_OBJECT and not (id_field and relative_path):
            raise ValueError("Nested resources of type 'partial' must specify 'id_field' and 'relative_path'")
        self.resource_class = resource_class
        self.type = nest_type
        self.id_field = id_field
        self.relative_path = relative_path

    def get_uri(self, obj, base_uri):
        """Build the absolute URI of the nested resource from *base_uri*.

        For ID_ONLY the value itself is the id; otherwise the id is read
        from ``obj[self.id_field]``.
        """
        # Ensure exactly one '/' joins the base URI and the relative path.
        if not base_uri.endswith('/') and not self.relative_path.startswith('/'):
            base_uri += '/'
        if self.type == self.ID_ONLY:
            resource_id = obj
        else:
            resource_id = obj.get(self.id_field)
        return ''.join((base_uri, self.relative_path.format(id=resource_id)))

    def to_python(self, value, resource):
        """Materialize the nested resource (FULL_OBJECT builds it in place;
        other types fetch it lazily via ``resource_class.get``)."""
        if value is None:
            return value
        if self.type in (self.PARTIAL_OBJECT, self.FULL_OBJECT) and not isinstance(value, dict):
            raise ValueError(
                "Expected nested resource to be of type 'dict', got '{0}'".format(value.__class__.__name__)
            )
        elif self.type == self.ID_ONLY and not isinstance(value, (six.string_types, int)):
            # NOTE(review): this message has a stray trailing quote
            # ("got type {0}'") — candidate fix, left as-is here.
            raise ValueError(
                "Expected nested resource to be a string or int, got type {0}'".format(value.__class__.__name__)
            )
        if self.type == self.FULL_OBJECT:
            nested = self.resource_class()
            nested.populate_field_values(value)
            if self.relative_path:
                nested._url = self.get_uri(value, resource._url)
            return nested
        else:
            # ID_ONLY / PARTIAL_OBJECT: load the resource from its URI,
            # reusing the parent resource's session.
            return self.resource_class.get(self.get_uri(value, resource._url), session=resource._session)

    def to_value(self, obj, resource):
        raise NotImplementedError('Serializing nested resources is not yet supported')
class ToOneField(NestedResourceField):
    """Single nested resource.

    Behaviorally identical to :class:`NestedResourceField`; the subclass
    exists to make resource declarations self-documenting.
    """
    pass
class ToManyField(NestedResourceField):
    """To-many nested resource field (a list of nested resources)."""

    def __iter__(self):
        """Declared so IDE inspections accept iterating this field; yields nothing."""
        return iter(())

    def to_python(self, value, resource):
        """Materialize each element via the single-resource conversion."""
        if value is None:
            return []
        if not isinstance(value, list):
            raise ValueError("Expected a list for 'to many' value, got '{0}'".format(value.__class__.__name__))
        parent = super(ToManyField, self)
        return [parent.to_python(item, resource) for item in value]

    def to_value(self, obj, resource):
        raise NotImplementedError('Serializing nested resources is not yet supported')
| 32.173285 | 118 | 0.623092 |
ace6b5995ec9073fb28040d7c7939aedb3c856ae | 3,772 | py | Python | pksampler/Small/Sample.py | patrickkidd/pksampler-0.3 | ffe5f1fde1d86052da34d9ee9c44934461c441e2 | [
"MIT"
] | null | null | null | pksampler/Small/Sample.py | patrickkidd/pksampler-0.3 | ffe5f1fde1d86052da34d9ee9c44934461c441e2 | [
"MIT"
] | null | null | null | pksampler/Small/Sample.py | patrickkidd/pksampler-0.3 | ffe5f1fde1d86052da34d9ee9c44934461c441e2 | [
"MIT"
] | null | null | null | """ Sample.py: A Sample widget. """
import os.path
from qt import *
import PKAudio
from sampleform import SampleForm
from Globals import DisplayTimer
# Module-level timer shared by all Sample widgets; each widget registers
# itself with it in __init__ and is driven via updateDisplay().
displayTimer = DisplayTimer()
class Wrapper(QObject):
    """QObject wrapper around PKAudio.Sample.

    Owns at most one loaded sample and a volume stage, wiring the
    sample -> volume -> mixer chain on the PKAudio driver. Most methods
    no-op (or return a neutral value) when no sample is loaded.

    SIGNALS:
        PYSIGNAL('playing'), () -- emitted from play() when a sample starts
    """
    def __init__(self):
        QObject.__init__(self)
        self.sample = None  # currently loaded PKAudio.Sample, or None
        self.volume = PKAudio.Volume('vol')  # volume stage the sample feeds
        self.d = PKAudio.Driver()
    def load(self, path):
        """Load the sample at *path*, replacing any previously loaded one."""
        # Tear down the old sample's wiring before creating the new one.
        if self.sample:
            self.sample.outputPort().disconnect()
            self.sample = None
        self.sample = PKAudio.Sample(path)
        self.sample.outputPort().connect(self.volume.inputPort())
        self.d.getMixer(0).connect(self.volume.outputPort())
    def unload(self):
        """Disconnect and drop the current sample.

        NOTE(review): assumes a sample is loaded; calling this twice (or
        before load()) raises AttributeError on None — confirm callers.
        """
        self.sample.outputPort().disconnect()
        self.sample = None
    def play(self):
        """Start playback and announce it via the 'playing' signal."""
        if self.sample:
            self.sample.play()
            self.emit(PYSIGNAL('playing'), () )
    def stop(self):
        """Stop playback (no-op when nothing is loaded)."""
        if self.sample:
            self.sample.stop()
    def cue(self):
        """Stop and rewind to the start without playing."""
        if self.sample:
            self.sample.stop()
            self.sample.reset()
    def atEnd(self):
        """Return True when playback is finished (or nothing is loaded)."""
        if self.sample:
            return self.sample.atEnd()
        else:
            return True
    def looping(self):
        """Return the sample's loop flag; False when nothing is loaded."""
        if self.sample:
            return self.sample.getLooping()
        else:
            return False
    def setLooping(self, a0):
        """Set the sample's loop flag (no-op when nothing is loaded)."""
        if self.sample:
            self.sample.setLooping(a0)
    def setVolume(self, v):
        """Set the volume stage level (applies even with no sample loaded)."""
        self.volume.setVolume(v)
    def pos(self):
        """Return the playback position; 0 when nothing is loaded."""
        if self.sample:
            return self.sample.pos()
        else:
            return 0
    def setPos(self, p):
        # NOTE(review): calls pos() with an argument — presumably PKAudio
        # uses a Qt-style overloaded getter/setter; confirm against PKAudio.
        if self.sample:
            self.sample.pos(p)
    def reset(self):
        """Rewind the sample to its start (no-op when nothing is loaded)."""
        if self.sample:
            self.sample.reset()
    def length(self):
        """Return the sample length in frames; 0 when nothing is loaded."""
        if self.sample:
            return self.sample.length()
        else:
            return 0
class Sample(SampleForm):
    """Sample widget: UI front-end for one Wrapper-managed audio sample.

    Registers with the module-level displayTimer, which periodically calls
    updateDisplay().

    SIGNALS:
        PYSIGNAL('delete'), (self,) -- emitted from slotDelete()
    """
    def __init__(self, parent=None, name=''):
        SampleForm.__init__(self, parent, name)
        self.setFrameStyle(QFrame.Raised)
        self.wrapper = Wrapper()  # audio back-end for this widget
        displayTimer.register(self)
    def unload(self):
        """Stop audio, release the sample, and stop receiving timer ticks."""
        self.wrapper.stop()
        self.wrapper.unload()
        displayTimer.deregister(self)
    def load(self, path):
        """Load *path*, reset the volume slider, and show the file name."""
        self.wrapper.load(path)
        self.volumeSlider.setValue(100)
        self.titleLabel.setText(os.path.basename(path))
    def updateDisplay(self):
        """Timer callback: auto-stop a finished non-looping sample and
        refresh the position read-out."""
        if self.wrapper.atEnd() and not self.wrapper.looping():
            self.wrapper.stop()
            self.wrapper.reset()
        # NOTE(review): displayed unit is frames/1000 — presumably an
        # approximation of seconds; confirm the sample rate assumption.
        frames = int(self.wrapper.pos())
        self.textLabel.setText(str(frames / 1000))
    def reparent(self, parent, point, showit=False):
        """Move the widget under *parent*, darkening its background slightly
        to match the new parent's palette."""
        if parent:
            c = parent.paletteBackgroundColor()
            self.setPaletteBackgroundColor(c.dark(110))
        SampleForm.reparent(self, parent, 0, point, showit)
    ## User controls
    def slotPlay(self):
        self.wrapper.play()
    def slotCue(self):
        self.wrapper.cue()
    def slotVolume(self, v):
        # Slider is inverted relative to the audio volume scale (0..127).
        self.wrapper.setVolume(127 - v)
    def slotDelete(self):
        """Release audio resources and ask the owner to remove this widget."""
        self.wrapper.unload()
        self.emit(PYSIGNAL('delete'), (self,))
    def slotLooping(self, a0):
        self.wrapper.setLooping(a0)
def main():
    """Demo entry point: start the PKAudio server and show one Sample widget."""
    app = QApplication([])
    PKAudio.start_server()
    widget = Sample()
    widget.load('/home/ajole/wav/track.wav')
    app.setMainWidget(widget)
    widget.show()
    app.exec_loop()
if __name__ == "__main__":
    main()
| 24.179487 | 65 | 0.551166 |
ace6b7ce078422e9f1761393ef0245d1e157015f | 2,310 | py | Python | src/python/pants/backend/terraform/lint/fmt.py | Eric-Arellano/pants | aaa9756bc4f2cc97bb97851a4295a0de85f374b1 | [
"Apache-2.0"
] | null | null | null | src/python/pants/backend/terraform/lint/fmt.py | Eric-Arellano/pants | aaa9756bc4f2cc97bb97851a4295a0de85f374b1 | [
"Apache-2.0"
] | 12 | 2022-01-06T23:20:22.000Z | 2022-03-17T05:06:37.000Z | src/python/pants/backend/terraform/lint/fmt.py | Eric-Arellano/pants | aaa9756bc4f2cc97bb97851a4295a0de85f374b1 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from dataclasses import dataclass
from typing import Iterable
from pants.backend.terraform.style import StyleRequest
from pants.backend.terraform.target_types import TerraformModuleSourcesField
from pants.core.goals.fmt import FmtResult, LanguageFmtResults, LanguageFmtTargets
from pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest
from pants.engine.fs import Digest, Snapshot
from pants.engine.internals.selectors import Get
from pants.engine.rules import collect_rules, rule
from pants.engine.unions import UnionMembership, UnionRule, union
@dataclass(frozen=True)
class TerraformFmtTargets(LanguageFmtTargets):
    # Marks targets with Terraform module sources as handled by this
    # language's `fmt` pipeline.
    required_fields = (TerraformModuleSourcesField,)
@union
class TerraformFmtRequest(StyleRequest):
    """Union base class: Terraform formatter backends register themselves via
    a UnionRule against this type."""
    pass
@rule
async def format_terraform_targets(
    terraform_fmt_targets: TerraformFmtTargets, union_membership: UnionMembership
) -> LanguageFmtResults:
    """Run every registered Terraform formatter over the targets' sources,
    feeding each formatter's output snapshot into the next one."""
    original_sources = await Get(
        SourceFiles,
        SourceFilesRequest(
            target[TerraformModuleSourcesField] for target in terraform_fmt_targets.targets
        ),
    )
    # Formatters are chained: each one starts from the previous result.
    prior_formatter_result = original_sources.snapshot
    results = []
    fmt_request_types: Iterable[type[StyleRequest]] = union_membership[TerraformFmtRequest]
    for fmt_request_type in fmt_request_types:
        # Only targets this formatter's field set applies to are included.
        request = fmt_request_type(
            (
                fmt_request_type.field_set_type.create(target)
                for target in terraform_fmt_targets.targets
                if fmt_request_type.field_set_type.is_applicable(target)
            ),
            prior_formatter_result=prior_formatter_result,
        )
        if not request.field_sets:
            continue
        result = await Get(FmtResult, TerraformFmtRequest, request)
        results.append(result)
        if result.did_change:
            # Hand the reformatted files to the next formatter in the chain.
            prior_formatter_result = await Get(Snapshot, Digest, result.output)
    return LanguageFmtResults(
        tuple(results),
        input=original_sources.snapshot.digest,
        output=prior_formatter_result.digest,
    )
def rules():
    """Return this module's rules plus the fmt-targets union registration."""
    registrations = list(collect_rules())
    registrations.append(UnionRule(LanguageFmtTargets, TerraformFmtTargets))
    return registrations
| 33.970588 | 91 | 0.739394 |
ace6b84fab2db582d8c1567ad73925e5725e1784 | 697 | py | Python | faster-rcnn-vgg16-fpn/model/utils/nms/_nms_gpu_post_py.py | fengkaibit/faster-rcnn_vgg16_fpn | 354efd4b5f4d4a42e9c92f48501e02cd7f0c0cdb | [
"MIT"
] | 13 | 2019-05-21T13:19:56.000Z | 2022-02-27T14:36:43.000Z | faster-rcnn-vgg16-fpn/model/utils/nms/_nms_gpu_post_py.py | fengkaibit/faster-rcnn_vgg16_fpn | 354efd4b5f4d4a42e9c92f48501e02cd7f0c0cdb | [
"MIT"
] | 2 | 2019-06-27T07:02:33.000Z | 2021-06-30T15:51:12.000Z | faster-rcnn-vgg16-fpn/model/utils/nms/_nms_gpu_post_py.py | fengkaibit/faster-rcnn_vgg16_fpn | 354efd4b5f4d4a42e9c92f48501e02cd7f0c0cdb | [
"MIT"
] | 4 | 2019-05-21T13:19:56.000Z | 2021-06-29T01:10:31.000Z | import numpy as np
def _nms_gpu_post(mask,
n_bbox,
threads_per_block,
col_blocks):
n_selection = 0
one_ull = np.array([1],dtype=np.uint64)
selection = np.zeros((n_bbox,), dtype=np.int32)
remv = np.zeros((col_blocks,), dtype=np.uint64)
for i in range(n_bbox):
nblock = i // threads_per_block
inblock = i % threads_per_block
if not (remv[nblock] & one_ull << inblock):
selection[n_selection] = i
n_selection += 1
index = i * col_blocks
for j in range(nblock, col_blocks):
remv[j] |= mask[index + j]
return selection, n_selection
| 29.041667 | 51 | 0.556671 |
ace6b90125727a542e8a49b3075aa03df07c04ff | 11,760 | py | Python | tests/sparseml/pytorch/sparsification/pruning/test_modifier_pruning_magnitude.py | anmarques/sparseml | c8352f1d896bfb1258add4e563d8163d3702b5ef | [
"Apache-2.0"
] | null | null | null | tests/sparseml/pytorch/sparsification/pruning/test_modifier_pruning_magnitude.py | anmarques/sparseml | c8352f1d896bfb1258add4e563d8163d3702b5ef | [
"Apache-2.0"
] | null | null | null | tests/sparseml/pytorch/sparsification/pruning/test_modifier_pruning_magnitude.py | anmarques/sparseml | c8352f1d896bfb1258add4e563d8163d3702b5ef | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
import torch
from flaky import flaky
from sparseml.pytorch.sparsification.pruning import (
GlobalMagnitudePruningModifier,
GMPruningModifier,
MagnitudePruningModifier,
)
from sparseml.pytorch.utils import tensor_sparsity
from sparseml.utils import FROM_PARAM_TOKEN
from tests.sparseml.pytorch.helpers import LinearNet
from tests.sparseml.pytorch.optim.test_modifier import (
ScheduledUpdateModifierTest,
create_optim_adam,
create_optim_sgd,
)
from tests.sparseml.pytorch.sparsification.pruning.helpers import (
pruning_modifier_serialization_vals_test,
state_dict_save_load_test,
)
from tests.sparseml.pytorch.helpers import ( # noqa isort:skip
test_epoch,
test_loss,
test_steps_per_epoch,
)
@flaky(max_runs=3, min_passes=2)
@pytest.mark.skipif(
    os.getenv("NM_ML_SKIP_PYTORCH_TESTS", False),
    reason="Skipping pytorch tests",
)
@pytest.mark.parametrize(
    "modifier_lambda",
    [
        lambda: GMPruningModifier(
            init_sparsity=0.05,
            final_sparsity=0.95,
            start_epoch=0.0,
            end_epoch=15.0,
            update_frequency=1.0,
            params=["re:.*weight"],
            inter_func="linear",
        ),
        lambda: GMPruningModifier(
            init_sparsity=FROM_PARAM_TOKEN,
            final_sparsity=0.95,
            start_epoch=0.0,
            end_epoch=15.0,
            update_frequency=1.0,
            params=["re:.*weight"],
            inter_func="linear",
        ),
        lambda: GlobalMagnitudePruningModifier(
            params="__ALL_PRUNABLE__",
            init_sparsity=0.05,
            final_sparsity=0.95,
            start_epoch=10.0,
            end_epoch=25.0,
            update_frequency=1.0,
            inter_func="cubic",
        ),
        lambda: GMPruningModifier(
            params=["seq.fc1.weight", "seq.fc2.weight"],
            init_sparsity=0.05,
            final_sparsity=0.95,
            start_epoch=10.0,
            end_epoch=25.0,
            update_frequency=1.0,
            inter_func="cubic",
            mask_type="block",
        ),
        lambda: GMPruningModifier(
            params=["__ALL_PRUNABLE__"],
            init_sparsity=0.8,
            final_sparsity=0.9,
            start_epoch=10.0,
            end_epoch=25.0,
            update_frequency=2.0,
            inter_func="cubic",
        ),
        lambda: GMPruningModifier(
            params=[],
            init_sparsity=0.05,
            final_sparsity={
                0.6: ["seq.fc1.weight", "seq.fc2.weight"],
                0.8: ["re:seq.block1.*weight"],
            },
            start_epoch=10.0,
            end_epoch=25.0,
            update_frequency=1.0,
            inter_func="cubic",
            mask_type="block",
        ),
    ],
    scope="function",
)
@pytest.mark.parametrize("model_lambda", [LinearNet], scope="function")
@pytest.mark.parametrize(
    "optim_lambda",
    [create_optim_sgd, create_optim_adam],
    scope="function",
)
class TestGMPruningModifier(ScheduledUpdateModifierTest):
    """Lifecycle and serialization tests for GMPruningModifier variants.

    Parametrized over several modifier configurations (fixed init sparsity,
    FROM_PARAM_TOKEN init, global magnitude, block masks, per-group final
    sparsities), a LinearNet model, and SGD/Adam optimizers.
    """

    def test_lifecycle(
        self,
        modifier_lambda,
        model_lambda,
        optim_lambda,
        test_steps_per_epoch,  # noqa: F811
    ):
        """Walk the modifier through its full schedule and verify that the
        applied sparsity ramps monotonically from init to final sparsity."""
        modifier = modifier_lambda()
        model = model_lambda()
        optimizer = optim_lambda(model)
        self.initialize_helper(modifier, model)
        if modifier.start_epoch > 0:
            assert modifier.applied_sparsity is None
            assert modifier._mask_creator == modifier._module_masks._mask_creator
        # check sparsity is not set before
        for epoch in range(int(modifier.start_epoch)):
            assert not modifier.update_ready(epoch, test_steps_per_epoch)
            assert modifier.applied_sparsity is None
        epoch = int(modifier.start_epoch)
        assert modifier.update_ready(epoch, test_steps_per_epoch)
        modifier.scheduled_update(model, optimizer, epoch, test_steps_per_epoch)
        applied_sparsities = modifier.applied_sparsity
        if not isinstance(applied_sparsities, list):
            applied_sparsities = [applied_sparsities]
        if not isinstance(modifier.init_sparsity, str):
            # numeric init sparsity: first update applies exactly that value
            assert all(
                applied_sparsity == modifier.init_sparsity
                for applied_sparsity in applied_sparsities
            )
        else:
            # FROM_PARAM_TOKEN: init sparsity is read off the parameters
            assert len(modifier._init_sparsity) == len(modifier.module_masks.layers)
            for idx, param in enumerate(modifier.module_masks.params_data):
                assert modifier._init_sparsity[idx] == tensor_sparsity(param).item()
        last_sparsities = applied_sparsities
        # check forward pass
        input_shape = model_lambda.layer_descs()[0].input_size
        test_batch = torch.randn(10, *input_shape)
        _ = model(test_batch)
        # sparsity must strictly increase on every scheduled update
        while epoch < modifier.end_epoch - modifier.update_frequency:
            epoch += modifier.update_frequency
            assert modifier.update_ready(epoch, test_steps_per_epoch)
            modifier.scheduled_update(model, optimizer, epoch, test_steps_per_epoch)
            applied_sparsities = modifier.applied_sparsity
            if not isinstance(applied_sparsities, list):
                applied_sparsities = [applied_sparsities]
            assert all(
                applied_sparsity > last_sparsity
                for applied_sparsity, last_sparsity in zip(
                    applied_sparsities, last_sparsities
                )
            )
            last_sparsities = applied_sparsities
            _ = model(test_batch)  # check forward pass
        epoch = int(modifier.end_epoch)
        assert modifier.update_ready(epoch, test_steps_per_epoch)
        modifier.scheduled_update(model, optimizer, epoch, test_steps_per_epoch)

        def _test_final_sparsity_applied():
            # final_sparsity may be a float or a {sparsity: [params]} mapping
            final_sparsities = (
                [modifier.final_sparsity]
                if isinstance(modifier.final_sparsity, float)
                else modifier.final_sparsity
            )
            assert all(
                sparsity in final_sparsities for sparsity in modifier.applied_sparsity
            )

        _test_final_sparsity_applied()
        # past end_epoch no more updates fire and the sparsity stays final
        for epoch in range(int(modifier.end_epoch) + 1, int(modifier.end_epoch) + 6):
            assert not modifier.update_ready(epoch, test_steps_per_epoch)
            _test_final_sparsity_applied()

    def test_state_dict_save_load(
        self,
        modifier_lambda,
        model_lambda,
        optim_lambda,
        test_steps_per_epoch,  # noqa: F811
    ):
        """Masks must survive a state_dict save/load round trip."""
        state_dict_save_load_test(
            self,
            modifier_lambda,
            model_lambda,
            optim_lambda,
            test_steps_per_epoch,
            True,
        )
@pytest.mark.parametrize(
    "params,init_sparsity,final_sparsity",
    [
        (["re:.*weight"], 0.05, 0.8),
        (
            [],
            0.05,
            {0.7: ["param1"], 0.8: ["param2", "param3"], 0.9: ["param4", "param5"]},
        ),
        (["re:.*weight"], FROM_PARAM_TOKEN, 0.8),
        (
            [],
            FROM_PARAM_TOKEN,
            {0.7: ["param1"], 0.8: ["param2", "param3"], 0.9: ["param4", "param5"]},
        ),
    ],
)
@pytest.mark.skipif(
    os.getenv("NM_ML_SKIP_PYTORCH_TESTS", False),
    reason="Skipping pytorch tests",
)
def test_gm_pruning_yaml(params, init_sparsity, final_sparsity):
    """GMPruningModifier built from yaml matches one built directly, and
    survives a serialize/deserialize round trip, for float and grouped
    final sparsities and for FROM_PARAM_TOKEN init sparsity."""
    start_epoch = 5.0
    end_epoch = 15.0
    update_frequency = 1.0
    inter_func = "cubic"
    mask_type = "filter"
    yaml_str = f"""
    !GMPruningModifier
        init_sparsity: {init_sparsity}
        final_sparsity: {final_sparsity}
        start_epoch: {start_epoch}
        end_epoch: {end_epoch}
        update_frequency: {update_frequency}
        params: {params}
        inter_func: {inter_func}
        mask_type: {mask_type}
    """
    yaml_modifier = GMPruningModifier.load_obj(yaml_str)  # type: GMPruningModifier
    serialized_modifier = GMPruningModifier.load_obj(
        str(yaml_modifier)
    )  # type: GMPruningModifier
    # same settings via the constructor, for comparison against the yaml paths
    obj_modifier = GMPruningModifier(
        init_sparsity=init_sparsity,
        final_sparsity=final_sparsity,
        start_epoch=start_epoch,
        end_epoch=end_epoch,
        update_frequency=update_frequency,
        params=params,
        inter_func=inter_func,
        mask_type=mask_type,
    )
    assert isinstance(yaml_modifier, GMPruningModifier)
    pruning_modifier_serialization_vals_test(
        yaml_modifier, serialized_modifier, obj_modifier
    )
def test_magnitude_pruning_yaml():
    """MagnitudePruningModifier built from yaml matches one built directly
    and survives a serialize/deserialize round trip."""
    # single source of truth for the settings used by both construction paths
    cfg = dict(
        init_sparsity=0.05,
        final_sparsity=0.8,
        start_epoch=5.0,
        end_epoch=15.0,
        update_frequency=1.0,
        params="__ALL_PRUNABLE__",
        inter_func="cubic",
        mask_type="block",
    )
    yaml_str = """
    !MagnitudePruningModifier
        init_sparsity: {init_sparsity}
        final_sparsity: {final_sparsity}
        start_epoch: {start_epoch}
        end_epoch: {end_epoch}
        update_frequency: {update_frequency}
        params: {params}
        inter_func: {inter_func}
        mask_type: {mask_type}
    """.format(**cfg)
    # three constructions: parsed from yaml, re-parsed from its own
    # serialization, and built directly from the settings
    from_yaml = MagnitudePruningModifier.load_obj(
        yaml_str
    )  # type: MagnitudePruningModifier
    round_tripped = MagnitudePruningModifier.load_obj(
        str(from_yaml)
    )  # type: MagnitudePruningModifier
    from_ctor = MagnitudePruningModifier(**cfg)
    assert isinstance(from_yaml, MagnitudePruningModifier)
    pruning_modifier_serialization_vals_test(from_yaml, round_tripped, from_ctor)
def test_global_magnitude_pruning_yaml():
    """GlobalMagnitudePruningModifier built from yaml matches one built
    directly and survives a serialize/deserialize round trip."""
    # single source of truth for the settings used by both construction paths
    cfg = dict(
        init_sparsity=0.05,
        final_sparsity=0.8,
        start_epoch=5.0,
        end_epoch=15.0,
        update_frequency=1.0,
        params="__ALL_PRUNABLE__",
        inter_func="cubic",
        mask_type="filter",
    )
    yaml_str = """
    !GlobalMagnitudePruningModifier
        init_sparsity: {init_sparsity}
        final_sparsity: {final_sparsity}
        start_epoch: {start_epoch}
        end_epoch: {end_epoch}
        update_frequency: {update_frequency}
        params: {params}
        inter_func: {inter_func}
        mask_type: {mask_type}
    """.format(**cfg)
    # three constructions: parsed from yaml, re-parsed from its own
    # serialization, and built directly from the settings
    from_yaml = GlobalMagnitudePruningModifier.load_obj(yaml_str)
    round_tripped = GlobalMagnitudePruningModifier.load_obj(
        str(from_yaml)
    )  # type: GlobalMagnitudePruningModifier
    from_ctor = GlobalMagnitudePruningModifier(**cfg)
    assert isinstance(from_yaml, GlobalMagnitudePruningModifier)
    pruning_modifier_serialization_vals_test(from_yaml, round_tripped, from_ctor)
| 32.131148 | 86 | 0.640476 |
ace6b9990acf92b3f567d5ed31f671982d4fa5fa | 6,918 | py | Python | core/controls/weird.py | intercellar/FLOOR-E | 3ab23fb92375e5fe74254f84b1ad10e07932ff40 | [
"MIT"
] | null | null | null | core/controls/weird.py | intercellar/FLOOR-E | 3ab23fb92375e5fe74254f84b1ad10e07932ff40 | [
"MIT"
] | null | null | null | core/controls/weird.py | intercellar/FLOOR-E | 3ab23fb92375e5fe74254f84b1ad10e07932ff40 | [
"MIT"
] | null | null | null | # Simple two DC motor robot class. Exposes a simple LOGO turtle-like API for
# moving a robot forward, backward, and turning. See RobotTest.py for an
# example of using this class.
# Author: Tony DiCola
# License: MIT License https://opensource.org/licenses/MIT
import time
import atexit
from Adafruit_MotorHAT import Adafruit_MotorHAT
class Robot(object):
    """Simple two-DC-motor robot driven through an Adafruit Motor HAT.

    Exposes a LOGO-turtle-like API for moving forward/backward and turning,
    plus per-side tread control and a light output driven through a third
    motor channel.
    """

    def __init__(self, addr=0x60, left_id=4, right_id=3, light1_id=2, left_trim=0, right_trim=0,
                 stop_at_exit=True):
        """Create an instance of the robot. Can specify the following optional
        parameters:
          - addr: The I2C address of the motor HAT, default is 0x60.
          - left_id: The ID of the left motor, default is 4.
          - right_id: The ID of the right motor, default is 3.
          - light1_id: The ID of the light channel, default is 2.
          - left_trim: Amount to offset the speed of the left motor, can be
                       positive or negative and is useful for matching the
                       speed of both motors. Default is 0.
          - right_trim: Amount to offset the speed of the right motor (see above).
          - stop_at_exit: Boolean to indicate if the motors should stop on
                          program exit. Default is True (highly recommended to
                          keep this value to prevent damage to the bot on
                          program crash!).
        """
        # Initialize motor HAT, the two drive motors, and the light channel.
        self._mh = Adafruit_MotorHAT(addr)
        self._left = self._mh.getMotor(left_id)
        self._right = self._mh.getMotor(right_id)
        self._light1 = self._mh.getMotor(light1_id)
        self._left_trim = left_trim
        self._right_trim = right_trim
        # Start with all channels turned off.
        self._left.run(Adafruit_MotorHAT.RELEASE)
        self._right.run(Adafruit_MotorHAT.RELEASE)
        self._light1.run(Adafruit_MotorHAT.RELEASE)
        # Configure all motors to stop at program exit if desired.
        if stop_at_exit:
            atexit.register(self.stop)

    def _left_speed(self, speed):
        """Set the speed of the left motor, taking into account its trim offset.
        """
        assert 0 <= speed <= 255, 'Speed must be a value between 0 to 255 inclusive!'
        speed += self._left_trim
        speed = max(0, min(255, speed))  # Constrain speed to 0-255 after trimming.
        self._left.setSpeed(speed)

    def _right_speed(self, speed):
        """Set the speed of the right motor, taking into account its trim offset.
        """
        assert 0 <= speed <= 255, 'Speed must be a value between 0 to 255 inclusive!'
        speed += self._right_trim
        speed = max(0, min(255, speed))  # Constrain speed to 0-255 after trimming.
        self._right.setSpeed(speed)

    def _run_for(self, seconds):
        """If a duration was given, hold the current motion for that many
        seconds and then stop all movement; otherwise return immediately.
        """
        if seconds is not None:
            time.sleep(seconds)
            self.stop()

    def stop(self):
        """Stop all movement.

        NOTE(review): only the drive motors are released here, not the light
        channel -- presumably intentional so the light state survives a
        movement stop; confirm before changing.
        """
        self._left.run(Adafruit_MotorHAT.RELEASE)
        self._right.run(Adafruit_MotorHAT.RELEASE)

    def forward(self, speed, seconds=None):
        """Move forward at the specified speed (0-255). Will start moving
        forward and return unless a seconds value is specified, in which
        case the robot will move forward for that amount of time and then stop.
        """
        self._left_speed(speed)
        self._right_speed(speed)
        self._left.run(Adafruit_MotorHAT.FORWARD)
        self._right.run(Adafruit_MotorHAT.FORWARD)
        self._run_for(seconds)

    def backward(self, speed, seconds=None):
        """Move backward at the specified speed (0-255). Will start moving
        backward and return unless a seconds value is specified, in which
        case the robot will move backward for that amount of time and then stop.
        """
        self._left_speed(speed)
        self._right_speed(speed)
        self._left.run(Adafruit_MotorHAT.BACKWARD)
        self._right.run(Adafruit_MotorHAT.BACKWARD)
        self._run_for(seconds)

    def right(self, speed, seconds=None):
        """Spin to the right at the specified speed. Will start spinning and
        return unless a seconds value is specified, in which case the robot
        will spin for that amount of time and then stop.
        """
        self._left_speed(speed)
        self._right_speed(speed)
        self._left.run(Adafruit_MotorHAT.FORWARD)
        self._right.run(Adafruit_MotorHAT.BACKWARD)
        self._run_for(seconds)

    def left(self, speed, seconds=None):
        """Spin to the left at the specified speed. Will start spinning and
        return unless a seconds value is specified, in which case the robot
        will spin for that amount of time and then stop.
        """
        self._left_speed(speed)
        self._right_speed(speed)
        self._left.run(Adafruit_MotorHAT.BACKWARD)
        self._right.run(Adafruit_MotorHAT.FORWARD)
        self._run_for(seconds)

    """Ben Made These Functions"""

    def left_forward(self, speed, seconds=None):
        """Move only the left set of treads forward."""
        self._left_speed(speed)
        self._left.run(Adafruit_MotorHAT.FORWARD)
        self._run_for(seconds)

    def left_backward(self, speed, seconds=None):
        """Move only the left set of treads backward.

        Bug fix: the parameter was previously misspelled ``sedonds`` while the
        body referenced ``seconds`` and ``time.sleep(seonds)``, so any call to
        this method raised NameError.
        """
        self._left_speed(speed)
        self._left.run(Adafruit_MotorHAT.BACKWARD)
        self._run_for(seconds)

    def right_forward(self, speed, seconds=None):
        """Move only the right set of treads forward."""
        self._right_speed(speed)
        self._right.run(Adafruit_MotorHAT.FORWARD)
        self._run_for(seconds)

    def right_backward(self, speed, seconds=None):
        """Move only the right set of treads backward."""
        self._right_speed(speed)
        self._right.run(Adafruit_MotorHAT.BACKWARD)
        self._run_for(seconds)

    def set_light1(self, value, seconds=None):
        """Set the light channel PWM to value, clamped to 0-190.

        A value of 0 releases the channel (light off) and returns immediately.
        """
        value = max(0, min(190, value))
        if value == 0:
            self._light1.run(Adafruit_MotorHAT.RELEASE)
            return
        self._light1.setSpeed(value)
        self._light1.run(Adafruit_MotorHAT.FORWARD)
        self._run_for(seconds)
ace6b99fc5c53ab3d4e04a7aab093c88656caddc | 12,218 | py | Python | coolamqp/attaches/declarer.py | smok-serwis/coolamqp | d57ada0d478bd1ca94743ae341f6819ba85ea253 | [
"MIT"
] | 4 | 2018-06-20T13:59:35.000Z | 2021-08-31T12:03:59.000Z | coolamqp/attaches/declarer.py | piotrmaslanka/coolamqp | d57ada0d478bd1ca94743ae341f6819ba85ea253 | [
"MIT"
] | 33 | 2016-06-03T11:41:09.000Z | 2020-07-09T17:48:28.000Z | coolamqp/attaches/declarer.py | smok-serwis/coolamqp | d57ada0d478bd1ca94743ae341f6819ba85ea253 | [
"MIT"
] | null | null | null | # coding=UTF-8
"""
queue.declare, exchange.declare and that shit
"""
from __future__ import print_function, absolute_import, division
import collections
import logging
from concurrent.futures import Future
from coolamqp.attaches.channeler import Channeler, ST_ONLINE
from coolamqp.attaches.utils import Synchronized
from coolamqp.exceptions import AMQPError, ConnectionDead
from coolamqp.framing.definitions import ChannelOpenOk, ExchangeDeclare, \
ExchangeDeclareOk, QueueDeclare, \
QueueDeclareOk, ChannelClose, QueueDelete, QueueDeleteOk, QueueBind, QueueBindOk
from coolamqp.objects import Exchange, Queue, Callable, QueueBind as CommandQueueBind
logger = logging.getLogger(__name__)
class Operation(object):
    """
    An abstract channel operation: declare of an exchange or queue, or a bind.

    This class possesses the means to carry itself out (via :meth:`perform`)
    and to report back status through an optional Future and opentracing spans.
    Represents the op currently carried out.

    This will register its own callback. Please call on_connection_dead when
    the connection is broken to fail futures with ConnectionDead, since this
    object does not watch for fails itself.
    """
    __slots__ = ('done', 'fut', 'declarer', 'obj', 'on_done', 'parent_span', 'enqueued_span',
                 'processing_span')

    def __init__(self, declarer, obj, fut=None, span_parent=None, span_enqueued=None):
        """
        :param declarer: owning Declarer, used to send methods and report back
        :param obj: Exchange, Queue or QueueBind command to carry out
        :param fut: optional Future resolved when the op completes
        :param span_parent: optional opentracing parent span
        :param span_enqueued: optional 'Enqueued' child span
        """
        self.done = False
        self.fut = fut
        self.parent_span = span_parent
        self.enqueued_span = span_enqueued
        self.processing_span = None
        self.declarer = declarer
        self.obj = obj
        self.on_done = Callable()  # callable/0

    def span_exception(self, exception):
        """Mark any open spans as failed with *exception* and close them all.

        Bug fix: the original tested ``if self.enqueued_span is None:`` and
        then called methods on ``self.enqueued_span`` inside that branch,
        which raised AttributeError whenever the span was actually absent and
        skipped error-tagging when it was present. The check is now inverted
        to match the (correct) handling of ``processing_span`` below.
        """
        if self.parent_span is not None:
            if self.enqueued_span is not None:
                from opentracing import tags, logs
                self.enqueued_span.set_tag(tags.ERROR, True)
                self.enqueued_span.log_kv({logs.EVENT: tags.ERROR,
                                           logs.ERROR_KIND: exception,
                                           logs.ERROR_OBJECT: exception})
                self.enqueued_span.finish()
                self.enqueued_span = None
            if self.processing_span is not None:
                from opentracing import tags, logs
                self.processing_span.set_tag(tags.ERROR, True)
                self.processing_span.log_kv({logs.EVENT: tags.ERROR,
                                             logs.ERROR_KIND: exception,
                                             logs.ERROR_OBJECT: exception})
                self.processing_span.finish()
                self.processing_span = None
            self.parent_span.finish()

    def on_connection_dead(self):
        """To be called by declarer when our link fails"""
        if self.fut is not None:
            err = ConnectionDead()
            self.span_exception(err)
            self.fut.set_exception(err)
            self.fut = None

    def span_begin(self):
        """Finish the 'Enqueued' span (if any) and start the 'Declaring' span."""
        if self.enqueued_span is not None:
            self.enqueued_span.finish()
        from opentracing import follows_from
        self.processing_span = self.declarer.cluster.tracer.start_span('Declaring',
                                                                       child_of=self.parent_span,
                                                                       references=follows_from(self.enqueued_span))
        self.enqueued_span = None

    def span_starting(self):
        # Historical duplicate of span_begin; kept for interface compatibility.
        self.span_begin()

    def span_finished(self):
        """Finish the 'Declaring' span, if one is open."""
        if self.processing_span is not None:
            self.processing_span.finish()
            self.processing_span = None

    def perform(self):
        """Attempt to perform this op by sending the matching AMQP method."""
        self.span_begin()
        obj = self.obj
        if isinstance(obj, Exchange):
            self.declarer.method_and_watch(
                ExchangeDeclare(self.obj.name.encode('utf8'), obj.type, False,
                                obj.durable,
                                obj.auto_delete, False, False, []),
                (ExchangeDeclareOk, ChannelClose),
                self._callback)
        elif isinstance(obj, Queue):
            self.declarer.method_and_watch(
                QueueDeclare(obj.name, False, obj.durable, obj.exclusive,
                             obj.auto_delete, False, []),
                (QueueDeclareOk, ChannelClose),
                self._callback)
        elif isinstance(obj, CommandQueueBind):
            self.declarer.method_and_watch(
                QueueBind(obj.queue, obj.exchange, obj.routing_key, False, []),
                (QueueBindOk, ChannelClose),
                self._callback
            )

    def _callback(self, payload):
        """Handle the broker's reply: a success frame or a ChannelClose."""
        assert not self.done
        self.done = True
        if isinstance(payload, ChannelClose):
            err = AMQPError(payload)
            self.span_exception(err)
            if self.fut is not None:
                self.fut.set_exception(err)
                self.fut = None
            else:
                # something that had no Future failed. Is it in declared?
                if self.obj in self.declarer.declared:
                    self.declarer.declared.remove(
                        self.obj)  # todo access not threadsafe
                    self.declarer.on_discard(self.obj)
        else:
            # a broker-named (anonymous) queue learns its name from the reply
            if isinstance(payload, QueueDeclareOk) and self.obj.anonymous:
                self.obj.name = payload.queue
                self.obj.anonymous = False
            self.span_finished()
            if self.fut is not None:
                self.fut.set_result(None)
                self.fut = None
        self.declarer.on_operation_done()
class DeleteQueue(Operation):
    """Operation that deletes a queue instead of declaring it."""

    def __init__(self, declarer, queue, fut, span_parent=None, span_enqueued=None):
        super(DeleteQueue, self).__init__(declarer, queue, fut=fut,
                                          span_parent=span_parent,
                                          span_enqueued=span_enqueued)

    def perform(self):
        """Send queue.delete for our queue and wait for the reply."""
        self.declarer.method_and_watch(
            QueueDelete(self.obj.name, False, False, False),
            (QueueDeleteOk, ChannelClose),
            self._callback)

    def _callback(self, payload):
        """Resolve the future on QueueDeleteOk, fail it on ChannelClose."""
        assert not self.done
        self.done = True
        if not isinstance(payload, ChannelClose):
            self.span_finished()
            self.fut.set_result(None)
        else:
            err = AMQPError(payload)
            self.span_exception(err)
            self.fut.set_exception(err)
        self.declarer.on_operation_done()
class Declarer(Channeler, Synchronized):
    """
    Channeler that carries out declare/delete/bind operations on its channel.

    This also maintains a list of declared queues/exchanges, and redeclares them on each reconnect.
    """
    def __init__(self, cluster):
        """
        Create a new declarer.

        :param cluster: Cluster instance; its tracer is used for opentracing spans
        """
        Channeler.__init__(self)
        Synchronized.__init__(self)
        self.cluster = cluster
        self.declared = set()  # since Queues and Exchanges are hashable...
        # anonymous queues aren't, but we reject those
        # persistent
        self.left_to_declare = collections.deque()  # since last disconnect. persistent+transient
        # deque of Operation objects
        self.on_discard = Callable()  # callable/1, with discarded elements
        self.in_process = None  # Operation instance that is being progressed right now
    def on_close(self, payload=None):
        # we are interested in ChannelClose during order execution,
        # because that means that operation was illegal, and must
        # be discarded/exceptioned on future
        if payload is None:
            # Hard failure: the connection itself died. Fail everything in
            # flight, then requeue persistent declarations for the reconnect.
            if self.in_process is not None:
                self.in_process.on_connection_dead()
                self.in_process = None
            # connection down, panic mode engaged.
            while len(self.left_to_declare) > 0:
                self.left_to_declare.pop().on_connection_dead()
            # recast current declarations as new operations
            for dec in self.declared:
                self.left_to_declare.append(Operation(self, dec))
            super(Declarer, self).on_close()
            return
        elif isinstance(payload, ChannelClose):
            # Looks like a soft fail - we may try to survive that
            old_con = self.connection
            super(Declarer, self).on_close()
            # But, we are super optimists. If we are not cancelled, and connection is ok,
            # we must reestablish
            if old_con.state == ST_ONLINE and not self.cancelled:
                self.attach(old_con)
        else:
            super(Declarer, self).on_close(payload)
    def on_operation_done(self):
        """
        Called by operation, when it's complete (whether success or fail).
        Not called when operation fails due to DC
        """
        self.in_process = None
        self._do_operations()
    def delete_queue(self, queue, span=None):
        """
        Delete a queue.
        Future is returned, so that user knows when it happens. This may fail.
        Returned Future is already running, and so cannot be cancelled.
        If the queue is in declared consumer list, it will not be removed.
        :param queue: Queue instance
        :param span: optional span, if opentracing is installed
        :return: a Future
        """
        # NOTE(review): span is accepted but not forwarded to DeleteQueue,
        # so queue deletion is never traced -- confirm whether intended.
        fut = Future()
        fut.set_running_or_notify_cancel()
        self.left_to_declare.append(DeleteQueue(self, queue, fut))
        self._do_operations()
        return fut
    def declare(self, obj, persistent=False, span=None):
        """
        Schedule to have an object declared.
        Future is returned, so that user knows when it happens.
        Returned Future is already running, and so cannot be cancelled.
        Exchange declarations never fail.
            Of course they do, but you will be told that it succeeded. This is by design,
            and due to how AMQP works.
        Queue declarations CAN fail.
        Note that if re-declaring these fails, they will be silently discarded.
        You can subscribe an on_discard(Exchange | Queue) here.
        :param obj: Exchange or Queue instance
        :param persistent: will be redeclared upon disconnect. To remove, use "undeclare"
        :param span: span if opentracing is installed
        :return: a Future instance
        :raise ValueError: tried to declare anonymous queue
        """
        if span is not None:
            enqueued_span = self.cluster.tracer.start_span('Enqueued', child_of=span)
        else:
            # span is already None here; assignment kept for explicitness
            span = None
            enqueued_span = None
        fut = Future()
        fut.set_running_or_notify_cancel()
        if persistent:
            if obj not in self.declared:
                self.declared.add(obj)  # todo access not threadsafe
        self.left_to_declare.append(Operation(self, obj, fut, span, enqueued_span))
        self._do_operations()
        return fut
    @Synchronized.synchronized
    def _do_operations(self):
        """
        Attempt to execute something.
        To be called when it's possible that something can be done.
        Only one operation is in flight at a time.
        """
        if (self.state != ST_ONLINE) or len(self.left_to_declare) == 0 or (
                self.in_process is not None):
            return
        self.in_process = self.left_to_declare.popleft()
        self.in_process.perform()
    def on_setup(self, payload):
        # Channel opened: go online and drain the pending operation queue.
        if isinstance(payload, ChannelOpenOk):
            assert self.in_process is None
            self.state = ST_ONLINE
            self._do_operations()
ace6b9f3e8b0363666da5d96858b3864213aeabe | 14,210 | py | Python | mmpose/models/detectors/pose_lifter.py | ALISCIFP/mmpose | 2433e3dbcc44baa2253e2a7c748ba0216937933e | [
"Apache-2.0"
] | 1 | 2022-02-13T12:27:40.000Z | 2022-02-13T12:27:40.000Z | mmpose/models/detectors/pose_lifter.py | alikaz3mi/mmpose | 5c8ba2657b26ee9487451c45ba794823fa607cfd | [
"Apache-2.0"
] | 1 | 2022-03-13T12:52:47.000Z | 2022-03-13T12:52:47.000Z | mmpose/models/detectors/pose_lifter.py | alikaz3mi/mmpose | 5c8ba2657b26ee9487451c45ba794823fa607cfd | [
"Apache-2.0"
] | null | null | null | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
import mmcv
import numpy as np
from mmcv.utils.misc import deprecated_api_warning
from mmpose.core import imshow_bboxes, imshow_keypoints, imshow_keypoints_3d
from .. import builder
from ..builder import POSENETS
from .base import BasePose
try:
from mmcv.runner import auto_fp16
except ImportError:
warnings.warn('auto_fp16 from mmpose will be deprecated from v0.15.0'
'Please install mmcv>=1.1.4')
from mmpose.core import auto_fp16
@POSENETS.register_module()
class PoseLifter(BasePose):
"""Pose lifter that lifts 2D pose to 3D pose.
The basic model is a pose model that predicts root-relative pose. If
traj_head is not None, a trajectory model that predicts absolute root joint
position is also built.
Args:
backbone (dict): Config for the backbone of pose model.
neck (dict|None): Config for the neck of pose model.
keypoint_head (dict|None): Config for the head of pose model.
traj_backbone (dict|None): Config for the backbone of trajectory model.
If traj_backbone is None and traj_head is not None, trajectory
model will share backbone with pose model.
traj_neck (dict|None): Config for the neck of trajectory model.
traj_head (dict|None): Config for the head of trajectory model.
loss_semi (dict|None): Config for semi-supervision loss.
train_cfg (dict|None): Config for keypoint head during training.
test_cfg (dict|None): Config for keypoint head during testing.
pretrained (str|None): Path to pretrained weights.
"""
    def __init__(self,
                 backbone,
                 neck=None,
                 keypoint_head=None,
                 traj_backbone=None,
                 traj_neck=None,
                 traj_head=None,
                 loss_semi=None,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None):
        """Build the pose model, the optional trajectory model and the
        optional semi-supervision loss from their configs.

        Note: attribute *existence* matters here -- the ``with_*`` properties
        use ``hasattr``, so optional components are only assigned when their
        config is given.
        """
        super().__init__()
        self.fp16_enabled = False

        self.train_cfg = train_cfg
        self.test_cfg = test_cfg

        # pose model
        self.backbone = builder.build_backbone(backbone)

        if neck is not None:
            self.neck = builder.build_neck(neck)

        if keypoint_head is not None:
            # propagate train/test cfg into the head config before building
            keypoint_head['train_cfg'] = train_cfg
            keypoint_head['test_cfg'] = test_cfg
            self.keypoint_head = builder.build_head(keypoint_head)

        # trajectory model (only built when a traj_head config is provided)
        if traj_head is not None:
            self.traj_head = builder.build_head(traj_head)

            if traj_backbone is not None:
                self.traj_backbone = builder.build_backbone(traj_backbone)
            else:
                # share the pose backbone when no dedicated one is configured
                self.traj_backbone = self.backbone

            if traj_neck is not None:
                self.traj_neck = builder.build_neck(traj_neck)

        # semi-supervised learning requires both pose and trajectory heads
        self.semi = loss_semi is not None
        if self.semi:
            assert keypoint_head is not None and traj_head is not None
            self.loss_semi = builder.build_loss(loss_semi)

        self.init_weights(pretrained=pretrained)
    @property
    def with_neck(self):
        """bool: whether the pose model has a neck (``self.neck`` was built)."""
        return hasattr(self, 'neck')
    @property
    def with_keypoint(self):
        """bool: whether the pose model has a keypoint head."""
        return hasattr(self, 'keypoint_head')
    @property
    def with_traj_backbone(self):
        """bool: whether a trajectory backbone exists (may be shared with
        the pose backbone -- see ``__init__``)."""
        return hasattr(self, 'traj_backbone')
    @property
    def with_traj_neck(self):
        """bool: whether the trajectory model has a neck."""
        return hasattr(self, 'traj_neck')
    @property
    def with_traj(self):
        """bool: whether the trajectory head (and thus trajectory model) exists."""
        return hasattr(self, 'traj_head')
@property
def causal(self):
if hasattr(self.backbone, 'causal'):
return self.backbone.causal
else:
raise AttributeError('A PoseLifter\'s backbone should have '
'the bool attribute "causal" to indicate if'
'it performs causal inference.')
    def init_weights(self, pretrained=None):
        """Weight initialization for model.

        Args:
            pretrained (str|None): path to pretrained weights, forwarded to
                the backbones only.
        """
        self.backbone.init_weights(pretrained)
        if self.with_neck:
            self.neck.init_weights()
        if self.with_keypoint:
            self.keypoint_head.init_weights()
        if self.with_traj_backbone:
            # when the trajectory model shares the pose backbone (see
            # ``__init__``), this re-initializes the same module a second time
            self.traj_backbone.init_weights(pretrained)
        if self.with_traj_neck:
            self.traj_neck.init_weights()
        if self.with_traj:
            self.traj_head.init_weights()
    @auto_fp16(apply_to=('input', ))
    def forward(self,
                input,
                target=None,
                target_weight=None,
                metas=None,
                return_loss=True,
                **kwargs):
        """Calls either forward_train or forward_test depending on whether
        return_loss=True.

        Note:
            - batch_size: N
            - num_input_keypoints: Ki
            - input_keypoint_dim: Ci
            - input_sequence_len: Ti
            - num_output_keypoints: Ko
            - output_keypoint_dim: Co
            - output_sequence_len: To

        Args:
            input (torch.Tensor[NxKixCixTi]): Input keypoint coordinates.
            target (torch.Tensor[NxKoxCoxTo]): Output keypoint coordinates.
                Defaults to None.
            target_weight (torch.Tensor[NxKox1]): Weights across different
                joint types. Defaults to None.
            metas (list(dict)): Information about data augmentation
            return_loss (bool): Option to `return loss`. `return loss=True`
                for training, `return loss=False` for validation & test.

        Returns:
            dict|Tensor: If `return_loss` is true, return losses. \
                Otherwise return predicted poses.
        """
        if return_loss:
            return self.forward_train(input, target, target_weight, metas,
                                      **kwargs)
        else:
            return self.forward_test(input, metas, **kwargs)
    def forward_train(self, input, target, target_weight, metas, **kwargs):
        """Defines the computation performed at every call when training.

        Returns a dict of losses; when semi-supervised learning is enabled,
        kwargs must provide 'unlabeled_input', 'unlabeled_target_2d' and
        'intrinsics' (and 'traj_target' whenever the trajectory head exists).
        """
        assert input.size(0) == len(metas)
        # supervised learning
        # pose model
        features = self.backbone(input)
        if self.with_neck:
            features = self.neck(features)
        if self.with_keypoint:
            output = self.keypoint_head(features)

        losses = dict()
        if self.with_keypoint:
            keypoint_losses = self.keypoint_head.get_loss(
                output, target, target_weight)
            keypoint_accuracy = self.keypoint_head.get_accuracy(
                output, target, target_weight, metas)
            losses.update(keypoint_losses)
            losses.update(keypoint_accuracy)

        # trajectory model
        if self.with_traj:
            traj_features = self.traj_backbone(input)
            if self.with_traj_neck:
                traj_features = self.traj_neck(traj_features)
            traj_output = self.traj_head(traj_features)
            traj_losses = self.traj_head.get_loss(traj_output,
                                                  kwargs['traj_target'], None)
            losses.update(traj_losses)

        # semi-supervised learning: run both models on the unlabeled batch
        # and score the projections against the unlabeled 2D targets
        if self.semi:
            ul_input = kwargs['unlabeled_input']
            ul_features = self.backbone(ul_input)
            if self.with_neck:
                ul_features = self.neck(ul_features)
            ul_output = self.keypoint_head(ul_features)

            ul_traj_features = self.traj_backbone(ul_input)
            if self.with_traj_neck:
                ul_traj_features = self.traj_neck(ul_traj_features)
            ul_traj_output = self.traj_head(ul_traj_features)

            output_semi = dict(
                labeled_pose=output,
                unlabeled_pose=ul_output,
                unlabeled_traj=ul_traj_output)
            target_semi = dict(
                unlabeled_target_2d=kwargs['unlabeled_target_2d'],
                intrinsics=kwargs['intrinsics'])

            semi_losses = self.loss_semi(output_semi, target_semi)
            losses.update(semi_losses)

        return losses
def forward_test(self, input, metas, **kwargs):
"""Defines the computation performed at every call when training."""
assert input.size(0) == len(metas)
results = {}
features = self.backbone(input)
if self.with_neck:
features = self.neck(features)
if self.with_keypoint:
output = self.keypoint_head.inference_model(features)
keypoint_result = self.keypoint_head.decode(metas, output)
results.update(keypoint_result)
if self.with_traj:
traj_features = self.traj_backbone(input)
if self.with_traj_neck:
traj_features = self.traj_neck(traj_features)
traj_output = self.traj_head.inference_model(traj_features)
results['traj_preds'] = traj_output
return results
def forward_dummy(self, input):
"""Used for computing network FLOPs. See ``tools/get_flops.py``.
Args:
input (torch.Tensor): Input pose
Returns:
Tensor: Model output
"""
output = self.backbone(input)
if self.with_neck:
output = self.neck(output)
if self.with_keypoint:
output = self.keypoint_head(output)
if self.with_traj:
traj_features = self.traj_backbone(input)
if self.with_neck:
traj_features = self.traj_neck(traj_features)
traj_output = self.traj_head(traj_features)
output = output + traj_output
return output
    @deprecated_api_warning({'pose_limb_color': 'pose_link_color'},
                            cls_name='PoseLifter')
    def show_result(self,
                    result,
                    img=None,
                    skeleton=None,
                    pose_kpt_color=None,
                    pose_link_color=None,
                    radius=8,
                    thickness=2,
                    vis_height=400,
                    num_instances=-1,
                    win_name='',
                    show=False,
                    wait_time=0,
                    out_file=None):
        """Visualize 3D pose estimation results.

        Args:
            result (list[dict]): The pose estimation results containing:

                - "keypoints_3d" ([K,4]): 3D keypoints
                - "keypoints" ([K,3] or [T,K,3]): Optional for visualizing
                  2D inputs. If a sequence is given, only the last frame
                  will be used for visualization
                - "bbox" ([4,] or [T,4]): Optional for visualizing 2D inputs
                - "title" (str): title for the subplot
            img (str or Tensor): Optional. The image to visualize 2D inputs on.
            skeleton (list of [idx_i,idx_j]): Skeleton described by a list of
                links, each is a pair of joint indices.
            pose_kpt_color (np.array[Nx3]): Color of N keypoints.
                If None, do not draw keypoints.
            pose_link_color (np.array[Mx3]): Color of M links.
                If None, do not draw links.
            radius (int): Radius of keypoint circles.
            thickness (int): Thickness of lines.
            vis_height (int): The image height of the visualization. The width
                will be N*vis_height depending on the number of visualized
                items.
            num_instances (int): Number of instances to visualize. A negative
                value means the count is derived from ``result``.
            win_name (str): The window name used when ``show`` is True.
            show (bool): Whether to display the rendered image in a window.
            wait_time (int): Value of waitKey param.
                Default: 0.
            out_file (str or None): The filename to write the image.
                Default: None.

        Returns:
            Tensor: Visualized img, only if not `show` or `out_file`.
        """
        if num_instances < 0:
            assert len(result) > 0
        # Stable ordering: tracked instances first; entries without a
        # track_id sort last via the 1e4 sentinel.
        result = sorted(result, key=lambda x: x.get('track_id', 1e4))
        # draw image and input 2d poses
        if img is not None:
            img = mmcv.imread(img)
            bbox_result = []
            pose_input_2d = []
            for res in result:
                if 'bbox' in res:
                    bbox = np.array(res['bbox'])
                    if bbox.ndim != 1:
                        assert bbox.ndim == 2
                        bbox = bbox[-1]  # Get bbox from the last frame
                    bbox_result.append(bbox)
                if 'keypoints' in res:
                    kpts = np.array(res['keypoints'])
                    if kpts.ndim != 2:
                        assert kpts.ndim == 3
                        kpts = kpts[-1]  # Get 2D keypoints from the last frame
                    pose_input_2d.append(kpts)
            if len(bbox_result) > 0:
                bboxes = np.vstack(bbox_result)
                imshow_bboxes(
                    img,
                    bboxes,
                    colors='green',
                    thickness=thickness,
                    show=False)
            if len(pose_input_2d) > 0:
                imshow_keypoints(
                    img,
                    pose_input_2d,
                    skeleton,
                    kpt_score_thr=0.3,
                    pose_kpt_color=pose_kpt_color,
                    pose_link_color=pose_link_color,
                    radius=radius,
                    thickness=thickness)
            # Scale the 2D panel to the requested visualization height.
            img = mmcv.imrescale(img, scale=vis_height / img.shape[0])
        img_vis = imshow_keypoints_3d(
            result,
            img,
            skeleton,
            pose_kpt_color,
            pose_link_color,
            vis_height,
            num_instances=num_instances)
        if show:
            mmcv.visualization.imshow(img_vis, win_name, wait_time)
        if out_file is not None:
            mmcv.imwrite(img_vis, out_file)
        return img_vis
| 36.157761 | 79 | 0.567488 |
ace6b9f98516b312f32c81a43142ced364b0f32e | 9,473 | py | Python | astropy/units/format/ogip_parsetab.py | MatiasRepetto/astropy | 689f9d3b063145150149e592a879ee40af1fac06 | [
"BSD-3-Clause"
] | 1 | 2019-03-11T12:26:49.000Z | 2019-03-11T12:26:49.000Z | astropy/units/format/ogip_parsetab.py | MatiasRepetto/astropy | 689f9d3b063145150149e592a879ee40af1fac06 | [
"BSD-3-Clause"
] | 1 | 2019-10-09T18:54:27.000Z | 2019-10-09T18:54:27.000Z | astropy/units/format/ogip_parsetab.py | MatiasRepetto/astropy | 689f9d3b063145150149e592a879ee40af1fac06 | [
"BSD-3-Clause"
] | 1 | 2020-02-18T04:10:00.000Z | 2020-02-18T04:10:00.000Z | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This file was automatically generated from ply. To re-generate this file,
# remove it from this folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/units
#
# You can then commit the changes to this file.
# ogip_parsetab.py
# This file is automatically generated. Do not edit.
# pylint: disable=W,C,R
_tabversion = '3.10'
_lr_method = 'LALR'
_lr_signature = 'CLOSE_PAREN DIVISION LIT10 OPEN_PAREN SIGN STAR STARSTAR UFLOAT UINT UNIT UNKNOWN WHITESPACE\n main : UNKNOWN\n | complete_expression\n | scale_factor complete_expression\n | scale_factor WHITESPACE complete_expression\n \n complete_expression : product_of_units\n \n product_of_units : unit_expression\n | division unit_expression\n | product_of_units product unit_expression\n | product_of_units division unit_expression\n \n unit_expression : unit\n | UNIT OPEN_PAREN complete_expression CLOSE_PAREN\n | OPEN_PAREN complete_expression CLOSE_PAREN\n | UNIT OPEN_PAREN complete_expression CLOSE_PAREN power numeric_power\n | OPEN_PAREN complete_expression CLOSE_PAREN power numeric_power\n \n scale_factor : LIT10 power numeric_power\n | LIT10\n | signed_float\n | signed_float power numeric_power\n | signed_int power numeric_power\n \n division : DIVISION\n | WHITESPACE DIVISION\n | WHITESPACE DIVISION WHITESPACE\n | DIVISION WHITESPACE\n \n product : WHITESPACE\n | STAR\n | WHITESPACE STAR\n | WHITESPACE STAR WHITESPACE\n | STAR WHITESPACE\n \n power : STARSTAR\n \n unit : UNIT\n | UNIT power numeric_power\n \n numeric_power : UINT\n | signed_float\n | OPEN_PAREN signed_int CLOSE_PAREN\n | OPEN_PAREN signed_float CLOSE_PAREN\n | OPEN_PAREN signed_float division UINT CLOSE_PAREN\n \n sign : SIGN\n |\n \n signed_int : SIGN UINT\n \n signed_float : sign UINT\n | sign UFLOAT\n '
_lr_action_items = {'UNKNOWN':([0,],[2,]),'LIT10':([0,],[7,]),'SIGN':([0,25,26,27,28,34,47,59,63,],[13,48,-29,48,48,48,13,48,48,]),'UNIT':([0,4,7,8,11,16,17,19,20,21,22,23,24,30,31,33,36,38,39,42,43,44,45,46,49,50,54,55,60,61,67,],[15,15,-16,-17,15,15,-20,15,-21,15,15,-24,-25,-40,-41,15,-23,-20,-22,-26,-28,-15,-32,-33,-18,-19,-22,-27,-34,-35,-36,]),'OPEN_PAREN':([0,4,7,8,11,15,16,17,19,20,21,22,23,24,25,26,27,28,30,31,33,34,36,38,39,42,43,44,45,46,49,50,54,55,59,60,61,63,67,],[16,16,-16,-17,16,33,16,-20,16,-21,16,16,-24,-25,47,-29,47,47,-40,-41,16,47,-23,-20,-22,-26,-28,-15,-32,-33,-18,-19,-22,-27,47,-34,-35,47,-36,]),'DIVISION':([0,4,5,6,7,8,10,14,15,16,19,23,29,30,31,33,40,41,44,45,46,49,50,52,53,57,58,60,61,64,66,67,],[17,17,20,17,-16,-17,-6,-10,-30,17,38,20,-7,-40,-41,17,-8,-9,-15,-32,-33,-18,-19,-31,-12,17,-11,-34,-35,-14,-13,-36,]),'WHITESPACE':([0,4,6,7,8,10,14,15,16,17,19,20,24,29,30,31,33,38,40,41,42,44,45,46,49,50,52,53,57,58,60,61,64,66,67,],[5,19,23,-16,-17,-6,-10,-30,5,36,5,39,43,-7,-40,-41,5,54,-8,-9,55,-15,-32,-33,-18,-19,-31,-12,5,-11,-34,-35,-14,-13,-36,]),'UINT':([0,12,13,17,20,25,26,27,28,34,36,39,47,48,59,62,63,],[-38,30,32,-20,-21,45,-29,45,45,45,-23,-22,-38,-37,45,65,45,]),'UFLOAT':([0,12,13,25,26,27,28,34,47,48,59,63,],[-38,31,-37,-38,-29,-38,-38,-38,-38,-37,-38,-38,]),'$end':([1,2,3,6,10,14,15,18,29,30,31,37,40,41,45,46,52,53,58,60,61,64,66,67,],[0,-1,-2,-5,-6,-10,-30,-3,-7,-40,-41,-4,-8,-9,-32,-33,-31,-12,-11,-34,-35,-14,-13,-36,]),'CLOSE_PAREN':([6,10,14,15,29,30,31,32,35,40,41,45,46,51,52,53,56,57,58,60,61,64,65,66,67,],[-5,-6,-10,-30,-7,-40,-41,-39,53,-8,-9,-32,-33,58,-31,-12,60,61,-11,-34,-35,-14,67,-13,-36,]),'STAR':([6,10,14,15,23,29,30,31,40,41,45,46,52,53,58,60,61,64,66,67,],[24,-6,-10,-30,42,-7,-40,-41,-8,-9,-32,-33,-31,-12,-11,-34,-35,-14,-13,-36,]),'STARSTAR':([7,8,9,15,30,31,32,53,58,],[26,26,26,26,-40,-41,-39,26,26,]),}
_lr_action = {}
# Expand the compressed per-token action table into the
# _lr_action[state][token] = action form used by the LALR parser driver.
for _k, _v in _lr_action_items.items():
   for _x,_y in zip(_v[0],_v[1]):
      if not _x in _lr_action: _lr_action[_x] = {}
      _lr_action[_x][_k] = _y
del _lr_action_items
_lr_goto_items = {'main':([0,],[1,]),'complete_expression':([0,4,16,19,33,],[3,18,35,37,51,]),'scale_factor':([0,],[4,]),'product_of_units':([0,4,16,19,33,],[6,6,6,6,6,]),'signed_float':([0,25,27,28,34,47,59,63,],[8,46,46,46,46,57,46,46,]),'signed_int':([0,47,],[9,56,]),'unit_expression':([0,4,11,16,19,21,22,33,],[10,10,29,10,10,40,41,10,]),'division':([0,4,6,16,19,33,57,],[11,11,22,11,11,11,62,]),'sign':([0,25,27,28,34,47,59,63,],[12,12,12,12,12,12,12,12,]),'unit':([0,4,11,16,19,21,22,33,],[14,14,14,14,14,14,14,14,]),'product':([6,],[21,]),'power':([7,8,9,15,53,58,],[25,27,28,34,59,63,]),'numeric_power':([25,27,28,34,59,63,],[44,49,50,52,64,66,]),}
_lr_goto = {}
# Expand the compressed goto table into the
# _lr_goto[state][nonterminal] = next_state form used by the parser driver.
for _k, _v in _lr_goto_items.items():
   for _x, _y in zip(_v[0], _v[1]):
      if not _x in _lr_goto: _lr_goto[_x] = {}
      _lr_goto[_x][_k] = _y
del _lr_goto_items
_lr_productions = [
("S' -> main","S'",1,None,None,None),
('main -> UNKNOWN','main',1,'p_main','ogip.py',184),
('main -> complete_expression','main',1,'p_main','ogip.py',185),
('main -> scale_factor complete_expression','main',2,'p_main','ogip.py',186),
('main -> scale_factor WHITESPACE complete_expression','main',3,'p_main','ogip.py',187),
('complete_expression -> product_of_units','complete_expression',1,'p_complete_expression','ogip.py',198),
('product_of_units -> unit_expression','product_of_units',1,'p_product_of_units','ogip.py',204),
('product_of_units -> division unit_expression','product_of_units',2,'p_product_of_units','ogip.py',205),
('product_of_units -> product_of_units product unit_expression','product_of_units',3,'p_product_of_units','ogip.py',206),
('product_of_units -> product_of_units division unit_expression','product_of_units',3,'p_product_of_units','ogip.py',207),
('unit_expression -> unit','unit_expression',1,'p_unit_expression','ogip.py',221),
('unit_expression -> UNIT OPEN_PAREN complete_expression CLOSE_PAREN','unit_expression',4,'p_unit_expression','ogip.py',222),
('unit_expression -> OPEN_PAREN complete_expression CLOSE_PAREN','unit_expression',3,'p_unit_expression','ogip.py',223),
('unit_expression -> UNIT OPEN_PAREN complete_expression CLOSE_PAREN power numeric_power','unit_expression',6,'p_unit_expression','ogip.py',224),
('unit_expression -> OPEN_PAREN complete_expression CLOSE_PAREN power numeric_power','unit_expression',5,'p_unit_expression','ogip.py',225),
('scale_factor -> LIT10 power numeric_power','scale_factor',3,'p_scale_factor','ogip.py',259),
('scale_factor -> LIT10','scale_factor',1,'p_scale_factor','ogip.py',260),
('scale_factor -> signed_float','scale_factor',1,'p_scale_factor','ogip.py',261),
('scale_factor -> signed_float power numeric_power','scale_factor',3,'p_scale_factor','ogip.py',262),
('scale_factor -> signed_int power numeric_power','scale_factor',3,'p_scale_factor','ogip.py',263),
('division -> DIVISION','division',1,'p_division','ogip.py',278),
('division -> WHITESPACE DIVISION','division',2,'p_division','ogip.py',279),
('division -> WHITESPACE DIVISION WHITESPACE','division',3,'p_division','ogip.py',280),
('division -> DIVISION WHITESPACE','division',2,'p_division','ogip.py',281),
('product -> WHITESPACE','product',1,'p_product','ogip.py',287),
('product -> STAR','product',1,'p_product','ogip.py',288),
('product -> WHITESPACE STAR','product',2,'p_product','ogip.py',289),
('product -> WHITESPACE STAR WHITESPACE','product',3,'p_product','ogip.py',290),
('product -> STAR WHITESPACE','product',2,'p_product','ogip.py',291),
('power -> STARSTAR','power',1,'p_power','ogip.py',297),
('unit -> UNIT','unit',1,'p_unit','ogip.py',303),
('unit -> UNIT power numeric_power','unit',3,'p_unit','ogip.py',304),
('numeric_power -> UINT','numeric_power',1,'p_numeric_power','ogip.py',313),
('numeric_power -> signed_float','numeric_power',1,'p_numeric_power','ogip.py',314),
('numeric_power -> OPEN_PAREN signed_int CLOSE_PAREN','numeric_power',3,'p_numeric_power','ogip.py',315),
('numeric_power -> OPEN_PAREN signed_float CLOSE_PAREN','numeric_power',3,'p_numeric_power','ogip.py',316),
('numeric_power -> OPEN_PAREN signed_float division UINT CLOSE_PAREN','numeric_power',5,'p_numeric_power','ogip.py',317),
('sign -> SIGN','sign',1,'p_sign','ogip.py',328),
('sign -> <empty>','sign',0,'p_sign','ogip.py',329),
('signed_int -> SIGN UINT','signed_int',2,'p_signed_int','ogip.py',338),
('signed_float -> sign UINT','signed_float',2,'p_signed_float','ogip.py',344),
('signed_float -> sign UFLOAT','signed_float',2,'p_signed_float','ogip.py',345),
]
| 114.13253 | 2,320 | 0.614589 |
ace6ba1dce018ee9e5f51a4f191e0a3f13774266 | 6,339 | py | Python | trinity/components/eth2/beacon/component.py | wschwab/trinity | f94c1aa1642dd5d83eb6a89e48205abda234de79 | [
"MIT"
] | null | null | null | trinity/components/eth2/beacon/component.py | wschwab/trinity | f94c1aa1642dd5d83eb6a89e48205abda234de79 | [
"MIT"
] | null | null | null | trinity/components/eth2/beacon/component.py | wschwab/trinity | f94c1aa1642dd5d83eb6a89e48205abda234de79 | [
"MIT"
] | null | null | null | from argparse import (
ArgumentParser,
_SubParsersAction,
)
import os
import asyncio
from typing import (
cast,
)
from lahja import EndpointAPI
from libp2p.crypto.keys import KeyPair
from libp2p.crypto.secp256k1 import create_new_key_pair, Secp256k1PrivateKey
from eth_utils import decode_hex
from eth2.beacon.operations.attestation_pool import AttestationPool
from eth2.beacon.typing import (
ValidatorIndex,
)
from trinity._utils.shutdown import (
exit_with_services,
)
from trinity.config import BeaconAppConfig
from trinity.db.manager import DBClient
from trinity.extensibility import AsyncioIsolatedComponent
from trinity.protocol.bcc_libp2p.node import Node
from trinity.protocol.bcc_libp2p.servers import BCCReceiveServer
from .slot_ticker import (
SlotTicker,
)
from .validator import (
Validator,
)
from trinity.sync.beacon.chain import BeaconChainSyncer
from trinity.db.beacon.chain import AsyncBeaconChainDB
from trinity.sync.common.chain import (
SyncBlockImporter,
)
class BeaconNodeComponent(AsyncioIsolatedComponent):
    """Trinity component that runs an Eth2 beacon node.

    Wires together the beacon chain database, the libp2p networking node,
    the gossip receive server, the validator duties runner, the slot ticker
    and the chain syncer, and schedules them all on the event loop.
    """
    @property
    def name(self) -> str:
        # Display name used by the component framework for logging/CLI output.
        return "Beacon Node"
    @classmethod
    def configure_parser(cls, arg_parser: ArgumentParser, subparser: _SubParsersAction) -> None:
        """Register beacon-node specific command line arguments."""
        arg_parser.add_argument(
            "--bootstrap_nodes",
            help="/ip4/127.0.0.1/tcp/1234/p2p/node1_peer_id,/ip4/127.0.0.1/tcp/5678/p2p/node2_peer_id", # noqa: E501
        )
        arg_parser.add_argument(
            "--preferred_nodes",
            help="/ip4/127.0.0.1/tcp/1234/p2p/node1_peer_id,/ip4/127.0.0.1/tcp/5678/p2p/node2_peer_id", # noqa: E501
        )
        arg_parser.add_argument(
            "--beacon-nodekey",
            help="0xabcd",
        )
    def on_ready(self, manager_eventbus: EndpointAPI) -> None:
        """Start the component, but only when Trinity runs in beacon mode."""
        if self.boot_info.trinity_config.has_app_config(BeaconAppConfig):
            self.start()
    def _load_or_create_node_key(self) -> KeyPair:
        """Return the libp2p identity key pair.

        Resolution order:
        1. ``--beacon-nodekey`` CLI value (hex-encoded secp256k1 private key),
        2. an existing ``<nodekey_path>-beacon`` file,
        3. otherwise generate a fresh key pair and persist it to that file.
        """
        if self.boot_info.args.beacon_nodekey:
            privkey = Secp256k1PrivateKey.new(
                decode_hex(self.boot_info.args.beacon_nodekey)
            )
            key_pair = KeyPair(private_key=privkey, public_key=privkey.get_public_key())
            return key_pair
        else:
            config = self.boot_info.trinity_config
            beacon_nodekey_path = f"{config.nodekey_path}-beacon"
            if os.path.isfile(beacon_nodekey_path):
                # Reuse the previously persisted node identity.
                with open(beacon_nodekey_path, "rb") as f:
                    key_data = f.read()
                private_key = Secp256k1PrivateKey.new(key_data)
                key_pair = KeyPair(
                    private_key=private_key,
                    public_key=private_key.get_public_key()
                )
                return key_pair
            else:
                # First run: create a new identity and persist it for reuse.
                key_pair = create_new_key_pair()
                private_key_bytes = key_pair.private_key.to_bytes()
                with open(beacon_nodekey_path, "wb") as f:
                    f.write(private_key_bytes)
                return key_pair
    def do_start(self) -> None:
        """Build all beacon-node services and schedule them on the event loop."""
        trinity_config = self.boot_info.trinity_config
        key_pair = self._load_or_create_node_key()
        beacon_app_config = trinity_config.get_app_config(BeaconAppConfig)
        base_db = DBClient.connect(trinity_config.database_ipc_path)
        chain_config = beacon_app_config.get_chain_config()
        attestation_pool = AttestationPool()
        chain = chain_config.beacon_chain_class(
            base_db,
            attestation_pool,
            chain_config.genesis_config
        )
        # TODO: Handle `bootstrap_nodes`.
        libp2p_node = Node(
            key_pair=key_pair,
            listen_ip="127.0.0.1",  # FIXME: Should be configurable
            listen_port=self.boot_info.args.port,
            preferred_nodes=trinity_config.preferred_nodes,
            chain=chain,
        )
        self.logger.warning(f'Node listening: {libp2p_node.listen_maddr_with_peer_id}')
        receive_server = BCCReceiveServer(
            chain=chain,
            p2p_node=libp2p_node,
            topic_msg_queues=libp2p_node.pubsub.my_topics,
            cancel_token=libp2p_node.cancel_token,
        )
        # Map the locally-held validator keys to their indices in the
        # genesis validator registry; a key missing from the registry is
        # a fatal configuration error.
        state = chain.get_state_by_slot(chain_config.genesis_config.GENESIS_SLOT)
        registry_pubkeys = [v_record.pubkey for v_record in state.validators]
        validator_privkeys = {}
        validator_keymap = chain_config.genesis_data.validator_keymap
        for pubkey in validator_keymap:
            try:
                validator_index = cast(ValidatorIndex, registry_pubkeys.index(pubkey))
            except ValueError:
                self.logger.error(f'Could not find key {pubkey.hex()} in genesis state')
                raise
            validator_privkeys[validator_index] = validator_keymap[pubkey]
        validator = Validator(
            chain=chain,
            p2p_node=libp2p_node,
            validator_privkeys=validator_privkeys,
            event_bus=self.event_bus,
            token=libp2p_node.cancel_token,
            get_ready_attestations_fn=receive_server.get_ready_attestations,
        )
        slot_ticker = SlotTicker(
            genesis_slot=chain_config.genesis_config.GENESIS_SLOT,
            genesis_time=chain_config.genesis_data.genesis_time,
            seconds_per_slot=chain_config.genesis_config.SECONDS_PER_SLOT,
            event_bus=self.event_bus,
            token=libp2p_node.cancel_token,
        )
        syncer = BeaconChainSyncer(
            chain_db=AsyncBeaconChainDB(
                base_db,
                chain_config.genesis_config,
            ),
            peer_pool=libp2p_node.handshaked_peers,
            block_importer=SyncBlockImporter(chain),
            genesis_config=chain_config.genesis_config,
            token=libp2p_node.cancel_token,
        )
        # Tear everything down together if any single service exits.
        asyncio.ensure_future(exit_with_services(
            self._event_bus_service,
            libp2p_node,
            receive_server,
            slot_ticker,
            validator,
            syncer,
        ))
        asyncio.ensure_future(libp2p_node.run())
        asyncio.ensure_future(receive_server.run())
        asyncio.ensure_future(slot_ticker.run())
        asyncio.ensure_future(validator.run())
        asyncio.ensure_future(syncer.run())
ace6bc88d3206780c2dd03ee4333918d04e818ed | 13,150 | py | Python | pype/plugins/hiero/publish/extract_review_cutup.py | Yowza-Animation/pype | 0212fa8357e6ffd490230193e69e101aaf262587 | [
"MIT"
] | null | null | null | pype/plugins/hiero/publish/extract_review_cutup.py | Yowza-Animation/pype | 0212fa8357e6ffd490230193e69e101aaf262587 | [
"MIT"
] | null | null | null | pype/plugins/hiero/publish/extract_review_cutup.py | Yowza-Animation/pype | 0212fa8357e6ffd490230193e69e101aaf262587 | [
"MIT"
] | null | null | null | import os
import sys
import six
import errno
from pyblish import api
import pype
import clique
from avalon.vendor import filelink
class ExtractReviewCutUp(pype.api.Extractor):
    """Cut up clips from long video file.

    For image sequences the frames are hardlinked into a ``cuts`` folder
    with renumbered filenames; for single movie files an ffmpeg command is
    built that trims the clip and, when frames are missing at the head or
    tail, concatenates black/silent padding.
    """
    order = api.ExtractorOrder
    # order = api.CollectorOrder + 0.1023
    label = "Extract Review CutUp"
    hosts = ["hiero"]
    families = ["review"]
    # presets
    tags_addition = []

    def process(self, instance):
        """Create a ``cut_up_preview`` representation for every input
        representation tagged ``_cut-bigger`` or ``_cut-smaller``."""
        inst_data = instance.data
        asset = inst_data['asset']
        # get representation and loop them
        representations = inst_data["representations"]
        # check if sequence
        is_sequence = inst_data["isSequence"]
        # get resolution default
        resolution_width = inst_data["resolutionWidth"]
        resolution_height = inst_data["resolutionHeight"]
        # frame range data
        media_duration = inst_data["mediaDuration"]
        ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
        ffprobe_path = pype.lib.get_ffmpeg_tool_path("ffprobe")
        # filter out mov and img sequences
        representations_new = representations[:]
        for repre in representations:
            input_args = list()
            output_args = list()
            tags = repre.get("tags", [])
            # check if supported tags are in representation for activation
            filter_tag = False
            for tag in ["_cut-bigger", "_cut-smaller"]:
                if tag in tags:
                    filter_tag = True
                    break
            if not filter_tag:
                continue
            self.log.debug("__ repre: {}".format(repre))
            files = repre.get("files")
            staging_dir = repre.get("stagingDir")
            fps = repre.get("fps")
            ext = repre.get("ext")
            # make paths
            full_output_dir = os.path.join(
                staging_dir, "cuts")
            if is_sequence:
                new_files = list()
                # frame range delivery included handles
                frame_start = (
                    inst_data["frameStart"] - inst_data["handleStart"])
                frame_end = (
                    inst_data["frameEnd"] + inst_data["handleEnd"])
                self.log.debug("_ frame_start: {}".format(frame_start))
                self.log.debug("_ frame_end: {}".format(frame_end))
                # make collection from input files list
                collections, remainder = clique.assemble(files)
                collection = collections.pop()
                self.log.debug("_ collection: {}".format(collection))
                # name components
                head = collection.format("{head}")
                padding = collection.format("{padding}")
                tail = collection.format("{tail}")
                self.log.debug("_ head: {}".format(head))
                self.log.debug("_ padding: {}".format(padding))
                self.log.debug("_ tail: {}".format(tail))
                # make destination file with instance data
                # frame start and end range
                index = 0
                for image in collection:
                    dst_file_num = frame_start + index
                    dst_file_name = head + str(padding % dst_file_num) + tail
                    src = os.path.join(staging_dir, image)
                    dst = os.path.join(full_output_dir, dst_file_name)
                    self.log.info("Creating temp hardlinks: {}".format(dst))
                    self.hardlink_file(src, dst)
                    new_files.append(dst_file_name)
                    index += 1
                self.log.debug("_ new_files: {}".format(new_files))
            else:
                # ffmpeg when single file
                new_files = "{}_{}".format(asset, files)
                # frame range
                frame_start = repre.get("frameStart")
                frame_end = repre.get("frameEnd")
            full_input_path = os.path.join(
                staging_dir, files)
            os.path.isdir(full_output_dir) or os.makedirs(full_output_dir)
            full_output_path = os.path.join(
                full_output_dir, new_files)
            self.log.debug(
                "__ full_input_path: {}".format(full_input_path))
            self.log.debug(
                "__ full_output_path: {}".format(full_output_path))
            # check if audio stream is in input video file
            ffprob_cmd = (
                "\"{ffprobe_path}\" -i \"{full_input_path}\" -show_streams"
                " -select_streams a -loglevel error"
            ).format(**locals())
            self.log.debug("ffprob_cmd: {}".format(ffprob_cmd))
            audio_check_output = pype.api.subprocess(ffprob_cmd)
            self.log.debug(
                "audio_check_output: {}".format(audio_check_output))
            # Fix one frame difference
            # TODO: this is just work-around for issue:
            #   https://github.com/pypeclub/pype/issues/659
            frame_duration_extend = 1
            if audio_check_output:
                frame_duration_extend = 0
            # translate frame to sec
            start_sec = float(frame_start) / fps
            duration_sec = float(
                (frame_end - frame_start) + frame_duration_extend) / fps
            empty_add = None
            # check if not missing frames at start
            if (start_sec < 0) or (media_duration < frame_end):
                # for later swithing off `-c:v copy` output arg
                empty_add = True
                # init empty variables
                video_empty_start = video_layer_start = ""
                audio_empty_start = audio_layer_start = ""
                video_empty_end = video_layer_end = ""
                audio_empty_end = audio_layer_end = ""
                audio_input = audio_output = ""
                v_inp_idx = 0
                concat_n = 1
                # try to get video native resolution data
                try:
                    resolution_output = pype.api.subprocess((
                        "\"{ffprobe_path}\" -i \"{full_input_path}\""
                        " -v error "
                        "-select_streams v:0 -show_entries "
                        "stream=width,height -of csv=s=x:p=0"
                    ).format(**locals()))
                    x, y = resolution_output.split("x")
                    resolution_width = int(x)
                    resolution_height = int(y)
                except Exception as _ex:
                    self.log.warning(
                        "Video native resolution is untracable: {}".format(
                            _ex))
                if audio_check_output:
                    # adding input for empty audio
                    input_args.append("-f lavfi -i anullsrc")
                    # define audio empty concat variables
                    audio_input = "[1:a]"
                    audio_output = ":a=1"
                    v_inp_idx = 1
                # adding input for video black frame
                input_args.append((
                    "-f lavfi -i \"color=c=black:"
                    "s={resolution_width}x{resolution_height}:r={fps}\""
                ).format(**locals()))
                if (start_sec < 0):
                    # recalculate input video timing
                    empty_start_dur = abs(start_sec)
                    start_sec = 0
                    duration_sec = float(frame_end - (
                        frame_start + (empty_start_dur * fps)) + 1) / fps
                    # define starting empty video concat variables
                    video_empty_start = (
                        "[{v_inp_idx}]trim=duration={empty_start_dur}[gv0];"  # noqa
                    ).format(**locals())
                    video_layer_start = "[gv0]"
                    if audio_check_output:
                        # define starting empty audio concat variables
                        audio_empty_start = (
                            "[0]atrim=duration={empty_start_dur}[ga0];"
                        ).format(**locals())
                        audio_layer_start = "[ga0]"
                    # alter concat number of clips
                    concat_n += 1
                # check if not missing frames at the end
                if (media_duration < frame_end):
                    # recalculate timing
                    empty_end_dur = float(
                        frame_end - media_duration + 1) / fps
                    duration_sec = float(
                        media_duration - frame_start) / fps
                    # define ending empty video concat variables
                    video_empty_end = (
                        "[{v_inp_idx}]trim=duration={empty_end_dur}[gv1];"
                    ).format(**locals())
                    video_layer_end = "[gv1]"
                    if audio_check_output:
                        # define ending empty audio concat variables
                        audio_empty_end = (
                            "[0]atrim=duration={empty_end_dur}[ga1];"
                        ).format(**locals())
                        # Bugfix: reference the `[ga1]` pad produced by the
                        # atrim above ("[ga0]" was used before, which ffmpeg
                        # rejects as an unknown/reused filtergraph label).
                        audio_layer_end = "[ga1]"
                    # alter concat number of clips
                    concat_n += 1
                # concatting black frame togather
                output_args.append((
                    "-filter_complex \""
                    "{audio_empty_start}"
                    "{video_empty_start}"
                    "{audio_empty_end}"
                    "{video_empty_end}"
                    "{video_layer_start}{audio_layer_start}[1:v]{audio_input}"  # noqa
                    "{video_layer_end}{audio_layer_end}"
                    "concat=n={concat_n}:v=1{audio_output}\""
                ).format(**locals()))
            # append ffmpeg input video clip
            input_args.append("-ss {:0.2f}".format(start_sec))
            input_args.append("-t {:0.2f}".format(duration_sec))
            input_args.append("-i \"{}\"".format(full_input_path))
            # add copy audio video codec if only shortening clip
            if ("_cut-bigger" in tags) and (not empty_add):
                output_args.append("-c:v copy")
            # make sure it is having no frame to frame comprassion
            output_args.append("-intra")
            # output filename
            output_args.append("-y \"{}\"".format(full_output_path))
            mov_args = [
                "\"{}\"".format(ffmpeg_path),
                " ".join(input_args),
                " ".join(output_args)
            ]
            subprcs_cmd = " ".join(mov_args)
            # run subprocess
            self.log.debug("Executing: {}".format(subprcs_cmd))
            output = pype.api.subprocess(subprcs_cmd)
            self.log.debug("Output: {}".format(output))
            repre_new = {
                "files": new_files,
                "stagingDir": full_output_dir,
                "frameStart": frame_start,
                "frameEnd": frame_end,
                "frameStartFtrack": frame_start,
                "frameEndFtrack": frame_end,
                "step": 1,
                "fps": fps,
                "name": "cut_up_preview",
                "tags": ["review"] + self.tags_addition,
                "ext": ext,
                "anatomy_template": "publish"
            }
            representations_new.append(repre_new)
        # Bugfix: iterate over a snapshot of the list -- removing items from
        # the list being iterated skips the element following each removal.
        for repre in list(representations_new):
            if ("delete" in repre.get("tags", [])) and (
                    "cut_up_preview" not in repre["name"]):
                representations_new.remove(repre)
        self.log.debug(
            "Representations: {}".format(representations_new))
        instance.data["representations"] = representations_new

    def hardlink_file(self, src, dst):
        """Hardlink ``src`` to ``dst``, creating parent folders as needed.

        Pre-existing folders/links are tolerated (EEXIST is ignored); any
        other OSError is logged and re-raised with its original traceback.
        """
        dirname = os.path.dirname(dst)
        # make sure the destination folder exist
        try:
            os.makedirs(dirname)
        except OSError as e:
            if e.errno == errno.EEXIST:
                pass
            else:
                self.log.critical("An unexpected error occurred.")
                six.reraise(*sys.exc_info())
        # create hardlined file
        try:
            filelink.create(src, dst, filelink.HARDLINK)
        except OSError as e:
            if e.errno == errno.EEXIST:
                pass
            else:
                self.log.critical("An unexpected error occurred.")
                six.reraise(*sys.exc_info())
| 39.020772 | 90 | 0.480684 |
ace6bca6b3402abc9b674363a10938a5e0b76a3c | 6,406 | py | Python | CTFd/plugins/__init__.py | KaitoRyouga/CTFd | 827a22e8ce9bdfd43ae0689e6cbcf2a6e253e920 | [
"Apache-2.0"
] | null | null | null | CTFd/plugins/__init__.py | KaitoRyouga/CTFd | 827a22e8ce9bdfd43ae0689e6cbcf2a6e253e920 | [
"Apache-2.0"
] | null | null | null | CTFd/plugins/__init__.py | KaitoRyouga/CTFd | 827a22e8ce9bdfd43ae0689e6cbcf2a6e253e920 | [
"Apache-2.0"
] | null | null | null | import glob
import importlib
import os
from collections import namedtuple
from flask import current_app as app
from flask import send_file, send_from_directory
from CTFd.utils.config.pages import get_pages
from CTFd.utils.decorators import admins_only as admins_only_wrapper
from CTFd.utils.plugins import override_template as utils_override_template
from CTFd.utils.plugins import (
register_admin_script as utils_register_admin_plugin_script,
)
from CTFd.utils.plugins import (
register_admin_stylesheet as utils_register_admin_plugin_stylesheet,
)
from CTFd.utils.plugins import register_script as utils_register_plugin_script
from CTFd.utils.plugins import register_stylesheet as utils_register_plugin_stylesheet
# Navbar entry record: display title plus the href route it links to.
Menu = namedtuple("Menu", ["title", "route"])
def register_plugin_assets_directory(app, base_path, admins_only=False, endpoint=None):
    """
    Registers a directory to serve assets

    :param app: A CTFd application
    :param string base_path: The path to the directory
    :param boolean admins_only: Whether or not the assets served out of the directory should be accessible to the public
    :param endpoint: Flask endpoint name; derived from base_path when None
    :return:
    """
    base_path = base_path.strip("/")
    if endpoint is None:
        endpoint = base_path.replace("/", ".")

    def assets_handler(path):
        return send_from_directory(base_path, path)

    # Bugfix: the admins_only flag was accepted but never applied, leaving
    # supposedly admin-only assets publicly accessible (sibling
    # register_plugin_asset already wraps its handler the same way).
    if admins_only:
        assets_handler = admins_only_wrapper(assets_handler)

    rule = "/" + base_path + "/<path:path>"
    app.add_url_rule(rule=rule, endpoint=endpoint, view_func=assets_handler)
def register_plugin_asset(app, asset_path, admins_only=False, endpoint=None):
    """
    Registers a single file path to be served by CTFd

    :param app: A CTFd application
    :param string asset_path: The path to the asset file
    :param boolean admins_only: Whether or not this file should be accessible to the public
    :param endpoint: Flask endpoint name; derived from asset_path when None
    :return:
    """
    asset_path = asset_path.strip("/")
    if endpoint is None:
        endpoint = asset_path.replace("/", ".")

    def asset_handler():
        return send_file(asset_path)

    if admins_only:
        asset_handler = admins_only_wrapper(asset_handler)
    app.add_url_rule(rule="/" + asset_path, endpoint=endpoint, view_func=asset_handler)
def override_template(*args, **kwargs):
    """
    Overrides a template with the provided html content.
    e.g. override_template('scoreboard.html', '<h1>scores</h1>')
    Thin wrapper around :func:`CTFd.utils.plugins.override_template`.
    """
    utils_override_template(*args, **kwargs)
def register_plugin_script(*args, **kwargs):
    """
    Adds a given script to the base.html template which all pages inherit from
    Thin wrapper around :func:`CTFd.utils.plugins.register_script`.
    """
    utils_register_plugin_script(*args, **kwargs)
def register_plugin_stylesheet(*args, **kwargs):
    """
    Adds a given stylesheet to the base.html template which all pages inherit from.
    Thin wrapper around :func:`CTFd.utils.plugins.register_stylesheet`.
    """
    utils_register_plugin_stylesheet(*args, **kwargs)
def register_admin_plugin_script(*args, **kwargs):
    """
    Adds a given script to the base.html of the admin theme which all admin pages inherit from
    Thin wrapper around :func:`CTFd.utils.plugins.register_admin_script`.
    :param args: Positional arguments forwarded unchanged
    :param kwargs: Keyword arguments forwarded unchanged
    :return:
    """
    utils_register_admin_plugin_script(*args, **kwargs)
def register_admin_plugin_stylesheet(*args, **kwargs):
    """
    Adds a given stylesheet to the base.html of the admin theme which all admin pages inherit from
    Thin wrapper around :func:`CTFd.utils.plugins.register_admin_stylesheet`.
    :param args: Positional arguments forwarded unchanged
    :param kwargs: Keyword arguments forwarded unchanged
    :return:
    """
    utils_register_admin_plugin_stylesheet(*args, **kwargs)
def register_admin_plugin_menu_bar(title, route):
    """
    Registers links on the Admin Panel menubar/navbar

    :param title: A string that is shown on the navbar HTML
    :param route: A string that is the href used by the link
    :return:
    """
    app.admin_plugin_menu_bar.append(Menu(title=title, route=route))
def get_admin_plugin_menu_bar():
    """
    Access the list used to store the plugin menu bar
    :return: Returns a list of Menu namedtuples. They have title, and route attributes.
    """
    return app.admin_plugin_menu_bar
def register_user_page_menu_bar(title, route):
    """
    Registers links on the User side menubar/navbar

    :param title: A string that is shown on the navbar HTML
    :param route: A string that is the href used by the link
    :return:
    """
    app.plugin_menu_bar.append(Menu(title=title, route=route))
def get_user_page_menu_bar():
    """
    Access the list used to store the user page menu bar: configured pages
    followed by plugin-registered entries.
    :return: Returns a list of Menu namedtuples. They have title, and route attributes.
    """
    return get_pages() + app.plugin_menu_bar
def bypass_csrf_protection(f):
    """
    Decorator that allows a route to bypass the need for a CSRF nonce on POST requests.

    This should be considered beta and may change in future versions.

    :param f: A function that needs to bypass CSRF protection
    :return: Returns a function with the _bypass_csrf attribute set which tells CTFd to not require CSRF protection.
    """
    setattr(f, "_bypass_csrf", True)
    return f
def get_plugin_names():
    """Return the names of plugin packages found under ``app.plugins_dir``.

    Non-directory entries and ``__pycache__`` are skipped; results follow
    the sorted order of the directory listing.
    """
    ignored = {"__pycache__"}
    return [
        os.path.basename(entry)
        for entry in sorted(glob.glob(app.plugins_dir + "/*"))
        if os.path.isdir(entry) and os.path.basename(entry) not in ignored
    ]
def init_plugins(app):
    """
    Searches for the load function in modules in the CTFd/plugins folder. This function is called with the current CTFd
    app as a parameter. This allows CTFd plugins to modify CTFd's behavior.
    :param app: A CTFd application
    :return:
    """
    # Reset the per-app registries that the register_* helpers append to.
    app.admin_plugin_scripts = []
    app.admin_plugin_stylesheets = []
    app.plugin_scripts = []
    app.plugin_stylesheets = []
    app.admin_plugin_menu_bar = []
    app.plugin_menu_bar = []
    app.plugins_dir = os.path.dirname(__file__)
    # SAFE_MODE skips plugin loading entirely.
    if app.config.get("SAFE_MODE", False) is False:
        for plugin in get_plugin_names():
            module = "." + plugin
            # Each plugin package must expose a load(app) entry point.
            module = importlib.import_module(module, package="CTFd.plugins")
            module.load(app)
            print(" * Loaded module, %s" % module)
    # Expose the menu-bar accessors to all Jinja templates.
    app.jinja_env.globals.update(get_admin_plugin_menu_bar=get_admin_plugin_menu_bar)
    app.jinja_env.globals.update(get_user_page_menu_bar=get_user_page_menu_bar)
| 31.870647 | 121 | 0.688417 |
ace6bdef76ee6a1f8a5a188a6a96afaef23c3e94 | 1,595 | py | Python | tests/test_request_body_parameters_media_type.py | Aryabhata-Rootspring/fastapi | f6237ad05a8468ac19c591181adad38d75372c46 | [
"MIT"
] | 53,007 | 2018-12-08T10:05:29.000Z | 2022-03-31T23:30:02.000Z | tests/test_request_body_parameters_media_type.py | Aryabhata-Rootspring/fastapi | f6237ad05a8468ac19c591181adad38d75372c46 | [
"MIT"
] | 4,155 | 2019-01-05T05:07:49.000Z | 2022-03-31T21:25:38.000Z | tests/test_request_body_parameters_media_type.py | Aryabhata-Rootspring/fastapi | f6237ad05a8468ac19c591181adad38d75372c46 | [
"MIT"
] | 4,092 | 2018-12-09T16:21:00.000Z | 2022-03-31T07:59:45.000Z | import typing
from fastapi import Body, FastAPI
from fastapi.testclient import TestClient
from pydantic import BaseModel
# Application under test: exercises Body(..., media_type=...) with a custom
# (JSON:API-style) media type.
app = FastAPI()
media_type = "application/vnd.api+json"
# NOTE: These are not valid JSON:API resources
# but they are fine for testing requestBody with custom media_type
class Product(BaseModel):
    # Request body model for POST /products.
    name: str
    price: float
class Shop(BaseModel):
    # Request body model for POST /shops.
    name: str
@app.post("/products")
# Endpoint exists only so its generated OpenAPI requestBody can be checked;
# embed=True nests the model under a "data" key in the body schema.
async def create_product(data: Product = Body(..., media_type=media_type, embed=True)):
    pass  # pragma: no cover
@app.post("/shops")
# Two body parameters with the same custom media type; `included` defaults
# to an empty list so it is optional in the request body.
async def create_shop(
    data: Shop = Body(..., media_type=media_type),
    included: typing.List[Product] = Body([], media_type=media_type),
):
    pass  # pragma: no cover
# Expected OpenAPI requestBody fragments: the generated schema refs must be
# keyed under the custom media type instead of the default application/json.
create_product_request_body = {
    "content": {
        "application/vnd.api+json": {
            "schema": {"$ref": "#/components/schemas/Body_create_product_products_post"}
        }
    },
    "required": True,
}
create_shop_request_body = {
    "content": {
        "application/vnd.api+json": {
            "schema": {"$ref": "#/components/schemas/Body_create_shop_shops_post"}
        }
    },
    "required": True,
}
client = TestClient(app)
def test_openapi_schema():
    """Verify each endpoint's requestBody uses the custom media type."""
    response = client.get("/openapi.json")
    assert response.status_code == 200, response.text
    paths = response.json()["paths"]
    assert paths["/products"]["post"]["requestBody"] == create_product_request_body
    assert paths["/shops"]["post"]["requestBody"] == create_shop_request_body
ace6be709ba5d67b4364d37e82a7d0893e35d782 | 4,162 | py | Python | scripts/volumeSetRotation.py | voxie-viewer/voxie | d2b5e6760519782e9ef2e51f5322a3baa0cb1198 | [
"MIT"
] | 4 | 2016-06-03T18:41:43.000Z | 2020-04-17T20:28:58.000Z | scripts/volumeSetRotation.py | voxie-viewer/voxie | d2b5e6760519782e9ef2e51f5322a3baa0cb1198 | [
"MIT"
] | null | null | null | scripts/volumeSetRotation.py | voxie-viewer/voxie | d2b5e6760519782e9ef2e51f5322a3baa0cb1198 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
#
# Copyright (c) 2014-2022 The Voxie Authors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import numpy as np
import voxie
import sys
# TODO: What happens if this is not a pure rotation matrix?
def rotMatToQuat(mat):
# https://stackoverflow.com/questions/32208838/rotation-matrix-to-quaternion-equivalence
# get the real part of the quaternion first
r = np.math.sqrt(float(1) + mat[0, 0] + mat[1, 1] + mat[2, 2]) * 0.5
i = (mat[2, 1] - mat[1, 2]) / (4 * r)
j = (mat[0, 2] - mat[2, 0]) / (4 * r)
k = (mat[1, 0] - mat[0, 1]) / (4 * r)
return tuple(map(float, (r, i, j, k)))
parser = voxie.parser
parser.add_argument('--quaternion')
parser.add_argument('--matrix4')
parser.add_argument('--matrix4-ref')
args = parser.parse_args()
context = voxie.VoxieContext(args)
instance = context.createInstance()
obj = instance.Gui.SelectedObjects[0]
# oldRot = obj.GetProperty('de.uni_stuttgart.Voxie.MovableDataNode.Rotation').getValue('(dddd)')
# print (fov)
if args.quaternion is not None:
rot = tuple(map(float, args.quaternion.split(' ')))
obj.SetProperty('de.uni_stuttgart.Voxie.MovableDataNode.Rotation',
voxie.Variant('(dddd)', rot))
elif args.matrix4 is not None:
refPos = np.array([0, 0, 0])
if args.matrix4_ref is not None:
matrix_ref = tuple(map(float, args.matrix4_ref.split(' ')))
if len(matrix_ref) != 16:
raise Exception(
'Invalid number of ref matrix values: ' + repr(len(matrix_ref)))
matrix_ref = np.array(matrix_ref).reshape((4, 4), order='F')
if np.any(matrix_ref[3] != [0, 0, 0, 1]):
raise Exception(
'Last line of ref matrix != [0, 0, 0, 1]: ' + repr(matrix_ref[3]))
if np.any(matrix_ref[0:3, 0:3] != [[1, 0, 0], [0, 1, 0], [0, 0, 1]]):
raise Exception('ref matrix has rotation: ' +
repr(matrix_ref[0:3, 0:3]))
refPos = matrix_ref[0:3, 3]
# print (refPos)
matrix = tuple(map(float, args.matrix4.split(' ')))
if len(matrix) != 16:
raise Exception('Invalid number of matrix values: ' +
repr(len(matrix)))
matrix = np.array(matrix).reshape((4, 4), order='F')
if np.any(matrix[3] != [0, 0, 0, 1]):
raise Exception(
'Last line of matrix != [0, 0, 0, 1]: ' + repr(matrix[3]))
rotMat = matrix[0:3, 0:3]
trans = matrix[0:3, 3]
# print (trans)
# print (rotMat)
rot = rotMatToQuat(rotMat)
# TODO: ?
# Seems like VGStudio ignores the origin of volumes anyway
trans = voxie.Rotation(rot).inverse * (trans)
# print (rot)
# print (trans)
trans = tuple(-np.array(trans))
rot = tuple(map(float, voxie.Rotation(rot).inverse.quaternion.value))
# print (rot)
# print (trans)
# b = np.array((0.00025778799317777157, -0.0016201899852603674, 0.0007827000226825476))
# print (trans / b)
# TODO: Determine translation
obj.SetProperty('de.uni_stuttgart.Voxie.MovableDataNode.Rotation',
voxie.Variant('(dddd)', rot))
else:
print('No rotation given')
| 37.836364 | 96 | 0.645363 |
ace6be891e0190053c4f55873b38376602450783 | 615 | py | Python | entity_framework/tests/storages/sqlalchemy/conftest.py | Enforcer/python_entity_framework | 015221c068c23834aa3663c6239947c01e38f40a | [
"MIT"
] | 17 | 2019-03-04T14:53:29.000Z | 2021-11-24T17:41:55.000Z | entity_framework/tests/storages/sqlalchemy/conftest.py | Enforcer/python_entity_framework | 015221c068c23834aa3663c6239947c01e38f40a | [
"MIT"
] | null | null | null | entity_framework/tests/storages/sqlalchemy/conftest.py | Enforcer/python_entity_framework | 015221c068c23834aa3663c6239947c01e38f40a | [
"MIT"
] | 1 | 2021-08-16T13:31:23.000Z | 2021-08-16T13:31:23.000Z | from typing import Generator
import pytest
from sqlalchemy.engine import Engine
from sqlalchemy.orm import sessionmaker, Session
from sqlalchemy.ext.declarative import DeclarativeMeta, declarative_base
@pytest.fixture()
def sa_base() -> DeclarativeMeta:
return declarative_base()
@pytest.fixture()
def session(sa_base: DeclarativeMeta, engine: Engine) -> Generator[Session, None, None]:
sa_base.metadata.drop_all(engine)
sa_base.metadata.create_all(engine)
session_factory = sessionmaker(engine)
yield session_factory()
session_factory.close_all()
sa_base.metadata.drop_all(engine)
| 27.954545 | 88 | 0.785366 |
ace6beed04aa6ed61ba1882b84115667be208e3a | 1,789 | py | Python | tests/test_action.py | branasingh/stackstorm-servicenow | 0e659d493dde4563ddbb49408dffefe6d8e8e00e | [
"Apache-2.0"
] | 12 | 2017-12-18T02:44:13.000Z | 2020-09-10T16:44:45.000Z | tests/test_action.py | branasingh/stackstorm-servicenow | 0e659d493dde4563ddbb49408dffefe6d8e8e00e | [
"Apache-2.0"
] | 12 | 2017-03-29T19:49:18.000Z | 2022-03-23T14:34:33.000Z | tests/test_action.py | branasingh/stackstorm-servicenow | 0e659d493dde4563ddbb49408dffefe6d8e8e00e | [
"Apache-2.0"
] | 20 | 2017-03-29T15:38:23.000Z | 2022-01-24T19:53:03.000Z | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import yaml
import sys
sys.modules['pysnow'] = mock.Mock()
from st2tests.base import BaseActionTestCase
from get_non_structured import GetNonStructuredAction
class ServiceNowActionTestCase(BaseActionTestCase):
    # Action class under test (pysnow is mocked out at import time above).
    action_cls = GetNonStructuredAction
    def setUp(self):
        super(ServiceNowActionTestCase, self).setUp()
        # Load the full pack config fixture once per test.
        self._full_config = self.load_yaml('full.yaml')
    def load_yaml(self, filename):
        # Helper: parse a YAML fixture shipped with the test pack.
        return yaml.safe_load(self.get_fixture_content(filename))
    @property
    def full_config(self):
        return self._full_config
    def test_get_instance_with_config(self):
        # Instantiation should succeed when a config dict is supplied.
        self.get_action_instance(self.full_config)
    def test_get_instance_without_config(self):
        # Instantiation without a config must raise a ValueError mentioning
        # the missing pack config.
        # Use try/except as self.assertRaises wasn't matching
        try:
            self.get_action_instance(None)
            self.fail("Expection exception not thrown")
        except ValueError as e:
            self.assertTrue('Config for pack "tests" is missing' in e.args[0],
                            e.args)
ace6bf3c533700b7471f658d338cd9bfaf0252b0 | 2,398 | py | Python | tests/graph/test_graph_team.py | stardust85/Office365-REST-Python-Client | cd369c607c7d137a000734e9c5e8f03ae3e3c603 | [
"MIT"
] | null | null | null | tests/graph/test_graph_team.py | stardust85/Office365-REST-Python-Client | cd369c607c7d137a000734e9c5e8f03ae3e3c603 | [
"MIT"
] | null | null | null | tests/graph/test_graph_team.py | stardust85/Office365-REST-Python-Client | cd369c607c7d137a000734e9c5e8f03ae3e3c603 | [
"MIT"
] | null | null | null | import uuid
from office365.graph.directory.group import Group
from office365.graph.directory.groupProfile import GroupProfile
from tests.graph.graph_case import GraphTestCase
class TestGraphTeam(GraphTestCase):
    """Tests for teams"""
    # Unified group created once per class; tests run in name order
    # (test2..test6) and share this group.
    target_group = None  # type: Group
    @classmethod
    def setUpClass(cls):
        super(TestGraphTeam, cls).setUpClass()
        # Create a uniquely named, mail-enabled Unified group to attach the
        # team to.
        grp_name = "Group_" + uuid.uuid4().hex
        properties = GroupProfile(grp_name)
        properties.securityEnabled = False
        properties.mailEnabled = True
        properties.groupTypes = ["Unified"]
        cls.target_group = cls.client.groups.add(properties)
        cls.client.execute_query()
    def test2_ensure_team(self):
        # Add a team to the group only if one does not already exist.
        teams = self.client.me.joinedTeams.filter("id eq '{0}'".format(self.__class__.target_group.id))
        self.client.load(teams)
        self.client.execute_query()
        self.assertIsNotNone(teams.resource_path)
        if len(teams) == 0:
            new_team = self.__class__.target_group.add_team()
            self.client.execute_query()
            self.assertIsNotNone(new_team)
        else:
            self.assertEqual(len(teams), 1)
    def test3_get_team(self):
        # Fetch the team and make sure it is not archived afterwards.
        group_id = self.__class__.target_group.id
        existing_team = self.client.teams[group_id]
        self.client.load(existing_team)
        self.client.execute_query()
        self.assertIsNotNone(existing_team.resource_url)
        self.assertIsNotNone(existing_team.messagingSettings)
        if existing_team.properties["isArchived"]:
            existing_team.unarchive()
            self.client.load(existing_team)
            self.client.execute_query()
            self.assertFalse(existing_team.properties["isArchived"])
    def test4_update_team(self):
        # Toggle a team setting and push the update.
        group_id = self.__class__.target_group.properties['id']
        team_to_update = self.client.teams[group_id]
        team_to_update.funSettings.allowGiphy = False
        team_to_update.update()
        self.client.execute_query()
    def test5_archive_team(self):
        group_id = self.__class__.target_group.id
        team_to_archive = self.client.teams[group_id]
        team_to_archive.archive()
        self.client.execute_query()
    def test6_delete_group_with_team(self):
        # Permanently delete the group (and its team) created in setUpClass.
        grp_to_delete = self.__class__.target_group
        grp_to_delete.delete_object(True)
        self.client.execute_query()
ace6bf55261c74aebf6fe9df341667a3d9ca1d9e | 5,120 | py | Python | adm/telegraf_collector_metwork_module.py | frdcms/mfext | c4df02183a5b6b65746bf10eb5f2ff7480e9b216 | [
"BSD-3-Clause"
] | 3 | 2020-01-10T08:29:27.000Z | 2021-12-13T21:33:12.000Z | adm/telegraf_collector_metwork_module.py | frdcms/mfext | c4df02183a5b6b65746bf10eb5f2ff7480e9b216 | [
"BSD-3-Clause"
] | 699 | 2018-06-01T13:43:37.000Z | 2022-03-30T12:12:43.000Z | adm/telegraf_collector_metwork_module.py | frdcms/mfext | c4df02183a5b6b65746bf10eb5f2ff7480e9b216 | [
"BSD-3-Clause"
] | 4 | 2018-09-28T14:18:01.000Z | 2019-10-02T12:54:06.000Z | #!/usr/bin/env python
# inspired from
# https://github.com/monitoring-tools/telegraf-plugins/tree/master/netstat
import os
import sys
import time
import json
import fnmatch
from telegraf_unixsocket_client import TelegrafUnixSocketClient
from mflog import getLogger
from mfutil import BashWrapper
import mfplugin.compat
import distro
# Runtime configuration pulled from the MetWork environment.
MFMODULE_RUNTIME_HOME = os.environ["MFMODULE_RUNTIME_HOME"]
# Unix socket on which the local telegraf agent listens.
SOCKET_PATH = os.path.join(MFMODULE_RUNTIME_HOME, "var", "telegraf.socket")
LOGGER = getLogger("telegraf_collector_metwork_module")
MFEXT_VERSION = os.environ["MFEXT_VERSION"]
MFMODULE_VERSION = os.environ["MFMODULE_VERSION"]
MFMODULE = os.environ['MFMODULE']
# Command producing the JSON process list consumed by get_stats().
CMD = "list_metwork_processes.py --output-format=json --include-current-family"
# Command-line globs that identify monitoring-stack processes.
MONITORING_CMDLINE_PATTERNS = ['*telegraf*', '*list_metwork_processes*',
                               '*vector*']
IS_MONITORING_MODULE = (MFMODULE in ['MFSYSMON', 'MFADMIN'])
IS_LINUX = sys.platform.startswith("linux")
OS_NAME = distro.name(pretty=True) if IS_LINUX else "unknown"
def is_cmdline_monitoring(cmdline):
    """Return True when the command line belongs to the monitoring stack.

    Inside the monitoring modules (MFSYSMON/MFADMIN) every process counts as
    monitoring; elsewhere only command lines matching one of the
    MONITORING_CMDLINE_PATTERNS globs do.
    """
    if IS_MONITORING_MODULE:
        return True
    return any(fnmatch.fnmatch(cmdline, pattern)
               for pattern in MONITORING_CMDLINE_PATTERNS)
def get_stats():
    """Aggregate per-plugin process stats from list_metwork_processes.py.

    Returns a dict mapping plugin name (plus the synthetic '#monitoring#'
    and, outside monitoring modules, '#core#' buckets) to summed
    mem_percent/num_threads/cpu_percent/num_fds, or None when the command
    fails or its output is not valid JSON.
    """
    stats = {}
    results = BashWrapper(CMD)
    if not results:
        LOGGER.warning("can't execute %s: %s" % (CMD, results))
        return None
    try:
        processes = json.loads(results.stdout)
    except Exception:
        LOGGER.warning("can't parse %s output as JSON" % CMD)
        return None
    # Distinct real plugin names, plus synthetic buckets for monitoring
    # processes and (outside monitoring modules) core processes.
    plugins = set([x['plugin'] for x in processes if x['plugin'] != ''])
    plugins.add('#monitoring#')
    if not IS_MONITORING_MODULE:
        plugins.add('#core#')
    for plugin in plugins:
        if plugin not in stats:
            stats[plugin] = {}
        for key in ('mem_percent', 'num_threads', 'cpu_percent', 'num_fds'):
            # Synthetic buckets ('#...#') aggregate processes with an empty
            # plugin field; '#monitoring#' takes the monitoring processes and
            # '#core#' the rest.
            search_plugin = plugin if not plugin.startswith('#') else ''
            if plugin != '#monitoring#':
                stats[plugin][key] = \
                    sum([x[key] for x in processes
                         if x['plugin'] == search_plugin and
                         not is_cmdline_monitoring(x['cmdline'])])
            else:
                stats[plugin][key] = \
                    sum([x[key] for x in processes
                         if x['plugin'] == search_plugin and
                         is_cmdline_monitoring(x['cmdline'])])
    return stats
def get_versions():
    """Return mfext/module version strings plus the pretty OS name."""
    versions = {}
    versions["mfext_version"] = MFEXT_VERSION
    versions["version"] = MFMODULE_VERSION
    versions["os_name"] = OS_NAME
    return versions
def get_plugins():
    """Return installed plugins for modules that support them, else {}.

    Any failure from mfplugin is swallowed on purpose (best effort).
    """
    if MFMODULE not in ("MFSERV", "MFDATA", "MFBASE"):
        return {}
    try:
        return mfplugin.compat.get_installed_plugins()
    except Exception:
        return {}
def get_status():
    """Read MFMODULE_RUNTIME_HOME/var/status and map it to a numeric code.

    Codes: running -> 2, error/unknown -> 0, anything else -> 1.  Any read
    failure is swallowed and reported as ("unknown", 0).
    """
    status = "unknown"
    status_code = 0
    try:
        with open("%s/var/status" % MFMODULE_RUNTIME_HOME, "r") as f:
            status = f.read().strip().lower()
        if status == "running":
            status_code = 2
        elif status in ("error", "unknown"):
            status_code = 0
        else:
            status_code = 1
    except Exception:
        pass
    return {"status": status, "status_code": status_code}
# Main collector loop: every 10 seconds, connect to the local telegraf unix
# socket and push per-plugin process stats, version/status measurements and
# installed-plugin versions.
while True:
    LOGGER.debug("waiting 10s...")
    time.sleep(10)
    client = TelegrafUnixSocketClient(SOCKET_PATH)
    try:
        client.connect()
    except Exception:
        # Telegraf may not be up yet; retry on the next iteration.
        LOGGER.warning("can't connect to %s, wait 10s and try again...",
                       SOCKET_PATH)
        continue
    stats = get_stats()
    if stats:
        # One measurement per plugin bucket with the aggregated stats.
        for plugin, fields_dict in stats.items():
            msg = client.send_measurement("metwork_module", fields_dict,
                                          extra_tags={"plugin": plugin})
            LOGGER.debug("sended msg: %s" % msg)
    versions = get_versions()
    status = get_status()
    # mfext version (always reported as status "ok").
    msg = client.send_measurement(
        "metwork_version", {"version": versions["mfext_version"],
                            "status": "ok",
                            "os_name": versions["os_name"]},
        extra_tags={"bypassbasicstats": "1", "modname": "mfext"})
    LOGGER.debug("sended msg: %s" % msg)
    # Current module version with its real status/status_code.
    msg = client.send_measurement(
        "metwork_version", {"version": versions["version"],
                            "status": status["status"],
                            "os_name": versions["os_name"],
                            "status_code": status["status_code"]},
        extra_tags={"bypassbasicstats": "1", "modname": MFMODULE.lower()})
    LOGGER.debug("sended msg: %s" % msg)
    plugins = get_plugins()
    # One measurement per installed plugin (MFSERV/MFDATA/MFBASE only).
    for plugin in plugins:
        tags = {"plugin_name": plugin["name"],
                "bypassbasicstats": "1"}
        msg = client.send_measurement("metwork_plugin",
                                      {"version": plugin["version"],
                                       "release": plugin["release"]},
                                      extra_tags=tags)
        LOGGER.debug("sended msg: %s" % msg)
    client.close()
ace6bf77c1ed4cdc9059287eefc8c7cbc8dd014e | 966 | py | Python | conda/models/leased_path_entry.py | sakibguy/conda | 892f9f6a196312765b515b0db53a5558566fe456 | [
"BSD-3-Clause"
] | null | null | null | conda/models/leased_path_entry.py | sakibguy/conda | 892f9f6a196312765b515b0db53a5558566fe456 | [
"BSD-3-Clause"
] | 3 | 2022-03-03T02:36:53.000Z | 2022-03-03T02:42:50.000Z | conda/models/leased_path_entry.py | sakibguy/conda | 892f9f6a196312765b515b0db53a5558566fe456 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (C) 2012 Anaconda, Inc
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import absolute_import, division, print_function, unicode_literals
from logging import getLogger
from .enums import LeasedPathType
from ..auxlib.entity import Entity, EnumField, StringField
log = getLogger(__name__)
class LeasedPathEntry(Entity):
    """
    _path: short path for the leased path, using forward slashes
    target_path: the full path to the executable in the private env
    target_prefix: the full path to the private environment
    leased_path: the full path for the lease in the root prefix
    package_name: the package holding the lease
    leased_path_type: application_entry_point
    """
    _path = StringField()
    target_path = StringField()
    target_prefix = StringField()
    leased_path = StringField()
    package_name = StringField()
    # Restricted to the LeasedPathType enum values.
    leased_path_type = EnumField(LeasedPathType)
ace6c0023cf6c384a8af8cc0dd8c30d89aebedd6 | 5,165 | py | Python | sisense_monitor/main.py | marcomasulli/sisense-monitor | 482ba6252ccf55785caea9aa3fe670b02be139bc | [
"MIT"
] | null | null | null | sisense_monitor/main.py | marcomasulli/sisense-monitor | 482ba6252ccf55785caea9aa3fe670b02be139bc | [
"MIT"
] | null | null | null | sisense_monitor/main.py | marcomasulli/sisense-monitor | 482ba6252ccf55785caea9aa3fe670b02be139bc | [
"MIT"
] | null | null | null | from .database import session, FailedBuilds
from .config import Config
from urllib.parse import urljoin
from datetime import datetime, timedelta
import requests
import pandas as pd
import traceback
from time import sleep
def record_failure(oid, datamodel_id, datamodel_title, instance_id):
    """Persist a failed Sisense build in the local database.

    :param oid: Unique build identifier from the Sisense builds API.
    :param datamodel_id: Id of the datamodel whose build failed.
    :param datamodel_title: Human-readable datamodel name.
    :param instance_id: Sisense instance the build ran on.
    :return: True when the row was committed, False on any failure
        (previously the failure path implicitly returned None).
    """
    try:
        new_failed_build = FailedBuilds(
            oid=oid,
            datamodel_id=datamodel_id,
            datamodel_title=datamodel_title,
            instance_id=instance_id,
        )
        session.add(new_failed_build)
        session.commit()
        print("Failed build recorded in db")
        return True
    except Exception:
        # Roll back so a failed flush/commit does not leave the shared
        # session in a broken state for later calls.
        session.rollback()
        # traceback.print_exc() writes to stderr itself; wrapping it in
        # print() used to emit a spurious "None" line.
        traceback.print_exc()
        return False
def save_log_to_sp(
    log,
    datamodel_title,
    ts,
    site_url=Config.SP_ROOT_URL,
    client_id=Config.SP_ID,
    client_secret=Config.SP_SECRET,
    target_folder_name="Shared Documents/Sisense Monitor/BuildLogs",
):
    """Upload a build log (as pretty-printed JSON) to a SharePoint folder.

    :param log: JSON-serializable build log (list/dict from the Sisense API).
    :param datamodel_title: Datamodel name, used in the uploaded filename.
    :param ts: Timestamp string, used in the uploaded filename.
    :return: The server-relative URL of the uploaded file, or None on failure.
    """
    # Imports kept local so the module can load without office365 installed.
    from office365.runtime.auth.user_credential import UserCredential
    from office365.sharepoint.client_context import ClientContext
    from office365.runtime.auth.authentication_context import AuthenticationContext
    import json
    # connect to SP
    context_auth = AuthenticationContext(url=site_url)
    context_auth.acquire_token_for_app(client_id=client_id, client_secret=client_secret)
    ctx = ClientContext(site_url, context_auth)
    web = ctx.web
    ctx.load(web)
    ctx.execute_query()
    target_folder = web.get_folder_by_server_relative_url(target_folder_name)
    filename = f"{datamodel_title} {ts} buildlog.json"
    try:
        target_file = target_folder.upload_file(
            filename, bytes(json.dumps(log, indent=4), encoding="utf-8")
        )
        ctx.execute_query()
        print("OK - Log saved to SP")
        return target_file.serverRelativeUrl
    except Exception as e:
        # Best effort: log the traceback and return None so the caller can
        # still build its notification card (with a missing link).
        print(traceback.print_exc())
        return
def get_logs(datamodel_id, datamodel_title):
    """Fetch a datamodel's build log, archive it and extract the first error.

    Downloads the build log from the Sisense API, saves the full log to
    SharePoint and returns a dict with 'timestamp', 'error_message' and
    'file_link' for the FIRST entry with verbosity "Error".  Returns None
    implicitly when the log contains no error entries.
    """
    # get log
    log = requests.get(
        f"{Config.SISENSE_URL}/v1/elasticubes/{datamodel_id}/buildLogs",
        headers=Config.SISENSE_HEADERS,
    )
    # convert to json
    json_log = log.json()
    # get ts and error message
    for l in json_log:
        if "verbosity" in l.keys():
            if l.get("verbosity") == "Error":
                # Fallbacks for missing fields in the log entry.
                ts = l.get("timestamp") or "1900-01-01T00:00:00.00000"
                print(ts)
                error_message = l.get("message") or "No error message"
                print(error_message)
                # transform ts to string
                ts_dt = datetime.strptime(ts, "%Y-%m-%dT%H:%M:%S.%fZ")
                ts_filestring = ts_dt.strftime("%Y-%m-%dT%H%M")
                save_link = save_log_to_sp(json_log, datamodel_title, ts_filestring)
                ts_cardstring = ts_dt.strftime("%Y-%m-%d %H:%M:%S")
                error_dict = {
                    "timestamp": ts_cardstring,
                    "error_message": error_message,
                    "file_link": save_link,
                }
                print(error_dict)
                # Only the first error is reported.
                return error_dict
def make_teams_card(datamodel_name, ts, error_message, save_link):
    """Build a Teams MessageCard payload announcing a failed cube build."""
    facts = [
        {"name": "TimeStamp", "value": f"{ts}"},
        {"name": "Error Log", "value": f"{error_message}"},
        {"name": "Full Log Link", "value": f"{save_link}"},
    ]
    section = {
        "activityTitle": f"New Failed Build: {datamodel_name}",
        "facts": facts,
        "markdown": False,
    }
    return {
        "@type": "MessageCard",
        "@context": "http://schema.org/extensions",
        "themeColor": "0076D7",
        "summary": f"New Failed Cube: {datamodel_name}",
        "sections": [section],
    }
def send_teams_card(card_json):
    """POST a MessageCard payload to the configured Teams webhook.

    :return: The requests.Response from the webhook call.
    """
    headers = {"Content-Type": "application/json"}
    return requests.post(Config.TEAMS_CONNECTOR_URL,
                         headers=headers,
                         json=card_json)
def check_builds():
    """Poll the Sisense builds API and report newly failed builds.

    Every failed build not yet recorded is saved to the local database, its
    full log is archived to SharePoint and a Teams card is sent.

    :return: The error dict of the last newly-recorded failure, or None when
        no new failure was found.  (Previously `error_dict` could be unbound
        at the final return, raising NameError when there was nothing new.)
    """
    response = requests.get(
        url=urljoin(Config.SISENSE_URL, "v2/builds"), headers=Config.SISENSE_HEADERS
    )
    builds = pd.DataFrame(data=response.json())
    failed_builds = builds.loc[(builds.status == "failed")]
    # Stays None when every failure has already been recorded.
    error_dict = None
    # for each failed cube:
    for build in failed_builds.to_dict(orient="records"):
        # check if failed cube is already recorded (oid), if not record
        recorded_failure = (
            session.query(FailedBuilds).filter(FailedBuilds.oid == build["oid"]).first()
        )
        if recorded_failure is None:
            # record
            record_failure(
                build["oid"],
                build["datamodelId"],
                build["datamodelTitle"],
                build["instanceId"],
            )
            # save log and get elements for log card
            # NOTE(review): get_logs returns None when the log has no Error
            # entries -- the card build below would then raise; confirm logs
            # of failed builds always contain an Error entry.
            error_dict = get_logs(build["datamodelId"], build["datamodelTitle"])
            # prepare card (so look into log)
            card = make_teams_card(
                build["datamodelTitle"],
                error_dict["timestamp"],
                error_dict["error_message"],
                error_dict["file_link"],
            )
            # send card
            send_teams_card(card)
    return error_dict
ace6c0ebf53c3a0dceacb882b4790d9a1f9528ee | 35 | py | Python | config.py | koitoror/MyDiary | 0b21582a758ecdac38f395c36982816a8fb4b399 | [
"MIT"
] | 1 | 2019-11-04T20:50:42.000Z | 2019-11-04T20:50:42.000Z | config.py | koitoror/MyDiary | 0b21582a758ecdac38f395c36982816a8fb4b399 | [
"MIT"
] | 4 | 2018-07-25T14:12:53.000Z | 2019-11-04T20:55:16.000Z | config.py | koitoror/MyDiary | 0b21582a758ecdac38f395c36982816a8fb4b399 | [
"MIT"
] | null | null | null | class BaseConfig:
DEBUG = True
| 11.666667 | 17 | 0.685714 |
ace6c11ef8ea57ee13cc68e5b04c2ffbd3d5b9f5 | 30,605 | py | Python | hoomd/hpmc/update.py | spraharsh/hoomd-blue | 11d9b841bc249b1a3cca70195eabe4eedd6c758d | [
"BSD-3-Clause"
] | null | null | null | hoomd/hpmc/update.py | spraharsh/hoomd-blue | 11d9b841bc249b1a3cca70195eabe4eedd6c758d | [
"BSD-3-Clause"
] | null | null | null | hoomd/hpmc/update.py | spraharsh/hoomd-blue | 11d9b841bc249b1a3cca70195eabe4eedd6c758d | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2009-2022 The Regents of the University of Michigan.
# Part of HOOMD-blue, released under the BSD 3-Clause License.
"""HPMC updaters.
HPMC updaters work with the `hpmc.integrate.HPMCIntegrator` to apply changes to
the system consistent with the particle shape and defined interaction energies.
The `BoxMC`, `Clusters`, and `MuVT` updaters apply trial moves that enable
enhanced sampling or the equilibration of different ensembles. `QuickCompress`
helps prepare non-overlapping configurations of particles in a given box shape.
"""
from . import _hpmc
from . import integrate
from hoomd import _hoomd
from hoomd.logging import log
from hoomd.data.parameterdicts import TypeParameterDict, ParameterDict
from hoomd.data.typeparam import TypeParameter
import hoomd.data.typeconverter
from hoomd.operation import Updater
import hoomd
class BoxMC(Updater):
r"""Apply box updates to sample isobaric and related ensembles.
Args:
betaP (`float` or :py:mod:`hoomd.variant.Variant`):
:math:`\frac{p}{k_{\mathrm{B}}T}` :math:`[\mathrm{length}^{-2}]`
in 2D or :math:`[\mathrm{length}^{-3}]` in 3D.
trigger (hoomd.trigger.Trigger): Select the timesteps to perform box
trial moves.
Use `BoxMC` in conjunction with an HPMC integrator to allow the simulation
box to undergo random fluctuations at constant pressure, or random
deformations at constant volume. `BoxMC` supports both isotropic and
anisotropic volume change moves as well as shearing of the simulation box. A
single `BoxMC` instance may apply multiple types of box moves during a
simulation run.
.. rubric:: Box move types
By default, no moves are applied (the *weight* values for
all move types default to 0). In a given timestep, the type of move is
selected randomly with probability:
.. math::
p = \frac{w_k}{\sum_k w_k}
where :math:`w_k` is the weight of the move type.
A given box move proposes a trial simulation box :math:`(L_x^t, L_y^t,
L_z^t, xy^t, xz^t, yz^t)` as a change from the current box: :math:`(L_x,
L_y, L_z, xy, xz, yz)`. The form of the change depends on the selected
move type:
* `volume` (``mode='standard'``): Change the volume (or area in 2D) of the
simulation box while maining fixed aspect ratios :math:`Lx/Ly`,
:math:`Lx/Lz`. In 3D:
.. math::
V^t &= V + u \\
L_x^t &= \left( \frac{Lx}{Ly} \frac{Lx}{Lz} V^t \right)^{1/3} \\
L_y^t &= L_x^t \frac{Ly}{Lx} \\
L_z^t &= L_x^t \frac{Lz}{Lx} \\
xy^t &= xy \\
xz^t &= xz \\
yz^t &= yz \\
where :math:`u` is a random value uniformly distributed in the interval
:math:`[-\delta_\mathrm{volume}, \delta_\mathrm{volume}]`.
In 2D:
.. math::
V^t &= V + u \\
L_x^t &= \left( \frac{Lx}{Ly} V^t \right)^{1/2} \\
L_y^t &= L_x^t \frac{Ly}{Lx} \\
xy^t &= xy \\
* `volume` (``mode='ln'``): Change the volume (or area in 2D) of the
simulation box while maining fixed aspect ratios :math:`Lx/Ly`,
:math:`Lx/Lz`. In 3D:
.. math::
V^t &= V e^u \\
L_x^t &= \left( \frac{Lx}{Ly} \frac{Lx}{Lz} V^t \right)^{1/3} \\
L_y^t &= L_x^t \frac{Ly}{Lx} \\
L_z^t &= L_x^t \frac{Lz}{Lx} \\
xy^t &= xy \\
xz^t &= xz \\
yz^t &= yz \\
where :math:`u` is a random value uniformly distributed in the interval
:math:`[-\delta_\mathrm{volume}, \delta_\mathrm{volume}]`.
In 2D:
.. math::
V^t &= V e^u \\
L_x^t &= \left( \frac{Lx}{Ly} V^t \right)^{1/2} \\
L_y^t &= L_x^t \frac{Ly}{Lx} \\
xy^t &= xy \\
* `aspect`: Change the aspect ratio of the simulation box while maintaining
a fixed volume. In 3D:
.. math::
L_k^t & = \begin{cases} L_k(1 + a) & u < 0.5 \\
L_k \frac{1}{1+a} & u \ge 0.5
\end{cases} \\
L_{m \ne k}^t & = L_m \sqrt{\frac{L_k}{L_k^t}} &
xy^t &= xy \\
xz^t &= xz \\
yz^t &= yz \\
where :math:`u` is a random value uniformly distributed in the interval
:math:`[0, 1]`, :math:`a` is a random value uniformly distributed in the
interval :math:`[0, \delta_\mathrm{aspect}]` and :math:`k` is randomly
chosen uniformly from the set :math:`\{x, y, z\}`.
In 2D:
.. math::
L_k^t & = \begin{cases} L_k(1 + a) & u < 0.5 \\
L_k \frac{1}{1+a} & u \ge 0.5
\end{cases} \\
L_{m \ne k}^t & = L_m \frac{L_k}{L_k^t} \\
xy^t &= xy \\
* `length`: Change the box lengths:
.. math::
L_k^t = L_k + u
where :math:`u` is a random value uniformly distributed in the interval
:math:`[-\delta_{\mathrm{length},k}, -\delta_{\mathrm{length},k}]`,
and :math:`k` is randomly chosen uniformly from the set
:math:`\{a : a \in \{x, y, z\}, \delta_{\mathrm{length},a} \ne 0 \}`.
* `shear`: Change the box shear parameters. In 3D:
.. math::
(xy^t, xz^t, yz^t) =
\begin{cases}
\left(xy + s_{xy},
\enspace xz,
\enspace yz \right) & u < \frac{1}{3} \\
\left( xy^t = xy,
\enspace xz + s_{xz},
\enspace yz \right) & \frac{1}{3} \le u < \frac{2}{3} \\
\left( xy^t = xy,
\enspace xz,
\enspace yz + s_{yz} \right) & \frac{2}{3} \le u \le 1 \\
\end{cases} \\
where :math:`u` is a random value uniformly distributed in the interval
:math:`[0, 1]` and :math:`s_k` is a random value uniformly distributed in
the interval :math:`[-\delta_{\mathrm{shear},k},
\delta_{\mathrm{shear},k}]`. `BoxMC` attempts and records trial moves for
shear parameters even when :math:`\delta_{\mathrm{shear},k}=0`.
In 2D:
.. math::
xy^t = xy + s_{xy}
.. rubric:: Acceptance
All particle particle positions are scaled into the trial box to form the
trial configuration :math:`C^t`:
.. math::
\vec{r}_i^t = s_x \vec{a}_1^t + s_y \vec{a}_2^t +
s_z \vec{a}_3^t -
\frac{\vec{a}_1^t + \vec{a}_2^t + \vec{a}_3^t}{2}
where :math:`\vec{a}_k^t` are the new box vectors determined by
:math:`(L_x^t, L_y^t, L_z^t, xy^t, xz^t, yz^t)` and the scale factors are
determined by the current particle position :math:`\vec{r}_i` and the box
vectors :math:`\vec{a}_k`:
.. math::
\vec{r}_i = s_x \vec{a}_1 + s_y \vec{a}_2 + s_z \vec{a}_3 -
\frac{\vec{a}_1 + \vec{a}_2 + \vec{a}_3}{2}
The trial move is accepted with the probability:
.. math::
p_\mathrm{accept} =
\begin{cases}
\exp(-(\beta \Delta H + \beta \Delta U)) &
\beta \Delta H + \beta \Delta U > 0 \\
1 & \beta \Delta H + \beta \Delta U \le 0 \\
\end{cases}
where :math:`\Delta U = U^t - U` is the difference in potential energy,
:math:`\beta \Delta H = \beta P (V^t - V) - N_\mathrm{particles} \cdot
\ln(V^t / V)` for most move types. It is :math:`\beta P (V^t - V) -
(N_\mathrm{particles}+1) \cdot \ln(V^t / V)` for ln volume moves.
When the trial move is accepted, the system state is set to the the trial
configuration. When it is not accepted, the move is rejected and the state
is not modified.
.. rubric:: Mixed precision
`BoxMC` uses reduced precision floating point arithmetic when checking
for particle overlaps in the local particle reference frame.
Attributes:
volume (dict):
Parameters for isobaric volume moves that scale the box lengths
uniformly. The dictionary has the following keys:
* ``weight`` (float) - Relative weight of volume box moves.
* ``mode`` (str) - ``standard`` proposes changes to the box volume
and ``ln`` proposes changes to the logarithm of the volume.
Initially starts off in 'standard' mode.
* ``delta`` (float) - Maximum change in **V** or **ln(V)** where V
is box area (2D) or volume (3D) :math:`\delta_\mathrm{volume}`.
aspect (dict):
Parameters for isovolume aspect ratio moves. The dictionary has the
following keys:
* ``weight`` (float) - Relative weight of aspect box moves.
* ``delta`` (float) - Maximum relative change of box aspect ratio
:math:`\delta_\mathrm{aspect} [\mathrm{dimensionless}]`.
length (dict):
Parameters for isobaric box length moves that change box lengths
independently. The dictionary has the following keys:
* ``weight`` (float) - Maximum change of HOOMD-blue box parameters
Lx, Ly, and Lz.
* ``delta`` (tuple[float, float, float]) - Maximum change of the
box lengths :math:`(\delta_{\mathrm{length},x},
\delta_{\mathrm{length},y}, \delta_{\mathrm{length},z})
[\mathrm{length}]`.
shear (dict):
Parameters for isovolume box shear moves. The dictionary has the
following keys:
* ``weight`` (float) - Relative weight of shear box moves.
* ``delta`` (tuple[float, float, float]) - maximum change of the
box tilt factor :math:`(\delta_{\mathrm{shear},xy},
\delta_{\mathrm{shear},xz}, \delta_{\mathrm{shear},yz})
[\mathrm{dimensionless}]`.
* ``reduce`` (float) - Maximum number of lattice vectors of shear
to allow before applying lattice reduction. Values less than 0.5
disable shear reduction.
instance (int):
When using multiple `BoxMC` updaters in a single simulation,
give each a unique value for `instance` so they generate
different streams of random numbers.
"""
def __init__(self, trigger, betaP):
super().__init__(trigger)
_default_dict = dict(weight=0.0, delta=0.0)
param_dict = ParameterDict(
volume={
"mode": hoomd.data.typeconverter.OnlyFrom(['standard', 'ln']),
**_default_dict
},
aspect=_default_dict,
length=dict(weight=0.0, delta=(0.0,) * 3),
shear=dict(weight=0.0, delta=(0.0,) * 3, reduce=0.0),
betaP=hoomd.variant.Variant,
instance=int,
)
self._param_dict.update(param_dict)
self.volume["mode"] = "standard"
self.betaP = betaP
self.instance = 0
def _add(self, simulation):
"""Add the operation to a simulation.
HPMC uses RNGs. Warn the user if they did not set the seed.
"""
if isinstance(simulation, hoomd.Simulation):
simulation._warn_if_seed_unset()
super()._add(simulation)
def _attach(self):
integrator = self._simulation.operations.integrator
if not isinstance(integrator, integrate.HPMCIntegrator):
raise RuntimeError("The integrator must be a HPMC integrator.")
if not integrator._attached:
raise RuntimeError("Integrator is not attached yet.")
self._cpp_obj = _hpmc.UpdaterBoxMC(self._simulation.state._cpp_sys_def,
self.trigger, integrator._cpp_obj,
self.betaP)
super()._attach()
@property
def counter(self):
"""Trial move counters.
The counter object has the following attributes:
* ``volume``: `tuple` [`int`, `int`] - Number of accepted and rejected
volume and length moves.
* ``shear``: `tuple` [`int`, `int`] - Number of accepted and rejected
shear moves.
* ``aspect``: `tuple` [`int`, `int`] - Number of accepted and rejected
aspect moves.
Note:
The counts are reset to 0 at the start of each call to
`hoomd.Simulation.run`. Before the first call to `Simulation.run`,
`counter` is `None`.
"""
if not self._attached:
return None
else:
return self._cpp_obj.getCounters(1)
@log(category="sequence")
def volume_moves(self):
"""tuple[int, int]: The accepted and rejected volume and length moves.
(0, 0) before the first call to `Simulation.run`.
"""
counter = self.counter
if counter is None:
return (0, 0)
else:
if self.volume["mode"] == "standard":
attr = "volume"
else:
attr = "ln_volume"
return getattr(counter, attr)
@log(category="sequence")
def shear_moves(self):
"""tuple[int, int]: The accepted and rejected shear moves.
(0, 0) before the first call to `Simulation.run`.
"""
counter = self.counter
if counter is None:
return (0, 0)
else:
return counter.shear
@log(category="sequence")
def aspect_moves(self):
"""tuple[int, int]: The accepted and rejected aspect moves.
(0, 0) before the first call to `Simulation.run`.
"""
counter = self.counter
if counter is None:
return (0, 0)
else:
return counter.aspect
class MuVT(Updater):
    r"""Insert and remove particles in the muVT ensemble.
    Args:
        trigger (int): Number of timesteps between grand canonical insertions
        transfer_types (list): List of type names that are being transferred
            from/to the reservoir or between boxes
        ngibbs (int): The number of partitions to use in Gibbs ensemble
            simulations (if == 1, perform grand canonical muVT)
        max_volume_rescale (float): maximum step size in ln(V) (applies to Gibbs
            ensemble)
        volume_move_probability (float): (if set) Set the ratio between volume
            and exchange/transfer moves (applies to Gibbs ensemble)
    The muVT (or grand-canonical) ensemble simulates a system at constant
    fugacity.
    Gibbs ensemble simulations are also supported, where particles and volume
    are swapped between two or more boxes. Every box corresponds to one MPI
    partition, and can therefore run on multiple ranks. Use the
    ``ranks_per_partition`` argument of `hoomd.communicator.Communicator` to
    enable partitioned simulations.
    .. rubric:: Mixed precision
    `MuVT` uses reduced precision floating point arithmetic when checking
    for particle overlaps in the local particle reference frame.
    Note:
        Multiple Gibbs ensembles are also supported in a single parallel job,
        with the ``ngibbs`` option to update.muvt(), where the number of
        partitions can be a multiple of ``ngibbs``.
    Attributes:
        trigger (int): Select the timesteps on which to perform cluster moves.
        transfer_types (list): List of type names that are being transferred
            from/to the reservoir or between boxes
        max_volume_rescale (float): Maximum step size in ln(V) (applies to
            Gibbs ensemble)
        volume_move_probability (float): The ratio between volume and
            exchange/transfer moves (applies to Gibbs ensemble)
        ntrial (float): (**default**: 1) Number of configurational bias attempts
            to swap depletants
    """
    def __init__(self,
                 transfer_types,
                 ngibbs=1,
                 max_volume_rescale=0.1,
                 volume_move_probability=0.5,
                 trigger=1):
        super().__init__(trigger)
        self.ngibbs = int(ngibbs)
        _default_dict = dict(ntrial=1)
        param_dict = ParameterDict(
            transfer_types=list(transfer_types),
            max_volume_rescale=float(max_volume_rescale),
            volume_move_probability=float(volume_move_probability),
            **_default_dict)
        self._param_dict.update(param_dict)
        # Per-particle-type fugacity, defaulting to a constant 0 variant.
        typeparam_fugacity = TypeParameter(
            'fugacity',
            type_kind='particle_types',
            param_dict=TypeParameterDict(hoomd.variant.Variant,
                                         len_keys=1,
                                         _defaults=hoomd.variant.Constant(0.0)))
        self._extend_typeparam([typeparam_fugacity])
    def _attach(self):
        # The concrete C++ updater class is selected by the integrator's
        # shape class name (e.g. "UpdaterMuVTSphere").
        integrator = self._simulation.operations.integrator
        if not isinstance(integrator, integrate.HPMCIntegrator):
            raise RuntimeError("The integrator must be a HPMC integrator.")
        cpp_cls_name = "UpdaterMuVT"
        cpp_cls_name += integrator.__class__.__name__
        cpp_cls = getattr(_hpmc, cpp_cls_name)
        self._cpp_obj = cpp_cls(self._simulation.state._cpp_sys_def,
                                self.trigger, integrator._cpp_obj, self.ngibbs)
        super()._attach()
    @log(category='sequence', requires_run=True)
    def insert_moves(self):
        """tuple[int, int]: Count of the accepted and rejected particle \
        insertion moves.
        None when not attached
        """
        counter = self._cpp_obj.getCounters(1)
        return counter.insert
    @log(category='sequence', requires_run=True)
    def remove_moves(self):
        """tuple[int, int]: Count of the accepted and rejected particle \
        removal moves.
        None when not attached
        """
        counter = self._cpp_obj.getCounters(1)
        return counter.remove
    @log(category='sequence', requires_run=True)
    def exchange_moves(self):
        """tuple[int, int]: Count of the accepted and rejected particle \
        exchange moves.
        None when not attached
        """
        counter = self._cpp_obj.getCounters(1)
        return counter.exchange
    @log(category='sequence', requires_run=True)
    def volume_moves(self):
        """tuple[int, int]: Count of the accepted and rejected particle \
        volume moves.
        None when not attached
        """
        counter = self._cpp_obj.getCounters(1)
        return counter.volume
    @log(category='object')
    def N(self):  # noqa: N802 - allow N as a function name
        """dict: Map of number of particles per type.
        None when not attached.
        """
        N_dict = None
        if self._attached:
            N_dict = self._cpp_obj.N
        return N_dict
class Clusters(Updater):
    """Apply geometric cluster algorithm (GCA) moves.
    Args:
        pivot_move_probability (float): Set the probability for attempting a
            pivot move.
        flip_probability (float): Set the probability for transforming an
            individual cluster.
        trigger (Trigger): Select the timesteps on which to perform cluster
            moves.
    The GCA as described in Liu and Luijten (2004),
    http://doi.org/10.1103/PhysRevLett.92.035504 is used for hard shape, patch
    interactions and depletants. Implicit depletants are supported and simulated
    on-the-fly, as if they were present in the actual system.
    Supported moves include pivot moves (point reflection) and line reflections
    (pi rotation around an axis). With anisotropic particles, the pivot move
    cannot be used because it would create a chiral mirror image of the
    particle, and only line reflections are employed. In general, line
    reflections are not rejection free because of periodic boundary conditions,
    as discussed in Sinkovits et al. (2012), http://doi.org/10.1063/1.3694271 .
    However, we restrict the line reflections to axes parallel to the box axis,
    which makes those moves rejection-free for anisotropic particles, but the
    algorithm is then no longer ergodic for those and needs to be combined with
    local moves.
    .. rubric:: Mixed precision
    `Clusters` uses reduced precision floating point arithmetic when checking
    for particle overlaps in the local particle reference frame.
    Attributes:
        pivot_move_probability (float): Set the probability for attempting a
            pivot move.
        flip_probability (float): Set the probability for transforming an
            individual cluster.
        trigger (Trigger): Select the timesteps on which to perform cluster
            moves.
    """
    # The GPU cell list handle is runtime state: exclude it from pickling
    # and from equality comparisons.
    _remove_for_pickling = Updater._remove_for_pickling + ('_cpp_cell',)
    _skip_for_equality = Updater._skip_for_equality | {'_cpp_cell'}
    def __init__(self,
                 pivot_move_probability=0.5,
                 flip_probability=0.5,
                 trigger=1):
        super().__init__(trigger)
        param_dict = ParameterDict(
            pivot_move_probability=float(pivot_move_probability),
            flip_probability=float(flip_probability))
        self._param_dict.update(param_dict)
        self.instance = 0
    def _add(self, simulation):
        """Add the operation to a simulation.
        HPMC uses RNGs. Warn the user if they did not set the seed.
        """
        if isinstance(simulation, hoomd.Simulation):
            simulation._warn_if_seed_unset()
        super()._add(simulation)
    def _attach(self):
        integrator = self._simulation.operations.integrator
        if not isinstance(integrator, integrate.HPMCIntegrator):
            raise RuntimeError("The integrator must be a HPMC integrator.")
        # C++ class name is keyed to the integrator's shape class.
        cpp_cls_name = "UpdaterClusters"
        cpp_cls_name += integrator.__class__.__name__
        cpp_cls = getattr(_hpmc, cpp_cls_name)
        # Fall back to the CPU implementation when no GPU variant of this
        # shape's updater has been compiled in.
        use_gpu = (isinstance(self._simulation.device, hoomd.device.GPU)
                   and (cpp_cls_name + 'GPU') in _hpmc.__dict__)
        if use_gpu:
            cpp_cls_name += "GPU"
            cpp_cls = getattr(_hpmc, cpp_cls_name)
        if not integrator._attached:
            raise RuntimeError("Integrator is not attached yet.")
        if use_gpu:
            # The GPU updater needs its own cell list for neighbor queries.
            sys_def = self._simulation.state._cpp_sys_def
            self._cpp_cell = _hoomd.CellListGPU(sys_def)
            self._cpp_obj = cpp_cls(self._simulation.state._cpp_sys_def,
                                    self.trigger, integrator._cpp_obj,
                                    self._cpp_cell)
        else:
            self._cpp_obj = cpp_cls(self._simulation.state._cpp_sys_def,
                                    self.trigger, integrator._cpp_obj)
        super()._attach()
    @log(requires_run=True)
    def avg_cluster_size(self):
        """float: the typical size of clusters.
        None when not attached.
        """
        counter = self._cpp_obj.getCounters(1)
        return counter.average_cluster_size
class QuickCompress(Updater):
    r"""Quickly compress a hard particle system to a target box.
    Args:
        trigger (Trigger): Update the box dimensions on triggered time steps.
        target_box (Box): Dimensions of the target box.
        max_overlaps_per_particle (float): The maximum number of overlaps to
            allow per particle (may be less than 1 - e.g.
            up to 250 overlaps would be allowed when in a system of 1000
            particles when max_overlaps_per_particle=0.25).
        min_scale (float): The minimum scale factor to apply to box dimensions.
    Use `QuickCompress` in conjunction with an HPMC integrator to scale the
    system to a target box size. `QuickCompress` can typically compress dilute
    systems to near random close packing densities in tens of thousands of time
    steps.
    It operates by making small changes toward the `target_box`, but only
    when there are no particle overlaps in the current simulation state. In 3D:
    .. math::
        L_x' &= \begin{cases}
        \max( L_x \cdot s, L_{\mathrm{target},x} )
        & L_{\mathrm{target},x} < L_x \\
        \min( L_x / s, L_{\mathrm{target},x} )
        & L_{\mathrm{target},x} \ge L_x
        \end{cases} \\
        L_y' &= \begin{cases}
        \max( L_y \cdot s, L_{\mathrm{target},y} )
        & L_{\mathrm{target},y} < L_y \\
        \min( L_y / s, L_{\mathrm{target},y} )
        & L_{\mathrm{target},y} \ge L_y
        \end{cases} \\
        L_z' &= \begin{cases}
        \max( L_z \cdot s, L_{\mathrm{target},z} )
        & L_{\mathrm{target},z} < L_z \\
        \min( L_z / s, L_{\mathrm{target},z} )
        & L_{\mathrm{target},z} \ge L_z
        \end{cases} \\
        xy' &= \begin{cases}
        \max( xy \cdot s, xy_\mathrm{target} )
        & xy_\mathrm{target} < xy \\
        \min( xy / s, xy_\mathrm{target} )
        & xy_\mathrm{target} \ge xy
        \end{cases} \\
        xz' &= \begin{cases}
        \max( xz \cdot s, xz_\mathrm{target} )
        & xz_\mathrm{target} < xz \\
        \min( xz / s, xz_\mathrm{target} )
        & xz_\mathrm{target} \ge xz
        \end{cases} \\
        yz' &= \begin{cases}
        \max( yz \cdot s, yz_\mathrm{target} )
        & yz_\mathrm{target} < yz \\
        \min( yz / s, yz_\mathrm{target} )
        & yz_\mathrm{target} \ge yz
        \end{cases} \\
    and in 2D:
    .. math::
        L_x' &= \begin{cases}
        \max( L_x \cdot s, L_{\mathrm{target},x} )
        & L_{\mathrm{target},x} < L_x \\
        \min( L_x / s, L_{\mathrm{target},x} )
        & L_{\mathrm{target},x} \ge L_x
        \end{cases} \\
        L_y' &= \begin{cases}
        \max( L_y \cdot s, L_{\mathrm{target},y} )
        & L_{\mathrm{target},y} < L_y \\
        \min( L_y / s, L_{\mathrm{target},y} )
        & L_{\mathrm{target},y} \ge L_y
        \end{cases} \\
        L_z' &= L_z \\
        xy' &= \begin{cases}
        \max( xy \cdot s, xy_\mathrm{target} )
        & xy_\mathrm{target} < xy \\
        \min( xy / s, xy_\mathrm{target} )
        & xy_\mathrm{target} \ge xy
        \end{cases} \\
        xz' &= xz \\
        yz' &= yz \\
    where the current simulation box is :math:`(L_x, L_y, L_z, xy, xz, yz)`,
    the target is :math:`(L_{\mathrm{target},x}, L_{\mathrm{target},y},
    L_{\mathrm{target},z}, xy_\mathrm{target}, xz_\mathrm{target},
    yz_\mathrm{target})`, the new simulation box set is
    :math:`(L_x', L_y', L_z', xy', xz', yz')` and :math:`s` is the scale factor
    chosen for this step (see below). `QuickCompress` scales particle
    coordinates (see `BoxMC` for details) when it sets a new box.
    When there are more than ``max_overlaps_per_particle * N_particles`` hard
    particle overlaps in the system in the new box, the box move is rejected.
    Otherwise, the small number of overlaps remain when the new box is set.
    `QuickCompress` then waits until `hoomd.hpmc.integrate.HPMCIntegrator` makes
    local MC trial moves that remove all overlaps.
    `QuickCompress` adjusts the value of :math:`s` based on the particle and
    translational trial move sizes to ensure that the trial moves will be able
    to remove the overlaps. It randomly chooses a value of :math:`s` uniformly
    distributed between ``max(min_scale, 1.0 - min_move_size / max_diameter)``
    and 1.0 where ``min_move_size`` is the smallest MC translational move size
    adjusted by the acceptance ratio and ``max_diameter`` is the circumsphere
    diameter of the largest particle type.
    Tip:
        Use the `hoomd.hpmc.tune.MoveSize` in conjunction with
        `QuickCompress` to adjust the move sizes to maintain a constant
        acceptance ratio as the density of the system increases.
    Warning:
        When the smallest MC translational move size is 0, `QuickCompress`
        will scale the box by 1.0 and not progress toward the target box.
    Warning:
        Use `QuickCompress` *OR* `BoxMC`. Do not use both at the same time.
    .. rubric:: Mixed precision
    `QuickCompress` uses reduced precision floating point arithmetic when
    checking for particle overlaps in the local particle reference frame.
    Attributes:
        trigger (Trigger): Update the box dimensions on triggered time steps.
        target_box (Box): Dimensions of the target box.
        max_overlaps_per_particle (float): The maximum number of overlaps to
            allow per particle (may be less than 1 - e.g.
            up to 250 overlaps would be allowed when in a system of 1000
            particles when max_overlaps_per_particle=0.25).
        min_scale (float): The minimum scale factor to apply to box dimensions.
        instance (int):
            When using multiple `QuickCompress` updaters in a single simulation,
            give each a unique value for `instance` so that they generate
            different streams of random numbers.
    """
    def __init__(self,
                 trigger,
                 target_box,
                 max_overlaps_per_particle=0.25,
                 min_scale=0.99):
        super().__init__(trigger)
        # Declare the parameter types first, then assign through the
        # ParameterDict so each value is validated/converted on the way in.
        param_dict = ParameterDict(max_overlaps_per_particle=float,
                                   min_scale=float,
                                   target_box=hoomd.Box,
                                   instance=int)
        param_dict['max_overlaps_per_particle'] = max_overlaps_per_particle
        param_dict['min_scale'] = min_scale
        param_dict['target_box'] = target_box
        self._param_dict.update(param_dict)
        self.instance = 0
    def _add(self, simulation):
        """Add the operation to a simulation.
        HPMC uses RNGs. Warn the user if they did not set the seed.
        """
        if isinstance(simulation, hoomd.Simulation):
            simulation._warn_if_seed_unset()
        super()._add(simulation)
    def _attach(self):
        # Create the C++ updater; requires an already-attached HPMC integrator.
        integrator = self._simulation.operations.integrator
        if not isinstance(integrator, integrate.HPMCIntegrator):
            raise RuntimeError("The integrator must be a HPMC integrator.")
        if not integrator._attached:
            raise RuntimeError("Integrator is not attached yet.")
        self._cpp_obj = _hpmc.UpdaterQuickCompress(
            self._simulation.state._cpp_sys_def, self.trigger,
            integrator._cpp_obj, self.max_overlaps_per_particle, self.min_scale,
            self.target_box._cpp_obj)
        super()._attach()
    @property
    def complete(self):
        """True when the box has achieved the target."""
        # Before attachment the C++ object does not exist; report incomplete.
        if not self._attached:
            return False
        return self._cpp_obj.isComplete()
| 37.59828 | 80 | 0.596406 |
ace6c14755e9421d78cdae4e60ad649938df3570 | 5,168 | py | Python | xclim/sdba/__init__.py | fossabot/xclim | 31fbdce6545d29e8a762b64b880e04eeb601f9eb | [
"Apache-2.0"
] | null | null | null | xclim/sdba/__init__.py | fossabot/xclim | 31fbdce6545d29e8a762b64b880e04eeb601f9eb | [
"Apache-2.0"
] | 2 | 2021-07-16T16:05:54.000Z | 2021-11-15T13:02:10.000Z | xclim/sdba/__init__.py | fossabot/xclim | 31fbdce6545d29e8a762b64b880e04eeb601f9eb | [
"Apache-2.0"
] | 1 | 2021-07-16T16:49:05.000Z | 2021-07-16T16:49:05.000Z | # -*- coding: utf-8 -*-
# noqa: D205,D400
"""
===========================================
Statistical Downscaling and Bias Adjustment
===========================================
The `xclim.sdba` submodule provides bias-adjustment methods and will eventually provide statistical downscaling algorithms.
Almost all adjustment algorithms conform to the `train` - `adjust` scheme, formalized within `TrainAdjust` classes.
Given a reference time series (ref), historical simulations (hist) and simulations to be adjusted (sim),
any bias-adjustment method would be applied by first estimating the adjustment factors between the historical simulation and the observations series, and then applying these factors to `sim`, which could be a future simulation::
# Create the adjustment object by training it with reference and model data, plus certains arguments
Adj = Adjustment.train(ref, hist, group="time.month")
# Get a scenario by applying the adjustment to a simulated timeseries.
scen = Adj.adjust(sim, interp="linear")
Adj.ds.af # adjustment factors.
The `group` argument allows adjustment factors to be estimated independently for different periods: the full
time series, months, seasons or day of the year. The `interp` argument then allows for interpolation between these
adjustment factors to avoid discontinuities in the bias-adjusted series (only applicable for monthly grouping).
.. warning::
If grouping according to the day of the year is needed, the :py:mod:`xclim.core.calendar` submodule contains useful tools to manage the
different calendars that the input data can have. By default, if 2 different calendars are passed, the adjustment
factors will always be interpolated to the largest range of day of the years but this can lead to strange values
and we recommend converting the data beforehand to a common calendar.
The same interpolation principle is also used for quantiles. Indeed, for methods extracting adjustment factors by
quantile, interpolation is also done between quantiles. This can help reduce discontinuities in the adjusted time
series, and possibly reduce the number of quantile bins used.
Modular approach
================
This module adopts a modular approach instead of implementing published and named methods directly.
A generic bias adjustment process is laid out as follows:
- preprocessing on ``ref``, ``hist`` and ``sim`` (using methods in :py:mod:`xclim.sdba.processing` or :py:mod:`xclim.sdba.detrending`)
- creating and training the adjustment object ``Adj = Adjustment.train(obs, sim, **kwargs)`` (from :py:mod:`xclim.sdba.adjustment`)
- adjustment ``scen = Adj.adjust(sim, **kwargs)``
- post-processing on ``scen`` (for example: re-trending)
The train-adjust approach allows to inspect the trained adjustment object. The training information is stored in
the underlying `Adj.ds` dataset and always has a `af` variable with the adjustment factors. Its layout and the
other available variables vary between the different algorithm, refer to :ref:`Adjustment methods`.
Parameters needed by the training and the adjustment are saved to the ``Adj.ds`` dataset as a `adj_params` attribute.
Other parameters, those only needed by the adjustment are passed in the `adjust` call and written to the history attribute
in the output scenario dataarray.
Grouping
========
For basic time period grouping (months, day of year, season), passing a string to the methods needing it is sufficient.
Most methods acting on grouped data also accept a `window` int argument to pad the groups with data from adjacent ones.
Units of `window` are the sampling frequency of the main grouping dimension (usually `time`). For more complex grouping,
one can pass an instance of :py:class:`xclim.sdba.base.Grouper` directly.
Notes for developers
====================
To be scalable and performant, the sdba module makes use of the special decorators :py:func`xclim.sdba.base.map_blocks`
and :py:func:`xclim.sdba.base.map_groups`. However, they have the inconvenient that functions wrapped by them are unable
to manage xarray attributes (including units) correctly and their signatures are sometime wrong and often unclear. For
this reason, the module is often divided in two parts : the (decorated) compute functions in a "private" file (ex: ``_adjustment.py``)
and the user-facing functions or objects in corresponding public file (ex: ``adjustment.py``). See the `sdba-advanced`
notebook for more info on the reasons for this move.
Other restrictions : ``map_blocks`` will remove any "auxiliary" coordinates before calling the wrapped function and will add them back on exit.
"""
from . import detrending, processing, utils
from .adjustment import *
from .base import Grouper
from .processing import (
construct_moving_yearly_window,
stack_variables,
unpack_moving_yearly_window,
unstack_variables,
)
# TODO: ISIMIP ? Used for precip freq adjustment in biasCorrection.R
# Hempel, S., Frieler, K., Warszawski, L., Schewe, J., & Piontek, F. (2013). A trend-preserving bias correction –
# The ISI-MIP approach. Earth System Dynamics, 4(2), 219–236. https://doi.org/10.5194/esd-4-219-2013
| 61.52381 | 228 | 0.760255 |
ace6c1906b079732d4a6f4d5510ddb3fbaa99795 | 6,463 | py | Python | load_splitter_lambda/load_splitter_lambda/code/event_queue.py | aws-samples/lambda-load-splitter-sample | d781b0a875a626a2fc3da8a914a46fc223863256 | [
"MIT-0"
] | null | null | null | load_splitter_lambda/load_splitter_lambda/code/event_queue.py | aws-samples/lambda-load-splitter-sample | d781b0a875a626a2fc3da8a914a46fc223863256 | [
"MIT-0"
] | null | null | null | load_splitter_lambda/load_splitter_lambda/code/event_queue.py | aws-samples/lambda-load-splitter-sample | d781b0a875a626a2fc3da8a914a46fc223863256 | [
"MIT-0"
] | null | null | null | #######################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#######################################################################################
import copy
import json
import logging
import os
import sys
import boto3
import botocore
sys.path.append("load_splitter_lambda/code") # needed for aws and pytest
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def enqueue_event(event):
    """Serialize *event* as JSON and send it to the configured SQS queue.

    The queue name is read from the ``QUEUE_NAME`` environment variable.
    When that variable is missing or empty, an error is logged and the
    event is dropped without raising.  (Previously ``os.environ["QUEUE_NAME"]``
    raised KeyError before the emptiness guard could run, making the guard
    unreachable.)

    Parameters
    ----------
    event : list or dict
        The incoming data event that should be enqueued. Typically this is a
        python representation of JSON, in an arbitrarily nested list or dict
        object.

    Raises
    ------
    botocore.exceptions.ClientError
        Propagated (after logging) when SQS rejects the request.
    """
    queue_name = os.environ.get("QUEUE_NAME", "")
    if not queue_name:
        logger.error(f"===> FAIL: unable to find environment variable QUEUE_NAME")
        return
    try:
        sqs = boto3.resource("sqs")
        logger.info(f"===> queueing a message to queue '{queue_name}'...")
        queue = sqs.get_queue_by_name(QueueName=queue_name)
        queue.send_message(MessageBody=json.dumps(event))
    except botocore.exceptions.ClientError:
        logger.error(f"===> EXCEPTION CAUGHT: while queueing message to SQS queue named '{queue_name}'")
        raise  # bare raise preserves the original traceback
def get_reference_to_embedded_list(event):
    """Return a *reference* to the nested "items" list inside *event*.

    The expected structure is::

        {
            "detail": {
                "requestParameters": {
                    "tagSet": {
                        "items": [ 1, 2, 3 ]
                    }
                }
            }
        }

    With the example above, the actual list object ``[1, 2, 3]`` is returned
    (not a copy), so callers may mutate the event through it.

    Parameters
    ----------
    event : dict
        The incoming event. It may be falsy (e.g. ``None``) or lack the
        nested structure shown above.

    Returns
    -------
    list
        The value of the nested "items" key. When *event* is falsy or the
        key path is absent, a new empty list is returned (an empty list,
        never ``None``).
    """
    if not event:
        return []
    return (event.get("detail", {})
                 .get("requestParameters", {})
                 .get("tagSet", {})
                 .get("items", []))
def queue_smaller_events(event):
    """Fan a large event out into one queued message per embedded item.

    Locates the nested ``tagSet.items`` list (see
    ``get_reference_to_embedded_list``).  When it holds more than one entry,
    a copy of the original event is enqueued once per entry, each copy
    carrying a single-item list in place of the original.  Events whose
    embedded list holds exactly one item are left alone (nothing is
    enqueued); events without the expected structure enqueue nothing.

    Parameters
    ----------
    event : dict
        The incoming event. The caller's object is never mutated; all work
        happens on a deep copy.
    """
    working_event = copy.deepcopy(event)  # never touch the caller's dict
    embedded = get_reference_to_embedded_list(working_event)
    original_items = embedded.copy()  # snapshot: `embedded` is rewritten below
    if len(original_items) == 1:
        return
    for single_item in original_items:
        # Rewrite the embedded list in place so the surrounding event
        # structure is preserved in every enqueued message.
        embedded.clear()
        embedded.append(single_item)
        enqueue_event(working_event)
def extract_event_from_queue_message(queued_event_message):
    """Extract the JSON payload from an SQS event structure.

    Scans ``queued_event_message['Records']`` for the first record whose
    ``eventSource`` is ``'aws:sqs'`` and which carries a ``'body'`` key, and
    returns that body decoded from its JSON string.  (Previously the scan
    aborted after the first record because of an ``else: return None``
    inside the loop; now every record is considered.)

    Parameters
    ----------
    queued_event_message : dict
        The event as read from an SQS Queue.
        - If it is not recognized as an SQS event structure (no ``'Records'``
          key, or no qualifying record), ``None`` is returned.
        - If the body cannot be parsed as JSON even after the single-quote
          fallback, ``json.JSONDecodeError`` is raised.

    Returns
    -------
    dict or list or None
        The JSON decoded from the "body" of the first qualifying record, or
        ``None`` when no such record exists.
    """
    if 'Records' not in queued_event_message:
        return None
    for record in queued_event_message['Records']:
        if record.get("eventSource") == 'aws:sqs' and 'body' in record:
            body = record["body"]
            try:
                return json.loads(body)
            except json.JSONDecodeError:
                # Legacy fallback: some producers send single-quoted
                # pseudo-JSON.  The naive replacement corrupts strings that
                # legitimately contain apostrophes, so strict parsing is
                # attempted first and this runs only when it fails.
                return json.loads(body.replace("'", '"'))
    return None  # no qualifying SQS record found
| 42.801325 | 104 | 0.656197 |
ace6c2a7e5726f2320d796dd66a6f645e50a8083 | 590 | py | Python | blog/migrations/0007_notification.py | lambsteak/usefulpython | 7f5b8c23e2ee58228210b44ced6ebe8b79cb9888 | [
"MIT"
] | 29 | 2018-03-29T02:56:11.000Z | 2021-12-25T16:49:29.000Z | blog/migrations/0007_notification.py | lambsteak/usefulpython | 7f5b8c23e2ee58228210b44ced6ebe8b79cb9888 | [
"MIT"
] | 5 | 2020-06-05T17:05:49.000Z | 2022-01-13T00:37:26.000Z | blog/migrations/0007_notification.py | lambsteak/usefulpython | 7f5b8c23e2ee58228210b44ced6ebe8b79cb9888 | [
"MIT"
] | 5 | 2018-03-29T07:37:56.000Z | 2021-02-16T18:54:55.000Z | # Generated by Django 2.0 on 2017-12-31 00:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0006_auto_20171231_0406'),
]
operations = [
migrations.CreateModel(
name='Notification',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('detail', models.CharField(max_length=150)),
('profiles', models.ManyToManyField(to='blog.Profile')),
],
),
]
| 26.818182 | 114 | 0.583051 |
ace6c43fce1407f9cfabbf3bbedb5f0529b55a56 | 29,095 | py | Python | gallery/sbol_interactive/assembly.py | zachfox/dnaplotlib | 142e856a6fb5cd64a2ed522168655b74091ea0ae | [
"MIT"
] | 267 | 2015-05-22T02:01:04.000Z | 2022-03-04T11:51:20.000Z | gallery/sbol_interactive/assembly.py | zachfox/dnaplotlib | 142e856a6fb5cd64a2ed522168655b74091ea0ae | [
"MIT"
] | 23 | 2015-08-20T13:37:27.000Z | 2021-06-19T03:24:27.000Z | gallery/sbol_interactive/assembly.py | zachfox/dnaplotlib | 142e856a6fb5cd64a2ed522168655b74091ea0ae | [
"MIT"
] | 90 | 2015-05-22T13:18:48.000Z | 2022-01-28T15:52:32.000Z | """
Read an SBOL file and display its contents
"""
# Add SBOL module directory to PYTHONPATH
import os, sys
lib_path = os.path.abspath('..')
sys.path.append(lib_path)
import random # For random sequence generation
import sbol
#from sbol import libsbol
from subprocess import Popen, PIPE, STDOUT # For calling command line tools Clustal Omega and EMBOSS
# Command line tools for sequence verification
CLUSTAL_DIR = "C:/Program Files (x86)/clustal-omega-1.2.0-win32"
EMBOSS_DIR = "C:\mEMBOSS"
BASE_URI = "http://sbolstandard.org/examples"
PROMOTER = "http://purl.obolibrary.org/obo/SO_0000167"
RBS = "http://purl.obolibrary.org/obo/SO_0000552"
CDS = "http://purl.obolibrary.org/obo/SO_0000316"
TERMINATOR = "http://purl.obolibrary.org/obo/SO_0000141"
USER_DEFINED = "http://purl.obolibrary.org/obo/SO_0000001"
DESIGN = "http://purl.obolibrary.org/obo/SO_0000546"
SCAR = "http://purl.obolibrary.org/obo/SO_0001953"
SO_INSERTION = "http://purl.obolibrary.org/obo/SO_0000667"
SO_DELETION = "http://purl.obolibrary.org/obo/SO_0000159"
SO_POSSIBLE_ASSEMBLY_ERROR = "http://purl.obolibrary.org/obo/SO_0000702"
SO_SUBSTITUTION = "http://purl.obolibrary.org/obo/SO_1000002"
SO_NUCLEOTIDE_MATCH = "http://purl.obolibrary.org/obo/SO_0000347"
# RGB tuples (components in the 0-1 range) keyed by colour name.
col_map = {
    'red': (0.95, 0.30, 0.25),
    'green': (0.38, 0.82, 0.32),
    'blue': (0.38, 0.65, 0.87),
    'orange': (1.00, 0.75, 0.17),
    'purple': (0.55, 0.35, 0.64),
}
# Seed the module-level RNG from system entropy (used for random sequences).
random.seed()
def populate_subcomponents(parent_component):
    """Attach a DNASequence to each annotation's subcomponent, copying the
    corresponding slice of the parent component's nucleotide string.

    Annotation ``start``/``end`` are treated as 1-based inclusive indices
    into ``parent_component.sequence.nucleotides`` (hence the -1 offset).
    """
    for ann in parent_component.annotations:
        # Convert 1-based inclusive coordinates to a Python half-open slice.
        i_start = ann.start - 1
        i_end = ann.end
        sub_seq_nucleotides = parent_component.sequence.nucleotides[i_start:i_end]
        #ann.subComponent = sbol.DNAComponent(doc, '%s//subComponent' %ann.uri)
        # NOTE(review): `doc` is a module-level global that is not defined in
        # this file's import section -- confirm it is set before this runs.
        ann.subcomponent.sequence = sbol.DNASequence(doc, '%s//Sequence' %ann.subcomponent.uri )
        ann.subcomponent.sequence.nucleotides = sub_seq_nucleotides
def find_sequence_homologs(target_seq):
    # BLAST `target_seq` against NCBI nt and collect accession numbers and
    # subject sequences of the hits.
    # NOTE(review): NCBIWWW / NCBIXML (Biopython) are never imported in this
    # file — calling this raises NameError until the import is restored.
    result_handle = NCBIWWW.qblast("blastn", "nt", target_seq)
    blast_records = list(NCBIXML.parse(result_handle))
    rec = blast_records[0]
    E_VALUE_THRESH = 0.04  # NOTE(review): defined but never applied to filter hits
    variant_acc_nos = []
    variant_nucleotides = []
    variant_urls = []  # NOTE(review): never populated — always returned empty
    for alignment in rec.alignments:
        hsp = alignment.hsps[0] # high-scoring pairs
        variant_acc_nos.append( str(alignment.accession) )
        variant_nucleotides.append( str(hsp.sbjct) )
        #cds_variant_urls.append(alignment.accession)
    return variant_acc_nos, variant_nucleotides, variant_urls
def remove_annotation(parent_component, deleted_ann):
    """ An annotation is removed. The precedes relationship, start and end indexes of other annotations
    are updated accordingly """
    # Assumes each annotation has at most one `precedes` link (a simple chain).
    # NOTE(review): raises IndexError if deleted_ann has no successor, and
    # NameError if it has no predecessor — confirm callers never delete the
    # first or last annotation in the chain.
    downstream_ann = deleted_ann.precedes[0] # Find annotation downstream of the one to be removed
    # Finds the upstream annotation that precedes the annotation to be removed
    for ann in parent_component.annotations:
        if deleted_ann in ann.precedes:
            upstream_ann = ann
    # Update precedes relationship of annotations
    upstream_ann.precedes.remove(upstream_ann.precedes[0])
    upstream_ann.precedes.append(downstream_ann)
    # Update all start and end indices for annotations downstream from insertion
    deletion_size = deleted_ann.end - deleted_ann.start + 1
    # Walk the chain, shifting every downstream annotation left by the deleted span
    while (len(upstream_ann.precedes) > 0):
        downstream_ann = upstream_ann.precedes[0]
        old_start = downstream_ann.start
        old_end = downstream_ann.end
        new_start = old_start - deletion_size
        new_end = old_end - deletion_size
        downstream_ann.start = new_start
        downstream_ann.end = new_end
        upstream_ann = downstream_ann
    #doc.sequences.remove(deleted_ann.subcomponent.sequence)
    #doc.components.remove(deleted_ann.subcomponent)
    #doc.annotations.remove(deleted_ann)
    parent_component.annotations.remove(deleted_ann)
def insert_annotation_downstream(parent_component, upstream_ann, insert_ann):
    """ A new annotation is inserted after an upstream annotation.
    The precedes relationship, start and end indexes are update accordingly.
    The annotation is expected to have a subComponent and Sequence object attached"""
    parent_component.annotations.append(insert_ann)
    # Update start and end of annotation
    # NOTE(review): using `insert_ann.end` as the length assumes the incoming
    # annotation is coded 1..length (i.e. start == 1) — confirm for callers.
    insert_size = insert_ann.end
    insert_ann.start = upstream_ann.end + 1
    insert_ann.end = upstream_ann.end + insert_size
    # Update precedes relationship of annotations
    # If inserting annotation between two existing annotations
    if upstream_ann.precedes:
        downstream_ann = upstream_ann.precedes[0] # Assumes annotations only have one precedes relationship
        upstream_ann.precedes.remove(upstream_ann.precedes[0])
        upstream_ann.precedes.append(insert_ann)
        insert_ann.precedes.append(downstream_ann)
    else: # If there is no annotation to the right
        upstream_ann.precedes.append(insert_ann)
    # Update all start and end indices for annotations downstream from insertion
    upstream_ann = insert_ann
    # Walk the remainder of the chain, shifting each annotation right by the inserted span
    while (len(upstream_ann.precedes) > 0):
        downstream_ann = upstream_ann.precedes[0]
        old_start = downstream_ann.start
        old_end = downstream_ann.end
        new_start = old_start + insert_size
        new_end = old_end + insert_size
        downstream_ann.start = new_start
        downstream_ann.end = new_end
        upstream_ann = downstream_ann
def insert_annotation_upstream(parent_component, insert_ann, downstream_ann):
    """ A new annotation (upstream) is inserted before the downstream annotation
    The precedes relationship, start and end indexes are update accordingly """
    #print downstream_ann.uri
    #print
    # Locate the annotation whose `precedes` list points at downstream_ann
    for i_ann, ann in enumerate(parent_component.annotations):
        #print i_ann, ann.uri
        if downstream_ann in ann.precedes:
            upstream_uri = ann.uri # finds the annotation upstream, because it owns the precedes
    # NOTE(review): if downstream_ann has no predecessor, upstream_uri is
    # unbound here (NameError). Indexing `annotations` by URI relies on the
    # sbol container supporting key lookup — confirm against the sbol API.
    print('Upstream uri: %s' %upstream_uri)
    upstream_ann = parent_component.annotations[upstream_uri]
    insert_annotation_downstream(parent_component, upstream_ann, insert_ann)
def assemble_subcomponents(parent_component):
    """Build the parent's full sequence by splicing each annotation's
    subcomponent sequence into its 1-based [start, end] slot."""
    total_length = sum(
        len(annotation.subcomponent.sequence.nucleotides)
        for annotation in parent_component.annotations
    )
    # Start from an all-'n' placeholder of the final length
    assembled = 'n' * total_length
    for annotation in parent_component.annotations:
        fragment = annotation.subcomponent.sequence.nucleotides
        assembled = (assembled[:annotation.start - 1]
                     + fragment
                     + assembled[annotation.end:])
    parent_component.sequence.nucleotides = assembled
def print_downstream_Annotations(ann):
    """Print the chain of annotation URIs reachable from `ann` via
    successive `precedes` links, separated by arrows."""
    uris = [ann.uri]
    cursor = ann
    while cursor.precedes:
        cursor = cursor.precedes[0]
        uris.append(cursor.uri)
    # Matches the original output byte-for-byte (trailing space + newline)
    print(' -> '.join(uris), end=' ')
    print()
#def write_to_fasta(seq_name, nucleotides, col_length = 20) :
def write_to_fasta(entries, col_length = 20) :
    """Render (name, nucleotides) pairs as a FASTA string.

    Sequences are wrapped at `col_length` characters per line; records are
    joined with CRLF separators.
    """
    records = []
    for label, sequence in entries:
        wrapped = '\n'.join(
            sequence[offset:offset + col_length]
            for offset in range(0, len(sequence), col_length)
        )
        records.append('>{}\n{}'.format(label, wrapped))
    return '\r\n'.join(records)
def parse_fasta(fasta_str):
    """Parse FASTA text into a list of (name, nucleotides) tuples.

    Returns None (after printing a warning) if the text contains no '>'
    header. Carriage returns are stripped from sequence data.

    Fix: the original did `entry.encode('ascii', 'ignore')`, leaving bytes
    objects whose `split('\\n')` (str separator) raises TypeError on
    Python 3; the bare except then swallowed every entry, so the function
    always returned an empty list. Decoding back to str restores the
    intended "drop non-ASCII characters" behavior.
    """
    entries = fasta_str.split('>')
    entries = [entry.strip() for entry in entries]
    # Drop non-ASCII characters but keep str (round-trip through ASCII)
    entries = [entry.encode('ascii', 'ignore').decode('ascii') for entry in entries]
    entries = entries[1:] # Sequence has empty entry '' at the beginning of the list
    if len(entries) < 1 :
        print('Invalid FASTA format')
        return
    parsed_entries = []
    for entry in entries :
        try:
            entry = entry.strip()
            tokens = entry.split('\n')
            seq_name = tokens[0]  # first line of the record is the name
            nucleotides = ''.join(tokens[1:])
            nucleotides = nucleotides.replace('\r', '')
            parsed_entries.append((seq_name, nucleotides))
        except Exception:
            print(('Invalid entry: %s' %entry))
    return parsed_entries
def getSequenceAnnotationsAtBaseNo(parent_component, base_no, annotations_found = None):
    # Assumes parent_component is an SBOL data structure of the general form DNAComponent(->SequenceAnnotation->DNAComponent)n
    # where n+1 is an integer describing how many hierarchical levels are in the SBOL structure
    # Recursively collects the leaf-most SequenceAnnotations covering base_no.
    # Annotation start/end are 1-based inclusive; NOTE(review): the qc() caller
    # passes a 0-based index — confirm whether an off-by-one is intended.
    if not annotations_found :
        annotations_found = []
    # print "Searching for base no %d" %base_no
    # Look at each of this component's annotations, is the target base there?
    for ann in parent_component.annotations :
        # print ann.uri, ann.start, ann.end
        # If target base is found ...
        if base_no >= ann.start and base_no <= ann.end :
            #print "Annotation FOUND"
            annotations_found.append(ann)
            # Is this the lowest level of the hierarchy, or are there subcomponents?
            if ann.subcomponent and len(ann.subcomponent.annotations) > 0:
                #print "Descending one level"
                annotations_found = annotations_found[:-1] # Remove parent annotation, continue search for leaf annotations
                sub_annotations_found = getSequenceAnnotationsAtBaseNo(ann.subcomponent, base_no, annotations_found)
                if len(sub_annotations_found) == len(annotations_found): # If no leaf annotations were found at the lower level, replace the higher level annotation
                    #print "No sub annotations found"
                    annotations_found.append(ann)
                    return annotations_found
                else:
                    #print "Sub annotations found"
                    return sub_annotations_found
            else:
                #print "No sub annotations found"
                return annotations_found
    #print "Completing search at this level"
    # No annotation at this level covers base_no
    return annotations_found
    # return annotations_found
    # else :
    # print base_no, ann.start, ann.end
    # annotations_found.append(ann)
    # return annotations_found
def verify_base(ref_base, query_base) :
    """Classify one alignment column against Sequence Ontology terms.

    Returns a match / possible-assembly-error / insertion / deletion /
    substitution SO URI for the (reference, query) base pair; raises
    sbol.SBOLError for unrecognized characters.
    """
    ref = ref_base.upper()
    query = query_base.upper()
    nucleotides = ('A', 'C', 'T', 'G')
    if ref == query:
        return SO_NUCLEOTIDE_MATCH
    # An 'N' in the query against a gap or a real base is an ambiguity
    if query == 'N' and (ref == '-' or ref in nucleotides):
        return SO_POSSIBLE_ASSEMBLY_ERROR
    if ref == '-' and query in nucleotides:
        return SO_INSERTION
    if ref in nucleotides and query_base == '-':
        return SO_DELETION
    if ref != query:
        return SO_SUBSTITUTION
    raise sbol.SBOLError('Alignment contains unrecognized character %s or %s' %(ref_base,query_base))
# def classify_mutation(ref_base, query_base) :
# if ref_base.upper() == query_base.upper() :
# return None
# elif ref_base == '-' and query_base.upper() == 'N':
# return None
# elif ref_base == '-' and query_base.upper() in ['A', 'C', 'T', 'G'] :
# return SO_INSERTION
# elif ref_base.upper() in ['A', 'C', 'T', 'G'] and query_base == 'N' :
# return SO_POSSIBLE_ASSEMBLY_ERROR
# elif ref_base.upper() in ['A', 'C', 'T', 'G'] and query_base == '-' :
# return SO_DELETION
# elif not ref_base.upper() == query_base.upper() :
# return SO_SUBSTITUTION
# return None
def is_mutation(dna_component) :
    """Return True when the component is typed as an assembly mutation
    (insertion, deletion, or substitution)."""
    mutation_terms = (SO_INSERTION, SO_DELETION, SO_SUBSTITUTION)
    return dna_component.type in mutation_terms
def is_ambiguity(dna_component) :
    """Return True when the component is typed as a possible assembly error."""
    return dna_component.type == SO_POSSIBLE_ASSEMBLY_ERROR
def is_match(dna_component) :
    """Return True when the component is typed as a nucleotide match."""
    return dna_component.type == SO_NUCLEOTIDE_MATCH
# def calculate_identity(dna_component) :
# reference_seq = dna_component.sequence.nucleotides
# mutations = []
# for i_base, base in enumerate(reference_seq):
# ann_found = getSequenceAnnotationsAtBaseNo(dna_component, i_base)
# for ann in ann_found:
# if is_mutation(ann.subcomponent):
# mutations.append(ann)
# identity = (1. - float(len(mutations))/float(len(reference_seq))) * 100.
# return identity
def flatten_subtree(dc, children_annotations=[]):
for ann in dc.annotations:
if ann.subcomponent:
children_annotations = flatten_subtree(ann.subcomponent, children_annotations)
children_annotations.extend(dc.annotations)
children_annotations = list(set(children_annotations))
return children_annotations
def calculate_identity(dna_component) :
    """Percent of the reference sequence covered by matching regions."""
    reference_length = len(dna_component.sequence.nucleotides)
    total_matched = 0
    for ann in flatten_subtree(dna_component):
        if is_match(ann.subcomponent):
            span = ann.end - ann.start + 1  # inclusive 1-based coordinates
            print(ann.start, ann.end, span)
            total_matched += span
    print("Identity", total_matched, reference_length)
    return float(total_matched) / float(reference_length) * 100.
def calculate_error(dna_component) :
    """Percent of the reference sequence covered by mutation regions."""
    reference_length = len(dna_component.sequence.nucleotides)
    total_mismatched = 0
    for ann in flatten_subtree(dna_component):
        if is_mutation(ann.subcomponent):
            span = ann.end - ann.start + 1  # inclusive 1-based coordinates
            print(ann.start, ann.end, span)
            total_mismatched += span
    print("Error", total_mismatched, reference_length)
    return float(total_mismatched) / float(reference_length) * 100.
def calculate_ambiguity(dna_component) :
    """Percent of the reference sequence covered by ambiguous regions."""
    reference_length = len(dna_component.sequence.nucleotides)
    total_ambiguous_region = 0
    for ann in flatten_subtree(dna_component):
        if is_ambiguity(ann.subcomponent):
            span = ann.end - ann.start + 1  # inclusive 1-based coordinates
            print(ann.start, ann.end, span)
            total_ambiguous_region += span
    print("Ambiguity", total_ambiguous_region, reference_length)
    return float(total_ambiguous_region) / float(reference_length) * 100.
def calculate_coverage(dna_component) :
    """Percent of the reference covered by any classified region
    (match, mutation, or ambiguity)."""
    reference_length = len(dna_component.sequence.nucleotides)
    classifiers = (is_match, is_mutation, is_ambiguity)
    total_covered_region = 0
    for ann in flatten_subtree(dna_component):
        if any(check(ann.subcomponent) for check in classifiers):
            span = ann.end - ann.start + 1  # inclusive 1-based coordinates
            print(ann.start, ann.end, span)
            total_covered_region += span
    print("Coverage:", total_covered_region, reference_length)
    return float(total_covered_region) / float(reference_length) * 100.
# Mean part lengths in base pairs, keyed by SO part type; used as the mu of
# the Gaussian draw in random_part_length()
parts_length_dist = {
    PROMOTER : 50,
    RBS : 15,
    CDS : 1000,
    TERMINATOR : 100
    }
def n():
    """Return one nucleotide character ('a', 't', 'c', or 'g') chosen
    uniformly at random.

    Replaces the hand-rolled cascade of range comparisons with the
    equivalent (and uniform) stdlib random.choice.
    """
    return random.choice('atcg')
def nnn(seq_length):
    """Return a random nucleotide string of length `seq_length`.

    Uses ''.join over a generator instead of repeated string
    concatenation (the original was O(n^2) in the worst case).
    """
    return ''.join(random.choice('atcg') for _ in range(seq_length))
def random_part_length(part_type):
    """Draw a random length (int, bp) for a part of the given SO type.

    Sampled from a Gaussian with mean taken from `parts_length_dist` and a
    standard deviation of 20% of the mean. The dictionary lookup is done
    once (the original performed it twice).
    """
    mu_length = parts_length_dist[part_type]
    sigma_length = 0.2 * mu_length
    return int ( random.gauss( mu_length, sigma_length ))
def qc(design, data=None, infile=None):
    """Quality-control `design` against sequencing data.

    Aligns the design sequence to the sequencing data (or to the consensus
    when several reads are supplied), classifies each aligned region with
    Sequence Ontology terms, and attaches SequenceAnnotations recording the
    QC result. `data` is FASTA-formatted text; `infile` is a FASTA path.
    """
    if infile:
        with open (infile, "r") as f:
            data = f.read()
    if data:
        # Several reads: align them against each other, then take the consensus
        if len(parse_fasta(data)) > 1 :
            multialignment = align(data)
            clone = find_consensus(multialignment)
        else:
            clone = data
    # NOTE(review): `clone` is unbound below if neither `data` nor `infile` was given
    target_design = write_to_fasta( [(design.uri, design.sequence.nucleotides)] )
    alignment_qc = align(target_design + '\r\n' + clone, outfile='%s.align' %design.display_id)
    # Scan alignment and classify mutations
    design_seq = design.sequence.nucleotides
    reference_seq = parse_fasta(alignment_qc)[0][1][:]
    query_seq = parse_fasta(alignment_qc)[1][1][:]
    assert len(reference_seq) == len(query_seq)
    # Translate alignment coordinates into coordinates of the reference and query sequences
    l_alignment = len(reference_seq) # Determine length of alignment
    l_ref = len(reference_seq.replace('-', ''))
    l_que = len(query_seq.replace('-', ''))
    # The following dictionaries are used like lists indexed from one
    ref_map = {} # Maps nucleotide coordinates of reference sequence to alignment coordinates
    i_ref = 0
    # If the design sequence is not fully covered by sequencing data, there may be '---' padding the end of
    # the query sequence. The following indices mark the padded regions of the query_seq
    # Eg,
    # ref actggtca
    # qry --tggt--
    #
    i_left = query_seq.index(next(token for token in query_seq if not token == '-'))
    i_right = len(query_seq)- query_seq[::-1].index(next(token for token in reversed(query_seq) if not token == '-'))
    for i_alignment in range(l_alignment):
        ref_base = reference_seq[i_alignment]
        que_base = query_seq[i_alignment]
        if not ref_base == '-':
            i_ref += 1
            # Do not map the design coordinates to alignment coordinates if they aren't covered
            if i_alignment >= i_left and i_alignment <= i_right:
                ref_map[i_ref] = i_alignment
    # Should be a unit test
    #for i in range(0, l_ref):
    # assert design_sequence[i] == reference_seq[ref_map[i+1]], "%d %s does not match %s"%(i,design_sequence[i], reference_seq[ref_map[i+1]])
    # Only leaf annotations at the bottom of the hierarchy are annotated...
    leaf_annotations = []
    # NOTE(review): i_design iterates from 0 while annotation start/end appear
    # 1-based — confirm whether an off-by-one is intended here
    for i_design in range(len(design_seq)):
        target_annotations = getSequenceAnnotationsAtBaseNo(design, i_design)
        for ann in target_annotations:
            if not ann in leaf_annotations:
                leaf_annotations.append(ann)
    # Slice the alignment into segments that pertain to each annotation,
    # then determine the covered bases in the annotation. All, part, or several discontiguous parts of an annotation
    # may be covered
    for i_ann, ann in enumerate(leaf_annotations):
        covered_coordinates = list(ref_map.keys()) # List of all base coordinates for this design / reference sequence that are covered
        # Now narrow down to find just the bases in this annotation
        covered_coordinates = [ x for x in covered_coordinates if x >= ann.start and x <= ann.end ]
        # Now translate into alignment coordinates
        alignment_coordinates = [ ref_map[x] for x in covered_coordinates ]
        if len(alignment_coordinates) > 0:
            alignment_start = min(alignment_coordinates)
            alignment_end = max(alignment_coordinates)
            # Scan alignment
            print("Verifying %s from %d to %d" %(ann.subcomponent.display_id, ann.start, ann.end))
            print(''.join([ nt for nt in reference_seq[alignment_start:alignment_end]]))
            print(''.join([ nt for nt in query_seq[alignment_start:alignment_end]]))
            # Classification of alignment
            base_comparisons = [ verify_base(reference_seq[x], query_seq[x]) for x in alignment_coordinates ]
            for x in alignment_coordinates:
                comparison = verify_base(reference_seq[x], query_seq[x])
                if comparison == None:
                    print(x, reference_seq[x], query_seq[x])
            # Select a contiguous region of interest in alignment coordinates
            # TODO: replace while with for
            i_alignment = 0
            regions = []
            region_classifications = []
            while i_alignment < len(base_comparisons):
                current_term = base_comparisons[i_alignment]
                if i_alignment == 0:
                    reg_start = 0
                    reg_end = 0
                    previous_term = None
                elif i_alignment > 0 and i_alignment < (len(base_comparisons) - 1):
                    # Mark end of an old region of interest and beginning of a new region
                    if not current_term == previous_term:
                        ref_start = covered_coordinates[reg_start] # Translate from alignment to design / reference coordinates
                        ref_end = covered_coordinates[reg_end] # Translate from alignment to design / reference coordinates
                        region_of_interest = ((ref_start, ref_end), previous_term)
                        regions.append(region_of_interest)
                        reg_start = i_alignment
                        reg_end = i_alignment
                    # Else extend the old region of interest to include the current coordinate
                    elif current_term == previous_term:
                        reg_end = i_alignment
                elif i_alignment == (len(base_comparisons) - 1):
                    if not current_term == previous_term:
                        reg_start = i_alignment
                        reg_end = i_alignment
                        ref_start = covered_coordinates[reg_start] # Translate from alignment to design / reference coordinates
                        ref_end = covered_coordinates[reg_end] # Translate from alignment to design / reference coordinates
                        region_of_interest = ((ref_start, ref_end), previous_term)
                        regions.append(region_of_interest)
                    elif current_term == previous_term:
                        reg_end = i_alignment
                    ref_start = covered_coordinates[reg_start] # Translate from alignment to design / reference coordinates
                    ref_end = covered_coordinates[reg_end] # Translate from alignment to design / reference coordinates
                    region_of_interest = ((ref_start, ref_end), previous_term)
                    regions.append(region_of_interest)
                #print i_alignment, current_term, reg_start, reg_end, covered_coordinates[reg_start], covered_coordinates[reg_end]
                previous_term = current_term
                i_alignment += 1
    # TODO: add unit test checking that the first region starts and the last region ends
    # TODO: add unit test checking that two distinct regions of interest can be demarcated
    # TODO: add unit test checking a single base region of interest at the beginning or the start
    # TODO: add unit test checking if first or last bases of query are '-'. These are currently classified as
    # insertions, but are in fact uncovered regions
    # Create SequenceAnnotations for QC'd regions
    # NOTE(review): `ann` and `regions` below are leftovers from the loops
    # above, so only the last leaf annotation's regions are annotated here —
    # confirm whether this loop was meant to sit inside the loop above
    doc = design.doc
    for i_region, region in enumerate(regions):
        print(i_region)
        qc_start, qc_end = region[0]
        qc_classification = region[1]
        n_components = len(doc.components)
        n_annotations = len(doc.annotations)
        if qc_classification :
            if qc_classification == SO_NUCLEOTIDE_MATCH: # The reference sequence matches the query sequence
                annotated_region = sbol.SequenceAnnotation(doc, "%s/MatchedSequence/SA%d" %(design.uri, n_annotations))
                annotated_region.start = qc_start
                annotated_region.end = qc_end
                annotated_region.subcomponent = sbol.DNAComponent(doc,"%s/MatchedSequence/SA%d/DC%d" %(design.uri, n_annotations, n_components) )
                annotated_region.subcomponent.display_id = ""
                annotated_region.subcomponent.type = qc_classification
            else: # A mismatch was identified
                annotated_region = sbol.SequenceAnnotation(doc, "%s/AssemblyErrors/SA%d" %(design.uri, n_annotations))
                annotated_region.start = qc_start
                annotated_region.end = qc_end
                annotated_region.subcomponent = sbol.DNAComponent(doc,"%s/AssemblyErrors/SA%d/DC%d" %(design.uri, n_annotations, n_components) )
                annotated_region.subcomponent.display_id = ""
                annotated_region.subcomponent.type = qc_classification
            print("Adding %s to %s from %d to %d" %(annotated_region.uri, ann.subcomponent.display_id, annotated_region.start, annotated_region.end))
            ann.subcomponent.annotations.append(annotated_region)
def align(sequencing_data, outfile = None):
    """
    Sequencing data is a string in FASTA format.

    Runs Clustal Omega over stdin and returns the alignment text (str).
    When `outfile` is given, a first pass additionally writes an MSF-format
    alignment to that file; the returned value always comes from the
    second, plain-text pass (as in the original).

    Fix: Popen pipes exchange bytes (no text mode is requested, and the
    output is .decode()d), so the input string must be encoded before
    communicate() — the original passed a str, which raises TypeError on
    Python 3.
    """
    input_bytes = sequencing_data.encode()
    if outfile != None :
        print ("Aligning")
        # align_sequences = Popen(['%s\clustalo.exe' % CLUSTAL_DIR, '-i', '-'], stdin=PIPE, stdout=PIPE, stderr=STDOUT, cwd=DATA_DIR)
        align_sequences = Popen(['%s\clustalo.exe' % CLUSTAL_DIR, '-i', '-', '-o', '%s'%outfile, '--outfmt', 'msf'], stdin=PIPE, stdout=PIPE, stderr=STDOUT)
        alignment = align_sequences.communicate(input_bytes)[0].decode()
        align_sequences.terminate()
    align_sequences = Popen(['%s\clustalo.exe' % CLUSTAL_DIR, '-i', '-'], stdin=PIPE, stdout=PIPE, stderr=STDOUT)
    alignment = align_sequences.communicate(input_bytes)[0].decode()
    align_sequences.terminate()
    return alignment
def find_consensus(alignment):
    """Run EMBOSS `cons` over an alignment (str) and return the consensus (str).

    Fixes: the input is encoded before being written to the child's stdin
    pipe (the original passed a str while decoding the bytes output, a
    TypeError on Python 3), and the local Popen handle no longer shadows
    this function's own name.
    """
    cons_proc = Popen(['%s\cons.exe' % EMBOSS_DIR, '-filter', '-identity', '2'], stdin=PIPE, stdout=PIPE, stderr=STDOUT)
    consensus = cons_proc.communicate(input=alignment.encode())[0].decode()
    cons_proc.terminate()
    return consensus
def initialize_design(doc):
    """Create and return a fresh Design-typed DNAComponent in `doc`.

    The design number is one past the count of existing Design components;
    the sequence starts as a single 'n' placeholder.
    """
    existing_designs = [part for part in doc.components if part.type and part.type == DESIGN]
    design_no = len(existing_designs) + 1
    root = sbol.DNAComponent(doc, '%s/Design_%d' % (BASE_URI, design_no))
    root.type = DESIGN
    root.display_id = 'Design %d' % design_no
    root.name = 'Design %d' % design_no
    root.sequence = sbol.DNASequence(doc, '%s/Design_%d/Seq_%d' % (BASE_URI, design_no, design_no))
    root.sequence.nucleotides = 'n'
    return root
def construct_design(doc, root, target_design):
    # target_design is a list of part uris
    # Builds one SequenceAnnotation per part, chains them left-to-right via
    # insert_annotation_downstream, then assembles the root sequence.
    n_components = len(doc.components)
    n_annotations = len(doc.annotations)
    n_sequences = len(doc.sequences)
    sbol_parts = []
    for uri in target_design:
        #uri = uris[part_name]
        part = doc.components[uri]
        SA = sbol.SequenceAnnotation(doc, '%s/SA_%d' %(root.uri, n_annotations + 1))
        n_annotations += 1
        # Parts without a sequence get a single-'n' placeholder
        # NOTE(review): n_sequences is never incremented, so two sequence-less
        # parts would receive colliding Seq URIs — confirm intended
        if not part.sequence:
            part.sequence = sbol.DNASequence(doc, '%s/Seq_%d' %(part.uri, n_sequences + 1))
            part.sequence.nucleotides = 'n'
        SA.start = 1
        SA.end = len(part.sequence.nucleotides)
        SA.orientation = '+'
        sbol_parts.append(SA)
        SA.subcomponent = part
    # First part anchors the chain; the rest are appended downstream in order
    root.annotations.append(sbol_parts[0])
    for i_part in range(1, len(sbol_parts)):
        upstream_ann = sbol_parts[i_part - 1]
        downstream_ann = sbol_parts[i_part]
        insert_annotation_downstream( root, upstream_ann, downstream_ann )
    assemble_subcomponents(root)
    #for part in sbol_parts:
    # print part.start, part.end, part.subcomponent.name, part.subcomponent.type, part.subcomponent.uri
    return root
def scrape_parts(doc, part_files, parts_list, TARGET_DIR = '.'):
    # Scrape parts from files
    # doc is the Document to which the scraped parts will be added
    # part_files should be a list of file names without .xml extension
    # parts_list is the set of component URIs to keep
    # NOTE(review): the `libsbol` import at the top of this file is commented
    # out, so the deleteDocument call below raises NameError at runtime.
    # NOTE(review): the bare except hides all errors, not just missing-part
    # issues — consider narrowing it.
    for pf in part_files:
        print(pf)
        sbol_in = sbol.Document()
        sbol_in.read(TARGET_DIR + '/' + pf + '.xml')
        print("Components in infile", len(sbol_in.components))
        for i_dc, dc in enumerate(sbol_in.components):
            try:
                if dc.uri in parts_list:
                    dc.move(doc)
            except:
                print('error in ', i_dc)
        libsbol.deleteDocument(sbol_in.ptr)
    return doc
| 45.603448 | 165 | 0.647053 |
ace6c4d84dbfac8b3d363c4a96c0bba0ea0910a6 | 543 | py | Python | challenges/challenge_57.py | ysaunier/ring-zer0 | 0e2eae03d38cba313229fbc39ebe75519a424fc1 | [
"MIT"
] | null | null | null | challenges/challenge_57.py | ysaunier/ring-zer0 | 0e2eae03d38cba313229fbc39ebe75519a424fc1 | [
"MIT"
] | 3 | 2021-06-08T21:00:49.000Z | 2022-01-13T02:17:31.000Z | challenges/challenge_57.py | ysaunier/ring-zer0 | 0e2eae03d38cba313229fbc39ebe75519a424fc1 | [
"MIT"
] | null | null | null | from core.client import RingClient
def execute():
    """Fetch challenge 57, extract the hash and salt from the page's
    message divs, and print them in aligned label/value form."""
    client = RingClient()
    client.login()
    page = client.get_challenge(challenge=57)
    message_divs = page.findAll('div', attrs={'class': 'message'})
    target_hash = message_divs[0].contents[2].strip()
    salt_value = message_divs[1].contents[2].strip()
    for label, value in (('hash ', target_hash), ('salt ', salt_value)):
        print(label.ljust(20, '.') + f' : {value}')
    # response = hashes.get(text)
    # print(client.send_answer(response=response))
if __name__ == '__main__':
execute()
| 22.625 | 62 | 0.626151 |
ace6c536ee72eed2bcb0baf28162a9ee1b180ee8 | 6,741 | py | Python | Python/reduceEpsilons.py | KienTTran/PSU-CIDD-MaSim-Support | db4e3c514b1bb85bc3f20e75703d6be4967e98a9 | [
"BSD-3-Clause"
] | 3 | 2021-02-05T06:23:04.000Z | 2022-02-24T21:46:33.000Z | Python/reduceEpsilons.py | KienTTran/PSU-CIDD-MaSim-Support | db4e3c514b1bb85bc3f20e75703d6be4967e98a9 | [
"BSD-3-Clause"
] | 20 | 2021-02-12T16:29:01.000Z | 2021-05-03T21:29:55.000Z | Python/reduceEpsilons.py | KienTTran/PSU-CIDD-MaSim-Support | db4e3c514b1bb85bc3f20e75703d6be4967e98a9 | [
"BSD-3-Clause"
] | 3 | 2021-01-29T20:18:19.000Z | 2021-06-29T16:08:20.000Z | #!/usr/bin/python3
# reduceEpsilons.py
#
# This module takes the inputs used by createBetaMap.py as well as the epsilon
# file to prepare.
import argparse
import csv
import os
import sys
import yaml
# Import our libraries
sys.path.append(os.path.join(os.path.dirname(__file__), "include"))
import include.calibrationLib as cl
from include.ascFile import load_asc
from include.utility import *
# Standardized reference files for the script
CALIBRATION = "data/calibration.csv"
POPULATION_FILE = "{}_population.asc"
# These are generated by createBetaMap, so should be present assuming the
# correct country prefix is derived
BETAVALUES = "out/{}_beta.asc"
EPSILONVALUES = "out/{}_epsilons.asc"
# Default output
RESULTS = "out/reduction.csv"
SCRIPT = "out/script.sh"
# Module-level accumulator filled by addBeta():
# zone -> population bin -> treatment bin -> set of candidate beta values
parameters = {}
def addBeta(lookup, step, zone, beta, population, treatment):
    # Record a range of candidate beta values (beta +/- 10*step, in `step`
    # increments, rounded to 4 decimals) into the global `parameters`
    # accumulator under the matching zone / population bin / treatment bin.
    global parameters
    # Determine the population and treatment bin we are working with
    populationBin = int(cl.get_bin(population, lookup[zone].keys()))
    treatmentBin = cl.get_bin(treatment, lookup[zone][populationBin].keys())
    # Update the dictionary
    if zone not in parameters:
        parameters[zone] = {}
    if populationBin not in parameters[zone]:
        parameters[zone][populationBin] = {}
    if treatmentBin not in parameters[zone][populationBin]:
        parameters[zone][populationBin][treatmentBin] = set()
    # Add the stepped betas to the set
    value = round(beta - (step * 10), 4)
    while value < beta + (step * 10):
        # Only add values greater than zero
        if value > 0:
            parameters[zone][populationBin][treatmentBin].add(value)
        value = round(value + step, 4)
def getLookupBetas(lookup, zone, population, treatment):
    """Return the set of beta values (second column of each row) already
    calibrated for the given zone / population / treatment combination."""
    return {row[1] for row in lookup[zone][population][treatment]}
def writeBetas(lookup, prefix, username):
    # Write the betas accumulated in the global `parameters` that are not
    # already present in `lookup` to RESULTS (CSV) and emit a cluster run
    # script to SCRIPT.
    global parameters
    # Generate a list of populations to create ASC files for
    populationAsc = set()
    # Generate a list of betas to run (same format as missing.csv) that haven't been seen before
    reduced = []
    for zone in sorted(parameters.keys()):
        for population in sorted(parameters[zone].keys()):
            populationAsc.add(population)
            for treatment in sorted(parameters[zone][population]):
                betas = getLookupBetas(lookup, zone, population, treatment)
                for beta in sorted(parameters[zone][population][treatment]):
                    if beta not in betas:
                        reduced.append([int(zone), int(population), treatment, beta])
    # Double check to see if the list was cleared out
    if len(reduced) == 0:
        print("Nothing to reduce!")
        return
    # Save the missing values as a CSV file
    print("Preparing inputs, {}".format(RESULTS))
    with open(RESULTS, "w") as csvfile:
        writer = csv.writer(csvfile)
        writer.writerows(reduced)
    # Emit the shell script that generates the ASC rasters and queues the runs
    print("Preparing script, {}".format(SCRIPT))
    with open(SCRIPT, "w") as script:
        script.write("#!/bin/bash\n")
        script.write("source ./calibrationLib.sh\n")
        value = " ".join([str(int(x)) for x in sorted(populationAsc)])
        script.write("generateAsc \"\\\"{}\\\"\"\n".format(value.strip()))
        value = " ".join([str(int(x)) for x in sorted(parameters.keys())])
        script.write("generateZoneAsc \"\\\"{}\\\"\"\n".format(value.strip()))
        # RESULTS[4:] strips the leading "out/" from the CSV path
        script.write("runCsv '{}' {} {}\n".format(RESULTS[4:], prefix, username))
def main(configuration, gisPath, tolerance, step, username):
    # Scan the epsilon raster; for every cell whose epsilon meets the
    # tolerance, queue stepped betas around the cell's current beta, then
    # write the reduction CSV and run script.
    global parameters
    # Determine the country prefix
    prefix = cl.get_prefix(configuration)
    if prefix is None:
        sys.stderr.write("Invalid country code associated with configuration file: {}\n".format(configuration))
        sys.exit(1)
    # Load the configuration, and potentially raster data
    cfg = cl.load_configuration(configuration)
    climate = cl.get_climate_zones(cfg, gisPath)
    treatment = cl.get_treatments_raster(cfg, gisPath)
    # Load the relevent raster data
    filename = os.path.join(gisPath, POPULATION_FILE.format(prefix))
    [ascheader, population] = load_asc(filename)
    lookup = cl.load_betas(CALIBRATION)
    # Read the epsilons file in
    [_, beta] = load_asc(BETAVALUES.format(prefix))
    [_, epsilon] = load_asc(EPSILONVALUES.format(prefix))
    print ("Evaluating epsilons for {} rows, {} columns".format(ascheader['nrows'], ascheader['ncols']))
    # Scan each of the epsilons
    for row in range(0, ascheader['nrows']):
        for col in range(0, ascheader['ncols']):
            # Pass on nodata
            value = epsilon[row][col]
            if value == ascheader['nodata']: continue
            # Pass when value is less than maximum
            if value < tolerance: continue
            # Update the running list
            addBeta(lookup, step, climate[row][col], beta[row][col], population[row][col], treatment[row][col])
        # Note the progress
        progressBar(row + 1, ascheader['nrows'])
    # Check to see if we are done
    if len(parameters) == 0:
        print("Nothing to reduce!")
    else:
        writeBetas(lookup, prefix, username)
if __name__ == "__main__":
    # Parse the parameters
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', action='store', dest='configuration', required=True,
        help='The configuration file to reference when reducing the epsilon values')
    parser.add_argument('-g', action='store', dest='gis', required=True,
        help='The path to the directory that the GIS files can be found in')
    parser.add_argument('-t', action='store', dest='tolerance', required=True,
        help='float, maximum epsilon, should not be less than the step')
    parser.add_argument('-s', action='store', dest='step', required=True,
        help='float, increment +/- 10x around known beta (maximum 0.00001)')
    parser.add_argument('-u', action='store', dest='username', required=True,
        help='The user who will be running the calibration on the cluster')
    args = parser.parse_args()
    # Check the step and tolerance
    tolerance = float(args.tolerance)
    step = float(args.step)
    if step >= 1:
        # Message corrected to match the >= 1 condition being checked
        sys.stderr.write("The step must be less than one\n")
        sys.exit(1)
    if round(step, 5) != step:
        sys.stderr.write("{} exceeds maximum step of 0.00001\n".format(step))
        sys.exit(1)
    if tolerance < step:
        # Fixed: the format arguments were swapped, printing the step under
        # the "tolerance" label and vice versa
        sys.stderr.write("The tolerance, {}, is less than the step, {}\n".format(tolerance, step))
        sys.exit(1)
    # Defer to main to do everything else
    main(args.configuration, args.gis, tolerance, step, args.username)
ace6c599f9026f6a6faa1ba6de7ea5e65ef15176 | 500 | py | Python | Module 2/Chapter 7/ch7_rfe.py | saicharanabhishek/machinelearning_examples | f89857ae7e1a2baa76951fe1d55541832d0f0d20 | [
"MIT"
] | 101 | 2016-11-08T11:17:55.000Z | 2021-12-24T10:43:32.000Z | Module 2/Chapter 7/ch7_rfe.py | saicharanabhishek/machinelearning_examples | f89857ae7e1a2baa76951fe1d55541832d0f0d20 | [
"MIT"
] | 2 | 2017-11-28T11:22:25.000Z | 2019-02-12T17:09:18.000Z | Module 2/Chapter 7/ch7_rfe.py | saicharanabhishek/machinelearning_examples | f89857ae7e1a2baa76951fe1d55541832d0f0d20 | [
"MIT"
] | 102 | 2016-10-22T12:14:23.000Z | 2022-03-26T19:59:09.000Z | print(__doc__)
from sklearn.svm import SVC
from sklearn.datasets import load_digits
from sklearn.feature_selection import RFE
import matplotlib.pyplot as plt
digits = load_digits()
X = digits.images.reshape((len(digits.images), -1))
y = digits.target
svc = SVC(kernel="linear", C=1)
rfe = RFE(estimator=svc, n_features_to_select=1, step=1)
rfe.fit(X, y)
ranking = rfe.ranking_.reshape(digits.images[0].shape)
plt.matshow(ranking)
plt.colorbar()
plt.title("Ranking of pixels with RFE")
plt.show()
| 23.809524 | 56 | 0.762 |
ace6c76d11c94c8901181c06bd2cdb0a496a9961 | 3,910 | py | Python | main.py | Res2Net/Res2Net-PoolNet | 7bef0652e83a6c4ebe4ed47f1b03ab5b7b16074a | [
"MIT"
] | 35 | 2020-02-18T09:21:10.000Z | 2022-03-20T08:59:02.000Z | main.py | Res2Net/Res2Net-PoolNet | 7bef0652e83a6c4ebe4ed47f1b03ab5b7b16074a | [
"MIT"
] | 4 | 2020-06-18T03:26:15.000Z | 2021-01-18T06:42:58.000Z | main.py | Res2Net/Res2Net-PoolNet | 7bef0652e83a6c4ebe4ed47f1b03ab5b7b16074a | [
"MIT"
] | 10 | 2020-02-15T10:12:47.000Z | 2020-08-26T13:53:44.000Z | import argparse
import os
from dataset.dataset import get_loader
from solver import Solver
def get_test_info(sal_mode='e'):
    """Map a saliency dataset code to its (image_root, image_list) paths.

    Codes: 'e' ECSSD, 'p' PASCAL-S, 'd' DUT-OMRON, 'h' HKU-IS, 's' SOD,
    't' DUTS-TE, 'm_r' resized MSRA (speed test).

    Fix: an unrecognized code now raises ValueError; the original fell
    through every branch and crashed with UnboundLocalError on return.
    """
    datasets = {
        'e': ('./data/ECSSD/Imgs/', './data/ECSSD/test.lst'),
        'p': ('./data/PASCALS/Imgs/', './data/PASCALS/test.lst'),
        'd': ('./data/DUTOMRON/Imgs/', './data/DUTOMRON/test.lst'),
        'h': ('./data/HKU-IS/Imgs/', './data/HKU-IS/test.lst'),
        's': ('./data/SOD/Imgs/', './data/SOD/test.lst'),
        't': ('./data/DUTS-TE/Imgs/', './data/DUTS-TE/test.lst'),
        'm_r': ('./data/MSRA/Imgs_resized/', './data/MSRA/test_resized.lst'),  # for speed test
    }
    if sal_mode not in datasets:
        raise ValueError('Unknown sal_mode: %s' % sal_mode)
    return datasets[sal_mode]
def main(config):
    """Dispatch to training or testing according to ``config.mode``."""
    mode = config.mode
    if mode == 'train':
        loader = get_loader(config)
        # Pick the first unused "run-<k>" directory under save_folder so
        # successive training runs never overwrite each other.
        run_id = 0
        while os.path.exists("%s/run-%d" % (config.save_folder, run_id)):
            run_id += 1
        run_dir = "%s/run-%d" % (config.save_folder, run_id)
        os.mkdir(run_dir)
        os.mkdir("%s/models" % run_dir)
        config.save_folder = run_dir
        Solver(loader, None, config).train()
    elif mode == 'test':
        # Resolve dataset paths for the requested test set, then run inference.
        config.test_root, config.test_list = get_test_info(config.sal_mode)
        loader = get_loader(config, mode='test')
        if not os.path.exists(config.test_fold):
            os.mkdir(config.test_fold)
        Solver(None, loader, config).test()
    else:
        raise IOError("illegal input!!!")
if __name__ == '__main__':
    # Known pretrained-backbone checkpoint locations; only `res2net_path`
    # is used as a default below, the others are kept for convenience.
    vgg_path = './dataset/pretrained/vgg16_20M.pth'
    resnet_path = './dataset/pretrained/resnet50_caffe.pth'
    res2net_path = '/home/shgao/.torch/models/res2net50_26w_4s-06e79181.pth'
    parser = argparse.ArgumentParser()
    # Hyper-parameters
    parser.add_argument('--n_color', type=int, default=3)
    parser.add_argument('--lr', type=float, default=5e-5) # Learning rate resnet:5e-5, vgg:1e-4
    parser.add_argument('--wd', type=float, default=0.0005) # Weight decay
    # `--no-cuda` clears the `cuda` flag; cuda defaults to enabled.
    parser.add_argument('--no-cuda', dest='cuda', action='store_false')
    # Training settings
    # NOTE(review): the default is the literal string 'res2net_path', which
    # looks like the variable name leaked into a string -- presumably
    # 'res2net' was intended. TODO confirm against Solver's arch handling.
    parser.add_argument('--arch', type=str, default='res2net_path') # resnet or vgg
    parser.add_argument('--pretrained_model', type=str, default=res2net_path)
    parser.add_argument('--epoch', type=int, default=24)
    parser.add_argument('--batch_size', type=int, default=1) # only support 1 now
    parser.add_argument('--num_thread', type=int, default=1)
    # Optional checkpoint to resume from ('' means start fresh).
    parser.add_argument('--load', type=str, default='')
    parser.add_argument('--save_folder', type=str, default='./results')
    parser.add_argument('--epoch_save', type=int, default=3)
    parser.add_argument('--iter_size', type=int, default=10)
    parser.add_argument('--show_every', type=int, default=50)
    # Train data
    parser.add_argument('--train_root', type=str, default='')
    parser.add_argument('--train_list', type=str, default='')
    # Testing settings
    parser.add_argument('--model', type=str, default=None) # Snapshot
    parser.add_argument('--test_fold', type=str, default=None) # Test results saving folder
    parser.add_argument('--sal_mode', type=str, default='e') # Test image dataset
    # Misc
    parser.add_argument('--mode', type=str, default='train', choices=['train', 'test'])
    config = parser.parse_args()
    if not os.path.exists(config.save_folder):
        os.mkdir(config.save_folder)
    # Get test set info
    # Resolved here even in train mode so `config` always carries test paths.
    test_root, test_list = get_test_info(config.sal_mode)
    config.test_root = test_root
    config.test_list = test_list
    main(config)
| 39.897959 | 95 | 0.651407 |
ace6c7b1d6ab84f86358f1e338b5deef5544c2fd | 1,859 | py | Python | test/test_fflask.py | pylipp/financeager-flask | 2e4f6afb7e0e1de0586b7ab1a013a340e0860a71 | [
"MIT"
] | 2 | 2021-01-22T05:18:33.000Z | 2021-01-27T12:27:09.000Z | test/test_fflask.py | pylipp/financeager-flask | 2e4f6afb7e0e1de0586b7ab1a013a340e0860a71 | [
"MIT"
] | 3 | 2020-12-05T22:43:20.000Z | 2021-12-12T20:11:17.000Z | test/test_fflask.py | pylipp/financeager-flask | 2e4f6afb7e0e1de0586b7ab1a013a340e0860a71 | [
"MIT"
] | null | null | null | import tempfile
import unittest
from os import environ
from unittest import mock
import financeager
from financeager_flask.fflask import create_app
# Patch DATA_DIR to avoid having it created/interfering with logs on actual
# machine
TEST_DATA_DIR = tempfile.mkdtemp(prefix="financeager-")
@mock.patch("financeager.DATA_DIR", TEST_DATA_DIR)
class CreateAppNoDataDirTestCase(unittest.TestCase):
    # The class-level patch redirects financeager's DATA_DIR to a throwaway
    # temp directory for every test method, so tests never touch real data.

    @mock.patch("financeager_flask.fflask.logger.warning")
    def test_warning(self, mocked_warning):
        # create_app() without a data_dir must warn that storage is
        # in-memory only; assert the exact warning text.
        create_app()
        mocked_warning.assert_called_once_with(
            "'data_dir' not given. Application data is stored in "
            "memory and is lost when the flask app terminates. Set "
            "the environment variable FINANCEAGER_FLASK_DATA_DIR "
            "accordingly for persistent data storage.")

    def test_debug(self):
        # DEBUG=True should propagate to the Flask app and lower the first
        # financeager log handler to level 10 (logging.DEBUG).
        app = create_app(config={"DEBUG": True})
        self.assertTrue(app.debug)
        self.assertEqual(financeager.LOGGER.handlers[0].level, 10)

    def test_data_dir_env_variable(self):
        # With data_dir=None, the FINANCEAGER_FLASK_DATA_DIR environment
        # variable must be picked up and the directory created.
        data_dir = tempfile.mkdtemp(prefix="financeager-")
        environ["FINANCEAGER_FLASK_DATA_DIR"] = data_dir
        with mock.patch("os.makedirs") as mocked_makedirs:
            create_app(data_dir=None)
            # First call is from inside setup_log_file_handler()
            self.assertEqual(mocked_makedirs.call_count, 2)
            mocked_makedirs.assert_called_with(data_dir, exist_ok=True)
        # Clean up so the variable does not leak into other tests.
        del environ["FINANCEAGER_FLASK_DATA_DIR"]

    def test_bad_request(self):
        app = create_app()
        app.testing = True
        with app.test_client() as client:
            response = client.post("/pockets/2000")
            # Expect Bad Request due to missing data (name and value)
            self.assertEqual(response.status_code, 400)
# Allow running this test module directly with `python test_fflask.py`.
if __name__ == "__main__":
    unittest.main()
| 35.75 | 75 | 0.700914 |
ace6c7b903986e6341ca2019e583057414e6d984 | 1,132 | py | Python | tests/data/test.py | bertt/quantized-mesh-tile-cs | 85cc8336e814f9f5c49b735bbe38481a86e5a3f5 | [
"MIT"
] | 4 | 2017-08-10T03:47:14.000Z | 2022-01-10T10:44:23.000Z | tests/data/test.py | bertt/quantized-mesh-tile-cs | 85cc8336e814f9f5c49b735bbe38481a86e5a3f5 | [
"MIT"
] | 3 | 2016-12-20T20:29:43.000Z | 2018-11-13T09:05:06.000Z | tests/data/test.py | bertt/quantized-mesh-tile-cs | 85cc8336e814f9f5c49b735bbe38481a86e5a3f5 | [
"MIT"
] | 3 | 2019-08-21T18:26:10.000Z | 2022-02-21T10:24:45.000Z | # import cStringIO
# import requests
# from quantized_mesh_tile.terrain import TerrainTile
# from quantized_mesh_tile.global_geodetic import GlobalGeodetic
# [z, x, y] = [14, 24297, 10735]
# geodetic = GlobalGeodetic(True)
# [west, south, east, north] = bounds = geodetic.TileBounds(x, y, z)
# url = 'http://assets.agi.com/stk-terrain/world/%s/%s/%s.terrain?v=1.16389.0' % (z, x, y)
# response = requests.get(url)
# content = cStringIO.StringIO(response.content)
# print west, south, east, north
# ter = TerrainTile(west=west, south=south, east=east, north=north)
# ter.fromStringIO(content)
# print ter.header
# print ter.getVerticesCoordinates()
# path = "9_533_383.terrain"
from quantized_mesh_tile.global_geodetic import GlobalGeodetic
from quantized_mesh_tile.terrain import TerrainTile

# Build the global geodetic tiling scheme.
# NOTE(review): the boolean flag's meaning is defined by quantized_mesh_tile
# (presumably TMS-style y axis) -- confirm against the library docs.
geodetic = GlobalGeodetic(True)
# Root tile of the pyramid; other tiles (e.g. [16, 67465, 51617]) were used
# in earlier experiments.
z, x, y = 0, 0, 0
minx, miny, maxx, maxy = geodetic.TileBounds(x, y, z)
ter = TerrainTile(west=minx, south=miny, east=maxx, north=maxy)
# BUGFIX: the original used Python 2 `print` statements, which are a
# SyntaxError under Python 3; converted to print() calls.
print(geodetic.TileBounds(0, 0, 0))
ter.fromFile('51617.terrain')
print(ter.getTrianglesCoordinates())
ace6c9292d5b10ed7cce107b9db61b135b80dd28 | 9,370 | py | Python | channels/SSHChannelCustom.py | MLRG-CEFET-RJ/parsl-ml-workflow | db727c1258c606359964fcfa681b08294d16de36 | [
"MIT"
] | 1 | 2019-11-21T20:00:20.000Z | 2019-11-21T20:00:20.000Z | channels/SSHChannelCustom.py | MLRG-CEFET-RJ/parsl-ml-workflow | db727c1258c606359964fcfa681b08294d16de36 | [
"MIT"
] | null | null | null | channels/SSHChannelCustom.py | MLRG-CEFET-RJ/parsl-ml-workflow | db727c1258c606359964fcfa681b08294d16de36 | [
"MIT"
] | null | null | null | import errno
import logging
import os
import paramiko
from parsl.channels.base import Channel
from parsl.channels.errors import *
from parsl.utils import RepresentationMixin
logger = logging.getLogger(__name__)
class NoAuthSSHClient(paramiko.SSHClient):
    """SSHClient variant that performs only SSH "none" authentication.

    Overrides paramiko's private ``_auth`` hook so that no key, agent, or
    password negotiation is attempted; used for hosts that accept
    unauthenticated sessions.
    """

    def _auth(self, username, *args):
        # Request the "none" auth method and skip every other mechanism.
        self._transport.auth_none(username)
class SSHChannelCustom(Channel, RepresentationMixin):
    ''' SSH persistent channel. This enables remote execution on sites
    accessible via ssh. It is assumed that the user has setup host keys
    so as to ssh to the remote host. Which goes to say that the following
    test on the commandline should work:

    >>> ssh <username>@<hostname>
    '''

    def __init__(self, hostname, username=None, password=None, script_dir=None, envs=None, gssapi_auth=False, skip_auth=False, port=22, timeout=None, **kwargs):
        ''' Initialize a persistent connection to the remote system.
        We should know at this point whether ssh connectivity is possible

        Args:
            - hostname (String) : Hostname

        KWargs:
            - username (string) : Username on remote system
            - password (string) : Password for remote system
            - script_dir (string) : Full path to a script dir where
              generated scripts could be sent to.
            - envs (dict) : A dictionary of environment variables to be set when executing commands
            - gssapi_auth (bool) : Use GSSAPI (Kerberos) key exchange and authentication
            - skip_auth (bool) : Use the SSH "none" authentication method
            - port (int) : SSH port (default 22)
            - timeout (float) : TCP connect timeout in seconds

        Raises:
            - BadHostKeyException : Host key could not be verified
            - AuthException : Authentication failed
            - SSHException : Any other connection failure
        '''
        self.hostname = hostname
        self.username = username
        self.password = password
        self.port = port
        self.timeout = timeout
        self.kwargs = kwargs
        self.script_dir = script_dir
        self.skip_auth = skip_auth
        self.gssapi_auth = gssapi_auth

        if self.skip_auth:
            # NoAuthSSHClient only attempts the SSH "none" auth method.
            self.ssh_client = NoAuthSSHClient()
        else:
            self.ssh_client = paramiko.SSHClient()
        self.ssh_client.load_system_host_keys()
        self.ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())

        # Channel-wide environment variables, prepended to every command.
        self.envs = {}
        if envs is not None:
            self.envs = envs

        try:
            self.ssh_client.connect(
                hostname,
                username=username,
                password=password,
                port=port,
                timeout=timeout,
                allow_agent=True,
                gss_auth=gssapi_auth,
                gss_kex=gssapi_auth,
            )
            t = self.ssh_client.get_transport()
            self.sftp_client = paramiko.SFTPClient.from_transport(t)

        except paramiko.BadHostKeyException as e:
            raise BadHostKeyException(e, self.hostname)

        except paramiko.AuthenticationException as e:
            raise AuthException(e, self.hostname)

        except paramiko.SSHException as e:
            raise SSHException(e, self.hostname)

        except Exception as e:
            # Normalize anything unexpected (DNS failure, socket timeout, ...)
            # into the channel's SSHException type.
            raise SSHException(e, self.hostname)

    def prepend_envs(self, cmd, env=None):
        ''' Return *cmd* prefixed with `env VAR=VAL ...` for the given variables.

        The channel-wide ``self.envs`` take precedence over the per-call
        *env*, matching the original behavior.

        BUGFIX: the original implementation called ``env.update(self.envs)``,
        mutating the caller's dictionary and corrupting the shared mutable
        ``{}`` default argument across calls; the merge is now done on a copy.
        '''
        merged = dict(env) if env else {}
        merged.update(self.envs)
        if merged:
            env_vars = ' '.join(['{}={}'.format(key, value) for key, value in merged.items()])
            return 'env {0} {1}'.format(env_vars, cmd)
        return cmd

    def execute_wait(self, cmd, walltime=2, envs=None):
        ''' Synchronously execute a commandline string on the shell.

        Args:
            - cmd (string) : Commandline string to execute
            - walltime (int) : walltime in seconds

        Kwargs:
            - envs (dict) : Dictionary of env variables (treated read-only)

        Returns:
            - retcode : Return code from the execution, -1 on fail
            - stdout : stdout string
            - stderr : stderr string

        Raises:
        None.
        '''
        # Execute the command with any environment variables prepended.
        stdin, stdout, stderr = self.ssh_client.exec_command(
            self.prepend_envs(cmd, envs), bufsize=-1, timeout=walltime
        )
        # Block on exit status from the command
        exit_status = stdout.channel.recv_exit_status()
        return exit_status, stdout.read().decode("utf-8"), stderr.read().decode("utf-8")

    def execute_no_wait(self, cmd, walltime=2, envs=None):
        ''' Execute asynchronously without waiting for exitcode

        Args:
            - cmd (string): Commandline string to be executed on the remote side
            - walltime (int): timeout to exec_command

        KWargs:
            - envs (dict): A dictionary of env variables (treated read-only)

        Returns:
            - None, stdout (readable stream), stderr (readable stream)

        Raises:
            - ChannelExecFailed (reason)
        '''
        # Fire and return the streams; the caller is responsible for reading.
        stdin, stdout, stderr = self.ssh_client.exec_command(
            self.prepend_envs(cmd, envs), bufsize=-1, timeout=walltime
        )
        return None, stdout, stderr

    def push_file(self, local_source, remote_dir):
        ''' Transport a local file to a directory on a remote machine

        Args:
            - local_source (string): Path
            - remote_dir (string): Remote path

        Returns:
            - str: Path to copied file on remote machine

        Raises:
            - BadScriptPath : if script path on the remote side is bad
            - BadPermsScriptPath : You do not have perms to make the channel script dir
            - FileCopyException : FileCopy failed.
        '''
        remote_dest = remote_dir + '/' + os.path.basename(local_source)

        try:
            self.makedirs(remote_dir, exist_ok=True)
        except IOError as e:
            logger.exception("Pushing {0} to {1} failed".format(local_source, remote_dir))
            # errno 2 == ENOENT (bad path); errno 13 == EACCES (no permission).
            if e.errno == 2:
                raise BadScriptPath(e, self.hostname)
            elif e.errno == 13:
                raise BadPermsScriptPath(e, self.hostname)
            else:
                logger.exception("File push failed due to SFTP client failure")
                raise FileCopyException(e, self.hostname)
        try:
            self.sftp_client.put(local_source, remote_dest, confirm=True)
            # Set perm because some systems require the script to be executable
            self.sftp_client.chmod(remote_dest, 0o777)
        except Exception as e:
            logger.exception("File push from local source {} to remote destination {} failed".format(
                local_source, remote_dest))
            raise FileCopyException(e, self.hostname)

        return remote_dest

    def pull_file(self, remote_source, local_dir):
        ''' Transport file on the remote side to a local directory

        Args:
            - remote_source (string): remote_source
            - local_dir (string): Local directory to copy to


        Returns:
            - str: Local path to file

        Raises:
            - FileExists : Name collision at local directory.
            - FileCopyException : FileCopy failed.
        '''
        local_dest = local_dir + '/' + os.path.basename(remote_source)

        try:
            # exist_ok=True replaces the original EEXIST-suppression dance.
            os.makedirs(local_dir, exist_ok=True)
        except OSError as e:
            # BUGFIX: the original logged the undefined name `script_dir`
            # here, raising a NameError that masked the real failure.
            logger.exception("Failed to create local_dir: {0}".format(local_dir))
            raise BadScriptPath(e, self.hostname)

        # Easier to check this than to waste time trying to pull file and
        # realize there's a problem.
        if os.path.exists(local_dest):
            logger.exception("Remote file copy will overwrite a local file:{0}".format(local_dest))
            raise FileExists(None, self.hostname, filename=local_dest)

        try:
            self.sftp_client.get(remote_source, local_dest)
        except Exception as e:
            logger.exception("File pull failed")
            raise FileCopyException(e, self.hostname)

        return local_dest

    def close(self):
        # Closes the SSH transport (and with it the SFTP session).
        return self.ssh_client.close()

    def isdir(self, path):
        """Return true if the path refers to an existing directory.

        Parameters
        ----------
        path : str
            Path of directory on the remote side to check.

        Note: this only checks that *something* exists at `path` via SFTP
        lstat; it does not distinguish files from directories.
        """
        result = True
        try:
            self.sftp_client.lstat(path)
        except FileNotFoundError:
            result = False

        return result

    def makedirs(self, path, mode=511, exist_ok=False):
        """Create a directory on the remote side.

        If intermediate directories do not exist, they will be created.

        Parameters
        ----------
        path : str
            Path of directory on the remote side to create.
        mode : int
            Permissions (posix-style) for the newly-created directory.
        exist_ok : bool
            If False, raise an OSError if the target directory already exists.
        """
        if exist_ok is False and self.isdir(path):
            raise OSError('Target directory {} already exists'.format(path))

        # NOTE(review): `path` is interpolated into the remote shell command
        # unquoted -- paths containing spaces or shell metacharacters will
        # misbehave.
        self.execute_wait('mkdir -p {}'.format(path))
        self.sftp_client.chmod(path, mode)

    def abspath(self, path):
        """Return the absolute path on the remote side.

        Parameters
        ----------
        path : str
            Path for which the absolute path will be returned.
        """
        return self.sftp_client.normalize(path)

    @property
    def script_dir(self):
        # Directory where generated scripts are staged on the remote side.
        return self._script_dir

    @script_dir.setter
    def script_dir(self, value):
        self._script_dir = value
| 32.762238 | 160 | 0.601174 |
ace6c959816e366d07d28dc12bdd0e9397467aa0 | 13,716 | py | Python | luna/gateware/memory.py | asaioe99/luna_JP | 276c9d86d2c964c7c4be149d721156d869c9489b | [
"BSD-3-Clause"
] | 1 | 2021-02-25T01:43:12.000Z | 2021-02-25T01:43:12.000Z | luna/gateware/memory.py | kbeckmann/luna | befb33620d1c4267607a45f331b84bc196c7a8bf | [
"BSD-3-Clause"
] | null | null | null | luna/gateware/memory.py | kbeckmann/luna | befb33620d1c4267607a45f331b84bc196c7a8bf | [
"BSD-3-Clause"
] | null | null | null | #
# This file is part of LUNA.
#
"""
This module contains definitions of memory units that work well for USB applications.
"""
import unittest
from nmigen import Elaboratable, Module, Signal, Memory
from nmigen.hdl.xfrm import DomainRenamer
from .test import LunaGatewareTestCase, sync_test_case
class TransactionalizedFIFO(Elaboratable):
""" Transactionalized, buffer first-in-first-out queue.
This FIFO is "transactionalized", which means that it allows sets of reads and writes to be "undone".
Effectively, this FIFO allows "rewinding" its read and write pointers to a previous point in time,
which makes it ideal for USB transmission or receipt; where the protocol can require blocks of data
to be retransmitted or ignored.
Attributes
----------
read_data: Signal(width), output
Contains the next byte in the FIFO. Valid only when :attr:``empty`` is false.
read_en: Signal(), input
When asserted, the current :attr:``read_data`` will move to the next value. The data is not
internally consumed/dequeued until :attr:``read_commit`` is asserted. This read can be "undone"
by asserting :attr:``read_discard``. Should only be asserted when :attr:``empty`` is false.
read_commit: Signal(), input
Strobe; when asserted, any reads performed since the last commit will be "finalized".
This effectively frees the memory associated with past reads. If this value is tied to '1',
the read port on this FIFO gracefully degrades to non-transactionalized port.
read_discard: Signal(), input
Strobe; when asserted; any reads since the last commit will be "undone", placing the read pointer
back at the queue position it had after the last :attr:``read_commit`.
empty: Signal(), output
Asserted when no data is available in the FIFO. This signal refers to whether data is available to
read. :attr:``read_commit`` will not change this value; but :attr:``read_discard`` will.
write_data: Signal(width), input
Holds the byte to be added to the FIFO when :attr:``write_en`` is asserted.
write_en: Signal(), input
When asserted, the current :attr:``write_data`` will be added to the FIFO; but will not be ready for read
until :attr:``write_commit`` is asserted. This write can be "undone" by asserting :attr:``write_discard``.
Should only be asserted when :attr:``full`` is false.
write_commit: Signal(), input
Strobe; when asserted, any writes reads performed since the last commit will be "finalized".
This makes the relevant data available for read.
write_discard: Signal(), input
Strobe; when asserted; any writes since the last commit will be "undone", placing the write pointer
back at the queue position it had after the last :attr:``write_commit`. This frees the relevant memory
for new writes.
full: Signal(), output
Asserted when no space is available for writes in the FIFO. :attr:``write_commit`` will not change
this value; but :attr:``write_discard`` will.
space_available: Signal(range(0, depth + 1)), output
Indicates the amount of space available in the FIFO. Useful for knowing whether we can add e.g. an
entire packet to the FIFO.
Attributes
----------
width: int
The width of each entry in the FIFO.
depth: int
The number of allowed entries in the FIFO.
name: str
The name of the relevant FIFO; to produce nicer debug output.
If not provided, nMigen will attempt auto-detection.
domain: str
The name of the domain this module should exist in.
"""
def __init__(self, *, width, depth, name=None, domain="sync"):
self.width = width
self.depth = depth
self.name = name
self.domain = domain
#
# I/O port
#
self.read_data = Signal(width)
self.read_en = Signal()
self.read_commit = Signal()
self.read_discard = Signal()
self.empty = Signal()
self.write_data = Signal(width)
self.write_en = Signal()
self.write_commit = Signal()
self.write_discard = Signal()
self.full = Signal()
self.space_available = Signal(range(0, depth + 1))
def elaborate(self, platform):
m = Module()
# Range shortcuts for internal signals.
address_range = range(0, self.depth + 1)
#count_range = range(0, self.depth + 1)
#
# Core internal "backing store".
#
memory = Memory(width=self.width, depth=self.depth + 1, name=self.name)
m.submodules.read_port = read_port = memory.read_port()
m.submodules.write_port = write_port = memory.write_port()
# Always connect up our memory's data/en ports to ours.
m.d.comb += [
self.read_data .eq(read_port.data),
write_port.data .eq(self.write_data),
write_port.en .eq(self.write_en & ~self.full)
]
#
# Write port.
#
# We'll track two pieces of data: our _committed_ write position, and our current un-committed write one.
# This will allow us to rapidly backtrack to our pre-commit position.
committed_write_pointer = Signal(address_range)
current_write_pointer = Signal(address_range)
m.d.comb += write_port.addr.eq(current_write_pointer)
# If we're writing to the fifo, update our current write position.
with m.If(self.write_en & ~self.full):
with m.If(current_write_pointer == self.depth):
m.d.sync += current_write_pointer.eq(0)
with m.Else():
m.d.sync += current_write_pointer.eq(current_write_pointer + 1)
# If we're committing a FIFO write, update our committed position.
with m.If(self.write_commit):
m.d.sync += committed_write_pointer.eq(current_write_pointer)
# If we're discarding our current write, reset our current position,
with m.If(self.write_discard):
m.d.sync += current_write_pointer.eq(committed_write_pointer)
#
# Read port.
#
# We'll track two pieces of data: our _committed_ read position, and our current un-committed read one.
# This will allow us to rapidly backtrack to our pre-commit position.
committed_read_pointer = Signal(address_range)
current_read_pointer = Signal(address_range)
with m.If(self.read_en):
m.d.comb += read_port.addr.eq(current_read_pointer + 1)
with m.Else():
m.d.comb += read_port.addr.eq(current_read_pointer)
# If we're reading from our the fifo, update our current read position.
with m.If(self.read_en & ~self.empty):
with m.If(current_read_pointer == self.depth):
m.d.sync += current_read_pointer.eq(0)
with m.Else():
m.d.sync += current_read_pointer.eq(current_read_pointer + 1)
# If we're committing a FIFO write, update our committed position.
with m.If(self.read_commit):
m.d.sync += committed_read_pointer.eq(current_read_pointer)
# If we're discarding our current write, reset our current position,
with m.If(self.read_discard):
m.d.sync += current_read_pointer.eq(committed_read_pointer)
#
# FIFO status.
#
# Our FIFO is empty if our read and write pointers are in the same. We'll use the current
# read position (which leads ahead) and the committed write position (which lags behind).
m.d.comb += self.empty.eq(current_read_pointer == committed_write_pointer)
# For our space available, we'll use the current write position (which leads ahead) and our committed
# read position (which lags behind). This yields two cases: one where the buffer isn't wrapped around,
# and one where it is.
with m.If(self.full):
m.d.comb += self.space_available.eq(0)
with m.Elif(committed_read_pointer <= current_write_pointer):
m.d.comb += self.space_available.eq(self.depth - (current_write_pointer - committed_read_pointer))
with m.Else():
m.d.comb += self.space_available.eq(committed_read_pointer - current_write_pointer - 1)
# Our FIFO is full if we don't have any space available.
m.d.comb += self.full.eq(current_write_pointer + 1 == committed_read_pointer)
# If we're not supposed to be in the sync domain, rename our sync domain to the target.
if self.domain != "sync":
m = DomainRenamer({"sync": self.domain})(m)
return m
class TransactionalizedFIFOTest(LunaGatewareTestCase):
FRAGMENT_UNDER_TEST = TransactionalizedFIFO
FRAGMENT_ARGUMENTS = {'width': 8, 'depth': 16}
def initialize_signals(self):
yield self.dut.write_en.eq(0)
@sync_test_case
def test_simple_fill(self):
dut = self.dut
# Our FIFO should start off empty; and with a full depth of free space.
self.assertEqual((yield dut.empty), 1)
self.assertEqual((yield dut.full), 0)
self.assertEqual((yield dut.space_available), 16)
# If we add a byte to the queue...
yield dut.write_data.eq(0xAA)
yield from self.pulse(dut.write_en)
# ... we should have less space available ...
self.assertEqual((yield dut.space_available), 15)
# ... but we still should be "empty", as we won't have data to read until we commit.
self.assertEqual((yield dut.empty), 1)
# Once we _commit_ our write, we should suddenly have data to read.
yield from self.pulse(dut.write_commit)
self.assertEqual((yield dut.empty), 0)
# If we read a byte, we should see the FIFO become empty...
yield from self.pulse(dut.read_en)
self.assertEqual((yield dut.empty), 1)
# ... but we shouldn't see more space become available until we commit the read.
self.assertEqual((yield dut.space_available), 15)
yield from self.pulse(dut.read_commit)
self.assertEqual((yield dut.space_available), 16)
# If we write 16 more bytes of data...
yield dut.write_en.eq(1)
for i in range(16):
yield dut.write_data.eq(i)
yield
yield dut.write_en.eq(0)
# ... our buffer should be full, but also empty.
# This paradox exists as we've filled our buffer with uncomitted data.
yield
self.assertEqual((yield dut.full), 1)
self.assertEqual((yield dut.empty), 1)
# Once we _commit_ our data, it should suddenly stop being empty.
yield from self.pulse(dut.write_commit)
self.assertEqual((yield dut.empty), 0)
# Reading a byte _without committing_ shouldn't change anything about empty/full/space-available...
yield from self.pulse(dut.read_en)
self.assertEqual((yield dut.empty), 0)
self.assertEqual((yield dut.full), 1)
self.assertEqual((yield dut.space_available), 0)
# ... but committing should increase our space available by one, and make our buffer no longer full.
yield from self.pulse(dut.read_commit)
self.assertEqual((yield dut.empty), 0)
self.assertEqual((yield dut.full), 0)
self.assertEqual((yield dut.space_available), 1)
# Reading/committing another byte should increment our space available.
yield from self.pulse(dut.read_en)
yield from self.pulse(dut.read_commit)
self.assertEqual((yield dut.space_available), 2)
# Writing data into the buffer should then fill it back up again...
yield dut.write_en.eq(1)
for i in range(2):
yield dut.write_data.eq(i)
yield
yield dut.write_en.eq(0)
# ... meaning it will again be full, and have no space remaining.
yield
self.assertEqual((yield dut.full), 1)
self.assertEqual((yield dut.space_available), 0)
# If we _discard_ this data, we should go back to having two bytes available.
yield from self.pulse(dut.write_discard)
self.assertEqual((yield dut.full), 0)
self.assertEqual((yield dut.space_available), 2)
# If we read the data that's remaining in the fifo...
yield dut.read_en.eq(1)
for i in range(2, 16):
yield
self.assertEqual((yield dut.read_data), i)
yield dut.read_en.eq(0)
# ... our buffer should again be empty.
yield
self.assertEqual((yield dut.empty), 1)
self.assertEqual((yield dut.space_available), 2)
# If we _discard_ our current read, we should then see our buffer no longer empty...
yield from self.pulse(dut.read_discard)
self.assertEqual((yield dut.empty), 0)
# and we should be able to read the same data again.
yield dut.read_en.eq(1)
for i in range(2, 16):
yield
self.assertEqual((yield dut.read_data), i)
yield dut.read_en.eq(0)
# On committing this, we should see a buffer that is no longer full, and is really empty.
yield from self.pulse(dut.read_commit)
self.assertEqual((yield dut.empty), 1)
self.assertEqual((yield dut.full), 0)
self.assertEqual((yield dut.space_available), 16)
if __name__ == "__main__":
unittest.main()
| 40.943284 | 114 | 0.638889 |
ace6c9c01cb1b3ab312ea7760aa6d8e9857c414b | 2,830 | py | Python | test/verb_extender_test.py | fulder/openapi-to-aws-apigateway | cdf31669103cb056087b1d41ca85b268dc29ab2b | [
"MIT"
] | null | null | null | test/verb_extender_test.py | fulder/openapi-to-aws-apigateway | cdf31669103cb056087b1d41ca85b268dc29ab2b | [
"MIT"
] | null | null | null | test/verb_extender_test.py | fulder/openapi-to-aws-apigateway | cdf31669103cb056087b1d41ca85b268dc29ab2b | [
"MIT"
] | null | null | null | import logging
import unittest
from generator.generator import VerbExtender
# Route the module-under-test's logger to stderr at DEBUG level so test runs
# surface the generator's internal logging.
logger = logging.getLogger("generator.verb_extender")
logger.addHandler(logging.StreamHandler())
logger.setLevel("DEBUG")
class TestVerbExtender(unittest.TestCase):
    """Tests for VerbExtender's integration construction and verb validation."""

    def test_init_integration_internet_type(self):
        # Lambda-style integration ('aws_proxy') over the public internet.
        extender = VerbExtender("get", {}, "/path1", "aws_proxy", "", True, "TEST_URI_START", True)
        extender._init_integration()
        expected = {
            "type": "aws_proxy",
            "httpMethod": "POST",
            "connectionType": "INTERNET",
            "uri": "TEST_URI_START"
        }
        self.assertEqual(expected, extender.integration)

    def test_init_integration_vpc_type(self):
        # Supplying a VPC link id switches the connection to VPC_LINK.
        extender = VerbExtender("get", {}, "/path1", "http_proxy", "VPC_LINK_ID", True, "TEST_URI_START", True)
        extender._init_integration()
        expected = {
            "type": "http_proxy",
            "httpMethod": "POST",
            "connectionId": "VPC_LINK_ID",
            "connectionType": "VPC_LINK",
            "uri": "TEST_URI_START"
        }
        self.assertEqual(expected, extender.integration)

    def test_init_integration_creates_correct_verb(self):
        # Non-lambda proxies keep the original HTTP verb and append the path.
        extender = VerbExtender("get", {}, "/path1", "http_proxy", "", False, "http://${stageVariables.httpHost}", True)
        extender._init_integration()
        expected = {
            "type": "http_proxy",
            "httpMethod": "GET",
            "connectionType": "INTERNET",
            "uri": "http://${stageVariables.httpHost}/path1"
        }
        self.assertEqual(expected, extender.integration)

    def test_init_integration_with_lambda_creates_post_method(self):
        # Lambda integrations are invoked with POST regardless of the verb.
        extender = VerbExtender("get", {}, "/path1", "aws", "", True, "TEST_START_URL", True)
        extender._init_integration()
        expected = {
            "type": "aws",
            "httpMethod": "POST",
            "connectionType": "INTERNET",
            "uri": "TEST_START_URL"
        }
        self.assertEqual(expected, extender.integration)

    def test_validate_verb_not_supported_param(self):
        # 'formData' parameters are unsupported and must raise RuntimeError.
        unsupported = {"parameters": [{"in": "formData"}]}
        extender = VerbExtender("get", unsupported, "/path1", "aws", "", True, "TEST_START_URL", True)
        self.assertRaises(RuntimeError, extender._validate_verb)

    def test_validate_verb_not_supported_response(self):
        # A 'default' response section is unsupported and must raise.
        unsupported = {"responses": {"default": {}}}
        extender = VerbExtender("get", unsupported, "/path1", "aws", "", True, "TEST_START_URL", True)
        self.assertRaises(RuntimeError, extender._validate_verb)
ace6c9eda6c9d211ba11d5b94e458890a22c04e3 | 1,628 | py | Python | lotterydraw/core/views.py | j3ygh/universetech-lotterydraw | f3f85b1239bd290f243712ca6a2bf2b60d138661 | [
"MIT"
] | null | null | null | lotterydraw/core/views.py | j3ygh/universetech-lotterydraw | f3f85b1239bd290f243712ca6a2bf2b60d138661 | [
"MIT"
] | null | null | null | lotterydraw/core/views.py | j3ygh/universetech-lotterydraw | f3f85b1239bd290f243712ca6a2bf2b60d138661 | [
"MIT"
] | null | null | null | from .models import Lottery
from django.http import JsonResponse
from django.shortcuts import render, redirect
from django.urls import reverse
import requests
from django.core.management import call_command
def lottery_list(request):
    """Return every stored Lottery as JSON of the form
    ``{"data": [{"game_id": ..., "issue": ..., "winning_number": ...}, ...]}``.
    """
    def as_dict(lottery):
        # Flatten a Lottery model instance to its JSON-safe fields.
        return {
            'game_id': lottery.game_id,
            'issue': lottery.issue,
            'winning_number': lottery.winning_number,
        }

    payload = {'data': [as_dict(obj) for obj in Lottery.objects.all()]}
    return JsonResponse(payload)
def demo(request):
    """Demo dashboard view.

    POST: runs one of the lottery management commands (create/update/delete)
    and redirects back to this page (POST-redirect-GET).
    GET: renders local lottery records together with the responses of two
    companion services expected on localhost ports 8001 and 8002.
    """
    if request.method == 'POST':
        action = request.POST.get('action')
        if action == 'create':
            call_command('createlottery')
        elif action == 'update':
            call_command('updatelottery')
        elif action == 'delete':
            call_command('deletelottery')
        # Unknown or missing actions fall through silently and just redirect.
        return redirect(reverse('demo'))
    # lotteries
    objects_list = Lottery.objects.all()
    data = [{
        'game_id': obj.game_id,
        'issue': obj.issue,
        'winning_number': obj.winning_number,
    } for obj in objects_list]
    results = {
        'data': data,
    }
    # onefake
    # NOTE(review): hard-coded localhost URL; requests.get raises if the
    # companion service is down -- consider a timeout / error handling.
    url1 = 'http://127.0.0.1:8001/v1'
    r1 = requests.get(url1)
    results1 = r1.json()['result']
    # twofake
    url2 = 'http://127.0.0.1:8002/newly.do/'
    r2 = requests.get(url2)
    results2 = r2.json()
    # Strip any port from the request host so template links can target
    # the companion services on ports 8001/8002.
    host = request.get_host().split(':')[0]
    context = {
        'results': results,
        'results1': results1,
        'results2': results2,
        'host1': host + ':8001',
        'host2': host + ':8002',
    }
    return render(request, 'core/lottery_list.html', context)
| 27.59322 | 61 | 0.597666 |
ace6ca07763ca3f87f16e77d27d40fc224312c3e | 2,493 | py | Python | notebooks/solve_with_options.py | Bhaskers-Blu-Org1/skills-for-planning | 98575d963e63d2c84075df9500c74c14f8a8553b | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2020-07-08T09:58:13.000Z | 2021-03-19T05:40:29.000Z | notebooks/solve_with_options.py | Bhaskers-Blu-Org1/skills-for-planning | 98575d963e63d2c84075df9500c74c14f8a8553b | [
"ECL-2.0",
"Apache-2.0"
] | 6 | 2021-03-19T03:13:37.000Z | 2022-03-11T23:57:41.000Z | notebooks/solve_with_options.py | IBM/skills-for-planning | 98575d963e63d2c84075df9500c74c14f8a8553b | [
"ECL-2.0",
"Apache-2.0"
] | 4 | 2019-08-30T15:12:07.000Z | 2020-06-29T14:50:22.000Z | import copy
import random
from tqdm import tqdm
from cube import cube
from cube import formula
from cube import skills
from cube import options
from cube import pattern
# Build a cube and apply the demo scramble so we start from a solvable,
# non-trivial state.
c = cube.Cube()
c.apply(pattern.scramble1)
c.render()

#%%
# `mods` holds c.summarize_effects() -- presumably the set of sticker swaps
# still in effect, i.e. a distance-from-solved measure; TODO confirm in
# cube.Cube. `steps` accumulates applied options and `experiences` counts
# simulated option executions.
mods = c.summarize_effects()
steps = []
experiences = 0
tqdm.write('experiences:{}--steps:{}--errors:{}'.format(experiences, len(steps),len(mods)))
option_set = options.expert
max_depth = 2
def option_sequences(depth, prefix=None):
    """Return every length-`depth` sequence of options, in random order.

    Fixes vs. the original: the local list is no longer named `options`
    (which shadowed the imported `cube.options` module) and comparisons to
    None use identity (`is None`) per PEP 8.

    Args:
        depth: positive int; the length of each returned sequence.
        prefix: internal recursion argument — a partial sequence that every
            returned sequence extends.

    Returns:
        A list of option sequences (lists of options drawn from the
        module-level `option_set`), shuffled independently at each level.
    """
    assert depth > 0, 'Depth must be > 0'
    # Sampling the whole population yields a shuffled copy of the options.
    shuffled = random.sample(option_set.options, len(option_set.options))
    extended = [[o] if prefix is None else prefix + [o] for o in shuffled]
    if depth == 1:
        return extended
    result = []
    for partial in extended:
        result += option_sequences(depth - 1, prefix=partial)
    return result
# option_seq[d] holds every option sequence of length d+1, pre-enumerated
# so the search loop below can sample from them repeatedly.
option_seq = [None]*max_depth
for i in range(max_depth):
    option_seq[i] = option_sequences(i+1)
option_seq[1][:4]  # no-op expression; notebook-cell leftover for inspection
# mdl maps a hashable option key (tuple of the option) to its precomputed
# effect model, passed later to Cube.apply(swap_list=...).
mdl = {}
for op, m in zip(option_set.options, option_set.models):
    mdl[tuple(op)] = m
# Greedy hill-climbing: each round searches for an option sequence that
# reduces the error count (len(mods)), applies one of the best found to the
# real cube, and stops when no sequence helps or after 100 rounds.
for _ in range(100):
    good_sequences = []
    improvements = []
    # Iterative deepening random search
    for depth in tqdm(range(max_depth)):
        for seq in tqdm(random.sample(option_seq[depth], len(option_seq[depth]))):
            # Evaluate the candidate sequence on a scratch copy of the cube.
            c_copy = copy.deepcopy(c)
            for op in seq:
                c_copy.apply(swap_list=mdl[tuple(op)])
            experiences += 1
            resulting_mods = c_copy.summarize_effects()
            improvement = len(mods) - len(resulting_mods)
            if improvement > 0:
                good_sequences.append(seq)
                improvements.append(improvement)
                # At depth >= 1, accept the first improving sequence found
                # instead of exhausting the (much larger) candidate pool.
                if depth >= 1:
                    break
        if improvements != []:
            break
        else:
            continue
    if improvements == []:
        # No sequence at any depth improved the cube; give up.
        break
    else:
        # Rank candidates by improvement and apply a random choice among
        # the best-scoring ones to the real cube.
        rankings = sorted(list(zip(improvements, good_sequences)), reverse=True)
        best_impr = rankings[0][0]
        best_seqs = [op for impr, op in rankings if impr == best_impr]
        seq = random.choice(best_seqs)
        for op in seq:
            c.apply(swap_list=mdl[tuple(op)])
        mods = c.summarize_effects()
        steps += seq
        tqdm.write('experiences:{}--steps:{}--errors:{}'.format(experiences, len(steps),len(mods)))
c.render()
print()
print()
print()
print('Experiences:', experiences)
print('Steps:', len(steps))
print(steps)
ace6caa5c833ae837e492e79d12dfc4ff64016d2 | 4,803 | py | Python | hubspot/crm/timeline/models/collection_response_timeline_event_template.py | jtruty/hubspot-api-python | 3f1b2d2007733a734daee2010611592249b72a0b | [
"Apache-2.0"
] | null | null | null | hubspot/crm/timeline/models/collection_response_timeline_event_template.py | jtruty/hubspot-api-python | 3f1b2d2007733a734daee2010611592249b72a0b | [
"Apache-2.0"
] | null | null | null | hubspot/crm/timeline/models/collection_response_timeline_event_template.py | jtruty/hubspot-api-python | 3f1b2d2007733a734daee2010611592249b72a0b | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Timeline events
This feature allows an app to create and configure custom events that can show up in the timelines of certain CRM object like contacts, companies, or deals. You'll find multiple use cases for this API in the sections below. # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from hubspot.crm.timeline.configuration import Configuration
class CollectionResponseTimelineEventTemplate(object):
    """A page of ``TimelineEventTemplate`` results plus optional paging info.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech -- do not edit the class manually.

    Class attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    openapi_types = {"results": "list[TimelineEventTemplate]", "paging": "Paging"}

    attribute_map = {"results": "results", "paging": "paging"}

    def __init__(
        self, results=None, paging=None, local_vars_configuration=None
    ):  # noqa: E501
        """CollectionResponseTimelineEventTemplate - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._results = None
        self._paging = None
        self.discriminator = None

        # Assign through the properties so client-side validation runs.
        self.results = results
        if paging is not None:
            self.paging = paging

    @property
    def results(self):
        """Gets the results of this CollectionResponseTimelineEventTemplate. # noqa: E501

        A collection of templates. # noqa: E501

        :return: The results of this CollectionResponseTimelineEventTemplate. # noqa: E501
        :rtype: list[TimelineEventTemplate]
        """
        return self._results

    @results.setter
    def results(self, results):
        """Sets the results of this CollectionResponseTimelineEventTemplate.

        A collection of templates. # noqa: E501

        :param results: The results of this CollectionResponseTimelineEventTemplate. # noqa: E501
        :type: list[TimelineEventTemplate]
        """
        # `results` is a required field when client-side validation is on.
        if self.local_vars_configuration.client_side_validation and results is None:  # noqa: E501
            raise ValueError(
                "Invalid value for `results`, must not be `None`"
            )  # noqa: E501
        self._results = results

    @property
    def paging(self):
        """Gets the paging of this CollectionResponseTimelineEventTemplate. # noqa: E501

        :return: The paging of this CollectionResponseTimelineEventTemplate. # noqa: E501
        :rtype: Paging
        """
        return self._paging

    @paging.setter
    def paging(self, paging):
        """Sets the paging of this CollectionResponseTimelineEventTemplate.

        :param paging: The paging of this CollectionResponseTimelineEventTemplate. # noqa: E501
        :type: Paging
        """
        self._paging = paging

    def to_dict(self):
        """Returns the model properties as a dict"""
        serialized = {}
        for name in self.openapi_types:
            value = getattr(self, name)
            if isinstance(value, list):
                # Serialize model elements one level deep, matching the
                # behavior of the generated code.
                serialized[name] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                serialized[name] = value.to_dict()
            elif isinstance(value, dict):
                serialized[name] = dict(
                    (key, val.to_dict() if hasattr(val, "to_dict") else val)
                    for key, val in value.items()
                )
            else:
                serialized[name] = value
        return serialized

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if isinstance(other, CollectionResponseTimelineEventTemplate):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if isinstance(other, CollectionResponseTimelineEventTemplate):
            return self.to_dict() != other.to_dict()
        return True
| 31.598684 | 241 | 0.607329 |
ace6caa7d15400ca93965c7b9f6bda7d3d49eef8 | 287 | py | Python | tests/tests.py | phluentmed/event-scheduler | 28d300df97e5970a546a97759248521d46d436d3 | [
"MIT"
] | 16 | 2021-03-09T13:51:21.000Z | 2022-03-04T23:19:10.000Z | tests/tests.py | phluentmed/event-scheduler | 28d300df97e5970a546a97759248521d46d436d3 | [
"MIT"
] | 3 | 2020-11-30T01:17:34.000Z | 2021-02-12T02:42:24.000Z | tests/tests.py | phluentmed/event-scheduler | 28d300df97e5970a546a97759248521d46d436d3 | [
"MIT"
] | null | null | null | import sys
import unittest
unit_tests = unittest.TestLoader().discover('tests/unit_tests',
'*tests.py',
'.')
result = unittest.TextTestRunner().run(unit_tests)
sys.exit(not result.wasSuccessful())
| 28.7 | 63 | 0.526132 |
ace6cb70ef0e27e29254a9e74c42b1308199690a | 5,548 | py | Python | titus/test/lib/testSpec.py | jmilleralpine/hadrian | 6a438e0370487bbbac5e64a4d6d7a2728902d153 | [
"Apache-2.0"
] | 127 | 2015-08-05T17:08:35.000Z | 2019-10-17T07:07:08.000Z | titus/test/lib/testSpec.py | jmilleralpine/hadrian | 6a438e0370487bbbac5e64a4d6d7a2728902d153 | [
"Apache-2.0"
] | 54 | 2015-11-20T02:21:29.000Z | 2019-11-23T20:17:23.000Z | titus/test/lib/testSpec.py | jmilleralpine/hadrian | 6a438e0370487bbbac5e64a4d6d7a2728902d153 | [
"Apache-2.0"
] | 58 | 2015-05-27T18:19:29.000Z | 2019-05-23T12:37:17.000Z | #!/usr/bin/env python
# Copyright (C) 2014 Open Data ("Open Data" refers to
# one or more of the following companies: Open Data Partners LLC,
# Open Data Research LLC, or Open Data Capital LLC.)
#
# This file is part of Hadrian.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import math
from titus.genpy import PFAEngine
from titus.errors import *
class TestLib1Spec(unittest.TestCase):
    """Tests for the PFA m.special.* library functions.

    Each test compiles a small PFA document with PFAEngine.fromYaml and
    checks the engine's output (or the exception it raises) for a few
    hand-picked inputs.  Expected values are spot checks, compared with
    assertAlmostEqual to 3 places.
    """
    def testLogBetaFcn(self):
        """m.special.lnBeta: value spot checks; negative b must raise."""
        engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
  - m.special.lnBeta: [input, 3] #[a, b]
''')
        self.assertAlmostEqual(engine.action(1.0), -1.0986, places=3)
        self.assertAlmostEqual(engine.action(4.0), -4.0943, places=3)
        self.assertAlmostEqual(engine.action(.01), 4.5902, places=3)
        ### raise the right exceptions ###
        engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
  - m.special.lnBeta: [input, -3] #[input, lambda]
''')
        self.assertRaises(PFARuntimeException, lambda: engine.action(0.5))
    def testNChooseKFcn(self):
        """m.special.nChooseK: value spot checks; k > n or n < 0 must raise."""
        engine, = PFAEngine.fromYaml('''
input: int
output: int
action:
  - m.special.nChooseK: [input, 2]
''')
        self.assertEqual(engine.action(20), 190)
        self.assertEqual(engine.action(10), 45)
        self.assertEqual(engine.action(3), 3)
        ### raise the right exceptions ###
        engine, = PFAEngine.fromYaml('''
input: int
output: int
action:
  - m.special.nChooseK: [input, 4]
''')
        self.assertRaises(PFARuntimeException, lambda: engine.action(1))
        self.assertRaises(PFARuntimeException, lambda: engine.action(0))
        engine, = PFAEngine.fromYaml('''
input: int
output: int
action:
  - m.special.nChooseK: [input, -4] #[input, lambda]
''')
        self.assertRaises(PFARuntimeException, lambda: engine.action(-2))
    def testErfFcn(self):
        """m.special.erf: odd function, saturating to -1/+1 at the tails."""
        engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
  - {m.special.erf: input}
''')
        self.assertAlmostEqual(engine.action(-22.5), -1.00, places=3)
        self.assertAlmostEqual(engine.action(-0.5), -0.52, places=3)
        self.assertAlmostEqual(engine.action(0), 0.00, places=3)
        self.assertAlmostEqual(engine.action(0.5), 0.52, places=3)
        self.assertAlmostEqual(engine.action(22.5), 1.00, places=3)
    def testErfcFcn(self):
        """m.special.erfc: complementary error function, erfc(x) = 1 - erf(x)."""
        engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
  - {m.special.erfc: input}
''')
        self.assertAlmostEqual(engine.action(-22.5), 2.00, places=3)
        self.assertAlmostEqual(engine.action(-0.5), 1.52, places=3)
        self.assertAlmostEqual(engine.action(0), 1.00, places=3)
        self.assertAlmostEqual(engine.action(0.5), 0.4795, places=3)
        self.assertAlmostEqual(engine.action(22.5), 0.00, places=3)
    def testLnGammaFcn(self):
        """m.special.lnGamma: value spot checks; non-positive input must raise."""
        engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
  - {m.special.lnGamma: input}
''')
        self.assertAlmostEqual(engine.action(0.1), 2.2527, places=3 )
        self.assertAlmostEqual(engine.action(0.5), 0.5724, places=3)
        self.assertAlmostEqual(engine.action(22.5), 46.9199, places=3)
        engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
  - {m.special.lnGamma: input}
''')
        self.assertRaises(PFARuntimeException, lambda: engine.action(-2))
        self.assertRaises(PFARuntimeException, lambda: engine.action(-2.2))
    # NOTE: the tests below are disabled; regularizedgammap/q are not
    # currently exercised by this suite.
    # def testRegularizedGammaPFcn(self):
    # engine, = PFAEngine.fromYaml('''
    #input: double
    #output: double
    #action:
    # - m.special.regularizedgammapfcn: [input, 3] #[a, b]
    #''')
    # self.assertAlmostEqual(engine.action(1.0), 0.08030, places=3)
    # self.assertAlmostEqual(engine.action(2.0), 0.32332, places=3)
    # self.assertAlmostEqual(engine.action(3.0), 0.57680, places=3)
    #
    # ### raise the right exceptions ###
    # # error if a, b <= 0
    # engine, = PFAEngine.fromYaml('''
    #input: double
    #output: double
    #action:
    # - m.special.regularizedgammapfcn: [input, -1.5] #[input, lambda]
    #''')
    # self.assertRaises(PFARuntimeException, lambda: engine.action(1.40))
    # self.assertRaises(PFARuntimeException, lambda: engine.action(-1.2))
    #
    #
    # def testRegularizedGammaQFcn(self):
    # engine, = PFAEngine.fromYaml('''
    #input: double
    #output: double
    #action:
    # - m.special.regularizedgammaqfcn: [input, 3] #[a, b]
    #''')
    # self.assertAlmostEqual(engine.action(1.0), 0.91969, places=3)
    # self.assertAlmostEqual(engine.action(2.0), 0.67667, places=3)
    # self.assertAlmostEqual(engine.action(3.0), 0.42319, places=3)
    #
    # ### raise the right exceptions ###
    # # error if a, b <= 0
    # engine, = PFAEngine.fromYaml('''
    #input: double
    #output: double
    #action:
    # - m.special.regularizedgammaqfcn: [input, -1.5] #[input, lambda]
    #''')
    # self.assertRaises(PFARuntimeException, lambda: engine.action(1.40))
    # self.assertRaises(PFARuntimeException, lambda: engine.action(-1.2))
    #
ace6cb74fe1488d9da7d37f20cf8cf46e479b3f6 | 354 | py | Python | demos/magic_route.py | bakalab/imouto | 01944746d4f7530a741bcb082866e18c48d07f3a | [
"BSD-3-Clause"
] | 9 | 2017-06-18T06:03:00.000Z | 2019-05-07T10:06:22.000Z | demos/magic_route.py | bakalab/imouto | 01944746d4f7530a741bcb082866e18c48d07f3a | [
"BSD-3-Clause"
] | 3 | 2017-08-05T08:01:42.000Z | 2017-12-08T01:58:33.000Z | demos/magic_route.py | bakalab/imouto | 01944746d4f7530a741bcb082866e18c48d07f3a | [
"BSD-3-Clause"
] | null | null | null | from imouto.web import Application
from imouto.magicroute import GET, POST
async def hello_world_get(request, response):
response.write("Hello World, it'is get")
async def hello_world_post(request, resposne):
resposne.write("Hello World, it'is post")
GET / '/' > hello_world_get
POST / '/' > hello_world_post
app = Application()
app.run()
| 19.666667 | 46 | 0.731638 |
ace6cb97b5c3301fa4e96e4d9146a49371cb5eb4 | 110,953 | py | Python | google-cloud-sdk/lib/googlecloudsdk/third_party/appengine/datastore/datastore_rpc.py | bopopescu/searchparty | afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6 | [
"Apache-2.0"
] | null | null | null | google-cloud-sdk/lib/googlecloudsdk/third_party/appengine/datastore/datastore_rpc.py | bopopescu/searchparty | afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6 | [
"Apache-2.0"
] | null | null | null | google-cloud-sdk/lib/googlecloudsdk/third_party/appengine/datastore/datastore_rpc.py | bopopescu/searchparty | afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6 | [
"Apache-2.0"
] | 3 | 2017-07-27T18:44:13.000Z | 2020-07-25T17:48:53.000Z | #
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Asynchronous datastore API.
This is designed to be the lowest-level API to be used by all Python
datastore client libraries.
"""
# WARNING: This file is externally viewable by our users. All comments from
# this file will be stripped. The docstrings will NOT. Do not put sensitive
# information in docstrings. If you must communicate internal information in
# this source file, please place them in comments only.
# This defines the names that will be seen by "from ... import *".
__all__ = ['AbstractAdapter',
'BaseConfiguration',
'BaseConnection',
'ConfigOption',
'Configuration',
'Connection',
'IdentityAdapter',
'MultiRpc',
'TransactionalConnection',
'TransactionMode',
'TransactionOptions',
]
# TODO(user): Consider implementing __eq__ for all immutable classes.
# Python imports.
import collections
import copy
import functools
import logging
import os
from googlecloudsdk.third_party.appengine.googlestorage.onestore.v3 import entity_pb
# App Engine imports.
from googlecloudsdk.third_party.appengine.api import api_base_pb
from googlecloudsdk.third_party.appengine.api import apiproxy_rpc
from googlecloudsdk.third_party.appengine.api import apiproxy_stub_map
# TODO(user): Move the stuff we need from _errors and and _types here?
from googlecloudsdk.third_party.appengine.api import datastore_errors
from googlecloudsdk.third_party.appengine.api import datastore_types
# NOTE(user): weird import to get at _ParseFullAppId
from googlecloudsdk.third_party.appengine.api.app_identity import app_identity
from googlecloudsdk.third_party.appengine.datastore import datastore_pb
from googlecloudsdk.third_party.appengine.datastore import datastore_pbs
from googlecloudsdk.third_party.appengine.runtime import apiproxy_errors
# Mirror the pb-layer flag; Cloud Datastore v1 support is optional, so the
# googledatastore module is only imported when the pb layer enables it.
_CLOUD_DATASTORE_ENABLED = datastore_pbs._CLOUD_DATASTORE_ENABLED # pylint: disable=protected-access
if _CLOUD_DATASTORE_ENABLED:
  from googlecloudsdk.third_party.appengine.datastore.datastore_pbs import googledatastore
# Constants and globals.
# The maximum number of ids that can be allocated at a time
_MAX_ID_BATCH_SIZE = 1000 * 1000 * 1000
# API versions (also the name of the corresponding service).
_DATASTORE_V3 = 'datastore_v3'
_CLOUD_DATASTORE_V1 = 'cloud_datastore_v1'
# TODO(user): Move this to some kind of utility module.
def _positional(max_pos_args):
"""A decorator to declare that only the first N arguments may be positional.
Note that for methods, n includes 'self'.
"""
def positional_decorator(wrapped):
@functools.wraps(wrapped)
def positional_wrapper(*args, **kwds):
if len(args) > max_pos_args:
plural_s = ''
if max_pos_args != 1:
plural_s = 's'
raise TypeError(
'%s() takes at most %d positional argument%s (%d given)' %
(wrapped.__name__, max_pos_args, plural_s, len(args)))
return wrapped(*args, **kwds)
return positional_wrapper
return positional_decorator
def _GetDatastoreType(app=None):
  """Tries to get the datastore type for the given app.

  This function is only guaranteed to return something other than
  UNKNOWN_DATASTORE when running in production and querying the current app.
  """
  current_app = datastore_types.ResolveAppId(None)
  if app in (None, current_app):
    # NOTE(review): inferring the datastore type from the presence of a
    # partition is fragile and does not work for dev_appserver; see the
    # original TODO referencing http://b/issue?id=6469560.
    partition, _, _ = app_identity._ParseFullAppId(current_app)
    if partition:
      return BaseConnection.HIGH_REPLICATION_DATASTORE
    return BaseConnection.MASTER_SLAVE_DATASTORE
  # Asking about a different app than the one we are running as.
  return BaseConnection.UNKNOWN_DATASTORE
class AbstractAdapter(object):
  """Abstract interface between protobufs and user-level classes.

  This class defines conversions between the protobuf classes defined
  in entity_pb.py on the one hand, and the corresponding user-level
  classes (which are defined by higher-level API libraries such as
  datastore.py or db.py) on the other hand.

  The premise is that the code in this module is agnostic about the
  user-level classes used to represent keys and entities, while at the
  same time providing APIs that accept or return such user-level
  classes.

  Higher-level libraries must subclass this abstract class and pass an
  instance of the subclass to the Connection they want to use.

  These methods may raise datastore_errors.Error for bad inputs.
  """
  # Defaults in case subclasses don't call init.
  _entity_converter = datastore_pbs.get_entity_converter()
  _query_converter = datastore_pbs._QueryConverter(_entity_converter)
  def __init__(self, id_resolver=None):
    # When an id resolver is supplied, build per-instance converters that
    # use it; otherwise the shared class-level defaults above stay in effect.
    if id_resolver:
      self._entity_converter = datastore_pbs.get_entity_converter(
          id_resolver)
      self._query_converter = datastore_pbs._QueryConverter(
          self._entity_converter)
  def get_entity_converter(self):
    # Returns the v3<->v1 entity converter used by this adapter.
    return self._entity_converter
  def get_query_converter(self):
    # Returns the v3<->v1 query converter used by this adapter.
    return self._query_converter
  def pb_to_key(self, pb):
    """Turn an entity_pb.Reference into a user-level key."""
    raise NotImplementedError
  def pb_v1_to_key(self, pb):
    """Turn an googledatastore.Key into a user-level key."""
    # Convert v1 -> v3 first, then delegate to the subclass's pb_to_key.
    v3_ref = entity_pb.Reference()
    self._entity_converter.v1_to_v3_reference(pb, v3_ref)
    return self.pb_to_key(v3_ref)
  def pb_to_entity(self, pb):
    """Turn an entity_pb.EntityProto into a user-level entity."""
    raise NotImplementedError
  def pb_v1_to_entity(self, pb, is_projection):
    """Turn an googledatastore.Entity into a user-level entity."""
    # Convert v1 -> v3 first, then delegate to the subclass's pb_to_entity.
    v3_entity = entity_pb.EntityProto()
    self._entity_converter.v1_to_v3_entity(pb, v3_entity, is_projection)
    return self.pb_to_entity(v3_entity)
  def pb_v1_to_query_result(self, pb, query_options):
    """Turn an googledatastore.Entity into a user-level query result."""
    if query_options.keys_only:
      return self.pb_v1_to_key(pb.key)
    else:
      # A non-empty projection means the entity holds projected values only.
      return self.pb_v1_to_entity(pb, bool(query_options.projection))
  def pb_to_index(self, pb):
    """Turn an entity_pb.CompositeIndex into a user-level Index
    representation."""
    raise NotImplementedError
  def pb_to_query_result(self, pb, query_options):
    """Turn an entity_pb.EntityProto into a user-level query result."""
    if query_options.keys_only:
      return self.pb_to_key(pb.key())
    else:
      return self.pb_to_entity(pb)
  def key_to_pb(self, key):
    """Turn a user-level key into an entity_pb.Reference."""
    raise NotImplementedError
  def key_to_pb_v1(self, key):
    """Turn a user-level key into an googledatastore.Key."""
    # Delegate to the subclass's key_to_pb, then convert v3 -> v1.
    v3_ref = self.key_to_pb(key)
    v1_key = googledatastore.Key()
    self._entity_converter.v3_to_v1_key(v3_ref, v1_key)
    return v1_key
  def entity_to_pb(self, entity):
    """Turn a user-level entity into an entity_pb.EntityProto."""
    raise NotImplementedError
  def entity_to_pb_v1(self, entity):
    """Turn a user-level entity into an googledatastore.Key."""
    # Delegate to the subclass's entity_to_pb, then convert v3 -> v1.
    v3_entity = self.entity_to_pb(entity)
    v1_entity = googledatastore.Entity()
    self._entity_converter.v3_to_v1_entity(v3_entity, v1_entity)
    return v1_entity
  def new_key_pb(self):
    """Create a new, empty entity_pb.Reference."""
    return entity_pb.Reference()
  def new_entity_pb(self):
    """Create a new, empty entity_pb.EntityProto."""
    return entity_pb.EntityProto()
class IdentityAdapter(AbstractAdapter):
  """A concrete adapter that implements the identity mapping.

  This is used as the default when a Connection is created without
  specifying an adapter; that's primarily for testing.
  """
  def __init__(self, id_resolver=None):
    super(IdentityAdapter, self).__init__(id_resolver)
  # Every conversion below is the identity: the "user-level" objects are
  # simply the protobufs themselves.
  def pb_to_key(self, pb):
    return pb
  def pb_to_entity(self, pb):
    return pb
  def key_to_pb(self, key):
    return key
  def entity_to_pb(self, entity):
    return entity
  def pb_to_index(self, pb):
    return pb
class ConfigOption(object):
  """A descriptor for a Configuration option.

  This class is used to create a configuration option on a class that inherits
  from BaseConfiguration. A validator function decorated with this class will
  be converted to a read-only descriptor and BaseConfiguration will implement
  constructor and merging logic for that configuration option. A validator
  function takes a single non-None value to validate and either throws
  an exception or returns that value (or an equivalent value). A validator is
  called once at construction time, but only if a non-None value for the
  configuration option is specified the constructor's keyword arguments.
  """
  def __init__(self, validator):
    # The validator's __name__ doubles as the option's storage key.
    self.validator = validator
  def __get__(self, obj, objtype):
    if obj is None: # Descriptor called on class.
      return self
    # Option values live in the owning configuration's _values dict.
    return obj._values.get(self.validator.__name__, None)
  def __set__(self, obj, value):
    raise AttributeError('Configuration options are immutable (%s)' %
                         (self.validator.__name__,))
  def __call__(self, *args):
    """Gets the first non-None value for this option from the given args.

    Args:
      *args: Any number of configuration objects or None values.

    Returns:
      The first value for this ConfigOption found in the given configuration
      objects or None.

    Raises:
      datastore_errors.BadArgumentError if a given object is not a
      configuration object.
    """
    name = self.validator.__name__
    for config in args:
      # apiproxy_stub_map.UserRPC is included for legacy support
      if isinstance(config, (type(None), apiproxy_stub_map.UserRPC)):
        pass
      elif not isinstance(config, BaseConfiguration):
        raise datastore_errors.BadArgumentError(
            'invalid config argument (%r)' % (config,))
      elif name in config._values and self is config._options[name]:
        # Only honor the value if it belongs to this exact option (guards
        # against same-named options from unrelated configuration classes).
        return config._values[name]
    return None
class _ConfigurationMetaClass(type):
  """The metaclass for all Configuration types.

  This class is needed to store a class specific list of all ConfigOptions in
  cls._options, and insert a __slots__ variable into the class dict before the
  class is created to impose immutability.
  """

  def __new__(metaclass, classname, bases, classDict):
    if classname == '_MergedConfiguration':
      # Special-cased, so it can be a subclass of BaseConfiguration
      return type.__new__(metaclass, classname, bases, classDict)
    # Use 'object in bases' as a crutch to distinguish BaseConfiguration
    # from its subclasses.
    if object in bases:
      classDict['__slots__'] = ['_values'] # making class immutable
    else:
      classDict['__slots__'] = [] # it already has a _values slot
    cls = type.__new__(metaclass, classname, bases, classDict)
    if object not in bases:
      options = {}
      # Walk the MRO from most-base to most-derived so nearer classes win.
      for c in reversed(cls.__mro__):
        if '_options' in c.__dict__:
          options.update(c.__dict__['_options'])
      cls._options = options # Each cls gets its own copy of fields.
      for option, value in cls.__dict__.iteritems():
        if isinstance(value, ConfigOption):
          # 'in' instead of the deprecated dict.has_key() (removed in py3).
          if option in cls._options:
            raise TypeError('%s cannot be overridden (%s)' %
                            (option, cls.__name__))
          cls._options[option] = value
          value._cls = cls
    return cls
# TODO(user): define __instancecheck__ once we have a released 2.7 environment
class BaseConfiguration(object):
  """A base class for a configuration object.

  Subclasses should provide validation functions for every configuration option
  they accept. Any public function decorated with ConfigOption is assumed to be
  a validation function for an option of the same name. All validation functions
  take a single non-None value to validate and must throw an exception or return
  the value to store.

  This class forces subclasses to be immutable and exposes a read-only
  property for every accepted configuration option. Configuration options set by
  passing keyword arguments to the constructor. The constructor and merge
  function are designed to avoid creating redundant copies and may return
  the configuration objects passed to them if appropriate.

  Setting an option to None is the same as not specifying the option except in
  the case where the 'config' argument is given. In this case the value on
  'config' of the same name is ignored. Options that are not specified will
  return 'None' when accessed.
  """
  # Python 2 metaclass hook; the metaclass collects ConfigOptions and
  # installs __slots__ to enforce immutability.
  __metaclass__ = _ConfigurationMetaClass
  _options = {} # Maps option name to ConfigOption objects
  def __new__(cls, config=None, **kwargs):
    """Immutable constructor.

    If 'config' is non-None all configuration options will default to the value
    it contains unless the configuration option is explicitly set to 'None' in
    the keyword arguments. If 'config' is None then all configuration options
    default to None.

    Args:
      config: Optional base configuration providing default values for
        parameters not specified in the keyword arguments.
      **kwargs: Configuration options to store on this object.

    Returns:
      Either a new Configuration object or (if it would be equivalent)
      the config argument unchanged, but never None.
    """
    if config is None:
      pass
    elif isinstance(config, BaseConfiguration):
      if cls is config.__class__ and config.__is_stronger(**kwargs):
        # Shortcut: return the config argument unchanged.
        return config
      for key, value in config._values.iteritems():
        # Only grab options we know about
        if issubclass(cls, config._options[key]._cls):
          kwargs.setdefault(key, value)
    else:
      raise datastore_errors.BadArgumentError(
          'config argument should be Configuration (%r)' % (config,))
    obj = super(BaseConfiguration, cls).__new__(cls)
    obj._values = {}
    for key, value in kwargs.iteritems():
      # None means "unset"; only non-None values are validated and stored.
      if value is not None:
        try:
          config_option = obj._options[key]
        except KeyError, err:
          raise TypeError('Unknown configuration option (%s)' % err)
        value = config_option.validator(value)
        if value is not None:
          obj._values[key] = value
    return obj
  def __eq__(self, other):
    if self is other:
      return True
    if not isinstance(other, BaseConfiguration):
      return NotImplemented
    # Equal iff both the option descriptors and the stored values match.
    return self._options == other._options and self._values == other._values
  def __ne__(self, other):
    equal = self.__eq__(other)
    if equal is NotImplemented:
      return equal
    return not equal
  def __hash__(self):
    return (hash(frozenset(self._values.iteritems())) ^
            hash(frozenset(self._options.iteritems())))
  def __repr__(self):
    args = []
    for key_value in sorted(self._values.iteritems()):
      args.append('%s=%r' % key_value)
    return '%s(%s)' % (self.__class__.__name__, ', '.join(args))
  def __is_stronger(self, **kwargs):
    """Internal helper to ask whether a configuration is stronger than another.

    A configuration is stronger when it contains every name/value pair in
    kwargs.

    Example: a configuration with:
      (deadline=5, on_configuration=None, read_policy=EVENTUAL_CONSISTENCY)
    is stronger than:
      (deadline=5, on_configuration=None)
    but not stronger than:
      (deadline=5, on_configuration=None, read_policy=None)
    or
      (deadline=10, on_configuration=None, read_policy=None).

    More formally:
      - Any value is stronger than an unset value;
      - Any value is stronger than itself.

    Returns:
      True if each of the self attributes is stronger than the
      corresponding argument.
    """
    for key, value in kwargs.iteritems():
      if key not in self._values or value != self._values[key]:
        return False
    return True
  @classmethod
  def is_configuration(cls, obj):
    """True if configuration obj handles all options of this class.

    Use this method rather than isinstance(obj, cls) to test if a
    configuration object handles the options of cls (is_configuration
    is handled specially for results of merge which may handle the options
    of unrelated configuration classes).

    Args:
      obj: the object to test.
    """
    return isinstance(obj, BaseConfiguration) and obj._is_configuration(cls)
  def _is_configuration(self, cls):
    # Overridden by _MergedConfiguration to check each merged component.
    return isinstance(self, cls)
  def merge(self, config):
    """Merge two configurations.

    The configuration given as an argument (if any) takes priority;
    defaults are filled in from the current configuration.

    Args:
      config: Configuration providing overrides, or None (but cannot
        be omitted).

    Returns:
      Either a new configuration object or (if it would be equivalent)
      self or the config argument unchanged, but never None.

    Raises:
      BadArgumentError if self or config are of configurations classes
      with conflicting options (i.e. the same option name defined in
      two different configuration classes).
    """
    if config is None or config is self:
      # Nothing to do.
      return self
    # Optimizations to avoid _MergedConfiguration when possible,
    # for backwards compatibility of code that uses isinstance.
    if not (isinstance(config, _MergedConfiguration) or
            isinstance(self, _MergedConfiguration)):
      # Return config if for every value self has, config has a value
      # that would override it.
      if isinstance(config, self.__class__):
        for key in self._values:
          if key not in config._values:
            break
        else:
          return config
      if isinstance(self, config.__class__):
        if self.__is_stronger(**config._values):
          return self
      # Return an instance of a configuration class if possible
      def _quick_merge(obj):
        obj._values = self._values.copy()
        obj._values.update(config._values)
        return obj
      if isinstance(config, self.__class__):
        return _quick_merge(type(config)())
      if isinstance(self, config.__class__):
        return _quick_merge(type(self)())
    # Helper class merges configurations with config taking priority.
    return _MergedConfiguration(config, self)
  def __getstate__(self):
    return {'_values': self._values}
  def __setstate__(self, state):
    # Re-validate values in case validation changed as logic elsewhere assumes
    # validation passed.
    obj = self.__class__(**state['_values'])
    self._values = obj._values
class _MergedConfiguration(BaseConfiguration):
  """Helper class to handle merges of configurations.

  Instances of _MergedConfiguration are in some sense "subclasses" of the
  argument configurations, i.e.:
  - they handle exactly the configuration options of the argument configurations
  - the value of these options is taken in priority order from the arguments
  - isinstance is true on this configuration if it is true on any of the
    argument configurations
  This class raises an exception if two argument configurations have an option
  with the same name but coming from a different configuration class.
  """
  __slots__ = ['_values', '_configs', '_options', '_classes']
  def __new__(cls, *configs):
    # NOTE: super(BaseConfiguration, cls) deliberately skips
    # BaseConfiguration.__new__ (which validates kwargs against _options).
    obj = super(BaseConfiguration, cls).__new__(cls)
    obj._configs = configs
    # Find out which options we handle and raise an error on name clashes
    obj._options = {}
    for config in configs:
      for name, option in config._options.iteritems():
        if name in obj._options:
          if option is not obj._options[name]:
            error = ("merge conflict on '%s' from '%s' and '%s'" %
                     (name, option._cls.__name__,
                      obj._options[name]._cls.__name__))
            raise datastore_errors.BadArgumentError(error)
        obj._options[name] = option
    obj._values = {}
    # Iterate in reverse so earlier configs (higher priority) win.
    for config in reversed(configs):
      for name, value in config._values.iteritems():
        obj._values[name] = value
    return obj
  def __repr__(self):
    return '%s%r' % (self.__class__.__name__, tuple(self._configs))
  def _is_configuration(self, cls):
    # True if any merged component handles the options of cls.
    for config in self._configs:
      if config._is_configuration(cls):
        return True
    return False
  def __getattr__(self, name):
    # Unset-but-known options read as None; unknown names raise.
    if name in self._options:
      if name in self._values:
        return self._values[name]
      else:
        return None
    raise AttributeError("Configuration has no attribute '%s'" % (name,))
  def __getstate__(self):
    return {'_configs': self._configs}
  def __setstate__(self, state):
    # Using constructor to build the correct state.
    obj = _MergedConfiguration(*state['_configs'])
    self._values = obj._values
    self._configs = obj._configs
    self._options = obj._options
class Configuration(BaseConfiguration):
  """Configuration parameters for datastore RPCs.

  This class reserves the right to define configuration options of any name
  except those that start with 'user_'. External subclasses should only define
  functions or variables with names that start with 'user_'.

  The options defined on this class include generic RPC parameters (deadline)
  but also datastore-specific parameters (on_completion and read_policy).

  Options are set by passing keyword arguments to the constructor corresponding
  to the configuration options defined below.
  """

  # Flags to determine read policy and related constants.
  STRONG_CONSISTENCY = 0
  """A read consistency that will return up to date results."""

  EVENTUAL_CONSISTENCY = 1
  """A read consistency that allows requests to return possibly stale results.

  This read_policy tends to be faster and less prone to unavailability/timeouts.
  May return transactionally inconsistent results in rare cases.
  """

  APPLY_ALL_JOBS_CONSISTENCY = 2  # forces READ_CURRENT for 1.0.1 shards
  """A read consistency that aggressively tries to find write jobs to apply.

  Use of this read policy is strongly discouraged.

  This read_policy tends to be more costly and is only useful in a few specific
  cases. It is equivalent to splitting a request by entity group and wrapping
  each batch in a separate transaction. Cannot be used with non-ancestor
  queries.
  """

  # The closed set of legal read_policy values, used for validation below.
  ALL_READ_POLICIES = frozenset((STRONG_CONSISTENCY,
                                 EVENTUAL_CONSISTENCY,
                                 APPLY_ALL_JOBS_CONSISTENCY,
                                 ))

  # Accessors. These are read-only attributes.
  # NOTE: each function below is a validator wrapped by the ConfigOption
  # descriptor (defined elsewhere); it receives the proposed option value
  # and either returns it or raises BadArgumentError.

  @ConfigOption
  def deadline(value):
    """The deadline for any RPC issued.

    If unset the system default will be used which is typically 5 seconds.

    Raises:
      BadArgumentError if value is not a number or is not greater than zero.
    """
    if not isinstance(value, (int, long, float)):
      raise datastore_errors.BadArgumentError(
          'deadline argument should be int/long/float (%r)' % (value,))
    if value <= 0:
      raise datastore_errors.BadArgumentError(
          'deadline argument should be > 0 (%r)' % (value,))
    return value

  @ConfigOption
  def on_completion(value):
    """A callback that is invoked when any RPC completes.

    If specified, it will be called with a UserRPC object as argument when an
    RPC completes.

    NOTE: There is a subtle but important difference between
    UserRPC.callback and Configuration.on_completion: on_completion is
    called with the RPC object as its first argument, where callback is
    called without arguments.  (Because a Configuration's on_completion
    function can be used with many UserRPC objects, it would be awkward
    if it was called without passing the specific RPC.)
    """
    # NOTE: There is no on_completion validation.  Passing something
    # inappropriate will raise an exception when it is called.
    return value

  @ConfigOption
  def read_policy(value):
    """The read policy to use for any relevant RPC.

    If unset STRONG_CONSISTENCY will be used.

    Raises:
      BadArgumentError if value is not a known read policy.
    """
    if value not in Configuration.ALL_READ_POLICIES:
      raise datastore_errors.BadArgumentError(
          'read_policy argument invalid (%r)' % (value,))
    return value

  @ConfigOption
  def force_writes(value):
    """If a write request should succeed even if the app is read-only.

    This only applies to user controlled read-only periods.
    """
    if not isinstance(value, bool):
      raise datastore_errors.BadArgumentError(
          'force_writes argument invalid (%r)' % (value,))
    return value

  @ConfigOption
  def max_entity_groups_per_rpc(value):
    """The maximum number of entity groups that can be represented in one rpc.

    For a non-transactional operation that involves more entity groups than the
    maximum, the operation will be performed by executing multiple, asynchronous
    rpcs to the datastore, each of which has no more entity groups represented
    than the maximum.  So, if a put() operation has 8 entity groups and the
    maximum is 3, we will send 3 rpcs, 2 with 3 entity groups and 1 with 2
    entity groups.  This is a performance optimization - in many cases
    multiple, small, concurrent rpcs will finish faster than a single large
    rpc.  The optimal value for this property will be application-specific, so
    experimentation is encouraged.
    """
    if not (isinstance(value, (int, long)) and value > 0):
      raise datastore_errors.BadArgumentError(
          'max_entity_groups_per_rpc should be a positive integer')
    return value

  @ConfigOption
  def max_allocate_ids_keys(value):
    """The maximum number of keys in a v1 AllocateIds rpc."""
    if not (isinstance(value, (int, long)) and value > 0):
      raise datastore_errors.BadArgumentError(
          'max_allocate_ids_keys should be a positive integer')
    return value

  @ConfigOption
  def max_rpc_bytes(value):
    """The maximum serialized size of a Get/Put/Delete without batching."""
    if not (isinstance(value, (int, long)) and value > 0):
      raise datastore_errors.BadArgumentError(
          'max_rpc_bytes should be a positive integer')
    return value

  @ConfigOption
  def max_get_keys(value):
    """The maximum number of keys in a Get without batching."""
    if not (isinstance(value, (int, long)) and value > 0):
      raise datastore_errors.BadArgumentError(
          'max_get_keys should be a positive integer')
    return value

  @ConfigOption
  def max_put_entities(value):
    """The maximum number of entities in a Put without batching."""
    if not (isinstance(value, (int, long)) and value > 0):
      raise datastore_errors.BadArgumentError(
          'max_put_entities should be a positive integer')
    return value

  @ConfigOption
  def max_delete_keys(value):
    """The maximum number of keys in a Delete without batching."""
    if not (isinstance(value, (int, long)) and value > 0):
      raise datastore_errors.BadArgumentError(
          'max_delete_keys should be a positive integer')
    return value
# Some Cloud Datastore calls are noops. We install a fake stub to handle them.
# Service name under which the no-op stub is registered (see _NoopRPCStub).
_NOOP_SERVICE = 'cloud_datastore_noop'
class _NoopRPC(apiproxy_rpc.RPC):
  """An RPC that completes immediately and leaves its response untouched."""

  def __init__(self):
    super(_NoopRPC, self).__init__()

  def _MakeCallImpl(self):
    # There is no real call to make: jump straight to the FINISHING state.
    self._state = apiproxy_rpc.RPC.FINISHING

  def _WaitImpl(self):
    # Nothing to wait for.
    return True
class _NoopRPCStub(object):
  """A stub whose every RPC is a no-op (see _NoopRPC)."""

  def CreateRPC(self):  # pylint: disable=invalid-name
    return _NoopRPC()
class MultiRpc(object):
  """A wrapper around multiple UserRPC objects.

  This provides an API similar to that of UserRPC, but wraps multiple
  RPCs such that e.g. .wait() blocks until all wrapped RPCs are
  complete, and .get_result() returns the combined results from all
  wrapped RPCs.

  Class methods:
    flatten(rpcs): Expand a list of UserRPCs and MultiRpcs
      into a list of UserRPCs.
    wait_any(rpcs): Call UserRPC.wait_any(flatten(rpcs)).
    wait_all(rpcs): Call UserRPC.wait_all(flatten(rpcs)).

  Instance methods:
    wait(): Wait for all RPCs.
    check_success(): Wait and then check success for all RPCs.
    get_result(): Wait for all, check successes, then merge
      all results.

  Instance attributes:
    rpcs: The list of wrapped RPCs (returns a copy).
    state: The combined state of all RPCs.
  """

  def __init__(self, rpcs, extra_hook=None):
    """Constructor.

    Args:
      rpcs: A list of UserRPC and MultiRpc objects; it is flattened
        before being stored.
      extra_hook: Optional function to be applied to the final result
        or list of results.
    """
    self.__rpcs = self.flatten(rpcs)
    self.__extra_hook = extra_hook

  @property
  def rpcs(self):
    """Get a flattened list containing the RPCs wrapped.

    This returns a copy to prevent users from modifying the state.
    """
    return list(self.__rpcs)

  @property
  def state(self):
    """Get the combined state of the wrapped RPCs.

    This mimics the UserRPC.state property.  If all wrapped RPCs have
    the same state, that state is returned; otherwise, RUNNING is
    returned (which here really means 'neither fish nor flesh').
    """
    # Track the min and max state over all RPCs; equal min/max means
    # every RPC is in the same state.
    lo = apiproxy_rpc.RPC.FINISHING
    hi = apiproxy_rpc.RPC.IDLE
    for rpc in self.__rpcs:
      lo = min(lo, rpc.state)
      hi = max(hi, rpc.state)
    if lo == hi:
      return lo
    return apiproxy_rpc.RPC.RUNNING

  def wait(self):
    """Wait for all wrapped RPCs to finish.

    This mimics the UserRPC.wait() method.
    """
    apiproxy_stub_map.UserRPC.wait_all(self.__rpcs)

  def check_success(self):
    """Check success of all wrapped RPCs, failing if any of the failed.

    This mimics the UserRPC.check_success() method.

    NOTE: This first waits for all wrapped RPCs to finish before
    checking the success of any of them.  This makes debugging easier.
    """
    self.wait()
    for rpc in self.__rpcs:
      rpc.check_success()

  def get_result(self):
    """Return the combined results of all wrapped RPCs.

    This mimics the UserRPC.get_results() method.  Multiple results
    are combined using the following rules:

    1. If there are no wrapped RPCs, an empty list is returned.
    2. If exactly one RPC is wrapped, its result is returned.
    3. If more than one RPC is wrapped, the result is always a list,
       which is constructed from the wrapped results as follows:
       a. A wrapped result equal to None is ignored;
       b. A wrapped result that is a list (but not any other type of
          sequence!) has its elements added to the result list.
       c. Any other wrapped result is appended to the result list.

    After all results are combined, if __extra_hook is set, it is
    called with the combined results and its return value becomes the
    final result.

    NOTE: This first waits for all wrapped RPCs to finish, and then
    checks all their success.  This makes debugging easier.
    """
    # TODO(user): This is a temporary fix. Sub-RPCs are currently wrapping
    # exceptions in get_result() (through the extra_hook) instead of
    # check_success() so calling check_success() will expose the raw exception.
    # Functionality should be added to easily wrap exceptions thrown in
    # check_success.
    #
    # Check all successes before getting any result.
    # self.check_success()
    # (The call above is intentionally disabled -- see the TODO.)

    # Special-case a single RPC: always return its exact result.
    if len(self.__rpcs) == 1:
      results = self.__rpcs[0].get_result()
    else:
      results = []
      # NOTE: This merging of results is heuristic: Lists are
      # concatenated, other values joined into a list, None is skipped.
      for rpc in self.__rpcs:
        result = rpc.get_result()
        if isinstance(result, list):
          results.extend(result)
        elif result is not None:
          results.append(result)
    if self.__extra_hook is not None:
      results = self.__extra_hook(results)
    return results

  @classmethod
  def flatten(cls, rpcs):
    """Return a list of UserRPCs, expanding MultiRpcs in the argument list.

    For example: given 4 UserRPCs rpc1 through rpc4,
    flatten([rpc1, MultiRpc([rpc2, rpc3]), rpc4])
    returns [rpc1, rpc2, rpc3, rpc4].

    Args:
      rpcs: A list of UserRPC and MultiRpc objects.

    Returns:
      A list of UserRPC objects.
    """
    flat = []
    for rpc in rpcs:
      if isinstance(rpc, MultiRpc):
        # NOTE: Because MultiRpc calls flatten() on its arguments,
        # there is no need to recursively flatten rpc.__rpcs -- it is
        # guaranteed to be already flat.  (rpc.__rpcs works here because
        # this code is inside the MultiRpc class, so Python mangles the
        # name to rpc._MultiRpc__rpcs.)
        flat.extend(rpc.__rpcs)
      else:
        if not isinstance(rpc, apiproxy_stub_map.UserRPC):
          raise datastore_errors.BadArgumentError(
              'Expected a list of UserRPC object (%r)' % (rpc,))
        flat.append(rpc)
    return flat

  @classmethod
  def wait_any(cls, rpcs):
    """Wait until one of the RPCs passed in is finished.

    This mimics UserRPC.wait_any().

    Args:
      rpcs: A list of UserRPC and MultiRpc objects.

    Returns:
      A UserRPC object or None.
    """
    return apiproxy_stub_map.UserRPC.wait_any(cls.flatten(rpcs))

  @classmethod
  def wait_all(cls, rpcs):
    """Wait until all RPCs passed in are finished.

    This mimics UserRPC.wait_all().

    Args:
      rpcs: A list of UserRPC and MultiRpc objects.
    """
    apiproxy_stub_map.UserRPC.wait_all(cls.flatten(rpcs))
class TransactionMode(object):
  """The mode of a Datastore transaction.

  Specifying the mode of the transaction can help to improve throughput, as it
  provides additional information about the intent (or lack of intent, in the
  case of a read only transaction) to perform a write as part of the
  transaction.
  """
  # NOTE(review): the original comments on READ_ONLY and READ_WRITE were
  # swapped; corrected here to match the names.
  UNKNOWN = 0  # Unknown transaction mode.
  READ_ONLY = 1  # Transaction is used only for read operations.
  READ_WRITE = 2  # Transaction is used for both read and write operations.
class BaseConnection(object):
"""Datastore connection base class.
NOTE: Do not instantiate this class; use Connection or
TransactionalConnection instead.
This is not a traditional database connection -- with App Engine, in
the end the connection is always implicit in the process state.
There is also no intent to be compatible with PEP 249 (Python's
Database-API). But it is a useful abstraction to have an explicit
object that manages the database interaction, and especially
transactions. Other settings related to the App Engine datastore
are also stored here (e.g. the RPC timeout).
A similar class in the Java API to the App Engine datastore is
DatastoreServiceConfig (but in Java, transaction state is always
held by the current thread).
To use transactions, call connection.new_transaction(). This
returns a new connection (an instance of the TransactionalConnection
subclass) which you should use for all operations in the
transaction.
This model supports multiple unrelated concurrent transactions (but
not nested transactions as this concept is commonly understood in
the relational database world).
When the transaction is done, call .commit() or .rollback() on the
transactional connection. If .commit() returns False, the
transaction failed and none of your operations made it to the
datastore; if it returns True, all your operations were committed.
The transactional connection cannot be used once .commit() or
.rollback() is called.
Transactions are created lazily. The first operation that requires
a transaction handle will issue the low-level BeginTransaction
request and wait for it to return.
Transactions keep track of the entity group. All operations within
a transaction must use the same entity group. An entity group
(currently) comprises an app id, a namespace, and a top-level key (a
kind and an id or name). The first operation performed determines
the entity group. There is some special-casing when the first
operation is a put() of an entity with an incomplete key; in this case
the entity group is determined after the operation returns.
NOTE: the datastore stubs in the dev_appserver currently support
only a single concurrent transaction. Specifically, the (old) file
stub locks up if an attempt is made to start a new transaction while
a transaction is already in use, whereas the sqlite stub fails an
assertion.
"""
UNKNOWN_DATASTORE = 0
MASTER_SLAVE_DATASTORE = 1
HIGH_REPLICATION_DATASTORE = 2
__SUPPORTED_VERSIONS = frozenset((_DATASTORE_V3,
_CLOUD_DATASTORE_V1))
  @_positional(1)
  def __init__(self, adapter=None, config=None, _api_version=_DATASTORE_V3):
    """Constructor.

    All arguments should be specified as keyword arguments.

    Args:
      adapter: Optional AbstractAdapter subclass instance;
        default IdentityAdapter.
      config: Optional Configuration object.
      _api_version: Internal; one of _DATASTORE_V3 or _CLOUD_DATASTORE_V1.

    Raises:
      datastore_errors.BadArgumentError: if adapter, config or _api_version
        is invalid, or if the Cloud Datastore API is requested but not
        available in this environment.
    """
    if adapter is None:
      adapter = IdentityAdapter()
    if not isinstance(adapter, AbstractAdapter):
      raise datastore_errors.BadArgumentError(
          'invalid adapter argument (%r)' % (adapter,))
    self.__adapter = adapter
    if config is None:
      config = Configuration()
    elif not Configuration.is_configuration(config):
      raise datastore_errors.BadArgumentError(
          'invalid config argument (%r)' % (config,))
    self.__config = config
    if _api_version not in self.__SUPPORTED_VERSIONS:
      raise datastore_errors.BadArgumentError(
          'unsupported API version (%s)' % (_api_version,))
    if _api_version == _CLOUD_DATASTORE_V1:
      if not _CLOUD_DATASTORE_ENABLED:
        raise datastore_errors.BadArgumentError(
            datastore_pbs.MISSING_CLOUD_DATASTORE_MESSAGE)
      # Install the noop service for some Cloud Datastore calls to use.
      apiproxy_stub_map.apiproxy.ReplaceStub(_NOOP_SERVICE, _NoopRPCStub())
    self._api_version = _api_version
    # RPCs issued through this connection that have not completed yet.
    self.__pending_rpcs = set()
# Accessors. These are read-only attributes.
  @property
  def adapter(self):
    """The adapter used by this connection (read-only)."""
    return self.__adapter
  @property
  def config(self):
    """The default configuration used by this connection (read-only)."""
    return self.__config
# TODO(user): We don't need to track pending RPCs for
# non-transactional connections.
  def _add_pending(self, rpc):
    """Add an RPC object to the list of pending RPCs.

    The argument must be a UserRPC object, not a MultiRpc object.
    """
    assert not isinstance(rpc, MultiRpc)
    self.__pending_rpcs.add(rpc)
def _remove_pending(self, rpc):
"""Remove an RPC object from the list of pending RPCs.
If the argument is a MultiRpc object, the wrapped RPCs are removed
from the list of pending RPCs.
"""
if isinstance(rpc, MultiRpc):
# Remove the wrapped RPCs, not the wrapping RPC.
# NOTE: Avoid the rpcs property since it copies the list.
for wrapped_rpc in rpc._MultiRpc__rpcs:
self._remove_pending(wrapped_rpc)
else:
try:
self.__pending_rpcs.remove(rpc)
except KeyError:
# Catching the exception is faster than first checking if it
# is there (since that's another linear search).
pass
def is_pending(self, rpc):
"""Check whether an RPC object is currently pending.
Note that 'pending' in this context refers to an RPC associated
with this connection for which _remove_pending() hasn't been
called yet; normally this is called by check_rpc_success() which
itself is called by the various result hooks. A pending RPC may
be in the RUNNING or FINISHING state.
If the argument is a MultiRpc object, this returns true if at least
one of its wrapped RPCs is pending.
"""
if isinstance(rpc, MultiRpc):
for wrapped_rpc in rpc._MultiRpc__rpcs:
if self.is_pending(wrapped_rpc):
return True
return False
else:
return rpc in self.__pending_rpcs
def get_pending_rpcs(self):
"""Return (a copy of) the list of currently pending RPCs."""
return set(self.__pending_rpcs) # Make a copy to be on the safe side.
  def get_datastore_type(self, app=None):
    """Tries to get the datastore type for the given app.

    This function is only guaranteed to return something other than
    UNKNOWN_DATASTORE when running in production and querying the current app.

    Returns:
      One of the *_DATASTORE class constants (delegates to
      _GetDatastoreType).
    """
    return _GetDatastoreType(app)
  def wait_for_all_pending_rpcs(self):
    """Wait for all currently pending RPCs to complete.

    Exceptions raised by RPC callbacks or by check_rpc_success() are
    logged and swallowed; this method is best-effort by design.
    """
    while self.__pending_rpcs:
      try:
        rpc = apiproxy_stub_map.UserRPC.wait_any(self.__pending_rpcs)
      except Exception:
        # Most likely the callback raised an exception.  Ignore it.
        # (Subtle: if it's still in the pending list it will come back
        # and then we'll likely take the other path.)
        # Log traceback at INFO level.
        logging.info('wait_for_all_pending_rpcs(): exception in wait_any()',
                     exc_info=True)
        continue
      if rpc is None:
        logging.debug('wait_any() returned None')
        continue
      assert rpc.state == apiproxy_rpc.RPC.FINISHING
      if rpc in self.__pending_rpcs:
        # Waiting for it did not remove it from the set.  This means
        # that either it didn't have a callback or the callback didn't
        # call self.check_rpc_success().  Call that now so that the
        # post-call hooks are called.  Note that this will not call the
        # callback since it has already been called by wait_any().
        # Again, we ignore exceptions.
        try:
          self.check_rpc_success(rpc)
        except Exception:
          # Log traceback at INFO level.
          logging.info('wait_for_all_pending_rpcs(): '
                       'exception in check_rpc_success()',
                       exc_info=True)
# TransactionalConnection overrides the following; their base class
# implementations are no-ops. For docstrings, see TransactionalConnection.
  def _create_rpc(self, config=None, service_name=None):
    """Create an RPC object using the configuration parameters.

    Internal only.

    Args:
      config: Optional Configuration object.
      service_name: Optional datastore service name.

    Returns:
      A new UserRPC object with the designated settings.

    NOTES:

    (1) The RPC object returned can only be used to make a single call
        (for details see apiproxy_stub_map.UserRPC).

    (2) To make a call, use one of the specific methods on the
        Connection object, such as conn.put(entities).  This sends the
        call to the server but does not wait.  To wait for the call to
        finish and get the result, call rpc.get_result().
    """
    deadline = Configuration.deadline(config, self.__config)
    on_completion = Configuration.on_completion(config, self.__config)
    callback = None
    if service_name is None:
      # NOTE(user): This is a best-effort attempt to support the
      # "hidden feature" in which an RPC may be passed to some methods in place
      # of a Configuration object.  It will fail in cases where a particular
      # operation uses a different service than the connection uses in general
      # (e.g. allocate_ids() always uses datastore_v3, even on a v1 connection).
      service_name = self._api_version
    if on_completion is not None:
      # Create an intermediate closure because on_completion must be called
      # with an RPC argument whereas callback is called without arguments.
      # (The closure refers to `rpc`, which is bound just below; this is
      # safe because the callback only fires after the call is made.)
      def callback():
        return on_completion(rpc)
    rpc = apiproxy_stub_map.UserRPC(service_name, deadline, callback)
    return rpc
# Backwards compatible alias. # TODO(user): Remove. http://b/11856478.
create_rpc = _create_rpc
  def _set_request_read_policy(self, request, config=None):
    """Set the read policy on a request.

    This takes the read policy from the config argument or the
    configuration's default configuration, and sets the request's read
    options.

    Args:
      request: A read request protobuf.
      config: Optional Configuration object.

    Returns:
      True if the read policy specifies a read current request, False if it
      specifies an eventually consistent request, None if it does
      not specify a read consistency.

    Raises:
      datastore_errors.BadRequestError: if the request protobuf has no
        recognizable read-option fields.
    """
    # Hidden feature: config may be a UserRPC object to use.
    if isinstance(config, apiproxy_stub_map.UserRPC):
      read_policy = getattr(config, 'read_policy', None)
    else:
      read_policy = Configuration.read_policy(config)
    # Compute the combined read_policy value: per-call config wins over
    # the connection's default.
    if read_policy is None:
      read_policy = self.__config.read_policy
    if hasattr(request, 'set_failover_ms') and hasattr(request, 'strong'):
      # It's a v3 read request (detected structurally by its fields).
      if read_policy == Configuration.APPLY_ALL_JOBS_CONSISTENCY:
        request.set_strong(True)
        return True
      elif read_policy == Configuration.EVENTUAL_CONSISTENCY:
        request.set_strong(False)  # let 4.1 shard use READ_CONSISTENT
        # It doesn't actually matter what value we set here;
        # datastore_client.cc will set its own deadline.  All that
        # matters is that we set a value.
        request.set_failover_ms(-1)
        return False
      else:
        return None
    elif hasattr(request, 'read_options'):
      # It's a v1 read request.
      # NOTE(user): Configuration.APPLY_ALL_JOBS_CONSISTENCY is
      # intentionally ignored for v1.
      if read_policy == Configuration.EVENTUAL_CONSISTENCY:
        request.read_options.read_consistency = (
            googledatastore.ReadOptions.EVENTUAL)
        return False
      else:
        return None
    else:
      raise datastore_errors.BadRequestError(
          'read_policy is only supported on read operations.')
  def _set_request_transaction(self, request):
    """Set the current transaction on a request.

    NOTE: This version of the method does nothing.  The version
    overridden by TransactionalConnection is the real thing.

    Args:
      request: A protobuf with a transaction field.

    Returns:
      An object representing a transaction or None (always None here).
    """
    return None
  def _make_rpc_call(self, config, method, request, response,
                     get_result_hook=None, user_data=None,
                     service_name=None):
    """Make an RPC call.

    Internal only.

    Except for the added config argument, this is a thin wrapper
    around UserRPC.make_call().

    Args:
      config: A Configuration object or None.  Defaults are taken from
        the connection's default configuration.
      method: The method name.
      request: The request protocol buffer.
      response: The response protocol buffer.
      get_result_hook: Optional get-result hook function.  If not None,
        this must be a function with exactly one argument, the RPC
        object (self).  Its return value is returned from get_result().
      user_data: Optional additional arbitrary data for the get-result
        hook function.  This can be accessed as rpc.user_data.  The
        type of this value is up to the service module.
      service_name: Optional service name to use for a fresh RPC.

    Returns:
      The UserRPC object used for the call.
    """
    # Hidden feature: config may be a UserRPC object to use, and then
    # that object is returned.
    if isinstance(config, apiproxy_stub_map.UserRPC):
      rpc = config  # "We already got one."
    else:
      rpc = self._create_rpc(config, service_name)
    rpc.make_call(method, request, response, get_result_hook, user_data)
    # Track the in-flight RPC so wait_for_all_pending_rpcs() can find it.
    self._add_pending(rpc)
    return rpc
# Backwards compatible alias. # TODO(user): Remove. http://b/11856478.
make_rpc_call = _make_rpc_call
  def check_rpc_success(self, rpc):
    """Check for RPC success and translate exceptions.

    This wraps rpc.check_success() and should be called instead of that.

    This also removes the RPC from the list of pending RPCs, once it
    has completed.

    Args:
      rpc: A UserRPC or MultiRpc object.

    Raises:
      Nothing if the call succeeded; various datastore_errors.Error
      subclasses if ApplicationError was raised by rpc.check_success().
    """
    try:
      rpc.wait()
    finally:
      # If wait() raised an exception, it's likely a DeadlineExceededError,
      # and then we're better off removing it than keeping it.
      self._remove_pending(rpc)
    try:
      rpc.check_success()
    except apiproxy_errors.ApplicationError, err:
      # Translate the low-level ApplicationError into the corresponding
      # datastore-level error class.
      raise _ToDatastoreError(err)
# Basic operations: Get, Put, Delete.
# Default limits for batching. These can be overridden using the
# corresponding Configuration options.
MAX_RPC_BYTES = 1024 * 1024
MAX_GET_KEYS = 1000
MAX_PUT_ENTITIES = 500
MAX_DELETE_KEYS = 500
MAX_ALLOCATE_IDS_KEYS = 500
# By default, client-side batching kicks in for all ops with more than 10
# entity groups.
DEFAULT_MAX_ENTITY_GROUPS_PER_RPC = 10
# NOTE(user): Keep these in sync with similar constants in
# com.google.appengine.tools.development.ApiProxyLocalImpl and
def __get_max_entity_groups_per_rpc(self, config):
"""Internal helper: figures out max_entity_groups_per_rpc for the config."""
return Configuration.max_entity_groups_per_rpc(
config, self.__config) or self.DEFAULT_MAX_ENTITY_GROUPS_PER_RPC
  def _extract_entity_group(self, value):
    """Internal helper: extracts the entity group from a key or entity.

    Supports both v3 and v1 protobufs.

    Args:
      value: an entity_pb.{Reference, EntityProto} or
        googledatastore.{Key, Entity}.

    Returns:
      A tuple consisting of:
        - kind
        - name, id, or ('new', unique id)
    """
    # Normalize entities down to their keys first.
    if _CLOUD_DATASTORE_ENABLED and isinstance(value, googledatastore.Entity):
      value = value.key
    if isinstance(value, entity_pb.EntityProto):
      value = value.key()
    # Extract the top-level path element, handling both protobuf styles
    # (v1 uses attributes; v3 uses accessor methods).
    if _CLOUD_DATASTORE_ENABLED and isinstance(value, googledatastore.Key):
      elem = value.path[0]
      elem_id = elem.id
      elem_name = elem.name
      kind = elem.kind
    else:
      elem = value.path().element(0)
      kind = elem.type()
      elem_id = elem.id()
      elem_name = elem.name()
    # We use a tuple when elem has neither id nor name to avoid collisions
    # between elem.id() and id(elem).
    return (kind, elem_id or elem_name or ('new', id(elem)))
def _map_and_group(self, values, map_fn, group_fn):
"""Internal helper: map values to keys and group by key. Here key is any
object derived from an input value by map_fn, and which can be grouped
by group_fn.
Args:
values: The values to be grouped by applying get_group(to_ref(value)).
map_fn: a function that maps a value to a key to be grouped.
group_fn: a function that groups the keys output by map_fn.
Returns:
A list where each element is a list of (key, index) pairs. Here
index is the location of the value from which the key was derived in
the original list.
"""
indexed_key_groups = collections.defaultdict(list)
for index, value in enumerate(values):
key = map_fn(value)
indexed_key_groups[group_fn(key)].append((key, index))
return indexed_key_groups.values()
def __create_result_index_pairs(self, indexes):
"""Internal helper: build a function that ties an index with each result.
Args:
indexes: A list of integers. A value x at location y in the list means
that the result at location y in the result list needs to be at location
x in the list of results returned to the user.
"""
def create_result_index_pairs(results):
return zip(results, indexes)
return create_result_index_pairs
def __sort_result_index_pairs(self, extra_hook):
"""Builds a function that sorts the indexed results.
Args:
extra_hook: A function that the returned function will apply to its result
before returning.
Returns:
A function that takes a list of results and reorders them to match the
order in which the input values associated with each results were
originally provided.
"""
def sort_result_index_pairs(result_index_pairs):
results = [None] * len(result_index_pairs)
for result, index in result_index_pairs:
results[index] = result
if extra_hook is not None:
results = extra_hook(results)
return results
return sort_result_index_pairs
  def _generate_pb_lists(self, grouped_values, base_size, max_count,
                         max_groups, config):
    """Internal helper: repeatedly yield a list of 2 elements.

    Args:
      grouped_values: A list of lists.  The inner lists consist of objects
        grouped by e.g. entity group or id sequence.
      base_size: An integer representing the base size of an rpc.  Used for
        splitting operations across multiple RPCs due to size limitations.
      max_count: An integer representing the maximum number of objects we can
        send in an rpc.  Used for splitting operations across multiple RPCs.
      max_groups: An integer representing the maximum number of groups we can
        have represented in an rpc.  Can be None, in which case no constraint.
      config: The config object, defining max rpc size in bytes.

    Yields:
      Repeatedly yields 2 element tuples.  The first element is a list of
      protobufs to send in one batch.  The second element is a list containing
      the original location of those protobufs (expressed as an index) in the
      input.
    """
    max_size = (Configuration.max_rpc_bytes(config, self.__config) or
                self.MAX_RPC_BYTES)
    pbs = []
    pb_indexes = []
    size = base_size
    num_groups = 0
    for indexed_pbs in grouped_values:
      num_groups += 1
      if max_groups is not None and num_groups > max_groups:
        # Group limit reached: flush the current batch before starting a
        # new one containing this group.
        yield (pbs, pb_indexes)
        pbs = []
        pb_indexes = []
        size = base_size
        num_groups = 1
      for indexed_pb in indexed_pbs:
        (pb, index) = indexed_pb
        # Extra 5 bytes come from:
        # - 1 byte determined by looking at source of GetRequest.ByteSize().
        # - 4 bytes from inspecting code for pb.lengthString(), which is not
        #   available in proto2.  4 bytes is an upper bound for pb sizes
        #   up to 100MB.
        incr_size = pb.ByteSize() + 5
        # The test on the yield checks for several conditions:
        # - no batching if config is really a UserRPC object;
        # - avoid yielding empty batches;
        # - a batch can fill up based on count or serialized size.
        if (not isinstance(config, apiproxy_stub_map.UserRPC) and
            (len(pbs) >= max_count or (pbs and size + incr_size > max_size))):
          yield (pbs, pb_indexes)
          pbs = []
          pb_indexes = []
          size = base_size
          num_groups = 1
        pbs.append(pb)
        pb_indexes.append(index)
        size += incr_size
    yield (pbs, pb_indexes)  # Last batch.
def __force(self, req):
"""Configure a request to force mutations."""
if isinstance(req, (datastore_pb.PutRequest,
datastore_pb.TouchRequest,
datastore_pb.DeleteRequest)):
req.set_force(True)
def get(self, keys):
"""Synchronous Get operation.
Args:
keys: An iterable of user-level key objects.
Returns:
A list of user-level entity objects and None values, corresponding
1:1 to the argument keys. A None means there is no entity for the
corresponding key.
"""
return self.async_get(None, keys).get_result()
def async_get(self, config, keys, extra_hook=None):
  """Asynchronous Get operation.

  Args:
    config: A Configuration object or None.  Defaults are taken from
      the connection's default configuration.
    keys: An iterable of user-level key objects.
    extra_hook: Optional function to be called on the result once the
      RPC has completed.

  Returns:
    A MultiRpc object.
  """

  # This function is a closure over self and config.  It issues a single
  # Get (v3) or Lookup (v1) RPC for one batch of key protobufs.
  def make_get_call(base_req, pbs, extra_hook=None):
    req = copy.deepcopy(base_req)
    if self._api_version == _CLOUD_DATASTORE_V1:
      method = 'Lookup'
      req.keys.extend(pbs)
      resp = googledatastore.LookupResponse()
    else:
      method = 'Get'
      req.key_list().extend(pbs)
      resp = datastore_pb.GetResponse()
    # Note that we embed both the config and an optional user supplied hook in
    # the RPC's user_data.
    #
    # We also pass along the pbs (keys) that were requested. In theory, we
    # should be able to simply retrieve these from the req object later, but
    # some users may be doing fancy things like intercepting these calls and
    # altering the request/response.
    user_data = config, pbs, extra_hook
    return self._make_rpc_call(config, method, req, resp,
                               get_result_hook=self.__get_hook,
                               user_data=user_data,
                               service_name=self._api_version)

  # Build the shared base request and pick the key-conversion function for
  # the active datastore API version.
  if self._api_version == _CLOUD_DATASTORE_V1:
    base_req = googledatastore.LookupRequest()
    key_to_pb = self.__adapter.key_to_pb_v1
  else:
    base_req = datastore_pb.GetRequest()
    base_req.set_allow_deferred(True)
    key_to_pb = self.__adapter.key_to_pb
  is_read_current = self._set_request_read_policy(base_req, config)
  txn = self._set_request_transaction(base_req)

  # Special case for legacy support and single value: one RPC, no batching.
  if isinstance(config, apiproxy_stub_map.UserRPC) or len(keys) <= 1:
    pbs = [key_to_pb(key) for key in keys]
    return make_get_call(base_req, pbs, extra_hook)

  max_count = (Configuration.max_get_keys(config, self.__config) or
               self.MAX_GET_KEYS)

  indexed_keys_by_entity_group = self._map_and_group(
      keys, key_to_pb, self._extract_entity_group)

  if is_read_current is None:
    is_read_current = (self.get_datastore_type() ==
                       BaseConnection.HIGH_REPLICATION_DATASTORE)

  # Entity group based client-side batch splitting is only useful when
  # performing a strong or consistent read. However, if we have a transaction
  # then all RPCs go to the same task so entity group client-side batching
  # won't have any impact.
  if is_read_current and txn is None:
    max_egs_per_rpc = self.__get_max_entity_groups_per_rpc(config)
  else:
    max_egs_per_rpc = None

  # Iterator yielding lists of entity protobufs, each
  # list representing one batch.
  pbsgen = self._generate_pb_lists(indexed_keys_by_entity_group,
                                   base_req.ByteSize(), max_count,
                                   max_egs_per_rpc, config)

  rpcs = []
  for pbs, indexes in pbsgen:
    # Each batch remembers the original positions of its keys so results
    # can be re-assembled in request order by the sort hook below.
    rpcs.append(make_get_call(base_req, pbs,
                              self.__create_result_index_pairs(indexes)))

  return MultiRpc(rpcs, self.__sort_result_index_pairs(extra_hook))
def __get_hook(self, rpc):
  """Internal method used as get_result_hook for Get operation.

  Resolves deferred results (if any) by issuing follow-up synchronous RPCs,
  then returns entities ordered 1:1 with the originally requested keys.
  """
  self.check_rpc_success(rpc)
  # get_async stores the config, the requested keys, and an extra_hook on the
  # rpc's user_data field.
  config, keys_from_request, extra_hook = rpc.user_data

  if self._api_version == _DATASTORE_V3 and rpc.response.in_order():
    # The response is in the same order as the request. This also implies
    # that there are no deferred results.
    entities = []
    for entity_result in rpc.response.entity_list():
      if entity_result.has_entity():
        entity = self.__adapter.pb_to_entity(entity_result.entity())
      else:
        # Missing entity: represented by None at the same list position.
        entity = None
      entities.append(entity)
  else:
    # The response is not in order. Start accumulating the results in a dict.
    current_get_response = rpc.response
    result_dict = {}
    self.__add_get_response_entities_to_dict(current_get_response,
                                             result_dict)

    # Issue additional (synchronous) RPCs until there are no more deferred
    # keys.  The deferred request is a mutated copy of the original request
    # so read options/transaction settings carry over.
    deferred_req = copy.deepcopy(rpc.request)
    if self._api_version == _CLOUD_DATASTORE_V1:
      method = 'Lookup'
      deferred_resp = googledatastore.LookupResponse()
      while current_get_response.deferred:
        deferred_req.ClearField('keys')
        deferred_req.keys.extend(current_get_response.deferred)
        deferred_resp.Clear()
        deferred_rpc = self._make_rpc_call(config, method,
                                           deferred_req, deferred_resp,
                                           service_name=self._api_version)
        deferred_rpc.get_result()
        current_get_response = deferred_rpc.response

        # Add the resulting Entities to the result_dict.
        self.__add_get_response_entities_to_dict(current_get_response,
                                                 result_dict)
    else:
      method = 'Get'
      deferred_resp = datastore_pb.GetResponse()
      while current_get_response.deferred_list():
        deferred_req.clear_key()
        deferred_req.key_list().extend(current_get_response.deferred_list())
        deferred_resp.Clear()
        deferred_rpc = self._make_rpc_call(config, method,
                                           deferred_req, deferred_resp,
                                           service_name=self._api_version)
        deferred_rpc.get_result()
        current_get_response = deferred_rpc.response

        # Add the resulting Entities to the result_dict.
        self.__add_get_response_entities_to_dict(current_get_response,
                                                 result_dict)

    # Pull the results out of the dictionary in the order of the request keys.
    # Defaults to None for entries not in the dictionary.
    entities = [result_dict.get(datastore_types.ReferenceToKeyValue(pb))
                for pb in keys_from_request]

  # Now we have all of the requested entities in the correct order. Apply the
  # extra_hook function if it exists.
  if extra_hook is not None:
    entities = extra_hook(entities)
  return entities
def __add_get_response_entities_to_dict(self, get_response, result_dict):
  """Converts entities from the get response and adds them to the dict.

  The Key for the dict will be calculated via
  datastore_types.ReferenceToKeyValue.  There will be no entry for entities
  that were not found.

  Args:
    get_response: A datastore_pb.GetResponse or
        googledatastore.LookupResponse.
    result_dict: The dict to add results to.
  """
  if (_CLOUD_DATASTORE_ENABLED
      and isinstance(get_response, googledatastore.LookupResponse)):
    # v1 responses only list found entities, so no missing-entity check
    # is needed on this path.
    for result in get_response.found:
      v1_key = result.entity.key
      entity = self.__adapter.pb_v1_to_entity(result.entity, False)
      result_dict[datastore_types.ReferenceToKeyValue(v1_key)] = entity
  else:
    for entity_result in get_response.entity_list():
      # Exclude missing entities from dict.
      if entity_result.has_entity():
        # Note that we take the protobuf Reference from the response and
        # create a hashable key from it.
        #
        # TODO(user): Check on remote api issues with getting key here
        reference_pb = entity_result.entity().key()
        hashable_key = datastore_types.ReferenceToKeyValue(reference_pb)
        entity = self.__adapter.pb_to_entity(entity_result.entity())
        result_dict[hashable_key] = entity
def get_indexes(self):
  """Synchronous get indexes operation.

  Returns:
    user-level indexes representation
  """
  rpc = self.async_get_indexes(None)
  return rpc.get_result()
def async_get_indexes(self, config, extra_hook=None, _app=None):
  """Asynchronous get indexes operation.

  Args:
    config: A Configuration object or None.  Defaults are taken from
      the connection's default configuration.
    extra_hook: Optional function to be called once the RPC has completed.

  Returns:
    A MultiRpc object.
  """
  # GetIndices is a v3-only call; the request just names the application.
  request = datastore_pb.GetIndicesRequest()
  request.set_app_id(datastore_types.ResolveAppId(_app))
  response = datastore_pb.CompositeIndices()
  return self._make_rpc_call(
      config, 'GetIndices', request, response,
      get_result_hook=self.__get_indexes_hook,
      user_data=extra_hook,
      service_name=_DATASTORE_V3)
def __get_indexes_hook(self, rpc):
  """Internal get_result_hook for the GetIndices operation."""
  self.check_rpc_success(rpc)
  convert = self.__adapter.pb_to_index
  indexes = []
  for index_pb in rpc.response.index_list():
    indexes.append(convert(index_pb))
  extra_hook = rpc.user_data
  if extra_hook:
    indexes = extra_hook(indexes)
  return indexes
def put(self, entities):
  """Synchronous Put operation.

  Args:
    entities: An iterable of user-level entity objects.

  Returns:
    A list of user-level key objects, corresponding 1:1 to the
    argument entities.

  NOTE: If any of the entities has an incomplete key, this will
  *not* patch up those entities with the complete key.
  """
  rpc = self.async_put(None, entities)
  return rpc.get_result()
def async_put(self, config, entities, extra_hook=None):
  """Asynchronous Put operation.

  Args:
    config: A Configuration object or None.  Defaults are taken from
      the connection's default configuration.
    entities: An iterable of user-level entity objects.
    extra_hook: Optional function to be called on the result once the
      RPC has completed.

  Returns:
    A MultiRpc object.

  NOTE: If any of the entities has an incomplete key, this will
  *not* patch up those entities with the complete key.
  """

  # This function is a closure over self and config.  It issues one Put (v3)
  # or Commit-with-upserts (v1) RPC for one batch of entity protobufs.
  def make_put_call(base_req, pbs, user_data=None):
    req = copy.deepcopy(base_req)
    if self._api_version == _CLOUD_DATASTORE_V1:
      for entity in pbs:
        mutation = req.mutations.add()
        mutation.upsert.CopyFrom(entity)
      method = 'Commit'
      resp = googledatastore.CommitResponse()
    else:
      req.entity_list().extend(pbs)
      method = 'Put'
      resp = datastore_pb.PutResponse()
    # __put_hook needs the request pbs to recover complete keys from
    # v1 responses, so they ride along in user_data.
    user_data = pbs, user_data
    return self._make_rpc_call(config, method, req, resp,
                               get_result_hook=self.__put_hook,
                               user_data=user_data,
                               service_name=self._api_version)

  # See elaborate comments about batching in async_get().
  if self._api_version == _CLOUD_DATASTORE_V1:
    base_req = googledatastore.CommitRequest()
    base_req.mode = googledatastore.CommitRequest.NON_TRANSACTIONAL
    entity_to_pb = self.__adapter.entity_to_pb_v1
  else:
    base_req = datastore_pb.PutRequest()
    entity_to_pb = self.__adapter.entity_to_pb
  self._set_request_transaction(base_req)
  if Configuration.force_writes(config, self.__config):
    self.__force(base_req)

  # Special case for legacy support and single value.
  if isinstance(config, apiproxy_stub_map.UserRPC) or len(entities) <= 1:
    pbs = [entity_to_pb(entity) for entity in entities]
    return make_put_call(base_req, pbs, extra_hook)

  max_count = (Configuration.max_put_entities(config, self.__config) or
               self.MAX_PUT_ENTITIES)
  # Entity-group batch splitting only applies outside a transaction.
  # NOTE(review): for v1 requests the first clause always decides (the v1
  # CommitRequest has no has_transaction()); confirm before refactoring.
  if ((self._api_version == _CLOUD_DATASTORE_V1 and
       not base_req.transaction) or
      not base_req.has_transaction()):
    max_egs_per_rpc = self.__get_max_entity_groups_per_rpc(config)
  else:
    max_egs_per_rpc = None

  indexed_entities_by_entity_group = self._map_and_group(
      entities, entity_to_pb, self._extract_entity_group)

  # Iterator yielding lists of key protobufs, each list representing
  # one batch.
  pbsgen = self._generate_pb_lists(indexed_entities_by_entity_group,
                                   base_req.ByteSize(), max_count,
                                   max_egs_per_rpc, config)

  rpcs = []
  for pbs, indexes in pbsgen:
    # Each batch remembers the original positions of its entities so the
    # resulting keys can be re-assembled in request order.
    rpcs.append(make_put_call(base_req, pbs,
                              self.__create_result_index_pairs(indexes)))
  return MultiRpc(rpcs, self.__sort_result_index_pairs(extra_hook))
def __put_hook(self, rpc):
  """Internal method used as get_result_hook for Put operation.

  Returns user-level keys, 1:1 with the entities that were sent.
  """
  self.check_rpc_success(rpc)
  entities_from_request, extra_hook = rpc.user_data

  if (_CLOUD_DATASTORE_ENABLED
      and isinstance(rpc.response, googledatastore.CommitResponse)):
    # v1 path: entities sent with a complete key keep that key; entities
    # sent with an incomplete key take the next allocated key from the
    # response.
    # NOTE(review): assumes mutation_results yields allocated keys in the
    # same order as the incomplete-key entities in the request -- confirm.
    keys = []
    i = 0
    for entity in entities_from_request:
      if datastore_pbs.is_complete_v1_key(entity.key):
        keys.append(entity.key)
      else:
        keys.append(rpc.response.mutation_results[i].key)
        i += 1
    keys = [self.__adapter.pb_v1_to_key(key) for key in keys]
  else:
    # v3 path: the response carries the full key list directly.
    keys = [self.__adapter.pb_to_key(key) for key in rpc.response.key_list()]

  # NOTE: We don't patch up the keys of entities that were written
  # with an incomplete key here; that's up to the extra_hook.
  if extra_hook is not None:
    keys = extra_hook(keys)
  return keys
def delete(self, keys):
  """Synchronous Delete operation.

  Args:
    keys: An iterable of user-level key objects.

  Returns:
    None.
  """
  rpc = self.async_delete(None, keys)
  return rpc.get_result()
def async_delete(self, config, keys, extra_hook=None):
  """Asynchronous Delete operation.

  Args:
    config: A Configuration object or None.  Defaults are taken from
      the connection's default configuration.
    keys: An iterable of user-level key objects.
    extra_hook: Optional function to be called once the RPC has completed.

  Returns:
    A MultiRpc object.
  """

  # This function is a closure over self and config.  It issues one
  # Delete (v3) or Commit-with-deletes (v1) RPC for one batch of key pbs.
  def make_delete_call(base_req, pbs, user_data=None):
    req = copy.deepcopy(base_req)
    if self._api_version == _CLOUD_DATASTORE_V1:
      for pb in pbs:
        mutation = req.mutations.add()
        mutation.delete.CopyFrom(pb)
      method = 'Commit'
      resp = googledatastore.CommitResponse()
    else:
      req.key_list().extend(pbs)
      method = 'Delete'
      resp = datastore_pb.DeleteResponse()
    return self._make_rpc_call(config, method, req, resp,
                               get_result_hook=self.__delete_hook,
                               user_data=user_data,
                               service_name=self._api_version)

  # See elaborate comments about batching in async_get().
  if self._api_version == _CLOUD_DATASTORE_V1:
    base_req = googledatastore.CommitRequest()
    base_req.mode = googledatastore.CommitRequest.NON_TRANSACTIONAL
    key_to_pb = self.__adapter.key_to_pb_v1
  else:
    base_req = datastore_pb.DeleteRequest()
    key_to_pb = self.__adapter.key_to_pb
  self._set_request_transaction(base_req)
  if Configuration.force_writes(config, self.__config):
    self.__force(base_req)

  # Special case for legacy support and single value.
  if isinstance(config, apiproxy_stub_map.UserRPC) or len(keys) <= 1:
    pbs = [key_to_pb(key) for key in keys]
    return make_delete_call(base_req, pbs, extra_hook)

  max_count = (Configuration.max_delete_keys(config, self.__config) or
               self.MAX_DELETE_KEYS)
  # Entity-group batch splitting only applies outside a transaction.
  # NOTE(review): for v1 requests the first clause always decides (the v1
  # CommitRequest has no has_transaction()); confirm before refactoring.
  if ((self._api_version == _CLOUD_DATASTORE_V1 and
       not base_req.transaction) or
      not base_req.has_transaction()):
    max_egs_per_rpc = self.__get_max_entity_groups_per_rpc(config)
  else:
    max_egs_per_rpc = None

  indexed_keys_by_entity_group = self._map_and_group(
      keys, key_to_pb, self._extract_entity_group)

  # Iterator yielding lists of key protobufs, each list representing
  # one batch.
  pbsgen = self._generate_pb_lists(indexed_keys_by_entity_group,
                                   base_req.ByteSize(), max_count,
                                   max_egs_per_rpc, config)

  # Unlike get/put there is no per-key result, so the original indexes
  # yielded by pbsgen are discarded.
  rpcs = []
  for pbs, _ in pbsgen:
    rpcs.append(make_delete_call(base_req, pbs))
  return MultiRpc(rpcs, extra_hook)
def __delete_hook(self, rpc):
  """Internal get_result_hook for the Delete operation."""
  self.check_rpc_success(rpc)
  extra_hook = rpc.user_data
  if extra_hook is None:
    return
  # Invoke with None so the callback signature matches MultiRpc.
  extra_hook(None)
# BeginTransaction operation.
def begin_transaction(self, app, previous_transaction=None,
                      mode=TransactionMode.UNKNOWN):
  """Synchronous BeginTransaction operation.

  NOTE: In most cases the new_transaction() method is preferred,
  since that returns a TransactionalConnection object which will
  begin the transaction lazily.

  Args:
    app: Application ID.
    previous_transaction: The transaction to reset.
    mode: The transaction mode.

  Returns:
    An object representing a transaction or None.
  """
  rpc = self.async_begin_transaction(None, app, previous_transaction, mode)
  return rpc.get_result()
def async_begin_transaction(self,
                            config,
                            app,
                            previous_transaction=None,
                            mode=TransactionMode.UNKNOWN):
  """Asynchronous BeginTransaction operation.

  Args:
    config: A configuration object or None.  Defaults are taken from
      the connection's default configuration.
    app: Application ID.
    previous_transaction: The transaction to reset.
    mode: The transaction mode.

  Returns:
    A MultiRpc object.

  Raises:
    datastore_errors.BadArgumentError: if app is not a non-empty string, or
      if previous_transaction is combined with READ_ONLY mode.
  """
  if not isinstance(app, basestring) or not app:
    raise datastore_errors.BadArgumentError(
        'begin_transaction requires an application id argument (%r)' % (app,))
  # A reset transaction will be written to, so READ_ONLY is contradictory.
  if previous_transaction is not None and mode == TransactionMode.READ_ONLY:
    raise datastore_errors.BadArgumentError(
        'begin_transaction requires mode != READ_ONLY when '
        'previous_transaction is not None'
    )

  if self._api_version == _CLOUD_DATASTORE_V1:
    # v1 path: app is validated above but only attached to v3 requests.
    req = googledatastore.BeginTransactionRequest()
    resp = googledatastore.BeginTransactionResponse()

    # upgrade mode to READ_WRITE for retries
    if previous_transaction is not None:
      mode = TransactionMode.READ_WRITE

    if mode == TransactionMode.UNKNOWN:
      # Leave transaction_options unset; the backend picks the default.
      pass
    elif mode == TransactionMode.READ_ONLY:
      req.transaction_options.read_only.SetInParent()
    elif mode == TransactionMode.READ_WRITE:
      if previous_transaction is not None:
        (req.transaction_options.read_write
         .previous_transaction) = previous_transaction
      else:
        req.transaction_options.read_write.SetInParent()
  else:
    req = datastore_pb.BeginTransactionRequest()
    req.set_app(app)
    if (TransactionOptions.xg(config, self.__config)):
      req.set_allow_multiple_eg(True)

    if mode == TransactionMode.UNKNOWN:
      pass
    elif mode == TransactionMode.READ_ONLY:
      req.set_mode(datastore_pb.BeginTransactionRequest.READ_ONLY)
    elif mode == TransactionMode.READ_WRITE:
      req.set_mode(datastore_pb.BeginTransactionRequest.READ_WRITE)
      if previous_transaction is not None:
        req.mutable_previous_transaction().CopyFrom(previous_transaction)
    resp = datastore_pb.Transaction()

  return self._make_rpc_call(config, 'BeginTransaction', req, resp,
                             get_result_hook=self.__begin_transaction_hook,
                             service_name=self._api_version)
def __begin_transaction_hook(self, rpc):
  """Internal get_result_hook for BeginTransaction."""
  self.check_rpc_success(rpc)
  # A v1 response wraps the transaction handle in a field; a v3 response
  # is the Transaction message itself.
  if self._api_version != _CLOUD_DATASTORE_V1:
    return rpc.response
  return rpc.response.transaction
class Connection(BaseConnection):
  """Transaction-less connection class.

  This contains those operations that are not allowed on transactional
  connections.  (Currently only allocate_ids and reserve_key_ids.)
  """

  @_positional(1)
  def __init__(self, adapter=None, config=None, _api_version=_DATASTORE_V3):
    """Constructor.

    All arguments should be specified as keyword arguments.

    Args:
      adapter: Optional AbstractAdapter subclass instance;
        default IdentityAdapter.
      config: Optional Configuration object.
    """
    super(Connection, self).__init__(adapter=adapter, config=config,
                                     _api_version=_api_version)
    self.__adapter = self.adapter  # Copy to new private variable.
    self.__config = self.config  # Copy to new private variable.

  # Pseudo-operation to create a new TransactionalConnection.

  def new_transaction(self, config=None, previous_transaction=None,
                      mode=TransactionMode.UNKNOWN):
    """Create a new transactional connection based on this one.

    This is different from, and usually preferred over, the
    begin_transaction() method; new_transaction() returns a new
    TransactionalConnection object which will begin the transaction
    lazily.

    Args:
      config: A configuration object for the new connection, merged
        with this connection's config.
      previous_transaction: The transaction being reset.
      mode: The transaction mode.
    """
    config = self.__config.merge(config)
    return TransactionalConnection(adapter=self.__adapter, config=config,
                                   _api_version=self._api_version,
                                   previous_transaction=previous_transaction,
                                   mode=mode)

  # AllocateIds operation.

  def allocate_ids(self, key, size=None, max=None):
    """Synchronous AllocateIds operation.

    Exactly one of size and max must be specified.

    Args:
      key: A user-level key object.
      size: Optional number of IDs to allocate.
      max: Optional maximum ID to allocate.

    Returns:
      A pair (start, end) giving the (inclusive) range of IDs allocation.
    """
    return self.async_allocate_ids(None, key, size, max).get_result()

  def async_allocate_ids(self, config, key, size=None, max=None,
                         extra_hook=None):
    """Asynchronous AllocateIds operation.

    Args:
      config: A Configuration object or None.  Defaults are taken from
        the connection's default configuration.
      key: A user-level key object.
      size: Optional number of IDs to allocate.
      max: Optional maximum ID to allocate.
      extra_hook: Optional function to be called on the result once the
        RPC has completed.

    Returns:
      A MultiRpc object.

    Raises:
      datastore_errors.BadArgumentError: if both size and max are given,
        or if either is of the wrong type or out of range.
    """
    if size is not None:
      if max is not None:
        raise datastore_errors.BadArgumentError(
            'Cannot allocate ids using both size and max')
      if not isinstance(size, (int, long)):
        raise datastore_errors.BadArgumentError('Invalid size (%r)' % (size,))
      if size > _MAX_ID_BATCH_SIZE:
        raise datastore_errors.BadArgumentError(
            'Cannot allocate more than %s ids at a time; received %s'
            % (_MAX_ID_BATCH_SIZE, size))
      if size <= 0:
        raise datastore_errors.BadArgumentError(
            'Cannot allocate less than 1 id; received %s' % size)
    if max is not None:
      if not isinstance(max, (int, long)):
        raise datastore_errors.BadArgumentError('Invalid max (%r)' % (max,))
      if max < 0:
        # BUG FIX: this message previously interpolated `size`, which is
        # always None on this code path; report the offending `max` instead.
        raise datastore_errors.BadArgumentError(
            'Cannot allocate a range with a max less than 0 id; received %s' %
            max)
    req = datastore_pb.AllocateIdsRequest()
    req.mutable_model_key().CopyFrom(self.__adapter.key_to_pb(key))
    if size is not None:
      req.set_size(size)
    if max is not None:
      req.set_max(max)
    resp = datastore_pb.AllocateIdsResponse()
    rpc = self._make_rpc_call(config, 'AllocateIds', req, resp,
                              get_result_hook=self.__allocate_ids_hook,
                              user_data=extra_hook,
                              service_name=_DATASTORE_V3)
    return rpc

  def __allocate_ids_hook(self, rpc):
    """Internal method used as get_result_hook for AllocateIds."""
    self.check_rpc_success(rpc)
    pair = rpc.response.start(), rpc.response.end()  # Inclusive range.
    if rpc.user_data is not None:
      pair = rpc.user_data(pair)
    return pair

  # AllocateIds operation with keys to reserve for restore/load/copy.

  def _reserve_keys(self, keys):
    """Synchronous AllocateIds operation to reserve the given keys.

    Sends one or more v3 AllocateIds rpcs with keys to reserve.
    Reserved keys must be complete and must have valid ids.

    Args:
      keys: Iterable of user-level keys.
    """
    self._async_reserve_keys(None, keys).get_result()

  def _async_reserve_keys(self, config, keys, extra_hook=None):
    """Asynchronous AllocateIds operation to reserve the given keys.

    Sends one or more v3 AllocateIds rpcs with keys to reserve.
    Reserved keys must be complete and must have valid ids.

    Args:
      config: A Configuration object or None to use Connection default.
      keys: Iterable of user-level keys.
      extra_hook: Optional function to be called on rpc result.

    Returns:
      None, or the result of user-supplied extra_hook.
    """
    def to_id_key(key):
      # Root keys share a single batching bucket; child keys batch by
      # their entity group.
      if key.path().element_size() == 1:
        return 'root_idkey'
      else:
        return self._extract_entity_group(key)

    keys_by_idkey = self._map_and_group(keys, self.__adapter.key_to_pb,
                                        to_id_key)
    max_count = (Configuration.max_allocate_ids_keys(config, self.__config) or
                 self.MAX_ALLOCATE_IDS_KEYS)

    rpcs = []
    pbsgen = self._generate_pb_lists(keys_by_idkey, 0, max_count, None, config)
    for pbs, _ in pbsgen:
      req = datastore_pb.AllocateIdsRequest()
      req.reserve_list().extend(pbs)
      resp = datastore_pb.AllocateIdsResponse()
      rpcs.append(self._make_rpc_call(config, 'AllocateIds', req, resp,
                                      get_result_hook=self.__reserve_keys_hook,
                                      user_data=extra_hook,
                                      service_name=_DATASTORE_V3))
    return MultiRpc(rpcs)

  def __reserve_keys_hook(self, rpc):
    """Internal get_result_hook for _reserve_keys."""
    self.check_rpc_success(rpc)
    if rpc.user_data is not None:
      return rpc.user_data(rpc.response)
class TransactionOptions(Configuration):
  """An immutable class that contains options for a transaction."""

  NESTED = 1
  """Create a nested transaction under an existing one."""

  MANDATORY = 2
  """Always propagate an existing transaction, throw an exception if there is
  no existing transaction."""

  ALLOWED = 3
  """If there is an existing transaction propagate it."""

  INDEPENDENT = 4
  """Always use a new transaction, pausing any existing transactions."""

  # Set of all valid propagation modes, used for validation below.
  _PROPAGATION = frozenset((NESTED, MANDATORY, ALLOWED, INDEPENDENT))

  # NOTE: @ConfigOption validators take the raw option value (not self)
  # and return the validated value.

  @ConfigOption
  def propagation(value):
    """How existing transactions should be handled.

    One of NESTED, MANDATORY, ALLOWED, INDEPENDENT.  The interpretation of
    these types is up to higher level run-in-transaction implementations.

    WARNING: Using anything other than NESTED for the propagation flag
    can have strange consequences.  When using ALLOWED or MANDATORY, if
    an exception is raised, the transaction is likely not safe to
    commit.  When using INDEPENDENT it is not generally safe to return
    values read to the caller (as they were not read in the caller's
    transaction).

    Raises: datastore_errors.BadArgumentError if value is not recognized.
    """
    if value not in TransactionOptions._PROPAGATION:
      raise datastore_errors.BadArgumentError('Unknown propagation value (%r)' %
                                              (value,))
    return value

  @ConfigOption
  def xg(value):
    """Whether to allow cross-group transactions.

    Raises: datastore_errors.BadArgumentError if value is not a bool.
    """
    if not isinstance(value, bool):
      raise datastore_errors.BadArgumentError(
          'xg argument should be bool (%r)' % (value,))
    return value

  @ConfigOption
  def retries(value):
    """How many retries to attempt on the transaction.

    The exact retry logic is implemented in higher level run-in-transaction
    implementations.

    Raises: datastore_errors.BadArgumentError if value is not an integer or
      is not greater than zero.
    """
    datastore_types.ValidateInteger(value,
                                    'retries',
                                    datastore_errors.BadArgumentError,
                                    zero_ok=True)
    return value

  @ConfigOption
  def app(value):
    """The application in which to perform the transaction.

    Raises: datastore_errors.BadArgumentError if value is not a string
      or is the empty string.
    """
    datastore_types.ValidateString(value,
                                   'app',
                                   datastore_errors.BadArgumentError)
    return value
class TransactionalConnection(BaseConnection):
"""A connection specific to one transaction.
It is possible to pass the transaction and entity group to the
constructor, but typically the transaction is lazily created by
_get_transaction() when the first operation is started.
"""
# Transaction states
OPEN = 0 # Initial state.
COMMIT_IN_FLIGHT = 1 # A commit has started but not finished.
FAILED = 2 # Commit attempt failed.
CLOSED = 3 # Commit succeeded or rollback initiated.
@_positional(1)
def __init__(self,
             adapter=None, config=None, transaction=None, entity_group=None,
             _api_version=_DATASTORE_V3, previous_transaction=None,
             mode=TransactionMode.UNKNOWN):
  """Constructor.

  All arguments should be specified as keyword arguments.

  Args:
    adapter: Optional AbstractAdapter subclass instance;
      default IdentityAdapter.
    config: Optional Configuration object.
    transaction: Optional datastore_db.Transaction object.
    entity_group: Deprecated, do not use.
    previous_transaction: Optional datastore_db.Transaction object
      representing the transaction being reset.
    mode: Optional datastore_db.TransactionMode representing the transaction
      mode.

  Raises:
    datastore_errors.BadArgumentError: If previous_transaction and transaction
      are both set, or if transaction has the wrong type for the API version.
  """
  super(TransactionalConnection, self).__init__(adapter=adapter,
                                                config=config,
                                                _api_version=_api_version)
  self._state = TransactionalConnection.OPEN
  if previous_transaction is not None and transaction is not None:
    raise datastore_errors.BadArgumentError(
        'Only one of transaction and previous_transaction should be set')
  self.__adapter = self.adapter  # Copy to new private variable.
  self.__config = self.config  # Copy to new private variable.
  if transaction is None:
    # BUG FIX: removed a dead `app = TransactionOptions.app(self.config)`
    # assignment whose result was immediately overwritten by the line below.
    app = datastore_types.ResolveAppId(TransactionOptions.app(self.config))
    # Start the transaction lazily: kick off the BeginTransaction RPC now,
    # resolve it the first time the `transaction` property is read.
    self.__transaction_rpc = self.async_begin_transaction(
        None, app, previous_transaction, mode)
  else:
    # v1 transactions are opaque byte strings; v3 uses Transaction pbs.
    if self._api_version == _CLOUD_DATASTORE_V1:
      txn_class = str
    else:
      txn_class = datastore_pb.Transaction
    if not isinstance(transaction, txn_class):
      raise datastore_errors.BadArgumentError(
          'Invalid transaction (%r)' % transaction)
    self.__transaction = transaction
    self.__transaction_rpc = None
  # Pending v1 transactional mutations, keyed by hashable key value;
  # flushed in a single Commit RPC.
  self.__pending_v1_upserts = {}
  self.__pending_v1_deletes = {}
@property
def finished(self):
  """True once a commit attempt or rollback has begun (state left OPEN)."""
  return self._state != TransactionalConnection.OPEN
@property
def transaction(self):
  """The current transaction.  None when state == FINISHED."""
  # Lazily resolve the BeginTransaction RPC started by the constructor
  # the first time the transaction handle is needed.
  if self.__transaction_rpc is not None:
    self.__transaction = self.__transaction_rpc.get_result()
    self.__transaction_rpc = None
  return self.__transaction
def _set_request_transaction(self, request):
  """Set the current transaction on a request.

  This accesses the transaction property.  The transaction object
  returned is both set as the transaction field on the request
  object and returned.

  Args:
    request: A protobuf with a transaction field.

  Returns:
    An object representing a transaction or None.

  Raises:
    datastore_errors.BadRequestError: if the transaction is already
      finished.
    ValueError: if called with a non-Cloud Datastore request when using
      Cloud Datastore.
  """
  if self.finished:
    raise datastore_errors.BadRequestError(
        'Cannot start a new operation in a finished transaction.')
  transaction = self.transaction
  if self._api_version == _CLOUD_DATASTORE_V1:
    if isinstance(request, (googledatastore.CommitRequest,
                            googledatastore.RollbackRequest)):
      request.transaction = transaction
    elif isinstance(request, (googledatastore.LookupRequest,
                              googledatastore.RunQueryRequest)):
      request.read_options.transaction = transaction
    else:
      # We need to make sure we are not trying to set the transaction on an
      # unknown request. This is most likely the TaskQueue API. Once there is
      # an external version of that API, we should populate the transaction
      # accordingly.
      raise ValueError('Cannot use Cloud Datastore V1 transactions with %s.' %
                       type(request))
    # BUG FIX: removed an unreachable duplicate
    # `request.read_options.transaction = transaction` statement that
    # followed the raise above.
  else:
    request.mutable_transaction().CopyFrom(transaction)
  return transaction
# Put operation.
def async_put(self, config, entities, extra_hook=None):
"""Transactional asynchronous Put operation.
Args:
config: A Configuration object or None. Defaults are taken from
the connection's default configuration.
entities: An iterable of user-level entity objects.
extra_hook: Optional function to be called on the result once the
RPC has completed.
Returns:
A MultiRpc object.
NOTE: If any of the entities has an incomplete key, this will
*not* patch up those entities with the complete key.
"""
if self._api_version != _CLOUD_DATASTORE_V1:
# v3 async_put() supports transactional and non-transactional calls.
return super(TransactionalConnection, self).async_put(
config, entities, extra_hook)
v1_entities = [self.adapter.entity_to_pb_v1(entity)
for entity in entities]
# Allocate the IDs now and cache the puts until commit() is called.
v1_req = googledatastore.AllocateIdsRequest()
for v1_entity in v1_entities:
if not datastore_pbs.is_complete_v1_key(v1_entity.key):
v1_req.keys.add().CopyFrom(v1_entity.key)
user_data = v1_entities, extra_hook
service_name = _CLOUD_DATASTORE_V1
if not v1_req.keys:
# We don't need to do any work. Create a fake RPC.
service_name = _NOOP_SERVICE
return self._make_rpc_call(config, 'AllocateIds', v1_req,
googledatastore.AllocateIdsResponse(),
get_result_hook=self.__v1_put_allocate_ids_hook,
user_data=user_data,
service_name=service_name)
def __v1_put_allocate_ids_hook(self, rpc):
  """Internal get_result_hook for the v1 AllocateIds call issued by put."""
  self.check_rpc_success(rpc)
  allocated_keys = list(rpc.response.keys)
  return self.__v1_build_put_result(allocated_keys, rpc.user_data)
def __v1_build_put_result(self, v1_allocated_keys, user_data):
  """Internal method that builds the result of a put operation.

  Converts the results from a v1 AllocateIds operation to a list of user-level
  key objects, and records each entity as a pending upsert to be sent with
  the eventual Commit RPC.

  Args:
    v1_allocated_keys: a list of googledatastore.Keys that have been allocated
    user_data: a tuple consisting of:
      - a list of googledatastore.Entity objects
      - an optional extra_hook
  """
  v1_entities, extra_hook = user_data
  keys = []
  # idx walks v1_allocated_keys in lockstep with the incomplete-key
  # entities, which appear in the same order they were requested.
  idx = 0
  for v1_entity in v1_entities:
    # Copy the entity because (1) we need to put the allocated key in it
    # without affecting the user-level Key object and (2) subsequent
    # local edits to the user-level Entity object should not affect the
    # mutation (unless put() is called again). This defensive copy is only
    # actually needed if the adapter does not return a new object when
    # converting (e.g. IdentityAdapter or an adapter that returns proxies).
    v1_entity = copy.deepcopy(v1_entity)
    if not datastore_pbs.is_complete_v1_key(v1_entity.key):
      v1_entity.key.CopyFrom(v1_allocated_keys[idx])
      idx += 1
    hashable_key = datastore_types.ReferenceToKeyValue(v1_entity.key)

    # Cancel any pending deletes for this entity.
    self.__pending_v1_deletes.pop(hashable_key, None)

    # TODO(user): Track size, count, number of entity groups, and raise
    # an error if limits are exceeded.
    self.__pending_v1_upserts[hashable_key] = v1_entity
    keys.append(self.adapter.pb_v1_to_key(copy.deepcopy(v1_entity.key)))

  # NOTE: We don't patch up the keys of entities that were written
  # with an incomplete key here; that's up to the extra_hook.
  if extra_hook:
    keys = extra_hook(keys)
  return keys
# Delete operation.
def async_delete(self, config, keys, extra_hook=None):
  """Transactional asynchronous Delete operation.

  For Cloud Datastore v1 the deletes are only buffered locally (to be sent
  with the commit); for v3 the call is delegated to the base class.

  Args:
    config: A Configuration object or None. Defaults are taken from
      the connection's default configuration.
    keys: An iterable of user-level key objects.
    extra_hook: Optional function to be called once the RPC has completed.

  Returns:
    A MultiRpc object.
  """
  if self._api_version != _CLOUD_DATASTORE_V1:
    # v3 async_delete() supports transactional and non-transactional calls.
    return super(TransactionalConnection, self).async_delete(config,
                                                             keys,
                                                             extra_hook)
  v1_keys = [self.__adapter.key_to_pb_v1(key) for key in keys]
  for key in v1_keys:
    hashable_key = datastore_types.ReferenceToKeyValue(key)
    # Cancel any pending upserts for this entity.
    self.__pending_v1_upserts.pop(hashable_key, None)
    # TODO(user): Track size, count, number of entity groups, and raise
    # an error if limits are exceeded.
    self.__pending_v1_deletes[hashable_key] = key
  # No need to execute an RPC, but we're still obligated to return one, so
  # we use the NOOP service.
  return self._make_rpc_call(config, 'Commit', None,
                             googledatastore.CommitResponse(),
                             get_result_hook=self.__v1_delete_hook,
                             user_data=extra_hook,
                             service_name=_NOOP_SERVICE)
def __v1_delete_hook(self, rpc):
  """Invoke the caller's extra_hook (stored in user_data), if one was given.

  Deletes are buffered locally for v1, so there is no result to convert;
  the hook is simply notified with None.
  """
  hook = rpc.user_data
  if hook:
    hook(None)
# Commit operation.
def commit(self):
  """Synchronous Commit operation.

  Returns:
    True if the transaction was successfully committed. False if
    the backend reported a concurrent transaction error.
  """
  # TODO(user): Without this create_rpc() call,
  # testTransactionRetries() (in datastore_unittest.py) fails. Why?
  rpc = self._create_rpc(service_name=self._api_version)
  rpc = self.async_commit(rpc)
  # async_commit() returns None when there was no transaction to commit;
  # treat that as a trivially successful commit.
  if rpc is None:
    return True
  return rpc.get_result()
def async_commit(self, config):
  """Asynchronous Commit operation.

  Transitions the connection OPEN -> COMMIT_IN_FLIGHT and, for Cloud
  Datastore v1, flushes all locally buffered upserts/deletes into the
  CommitRequest before issuing the RPC.

  Args:
    config: A Configuration object or None. Defaults are taken from
      the connection's default configuration.

  Returns:
    A MultiRpc object, or None when there is no transaction to commit.
  """
  self.wait_for_all_pending_rpcs()
  if self._state != TransactionalConnection.OPEN:
    raise datastore_errors.BadRequestError('Transaction is already finished.')
  self._state = TransactionalConnection.COMMIT_IN_FLIGHT
  transaction = self.transaction
  if transaction is None:
    self._state = TransactionalConnection.CLOSED
    return None  # Neither True nor False.
  if self._api_version == _CLOUD_DATASTORE_V1:
    req = googledatastore.CommitRequest()
    req.transaction = transaction
    if Configuration.force_writes(config, self.__config):
      self.__force(req)
    # Move all pending mutations into the request.
    for entity in self.__pending_v1_upserts.itervalues():
      mutation = req.mutations.add()
      mutation.upsert.CopyFrom(entity)
    for key in self.__pending_v1_deletes.itervalues():
      mutation = req.mutations.add()
      mutation.delete.CopyFrom(key)
    # Transactional connections cannot be reused, but clear the cached
    # mutations anyway out of an abundance of caution.
    self.__pending_v1_upserts.clear()
    self.__pending_v1_deletes.clear()
    resp = googledatastore.CommitResponse()
  else:
    # v3: the transaction handle itself is the commit request.
    req = transaction
    resp = datastore_pb.CommitResponse()
  return self._make_rpc_call(config, 'Commit', req, resp,
                             get_result_hook=self.__commit_hook,
                             service_name=self._api_version)
def __commit_hook(self, rpc):
  """Internal method used as get_result_hook for Commit.

  Returns True on success, False on a concurrent-transaction conflict, and
  re-raises any other backend error as a datastore_errors.Error.
  """
  try:
    rpc.check_success()
    self._state = TransactionalConnection.CLOSED
    self.__transaction = None
  # Python 2 except syntax, matching the rest of this module.
  except apiproxy_errors.ApplicationError, err:
    self._state = TransactionalConnection.FAILED
    if err.application_error == datastore_pb.Error.CONCURRENT_TRANSACTION:
      # Contention is an expected outcome: report it as False, not an error.
      return False
    else:
      raise _ToDatastoreError(err)
  else:
    return True
# Rollback operation.
def rollback(self):
  """Synchronous Rollback operation."""
  pending = self.async_rollback(None)
  # async_rollback() returns None when there was nothing to roll back.
  return None if pending is None else pending.get_result()
def async_rollback(self, config):
  """Asynchronous Rollback operation.

  Valid from either the OPEN or FAILED state; closes the connection.

  Args:
    config: A Configuration object or None. Defaults are taken from
      the connection's default configuration.

  Returns:
    A MultiRpc object, or None when there is no transaction to roll back.
  """
  self.wait_for_all_pending_rpcs()
  if not (self._state == TransactionalConnection.OPEN
          or self._state == TransactionalConnection.FAILED):
    raise datastore_errors.BadRequestError(
        'Cannot rollback transaction that is neither OPEN or FAILED state.')
  transaction = self.transaction
  if transaction is None:
    return None
  # Mark closed before the RPC: the connection is unusable either way.
  self._state = TransactionalConnection.CLOSED
  self.__transaction = None
  if self._api_version == _CLOUD_DATASTORE_V1:
    req = googledatastore.RollbackRequest()
    req.transaction = transaction
    resp = googledatastore.RollbackResponse()
  else:
    # v3: the transaction handle itself is the rollback request.
    req = transaction
    resp = api_base_pb.VoidProto()
  return self._make_rpc_call(config, 'Rollback', req, resp,
                             get_result_hook=self.__rollback_hook,
                             service_name=self._api_version)
def __rollback_hook(self, rpc):
  """Internal method used as get_result_hook for Rollback.

  Only surfaces RPC failures; a successful rollback produces no result.
  """
  self.check_rpc_success(rpc)
# Names of the environment variables consulted by _CreateDefaultConnection()
# when deciding whether (and how) to connect to Cloud Datastore v1.
_DATASTORE_APP_ID_ENV = 'DATASTORE_APP_ID'
_DATASTORE_PROJECT_ID_ENV = 'DATASTORE_PROJECT_ID'
_DATASTORE_ADDITIONAL_APP_IDS_ENV = 'DATASTORE_ADDITIONAL_APP_IDS'
_DATASTORE_USE_PROJECT_ID_AS_APP_ID_ENV = 'DATASTORE_USE_PROJECT_ID_AS_APP_ID'
# pylint: disable=protected-access,invalid-name
def _CreateDefaultConnection(connection_fn, **kwargs):
  """Creates a new connection to Datastore.

  Uses environment variables to determine if the connection should be made
  to Cloud Datastore v1 or to Datastore's private App Engine API.
  If DATASTORE_PROJECT_ID exists, connect to Cloud Datastore v1. In this case,
  either DATASTORE_APP_ID or DATASTORE_USE_PROJECT_ID_AS_APP_ID must be set to
  indicate what the environment's application should be.

  Args:
    connection_fn: The function to use to create the connection.
    **kwargs: Addition arguments to pass to the connection_fn.

  Raises:
    ValueError: If DATASTORE_PROJECT_ID is set but DATASTORE_APP_ID or
      DATASTORE_USE_PROJECT_ID_AS_APP_ID is not. If DATASTORE_APP_ID doesn't
      resolve to DATASTORE_PROJECT_ID. If DATASTORE_APP_ID doesn't match
      an existing APPLICATION_ID.

  Returns:
    the connection object returned from connection_fn.
  """
  datastore_app_id = os.environ.get(_DATASTORE_APP_ID_ENV, None)
  datastore_project_id = os.environ.get(_DATASTORE_PROJECT_ID_ENV, None)
  if datastore_app_id or datastore_project_id:
    # We will create a Cloud Datastore context.
    app_id_override = bool(os.environ.get(
        _DATASTORE_USE_PROJECT_ID_AS_APP_ID_ENV, False))
    if not datastore_app_id and not app_id_override:
      raise ValueError('Could not determine app id. To use project id (%s) '
                       'instead, set %s=true. This will affect the '
                       'serialized form of entities and should not be used '
                       'if serialized entities will be shared between '
                       'code running on App Engine and code running off '
                       'App Engine. Alternatively, set %s=<app id>.'
                       % (datastore_project_id,
                          _DATASTORE_USE_PROJECT_ID_AS_APP_ID_ENV,
                          _DATASTORE_APP_ID_ENV))
    elif datastore_app_id:
      if app_id_override:
        # Both an explicit app id and the override flag is ambiguous.
        raise ValueError('App id was provided (%s) but %s was set to true. '
                         'Please unset either %s or %s.' %
                         (datastore_app_id,
                          _DATASTORE_USE_PROJECT_ID_AS_APP_ID_ENV,
                          _DATASTORE_APP_ID_ENV,
                          _DATASTORE_USE_PROJECT_ID_AS_APP_ID_ENV))
      elif datastore_project_id:
        # Project id and app id provided, make sure they are the same.
        id_resolver = datastore_pbs.IdResolver([datastore_app_id])
        if (datastore_project_id !=
            id_resolver.resolve_project_id(datastore_app_id)):
          raise ValueError('App id "%s" does not match project id "%s".'
                           % (datastore_app_id, datastore_project_id))
    datastore_app_id = datastore_app_id or datastore_project_id
    additional_app_str = os.environ.get(_DATASTORE_ADDITIONAL_APP_IDS_ENV, '')
    # Lazily strip whitespace around each comma-separated app id.
    additional_apps = (app.strip() for app in additional_app_str.split(','))
    return _CreateCloudDatastoreConnection(connection_fn,
                                           datastore_app_id,
                                           additional_apps,
                                           kwargs)
  # Neither env var set: fall back to the private App Engine API.
  return connection_fn(**kwargs)
# pylint: disable=protected-access,invalid-name
def _CreateCloudDatastoreConnection(connection_fn,
                                    app_id,
                                    external_app_ids,
                                    kwargs):
  """Creates a new context to connect to a remote Cloud Datastore instance.

  This should only be used outside of Google App Engine.

  Args:
    connection_fn: A connection function which accepts both an _api_version
      and an _id_resolver argument.
    app_id: The application id to connect to. This differs from the project
      id as it may have an additional prefix, e.g. "s~" or "e~".
    external_app_ids: A list of apps that may be referenced by data in your
      application. For example, if you are connected to s~my-app and store keys
      for s~my-other-app, you should include s~my-other-app in the external_apps
      list.
    kwargs: The additional kwargs to pass to the connection_fn.

  Raises:
    ValueError: if the app_id provided doesn't match the current environment's
      APPLICATION_ID.

  Returns:
    An ndb.Context that can connect to a Remote Cloud Datastore. You can use
    this context by passing it to ndb.set_context.
  """
  # Late import to avoid circular deps.
  # pylint: disable=g-import-not-at-top
  from googlecloudsdk.third_party.appengine.datastore import cloud_datastore_v1_remote_stub
  if not datastore_pbs._CLOUD_DATASTORE_ENABLED:
    raise datastore_errors.BadArgumentError(
        datastore_pbs.MISSING_CLOUD_DATASTORE_MESSAGE)
  current_app_id = os.environ.get('APPLICATION_ID', None)
  if current_app_id and current_app_id != app_id:
    # TODO(user): We should support this so users can connect to different
    # applications.
    raise ValueError('Cannot create a Cloud Datastore context that connects '
                     'to an application (%s) that differs from the application '
                     'already connected to (%s).' % (app_id, current_app_id))
  os.environ['APPLICATION_ID'] = app_id
  id_resolver = datastore_pbs.IdResolver((app_id,) + tuple(external_app_ids))
  project_id = id_resolver.resolve_project_id(app_id)
  endpoint = googledatastore.helper.get_project_endpoint_from_env(project_id)
  datastore = googledatastore.Datastore(
      project_endpoint=endpoint,
      credentials=googledatastore.helper.get_credentials_from_env())
  kwargs['_api_version'] = _CLOUD_DATASTORE_V1
  kwargs['_id_resolver'] = id_resolver
  conn = connection_fn(**kwargs)
  # If necessary, install the stubs. Each RegisterStub call fails when a stub
  # is already registered; that is expected and safely ignored. The handlers
  # are narrowed from bare `except:` to `except Exception:` so that
  # KeyboardInterrupt/SystemExit are no longer swallowed here.
  try:
    stub = cloud_datastore_v1_remote_stub.CloudDatastoreV1RemoteStub(datastore)
    apiproxy_stub_map.apiproxy.RegisterStub(_CLOUD_DATASTORE_V1,
                                            stub)
  except Exception:
    pass  # The stub is already installed.
  # TODO(user): Ensure the current stub is connected to the right project.
  # Install a memcache and taskqueue stub which throws on everything.
  try:
    apiproxy_stub_map.apiproxy.RegisterStub('memcache', _ThrowingStub())
  except Exception:
    pass  # The stub is already installed.
  try:
    apiproxy_stub_map.apiproxy.RegisterStub('taskqueue', _ThrowingStub())
  except Exception:
    pass  # The stub is already installed.
  return conn
class _ThrowingStub(object):
  """A Stub implementation which always throws a NotImplementedError."""

  # pylint: disable=invalid-name
  def CreateRPC(self):
    """Return a fresh RPC object bound to this stub."""
    return apiproxy_rpc.RPC(stub=self)

  # pylint: disable=invalid-name
  def MakeSyncCall(self, service, call, request, response):
    """Reject every call: the service needs the Remote API to function."""
    message = 'In order to use %s.%s you must install the Remote API.' % (
        service, call)
    raise NotImplementedError(message)
# TODO(user): Consider moving these to datastore_errors.py.
# TODO(user): Write unittests for these?
def _ToDatastoreError(err):
  """Converts an apiproxy.ApplicationError to an error in datastore_errors.

  Args:
    err: An apiproxy.ApplicationError object.

  Returns:
    An instance of a subclass of datastore_errors.Error.
  """
  code = err.application_error
  detail = err.error_detail
  return _DatastoreExceptionFromErrorCodeAndDetail(code, detail)
# Maps v3 datastore_pb.Error codes to user-level exception classes.
_DATASTORE_EXCEPTION_CLASSES = {
    datastore_pb.Error.BAD_REQUEST: datastore_errors.BadRequestError,
    datastore_pb.Error.CONCURRENT_TRANSACTION: datastore_errors.TransactionFailedError,
    datastore_pb.Error.INTERNAL_ERROR: datastore_errors.InternalError,
    datastore_pb.Error.NEED_INDEX: datastore_errors.NeedIndexError,
    datastore_pb.Error.TIMEOUT: datastore_errors.Timeout,
    datastore_pb.Error.BIGTABLE_ERROR: datastore_errors.Timeout,
    datastore_pb.Error.COMMITTED_BUT_STILL_APPLYING: datastore_errors.CommittedButStillApplying,
    datastore_pb.Error.CAPABILITY_DISABLED: apiproxy_errors.CapabilityDisabledError,
}

# Maps Cloud Datastore v1 canonical error codes to exception classes.
# Populated only when the Cloud Datastore client library is importable.
_CLOUD_DATASTORE_EXCEPTION_CLASSES = {}

if _CLOUD_DATASTORE_ENABLED:
  _CLOUD_DATASTORE_EXCEPTION_CLASSES = {
      googledatastore.code_pb2.INVALID_ARGUMENT: datastore_errors.BadRequestError,
      googledatastore.code_pb2.ABORTED: datastore_errors.TransactionFailedError,
      googledatastore.code_pb2.FAILED_PRECONDITION:
          # Could also indicate SAFE_TIME_TOO_OLD.
          datastore_errors.NeedIndexError,
      googledatastore.code_pb2.DEADLINE_EXCEEDED: datastore_errors.Timeout,
      googledatastore.code_pb2.PERMISSION_DENIED: datastore_errors.BadRequestError,
      googledatastore.code_pb2.UNAVAILABLE: apiproxy_errors.RPCFailedError,
      googledatastore.code_pb2.RESOURCE_EXHAUSTED: apiproxy_errors.OverQuotaError,
      googledatastore.code_pb2.INTERNAL:
          # Could also indicate COMMITTED_BUT_STILL_APPLYING
          datastore_errors.InternalError,
  }
def _DatastoreExceptionFromErrorCodeAndDetail(error, detail):
  """Converts a datastore_pb.Error into a datastore_errors.Error.

  Args:
    error: A member of the datastore_pb.Error enumeration.
    detail: A string providing extra details about the error.

  Returns:
    An instance of a subclass of datastore_errors.Error.
  """
  # Unknown codes degrade to the generic datastore error base class.
  exception_class = _DATASTORE_EXCEPTION_CLASSES.get(
      error, datastore_errors.Error)
  return exception_class() if detail is None else exception_class(detail)
def _DatastoreExceptionFromCanonicalErrorCodeAndDetail(error, detail):
  """Converts a canonical error code into a datastore_errors.Error.

  Args:
    error: A canonical error code from google.rpc.code.
    detail: A string providing extra details about the error.

  Returns:
    An instance of a subclass of datastore_errors.Error.
  """
  # Unknown canonical codes degrade to InternalError.
  exception_class = _CLOUD_DATASTORE_EXCEPTION_CLASSES.get(
      error, datastore_errors.InternalError)
  return exception_class() if detail is None else exception_class(detail)
| 37.245049 | 101 | 0.689103 |
ace6cd3aa9327655f0b1ccfbe57037bd452103b0 | 6,948 | py | Python | backend/tst_dani_prod_rel_31446/settings.py | crowdbotics-apps/tst-dani-prod-rel-31446 | 22c2c15b453b6f4546f3ab590d438d9c11ea70f1 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/tst_dani_prod_rel_31446/settings.py | crowdbotics-apps/tst-dani-prod-rel-31446 | 22c2c15b453b6f4546f3ab590d438d9c11ea70f1 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/tst_dani_prod_rel_31446/settings.py | crowdbotics-apps/tst-dani-prod-rel-31446 | 22c2c15b453b6f4546f3ab590d438d9c11ea70f1 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | """
Django settings for tst_dani_prod_rel_31446 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
from modules.manifest import get_modules
# All runtime configuration is read through django-environ.
env = environ.Env()

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")

ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1

# Trust the proxy's forwarded-proto header when deciding if a request is HTTPS.
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites'
]

LOCAL_APPS = [
    'home',
    'users.apps.UsersConfig',
]

THIRD_PARTY_APPS = [
    'rest_framework',
    'rest_framework.authtoken',
    'rest_auth',
    'rest_auth.registration',
    'bootstrap4',
    'allauth',
    'allauth.account',
    'allauth.socialaccount',
    'allauth.socialaccount.providers.google',
    'django_extensions',
    'drf_yasg',
    'storages',
]

# Apps contributed by installed Crowdbotics modules (see modules.manifest).
MODULES_APPS = get_modules()

INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS + MODULES_APPS

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'tst_dani_prod_rel_31446.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # web_build holds the compiled frontend templates.
        'DIRS': [os.path.join(BASE_DIR, 'web_build')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'tst_dani_prod_rel_31446.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# A DATABASE_URL env var (e.g. on Heroku) overrides the sqlite default.
if env.str("DATABASE_URL", default=None):
    DATABASES = {
        'default': env.db()
    }

# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/

STATIC_URL = '/static/'

# WhiteNoise serves static files directly from the app process.
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']

AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
    'allauth.account.auth_backends.AuthenticationBackend'
)

STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'

# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)

REST_AUTH_SERIALIZERS = {
    # Replace password reset serializer to fix 500 error
    "PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
    # Use custom serializer that has no username and matches web signup
    "REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}

# Custom user model
AUTH_USER_MODEL = "users.User"

# Outbound email via SendGrid SMTP.
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True

# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")

# S3 media storage is enabled only when all four credentials are present.
USE_S3 = (
    AWS_ACCESS_KEY_ID and
    AWS_SECRET_ACCESS_KEY and
    AWS_STORAGE_BUCKET_NAME and
    AWS_STORAGE_REGION
)

if USE_S3:
    AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
    AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
    AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
    AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
    AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
    DEFAULT_FILE_STORAGE = env.str(
        "DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
    )

MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')

# Swagger settings for api docs
SWAGGER_SETTINGS = {
    "DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}

if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
    # output email to console instead of sending
    if not DEBUG:
        logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
    EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
ace6cd6719ae36f983aa36701cd3f19bfb564765 | 5,538 | py | Python | Backend/v1/item/views.py | yaya-BL/Restaurants_self-order_app | c6ea560e41ecc2e7a667d518ec815a2c14c68e88 | [
"MIT"
] | null | null | null | Backend/v1/item/views.py | yaya-BL/Restaurants_self-order_app | c6ea560e41ecc2e7a667d518ec815a2c14c68e88 | [
"MIT"
] | 15 | 2021-01-23T02:07:01.000Z | 2021-02-01T04:54:01.000Z | Backend/v1/item/views.py | yaya-BL/Restaurants_self-order_app | c6ea560e41ecc2e7a667d518ec815a2c14c68e88 | [
"MIT"
] | 1 | 2021-01-11T06:15:49.000Z | 2021-01-11T06:15:49.000Z | # Django imports
from django.shortcuts import render
from django.http import JsonResponse
from django.utils import timezone
import datetime
# DRF imports
from rest_framework import status
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from rest_framework.decorators import api_view, permission_classes
from rest_framework import generics
# Serializer Class imports
from .serializers import CategorySerializer, ItemSerializer
# Model Class imports
from .models import Category, Item
from v1.shop.models import Shop
#importing pagination
from .paginations import SmallOffsetPagination, SmallPagesPagination
# APIs for category and Item
@api_view(['GET'])
def apiOverview(request):
    """Return a human-readable map of the item/category API endpoints."""
    endpoints = {
        'Category List': '/category-list',
        'Category Detail View': '/category-detail/<str:pk>',
        'Category Create': '/category-create',
        'Category Update': '/category-update/<str:pk>',
        'Category Delete': '/category-delete/<str:pk>',
        'ItemList': '/item-list',
        'Item Detail View': '/item-detail/<str:pk>',
        'Item Create': '/item-create',
        'Item Update': '/item-update/<str:pk>',
        'Item Delete': '/item-delete/<str:pk>',
    }
    return Response(endpoints)
# methods for category
# categoryList
class CategoryList(generics.ListAPIView):
    # Read-only, paginated list of every category; authentication required.
    queryset = Category.objects.all()
    serializer_class = CategorySerializer
    permission_classes = [IsAuthenticated]
    pagination_class = SmallPagesPagination
# categoryDetail
@api_view(['GET'])
@permission_classes((IsAuthenticated,))
def categoryDetail(request, pk):
    """Return a single category by primary key.

    Responds with 404 when the id is unknown instead of letting
    Category.DoesNotExist propagate as a 500, matching itemDetail.
    """
    try:
        category = Category.objects.get(id=pk)
    except Category.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    serializer = CategorySerializer(category, many=False)
    return Response(serializer.data)
# categoryCreate
@api_view(['POST'])
@permission_classes((IsAuthenticated,))
def categoryCreate(request):
    """Create a category owned by the requesting user and their shop.

    Returns the serialized category with 201 on success, or the
    validation errors with 400. (The previous `data` dict was populated
    but never returned — removed as dead code.)
    """
    serializer = CategorySerializer(data=request.data)
    if serializer.is_valid():
        # Owner and shop come from the authenticated request, not the payload.
        serializer.save(user=request.user, shop=request.user.shop)
        return Response(serializer.data, status=status.HTTP_201_CREATED)
    return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# categoryUpdate
@api_view(['POST'])
@permission_classes((IsAuthenticated,))
def categoryUpdate(request, pk):
    """Update a category; only its owner may edit it.

    Returns the updated category on success, 404 for an unknown id, and
    the validation errors with 400 for a bad payload.
    """
    try:
        category = Category.objects.get(id=pk)
    except Category.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if category.user != request.user:
        return Response({'Response': "You dont have Permission to edit this data."})
    serializer = CategorySerializer(instance=category, data=request.data)
    if serializer.is_valid():
        serializer.save()
        return Response(serializer.data)
    # Previously an invalid payload fell through to a 200 response echoing
    # the submitted data without saving; surface the errors instead.
    return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# categoryDelete
@api_view(['DELETE'])
@permission_classes((IsAuthenticated,))
def categoryDelete(request, pk):
    """Delete a category; only its owner may remove it."""
    category = Category.objects.get(id=pk)
    if category.user != request.user:
        return Response({'Response': "You dont have Permission to delete this data."})
    category.delete()
    return Response('Category Has Been Deleted!')
# Item List
@api_view(['GET'])
@permission_classes((IsAuthenticated,))
def itemList(request):
    """List every item, newest first (function-based twin of ItemList)."""
    try:
        items = Item.objects.all().order_by('-id')
    except Item.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == "GET":
        return Response(ItemSerializer(items, many=True).data)
class ItemList(generics.ListAPIView):
    # Read-only, paginated list of every item; authentication required.
    # NOTE(review): overlaps with the function-based itemList view above —
    # confirm which one the URLconf actually routes to.
    queryset = Item.objects.all()
    serializer_class = ItemSerializer
    permission_classes = [IsAuthenticated]
    pagination_class = SmallPagesPagination
# Single Item Details
@api_view(['GET'])
@permission_classes((IsAuthenticated,))
def itemDetail(request, pk):
    """Return a single item by primary key, or 404 when it does not exist."""
    try:
        item = Item.objects.get(id=pk)
    except Item.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == "GET":
        serialized = ItemSerializer(item, many=False)
        return Response(serialized.data)
# Item Create
@api_view(['POST'])
@permission_classes((IsAuthenticated,))
def itemCreate(request):
if request.method == "POST":
shop=request.user.shop
print(shop)
serializer = ItemSerializer(data=request.data)
data={}
if serializer.is_valid():
serializer.save(user=request.user, shop=request.user.shop, date_added=datetime.datetime.now())
data["success"]="Create Successful"
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# Item Update
@api_view(['POST'])
@permission_classes((IsAuthenticated,))
def itemUpdate(request, pk):
try:
item = Item.objects.get(id=pk)
except Item.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == "POST":
user = request.user
if item.user != user:
return Response({'Response':"You dont have Permission to edit this data."})
serializer = ItemSerializer(instance=item, data=request.data)
data={}
if serializer.is_valid():
serializer.save()
data["success"]="Item Has Been Updated"
return Response (data=data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# Item Delete
@api_view(['DELETE'])
@permission_classes((IsAuthenticated,))
def itemDelete(request, pk):
try:
item = Item.objects.get(id=pk)
except Item.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == "DELETE":
user = request.user
if item.user != user:
return Response({'Response':"You dont have Permission to delete this data."})
item.delete()
return Response('Category Has Been Deleted!') | 32.197674 | 100 | 0.741423 |
ace6ce2e5075fc76e288bdaec94dfad9a6702509 | 1,363 | py | Python | GSE62944_Normal_TPM/parse.py | srp33/Wishes | c3f4e9ed9e3d30349b998115fcdd08f9340969d2 | [
"MIT"
] | 1 | 2017-11-10T02:19:58.000Z | 2017-11-10T02:19:58.000Z | GSE62944_Normal_TPM/parse.py | srp33/Wishes | c3f4e9ed9e3d30349b998115fcdd08f9340969d2 | [
"MIT"
] | 90 | 2017-06-23T17:26:18.000Z | 2021-12-08T17:27:30.000Z | GSE62944_Normal_TPM/parse.py | srp33/Wishes | c3f4e9ed9e3d30349b998115fcdd08f9340969d2 | [
"MIT"
] | 23 | 2017-06-16T14:55:26.000Z | 2018-10-09T15:49:32.000Z | import sys, gzip
import numpy as np
# Command-line arguments: paths to the inputs and outputs.
PatientCancerType = sys.argv[1]   # gzipped TSV: patient id -> cancer abbreviation
NormalTPM = sys.argv[2]           # gzipped TSV expression matrix (genes x samples)
dataOutFile = sys.argv[3]         # plain-text TSV output for expression data
metadataOutFile = sys.argv[4]     # plain-text TSV output for sample metadata
print(metadataOutFile)            # NOTE(review): looks like a debug leftover
namesToAbbreviations = sys.argv[5]  # TSV mapping cancer names <-> abbreviations

## Read the namesToAbbreviation
# Maps abbreviation (column 2) -> full cancer name (column 1); the header
# line is skipped. Column layout assumed from usage — TODO confirm.
abbvToNamesDict = {}
with open(namesToAbbreviations, 'r') as f:
    f.readline()
    for line in f:
        lineList = line.strip('\n').split('\t')
        abbvToNamesDict[lineList[2]] = lineList[1]

# This code takes the new transposedNormalTPM and addes the PatientCancerType to the second column and writes it to the outFile data.tsv.gz
# Maps patient/sample id -> cancer-type abbreviation.
patientIDToCancerDict = {}
with gzip.open(PatientCancerType, 'r') as f:
    for line in f:
        lineList = line.decode().strip('\n').split('\t')
        patientIDToCancerDict[lineList[0]] = lineList[1]

with gzip.open(NormalTPM, 'r') as iF:
    # Load the whole matrix as strings; it is transposed below so that
    # samples become rows.
    data = np.genfromtxt(iF, delimiter='\t', dtype=str)
with open(dataOutFile, 'w') as ofData:
    with open(metadataOutFile, 'w') as ofMeta:
        firstLine = data.T[0, :]
        ofMeta.write(("Sample\tVariable\tValue\n"))
        ofData.write(("Sample\t" + '\t'.join(firstLine[1:]) + '\n'))
        # One row per sample: metadata gets the resolved cancer-type name,
        # data gets the sample's expression values unchanged.
        for lineList in data.T[1:, :]:
            ofMeta.write((lineList[0] + "\tCancer_Type\t" + abbvToNamesDict[patientIDToCancerDict[lineList[0]]] + "\n"))
            ofData.write(('\t'.join(lineList) + '\n'))
ace6ce331d53926fff235bc943062408d60a000a | 2,616 | py | Python | tests/test_legiskenya.py | amrosnik/legiscrapor | 79415a1276345dd9f3d309512ce3592f87b44a3d | [
"MIT"
] | null | null | null | tests/test_legiskenya.py | amrosnik/legiscrapor | 79415a1276345dd9f3d309512ce3592f87b44a3d | [
"MIT"
] | 2 | 2021-02-02T03:20:29.000Z | 2021-02-16T02:16:13.000Z | tests/test_legiskenya.py | amrosnik/legiscrapor | 79415a1276345dd9f3d309512ce3592f87b44a3d | [
"MIT"
] | 1 | 2021-02-02T02:28:36.000Z | 2021-02-02T02:28:36.000Z | import pytest
from legiscrapor.legiskenya import legisKenya
import os
#### UNIT TESTS FOR LEGISKENYA CLASS ####
## The following are unit tests
## for the LegisKenya methods.
## As noted in other unit test files,
## please customize the customize_me.txt file
## prior to running these tests
## to ensure they actually work.
@pytest.fixture
def kl_web():
    '''Returns a LegisKenya instance'''
    # Fresh scraper per test; tests that open a session call teardown() themselves.
    return legisKenya()
def test_language(kl_web):
    # The Kenya scraper should default to English.
    assert kl_web.language == "English"
def test_country(kl_web):
    # The scraper should identify its target country.
    assert kl_web.country == "Kenya"
def test_search_laws(kl_web, capfd):
    # Integration test: drives a live search against kenyalaw.org, so it
    # needs network access and a properly filled-in customize_me.txt.
    # NOTE(review): `capfd` is requested but never used — presumably kept to
    # capture scraper output; confirm before removing.
    kl_web.read_inputs("./src/legiscrapor/data/customize_me.txt", notTesting=True)
    k = 'climate'
    hrefs = kl_web.search_laws(k)
    kl_web.teardown()
    # Expected results are pinned to the site's state at authoring time and
    # will drift as new acts are published.
    assert len(hrefs) == 19
    hrefs.sort()
    links = ['http://kenyalaw.org:8181/exist/kenyalex/actview.xql?actid=CAP.%20198&term=climate','http://kenyalaw.org:8181/exist/kenyalex/actview.xql?actid=CAP.%20326&term=climate','http://kenyalaw.org:8181/exist/kenyalex/actview.xql?actid=No.%2011%20of%202016&term=climate','http://kenyalaw.org:8181/exist/kenyalex/actview.xql?actid=No.%2013%20of%202019&term=climate','http://kenyalaw.org:8181/exist/kenyalex/actview.xql?actid=No.%2014%20of%202019&term=climate','http://kenyalaw.org:8181/exist/kenyalex/actview.xql?actid=No.%2016%20of%202013&term=climate','http://kenyalaw.org:8181/exist/kenyalex/actview.xql?actid=No.%2019%20of%202011&term=climate','http://kenyalaw.org:8181/exist/kenyalex/actview.xql?actid=No.%202%20of%202000&term=climate','http://kenyalaw.org:8181/exist/kenyalex/actview.xql?actid=No.%202%20of%202009&term=climate','http://kenyalaw.org:8181/exist/kenyalex/actview.xql?actid=No.%2021%20of%202017&term=climate','http://kenyalaw.org:8181/exist/kenyalex/actview.xql?actid=No.%2024%20of%202011&term=climate','http://kenyalaw.org:8181/exist/kenyalex/actview.xql?actid=No.%2026%20of%202015&term=climate','http://kenyalaw.org:8181/exist/kenyalex/actview.xql?actid=No.%2028%20of%202011&term=climate','http://kenyalaw.org:8181/exist/kenyalex/actview.xql?actid=No.%204%20of%202006&term=climate','http://kenyalaw.org:8181/exist/kenyalex/actview.xql?actid=No.%204%20of%202016&term=climate','http://kenyalaw.org:8181/exist/kenyalex/actview.xql?actid=No.%2043%20of%202016&term=climate','http://kenyalaw.org:8181/exist/kenyalex/actview.xql?actid=No.%2047%20of%202013&term=climate','http://kenyalaw.org:8181/exist/kenyalex/actview.xql?actid=No.%206%20of%202012&term=climate','http://kenyalaw.org:8181/exist/kenyalex/actview.xql?actid=No.%208%20of%201999&term=climate']
    links.sort()
    assert hrefs == links
| 72.666667 | 1,773 | 0.758028 |
ace6ce3d1b1ae25c121e759e66d719a88e06b91e | 3,244 | py | Python | pyq/pyq.py | mindriot101/pyq | 29248ab285dfd43c5a634dc895021eacaf61d71f | [
"MIT"
] | 144 | 2016-02-08T19:46:19.000Z | 2020-07-21T12:43:12.000Z | pyq/pyq.py | mindriot101/pyq | 29248ab285dfd43c5a634dc895021eacaf61d71f | [
"MIT"
] | 6 | 2016-02-11T18:38:24.000Z | 2016-02-26T16:03:27.000Z | pyq/pyq.py | mindriot101/pyq | 29248ab285dfd43c5a634dc895021eacaf61d71f | [
"MIT"
] | 3 | 2016-02-09T09:04:25.000Z | 2016-11-27T09:38:41.000Z | import click
import os
from .astmatch import ASTMatchEngine
from pygments import highlight
from pygments.lexers.python import PythonLexer
from pygments.formatters.terminal import TerminalFormatter
@click.command()
@click.argument('selector')
@click.option('-l/--files', is_flag=True,
              help='Only print filenames containing matches.')
@click.option('--ignore-dir', multiple=True,
              help='Ignore directory.')
@click.option('-n/--no-recurse', is_flag=True, default=False,
              help='No descending into subdirectories.')
@click.option('-e/--expand', is_flag=True, default=False,
              help='Show multiple matches in the same line.')
@click.argument('path', nargs=-1)
@click.pass_context
def main(ctx, selector, path, **opts):
    """Search Python files under PATH for AST nodes matching SELECTOR."""
    m = ASTMatchEngine()
    # No paths given: search the current directory.
    if len(path) == 0:
        path = ['.']
    # Pack (ignored dirs, recurse?) for walk_files / is_dir_ignored.
    ignore_dir = (opts['ignore_dir'], not opts['n'])
    for fn in walk_files(ctx, path, ignore_dir):
        # Only Python sources are matched against the selector.
        if fn.endswith('.py'):
            display_matches(m, selector, os.path.relpath(fn), opts)
def walk_files(ctx, paths, ignore_dir):
    """Yield file paths under each entry of *paths*, honoring ignore rules.

    ``ignore_dir`` is a ``(ignored_names, recurse)`` pair forwarded to
    :func:`is_dir_ignored`.  Fails the click context if the *first* path
    does not exist; later nonexistent paths are silently skipped
    (NOTE(review): presumably intentional, but worth confirming).
    """
    for i, p in enumerate(paths):
        p = click.format_filename(p)
        if p == '.' or os.path.isdir(p):
            for root, dirs, files in os.walk(p):
                # Skip whole subtrees rooted at an ignored directory.
                if is_dir_ignored(root.lstrip('./'), *ignore_dir):
                    continue
                for fn in files:
                    yield os.path.join(root, fn)
        elif os.path.exists(p):
            # A plain file argument is yielded as-is.
            yield p
        elif i == 0:
            ctx.fail('{}: No such file or directory'.format(p))
            break
def display_matches(m, selector, filename, opts):
    """Print matches for *selector* in *filename*.

    In ``-l`` mode only the file name is echoed (once per file);
    otherwise each matching line is syntax-highlighted and printed with
    its location.
    """
    matches = matching_lines(m.match(selector, filename), filename)
    if opts.get('l'):
        files = {}
        for line, no, _ in matches:
            if opts.get('l'):
                if filename not in files:
                    click.echo(filename)
                    # do not repeat files
                    files[filename] = True
    else:
        lines = {}
        for line, no, col in matches:
            text = highlight(line.strip(), PythonLexer(), TerminalFormatter())
            if not opts['e']:
                # Without -e, collapse multiple matches on the same line.
                if no not in lines:
                    lines[no] = True
                    click.echo('{}:{} {}'.format(filename, no, text),
                               nl=False)
            else:
                click.echo('{}:{}:{} {}'.format(filename, no, col, text),
                           nl=False)
def matching_lines(matches, filename):
    """Yield ``(line_text, lineno, col_offset)`` for each AST match.

    The previous implementation re-scanned the file from the beginning
    for every match (seek(0) + line-by-line readline), which is
    O(matches * lines).  Here the file is read once, lazily, and matched
    lines are looked up by index.  Line numbers beyond the end of the
    file are silently skipped, as before.
    """
    lines = None
    for match, lineno in matches:
        if lines is None:
            # Open the file only if there is at least one match.
            with open(filename, 'rb') as fp:
                lines = fp.readlines()
        if 1 <= lineno <= len(lines):
            text = lines[lineno - 1].decode('utf-8')
            yield text, lineno, match.col_offset
def is_dir_ignored(path, ignore_dir, recurse):
    """Return True when *path* falls inside one of the ignored directories.

    With ``recurse`` disabled only the top-level path component is
    considered; otherwise every component is checked.
    """
    parts = path.split(os.sep)
    if not recurse:
        return parts[0] in ignore_dir
    return any(part in ignore_dir for part in parts)
if __name__ == '__main__':
main()
| 27.726496 | 78 | 0.535142 |
ace6ce3d9a6cf5dd295ac7e4d114475055febf08 | 4,641 | py | Python | backend/scripts/curation/covid19cellatlas/remix.py | isabella232/corpora-data-portal | 09ed3cad3165f8b0db854b76404e0d5d0ea0b7d9 | [
"MIT"
] | null | null | null | backend/scripts/curation/covid19cellatlas/remix.py | isabella232/corpora-data-portal | 09ed3cad3165f8b0db854b76404e0d5d0ea0b7d9 | [
"MIT"
] | 1 | 2021-02-23T22:56:13.000Z | 2021-02-23T22:56:13.000Z | backend/scripts/curation/covid19cellatlas/remix.py | isabella232/corpora-data-portal | 09ed3cad3165f8b0db854b76404e0d5d0ea0b7d9 | [
"MIT"
] | null | null | null | import argparse
import json
import string
import scanpy as sc
import yaml
import ontology
REPLACE_SUFFIX = "_original"
ONTOLOGY_SUFFIX = "_ontology_term_id"
def is_curie(value):
    """Return True iff the value is an OBO-id CURIE like EFO:000001.

    A CURIE here is ``<prefix>:<digits>`` with a *non-empty* numeric id.
    The previous version returned True for strings like ``"EFO:"``
    because ``all()`` over an empty sequence is True.
    """
    if ":" not in value:
        return False
    local_id = value.split(":")[1]
    return bool(local_id) and all(c in string.digits for c in local_id)
def is_ontology_field(field_name):
    """Return True iff the field_name is an ontology field like tissue_ontology_term_id"""
    # Ontology fields are identified purely by the "_ontology_term_id" suffix.
    return field_name.endswith(ONTOLOGY_SUFFIX)
def get_label_field_name(field_name):
    """Get the associated label field from an ontology field, assay_ontology_term_id --> assay"""
    # Strip the suffix; assumes the caller has already checked
    # is_ontology_field(field_name).
    return field_name[: -len(ONTOLOGY_SUFFIX)]
def safe_add_field(adata_attr, field_name, field_value):
    """Add a field and value to an AnnData mapping without clobbering data.

    Any pre-existing value is preserved under ``<field_name>_original``.
    Lists of dicts are serialised to a JSON string first (AnnData cannot
    store them natively).
    """
    is_dict_list = (
        isinstance(field_value, list)
        and len(field_value) > 0
        and isinstance(field_value[0], dict)
    )
    if is_dict_list:
        field_value = json.dumps(field_value)
    if field_name in adata_attr:
        adata_attr[field_name + REPLACE_SUFFIX] = adata_attr[field_name]
    adata_attr[field_name] = field_value
def get_curie_and_label(maybe_curie):
    """Given a string that might be a CURIE, return a (curie, label) pair."""
    if is_curie(maybe_curie):
        # Genuine CURIE: resolve its human-readable ontology label.
        return (maybe_curie, ontology.get_ontology_label(maybe_curie))
    # Plain label: there is no associated CURIE.
    return ("", maybe_curie)
def remix_uns(adata, uns_config):
    """Add fields from the config to adata.uns"""
    for field_name, field_value in uns_config.items():
        if is_ontology_field(field_name):
            # If it's an ontology field, look it up and store both the
            # CURIE and its human-readable label.
            label_field_name = get_label_field_name(field_name)
            ontology_term, ontology_label = get_curie_and_label(field_value)
            safe_add_field(adata.uns, field_name, ontology_term)
            safe_add_field(adata.uns, label_field_name, ontology_label)
        else:
            safe_add_field(adata.uns, field_name, field_value)
def remix_obs(adata, obs_config):
    """Add fields from the config to adata.obs"""
    for field_name, field_value in obs_config.items():
        if isinstance(field_value, dict):
            # If the value is a dict, that means we are supposed to map from an existing column to the new one
            source_column, column_map = next(iter(field_value.items()))
            # Warn (but proceed) on keys or values missing from either side
            # of the translation.
            for key in column_map:
                if key not in adata.obs[source_column].unique():
                    print(f'WARNING: key {key} not in adata.obs["{source_column}"]')
            for value in adata.obs[source_column].unique():
                if value not in column_map:
                    print(f'WARNING: Value {value} in adata.obs["{source_column}"] not in translation dict')
            if is_ontology_field(field_name):
                # Build parallel original-value -> CURIE / label maps.
                ontology_term_map, ontology_label_map = {}, {}
                print("\n", field_name, "\n")
                for original_value, maybe_curie in column_map.items():
                    curie, label = get_curie_and_label(maybe_curie)
                    ontology_term_map[original_value] = curie
                    ontology_label_map[original_value] = label
                    print(";".join([original_value, curie, label]))
                ontology_column = adata.obs[source_column].replace(ontology_term_map, inplace=False)
                label_column = adata.obs[source_column].replace(ontology_label_map, inplace=False)
                safe_add_field(adata.obs, field_name, ontology_column)
                safe_add_field(adata.obs, get_label_field_name(field_name), label_column)
            # NOTE(review): mapped columns that are NOT ontology fields are
            # silently dropped here -- confirm this is intended.
        else:
            if is_ontology_field(field_name):
                # If it's an ontology field, look it up
                label_field_name = get_label_field_name(field_name)
                ontology_term, ontology_label = get_curie_and_label(field_value)
                safe_add_field(adata.obs, field_name, ontology_term)
                safe_add_field(adata.obs, label_field_name, ontology_label)
            else:
                safe_add_field(adata.obs, field_name, field_value)
def main():
    """CLI entry point: apply a remix config (YAML) to an .h5ad file."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--source-h5ad", required=True)
    parser.add_argument("--remix-config", required=True)
    parser.add_argument("--output-filename", required=True)
    args = parser.parse_args()
    # FullLoader avoids arbitrary object construction from the YAML.
    # NOTE(review): the config file handle is never explicitly closed.
    config = yaml.load(open(args.remix_config), Loader=yaml.FullLoader)
    adata = sc.read_h5ad(args.source_h5ad)
    remix_uns(adata, config["uns"])
    remix_obs(adata, config["obs"])
    adata.write_h5ad(args.output_filename, compression="gzip")
if __name__ == "__main__":
main()
| 39 | 110 | 0.670761 |
ace6cf59482a591c8079e6c0d126ec5a31cdaeea | 370 | py | Python | core/migrations/0015_auto_20210313_0338.py | lcbiplove/nepfdb | 56e48bb0dcae34d409b7d75d210d2938e763a953 | [
"MIT"
] | null | null | null | core/migrations/0015_auto_20210313_0338.py | lcbiplove/nepfdb | 56e48bb0dcae34d409b7d75d210d2938e763a953 | [
"MIT"
] | null | null | null | core/migrations/0015_auto_20210313_0338.py | lcbiplove/nepfdb | 56e48bb0dcae34d409b7d75d210d2938e763a953 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.7 on 2021-03-13 03:38
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: sets a human-friendly plural name for AwardCategory."""
    dependencies = [
        ('core', '0014_auto_20210313_0336'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='awardcategory',
            options={'verbose_name_plural': 'Award Categories'},
        ),
    ]
| 20.555556 | 64 | 0.618919 |
ace6d01e4d588bfd071aad379e9f34e380aca76b | 2,941 | py | Python | data_science_toolbox/data_checks/dictionary/check_dictionary_keys.py | safurrier/data_science_utils | 842b025ea3197e8a9946401257b2fa22ef1bf82d | [
"MIT"
] | null | null | null | data_science_toolbox/data_checks/dictionary/check_dictionary_keys.py | safurrier/data_science_utils | 842b025ea3197e8a9946401257b2fa22ef1bf82d | [
"MIT"
] | null | null | null | data_science_toolbox/data_checks/dictionary/check_dictionary_keys.py | safurrier/data_science_utils | 842b025ea3197e8a9946401257b2fa22ef1bf82d | [
"MIT"
] | 1 | 2020-03-30T20:59:04.000Z | 2020-03-30T20:59:04.000Z | from typing import Any, Dict, Generic, Iterable
def check_dictionary_keys(dictionary: Dict, required_keys: Iterable[Any], verbose: int = 0) -> bool:
    """
    Check a dictionary for keys given an iterable of keys to check

    Parameters
    ----------
    dictionary : dict
        The dictionary to check
    required_keys : Iterable
        An iterable of keys to check
    verbose : int
        Optional verbose setting. If non-zero, will print out missing keys

    Returns
    -------
    Boolean
        Return True if all keys present in dictionary else False

    Example
    -------
    test_dict = {
        'A': [1, 1, 1],
        'B': ['foo', 'bar', 'banana'],
        'C': [99.0, 23.5, 68.9]
    }
    print(check_dictionary_keys(test_dict, ['A', 'C']))
    > True
    """
    missing_keys = [key for key in required_keys if key not in dictionary]
    if not missing_keys:
        return True
    if verbose > 0:
        print(
            f'Missing keys {missing_keys} not found in dictionary with keys {dictionary.keys()}')
    return False
def check_nested_dictionary_keys(dictionary: Dict, nested_keys_dict: Dict[Any, Iterable], verbose: int = 0) -> bool:
    """
    Check a dictionary for top level keys and their nested keys.

    Parameters
    ----------
    dictionary : dict
        The dictionary to check
    nested_keys_dict : dict
        Mapping of top level key -> iterable of required nested keys
    verbose : int
        Optional verbose setting. If non-zero, will print out missing keys
        and nested keys

    Returns
    -------
    Boolean
        Return True if all keys and nested keys present in dictionary else False

    Example
    -------
    test_dict = {
        'A': [1, 1, 1],
        'B': {'Nested_Key_1': 99.0, 'Nested_Key_2': 5, 'Nested_Key_3': 10},
        'C': {'Nested_Key_4': [99.0, 23.5, 68.9]}
    }
    print(check_nested_dictionary_keys(test_dict,
                                       {'B': ['Nested_Key_1', 'Nested_Key_2', 'Nested_Key_3'],
                                       'C': ['Nested_Key_4']})
    > True
    """
    missing_keys = []
    missing_nested_keys = []
    for key, nested_keys in nested_keys_dict.items():
        # Check that all top level keys are in the dictionary
        if key not in dictionary.keys():
            missing_keys.append(key)
            # Bug fix: previously the nested lookup below still ran for a
            # missing top level key and raised a KeyError instead of
            # returning False.
            continue
        for nested_key in nested_keys:
            if nested_key not in dictionary[key].keys():
                missing_nested_keys.append(nested_key)
    if missing_keys:
        if verbose > 0:
            print(
                f'Missing keys {missing_keys} not found in dictionary with keys {dictionary.keys()}')
        return False
    if missing_nested_keys:
        if verbose > 0:
            print(
                f'Nested keys: {missing_nested_keys} not found in dictionary.')
        return False
    else:
        return True
| 29.41 | 116 | 0.575655 |
ace6d19b16838e370c8d4c2a3c8cead9a14a3417 | 1,209 | py | Python | invenio_records_resources/services/records/params/querystr.py | FlorianCassayre/invenio-records-resources | 80a2f6565653fd00e08c85b5aa8d1b1276cbb4e7 | [
"MIT"
] | null | null | null | invenio_records_resources/services/records/params/querystr.py | FlorianCassayre/invenio-records-resources | 80a2f6565653fd00e08c85b5aa8d1b1276cbb4e7 | [
"MIT"
] | null | null | null | invenio_records_resources/services/records/params/querystr.py | FlorianCassayre/invenio-records-resources | 80a2f6565653fd00e08c85b5aa8d1b1276cbb4e7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2020 CERN.
#
# Invenio-Records-Resources is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see LICENSE file for more
# details.
"""Query parameter interpreter API."""
from elasticsearch_dsl import Q
from .base import ParamInterpreter
class QueryParser:
    """Parse query string into a Elasticsearch DSL Q object."""

    def __init__(self, identity=None):
        """Initialise the parser."""
        # Identity of the requesting user; unused here but available to
        # subclasses that restrict the query per-user.
        self.identity = identity

    def parse(self, query_str):
        """Parse the query."""
        # Delegate to Elasticsearch's own query_string syntax.
        return Q('query_string', query=query_str)
class QueryStrParam(ParamInterpreter):
    """Evaluate the 'q' parameter."""

    def apply(self, identity, search, params):
        """Evaluate the query str on the search."""
        query_str = params.get('q')
        # Empty/missing query: return the search unchanged.
        if not query_str:
            return search
        try:
            # Parser class is configurable on the service config.
            parser_cls = self.config.search_query_parser_cls
            query = parser_cls(identity).parse(query_str)
            return search.query(query)
        except SyntaxError:
            # TODO: raise a proper type of exception
            raise Exception("Failed to parse query.")
| 27.477273 | 76 | 0.645161 |
ace6d1df9cc8e09365438d78122424c84d370072 | 1,231 | py | Python | src/create_npy_dataset.py | davidsteinar/structural-uncertainty | 8cb29a7b25a203e7acce528d63b1a0733cfa89b2 | [
"MIT"
] | 1 | 2019-04-10T22:06:33.000Z | 2019-04-10T22:06:33.000Z | src/create_npy_dataset.py | davidsteinar/structural-uncertainty | 8cb29a7b25a203e7acce528d63b1a0733cfa89b2 | [
"MIT"
] | null | null | null | src/create_npy_dataset.py | davidsteinar/structural-uncertainty | 8cb29a7b25a203e7acce528d63b1a0733cfa89b2 | [
"MIT"
] | 3 | 2018-01-01T14:59:16.000Z | 2020-03-24T12:00:51.000Z | import os
import zipfile
import pandas as pd
import numpy as np
# Sensor channels extracted from every measurement archive; sensor 10 is
# deliberately skipped (see original comment).
SENSOR_IDS = ['03', '05', '06', '07', '12', '14', '16']
VIBRATION_SAMPLES = 65536

for i, filename in enumerate(os.listdir('../data/z24zipped/'), start=1):  # permanent zipped files
    stem = filename.replace('.zip', '')
    print(stem)
    print(i)
    # Context manager guarantees the archive handle is closed per file.
    with zipfile.ZipFile('../data/z24zipped/' + filename, 'r') as archive:
        df_list = []
        for end in SENSOR_IDS:
            df = pd.read_csv(archive.open(stem + end + '.aaa'), sep=' ',
                             nrows=VIBRATION_SAMPLES, skiprows=2)
            df.columns = [end]
            df_list.append(df)
        # .to_numpy() replaces DataFrame.as_matrix(), removed in pandas 1.0;
        # sep=r'\s+' replaces the deprecated delim_whitespace flag.
        data = pd.concat(df_list, axis=1).to_numpy()
        env = pd.read_csv(archive.open(stem + 'PRE.env'), sep=r'\s+',
                          nrows=9, header=None, skiprows=1)
        env_mean_matrix = env.mean().to_numpy()
    filename_vib = '../data/z24_clean/' + stem + '_vibrations.npy'
    filename_env = '../data/z24_clean/' + stem + '_env.npy'
    f_vib = np.memmap(filename_vib, dtype=np.float64, mode='w+', shape=(VIBRATION_SAMPLES, 7))
    f_vib[:] = data
    # Explicit flush so the data reaches disk deterministically instead of
    # relying on garbage collection of the memmap.
    f_vib.flush()
    f_env = np.memmap(filename_env, dtype=np.float64, mode='w+', shape=(53,))
    f_env[:] = env_mean_matrix
    f_env.flush()
ace6d1e3d9f35c1c9a6876a80287d7270d93e890 | 2,066 | py | Python | config_utils.py | armerg/ca1_muscarinic_modulation | c67f53fa6a04a31b11b12956d94bb2e3e147e439 | [
"MIT"
] | 1 | 2020-10-05T09:29:41.000Z | 2020-10-05T09:29:41.000Z | config_utils.py | armerg/ca1_muscarinic_modulation | c67f53fa6a04a31b11b12956d94bb2e3e147e439 | [
"MIT"
] | 2 | 2021-02-19T16:50:32.000Z | 2021-02-23T21:20:58.000Z | config_utils.py | armerg/ca1_muscarinic_modulation | c67f53fa6a04a31b11b12956d94bb2e3e147e439 | [
"MIT"
] | 1 | 2021-05-14T06:19:15.000Z | 2021-05-14T06:19:15.000Z | import matplotlib as plt
from configobj import ConfigObj, SimpleVal
from validate import Validator
def check_file_exists(file_name):
    """Check if the file actually exists or throw exception """
    try:
        # Opening (and immediately closing) verifies both existence and
        # read permission.
        open(file_name).close()
    except IOError:
        # Does not exist OR no read permissions
        print("Unable to open file: ", file_name)
        exit(1)
def check_config_validity(configFile, config, configSpecFile, configSpec):
    """Check the validity of the configuration file with respect to its specifications

    Exits the process with status 1 when the extensive validation fails;
    the simple completeness check only reports missing entries.
    """
    val = Validator()
    test = config.validate(val, preserve_errors=True)
    if test == True:
        print('Extensive Test of Configuration File ', configFile, ' according to specifications ' , configSpecFile, ' Succeeded.')
    else:
        # Reuse the result computed above instead of validating a second
        # time (the previous code re-ran config.validate just to print it).
        print('Config file failed specifications test: \n ', test)
        exit(1)
    val = SimpleVal()
    test = config.validate(val)
    if test == True:
        print('All values present.')
    elif test == False:
        print('No values present!')
    else:
        # Partial failure: configobj returns a dict of per-entry results.
        for entry in test:
            if test[entry] == False:
                print('"{}" missing.'.format(entry))
def check_config_boolean(config):
    """Exit with status 1 unless the config's self-check flag is truthy."""
    works = config['ConfigCheck']['checkWorks']
    if not works:
        print('VALIDATION CHECK FAILED')
        exit(1)
    print('VALIDATION CHECK SUCCEDED')
def load_config(conf_file, conf_spec_file):
    """Load *conf_file*, validate it against *conf_spec_file* and return it."""
    for path in (conf_file, conf_spec_file):
        check_file_exists(path)
    # Load the specification first, then the config file constrained by it.
    spec = ConfigObj(conf_spec_file, interpolation=False,
                     list_values=False, _inspec=True)
    config = ConfigObj(conf_file, configspec=spec)
    # Validate against the spec and the built-in sanity flag; both helpers
    # exit the process on failure.
    check_config_validity(conf_file, config, conf_spec_file, spec)
    check_config_boolean(config)
    return config
ace6d227ab08984b6be795000fb79eb9e75f9dd5 | 1,578 | py | Python | xlsxwriter/test/comparison/test_chart_column02.py | haiyangd/XlsxWriter | 81f8c9435b3e03a1458bf9ba314b5d3f7508290f | [
"BSD-2-Clause-FreeBSD"
] | 3 | 2018-02-26T12:31:41.000Z | 2020-10-10T14:14:11.000Z | xlsxwriter/test/comparison/test_chart_column02.py | haiyangd/XlsxWriter | 81f8c9435b3e03a1458bf9ba314b5d3f7508290f | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | xlsxwriter/test/comparison/test_chart_column02.py | haiyangd/XlsxWriter | 81f8c9435b3e03a1458bf9ba314b5d3f7508290f | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2017, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        # Paths for the generated workbook and the Excel reference copy.
        self.maxDiff = None
        filename = 'chart_column02.xlsx'
        test_dir = 'xlsxwriter/test/comparison/'
        self.got_filename = test_dir + '_test_' + filename
        self.exp_filename = test_dir + 'xlsx_files/' + filename
        self.ignore_files = []
        self.ignore_elements = {}

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()
        chart = workbook.add_chart({'type': 'column', 'subtype': 'stacked'})
        # Fixed axis ids so the XML matches the Excel-generated reference.
        chart.axis_ids = [49388544, 69387008]
        data = [
            [1, 2, 3, 4, 5],
            [2, 4, 6, 8, 10],
            [3, 6, 9, 12, 15],
        ]
        worksheet.write_column('A1', data[0])
        worksheet.write_column('B1', data[1])
        worksheet.write_column('C1', data[2])
        chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
        chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
        chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
        worksheet.insert_chart('E9', chart)
        workbook.close()
        self.assertExcelEqual()
| 26.745763 | 79 | 0.567174 |
ace6d26a8782d871c7bfebca8eff9fe9bb3b4392 | 628 | py | Python | src/my_project/medium_problems/from1to50/partition_array_maximum_sum.py | ivan1016017/LeetCodeAlgorithmProblems | f617f30201fb1cd53e32de35084fdeb88ef36023 | [
"MIT"
] | null | null | null | src/my_project/medium_problems/from1to50/partition_array_maximum_sum.py | ivan1016017/LeetCodeAlgorithmProblems | f617f30201fb1cd53e32de35084fdeb88ef36023 | [
"MIT"
] | 1 | 2021-09-22T12:26:14.000Z | 2021-09-22T12:26:14.000Z | src/my_project/medium_problems/from1to50/partition_array_maximum_sum.py | ivan1016017/LeetCodeAlgorithmProblems | 454284b76634cc34ed41f7fa30d857403cedf1bf | [
"MIT"
] | null | null | null | from typing import List
class Solution:
    def maxSumAfterPartitioning(self, arr: List[int], k: int) -> int:
        """Partition *arr* into runs of length at most *k*, replace every
        element of a run by the run's maximum, and return the largest
        achievable total sum (LeetCode 1043).

        dp[i] is the best sum for the prefix arr[:i + 1].
        Runs in O(n * k) time, O(n) space.
        """
        n = len(arr)
        if n == 0:
            # Previously this crashed with an IndexError on dp[-1].
            return 0
        dp = [0] * n
        # Seed the first k prefixes: each is one run worth max * length.
        maximum = 0
        for i in range(min(k, n)):
            maximum = max(arr[i], maximum)
            dp[i] = maximum * (i + 1)
        # (The leftover debug print(dp) has been removed.)
        for i in range(k, n):
            maximum = arr[i]
            # Try every run length j + 1 ending at index i.
            for j in range(k):
                maximum = max(maximum, arr[i - j])
                dp[i] = max(dp[i], dp[i - j - 1] + maximum * (j + 1))
        return dp[-1]
solution = Solution()
solution.maxSumAfterPartitioning(arr = [1,15,7,9,2,5,10], k = 3) | 29.904762 | 69 | 0.503185 |
ace6d31bebbb74e34837ff2bc58a2cbac74ca95b | 4,774 | py | Python | checker/alexa/spectrogram/spectrogram/stft/stft.py | fausecteam/faustctf-2017-alexa | 53be850957e88642aaaaffd61360195091aaa35c | [
"0BSD"
] | null | null | null | checker/alexa/spectrogram/spectrogram/stft/stft.py | fausecteam/faustctf-2017-alexa | 53be850957e88642aaaaffd61360195091aaa35c | [
"0BSD"
] | null | null | null | checker/alexa/spectrogram/spectrogram/stft/stft.py | fausecteam/faustctf-2017-alexa | 53be850957e88642aaaaffd61360195091aaa35c | [
"0BSD"
] | null | null | null | # Copyright (c) 2017, Hristo Zhivomirov
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the distribution
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Implementation based on https://uk.mathworks.com/matlabcentral/fileexchange/45197-short-time-fourier-transformation--stft--with-matlab-implementation
import math
import numpy as np
from scipy.signal import hamming
def stft(x, window_length, h, nfft, fs):
    """
    Computes the short time fourier transform of a given signal
    :param x: signal in time domain
    :param window_length: length of the hamming window
    :param h: hop size
    :param nfft: number of FFT points
    :param fs: sampling frequency in Hz
    :return: (stft, f, t) where
        STFT matrix, time across columns, frequency coefficients across rows
        f frequency vector in Hz
        t time vector in seconds
    """
    signal_length = len(x)
    # Create a periodic hamming window (sym=False), as used for analysis
    window = hamming(window_length, sym=False)
    # One-sided spectrum keeps nfft // 2 + 1 bins (DC .. Nyquist)
    num_rows = math.ceil((1.0 + nfft) / 2.0)
    num_cols = 1 + int((signal_length - window_length) / float(h))
    # dtype=complex replaces the np.complex alias removed in NumPy 1.24
    stft = np.zeros((num_rows, num_cols), dtype=complex)
    idx = 0
    col = 0
    # <= (not <) so the final full frame is included; the old strict
    # comparison left the last STFT column all zeros whenever
    # (signal_length - window_length) was an exact multiple of h.
    while idx + window_length <= signal_length and col < num_cols:
        # Window the current frame
        signal_window = x[idx:idx + window_length] * window
        # FFT of the frame
        signal_window_ft = np.fft.fft(signal_window, nfft)
        # Keep only the non-negative frequencies
        stft[:, col] = signal_window_ft[0:num_rows]
        # Advance by the hop size
        idx = idx + h
        col += 1
    # Frame centers in seconds and bin frequencies in Hz
    t = np.arange(window_length / 2, window_length / 2 + num_cols * h, step=h) / fs
    f = np.arange(num_rows) * fs / nfft
    return stft, f, t
def istft(stft, window_length, h, nfft, fs):
    """
    Computes the inverse short term Fourier transform of the given signal
    :param stft: STFT matrix (one-sided spectrum, frequencies across rows)
    :param window_length: length of the hamming window
    :param h: hop size
    :param nfft: number of FFT points
    :param fs: sampling frequency in Hz
    :return: (x, t) where x is the signal in time domain and t the time vector in seconds
    """
    # Estimate the length of the signal
    num_cols = stft.shape[1]
    signal_length = nfft + (num_cols - 1) * h
    x = np.zeros((1, signal_length))
    # Form a periodic hamming window (must match the analysis window)
    window = hamming(window_length, sym=False)
    # Perform IFFT and weighted OLA
    if nfft % 2 == 1:
        # Odd nfft excludes the Nyquist point
        for b in np.arange(0, h * num_cols, step=h):
            # Extract the one-sided FFT points for this frame
            X = stft[:, b // h]
            # Full spectrum: X followed by conj(X[-1], ..., X[1]).
            # (The previous X[1::-1] mirrored only two bins and produced a
            # malformed spectrum of the wrong length.)
            X = np.hstack((X, np.conj(X[:0:-1])))
            # IFFT back to a time-domain frame
            xprim = np.real(np.fft.ifft(X))
            # Weighted OLA; the row index was previously missing here,
            # which broke the accumulation for more than one frame.
            x[:, b:b + nfft] += xprim * window
    else:
        # Even nfft includes the Nyquist point
        for b in np.arange(0, h * num_cols, step=h):
            # Extract FFT points
            X = stft[:, b // h]
            # Mirror everything strictly between DC and Nyquist
            X = np.hstack((X, np.conj(X[::-1][1:-1])))
            # IFFT
            xprim = np.real(np.fft.ifft(X))
            # Weighted OLA
            x[:, b:b + nfft] += xprim * window
    # Find W0 (window energy) and scale the weighted OLA
    W0 = np.sum(np.square(window))
    x *= h / W0
    # Generate the time vector from the actual signal length
    actual_signal_length = x.shape[1]
    t = np.arange(actual_signal_length) / fs
    return x, t
def plot_spectrogram(stft):
    """
    Displays the spectrogram from the given stft matrix
    :param stft: matrix with columns across time steps and rows across frequencies
    :return: None
    """
    import matplotlib.pyplot as plt
    from matplotlib.colors import LogNorm
    figure = plt.figure()
    axes = plt.gca()
    # Log-scaled magnitude with low frequencies at the bottom.
    image = axes.matshow(
        np.abs(stft),
        cmap=plt.get_cmap('plasma'),
        norm=LogNorm(vmin=0.01, vmax=1),
        origin='lower',
    )
    figure.colorbar(image)
    plt.title("Spectrogram")
    plt.show()
| 30.8 | 151 | 0.718894 |
ace6d3788636cb6b2bf89d577042fc18daf8f587 | 617 | py | Python | sandbox/bfs.py | manubhardwaj/exercism | 27b22a8d52520ada30a644315167cab127e7ac43 | [
"Apache-2.0"
] | null | null | null | sandbox/bfs.py | manubhardwaj/exercism | 27b22a8d52520ada30a644315167cab127e7ac43 | [
"Apache-2.0"
] | 4 | 2020-07-17T11:30:45.000Z | 2021-03-23T21:25:36.000Z | sandbox/bfs.py | manubhardwaj/exercism | 27b22a8d52520ada30a644315167cab127e7ac43 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
def bfs(graph, start):
    """Return the set of vertices reachable from *start*.

    The original implementation popped from the end of a list, which is a
    depth-first traversal despite the function's name.  A deque popped
    from the left gives genuine breadth-first order; the returned set of
    visited vertices is identical either way.
    """
    from collections import deque

    visited = set()
    queue = deque([start])
    while queue:
        vertex = queue.popleft()
        if vertex not in visited:
            visited.add(vertex)
            # Enqueue only neighbours not yet visited.
            queue.extend(graph[vertex] - visited)
    return visited
if __name__ == '__main__':
    # Small fixed graph used as a smoke test when run as a script.
    graph = {
        'A': {'B', 'C'},
        'B': {'D', 'E'},
        'C': {'F', 'G'},
        'D': {'H', 'I'},
        'E': {'B'},
        'F': {'C'},
        'G': {'C'},
        'H': {'D'},
        'I': {'D'},
    }
    print(bfs(graph, 'A'))
| 22.035714 | 49 | 0.388979 |
ace6d3c5f37b3a20672c73e9425a08055f00d97d | 728 | py | Python | teamcat_service/doraemon/doraemon/auth_extend/user/viewmodels/vm_user.py | zhangyin2088/Teamcat | be9be8d7c1e58c8d2d22ab78d25783d9aee4de71 | [
"Apache-2.0"
] | 6 | 2018-11-26T08:42:52.000Z | 2020-06-01T08:33:48.000Z | teamcat_service/doraemon/doraemon/auth_extend/user/viewmodels/vm_user.py | zhangyin2088/Teamcat | be9be8d7c1e58c8d2d22ab78d25783d9aee4de71 | [
"Apache-2.0"
] | null | null | null | teamcat_service/doraemon/doraemon/auth_extend/user/viewmodels/vm_user.py | zhangyin2088/Teamcat | be9be8d7c1e58c8d2d22ab78d25783d9aee4de71 | [
"Apache-2.0"
] | 1 | 2019-01-22T06:45:36.000Z | 2019-01-22T06:45:36.000Z | #coding=utf-8
'''
Created on 2015-11-18
@author: Devuser
'''
class VM_User(object):
    """View-model pairing a user object with the currently selected user id."""

    def __init__(self, user, selected_user_id):
        """Store the wrapped user and the id of the selected user."""
        self.user = user
        self.selected_user_id = selected_user_id

    def user_name(self):
        """Return a display name: full name when available, plus the email."""
        user = self.user
        if user.last_name and user.first_name:
            display = user.last_name + user.first_name
        else:
            display = user.username
        if user.email:
            display = display + " (" + user.email + ")"
        return display

    def is_selected(self):
        """Return "selected" when the wrapped user is the selected one, else ""."""
        if self.user.id == self.selected_user_id:
            return "selected"
        return ""
| 20.8 | 59 | 0.568681 |
ace6d48b2e740cd2ad2e944790a717fb3569bd7a | 372 | py | Python | aliyun/api/rest/Ecs20140526ResizeDiskRequest.py | snowyxx/aliyun-python-demo | ed40887ddff440b85b77f9b2a1fcda11cca55c8b | [
"Apache-2.0"
] | null | null | null | aliyun/api/rest/Ecs20140526ResizeDiskRequest.py | snowyxx/aliyun-python-demo | ed40887ddff440b85b77f9b2a1fcda11cca55c8b | [
"Apache-2.0"
] | null | null | null | aliyun/api/rest/Ecs20140526ResizeDiskRequest.py | snowyxx/aliyun-python-demo | ed40887ddff440b85b77f9b2a1fcda11cca55c8b | [
"Apache-2.0"
] | null | null | null | '''
Created by auto_sdk on 2015.04.21
'''
from aliyun.api.base import RestApi
class Ecs20140526ResizeDiskRequest(RestApi):
    """Aliyun ECS ResizeDisk request (API version 2014-05-26)."""

    def __init__(self, domain='ecs.aliyuncs.com', port=80):
        RestApi.__init__(self, domain, port)
        # Request parameters, populated by the caller before sending.
        self.ClientToken = None
        self.DiskId = None
        self.NewSize = None

    def getapiname(self):
        return 'ecs.aliyuncs.com.ResizeDisk.2014-05-26'
| 26.571429 | 55 | 0.725806 |
ace6d4ba466b192f55f5321c732c0613ae33c4c8 | 390 | py | Python | ssbwp2/urls.py | antorof/django-simple | 786e93b5084c17b364bac6bceb7dddcce1c789d2 | [
"MIT"
] | null | null | null | ssbwp2/urls.py | antorof/django-simple | 786e93b5084c17b364bac6bceb7dddcce1c789d2 | [
"MIT"
] | null | null | null | ssbwp2/urls.py | antorof/django-simple | 786e93b5084c17b364bac6bceb7dddcce1c789d2 | [
"MIT"
] | null | null | null | from django.conf.urls import patterns, include, url
from django.contrib import admin
# URL routing for the ssbwp2 project (legacy Django patterns() API).
admin.autodiscover()
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'ssbwp2.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    url(r'^service/', include('service.urls')),
    # url(r'^admins/', include(simple.urls)),
    url(r'^admin/', include(admin.site.urls)),
)
| 27.857143 | 51 | 0.638462 |
ace6d51d75f6b88f4dc5cb45386e116ac2f73bde | 22,786 | py | Python | Lib/test/test_compiler/test_strict/test_rewriter.py | itamaro/cinder | a08198c185a255b59f85dc84183558370a0c5284 | [
"CNRI-Python-GPL-Compatible"
] | 1,886 | 2021-05-03T23:58:43.000Z | 2022-03-31T19:15:58.000Z | Lib/test/test_compiler/test_strict/test_rewriter.py | itamaro/cinder | a08198c185a255b59f85dc84183558370a0c5284 | [
"CNRI-Python-GPL-Compatible"
] | 70 | 2021-05-04T23:25:35.000Z | 2022-03-31T18:42:08.000Z | Lib/test/test_compiler/test_strict/test_rewriter.py | itamaro/cinder | a08198c185a255b59f85dc84183558370a0c5284 | [
"CNRI-Python-GPL-Compatible"
] | 52 | 2021-05-04T21:26:03.000Z | 2022-03-08T18:02:56.000Z | from __future__ import annotations
import ast
import symtable
import sys
import unittest
from compiler.strict import strict_compile
from compiler.strict.common import FIXED_MODULES
from compiler.strict.loader import StrictModule
from compiler.strict.preprocessor import ENABLE_SLOTS_DECORATOR
from compiler.strict.rewriter import rewrite
from textwrap import dedent
from types import CoroutineType, FunctionType, ModuleType
from typing import Any, Dict, List, Optional, Set, Type, TypeVar, final
from weakref import ref
from .common import StrictTestWithCheckerBase
class RewriterTestPreprocessor(ast.NodeVisitor):
    """AST pass that appends the enable-slots decorator to class definitions.

    Note: it deliberately does not call generic_visit, so only classes at
    the level this visitor reaches are decorated.
    """

    def visit_ClassDef(self, node: ast.ClassDef):
        decorator = ast.Name(ENABLE_SLOTS_DECORATOR, ast.Load())
        # Give the synthesized node the class's own location info so the
        # tree stays compilable.
        decorator.lineno = node.lineno
        decorator.col_offset = node.col_offset
        node.decorator_list.append(decorator)
class RewriterTestCase(StrictTestWithCheckerBase):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def compile_to_strict(
        self,
        code: str,
        builtins: Dict[str, Any] = __builtins__,
        modules: Optional[Dict[str, Dict[str, Any]]] = None,
        globals: Optional[Dict[str, Any]] = None,
        track_import_call: bool = False,
        import_call_tracker: Optional[Set[str]] = None,
    ) -> StrictModule:
        """Compile *code* through the strict-module rewriter and execute it.

        Returns the resulting StrictModule.  ``import_call_tracker``, when
        given, collects the names passed to track_import_call.
        """
        code = dedent(code)
        root = ast.parse(code)
        name = "foo"
        filename = "foo.py"
        symbols = symtable.symtable(code, filename, "exec")
        # Decorate every class with the enable-slots marker before rewriting.
        RewriterTestPreprocessor().visit(root)
        root = rewrite(
            root,
            symbols,
            filename,
            name,
            builtins=builtins,
            track_import_call=track_import_call,
        )
        c = strict_compile(name, filename, root)
        # Stub implementations of the __strict__ helpers used by rewritten
        # modules.  NOTE: the inner def below shadows the boolean
        # track_import_call parameter (already consumed by rewrite above).
        def freeze_type(freeze: Type[object]) -> None:
            pass
        def loose_slots(freeze: Type[object]) -> None:
            pass
        def strict_slots(typ: Type[object]) -> Type[object]:
            return typ
        def track_import_call(mod: str) -> None:
            if import_call_tracker is not None:
                import_call_tracker.add(mod)
        fixed_modules = modules or dict(FIXED_MODULES)
        fixed_modules.update(
            __strict__={
                "freeze_type": freeze_type,
                "loose_slots": loose_slots,
                "track_import_call": track_import_call,
                "strict_slots": strict_slots,
            }
        )
        additional_dicts = globals or {}
        additional_dicts.update(
            {"<fixed-modules>": fixed_modules, "<builtins>": builtins}
        )
        # d (the module dict) is intentionally discarded; only the module
        # object is returned.
        d, m = self._exec_strict_code(c, name, additional_dicts=additional_dicts)
        return m
@final
class ImmutableModuleTestCase(RewriterTestCase):
    """Checks that strict-rewritten modules expose their definitions correctly."""

    def test_simple(self) -> None:
        """Module-level values and functions survive strict compilation."""
        code = """
        x = 1
        def f():
            return x
        """
        mod = self.compile_to_strict(code)
        self.assertEqual(mod.x, 1)
        self.assertEqual(type(mod.f), FunctionType)
        self.assertEqual(mod.f(), 1)
        self.assertEqual(mod.f.__name__, "f")

    def test_decorators(self) -> None:
        """Decorators are applied to both functions and classes."""
        code = """
        from __strict__ import strict_slots
        def dec(x):
            return x
        @dec
        @strict_slots
        def f():
            return 1
        """
        mod = self.compile_to_strict(code)
        self.assertEqual(type(mod.f), FunctionType)
        self.assertEqual(type(mod.dec), FunctionType)
        self.assertEqual(type(mod.f()), int)
        code = """
        from __strict__ import strict_slots
        def dec(x):
            return x
        @dec
        @strict_slots
        class C:
            x = 1
        """
        mod = self.compile_to_strict(code)
        self.assertEqual(type(mod.C), type)
        self.assertEqual(type(mod.dec), FunctionType)
        self.assertEqual(type(mod.C.x), int)

    def test_visit_method_global(self) -> None:
        """test visiting an explicit global decl inside of a nested scope"""
        code = """
        from __strict__ import strict_slots
        X = 1
        @strict_slots
        class C:
            def f(self):
                global X
                X = 2
                return X
        """
        mod = self.compile_to_strict(code)
        self.assertEqual(mod.C().f(), 2)

    def test_class_def(self) -> None:
        """Class methods can close over module-level values."""
        code = """
        from __strict__ import strict_slots
        x = 42
        @strict_slots
        class C:
            def f(self):
                return x
        """
        mod = self.compile_to_strict(code)
        self.assertEqual(mod.C.__name__, "C")
        self.assertEqual(mod.C().f(), 42)

    def test_nested_class_def(self) -> None:
        """__name__/__qualname__ are correct for nested classes and methods."""
        code = """
        from __strict__ import strict_slots
        x = 42
        @strict_slots
        class C:
            def f(self):
                return x
            @strict_slots
            class D:
                def g(self):
                    return x
        """
        mod = self.compile_to_strict(code)
        self.assertEqual(mod.C.__name__, "C")
        self.assertEqual(mod.C.__qualname__, "C")
        self.assertEqual(mod.C.D.__name__, "D")
        self.assertEqual(mod.C.D.__qualname__, "C.D")
        self.assertEqual(mod.C.f.__name__, "f")
        self.assertEqual(mod.C.f.__qualname__, "C.f")
        self.assertEqual(mod.C.D.g.__name__, "g")
        self.assertEqual(mod.C.D.g.__qualname__, "C.D.g")
        self.assertEqual(mod.C().f(), 42)
        self.assertEqual(mod.C.D().g(), 42)
@final
class SlotificationTestCase(RewriterTestCase):
def test_init(self) -> None:
"""__init__ assignemnts are initialized"""
code = """
from __strict__ import strict_slots
@strict_slots
class C:
def __init__(self):
self.x = 42
"""
mod = self.compile_to_strict(code)
inst = mod.C()
self.assertEqual(inst.x, 42)
with self.assertRaises(AttributeError):
inst.y = 100
def test_init_seq_tuple(self) -> None:
"""__init__ assignemnts are initialized"""
code = """
from __strict__ import strict_slots
@strict_slots
class C:
def __init__(self):
(self.x, self.y) = 42, 100
"""
mod = self.compile_to_strict(code)
inst = mod.C()
self.assertEqual(inst.x, 42)
self.assertEqual(inst.y, 100)
with self.assertRaises(AttributeError):
inst.z = 100
def test_init_seq_list(self) -> None:
"""__init__ assignemnts are initialized"""
code = """
from __strict__ import strict_slots
@strict_slots
class C:
def __init__(self):
[self.x, self.y] = 42, 100
"""
mod = self.compile_to_strict(code)
inst = mod.C()
self.assertEqual(inst.x, 42)
self.assertEqual(inst.y, 100)
with self.assertRaises(AttributeError):
inst.z = 100
def test_init_seq_nested(self) -> None:
"""__init__ assignemnts are initialized"""
code = """
from __strict__ import strict_slots
@strict_slots
class C:
def __init__(self):
[self.x, (self.y, self.z)] = 42, (100, 200)
"""
mod = self.compile_to_strict(code)
inst = mod.C()
self.assertEqual(inst.x, 42)
self.assertEqual(inst.y, 100)
self.assertEqual(inst.z, 200)
with self.assertRaises(AttributeError):
inst.w = 100
def test_init_self_renamed(self) -> None:
"""self doesn't need to be called self..."""
code = """
from __strict__ import strict_slots
@strict_slots
class C:
def __init__(weirdo):
weirdo.x = 42
"""
mod = self.compile_to_strict(code)
inst = mod.C()
self.assertEqual(inst.x, 42)
with self.assertRaises(AttributeError):
inst.y = 100
def test_init_ann(self) -> None:
"""__init__ annotated assignemnts are initialized"""
code = """
from __strict__ import strict_slots
@strict_slots
class C:
def __init__(self):
self.x: int = 42
self.y: int
self.init_y()
def init_y(self):
self.y = 100
"""
mod = self.compile_to_strict(code)
inst = mod.C()
self.assertEqual(inst.x, 42)
self.assertEqual(inst.y, 100)
def test_init_ann_self_renamed(self) -> None:
"""self doesn't need to be called self..."""
code = """
from __strict__ import strict_slots
@strict_slots
class C:
def __init__(weirdo):
weirdo.x: int = 42
"""
mod = self.compile_to_strict(code)
inst = mod.C()
self.assertEqual(inst.x, 42)
def test_class_ann(self) -> None:
"""class annotations w/o assignments get promoted to instance vars"""
code = """
from __strict__ import strict_slots
@strict_slots
class C:
x: int
def __init__(self):
self.init_x()
def init_x(self):
self.x = 42
"""
mod = self.compile_to_strict(code)
inst = mod.C()
self.assertEqual(inst.x, 42)
with self.assertRaises(AttributeError):
inst.y = 100
def test_class_ann_assigned(self) -> None:
"""class annotations w/ assignments aren't promoted"""
code = """
from __strict__ import strict_slots
@strict_slots
class C:
x: int = 42
"""
mod = self.compile_to_strict(code)
inst = mod.C()
with self.assertRaises(AttributeError):
inst.x = 100
def test_no_class_ann(self) -> None:
"""only __init__ assignments count"""
code = """
from __strict__ import strict_slots
@strict_slots
class C:
def __init__(self):
self.init_x()
def init_x(self):
self.x = 42
"""
mod = self.compile_to_strict(code)
with self.assertRaises(AttributeError):
mod.C()
def test_bad_init(self) -> None:
"""__init__ is missing self"""
code = """
from __strict__ import strict_slots
@strict_slots
class C:
def __init__():
self.x = 42
"""
mod = self.compile_to_strict(code)
with self.assertRaises(TypeError):
mod.C()
def test_bad_init_ann(self) -> None:
"""__init__ is missing self"""
code = """
from __strict__ import strict_slots
@strict_slots
class C:
def __init__():
self.x: int = 42
"""
mod = self.compile_to_strict(code)
with self.assertRaises(TypeError):
mod.C()
def test_fixed_module_import(self) -> None:
code = """
from typing import TypeVar
x = TypeVar('foo')
"""
modules = {"typing": {"TypeVar": TypeVar}}
globals = {"__name__": "test_fixed_module_import"}
mod = self.compile_to_strict(code, modules=modules, globals=globals)
self.assertEqual(mod.x.__name__, "foo")
def test_fixed_module_import_replaced(self) -> None:
@final
class FakeTypeVar:
def __init__(self, value: str) -> None:
self.value = value
code = """
from typing import TypeVar
x = TypeVar('foo')
"""
mod = self.compile_to_strict(code, modules={"typing": {"TypeVar": FakeTypeVar}})
self.assertEqual(mod.x.value, "foo")
def test_fixed_module_import_multiple_values(self) -> None:
code = """
from typing import TypeVar, Dict
"""
mod = self.compile_to_strict(
code, modules={"typing": {"TypeVar": 42, "Dict": 100}}
)
self.assertEqual(mod.TypeVar, 42)
self.assertEqual(mod.Dict, 100)
def test_fixed_module_unknown(self) -> None:
code = """
from typing import collections
"""
mod = self.compile_to_strict(code, modules={"typing": {"TypeVar": TypeVar}})
self.assertEqual(type(mod.collections), ModuleType)
def test_fixed_module_mixed_unknown(self) -> None:
code = """
from typing import collections, TypeVar
x = TypeVar('foo')
"""
modules = {"typing": {"TypeVar": TypeVar}}
globals = {"__name__": "test_fixed_module_mixed_unknown"}
mod = self.compile_to_strict(code, modules=modules, globals=globals)
self.assertEqual(type(mod.collections), ModuleType)
self.assertEqual(mod.x.__name__, "foo")
def test_private_members(self) -> None:
code = """
from __strict__ import strict_slots
@strict_slots
class C:
def __x(self):
return 42
def g(self):
return self.__x()
"""
mod = self.compile_to_strict(code)
a = mod.C()
self.assertEqual(a.g(), 42)
self.assertEqual(a._C__x(), 42)
def test_dotted_from_imports(self) -> None:
code = """
from xml.dom import SyntaxErr
"""
mod = self.compile_to_strict(code)
self.assertEqual(type(mod.SyntaxErr), type)
def test_dotted_imports(self) -> None:
code = """
import xml.dom
"""
mod = self.compile_to_strict(code)
self.assertEqual(type(mod.xml), ModuleType)
def test_future_imports(self) -> None:
code = """
from __future__ import annotations
def f():
def g() -> doesntexist:
return 1
return g
"""
mod = self.compile_to_strict(code)
self.assertEqual(1, mod.f()())
code = """
def f():
def g() -> doesntexist:
return 1
return g
"""
mod = self.compile_to_strict(code)
with self.assertRaises(NameError):
mod.f()
def test_decorator_with_generator(self) -> None:
code = """
def mydec(gen):
def myfunc(x):
for i in gen:
return x
return myfunc
@mydec(x for x in (1, 2, 3))
def f():
return 42
"""
mod = self.compile_to_strict(code)
self.assertEqual(mod.f(), 42)
def test_lambda(self) -> None:
code = """
x = lambda: 42
"""
mod = self.compile_to_strict(code)
self.assertEqual(mod.x(), 42)
def test_nested_lambda(self) -> None:
code = """
def f():
return lambda: 42
"""
mod = self.compile_to_strict(code)
self.assertEqual(mod.f()(), 42)
def test_nested_lambdas(self) -> None:
code = """
def f():
return lambda: 42, lambda: 100
"""
mod = self.compile_to_strict(code)
self.assertEqual(mod.f()[0](), 42)
self.assertEqual(mod.f()[1](), 100)
def test_nested_lambdas_and_funcs(self) -> None:
code = """
def f():
x = lambda: 42
def f():
return 100
return x, f
"""
mod = self.compile_to_strict(code)
self.assertEqual(mod.f()[0](), 42)
self.assertEqual(mod.f()[1](), 100)
def test_async_func(self) -> None:
code = """
async def f():
pass
"""
mod = self.compile_to_strict(code)
self.assertEqual(type(mod.f()), CoroutineType)
def test_async_func_shadowed(self) -> None:
code = """
async def min():
pass
"""
mod = self.compile_to_strict(code)
self.assertEqual(type(mod.min()), CoroutineType)
def test_class_shadowed(self) -> None:
code = """
from __strict__ import strict_slots
@strict_slots
class min:
pass
"""
mod = self.compile_to_strict(code)
self.assertEqual(type(mod.min), type)
def test_func_shadowed(self) -> None:
code = """
def min():
return 'abc'
"""
mod = self.compile_to_strict(code)
self.assertEqual(mod.min(), "abc")
def test_accessed_before_shadowed(self) -> None:
code = """
x = min(1,2)
def min():
return 'abc'
"""
mod = self.compile_to_strict(code)
self.assertEqual(mod.x, 1)
self.assertEqual(mod.min(), "abc")
def test_deleted_shadowed_func(self) -> None:
code = """
def min():
return 'abc'
del min
x = min(1,2)
"""
mod = self.compile_to_strict(code)
self.assertEqual(mod.x, 1)
self.assertFalse(hasattr(mod, "min"))
def test_deleted_shadowed_async_func(self) -> None:
code = """
async def min():
return 'abc'
del min
x = min(1,2)
"""
mod = self.compile_to_strict(code)
self.assertEqual(mod.x, 1)
self.assertFalse(hasattr(mod, "min"))
def test_deleted_shadowed_class(self) -> None:
code = """
class min:
pass
del min
x = min(1,2)
"""
mod = self.compile_to_strict(code)
self.assertEqual(mod.x, 1)
self.assertFalse(hasattr(mod, "min"))
def test_async_func_first(self) -> None:
code = """
async def f():
pass
def g():
pass
"""
mod = self.compile_to_strict(code)
self.assertEqual(type(mod.f()), CoroutineType)
self.assertEqual(type(mod.g), FunctionType)
def test_explicit_dict(self) -> None:
"""declaring __dict__ at the class level allows arbitrary attributes to be added"""
code = """
from typing import Dict, Any
from __strict__ import strict_slots
@strict_slots
class C:
__dict__: Dict[str, Any]
"""
mod = self.compile_to_strict(
code, modules={"typing": {"Dict": Dict, "Any": Any}}
)
inst = mod.C()
inst.x = 100
def test_explicit_weakref(self) -> None:
"""declaring __weakref__ at the class level allows weak references"""
code = """
from typing import Any
from __strict__ import strict_slots
@strict_slots
class C:
__weakref__: Any
"""
mod = self.compile_to_strict(code, modules={"typing": {"Any": Any}})
inst = mod.C()
r = ref(inst)
self.assertEqual(r(), inst)
def test_weakref(self) -> None:
"""lack of __weakref__ disallows weak references to instances"""
code = """
from __strict__ import strict_slots
@strict_slots
class C:
pass
"""
mod = self.compile_to_strict(code, modules={"typing": {"Any": Any}})
with self.assertRaises(TypeError):
ref(mod.C())
@final
class LazyLoadingTestCases(RewriterTestCase):
"""test cases which verify the behavior of lazy loading is the same as
non-lazy"""
def test_lazy_load_exception(self) -> None:
"""lazy code raising an exception should run"""
code = """
raise Exception('no way')
"""
with self.assertRaises(Exception) as e:
self.compile_to_strict(code)
self.assertEqual(e.exception.args[0], "no way")
def test_lazy_load_exception_2(self) -> None:
code = """
from __strict__ import strict_slots
@strict_slots
class MyException(Exception):
pass
raise MyException('no way')
"""
with self.assertRaises(Exception) as e:
self.compile_to_strict(code)
self.assertEqual(type(e.exception).__name__, "MyException")
def test_lazy_load_exception_3(self) -> None:
code = """
from pickle import PicklingError
raise PicklingError('no way')
"""
with self.assertRaises(Exception) as e:
self.compile_to_strict(code)
self.assertEqual(type(e.exception).__name__, "PicklingError")
def test_lazy_load_exception_4(self) -> None:
code = """
raise ShouldBeANameError()
"""
with self.assertRaises(NameError):
self.compile_to_strict(code)
def test_lazy_load_no_reinit(self) -> None:
"""only run earlier initialization once"""
code = """
try:
y.append(0)
except:
y = []
try:
y.append(1)
raise Exception()
except:
pass
z = y
"""
mod = self.compile_to_strict(code)
self.assertEqual(mod.z, [1])
def test_finish_initialization(self) -> None:
"""values need to be fully initialized upon their first access"""
code = """
x = 1
y = x
x = 2
"""
mod = self.compile_to_strict(code)
self.assertEqual(mod.y, 1)
self.assertEqual(mod.x, 2)
def test_full_initialization(self) -> None:
"""values need to be fully initialized upon their first access"""
code = """
x = 1
y = x
x = 2
"""
mod = self.compile_to_strict(code)
self.assertEqual(mod.x, 2)
self.assertEqual(mod.y, 1)
def test_transitive_closure(self) -> None:
"""we run the transitive closure of things required to be initialized"""
code = """
x = 1
y = x
z = y
"""
mod = self.compile_to_strict(code)
self.assertEqual(mod.z, 1)
def test_annotations(self) -> None:
"""annotations are properly initialized"""
code = """
x: int = 1
"""
mod = self.compile_to_strict(code)
self.assertEqual(mod.__annotations__, {"x": int})
self.assertEqual(mod.x, 1)
def test_annotations_no_value(self) -> None:
"""annotations are properly initialized w/o values"""
code = """
x: int
"""
mod = self.compile_to_strict(code)
self.assertEqual(mod.__annotations__, {"x": int})
with self.assertRaises(AttributeError):
mod.x
def test_annotations_del(self) -> None:
"""values deleted after use are deleted, when accessed after initial var"""
code = """
x = 1
y = x
del x
"""
mod = self.compile_to_strict(code)
self.assertEqual(mod.y, 1)
with self.assertRaises(AttributeError):
mod.x
def test_annotations_del_2(self) -> None:
"""deleted values are deleted when accessed initially, previous values are okay"""
code = """
x = 1
y = x
del x
"""
mod = self.compile_to_strict(code)
with self.assertRaises(AttributeError):
mod.x
self.assertEqual(mod.y, 1)
def test_forward_dep(self) -> None:
"""forward dependencies cause all values to be initialized"""
code = """
from __strict__ import strict_slots
@strict_slots
class C:
pass
C.x = 42
"""
mod = self.compile_to_strict(code)
self.assertEqual(mod.C.x, 42)
def test_not_init(self) -> None:
"""unassigned values don't show up (definite assignment would disallow this)"""
code = """
x = 1
if x != 1:
y = 2
"""
mod = self.compile_to_strict(code)
with self.assertRaises(AttributeError):
mod.y
def test_try_except_shadowed_handler_no_body_changes(self) -> None:
"""the try body doesn't get rewritten, but the except handler does"""
code = """
try:
x = 2
except Exception as min:
pass
"""
mod = self.compile_to_strict(code)
self.assertEqual(mod.x, 2)
self.assertFalse(hasattr(mod, "min"))
def test_insert_track_import_call(self) -> None:
"""
track import call is inserted to top of function
"""
code = """
def f():
pass
"""
tracker = set()
mod = self.compile_to_strict(
code, track_import_call=True, import_call_tracker=tracker
)
mod.f()
self.assertIn("foo", tracker)
async def test_insert_track_import_call_async(self) -> None:
"""
track import call is inserted to top of function
"""
code = """
async def f():
pass
"""
tracker = set()
mod = self.compile_to_strict(
code, track_import_call=True, import_call_tracker=tracker
)
await mod.f()
self.assertIn("foo", tracker)
| 25.893182 | 91 | 0.600456 |
ace6d5c06a3adb72e2ab94b7fa43fda693094e9a | 1,121 | py | Python | ros/src/util/packages/data_preprocessor/scripts/get_PCD.py | izeki/Autoware | 21dcd18c4166331c290bd573733e0b881ca29ad7 | [
"BSD-3-Clause"
] | 64 | 2018-11-19T02:34:05.000Z | 2021-12-27T06:19:48.000Z | ros/src/util/packages/data_preprocessor/scripts/get_PCD.py | anhnv3991/autoware | d5b2ed9dc309193c8a2a7c77a2b6c88104c28328 | [
"Apache-2.0"
] | 18 | 2019-04-08T16:09:37.000Z | 2019-06-05T15:24:40.000Z | ros/src/util/packages/data_preprocessor/scripts/get_PCD.py | anhnv3991/autoware | d5b2ed9dc309193c8a2a7c77a2b6c88104c28328 | [
"Apache-2.0"
] | 34 | 2018-11-27T08:57:32.000Z | 2022-02-18T08:06:04.000Z | #!/usr/bin/env python
import sys
import os
import rospy
import numpy as np
import cv2
import pcl
import sensor_msgs.point_cloud2 as pc2
from sensor_msgs.msg import PointCloud2
from cv_bridge import CvBridge
save_path = None
def cloud_loader(msg):
timestamp = msg.header.stamp.secs + ((msg.header.stamp.nsecs + 0.0) / 1000000000)
save_pcd(msg, timestamp, save_path)
def save_pcd(cloud, timestamp, path):
p = pcl.PointCloud(np.array(list(pc2.read_points(cloud)), dtype=np.float32)[:, 0:3])
p.to_file(path + '/pcd' + '_' + "{:.5f}".format(timestamp) + '.pcd')
def rosbag_data_extract_sample():
global save_path
try:
save_path = sys.argv[1]
topic = sys.argv[2]
except Exception, e:
#sys.exit("Please specify the save path. Example: rosbag_data_extract_unsync.py /media/0/output/")
save_path = './sample'
node_name = "get_%s_and_convert_to_PCD_data" % topic
rospy.init_node('rosbag_pcd_extract_unsync', anonymous=True)
rospy.Subscriber(topic, PointCloud2, cloud_loader)
rospy.spin()
if __name__ == '__main__':
rosbag_data_extract_sample()
| 28.74359 | 106 | 0.706512 |
ace6d820eaf2832776023c012c8181cf6242edca | 939 | py | Python | {{cookiecutter.project_slug}}/server/apps/handlers.py | Egor4ik325/cookiecutter-django-api | 34e4a923f2d3eaa3af4a58fbe230e8e4de4b4334 | [
"MIT"
] | 1 | 2022-03-28T16:26:01.000Z | 2022-03-28T16:26:01.000Z | {{cookiecutter.project_slug}}/server/apps/handlers.py | Egor4ik325/cookiecutter-django-api | 34e4a923f2d3eaa3af4a58fbe230e8e4de4b4334 | [
"MIT"
] | null | null | null | {{cookiecutter.project_slug}}/server/apps/handlers.py | Egor4ik325/cookiecutter-django-api | 34e4a923f2d3eaa3af4a58fbe230e8e4de4b4334 | [
"MIT"
] | null | null | null | from django.core.exceptions import ValidationError as DjangoValidationError
from rest_framework.exceptions import ValidationError as DRFValidationError
from rest_framework.views import exception_handler as drf_exception_handler
def exception_handler(exception, context):
"""Extended Django REST Framework error handler to handle `DjangoValidationError` raised from the model `.save()`
i.e. from the DRF `view` (`perform create/update`) method outside DRF `.validate()`.
"""
# def transform_exceptions(exception):
if isinstance(exception, DjangoValidationError):
if hasattr(exception, "message_dict"):
detail = exception.message_dict
elif hasattr(exception, "message"):
detail = exception.message
else:
detail = "Some other exception have happed."
exception = DRFValidationError(detail=detail)
return drf_exception_handler(exception, context)
| 40.826087 | 117 | 0.735889 |
ace6d8da53e909c28cfc24ebf9efdf61944bf215 | 2,341 | py | Python | s3prl/downstream/speech_translation/AdditionalDataset.py | hhhaaahhhaa/s3prl | a469787f05c42196c4d989555082f5fd9dcbe8a6 | [
"Apache-2.0"
] | 856 | 2021-01-15T15:40:32.000Z | 2022-03-31T07:08:17.000Z | s3prl/downstream/speech_translation/AdditionalDataset.py | hhhaaahhhaa/s3prl | a469787f05c42196c4d989555082f5fd9dcbe8a6 | [
"Apache-2.0"
] | 210 | 2021-01-15T13:28:50.000Z | 2022-03-30T06:13:51.000Z | s3prl/downstream/speech_translation/AdditionalDataset.py | hhhaaahhhaa/s3prl | a469787f05c42196c4d989555082f5fd9dcbe8a6 | [
"Apache-2.0"
] | 208 | 2021-01-15T03:03:12.000Z | 2022-03-31T08:33:27.000Z | import fairseq
from fairseq.data import Dictionary, encoders
import csv
from argparse import Namespace
import torch
class AdditionalDataset:
@classmethod
def from_tsv(cls, file, key, bpe_tokenizer=None, pre_tokenizer=None):
data = []
with open(file, 'r') as file:
reader = csv.DictReader(file,
delimiter="\t",
quotechar=None,
doublequote=False,
lineterminator="\n",
quoting=csv.QUOTE_NONE,
)
for line in reader:
data.append(line[key])
return cls(data, bpe_tokenizer, pre_tokenizer)
def __init__(self, data, dictionary, bpe_tokenizer=None, pre_tokenizer=None):
self.data = data
self.bpe_tokenizer = bpe_tokenizer
self.pre_tokenizer = pre_tokenizer
self.dictionary = dictionary
def _create_target(self, index):
tokenized = self._tokenize_text(self.data[index])
target = self.dictionary.encode_line(
tokenized, add_if_not_exist=False, append_eos=True
).long()
return target
def get_addtional_input(self, id_list):
target = [self._create_target(id) for id in id_list]
batched_target = fairseq.data.data_utils.collate_tokens(
target,
self.dictionary.pad(),
self.dictionary.eos(),
left_pad=False,
move_eos_to_beginning=False,
)
target_lengths = torch.tensor(
[t.size(0) for t in target], dtype=torch.long
)
prev_output_tokens = fairseq.data.data_utils.collate_tokens(
target,
self.dictionary.pad(),
self.dictionary.eos(),
left_pad=False,
move_eos_to_beginning=True,
)
ntokens = sum(t.size(0) for t in target)
return {
"target": batched_target,
"prev_output_tokens": prev_output_tokens,
"target_lengths": target_lengths,
"ntokens": ntokens,
}
def _tokenize_text(self, text):
if self.pre_tokenizer is not None:
text = self.pre_tokenizer.encode(text)
if self.bpe_tokenizer is not None:
text = self.bpe_tokenizer.encode(text)
return text
| 28.901235 | 81 | 0.584366 |
ace6da112a0063437f3ce5fe8cfd949b9975a251 | 5,272 | py | Python | quark/plugin_modules/segment_allocation_ranges.py | roaet/quark | 229843006f335fcb725b369b1b78f964ac587fb4 | [
"Apache-2.0"
] | null | null | null | quark/plugin_modules/segment_allocation_ranges.py | roaet/quark | 229843006f335fcb725b369b1b78f964ac587fb4 | [
"Apache-2.0"
] | null | null | null | quark/plugin_modules/segment_allocation_ranges.py | roaet/quark | 229843006f335fcb725b369b1b78f964ac587fb4 | [
"Apache-2.0"
] | null | null | null | # Copyright 2013 Rackspace Hosting Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib import exceptions as n_exc
from oslo_log import log as logging
from quark.db import api as db_api
from quark import exceptions as q_exc
from quark import plugin_views as v
from quark import segment_allocations
SA_REGISTRY = segment_allocations.REGISTRY
LOG = logging.getLogger(__name__)
def get_segment_allocation_range(context, id, fields=None):
LOG.info("get_segment_allocation_range %s for tenant %s fields %s" %
(id, context.tenant_id, fields))
if not context.is_admin:
raise n_exc.NotAuthorized()
sa_range = db_api.segment_allocation_range_find(
context, id=id, scope=db_api.ONE)
if not sa_range:
raise q_exc.SegmentAllocationRangeNotFound(
segment_allocation_range_id=id)
# Count up allocations so we can calculate how many are free.
allocs = db_api.segment_allocation_find(
context,
segment_allocation_range_id=sa_range["id"],
deallocated=False).count()
return v._make_segment_allocation_range_dict(
sa_range, allocations=allocs)
def get_segment_allocation_ranges(context, **filters):
LOG.info("get_segment_allocation_ranges for tenant %s" % context.tenant_id)
if not context.is_admin:
raise n_exc.NotAuthorized()
sa_ranges = db_api.segment_allocation_range_find(
context, scope=db_api.ALL, **filters)
return [v._make_segment_allocation_range_dict(m) for m in sa_ranges]
def create_segment_allocation_range(context, sa_range):
LOG.info("create_segment_allocation_range for tenant %s"
% context.tenant_id)
if not context.is_admin:
raise n_exc.NotAuthorized()
sa_range = sa_range.get("segment_allocation_range")
if not sa_range:
raise n_exc.BadRequest(resource="segment_allocation_range",
msg=("segment_allocation_range not in "
"request body."))
# TODO(morgabra) Figure out how to get the api extension to validate this
# for us.
# parse required fields
for k in ["first_id", "last_id", "segment_id", "segment_type"]:
sa_range[k] = sa_range.get(k, None)
if sa_range[k] is None:
raise n_exc.BadRequest(
resource="segment_allocation_range",
msg=("Missing required key %s in request body." % (k)))
# parse optional fields
for k in ["do_not_use"]:
sa_range[k] = sa_range.get(k, None)
# use the segment registry to validate and create/populate the range
if not SA_REGISTRY.is_valid_strategy(sa_range["segment_type"]):
raise n_exc.BadRequest(
resource="segment_allocation_range",
msg=("Unknown segment type '%s'" % (k)))
strategy = SA_REGISTRY.get_strategy(sa_range["segment_type"])
# Create the new range
with context.session.begin():
new_range = strategy.create_range(context, sa_range)
# Bulk-populate the range, this could take a while for huge ranges
# (millions) so we do this in chunks outside the transaction. That
# means we need to rollback the range creation if it fails for
# whatever reason (and it will cascade delete any added allocations)
try:
strategy.populate_range(context, new_range)
except Exception:
LOG.exception("Failed to populate segment allocation range.")
delete_segment_allocation_range(context, new_range["id"])
raise
return v._make_segment_allocation_range_dict(new_range)
def _delete_segment_allocation_range(context, sa_range):
allocs = db_api.segment_allocation_find(
context,
segment_allocation_range_id=sa_range["id"],
deallocated=False).count()
if allocs:
raise q_exc.SegmentAllocationRangeInUse(
segment_allocation_range_id=sa_range["id"])
db_api.segment_allocation_range_delete(context, sa_range)
def delete_segment_allocation_range(context, sa_id):
"""Delete a segment_allocation_range.
: param context: neutron api request context
: param id: UUID representing the segment_allocation_range to delete.
"""
LOG.info("delete_segment_allocation_range %s for tenant %s" %
(sa_id, context.tenant_id))
if not context.is_admin:
raise n_exc.NotAuthorized()
with context.session.begin():
sa_range = db_api.segment_allocation_range_find(
context, id=sa_id, scope=db_api.ONE)
if not sa_range:
raise q_exc.SegmentAllocationRangeNotFound(
segment_allocation_range_id=sa_id)
_delete_segment_allocation_range(context, sa_range)
| 36.358621 | 79 | 0.700114 |
ace6da324afe3acd85a4bbddf0e8ad3cd6a75978 | 339 | py | Python | capitulo-05/ex26.py | bryan-lima/exercicios-livro-introd-prog-python-3ed | b6bc26dced9728510865704a80cb0d97f81f756b | [
"MIT"
] | 3 | 2021-11-09T17:54:10.000Z | 2022-01-30T22:32:25.000Z | capitulo-05/ex26.py | bryan-lima/exercicios-livro-introd-prog-python-3ed | b6bc26dced9728510865704a80cb0d97f81f756b | [
"MIT"
] | null | null | null | capitulo-05/ex26.py | bryan-lima/exercicios-livro-introd-prog-python-3ed | b6bc26dced9728510865704a80cb0d97f81f756b | [
"MIT"
] | null | null | null | # Escreva um programa que calcule o resto da divisão inteira entre dois números
# Utilize apenas as operações de soma e subtração para calcular o resultado
dividend = int(input('Dividendo: '))
divider = int(input('Divisor: '))
n = dividend
while n >= divider:
n -= divider
rest = n
print(f"{dividend} / {divider} = {rest} (resto)")
| 26.076923 | 79 | 0.705015 |
ace6da9b9a3c0f48171eeea0a2e43b746ead0722 | 1,039 | py | Python | huxley/api/tests/test_reset_password.py | srisainachuri/huxley | 7166a1423e49b506d6d5f142c748eac4e5d2314c | [
"BSD-3-Clause"
] | 18 | 2015-07-12T00:55:51.000Z | 2021-12-13T15:41:06.000Z | huxley/api/tests/test_reset_password.py | srisainachuri/huxley | 7166a1423e49b506d6d5f142c748eac4e5d2314c | [
"BSD-3-Clause"
] | 288 | 2015-01-13T23:05:09.000Z | 2022-03-25T17:35:36.000Z | huxley/api/tests/test_reset_password.py | srisainachuri/huxley | 7166a1423e49b506d6d5f142c748eac4e5d2314c | [
"BSD-3-Clause"
] | 47 | 2015-05-12T15:39:57.000Z | 2022-03-30T09:12:48.000Z | # Copyright (c) 2011-2015 Berkeley Model United Nations. All rights reserved.
# Use of this source code is governed by a BSD License (see LICENSE).
from rest_framework import status
from huxley.api.tests import CreateAPITestCase
from huxley.utils.test import models
class ResetPasswordTestCase(CreateAPITestCase):
url_name = 'api:user_password'
is_resource = False
params = {'username': 'mikejones'}
def setUp(self):
self.user = models.new_user(username='mikejones', email='who@mj.com')
def test_username(self):
response = self.get_response()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_email(self):
params = self.get_params(username='who@mj.com')
response = self.get_response(params=params)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_nonexistant(self):
params = self.get_params(username='nobody')
response = self.get_response(params=params)
self.assertNotFound(response)
| 33.516129 | 77 | 0.720885 |
ace6dadd6194418b1f0aebcbf9e0004bd85f493c | 3,270 | py | Python | visualizer/app/routes.py | gudgud96/AI-Music-Composition | 626f4713b0d60400ce2d588d6d96e9dd98f74625 | [
"MIT"
] | 9 | 2019-04-19T05:59:50.000Z | 2022-03-04T03:22:54.000Z | visualizer/app/routes.py | gudgud96/AI-Music-Composition | 626f4713b0d60400ce2d588d6d96e9dd98f74625 | [
"MIT"
] | 1 | 2022-03-04T15:08:03.000Z | 2022-03-04T15:08:03.000Z | visualizer/app/routes.py | gudgud96/AI-Music-Composition | 626f4713b0d60400ce2d588d6d96e9dd98f74625 | [
"MIT"
] | 2 | 2021-10-21T04:48:25.000Z | 2022-03-04T03:17:23.000Z | import sys,os
sys.path.append('\\'.join(os.getcwd().split('\\')[:-1]))
import datetime
from app import app
from flask import render_template, request, flash
from generator.song_generator import generate_song, song_styling
import os
app.secret_key = b'_5#y2L"F4Q8z\n\xec]/'
MODEL_DICT = {"Bidirectional": "bidem", "Regularized": "bidem_regularized",
"Embedding": "bidem_preload", "Seq2seq": "seq2seq"}
@app.route('/')
@app.route('/index')
def index():
return render_template('test_generate.html')
@app.route("/play", methods=['POST'])
def play():
# Get chords and bar numbers from user
chords = request.form['chords']
model_name = "bidem" # bidem or bidem_preload. bidem sounds better for now
style = request.form['style'].lower()
bar_number = 32 if '32' in request.form['bar_number'] else 16
if style == '':
style = 'strings'
# Preprocess chords and bar numbers
if chords == '':
generate_song(style=style, model_name=model_name, bar_number=bar_number)
else:
chord_lst = chords.replace(' ', '').split(',')
for i in range(len(chord_lst)):
key, tonality = chord_lst[i][:-1], chord_lst[i][-1]
tonality = 'maj' if tonality == '+' else 'min'
key = key.upper()
if key[-1] == "B" and len(key) == 2:
key = key[:-1] + "b" # don't make flat sign uppercase
chord_lst[i] = key + ':' + tonality
generate_song(chords=chord_lst, style=style, model_name=model_name, bar_number=bar_number)
# Move generated song files
folder_name = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
os.mkdir('app/static/' + folder_name + '/')
os.rename('melody.mid', 'app/static/' + folder_name + '/melody.mid')
os.rename('chords.mid', 'app/static/' + folder_name + '/chords.mid')
os.rename('song.mid', 'app/static/' + folder_name + '/song.mid')
# os.rename('song.wav', 'app/static/' + folder_name + '/song.wav')
os.rename('example_chord.txt', 'app/static/' + folder_name + '/example_chord.txt')
os.rename('result_chord.txt', 'app/static/' + folder_name + '/result_chord.txt')
message = "MIDIjs.play('static/" + folder_name + "/song.mid');"
download_link = 'static/' + folder_name + "/song.mid"
return render_template('test_play.html', message=message,
style=None, download_link=download_link)
@app.route("/style", methods=['POST'])
def style():
    """Re-render the most recently generated song with a different style.

    Looks up the newest generated-song folder under app/static/, runs the
    styling step on its melody/chords MIDI files, and renders the player
    page pointing at the newly styled song file.
    """
    style = request.form['style']
    # Pick the most recently generated song folder.  Folder names are
    # "%Y-%m-%d-%H-%M-%S" timestamps, so the lexicographic maximum is the
    # newest.  (Previously this used os.listdir()[-1], whose order is
    # unspecified, and redundantly re-listed the directory a second time.)
    songs = os.listdir('app/static')
    songs.remove('styles')  # 'styles' holds style assets, not a generated song
    song = max(songs)
    melody_file = 'app/static/{}/melody.mid'.format(song)
    chord_file = 'app/static/{}/chords.mid'.format(song)
    song_file = 'app/static/{}/song-{}.mid'.format(song, style.lower())
    song_styling(melody_file, chord_file, song_file, style=style.lower())
    message = "MIDIjs.play('static/" + song + "/song-{}.mid');".format(style.lower())
    download_link = 'static/' + song + '/song-{}.mid'.format(style.lower())
    return render_template('test_play.html', message=message,
                           style=style, download_link=download_link)
ace6daf40ed0119d183bd6dbde1ad95bb9ac4ade | 14,682 | py | Python | test/functional/interface_rest.py | satcoin-dev/satcoin | a68f5965a8c28cfcaf8855a661ea3f15de9ae7d5 | [
"MIT"
] | 4 | 2021-02-28T04:34:58.000Z | 2021-09-14T15:25:31.000Z | test/functional/interface_rest.py | satcoin-dev/satcoin | a68f5965a8c28cfcaf8855a661ea3f15de9ae7d5 | [
"MIT"
] | null | null | null | test/functional/interface_rest.py | satcoin-dev/satcoin | a68f5965a8c28cfcaf8855a661ea3f15de9ae7d5 | [
"MIT"
] | 1 | 2021-06-18T13:13:17.000Z | 2021-06-18T13:13:17.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the REST API."""
import binascii
from decimal import Decimal
from enum import Enum
from io import BytesIO
import json
from struct import pack, unpack
import http.client
import urllib.parse
from test_framework.test_framework import SatcoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_greater_than_or_equal,
hex_str_to_bytes,
)
from test_framework.messages import BLOCK_HEADER_SIZE
class ReqType(Enum):
    """Response wire format to request from the REST endpoint (URI suffix)."""
    JSON = 1  # append '.json' to the REST URI
    BIN = 2   # append '.bin'
    HEX = 3   # append '.hex'
class RetType(Enum):
    """How test_rest_request should return the HTTP response."""
    OBJ = 1    # the raw http.client response object
    BYTES = 2  # the response body as bytes
    JSON = 3   # the body parsed as JSON (floats as Decimal)
def filter_output_indices_by_value(vouts, value):
    """Yield the 'n' output index of every vout whose 'value' equals *value*."""
    matching = (vout for vout in vouts if vout['value'] == value)
    for match in matching:
        yield match['n']
class RESTTest (SatcoinTestFramework):
    """Functional test of the node's REST interface (-rest).

    Covers /tx, /getutxos (json, binary and checkmempool variants), /block,
    /headers, /blockhashbyheight, /mempool and /chaininfo endpoints.
    """

    def set_test_params(self):
        # Two nodes from a clean chain; only node 0 enables -rest.
        self.setup_clean_chain = True
        self.num_nodes = 2
        self.extra_args = [["-rest"], []]
        self.supports_cli = False

    def skip_test_if_missing_module(self):
        # The test sends coins, so it needs wallet support compiled in.
        self.skip_if_no_wallet()

    def test_rest_request(self, uri, http_method='GET', req_type=ReqType.JSON, body='', status=200, ret_type=RetType.JSON):
        """Issue one REST request and check its HTTP status.

        uri: path below /rest (without format suffix)
        http_method: 'GET' or 'POST'
        req_type: ReqType, selects the .json/.bin/.hex URI suffix
        body: POST payload
        status: expected HTTP status code
        ret_type: RetType, selects how the response is returned
        """
        rest_uri = '/rest' + uri
        if req_type == ReqType.JSON:
            rest_uri += '.json'
        elif req_type == ReqType.BIN:
            rest_uri += '.bin'
        elif req_type == ReqType.HEX:
            rest_uri += '.hex'

        conn = http.client.HTTPConnection(self.url.hostname, self.url.port)
        self.log.debug('%s %s %s', http_method, rest_uri, body)
        if http_method == 'GET':
            conn.request('GET', rest_uri)
        elif http_method == 'POST':
            conn.request('POST', rest_uri, body)
        resp = conn.getresponse()

        assert_equal(resp.status, status)

        if ret_type == RetType.OBJ:
            return resp
        elif ret_type == RetType.BYTES:
            return resp.read()
        elif ret_type == RetType.JSON:
            return json.loads(resp.read().decode('utf-8'), parse_float=Decimal)

    def run_test(self):
        """Exercise every REST endpoint against node 0."""
        self.url = urllib.parse.urlparse(self.nodes[0].url)
        self.log.info("Mine blocks and send Satcoin to node 1")

        # Random address so node1's balance doesn't increase
        not_related_address = "2MxqoHEdNQTyYeX1mHcbrrpzgojbosTpCvJ"

        self.nodes[0].generate(1)
        self.sync_all()
        self.nodes[1].generatetoaddress(100, not_related_address)
        self.sync_all()

        assert_equal(self.nodes[0].getbalance(), 50)

        txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
        self.sync_all()

        self.log.info("Test the /tx URI")

        json_obj = self.test_rest_request("/tx/{}".format(txid))
        assert_equal(json_obj['txid'], txid)

        # Check hex format response
        hex_response = self.test_rest_request("/tx/{}".format(txid), req_type=ReqType.HEX, ret_type=RetType.OBJ)
        assert_greater_than_or_equal(int(hex_response.getheader('content-length')),
                                     json_obj['size']*2)

        spent = (json_obj['vin'][0]['txid'], json_obj['vin'][0]['vout'])  # get the vin to later check for utxo (should be spent by then)
        # get n of 0.1 outpoint
        n, = filter_output_indices_by_value(json_obj['vout'], Decimal('0.1'))
        spending = (txid, n)

        self.log.info("Query an unspent TXO using the /getutxos URI")

        self.nodes[1].generatetoaddress(1, not_related_address)
        self.sync_all()
        bb_hash = self.nodes[0].getbestblockhash()

        assert_equal(self.nodes[1].getbalance(), Decimal("0.1"))

        # Check chainTip response
        json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spending))
        assert_equal(json_obj['chaintipHash'], bb_hash)

        # Make sure there is one utxo
        assert_equal(len(json_obj['utxos']), 1)
        assert_equal(json_obj['utxos'][0]['value'], Decimal('0.1'))

        self.log.info("Query a spent TXO using the /getutxos URI")

        json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spent))

        # Check chainTip response
        assert_equal(json_obj['chaintipHash'], bb_hash)

        # Make sure there is no utxo in the response because this outpoint has been spent
        assert_equal(len(json_obj['utxos']), 0)

        # Check bitmap
        assert_equal(json_obj['bitmap'], "0")

        self.log.info("Query two TXOs using the /getutxos URI")

        json_obj = self.test_rest_request("/getutxos/{}-{}/{}-{}".format(*(spending + spent)))

        assert_equal(len(json_obj['utxos']), 1)
        assert_equal(json_obj['bitmap'], "10")

        self.log.info("Query the TXOs using the /getutxos URI with a binary response")

        bin_request = b'\x01\x02'
        for txid, n in [spending, spent]:
            bin_request += hex_str_to_bytes(txid)
            bin_request += pack("i", n)

        bin_response = self.test_rest_request("/getutxos", http_method='POST', req_type=ReqType.BIN, body=bin_request, ret_type=RetType.BYTES)
        output = BytesIO(bin_response)
        chain_height, = unpack("<i", output.read(4))
        response_hash = output.read(32)[::-1].hex()

        assert_equal(bb_hash, response_hash)  # check if getutxo's chaintip during calculation was fine
        assert_equal(chain_height, 102)  # chain height must be 102

        self.log.info("Test the /getutxos URI with and without /checkmempool")
        # Create a transaction, check that it's found with /checkmempool, but
        # not found without. Then confirm the transaction and check that it's
        # found with or without /checkmempool.

        # do a tx and don't sync
        txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
        json_obj = self.test_rest_request("/tx/{}".format(txid))
        # get the spent output to later check for utxo (should be spent by then)
        spent = (json_obj['vin'][0]['txid'], json_obj['vin'][0]['vout'])
        # get n of 0.1 outpoint
        n, = filter_output_indices_by_value(json_obj['vout'], Decimal('0.1'))
        spending = (txid, n)

        json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spending))
        assert_equal(len(json_obj['utxos']), 0)

        json_obj = self.test_rest_request("/getutxos/checkmempool/{}-{}".format(*spending))
        assert_equal(len(json_obj['utxos']), 1)

        json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spent))
        assert_equal(len(json_obj['utxos']), 1)

        json_obj = self.test_rest_request("/getutxos/checkmempool/{}-{}".format(*spent))
        assert_equal(len(json_obj['utxos']), 0)

        self.nodes[0].generate(1)
        self.sync_all()

        json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spending))
        assert_equal(len(json_obj['utxos']), 1)

        json_obj = self.test_rest_request("/getutxos/checkmempool/{}-{}".format(*spending))
        assert_equal(len(json_obj['utxos']), 1)

        # Do some invalid requests
        self.test_rest_request("/getutxos", http_method='POST', req_type=ReqType.JSON, body='{"checkmempool', status=400, ret_type=RetType.OBJ)
        self.test_rest_request("/getutxos", http_method='POST', req_type=ReqType.BIN, body='{"checkmempool', status=400, ret_type=RetType.OBJ)
        self.test_rest_request("/getutxos/checkmempool", http_method='POST', req_type=ReqType.JSON, status=400, ret_type=RetType.OBJ)

        # Test limits
        long_uri = '/'.join(["{}-{}".format(txid, n_) for n_ in range(20)])
        self.test_rest_request("/getutxos/checkmempool/{}".format(long_uri), http_method='POST', status=400, ret_type=RetType.OBJ)

        long_uri = '/'.join(['{}-{}'.format(txid, n_) for n_ in range(15)])
        self.test_rest_request("/getutxos/checkmempool/{}".format(long_uri), http_method='POST', status=200)

        self.nodes[0].generate(1)  # generate block to not affect upcoming tests
        self.sync_all()

        self.log.info("Test the /block, /blockhashbyheight and /headers URIs")
        bb_hash = self.nodes[0].getbestblockhash()

        # Check result if block does not exists
        assert_equal(self.test_rest_request('/headers/1/0000000000000000000000000000000000000000000000000000000000000000'), [])
        self.test_rest_request('/block/0000000000000000000000000000000000000000000000000000000000000000', status=404, ret_type=RetType.OBJ)

        # Check result if block is not in the active chain
        self.nodes[0].invalidateblock(bb_hash)
        assert_equal(self.test_rest_request('/headers/1/{}'.format(bb_hash)), [])
        self.test_rest_request('/block/{}'.format(bb_hash))
        self.nodes[0].reconsiderblock(bb_hash)

        # Check binary format
        response = self.test_rest_request("/block/{}".format(bb_hash), req_type=ReqType.BIN, ret_type=RetType.OBJ)
        assert_greater_than(int(response.getheader('content-length')), BLOCK_HEADER_SIZE)
        response_bytes = response.read()

        # Compare with block header
        response_header = self.test_rest_request("/headers/1/{}".format(bb_hash), req_type=ReqType.BIN, ret_type=RetType.OBJ)
        assert_equal(int(response_header.getheader('content-length')), BLOCK_HEADER_SIZE)
        response_header_bytes = response_header.read()
        assert_equal(response_bytes[:BLOCK_HEADER_SIZE], response_header_bytes)

        # Check block hex format
        response_hex = self.test_rest_request("/block/{}".format(bb_hash), req_type=ReqType.HEX, ret_type=RetType.OBJ)
        assert_greater_than(int(response_hex.getheader('content-length')), BLOCK_HEADER_SIZE*2)
        response_hex_bytes = response_hex.read().strip(b'\n')
        assert_equal(binascii.hexlify(response_bytes), response_hex_bytes)

        # Compare with hex block header
        response_header_hex = self.test_rest_request("/headers/1/{}".format(bb_hash), req_type=ReqType.HEX, ret_type=RetType.OBJ)
        assert_greater_than(int(response_header_hex.getheader('content-length')), BLOCK_HEADER_SIZE*2)
        response_header_hex_bytes = response_header_hex.read(BLOCK_HEADER_SIZE*2)
        assert_equal(binascii.hexlify(response_bytes[:BLOCK_HEADER_SIZE]), response_header_hex_bytes)

        # Check json format
        block_json_obj = self.test_rest_request("/block/{}".format(bb_hash))
        assert_equal(block_json_obj['hash'], bb_hash)
        assert_equal(self.test_rest_request("/blockhashbyheight/{}".format(block_json_obj['height']))['blockhash'], bb_hash)

        # Check hex/bin format
        resp_hex = self.test_rest_request("/blockhashbyheight/{}".format(block_json_obj['height']), req_type=ReqType.HEX, ret_type=RetType.OBJ)
        assert_equal(resp_hex.read().decode('utf-8').rstrip(), bb_hash)
        resp_bytes = self.test_rest_request("/blockhashbyheight/{}".format(block_json_obj['height']), req_type=ReqType.BIN, ret_type=RetType.BYTES)
        blockhash = resp_bytes[::-1].hex()
        assert_equal(blockhash, bb_hash)

        # Check invalid blockhashbyheight requests
        resp = self.test_rest_request("/blockhashbyheight/abc", ret_type=RetType.OBJ, status=400)
        assert_equal(resp.read().decode('utf-8').rstrip(), "Invalid height: abc")
        resp = self.test_rest_request("/blockhashbyheight/1000000", ret_type=RetType.OBJ, status=404)
        assert_equal(resp.read().decode('utf-8').rstrip(), "Block height out of range")
        resp = self.test_rest_request("/blockhashbyheight/-1", ret_type=RetType.OBJ, status=400)
        assert_equal(resp.read().decode('utf-8').rstrip(), "Invalid height: -1")
        self.test_rest_request("/blockhashbyheight/", ret_type=RetType.OBJ, status=400)

        # Compare with json block header
        json_obj = self.test_rest_request("/headers/1/{}".format(bb_hash))
        assert_equal(len(json_obj), 1)  # ensure that there is one header in the json response
        assert_equal(json_obj[0]['hash'], bb_hash)  # request/response hash should be the same

        # Compare with normal RPC block response
        rpc_block_json = self.nodes[0].getblock(bb_hash)
        for key in ['hash', 'confirmations', 'height', 'version', 'merkleroot', 'time', 'nonce', 'bits', 'difficulty', 'chainwork', 'previousblockhash']:
            assert_equal(json_obj[0][key], rpc_block_json[key])

        # See if we can get 5 headers in one response
        self.nodes[1].generate(5)
        self.sync_all()
        json_obj = self.test_rest_request("/headers/5/{}".format(bb_hash))
        assert_equal(len(json_obj), 5)  # now we should have 5 header objects

        self.log.info("Test tx inclusion in the /mempool and /block URIs")

        # Make 3 tx and mine them on node 1
        txs = []
        txs.append(self.nodes[0].sendtoaddress(not_related_address, 11))
        txs.append(self.nodes[0].sendtoaddress(not_related_address, 11))
        txs.append(self.nodes[0].sendtoaddress(not_related_address, 11))
        self.sync_all()

        # Check that there are exactly 3 transactions in the TX memory pool before generating the block
        json_obj = self.test_rest_request("/mempool/info")
        assert_equal(json_obj['size'], 3)
        # the size of the memory pool should be greater than 3x ~100 bytes
        assert_greater_than(json_obj['bytes'], 300)

        # Check that there are our submitted transactions in the TX memory pool
        json_obj = self.test_rest_request("/mempool/contents")
        for i, tx in enumerate(txs):
            assert tx in json_obj
            assert_equal(json_obj[tx]['spentby'], txs[i + 1:i + 2])
            assert_equal(json_obj[tx]['depends'], txs[i - 1:i])

        # Now mine the transactions
        newblockhash = self.nodes[1].generate(1)
        self.sync_all()

        # Check if the 3 tx show up in the new block
        json_obj = self.test_rest_request("/block/{}".format(newblockhash[0]))
        non_coinbase_txs = {tx['txid'] for tx in json_obj['tx']
                            if 'coinbase' not in tx['vin'][0]}
        assert_equal(non_coinbase_txs, set(txs))

        # Check the same but without tx details
        json_obj = self.test_rest_request("/block/notxdetails/{}".format(newblockhash[0]))
        for tx in txs:
            assert tx in json_obj['tx']

        self.log.info("Test the /chaininfo URI")

        bb_hash = self.nodes[0].getbestblockhash()

        json_obj = self.test_rest_request("/chaininfo")
        assert_equal(json_obj['bestblockhash'], bb_hash)
if __name__ == '__main__':
    # Allow running this functional test directly from the command line.
    RESTTest().main()
| 44.490909 | 153 | 0.661082 |
ace6db595cbd970273fc701766a329ed289bec7c | 6,989 | py | Python | ansible/my_env/lib/python2.7/site-packages/ansible/modules/cloud/openstack/os_member.py | otus-devops-2019-02/yyashkin_infra | 0cd0c003884155ac922e3e301305ac202de7028c | [
"MIT"
] | 1 | 2019-04-16T21:23:15.000Z | 2019-04-16T21:23:15.000Z | ansible/my_env/lib/python2.7/site-packages/ansible/modules/cloud/openstack/os_member.py | otus-devops-2019-02/yyashkin_infra | 0cd0c003884155ac922e3e301305ac202de7028c | [
"MIT"
] | 5 | 2020-02-26T20:10:50.000Z | 2021-09-23T23:23:18.000Z | ansible/my_env/lib/python2.7/site-packages/ansible/modules/cloud/openstack/os_member.py | otus-devops-2019-02/yyashkin_infra | 0cd0c003884155ac922e3e301305ac202de7028c | [
"MIT"
] | 1 | 2020-02-13T14:24:57.000Z | 2020-02-13T14:24:57.000Z | #!/usr/bin/python
# Copyright (c) 2018 Catalyst Cloud Ltd.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_member
short_description: Add/Delete a member for a pool in load balancer from OpenStack Cloud
extends_documentation_fragment: openstack
version_added: "2.7"
author: "Lingxian Kong (@lingxiankong)"
description:
- Add or Remove a member for a pool from the OpenStack load-balancer service.
options:
name:
description:
- Name that has to be given to the member
required: true
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
pool:
description:
- The name or id of the pool that this member belongs to.
required: true
protocol_port:
description:
- The protocol port number for the member.
default: 80
address:
description:
- The IP address of the member.
subnet_id:
description:
- The subnet ID the member service is accessible from.
wait:
description:
- If the module should wait for the load balancer to be ACTIVE.
type: bool
default: 'yes'
timeout:
description:
- The amount of time the module should wait for the load balancer to get
into ACTIVE state.
default: 180
availability_zone:
description:
- Ignored. Present for backwards compatibility
requirements: ["openstacksdk"]
'''
RETURN = '''
id:
description: The member UUID.
returned: On success when I(state) is 'present'
type: string
sample: "39007a7e-ee4f-4d13-8283-b4da2e037c69"
member:
description: Dictionary describing the member.
returned: On success when I(state) is 'present'
type: complex
contains:
id:
description: Unique UUID.
type: string
sample: "39007a7e-ee4f-4d13-8283-b4da2e037c69"
name:
description: Name given to the member.
type: string
sample: "test"
description:
description: The member description.
type: string
sample: "description"
provisioning_status:
description: The provisioning status of the member.
type: string
sample: "ACTIVE"
operating_status:
description: The operating status of the member.
type: string
sample: "ONLINE"
is_admin_state_up:
description: The administrative state of the member.
type: bool
sample: true
protocol_port:
description: The protocol port number for the member.
type: int
sample: 80
subnet_id:
description: The subnet ID the member service is accessible from.
type: string
sample: "489247fa-9c25-11e8-9679-00224d6b7bc1"
address:
description: The IP address of the backend member server.
type: string
sample: "192.168.2.10"
'''
EXAMPLES = '''
# Create a member, wait for the member to be created.
- os_member:
cloud: mycloud
endpoint_type: admin
state: present
name: test-member
pool: test-pool
address: 192.168.10.3
protocol_port: 8080
# Delete a listener
- os_member:
cloud: mycloud
endpoint_type: admin
state: absent
name: test-member
pool: test-pool
'''
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, \
openstack_module_kwargs, openstack_cloud_from_module
def _wait_for_member_status(module, cloud, pool_id, member_id, status,
failures, interval=5):
timeout = module.params['timeout']
total_sleep = 0
if failures is None:
failures = []
while total_sleep < timeout:
member = cloud.load_balancer.get_member(member_id, pool_id)
provisioning_status = member.provisioning_status
if provisioning_status == status:
return member
if provisioning_status in failures:
module.fail_json(
msg="Member %s transitioned to failure state %s" %
(member_id, provisioning_status)
)
time.sleep(interval)
total_sleep += interval
module.fail_json(
msg="Timeout waiting for member %s to transition to %s" %
(member_id, status)
)
def main():
    """Ansible entry point: create or delete a load-balancer pool member."""
    argument_spec = openstack_full_argument_spec(
        name=dict(required=True),
        state=dict(default='present', choices=['absent', 'present']),
        pool=dict(required=True),
        address=dict(default=None),
        protocol_port=dict(default=80, type='int'),
        subnet_id=dict(default=None),
    )
    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec, **module_kwargs)
    sdk, cloud = openstack_cloud_from_module(module)

    name = module.params['name']
    pool = module.params['pool']

    try:
        changed = False
        # The member's pool must already exist; fail early otherwise.
        pool_ret = cloud.load_balancer.find_pool(name_or_id=pool)
        if not pool_ret:
            module.fail_json(msg='pool %s is not found' % pool)

        pool_id = pool_ret.id
        member = cloud.load_balancer.find_member(name, pool_id)

        if module.params['state'] == 'present':
            if not member:
                member = cloud.load_balancer.create_member(
                    pool_ret,
                    address=module.params['address'],
                    name=name,
                    protocol_port=module.params['protocol_port'],
                    subnet_id=module.params['subnet_id']
                )
                changed = True

                # Without wait, report the freshly created member as-is.
                if not module.params['wait']:
                    module.exit_json(changed=changed,
                                     member=member.to_dict(),
                                     id=member.id)

            if module.params['wait']:
                # Block until the member is ACTIVE (fails on ERROR/timeout).
                member = _wait_for_member_status(module, cloud, pool_id,
                                                 member.id, "ACTIVE",
                                                 ["ERROR"])

            module.exit_json(changed=changed, member=member.to_dict(),
                             id=member.id)
        elif module.params['state'] == 'absent':
            if member:
                cloud.load_balancer.delete_member(member, pool_ret)
                changed = True
            module.exit_json(changed=changed)
    except sdk.exceptions.OpenStackCloudException as e:
        module.fail_json(msg=str(e), extra_data=e.extra_data)
if __name__ == "__main__":
    # Standard Ansible module entry point.
    main()
| 30.653509 | 92 | 0.603234 |
ace6db6cfee5643c14edddf49ae472318bce857b | 10,227 | py | Python | corenlp/corenlp.py | deepnlpf/plugin_stanfordcorenlp | cbd1f983f1a15d19e8d0c1d4fcf0fd53036e1840 | [
"MIT"
] | null | null | null | corenlp/corenlp.py | deepnlpf/plugin_stanfordcorenlp | cbd1f983f1a15d19e8d0c1d4fcf0fd53036e1840 | [
"MIT"
] | 1 | 2020-03-21T00:34:18.000Z | 2020-03-21T00:34:18.000Z | corenlp/corenlp.py | deepnlpf/plugin_stanfordcorenlp | cbd1f983f1a15d19e8d0c1d4fcf0fd53036e1840 | [
"MIT"
] | null | null | null | # _*_coding:utf-8_*_
from __future__ import print_function
import glob, json, logging, os, re, socket, subprocess, sys, time, pathlib, psutil, path
from os import path
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
import requests
class StanfordCoreNLP:
    """HTTP client for a Stanford CoreNLP server.

    If *path_or_host* starts with 'http', an already-running server is used.
    Otherwise *path_or_host* is treated as the path to a local CoreNLP
    installation directory and a java server process is launched on an
    automatically selected free port.
    """

    def __init__(self, path_or_host, port=None, memory='4g', lang='en', timeout=1500, quiet=True, logging_level=logging.WARNING, max_retries=5):
        """Connect to (or launch and wait for) a CoreNLP server.

        :param path_or_host: URL of a running server, or a CoreNLP directory
        :param port: server port; auto-selected from 9000 upward when None
        :param memory: java heap size, e.g. '4g' (digits followed by 'g')
        :param lang: one of 'en', 'zh', 'ar', 'fr', 'de', 'es'
        :param timeout: stored for request properties
        :param quiet: suppress the spawned server's console output
        :param max_retries: seconds to wait for the server socket to open
        """
        self.path_or_host = path_or_host
        self.port = port
        self.memory = memory
        self.lang = lang
        self.timeout = timeout
        self.quiet = quiet
        self.logging_level = logging_level

        logging.basicConfig(level=self.logging_level)

        # Check args
        self._check_args()

        if self.path_or_host.startswith('http'):
            # Remote mode: reuse an existing server.
            self.url = self.path_or_host + ':' + str(port)
            logging.info('Using an existing server {}'.format(self.url))
        else:
            # Local mode: verify a java runtime is available before spawning.
            if not subprocess.call(['java', '-version'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) == 0:
                raise RuntimeError('Java not found.')

            # Check if the dir exists
            if not os.path.isdir(self.path_or_host):
                raise IOError(str(self.path_or_host) + ' is not a directory.')
            directory = os.path.normpath(self.path_or_host) + os.sep
            self.class_path_dir = directory

            # Check if the language specific model file exists
            switcher = {
                'en': 'stanford-corenlp-[0-9].[0-9].[0-9]-models.jar',
                'zh': 'stanford-chinese-corenlp-[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]-models.jar',
                'ar': 'stanford-arabic-corenlp-[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]-models.jar',
                'fr': 'stanford-french-corenlp-[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]-models.jar',
                'de': 'stanford-german-corenlp-[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]-models.jar',
                'es': 'stanford-spanish-corenlp-[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]-models.jar'
            }
            jars = {
                'en': 'stanford-corenlp-x.x.x-models.jar',
                'zh': 'stanford-chinese-corenlp-yyyy-MM-dd-models.jar',
                'ar': 'stanford-arabic-corenlp-yyyy-MM-dd-models.jar',
                'fr': 'stanford-french-corenlp-yyyy-MM-dd-models.jar',
                'de': 'stanford-german-corenlp-yyyy-MM-dd-models.jar',
                'es': 'stanford-spanish-corenlp-yyyy-MM-dd-models.jar'
            }
            if len(glob.glob(directory + switcher.get(self.lang))) <= 0:
                raise IOError(jars.get(
                    self.lang) + ' not exists. You should download and place it in the ' + directory + ' first.')

            # If port not set, auto select the first free port from 9000 up.
            if self.port is None:
                for port_candidate in range(9000, 65535):
                    if port_candidate not in [conn.laddr[1] for conn in psutil.net_connections()]:
                        self.port = port_candidate
                        break

            # Check if the port is in use
            if self.port in [conn.laddr[1] for conn in psutil.net_connections()]:
                raise IOError('Port ' + str(self.port) + ' is already in use.')

            # Start native server
            logging.info('Initializing native server...')
            cmd = "java"
            java_args = "-Xmx{}".format(self.memory)
            java_class = "edu.stanford.nlp.pipeline.StanfordCoreNLPServer"
            class_path = '"{}*"'.format(directory)

            args = [cmd, java_args, '-cp', class_path,
                    java_class, '-port', str(self.port)]

            args = ' '.join(args)

            logging.info(args)

            # Silence the server's console output when quiet is requested.
            with open(os.devnull, 'w') as null_file:
                out_file = None
                if self.quiet:
                    out_file = null_file

                self.p = subprocess.Popen(
                    args, shell=True, stdout=out_file, stderr=subprocess.STDOUT)
                logging.info('Server shell PID: {}'.format(self.p.pid))

            self.url = 'http://localhost:' + str(self.port)

        # Wait until server starts
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        host_name = urlparse(self.url).hostname
        time.sleep(1)  # OSX, not tested
        trial = 1
        while sock.connect_ex((host_name, self.port)):
            if trial > max_retries:
                raise ValueError('Corenlp server is not available')
            logging.info('Waiting until the server is available.')
            trial += 1
            time.sleep(1)
        logging.info('The server is available.')

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()

    def close(self):
        """Kill a locally spawned server process tree (no-op for remote servers)."""
        logging.info('Cleanup...')
        if hasattr(self, 'p'):
            try:
                parent = psutil.Process(self.p.pid)
            except psutil.NoSuchProcess:
                logging.info('No process: {}'.format(self.p.pid))
                return

            # Guard against killing an unrelated process that reused the PID.
            if self.class_path_dir not in ' '.join(parent.cmdline()):
                logging.info('Process not in: {}'.format(parent.cmdline()))
                return

            children = parent.children(recursive=True)
            for process in children:
                logging.info('Killing pid: {}, cmdline: {}'.format(
                    process.pid, process.cmdline()))
                # process.send_signal(signal.SIGTERM)
                process.kill()

            logging.info('Killing shell pid: {}, cmdline: {}'.format(
                parent.pid, parent.cmdline()))
            # parent.send_signal(signal.SIGTERM)
            parent.kill()

    def annotate(self, text, properties=None):
        """POST *text* to the server with *properties* and return the raw body."""
        if sys.version_info.major >= 3:
            text = text.encode('utf-8')

        r = requests.post(self.url, params={'properties': str(properties)}, data=text,
                          headers={'Connection': 'close'})
        return r.text

    def tregex(self, sentence, pattern):
        """Run a Tregex *pattern* over the parse tree of *sentence*."""
        tregex_url = self.url + '/tregex'
        r_dict = self._request(
            tregex_url, "tokenize,ssplit,depparse,parse", sentence, pattern=pattern)
        return r_dict

    def tokensregex(self, sentence, pattern):
        """Run a TokensRegex *pattern* over *sentence*."""
        tokensregex_url = self.url + '/tokensregex'
        r_dict = self._request(
            tokensregex_url, "tokenize,ssplit,depparse", sentence, pattern=pattern)
        return r_dict

    def semgrex(self, sentence, pattern):
        """Run a Semgrex *pattern* over the dependency graph of *sentence*."""
        semgrex_url = self.url + '/semgrex'
        r_dict = self._request(
            semgrex_url, "tokenize,ssplit,depparse", sentence, pattern=pattern)
        return r_dict

    def word_tokenize(self, sentence, span=False):
        """Tokenize *sentence*; optionally also return character-offset spans.

        BUG FIX: the url argument to _request was missing, so the annotator
        string 'ssplit,tokenize' was consumed as the URL and the request
        crashed encoding a None payload.
        """
        r_dict = self._request(self.url, 'ssplit,tokenize', sentence)
        tokens = [token['originalText']
                  for s in r_dict['sentences'] for token in s['tokens']]

        # Whether return token span
        if span:
            spans = [(token['characterOffsetBegin'], token['characterOffsetEnd']) for s in r_dict['sentences'] for token
                     in s['tokens']]
            return tokens, spans
        else:
            return tokens

    def pos_tag(self, sentence):
        """Return a list of (word, POS-tag) pairs for *sentence*."""
        r_dict = self._request(self.url, 'pos', sentence)
        words = []
        tags = []
        for s in r_dict['sentences']:
            for token in s['tokens']:
                words.append(token['originalText'])
                tags.append(token['pos'])
        return list(zip(words, tags))

    def ner(self, sentence):
        """Return a list of (word, NER-tag) pairs for *sentence*."""
        r_dict = self._request(self.url, 'ner', sentence)
        words = []
        ner_tags = []
        for s in r_dict['sentences']:
            for token in s['tokens']:
                words.append(token['originalText'])
                ner_tags.append(token['ner'])
        return list(zip(words, ner_tags))

    def parse(self, sentence):
        """Return the constituency parse of the first sentence in *sentence*."""
        r_dict = self._request(self.url, 'pos,parse', sentence)
        return [s['parse'] for s in r_dict['sentences']][0]

    def dependency_parse(self, sentence):
        """Return (relation, governor-index, dependent-index) triples."""
        r_dict = self._request(self.url, 'depparse', sentence)
        return [(dep['dep'], dep['governor'], dep['dependent']) for s in r_dict['sentences'] for dep in
                s['basicDependencies']]

    def coref(self, text):
        """Return coreference chains as lists of (sentNum, start, end, text).

        BUG FIX: the url argument to _request was missing, so 'coref' was
        consumed as the URL and the request crashed encoding a None payload.
        """
        r_dict = self._request(self.url, 'coref', text)

        corefs = []
        for k, mentions in r_dict['corefs'].items():
            simplified_mentions = []
            for m in mentions:
                simplified_mentions.append(
                    (m['sentNum'], m['startIndex'], m['endIndex'], m['text']))
            corefs.append(simplified_mentions)
        return corefs

    def switch_language(self, language="en"):
        """Switch the pipeline language for subsequent requests."""
        self._check_language(language)
        self.lang = language

    def _request(self, url, annotators=None, data=None, *args, **kwargs):
        """POST *data* to *url* with the given annotators; return parsed JSON."""
        if sys.version_info.major >= 3:
            data = data.encode('utf-8')

        properties = {'annotators': annotators, 'outputFormat': 'json'}
        params = {'properties': str(properties), 'pipelineLanguage': self.lang}
        if 'pattern' in kwargs:
            params = {"pattern": kwargs['pattern'], 'properties': str(
                properties), 'pipelineLanguage': self.lang}

        logging.info(params)
        r = requests.post(url, params=params, data=data,
                          headers={'Connection': 'close'})
        r_dict = json.loads(r.text)

        return r_dict

    def _check_args(self):
        """Validate constructor arguments (language and memory format)."""
        self._check_language(self.lang)
        # Raw string, multi-digit, anchored: the old pattern '\dg' rejected
        # sizes like '16g' and accepted trailing junk like '4gb'.
        if not re.match(r'\d+g$', self.memory):
            raise ValueError('memory=' + self.memory +
                             ' not supported. Use 4g, 6g, 8g and etc. ')

    def _check_language(self, lang):
        """Raise ValueError if *lang* is not a supported pipeline language."""
        if lang not in ['en', 'zh', 'ar', 'fr', 'de', 'es']:
            raise ValueError('lang=' + self.lang + ' not supported. Use English(en), Chinese(zh), Arabic(ar), '
                                                   'French(fr), German(de), Spanish(es).')
| 39.183908 | 144 | 0.555784 |
ace6dba4d27dd7fc3f7cd9123c70afe70223b6da | 303 | py | Python | about/models.py | kirubarajan/old_site | db50c79fec37e494891f2361e1c75e8f834255ce | [
"MIT"
] | 1 | 2020-04-19T04:48:47.000Z | 2020-04-19T04:48:47.000Z | about/models.py | kirubarajan/old_site | db50c79fec37e494891f2361e1c75e8f834255ce | [
"MIT"
] | 3 | 2020-02-11T23:11:11.000Z | 2021-06-10T20:48:38.000Z | about/models.py | kirubarajan/old_site | db50c79fec37e494891f2361e1c75e8f834255ce | [
"MIT"
] | null | null | null | from django.db import models
class Project(models.Model):
    """A portfolio project entry shown on the about page."""
    name = models.CharField(max_length=100)
    description = models.CharField(max_length=200)
    # Link to the project's source repository.
    github = models.URLField()
    # Link to the project itself (presumably a live/deployed URL — TODO confirm).
    link = models.URLField()
    # Free-form string; presumably describes the project's users — TODO confirm.
    users = models.CharField(max_length=50)
    class Meta:
        # Explicit table name instead of Django's default 'about_project'.
        db_table = 'Project'
ace6dbabd169d36d746d3168951a7a0f383344fe | 2,053 | py | Python | config/custom_components/mbapi2020/switch.py | mhaack/home-assistant-conf | 7cb856bee67906ba066adffe2151f6b50b6b73ce | [
"MIT"
] | 28 | 2019-05-31T12:30:15.000Z | 2022-03-10T18:54:57.000Z | config/custom_components/mbapi2020/switch.py | mhaack/home-assistant-conf | 7cb856bee67906ba066adffe2151f6b50b6b73ce | [
"MIT"
] | 2 | 2020-04-15T20:02:42.000Z | 2021-03-09T19:45:21.000Z | config/custom_components/mbapi2020/switch.py | mhaack/home-assistant-conf | 7cb856bee67906ba066adffe2151f6b50b6b73ce | [
"MIT"
] | 2 | 2021-03-31T08:27:19.000Z | 2021-04-30T15:13:24.000Z | import logging
from homeassistant.components.switch import SwitchEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_registry import async_get_registry
from homeassistant.helpers.restore_state import RestoreEntity
from . import MercedesMeEntity
from .const import (
CONF_FT_DISABLE_CAPABILITY_CHECK,
CONF_PIN,
DOMAIN,
SWITCHES,
Sensor_Config_Fields as scf
)
LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, entry, async_add_entities):
    """Set up one switch entity per car and supported switch config."""
    data = hass.data[DOMAIN]

    cars = data.client.cars
    if not cars:
        LOGGER.info("No Cars found.")
        return

    capability_check_disabled = entry.options.get(
        CONF_FT_DISABLE_CAPABILITY_CHECK, False)

    entities = []
    for car in cars:
        for internal_name, config in sorted(SWITCHES.items()):
            capability = config[5]
            supported = (capability is None
                         or capability_check_disabled is False
                         or getattr(car.features, capability, False) is True)
            if supported:
                entities.append(MercedesMESwitch(
                    hass=hass,
                    data=data,
                    internal_name=internal_name,
                    sensor_config=config,
                    vin=car.finorvin
                ))

    async_add_entities(entities, True)
class MercedesMESwitch(MercedesMeEntity, SwitchEntity, RestoreEntity):
    """Switch entity that starts/stops a vehicle feature via the ME client."""

    def _client_command(self, suffix):
        # Client commands are named "<internal_name>_start" / "<internal_name>_stop".
        return getattr(self._data.client, self._internal_name + suffix)

    async def async_turn_on(self, **kwargs):
        """Turn device component on"""
        await self._client_command("_start")(self._vin)

    async def async_turn_off(self, **kwargs):
        """Turn device component off"""
        await self._client_command("_stop")(self._vin)

    @property
    def is_on(self):
        """Return the current on/off state read from the car data."""
        return self._get_car_value(self._feature_name, self._object_name,
                                   self._attrib_name, False)
| 29.753623 | 99 | 0.658548 |
ace6dc4561a8c6a72958fcc2138afe6e30ee7e86 | 866 | py | Python | backend/api/views/sessions.py | ChristchurchCityWeightlifting/lifter-api | a82b79c75106e7f4f8ea4b4e3e12d727213445e3 | [
"MIT"
] | null | null | null | backend/api/views/sessions.py | ChristchurchCityWeightlifting/lifter-api | a82b79c75106e7f4f8ea4b4e3e12d727213445e3 | [
"MIT"
] | 5 | 2022-03-07T08:30:47.000Z | 2022-03-22T09:15:52.000Z | backend/api/views/sessions.py | ChristchurchCityWeightlifting/lifter-api | a82b79c75106e7f4f8ea4b4e3e12d727213445e3 | [
"MIT"
] | null | null | null | from rest_framework import viewsets
from api.models import Session
from api.serializers import SessionDetailSerializer, SessionSerializer
class SessionViewSet(viewsets.ModelViewSet):
    """
    # Session
    - A Session is part of a competition. It holds all the lifts for that
      particular competition as well as the officials responsible for it.
    - Officials:
        - 1st Referee, 2nd Referee (or center), 3rd Referee
        - Timekeeper, Announcer
        - Jury
    - Conditions:
        1. A single Session can only be in one competition.
    """

    def get_queryset(self):
        # Only sessions belonging to the competition from the nested route.
        competition_pk = self.kwargs["competitions_pk"]
        return Session.objects.filter(competition=competition_pk)

    def get_serializer_class(self):
        # Detail view gets the richer serializer; list and writes get the plain one.
        return (
            SessionDetailSerializer
            if self.action == "retrieve"
            else SessionSerializer
        )
| 28.866667 | 160 | 0.688222 |
ace6dd5ebb182e0bde9424633538c5bff596fdd7 | 6,979 | py | Python | owslib/owscontext/common.py | jannefleischer/OWSLib | e0f82ce01c4f3d18c2e25938987af3e9bcc6ecad | [
"BSD-3-Clause"
] | 218 | 2015-01-09T12:55:09.000Z | 2022-03-29T12:22:54.000Z | owslib/owscontext/common.py | jannefleischer/OWSLib | e0f82ce01c4f3d18c2e25938987af3e9bcc6ecad | [
"BSD-3-Clause"
] | 512 | 2015-01-01T09:52:58.000Z | 2022-03-30T11:57:07.000Z | owslib/owscontext/common.py | jannefleischer/OWSLib | e0f82ce01c4f3d18c2e25938987af3e9bcc6ecad | [
"BSD-3-Clause"
] | 218 | 2015-01-01T09:44:06.000Z | 2022-03-31T14:09:13.000Z | # -*- coding: utf-8 -*-
# =============================================================================
# Authors : Alexander Kmoch <allixender@gmail.com>
#
# =============================================================================
"""
place for some constants to avoid circular imports
"""
# seen in wms.py
from urllib.parse import urlparse
from datetime import datetime
from dateutil import parser
"""
The spec reference uri is of course difference for each encoding,
so holding it in the context container object is maybe a bit silly,
thus I use a generic spec reference and and only encode at serialisation time
"""
GENERIC_OWCSPEC_URL = "http://www.opengis.net/spec/owc-generic/1.0/req"
ATOM_OWCSPEC_URL = "http://www.opengis.net/spec/owc-atom/1.0/req"
GEOJSON_OWCSPEC_URL = "http://www.opengis.net/spec/owc-geojson/1.0/req"
"""
supported geojson profiles
"""
SUPPORTED_GEOJSON_PROFILES = [GEOJSON_OWCSPEC_URL + "/core"]
"""
those are for compliance testing when parsing geojson owc input
"""
SUPPORTED_GEOJSON_OFFERING_EXTENSIONS = [
GEOJSON_OWCSPEC_URL + "/wms",
GEOJSON_OWCSPEC_URL + "/wfs",
GEOJSON_OWCSPEC_URL + "/wcs",
GEOJSON_OWCSPEC_URL + "/wps",
GEOJSON_OWCSPEC_URL + "/csw",
GEOJSON_OWCSPEC_URL + "/geotiff",
GEOJSON_OWCSPEC_URL + "/sos"
]
# FIXME are the geosjson and atom offering codes ok,
# because ATOM offering codes are different (spec vs conf vs req)
ATOM_OFFERING_CODES = [
'http://www.opengis.net/spec/owc/1.0/conf/atom/gml',
'http://www.opengis.net/spec/owc/1.0/req/atom/wms'
'http://www.opengis.net/spec/owc-atom/1.0/req/gml',
'http://www.opengis.net/spec/owc-atom/1.0/req/csw',
]
def encodedspecurl_to_genericspecurl(encodedspecurl, genericspecurl):
    """Map an encoding-specific spec requirement URL onto the generic base URL.

    Takes the last path segment (the requirement code, e.g. "wms") of
    *encodedspecurl* and appends it to *genericspecurl*.

    :param encodedspecurl: encoding-specific requirement URL (atom/geojson)
    :param genericspecurl: generic spec base URL (e.g. GENERIC_OWCSPEC_URL)
    :return: generic spec URL carrying the same requirement code
    """
    parsed = urlparse(encodedspecurl)
    # BUG FIX: original used Scala-style `.last.trim`, which raises
    # AttributeError on a Python list; use [-1] / str.strip() instead.
    speccode = "/" + parsed.path.split("/")[-1].strip()
    return genericspecurl + speccode
def genericspecurl_to_encodedspecurl(genericspecurl, encodedspecurl):
    """Map a generic spec requirement URL onto an encoding-specific base URL.

    Inverse of :func:`encodedspecurl_to_genericspecurl`: takes the last path
    segment of *genericspecurl* and appends it to *encodedspecurl*.

    :param genericspecurl: generic requirement URL
    :param encodedspecurl: encoding-specific spec base URL
    :return: encoding-specific spec URL carrying the same requirement code
    """
    parsed = urlparse(genericspecurl)
    # BUG FIX: original used Scala-style `.last.trim` (AttributeError in
    # Python) and omitted the "/" separator present in the sibling function.
    speccode = "/" + parsed.path.split("/")[-1].strip()
    return encodedspecurl + speccode
class TimeIntervalFormat(object):
    """
    Little helper representing an ISO-8601 instant or start/end interval.
    A single instant has ``end is None``; an interval serialises as
    ``<start>/<end>``.
    """

    def __init__(self,
                 start,
                 end=None):
        """
        constructor:
        :param start: datetime
        :param end: datetime (None for a single instant)
        """
        self.start = start
        self.end = end

    def __str__(self):
        # ISO-8601: a single instant, or "start/end" for an interval.
        if self.end is None:
            return self.start.isoformat()
        else:
            return self.start.isoformat() + "/" + self.end.isoformat()

    def to_dict(self):
        """
        dict representation of object, for simple object comparison
        :return: dict with ISO strings (or None) under "start" and "end"
        """
        return {
            "start": None if self.start is None else self.start.isoformat(),
            # BUG FIX: original serialised self.start under the "end" key.
            "end": None if self.end is None else self.end.isoformat()
        }

    @classmethod
    def from_string(cls, date_str):
        """Parse an ISO instant or "start/end" interval string; None passes through.

        :raises ValueError: when the string cannot be parsed as datetime(s)
        """
        if date_str is None:
            return None
        try:
            date_arr = date_str.split("/")
            if len(date_arr) > 1:
                start_dt = parser.parse(date_arr[0])
                end_dt = parser.parse(date_arr[1])
                return TimeIntervalFormat(start_dt, end_dt)
            else:
                single_dt = parser.parse(date_str)
                return TimeIntervalFormat(single_dt)
        except Exception:
            raise ValueError("Error parsing datetime string: %s" % date_str)
def skip_nulls(dict_obj):
    """
    Return a shallow copy of *dict_obj* without None-valued entries,
    so they are omitted from the serialised JSON.
    :param dict_obj: needs to be dict
    :return: filtered dict
    """
    filtered = {}
    for key, value in dict_obj.items():
        if value is not None:
            filtered[key] = value
    return filtered
def skip_nulls_rec(dict_obj):
    """
    Recursively drop None-valued entries from a dict (descending into
    nested dicts and lists), as they are not wanted in the JSON.
    :param dict_obj: needs to be dict
    :return: the trimmed dict, or the value unchanged if it wasn't a dict
    """
    if not isinstance(dict_obj, dict):
        # Non-dict values pass through untouched (list elements recurse below).
        return dict_obj
    cleaned = {}
    for key, value in dict_obj.items():
        if value is None:
            continue
        if isinstance(value, dict):
            cleaned[key] = skip_nulls_rec(value)
        elif isinstance(value, list):
            cleaned[key] = [skip_nulls_rec(item) for item in value]
        else:
            cleaned[key] = value
    return cleaned
def extract_p(path, dict_obj, default):
    """
    Try to extract a dict value along a key path; return *default* when the
    path cannot be followed.
    :param path: the nested dict key path, separated by '.'
        (therefore no dots in key names allowed)
    :param dict_obj: the dictionary object from which to extract
    :param default: a default return value if the path is missing
    :return: extracted value, or *default*
    """
    if dict_obj is None:
        return default
    current = dict_obj
    for key in path.split('.'):
        # BUG FIX: the original caught KeyError, but dict.get never raises it;
        # descending into a non-dict intermediate raised an uncaught
        # AttributeError instead of yielding the documented default.
        if not isinstance(current, dict):
            return default
        current = current.get(key, default)
    return current
def build_from_xp(path, dict_obj, build_class, default):
    """
    Try to build a class instance from the dict found at *path*,
    else return *default*.
    :param path: the nested dict key path, separated by '.'
        (therefore no dots in key names allowed)
    :param dict_obj: the dictionary object from which to extract
    :param build_class: the class providing a ``from_dict`` factory
    :param default: default return value
    :return: ideally the inquired class instance, else default
    """
    extracted = extract_p(path, dict_obj, default)
    if extracted is None or extracted == default:
        return default
    return build_class.from_dict(extracted)
def is_empty(dict_obj):
    """
    Query whether a dict is "empty": it has no entries, or every value
    is None. Non-dict inputs are never considered empty.
    :param dict_obj: the to-be-tested dictionary
    :return: True if empty, False otherwise
    """
    if not isinstance(dict_obj, dict):
        return False
    # all() is True for an empty dict, matching the original behaviour.
    return all(value is None for value in dict_obj.values())
def try_int(num_string):
    """
    Shorthand cast to int.
    :param num_string: value to convert (may be None)
    :return: int or None when conversion is impossible
    """
    if num_string is None:
        return None
    try:
        return int(num_string)
    except ValueError:
        return None
def try_float(num_string):
    """
    Shorthand cast to float.
    :param num_string: value to convert (may be None)
    :return: float or None when conversion is impossible
    """
    if num_string is None:
        return None
    try:
        return float(num_string)
    except ValueError:
        return None
| 28.141129 | 79 | 0.597793 |
ace6de6da72d24affa9ef0bfad62fc1d0797c4d8 | 4,787 | py | Python | src/operations/restore.py | shivam5489/test | 7461db55bcf44ecf70a30812620ed012ffecf80a | [
"Apache-2.0"
] | null | null | null | src/operations/restore.py | shivam5489/test | 7461db55bcf44ecf70a30812620ed012ffecf80a | [
"Apache-2.0"
] | 4 | 2021-05-18T09:17:42.000Z | 2021-05-18T11:43:43.000Z | src/operations/restore.py | shivam5489/test | 7461db55bcf44ecf70a30812620ed012ffecf80a | [
"Apache-2.0"
] | 5 | 2021-04-07T21:13:16.000Z | 2022-03-01T02:42:14.000Z | #
# Copyright (c) 2021 by Delphix. All rights reserved.
#
from utils import setupLogger, executeScript
from generated.definitions import RepositoryDefinition, SourceConfigDefinition
import json
def initial_sync (source_connection,parameters,repository,source_config):
    """Restore the staged Oracle database from a full RMAN backup.

    Builds the environment consumed by the toolkit's PowerShell scripts,
    then runs them in order: resync prep, service creation, init file,
    directories, startup nomount, control-file restore, mount with spfile,
    restore-script generation, and finally the RMAN restore itself.

    :param source_connection: remote connection the scripts execute over
    :param parameters: linked/staged source parameters (credentials, paths)
    :param repository: discovered Oracle home/base and toolkit path
    :param source_config: discovered database identity (name, unique name)
    """
    logger = setupLogger._setup_logger(__name__)
    # Environment variables handed to every PowerShell script below.
    env = {
        "DLPX_TOOLKIT_NAME" : "Oracle on Windows",
        "DLPX_TOOLKIT_WORKFLOW" : "initial_sync",
        "DLPX_TOOLKIT_PATH" : repository.delphix_tookit_path,
        "ORACLE_HOME" : repository.ora_home,
        "ORACLE_INST" : parameters.instance_name,
        "ORACLE_USER" : parameters.username,
        "ORACLE_PASSWD" : parameters.password,
        "ORACLE_BASE" : repository.ora_base,
        "ORACLE_DBID" : parameters.dbid,
        "ORACLE_CTRL_FILE_BKP" : parameters.dbctrlbkppiece,
        "ORACLE_BKP_LOC" : parameters.dbrmanbkploc,
        "STG_MNT_PATH" : parameters.mount_path,
        "ORA_SRC" : source_config.db_name,
        "ORACLE_DB_IDENTITY_NAME" : source_config.db_identity_name,
        "ORA_UNQ_NAME" : source_config.db_uniq_name,
        "CUSTOM_INIT_PARAMS" : str(parameters.custom_init_params),
        "CUSTOM_INIT_PARAMS_FILE" : parameters.custom_init_params_file,
        "RMAN_CHANNELS" : str(parameters.rman_channels)
    }
    logger.debug("Staged Parameters: {}".format(parameters))
    logger.debug("Repository Parameters: {}".format(repository))
    logger.debug("Source Config Parameters: {}".format(source_config))
    # Each step below shells out to a toolkit .ps1 script and logs its output.
    reSyncPrep = executeScript.execute_powershell(source_connection,'ds_resyncprep.ps1',env)
    logger.debug("reSyncPrep: {}".format(reSyncPrep))
    crt_svc = executeScript.execute_powershell(source_connection,'crtOraSvc.ps1',env)
    logger.debug("Creating Service: {}".format(crt_svc))
    crt_init = executeScript.execute_powershell(source_connection,'ds_crtOraInit.ps1',env)
    logger.debug("Creating Initial Init File: {}".format(crt_init))
    crt_dirs = executeScript.execute_powershell(source_connection,'ds_crtDirectories.ps1',env)
    logger.debug("Creating Directories: {}".format(crt_dirs))
    start_nomount = executeScript.execute_powershell(source_connection,'ds_startup_nomount.ps1',env)
    logger.debug("Startup No-Mount: {}".format(start_nomount))
    restore_ctrlfile = executeScript.execute_powershell(source_connection,'ds_restore_controlfile.ps1',env)
    logger.debug("Restore Control File: {}".format(restore_ctrlfile))
    start_mount_spfile = executeScript.execute_powershell(source_connection,'ds_startup_spfile.ps1',env)
    logger.debug("Startup Mount with SP File: {}".format(start_mount_spfile))
    crt_rstr_files = executeScript.execute_powershell(source_connection,'ds_crtRestoreScripts.ps1',env)
    logger.debug("Create Restore Files: {}".format(crt_rstr_files))
    rstr_db = executeScript.execute_powershell(source_connection,'ds_restore.ps1',env)
    logger.debug("Restore Database: {}".format(rstr_db))
def incremental_sync (source_connection,parameters,repository,source_config):
    """Apply new RMAN backups to the staged database, if any exist.

    Probes for new backup pieces; when one is found, regenerates the restore
    scripts and runs the incremental restore. No-op when the probe returns
    the sentinel string 'NoNewBackup'.

    :param source_connection: remote connection the scripts execute over
    :param parameters: linked/staged source parameters (credentials, paths)
    :param repository: discovered Oracle home/base and toolkit path
    :param source_config: discovered database identity (name, unique name)
    """
    logger = setupLogger._setup_logger(__name__)
    env = {
        "DLPX_TOOLKIT_NAME" : "Oracle on Windows",
        # NOTE(review): workflow is labelled "initial_sync" here although this
        # is the incremental path -- confirm whether the .ps1 scripts key off
        # this value and whether it should read "incremental_sync".
        "DLPX_TOOLKIT_WORKFLOW" : "initial_sync",
        "DLPX_TOOLKIT_PATH" : repository.delphix_tookit_path,
        "ORACLE_HOME" : repository.ora_home,
        "ORACLE_INST" : parameters.instance_name,
        "ORACLE_USER" : parameters.username,
        "ORACLE_PASSWD" : parameters.password,
        "ORACLE_BASE" : repository.ora_base,
        "ORACLE_DBID" : parameters.dbid,
        "ORACLE_CTRL_FILE_BKP" : parameters.dbctrlbkppiece,
        "ORACLE_BKP_LOC" : parameters.dbrmanbkploc,
        "STG_MNT_PATH" : parameters.mount_path,
        "ORA_SRC" : source_config.db_name,
        "ORACLE_DB_IDENTITY_NAME" : source_config.db_identity_name,
        "ORA_UNQ_NAME" : source_config.db_uniq_name
    }
    logger.debug("Staged Parameters: {}".format(parameters))
    logger.debug("Repository Parameters: {}".format(repository))
    ds_inc_find_bkp = executeScript.execute_powershell(source_connection,'ds_inc_find_bkp.ps1',env)
    logger.debug("Find New Backups: {}".format(ds_inc_find_bkp))
    # Only restore when the probe script reports a new backup piece.
    if (ds_inc_find_bkp != 'NoNewBackup'):
        crt_rstr_files = executeScript.execute_powershell(source_connection,'ds_inc_crtRestoreScripts.ps1',env)
        logger.debug("Create Restore Files: {}".format(crt_rstr_files))
        rstr_db = executeScript.execute_powershell(source_connection,'ds_inc_restore.ps1',env)
        logger.debug("Restore Database: {}".format(rstr_db))
| 48.846939 | 113 | 0.696052 |
ace6de90d3d1de1e09d0b4fa967538523c778551 | 21,156 | py | Python | nlp.py | morriexj/my-aima-python | 1ba1aeddb822f3dddc8ff851036003fa2edf360d | [
"MIT"
] | 1 | 2018-05-12T17:17:05.000Z | 2018-05-12T17:17:05.000Z | nlp.py | morriexj/my-aima-python | 1ba1aeddb822f3dddc8ff851036003fa2edf360d | [
"MIT"
] | null | null | null | nlp.py | morriexj/my-aima-python | 1ba1aeddb822f3dddc8ff851036003fa2edf360d | [
"MIT"
] | 1 | 2018-07-08T20:34:23.000Z | 2018-07-08T20:34:23.000Z | """Natural Language Processing; Chart Parsing and PageRanking (Chapter 22-23)"""
from collections import defaultdict
from utils import weighted_choice
import urllib.request
import re
# ______________________________________________________________________________
# Grammars and Lexicons
def Rules(**rules):
    """Create a dictionary mapping symbols to alternative sequences.
    >>> Rules(A = "B C | D E")
    {'A': [['B', 'C'], ['D', 'E']]}
    """
    return {lhs: [alternative.strip().split() for alternative in rhs.split('|')]
            for (lhs, rhs) in rules.items()}
def Lexicon(**rules):
    """Create a dictionary mapping symbols to alternative words.
    >>> Lexicon(Article = "the | a | an")
    {'Article': ['the', 'a', 'an']}
    """
    return {lhs: [word.strip() for word in rhs.split('|')]
            for (lhs, rhs) in rules.items()}
class Grammar:
    """A context-free grammar: rewrite rules plus a lexicon, with a reverse
    word -> categories index built at construction time."""

    def __init__(self, name, rules, lexicon):
        """A grammar has a set of rules and a lexicon."""
        self.name = name
        self.rules = rules
        self.lexicon = lexicon
        # Invert the lexicon so each word maps to the categories it belongs to.
        self.categories = defaultdict(list)
        for category, words in lexicon.items():
            for word in words:
                self.categories[word].append(category)

    def rewrites_for(self, cat):
        """Return a sequence of possible rhs's that cat can be rewritten as."""
        return self.rules.get(cat, ())

    def isa(self, word, cat):
        """Return True iff word is of category cat"""
        return cat in self.categories[word]

    def cnf_rules(self):
        """Return (X, Y, Z) tuples for rules of the form X -> Y Z."""
        return [(X, Y, Z)
                for X, alternatives in self.rules.items()
                for (Y, Z) in alternatives]

    def generate_random(self, S='S'):
        """Replace each token in S by a random entry in grammar (recursively)."""
        import random

        def rewrite(tokens, into):
            for token in tokens:
                if token in self.rules:
                    rewrite(random.choice(self.rules[token]), into)
                elif token in self.lexicon:
                    into.append(random.choice(self.lexicon[token]))
                else:
                    into.append(token)
            return into

        return ' '.join(rewrite(S.split(), []))

    def __repr__(self):
        return '<Grammar {}>'.format(self.name)
def ProbRules(**rules):
    """Create a dictionary mapping symbols to alternative sequences,
    with probabilities.
    >>> ProbRules(A = "B C [0.3] | D E [0.7]")
    {'A': [(['B', 'C'], 0.3), (['D', 'E'], 0.7)]}
    """
    result = {}
    for lhs, rhs in rules.items():
        alternatives = []
        for alt in rhs.split('|'):
            tokens = alt.strip().split()
            # The last token is the bracketed probability, e.g. "[0.3]".
            probability = float(tokens[-1][1:-1])
            alternatives.append((tokens[:-1], probability))
        result[lhs] = alternatives
    return result
def ProbLexicon(**rules):
    """Create a dictionary mapping symbols to alternative words,
    with probabilities.
    >>> ProbLexicon(Article = "the [0.5] | a [0.25] | an [0.25]")
    {'Article': [('the', 0.5), ('a', 0.25), ('an', 0.25)]}
    """
    result = {}
    for lhs, rhs in rules.items():
        entries = []
        for alt in rhs.split('|'):
            tokens = alt.strip().split()
            # Word first, bracketed probability last, e.g. "the [0.5]".
            probability = float(tokens[-1][1:-1])
            entries.append((tokens[0], probability))
        result[lhs] = entries
    return result
class ProbGrammar:
    """A probabilistic context-free grammar: every rule and lexicon entry
    carries a probability; a word -> (category, prob) index is prebuilt."""

    def __init__(self, name, rules, lexicon):
        """A grammar has a set of rules and a lexicon.
        Each rule has a probability."""
        self.name = name
        self.rules = rules
        self.lexicon = lexicon
        self.categories = defaultdict(list)
        for category, entries in lexicon.items():
            for word, prob in entries:
                self.categories[word].append((category, prob))

    def rewrites_for(self, cat):
        """Return a sequence of possible rhs's that cat can be rewritten as."""
        return self.rules.get(cat, ())

    def isa(self, word, cat):
        """Return True iff word is of category cat"""
        return any(c == cat for c, _ in self.categories[word])

    def cnf_rules(self):
        """Return (X, Y, Z, p) tuples for rules of the form X -> Y Z [p]."""
        return [(X, Y, Z, p)
                for X, alternatives in self.rules.items()
                for (Y, Z), p in alternatives]

    def generate_random(self, S='S'):
        """Replace each token in S by a random entry in grammar (recursively).
        Returns a tuple of (sentence, probability)."""

        def rewrite(tokens, into):
            # into is [word_list, running_probability]
            for token in tokens:
                if token in self.rules:
                    non_terminal, prob = weighted_choice(self.rules[token])
                    into[1] *= prob
                    rewrite(non_terminal, into)
                elif token in self.lexicon:
                    terminal, prob = weighted_choice(self.lexicon[token])
                    into[0].append(terminal)
                    into[1] *= prob
                else:
                    into[0].append(token)
            return into

        rewritten_as, prob = rewrite(S.split(), [[], 1])
        return (' '.join(rewritten_as), prob)

    def __repr__(self):
        return '<Grammar {}>'.format(self.name)
E0 = Grammar('E0',
Rules( # Grammar for E_0 [Figure 22.4]
S='NP VP | S Conjunction S',
NP='Pronoun | Name | Noun | Article Noun | Digit Digit | NP PP | NP RelClause',
VP='Verb | VP NP | VP Adjective | VP PP | VP Adverb',
PP='Preposition NP',
RelClause='That VP'),
Lexicon( # Lexicon for E_0 [Figure 22.3]
Noun="stench | breeze | glitter | nothing | wumpus | pit | pits | gold | east",
Verb="is | see | smell | shoot | fell | stinks | go | grab | carry | kill | turn | feel", # noqa
Adjective="right | left | east | south | back | smelly",
Adverb="here | there | nearby | ahead | right | left | east | south | back",
Pronoun="me | you | I | it",
Name="John | Mary | Boston | Aristotle",
Article="the | a | an",
Preposition="to | in | on | near",
Conjunction="and | or | but",
Digit="0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9",
That="that"
))
E_ = Grammar('E_', # Trivial Grammar and lexicon for testing
Rules(
S='NP VP',
NP='Art N | Pronoun',
VP='V NP'),
Lexicon(
Art='the | a',
N='man | woman | table | shoelace | saw',
Pronoun='I | you | it',
V='saw | liked | feel'
))
E_NP_ = Grammar('E_NP_', # Another Trivial Grammar for testing
Rules(NP='Adj NP | N'),
Lexicon(Adj='happy | handsome | hairy',
N='man'))
E_Prob = ProbGrammar('E_Prob', # The Probabilistic Grammar from the notebook
ProbRules(
S="NP VP [0.6] | S Conjunction S [0.4]",
NP="Pronoun [0.2] | Name [0.05] | Noun [0.2] | Article Noun [0.15] \
| Article Adjs Noun [0.1] | Digit [0.05] | NP PP [0.15] | NP RelClause [0.1]",
VP="Verb [0.3] | VP NP [0.2] | VP Adjective [0.25] | VP PP [0.15] | VP Adverb [0.1]",
Adjs="Adjective [0.5] | Adjective Adjs [0.5]",
PP="Preposition NP [1]",
RelClause="RelPro VP [1]"
),
ProbLexicon(
Verb="is [0.5] | say [0.3] | are [0.2]",
Noun="robot [0.4] | sheep [0.4] | fence [0.2]",
Adjective="good [0.5] | new [0.2] | sad [0.3]",
Adverb="here [0.6] | lightly [0.1] | now [0.3]",
Pronoun="me [0.3] | you [0.4] | he [0.3]",
RelPro="that [0.5] | who [0.3] | which [0.2]",
Name="john [0.4] | mary [0.4] | peter [0.2]",
Article="the [0.5] | a [0.25] | an [0.25]",
Preposition="to [0.4] | in [0.3] | at [0.3]",
Conjunction="and [0.5] | or [0.2] | but [0.3]",
Digit="0 [0.35] | 1 [0.35] | 2 [0.3]"
))
E_Chomsky = Grammar('E_Prob_Chomsky', # A Grammar in Chomsky Normal Form
Rules(
S='NP VP',
NP='Article Noun | Adjective Noun',
VP='Verb NP | Verb Adjective',
),
Lexicon(
Article='the | a | an',
Noun='robot | sheep | fence',
Adjective='good | new | sad',
Verb='is | say | are'
))
E_Prob_Chomsky = ProbGrammar('E_Prob_Chomsky', # A Probabilistic Grammar in CNF
ProbRules(
S='NP VP [1]',
NP='Article Noun [0.6] | Adjective Noun [0.4]',
VP='Verb NP [0.5] | Verb Adjective [0.5]',
),
ProbLexicon(
Article='the [0.5] | a [0.25] | an [0.25]',
Noun='robot [0.4] | sheep [0.4] | fence [0.2]',
Adjective='good [0.5] | new [0.2] | sad [0.3]',
Verb='is [0.5] | say [0.3] | are [0.2]'
))
# ______________________________________________________________________________
# Chart Parsing
class Chart:
    """Class for parsing sentences using a chart data structure.
    >>> chart = Chart(E0);
    >>> len(chart.parses('the stench is in 2 2'))
    1
    """

    def __init__(self, grammar, trace=False):
        """A datastructure for parsing a string; and methods to do the parse.
        self.chart[i] holds the edges that end just before the i'th word.
        Edges are 5-element lists of [start, end, lhs, [found], [expects]]."""
        self.grammar = grammar
        self.trace = trace  # when True, print every edge as it is added

    def parses(self, words, S='S'):
        """Return a list of parses; words can be a list or string."""
        if isinstance(words, str):
            words = words.split()
        self.parse(words, S)
        # Return all the parses that span the whole input
        # 'span the whole input' => begin at 0, end at len(words)
        return [[i, j, S, found, []]
                for (i, j, lhs, found, expects) in self.chart[len(words)]
                # assert j == len(words)
                if i == 0 and lhs == S and expects == []]

    def parse(self, words, S='S'):
        """Parse a list of words; according to the grammar.
        Leave results in the chart."""
        self.chart = [[] for i in range(len(words)+1)]
        # Seed with a dummy edge expecting the start symbol S.
        self.add_edge([0, 0, 'S_', [], [S]])
        for i in range(len(words)):
            self.scanner(i, words[i])
        return self.chart

    def add_edge(self, edge):
        """Add edge to chart, and see if it extends or predicts another edge."""
        start, end, lhs, found, expects = edge
        if edge not in self.chart[end]:
            self.chart[end].append(edge)
            if self.trace:
                print('Chart: added {}'.format(edge))
            # A complete edge (nothing expected) may extend others;
            # an incomplete one predicts new edges for its next expected symbol.
            if not expects:
                self.extender(edge)
            else:
                self.predictor(edge)

    def scanner(self, j, word):
        """For each edge expecting a word of this category here, extend the edge."""
        for (i, j, A, alpha, Bb) in self.chart[j]:
            if Bb and self.grammar.isa(word, Bb[0]):
                self.add_edge([i, j+1, A, alpha + [(Bb[0], word)], Bb[1:]])

    def predictor(self, edge):
        """Add to chart any rules for B that could help extend this edge."""
        (i, j, A, alpha, Bb) = edge
        B = Bb[0]
        if B in self.grammar.rules:
            for rhs in self.grammar.rewrites_for(B):
                self.add_edge([j, j, B, [], rhs])

    def extender(self, edge):
        """See what edges can be extended by this edge."""
        (j, k, B, _, _) = edge
        for (i, j, A, alpha, B1b) in self.chart[j]:
            if B1b and B == B1b[0]:
                self.add_edge([i, k, A, alpha + [edge], B1b[1:]])
# ______________________________________________________________________________
# CYK Parsing
def CYK_parse(words, grammar):
    """ [Figure 23.5] CYK chart parse: return a table P mapping
    (category, start, length) to the best probability of deriving that span.
    Uses 0-based indexing instead of the book's 1-based."""
    N = len(words)
    P = defaultdict(float)
    # Seed the table with lexical rules for each word (spans of length 1).
    for position, word in enumerate(words):
        for category, prob in grammar.categories[word]:
            P[category, position, 1] = prob
    # Combine adjacent sub-spans via binary rules, shortest spans first.
    for length in range(2, N + 1):
        for start in range(N - length + 1):
            for len1 in range(1, length):  # N.B. the book incorrectly has N instead of length
                len2 = length - len1
                for (X, Y, Z, p) in grammar.cnf_rules():
                    candidate = P[Y, start, len1] * P[Z, start + len1, len2] * p
                    if candidate > P[X, start, length]:
                        P[X, start, length] = candidate
    return P
# ______________________________________________________________________________
# Page Ranking
# First entry in list is the base URL, and then following are relative URL pages
# NOTE(review): "Betrand Russell" and "Continental Philosophy" contain spaces
# (and a likely misspelling); appended to the base URL they will not form
# valid wikipedia paths -- confirm the intended article titles.
examplePagesSet = ["https://en.wikipedia.org/wiki/", "Aesthetics", "Analytic_philosophy",
                   "Ancient_Greek", "Aristotle", "Astrology", "Atheism", "Baruch_Spinoza",
                   "Belief", "Betrand Russell", "Confucius", "Consciousness",
                   "Continental Philosophy", "Dialectic", "Eastern_Philosophy",
                   "Epistemology", "Ethics", "Existentialism", "Friedrich_Nietzsche",
                   "Idealism", "Immanuel_Kant", "List_of_political_philosophers", "Logic",
                   "Metaphysics", "Philosophers", "Philosophy", "Philosophy_of_mind", "Physics",
                   "Plato", "Political_philosophy", "Pythagoras", "Rationalism",
                   "Social_philosophy", "Socrates", "Subjectivity", "Theology",
                   "Truth", "Western_philosophy"]
def loadPageHTML(addressList):
    """Download HTML page content for every URL address passed as argument.

    Performs live network fetches (blocking). Returns a dict mapping each
    address to its stripped HTML. Assumes UTF-8 responses; a non-UTF-8 page
    would raise UnicodeDecodeError.
    """
    contentDict = {}
    for addr in addressList:
        with urllib.request.urlopen(addr) as response:
            raw_html = response.read().decode('utf-8')
            # Strip raw html of unnessecary content. Basically everything that isn't link or text
            html = stripRawHTML(raw_html)
            contentDict[addr] = html
    return contentDict
def initPages(addressList):
    """Create a dictionary of pages from a list of URL addresses"""
    return {addr: Page(addr) for addr in addressList}
def stripRawHTML(raw_html):
    """Remove the <head> section of the HTML, which contains links to
    stylesheets etc. and no page text or outlinks."""
    # TODO: Strip more out of the raw html
    head_section = re.compile("<head>.*?</head>", flags=re.DOTALL)
    return head_section.sub("", raw_html)
def determineInlinks(page):
    """Given a set of pages that have their outlinks determined, we can fill
    out a page's inlinks by looking through all other page's outlinks"""
    return [addr for addr, other in pagesIndex.items()
            if other.address != page.address
            and page.address in other.outlinks]
def findOutlinks(page, handleURLs=None):
    """Search a page's HTML content for URL links to other pages"""
    html = pagesContent[page.address]
    urls = re.findall(r'href=[\'"]?([^\'" >]+)', html)
    # Optionally post-process the raw URLs (e.g. onlyWikipediaURLS).
    if handleURLs:
        urls = handleURLs(urls)
    return urls
def onlyWikipediaURLS(urls):
    """Some example HTML page data is from wikipedia. This function converts
    relative wikipedia links to full wikipedia URLs"""
    base = "https://en.wikipedia.org"
    return [base + url for url in urls if url.startswith('/wiki/')]
# ______________________________________________________________________________
# HITS Helper Functions
def expand_pages(pages):
    """Adds in every page that links to or is linked from one of
    the relevant pages."""
    expanded = {}
    for addr, page in pages.items():
        expanded.setdefault(addr, page)
        # Pull in the neighbours on both sides of the link relation.
        for neighbour in page.inlinks:
            if neighbour not in expanded:
                expanded[neighbour] = pagesIndex[neighbour]
        for neighbour in page.outlinks:
            if neighbour not in expanded:
                expanded[neighbour] = pagesIndex[neighbour]
    return expanded
def relevant_pages(query):
    """Relevant pages are pages that contain all of the query words. They are obtained by
    intersecting the hit lists of the query words."""
    matching = set(pagesIndex)
    for word in query.split():
        needle = word.lower()
        hits = {addr for addr in pagesIndex
                if needle in pagesContent[addr].lower()}
        matching = matching.intersection(hits)
    return {addr: pagesIndex[addr] for addr in matching}
def normalize(pages):
    """Divide each page's hub and authority score by the Euclidean norm
    (square root of the sum of squares) of all pages' scores, separately
    for the two score kinds. Mutates the pages in place."""
    hub_norm = sum(page.hub**2 for page in pages.values()) ** 0.5
    auth_norm = sum(page.authority**2 for page in pages.values()) ** 0.5
    for page in pages.values():
        page.hub /= hub_norm
        page.authority /= auth_norm
class ConvergenceDetector(object):
    """If the hub and authority values of the pages are no longer changing, we have
    reached a convergence and further iterations will have no effect. This detects convergence
    so that we can stop the HITS algorithm as early as possible."""

    def __init__(self):
        # Rolling history (most recent last) of hub/authority score snapshots;
        # None until the first detect() call.
        self.hub_history = None
        self.auth_history = None

    def __call__(self):
        # Allow the instance itself to be called like a function.
        return self.detect()

    def detect(self):
        # Snapshot current scores of the global pagesIndex.
        curr_hubs = [page.hub for addr, page in pagesIndex.items()]
        curr_auths = [page.authority for addr, page in pagesIndex.items()]
        if self.hub_history is None:
            # First call: nothing to compare against yet.
            self.hub_history, self.auth_history = [], []
        else:
            # Average absolute change vs the previous snapshot.
            diffsHub = [abs(x-y) for x, y in zip(curr_hubs, self.hub_history[-1])]
            diffsAuth = [abs(x-y) for x, y in zip(curr_auths, self.auth_history[-1])]
            aveDeltaHub = sum(diffsHub)/float(len(pagesIndex))
            aveDeltaAuth = sum(diffsAuth)/float(len(pagesIndex))
            if aveDeltaHub < 0.01 and aveDeltaAuth < 0.01:  # may need tweaking
                return True
        if len(self.hub_history) > 2:  # prevent list from getting long
            del self.hub_history[0]
            del self.auth_history[0]
        # Store copies so later in-place score updates don't mutate history.
        self.hub_history.append([x for x in curr_hubs])
        self.auth_history.append([x for x in curr_auths])
        return False
def getInlinks(page):
    """Return the indexed addresses that link to *page*, computing and
    caching page.inlinks lazily on first use."""
    if not page.inlinks:
        page.inlinks = determineInlinks(page)
    return [addr for addr in pagesIndex if addr in page.inlinks]
def getOutlinks(page):
    """Return the indexed addresses that *page* links to, computing and
    caching page.outlinks lazily on first use."""
    if not page.outlinks:
        page.outlinks = findOutlinks(page)
    return [addr for addr in pagesIndex if addr in page.outlinks]
# ______________________________________________________________________________
# HITS Algorithm
class Page(object):
    """A web page participating in the HITS computation."""

    def __init__(self, address, inlinks=None, outlinks=None, hub=0, authority=0):
        self.address = address      # URL identifying this page
        self.hub = hub              # hub score (updated by HITS)
        self.authority = authority  # authority score (updated by HITS)
        self.inlinks = inlinks      # addresses linking here; lazily filled by getInlinks
        self.outlinks = outlinks    # addresses linked from here; lazily filled by getOutlinks
pagesContent = {}  # maps Page relative or absolute URL/location to page's HTML content
pagesIndex = {}  # maps URL/location to its Page object; shared by the HITS helpers
convergence = ConvergenceDetector()  # assign function to variable to mimic pseudocode's syntax
def HITS(query):
    """The HITS algorithm for computing hubs and authorities with respect to a query.

    Expands the query-relevant pages with their link neighbourhood, then
    iterates mutual hub/authority updates (with normalization) until the
    global `convergence` detector reports stability. Returns the pages dict
    with updated scores.

    NOTE(review): the convergence detector reads the global pagesIndex, not
    this function's local `pages` -- confirm that is intended.
    """
    pages = expand_pages(relevant_pages(query))
    for p in pages.values():
        # Start every page with unit scores before iterating.
        p.authority = 1
        p.hub = 1
    while not convergence():
        # Snapshot scores so all updates in this round use the same values.
        authority = {p: pages[p].authority for p in pages}
        hub = {p: pages[p].hub for p in pages}
        for p in pages:
            # p.authority ← ∑i Inlinki(p).Hub
            pages[p].authority = sum(hub[x] for x in getInlinks(pages[p]))
            # p.hub ← ∑i Outlinki(p).Authority
            pages[p].hub = sum(authority[x] for x in getOutlinks(pages[p]))
        normalize(pages)
    return pages
| 37.846154 | 114 | 0.554453 |
ace6e11dd2c37cb4d2255b5a7148639ad40c246e | 717 | py | Python | NetCatKS/Logger/api/implementers/__init__.py | dimddev/NetCatKS-CP | 2d9e72b2422e344569fd4eb154866b98e9707561 | [
"BSD-2-Clause"
] | null | null | null | NetCatKS/Logger/api/implementers/__init__.py | dimddev/NetCatKS-CP | 2d9e72b2422e344569fd4eb154866b98e9707561 | [
"BSD-2-Clause"
] | null | null | null | NetCatKS/Logger/api/implementers/__init__.py | dimddev/NetCatKS-CP | 2d9e72b2422e344569fd4eb154866b98e9707561 | [
"BSD-2-Clause"
] | null | null | null | __author__ = 'dimd'
from twisted.python import log
from zope.interface import implementer
from NetCatKS.Logger.api.interfaces import ILogger
GLOBAL_DEBUG = True
@implementer(ILogger)
class Logger(object):
def __init__(self):
pass
def debug(self, msg):
if GLOBAL_DEBUG is True:
log.msg('[ ====== DEBUG ]: {}'.format(msg))
def info(self, msg):
log.msg('[ ++++++ INFO ]: {}'.format(msg))
def warning(self, msg):
log.msg('[ !!!!!! WARNING ]: {}'.format(msg))
def error(self, msg):
log.msg('[ ------ ERROR ]: {}'.format(msg))
def critical(self, msg):
log.msg('[ @@@@@@ CRITICAL ]: {}'.format(msg))
__all__ = [
'Logger'
] | 19.378378 | 55 | 0.563459 |
ace6e12c90dda5a909028717ea62263b14b5be12 | 3,118 | py | Python | pypureclient/flasharray/FA_2_11/models/username_response.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 14 | 2018-12-07T18:30:27.000Z | 2022-02-22T09:12:33.000Z | pypureclient/flasharray/FA_2_11/models/username_response.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 28 | 2019-09-17T21:03:52.000Z | 2022-03-29T22:07:35.000Z | pypureclient/flasharray/FA_2_11/models/username_response.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 15 | 2020-06-11T15:50:08.000Z | 2022-03-21T09:27:25.000Z | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.11
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_11 import models
class UsernameResponse(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'items': 'list[Username]'
}
attribute_map = {
'items': 'items'
}
required_args = {
}
def __init__(
self,
items=None, # type: List[models.Username]
):
"""
Keyword args:
items (list[Username])
"""
if items is not None:
self.items = items
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `UsernameResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(UsernameResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UsernameResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 27.839286 | 105 | 0.542976 |
ace6e194bfa22c5052d2652a3507181375ba5d98 | 14,194 | py | Python | tests/test_autogen_composition.py | gtest-org/test-deb-alembic | 539227fcbf4a7d2196c931696e6aa8ae1cad3dbe | [
"MIT"
] | null | null | null | tests/test_autogen_composition.py | gtest-org/test-deb-alembic | 539227fcbf4a7d2196c931696e6aa8ae1cad3dbe | [
"MIT"
] | null | null | null | tests/test_autogen_composition.py | gtest-org/test-deb-alembic | 539227fcbf4a7d2196c931696e6aa8ae1cad3dbe | [
"MIT"
] | 1 | 2020-01-23T05:18:17.000Z | 2020-01-23T05:18:17.000Z | import re
from alembic import autogenerate
from alembic.migration import MigrationContext
from alembic.testing import TestBase
from alembic.testing import eq_
from ._autogen_fixtures import AutogenTest, ModelOne, _default_include_object
class AutogenerateDiffTest(ModelOne, AutogenTest, TestBase):
__only_on__ = 'sqlite'
def test_render_nothing(self):
context = MigrationContext.configure(
connection=self.bind.connect(),
opts={
'compare_type': True,
'compare_server_default': True,
'target_metadata': self.m1,
'upgrade_token': "upgrades",
'downgrade_token': "downgrades",
}
)
template_args = {}
autogenerate._render_migration_diffs(context, template_args)
eq_(re.sub(r"u'", "'", template_args['upgrades']),
"""### commands auto generated by Alembic - please adjust! ###
pass
### end Alembic commands ###""")
eq_(re.sub(r"u'", "'", template_args['downgrades']),
"""### commands auto generated by Alembic - please adjust! ###
pass
### end Alembic commands ###""")
def test_render_nothing_batch(self):
context = MigrationContext.configure(
connection=self.bind.connect(),
opts={
'compare_type': True,
'compare_server_default': True,
'target_metadata': self.m1,
'upgrade_token': "upgrades",
'downgrade_token': "downgrades",
'alembic_module_prefix': 'op.',
'sqlalchemy_module_prefix': 'sa.',
'render_as_batch': True,
'include_symbol': lambda name, schema: False
}
)
template_args = {}
autogenerate._render_migration_diffs(context, template_args)
eq_(re.sub(r"u'", "'", template_args['upgrades']),
"""### commands auto generated by Alembic - please adjust! ###
pass
### end Alembic commands ###""")
eq_(re.sub(r"u'", "'", template_args['downgrades']),
"""### commands auto generated by Alembic - please adjust! ###
pass
### end Alembic commands ###""")
def test_render_diffs_standard(self):
"""test a full render including indentation"""
template_args = {}
autogenerate._render_migration_diffs(self.context, template_args)
eq_(re.sub(r"u'", "'", template_args['upgrades']),
"""### commands auto generated by Alembic - please adjust! ###
op.create_table('item',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('description', sa.String(length=100), nullable=True),
sa.Column('order_id', sa.Integer(), nullable=True),
sa.CheckConstraint('len(description) > 5'),
sa.ForeignKeyConstraint(['order_id'], ['order.order_id'], ),
sa.PrimaryKeyConstraint('id')
)
op.drop_table('extra')
op.add_column('address', sa.Column('street', sa.String(length=50), \
nullable=True))
op.create_unique_constraint('uq_email', 'address', ['email_address'])
op.add_column('order', sa.Column('user_id', sa.Integer(), nullable=True))
op.alter_column('order', 'amount',
existing_type=sa.NUMERIC(precision=8, scale=2),
type_=sa.Numeric(precision=10, scale=2),
nullable=True,
existing_server_default=sa.text('0'))
op.create_foreign_key(None, 'order', 'user', ['user_id'], ['id'])
op.alter_column('user', 'a1',
existing_type=sa.TEXT(),
server_default='x',
existing_nullable=True)
op.alter_column('user', 'name',
existing_type=sa.VARCHAR(length=50),
nullable=False)
op.drop_index('pw_idx', table_name='user')
op.drop_column('user', 'pw')
### end Alembic commands ###""")
eq_(re.sub(r"u'", "'", template_args['downgrades']),
"""### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('pw', sa.VARCHAR(length=50), \
nullable=True))
op.create_index('pw_idx', 'user', ['pw'], unique=False)
op.alter_column('user', 'name',
existing_type=sa.VARCHAR(length=50),
nullable=True)
op.alter_column('user', 'a1',
existing_type=sa.TEXT(),
server_default=None,
existing_nullable=True)
op.drop_constraint(None, 'order', type_='foreignkey')
op.alter_column('order', 'amount',
existing_type=sa.Numeric(precision=10, scale=2),
type_=sa.NUMERIC(precision=8, scale=2),
nullable=False,
existing_server_default=sa.text('0'))
op.drop_column('order', 'user_id')
op.drop_constraint('uq_email', 'address', type_='unique')
op.drop_column('address', 'street')
op.create_table('extra',
sa.Column('x', sa.CHAR(), nullable=True),
sa.Column('uid', sa.INTEGER(), nullable=True),
sa.ForeignKeyConstraint(['uid'], ['user.id'], )
)
op.drop_table('item')
### end Alembic commands ###""")
def test_render_diffs_batch(self):
"""test a full render in batch mode including indentation"""
template_args = {}
self.context.opts['render_as_batch'] = True
autogenerate._render_migration_diffs(self.context, template_args)
eq_(re.sub(r"u'", "'", template_args['upgrades']),
"""### commands auto generated by Alembic - please adjust! ###
op.create_table('item',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('description', sa.String(length=100), nullable=True),
sa.Column('order_id', sa.Integer(), nullable=True),
sa.CheckConstraint('len(description) > 5'),
sa.ForeignKeyConstraint(['order_id'], ['order.order_id'], ),
sa.PrimaryKeyConstraint('id')
)
op.drop_table('extra')
with op.batch_alter_table('address', schema=None) as batch_op:
batch_op.add_column(sa.Column('street', sa.String(length=50), nullable=True))
batch_op.create_unique_constraint('uq_email', ['email_address'])
with op.batch_alter_table('order', schema=None) as batch_op:
batch_op.add_column(sa.Column('user_id', sa.Integer(), nullable=True))
batch_op.alter_column('amount',
existing_type=sa.NUMERIC(precision=8, scale=2),
type_=sa.Numeric(precision=10, scale=2),
nullable=True,
existing_server_default=sa.text('0'))
batch_op.create_foreign_key(None, 'user', ['user_id'], ['id'])
with op.batch_alter_table('user', schema=None) as batch_op:
batch_op.alter_column('a1',
existing_type=sa.TEXT(),
server_default='x',
existing_nullable=True)
batch_op.alter_column('name',
existing_type=sa.VARCHAR(length=50),
nullable=False)
batch_op.drop_index('pw_idx')
batch_op.drop_column('pw')
### end Alembic commands ###""")
eq_(re.sub(r"u'", "'", template_args['downgrades']),
"""### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('user', schema=None) as batch_op:
batch_op.add_column(sa.Column('pw', sa.VARCHAR(length=50), nullable=True))
batch_op.create_index('pw_idx', ['pw'], unique=False)
batch_op.alter_column('name',
existing_type=sa.VARCHAR(length=50),
nullable=True)
batch_op.alter_column('a1',
existing_type=sa.TEXT(),
server_default=None,
existing_nullable=True)
with op.batch_alter_table('order', schema=None) as batch_op:
batch_op.drop_constraint(None, type_='foreignkey')
batch_op.alter_column('amount',
existing_type=sa.Numeric(precision=10, scale=2),
type_=sa.NUMERIC(precision=8, scale=2),
nullable=False,
existing_server_default=sa.text('0'))
batch_op.drop_column('user_id')
with op.batch_alter_table('address', schema=None) as batch_op:
batch_op.drop_constraint('uq_email', type_='unique')
batch_op.drop_column('street')
op.create_table('extra',
sa.Column('x', sa.CHAR(), nullable=True),
sa.Column('uid', sa.INTEGER(), nullable=True),
sa.ForeignKeyConstraint(['uid'], ['user.id'], )
)
op.drop_table('item')
### end Alembic commands ###""")
def test_imports_maintined(self):
template_args = {}
self.context.opts['render_as_batch'] = True
def render_item(type_, col, autogen_context):
autogen_context.imports.add(
"from mypackage import my_special_import"
)
autogen_context.imports.add(
"from foobar import bat"
)
self.context.opts["render_item"] = render_item
autogenerate._render_migration_diffs(self.context, template_args)
eq_(
set(
template_args['imports'].split("\n")
),
set([
"from foobar import bat",
"from mypackage import my_special_import"
])
)
class AutogenerateDiffTestWSchema(ModelOne, AutogenTest, TestBase):
__only_on__ = 'postgresql'
schema = "test_schema"
def test_render_nothing(self):
context = MigrationContext.configure(
connection=self.bind.connect(),
opts={
'compare_type': True,
'compare_server_default': True,
'target_metadata': self.m1,
'upgrade_token': "upgrades",
'downgrade_token': "downgrades",
'alembic_module_prefix': 'op.',
'sqlalchemy_module_prefix': 'sa.',
'include_symbol': lambda name, schema: False
}
)
template_args = {}
autogenerate._render_migration_diffs(context, template_args)
eq_(re.sub(r"u'", "'", template_args['upgrades']),
"""### commands auto generated by Alembic - please adjust! ###
pass
### end Alembic commands ###""")
eq_(re.sub(r"u'", "'", template_args['downgrades']),
"""### commands auto generated by Alembic - please adjust! ###
pass
### end Alembic commands ###""")
def test_render_diffs_extras(self):
"""test a full render including indentation (include and schema)"""
template_args = {}
self.context.opts.update({
'include_object': _default_include_object,
'include_schemas': True
})
autogenerate._render_migration_diffs(self.context, template_args)
eq_(re.sub(r"u'", "'", template_args['upgrades']),
"""### commands auto generated by Alembic - please adjust! ###
op.create_table('item',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('description', sa.String(length=100), nullable=True),
sa.Column('order_id', sa.Integer(), nullable=True),
sa.CheckConstraint('len(description) > 5'),
sa.ForeignKeyConstraint(['order_id'], ['%(schema)s.order.order_id'], ),
sa.PrimaryKeyConstraint('id'),
schema='%(schema)s'
)
op.drop_table('extra', schema='%(schema)s')
op.add_column('address', sa.Column('street', sa.String(length=50), \
nullable=True), schema='%(schema)s')
op.create_unique_constraint('uq_email', 'address', ['email_address'], \
schema='test_schema')
op.add_column('order', sa.Column('user_id', sa.Integer(), nullable=True), \
schema='%(schema)s')
op.alter_column('order', 'amount',
existing_type=sa.NUMERIC(precision=8, scale=2),
type_=sa.Numeric(precision=10, scale=2),
nullable=True,
existing_server_default=sa.text('0'),
schema='%(schema)s')
op.create_foreign_key(None, 'order', 'user', ['user_id'], ['id'], \
source_schema='%(schema)s', referent_schema='%(schema)s')
op.alter_column('user', 'a1',
existing_type=sa.TEXT(),
server_default='x',
existing_nullable=True,
schema='%(schema)s')
op.alter_column('user', 'name',
existing_type=sa.VARCHAR(length=50),
nullable=False,
schema='%(schema)s')
op.drop_index('pw_idx', table_name='user', schema='test_schema')
op.drop_column('user', 'pw', schema='%(schema)s')
### end Alembic commands ###""" % {"schema": self.schema})
eq_(re.sub(r"u'", "'", template_args['downgrades']),
"""### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('pw', sa.VARCHAR(length=50), \
autoincrement=False, nullable=True), schema='%(schema)s')
op.create_index('pw_idx', 'user', ['pw'], unique=False, schema='%(schema)s')
op.alter_column('user', 'name',
existing_type=sa.VARCHAR(length=50),
nullable=True,
schema='%(schema)s')
op.alter_column('user', 'a1',
existing_type=sa.TEXT(),
server_default=None,
existing_nullable=True,
schema='%(schema)s')
op.drop_constraint(None, 'order', schema='%(schema)s', type_='foreignkey')
op.alter_column('order', 'amount',
existing_type=sa.Numeric(precision=10, scale=2),
type_=sa.NUMERIC(precision=8, scale=2),
nullable=False,
existing_server_default=sa.text('0'),
schema='%(schema)s')
op.drop_column('order', 'user_id', schema='%(schema)s')
op.drop_constraint('uq_email', 'address', schema='test_schema', type_='unique')
op.drop_column('address', 'street', schema='%(schema)s')
op.create_table('extra',
sa.Column('x', sa.CHAR(length=1), autoincrement=False, nullable=True),
sa.Column('uid', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['uid'], ['%(schema)s.user.id'], \
name='extra_uid_fkey'),
schema='%(schema)s'
)
op.drop_table('item', schema='%(schema)s')
### end Alembic commands ###""" % {"schema": self.schema})
| 41.023121 | 85 | 0.595322 |
ace6e201dca9126546d7bf6b81496ec280ad3ddd | 1,750 | py | Python | venidium/wallet/puzzles/puzzle_utils.py | Venidium-Network/venidium-blockchain | 600af545018e2cc03c808315239d57c74cffd57d | [
"Apache-2.0"
] | 7 | 2021-06-29T22:23:55.000Z | 2022-02-09T04:32:46.000Z | venidium/wallet/puzzles/puzzle_utils.py | Venidium-Network/venidium-blockchain | 600af545018e2cc03c808315239d57c74cffd57d | [
"Apache-2.0"
] | 2 | 2021-09-13T03:23:59.000Z | 2022-01-12T20:20:27.000Z | venidium/wallet/puzzles/puzzle_utils.py | Venidium-Network/venidium-blockchain | 600af545018e2cc03c808315239d57c74cffd57d | [
"Apache-2.0"
] | null | null | null | from venidium.util.condition_tools import ConditionOpcode
def make_create_coin_condition(puzzle_hash, amount):
return [ConditionOpcode.CREATE_COIN, puzzle_hash, amount]
def make_assert_aggsig_condition(pubkey):
return [ConditionOpcode.AGG_SIG_UNSAFE, pubkey]
def make_assert_my_coin_id_condition(coin_name):
return [ConditionOpcode.ASSERT_MY_COIN_ID, coin_name]
def make_assert_absolute_height_exceeds_condition(block_index):
return [ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE, block_index]
def make_assert_relative_height_exceeds_condition(block_index):
return [ConditionOpcode.ASSERT_HEIGHT_RELATIVE, block_index]
def make_assert_absolute_seconds_exceeds_condition(time):
return [ConditionOpcode.ASSERT_SECONDS_ABSOLUTE, time]
def make_assert_relative_seconds_exceeds_condition(time):
return [ConditionOpcode.ASSERT_SECONDS_RELATIVE, time]
def make_reserve_fee_condition(fee):
return [ConditionOpcode.RESERVE_FEE, fee]
def make_assert_coin_announcement(announcement_hash):
return [ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT, announcement_hash]
def make_assert_puzzle_announcement(announcement_hash):
return [ConditionOpcode.ASSERT_PUZZLE_ANNOUNCEMENT, announcement_hash]
def make_create_coin_announcement(message):
return [ConditionOpcode.CREATE_COIN_ANNOUNCEMENT, message]
def make_create_puzzle_announcement(message):
return [ConditionOpcode.CREATE_PUZZLE_ANNOUNCEMENT, message]
def make_assert_my_parent_id(parent_id):
return [ConditionOpcode.ASSERT_MY_PARENT_ID, parent_id]
def make_assert_my_puzzlehash(puzzlehash):
return [ConditionOpcode.ASSERT_MY_PUZZLEHASH, puzzlehash]
def make_assert_my_amount(amount):
return [ConditionOpcode.ASSERT_MY_AMOUNT, amount]
| 28.225806 | 74 | 0.84 |
ace6e23f364b95310e5b3d626a1caf290419d68e | 5,027 | py | Python | linebot/models/messages.py | g-bot-2017/line-bot-sdk-python | 4510920a4711e2cb111bf3f557cb5b2955bf7aeb | [
"Apache-2.0"
] | 2 | 2020-08-03T15:10:55.000Z | 2020-08-03T15:13:03.000Z | linebot/models/messages.py | g-bot-2017/line-bot-sdk-python | 4510920a4711e2cb111bf3f557cb5b2955bf7aeb | [
"Apache-2.0"
] | null | null | null | linebot/models/messages.py | g-bot-2017/line-bot-sdk-python | 4510920a4711e2cb111bf3f557cb5b2955bf7aeb | [
"Apache-2.0"
] | 1 | 2018-07-22T14:58:29.000Z | 2018-07-22T14:58:29.000Z | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""linebot.models.messages module."""
from __future__ import unicode_literals
from abc import ABCMeta
from future.utils import with_metaclass
from .base import Base
class Message(with_metaclass(ABCMeta, Base)):
"""Abstract Base Class of Message."""
def __init__(self, id=None, **kwargs):
"""__init__ method.
:param str id: Message ID
:param kwargs:
"""
super(Message, self).__init__(**kwargs)
self.type = None
self.id = id
class TextMessage(Message):
"""TextMessage.
https://devdocs.line.me/en/#text-message
Message object which contains the text sent from the source.
"""
def __init__(self, id=None, text=None, **kwargs):
"""__init__ method.
:param str id: Message ID
:param str text: Message text
:param kwargs:
"""
super(TextMessage, self).__init__(id=id, **kwargs)
self.type = 'text'
self.text = text
class ImageMessage(Message):
"""ImageMessage.
https://devdocs.line.me/en/#image-message
Message object which contains the image content sent from the source.
The binary image data can be retrieved with the Content API.
"""
def __init__(self, id=None, **kwargs):
"""__init__ method.
:param str id: Message ID
:param kwargs:
"""
super(ImageMessage, self).__init__(id=id, **kwargs)
self.type = 'image'
class VideoMessage(Message):
"""VideoMessage.
https://devdocs.line.me/en/#video-message
Message object which contains the video content sent from the source.
The binary video data can be retrieved with the Content API.
"""
def __init__(self, id=None, **kwargs):
"""__init__ method.
:param str id: Message ID
:param kwargs:
"""
super(VideoMessage, self).__init__(id=id, **kwargs)
self.type = 'video'
class AudioMessage(Message):
"""AudioMessage.
https://devdocs.line.me/en/#audio-message
Message object which contains the audio content sent from the source.
The binary audio data can be retrieved with the Content API.
"""
def __init__(self, id=None, **kwargs):
"""__init__ method.
:param str id: Message ID
:param kwargs:
"""
super(AudioMessage, self).__init__(id=id, **kwargs)
self.type = 'audio'
class LocationMessage(Message):
"""LocationMessage.
https://devdocs.line.me/en/#location-message
"""
def __init__(self, id=None, title=None, address=None, latitude=None, longitude=None,
**kwargs):
"""__init__ method.
:param str id: Message ID
:param str title: Title
:param str address: Address
:param float latitude: Latitude
:param float longitude: Longitude
:param kwargs:
"""
super(LocationMessage, self).__init__(id=id, **kwargs)
self.type = 'location'
self.title = title
self.address = address
self.latitude = latitude
self.longitude = longitude
class StickerMessage(Message):
"""StickerMessage.
https://devdocs.line.me/en/#sticker-message
Message object which contains the sticker data sent from the source.
For a list of basic LINE stickers and sticker IDs, see sticker list.
"""
def __init__(self, id=None, package_id=None, sticker_id=None, **kwargs):
"""__init__ method.
:param str id: Message ID
:param str package_id: Package ID
:param str sticker_id: Sticker ID
:param kwargs:
"""
super(StickerMessage, self).__init__(id=id, **kwargs)
self.type = 'sticker'
self.package_id = package_id
self.sticker_id = sticker_id
class FileMessage(Message):
"""FileMessage.
https://devdocs.line.me/en/#file-message
Message object which contains the file content sent from the source.
The binary file data can be retrieved with the Content API.
"""
def __init__(self, id=None, file_name=None, file_size=None, **kwargs):
"""__init__ method.
:param str id: Message ID
:param str file_name: File Name
:param int file_size: File Size
:param kwargs:
"""
super(FileMessage, self).__init__(id=id, **kwargs)
self.type = 'file'
self.file_size = file_size
self.file_name = file_name
| 25.912371 | 88 | 0.634971 |
ace6e2dcd9aa42ae5f7141acf75e8a553507e493 | 1,745 | py | Python | python/github-gist.py | manthansharma/fun-code | 4af443269ee0d0f2d9fcb78bdd422d3785eb902a | [
"MIT"
] | null | null | null | python/github-gist.py | manthansharma/fun-code | 4af443269ee0d0f2d9fcb78bdd422d3785eb902a | [
"MIT"
] | null | null | null | python/github-gist.py | manthansharma/fun-code | 4af443269ee0d0f2d9fcb78bdd422d3785eb902a | [
"MIT"
] | null | null | null | import argparse
import json
import platform
import random
import string
from pprint import pprint
import clipboard
import requests
''' Add Github Gists directly from Clipboard
Requirements:
clipboard==0.0.4
requests==2.11.1
'''
username = "" # Set Username for GitHub Account
password = "" # Set Password for GitHub Account
class Github:
@staticmethod
def create_gists_clipboard(description: str, file: str, public: bool):
content = clipboard.paste()
if not description:
description = "Gist file created on " + platform.platform() + " from clipboard"
if not file:
file = "gist_clipboard_" + ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(10))
data = {
"description": description,
"public": public,
"files": {
file: {
"content": content
}
}
}
json_result = requests.post('https://api.github.com/gists',
auth=(username, password),
data=json.dumps(data))
return json_result.json()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('-d', '--description', type=str, help='Description for gists')
parser.add_argument('-f', '--file', type=str, help='Files that make up this gist.')
parser.add_argument('-p', '--public', action='store_true',
help='Indicates whether the gist is public. Default: false')
args = parser.parse_args()
response = Github.create_gists_clipboard(args.description, args.file, args.public)
pprint(response)
| 30.614035 | 120 | 0.610315 |
ace6e318b70f3655cded2766cce42b4755d7287f | 9,212 | py | Python | metartg/checks/cassandra.py | simplegeo/metartg | 0d8a5acc3741769ebfc50d2a296924645f50d2c9 | [
"BSD-3-Clause"
] | 1 | 2015-09-20T20:27:22.000Z | 2015-09-20T20:27:22.000Z | metartg/checks/cassandra.py | simplegeo/metartg | 0d8a5acc3741769ebfc50d2a296924645f50d2c9 | [
"BSD-3-Clause"
] | 1 | 2015-12-17T20:19:07.000Z | 2015-12-17T20:19:07.000Z | metartg/checks/cassandra.py | simplegeo/metartg | 0d8a5acc3741769ebfc50d2a296924645f50d2c9 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import simplejson as json
from time import time
import subprocess
import os
import urllib2
import sys
import re
def cfstats_cache_metrics():
now = int(time())
url = "http://localhost:8778/jolokia/read/*:*,type=Caches"
try:
caches = json.loads(urllib2.urlopen(url).read())['value']
except Exception, e:
sys.stderr.write("Error fetching list of CF Cache mbeans: %s" % e)
return None
pattern = re.compile(":cache\=(?P<column_family>.+?)(?P<cache_type>Row|Key)Cache,keyspace\=(?P<keyspace>.+?),")
metrics = {}
for mbean, cache in caches.items():
attrs = pattern.search(mbean).groupdict()
for metric in ('RecentHitRate', 'Capacity', 'Size'):
metrics['%s-%s-%sCache%s' % (attrs['keyspace'], attrs['column_family'], attrs['cache_type'], metric)] = {
'ts': now,
'type': 'GAUGE',
'value': cache[metric] or 0,
}
return metrics
def tpstats_metrics():
now = int(time())
url = 'http://localhost:8778/jolokia/read/org.apache.cassandra.concurrent:*'
try:
pools = json.loads(urllib2.urlopen(url).read())['value']
except Exception, e:
sys.stderr.write("Error while fetching list of beans for tpstats: %s" % e)
return None
metrics = {}
for mbean, values in pools.items():
pool = mbean.split('=')[-1]
for metric, datatype in (('ActiveCount', 'GAUGE'), ('PendingTasks', 'GAUGE'), ('CompletedTasks', 'COUNTER')):
metrics['%s_%s' % (pool, metric)] = {
'ts': now,
'type': datatype,
'value': values[metric]
}
return metrics
def sstables_metrics():
metrics = {}
for dirname in ('penelope', 'cassandra'):
if os.path.exists('/mnt/var/lib/' + dirname):
break
for keyspace in os.listdir('/mnt/var/lib/%s/data' % dirname):
now = int(time())
sizes = {}
for filename in os.listdir('/mnt/var/lib/%s/data/%s' % (dirname, keyspace)):
if not filename.endswith('-Data.db'):
continue
columnfamily = filename.split('-', 1)[0]
if not columnfamily in sizes:
sizes[columnfamily] = []
st = os.stat('/mnt/var/lib/%s/data/%s/%s' % (dirname, keyspace, filename))
sizes[columnfamily].append(st.st_size)
for columnfamily in sizes:
metrics['%s.%s.min' % (keyspace, columnfamily)] = {
'ts': now,
'type': 'GAUGE',
'value': min(sizes[columnfamily]),
}
metrics['%s.%s.max' % (keyspace, columnfamily)] = {
'ts': now,
'type': 'GAUGE',
'value': max(sizes[columnfamily]),
}
metrics['%s.%s.avg' % (keyspace, columnfamily)] = {
'ts': now,
'type': 'GAUGE',
'value': (sum(sizes[columnfamily]) / len(sizes[columnfamily])),
}
metrics['%s.%s.total' % (keyspace, columnfamily)] = {
'ts': now,
'type': 'GAUGE',
'value': sum(sizes[columnfamily]),
}
metrics['%s.%s.count' % (keyspace, columnfamily)] = {
'ts': now,
'type': 'GAUGE',
'value': len(sizes[columnfamily]),
}
return metrics
def scores_metrics():
now = int(time())
try:
keyspace = file('/etc/metartg_cassandra_keyspace', 'r').read().strip('\r\n\t ')
except:
keyspace = 'Underdog_Records'
url = 'http://localhost:8778/jolokia/read/org.apache.cassandra.db:keyspace=%s,type=DynamicEndpointSnitch/Scores' % keyspace
try:
scores = json.loads(urllib2.urlopen(url).read())['value']
except Exception, e:
sys.stderr.write("Error while fetching DES scores: %s" % e)
return None
metrics = {}
for endpoint, score in scores.items():
endpoint = endpoint.lstrip('/')
try:
metrics[endpoint] = {
'ts': now,
'type': 'GAUGE',
'value': float(score),
}
except TypeError:
pass
return metrics
def memory_metrics():
now = int(time())
url = 'http://localhost:8778/jolokia/read/java.lang:type=Memory'
try:
results = json.loads(urllib2.urlopen(url).read())['value']
except Exception, e:
sys.stderr.write("Error while fetching memory metrics: %s" % e)
return None
mapping = {
'jvm.heap.committed': ('HeapMemoryUsage', 'committed'),
'jvm.heap.used': ('HeapMemoryUsage', 'used'),
'jvm.nonheap.committed': ('NonHeapMemoryUsage', 'committed'),
'jvm.nonheap.used': ('NonHeapMemoryUsage', 'used'),
}
metrics = {}
for name, (memory_type, metric) in mapping.items():
metrics[name] = {
'ts': now,
'type': 'GAUGE',
'value': results[memory_type][metric],
}
return metrics
def compaction_metrics():
now = int(time())
url = 'http://localhost:8778/jolokia/read/org.apache.cassandra.db:type=CompactionManager'
try:
results = json.loads(urllib2.urlopen(url).read())['value']
except Exception, e:
sys.stderr.write("Error while fetching compaction metrics: %s " % e)
return None
metrics = {}
if results['PendingTasks']:
metrics['tasks.pending'] = {
'ts': now,
'type': 'GAUGE',
'value': results['PendingTasks']
}
if results['BytesTotalInProgress']:
metrics.update({
'bytes.compacting': {
'ts': now,
'type': 'GAUGE',
'value': results['BytesTotalInProgress'],
},
'bytes.remaining': {
'ts': now,
'type': 'GAUGE',
'value': results['BytesCompacted'],
}
})
return metrics or None
def commitlog_metrics():
now = int(time())
url = 'http://localhost:8778/jolokia/read/org.apache.cassandra.db:type=Commitlog'
try:
results = json.loads(urllib2.urlopen(url).read())['value']
except Exception, e:
sys.stderr.write("Error while fetching streaming metrics: %s " % e)
return None
metrics = {
'tasks.completed': {
'ts': now,
'type': 'COUNTER',
'value': results['CompletedTasks'],
},
'tasks.pending': {
'ts': now,
'type': 'GAUGE',
'value': results['PendingTasks'],
},
}
return metrics
def streaming_metrics():
now = int(time())
url = 'http://localhost:8778/jolokia/read/org.apache.cassandra.streaming:type=StreamingService'
try:
results = json.loads(urllib2.urlopen(url).read())['value']
except Exception, e:
sys.stderr.write("Error while fetching streaming metrics: %s " % e)
return None
metrics = {}
pattern = re.compile("Receiving from:\n(?P<sources>.*?)\nSending to:\n(?P<destinations>.*?)\n", re.S)
match = pattern.search(results['Status'])
if not match:
return None
values = match.groupdict()
if values['sources']:
metrics.update({
'streaming.from': {
'ts': now,
'type': 'GAUGE',
'value': len(values['sources'].strip().split('\n')),
}
})
if values['destinations']:
metrics.update({
'streaming.to': {
'ts': now,
'type': 'GAUGE',
'value': len(values['destinations'].strip().split('\n')),
}
})
return metrics
def connection_metrics():
command = subprocess.Popen("netstat -an | grep ESTABLISHED | awk '{print $4}' | grep \":9160$\" | wc -l", shell=True, stdout=subprocess.PIPE)
count = int(command.communicate()[0].strip())
return {
'connections.open': {
'ts': int(time()),
'type': 'GAUGE',
'value': count,
}
}
def run_check(callback):
callback('cassandra_tpstats', tpstats_metrics())
callback('cassandra_sstables', sstables_metrics())
callback('cassandra_scores', scores_metrics())
callback('cassandra_memory', memory_metrics())
callback('cassandra_cfstats_cache', cfstats_cache_metrics())
callback('cassandra_compaction', compaction_metrics())
callback('cassandra_commitlog', commitlog_metrics())
callback('cassandra_streaming', streaming_metrics())
callback('cassandra_connection', connection_metrics())
if __name__ == '__main__':
print json.dumps(scores_metrics(), indent=2)
print json.dumps(cfstats_cache_metrics(), indent=2)
print json.dumps(tpstats_metrics(), indent=2)
print json.dumps(sstables_metrics(), indent=2)
print json.dumps(memory_metrics(), indent=2)
print json.dumps(compaction_metrics(), indent=2)
print json.dumps(commitlog_metrics(), indent=2)
print json.dumps(streaming_metrics(), indent=2)
print json.dumps(connection_metrics(), indent=2)
| 31.440273 | 145 | 0.550695 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.