| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
utilities.py
|
aws-samples/real-time-churn-prediction-with-amazon-connect-and-amazon-sagemaker
| 0
|
12783751
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# This code snippet is lightly modified from that provided by AWS Secrets Manager during secrets creation.
import boto3
import base64
from botocore.exceptions import ClientError
import json
import matplotlib.pyplot as plt
import graphviz
import sagemaker
from sagemaker.feature_store.feature_group import FeatureGroup
from typing import Dict
def get_secret(secret_name, region_name):
# Create a Secrets Manager client
session = boto3.session.Session()
client = session.client(
service_name='secretsmanager',
region_name=region_name
)
# In this sample we only handle the specific exceptions for the 'GetSecretValue' API.
# See https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_GetSecretValue.html
# We rethrow the exception by default.
try:
get_secret_value_response = client.get_secret_value(
SecretId=secret_name
)
return get_secret_value_response
except ClientError as e:
print(e)
if e.response['Error']['Code'] == 'DecryptionFailureException':
# Secrets Manager can't decrypt the protected secret text using the provided KMS key.
# Deal with the exception here, and/or rethrow at your discretion.
raise e
elif e.response['Error']['Code'] == 'InternalServiceErrorException':
# An error occurred on the server side.
# Deal with the exception here, and/or rethrow at your discretion.
raise e
elif e.response['Error']['Code'] == 'InvalidParameterException':
# You provided an invalid value for a parameter.
# Deal with the exception here, and/or rethrow at your discretion.
raise e
elif e.response['Error']['Code'] == 'InvalidRequestException':
# You provided a parameter value that is not valid for the current state of the resource.
# Deal with the exception here, and/or rethrow at your discretion.
raise e
elif e.response['Error']['Code'] == 'ResourceNotFoundException':
# We can't find the resource that you asked for.
# Deal with the exception here, and/or rethrow at your discretion.
raise e
else:
raise e
else:
# Decrypts secret using the associated KMS CMK.
# Depending on whether the secret is a string or binary, one of these fields will be populated.
print('now in else')
if 'SecretString' in get_secret_value_response:
secret = get_secret_value_response['SecretString']
print(secret)
else:
decoded_binary_secret = base64.b64decode(get_secret_value_response['SecretBinary'])
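# Hedged usage sketch (not part of the original sample): shows how get_secret() might be
# called. The secret name and region below are placeholders, and the secret value is
# assumed to be stored as a JSON string, the typical Secrets Manager layout.
def _example_get_secret_usage():
    response = get_secret("my-demo-secret", "us-east-1")  # hypothetical secret name and region
    if response and "SecretString" in response:
        # parse the JSON payload, e.g. {"username": "...", "password": "..."}
        return json.loads(response["SecretString"])
    return None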
# Return the index of a given metric (e.g. 'train:auc' or 'validation:auc') in the
# FinalMetricDataList returned by method describe_training_job()
def get_auc_from_metrics(response, metric_type):
for x in range(len(response['FinalMetricDataList'])):
if metric_type in response['FinalMetricDataList'][x].values():
return x
# Functions for model feature exploration
def plot_feature_importance(booster, f, maxfeats = 15):
from xgboost import plot_importance
res = {k:round(v, 2) for k, v in booster.get_score(importance_type = f).items()}
gain_plot = plot_importance(res,
max_num_features = maxfeats,
importance_type = f,
title = 'Feature Importance: ' + f,
color = "#4daf4a")
plt.show()
# Calculate tree depth. Adapted the code from here
# https://stackoverflow.com/questions/29005959/depth-of-a-json-tree to Python 3.
def calculate_tree_depth(tree_dict):
# input: single tree as a dictionary
# output: depth of the tree
if 'children' in tree_dict:
return 1 + max([0] + list(map(calculate_tree_depth, tree_dict['children'])))
else:
return 1
def get_depths_as_list(all_trees):
# input: list of all trees, generated by xgboost's get_dump in json format
# output: list of the same length as all_trees where each element contains
# the depth of a tree
# list to store the depth of each tree
tree_depth = []
for i in range(len(all_trees)):
tree = json.loads(all_trees[i])
tree_depth.append(calculate_tree_depth(tree))
return tree_depth
def calculate_list_unique_elements(input_list):
# calculate number of unique elements in a list
# input: list
# output: dictionary. Keys: unique elements, values: their count
res = dict()
for i in input_list:
if i in res:
res[i] += 1
else:
res[i] = 1
return res
def find_feature(tree_dict, feature):
# input:
# tree_dict: single tree as a dictionary
# feature: feature name, str
# output: 0 if a feature is not a split, 1 if the feature is a split at any node
if "split" in tree_dict:
if tree_dict["split"] == feature:
return 1
else:
for child in tree_dict["children"]:
res = find_feature(child, feature)
if res != 0:
return res
return 0
else:
return 0
# find all trees that have a feature
def find_all_trees_with_feature(all_trees, feature):
# input:
# all_trees: list of all trees, generated by xgboost's get_dump in json format
# feature: feature name, str
# output: indices of trees where a feature has been found at any node
trees_with_features = []
for i in range(len(all_trees)):
tree = json.loads(all_trees[i])
if find_feature(tree, feature) == 1:
trees_with_features.append(i)
return trees_with_features
# given a list of features find how many trees have it
def count_trees_with_features(all_trees, feature_list):
# input:
# all_trees: list of all trees, generated by xgboost's get_dump in json format
# feature_list: list of features
# output: dictionary, keys = feature_list, values = number of trees where a feature has been found
tree_count = dict()
for i in feature_list:
tree_count[i] = 0
for i in feature_list:
for j in range(len(all_trees)):
tree = json.loads(all_trees[j])
if find_feature(tree, i) == 1:
tree_count[i] += 1
return tree_count
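# Hedged usage sketch (not part of the original file): how the tree-analysis helpers above
# might be driven from a trained xgboost Booster. The feature name "f0" is a placeholder
# for an actual model feature.
def _example_tree_analysis_usage(booster):
    all_trees = booster.get_dump(dump_format="json")  # one JSON string per tree
    depths = get_depths_as_list(all_trees)  # depth of every tree
    depth_counts = calculate_list_unique_elements(depths)  # {depth: number of trees}
    trees_with_f0 = find_all_trees_with_feature(all_trees, "f0")  # indices of trees that split on "f0"
    return depth_counts, trees_with_f0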
def get_fg_info(fg_name: str, sagemaker_session: sagemaker.Session):
boto_session = sagemaker_session.boto_session
featurestore_runtime = sagemaker_session.sagemaker_featurestore_runtime_client
feature_store_session = sagemaker.Session(
boto_session=boto_session,
sagemaker_client=sagemaker_session.sagemaker_client,
sagemaker_featurestore_runtime_client=featurestore_runtime,
)
fg = FeatureGroup(name=fg_name, sagemaker_session=feature_store_session)
return fg.athena_query()
def generate_query(dataset_dict: Dict, sagemaker_session: sagemaker.Session):
customers_fg_info = get_fg_info(
dataset_dict["customers_fg_name"],
sagemaker_session=sagemaker_session,
)
label_name = dataset_dict["label_name"]
features_names = dataset_dict["features_names"]
training_columns = [label_name] + features_names
training_columns_string = ", ".join(f'"{c}"' for c in training_columns)
query_string = f"""SELECT DISTINCT {training_columns_string}
FROM "{customers_fg_info.table_name}"
"""
return dict(
catalog=customers_fg_info.catalog,
database=customers_fg_info.database,
query_string=query_string,
)
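# Hedged usage sketch (not part of the original file): how generate_query() might be called.
# The feature group name, label and feature names are placeholders for whatever was
# registered in the SageMaker Feature Store for this project.
def _example_generate_query_usage():
    session = sagemaker.Session()
    dataset_dict = {
        "customers_fg_name": "customers-feature-group",  # hypothetical feature group name
        "label_name": "churn",  # hypothetical label column
        "features_names": ["tenure", "monthly_charges"],  # hypothetical feature columns
    }
    query_info = generate_query(dataset_dict, sagemaker_session=session)
    return query_info["query_string"]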
| 1.890625
| 2
|
Codes/Files to SQL/communes.py
|
ThibaultLanthiez/Projet-observatoire-UNICAEN
| 0
|
12783752
|
<reponame>ThibaultLanthiez/Projet-observatoire-UNICAEN
import sqlite3
import csv
conn = sqlite3.connect('data.db')
c = conn.cursor()
with open('villes_france.csv', newline='') as csvfile:
csvfile.readline()
spamreader = csv.reader(csvfile, delimiter=',', quotechar='\"')
for row in spamreader:
requete = "INSERT INTO communes VALUES (?, ?, ?, ?, ?)"
c.execute(requete, (row[10], row[1], row[3], row[19], row[20]))
conn.commit()
conn.close()
| 3.234375
| 3
|
net_io.py
|
Kaslanarian/welt-net
| 4
|
12783753
|
import numpy as np
from net import Net
from functional import *
from os import remove
temp_path = "./model/param"
def save_model(net: Net, name: str):
'''
Save the network information to disk.
parameters
----------
net : the neural network object
name : file name; the file is written under the model folder with this name
return
------
1 : indicates the model was saved successfully
'''
path = "./model/{}".format(name)
args = net.args
layer_info = "layer info:\n"
for layer in args:
layer_info += "{} {}\n".format(*layer)
criterion = "criterion : {}\n".format("ce" if net.criterion ==
ce_loss else "mse")
regualarize = "regularize : " + ("{} with alpha={}\n".format(
net.regularize, net.alpha) if net.regularize else "None\n")
with open(path, "w") as f:
f.write(layer_info)
f.write(criterion)
f.write(regualarize)
for param in net.parameters():
np.savetxt(temp_path, param)
with open(temp_path, "r") as fa:
f.write(fa.read())
remove(temp_path)
return 1
def load_model(name: str):
'''
Given a file name, read that file and rebuild the neural network model it describes.
return
------
net : the network described by the model file
'''
path = "./model/{}".format(name)
parameters = []
with open(path, "r") as f:
f.readline()  # skip the first line ("layer info:")
layer_info = []
while True:
s = f.readline()[:-1]
if "criterion" in s:
break
n, act = s.split()
layer_info.append((eval(n), act))
criterion = s.split(" : ")[-1]
s = f.readline()
if "alpha" in s: # 有正则化设置
regualarize = s[:2]
alpha = eval(s.split("=")[-1])
else:
regualarize = None
alpha = 0.01
net = Net(
*layer_info,
criterion=criterion,
regularize=regualarize,
alpha=alpha,
)
for l in range(len(layer_info) - 1):
i, o = layer_info[l][0], layer_info[l + 1][0]
str_W = "".join([f.readline() for l in range(i)])
str_b = f.readline()
with open(temp_path, "w") as fw:
fw.writelines(str_W)
W = np.loadtxt(temp_path).reshape(i, o)
with open(temp_path, "w") as fb:
fb.writelines(str_b)
b = np.loadtxt(temp_path).reshape(1, o)
parameters.extend((W, b))
net.reset_net(parameters)
remove(temp_path)
return net
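# Hedged usage sketch (not part of the original file): a save_model/load_model round trip.
# The constructor call assumes Net accepts (size, activation) tuples plus
# criterion/regularize/alpha, mirroring how load_model() rebuilds the network above;
# the layer sizes and activation names are placeholders.
def _example_save_load_usage():
    net = Net((4, "relu"), (8, "relu"), (3, "sigmoid"),
              criterion="ce", regularize=None, alpha=0.01)
    save_model(net, "demo_net")  # writes ./model/demo_net
    return load_model("demo_net")  # rebuilds the same architecture and weights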
def random_init(net: Net, path="./data/random.npy"):
'''Initialize the parameters from a given array.'''
n_layer = net.ct_layer
n_weight_list = [
n_layer[i] * n_layer[i + 1] for i in range(len(n_layer) - 1)
]
parameters = []
x = np.load(path)[:sum(n_weight_list)]
ptr = 0
for i in range(len(n_layer) - 1):
W = x[ptr:ptr + n_weight_list[i]].reshape((n_layer[i], n_layer[i + 1]))
b = np.zeros((1, n_layer[i + 1]))
parameters.extend((W, b))
ptr += n_weight_list[i]
net.reset_net(parameters, net.xavier, net.he)
return net
| 2.421875
| 2
|
First_Example/Basic_Truss.py
|
johnkour/Openseespy_projects
| 0
|
12783754
|
# Basic truss example in Openseespy:
import openseespy.opensees as ops
import openseespy.postprocessing.Get_Rendering as opsplt
##############################################################################
##### Define units in SI #####
##############################################################################
# Basic units:
m = 1
kg = 1
s = 1
N = kg * m / s**2
Pa = N / m**2
inches = 0.0254 * m
ft = 12 * inches
kip = 4.45 * 10**3 * N
ksi = 6.89 * 10**6 * Pa
##############################################################################
##### Input Variables #####
##############################################################################
x = [0.0, 12.0, 14.0, 6.0]
# x = list(map(lambda a: a * ft, x))
y = [0.0, 0.0, 0.0, 8.0]
# y = list(map(lambda a: a * ft, y))
A = [10.0, 5.0]
E = 3 * 10**3 * ksi
F = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [100.0, -50.0, 0.0]]
def aux_function(lst, scalar):
'''
Description
-----------
This function is used to multiply each value in a list by a scalar number.
It will be very useful when converting the variables from the Imperial
system to S.I.
Parameters
----------
lst : LIST OF FLOATS
Initial list with the values in the Imperial System.
scalar : FLOAT
The value used to convert from Imperial to International System.
Returns
-------
result : LIST OF FLOATS
The list with the values converted to S.I.
'''
result = [t * scalar for t in lst]
return result
(x, y, A) = ([t * ft for t in x], [t * ft for t in y],
[t * inches**2 for t in A])
F = [[t * kip for t in f] for f in F]
##############################################################################
##### Main Analysis' functions #####
##############################################################################
def Model_Build(x, y, A, E):
'''
Description
-----------
This function is used to determine the basic parameters of the structural
problem at hand.
Parameters
----------
x : LIST OF FLOATS
The list of the coordinates of the nodes along the x-axis.
y : LIST OF FLOATS
The list of the coordinates of the nodes along the y-axis.
A : LIST OF FLOATS
The list with the materials used for the different elements.
E : FLOAT
The modulus of elasticity of the elements.
Returns
-------
None.
'''
# Delete existing model.
ops.wipe()
# Define the model.
ops.model('basic', '-ndm', 2, '-ndf', 3)
# Define materials.
ops.uniaxialMaterial('Elastic', 1, E)
# Define the nodes.
m = len(x)
[ops.node(i + 1, *[x[i], y[i]]) for i in range(m)]
# Fix the nodes.
fixxity = [[0, 0, 1], [0, 1, 1], [1, 0, 1], [1, 1, 1]]
[ops.fix(i + 1, *fixxity[3]) if i + 1 != 4 else ops.fix(i + 1, *fixxity[0])
for i in range(m)]
# Define elements.
conn = [[1, 4], [2, 4], [3, 4]]
[ops.element('Truss', i + 1, *conn[i], A[1], 1) if i != 0
else ops.element('Truss', i + 1, *conn[i], A[0], 1)
for i in range(len(conn))]
# Plot model.
opsplt.plot_model()
def Rec_Setup(analysis):
'''
Description
-----------
This function is used to set up the recorders. It stores the output of the
recorders to a folder, whose name is the value of the variable: analysis.
Parameters
----------
analysis : STRING
The name of the analysis, currently performed.
Returns
-------
None.
'''
analysis += '/'
# Set recorders.
ops.recorder('Node', '-file', analysis + 'NodeDisp.txt', '-time', '-node', *[4], '-dof', *[1, 2, 3], 'disp')
ops.recorder('Node', '-file', analysis + 'ReactDisp.txt', '-time', '-node', *[4], '-dof', *[1, 2, 3], 'reaction')
ops.recorder('Element', '-file', analysis + 'ElementsForces.txt', '-time', '-ele', *[1, 2, 3], 'forces')
def Analysis_setup(analysis, F, N = 1):
'''
Description
-----------
This function is used to set up and then run the analysis.
Parameters
----------
analysis : STRING
The name of the analysis, currently performed.
F : LIST OF LISTS OF FLOATS
The list containing, for every node, a list of loads along the x and y
axes and around the z axis.
N : INTEGER
The number of analyses to run. Default value: 1
Returns
-------
None.
'''
# Auxiliary variable.
m = len(F)
# Create timeseries.
ops.timeSeries('Linear', 1)
# Create load pattern.
ops.pattern('Plain', 1 , 1)
# Define loads.
[ops.load(i + 1, *F[i]) for i in range(m)]
# Define system.
ops.system('BandSPD')
# Define numberer.
ops.numberer('RCM')
# Define constraint handler
ops.constraints('Plain')
# Define integrator.
ops.integrator('LoadControl', 1.0)
# Define algorithm
ops.algorithm('Linear')
# Create analysis object
ops.analysis('Static')
# Execute the analysis
ops.initialize() # Set recorders to start recording at 0 time.
ok = ops.analyze(N)
if ok == 0:
status = 'Analysis complete, everything went smoothly.'
else:
status = 'ERROR: ANALYSIS FAILED TO CONVERGE' + '\n' + 'Tip: Change algorithm'
print(analysis + '\n' + status + '\n')
# Close recorders and scrap model.
ops.wipe()
##############################################################################
##### Main Analysis #####
##############################################################################
# Step 1: Initialize model parameters.
Model_Build(x, y, A, E)
# Step 2: Name the type of the analysis to be performed.
analysis = 'Static'
# Step 3: Set up the recorders.
Rec_Setup(analysis)
# Step 4: Perform the analysis.
N = 10 # Number of analyses to be performed.
Analysis_setup(analysis, F, N)
# Step 5: Initialize model parameters.
Model_Build(x, y, A, E)
# Step 6: Name the type of the analysis to be performed.
analysis = 'Pushover'
# Step 7: Set up the recorders.
Rec_Setup(analysis)
# Step 8: Perform the analysis.
N = 10 # Number of analyses to be performed.
F[3][1] = 0.0 # Apply only horizontal forces.
Analysis_setup(analysis, F, N)
| 2.65625
| 3
|
magpie.py
|
kratos-batteries/data_extract
| 1
|
12783755
|
from __future__ import print_function
import pandas as pd
import json
import sys
import requests
_api_version = str('0.0.1')
class MagpieServer:
"""Object to store how to connect to a server running Magpie"""
_url = None
""" URL of server """
_models = None
""" Cached information about models held by this server. """
def __init__(self, url="http://josquin.northwestern.edu:4581/"):
"""Create a connection to a Magpie server. Defaults to
connecting with a server running at Northwestern University
hosted by the Wolverton group.
:param url: URL of server"""
self._url = url
# Test whether API versions agree
self.api_version()
def _make_request(self, path, data=None, method='get'):
"""Perform a request. Handles making error messages
:param path: str, path of request
:param data: Any data to be passed as JSON
:return: requests.Request"""
r = requests.request(method=method, url=self._url + path,
data=data)
# Check error status
if r.ok:
return r
else:
raise Exception('Request failed. Status = %d. Reason = %s'%(r.status_code, r.reason))
def api_version(self):
"""Get the API version of the server.
Prints an error message if that version is different from what is supported
by this wrapper.
:return: API version"""
# Make the request
r = self._make_request("server/version")
v = r.content
# If Python 3, convert to string
if isinstance(v, bytes):
v = v.decode()
# Check whether it agrees with version of this wrapper
if _api_version != v:
print("WARNING: API version of Magpie server different than wrapper: %s!=%s"%(_api_version, v), file=sys.stderr)
return v
def status(self):
"""Get the status of the Magpie server
:return: Status of server as dict"""
return self._make_request("server/status").json()
def models(self):
"""Get information about models held by this server
:return: dict, Information about all the models"""
if self._models is None:
self._models = self._make_request("models").json()
return self._models
def get_model_info(self, name):
"""Get information about a specific model
:param name: str, name of model
:return: dict, information about a model"""
if self._models is None or name not in self._models:
r = self._make_request("model/%s/info"%name)
return r.json()
else:
return self._models[name]
def generate_attributes(self, name, entries):
"""Generate attributes that serve as input to a certain model
:param name: str, name of model
:param entries: list, list of entries to be run (as strings)
:return: Pandas DataFrame, where [i,j] is attribute j of entries[i]"""
# Package the request
data = dict(entries=json.dumps(dict(entries=[dict(name=e) for e in entries])))
r = self._make_request("model/%s/attributes"%name, data=data, method='POST')
# Compile the results into a Pandas DataFrame
results = r.json()
attrs = pd.DataFrame([x['attributes'] for x in results['entries']],
columns=results['attributes'])
return attrs
def run_model(self, name, entries):
"""Run a particular model.
:param name: str, Name of model to be run
:param entries: list, list of entries to be run (as strings)
:return: Predicted values. Also generates the probabilities
for membership in each class for classifier models
Second column is always the predicted value as a number."""
# Get the information about this model
model_info = self.get_model_info(name)
# Check whether it is a regression model
reg = model_info['modelType'] == "regression"
# Run the model
data = dict(entries=json.dumps(dict(entries=[dict(name=e) for e in entries])))
r = self._make_request("model/%s/run"%name, data=data, method='POST')
# Generate the output dataframe
results = r.json()
if reg:
return pd.DataFrame(list(zip(entries,[x['predictedValue'] for x in results['entries']])),
columns=['Entry']+['%s (%s)'%(model_info['property'], model_info['units'])])
else:
# Get probabilities
classes = model_info['units']
probs = []
for c in classes:
probs.append([e['classProbabilities'][c] for e in results['entries']])
return pd.DataFrame(list(zip(entries,[x['predictedValue'] for x in results['entries']],
[x['predictedClass'] for x in results['entries']], *probs)),
columns=['Entry']+['Class','ClassName']+['P(%s)'%c for c in classes])
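# Hedged usage sketch (not part of the original file): connect to a Magpie server and run
# whichever model it reports first. The composition strings are placeholder inputs.
def _example_magpie_usage():
    server = MagpieServer()  # uses the default URL from __init__
    available = server.models()  # dict of model name -> model info
    model_name = next(iter(available))  # pick any model the server provides
    return server.run_model(model_name, ["NaCl", "Fe2O3"])  # DataFrame of predictions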
| 3.296875
| 3
|
djangocms_moderation/__init__.py
|
Aiky30/djangocms-moderation
| 2
|
12783756
|
__version__ = "1.0.28"
default_app_config = "djangocms_moderation.apps.ModerationConfig"
| 1.039063
| 1
|
src/mesh_edit.py
|
mbirkholzupc/hmd
| 259
|
12783757
|
<filename>src/mesh_edit.py
import numpy as np
from scipy import sparse
from scipy.sparse.linalg import lsqr, cg, eigsh
import matplotlib.pyplot as plt
import scipy.io as sio
import pickle
import sparseqr
import time
WEIGHT = 1.0
##############################################################
## Laplacian Mesh Editing ##
##############################################################
#Purpose: To return a sparse matrix representing a Laplacian matrix with
#the graph Laplacian (D - A) in the upper square part and anchors as the
#lower rows
#Inputs: mesh (polygon mesh object), anchorsIdx (indices of the anchor points)
#Returns: L (An (N+K) x N sparse matrix, where N is the number of vertices
#and K is the number of anchors)
def getLaplacianMatrixUmbrella(mesh, anchorsIdx):
n = mesh.n_vertices() # N x 3
k = anchorsIdx.shape[0]
I = []
J = []
V = []
vv_idx_list = list(mesh.vertex_vertex_indices())
# Build sparse Laplacian Matrix coordinates and values
for i in range(n):
idx_nbr = list(filter(lambda x: x != -1, vv_idx_list[i]))  # list() so it can be measured and concatenated in Python 3
num_nbr = len(idx_nbr)
I = I + ([i] * (num_nbr + 1)) # repeated row
J = J + idx_nbr + [i] # column indices and this row
V = V + ([-1] * num_nbr) + [num_nbr] # negative weights and row degree
# augment Laplacian matrix with anchor weights
for i in range(k):
I = I + [n + i]
J = J + [anchorsIdx[i]]
V = V + [WEIGHT] # default anchor weight
L = sparse.coo_matrix((V, (I, J)), shape=(n + k, n)).tocsr()
return L
# Modified for openmesh mesh objects. Note that this is only suitable for watertight models.
#Purpose: To return a sparse matrix representing a laplacian matrix with
#cotangent weights in the upper square part and anchors as the lower rows
#Inputs: mesh (polygon mesh object), anchorsIdx (indices of the anchor points)
#Returns: L (An (N+K) x N sparse matrix, where N is the number of vertices
#and K is the number of anchors)
def getLaplacianMatrixCotangent(mesh, anchorsIdx):
n = mesh.n_vertices() # N x 3
k = anchorsIdx.shape[0]
I = []
J = []
V = []
#l = mesh.vertex_vertex_indices()
for v in mesh.vertices():
weights = []
p_this = mesh.point(v)
p_nbrs = []
id_this = v.idx()
id_nbrs = []
for vv in mesh.vv(v):
p_nbrs.append(mesh.point(vv))
id_nbrs.append(vv.idx())
num_nbr = len(id_nbrs)
for i in range(num_nbr):
u = p_this - p_nbrs[(i+num_nbr-1)%num_nbr]
v = p_nbrs[(i+num_nbr)%num_nbr]- p_nbrs[(i+num_nbr-1)%num_nbr]
cotangent_1 = (np.dot(u, v)
/np.sqrt(np.sum(np.square(np.cross(u, v)))))
u = p_this - p_nbrs[(i+num_nbr+1)%num_nbr]
v = p_nbrs[(i+num_nbr)%num_nbr]- p_nbrs[(i+num_nbr+1)%num_nbr]
cotangent_2 = (np.dot(u, v)
/np.sqrt(np.sum(np.square(np.cross(u, v)))))
weights.append(-0.5 * (cotangent_1 + cotangent_2)) # cotangent weights
I = I + ([id_this] * (num_nbr + 1)) # repeated row
J = J + id_nbrs + [id_this] # column indices and this row
V = V + weights + [(-1 * np.sum(weights))] # n negative weights and row vertex sum
# augment Laplacian matrix with anchor weights
for i in range(k):
I = I + [n + i]
J = J + [anchorsIdx[i]]
V = V + [WEIGHT] # default anchor weight
L = sparse.coo_matrix((V, (I, J)), shape=(n + k, n)).tocsr()
return L
#Purpose: Given a mesh, to perform Laplacian mesh editing by solving the system
#of delta coordinates and anchors in the least squared sense
#Inputs: mesh (polygon mesh object), anchors (a K x 3 numpy array of anchor
#coordinates), anchorsIdx (a parallel array of the indices of the anchors)
#Returns: Nothing (should update mesh.VPos)
def solveLaplacianMesh(mesh, anchors, anchorsIdx, cotangent=True):
n = mesh.n_vertices()
k = anchorsIdx.shape[0]
operator = (getLaplacianMatrixUmbrella, getLaplacianMatrixCotangent)
L = operator[1](mesh, anchorsIdx) if cotangent else operator[0](mesh, anchorsIdx)
delta = np.array(L.dot(mesh.points()))
# augment delta solution matrix with weighted anchors
for i in range(k):
delta[n + i, :] = WEIGHT * anchors[i, :]
# update mesh vertices with least-squares solution
for i in range(3):
#mesh.points()[:, i] = lsqr(L, delta[:, i])[0]
mesh.points()[:, i] = sparseqr.solve(L, delta[:, i], tolerance = 1e-8)
return mesh
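# Hedged usage sketch (not part of the original file): drive solveLaplacianMesh() with a
# mesh loaded through the openmesh Python bindings. The mesh path, anchor indices and the
# small offset applied to the anchors are placeholders.
def _example_solve_laplacian_usage(mesh_path):
    import openmesh as om
    mesh = om.read_trimesh(mesh_path)
    anchorsIdx = np.array([0, 1, 2])  # hypothetical anchor vertex ids
    anchors = mesh.points()[anchorsIdx] + 0.05  # nudge the anchors to deform the mesh
    return solveLaplacianMesh(mesh, anchors, anchorsIdx, cotangent=True)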
##############################################################
## High Speed Laplacian Mesh Editing ##
##############################################################
# using umbrella weights for higher speed
class fast_deform():
def __init__(self,
f_ijv_pkl = '../predef/dsa_IJV.pkl',
f_achr_pkl = '../predef/dsa_achr.pkl',
weight = 1.0,
):
self.weight = weight
with open (f_ijv_pkl, 'rb') as fp:
dic_IJV = pickle.load(fp)
I = dic_IJV['I']
J = dic_IJV['J']
V = dic_IJV['V']
self.n = dic_IJV['num_vert']
with open (f_achr_pkl, 'rb') as fp:
dic_achr = pickle.load(fp)
#achr_id = dic_achr['achr_id']
self.k = dic_achr['achr_num']
if weight != 1.0:
num_V = len(V)
for i in range(num_V-self.k,num_V):
V[i] = V[i] * self.weight
self.L = sparse.coo_matrix((V, (I, J)), shape=(self.n + self.k, self.n)).tocsr()
def deform(self, mesh, anchors):
#t_start = time.time()
delta = np.array(self.L.dot(mesh.points()))
#t_end = time.time()
#print("delta computation time is %.5f seconds." % (t_end - t_start))
#t_start = time.time()
# augment delta solution matrix with weighted anchors
for i in range(self.k):
delta[self.n + i, :] = self.weight * anchors[i, :]
#t_end = time.time()
#print("give anchor value computation time is %.5f seconds." % (t_end - t_start))
#t_start = time.time()
# update mesh vertices with least-squares solution
for i in range(3):
mesh.points()[:, i] = sparseqr.solve(self.L, delta[:, i], tolerance = 1e-8)
#mesh.points()[:, i] = lsqr(self.L, delta[:, i])[0]
#t_end = time.time()
#print("sparse lsqr time is %.5f seconds." % (t_end - t_start))
return mesh
##############################################################
## High Speed Laplacian Mesh Editing for DSA ##
##############################################################
class fast_deform_dsa():
def __init__(self,
f_ijv_pkl = '../predef/dsa_IJV.pkl',
f_achr_pkl = '../predef/dsa_achr.pkl',
weight = 1.0,
):
self.weight = weight
with open (f_ijv_pkl, 'rb') as fp:
dic_IJV = pickle.load(fp)
self.I = dic_IJV['I']
self.J = dic_IJV['J']
self.V = dic_IJV['V']
self.n = dic_IJV['num_vert']
with open (f_achr_pkl, 'rb') as fp:
dic_achr = pickle.load(fp)
#achr_id = dic_achr['achr_id']
self.k = dic_achr['achr_num']
self.num_V = len(self.V)
if self.weight != 1.0:
for i in range(self.num_V-self.k, self.num_V):
self.V[i] = self.V[i] * self.weight
# In active_index, zero means inactive and non-zero means active
def deform(self, verts, achr_verts, active_index = []):
if active_index != []:
for i in range(len(active_index)):
if active_index[i] == 0:
self.V[self.num_V-self.k+i] = 0
self.L = sparse.coo_matrix((self.V, (self.I, self.J)),
shape=(self.n + self.k, self.n)).tocsr()
delta = np.array(self.L.dot(verts))
# augment delta solution matrix with weighted anchors
for i in range(self.k):
delta[self.n + i, :] = self.weight * achr_verts[i, :]
# update mesh vertices with least-squares solution
deformed_verts = np.zeros(verts.shape)
for i in range(3):
deformed_verts[:, i] = sparseqr.solve(self.L,
delta[:, i],
tolerance = 1e-8
)
return deformed_verts
##############################################################
## High Speed Laplacian Mesh Editing for Joint Adapt ##
##############################################################
class fast_deform_dja():
def __init__(self,
f_ijv_pkl = '../predef/dja_IJV.pkl',
f_achr_pkl = '../predef/dja_achr.pkl',
weight = 1.0,
):
self.weight = weight
with open (f_ijv_pkl, 'rb') as fp:
dic_IJV = pickle.load(fp)
self.I = dic_IJV['I']
self.J = dic_IJV['J']
self.V = dic_IJV['V']
self.n = dic_IJV['num_vert']
with open (f_achr_pkl, 'rb') as fp:
dic_achr = pickle.load(fp)
#achr_id = dic_achr['achr_id']
self.k = dic_achr['achr_num']
self.num_V = len(self.V)
if self.weight != 1.0:
for i in range(self.num_V-self.k, self.num_V):
self.V[i] = self.V[i] * self.weight
# for inactive index, zero means inactive, non-zeros means active
def deform(self, verts, achr_verts):
self.L = sparse.coo_matrix((self.V, (self.I, self.J)),
shape=(self.n + self.k, self.n)).tocsr()
delta = np.array(self.L.dot(verts))
# augment delta solution matrix with weighted anchors
for i in range(self.k):
delta[self.n + i, :] = self.weight * achr_verts[i, :]
# update mesh vertices with least-squares solution
deformed_verts = np.zeros(verts.shape)
for i in range(3):
deformed_verts[:, i] = sparseqr.solve(self.L,
delta[:, i],
tolerance = 1e-8
)
return deformed_verts
| 2.953125
| 3
|
netpyne/sim/gather.py
|
FernandoSBorges/netpyne
| 0
|
12783758
|
<filename>netpyne/sim/gather.py<gh_stars>0
"""
Module for gathering data from nodes after a simulation
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
from builtins import zip
from future import standard_library
standard_library.install_aliases()
import numpy as np
from ..specs import Dict, ODict
#------------------------------------------------------------------------------
# Gather data from nodes
#------------------------------------------------------------------------------
def gatherData(gatherLFP = True):
"""
Gathers simulation data (spikes, recorded traces, LFP, etc.) from all nodes into ``sim.allSimData``
Parameters
----------
gatherLFP : bool
Whether to gather LFP data from the nodes
**Default:** ``True``
"""
from .. import sim
sim.timing('start', 'gatherTime')
## Pack data from all hosts
if sim.rank==0:
print('\nGathering data...')
# flag to avoid saving sections data for each cell (saves gather time and space; cannot inspect cell secs or re-simulate)
if not sim.cfg.saveCellSecs:
for cell in sim.net.cells:
cell.secs = None
cell.secLists = None
# flag to avoid saving conns data for each cell (saves gather time and space; cannot inspect cell conns or re-simulate)
if not sim.cfg.saveCellConns:
for cell in sim.net.cells:
cell.conns = []
# Store conns in a compact list format instead of a long dict format (cfg.compactConnFormat contains list of keys to include)
elif sim.cfg.compactConnFormat:
sim.compactConnFormat()
# remove data structures used to calculate LFP
if gatherLFP and sim.cfg.recordLFP and hasattr(sim.net, 'compartCells') and sim.cfg.createNEURONObj:
for cell in sim.net.compartCells:
try:
del cell.imembVec
del cell.imembPtr
del cell._segCoords
except:
pass
for pop in list(sim.net.pops.values()):
try:
del pop._morphSegCoords
except:
pass
simDataVecs = ['spkt', 'spkid', 'stims', 'dipole'] + list(sim.cfg.recordTraces.keys())
if sim.cfg.recordDipoles:
_aggregateDipoles()
simDataVecs.append('dipole')
singleNodeVecs = ['t']
if sim.nhosts > 1: # only gather if >1 nodes
netPopsCellGids = {popLabel: list(pop.cellGids) for popLabel,pop in sim.net.pops.items()}
# gather only sim data
if getattr(sim.cfg, 'gatherOnlySimData', False):
nodeData = {'simData': sim.simData}
data = [None]*sim.nhosts
data[0] = {}
for k,v in nodeData.items():
data[0][k] = v
gather = sim.pc.py_alltoall(data)
sim.pc.barrier()
if sim.rank == 0: # simData
print(' Gathering only sim data...')
sim.allSimData = Dict()
for k in list(gather[0]['simData'].keys()): # initialize all keys of allSimData dict
if gatherLFP and k == 'LFP':
sim.allSimData[k] = np.zeros((gather[0]['simData']['LFP'].shape))
elif sim.cfg.recordDipoles and k == 'dipole':
for dk in sim.cfg.recordDipoles:
sim.allSimData[k][dk] = np.zeros(len(gather[0]['simData']['dipole'][dk]))
else:
sim.allSimData[k] = {}
for key in singleNodeVecs: # store single node vectors (eg. 't')
sim.allSimData[key] = list(nodeData['simData'][key])
# fill in allSimData taking into account if data is dict of h.Vector (code needs improvement to be more generic)
for node in gather: # concatenate data from each node
for key,val in node['simData'].items(): # update simData dicts of dicts of h.Vector
if key in simDataVecs: # simData dicts that contain Vectors
if isinstance(val, dict):
for key2,val2 in val.items():
if isinstance(val2,dict):
sim.allSimData[key].update(Dict({key2:Dict()}))
for stim,val3 in val2.items():
sim.allSimData[key][key2].update({stim:list(val3)}) # update simData dicts which are dicts of dicts of Vectors (eg. ['stim']['cell_1']['background']=h.Vector)
#elif key == 'dipole':
# sim.allSimData[key][key2] = np.add(sim.allSimData[key][key2],val2.as_numpy()) # add together dipole values from each node
else:
sim.allSimData[key].update({key2:list(val2)}) # update simData dicts which are dicts of Vectors (eg. ['v']['cell_1']=h.Vector)
else:
sim.allSimData[key] = list(sim.allSimData[key])+list(val) # update simData dicts which are Vectors
elif gatherLFP and key == 'LFP':
sim.allSimData[key] += np.array(val)
elif key not in singleNodeVecs:
sim.allSimData[key].update(val) # update simData dicts which are not Vectors
if len(sim.allSimData['spkt']) > 0:
sim.allSimData['spkt'], sim.allSimData['spkid'] = zip(*sorted(zip(sim.allSimData['spkt'], sim.allSimData['spkid']))) # sort spks
sim.allSimData['spkt'], sim.allSimData['spkid'] = list(sim.allSimData['spkt']), list(sim.allSimData['spkid'])
sim.net.allPops = ODict() # pops
for popLabel,pop in sim.net.pops.items(): sim.net.allPops[popLabel] = pop.__getstate__() # can't use dict comprehension for OrderedDict
sim.net.allCells = [c.__dict__ for c in sim.net.cells]
# gather cells, pops and sim data
else:
nodeData = {'netCells': [c.__getstate__() for c in sim.net.cells], 'netPopsCellGids': netPopsCellGids, 'simData': sim.simData}
data = [None]*sim.nhosts
data[0] = {}
for k,v in nodeData.items():
data[0][k] = v
#print data
gather = sim.pc.py_alltoall(data)
sim.pc.barrier()
if sim.rank == 0:
allCells = []
allPops = ODict()
for popLabel,pop in sim.net.pops.items(): allPops[popLabel] = pop.__getstate__() # can't use dict comprehension for OrderedDict
allPopsCellGids = {popLabel: [] for popLabel in netPopsCellGids}
sim.allSimData = Dict()
for k in list(gather[0]['simData'].keys()): # initialize all keys of allSimData dict
if gatherLFP and k == 'LFP':
sim.allSimData[k] = np.zeros((gather[0]['simData']['LFP'].shape))
elif sim.cfg.recordDipoles and k == 'dipole':
for dk in sim.cfg.recordDipoles:
sim.allSimData[k][dk] = np.zeros(len(gather[0]['simData']['dipole'][dk]))
else:
sim.allSimData[k] = {}
for key in singleNodeVecs: # store single node vectors (eg. 't')
sim.allSimData[key] = list(nodeData['simData'][key])
# fill in allSimData taking into account if data is dict of h.Vector (code needs improvement to be more generic)
for node in gather: # concatenate data from each node
allCells.extend(node['netCells']) # extend allCells list
for popLabel,popCellGids in node['netPopsCellGids'].items():
allPopsCellGids[popLabel].extend(popCellGids)
for key,val in node['simData'].items(): # update simData dicts of dicts of h.Vector
if key in simDataVecs: # simData dicts that contain Vectors
if isinstance(val,dict):
for key2,val2 in val.items():
if isinstance(val2,dict):
sim.allSimData[key].update(Dict({key2:Dict()}))
for stim,val3 in val2.items():
sim.allSimData[key][key2].update({stim:list(val3)}) # update simData dicts which are dicts of dicts of Vectors (eg. ['stim']['cell_1']['background']=h.Vector)
#elif key == 'dipole':
# sim.allSimData[key][key2] = np.add(sim.allSimData[key][key2],val2.as_numpy()) # add together dipole values from each node
else:
sim.allSimData[key].update({key2:list(val2)}) # update simData dicts which are dicts of Vectors (eg. ['v']['cell_1']=h.Vector)
else:
sim.allSimData[key] = list(sim.allSimData[key])+list(val) # update simData dicts which are Vectors
elif gatherLFP and key == 'LFP':
sim.allSimData[key] += np.array(val)
elif key not in singleNodeVecs:
sim.allSimData[key].update(val) # update simData dicts which are not Vectors
if len(sim.allSimData['spkt']) > 0:
sim.allSimData['spkt'], sim.allSimData['spkid'] = zip(*sorted(zip(sim.allSimData['spkt'], sim.allSimData['spkid']))) # sort spks
sim.allSimData['spkt'], sim.allSimData['spkid'] = list(sim.allSimData['spkt']), list(sim.allSimData['spkid'])
sim.net.allCells = sorted(allCells, key=lambda k: k['gid'])
for popLabel,pop in allPops.items():
pop['cellGids'] = sorted(allPopsCellGids[popLabel])
sim.net.allPops = allPops
# clean to avoid mem leaks
for node in gather:
if node:
node.clear()
del node
for item in data:
if item:
item.clear()
del item
else: # if single node, save data in same format as for multiple nodes for consistency
if sim.cfg.createNEURONObj:
sim.net.allCells = [Dict(c.__getstate__()) for c in sim.net.cells]
else:
sim.net.allCells = [c.__dict__ for c in sim.net.cells]
sim.net.allPops = ODict()
for popLabel,pop in sim.net.pops.items(): sim.net.allPops[popLabel] = pop.__getstate__() # can't use dict comprehension for OrderedDict
sim.allSimData = Dict()
for k in list(sim.simData.keys()): # initialize all keys of allSimData dict
sim.allSimData[k] = Dict()
for key,val in sim.simData.items(): # update simData dicts of dicts of h.Vector
if key in simDataVecs+singleNodeVecs: # simData dicts that contain Vectors
if isinstance(val,dict):
for cell,val2 in val.items():
if isinstance(val2,dict):
sim.allSimData[key].update(Dict({cell:Dict()}))
for stim,val3 in val2.items():
sim.allSimData[key][cell].update({stim:list(val3)}) # update simData dicts which are dicts of dicts of Vectors (eg. ['stim']['cell_1']['background']=h.Vector)
else:
sim.allSimData[key].update({cell:list(val2)}) # update simData dicts which are dicts of Vectors (eg. ['v']['cell_1']=h.Vector)
else:
sim.allSimData[key] = list(sim.allSimData[key])+list(val) # update simData dicts which are Vectors
else:
sim.allSimData[key] = val # update simData dicts which are not Vectors
## Print statistics
sim.pc.barrier()
if sim.rank == 0:
sim.timing('stop', 'gatherTime')
if sim.cfg.timing: print((' Done; gather time = %0.2f s.' % sim.timingData['gatherTime']))
print('\nAnalyzing...')
sim.totalSpikes = len(sim.allSimData['spkt'])
sim.totalSynapses = sum([len(cell['conns']) for cell in sim.net.allCells])
if sim.cfg.createPyStruct:
if sim.cfg.compactConnFormat:
preGidIndex = sim.cfg.compactConnFormat.index('preGid') if 'preGid' in sim.cfg.compactConnFormat else 0
sim.totalConnections = sum([len(set([conn[preGidIndex] for conn in cell['conns']])) for cell in sim.net.allCells])
else:
sim.totalConnections = sum([len(set([conn['preGid'] for conn in cell['conns']])) for cell in sim.net.allCells])
else:
sim.totalConnections = sim.totalSynapses
sim.numCells = len(sim.net.allCells)
if sim.totalSpikes > 0:
sim.firingRate = float(sim.totalSpikes)/sim.numCells/sim.cfg.duration*1e3 # Calculate firing rate
else:
sim.firingRate = 0
if sim.numCells > 0:
sim.connsPerCell = sim.totalConnections/float(sim.numCells) # Calculate the number of connections per cell
sim.synsPerCell = sim.totalSynapses/float(sim.numCells) # Calculate the number of connections per cell
else:
sim.connsPerCell = 0
sim.synsPerCell = 0
print((' Cells: %i' % (sim.numCells) ))
print((' Connections: %i (%0.2f per cell)' % (sim.totalConnections, sim.connsPerCell)))
if sim.totalSynapses != sim.totalConnections:
print((' Synaptic contacts: %i (%0.2f per cell)' % (sim.totalSynapses, sim.synsPerCell)))
if 'runTime' in sim.timingData:
print((' Spikes: %i (%0.2f Hz)' % (sim.totalSpikes, sim.firingRate)))
print((' Simulated time: %0.1f s; %i workers' % (sim.cfg.duration/1e3, sim.nhosts)))
print((' Run time: %0.2f s' % (sim.timingData['runTime'])))
if sim.cfg.printPopAvgRates and not sim.cfg.gatherOnlySimData:
trange = sim.cfg.printPopAvgRates if isinstance(sim.cfg.printPopAvgRates, list) else None
sim.allSimData['popRates'] = sim.analysis.popAvgRates(tranges=trange)
if 'plotfI' in sim.cfg.analysis:
sim.analysis.calculatefI() # need to call here so data is saved to file
sim.allSimData['avgRate'] = sim.firingRate # save firing rate
return sim.allSimData
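# Hedged usage sketch (not part of the original module): gatherData() is normally invoked
# once the parallel simulation has finished; calling it on every MPI rank looks like this,
# with the merged data returned on rank 0.
def _example_gather_usage():
    return gatherData(gatherLFP=True)  # populates and returns sim.allSimData on the master node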
#------------------------------------------------------------------------------
# Gathers simData from files
#------------------------------------------------------------------------------
def fileGather(gatherLFP = True):
"""
Gathers simulation data previously saved to per-node files into ``sim.allSimData``
Parameters
----------
gatherLFP : bool
Whether to gather LFP data from the nodes
**Default:** ``True``
"""
import os, pickle
from .. import sim
sim.timing('start', 'gatherTime')
# iterate through the saved files and concat their data
fileData = Dict()
if sim.rank == 0:
import re
if hasattr(sim.cfg, 'intervalFolder'):
targetFolder = sim.cfg.intervalFolder
else:
targetFolder = os.path.dirname(sim.cfg.filename)
for f in os.listdir(targetFolder):
if re.search(r'data_\d+.pkl$', f) is not None:
with open(targetFolder + '/' + f, 'rb') as data:
temp = pickle.load(data)
for k in temp.keys():
if k in fileData:
if isinstance(temp[k], list):
fileData[k] = fileData[k] + temp[k]
elif isinstance(temp[k], dict):
fileData[k].update(temp[k])
else:
fileData[k] = temp[k]
simDataVecs = ['spkt','spkid','stims']+list(sim.cfg.recordTraces.keys())
singleNodeVecs = ['t']
if sim.rank == 0:
sim.allSimData = Dict()
sim.allSimData.update(fileData)
if len(sim.allSimData['spkt']) > 0:
sim.allSimData['spkt'], sim.allSimData['spkid'] = zip(*sorted(zip(sim.allSimData['spkt'], sim.allSimData['spkid']))) # sort spks
sim.allSimData['spkt'], sim.allSimData['spkid'] = list(sim.allSimData['spkt']), list(sim.allSimData['spkid'])
# 1 get the right data, now check that we have right amount
# 2 use that data rather than gathering later
## Pack data from all hosts
if sim.rank==0:
print('\nGathering data from files...')
# flag to avoid saving sections data for each cell (saves gather time and space; cannot inspect cell secs or re-simulate)
if not sim.cfg.saveCellSecs:
for cell in sim.net.cells:
cell.secs = None
cell.secLists = None
# flag to avoid saving conns data for each cell (saves gather time and space; cannot inspect cell conns or re-simulate)
if not sim.cfg.saveCellConns:
for cell in sim.net.cells:
cell.conns = []
# Store conns in a compact list format instead of a long dict format (cfg.compactConnFormat contains list of keys to include)
elif sim.cfg.compactConnFormat:
sim.compactConnFormat()
# remove data structures used to calculate LFP
if gatherLFP and sim.cfg.recordLFP and hasattr(sim.net, 'compartCells') and sim.cfg.createNEURONObj:
for cell in sim.net.compartCells:
try:
del cell.imembVec
del cell.imembPtr
del cell._segCoords
except:
pass
for pop in list(sim.net.pops.values()):
try:
del pop._morphSegCoords
except:
pass
# simDataVecs = ['spkt','spkid','stims']+list(sim.cfg.recordTraces.keys())
# singleNodeVecs = ['t']
if sim.nhosts > 1: # only gather if >1 nodes
netPopsCellGids = {popLabel: list(pop.cellGids) for popLabel,pop in sim.net.pops.items()}
# gather only sim data
if getattr(sim.cfg, 'gatherOnlySimData', False):
pass
# gather the non-simData
else:
# nodeData = {'netCells': [c.__getstate__() for c in sim.net.cells], 'netPopsCellGids': netPopsCellGids, 'simData': sim.simData}
nodeData = {'netCells': [c.__getstate__() for c in sim.net.cells], 'netPopsCellGids': netPopsCellGids}
data = [None]*sim.nhosts
data[0] = {}
for k,v in nodeData.items():
data[0][k] = v
#print data
gather = sim.pc.py_alltoall(data)
sim.pc.barrier()
if sim.rank == 0:
allCells = []
allPops = ODict()
for popLabel,pop in sim.net.pops.items(): allPops[popLabel] = pop.__getstate__() # can't use dict comprehension for OrderedDict
allPopsCellGids = {popLabel: [] for popLabel in netPopsCellGids} ####################
# fill in allSimData taking into account if data is dict of h.Vector (code needs improvement to be more generic)
for node in gather: # concatenate data from each node
allCells.extend(node['netCells']) # extend allCells list
for popLabel,popCellGids in node['netPopsCellGids'].items():
allPopsCellGids[popLabel].extend(popCellGids)
if len(sim.allSimData['spkt']) > 0:
sim.allSimData['spkt'], sim.allSimData['spkid'] = zip(*sorted(zip(sim.allSimData['spkt'], sim.allSimData['spkid']))) # sort spks
sim.allSimData['spkt'], sim.allSimData['spkid'] = list(sim.allSimData['spkt']), list(sim.allSimData['spkid'])
sim.net.allCells = sorted(allCells, key=lambda k: k['gid'])
for popLabel,pop in allPops.items():
pop['cellGids'] = sorted(allPopsCellGids[popLabel])
sim.net.allPops = allPops
# clean to avoid mem leaks
for node in gather:
if node:
node.clear()
del node
for item in data:
if item:
item.clear()
del item
## Print statistics
sim.pc.barrier()
if sim.rank == 0:
sim.timing('stop', 'gatherTime')
if sim.cfg.timing: print((' Done; gather time = %0.2f s.' % sim.timingData['gatherTime']))
print('\nAnalyzing...')
sim.totalSpikes = len(sim.allSimData['spkt'])
sim.totalSynapses = sum([len(cell['conns']) for cell in sim.net.allCells])
if sim.cfg.createPyStruct:
if sim.cfg.compactConnFormat:
preGidIndex = sim.cfg.compactConnFormat.index('preGid') if 'preGid' in sim.cfg.compactConnFormat else 0
sim.totalConnections = sum([len(set([conn[preGidIndex] for conn in cell['conns']])) for cell in sim.net.allCells])
else:
sim.totalConnections = sum([len(set([conn['preGid'] for conn in cell['conns']])) for cell in sim.net.allCells])
else:
sim.totalConnections = sim.totalSynapses
sim.numCells = len(sim.net.allCells)
if sim.totalSpikes > 0:
sim.firingRate = float(sim.totalSpikes)/sim.numCells/sim.cfg.duration*1e3 # Calculate firing rate
else:
sim.firingRate = 0
if sim.numCells > 0:
sim.connsPerCell = sim.totalConnections/float(sim.numCells) # Calculate the number of connections per cell
sim.synsPerCell = sim.totalSynapses/float(sim.numCells) # Calculate the number of connections per cell
else:
sim.connsPerCell = 0
sim.synsPerCell = 0
print((' Cells: %i' % (sim.numCells) ))
print((' Connections: %i (%0.2f per cell)' % (sim.totalConnections, sim.connsPerCell)))
if sim.totalSynapses != sim.totalConnections:
print((' Synaptic contacts: %i (%0.2f per cell)' % (sim.totalSynapses, sim.synsPerCell)))
if 'runTime' in sim.timingData:
print((' Spikes: %i (%0.2f Hz)' % (sim.totalSpikes, sim.firingRate)))
print((' Simulated time: %0.1f s; %i workers' % (sim.cfg.duration/1e3, sim.nhosts)))
print((' Run time: %0.2f s' % (sim.timingData['runTime'])))
if sim.cfg.printPopAvgRates and not sim.cfg.gatherOnlySimData:
trange = sim.cfg.printPopAvgRates if isinstance(sim.cfg.printPopAvgRates,list) else None
sim.allSimData['popRates'] = sim.analysis.popAvgRates(tranges=trange)
if 'plotfI' in sim.cfg.analysis:
# 'get' was undefined here; assume plotfI is configured as a dict of options
plotfI_cfg = sim.cfg.analysis['plotfI'] if isinstance(sim.cfg.analysis['plotfI'], dict) else {}
times = plotfI_cfg.get('times', [0, sim.cfg.duration])
dur = plotfI_cfg.get('dur', sim.cfg.duration)
sim.allSimData['fI'] = [len([spkt for spkt in sim.allSimData['spkt']
if t <= spkt < t + dur]) / (dur / 1000.0) for t in times]
sim.allSimData['avgRate'] = sim.firingRate # save firing rate
return sim.allSimData
#------------------------------------------------------------------------------
# Gathers simData from files
#------------------------------------------------------------------------------
def mergeFiles(gatherLFP = True, targetFolder = None, saveFilename = None):
"""
Merges the data files saved by each node into a single data file (`netpyne.sim.gather.mergeFiles`)
Parameters
----------
gatherLFP : bool
Whether to gather LFP data from the nodes
**Default:** ``True``
"""
import os, pickle
from .. import sim
# iterate through the saved files and concat their data
fileData = Dict()
if sim.rank == 0:
import re
if not targetFolder:
if hasattr(sim.cfg, 'intervalFolder'):
targetFolder = sim.cfg.intervalFolder
else:
targetFolder = os.path.dirname(sim.cfg.filename)
# find all individual sim labels whose files need to be gathered
simLabels = [f.replace('_node0.pkl','') for f in os.listdir(targetFolder) if f.endswith('_node0.pkl')]
for simLabel in simLabels:
print('Merging files for simulation %s...' % (simLabel))
fileList = [f for f in os.listdir(targetFolder) if f.startswith(simLabel+'_node')]
for f in fileList:
with open(targetFolder + '/' + f, 'rb') as data:
temp = pickle.load(data)
print(' Merging data file %s' % (f))
for k in temp.keys():
if k in fileData:
if isinstance(temp[k], list):
fileData[k] = fileData[k] + temp[k]
elif isinstance(temp[k], dict):
fileData[k].update(temp[k])
else:
fileData[k] = temp[k]
simDataVecs = ['spkt','spkid','stims']+list(sim.cfg.recordTraces.keys())
singleNodeVecs = ['t']
sim.allSimData = Dict()
sim.allSimData.update(fileData)
if len(sim.allSimData['spkt']) > 0:
sim.allSimData['spkt'], sim.allSimData['spkid'] = zip(*sorted(zip(sim.allSimData['spkt'], sim.allSimData['spkid']))) # sort spks
sim.allSimData['spkt'], sim.allSimData['spkid'] = list(sim.allSimData['spkt']), list(sim.allSimData['spkid'])
# 1 get the right data, now check that we have right amount
# 2 use that data rather than gathering later
## Pack data from all hosts
if sim.rank==0:
print('\nGathering data from files...')
# flag to avoid saving sections data for each cell (saves gather time and space; cannot inspect cell secs or re-simulate)
if not sim.cfg.saveCellSecs:
for cell in sim.net.cells:
cell.secs = None
cell.secLists = None
# flag to avoid saving conns data for each cell (saves gather time and space; cannot inspect cell conns or re-simulate)
if not sim.cfg.saveCellConns:
for cell in sim.net.cells:
cell.conns = []
# Store conns in a compact list format instead of a long dict format (cfg.compactConnFormat contains list of keys to include)
elif sim.cfg.compactConnFormat:
sim.compactConnFormat()
# remove data structures used to calculate LFP
if gatherLFP and sim.cfg.recordLFP and hasattr(sim.net, 'compartCells') and sim.cfg.createNEURONObj:
for cell in sim.net.compartCells:
try:
del cell.imembVec
del cell.imembPtr
del cell._segCoords
except:
pass
for pop in list(sim.net.pops.values()):
try:
del pop._morphSegCoords
except:
pass
# simDataVecs = ['spkt','spkid','stims']+list(sim.cfg.recordTraces.keys())
# singleNodeVecs = ['t']
if sim.nhosts > 1: # only gather if >1 nodes
netPopsCellGids = {popLabel: list(pop.cellGids) for popLabel,pop in sim.net.pops.items()}
# gather only sim data
if getattr(sim.cfg, 'gatherOnlySimData', False):
pass
# gather the non-simData
else:
# nodeData = {'netCells': [c.__getstate__() for c in sim.net.cells], 'netPopsCellGids': netPopsCellGids, 'simData': sim.simData}
nodeData = {'netCells': [c.__getstate__() for c in sim.net.cells], 'netPopsCellGids': netPopsCellGids}
data = [None]*sim.nhosts
data[0] = {}
for k,v in nodeData.items():
data[0][k] = v
#print data
gather = sim.pc.py_alltoall(data)
sim.pc.barrier()
if sim.rank == 0:
allCells = []
allPops = ODict()
for popLabel,pop in sim.net.pops.items(): allPops[popLabel] = pop.__getstate__() # can't use dict comprehension for OrderedDict
allPopsCellGids = {popLabel: [] for popLabel in netPopsCellGids} ####################
# fill in allSimData taking into account if data is dict of h.Vector (code needs improvement to be more generic)
for node in gather: # concatenate data from each node
allCells.extend(node['netCells']) # extend allCells list
for popLabel,popCellGids in node['netPopsCellGids'].items():
allPopsCellGids[popLabel].extend(popCellGids)
if len(sim.allSimData['spkt']) > 0:
sim.allSimData['spkt'], sim.allSimData['spkid'] = zip(*sorted(zip(sim.allSimData['spkt'], sim.allSimData['spkid']))) # sort spks
sim.allSimData['spkt'], sim.allSimData['spkid'] = list(sim.allSimData['spkt']), list(sim.allSimData['spkid'])
sim.net.allCells = sorted(allCells, key=lambda k: k['gid'])
for popLabel,pop in allPops.items():
pop['cellGids'] = sorted(allPopsCellGids[popLabel])
sim.net.allPops = allPops
# clean to avoid mem leaks
for node in gather:
if node:
node.clear()
del node
for item in data:
if item:
item.clear()
del item
sim.saveData(filename=saveFilename)
# SAVE TO SINGLE FILE!! option to save only parts of it
#------------------------------------------------------------------------------
# Gather tags from cells
#------------------------------------------------------------------------------
def _gatherAllCellTags():
from .. import sim
data = [{cell.gid: cell.tags for cell in sim.net.cells}]*sim.nhosts # send cells data to other nodes
gather = sim.pc.py_alltoall(data) # collect cells data from other nodes (required to generate connections)
sim.pc.barrier()
allCellTags = {}
for dataNode in gather:
allCellTags.update(dataNode)
# clean to avoid mem leaks
for node in gather:
if node:
node.clear()
del node
for item in data:
if item:
item.clear()
del item
return allCellTags
#------------------------------------------------------------------------------
# Gather conn preGids from cells
#------------------------------------------------------------------------------
def _gatherAllCellConnPreGids():
from .. import sim
data = [{cell.gid: [conn['preGid'] for conn in cell.conns] for cell in sim.net.cells}]*sim.nhosts # send cells data to other nodes
gather = sim.pc.py_alltoall(data) # collect cells data from other nodes (required to generate connections)
sim.pc.barrier()
allCellConnPreGids = {}
for dataNode in gather:
allCellConnPreGids.update(dataNode)
# clean to avoid mem leaks
for node in gather:
if node:
node.clear()
del node
for item in data:
if item:
item.clear()
del item
return allCellConnPreGids
#------------------------------------------------------------------------------
# Gather data from nodes
#------------------------------------------------------------------------------
def _gatherCells():
from .. import sim
## Pack data from all hosts
if sim.rank==0:
print('\nUpdating sim.net.allCells...')
if sim.nhosts > 1: # only gather if >1 nodes
nodeData = {'netCells': [c.__getstate__() for c in sim.net.cells]}
data = [None]*sim.nhosts
data[0] = {}
for k,v in nodeData.items():
data[0][k] = v
gather = sim.pc.py_alltoall(data)
sim.pc.barrier()
if sim.rank == 0:
allCells = []
# fill in allSimData taking into account if data is dict of h.Vector (code needs improvement to be more generic)
for node in gather: # concatenate data from each node
allCells.extend(node['netCells']) # extend allCells list
sim.net.allCells = sorted(allCells, key=lambda k: k['gid'])
# clean to avoid mem leaks
for node in gather:
if node:
node.clear()
del node
for item in data:
if item:
item.clear()
del item
else: # if single node, save data in same format as for multiple nodes for consistency
sim.net.allCells = [c.__getstate__() for c in sim.net.cells]
#------------------------------------------------------------------------------
# Aggregate dipole data for each cell on nodes
#------------------------------------------------------------------------------
def _aggregateDipoles ():
from .. import sim
if not hasattr(sim.net, 'compartCells'):
sim.net.compartCells = [c for c in sim.net.cells if type(c) is sim.CompartCell]
for k in sim.cfg.recordDipoles:
sim.simData['dipole'][k] = sim.h.Vector((sim.cfg.duration/sim.cfg.recordStep)+1)
for cell in sim.net.compartCells:
for k, v in sim.cfg.recordDipoles.items():
if cell.tags['pop'] in v:
sim.simData['dipole'][k].add(cell.dipole['hRec'])
| 2.203125
| 2
|
alembic/versions/0184d8fc0100_map_stats.py
|
FlorianSW/hll_rcon_tool
| 49
|
12783759
|
<filename>alembic/versions/0184d8fc0100_map_stats.py<gh_stars>10-100
"""map stats
Revision ID: 0184d8fc0100
Revises: <KEY>
Create Date: 2021-04-11 13:25:34.188212
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0184d8fc0100'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('map_history',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('creation_time', sa.TIMESTAMP(), nullable=True),
sa.Column('start', sa.DateTime(), nullable=False),
sa.Column('end', sa.DateTime(), nullable=True),
sa.Column('server_number', sa.Integer(), nullable=True),
sa.Column('map_name', sa.String(), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('start', 'end', 'server_number', 'map_name', name='unique_map')
)
op.create_index(op.f('ix_map_history_end'), 'map_history', ['end'], unique=False)
op.create_index(op.f('ix_map_history_map_name'), 'map_history', ['map_name'], unique=False)
op.create_index(op.f('ix_map_history_server_number'), 'map_history', ['server_number'], unique=False)
op.create_index(op.f('ix_map_history_start'), 'map_history', ['start'], unique=False)
op.create_table('player_stats',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('playersteamid_id', sa.Integer(), nullable=False),
sa.Column('map_id', sa.Integer(), nullable=False),
sa.Column('kills', sa.Integer(), nullable=True),
sa.Column('kills_streak', sa.Integer(), nullable=True),
sa.Column('death', sa.Integer(), nullable=True),
sa.Column('deaths_without_kill_streak', sa.Integer(), nullable=True),
sa.Column('teamkills', sa.Integer(), nullable=True),
sa.Column('teamkills_streak', sa.Integer(), nullable=True),
sa.Column('deaths_by_tk', sa.Integer(), nullable=True),
sa.Column('deaths_by_tk_streak', sa.Integer(), nullable=True),
sa.Column('nb_vote_started', sa.Integer(), nullable=True),
sa.Column('nb_voted_yes', sa.Integer(), nullable=True),
sa.Column('nb_voted_no', sa.Integer(), nullable=True),
sa.Column('time_seconds', sa.Integer(), nullable=True),
sa.Column('kills_per_minute', sa.Float(), nullable=True),
sa.Column('deaths_per_minute', sa.Float(), nullable=True),
sa.Column('kill_death_ratio', sa.Float(), nullable=True),
sa.ForeignKeyConstraint(['map_id'], ['map_history.id'], ),
sa.ForeignKeyConstraint(['playersteamid_id'], ['steam_id_64.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('playersteamid_id', 'map_id', name='unique_map_player')
)
op.create_index(op.f('ix_player_stats_map_id'), 'player_stats', ['map_id'], unique=False)
op.create_index(op.f('ix_player_stats_playersteamid_id'), 'player_stats', ['playersteamid_id'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_player_stats_playersteamid_id'), table_name='player_stats')
op.drop_index(op.f('ix_player_stats_map_id'), table_name='player_stats')
op.drop_table('player_stats')
op.drop_index(op.f('ix_map_history_start'), table_name='map_history')
op.drop_index(op.f('ix_map_history_server_number'), table_name='map_history')
op.drop_index(op.f('ix_map_history_map_name'), table_name='map_history')
op.drop_index(op.f('ix_map_history_end'), table_name='map_history')
op.drop_table('map_history')
# ### end Alembic commands ###
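# Hedged usage note (standard Alembic workflow, not project-specific setup): with an
# alembic.ini pointing at the target database, this revision is applied and reverted
# from the command line:
#   alembic upgrade head    # runs upgrade() above, creating map_history and player_stats
#   alembic downgrade -1    # runs downgrade() above, dropping the indexes and tables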
| 1.875
| 2
|
pocean/tests/dsg/trajectory/test_trajectory_cr.py
|
axiom-data-science/pocean-core
| 13
|
12783760
|
#!python
# coding=utf-8
import os
import unittest
import tempfile
from os.path import join as jn
from os.path import dirname as dn
import pytest
from pocean.dsg import ContiguousRaggedTrajectory, get_calculated_attributes
from pocean.tests.dsg.test_new import test_is_mine
import logging
from pocean import logger
logger.level = logging.INFO
logger.handlers = [logging.StreamHandler()]
@pytest.mark.parametrize("fp", [
#jn(dn(__file__), 'resources', 'cr-single.nc'),
jn(dn(__file__), 'resources', 'cr-multiple.nc'),
jn(dn(__file__), 'resources', 'cr-oot-A.nc'),
jn(dn(__file__), 'resources', 'cr-oot-B.nc'),
])
def test_crt_load(fp):
test_is_mine(ContiguousRaggedTrajectory, fp)
class TestContiguousRaggedTrajectory(unittest.TestCase):
def setUp(self):
self.multi = jn(dn(__file__), 'resources', 'cr-multiple.nc')
self.oot_A = jn(dn(__file__), 'resources', 'cr-oot-A.nc')
self.oot_B = jn(dn(__file__), 'resources', 'cr-oot-B.nc')
def test_crt_dataframe_multiple(self):
axes = {
't': 'time',
'x': 'lon',
'y': 'lat',
'z': 'z',
}
fid, tmpnc = tempfile.mkstemp(suffix='.nc')
with ContiguousRaggedTrajectory(self.multi) as ncd:
df = ncd.to_dataframe(axes=axes)
with ContiguousRaggedTrajectory.from_dataframe(df, tmpnc, axes=axes) as result_ncd:
assert 'trajectory' in result_ncd.dimensions
test_is_mine(ContiguousRaggedTrajectory, tmpnc) # Try to load it again
os.close(fid)
os.remove(tmpnc)
def test_crt_dataframe_multiple_unique_dims(self):
axes = {
't': 'time',
'x': 'lon',
'y': 'lat',
'z': 'z',
}
fid, tmpnc = tempfile.mkstemp(suffix='.nc')
with ContiguousRaggedTrajectory(self.multi) as ncd:
df = ncd.to_dataframe(axes=axes)
with ContiguousRaggedTrajectory.from_dataframe(df, tmpnc, axes=axes, unique_dims=True) as result_ncd:
assert 'trajectory_dim' in result_ncd.dimensions
test_is_mine(ContiguousRaggedTrajectory, tmpnc) # Try to load it again
os.close(fid)
os.remove(tmpnc)
def test_crt_dataframe_unlimited_dim(self):
axes = {
't': 'time',
'x': 'lon',
'y': 'lat',
'z': 'z',
}
fid, tmpnc = tempfile.mkstemp(suffix='.nc')
with ContiguousRaggedTrajectory(self.multi) as ncd:
df = ncd.to_dataframe(axes=axes)
with ContiguousRaggedTrajectory.from_dataframe(df, tmpnc, axes=axes, unlimited=True, unique_dims=True) as result_ncd:
assert 'trajectory_dim' in result_ncd.dimensions
assert 'obs_dim' in result_ncd.dimensions
assert result_ncd.dimensions['obs_dim'].isunlimited() is True
test_is_mine(ContiguousRaggedTrajectory, tmpnc) # Try to load it again
os.close(fid)
os.remove(tmpnc)
def test_crt_dataframe_oot_A(self):
axes = {
't': 'time',
'x': 'lon',
'y': 'lat',
'z': 'depth',
'sample': 'sample'
}
fid, tmpnc = tempfile.mkstemp(suffix='.nc')
with ContiguousRaggedTrajectory(self.oot_A) as ncd:
df = ncd.to_dataframe(axes=axes)
df = df.sort_values(['trajectory', 'time'])
attrs = get_calculated_attributes(df, axes=axes)
with ContiguousRaggedTrajectory.from_dataframe(df, tmpnc, axes=axes, mode='a') as result_ncd:
assert 'sample' in result_ncd.dimensions
assert result_ncd.dimensions['sample'].size == 6610
assert 'trajectory' in result_ncd.dimensions
# This is removing null trajectories that have no data. Not much to do about this
# because there is no way to store this empty trajectory in a dataframe.
assert result_ncd.dimensions['trajectory'].size == 507
result_ncd.apply_meta(attrs)
test_is_mine(ContiguousRaggedTrajectory, tmpnc) # Try to load it again
os.close(fid)
os.remove(tmpnc)
def test_crt_dataframe_oot_B(self):
axes = {
't': 'time',
'x': 'lon',
'y': 'lat',
'z': 'depth',
}
fid, tmpnc = tempfile.mkstemp(suffix='.nc')
with ContiguousRaggedTrajectory(self.oot_B) as ncd:
df = ncd.to_dataframe(axes=axes)
df = df.sort_values(['trajectory', 'time'])
attrs = get_calculated_attributes(df, axes=axes)
with ContiguousRaggedTrajectory.from_dataframe(df, tmpnc, axes=axes, mode='a') as result_ncd:
assert 'obs' in result_ncd.dimensions
assert result_ncd.dimensions['obs'].size == 64116
assert 'trajectory' in result_ncd.dimensions
# This is removing null trajectories that have no data. Not much to do about this
# because there is no way to store this empty trajectory in a dataframe.
assert result_ncd.dimensions['trajectory'].size == 1000
result_ncd.apply_meta(attrs)
test_is_mine(ContiguousRaggedTrajectory, tmpnc) # Try to load it again
os.close(fid)
os.remove(tmpnc)
| 2.09375
| 2
|
share/python/repo/packages.py
|
globus/globus-release-tools
| 0
|
12783761
|
<filename>share/python/repo/packages.py
# Copyright 2014-2015 University of Chicago
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Package to manage the Globus Toolkit source tarball repository
"""
import glob
import os
import re
import repo
import repo.package
import shutil
class Repository(repo.Repository):
"""
Repository class
================
This class contains the source package repository metadata.
"""
def __init__(self, repo_path, name, pkg_re):
super(Repository, self).__init__()
self.repo_path = repo_path
self.name = name
self.pkg_re = re.compile(pkg_re)
self.dirty = False
if not os.path.exists(self.repo_path):
self.update_metadata(True)
for tarball in os.listdir(self.repo_path):
m = self.pkg_re.match(tarball)
if m is not None:
d = m.groupdict()
pkg = repo.package.Metadata(
d.get('name'),
d.get('version'),
d.get('release', '0'),
os.path.join(repo_path, tarball),
d.get('arch', 'src'),
os.path.join(repo_path, tarball),
name)
if pkg.name not in self.packages:
self.packages[pkg.name] = []
self.packages[pkg.name].append(pkg)
for p in self.packages:
self.packages[p].sort()
def add_package(self, package, update_metadata=False):
dest_path = os.path.join(
self.repo_path, os.path.basename(package.path))
if not os.path.exists(dest_path):
shutil.copy(package.path, dest_path)
if package.name not in self.packages:
self.packages[package.name] = []
# Create a new repo.package.Metadata with the new path
new_package = repo.package.Metadata(
package.name,
package.version.strversion, package.version.release,
dest_path,
package.arch,
package.source_name,
'src')
self.packages[package.name].append(new_package)
self.packages[package.name].sort()
if update_metadata:
self.update_metadata()
else:
self.dirty = True
return new_package
def remove_package(self, package, update_metadata=False):
dest_path = os.path.join(
self.repo_path, os.path.basename(package.path))
if os.path.exists(dest_path):
os.remove(dest_path)
for fn in glob.glob(dest_path + ".*"):
if os.path.exists(fn):
os.remove(fn)
if package.name in self.packages:
self.packages[package.name] = [
p for p in self.packages[package.name]
if p.version != package.version
]
self.packages[package.name].sort()
if update_metadata:
self.update_metadata()
else:
self.dirty = True
def update_metadata(self, force=False):
"""
Update the checksums for the packages in this repository
"""
if self.dirty or force:
distro_repodir = self.repo_path
if not os.path.exists(distro_repodir):
os.makedirs(distro_repodir, 0o755)
if repo.gid is not None:
os.chown(distro_repodir, repo.uid, repo.gid)
os.chmod(distro_repodir, 0o2775)
entries = os.listdir(distro_repodir)
entries.sort()
for pkg in entries:
pkg_filename = os.path.join(distro_repodir, pkg)
if (os.path.isfile(pkg_filename)
and not pkg_filename.endswith(".html")):
repo._digest_file(pkg_filename)
self.create_index(distro_repodir)
def update_gcs_version_file(self):
"""
Update the GLOBUS_CONNECT_SERVER_LATEST file, which is used by
the GCS scripts to nag the user about not being up-to-date
"""
gcs = self.packages.get('globus_connect_server', [])
max_gcs_version = repo.package.Version("0")
for gcs_pkg in gcs:
if gcs_pkg.version > max_gcs_version:
max_gcs_version = gcs_pkg.version
latest_gcs_file_version = repo.package.Version("0")
latest_gcs_file_path = os.path.join(
self.repo_path, "GLOBUS_CONNECT_SERVER_LATEST")
try:
            latest_gcs_file = open(latest_gcs_file_path, "r")
latest_gcs_file_version = repo.package.Version(
latest_gcs_file.read().strip())
except IOError:
pass
else:
latest_gcs_file.close()
if latest_gcs_file_version < max_gcs_version:
try:
                latest_file = open(latest_gcs_file_path, "w")
latest_file.write(max_gcs_version.strversion + "\n")
finally:
latest_file.close()
class Release(repo.Release):
"""
Release
=======
Each Release contains a collection of repositories for different
architectures for a particular operating system release.
"""
pkg_re = re.compile(
r"(?P<name>"
"([^0-9].*(?=-[0-9])))"
"-(?P<version>.*?)(-src|-gt5.2)?.tar.gz$")
def __init__(self, name, topdir):
r = Repository(topdir, "packages", Release.pkg_re)
super(Release, self).__init__(name, r)
def repositories_for_os_arch(self, osname, arch):
return [self.repositories]
def repositories_for_package(self, package):
return [self.repositories]
class Manager(repo.Manager):
"""
Package Manager
===============
The repo.packages.Manager object manages the packages in a
release tree. New packages from the repositories can be
promoted to the release tree.
"""
def __init__(self, root=repo.default_root):
"""
Constructor
-----------
Create a new Manager object.
Parameters
----------
*root*::
Root of the release trees
"""
release = {
"release": Release(
"release", os.path.join(root, 'packages'))
}
super(Manager, self).__init__(release)
def get_release(self, releasename):
return self.releases['release']
def package_name(self, name):
if name:
if name in ['globus-connect-server5.4', 'globus-connect-server-prereqs']:
return name
else:
return name.replace("-", "_")
def __str__(self):
return " ".join(
["Packages Manager [", ",".join(self.releases.keys()), "]"])
# vim: filetype=python:
| 2.140625
| 2
|
src/agent/dora/specifier.py
|
abagaria/opiq
| 13
|
12783762
|
from .maze_conv import DQN as maze_conv
def get_net(name):
nets = {
"maze": maze_conv,
}
return nets[name]
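# Usage note: get_net("maze") returns the maze_conv DQN class; any other name raises KeyError.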
| 2
| 2
|
src/nitsm/debug.py
|
ni/nitsm-python
| 4
|
12783763
|
<reponame>ni/nitsm-python
import os
import tkinter.messagebox
def prompt_attach_debugger() -> None:
"""
Pauses the Python interpreter and displays the process ID (PID). The PID can be used by an IDE
such as PyCharm to attach to the process for debugging. This is useful for stepping into nitsm
code modules from TestStand.
Instructions for use with PyCharm:
1. Call this function from the code module you want to debug. Placing it at the beginning
of the code module is recommended.
2. Add a breakpoint at the location where you want to start debugging. Make sure this
breakpoint will be reached after this function is called.
3. In TestStand, execute a sequence that calls into the code module.
4. A dialog box will appear displaying the PID of the current process. Before clicking
"Okay" on the dialog, select Run -> Attach To Process... from the PyCharm menu.
5. PyCharm will display a window of discovered processes. Click the process with the
matching PID.
6. PyCharm will open a debug terminal and attach to the process. Wait for PyCharm to
indicate it has successfully attached.
        7. Once PyCharm is attached, click "Okay" on the dialog to continue execution. If these
steps were performed correctly, PyCharm will break at the first breakpoint it reaches in
the code.
"""
tkinter.Tk().withdraw() # hide root window
tkinter.messagebox.showinfo(
"Attach debugger", "Process name: niPythonHost.exe and Process ID: " + str(os.getpid())
)
return
if __name__ == "__main__":
prompt_attach_debugger()
| 3.203125
| 3
|
src/rctgen/__init__.py
|
mballance/pyrctgen
| 1
|
12783764
|
<reponame>mballance/pyrctgen<gh_stars>1-10
from .activity_stmts import *
from .decorators import *
from .claims_refs import *
from .types import *
| 1.085938
| 1
|
testplan/common/entity/__init__.py
|
ymn1k/testplan
| 0
|
12783765
|
<reponame>ymn1k/testplan
"""Common entity implementations."""
from .base import (Entity, RunnableManager, RunnableManagerConfig,
Resource, ResourceStatus, ResourceConfig, Environment,
Runnable, RunnableStatus, RunnableConfig, RunnableResult,
FailedAction)
| 0.984375
| 1
|
adv/benchmark.py
|
hcc123915/dl
| 0
|
12783766
|
<filename>adv/benchmark.py
import adv_test
from adv import *
import mikoto
def module():
return Mikoto
class Mikoto(mikoto.Mikoto):
conf = {
"mod_a2" : ('crit' , 'chance' , 0.08) ,
}
def pre(this):
if this.condition('hp70'):
this.conf['mod_a'] = ('crit' , 'passive', 0.10)
if this.condition('connect s1'):
this.s1_proc = this.c_s1_proc
def init(this):
this.s1buff = Selfbuff("s1",0.0, 15)
this.s2buff = Selfbuff("s2",0.2, 10, 'spd')
def speed(this):
return 1+this.s2buff.get()
def s1latency(this, e):
this.s1buff.on()
def s1_proc(this, e):
this.s1buff.off()
this.dmg_make('s1',5.32*2)
this.s1buff.set(0.10).on()
Timer(this.s1latency).on(1.5/this.speed())
def c_s1_proc(this, e):
buff = this.s1buff.get()
if buff == 0:
stance = 0
elif buff == 0.10:
stance = 1
elif buff == 0.15:
stance = 2
if stance == 0:
this.dmg_make('s1',5.32*2)
this.s1buff.set(0.10) #.on()
Timer(this.s1latency).on(1.5/this.speed())
elif stance == 1:
this.dmg_make('s1',3.54*3)
this.s1buff.off()
this.s1buff.set(0.15) #.on()
Timer(this.s1latency).on(1.5/this.speed())
elif stance == 2:
this.dmg_make('s1',2.13*4+4.25)
this.s1buff.off().set(0)
def s2_proc(this, e):
this.s2buff.on()
if __name__ == '__main__':
conf = {}
conf['acl'] = """
`s1, seq=5 and cancel or fsc
`s2, seq=5 and cancel or fsc
`s3, seq=5 and cancel or fsc
"""
import cProfile
p = cProfile.Profile()
p.enable()
adv_test.test(module(), conf, verbose=0, mass=1)
p.print_stats()
| 2.46875
| 2
|
tests/tod/test_tod_world_script_metrics.py
|
twstewart42/ParlAI
| 1
|
12783767
|
<gh_stars>1-10
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Tests tod world metrics in the full script, *without* making the script properly set up
the agents on its own.
Use a few of the API Call + goal hit metrics as the metric handlers to test proper
functionality.
"""
import copy
import unittest
import parlai.core.tod.tod_test_utils.test_agents as test_agents
import parlai.scripts.tod_world_script as tod_world_script
from parlai.core.tod.tod_agents import StandaloneApiAgent
from parlai.core.tod.world_metrics_handlers import METRICS_HANDLER_CLASSES_TEST_REGISTRY
from parlai.core.metrics import dict_report
# Ignore lint on following line; want to have registered classes show up for tests
import projects.tod_simulator.world_metrics.extended_world_metrics # noqa: F401
class TestTodWorldScript(tod_world_script.TodWorldScript):
"""
Wrap around it to check its logic; also makes it easier to do things w/ underlying
World.
"""
def _get_tod_agents(self, opt):
"""
Hack so we can separate out logic of making sure agent parsing is correct.
"""
if hasattr(self, "agents"):
return self.agents
return super()._get_tod_agents(opt)
def _setup_world(self):
world = super()._setup_world()
for i in range(len(world.batch_tod_world_metrics)):
world.batch_tod_world_metrics[i].handlers = [
x() for x in METRICS_HANDLER_CLASSES_TEST_REGISTRY
]
return world
def _save_outputs(self, opt, world, logger, episode_metrics):
self.world = world
self.episode_metrics = episode_metrics
class TodWorldInScriptTestBase(unittest.TestCase):
def add_tod_world_opts(self, base_opts):
"""
Convenience since we're initing the opt directly without parlai parser.
"""
opts = copy.deepcopy(base_opts)
opts["datatype"] = "DUMMY"
opts["datafile"] = "DUMMY"
opts["standalone_api_file"] = test_agents.API_DATABASE_FILE
opts["exact_api_call"] = True
opts["log_keep_fields"] = "all"
opts["display_examples"] = False
opts[
"include_api_schemas"
        ] = True  # do this to make sure they're done correctly.
return opts
def setup_agents(self, added_opts):
full_opts = self.add_tod_world_opts(added_opts)
sys = test_agents.ApiCallAndSysUttAgent(full_opts)
agents = [
test_agents.UserUttAgent(full_opts),
sys,
StandaloneApiAgent(full_opts),
sys,
test_agents.ApiSchemaAgent(full_opts),
test_agents.GoalAgent(full_opts),
]
return agents, full_opts
def _run_test(self):
self._run_test_helper(test_agents.EPISODE_SETUP__SINGLE_API_CALL)
self._run_test_helper(test_agents.EPISODE_SETUP__MULTI_ROUND)
self._run_test_helper(test_agents.EPISODE_SETUP__MULTI_EPISODE)
self._run_test_helper(test_agents.EPISODE_SETUP__MULTI_EPISODE_BS)
def _run_test_helper(self, config_base):
config = copy.deepcopy(config_base)
config["use_broken_mock_api_calls"] = True
add = self.config_args()
for key in add:
config[key] = add[key]
agents, opt = self.setup_agents(config)
script = TestTodWorldScript(opt)
script.agents = agents
script.run()
self._check_metrics_correct(script, opt)
def _check_metrics_correct(self, script, opt):
"""
Last argument is only relevant for the max_turn test.
"""
max_rounds = opt[test_agents.TEST_NUM_ROUNDS_OPT_KEY]
max_episodes = opt[test_agents.TEST_NUM_EPISODES_OPT_KEY]
episode_metrics = script.episode_metrics
for episode_idx, episode in enumerate(episode_metrics):
goal, episode_metric = episode
episode_metric = dict_report(episode_metric.report())
self.assertAlmostEqual(
episode_metric["all_goals_hit"],
not test_agents.episode_has_broken_api_turn(episode_idx, max_rounds),
)
broken_episodes = sum(
[
test_agents.episode_has_broken_api_turn(i, max_rounds)
for i in range(max_episodes)
]
)
report = dict_report(script.world.report())
self.assertAlmostEqual(
report["all_goals_hit"],
float(max_episodes - broken_episodes) / max_episodes,
)
class TodWorldSingleBatchTest(TodWorldInScriptTestBase):
def config_args(self):
config = {}
config["batchsize"] = 1
config["max_turns"] = 10
return config
def test_metricsCorrect(self):
self._run_test()
class TodWorldNonSingleBatchTest(TodWorldInScriptTestBase):
def config_args(self):
config = {}
config["batchsize"] = 4
config["max_turns"] = 10
return config
def test_metricsCorrect(self):
self._run_test()
if __name__ == "__main__":
unittest.main()
| 2.09375
| 2
|
example/plugins/microservices/processors/legacy_processor.py
|
enricouniurb/Satosa-Saml2Spid
| 0
|
12783768
|
import re
from satosa.micro_services.processors.base_processor import BaseProcessor
class LegacyProcessor:
@staticmethod
def codice_fiscale_rs(schacpersonaluniqueids=[], nationprefix=False, nationprefix_sep=':'):
if isinstance(schacpersonaluniqueids, str):
schacpersonaluniqueids = [schacpersonaluniqueids]
# R&S format
rs_regexp = (r'(?P<urn_prefix>urn:schac:personalUniqueID:)?'
r'(?P<nation>[a-zA-Z]{2}):'
r'(?P<doc_type>[a-zA-Z]{2,3}):(?P<uniqueid>[\w]+)')
for uniqueid in schacpersonaluniqueids:
result = re.match(rs_regexp, uniqueid, re.I)
if result:
data = result.groupdict()
#if data.get('nation') == 'it' and data.get('doc_type') in ['CF', 'TIN']:
if nationprefix:
# returns IT:CODICEFISCALE
return nationprefix_sep.join((data['nation'].lower(),
data['uniqueid']))
# returns CODICEFISCALE
return data['uniqueid']
@staticmethod
def codice_fiscale_spid(fiscalNumbers, nationprefix=False, nationprefix_sep=':'):
if isinstance(fiscalNumbers, str):
fiscalNumbers = [fiscalNumbers]
# SPID/eIDAS FORMAT
spid_regexp = r'(?P<prefix>TIN)(?P<nation>[a-zA-Z]{2})-(?P<uniqueid>[\w]+)'
for fiscalNumber in fiscalNumbers:
result = re.match(spid_regexp, fiscalNumber, re.I)
if result:
data = result.groupdict()
if nationprefix:
# returns IT:CODICEFISCALE
return nationprefix_sep.join((data['nation'].lower(),
data['uniqueid']))
# returns CODICEFISCALE
return data['uniqueid']
@staticmethod
def matricola(personalUniqueCodes=[], id_string='dipendente', orgname='unical.it'):
if isinstance(personalUniqueCodes, str):
personalUniqueCodes = [personalUniqueCodes]
_regexp = (r'(?P<urn_prefix>urn:schac:personalUniqueCode:)?'
r'(?P<nation>[a-zA-Z]{2}):'
#r'(?P<organization>[a-zA-Z\.\-]+):'
'ORGNAME:'
'IDSTRING:'
r'(?P<uniqueid>[\w]+)').replace('IDSTRING', id_string).replace('ORGNAME', orgname)
for uniqueid in personalUniqueCodes:
result = re.match(_regexp, uniqueid, re.I)
if result:
return result.groupdict()['uniqueid']
class LegacyAttributeGenerator(BaseProcessor):
def matricola_dipendente(self, attributes):
v = None
if attributes.get('schacpersonaluniquecode'):
v = 'schacpersonaluniquecode'
elif attributes.get('schacPersonalUniqueCode'):
v = 'schacPersonalUniqueCode'
if v:
return LegacyProcessor.matricola(attributes[v],
id_string='dipendente')
def matricola_studente(self, attributes):
v = None
if attributes.get('schacpersonaluniquecode'):
v = 'schacpersonaluniquecode'
elif attributes.get('schacPersonalUniqueCode'):
v = 'schacPersonalUniqueCode'
if v:
return LegacyProcessor.matricola(attributes[v],
id_string='studente')
def codice_fiscale(self, attributes):
v = None
if attributes.get('schacpersonaluniqueid'):
            return LegacyProcessor.codice_fiscale_rs(attributes['schacpersonaluniqueid'])
        elif attributes.get('schacPersonalUniqueID'):
            return LegacyProcessor.codice_fiscale_rs(attributes['schacPersonalUniqueID'])
elif attributes.get('fiscalNumber'):
v = 'fiscalNumber'
elif attributes.get('fiscalnumber'):
v = 'fiscalnumber'
if v:
fiscalNumber = LegacyProcessor.codice_fiscale_spid(attributes[v])
# put a fake 'schacpersonaluniqueid' to do ldap account linking with the next microservice
attributes['schacpersonaluniqueid'] = 'urn:schac:personalUniqueID:it:CF:{}'.format(fiscalNumber)
return fiscalNumber
def process(self, internal_data, attribute, **kwargs):
if hasattr(self, attribute) and callable(getattr(self, attribute)):
internal_data.attributes[attribute] = getattr(self, attribute)(internal_data.attributes)
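# Illustrative sketch (not part of the original module): the static helpers can be
# exercised directly. The identifiers below are made-up samples of the R&S and
# SPID/eIDAS formats that the regular expressions above expect.
if __name__ == '__main__':
    # R&S format: urn prefix, nation, document type, unique id
    print(LegacyProcessor.codice_fiscale_rs('urn:schac:personalUniqueID:it:CF:ABCDEF12G34H567I'))
    # SPID/eIDAS format: 'TIN' + nation prefix, a dash, then the unique id
    print(LegacyProcessor.codice_fiscale_spid('TINIT-ABCDEF12G34H567I'))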
| 2.296875
| 2
|
test/test3.py
|
astraw/svg_stack
| 91
|
12783769
|
#!/usr/bin/env python
from __future__ import print_function
import subprocess
# stack two Inkscape generated files
subprocess.check_call(
'../svg_stack.py --direction=h --margin=100 red_ball.svg blue_triangle.svg > shapes_test.svg',
shell=True)
# Inkscape files don't pass xmllint -- don't test
print('You should manually verify that shapes_test.svg looks exactly the same as shapes.svg')
# subprocess.check_call(
# 'rasterizer shapes_test.svg',
# shell=True)
| 2.359375
| 2
|
pyglidein/config.py
|
xs5871/pyglidein
| 6
|
12783770
|
<reponame>xs5871/pyglidein<filename>pyglidein/config.py
try:
from configparser import SafeConfigParser
except ImportError:
from ConfigParser import SafeConfigParser
import os
import ast
class Config(dict):
def __init__(self, path, default=os.path.join(os.path.dirname(os.path.abspath(__file__)),
'etc/client_defaults.cfg')):
self.path = path
# read defaults
tmp = SafeConfigParser()
tmp.optionxform = str
tmp.read(default)
self._config_options_dict(tmp)
# read file
tmp = SafeConfigParser()
tmp.optionxform = str
tmp.read(path)
self._config_options_dict(tmp)
self._populate_partitions()
def _config_options_dict(self, config):
"""
Parsing config file
Args:
config: Python config parser object
"""
for section in config.sections():
if section not in self:
self[section] = {}
for option in config.options(section):
val = config.get(section, option)
try:
val = ast.literal_eval(val)
except Exception as e:
pass
self[section][option] = val
def _populate_partitions(self):
cluster_config = self.get('Cluster', dict())
if 'partitions' in cluster_config:
cluster_config['partitions'] = [k.strip() for k in cluster_config['partitions'].split(',')]
for k in cluster_config['partitions']:
                if k not in self:
continue
config = dict(cluster_config)
config.update(self[k])
self[k] = config
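# Minimal usage sketch (assumption: 'client.cfg' is a local config file path; the
# defaults shipped in etc/client_defaults.cfg are merged in first):
#
#   cfg = Config('client.cfg')
#   cluster = cfg.get('Cluster', {})
#   partitions = cluster.get('partitions', [])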
| 2.046875
| 2
|
workspaces/tests.py
|
marcphilippebeaujean-abertay/recur-notion
| 2
|
12783771
|
<reponame>marcphilippebeaujean-abertay/recur-notion
from unittest import mock
from django.contrib.auth import get_user_model
from django.test import TestCase
from .models import NotionWorkspace, NotionWorkspaceAccess
from .service import create_access_workspace_from_user_code
OLD_WORKSPACE_CODE = "54321"
OLD_WORKSPACE_ID = "oldworkspaceid"
OLD_WORKSPACE_NAME = "oldworkspacename"
UPDATED_WORKSPACE_ICON = "icon2.svg"
NEW_WORKSPACE_CODE = "12345"
NEW_WORKSPACE_ID = "newworkspaceid"
NEW_WORKSPACE_NAME = "newworkspacename"
NEW_ACCESS_TOKEN = "<PASSWORD>"
def mocked_oauth_notion_api(*args, **kwargs):
class MockResponse:
def __init__(self, json_data, status_code):
self.json_data = json_data
self.status_code = status_code
def json(self):
return self.json_data
if args[0] != "https://api.notion.com/v1/oauth/token":
return MockResponse({"message": "bad request"}, 400)
if kwargs["json"]["code"] == NEW_WORKSPACE_CODE:
return MockResponse(
{
"workspace_id": NEW_WORKSPACE_ID,
"workspace_name": NEW_WORKSPACE_NAME,
"workspace_icon": UPDATED_WORKSPACE_ICON,
"access_token": NEW_ACCESS_TOKEN,
},
200,
)
elif kwargs["json"]["code"] == OLD_WORKSPACE_CODE:
return MockResponse(
{
"workspace_id": OLD_WORKSPACE_ID,
"workspace_name": OLD_WORKSPACE_NAME,
"workspace_icon": UPDATED_WORKSPACE_ICON,
"access_token": NEW_ACCESS_TOKEN,
},
200,
)
return MockResponse(None, 404)
class NotionWorkspacesTestCase(TestCase):
def setUp(self):
self.user = get_user_model().objects.create_user(
username="testuser", email="<EMAIL>", password="<PASSWORD>"
)
self.init_workspace = NotionWorkspace.objects.create(
name=OLD_WORKSPACE_NAME, notion_id=OLD_WORKSPACE_ID, icon_url="icon.svg"
)
self.init_workspace_access = NotionWorkspaceAccess.objects.create(
access_token="access", workspace=self.init_workspace, owner=self.user
)
@mock.patch("workspaces.service.requests.post", side_effect=mocked_oauth_notion_api)
def test_create_new_workspace_and_access(self, m):
create_access_workspace_from_user_code(self.user, NEW_WORKSPACE_CODE)
self.assertNotEqual(
NotionWorkspace.objects.filter(notion_id=NEW_WORKSPACE_ID).first(), None
)
self.assertNotEqual(
NotionWorkspaceAccess.objects.filter(access_token=NEW_ACCESS_TOKEN).first(),
None,
)
@mock.patch("workspaces.service.requests.post", side_effect=mocked_oauth_notion_api)
def test_create_workspace_but_using_bad_code(self, m):
self.assertRaises(
Exception,
lambda x: create_access_workspace_from_user_code(self.user, "bad_code"),
)
self.assertEqual(
NotionWorkspace.objects.filter(notion_id=NEW_WORKSPACE_ID).first(), None
)
self.assertEqual(
NotionWorkspaceAccess.objects.filter(access_token=NEW_ACCESS_TOKEN).first(),
None,
)
@mock.patch("workspaces.service.requests.post", side_effect=mocked_oauth_notion_api)
def test_update_existing_workspace_access(self, m):
create_access_workspace_from_user_code(self.user, OLD_WORKSPACE_CODE)
new_workspace = NotionWorkspace.objects.filter(
notion_id=NEW_WORKSPACE_ID
).first()
self.assertEqual(new_workspace, None)
old_workspace = NotionWorkspace.objects.filter(
notion_id=OLD_WORKSPACE_ID
).first()
self.assertEqual(old_workspace.icon_url, UPDATED_WORKSPACE_ICON)
updated_workspace_access = NotionWorkspaceAccess.objects.filter(
access_token=NEW_ACCESS_TOKEN
).first()
self.assertNotEqual(updated_workspace_access, None)
        self.assertEqual(updated_workspace_access.workspace, old_workspace)
@mock.patch("workspaces.service.requests.post", side_effect=mocked_oauth_notion_api)
def test_client_request_create_new_workspace_and_access(self, m):
self.client.force_login(
get_user_model().objects.get_or_create(username=self.user.username)[0]
)
response = self.client.get("/notion-oauth?code=" + NEW_WORKSPACE_CODE)
self.assertEqual(response.status_code, 302)
self.assertNotEqual(
NotionWorkspace.objects.filter(notion_id=NEW_WORKSPACE_ID).first(), None
)
self.assertNotEqual(
NotionWorkspaceAccess.objects.filter(access_token=NEW_ACCESS_TOKEN).first(),
None,
)
@mock.patch("workspaces.service.requests.post", side_effect=mocked_oauth_notion_api)
def test_client_request_bad_method(self, m):
self.client.force_login(
get_user_model().objects.get_or_create(username=self.user.username)[0]
)
response = self.client.post("/notion-oauth?code=" + NEW_WORKSPACE_CODE)
self.assertEqual(response.status_code, 405)
@mock.patch("workspaces.service.requests.post", side_effect=mocked_oauth_notion_api)
def test_client_request_not_logged_in(self, m):
response = self.client.get("/notion-oauth?code=" + NEW_WORKSPACE_CODE)
self.assertEqual(response.status_code, 302)
self.assertEqual(
NotionWorkspace.objects.filter(notion_id=NEW_WORKSPACE_ID).first(), None
)
self.assertEqual(
NotionWorkspaceAccess.objects.filter(access_token=NEW_ACCESS_TOKEN).first(),
None,
)
@mock.patch("workspaces.service.requests.post", side_effect=mocked_oauth_notion_api)
def test_client_request_no_code(self, m):
self.client.force_login(
get_user_model().objects.get_or_create(username=self.user.username)[0]
)
response = self.client.get("/notion-oauth")
self.assertEqual(response.status_code, 400)
self.assertEqual(
NotionWorkspace.objects.filter(notion_id=NEW_WORKSPACE_ID).first(), None
)
self.assertEqual(
NotionWorkspaceAccess.objects.filter(access_token=NEW_ACCESS_TOKEN).first(),
None,
)
@mock.patch("workspaces.service.requests.post", side_effect=mocked_oauth_notion_api)
def test_client_request_bad_code(self, m):
self.client.force_login(
get_user_model().objects.get_or_create(username=self.user.username)[0]
)
response = self.client.get("/notion-oauth?code=bad_code")
self.assertEqual(response.status_code, 500)
self.assertEqual(
NotionWorkspace.objects.filter(notion_id=NEW_WORKSPACE_ID).first(), None
)
self.assertEqual(
NotionWorkspaceAccess.objects.filter(access_token=NEW_ACCESS_TOKEN).first(),
None,
)
| 2.34375
| 2
|
src/parser.py
|
vistrcm/gsclparser
| 0
|
12783772
|
<gh_stars>0
import logging
from typing import Dict, NewType
import bs4
from bs4 import BeautifulSoup
logger = logging.getLogger(__name__)
# some additional types defined
BS4ResultSet = NewType("BS4ResultSet", bs4.element.ResultSet)
def _parse_attrgroups(attrgroups: BS4ResultSet) -> Dict[str, str]:
"""parse attribute groups and return dict representing attributes"""
attr_tags = []
attributes = {}
for group in attrgroups:
attr_tags.extend(group.find_all("span"))
for attr in attr_tags:
key = attr.find(text=True, recursive=False)
# sometimes key may be empty. Usually it is first one with search name.
# will replace it with '_name'
if key is None:
key = "_name"
# also will remove unnecessary symbols: spaces and ':'
key = key.rstrip(" :")
if attr.b is None:
value = str(attr)
else:
value = attr.b.text
# build attributes dict
attributes[key] = value
return attributes
def parse(record: str) -> Dict[str, str]:
"""parse record using beautifulsoup4"""
soup = BeautifulSoup(record, 'html.parser')
raw = record
title = soup.title.string
text = soup.get_text()
page_container = soup.find("section", class_="page-container")
body = page_container.find("section", class_="body")
display_date = body.header.find("p", id="display-date")
post_date = display_date.time["datetime"]
posting_title = body.find("h2", class_="postingtitle")
postingtitletext = posting_title.find("span", class_="postingtitletext")
titletextonly_span = postingtitletext.find("span", id="titletextonly")
# handle empty titletextonly span
if titletextonly_span is None:
titletextonly = None
else:
titletextonly = titletextonly_span.text
price_span = postingtitletext.find("span", class_="price")
# handle empty price span
if price_span is None:
price_text = None
price = None
else:
price_text = price_span.text
# price usually goes with '$' sign. Let's try to remove it.
price_no_dollar = price_text.lstrip("$")
if price_no_dollar.isdigit():
price = float(price_no_dollar)
else:
price = None
userbody = body.find("section", class_="userbody")
thumbs = userbody.figure.find("div", id="thumbs")
thumb_links = [link["href"] for link in thumbs.find_all("a")]
map_and_attrs = userbody.find("div", class_="mapAndAttrs")
mapbox = map_and_attrs.find("div", class_="mapbox")
if mapbox is not None:
map = mapbox.find("div", id="map")
map_attrs = map.attrs
mapaddress_div = mapbox.find("div", class_="mapaddress")
# it may not be div with class mapaddress
if mapaddress_div is None:
mapaddress = None
else:
mapaddress = mapaddress_div.text
map_element = {
"mapaddress": mapaddress,
"map_attrs": map_attrs,
}
else:
map_element = None
attrgroups = map_and_attrs.find_all("p", class_="attrgroup")
attributes = _parse_attrgroups(attrgroups)
postingbody = userbody.find("section", id="postingbody")
post_text = postingbody.get_text()
notices = [notice.text for notice in userbody.find("ul", class_="notices").find_all("li")]
result = {
"raw": raw,
"text": text,
"title": title,
"post_date": post_date,
"titletextonly": titletextonly,
"price_text": price_text,
"price": price,
"thumb_links": thumb_links,
"map": map_element,
"attributes": attributes,
"post_text": post_text,
"noticies": notices
}
return result
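# Minimal usage sketch (assumption: 'listing.html' is a saved page of the kind this
# parser targets; the file name is illustrative only):
#
#   with open('listing.html', encoding='utf-8') as fh:
#       parsed = parse(fh.read())
#   print(parsed['title'], parsed['price'], parsed['post_date'])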
| 3.046875
| 3
|
libra/ledger_info.py
|
MoveOnLibra/libra-core
| 5
|
12783773
|
<filename>libra/ledger_info.py
from canoser import Struct, BytesT, RustEnum
from libra.account_address import Address
from libra.block_info import BlockInfo, OptionEpochInfo
from libra.epoch_info import EpochInfo
from libra.hasher import HashValue, LCSCryptoHash
from libra.crypto.ed25519 import ED25519_SIGNATURE_LENGTH
from libra.validator_verifier import ValidatorVerifier
from libra.proto_helper import ProtoHelper
class LedgerInfo(Struct, LCSCryptoHash):
_fields = [
('commit_info', BlockInfo),
# Hash of consensus specific data that is opaque to all parts of the system other than
# consensus.
('consensus_data_hash', HashValue)
]
@classmethod
def from_proto(cls, proto):
ret = cls()
block_info = BlockInfo()
block_info.version = proto.version
block_info.executed_state_id = proto.transaction_accumulator_hash
block_info.id = proto.consensus_block_id
block_info.epoch = proto.epoch
block_info.round = proto.round
block_info.timestamp_usecs = proto.timestamp_usecs
if proto.HasField("next_epoch_info"):
einfo = EpochInfo.from_proto(proto.next_epoch_info)
block_info.next_epoch_info = OptionEpochInfo(einfo)
else:
block_info.next_epoch_info = OptionEpochInfo(None)
ret.commit_info = block_info
ret.consensus_data_hash = proto.consensus_data_hash
return ret
def to_proto(self):
proto = ProtoHelper.new_proto_obj(self)
proto.version = self.version
proto.transaction_accumulator_hash = self.transaction_accumulator_hash
proto.consensus_data_hash = self.consensus_data_hash
proto.consensus_block_id = self.consensus_block_id
proto.epoch = self.epoch
proto.round = self.round
proto.timestamp_usecs = self.timestamp_usecs
if self.has_next_epoch_info():
proto.next_epoch_info.MergeFrom(ProtoHelper.to_proto(self.next_epoch_info))
return proto
@property
def epoch(self):
return self.commit_info.epoch
@property
def round(self):
return self.commit_info.round
@property
def consensus_block_id(self):
return self.commit_info.id
@property
def transaction_accumulator_hash(self):
return self.commit_info.executed_state_id
@property
def version(self):
return self.commit_info.version
@property
def timestamp_usecs(self):
return self.commit_info.timestamp_usecs
@property
def next_epoch_info(self):
return self.commit_info.next_epoch_info
def has_next_epoch_info(self):
return self.commit_info.next_epoch_info.value is not None
# The validator node returns this structure, which includes signatures
# from validators that confirm the state. The client only needs to pass back
# the LedgerInfo element when it performs a query, since the validator node
# does not need the signatures again; they exist only so the client can
# verify the state.
class LedgerInfoWithV0(Struct):
_fields = [
('ledger_info', LedgerInfo),
# The validator is identified by its account address: in order to verify a signature
# one needs to retrieve the public key of the validator for the given epoch.
('signatures', {Address: BytesT(ED25519_SIGNATURE_LENGTH)})
]
@classmethod
def from_proto(cls, proto):
ret = cls()
ret.ledger_info = LedgerInfo.from_proto(proto.ledger_info)
signatures = {}
for x in proto.signatures:
#address = Address.normalize_to_bytes(x.validator_id)
signatures[x.validator_id] = x.signature
ret.signatures = signatures
return ret
def to_proto(self):
proto = ProtoHelper.new_proto_obj(self)
proto.ledger_info.MergeFrom(self.ledger_info.to_proto())
for k, v in self.signatures.items():
sig = proto.signatures.add()
sig.validator_id = k
sig.signature = v
return proto
def verify_signatures(self, validator: ValidatorVerifier):
ledger_hash = self.ledger_info.hash()
validator.batch_verify_aggregated_signature(ledger_hash, self.signatures)
class LedgerInfoWithSignatures(RustEnum):
_enums = [
('V0', LedgerInfoWithV0),
]
@classmethod
def from_proto(cls, proto):
return LedgerInfoWithSignatures.deserialize(proto.bytes).value
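# Hedged usage sketch: given a protobuf message carrying a LedgerInfoWithSignatures
# payload and a ValidatorVerifier for the current epoch (both assumed to come from the
# client's state-sync layer), signature verification looks like:
#
#   ledger_info_v0 = LedgerInfoWithSignatures.from_proto(proto_msg)  # returns the V0 payload
#   ledger_info_v0.verify_signatures(validator_verifier)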
| 2.203125
| 2
|
socket_futures.py
|
wefner/experiments
| 1
|
12783774
|
#!/usr/bin/env python3
import os
import concurrent.futures
from time import time
from utils import load_csv, write_results, check_open_port
# https://docs.python.org/3/library/concurrent.futures.html#threadpoolexecutor-example
start = time()
results = {}
top_sites = f'{os.path.dirname(os.path.realpath(__file__))}/top-1m.csv'
endpoints = load_csv(top_sites)[0:100]
max_workers = 10
# We can use a with statement to ensure threads are cleaned up promptly
with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
# Start the load operations and mark each future with its endpoint
futures = {executor.submit(check_open_port, endpoint): endpoint for endpoint in endpoints}
for future in concurrent.futures.as_completed(futures):
endpoint = futures[future]
try:
data = future.result()
results[data[0]] = data[1]
except Exception as exc:
print('%r generated an exception: %s' % (endpoint, exc))
write_results(results, 'futures')
end = time()
print(f"Endpoints took {end-start} seconds")
| 3.125
| 3
|
exploit.py
|
soezayzay/AP-Finder
| 0
|
12783775
|
<reponame>soezayzay/AP-Finder
import requests
import time
import os
#Author:SoeZayZay
#Github:https://github.com/soezayzay
banner = """\033[1;34m
_ ____ _____ _ _
/ \ | _ \ | ___(_)_ __ __| | ___ _ __
/ _ \ | |_) | | |_ | | '_ \ / _` |/ _ \ '__|
/ ___ \| __/ | _| | | | | | (_| | __/ |
/_/ \_\_| |_| |_|_| |_|\__,_|\___|_|
\033[0m
\033[101m\033[1;37m Author:SoeZayZay Github: https://github.com/soezayzay\033[0m
"""
def bruteforce():
os.system("clear")
print(banner)
print("")
target_url = input(" \033[1;32m[\033[1;34m*\033[1;32m]\033[1;32m Enter Target Url \033[1;33m: \033[0m\033[1;37m")
if target_url[-1] == "/":
target_url = target_url[:-1]
else:
pass
print("")
crack_file = input(" \033[1;32m[\033[1;34m*\033[1;32m]\033[1;32m Enter Crack File \033[1;33m: \033[0m\033[1;37m")
print("")
print("\t\t\t\033[1;32m[\033[1;36m*\033[1;32m]\033[1;33m Cracking \033[1;32m[\033[1;36m*\033[1;32m]\033[0m")
print("")
directories = []
file = open(crack_file)
read_file = file.readlines()
for dirs in read_file:
p_dir = dirs.strip()
directories.append(p_dir)
status = 0
for directorie in directories:
req = requests.get(f"{target_url}/{directorie}")
if req.status_code == 200 :
status = status + 1
success_dirs = open("success_dirs.txt","a")
success_dirs.write(f"{target_url}/{directorie}")
success_dirs.write("\n")
success_dirs.close()
print(f" \033[1;36m[\033[1;32m{status}\033[1;36m]\033[1;32m {target_url}/{directorie}\033[0m")
elif req.status_code == 403:
status = status + 1
print(f" \033[1;36m[\033[1;33m{status}\033[1;36m]\033[1;37m {target_url}/\033[1;33m{directorie}\033[0m")
elif req.status_code == 404:
print(f" \033[1;36m[\033[1;31m{status}\033[1;36m]\033[1;37m {target_url}/\033[1;31m{directorie}\033[0m")
else :
print(f" \033[1;36m[\033[1;31m{req.status_code}\033[1;36m]\033[1;37m {target_url}/\033[1;31m{directorie}\033[0m")
print("")
bruteforce()
| 2.78125
| 3
|
packages/watchmen-rest-doll/src/watchmen_rest_doll/system/external_writer_router.py
|
Indexical-Metrics-Measure-Advisory/watchmen
| 0
|
12783776
|
from typing import List, Optional
from fastapi import APIRouter, Body, Depends
from watchmen_auth import PrincipalService
from watchmen_meta.common import ask_meta_storage, ask_snowflake_generator
from watchmen_meta.system import ExternalWriterService
from watchmen_model.admin import UserRole
from watchmen_model.common import DataPage, ExternalWriterId, Pageable
from watchmen_model.system import ExternalWriter
from watchmen_rest import get_any_admin_principal, get_super_admin_principal
from watchmen_rest.util import raise_400, raise_403, raise_404
from watchmen_rest_doll.doll import ask_tuple_delete_enabled
from watchmen_rest_doll.util import trans, trans_readonly
from watchmen_utilities import is_blank
router = APIRouter()
def get_external_writer_service(principal_service: PrincipalService) -> ExternalWriterService:
return ExternalWriterService(ask_meta_storage(), ask_snowflake_generator(), principal_service)
@router.get('/external_writer', tags=[UserRole.ADMIN, UserRole.SUPER_ADMIN], response_model=ExternalWriter)
async def load_external_writer_by_id(
writer_id: Optional[ExternalWriterId] = None,
principal_service: PrincipalService = Depends(get_any_admin_principal)
) -> ExternalWriter:
if is_blank(writer_id):
raise_400('External writer id is required.')
if not principal_service.is_super_admin():
if writer_id != principal_service.get_tenant_id():
raise_403()
external_writer_service = get_external_writer_service(principal_service)
def action() -> ExternalWriter:
# noinspection PyTypeChecker
external_writer: ExternalWriter = external_writer_service.find_by_id(writer_id)
if external_writer is None:
raise_404()
return external_writer
return trans_readonly(external_writer_service, action)
@router.post('/external_writer', tags=[UserRole.SUPER_ADMIN], response_model=ExternalWriter)
async def save_external_writer(
external_writer: ExternalWriter, principal_service: PrincipalService = Depends(get_super_admin_principal)
) -> ExternalWriter:
external_writer_service = get_external_writer_service(principal_service)
# noinspection DuplicatedCode
def action(writer: ExternalWriter) -> ExternalWriter:
if external_writer_service.is_storable_id_faked(writer.writerId):
external_writer_service.redress_storable_id(writer)
# noinspection PyTypeChecker
writer: ExternalWriter = external_writer_service.create(writer)
else:
# noinspection PyTypeChecker
writer: ExternalWriter = external_writer_service.update(writer)
return writer
return trans(external_writer_service, lambda: action(external_writer))
class QueryExternalWriterDataPage(DataPage):
data: List[ExternalWriter]
@router.post(
'/external_writer/name', tags=[UserRole.ADMIN, UserRole.SUPER_ADMIN], response_model=QueryExternalWriterDataPage)
async def find_external_writers_by_name(
query_name: Optional[str] = None, pageable: Pageable = Body(...),
principal_service: PrincipalService = Depends(get_any_admin_principal)
) -> QueryExternalWriterDataPage:
external_writer_service = get_external_writer_service(principal_service)
# noinspection DuplicatedCode
def action() -> QueryExternalWriterDataPage:
tenant_id = None
if principal_service.is_tenant_admin():
tenant_id = principal_service.get_tenant_id()
if is_blank(query_name):
# noinspection PyTypeChecker
return external_writer_service.find_by_text(None, tenant_id, pageable)
else:
# noinspection PyTypeChecker
return external_writer_service.find_by_text(query_name, tenant_id, pageable)
return trans_readonly(external_writer_service, action)
@router.get(
"/external_writer/all", tags=[UserRole.ADMIN], response_model=List[ExternalWriter])
async def find_all_external_writers(
principal_service: PrincipalService = Depends(get_any_admin_principal)) -> List[ExternalWriter]:
tenant_id = None
if principal_service.is_tenant_admin():
tenant_id = principal_service.get_tenant_id()
external_writer_service = get_external_writer_service(principal_service)
def action() -> List[ExternalWriter]:
return external_writer_service.find_all(tenant_id)
return trans_readonly(external_writer_service, action)
@router.delete('/external_writer', tags=[UserRole.SUPER_ADMIN], response_model=ExternalWriter)
async def delete_external_writer_by_id(
writer_id: Optional[ExternalWriterId] = None,
principal_service: PrincipalService = Depends(get_super_admin_principal)
) -> ExternalWriter:
if not ask_tuple_delete_enabled():
raise_404('Not Found')
if is_blank(writer_id):
raise_400('External writer id is required.')
external_writer_service = get_external_writer_service(principal_service)
def action() -> ExternalWriter:
# noinspection PyTypeChecker
external_writer: ExternalWriter = external_writer_service.delete(writer_id)
if external_writer is None:
raise_404()
return external_writer
return trans(external_writer_service, action)
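# Hedged integration sketch: like other FastAPI APIRouter modules, this router is
# expected to be mounted on an application object elsewhere in the package; the app
# construction below is an assumption shown for illustration only.
#
#   from fastapi import FastAPI
#   app = FastAPI()
#   app.include_router(router)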
| 1.921875
| 2
|
cardano-node-tests/cardano_node_tests/utils/cli_coverage.py
|
MitchellTesla/Cardano-SCK
| 6
|
12783777
|
<gh_stars>1-10
"""Functionality for CLI coverage data collected by the `clusterlib`."""
import json
import logging
import shutil
from pathlib import Path
from typing import Optional
from _pytest.config import Config
from cardano_clusterlib import clusterlib
from cardano_node_tests.utils import helpers
LOGGER = logging.getLogger(__name__)
def save_cli_coverage(cluster_obj: clusterlib.ClusterLib, pytest_config: Config) -> Optional[Path]:
"""Save CLI coverage info."""
cli_coverage_dir = pytest_config.getoption("--cli-coverage-dir")
if not (cli_coverage_dir and cluster_obj.cli_coverage):
return None
json_file = Path(cli_coverage_dir) / f"cli_coverage_{helpers.get_timestamped_rand_str()}.json"
with open(json_file, "w") as out_json:
json.dump(cluster_obj.cli_coverage, out_json, indent=4)
LOGGER.info(f"Coverage file saved to '{cli_coverage_dir}'.")
return json_file
def save_start_script_coverage(log_file: Path, pytest_config: Config) -> Optional[Path]:
"""Save info about CLI commands executed by cluster start script."""
cli_coverage_dir = pytest_config.getoption("--cli-coverage-dir")
if not (cli_coverage_dir and log_file.exists()):
return None
dest_file = (
Path(cli_coverage_dir) / f"cli_coverage_script_{helpers.get_timestamped_rand_str()}.log"
)
shutil.copy(log_file, dest_file)
LOGGER.info(f"Start script coverage log file saved to '{dest_file}'.")
return dest_file
| 2.046875
| 2
|
rsa_implementation_test.py
|
thalees/RSA-implementation
| 0
|
12783778
|
<reponame>thalees/RSA-implementation<filename>rsa_implementation_test.py
import base64
import binascii
from rsa_implementation import (encrypt, dencrypt)
def is_base64(s):
try:
base64.decodebytes(s)
return True
except binascii.Error:
return False
public_key_mock = (15776038139088582906797, 66678275526824262145921)
private_key_mock = (51395388263817200503133, 66678275526824262145921)
def test_returns_the_encrypted_value_successfully():
message = 'some message'
response = encrypt(message, public_key_mock).encode()
assert is_base64(response)
def test_returns_the_decrypted_value_successfully():
message = 'test'
encrypted_message = encrypt(message, public_key_mock)
response = dencrypt(encrypted_message, private_key_mock)
assert response == message
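# These tests are collected by pytest; assuming pytest is installed, a typical
# invocation is simply:
#   pytest rsa_implementation_test.py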
| 2.765625
| 3
|
detect_conv_roi.py
|
sc2h6o/DT
| 0
|
12783779
|
import cv2
import os
import sys
sys.path.append("/home/syc/py-faster-rcnn/caffe-fast-rcnn/python")
import caffe
import numpy as np
import random
import time
from math import *
from utils import *
from DataBase import DataBase
video_dir = '/media/syc/My Passport/_dataset/tracking2013/'
video_name = "Liquor/img"
video_transpose = False
video_resize = (960, 540)
bbox = 256,152,73,210
# bbox = 100,20,60,60
(x,y,w,h) = bbox
batch_size = 4
from_seq = True
scale = 1
model_dir = 'model/'
data_dir = 'data/'
proto_solver = model_dir + 'solver.prototxt'
proto_feat = model_dir + 'feat.prototxt'
model_feat = model_dir + "ZF_faster_rcnn_final.caffemodel"
# mean_file = model_dir + 'ilsvrc_2012_mean.npy'
target_size = 127.0
pad_in = 32
class DeepTracker:
def __init__(self):
caffe.set_device(0)
caffe.set_mode_gpu()
self.inited = False
self.prob = None
self.pad_w = self.pad_h = 0
self.mean = np.array([102.9801, 115.9465, 122.7717])
self.idx = 0
def transpose(self, frame, bbox=None):
if bbox == None:
data = frame
else:
(x,y,w,h) = bbox
pad = 200
_frame = np.zeros((frame.shape[0] + 2*pad, frame.shape[1] + 2*pad, 3))
_frame[pad:pad+frame.shape[0], pad:pad+frame.shape[1], :] = frame
data = _frame[pad+y:pad+y+h, pad+x:pad+x+w, :]
# data = cv2.resize(data, (scale*w,scale*h))
data = data - self.mean
data = data.transpose((2,0,1))
return data
def saveImage(self, frame, bbox, idx):
x,y,w,h = bbox
cv2.imwrite('data/%d.jpg'%idx, frame[y:y+h,x:x+w,:])
def makeLabels(self, bbox, box_large, w_sm, h_sm, range_out = 12, range_in = 6, scale=0.25):
(_x,_y,_w,_h) = bbox
(x,y,w,h) = box_large
labels = np.zeros((1,h_sm,w_sm))
rad_out = scale*range_out
rad_in = scale*range_in
cx = scale*(_x-x+self.pad_w)
cy = scale*(_y-y+self.pad_h)
labels[0, rdint(cy-rad_out):rdint(cy+rad_out), rdint(cx-rad_out):rdint(cx+rad_out)] = -1
labels[0, rdint(cy-rad_in):rdint(cy+rad_in), rdint(cx-rad_in):rdint(cx+rad_in)] = 1
return labels
def getFeat(self, frame, bbox, box_large):
(_x,_y,_w,_h) = bbox
(x,y,w,h) = box_large
w_feat = int_(0.25*w) - rdint(0.25*(_w-2*self.pad_w))
h_feat = int_(0.25*h) - rdint(0.25*(_h-2*self.pad_h))
data = self.transpose(frame, box_large)
self.featnet.blobs['data'].reshape(1,3,h,w)
self.featnet.blobs['data'].data[0] = data
self.featnet.blobs['rois'].reshape(w_feat*h_feat,5)
for i in range(w_feat):
for j in range(h_feat):
idx = j + i*h_feat
self.featnet.blobs['rois'].data[idx] = np.array([0,4*i,4*j,4*i+_w-2*self.pad_w,4*j+_h-2*self.pad_h])
self.featnet.forward()
pool = self.featnet.blobs['roi_pool_conv5'].data
feat = pool.reshape(w_feat,h_feat,1024).transpose((2,1,0))
return feat
def update(self, frame, bbox ,step = 16):
t1 = time.clock()
(_x,_y,_w,_h) = bbox
self.pad_w = min(_w//2, pad_in)
self.pad_h = min(_h//2, pad_in)
(x,y,w,h) = box_large = padding(bbox, 1.0, 60)
feat = self.getFeat(frame, bbox, box_large)
(c_sm, h_sm, w_sm) = feat.shape
labels = self.makeLabels(bbox,box_large,w_sm, h_sm)
self.solver.net.blobs['data'].reshape(1,c_sm,h_sm,w_sm)
self.solver.net.blobs['data'].data[0] = feat
self.solver.net.blobs['labels'].reshape(1,1,h_sm,w_sm)
self.solver.net.blobs['labels'].data[0] = labels
self.solver.step(step)
t2 = time.clock()
        print('update takes %f seconds.' % (1.0*(t2-t1)))
def init(self, frame, bbox):
self.solver = caffe.SGDSolver(proto_solver)
self.featnet = caffe.Net(proto_feat,model_feat,caffe.TEST)
self.update(frame, bbox, 1024)
self.inited = True
self.prob = np.zeros((frame.shape[0],frame.shape[1]))
def track(self, frame, bbox):
(_x,_y,_w,_h) = bbox
(x,y,w,h) = box_large = padding(bbox, 0.7, 35)
feat = self.getFeat(frame, bbox, box_large)
(c_sm, h_sm, w_sm) = feat.shape
self.solver.net.blobs['data'].reshape(1,c_sm,h_sm,w_sm)
self.solver.net.blobs['labels'].reshape(1,1,h_sm, w_sm)
self.solver.net.blobs['data'].data[0] = feat
self.solver.net.forward()
score = softmax(self.solver.net.blobs['score'].data[0])
score_big = cv2.resize(score, (4*w_sm,4*h_sm))
self.prob = score_big.copy() ##
dx = score_big.argmax() % (4*w_sm)
dy = score_big.argmax() // (4*w_sm)
_x = x+dx-self.pad_w
_y = y+dy-self.pad_h
bbox = (_x,_y,_w,_h)
self.update(frame, bbox)
return bbox
if __name__ == "__main__":
dt = DeepTracker()
success, frame = True, None
seq = []
idx = 0
if from_seq:
for filename in os.listdir(os.path.join(video_dir,video_name)):
if '.jpg' in filename:
seq.append(os.path.join(video_dir,video_name,filename))
seq.sort()
frame = cv2.imread(seq[idx])
idx += 1
else:
cap = cv2.VideoCapture(video_dir+video_name)
success, frame = cap.read()
while success :
t1 = time.clock()
if dt.inited:
bbox = dt.track(frame, bbox)
cv2.imshow('prob', dt.prob)
(x,y,w,h) = bbox
result = frame.copy()
cv2.rectangle(result, (x,y), (x+w,y+h), (0, 255, 255), 2)
cv2.imshow(video_name, result)
key = cv2.waitKey(3)
if key == 27:
break
elif key == 112 or from_seq and not dt.inited:
dt.init(frame, bbox)
if from_seq:
if idx >= len(seq):
break
else:
frame = cv2.imread(seq[idx])
idx += 1
else:
success, frame = cap.read()
t2 = time.clock()
print "total speed: %ffps."% (1.0/(t2-t1))
| 2.125
| 2
|
webrcon/connector.py
|
LewdNeko/WebRcon
| 3
|
12783780
|
import asyncio
import json
import websockets
from .exceptions import InvalidServer, ConnectionClosed
from .utils import maybe_await
# noinspection SpellCheckingInspection
class RconConnector:
# noinspection PyTypeChecker
def __init__(self, host, port, password, message_callback=None, console_callback=None):
self.uri = f'ws://{host}:{port}/{password}'
self.ws: websockets.WebSocketClientProtocol = None
self._loop = None
self._ws_kwargs = {}
self._counter = 1
self._process_task: asyncio.Future = None
self._bucket = {}
self._closed = True
if message_callback and not callable(message_callback):
raise TypeError('Expected type `function` for `message_callback`, got type `{0}`'.format(
type(message_callback)))
elif message_callback:
self._bucket[-1] = message_callback
if console_callback and not callable(console_callback):
raise TypeError('Expected type `function` for `console_callback`, got type `{0}`'.format(
type(console_callback)))
elif console_callback:
self._bucket[0] = console_callback
async def start(self, loop, **kwargs):
self._loop = loop
try:
self.ws = await websockets.connect(self.uri, **kwargs)
self._ws_kwargs = kwargs
self._closed = False
if self._process_task:
self._process_task.cancel()
self._process_task = self._loop.create_task(self.receive_data())
except websockets.WebSocketProtocolError:
raise InvalidServer
async def close(self):
self._closed = True
await self.ws.close(reason='Client requested shutdown of WS connection.')
async def command(self, command, callback):
if not callable(callback):
raise TypeError('Expected type `function` for `message_callback`, got type `{0}`'.format(
type(callback)))
if self._closed:
raise ConnectionClosed
self._bucket[self._counter] = callback
data = json.dumps(dict(Message=command, Identifier=self._counter, Name="WebRcon"))
self._counter += 1
retry_counter = 0
sent = False
while not sent:
try:
await self.ws.send(data)
sent = True
except websockets.ConnectionClosed:
await asyncio.sleep((retry_counter + 1) * 5)
retry_counter += 1
await self.start(self._loop, **self._ws_kwargs)
except (websockets.WebSocketProtocolError, websockets.InvalidHandshake):
await asyncio.sleep((retry_counter + 1) * 5)
retry_counter += 1
if retry_counter >= 5:
# Could not reconnect / send the data
return False
return True
async def receive_data(self):
# noinspection DuplicatedCode
closed_counter = 0
while not self._closed:
data = {}
try:
resp = await self.ws.recv()
data = json.loads(resp)
except websockets.ConnectionClosed:
closed_counter += 1
if closed_counter >= 3:
await self.start(self._loop, **self._ws_kwargs)
except json.JSONDecodeError:
# Invalid response, ignore
pass
identifier = data.get('Identifier')
if identifier == -1 and self._bucket.get(-1):
self._loop.create_task(maybe_await(self._bucket[-1], data))
elif identifier == 0 and self._bucket.get(0):
self._loop.create_task(maybe_await(self._bucket[0], data))
elif identifier in self._bucket:
self._loop.create_task(maybe_await(self._bucket[identifier], data))
del self._bucket[identifier]
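# Minimal usage sketch; the host, port, password and command below are placeholders
# for a reachable WebRcon server.
if __name__ == '__main__':
    _loop = asyncio.get_event_loop()
    async def _demo():
        rcon = RconConnector('127.0.0.1', 28016, 'changeme',
                             message_callback=lambda data: print(data.get('Message')))
        await rcon.start(_loop)
        await rcon.command('status', lambda data: print(data))
        await asyncio.sleep(5)
        await rcon.close()
    _loop.run_until_complete(_demo())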
| 2.296875
| 2
|
scripts/test_endpoints.py
|
onerandomusername/pixels
| 0
|
12783781
|
<gh_stars>0
import colorsys
import math
import multiprocessing
import random
import httpx
import requests
from PIL import Image
from decouple import config
api_token = config("API_TOKEN")
base_url = config("BASE_URL", default="https://pixels.pythondiscord.com")
HEADERS = {
"Authorization": f"Bearer {api_token}"
}
def check_if_mod() -> dict:
"""Calls the `/mod` endpoint and returns the response."""
r = requests.get(f"{base_url}/mod", headers=HEADERS)
return r.json()
def set_to_mod(user_id: int) -> dict:
"""Makes the given `user_id` a mod."""
r = requests.post(
f"{base_url}/set_mod",
headers=HEADERS,
json={"user_id": user_id}
)
return r.json()
def show_image() -> None:
"""Gets the current image it displays it on screen."""
a = requests.get(base_url+'/get_pixels', headers=dict(Authorization='Bearer ' + api_token))
a.raise_for_status()
Image.frombytes('RGB', (160, 90), a.content).save('2.png')
def do_webhook() -> None:
"""Gets the current image it displays it on screen."""
a = requests.post('https://pixels.pythondiscord.com/webhook', headers=dict(Authorization='Bearer ' + api_token))
a.raise_for_status()
def generate_coordinates() -> list:
"""Generates the list of coordinates to populate."""
coordinates = []
for x in range(0, 160):
for y in range(0, 90):
coordinates.append((x, y))
return coordinates
def set_pixel(coordinate: list) -> None:
"""Sets the coordinate to a random colour."""
[r, g, b] = [math.ceil(x * 255) for x in colorsys.hsv_to_rgb(random.random() * 0.089, 0.8, 1)]
resp = httpx.post(base_url+"/set_pixel", json={
"x": coordinate[0],
"y": coordinate[1],
"rgb": f"{r:02x}{g:02x}{b:02x}"
}, headers=HEADERS)
resp.raise_for_status()
print(resp.text)
if __name__ == "__main__":
with multiprocessing.Pool(5) as p:
p.map(set_pixel, generate_coordinates())
| 2.65625
| 3
|
doc/notebooks/frefine.py
|
rrjudd/jvsip
| 10
|
12783782
|
<reponame>rrjudd/jvsip<filename>doc/notebooks/frefine.py
def frefine(a,rs):
"""
% f = frefine(a,rs);
% refine local minima and maxima of H using Newton's method
% H : H = a(1)+a(2)*cos(w)+...+a(n+1)*cos(n*w)
% rs : initial values for the extrema of H
% see also : frefine.m, frefine_e.m
"""
w = rs.copy
m = a.empty.ramp(0.0,1.0)
for k in range(12):
H = w.outer(m).cos.prod(a)
        H1 = (w.outer(m).sin.neg).prod(m * a)
        H2 = (w.outer(m).cos.neg).prod(m.copy.sq * a)
        w -= H1*H2.recip
return w
| 2.859375
| 3
|
src/dominh/constants.py
|
gavanderhoorn/dominh
| 25
|
12783783
|
<filename>src/dominh/constants.py
# Copyright (c) 2020, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# author: <NAME>
IO_ON: int = 1
IO_OFF: int = 0
JSON_SUCCESS: str = 'success'
JSON_REASON: str = 'reason'
HLPR_RAW_VAR: str = 'raw_var'
HLPR_SCALAR_VAR: str = 'scalar_var'
| 1.289063
| 1
|
lesson13n2_projects/house3n2/data/state_gen_conf.py
|
muzudho/py-state-machine-practice
| 0
|
12783784
|
"""State Generator"""
from lesson12_projects.house3.data.const import (
MY_ROOM,
OUT,
CLOSE_DOOR,
OPEN_DOOR,
STAIRS,
)
from lesson13n2.states.myroom import MyroomState
from lesson13n2.states.out import OutState
from lesson13n2.states.out_closedoor import OutClosedoorState
from lesson13n2.states.out_opendoor import OutOpendoorState
from lesson13n2.states.stairs import StairsState
# Return lambda functions so that a fresh state instance is created on every access, instead of reusing state objects
house3n2_state_gen = {
OUT: {
"": lambda: OutState(),
CLOSE_DOOR: lambda: OutClosedoorState(),
OPEN_DOOR: lambda: OutOpendoorState(),
},
STAIRS: lambda: StairsState(),
MY_ROOM: lambda: MyroomState(),
}
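# Usage sketch: each lookup invokes the lambda, so a fresh state instance is
# created on every access.
if __name__ == "__main__":
    out_opendoor_state = house3n2_state_gen[OUT][OPEN_DOOR]()
    stairs_state = house3n2_state_gen[STAIRS]()
    print(type(out_opendoor_state).__name__, type(stairs_state).__name__)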
| 2.4375
| 2
|
main.py
|
panvlas/pr2
| 0
|
12783785
|
<gh_stars>0
import sys
from PyQt5 import QtGui
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import *
from PyQt5.uic import loadUi
# Main application class
class Main(QDialog):
def __init__(self):
super(Main, self).__init__()
        loadUi('form.ui', self)  # Load the form from the .ui file
        # Set the window title
        self.setWindowTitle('Задача #2')
        # Set the window icon
        self.setWindowIcon(QtGui.QIcon('images/logo.png'))
        # Set the task image, scaled to fit the widget
        self.label_img.setPixmap(QPixmap('images/main.png'))
        self.label_img.setScaledContents(True)
        # Connect the handler methods to the buttons
self.btn_solve.clicked.connect(self.solve)
self.btn_clear.clicked.connect(self.clear)
self.btn_exit.clicked.connect(self.close)
    # Solve the exercise
def solve(self):
try:
a = float(self.lineEdit_a.text())
b = float(self.lineEdit_b.text())
x = float(self.lineEdit_x.text())
if x >= 8:
answer = ((x ** 2) / (a ** 2)) + ((x ** 2) / (b ** 2))
else:
answer = (x * ((a + b) ** 2))
print(answer)
self.label_answer.setText('Ответ: ' + format(answer, '.2f'))
        except (ValueError, ZeroDivisionError):
self.label_answer.setText('Ошибка!')
    # Clear the input fields
def clear(self):
self.lineEdit_a.setText('')
self.lineEdit_b.setText('')
self.lineEdit_x.setText('')
self.label_answer.setText('Ответ: ')
# Program entry point
def main():
app = QApplication(sys.argv)
window = Main()
window.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| 2.53125
| 3
|
bootcamp/wiki/plugins/globalhistory/views.py
|
basiltiger/easy_bootcamp
| 0
|
12783786
|
from __future__ import unicode_literals
from django.contrib.auth.decorators import login_required
from django.db.models import F
from django.utils.decorators import method_decorator
from django.views.generic import ListView
from wiki import models
from wiki.core.paginator import WikiPaginator
class GlobalHistory(ListView):
template_name = 'wiki/plugins/globalhistory/globalhistory.html'
paginator_class = WikiPaginator
paginate_by = 30
model = models.ArticleRevision
context_object_name = 'revisions'
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
self.only_last = kwargs.get('only_last', 0)
return super(GlobalHistory, self).dispatch(
request, *args, **kwargs)
def get_queryset(self):
if self.only_last == '1':
return self.model.objects.can_read(self.request.user) \
.filter(article__current_revision=F('id')).order_by('-modified')
else:
return self.model.objects.can_read(self.request.user).order_by('-modified')
def get_context_data(self, **kwargs):
kwargs['only_last'] = self.only_last
return super(GlobalHistory, self).get_context_data(**kwargs)
| 2.046875
| 2
|
test/layer/test_patching.py
|
ControlNet/tensorneko
| 9
|
12783787
|
import unittest
from torch import rand
from tensorneko.layer import PatchEmbedding2d
class TestPatching(unittest.TestCase):
# TODO
pass
class TestPatchEmbedding2d(unittest.TestCase):
def test_simple_patching(self):
# test input for 64x64 RGB image batches
b, c, h, w = (8, 3, 64, 64)
x = rand(b, c, h, w)
# patch size
p = 16
# embedding output
e = 512
# build layer
patch_layer = PatchEmbedding2d((c, h, w), p, e)
# patch grid size
seq_length = (h // p) * (w // p)
self.assertTrue(patch_layer(x).shape == (b, seq_length, e))
def test_overlap_patching(self):
# test input for 64x64 RGB image batches
b, c, h, w = (8, 3, 64, 64)
x = rand(b, c, h, w)
# patch size
p = 16
# embedding output
e = 512
# strides
s = 8
# build layer
patch_layer = PatchEmbedding2d((c, h, w), p, e, strides=(s, s))
# patch grid size
seq_length = ((h - p) // s + 1) * ((w - p) // s + 1)
self.assertTrue(patch_layer(x).shape == (b, seq_length, e))
| 2.84375
| 3
|
app/ini_utils.py
|
mbroz/feel-the-streets
| 5
|
12783788
|
<filename>app/ini_utils.py
import configparser
def ini_file_to_dict(ini_file_path):
parser = configparser.ConfigParser()
parser.read(ini_file_path)
result = {}
for section, values in parser.items():
result[section] = {}
result[section].update(values)
return result
def dict_to_ini_file(ini_dict, dest_file):
parser = configparser.ConfigParser()
for section, values in ini_dict.items():
parser.add_section(section)
parser[section].update({k: str(v) for k, v in values.items()})
with open(dest_file, "w", encoding="utf-8") as fp:
parser.write(fp)
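# Round-trip sketch; the file name and values are placeholders.
if __name__ == "__main__":
    dict_to_ini_file({"server": {"host": "localhost", "port": 8080}}, "example.ini")
    print(ini_file_to_dict("example.ini"))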
| 3.0625
| 3
|
viewer.py
|
facerecon/finddash
| 0
|
12783789
|
<filename>viewer.py
import cv2
import zmq
import base64
import numpy as np
ip = input("please enter ip: ")
# ip ='10.19.129.177'
context = zmq.Context()
footage_socket = context.socket(zmq.SUB)
footage_socket.setsockopt_string(zmq.SUBSCRIBE, '')
footage_socket.connect('tcp://' + ip + ':5555')
while True:
try:
frame = footage_socket.recv_string()
img = base64.b64decode(frame)
# print(img[:20])
        npimg = np.frombuffer(img, dtype=np.uint8)
print(npimg[:20])
source = cv2.imdecode(npimg, 1)
cv2.imshow("Stream", source)
cv2.waitKey(1)
except KeyboardInterrupt:
cv2.destroyAllWindows()
break
| 2.59375
| 3
|
models/nlp/electra/utils.py
|
kevinyang8/deep-learning-models
| 129
|
12783790
|
<reponame>kevinyang8/deep-learning-models
from colorama import Fore, Style
def colorize(token: str, color: str) -> str:
return f"{color}{token}{Style.RESET_ALL}"
def colorize_gen(tokenizer, true_ids, gen_ids, mask):
gen_ids = gen_ids.numpy()
true_ids = true_ids.numpy()
mask = mask.numpy()
tokens = tokenizer.convert_ids_to_tokens(gen_ids)
styled_tokens = tokens.copy()
for i in range(len(tokens)):
if mask[i]:
styled_tokens[i] = colorize(
tokens[i], color=Fore.GREEN if (true_ids[i] == gen_ids[i]) else Fore.RED
)
else:
styled_tokens[i] = tokens[i]
return " ".join(styled_tokens)
def colorize_dis(tokenizer, gen_ids, dis_preds):
gen_ids = gen_ids.numpy()
dis_preds = dis_preds.numpy()
tokens = tokenizer.convert_ids_to_tokens(gen_ids)
styled_tokens = tokens.copy()
for i in range(len(tokens)):
if dis_preds[i]:
styled_tokens[i] = colorize(tokens[i], color=Fore.YELLOW)
else:
styled_tokens[i] = tokens[i]
return " ".join(styled_tokens)
| 2.875
| 3
|
onto_app/models.py
|
Remorax/SemValidator
| 0
|
12783791
|
from flask_sqlalchemy import SQLAlchemy
from onto_app import db
from onto_app.helper import add_new_ontologies
class users(db.Model):
    __tablename__ = 'users'
id = db.Column(db.String, primary_key=True)
username = db.Column(db.String(200), unique=True, nullable=False)
# password = db.Column(db.String(200), nullable=False)
privilege = db.Column(db.Integer, nullable=False)
ontology = db.relationship('ontologies', backref='users')
decisions = db.relationship('class_decisions', cascade="all,delete", backref='users')
class ontologies(db.Model):
__tablename__ = 'ontologies'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
name = db.Column(db.String(200), nullable=False)
# filepath = db.Column(db.String(200), unique=True, nullable=False)
admin_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False)
relations = db.relationship('class_relations', cascade="all,delete", backref='ontologies')
class class_relations(db.Model):
__tablename__ = 'class_relations'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
property = db.Column(db.String(200))
domain = db.Column(db.String(200), nullable=False)
range = db.Column(db.String(200), nullable=False)
onto_id = db.Column(db.Integer, db.ForeignKey('ontologies.id'), nullable=False)
decisions = db.relationship('class_decisions', cascade="all,delete", backref='class_relations')
final_class_decisions = db.relationship('final_class_decisions', cascade="all,delete", backref='class_relations')
class nodes(db.Model):
__tablename__ = 'nodes'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
onto_id = db.Column(db.Integer, db.ForeignKey('ontologies.id'), nullable=False)
name = db.Column(db.String(200), nullable=False)
decisions = db.relationship('node_decisions', cascade="all,delete", backref='nodes')
final_node_decisions = db.relationship('final_node_decisions', cascade="all,delete", backref='nodes')
class class_decisions(db.Model):
__tablename__ = 'class_decisions'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
relation_id = db.Column(db.Integer, db.ForeignKey('class_relations.id'), nullable=False)
approved = db.Column(db.Integer, nullable=False)
user_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False)
class node_decisions(db.Model):
__tablename__ = 'node_decisions'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
node_id = db.Column(db.Integer, db.ForeignKey('nodes.id'), nullable=False)
approved = db.Column(db.Integer, nullable=False)
user_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False)
class final_class_decisions(db.Model):
__tablename__ = 'final_class_decisions'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
relation_id = db.Column(db.Integer, db.ForeignKey('class_relations.id'), nullable=False)
approved = db.Column(db.Integer, nullable=False)
class final_node_decisions(db.Model):
__tablename__ = 'final_node_decisions'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
node_id = db.Column(db.Integer, db.ForeignKey('nodes.id'), nullable=False)
approved = db.Column(db.Integer, nullable=False)
# db.drop_all()
try:
db.create_all()
except Exception:
pass
add_new_ontologies()
| 2.484375
| 2
|
django/first_project/first_app/models.py
|
SandraCoburn/python-django
| 0
|
12783792
|
from django.db import models
'''
SuperUser information:
User: Sandra
Email: <EMAIL>
Password: <PASSWORD>
'''
# Create your models here.
class Topic(models.Model):
top_name = models.CharField(max_length=264, unique=True)
def __str__(self) -> str:
return self.top_name
class Webpage(models.Model):
topic = models.ForeignKey(Topic, on_delete=models.CASCADE)
name = models.CharField(max_length=264, unique=True)
url = models.URLField(unique=True)
def __str__(self) -> str:
return self.name
class AccessRecord(models.Model):
name = models.ForeignKey(Webpage, on_delete=models.CASCADE)
date = models.DateField()
def __str__(self) -> str:
return str(self.date)
| 2.546875
| 3
|
step1/paddle/pd_forward.py
|
rainyBJ/fast-transformer-paddle
| 0
|
12783793
|
<filename>step1/paddle/pd_forward.py
import paddle
from fast_transformer_pd import FastTransformer
import numpy as np
from reprod_log import ReprodLogger
reprod_logger = ReprodLogger()
seed = 42
paddle.seed(seed)
model = FastTransformer(
num_tokens=20000,
dim=512,
depth=2,
max_seq_len=4096,
absolute_pos_emb=True
)
# Model initialization alignment
model_dict = model.state_dict()
torch_model_dict = paddle.load("paddle_init.pdparams")
torch_model_dict = {k: v for k, v in torch_model_dict.items() if k in model_dict}
model_dict.update(torch_model_dict)
model.load_dict(model_dict)
# Input data alignment
x_np = np.load('../../fake_data/x.npy')
x = paddle.to_tensor(x_np)
mask_np = np.load('../../fake_data/mask.npy')
mask = paddle.to_tensor(mask_np)
logits = model(x, mask=mask)
reprod_logger.add("logits", logits.cpu().detach().numpy())
reprod_logger.save("forward_paddle.npy")
| 2.28125
| 2
|
restaurant_tracker/urls.py
|
psmith150/restaurant-tracker
| 0
|
12783794
|
<filename>restaurant_tracker/urls.py
from django.urls import path
from . import views
app_name = 'restaurant_tracker'
urlpatterns = [
# ex: /restaurants/
path('', views.IndexView.as_view(), name='index'),
# ex: /restaurants/1/
path('<int:pk>/', views.RestaurantDetailView.as_view(), name='detail'),
# ex: /restaurants/1/edit/
path('<int:pk>/edit/', views.RestaurantEditView.as_view(), name='restaurant_edit'),
# ex: /restaurants/create/
path('create/', views.create_restaurant, name='restaurant_create'),
# ex: /restaurants/1/delete/
path('<int:pk>/delete/', views.RestaurantDeleteView.as_view(), name='restaurant_delete'),
# ex: /restaurants/tags/
path('tags/', views.TagIndexView.as_view(), name='tag_index'),
# ex: /restaurants/tags/1/edit/
path('tags/<int:pk>/edit/', views.TagEditView.as_view(), name='tag_edit'),
# ex: /restaurants/tags/create/
path('tags/create/', views.create_tag, name='tag_create'),
# ex: /restaurants/tags/1/delete/
path('tags/<int:pk>/delete/', views.TagDeleteView.as_view(), name='tag_delete'),
# ex: /restaurants/1/menu_items/create/
path('<int:pk>/menu_items/create/', views.create_menu_item, name='menu_item_create'),
]
| 1.9375
| 2
|
blst/config.py
|
collinmutembei/II
| 0
|
12783795
|
import os
from flask_dotenv import DotEnv
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
class Config(object):
"""Main configuration class"""
DEBUG = False
TESTING = False
CSRF_ENABLED = True
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL')
SQLALCHEMY_TRACK_MODIFICATIONS = True
SECRET_KEY = os.environ.get('SECRET')
    @classmethod
    def init_app(cls, app):
        env = DotEnv()
        env.init_app(app, os.path.join(BASE_DIR, '.env'), verbose_mode=True)
# configuration for when in production
class ProductionConfig(Config):
"""configuration for when in production"""
DEBUG = False
# configuration for when in development
class DevelopmentConfig(Config):
"""configuration for when in development"""
DEVELOPMENT = True
DEBUG = True
# configuration for when testing
class TestingConfig(Config):
"""configuration for when testing"""
TESTING = True
if os.getenv('TRAVIS_BUILD', None):
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL')
else:
SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DB_URL')
config = {
'production': ProductionConfig,
'development': DevelopmentConfig,
'testing': TestingConfig,
'default': ProductionConfig,
}
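# Selection sketch; assumes a Flask app and a .env file at the project root.
if __name__ == "__main__":
    from flask import Flask
    _app = Flask(__name__)
    _app.config.from_object(config["development"])
    config["development"].init_app(_app)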
| 2.140625
| 2
|
zmb.py
|
grm34/ZenMaxBuilder
| 0
|
12783796
|
# -*- coding: utf-8 -*-
"""
ZenMaxBuilder Copyright © 2021 darkmaster@grm34 https://github.com/grm34
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from modules.main.banner import app_banner
from modules.main.helper import app_helper
from modules.main.logger import app_logger
from modules.main.translator import app_translator
from modules.manager.error import (prevent_android_folder,
prevent_defconfig_folder,
prevent_img_folder, prevent_kernel_folder,
prevent_out_folder, prevent_wrong_usage,
prevent_zip_folder)
from modules.manager.exit import app_exit
from modules.manager.json import load_json_file
from modules.manager.options import options_manager
from modules.session.debug import session_debug
from modules.session.prompt import ask_for_mode, ask_questions
from modules.session.requirements import session_requirements
from modules.session.run import run_session
from modules.session.settings import (global_settings, mode_settings,
session_settings)
class ZenMaxBuilder:
"""Application `main` object
Project structure
-----------------
" ZenMaxBuilder.py
" |
" |---- modules/
" | |
" | |---- cloner/
" | | |---- anykernel.py
" | | |---- toolchains.py
" | | |---- zipsigner.py
" | |
" | |---- compiler/
" | | |---- build.py
" | | |---- clean.py
" | | |---- defconfig.py
" | | |---- menuconfig.py
" | |
" | |---- inquirer/
" | | |---- mode.py
" | | |---- save.py
" | | |---- session.py
" | | |---- validator.py
" | |
" | |---- main/
" | | |---- banner.py
" | | |---- helper.py
" | | |---- logger.py
" | | |---- translator.py
" | |
" | |---- manager/
" | | |---- cmd.py
" | | |---- error.py
" | | |---- exit.py
" | | |---- json.py
" | | |---- options.py
" | |
" | |---- obtainer/
" | | |---- compiler.py
" | | |---- defconfigs.py
" | | |---- devices.py
" | | |---- images.py
" | | |---- processor.py
" | | |---- version.py
" | | |---- zips.py
" | |
" | |---- session/
" | | |---- debug.py
" | | |---- prompt.py
" | | |---- requirements.py
" | | |---- run.py
" | | |---- settings.py
" | |
" | |---- zipper/
" | | |---- config.py
" | | |---- makezip.py
" | | |---- signer.py
" | |
" |
"""
def __init__(self):
"""Set main `class` instance
Initialize
----------
self.app: "Dictionary containing application informations"
self.language: "String containing desired language code"
self.themes: "Dictionary containing application themes"
self.theme: "Dictionary containing selected theme settings"
self.options: "Tuple containing cmd line options from sys.argv"
self.session: "Dictionary to store session parameters"
self.devices: "Array of dict of availables devices and data"
self.trad: "Gettext function to translate strings"
"""
self.app = load_json_file('app.json')
self.language = self.app['language']
self.themes = load_json_file('themes.json')
self.theme = self.themes['default']
self.options = app_helper(self)
self.session = load_json_file('settings.json')
self.devices = {}
self.trad = ''
def __str__(self):
"""Add extra method to the class.
Returns
-------
Current class name
"""
return self.__class__.__name__
def run(self):
"""Start the `application`
Actions
-------
1) "Set global settings"
2) "Set user options"
3) "Prevent bad settings"
3) "Ask for mode to use"
4) "Set mode settings"
5) "Ask required questions"
6) "Set session settings"
7) "Check for requirements"
8) "Run selected action"
"""
# Options
global_settings(self)
options_manager(self)
self.trad = app_translator(self.language)
# Prevent wrong settings
prevent_kernel_folder(self)
prevent_defconfig_folder(self)
prevent_out_folder(self)
prevent_img_folder(self)
prevent_zip_folder(self)
prevent_android_folder()
# Session
app_banner(self)
ask_for_mode(self)
mode_settings(self)
ask_questions(self)
session_debug(self)
session_settings(self)
session_requirements(self)
run_session(self)
if __name__ == '__main__':
try:
app_logger()
prevent_wrong_usage()
ZenMaxBuilder().run()
except KeyboardInterrupt:
app_exit()
| 1.65625
| 2
|
scenarios/dispute_list/executable.py
|
timgates42/balanced-python
| 12
|
12783797
|
<filename>scenarios/dispute_list/executable.py
import balanced
balanced.configure('ak-test-<KEY>')
disputes = balanced.Dispute.query
| 1.328125
| 1
|
diving_in_python/week_5/client.py
|
assassinen/coursera_mfti_python
| 0
|
12783798
|
<reponame>assassinen/coursera_mfti_python
import socket
import time
class Client:
def __init__(self, host, port, timeout=None):
# sock = socket.socket()
# sock.connect(("127.0.0.1", 10001))
# sock.sendall("ping".encode("utf8"))
# sock.close()
        # shorter equivalent of the commented-out lines above
self.sock = socket.create_connection((host, port))
def send_message(self):
self.sock.sendall("ping".encode("utf8"))
    def response_is_ok(self, data):
        return data[0:2] == 'ok' and data[-1] == '\n' and data[-2] == '\n'
    def put(self, metric, value, timestamp=None):
        if timestamp is None:
            timestamp = str(int(time.time()))
        self.sock.sendall("put {} {} {}\n".format(metric, value, timestamp).encode("utf8"))
        data = self.sock.recv(1024).decode("utf8")
        if not self.response_is_ok(data):
            raise ClientError("get_client_error")
def get(self, key):
self.sock.sendall("get {}\n".format(key).encode("utf8"))
rez = {}
data = self.sock.recv(1024).decode("utf8")
status, payload = data.split("\n", 1)
print(status)
print(payload)
        if not self.response_is_ok(data):
            raise ClientError("get_client_error")
for metric, value, timestamp in (metrics.split() for metrics in data.split('\n') if len(metrics.split()) == 3):
if metric not in rez:
rez[metric] = []
rez[metric].append((int(timestamp), float(value)))
return rez
class ClientError(Exception):
pass
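# Usage sketch; host and port are placeholders for a running metrics server.
if __name__ == "__main__":
    client = Client("127.0.0.1", 8888)
    client.put("palm.cpu", 0.5, str(int(time.time())))
    print(client.get("palm.cpu"))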
| 3.25
| 3
|
qub/mask_encoding.py
|
mksarker/data_preprocessing
| 0
|
12783799
|
<gh_stars>0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import cv2
import imageio
import numpy as np
import cv2 as cv
import scipy
import xmltodict
from pycocotools import mask as mask_util
# ref: https://www.kaggle.com/stainsby/fast-tested-rle-and-input-routines
def rle_encode(mask):
pixels = mask.T.flatten()
# We need to allow for cases where there is a '1' at either end of the sequence.
# We do this by padding with a zero at each end when needed.
use_padding = False
if pixels[0] or pixels[-1]:
use_padding = True
pixel_padded = np.zeros([len(pixels) + 2], dtype=pixels.dtype)
pixel_padded[1:-1] = pixels
pixels = pixel_padded
rle = np.where(pixels[1:] != pixels[:-1])[0] + 2
if use_padding:
rle = rle - 1
rle[1::2] = rle[1::2] - rle[:-1:2]
return rle
def rle_to_string(runs):
return ' '.join(str(x) for x in runs)
# This is copied from https://www.kaggle.com/paulorzp/run-length-encode-and-decode.
# Thanks to <NAME>.
def rle_decode(rle_str, mask_shape, mask_dtype):
s = rle_str.split()
starts, lengths = [np.asarray(x, dtype=int) for x in (s[0:][::2], s[1:][::2])]
starts -= 1
ends = starts + lengths
mask = np.zeros(np.prod(mask_shape), dtype=mask_dtype)
for lo, hi in zip(starts, ends):
mask[lo:hi] = 1
return mask.reshape(mask_shape[::-1]).T
def encode_mask_to_poly(mask, mask_id, image_id):
if len(mask.shape) == 3:
mask = cv.cvtColor(mask, cv.COLOR_BGR2GRAY)
kernel = np.ones((2, 2), np.uint8)
mask = cv.dilate(mask, kernel, iterations=1)
    C, h = cv.findContours(mask, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)  # OpenCV 4.x returns (contours, hierarchy)
seg = [[float(x) for x in contour.flatten()] for contour in C]
seg = [cont for cont in seg if len(cont) > 4] # filter all polygons that are boxes
rle = mask_util.frPyObjects(seg, mask.shape[0], mask.shape[1])
return {
'area': float(sum(mask_util.area(rle))),
'bbox': list(mask_util.toBbox(rle)[0]),
'category_id': 1,
'id': mask_id,
'image_id': image_id,
'iscrowd': 0,
'segmentation': seg
}
def encode_mask_to_rle(mask, mask_id, image_id):
seg = mask_util.encode(np.asarray(mask, order='F'))
return encode_rle(seg, mask_id, image_id)
def encode_rle(rle, mask_id, image_id):
rle['counts'] = rle['counts'].decode('utf-8')
return {
'image_id': image_id,
'segmentation': rle,
'category_id': 1,
'id': mask_id,
'area': int(mask_util.area(rle)),
'bbox': list(mask_util.toBbox(rle)),
'iscrowd': 0
}
def regions_to_rle(regions, shape):
R = [r.flatten() for r in regions]
rle = mask_util.frPyObjects(R, shape[0], shape[1])
return rle
def parse_xml_annotations(file_path):
with open(file_path) as f:
xml = f.read()
ann = xmltodict.parse(xml)
regions = []
if isinstance(ann['Annotations']['Annotation'], list):
print('Found Multiple regions')
for a in ann['Annotations']['Annotation']:
if 'Regions' in a and 'Region' in a['Regions']:
for region in a['Regions']['Region']:
vertices = []
for v in region['Vertices']['Vertex']:
vertices.append([float(v['@X']), float(v['@Y'])])
regions.append(np.asarray(vertices))
else:
for region in ann['Annotations']['Annotation']['Regions']['Region']:
vertices = []
for v in region['Vertices']['Vertex']:
vertices.append([float(v['@X']), float(v['@Y'])])
regions.append(np.asarray(vertices))
return regions
def filter_contours(contours, H):
C = []
i = 0
while i != -1:
j = H[i][2]
while j != -1:
C.append(contours[j])
j = H[j][0]
        i = H[i][0]
    return C
kernel = np.ones((3, 3), np.uint8)  # shared dilation kernel, also used by dedupe_contours
def dedupe_contours(rles, dataset):
M = mask_util.decode(rles)
all_mask = M[:, :, 0].copy()
all_mask[:] = False
areas = np.sum(M, (0, 1))
sort_idx = areas.argsort()
areas = areas[sort_idx]
M = M[:, :, sort_idx]
res = []
im_size = M.shape[0] * M.shape[1]
for idx in range(M.shape[-1]):
if areas[idx] < 30 or areas[idx] > im_size * 0.5:
continue
m = M[:, :, idx]
intersection = m & all_mask
area_inter = intersection.sum()
if area_inter > 30:
continue
else:
mask = m & ~all_mask
total_area = mask.sum()
if total_area < 30:
continue
if dataset not in ['2009_ISBI_2DNuclei', 'cluster_nuclei']:
m = cv.dilate(m, kernel, iterations=1)
all_mask = m | all_mask
res.append(m)
if not res:
return None
M2 = np.stack(res).transpose((1, 2, 0))
rles = mask_util.encode(np.asarray(M2, dtype=np.uint8, order='F'))
return rles
def parse_segments_from_outlines(outline_path, dataset):
outlines = imageio.imread(outline_path)
if dataset == 'cd3':
outlines[outlines != [255, 0, 0]] = 0
imgray = cv2.cvtColor(outlines, cv2.COLOR_RGB2GRAY)
ret, thresh = cv2.threshold(imgray, 0, 255, 0)
thresh[0, :] = 1
thresh[:, 0] = 1
thresh[:, -1] = 1
thresh[-1, :] = 1
contours, hierarchy = cv2.findContours(thresh,
cv2.RETR_CCOMP,
cv2.CHAIN_APPROX_SIMPLE)
seg = [[float(x) for x in c.flatten()] for c in contours]
seg = [cont for cont in seg if len(cont) > 4] # filter all polygons that are boxes
if not seg:
return []
rles = mask_util.frPyObjects(seg, outlines.shape[0], outlines.shape[1])
rles = dedupe_contours(rles, dataset)
return rles
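# Round-trip sketch for the RLE helpers above, using a tiny made-up binary mask.
if __name__ == '__main__':
    demo_mask = np.array([[0, 1], [1, 1]], dtype=np.uint8)
    demo_runs = rle_encode(demo_mask)  # column-major runs: start 2, length 3
    restored = rle_decode(rle_to_string(demo_runs), demo_mask.shape, demo_mask.dtype)
    assert (restored == demo_mask).all()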
| 2.296875
| 2
|
SLpackage/private/pacbio/pythonpkgs/pbcommand/lib/python2.7/site-packages/pbcommand/common_options.py
|
fanglab/6mASCOPE
| 5
|
12783800
|
"""Common options and utils that can me used in commandline utils"""
import logging
import argparse
import sys
RESOLVED_TOOL_CONTRACT_OPTION = "--resolved-tool-contract"
EMIT_TOOL_CONTRACT_OPTION = "--emit-tool-contract"
def add_debug_option(p):
p.add_argument("--pdb", action="store_true", default=False,
help="Enable Python debugger")
return p
def add_log_debug_option(p):
"""This requires the log-level option"""
p.add_argument('--debug', action="store_true", default=False, help="Alias for setting log level to DEBUG")
return p
def add_log_quiet_option(p):
"""This requires the log-level option"""
p.add_argument('--quiet', action="store_true", default=False, help="Alias for setting log level to CRITICAL to suppress output.")
return p
def add_log_verbose_option(p):
p.add_argument(
"-v",
"--verbose",
dest="verbosity",
action="count",
help="Set the verbosity level.")
return p
def add_log_level_option(p, default_level='INFO'):
"""Add logging level with a default value"""
if isinstance(default_level, int):
default_level = logging.getLevelName(default_level)
p.add_argument('--log-level', choices=('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'),
default=default_level, help="Set log level")
return p
def add_log_file_option(p):
p.add_argument('--log-file', default=None, type=str,
help="Write the log to file. Default(None) will write to stdout.")
return p
def add_resolved_tool_contract_option(p):
p.add_argument(RESOLVED_TOOL_CONTRACT_OPTION, type=str,
help="Run Tool directly from a PacBio Resolved tool contract")
return p
def add_emit_tool_contract_option(p):
p.add_argument(EMIT_TOOL_CONTRACT_OPTION, action="store_true",
default=False,
help="Emit Tool Contract to stdout")
return p
def add_nproc_option(p, default=1):
p.add_argument("-j", "--nproc", type=int, default=default,
help="Number of processors to use")
return p
def add_base_options(p, default_level='INFO'):
"""Add the core logging options to the parser and set the default log level
If you don't want the default log behavior to go to stdout, then set
the default log level to be "ERROR". This will essentially suppress all
output to stdout.
Default behavior will only emit to stderr. This is essentially a '--quiet'
default mode.
my-tool --my-opt=1234 file_in.txt
To override the default behavior:
my-tool --my-opt=1234 --log-level=INFO file_in.txt
    Or write the log to an explicit log file
my-tool --my-opt=1234 --log-level=DEBUG --log-file=file.log file_in.txt
"""
    # This should automatically be added by get_default_argparser
add_log_file_option(p)
p_log = p.add_mutually_exclusive_group()
add_log_verbose_option(add_log_quiet_option(add_log_debug_option(
add_log_level_option(p_log, default_level=default_level))))
return p
def add_common_options(p, default_level='INFO'):
"""
New model for 3.1 release. This should replace add_base_options
"""
return add_log_quiet_option(add_log_debug_option(add_log_level_option(add_log_file_option(p), default_level=default_level)))
def add_base_options_with_emit_tool_contract(p, default_level='INFO'):
# can't use compose here because of circular imports via parser
return add_base_options(add_resolved_tool_contract_option(add_emit_tool_contract_option(p)), default_level=default_level)
def _to_print_message_action(msg):
class PrintMessageAction(argparse.Action):
"""Print message and exit"""
def __call__(self, parser, namespace, values, option_string=None):
sys.stdout.write(msg + "\n")
sys.exit(0)
return PrintMessageAction
def add_subcomponent_versions_option(p, subcomponents):
"""Add subcomponents to a subparser to provide more information
about the tools dependencies.
Subcomponents must be provided as a list of tuples (component, version)
"""
max_length = max(len(x) for x, _ in subcomponents)
pad = 2
msg = "\n" .join([" : ".join([x.rjust(max_length + pad), y]) for x, y in subcomponents])
action = _to_print_message_action(msg)
p.add_argument("--versions",
nargs=0,
help="Show versions of individual components",
action=action)
return p
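# Composition sketch on a plain argparse parser; the component names and versions
# passed to add_subcomponent_versions_option are illustrative.
if __name__ == "__main__":
    _p = argparse.ArgumentParser()
    add_common_options(add_nproc_option(_p))
    add_subcomponent_versions_option(_p, [("mylib", "0.1.0"), ("mytool", "1.2.3")])
    print(_p.parse_args(["--log-level", "DEBUG", "--nproc", "4"]))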
| 3.03125
| 3
|
stable_nalu/layer/gumbel_nalu.py
|
wlm2019/Neural-Arithmetic-Units
| 147
|
12783801
|
from .gumbel_nac import GumbelNACLayer
from .gumbel_mnac import GumbelMNACLayer
from ._abstract_nalu import AbstractNALULayer
from ._abstract_recurrent_cell import AbstractRecurrentCell
class GumbelNALULayer(AbstractNALULayer):
"""Implements the Gumbel NALU (Neural Arithmetic Logic Unit)
Arguments:
in_features: number of ingoing features
out_features: number of outgoing features
"""
def __init__(self, in_features, out_features, **kwargs):
super().__init__(GumbelNACLayer, GumbelMNACLayer, in_features, out_features, **kwargs)
class GumbelNALUCell(AbstractRecurrentCell):
"""Implements the Gumbel NALU (Neural Arithmetic Logic Unit) as a recurrent cell
Arguments:
input_size: number of ingoing features
hidden_size: number of outgoing features
"""
def __init__(self, input_size, hidden_size, **kwargs):
super().__init__(GumbelNALULayer, GumbelMNACLayer, input_size, hidden_size, **kwargs)
| 2.59375
| 3
|
src/clincoded/upgrade/annotation.py
|
ClinGen/clincoded
| 30
|
12783802
|
from contentbase.upgrader import upgrade_step
@upgrade_step('annotation', '1', '2')
def annotation_1_2(value, system):
# https://github.com/ClinGen/clincoded/issues/453
value['status'] = 'in progress'
@upgrade_step('annotation', '2', '3')
def annotation_2_3(value, system):
# https://github.com/ClinGen/clincoded/issues/1507
# Add affiliation property and update schema version
return
@upgrade_step('annotation', '3', '4')
def annotation_3_4(value, system):
# https://github.com/ClinGen/clincoded/issues/1486
# Add articleNotes property and update schema version
return
| 2.09375
| 2
|
python_code/easy/111_Minimum_Depth_of_Binary_Tree_easy/solution.py
|
timshenkao/interview_coding_exercises
| 0
|
12783803
|
# Copyright (c) 2021 - present, <NAME>
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from typing import Optional
from python_code.helper.binary_trees import TreeNode
# 111. Minimum Depth of Binary Tree https://leetcode.com/problems/minimum-depth-of-binary-tree/
# Given a binary tree, find its minimum depth.
# The minimum depth is the number of nodes along the shortest path from the root node down to the nearest leaf node.
# Note: A leaf is a node with no children.
class Solution:
def min_depth_recursion(self, root: Optional[TreeNode]) -> int:
""" Time complexity: O(N). We check / visit every node
Space complexity: if we don't count recursion stack then O(1).
Else O(log N) in case of balanced tree or O(N) in case of unbalanced tree.
"""
# empty tree
if not root:
return 0
# node is a leaf
if not root.left and not root.right:
return 1
# either left or right child is missing
if not root.left or not root.right:
return max(self.min_depth_recursion(root.left), self.min_depth_recursion(root.right)) + 1
# both children are present
else:
return min(self.min_depth_recursion(root.left), self.min_depth_recursion(root.right)) + 1
def min_depth_bfs_iteration(self, root):
""" Time complexity: O(N). We check / visit every node
Space complexity: O(N) if we have to keep the whole tree.
"""
# empty tree
if not root:
return 0
queue = [(root, 1)]
while queue:
curr_node, curr_depth = queue.pop(0)
if not curr_node.left and not curr_node.right:
return curr_depth
if curr_node.left:
queue.append((curr_node.left, curr_depth + 1))
if curr_node.right:
queue.append((curr_node.right, curr_depth + 1))
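# Quick check on the classic example tree [3, 9, 20, null, null, 15, 7]; this
# assumes the helper TreeNode accepts (val, left, right).
if __name__ == "__main__":
    demo_root = TreeNode(3, TreeNode(9), TreeNode(20, TreeNode(15), TreeNode(7)))
    solution = Solution()
    print(solution.min_depth_recursion(demo_root))      # expected: 2
    print(solution.min_depth_bfs_iteration(demo_root))  # expected: 2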
| 3.921875
| 4
|
crawler/crawler.py
|
manhph2211/SentimentAnalysis
| 4
|
12783804
|
import multiprocessing as mp
import pandas as pd
from selenium import webdriver
from time import sleep
import config
import json
from utils import access_url
import argparse
def crawler(cate_name,item_links):
driver = webdriver.Chrome()
fb_li = []
counter = 0
for item_link in item_links:
access_url(driver,item_link.replace('now','foody'))
fb_n = 1
while True:
while True:
try:
more_fb_button = driver.find_element_by_xpath(config.more_fb_bt.format('/'.join(item_link.split('/')[3:])))
more_fb_button.click()
except:
break
try:
dic = {}
dic['category'] = cate_name
dic['text'] = driver.find_element_by_xpath(config.text_element.format('/'.join(item_link.split('/')[3:]),fb_n)).text
dic['star'] = driver.find_element_by_xpath(config.star_element.format('/'.join(item_link.split('/')[3:]),fb_n)).text
fb_li.append(dic)
df = pd.DataFrame(fb_li)
df.to_csv('./_data/{}.csv'.format(cate_name))
counter += 1
except:
break
fb_n += 1
print(counter)
def multiprocess(data):
parser = argparse.ArgumentParser(description='Multiprocessing!!!')
parser.add_argument("-p","--processes", help="Number of processes for Multiprocessing.", type=int)
args = parser.parse_args()
pool = mp.Pool(args.processes)
pool.starmap(crawler,data.items())
if __name__ == '__main__':
with open('item_links.json','r') as f:
data = json.load(f)
multiprocess(data)
| 2.890625
| 3
|
run_continuation.py
|
RasmooL/dqn-tf
| 1
|
12783805
|
"""
Copyright 2016 <NAME>
This software may be modified and distributed under the terms
of the MIT license. See the LICENSE.txt file for details.
"""
import sys
import time
from sacred import Experiment
from core.ALEEmulator import ALEEmulator
from continuation.OriginalNet import OriginalNet
from core.ScreenBuffer import ScreenBuffer
import numpy as np
import cv2
ex = Experiment('continuation')
@ex.config
def net_config():
conv_layers = 3
conv_units = [32, 64, 64]
filter_sizes = [8, 4, 2]
strides = [4, 2, 1]
hidden_units = 512
num_heads = 3
gate_noise = 0.01
sharpening_slope = 10
in_width = 84
in_height = 84
device = '/gpu:0'
lr = 0.0001
opt_decay = 0.95
momentum = 0.5
opt_eps = 0.01
tensorboard = False
tensorboard_freq = 50
@ex.config
def emu_config():
rom_path = '../ale-git/roms/'
rom_name = 'breakout'
display_screen = True
frame_skip = 4
repeat_prob = 0.0
color_avg = True
random_seed = 42
random_start = 30
@ex.config
def agent_config():
batch_size = 16
train_start = 5e3
train_frames = 5e6
test_freq = 5e4
test_frames = 5e3
save_freq = 5e3
@ex.automain
def main(_config, _log):
sys.stdout = open('log_' + _config['rom_name'] + time.strftime('%H%M%d%m', time.gmtime()), 'w', buffering=True)
print "#{}".format(_config)
emu = ALEEmulator(_config)
_config['num_actions'] = emu.num_actions
net = OriginalNet(_config)
cv2.startWindowThread()
cv2.namedWindow("prediction")
# fill screen history up to batch size
buf = ScreenBuffer(_config, _config['batch_size'])
for n in range(_config['batch_size']):
emu.act(emu.actions[np.random.randint(0, emu.num_actions)]) # act randomly
buf.insert(emu.get_screen_rgb())
# train
step = 0
while step < _config['train_frames']:
cost = net.train(buf.get(), [step])
        print(step, cost)
# predict next frame
hidden = net.encode(buf.get()[np.newaxis, -1])
pred = net.predict_from_hidden(hidden)
emu.act(emu.actions[np.random.randint(0, emu.num_actions)]) # act randomly
buf.insert(emu.get_screen_rgb())
# display difference between prediction and true frame
cv2.imshow('prediction', cv2.resize(pred[0], (84 * 4, 84 * 4)))
if emu.terminal():
emu.new_game()
if step % _config['save_freq'] == 0:
net.save('cont')
step += 1
| 2.109375
| 2
|
speechtotext/speech_to_text/speech_to_text.py
|
jonathanvanleeuwen/speech_to_text
| 0
|
12783806
|
<gh_stars>0
import logging
import time
import traceback
import azure.cognitiveservices.speech as speechsdk
from pathlib import Path
from typing import Union
logger = logging.getLogger(__name__)
def set_logging_level(level: str) -> None:
    if not logger.hasHandlers():
handler = logging.StreamHandler()
handler.setLevel(level)
handler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s] %(message)s"))
logger.addHandler(handler)
logger.setLevel(level)
class SpeechToText:
"""
Class which can be used to interface with azure speech to text service
required kwargs:
:param subscription: The subscription key to use for the service, str
optional kwargs
:param region: The region of the service i.e. westeurope (default), str
:param out: The output file and location to save results i.e. "./default_out.txt" (default), Path
:param max_duration: The maximum time the recognizer will run, seconds i.e. 3600 (default), Int
:param language: The language the recognizer will use, i.e. "en-US" (default), str
:param verbose: logging level to output, default = INFO
"""
def __init__(self, **kwargs):
self.subscription = kwargs.get("subscription")
self.region = kwargs.get("region", "westeurope")
self.out = kwargs.get("out", None)
self.max_duration = kwargs.get("max_duration", None)
self.verbose = kwargs.get("verbose", "INFO")
set_logging_level(self.verbose)
if self.max_duration is None:
self.max_duration = 3600
else:
self.max_duration = int(self.max_duration)
if self.out is None:
self.out = Path("./default_out.txt")
self._done = False
        self._initiated = False
        self._separator = "_" * 120
try:
self.speech_recognizer: Union[speechsdk.SpeechRecognizer, None] = None
self.speech_config = speechsdk.SpeechConfig(subscription=self.subscription, region=self.region)
self.speech_config.speech_recognition_language = kwargs.get("language", "en-US")
            self._initiated = True
except (RuntimeError, ValueError) as e:
logger.error(e)
logger.error("Unable to connect to Azure service")
logger.error("".join(traceback.format_exception(type(e), e, e.__traceback__)))
self._done = True
def from_file(self, filepath: Path) -> None:
self._done = False
        if self._initiated:
logger.info(f"Running speech to text from file: {filepath.resolve()}")
audio_input = speechsdk.AudioConfig(filename=str(filepath.resolve()))
self.speech_recognizer = speechsdk.SpeechRecognizer(
speech_config=self.speech_config, audio_config=audio_input
)
self._start_recognition()
else:
logger.error("Instance not connected to Azure")
def from_mic(self) -> None:
self._done = False
        if self._initiated:
logger.info("Running speech to text from microphone!")
self.speech_recognizer = speechsdk.SpeechRecognizer(speech_config=self.speech_config)
self._start_recognition()
else:
logger.error("Instance not connected to Azure")
def _start_recognition(self) -> None:
self.speech_recognizer.recognized.connect(self._save_to_file)
# self.speech_recognizer.recognized.connect(lambda evt: self._save_to_file((evt)))
self.speech_recognizer.session_stopped.connect(self._stop_cb)
logger.info("Starting continuous recognition")
self.speech_recognizer.start_continuous_recognition()
self.start = time.time()
try:
while not self._done and time.time() - self.start < self.max_duration:
time.sleep(0.5)
except KeyboardInterrupt:
            logger.info(self._separator)
            logger.info("Stopped by user")
            logger.info(self._separator)
logger.info("Finished or timed out, please wait!")
self._stop_cb()
self._done = True
def _save_to_file(self, resultEvent: speechsdk.SpeechRecognitionEventArgs, out: Union[Path, None] = None) -> None:
if out is None:
out = self.out
out = str(out.resolve())
logger.info(f"Saving results to: {out}")
logger.info(f"----{resultEvent.result.text}")
with open(out, "a") as f:
f.write(resultEvent.result.text)
f.write("\n")
def _stop_cb(self, evt: Union[str, None] = None):
self.speech_recognizer.stop_continuous_recognition()
if self._done is False:
logger.info("Stopping continuous recognition!")
self._done = True
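# Usage sketch; the subscription key and the audio file path are placeholders.
if __name__ == "__main__":
    stt = SpeechToText(subscription="<your-subscription-key>", region="westeurope",
                       out=Path("transcript.txt"), max_duration=120)
    stt.from_file(Path("recording.wav"))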
| 2.703125
| 3
|
mmderain/models/backbones/spdnet.py
|
biubiubiiu/derain-toolbox
| 4
|
12783807
|
from functools import partial
from typing import List, Sequence, Tuple
import einops
import torch
import torch.nn.functional as F
from pytorch_wavelets import DWTForward, DWTInverse
from torch import nn
from mmderain.models.common import get_rcp, make_layer, sizeof
from mmderain.models.layers import SELayer
from mmderain.models.registry import BACKBONES
class ConvAct(nn.Module):
"""2D Convolution + Activation"""
def __init__(self, in_planes: int, out_planes: int) -> None:
super().__init__()
self.model = nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=3, padding=1, padding_mode='reflect'),
nn.ReLU(inplace=True)
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.model(x)
class SEResBlock(nn.Module):
"""SE-ResBlock"""
def __init__(self, planes: int) -> None:
super().__init__()
self.model = nn.Sequential(
nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True),
SELayer(planes, reduction=1)
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return x + self.model(x)
class SRiR(nn.Module):
"""SE-ResBlock in Residual Block"""
def __init__(self, planes: int, n_resblock: int) -> None:
super().__init__()
self.model = nn.Sequential(
*[SEResBlock(planes) for _ in range(n_resblock)],
nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1)
)
self.act = nn.ReLU(inplace=True)
def forward(self, x: torch.Tensor) -> torch.Tensor:
out = x + self.model(x)
out = self.act(out)
return out
class RCPEM(nn.Module):
"""RCP Extration Module"""
def __init__(self, in_planes: int, out_planes: int, n_resblock: int) -> None:
super().__init__()
self.model = nn.Sequential(
ConvAct(in_planes, out_planes//2),
ConvAct(out_planes//2, out_planes),
SRiR(out_planes, n_resblock)
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
feat = get_rcp(x).repeat(1, x.size(1), 1, 1)
return self.model(feat)
class IFM(nn.Module):
"""Interactive Fusion Module"""
def __init__(self, planes: int) -> None:
super().__init__()
self.conv0 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1)
self.conv1 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1)
self.conv2 = nn.Conv2d(planes*2, 2, kernel_size=3, stride=1, padding=1)
self.conv3 = nn.Conv2d(planes*2, 2, kernel_size=3, stride=1, padding=1)
def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
feat_x0 = self.conv0(x)
feat_y0 = self.conv1(y)
w0 = torch.sigmoid(feat_x0 * feat_y0)
x_prime = x * w0
y_prime = y * w0
wx1, wx2 = torch.chunk(self.conv2(torch.cat([x, x_prime], dim=1)), chunks=2, dim=1)
        wy1, wy2 = torch.chunk(self.conv3(torch.cat([y, y_prime], dim=1)), chunks=2, dim=1)  # weights for the y branch come from the y features
out_x = x*wx1 + x_prime*wx2
out_y = y*wy1 + y_prime*wy2
out = torch.cat([out_x, out_y], dim=1)
return out
class WMLMDecomposition(nn.Module):
def __init__(self, planes: int, is_first_level: bool) -> None:
super().__init__()
self.is_first_level = is_first_level
self.dwt = DWTForward(J=1, wave='haar')
self.conv = ConvAct(planes*2, planes) if is_first_level else ConvAct(planes*4, planes)
def forward(self, x: torch.Tensor) -> torch.Tensor:
if self.is_first_level:
return self.conv(x)
else:
return self.conv(self._decomp(x))
def _decomp(self, x: torch.Tensor) -> torch.Tensor:
xl, xh = self.dwt(x)
xl = xl.unsqueeze(2)
feat = torch.cat([xh[0], xl], dim=2)
out = einops.rearrange(feat, 'b c n h w -> b (n c) h w')
return out
class WMLMFusion(nn.Module):
def __init__(self, planes: int) -> None:
super().__init__()
self.idwt = DWTInverse(wave='haar')
self.conv = ConvAct(planes, planes*4)
self.upsample = nn.Sequential(
nn.ReflectionPad2d(1),
nn.ConvTranspose2d(planes, planes, kernel_size=3, stride=2),
nn.ReLU(inplace=True)
)
self.last = nn.Sequential(
SEResBlock(planes),
nn.ReLU(inplace=True)
)
def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
x = self.conv(x)
x = self._reconstruct(x)
x = self.upsample(x)
x = F.interpolate(x, size=sizeof(y))
y = x + y
return self.last(y)
def _reconstruct(self, x: torch.Tensor) -> torch.Tensor:
feat = einops.rearrange(x, 'b (c1 c2) h w -> b c1 c2 h w', c2=4)
xl, xh = torch.split(feat, [1, 3], dim=2)
xl = xl.squeeze(dim=2)
out = self.idwt((xl, [xh]))
return out
class WMLM(nn.Module):
"""Wavelet-based Multi-level Module"""
def __init__(self, planes: int, n_level: int, n_srir: int, n_resblock: int) -> None:
super().__init__()
self.decomposition = nn.ModuleList([
WMLMDecomposition(planes, is_first_level=(i == 0))
for i in range(n_level)
])
self.trunks = nn.ModuleList([
make_layer(SRiR, n_srir, planes=planes, n_resblock=n_resblock)
for _ in range(n_level)
])
self.fusions = nn.ModuleList([
WMLMFusion(planes)
for _ in range(n_level-1)
])
def forward(self, x: torch.Tensor) -> torch.Tensor:
init_features = []
prev = x
for model in self.decomposition: # level 0, level 1, ... (top-down)
prev = model(prev)
init_features.append(prev)
out_features = []
for init_feat, model in zip(init_features, self.trunks):
feat = model(init_feat)
out_features.append(feat)
out = out_features.pop() # feature from bottom level
for model in self.fusions:
out = model(out, out_features.pop()) # bottom-up fusion
return out
class Subnet(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
mid_channels: int,
n_level: int,
n_srir: int,
n_resblock: int,
index: int
) -> None:
super().__init__()
if index > 0:
conv3x3 = partial(nn.Conv2d, kernel_size=3, stride=1, padding=1)
self.fusion1 = conv3x3(mid_channels*(index+1), mid_channels)
self.fusion2 = conv3x3(mid_channels*(index+1), mid_channels)
else:
self.fusion1 = nn.Identity()
self.fusion2 = nn.Identity()
self.rcpem = RCPEM(in_channels, mid_channels, n_resblock)
self.ifm = IFM(mid_channels)
self.wmlm = WMLM(mid_channels, n_level, n_srir, n_resblock)
self.last = nn.Conv2d(mid_channels, out_channels, kernel_size=3, stride=1, padding=1)
def forward(self, x: torch.Tensor, feats: Sequence[torch.Tensor]) -> Tuple[torch.Tensor]:
rcp_feat = self.rcpem(x)
feat0 = self.fusion1(torch.cat(feats, dim=1))
feat1 = self.ifm(feat0, rcp_feat)
feat2 = self.wmlm(feat1)
feat3 = self.fusion2(torch.cat([feat2] + feats[:-1], dim=1))
out = self.last(feat3)
return out, feat2
@BACKBONES.register_module()
class SPDNet(nn.Module):
"""SPDNet Network Structure
Paper: Structure-Preserving Deraining with Residue Channel Prior Guidance
Official Code: https://github.com/Joyies/SPDNet
Args:
in_channels (int): Channel number of inputs.
out_channels (int): Channel number of outputs.
mid_channels (int): Channel number of intermediate features. Default: 32.
n_stage (int): Number of stages. Default: 3.
n_level (int): Number of levels in WMLM. Default: 3.
n_srir (int): Number of SRiR blocks of each level in WMLM. Default: 3.
n_resblock (int): Number of Resblocks in SRiR Module. Default: 3.
"""
def __init__(
self,
in_channels: int,
out_channels: int,
mid_channels: int = 32,
n_stage: int = 3,
n_level: int = 3,
n_srir: int = 3,
n_resblock: int = 3
) -> None:
super().__init__()
self.head = nn.Sequential(
ConvAct(in_channels, mid_channels//2),
ConvAct(mid_channels//2, mid_channels),
)
self.subnets = nn.ModuleList([
Subnet(in_channels, out_channels, mid_channels, n_level, n_srir, n_resblock, i)
for i in range(n_stage)
])
def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
outputs = []
prev_out = x
init_feat = self.head(x)
features = [init_feat]
for net in self.subnets:
out, feat = net(prev_out, features)
prev_out = out
outputs.append(out)
features.insert(0, feat)
return outputs
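# Shape-check sketch; the input resolution below is arbitrary.
if __name__ == '__main__':
    net = SPDNet(in_channels=3, out_channels=3)
    stage_outputs = net(torch.rand(1, 3, 64, 64))
    print([tuple(o.shape) for o in stage_outputs])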
| 2.3125
| 2
|
start.py
|
aiidalab/aiidalab-manage-structures
| 0
|
12783808
|
import ipywidgets as ipw
def get_start_widget(appbase, jupbase):
#http://fontawesome.io/icons/
template = """
<table>
<tr>
<th style="text-align:center"></th>
<th style="width:70px" rowspan=2></th>
<th style="text-align:center"></th>
<th style="width:70px" rowspan=2></th>
<th style="text-align:center"></th>
    </tr>
    <tr>
<td valign="top"><ul>
<li><a href="{appbase}/examples.ipynb" target="_blank">Look at the examples</a>
</ul></td>
<td valign="top"><ul>
<li><a href="{appbase}/import_from_cod.ipynb" target="_blank">Upload from the CoD</a>
</ul></td>
<td valign="top"><ul>
<li><a href="{appbase}/upload_structure.ipynb" target="_blank">Upload from computer</a>
<li><a href="{appbase}/upload_structures.ipynb" target="_blank">Upload from computer (multi) </a>
</ul></td>
</tr></table>
"""
html = template.format(appbase=appbase, jupbase=jupbase)
return ipw.HTML(html)
#EOF
| 2.703125
| 3
|
src/FFEAT/test/strategies/mutation/ReplaceWithNormalTest.py
|
PatrikValkovic/MasterThesis
| 0
|
12783809
|
###############################
#
# Created by <NAME>
# 3/16/2021
#
###############################
import unittest
import torch as t
import ffeat
from ffeat.strategies import mutation
class ReplaceWithNormalTest(unittest.TestCase):
def test_norm(self):
m = mutation.Replace(t.distributions.Normal(0.0, 5.0), 0.02)
pop = t.randn((1000,400))
(newpop,), kargs = m(pop)
self.assertEqual(newpop.shape, (1000,400))
self.assertIs(pop, newpop)
def test_not_inplace(self):
m = mutation.Replace(t.distributions.Normal(0.0, 5.0), 0.02, in_place=False)
pop = t.randn((1000,400))
(newpop,), kargs = m(pop)
self.assertEqual(newpop.shape, (1000,400))
self.assertIsNot(pop, newpop)
def test_rate_callable(self):
m = mutation.Replace(t.distributions.Normal(0.0, 5.0), ffeat.utils.decay.Linear(0.1, 0.01))
pop = t.randn((1000,400))
(newpop,), kargs = m(pop, iteration=13, max_iteration=23)
self.assertEqual(newpop.shape, (1000,400))
self.assertIs(pop, newpop)
def test_rate_high(self):
with self.assertRaises(ValueError):
mutation.Replace(t.distributions.Normal(0.0, 5.0), 1.6)
    def test_rate_low(self):
        with self.assertRaises(ValueError):
            mutation.Replace(t.distributions.Normal(0.0, 5.0), -0.6)
def test_rate_high_callable(self):
m = mutation.Replace(t.distributions.Normal(0.0, 5.0), ffeat.utils.decay.Linear(1.2, 1.4))
pop = t.randn((1000,400))
with self.assertRaises(ValueError):
m(pop, iteration=13, max_iteration=23)
def test_rate_low_callable(self):
m = mutation.Replace(t.distributions.Normal(0.0, 5.0), ffeat.utils.decay.Linear(-1.6, -0.2))
pop = t.randn((1000,400))
with self.assertRaises(ValueError):
m(pop, iteration=13, max_iteration=23)
def test_invalid_distribution_shape(self):
m = mutation.Replace(t.distributions.Normal(0.0, t.ones((413,))), 0.02)
pop = t.randn((1000,400))
with self.assertRaises(ValueError):
m(pop, iteration=13, max_iteration=23)
@unittest.skipIf(not t.cuda.is_available(), 'CUDA not available')
def test_cuda(self):
m = mutation.Replace(t.distributions.Normal(0.0, 5.0), 0.02)
pop = t.randn((1000,400))
(newpop,), kargs = m(pop)
self.assertEqual(newpop.shape, (1000,400))
self.assertIs(pop, newpop)
@unittest.skipIf(not t.cuda.is_available(), 'CUDA not available')
def test_not_inplace_cuda(self):
m = mutation.Replace(t.distributions.Normal(0.0, 5.0), 0.02, in_place=False)
pop = t.randn((1000,400))
(newpop,), kargs = m(pop)
self.assertEqual(newpop.shape, (1000,400))
self.assertIsNot(pop, newpop)
if __name__ == '__main__':
unittest.main()
| 2.25
| 2
|
noxfile.py
|
astrogewgaw/priwo
| 7
|
12783810
|
<reponame>astrogewgaw/priwo<gh_stars>1-10
import nox
py_versions = [
"3.6",
"3.7",
"3.8",
"3.9",
]
@nox.session
def lint(session):
"""
Lint all files in priwo.
"""
session.install("black")
session.run("black", ".")
@nox.session(
python=py_versions,
reuse_venv=True,
)
def tests(session):
"""
Run tests for priwo.
"""
# Install dependencies.
session.install(
"pytest",
"pytest-cov",
"deepdiff",
)
# Install the package in development mode.
session.run(
"pip",
"install",
"-e",
".",
)
# Run the tests using pytest and generate a coverage report.
session.run(
"pytest",
"-vv",
"--cov",
"--cov-report",
"term-missing",
"tests",
)
| 2.15625
| 2
|
transactions/apps/discounts.py
|
cnds/wxdemo
| 0
|
12783811
|
<gh_stars>0
from flask import request, jsonify
from .base import Base
from .json_validate import SCHEMA
class Discounts(Base):
def get(self):
params = request.args.to_dict()
is_valid, tag = self.validate_dict_with_schema(params,
SCHEMA['discounts_get'])
if not is_valid:
return self.error_msg(self.ERR['invalid_query_params'], tag)
flag, discounts = self.db.find_by_condition('discounts', params)
if not flag:
return '', 500
return jsonify({'discounts': discounts})
def post(self):
is_valid, data = self.get_params_from_request(request,
SCHEMA['discounts_post'])
if not is_valid:
return self.error_msg(self.ERR['invalid_body_content'], data)
store_id = data['storeId']
discount_base = data['base']
flag, discount = self.db.find_by_condition(
'discounts', {'storeId': store_id, 'base': discount_base})
if not flag:
return '', 500
if discount:
return self.error_msg(self.ERR['conflict_discount'])
result = self.db.create('discounts', data)
if not result:
return '', 500
return jsonify(result), 201
class Discount(Base):
def get(self, discount_id):
params = request.args.to_dict()
store_id = params.get('storeId')
flag, discount = self.db.find_by_id('discounts', discount_id)
if not flag:
return '', 500
if not discount:
return self.error_msg(self.ERR['discount_not_exist'])
if store_id:
store_id_from_db = discount['storeId']
if store_id != store_id_from_db:
return self.error_msg(self.ERR['permission_denied'])
return jsonify(discount)
def put(self, discount_id):
is_valid, data = self.get_params_from_request(request,
SCHEMA['discount_put'])
if not is_valid:
return self.error_msg(self.ERR['invalid_body_content'], data)
flag, result = self.db.update('discounts', {'id': discount_id},
{'$set': data})
if not flag:
return '', 500
return jsonify(result)
def delete(self, discount_id):
params = request.args.to_dict()
store_id = params.get('storeId')
flag, discount = self.db.find_by_id('discounts', discount_id)
if not flag:
return '', 500
if not discount:
return self.error_msg(self.ERR['discount_not_exist'])
if store_id:
store_id_from_db = discount['storeId']
if store_id != store_id_from_db:
return self.error_msg(self.ERR['permission_denied'])
flag, result = self.db.remove('discounts', discount_id)
if not flag:
return '', 500
if result is None:
return self.error_msg(self.ERR['discount_has_been_removed'])
return jsonify(result)
| 2.515625
| 3
|
Basset/pretrained_model_reloaded_th.py
|
Luma-1994/lama
| 137
|
12783812
|
<reponame>Luma-1994/lama
import torch
import torch.nn as nn
from functools import reduce
from torch.autograd import Variable
class LambdaBase(nn.Sequential):
def __init__(self, fn, *args):
super(LambdaBase, self).__init__(*args)
self.lambda_func = fn
def forward_prepare(self, input):
output = []
for module in self._modules.values():
output.append(module(input))
return output if output else input
class Lambda(LambdaBase):
def forward(self, input):
return self.lambda_func(self.forward_prepare(input))
class LambdaMap(LambdaBase):
def forward(self, input):
return list(map(self.lambda_func,self.forward_prepare(input)))
class LambdaReduce(LambdaBase):
def forward(self, input):
return reduce(self.lambda_func,self.forward_prepare(input))
def get_model(load_weights = True):
# alphabet seems to be fine:
"""
https://github.com/davek44/Basset/tree/master/src/dna_io.py#L145-L148
seq = seq.replace('A','0')
seq = seq.replace('C','1')
seq = seq.replace('G','2')
seq = seq.replace('T','3')
"""
pretrained_model_reloaded_th = nn.Sequential( # Sequential,
nn.Conv2d(4,300,(19, 1)),
nn.BatchNorm2d(300),
nn.ReLU(),
nn.MaxPool2d((3, 1),(3, 1)),
nn.Conv2d(300,200,(11, 1)),
nn.BatchNorm2d(200),
nn.ReLU(),
nn.MaxPool2d((4, 1),(4, 1)),
nn.Conv2d(200,200,(7, 1)),
nn.BatchNorm2d(200),
nn.ReLU(),
nn.MaxPool2d((4, 1),(4, 1)),
Lambda(lambda x: x.view(x.size(0),-1)), # Reshape,
nn.Sequential(Lambda(lambda x: x.view(1,-1) if 1==len(x.size()) else x ),nn.Linear(2000,1000)), # Linear,
nn.BatchNorm1d(1000,1e-05,0.1,True),#BatchNorm1d,
nn.ReLU(),
nn.Dropout(0.3),
nn.Sequential(Lambda(lambda x: x.view(1,-1) if 1==len(x.size()) else x ),nn.Linear(1000,1000)), # Linear,
nn.BatchNorm1d(1000,1e-05,0.1,True),#BatchNorm1d,
nn.ReLU(),
nn.Dropout(0.3),
nn.Sequential(Lambda(lambda x: x.view(1,-1) if 1==len(x.size()) else x ),nn.Linear(1000,164)), # Linear,
nn.Sigmoid(),
)
if load_weights:
sd = torch.load('model_files/pretrained_model_reloaded_th.pth')
pretrained_model_reloaded_th.load_state_dict(sd)
return pretrained_model_reloaded_th
model = get_model(load_weights = False)
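# Shape sketch (assumed input, not part of the original file): Basset operates on
# one-hot encoded DNA of length 600, laid out as (batch, 4, 600, 1). After the three
# conv/pool blocks the feature map is 200 channels x 10 positions = 2000 values,
# matching the first Linear(2000, 1000) layer; the final Sigmoid yields 164 scores.
#   x = torch.randn(8, 4, 600, 1)
#   y = model.eval()(x)   # -> shape (8, 164)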
| 2.40625
| 2
|
src/olymptester/cpp_template.py
|
jrojer/easy-stdio-tester
| 1
|
12783813
|
cpp_template = '''\
#include<iostream>
#include<algorithm>
#include<functional>
#include<vector>
#include<unordered_map>
#include<unordered_set>
#include<map>
#include<set>
#include<string>
#include<cstdlib>
#include <math.h>
#define int int64_t
using namespace std;
#define verify(condition) { \
if(!(condition)) { \
cout << "line: " << __LINE__ << ", expected: " << #condition << endl; \
exit(0); \
} \
}
#define verifyEquals(a,b) { if(!((a) == (b))) { cout << "line: " << __LINE__ << ", expected: " << (a) << " to be equal " << (b) << endl; exit(0); } }
int n;
int m;
vector<int> vec;
void test(){
verify(1 == 1);
cout << "OK" << endl;
}
int32_t main() {
ios_base::sync_with_stdio(0);
cin.tie(0);
for(int i = 0; i < n; i++){
for(int j = 0; j < m; j++){
}
}
cin >> n;
cout << 2*n;
return 0;
}
'''
| 2.359375
| 2
|
eikonapi/views.py
|
royaljs/refinitiv-eikon-proxy-server
| 2
|
12783814
|
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
import eikon as ek
from pandas import DataFrame
import json
from datetime import datetime
import os
import dateutil
#### SET EIKON APP KEY ####
ek.set_app_key('SET_APP_KEY_HERE')
##### NEWS #####
class NewsHeadlineView(APIView):
"""
GET /news/headlines
"""
    def get(self, request):
        queryString = request.query_params.get('queryString', None)
        if queryString is None: return Response("queryString not provided. Please add queryString in query parameters.", status=400) # Response when the required query parameter 'queryString' is missing
        count = request.query_params.get('count', None)
        if count is not None: count = int(count)
        else: count = 10
        dateFrom = request.query_params.get('dateFrom', None)
        dateTo = request.query_params.get('dateTo', None)
        # Handle EikonError
        try:
            result = ek.get_news_headlines(queryString, count, dateFrom, dateTo)
        except dateutil.parser._parser.ParserError:
            return Response('Invalid Date', status=400)
        except ek.eikonError.EikonError as err:
            return Response(str(err), status=400)
        return Response(json.loads(result.to_json(orient='index', date_format='iso', date_unit='s')), status=200) # to_json on a pandas DataFrame adds redundant quoting, so json.loads is applied
class NewsStoryView(APIView):
"""
GET /news/stories
"""
    def get(self, request):
        storyId = request.query_params.get('storyId', None)
        if storyId is None: return Response("storyId not provided. Please add storyId in query parameters.", status=400) # Response when the required query parameter 'storyId' is missing
        # Handle EikonError
        try:
            result = ek.get_news_story(storyId)
        except ek.eikonError.EikonError as err:
            return Response(str(err), status=400)
        return Response(result, status=200)
##### DATA #####
class DataView(APIView):
"""
GET /data
"""
    def get(self, request):
        instruments = request.query_params.get('instruments', None)
        if instruments is None: return Response("instruments not provided. Please add instruments in query parameters.", status=400) # Response when the required query parameter 'instruments' is missing
        instruments = instruments.replace(" ", "").split(",")
        fields = request.query_params.get('fields', None)
        if fields is None: return Response("fields not provided. Please add fields in query parameters.", status=400) # Response when the required query parameter 'fields' is missing
        fields = fields.replace(" ", "").split(",")
        # Handle EikonError
        try:
            result = ek.get_data(instruments, fields)[0]
        except ek.eikonError.EikonError as err:
            return Response(str(err), status=400)
        return Response(json.loads(result.to_json(orient='index')), status=200) # get_data returns a tuple rather than JSON, so index [0] before to_json; json.loads removes the redundant quoting added by to_json
class TimeSeriesDataView(APIView):
"""
GET /data/timeseries
"""
    def get(self, request):
        instruments = request.query_params.get('instruments', None)
        if instruments is None: return Response("instruments not provided. Please add instruments in query parameters.", status=400) # Response when the required query parameter 'instruments' is missing
        instruments = instruments.replace(" ", "").split(",")
        fields = request.query_params.get('fields', '*').replace(" ", "").split(",")
        dateFrom = request.query_params.get('dateFrom', None)
        dateTo = request.query_params.get('dateTo', None)
        interval = request.query_params.get('interval', None)
        # Response for an invalid RIC
        try:
            result = ek.get_timeseries(instruments, fields, start_date=dateFrom, end_date=dateTo, interval=interval)
        except ek.eikonError.EikonError as err:
            return Response(str(err), status=400)
        # Save the result to an Excel file
        directory = f'{os.path.dirname(__file__)}\\data\\timeseries\\'
        try:
            if not os.path.exists(directory):
                os.makedirs(directory)
        except OSError as err:
            print(str(err))
        result.to_excel(f'{directory}\\{datetime.today().strftime("%Y%m%d%H%M%S")}_{instruments}.xlsx')
        # If interval is minute, hour, daily, weekly, monthly, quarterly or yearly (i.e. not tick)
        if interval != 'tick':
            return Response(json.loads(result.to_json(orient='index', date_format='iso', date_unit='ms')), status=200) # to_json on a pandas DataFrame adds redundant quoting, so json.loads is applied
        # If interval is tick (duplicate timestamps in the index need extra handling)
        elif interval == 'tick':
            dictByColumns = result.apply(dict, axis=1) # apply dict across the columns (VALUE, VOLUME)
            result = dictByColumns.groupby(dictByColumns.index).apply(list) # group by the index (timestamp) and collect into lists
            return Response(json.loads(result.to_json(orient='index', date_format='iso', date_unit='ms')), status=200) # to_json on a pandas DataFrame adds redundant quoting, so json.loads is applied
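# Illustrative requests against this API (host, port and parameter values are hypothetical):
#   GET http://localhost:8000/news/headlines?queryString=Tesla&count=5
#   GET http://localhost:8000/data?instruments=AAPL.O,MSFT.O&fields=TR.Revenue
#   GET http://localhost:8000/data/timeseries?instruments=AAPL.O&interval=daily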
| 2.53125
| 3
|
jinja/app_v0.py
|
patrickdinneen/python101
| 0
|
12783815
|
from flask import Flask
from dataclasses import dataclass, field
from typing import List, Dict
app = Flask(__name__)
@dataclass
class Human:
name: str
pets: List[str] = field(default_factory=list)
def get_humans() -> Dict[str, Human]:
humans = [
Human('<NAME>', pets=['Bork the Dog',
'Henrietta the Chicken',
'Davis the Duck']),
Human('<NAME>', pets=['127 the Cellular Automata']),
Human('<NAME>', pets=[])
]
return {human.name.split()[0].lower(): human for human in humans}
humans = get_humans()
@app.route('/human/<name>', methods=['GET'])
def get_human(name):
human = humans.get(name.lower())
if human:
html_part1 = f"""
<html>
<body>
<h1>{human.name}</h1>
"""
pets_html = ""
if human.pets:
pets_html += "<h2>Pets</h2><ul>"
for pet in human.pets:
pets_html += f"<li>{pet}</li>"
pets_html += "</ul>"
else:
pets_html += "<h2>No pets! :(</h2>"
html_part2 = "</body></html>"
return html_part1 + pets_html + html_part2
else:
return f"Couldn't find human {name}", 404
| 3.328125
| 3
|
dcs/utils.py
|
racker/python-dcs
| 0
|
12783816
|
# Copyright 2012 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
try:
import simplejson as json
except ImportError:
import json
from copy import deepcopy
__all__ = ['merge_dictionary', 'file_to_context']
def merge_dictionary(dst, src, merge_list_keys=None):
"""
Merge a dictionary, and if there are any lists with matching keys, append
them.
"""
    if merge_list_keys is None:
merge_list_keys = []
src = deepcopy(src)
stack = [(dst, src)]
while stack:
current_dst, current_src = stack.pop()
for key in current_src:
if key not in current_dst:
current_dst[key] = current_src[key]
else:
if isinstance(current_src[key], dict) and \
isinstance(current_dst[key], dict):
stack.append((current_dst[key], current_src[key]))
elif key in merge_list_keys:
if isinstance(current_src[key], list) and \
isinstance(current_dst[key], list):
current_dst[key].extend(current_src[key])
else:
raise TypeError('key %s was not of list types '
'(source: %s dest: %s)' % (key,
type(current_src[key]), type(current_dst[key])))
else:
current_dst[key] = current_src[key]
return dst
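# Illustrative behaviour (values are made up): with merge_list_keys=['tags'],
# list values under 'tags' are appended rather than replaced.
#   merge_dictionary({'tags': [1], 'n': 1}, {'tags': [2], 'n': 2}, merge_list_keys=['tags'])
#   # -> {'tags': [1, 2], 'n': 2}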
def file_to_context(filename):
file_contents = None
with open(filename, 'r') as fp:
file_contents = fp.read()
file_data = None
try:
file_data = json.loads(file_contents)
except SyntaxError, e:
e.filename = filename
raise
except Exception, e:
e.filename = filename
raise
return file_data
| 2.515625
| 3
|
lib/neuralmetrics/setup.py
|
mohammadbashiri/bashiri-et-al-2021
| 2
|
12783817
|
<reponame>mohammadbashiri/bashiri-et-al-2021
#!/usr/bin/env python
from setuptools import setup, find_packages
from os import path
here = path.abspath(path.dirname(__file__))
setup(
name="neuralmetrics",
version="0.0.0",
description="Metrics for evaluation and comparison of neural prediction models",
author="<NAME> and <NAME>",
author_email="<EMAIL>",
packages=find_packages(exclude=[]),
install_requires=[],
)
| 1.234375
| 1
|
ex2/operations/operations.py
|
hentt30/lab7-ces22
| 0
|
12783818
|
"""
Interface for operations that the system can perform
"""
from abc import ABC, abstractmethod
class Operation(ABC):
"""
Abstract class representing a generic operation
"""
@abstractmethod
def execute(self):
"""
Execute operation
"""
pass
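# Minimal sketch of a concrete operation (illustrative only; PrintOperation and
# its message argument are not part of the original module):
#   class PrintOperation(Operation):
#       def __init__(self, message):
#           self.message = message
#       def execute(self):
#           print(self.message)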
| 3.390625
| 3
|
cryptor.pyw
|
MiserableHating/KinterCryptor
| 0
|
12783819
|
import os
from Crypto.Cipher import AES
from Crypto.Hash import SHA256
from Crypto import Random
from tkinter import *
from tkinter.messagebox import *
from tkinter.ttk import Label
import sys
import time
from threading import Thread
import webbrowser
import tkinter as tk
Kinter = tk.Tk()
Kinter.title("Kinter Cryptor")
def encrypt(key, filename):
chunksize = 64*1024
outputFile = "(encrypted)"+filename
filesize = str(os.path.getsize(filename)).zfill(16)
IV = Random.new().read(16)
encryptor = AES.new(key, AES.MODE_CBC, IV)
with open(filename, 'rb') as infile:
with open(outputFile, 'wb') as outfile:
outfile.write(filesize.encode('utf-8'))
outfile.write(IV)
while True:
chunk = infile.read(chunksize)
if len(chunk) == 0:
break
elif len(chunk) % 16 != 0:
chunk += b' ' * (16 - (len(chunk) % 16))
outfile.write(encryptor.encrypt(chunk))
def decrypt(key, filename):
chunksize = 64*1024
outputFile = filename[11:]
with open(filename, 'rb') as infile:
filesize = int(infile.read(16))
IV = infile.read(16)
decryptor = AES.new(key, AES.MODE_CBC, IV)
with open(outputFile, 'wb') as outfile:
while True:
chunk = infile.read(chunksize)
if len(chunk) == 0:
break
outfile.write(decryptor.decrypt(chunk))
outfile.truncate(filesize)
def getKey(password):
hasher = SHA256.new(password.encode('utf-8'))
return hasher.digest()
def Main():
string = StringVar()
string.set("Voulez-vous (E)ncrypter ou (D)crypter ?: ")
entree = Entry(Kinter, textvariable=string, width=30, bg="black", fg="white")
entree.pack(side=TOP)
if string == "E":
filename = input("Fichier à Encrypter: ")
password = input("<PASSWORD> de crypt<PASSWORD>: ")
encrypt(getKey(password), filename)
print("Fait.")
    elif string.get() == 'D':
filename = input("Fichier à Décrypter: ")
password = input("<PASSWORD> dé<PASSWORD>: ")
decrypt(getKey(password), filename)
print("Fait.")
else:
print("Aucune option séléctionée, fermeture...")
if __name__ == '__main__':
Main()
Kinter.mainloop()
| 2.921875
| 3
|
host/bin/nrfsvc.py
|
eblot/tde-base
| 0
|
12783820
|
<reponame>eblot/tde-base
#!/usr/bin/env python3
"""Nordic service call extractor for CLANG/LLVM toolchain
"""
from argparse import ArgumentParser
from collections import namedtuple, OrderedDict
from os import environ, rename, walk
from os.path import (basename, dirname, isdir, join as joinpath, normpath,
relpath)
from re import compile as recompile
from subprocess import Popen, TimeoutExpired, DEVNULL, PIPE
from sys import exit, modules, stderr
from tempfile import mkstemp
from traceback import format_exc
# pylint: disable-msg=broad-except,invalid-name,broad-except
class NrfSysCall:
"""
"""
UPGRADE_MARKER = 'NRF_CLANG_SUPPORT'
SVCRE = recompile(r'^\s*SVCALL\((?P<num>[A-Z_]+),\s'
r'(?P<rtype>[a-z][a-z_0-9]+),\s'
r'(?P<name>[a-z][a-z_0-9]+)\s*'
r'\((?P<args>.*)\)\);')
MKCRE = recompile(r'^\s*#define %s' % UPGRADE_MARKER)
FUNC = namedtuple('Func', ('rtype', 'name', 'args', 'line'))
def __init__(self):
self._calls = OrderedDict()
def parse(self, fp):
for nl, line in enumerate(fp, start=1):
line = line.strip()
if self.MKCRE.match(line):
# do not account for already upgraded files
return 0
mo = self.SVCRE.match(line)
if mo:
num = mo.group('num')
if num in self._calls:
raise ValueError('Redefinition of %s @ line %d' %
(num, nl))
args = tuple(arg.strip()
for arg in mo.group('args').split(','))
self._calls[num] = self.FUNC(mo.group('rtype'),
mo.group('name'),
args,
nl)
return len(self._calls)
def generate(self, fp, **kwargs):
self._generate_header(fp, **kwargs)
for num, func in self._calls.items():
print('static inline %s' % func.rtype, file=fp)
print('%s(%s) {' % (func.name, ', '.join(func.args)), file=fp)
argnames = [arg.split()[-1].strip('*') for arg in func.args]
if argnames[-1] == 'void':
argnames.pop()
argcount = len(argnames)
if argcount > 4:
raise ValueError('SVC calls limited to scratch registers')
if argcount:
print(' _SYSCALL%d(%s, %s);' %
(argcount, num, ', '.join(argnames)), file=fp)
else:
print(' _SYSCALL%d(%s);' % (argcount, num), file=fp)
print('}', file=fp)
print('', file=fp)
self._generate_footer(fp, **kwargs)
def _generate_header(self, fp, **kwargs):
values = dict(self.__class__.__dict__)
values.update(kwargs)
header = """
#ifdef __clang__
#ifndef %(hprot)s
#define %(hprot)s
#ifdef __cplusplus
extern "C" {
#endif
// prevent from upgrading nRF52 header files more than once
#define %(UPGRADE_MARKER)s 1
// define system call macros only once
#ifndef _SYSCALL_ARGS
#define _SYSCALL_ARGS(_SC_, ...) \\
    __asm__ __volatile__ ( \\
        "svc %%[SC]" \\
        : "=r"(r0) : [SC]"I" ((uint16_t)_SC_), ##__VA_ARGS__ : "memory"); \\
    return r0;
#define _SCC(X) ((long) (X))
#define _SYSCALL0(_SC_) \\
    register long r0 __asm__("r0"); \\
    _SYSCALL_ARGS(_SC_);
#define _SYSCALL1(_SC_, _a_) \\
    register long r0 __asm__("r0") = _SCC(_a_); \\
    _SYSCALL_ARGS(_SC_, "0"(r0));
#define _SYSCALL2(_SC_, _a_, _b_) \\
    register long r0 __asm__("r0") = _SCC(_a_); \\
    register long r1 __asm__("r1") = _SCC(_b_); \\
    _SYSCALL_ARGS(_SC_, "0"(r0), "r"(r1));
#define _SYSCALL3(_SC_, _a_, _b_, _c_) \\
    register long r0 __asm__("r0") = _SCC(_a_); \\
    register long r1 __asm__("r1") = _SCC(_b_); \\
    register long r2 __asm__("r2") = _SCC(_c_); \\
    _SYSCALL_ARGS(_SC_, "0"(r0), "r"(r1), "r"(r2));
#define _SYSCALL4(_SC_, _a_, _b_, _c_, _d_) \\
    register long r0 __asm__("r0") = _SCC(_a_); \\
    register long r1 __asm__("r1") = _SCC(_b_); \\
    register long r2 __asm__("r2") = _SCC(_c_); \\
    register long r3 __asm__("r3") = _SCC(_d_); \\
    _SYSCALL_ARGS(_SC_, "0"(r0), "r"(r1), "r"(r2), "r"(r3));
#endif // _SYSCALL_ARGS
""" % values
print(header, file=fp)
def _generate_footer(self, fp, **kwargs):
footer = """
#ifdef __cplusplus
}
#endif
#endif // %(hprot)s
#endif // __clang__
""" % kwargs
print(footer, file=fp)
class NrfSvcDef:
CLANGCRE = recompile(r'^\s*#elif defined\(__clang__\)\s*$')
PATCH = r"""
--- a/nrf_svc.h (revision 4491)
+++ b/nrf_svc.h (working copy)
@@ -52,6 +52,9 @@
#ifndef SVCALL
#if defined (__CC_ARM)
#define SVCALL(number, return_type, signature) return_type __svc(number) signature
+#elif defined(__clang__)
+#define SVCALL(number, return_type, signature) \
+ static inline return_type signature;
#elif defined (__GNUC__)
#ifdef __cplusplus
#define GCC_CAST_CPP (uint16_t)
""".lstrip('\n')
def __init__(self):
pass
def parse(self, filename: str):
with open(filename, 'rt') as fp:
for line in fp:
line = line.strip()
if self.CLANGCRE.match(line):
# do not account for already upgraded files
return False
return True
def apply(self, filename: str, dryrun: bool =False):
environment = dict(environ)
environment['LC_ALL'] = 'C'
cwd = dirname(filename)
args = ['patch', '-p1', '--no-backup-if-mismatch', '--silent',
'--reject-file', '/dev/null']
if dryrun:
args.append('--dry-run')
proc = Popen(args, stdin=PIPE, stdout=PIPE, env=environment,
cwd=cwd, universal_newlines=True)
try:
out, _ = proc.communicate(input=self.PATCH, timeout=2.0)
print(out)
except TimeoutExpired:
proc.kill()
def main():
"""Main routine"""
debug = False
kinds = {'svc': 'Patch CALLs', 'wrap': 'Patch SVCALL macros'}
try:
argparser = ArgumentParser(description=modules[__name__].__doc__)
argparser.add_argument('dir', nargs=1,
help='top directory to seek for header files')
argparser.add_argument('-u', '--update', action='store_true',
help='update source file')
argparser.add_argument('-k', '--kind', choices=kinds, required=True,
help='Action to perform: %s' % ', '.join([
'"%s": %s' % it for it in kinds.items()]))
argparser.add_argument('-d', '--debug', action='store_true',
help='enable debug mode')
args = argparser.parse_args()
debug = args.debug
topdir = args.dir[0]
if not isdir(topdir):
argparser.error('Invalid source directory')
if args.kind == 'svc':
for dirpath, dirnames, filenames in walk(topdir):
dirnames[:] = [dn for dn in dirnames if not dn.startswith('.')]
for fn in filenames:
if not fn.endswith('.h'):
continue
filename = normpath(joinpath(dirpath, fn))
count = 0
nrf = NrfSysCall()
with open(filename, 'rt') as hfp:
try:
count = nrf.parse(hfp)
except Exception as exc:
print('Cannot parse file %s' % filename,
file=stderr)
raise
if not count:
continue
if args.update:
print("Upgrade %s: %d syscalls" %
(relpath(filename), count), file=stderr)
hprot = '_CLANG_%s_' % \
basename(filename).upper().replace('.', '_')
with open(filename, 'rt') as ifp:
content = ifp.read()
# use a temporary filename to ensure file is only
# updated if it can be properly generated
ofd, ofname = mkstemp()
with open(ofd, 'wt') as ofp:
ofp.write(content)
nrf.generate(ofp, hprot=hprot)
ofp.close()
rename(ofname, filename)
else:
print("%s needs upgrade: %d syscalls" %
(relpath(filename), count), file=stderr)
if args.kind == 'wrap':
for dirpath, dirnames, filenames in walk(topdir):
for fn in filenames:
if fn != 'nrf_svc.h':
continue
filename = normpath(joinpath(dirpath, fn))
nrf = NrfSvcDef()
if nrf.parse(filename):
if args.update:
print('Patching %s' % filename)
else:
print('%s needs upgrade' % filename)
nrf.apply(filename, not args.update)
except Exception as e:
print('\nError: %s' % e, file=stderr)
if debug:
print(format_exc(chain=False), file=stderr)
exit(1)
except KeyboardInterrupt:
exit(2)
if __name__ == '__main__':
main()
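# Usage sketch (the SDK path below is hypothetical):
#   ./nrfsvc.py -k wrap nrf5_sdk/components/softdevice      # report whether nrf_svc.h needs the clang patch
#   ./nrfsvc.py -k svc -u nrf5_sdk/components/softdevice    # rewrite SVCALL headers in place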
| 2.078125
| 2
|
example/artists/models.py
|
moshthepitt/django-vega-admin
| 4
|
12783821
|
"""
Module for vega-admin test models
"""
from django.db import models
from django.utils.translation import ugettext as _
class Artist(models.Model):
"""
Artist Model class
"""
name = models.CharField(_("Name"), max_length=100)
class Meta:
ordering = ["name"]
verbose_name = "Artist"
verbose_name_plural = "Artists"
def __str__(self):
"""Unicode representation of Song."""
return self.name
class Song(models.Model):
"""Model definition for Song."""
SINGLE = "1"
COLLABO = "2"
SKIT = "3"
SONG_TYPES = ((SINGLE, "Single"), (COLLABO, "Collaboration"), (SKIT,
"Skit"))
artist = models.ForeignKey(
Artist, verbose_name=_("Artist"), on_delete=models.PROTECT)
name = models.CharField(_("Name"), max_length=100)
song_type = models.CharField(
_("Type"), max_length=1, choices=SONG_TYPES, default=SINGLE)
release_date = models.DateField(_("Release Date"))
release_time = models.TimeField(_("Release Time"))
recording_time = models.DateTimeField(
_("Recording Time"), auto_now_add=True)
class Meta:
"""Meta definition for Song."""
verbose_name = "Song"
verbose_name_plural = "Songs"
ordering = ["name"]
def __str__(self):
"""Unicode representation of Song."""
return self.name
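# Illustrative usage from a Django shell (values are made up):
#   artist = Artist.objects.create(name="Test Artist")
#   Song.objects.create(artist=artist, name="Test Song", song_type=Song.COLLABO,
#                       release_date="2020-01-01", release_time="12:00")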
| 2.5
| 2
|
tests/codec.py
|
axsguard/sstp-server
| 223
|
12783822
|
<reponame>axsguard/sstp-server
#!/usr/bin/env python3
import os
import timeit
from sstpd.codec import escape, PppDecoder
decoder = PppDecoder()
def get_enscaped():
frames = [os.urandom(1500) for i in range(2)]
return b''.join([escape(f) for f in frames])
def prof_unescape():
return timeit.timeit('decoder.unescape(data)',
setup='data = get_enscaped()',
globals=globals())
def codec_test():
frame = os.urandom(1500)
escaped = escape(frame)
print("escaped: %d bytes " % len(escaped))
unescaped = PppDecoder().unescape(escaped)
assert len(unescaped) == 1
print("unescaped: %d bytes" % len(unescaped[0]))
assert unescaped[0] == frame
def main():
codec_test()
print('Test unescape...')
print('\t%f' % prof_unescape())
if __name__ == '__main__':
main()
| 2.5625
| 3
|
skmob/utils/tests/test_gislib.py
|
FilippoSimini/scikit-mobility
| 489
|
12783823
|
from skmob.utils import gislib
import math
class TestClustering:
def setup_method(self):
self.point_1 = (43.8430139, 10.5079940)
self.point_2 = (43.5442700, 10.3261500)
self.decimal = 43.8430139
self.DMS = (43, 50, 34.85)
def test_get_distance(self):
output = gislib.getDistance(self.point_1, self.point_2)
assert (math.isclose(output, 36.293701213))
support = gislib.getDistanceByHaversine(self.point_1, self.point_2)
assert (math.isclose(support, output))
output = gislib.getDistance(self.point_1, self.point_1)
assert (math.isclose(output, 0))
def test_get_distance_by_haversine(self):
output = gislib.getDistanceByHaversine(self.point_1, self.point_2)
assert (math.isclose(output, 36.293701213))
output = gislib.getDistanceByHaversine(self.point_1, self.point_1)
assert (math.isclose(output, 0))
# def test_decimal_to_DMS(self):
# output = gislib.DecimalToDMS(self.decimal)
# assert (output[0] == 43)
# assert (output[1] == 50)
# assert (math.isclose(output[2], 34.85))
def test_DMS_to_decimal(self):
output = gislib.DMSToDecimal(self.DMS[0], self.DMS[1], self.DMS[2])
assert (math.isclose(output, 43.84301388888))
def test_get_coordinates_for_distance(self):
output = gislib.getCoordinatesForDistance(self.point_1[0], self.point_1[1], 15)
assert (math.isclose(output[0], 0.134989200863))
assert (math.isclose(output[1], 0.187162559305))
# def test_is_within_distance(self):
# assert (gislib.isWithinDistance(self.point_1, self.point_2, 20))
# assert (gislib.isWithinDistance(self.point_1, self.point_2, 40) is False)
| 2.4375
| 2
|
script.py
|
AndreasLochwitz/twine2json
| 1
|
12783824
|
# -*- coding: utf-8 -*-
import re
import json
def getTitle(storyContent):
pattern = re.compile("[^:\ ][A-Za-zäöüßÄÖÜ\d\ .\[\|\]\"\']*")
result = pattern.search(storyContent)
return result.group(0)
def getContent(storyContent):
pattern = re.compile("^[A-Za-z]{2}[A-Za-zäüößÄÖÜ\w\s\.\:]*", re.MULTILINE)
result = pattern.search(storyContent)
return result.group(0)
def getLinks(storyContent):
pattern = re.compile("\[{2}[A-Za-zäöüß\s\d]*\|[A-Za-zäöüßÄÖÜ\s\d]*\]{2}", re.MULTILINE)
result = pattern.findall(storyContent)
return result
def getLinkDesc(link):
pattern = re.compile("[^\[][A-Za-zäüößÄÖÜ\d\ ]*[^\|]")
result = pattern.search(link)
return result.group(0)
def getLinkTarget(link):
pattern = re.compile("\|[A-Za-zäöüßÄÖÜ\s\d]*")
result = pattern.search(link)
result = result.group(0)[1:]
return result
def readFile(fileName):
f = open(fileName, 'rb')
fileContent = f.read().decode('utf-8')
f.close()
return fileContent
def writeFile(fileName, fileContent):
f = open(fileName, 'wb')
f.write(fileContent.encode('utf-8'))
f.flush()
f.close()
# Read the story file
storyContent = readFile('story.txt')
pattern = re.compile("::[\ A-Za-zäöüß\d\s.\[\|\]\"\']*")
storyParts = pattern.findall(storyContent)
resultDict = dict()
for i in range(len(storyParts)):
currentItem = storyParts[i]
title = getTitle(currentItem)
content = getContent(currentItem)
links = getLinks(currentItem)
linksArray = []
    # Extract the links
    for link in links:
        linkDesc = getLinkDesc(link)
        linkTarget = getLinkTarget(link)
        linksArray.append({'desc': linkDesc, 'target': linkTarget})
resultDict[title] = {'content': content, 'links': linksArray}
jsonData = json.dumps(resultDict, sort_keys=True, indent=4, ensure_ascii=False)
writeFile('story.json', jsonData)
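# Illustrative input/output (assumed Twine passage syntax):
#   story.txt passage   ->  :: Start
#                           Some text about the scene.
#                           [[Go on|Next]]
#   story.json entry    ->  "Start": {"content": "Some text about the scene.",
#                           "links": [{"desc": "Go on", "target": "Next"}]}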
| 2.96875
| 3
|
python/paddle/v2/framework/tests/test_minus_op.py
|
yu239/Paddle
| 0
|
12783825
|
import unittest
import numpy as np
from gradient_checker import GradientChecker, create_op
from op_test_util import OpTestMeta
class MinusOpTest(unittest.TestCase):
__metaclass__ = OpTestMeta
def setUp(self):
self.type = "minus"
self.inputs = {
'X': np.random.random((32, 84)).astype("float32"),
'Y': np.random.random((32, 84)).astype("float32")
}
self.outputs = {'Out': (self.inputs['X'] - self.inputs['Y'])}
class MinusGradTest(GradientChecker):
def test_left(self):
op = create_op("minus")
inputs = {
"X": np.random.random((10, 10)).astype("float32"),
"Y": np.random.random((10, 10)).astype("float32")
}
self.check_grad(op, inputs, ["X", 'Y'], "Out")
if __name__ == '__main__':
unittest.main()
| 3
| 3
|
web/smt/Mydata.py
|
wzh191920/MyData
| 7
|
12783826
|
<reponame>wzh191920/MyData
from ctypes import windll, POINTER, c_int, c_longlong, c_char, c_char_p, Structure, c_short, c_float,\
c_double
ll = windll.LoadLibrary
lib = ll("MyDataAPI.dll")
PI = POINTER(c_int)
PCH = POINTER(c_char)
PSH = POINTER(c_short)
PLL = POINTER(c_longlong)
PF = POINTER(c_float)
PD = POINTER(c_double)
PPCH = POINTER(c_char*2048)
handle = int()
class TableInfo(Structure):
_fields_ = [("id", c_int),("type_id", c_int),("name", c_char*32)]
class TypeInfo(Structure):
_fields_ = [("id", c_int),("type", c_char*16),("name", c_char*32),("type_names", c_char*512)]
class PointInfo(Structure):
_fields_ = [("id", c_int),("table_id", c_int),("name", c_char*32)]
def to_json(self):
json_point = {
"id":self.id,
"table_id":self.table_id,
"name":self.name.decode('utf-8')
}
return json_point
class Serializer(Structure):
_fields_ = [("type", c_char*16), ("offset", c_int), ("buf", c_char*2048), ("data_pos", c_int)]
class DataFileInfo(Structure):
_fields_ = [("start_time", c_longlong),("end_time", c_longlong),("file_size", c_longlong),
("using_rate", c_float), ("filename", c_char*32)]
def MydataConnect(logger):
handle_tmp = PI(c_int(0))
err = lib.MyDataConnect(c_char_p("127.0.0.1".encode('utf-8')), 8182, handle_tmp)
if (err):
logger.error('连接Mydata数据库失败, %d', err)
return False
global handle
handle = handle_tmp.contents.value
return True
PTYPE = POINTER(TypeInfo)
DataTypeMap = {1:'char', 2:'int16', 3:'int32', 4:'int64', 5:'float', 6:'double', 7:'string'}
def Typeid2Typestr(type_infos):
typestrs = []
for type_info in type_infos:
strname = type_info.type_names.decode()
strname = strname.split(';')
typestr = []
for i, data_type in enumerate(type_info.type):
typestr.append((strname[i], DataTypeMap.get(data_type, '未定义')))
typestrs.append(typestr)
return typestrs
def TransformReadableData(type_info, timestamps, sers, logger):
readable_datas = list()
err = 0
for j, ser in enumerate(sers):
readable_data = {0:timestamps[j]}
for i, data_type in enumerate(type_info.type, start=1):
if data_type == 1:
ch = PCH(c_char(0))
err = lib.ReadChar(ser, ch)
if err:
logger.error('ReadChar fail, %d', err)
return None
readable_data[i] = ch.contents.value.decode()
elif data_type == 2:
sh = PSH(c_short(0))
err = lib.ReadShort(ser, sh)
if err:
logger.error('ReadShort fail, %d', err)
return None
readable_data[i] = sh.contents.value
elif data_type == 3:
pi = PI(c_int(0))
err = lib.ReadInt(ser, pi)
if err:
logger.error('ReadInt fail, %d', err)
return None
readable_data[i] = pi.contents.value
elif data_type == 4:
pll = PLL(c_longlong(0))
err = lib.ReadLongLong(ser, pll)
if err:
logger.error('ReadLongLong fail, %d', err)
return None
readable_data[i] = str(pll.contents.value)
elif data_type == 5:
pf = PF(c_float(0))
err = lib.ReadFloat(ser, pf)
if err:
logger.error('ReadFloat fail, %d', err)
return None
readable_data[i] = pf.contents.value
elif data_type == 6:
pd = PD(c_double(0))
err = lib.ReadDouble(ser, pd)
if err:
logger.error('ReadDouble fail, %d', err)
return None
readable_data[i] = pd.contents.value
elif data_type == 7:
ppch = PPCH((c_char*2048)())
err = lib.ReadString(ser, ppch)
if err:
logger.error('ReadString fail, %d', err)
return None
readable_data[i] = ppch.contents.value.decode()
else:
logger.error('错误的类型, %d', data_type)
return None
readable_datas.append(readable_data)
return {'retcode':0, 'length':len(type_info.type), 'readable_datas':readable_datas}
| 2.28125
| 2
|
webapp/routes.py
|
LSanten/mixbase_live
| 0
|
12783827
|
from flask import render_template, url_for, flash, redirect
from webapp import app, db, bcrypt
from webapp.forms import RegistrationForm, LoginForm, PairForm, SingleForm
from webapp.models import User, Pair
from flask_login import login_user, current_user
from sqlalchemy import func, desc
import time
@app.route('/', methods=['GET', 'POST']) #'/' tells us that it's the index of a page | access via http://127.0.0.1:5000/
def home():
form = PairForm()
if form.validate_on_submit():
#user = current_user.id or something
pair = Pair(firstname=form.firstname.data, secondname=form.secondname.data, firstartist=form.firstartist.data, secondartist=form.secondartist.data, comment=form.comment.data, firstgenre=form.firstgenre.data, secondgenre=form.secondgenre.data, guestname=form.guestname.data) ## TODO: add user_id info from current user ID
db.session.add(pair)
db.session.commit() # adds user to database
flash(f'Success! Your transition was added. {form.firstname.data} and {form.secondname.data}!', category='success')
return redirect(url_for('home'))
return render_template('index.html', title="Save DJ Transition", form=form)
@app.route('/singlesong', methods=['GET', 'POST']) #'/' tells us that it's the index of a page | access via http://127.0.0.1:5000/
def singlesong():
form = SingleForm()
if form.validate_on_submit():
#user = current_user.id or something
pair = Pair(firstname=form.firstname.data, secondname=None, firstartist=form.firstartist.data, secondartist=None, comment=form.comment.data, guestname=form.guestname.data) ## TODO: add user_id info from current user ID # create pair instance with input from form
db.session.add(pair)
db.session.commit() # adds user to database
flash(f'Success! Your single song was added. {form.firstname.data} - {form.firstartist.data}!', 'success')
return redirect(url_for('singlesong'))
return render_template('singlesong.html', title="Single Song", form=form)
@app.route('/register', methods=['GET', 'POST'])
def register():
    if current_user.is_authenticated: ## DEBUG: doesn't work # TODO: if logged in, redirect to home
return redirect(url_for('home'))
form = RegistrationForm()
if form.validate_on_submit():
hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8') # hashes entered password
user = User(username=form.username.data, email=form.email.data, password=<PASSWORD>) # create user instance with input from form
db.session.add(user)
db.session.commit() # adds user to database
flash(f'Account created for {form.username.data}!', 'success')
return redirect(url_for('login'))
return render_template('register.html', title='Register', form=form)
@app.route('/login', methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first() #database will be filtered with entered email
if user and bcrypt.check_password_hash(user.password, form.password.data): #check password and email
login_user(user, remember=form.remember.data) #login_user() logs user in
return redirect(url_for('home'))
else:
flash('Login unsuccessful. Please check email and password', 'danger') #danger makes return red #TODO: get success and danger colored frames working
return render_template('login.html', title='Login', form=form)
@app.route('/transitions', methods=['GET', 'POST'])
def transitions():
pairs = Pair.query.order_by(desc(Pair.date_posted)).all() #order by date_posted - show newest first
return render_template('transitions.html', title='Transitions Database', pairs=pairs)
@app.route('/searchguest/<guestname>', methods=['GET', 'POST'])
def searchguest(guestname=None):
    #filter by guestname and order by date (show newest first)
posts = Pair.query.filter(func.lower(Pair.guestname) == func.lower(guestname)).order_by(desc(Pair.date_posted)).all()
return render_template('searchguest.html', guestname=guestname, title="Search", posts=posts)
#DEBUG Routes
@app.route('/hello') # access via http://127.0.0.1:5000/hello/anything
def hellos():
return render_template('hello.html')
@app.route('/hello/<name>') # access via http://127.0.0.1:5000/hello/*anything*
def helloTemplate(name=None):
return render_template('hello.html', name=name, title="Debug Route")
| 2.5625
| 3
|
tudo/ex109/teste.py
|
Ramon-Erik/Exercicios-Python
| 1
|
12783828
|
from moeda import *
preço = float(input('Informe um valor R$'))
resp = str(input('Você deseja apresentar o preço como R$? ')).lower()
if resp == 's':
resp = True
else:
resp = False
print('=' * 30)
print(f'A metade de {moeda(preço)} é {metade(preço, resp)}')
print(f'O dobro de {moeda(preço)} é {dobro(preço, resp)}')
print(f'Aumentando 10% de {moeda(preço)} temos {aumentar(preço, 10, resp)}')
print(f'Diminuindo 13% de {moeda(preço)} temos {diminuir(preço, 13, resp)}')
| 3.46875
| 3
|
doajtest/unit/test_models.py
|
glauberm/doaj
| 0
|
12783829
|
<reponame>glauberm/doaj
import json
import time
from datetime import datetime
from portality import constants
from doajtest.fixtures import ApplicationFixtureFactory, JournalFixtureFactory, ArticleFixtureFactory, BibJSONFixtureFactory, ProvenanceFixtureFactory, BackgroundFixtureFactory
from doajtest.helpers import DoajTestCase
from portality import models
from portality.lib import dataobj
from portality.models import shared_structs
class TestClient(DoajTestCase):
def test_00_structs(self):
# shared structs
dataobj.construct_validate(shared_structs.SHARED_BIBJSON)
dataobj.construct_validate(shared_structs.JOURNAL_BIBJSON_EXTENSION)
# constructed structs
journal = models.Journal()
dataobj.construct_validate(journal._struct)
jbj = models.JournalBibJSON()
dataobj.construct_validate(jbj._struct)
def test_01_imports(self):
"""import all of the model objects successfully?"""
j = models.lookup_model("journal")
ja = models.lookup_model("journal_article")
assert j.__type__ == "journal"
assert ja.__type__ == "journal,article"
def test_02_journal_model_rw(self):
"""Read and write properties into the journal model"""
j = models.Journal()
j.set_id("abcd")
j.set_created("2001-01-01T00:00:00Z")
j.set_last_updated("2002-01-01T00:00:00Z")
j.set_bibjson({"title" : "test"})
j.set_last_manual_update("2004-01-01T00:00:00Z")
j.set_in_doaj(True)
j.add_contact("richard", "<EMAIL>")
j.add_note("testing", "2005-01-01T00:00:00Z")
j.set_owner("richard")
j.set_editor_group("worldwide")
j.set_editor("eddie")
j.set_current_application("0987654321")
j.set_ticked(True)
j.set_bulk_upload_id("abcdef")
j.set_seal(True)
j.add_related_application("123456789", "2003-01-01T00:00:00Z")
j.add_related_application("987654321", "2002-01-01T00:00:00Z")
assert j.id == "abcd"
assert j.created_date == "2001-01-01T00:00:00Z"
assert j.created_timestamp.strftime("%Y-%m-%dT%H:%M:%SZ") == "2001-01-01T00:00:00Z"
assert j.last_updated == "2002-01-01T00:00:00Z"
assert j.last_manual_update == "2004-01-01T00:00:00Z"
assert j.last_manual_update_timestamp.strftime("%Y-%m-%dT%H:%M:%SZ") == "2004-01-01T00:00:00Z"
assert j.is_in_doaj() is True
assert len(j.contacts()) == 1
assert j.get_latest_contact_name() == "richard"
assert j.get_latest_contact_email() == "<EMAIL>"
assert len(j.notes) == 1
assert j.owner == "richard"
assert j.editor_group == "worldwide"
assert j.editor == "eddie"
assert j.current_application == "0987654321"
assert j.is_ticked() is True
assert j.has_seal() is True
assert j.bulk_upload_id == "abcdef"
assert j.last_update_request == "2003-01-01T00:00:00Z"
notes = j.notes
j.remove_note(notes[0])
assert len(j.notes) == 0
j.set_notes([{"note" : "testing", "date" : "2005-01-01T00:00:00Z"}])
assert len(j.notes) == 1
j.remove_current_application()
assert j.current_application is None
# check over the related_applications management functions
related = j.related_applications
assert related is not None
assert j.latest_related_application_id() == "123456789"
j.remove_related_applications()
assert len(j.related_applications) == 0
j.set_related_applications(related)
assert len(j.related_applications) == 2
j.add_related_application("123456789", "2005-01-01T00:00:00Z") # duplicate id, should be overwritten
assert len(j.related_applications) == 2
rar = j.related_application_record("123456789")
assert rar.get("application_id") == "123456789"
assert rar.get("date_accepted") == "2005-01-01T00:00:00Z"
j.add_related_application("123456789", "2005-01-01T00:00:00Z", "deleted") # update as if being deleted
rar = j.related_application_record("123456789")
assert rar.get("application_id") == "123456789"
assert rar.get("date_accepted") == "2005-01-01T00:00:00Z"
assert rar.get("status") == "deleted"
# do a quick by-reference check on the bibjson object
bj = j.bibjson()
assert bj.title == "test"
bj.publication_time = 7
bj2 = j.bibjson()
assert bj2.publication_time == 7
# check over ordered note reading
j.add_note("another note", "2010-01-01T00:00:00Z")
j.add_note("an old note", "2001-01-01T00:00:00Z")
ons = j.ordered_notes
assert len(ons) == 3
assert ons[2]["note"] == "an old note"
assert ons[1]["note"] == "testing"
assert ons[0]["note"] == "another note"
# now construct from a fixture
source = JournalFixtureFactory.make_journal_source(include_obsolete_fields=True)
j = models.Journal(**source)
assert j is not None
# run the remaining methods just to make sure there are no errors
j.calculate_tick()
j.prep()
j.save()
def test_03_article_model_rw(self):
"""Read and write properties into the article model"""
a = models.Article()
assert not a.is_in_doaj()
assert not a.has_seal()
a.set_in_doaj(True)
a.set_seal(True)
a.set_publisher_record_id("abcdef")
a.set_upload_id("zyxwvu")
assert a.data.get("admin", {}).get("publisher_record_id") == "abcdef"
assert a.is_in_doaj()
assert a.has_seal()
assert a.upload_id() == "zyxwvu"
def test_04_suggestion_model_rw(self):
"""Read and write properties into the suggestion model"""
s = models.Suggestion()
s.set_current_journal("9876543")
s.set_related_journal("123456789")
s.set_bulk_upload_id("abcdef")
s.set_application_status(constants.APPLICATION_STATUS_REJECTED)
s.suggested_on = "2001-01-01T00:00:00Z"
s.set_articles_last_year(12, "http://aly.com")
s.article_metadata = True
s.set_suggester("test", "<EMAIL>")
assert s.data.get("admin", {}).get("current_journal") == "9876543"
assert s.current_journal == "9876543"
assert s.related_journal == "123456789"
assert s.bulk_upload_id == "abcdef"
assert s.application_status == constants.APPLICATION_STATUS_REJECTED
assert s.suggested_on == "2001-01-01T00:00:00Z"
assert s.articles_last_year.get("count") == 12
assert s.articles_last_year.get("url") == "http://aly.com"
assert s.article_metadata is True
assert s.suggester.get("name") == "test"
assert s.suggester.get("email") == "<EMAIL>"
# check over ordered note reading
s.add_note("another note", "2010-01-01T00:00:00Z")
s.add_note("an old note", "2001-01-01T00:00:00Z")
ons = s.ordered_notes
assert len(ons) == 2
assert ons[1]["note"] == "an old note"
assert ons[0]["note"] == "another note"
s.prep()
assert 'index' in s, s
assert 'application_type' in s['index'], s['index']
assert s['index']['application_type'] == constants.APPLICATION_TYPE_UPDATE_REQUEST
s.remove_current_journal()
assert s.current_journal is None
s.prep()
assert 'index' in s, s
assert 'application_type' in s['index'], s['index']
assert s['index']['application_type'] == constants.APPLICATION_TYPE_FINISHED
s.set_application_status(constants.APPLICATION_STATUS_PENDING)
s.prep()
assert s['index']['application_type'] == constants.APPLICATION_TYPE_NEW_APPLICATION
s.save()
s.remove_current_journal()
s.remove_related_journal()
assert s.current_journal is None
assert s.related_journal is None
def test_08_sync_owners(self):
# suggestion with no current_journal
s = models.Suggestion(**ApplicationFixtureFactory.make_application_source())
s.save()
models.Suggestion.refresh()
s = models.Suggestion.pull(s.id)
assert s is not None
# journal with no current_application
j = models.Journal(**JournalFixtureFactory.make_journal_source())
j.save()
models.Journal.refresh()
j = models.Journal.pull(j.id)
assert j is not None
# suggestion with erroneous current_journal
s.set_current_journal("asdklfjsadjhflasdfoasf")
s.save()
models.Suggestion.refresh()
s = models.Suggestion.pull(s.id)
assert s is not None
# journal with erroneous current_application
j.set_current_application("kjwfuiwqhu220952gw")
j.save()
models.Journal.refresh()
j = models.Journal.pull(j.id)
assert j is not None
# suggestion with journal
s.set_owner("my_new_owner")
s.set_current_journal(j.id)
s.save()
models.Journal.refresh()
j = models.Journal.pull(j.id)
assert j.owner == "my_new_owner"
# journal with suggestion
j.set_owner("another_new_owner")
j.set_current_application(s.id)
j.save()
models.Suggestion.refresh()
s = models.Suggestion.pull(s.id)
assert s.owner == "another_new_owner"
def test_09_article_deletes(self):
# populate the index with some articles
for i in range(5):
a = models.Article()
a.set_in_doaj(True)
bj = a.bibjson()
bj.title = "Test Article {x}".format(x=i)
bj.add_identifier(bj.P_ISSN, "{x}000-0000".format(x=i))
bj.publisher = "Test Publisher {x}".format(x=i)
a.save()
# make sure the last updated dates are suitably different
time.sleep(0.66)
# now hit the key methods involved in article deletes
query = {
"query" : {
"bool" : {
"must" : [
{"term" : {"bibjson.title.exact" : "Test Article 0"}}
]
}
}
}
count = models.Article.hit_count(query)
assert count == 1
count = models.Article.count_by_issns(["1000-0000", "2000-0000"])
assert count == 2
models.Article.delete_selected(query)
time.sleep(1)
assert len(models.Article.all()) == 4
assert len(self.list_today_article_history_files()) == 1
models.Article.delete_by_issns(["2000-0000", "3000-0000"])
time.sleep(1)
assert len(models.Article.all()) == 2
assert len(self.list_today_article_history_files()) == 3
def test_10_journal_deletes(self):
# tests the various methods that are key to journal deletes
# populate the index with some journals
for i in range(5):
j = models.Journal()
j.set_in_doaj(True)
bj = j.bibjson()
bj.title = "Test Journal {x}".format(x=i)
bj.add_identifier(bj.P_ISSN, "{x}000-0000".format(x=i))
bj.publisher = "Test Publisher {x}".format(x=i)
bj.add_url("http://homepage.com/{x}".format(x=i), "homepage")
j.save()
# make sure the last updated dates are suitably different
time.sleep(0.66)
# populate the index with some articles
for i in range(5):
a = models.Article()
a.set_in_doaj(True)
bj = a.bibjson()
bj.title = "Test Article {x}".format(x=i)
bj.add_identifier(bj.P_ISSN, "{x}000-0000".format(x=i))
bj.publisher = "Test Publisher {x}".format(x=i)
a.save()
# make sure the last updated dates are suitably different
time.sleep(0.66)
# now hit the key methods involved in journal deletes
query = {
"query" : {
"bool" : {
"must" : [
{"term" : {"bibjson.title.exact" : "Test Journal 1"}}
]
}
}
}
count = models.Journal.hit_count(query)
assert count == 1
issns = models.Journal.issns_by_query(query)
assert len(issns) == 1
assert "1000-0000" in issns
models.Journal.delete_selected(query, articles=True)
time.sleep(1)
assert len(models.Article.all()) == 4
assert len(self.list_today_article_history_files()) == 1
assert len(models.Journal.all()) == 4
assert len(self.list_today_journal_history_files()) == 6 # Because all journals are snapshot at create time
def test_11_iterate(self):
for jsrc in JournalFixtureFactory.make_many_journal_sources(count=99, in_doaj=True):
j = models.Journal(**jsrc)
j.save()
time.sleep(2) # index all the journals
journal_ids = []
theqgen = models.JournalQuery()
for j in models.Journal.iterate(q=theqgen.all_in_doaj(), page_size=10):
journal_ids.append(j.id)
journal_ids = list(set(journal_ids[:])) # keep only unique ids
assert len(journal_ids) == 99
assert len(self.list_today_journal_history_files()) == 99
def test_12_account(self):
# Make a new account
acc = models.Account.make_account(
username='mrs_user',
email='<EMAIL>',
roles=['api', 'associate_editor'],
)
# Check the new user has the right roles
assert acc.has_role('api')
assert acc.has_role('associate_editor')
assert not acc.has_role('admin')
assert acc.marketing_consent is None
# check the api key has been generated
assert acc.api_key is not None
# Make another account with no API access
acc2 = models.Account.make_account(
username='mrs_user2',
email='<EMAIL>',
roles=['editor']
)
assert not acc2.has_role('api')
# Ensure we don't get an api key
assert not acc2.api_key
assert acc2.data.get('api_key', None) is None
# now add the api role and check we get a key generated
acc2.add_role('api')
assert acc2.api_key is not None
# Set marketing consent to True
acc2.set_marketing_consent(True)
assert acc2.marketing_consent is True
# Now set marketing consent to false
acc2.set_marketing_consent(False)
assert acc2.marketing_consent is False
# remove the api_key from the object and ask for it again
del acc2.data['api_key']
assert acc2.api_key is None
acc2.generate_api_key()
acc2.save()
assert acc2.api_key is not None
def test_13_block(self):
a = models.Article()
a.save()
models.Article.block(a.id, a.last_updated)
a = models.Article.pull(a.id)
assert a is not None
def test_14_article_model_index(self):
"""Check article indexes generate"""
a = models.Article(**ArticleFixtureFactory.make_article_source())
assert a.data.get('index', None) is None
# Generate the index
a.prep()
assert a.data.get('index', None) is not None
def test_15_archiving_policy(self):
# a recent change to how we store archiving policy means we need the object api to continue
# to respect the old model, while transparently converting it in and out of the object
j = models.Journal()
b = j.bibjson()
b.set_archiving_policy(["LOCKSS", "CLOCKSS", ["A national library", "Trinity"], ["Other", "Somewhere else"]], "http://url")
assert b.archiving_policy == {"url" : "http://url", "policy" : ["LOCKSS", "CLOCKSS", ["A national library", "Trinity"], ["Other", "Somewhere else"]]}
b.add_archiving_policy("SAFE")
assert b.archiving_policy == {"url" : "http://url", "policy" : ["LOCKSS", "CLOCKSS", "SAFE", ["A national library", "Trinity"], ["Other", "Somewhere else"]]}
assert b.flattened_archiving_policies == ['LOCKSS', 'CLOCKSS', 'SAFE', 'A national library: Trinity', 'Other: Somewhere else']
def test_16_generic_bibjson(self):
source = BibJSONFixtureFactory.generic_bibjson()
gbj = models.GenericBibJSON(source)
assert gbj.title == "The Title"
assert len(gbj.get_identifiers()) == 2
assert len(gbj.get_identifiers(gbj.P_ISSN)) == 1
assert len(gbj.get_identifiers(gbj.E_ISSN)) == 1
assert gbj.get_one_identifier() is not None
assert gbj.get_one_identifier(gbj.E_ISSN) == "9876-5432"
assert gbj.get_one_identifier(gbj.P_ISSN) == "1234-5678"
assert gbj.keywords == ["word", "key"]
assert len(gbj.get_urls()) == 6
assert gbj.get_urls("homepage") == ["http://journal.url"]
assert gbj.get_single_url("waiver_policy") == "http://waiver.policy"
assert gbj.get_single_url("random") is None
assert len(gbj.subjects()) == 2
gbj.title = "Updated Title"
gbj.add_identifier("doi", "10.1234/7")
gbj.add_keyword("test")
gbj.add_url("http://test", "test")
gbj.add_subject("TEST", "first", "one")
assert gbj.title == "Updated Title"
assert len(gbj.get_identifiers()) == 3
assert gbj.get_one_identifier("doi") == "10.1234/7"
assert gbj.keywords == ["word", "key", "test"]
assert gbj.get_single_url("test") == "http://test"
assert gbj.subjects()[2] == {"scheme" : "TEST", "term" : "first", "code" : "one"}
gbj.remove_identifiers("doi")
gbj.set_keywords("one")
gbj.set_subjects({"scheme" : "TEST", "term" : "first", "code" : "one"})
assert len(gbj.get_identifiers()) == 2
assert gbj.get_one_identifier("doi") is None
assert gbj.keywords == ["one"]
assert len(gbj.subjects()) == 1
gbj.remove_identifiers()
gbj.remove_subjects()
assert len(gbj.get_identifiers()) == 0
assert len(gbj.subjects()) == 0
def test_17_journal_bibjson(self):
source = BibJSONFixtureFactory.journal_bibjson()
bj = models.JournalBibJSON(source)
assert bj.alternative_title == "Alternative Title"
assert bj.country == "US"
assert bj.publisher == "The Publisher"
assert bj.provider == "Platform Host Aggregator"
assert bj.institution == "Society Institution"
assert bj.active is True
assert bj.language == ["EN", "FR"]
assert bj.get_license() is not None
assert bj.get_license_type() == "CC MY"
assert bj.open_access is True
assert bj.oa_start.get("year") == 1980
assert bj.apc_url == "http://apc.com"
assert bj.apc.get("currency") == "GBP"
assert bj.apc.get("average_price") == 2
assert bj.submission_charges_url == "http://submission.com"
assert bj.submission_charges.get("currency") == "USD"
assert bj.submission_charges.get("average_price") == 4
assert bj.editorial_review.get("process") == "Open peer review"
assert bj.editorial_review.get("url") == "http://review.process"
assert bj.plagiarism_detection.get("detection") is True
assert bj.plagiarism_detection.get("url") == "http://plagiarism.screening"
assert bj.article_statistics.get("statistics") is True
assert bj.article_statistics.get("url") == "http://download.stats"
assert bj.deposit_policy == ["Sherpa/Romeo", "Store it"]
assert bj.author_copyright.get("copyright") == "True"
assert bj.author_copyright.get("url") == "http://copyright.com"
assert bj.author_publishing_rights.get("publishing_rights") == "True"
assert bj.author_publishing_rights.get("url") == "http://publishing.rights"
assert bj.allows_fulltext_indexing is True
assert bj.persistent_identifier_scheme == ["DOI", "ARK", "PURL"]
assert bj.format == ["HTML", "XML", "Wordperfect"]
assert bj.publication_time == 8
assert bj.replaces == ["0000-0000"]
assert bj.is_replaced_by == ["9999-9999"]
assert bj.discontinued_date == "2001-01-01"
assert bj.discontinued_datestamp == datetime.strptime("2001-01-01", "%Y-%m-%d")
bj.alternative_title = "New alternate"
bj.country = "UK"
bj.publisher = "Me"
bj.provider = "The claw"
bj.institution = "UCL"
bj.active = False
bj.set_language("DE")
bj.set_license("CC BY", "CC BY")
bj.set_open_access(False)
bj.set_oa_start(1900)
bj.apc_url = "http://apc2.com"
bj.set_apc("USD", 10)
bj.submission_charges_url = "http://sub2.com"
bj.set_submission_charges("GBP", 20)
bj.set_editorial_review("Whatever", "http://whatever")
bj.set_plagiarism_detection("http://test1", False)
bj.set_article_statistics("http://test2", False)
bj.deposit_policy = ["Never"]
bj.set_author_copyright("http://test3", "True")
bj.set_author_publishing_rights("http://test4", "True")
bj.allows_fulltext_indexing = False
bj.persistent_identifier_scheme = "DOI"
bj.format = "PDF"
bj.publication_time = 4
bj.replaces = ["1111-1111"]
bj.is_replaced_by = ["2222-2222"]
bj.discontinued_date = "2002-01-01"
assert bj.alternative_title == "New alternate"
assert bj.country == "UK"
assert bj.publisher == "Me"
assert bj.provider == "The claw"
assert bj.institution == "UCL"
assert bj.active is False
assert bj.language == ["DE"]
assert bj.get_license_type() == "CC BY"
assert bj.open_access is False
assert bj.oa_start.get("year") == 1900
assert bj.apc_url == "http://apc2.com"
assert bj.apc.get("currency") == "USD"
assert bj.apc.get("average_price") == 10
assert bj.submission_charges_url == "http://sub2.com"
assert bj.submission_charges.get("currency") == "GBP"
assert bj.submission_charges.get("average_price") == 20
assert bj.editorial_review.get("process") == "Whatever"
assert bj.editorial_review.get("url") == "http://whatever"
assert bj.plagiarism_detection.get("detection") is False
assert bj.plagiarism_detection.get("url") == "http://test1"
assert bj.article_statistics.get("statistics") is False
assert bj.article_statistics.get("url") == "http://test2"
assert bj.deposit_policy == ["Never"]
assert bj.author_copyright.get("copyright") == "True"
assert bj.author_copyright.get("url") == "http://test3"
assert bj.author_publishing_rights.get("publishing_rights") == "True"
assert bj.author_publishing_rights.get("url") == "http://test4"
assert bj.allows_fulltext_indexing is False
assert bj.persistent_identifier_scheme == ["DOI"]
assert bj.format == ["PDF"]
assert bj.publication_time == 4
assert bj.replaces == ["1111-1111"]
assert bj.is_replaced_by == ["2222-2222"]
assert bj.discontinued_date == "2002-01-01"
assert bj.discontinued_datestamp == datetime.strptime("2002-01-01", "%Y-%m-%d")
bj.add_language("CZ")
bj.add_deposit_policy("OK")
bj.add_persistent_identifier_scheme("Handle")
bj.add_format("CSV")
bj.add_replaces("3333-3333")
bj.add_is_replaced_by("4444-4444")
assert bj.language == ["DE", "CZ"]
assert bj.deposit_policy == ["Never", "OK"]
assert bj.persistent_identifier_scheme == ["DOI", "Handle"]
assert bj.format == ["PDF", "CSV"]
assert bj.replaces == ["1111-1111", "3333-3333"]
assert bj.is_replaced_by == ["2222-2222", "4444-4444"]
def test_18_continuations(self):
journal = models.Journal()
bj = journal.bibjson()
bj.replaces = ["1111-1111"]
bj.is_replaced_by = ["2222-2222"]
bj.add_identifier(bj.E_ISSN, "0000-0000")
journal.save()
future1 = models.Journal()
bjf1 = future1.bibjson()
bjf1.replaces = ["0000-0000"]
bjf1.is_replaced_by = ["3333-3333"]
bjf1.add_identifier(bj.E_ISSN, "2222-2222")
future1.save()
future2 = models.Journal()
bjf2 = future2.bibjson()
bjf2.replaces = ["2222-2222"]
bjf2.add_identifier(bj.E_ISSN, "3333-3333")
future2.save()
past1 = models.Journal()
bjp1 = past1.bibjson()
bjp1.replaces = ["4444-4444"]
bjp1.is_replaced_by = ["0000-0000"]
bjp1.add_identifier(bj.E_ISSN, "1111-1111")
past1.save()
past2 = models.Journal()
bjp2 = past2.bibjson()
bjp2.is_replaced_by = ["1111-1111"]
bjp2.add_identifier(bj.E_ISSN, "4444-4444")
past2.save()
time.sleep(2)
past = journal.get_past_continuations()
future = journal.get_future_continuations()
assert len(past) == 2
assert past[0].bibjson().get_one_identifier(bj.E_ISSN) == "1111-1111"
assert past[1].bibjson().get_one_identifier(bj.E_ISSN) == "4444-4444"
assert len(future) == 2
assert future[0].bibjson().get_one_identifier(bj.E_ISSN) == "2222-2222"
assert future[1].bibjson().get_one_identifier(bj.E_ISSN) == "3333-3333"
def test_19_article_bibjson(self):
source = BibJSONFixtureFactory.article_bibjson()
bj = models.ArticleBibJSON(source)
assert bj.year == "1987"
assert bj.month == "4"
assert bj.start_page == "14"
assert bj.end_page == "15"
assert bj.abstract == "Some text here"
assert bj.volume == "No 10"
assert bj.number == "Iss. 4"
assert bj.journal_title == "Journal of Things"
assert bj.journal_language == ["eng"]
assert bj.journal_country == "GB"
assert bj.journal_issns == ["1234-5678", "9876-5432"]
assert bj.publisher == "IEEE"
assert bj.author[0].get("name") == "Test"
assert bj.get_journal_license().get("title") == "CC-BY"
bj.year = "2000"
bj.month = "5"
bj.start_page = "100"
bj.end_page = "110"
bj.abstract = "New abstract"
bj.volume = "Four"
bj.number = "Q1"
bj.journal_title = "Journal of Stuff"
bj.journal_language = "fra"
bj.journal_country = "FR"
bj.journal_issns = ["1111-1111", "9999-9999"]
bj.publisher = "Elsevier"
bj.add_author("Testing", "School of Hard Knocks")
bj.set_journal_license("CC NC", "CC NC", "http://cc.nc", False)
assert bj.get_publication_date() is not None
assert bj.vancouver_citation() is not None
assert bj.year == "2000"
assert bj.month == "5"
assert bj.start_page == "100"
assert bj.end_page == "110"
assert bj.abstract == "New abstract"
assert bj.volume == "Four"
assert bj.number == "Q1"
assert bj.journal_title == "Journal of Stuff"
assert bj.journal_language == ["fra"]
assert bj.journal_country == "FR"
assert bj.journal_issns == ["1111-1111", "9999-9999"]
assert bj.publisher == "Elsevier"
assert bj.author[1].get("name") == "Testing"
assert bj.get_journal_license().get("title") == "CC NC"
del bj.year
del bj.month
bj.remove_journal_metadata()
assert bj.year is None
assert bj.month is None
assert bj.journal_title is None
def test_20_make_continuation_replaces(self):
journal = models.Journal()
bj = journal.bibjson()
bj.add_identifier(bj.E_ISSN, "0000-0000")
bj.add_identifier(bj.P_ISSN, "1111-1111")
bj.title = "First Journal"
journal.save()
time.sleep(2)
cont = journal.make_continuation("replaces", eissn="2222-2222", pissn="3333-3333", title="Second Journal")
rep = bj.replaces
rep.sort()
assert rep == ["2222-2222", "3333-3333"]
cbj = cont.bibjson()
irb = cbj.is_replaced_by
irb.sort()
assert irb == ["0000-0000", "1111-1111"]
assert cbj.title == "Second Journal"
assert cbj.get_one_identifier(cbj.E_ISSN) == "2222-2222"
assert cbj.get_one_identifier(cbj.P_ISSN) == "3333-3333"
assert cont.id != journal.id
def test_21_make_continuation_is_replaced_by(self):
journal = models.Journal()
bj = journal.bibjson()
bj.add_identifier(bj.E_ISSN, "0000-0000")
bj.add_identifier(bj.P_ISSN, "1111-1111")
bj.title = "First Journal"
journal.save()
time.sleep(2)
cont = journal.make_continuation("is_replaced_by", eissn="2222-2222", pissn="3333-3333", title="Second Journal")
irb = bj.is_replaced_by
irb.sort()
assert irb == ["2222-2222", "3333-3333"]
cbj = cont.bibjson()
rep = cbj.replaces
rep.sort()
assert rep == ["0000-0000", "1111-1111"]
assert cbj.title == "Second Journal"
assert cbj.get_one_identifier(cbj.E_ISSN) == "2222-2222"
assert cbj.get_one_identifier(cbj.P_ISSN) == "3333-3333"
assert cont.id != journal.id
def test_22_make_continuation_errors(self):
journal = models.Journal()
bj = journal.bibjson()
bj.add_identifier(bj.E_ISSN, "0000-0000")
bj.add_identifier(bj.P_ISSN, "1111-1111")
bj.title = "First Journal"
journal.save()
time.sleep(2)
with self.assertRaises(models.ContinuationException):
cont = journal.make_continuation("sideways", eissn="2222-2222", pissn="3333-3333", title="Second Journal")
with self.assertRaises(models.ContinuationException):
cont = journal.make_continuation("replaces", title="Second Journal")
def test_23_make_continuation_single_issn(self):
# this is to cover a case where a single issn is provided during the continuations create process,
# to make sure the behaviour is still correct
journal = models.Journal()
bj = journal.bibjson()
bj.add_identifier(bj.E_ISSN, "0000-0000")
bj.add_identifier(bj.P_ISSN, "1111-1111")
bj.title = "First Journal"
journal.save()
time.sleep(2)
# first do it with an eissn
cont = journal.make_continuation("replaces", eissn="2222-2222", title="Second Journal")
rep = bj.replaces
rep.sort()
assert rep == ["2222-2222"]
cbj = cont.bibjson()
irb = cbj.is_replaced_by
irb.sort()
assert irb == ["0000-0000", "1111-1111"]
assert cbj.title == "Second Journal"
assert cbj.get_one_identifier(cbj.E_ISSN) == "2222-2222"
assert cont.id != journal.id
# then do it with a pissn and give it a dud eissn
cont = journal.make_continuation("replaces", pissn="3333-3333", eissn="", title="Second Journal")
rep = bj.replaces
rep.sort()
assert rep == ["3333-3333"]
cbj = cont.bibjson()
irb = cbj.is_replaced_by
irb.sort()
assert irb == ["0000-0000", "1111-1111"]
assert cbj.title == "Second Journal"
assert cbj.get_one_identifier(cbj.P_ISSN) == "3333-3333"
assert cont.id != journal.id
def test_24_index_has_apc(self):
# no apc record, not ticked
j = models.Journal()
j.set_created("1970-01-01T00:00:00Z") # so it's before the tick
j.prep()
assert j.data.get("index", {}).get("has_apc") == "No Information"
# no apc record, ticked
j = models.Journal()
j.prep()
assert j.data.get("index", {}).get("has_apc") == "No"
# apc record, not ticked
j = models.Journal()
j.set_created("1970-01-01T00:00:00Z") # so it's before the tick
b = j.bibjson()
b.set_apc("GBP", 100)
j.prep()
assert j.data.get("index", {}).get("has_apc") == "Yes"
# apc record, ticked
j = models.Journal()
b = j.bibjson()
b.set_apc("GBP", 100)
j.prep()
assert j.data.get("index", {}).get("has_apc") == "Yes"
def test_25_autocomplete(self):
j = models.Journal()
bj = j.bibjson()
bj.publisher = "BioMed Central"
j.save()
j = models.Journal()
bj = j.bibjson()
bj.publisher = "BioMedical Publisher"
j.save()
j = models.Journal()
bj = j.bibjson()
bj.publisher = "De Gruyter"
j.save()
j = models.Journal()
bj = j.bibjson()
bj.publisher = "Deep Mind"
j.save()
time.sleep(2)
res = models.Journal.advanced_autocomplete("index.publisher_ac", "bibjson.publisher", "Bio")
assert len(res) == 2
res = models.Journal.advanced_autocomplete("index.publisher_ac", "bibjson.publisher", "BioMed")
assert len(res) == 2
res = models.Journal.advanced_autocomplete("index.publisher_ac", "bibjson.publisher", "De ")
assert len(res) == 1
res = models.Journal.advanced_autocomplete("index.publisher_ac", "bibjson.publisher", "BioMed C")
assert len(res) == 1
def test_26_provenance(self):
"""Read and write properties into the provenance model"""
p = models.Provenance()
# now construct from a fixture
source = ProvenanceFixtureFactory.make_provenance_source()
p = models.Provenance(**source)
assert p is not None
# run the remaining methods just to make sure there are no errors
p.save()
def test_27_save_valid_dataobj(self):
j = models.Journal()
bj = j.bibjson()
bj.title = "A legitimate title"
j.data["junk"] = "in here"
with self.assertRaises(dataobj.DataStructureException):
j.save()
assert j.id is None
s = models.Suggestion()
sbj = s.bibjson()
sbj.title = "A legitimate title"
s.data["junk"] = "in here"
with self.assertRaises(dataobj.DataStructureException):
s.save()
assert s.id is None
p = models.Provenance()
p.type = "suggestion"
p.data["junk"] = "in here"
with self.assertRaises(dataobj.DataStructureException):
p.save()
assert p.id is None
def test_28_make_provenance(self):
acc = models.Account()
acc.set_id("test")
acc.add_role("associate_editor")
acc.add_role("editor")
obj1 = models.Suggestion()
obj1.set_id("obj1")
models.Provenance.make(acc, "act1", obj1)
time.sleep(2)
prov = models.Provenance.get_latest_by_resource_id("obj1")
assert prov.type == "suggestion"
assert prov.user == "test"
assert prov.roles == ["associate_editor", "editor"]
assert len(prov.editor_group) == 0
assert prov.subtype is None
assert prov.action == "act1"
assert prov.resource_id == "obj1"
eg1 = models.EditorGroup()
eg1.set_id("associate")
eg1.add_associate(acc.id)
eg1.save()
eg2 = models.EditorGroup()
eg2.set_id("editor")
eg2.set_editor(acc.id)
eg2.save()
time.sleep(2)
obj2 = models.Suggestion()
obj2.set_id("obj2")
models.Provenance.make(acc, "act2", obj2, "sub")
time.sleep(2)
prov = models.Provenance.get_latest_by_resource_id("obj2")
assert prov.type == "suggestion"
assert prov.user == "test"
assert prov.roles == ["associate_editor", "editor"]
assert prov.editor_group == ["editor", "associate"]
assert prov.subtype == "sub"
assert prov.action == "act2"
assert prov.resource_id == "obj2"
def test_29_background_job(self):
source = BackgroundFixtureFactory.example()
bj = models.BackgroundJob(**source)
bj.save()
time.sleep(2)
retrieved = models.BackgroundJob.pull(bj.id)
assert retrieved is not None
source = BackgroundFixtureFactory.example()
source["params"]["ids"] = ["1", "2", "3"]
source["params"]["type"] = "suggestion"
source["reference"]["query"] = json.dumps({"query" : {"match_all" : {}}})
bj = models.BackgroundJob(**source)
bj.save()
bj.add_audit_message("message")
assert len(bj.audit) == 2
def test_30_article_journal_sync(self):
j = models.Journal(**JournalFixtureFactory.make_journal_source(in_doaj=True))
a = models.Article(**ArticleFixtureFactory.make_article_source(in_doaj=False, with_journal_info=False))
assert a.has_seal() is False
assert a.bibjson().journal_issns != j.bibjson().issns()
reg = models.Journal()
changed = a.add_journal_metadata(j, reg)
assert changed is True
assert a.has_seal() is True
assert a.is_in_doaj() is True
assert a.bibjson().journal_issns == j.bibjson().issns()
assert a.bibjson().publisher == j.bibjson().publisher
assert a.bibjson().journal_country == j.bibjson().country
assert a.bibjson().journal_language == j.bibjson().language
assert a.bibjson().journal_title == j.bibjson().title
changed = a.add_journal_metadata(j)
assert changed is False
def test_31_application_latest_by_current_journal(self):
j = models.Journal()
j.set_id(j.makeid())
app1 = models.Suggestion(**ApplicationFixtureFactory.make_application_source())
app1.set_id(app1.makeid())
app1.set_current_journal(j.id)
app1.set_created("1970-01-01T00:00:00Z")
app1.save()
app2 = models.Suggestion(**ApplicationFixtureFactory.make_application_source())
app2.set_id(app2.makeid())
app2.set_current_journal(j.id)
app2.set_created("1971-01-01T00:00:00Z")
app2.save(blocking=True)
# check that we find the right application when we search
app3 = models.Suggestion.find_latest_by_current_journal(j.id)
assert app3 is not None
assert app3.id == app2.id
# make sure we get a None response when there's no application
app0 = models.Suggestion.find_latest_by_current_journal("whatever")
assert app0 is None
def test_32_application_all_by_related_journal(self):
j = models.Journal()
j.set_id(j.makeid())
app1 = models.Suggestion(**ApplicationFixtureFactory.make_application_source())
app1.set_id(app1.makeid())
app1.set_related_journal(j.id)
app1.set_created("1970-01-01T00:00:00Z")
app1.save()
app2 = models.Suggestion(**ApplicationFixtureFactory.make_application_source())
app2.set_id(app2.makeid())
app2.set_related_journal(j.id)
app2.set_created("1971-01-01T00:00:00Z")
app2.save(blocking=True)
# check that we find all the applications when we search, and that they're in the right order
all = models.Suggestion.find_all_by_related_journal(j.id)
assert len(all) == 2
assert all[0].id == app1.id
assert all[1].id == app2.id
def test_33_article_stats(self):
articles = []
# make a bunch of articles variably in doaj/not in doaj, for/not for the issn we'll search
for i in range(1, 3):
article = models.Article(
**ArticleFixtureFactory.make_article_source(eissn="1111-1111", pissn="1111-1111", with_id=False, in_doaj=True)
)
article.set_created("2019-01-0" + str(i) + "T00:00:00Z")
articles.append(article)
for i in range(3, 5):
article = models.Article(
**ArticleFixtureFactory.make_article_source(eissn="1111-1111", pissn="1111-1111", with_id=False, in_doaj=False)
)
article.set_created("2019-01-0" + str(i) + "T00:00:00Z")
articles.append(article)
for i in range(5, 7):
article = models.Article(
**ArticleFixtureFactory.make_article_source(eissn="2222-2222", pissn="2222-2222", with_id=False, in_doaj=True)
)
article.set_created("2019-01-0" + str(i) + "T00:00:00Z")
articles.append(article)
for i in range(7, 9):
article = models.Article(
**ArticleFixtureFactory.make_article_source(eissn="2222-2222", pissn="2222-2222", with_id=False, in_doaj=False)
)
article.set_created("2019-01-0" + str(i) + "T00:00:00Z")
articles.append(article)
for i in range(len(articles)):
articles[i].save(blocking=i == len(articles) - 1)
journal = models.Journal()
bj = journal.bibjson()
bj.add_identifier(bj.P_ISSN, "1111-1111")
stats = journal.article_stats()
assert stats.get("total") == 2
assert stats.get("latest") == "2019-01-02T00:00:00Z"
def test_34_cache(self):
models.Cache.cache_site_statistics({
"articles" : 10,
"journals" : 20,
"countries" : 30,
"searchable" : 40
})
models.Cache.cache_csv("/csv/filename.csv")
models.Cache.cache_sitemap("sitemap.xml")
models.Cache.cache_public_data_dump("http://example.com/article", 100, "http://example.com/journal", 200)
time.sleep(1)
stats = models.Cache.get_site_statistics()
assert stats["articles"] == 10
assert stats["journals"] == 20
assert stats["countries"] == 30
assert stats["searchable"] == 40
assert models.Cache.get_latest_csv().get("url") == "/csv/filename.csv"
assert models.Cache.get_latest_sitemap() == "sitemap.xml"
assert models.Cache.get_public_data_dump().get("article").get("url") == "http://example.com/article"
assert models.Cache.get_public_data_dump().get("article").get("size") == 100
assert models.Cache.get_public_data_dump().get("journal").get("url") == "http://example.com/journal"
assert models.Cache.get_public_data_dump().get("journal").get("size") == 200
# TODO: reinstate this test when author emails have been disallowed again
'''
def test_33_article_with_author_email(self):
"""Check the system disallows articles with emails in the author field"""
a_source = ArticleFixtureFactory.make_article_source()
# Creating a model from a source with email is rejected by the DataObj
a_source['bibjson']['author'][0]['email'] = '<EMAIL>'
with self.assertRaises(dataobj.DataStructureException):
a = models.Article(**a_source)
bj = a.bibjson()
# Remove the email address again to create the model
del a_source['bibjson']['author'][0]['email']
a = models.Article(**a_source)
# We can't add an author with an email address any more.
with self.assertRaises(TypeError):
a.bibjson().add_author(name='<NAME>', affiliation='School of Rock', email='<EMAIL>')
'''
| 2.0625
| 2
|
src/binancema/indicators.py
|
emrementese/binance-ema
| 16
|
12783830
|
<reponame>emrementese/binance-ema<filename>src/binancema/indicators.py
# -*- coding: utf-8 -*-
'''
Created by <NAME> on 24/01/2022
Coding with Python.
'''
from binancema.coininfo import price
class indicator:
def __init__(self,client):
self.client = client
    def SMA(self, series, length) -> float:
        '''
        * Simple Moving Average
        * Reference (01/11/2021): https://www.tradingview.com/pine-script-reference/#fun_sma
        * If you want to look at the original SMA function coded with Pine Script, visit the reference.
        - length --> number of data points, len(series) (int)
        - series --> data (list) or (int/float)
        '''
        if not isinstance(length, int):
            raise Exception("SMA Calculating Error: Length must be integer.")
        if isinstance(series, list):
            total = 0
            for i in series:
                if isinstance(i, (int, float)):
                    total += i
                else:
                    raise Exception("SMA Calculating Error: series elements must be integer or float.")
            # The SMA is the mean of the window, not the bare sum.
            return total / length
        elif isinstance(series, (float, int)):
            return series / length
        else:
            raise Exception("SMA Calculating Error: series must be integer & float or list.")
def EMA(self,close,length,previous_ema) -> float:
        '''
        * Exponential Moving Average
        * Reference (01/11/2021): https://www.tradingview.com/pine-script-reference/#fun_ema
        * If you want to look at the original EMA function coded with Pine Script, visit the reference.
        Binance does not provide EMA values for crypto coins. To calculate the latest EMA you need the
        previous EMA, which Binance does not provide either, so it must be calculated too; that EMA in turn
        depends on the one before it, all the way back to the very first EMA, which is seeded with an SMA.
        Seeding from the first SMA is impractical here: it is slow, and Binance returns at most 1000
        candlestick bars per request under its API limits, so the full history needed for the seed is not
        available.
        * For example: you cannot rebuild the BTC-USDT 5-minute chart's EMA from scratch, because far more
          candlestick bars would be needed than the API returns.
        * That is why the function takes one extra input (previous_ema).
        * This input can be any EMA, provided it comes from before the EMA of the latest candlestick bar.
        length : number of candlestick bars (integer)
        previous_ema : any EMA from before the EMA of the latest candlestick bar (float/integer)
        close : the closing value of the candlestick bar after the previous EMA's bar (float/integer)
        return --> next EMA
        '''
        if not isinstance(length, int):
            raise Exception("EMA Calculating Error: Length must be integer.")
        if not isinstance(previous_ema, (float, int)):
            raise Exception("EMA Calculating Error: previous_ema must be integer or float.")
        if not isinstance(close, (float, int)):
            raise Exception("EMA Calculating Error: close must be integer or float.")
alpha = 2 / (length + 1)
ema = (close * alpha) + (previous_ema * (1-alpha))
return ema
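    # Minimal usage sketch of the recursion above (added for illustration, not part
    # of the original repository; `client` is assumed to be an authenticated Binance
    # client instance):
    #
    #     ind = indicator(client)
    #     ind.EMA(close=105.0, length=9, previous_ema=100.0)
    #     # alpha = 2 / (9 + 1) = 0.2  ->  105.0 * 0.2 + 100.0 * 0.8 = 101.0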
def coins_instant_ema(self,length,previous_ema,symbol) -> float:
'''
        * This function returns the instant EMA, using the EMA of the previous candlestick bar.
        Note:
        * When the chart moves on to the next candlestick bar you have to update the previous EMA and run it again!
        * This function cannot calculate the next candlestick bar's EMA; it only returns the instant (dynamic) EMA.
Examples
- symbol = "BTCUSDT"
- length = 9
- previous_ema (9) = 62017.40
'''
close_value = price(self.client,symbol)
return self.EMA(close_value,length,previous_ema)
def MACD(self,close,fast,slow,signal,previous_ema_fast,previous_ema_slow,previous_macd):
        '''
        * Moving Average Convergence Divergence
        * Reference (01/11/2021): https://en.tradingview.com/ideas/macd/
        * If you want to look at the original MACD function coded with Pine Script, visit the reference.
        - close --> the closing value of the candlestick bar after the previous EMA's bar (float/integer)
        - fast --> fast EMA length
        - slow --> slow EMA length
        - signal --> signal length
        - previous_ema_fast --> any fast EMA from before the EMA of the latest candlestick bar (float/integer)
        - previous_ema_slow --> any slow EMA from before the EMA of the latest candlestick bar (float/integer)
        - previous_macd --> any MACD from before the MACD of the latest candlestick bar (float/integer)
        '''
fastMA = self.EMA(close,fast,previous_ema_fast) # next fast ema
slowMA = self.EMA(close,slow,previous_ema_slow) # next slow ema
macd = fastMA - slowMA # macd value
signal_value = self.EMA(macd,signal,previous_macd) # signal value
return [macd,signal_value]
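    # Hedged usage sketch for the standard 12/26/9 MACD (illustration only; the
    # previous_* seed values below are hypothetical and would normally come from an
    # earlier calculation):
    #
    #     macd_value, signal_value = ind.MACD(
    #         close=105.0, fast=12, slow=26, signal=9,
    #         previous_ema_fast=104.0, previous_ema_slow=103.0, previous_macd=0.8,
    #     )
    #     # macd_value = EMA(close, 12, 104.0) - EMA(close, 26, 103.0)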
def coins_instant_macd(self,symbol,fast,slow,signal,previous_ema_fast,previous_ema_slow,previous_macd):
'''
        * This function returns the instant MACD, using the MACD of the previous candlestick bar.
        Note:
        * When the chart moves on to the next candlestick bar you have to update the previous MACD and run it again!
        * This function cannot calculate the next candlestick bar's MACD; it only returns the instant MACD.
Examples
- symbol = "BTCUSDT" string
- fast = 12 int
- slow = 26 int
- signal = 9 int
- previous_ema_fast
- previous_ema_slow
- previous_macd = 62017.40
'''
close = price(self.client,symbol)
return self.MACD(close,fast,slow,signal,previous_ema_fast,previous_ema_slow,previous_macd)
| 3.140625
| 3
|
panqec/decoders/sweepmatch/_deformed_decoder.py
|
ehua7365/bn3d
| 0
|
12783831
|
<reponame>ehua7365/bn3d
from typing import Tuple, Dict
import numpy as np
from pymatching import Matching
from panqec.codes import StabilizerCode
from panqec.decoders import BaseDecoder
from panqec.error_models import BaseErrorModel, PauliErrorModel
from .. import (
SweepDecoder3D, Toric3DMatchingDecoder, RotatedPlanarMatchingDecoder,
RotatedSweepDecoder3D
)
class DeformedToric3DMatchingDecoder(Toric3DMatchingDecoder):
code: StabilizerCode
error_rate: float
error_model: PauliErrorModel
_epsilon: float
_n_faces: Dict[str, int]
def __init__(self, code: StabilizerCode,
error_model: BaseErrorModel,
error_rate: float):
self._epsilon = 1e-15
self._n_faces = dict()
super().__init__(code, error_model, error_rate)
def get_matcher(self):
"""Return a new Matching object."""
# Get the number of X stabilizers (faces).
n_faces: int = int(3*np.product(self.code.size))
self._n_faces[self.code.label] = n_faces
# Only keep the Z vertex stabilizers.
H_z = self.code.Hz
weights = self.get_deformed_weights()
print("weights", weights)
return Matching(H_z, spacelike_weights=weights)
def get_deformed_weights(self) -> np.ndarray:
"""Get MWPM weights for deformed Pauli noise."""
return calculate_deformed_weights(
self.error_model, self.error_rate, self.code, self._epsilon
)
class DeformedSweepDecoder3D(SweepDecoder3D):
code: StabilizerCode
error_model: BaseErrorModel
error_rate: float
_p_edges: int
def __init__(self, code, error_model, error_rate):
super().__init__(code, error_model, error_rate)
self._p_edges = self.get_edge_probabilities()
def get_edge_probabilities(self):
"""Most likely face for detectable Z error."""
p_edges: Tuple[float, float, float]
p_X, p_Y, p_Z = (
np.array(self.error_model.direction)*self.error_rate
)
p_regular = p_Y + p_Z
p_deformed = p_Y + p_X
p = np.array([p_deformed, p_regular, p_regular])
p_edges = tuple(p/p.sum())
return p_edges
def get_default_direction(self):
"""Use most likely direction based on noise."""
direction = int(self._rng.choice([0, 1, 2], size=1, p=self._p_edges))
return direction
class DeformedSweepMatchDecoder(BaseDecoder):
label = 'Deformed Toric 3D Sweep Matching Decoder'
sweeper: BaseDecoder
matcher: BaseDecoder
def __init__(self, code: StabilizerCode,
error_model: BaseErrorModel,
error_rate: float):
self.sweeper = DeformedSweepDecoder3D(
code, error_model, error_rate
)
self.matcher = DeformedToric3DMatchingDecoder(
code, error_model, error_rate
)
def decode(
self, syndrome: np.ndarray, **kwargs
) -> np.ndarray:
"""Get X and Z corrections given code and measured syndrome."""
z_correction = self.sweeper.decode(syndrome)
x_correction = self.matcher.decode(syndrome)
correction = (z_correction + x_correction) % 2
correction = correction.astype(np.uint)
return correction
class DeformedRotatedSweepMatchDecoder(DeformedSweepMatchDecoder):
def __init__(self, code: StabilizerCode,
error_model: BaseErrorModel,
error_rate: float):
self.sweeper = RotatedSweepDecoder3D(code, error_model, error_rate)
self.matcher = DeformedRotatedPlanarMatchingDecoder(
code, error_model, error_rate
)
class DeformedRotatedPlanarMatchingDecoder(RotatedPlanarMatchingDecoder):
def __init__(self, code, error_model: BaseErrorModel, error_rate: float):
self._epsilon = 1e-15
super().__init__(code, error_model, error_rate)
def get_matcher(self):
"""Return a new Matching object."""
# Get the number of X stabilizers (faces).
n_faces = len([
location
for location in self.code.stabilizer_coordinates
if self.code.stabilizer_type(location) == 'face'
])
self._n_faces[self.code.label] = n_faces
n_qubits = self.code.n
self._n_qubits[self.code.label] = n_qubits
# Only keep the Z vertex stabilizers.
H_z = self.code.Hz
weights = self.get_deformed_weights()
return Matching(H_z, spacelike_weights=weights)
def get_deformed_weights(self) -> np.ndarray:
"""Get MWPM weights for deformed Pauli noise."""
return calculate_deformed_weights(
self.error_model, self.error_rate, self.code, self._epsilon
)
def calculate_deformed_weights(
error_model: PauliErrorModel, probability: float,
code: StabilizerCode, epsilon
) -> np.ndarray:
regular_weight, deformed_weight = get_regular_and_deformed_weights(
error_model.direction, probability, epsilon
)
# All weights are regular weights to start off with.
weights = np.ones(code.n, dtype=float)*regular_weight
# Get indices of deformed qubits from error model.
deformation_indices = error_model.get_deformation_indices(code)
# The weights on the deformed edge are different.
weights[deformation_indices] = deformed_weight
# Return flattened arrays.
return weights
def get_regular_and_deformed_weights(
direction: Tuple[float, float, float], error_rate: float, epsilon: float
) -> Tuple[float, float]:
"""Get MWPM weights for given Pauli noise probabilities."""
# Extract undeformed error probabilities.
r_x, r_y, r_z = direction
p_X, p_Y, p_Z = np.array([r_x, r_y, r_z])*error_rate
# For undeformed qubit sites, only X and Y errors can be detected,
# so the probability of error is the sum of their probabilities.
# Note that Z errors can neither be detected nor corrected so they
# do not contribute to the weight.
p_regular = p_X + p_Y
# For deformed qubit sites, only Z and Y errors can be detected.
p_deformed = p_Z + p_Y
# Take logarithms regularized by epsilons to avoid infinities.
# Logarithms turn products into sums.
# Divide by the probability of no (detectable) error because that is
# the baseline to compare with.
regular_weight = -np.log(
(p_regular + epsilon) / (1 - p_regular + epsilon)
)
deformed_weight = -np.log(
(p_deformed + epsilon) / (1 - p_deformed + epsilon)
)
return regular_weight, deformed_weight
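# Small numeric illustration of the weights above (added as a comment, not part of
# the original module): with direction = (1/3, 1/3, 1/3), error_rate = 0.3 and a
# negligible epsilon, p_regular = p_deformed = 0.2, so both weights equal
# -log(0.2 / 0.8) = log(4) ~= 1.386. With biased noise the two probabilities
# differ, and the matcher then prefers corrections on the lower-weight edges.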
| 2.203125
| 2
|
src/build_rindex/redis_index.py
|
ethanjperez/semanticRetrievalMRS
| 61
|
12783832
|
<reponame>ethanjperez/semanticRetrievalMRS
import heapq
import json
import redis
from tqdm import tqdm
from typing import List
import config
from build_rindex.build_rvindex import IndexDB, load_from_file
class RedisScoreIndexOld:
"""
    The inverted index is basically a dictionary, with key: term, value: {docid: number of occurrences of the term in the doc}
"""
def __init__(self, redis_db: redis.Redis):
self.redis_db: redis.Redis = redis_db
def get_containing_document(self, term):
item = self.redis_db.get(term)
if item is None:
return None
else:
return json.loads(item).keys()
def get_score(self, term, docid):
item = self.redis_db.get(term)
if item is None:
return 0
else:
item = json.loads(item)
if docid not in item:
return 0
else:
return item[docid]
def get_score_item(self, term):
item = self.redis_db.get(term)
if item is None:
return None
else:
return json.loads(item)
def save_scored_index(self, scored_index):
print("Save scored term-doc index to Redis.")
for key in tqdm(scored_index.keys()):
item = scored_index[key]
self.redis_db.set(key, json.dumps(item))
self.redis_db.save()
class RedisScoreIndex(object):
"""
    The inverted index is basically a dictionary, with key: term, value: {docid: number of occurrences of the term in the doc}
"""
TERM_PREFIX = 't'
SCORE_PREFIX = 's'
SEP_SYB = ':'
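    # Key layout used by the methods below (illustrative example only): for the
    # term "china" and the document id "Beijing", the set of containing documents
    # is stored under the Redis key 't:china' and the per-document score under
    # 's:china:Beijing' (a string holding a float).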
@staticmethod
def scored_dict_ranking(candidate_doc_list, scored_dict, top_k):
scored_doc = []
v_terms = scored_dict.keys()
for cur_doc in candidate_doc_list:
cur_doc_score = 0
for cur_term in v_terms:
if cur_doc not in scored_dict[cur_term]:
cur_doc_score += 0
else:
cur_doc_score += scored_dict[cur_term][cur_doc]
if top_k is not None and 0 <= top_k == len(scored_doc):
heapq.heappushpop(scored_doc, (cur_doc_score, cur_doc))
else:
heapq.heappush(scored_doc, (cur_doc_score, cur_doc))
return scored_doc
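    # Note on scored_dict_ranking (comment added for clarity): the (score, docid)
    # pairs are kept in a min-heap, so when top_k is set the returned list holds the
    # top_k highest-scoring documents, but in heap order rather than sorted
    # descending.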
def __init__(self, redis_db: redis.Redis):
self.redis_db: redis.Redis = redis_db
def get_containing_document(self, term):
key = self.TERM_PREFIX + self.SEP_SYB + term
item = self.redis_db.smembers(key)
if item is None:
return None
else:
return item
def get_score(self, term, docid):
key = self.SEP_SYB.join([self.SCORE_PREFIX, term, docid])
item = self.redis_db.get(key)
if item is None:
return 0
else:
return float(item)
def get_candidate_set_from_batched_terms(self, terms):
pipe = self.redis_db.pipeline()
valid_terms = []
valid_set_list = []
for term in terms:
key = self.TERM_PREFIX + self.SEP_SYB + term
pipe.smembers(key)
result_set_list = pipe.execute()
for term, mset in zip(terms, result_set_list):
if len(mset) > 0:
valid_terms.append(term)
valid_set_list.append(mset)
return list(set.union(*valid_set_list)), valid_set_list, valid_terms
def get_scores_from_batched_term_doc_pairs(self, terms: List, valid_set_list: List):
scored_results = dict()
# Remember order matters:
for term, mset in zip(terms, valid_set_list):
pipe = self.redis_db.pipeline()
for docid in mset:
key = self.SEP_SYB.join([self.SCORE_PREFIX, term, docid])
pipe.get(key)
ritems = pipe.execute()
scored_results[term] = dict()
cur_ptr = 0
for docid in mset:
scored_results[term][docid] = float(ritems[cur_ptr])
cur_ptr += 1
return scored_results
def save_scored_index(self, scored_index):
print("Save scored term-doc index to Redis.")
for term in tqdm(scored_index.keys()):
pipe = self.redis_db.pipeline()
item = scored_index[term]
doc_set = scored_index[term].keys()
term_key = self.TERM_PREFIX + self.SEP_SYB + term
for docid, score in item.items():
score_key = self.SEP_SYB.join([self.SCORE_PREFIX, term, docid])
pipe.set(score_key, score)
pipe.sadd(term_key, *doc_set)
pipe.execute()
# self.redis_db.save()
def load_tf_idf_score_to_redis_cache():
tf_idf_score_redis = redis.StrictRedis(host='localhost', port=6379, db=0, decode_responses=True)
redis_score_index = RedisScoreIndex(tf_idf_score_redis)
# abs_rindexdb = IndexDB()
# abs_rindexdb.load_from_file(config.PDATA_ROOT / "reverse_indexing/abs_rindexdb")
# print("Number of terms:", len(abs_rindexdb.inverted_index.index))
# abs_rindexdb.inverted_index.build_Nt_table()
score_db = dict()
load_from_file(score_db,
config.PDATA_ROOT / "reverse_indexing/abs_rindexdb/scored_db/default-tf-idf.score.txt")
redis_score_index.save_scored_index(score_db)
if __name__ == '__main__':
# load_tf_idf_score_to_redis_cache()
tf_idf_score_redis = redis.StrictRedis(host='localhost', port=6379, db=0, decode_responses=True)
redis_score_index = RedisScoreIndex(tf_idf_score_redis)
#
# pipe = tf_idf_score_redis.pipeline()
# pipe.smembers('t:bansal')
# pipe.smembers('t:mohit')
# pipe.smembers('t:&(&(')
# r = pipe.execute()
# print(type(r))
# print(set.union(*r))
candidate_list, results_set_list, valid_terms = redis_score_index.get_candidate_set_from_batched_terms(['bansal', 'mohit', '&(&('])
scores_dict = redis_score_index.get_scores_from_batched_term_doc_pairs(valid_terms, results_set_list)
print(scores_dict)
print(redis_score_index.scored_dict_ranking(candidate_list, scores_dict, top_k=5))
print(tf_idf_score_redis.get('s:mohit:Mohit Banerji'))
# saved_item = {
# 'a': {'x': 1.0, 'y': 2.0},
# 'b': {'x': 0.5, 'z': 3.0}
# }
#
# redis_score_index.save_scored_index(saved_item)
# print(redis_score_index.get_containing_document('a'))
# print(redis_score_index.get_containing_document('b'))
# print(redis_score_inde)
# for i in tqdm(range(100000)):
# redis_score_index.get_score('a', 'x')
# redis_score_index.get_containing_document('a')
# for i in tqdm(range(1000000)):
# print(redis_score_index.get_containing_document('china'))
# a = redis_score_index.get_containing_document('')
# print(len(a))
# for i in tqdm(range(100000)):
# print(redis_score_index.get_score('china', 'Beijing babbler'))
# redis_score_index.redis_db.get('china')
# redis_score_index.get_score_item('china')
# redis_score_index.redis_db.delete('foo-12345')
# redis_score_index.redis_db.sadd('foo-1234', 'bar-1', 'bar-12', 'bar-123', 'bar-1234', 'bar-12345',
# 'foo-1', 'foo-12', 'foo-123', 'foo-1234', 'foo-12345', 1)
# redis_score_index.redis_db.set('foo-12345', 'bar-123456789 bar-123456789 bar-123456789 bar-123456789 bar-123456789')
# for i in tqdm(range(1000000)):
# a = redis_score_index.redis_db.get('foo-12345').decode('utf-8')
# a = redis_score_index.redis_db.get('foo-12345')
# for _ in range(10000):
# a.decode('utf-8')
# print(a)
# a = redis_score_index.redis_db.smembers('foo-1234')
# print(a)
# for _ in range(1000):
# for e in a:
# e.decode('utf-8')
# print(a)
| 2.65625
| 3
|
applications/Sist01/controllers/produto.py
|
BetinRibeiro/web2py_crediario
| 2
|
12783833
|
# -*- coding: utf-8 -*-
# try something like
def listar_merc_envio():
proj = db.projeto(request.args(0, cast=int))
rows = db(db.mercadoria_enviada.projeto == request.args(0, cast=int)).select()
return locals()
def inserir_merc_envio():
proj = db.projeto(request.args(0, cast=int))
db.mercadoria_enviada.projeto.default = proj.id
db.mercadoria_enviada.projeto.readable = False
db.mercadoria_enviada.projeto.writable = False
merc = db(db.mercadoria_enviada.projeto==proj.id).select()
form = SQLFORM(db.mercadoria_enviada).process()
if form.accepted:
response.flash = 'Formulario aceito'
redirect(URL('listar_merc_envio', args=proj.id))
elif form.errors:
response.flash = 'Formulario não aceito'
else:
response.flash = 'Preencha o formulario'
return locals()
def alterar_merc_envio():
merc = db.mercadoria_enviada(request.args(0, cast=int))
proj = db.projeto(merc.projeto)
db.mercadoria_enviada.projeto.readable = False
db.mercadoria_enviada.projeto.writable = False
form = SQLFORM(db.mercadoria_enviada, request.args(0, cast=int))
if form.process().accepted:
session.flash = 'atualizada'
redirect(URL('listar_merc_envio', args=proj.id))
elif form.errors:
response.flash = 'Erros no formulário!'
else:
if not response.flash:
response.flash = 'Preencha o formulário!'
return locals()
def subir_dados_mercadoria_envio():
iid = request.args(0, cast=int)
quant = request.args(1)
custo = request.args(3)
preco = request.args(2)
proj = db(db.projeto.id == iid).select().first()
proj.update_record(total_custo_envio=custo)
proj.update_record(total_quantidade_envio=quant)
proj.update_record(total_preco_envio=preco)
db.projeto.total_quantidade_envio.readable = True
db.projeto.total_quantidade_envio.writable = False
db.projeto.total_preco_envio.readable = True
db.projeto.total_preco_envio.writable = False
db.projeto.total_custo_envio.readable = True
db.projeto.total_custo_envio.writable = False
db.projeto.nome.readable = False
db.projeto.nome.writable = False
db.projeto.nome_chefe.readable = False
db.projeto.nome_chefe.writable = False
db.projeto.vale_saida_chefe.readable = False
db.projeto.vale_saida_chefe.writable = False
db.projeto.comissao_chefe.readable = False
db.projeto.comissao_chefe.writable = False
db.projeto.data_saida_venda.readable = False
db.projeto.data_saida_venda.writable = False
db.projeto.adiantamento_saida_venda.readable = False
db.projeto.adiantamento_saida_venda.writable = False
db.projeto.data_cobranca.readable = False
db.projeto.data_cobranca.writable = False
db.projeto.primeira_cidade.readable = False
db.projeto.primeira_cidade.writable = False
form = SQLFORM(db.projeto, request.args(0, cast=int))
if form.process().accepted:
session.flash = ' atualizado'
redirect(URL('listar_merc_envio', args=iid))
elif form.errors:
response.flash = 'Erros no formulário!'
else:
if not response.flash:
response.flash = 'Preencha o formulário!'
return locals()
def listar_merc_retorno():
proj = db.projeto(request.args(0, cast=int))
rows = db(db.mercadoria_retorno.projeto == request.args(0, cast=int)).select()
return locals()
def inserir_merc_retorno():
proj = db.projeto(request.args(0, cast=int))
db.mercadoria_retorno.projeto.default = proj.id
db.mercadoria_retorno.projeto.readable = False
db.mercadoria_retorno.projeto.writable = False
merc = db(db.mercadoria_retorno.projeto==proj.id).select()
form = SQLFORM(db.mercadoria_retorno).process()
if form.accepted:
response.flash = 'Formulario aceito'
redirect(URL('listar_merc_retorno', args=proj.id))
elif form.errors:
response.flash = 'Formulario não aceito'
else:
response.flash = 'Preencha o formulario'
return locals()
def alterar_merc_retorno():
merc = db.mercadoria_retorno(request.args(0, cast=int))
proj = db.projeto(merc.projeto)
db.mercadoria_retorno.projeto.readable = False
db.mercadoria_retorno.projeto.writable = False
form = SQLFORM(db.mercadoria_retorno, request.args(0, cast=int))
if form.process().accepted:
session.flash = 'atualizada'
redirect(URL('listar_merc_retorno', args=proj.id))
elif form.errors:
response.flash = 'Erros no formulário!'
else:
if not response.flash:
response.flash = 'Preencha o formulário!'
return locals()
def subir_dados_mercadoria_retorno():
iid = request.args(0, cast=int)
quant = request.args(1)
custo = request.args(3)
preco = request.args(2)
proj = db(db.projeto.id == iid).select().first()
proj.update_record(total_custo_retorno=custo)
proj.update_record(total_quantidade_retorno=quant)
proj.update_record(total_preco_retorno=preco)
db.projeto.total_quantidade_retorno.readable = True
db.projeto.total_quantidade_retorno.writable = False
db.projeto.total_preco_retorno.readable = True
db.projeto.total_preco_retorno.writable = False
db.projeto.total_custo_retorno.readable = True
db.projeto.total_custo_retorno.writable = False
db.projeto.nome.readable = False
db.projeto.nome.writable = False
db.projeto.nome_chefe.readable = False
db.projeto.nome_chefe.writable = False
db.projeto.vale_saida_chefe.readable = False
db.projeto.vale_saida_chefe.writable = False
db.projeto.comissao_chefe.readable = False
db.projeto.comissao_chefe.writable = False
db.projeto.data_saida_venda.readable = False
db.projeto.data_saida_venda.writable = False
db.projeto.adiantamento_saida_venda.readable = False
db.projeto.adiantamento_saida_venda.writable = False
db.projeto.data_cobranca.readable = False
db.projeto.data_cobranca.writable = False
db.projeto.primeira_cidade.readable = False
db.projeto.primeira_cidade.writable = False
form = SQLFORM(db.projeto, request.args(0, cast=int))
if form.process().accepted:
session.flash = ' atualizado'
redirect(URL('listar_merc_retorno', args=iid))
elif form.errors:
response.flash = 'Erros no formulário!'
else:
if not response.flash:
response.flash = 'Preencha o formulário!'
return locals()
| 2.65625
| 3
|
aparent/losses/__init__.py
|
johli/aparent-resnet
| 20
|
12783834
|
<reponame>johli/aparent-resnet
from aparent.losses.aparent_losses import *
| 1.015625
| 1
|
schedgym/scheduler/easy_scheduler.py
|
renatolfc/sched-rl-gym
| 2
|
12783835
|
<filename>schedgym/scheduler/easy_scheduler.py<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""easy_scheduler - A scheduler that uses easy backfilling.
"""
from typing import List, Tuple, Optional
from schedgym.job import Job, JobStatus
from schedgym.scheduler import Scheduler
from schedgym.event import JobEvent
class EasyScheduler(Scheduler):
"""EASY backfilling scheduler.
    This is a backfilling scheduler that uses the EASY strategy. Upon
    encountering the first job that cannot be scheduled, it makes a reservation
    for that job at the earliest time it could start.
    Jobs smaller than the one currently holding the reservation may start,
    provided they do not delay the reserved job.
"""
reservation: Optional[Tuple[JobEvent, JobEvent]]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.reservation = None
def _handle_reservation(self) -> None:
if not self.reservation:
return
start, finish = self.reservation
if (start.time == self.current_time
or start.job.status != JobStatus.WAITING):
# Reservation will be fulfilled
self.reservation = None
return
resources = self.can_schedule_now(start.job)
if resources:
self.queue_waiting.remove(start.job)
self.job_events.remove(start)
self.job_events.remove(finish)
self.assign_schedule(
start.job, resources, self.current_time
)
self.reservation = None
def schedule(self) -> None:
ignored_jobs: List[Job] = []
self._handle_reservation()
for job in self.queue_admission:
resources = self.can_schedule_now(job)
if resources:
self.assign_schedule(job, resources, self.current_time)
else:
if not self.reservation:
# This is the first job without a reservation.
# We're doing EASY backfilling, so we create a
# reservation for this one job and keep going
time, resources = self.find_first_time_for(job)
if not resources:
raise AssertionError("Something is terribly wrong")
self.reservation = self.assign_schedule(
job, resources, time
)
else:
# We already have a reservation, so we skip this job
ignored_jobs.append(job)
self.queue_admission = ignored_jobs
| 3.25
| 3
|
loans/migrations/0003_auto_20200617_0103.py
|
minsystems/minloansng
| 0
|
12783836
|
<reponame>minsystems/minloansng<filename>loans/migrations/0003_auto_20200617_0103.py
# Generated by Django 3.0.2 on 2020-06-17 08:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('loans', '0002_auto_20200518_0655'),
]
operations = [
migrations.AlterField(
model_name='loan',
name='collection_date',
field=models.DateTimeField(blank=True, help_text='Date User Collects The Loan Money', null=True),
),
]
| 1.460938
| 1
|
clipkit/args_processing.py
|
JLSteenwyk/ClipKIT
| 28
|
12783837
|
import logging
import os.path
import sys
from .modes import TrimmingMode
logger = logging.getLogger(__name__)
def process_args(args) -> dict:
"""
Process args from argparser and set defaults
"""
input_file = args.input
output_file = args.output or f"{input_file}.clipkit"
if not os.path.isfile(input_file):
logger.warning("Input file does not exist")
sys.exit()
if input_file == output_file:
logger.warning("Input and output files can't have the same name.")
sys.exit()
# assign optional arguments
complement = args.complementary or False
mode = TrimmingMode(args.mode) if args.mode else TrimmingMode.smart_gap
gaps = float(args.gaps) if args.gaps is not None else 0.9
use_log = args.log or False
return dict(
input_file=input_file,
output_file=output_file,
input_file_format=args.input_file_format,
output_file_format=args.output_file_format,
complement=complement,
gaps=gaps,
mode=mode,
use_log=use_log,
)
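# Hedged usage sketch (illustration only; the attribute names mirror what is read
# above, and "aln.fa" / "fasta" are made-up values for an existing input file):
#
#     from argparse import Namespace
#     process_args(Namespace(
#         input="aln.fa", output=None, input_file_format="fasta",
#         output_file_format="fasta", complementary=False, mode=None,
#         gaps=None, log=False,
#     ))
#     # -> output defaults to "aln.fa.clipkit", gaps to 0.9,
#     #    and mode to TrimmingMode.smart_gap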
| 2.8125
| 3
|
lowfat/migrations/0051_auto_20160804_1425.py
|
elena-kolomeets/lowfat
| 6
|
12783838
|
<reponame>elena-kolomeets/lowfat
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-08-04 14:25
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('lowfat', '0050_auto_20160720_1533'),
]
operations = [
migrations.RenameField(
model_name='expense',
old_name='proof',
new_name='claim',
),
]
| 1.390625
| 1
|
comments_microservice/comments_microservice/urls.py
|
RolesFGA/2018.2-Roles_Comments
| 1
|
12783839
|
from django.contrib import admin
from django.urls import include, path
from django.conf.urls import url
from votes import urls
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('comments.urls')),
path('api-auth/', include('rest_framework.urls')),
url(r'^', include(urls)),
]
| 1.71875
| 2
|
seek.py
|
printNoahKemp/Seek
| 0
|
12783840
|
"""Seek behaviour in Pygame"""
import pygame
import numpy as np
import math
WIDTH,HEIGHT = 700,400
screen = pygame.display.set_mode((WIDTH,HEIGHT))
class Seeker():
def __init__(self,x,y):
super().__init__()
self.pos=np.array([x,y])
self.vel=np.array([0,0])
self.acc=np.array([0,0])
self.max_speed=0.1
def Draw(self):
#pygame.draw.polygon(screen, (0,255,255), ((self.pos),(self.pos+(8,-20)),(self.pos+(18,0))))
pygame.draw.circle(screen, (0,255,255), self.pos, 10)
def Update(self):
self.vel = np.add(self.vel, self.acc)
self.pos = np.subtract(self.pos, self.vel)
self.acc = np.multiply(self.acc,[0,0])
def Apply(self,force):
self.acc = np.add(self.acc,force)
def Seek(self,target):
desired_vel = self.pos - target
desired_vel = desired_vel/math.sqrt(desired_vel[0]*desired_vel[0]+desired_vel[1]*desired_vel[1])
desired_vel = desired_vel * self.max_speed
steering_vel = desired_vel - self.vel
self.Apply(steering_vel)
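    # Comment added for clarity: this is the classic "seek" steering rule. The
    # desired velocity is a unit vector scaled to max_speed (pointing from the
    # target to the agent here, because Update() subtracts the velocity from the
    # position), and the applied force is desired velocity minus current velocity,
    # so the agent turns gradually instead of snapping onto the target. Note the
    # normalisation divides by zero if the agent sits exactly on the target.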
def Snitch(pos):
pygame.draw.circle(screen, (255,215,0), pos,10)
pygame.init()
agents=[]
for i in range(20):
agents.append(Seeker(i*100,i*100))
running = True
while running:
screen.fill((0,0,0))
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
#Get target position
target_pos= np.array(pygame.mouse.get_pos())
Snitch(target_pos)
for agent in agents:
agent.Seek(target_pos)
agent.Update()
agent.Draw()
pygame.display.update()
#pygame.time.Clock().tick(30)
| 3.6875
| 4
|
pipenv/vendor/pythonfinder/cli.py
|
mlhamel/pipenv
| 1
|
12783841
|
<reponame>mlhamel/pipenv
#!/usr/bin/env python
# -*- coding=utf-8 -*-
import click
import crayons
import sys
from . import __version__
from .pythonfinder import PythonFinder
# @click.group(invoke_without_command=True, context_settings=CONTEXT_SETTINGS)
@click.command()
@click.option('--find', default=False, nargs=1, help="Find a specific python version.")
@click.option('--findall', is_flag=True, default=False, help="Find all python versions.")
# @click.version_option(prog_name=crayons.normal('pyfinder', bold=True), version=__version__)
@click.pass_context
def cli(
ctx, find=False, findall=False
):
if not find and not findall:
click.echo('Please provide a command', color='red')
sys.exit(1)
if find:
if any([find.startswith('{0}'.format(n)) for n in range(10)]):
found = PythonFinder.from_version(find.strip())
else:
found = PythonFinder.from_line()
if found:
click.echo('Found Python Version: {0}'.format(found), color='white')
sys.exit(0)
else:
#TODO: implement this
click.echo('This is not yet implemented')
sys.exit(0)
sys.exit()
if __name__ == '__main__':
cli()
| 2.375
| 2
|
mongodb_consistent_backup/Common/Util.py
|
cprato79/mongodb_consistent_backup
| 1
|
12783842
|
import socket
from dateutil import parser
from mongodb_consistent_backup.Errors import OperationError
def config_to_string(config):
config_vars = ""
for key in config:
config_vars += "%s=%s, " % (key, config[key])
    return config_vars[:-2]  # strip the trailing ", " separator
def is_datetime(string):
try:
parser.parse(string)
return True
except:
return False
def parse_method(method):
return method.rstrip().lower()
def validate_hostname(hostname):
try:
if ":" in hostname:
hostname, port = hostname.split(":")
socket.gethostbyname(hostname)
    except socket.error as e:
raise OperationError("Could not resolve host '%s', error: %s" % (hostname, e))
| 2.6875
| 3
|
utils.py
|
Ahmednull/L2S-Net
| 21
|
12783843
|
<filename>utils.py<gh_stars>10-100
import numpy as np
import torch
import torch.nn as nn
import os
import scipy.io as sio
import cv2
import math
from math import cos, sin
from pathlib import Path
import subprocess
import re
from model import L2CS
import torchvision
import sys
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(text):
'''
alist.sort(key=natural_keys) sorts in human order
http://nedbatchelder.com/blog/200712/human_sorting.html
(See Toothy's implementation in the comments)
'''
return [ atoi(c) for c in re.split(r'(\d+)', text) ]
def gazeto3d(gaze):
gaze_gt = np.zeros([3])
gaze_gt[0] = -np.cos(gaze[1]) * np.sin(gaze[0])
gaze_gt[1] = -np.sin(gaze[1])
gaze_gt[2] = -np.cos(gaze[1]) * np.cos(gaze[0])
return gaze_gt
def angular(gaze, label):
total = np.sum(gaze * label)
return np.arccos(min(total/(np.linalg.norm(gaze)* np.linalg.norm(label)), 0.9999999))*180/np.pi
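# Worked example for gazeto3d/angular above (added as illustration): a gaze angle
# pair of (0, 0) maps to the unit vector (0, 0, -1); comparing that vector with
# itself gives a dot product of 1, which the min(..., 0.9999999) clamp turns into
# an angular error of about 0.03 degrees rather than exactly 0 (the clamp keeps
# arccos from failing on values that drift slightly above 1).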
def draw_gaze(a,b,c,d,image_in, pitchyaw, thickness=2, color=(255, 255, 0),sclae=2.0):
"""Draw gaze angle on given image with a given eye positions."""
image_out = image_in
(h, w) = image_in.shape[:2]
length = w/2
pos = (int(a+c / 2.0), int(b+d / 2.0))
if len(image_out.shape) == 2 or image_out.shape[2] == 1:
image_out = cv2.cvtColor(image_out, cv2.COLOR_GRAY2BGR)
dx = -length * np.sin(pitchyaw[0]) * np.cos(pitchyaw[1])
dy = -length * np.sin(pitchyaw[1])
cv2.arrowedLine(image_out, tuple(np.round(pos).astype(np.int32)),
tuple(np.round([pos[0] + dx, pos[1] + dy]).astype(int)), color,
thickness, cv2.LINE_AA, tipLength=0.18)
return image_out
def select_device(device='', batch_size=None):
# device = 'cpu' or '0' or '0,1,2,3'
s = f'YOLOv3 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string
cpu = device.lower() == 'cpu'
if cpu:
os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False
elif device: # non-cpu device requested
os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable
assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested' # check availability
cuda = not cpu and torch.cuda.is_available()
if cuda:
devices = device.split(',') if device else range(torch.cuda.device_count()) # i.e. 0,1,6,7
n = len(devices) # device count
if n > 1 and batch_size: # check batch_size is divisible by device_count
assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'
space = ' ' * len(s)
for i, d in enumerate(devices):
p = torch.cuda.get_device_properties(i)
s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\n" # bytes to MB
else:
s += 'CPU\n'
return torch.device('cuda:0' if cuda else 'cpu')
def spherical2cartesial(x):
output = torch.zeros(x.size(0),3)
output[:,2] = -torch.cos(x[:,1])*torch.cos(x[:,0])
output[:,0] = torch.cos(x[:,1])*torch.sin(x[:,0])
output[:,1] = torch.sin(x[:,1])
return output
def compute_angular_error(input,target):
input = spherical2cartesial(input)
target = spherical2cartesial(target)
input = input.view(-1,3,1)
target = target.view(-1,1,3)
output_dot = torch.bmm(target,input)
output_dot = output_dot.view(-1)
output_dot = torch.acos(output_dot)
output_dot = output_dot.data
output_dot = 180*torch.mean(output_dot)/math.pi
return output_dot
def softmax_temperature(tensor, temperature):
result = torch.exp(tensor / temperature)
result = torch.div(result, torch.sum(result, 1).unsqueeze(1).expand_as(result))
return result
def git_describe(path=Path(__file__).parent): # path must be a directory
# return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe
s = f'git -C {path} describe --tags --long --always'
try:
return subprocess.check_output(s, shell=True, stderr=subprocess.STDOUT).decode()[:-1]
except subprocess.CalledProcessError as e:
return '' # not a git repository
| 2.46875
| 2
|
movingpandas/tests/test_geometry_utils.py
|
DeemanOne/movingpandas
| 0
|
12783844
|
<reponame>DeemanOne/movingpandas
# -*- coding: utf-8 -*-
import pytest
from math import sqrt
from shapely.geometry import MultiPoint, Point
from movingpandas.geometry_utils import (
azimuth,
calculate_initial_compass_bearing,
angular_difference,
mrr_diagonal,
measure_distance_geodesic,
measure_distance_euclidean,
measure_distance_spherical,
)
class TestGeometryUtils:
def test_compass_bearing_east(self):
assert calculate_initial_compass_bearing(Point(0, 0), Point(10, 0)) == 90
def test_compass_bearing_west(self):
assert calculate_initial_compass_bearing(Point(0, 0), Point(-10, 0)) == 270
def test_compass_bearing_north(self):
assert calculate_initial_compass_bearing(Point(0, 0), Point(0, 10)) == 0
def test_compass_bearing_south(self):
assert calculate_initial_compass_bearing(Point(0, 0), Point(0, -10)) == 180
def test_azimuth_east(self):
assert azimuth(Point(0, 0), Point(1, 0)) == 90
assert azimuth(Point(0, 0), Point(100, 0)) == 90
def test_azimuth_west(self):
assert azimuth(Point(0, 0), Point(-10, 0)) == 270
def test_azimuth_north(self):
assert azimuth(Point(0, 0), Point(0, 1)) == 0
def test_azimuth_south(self):
assert azimuth(Point(0, 0), Point(0, -1)) == 180
def test_azimuth_northeast(self):
assert azimuth(Point(0, 0), Point(1, 1)) == 45
def test_azimuth_southeast(self):
assert azimuth(Point(0, 0), Point(1, -1)) == 135
def test_azimuth_southwest(self):
assert azimuth(Point(0, 0), Point(-1, -1)) == 225
def test_azimuth_northwest(self):
assert azimuth(Point(100, 100), Point(99, 101)) == 315
    def test_angular_difference_tohigher(self):
        assert angular_difference(1, 5) == 4
    def test_angular_difference_tolower(self):
        assert angular_difference(355, 5) == 10
    def test_angular_difference_halfcircle(self):
        assert angular_difference(180, 0) == 180
    def test_angular_difference_same(self):
        assert angular_difference(45, 45) == 0
    def test_angular_difference_onenegative(self):
        assert angular_difference(-45, 45) == 90
    def test_angular_difference_twonegative(self):
        assert angular_difference(-200, -160) == 40
def test_mrr_diagonal(self):
assert mrr_diagonal(
MultiPoint([Point(0, 0), Point(0, 2), Point(2, 0), Point(2, 2)])
) == sqrt(8)
def test_mrr_diagonal_one_point(self):
assert mrr_diagonal(Point(2, 3)) == 0
def test_euclidean_distance(self):
assert (measure_distance_euclidean(Point(0, 0), Point(0, 1))) == 1
def test_spherical_distance(self):
assert measure_distance_spherical(
Point(-74.00597, 40.71427), Point(-118.24368, 34.05223)
) == pytest.approx(3935735)
def test_geodesic_distance(self):
# Distance between NYC, NY USA and Los Angeles, CA USA is
# 3944411.0951634306 meters
assert (
measure_distance_geodesic(
Point(-74.00597, 40.71427), Point(-118.24368, 34.05223)
)
== 3944411.0951634306
)
def test_measure_distance_euclidean_throws_type_error(self):
with pytest.raises(TypeError):
measure_distance_euclidean((0, 0), (0, 1))
def test_measure_distance_spherical_throws_type_error(self):
with pytest.raises(TypeError):
measure_distance_spherical((0, 0), (0, 1))
def test_measure_distance_geodesic_throws_type_error(self):
with pytest.raises(TypeError):
measure_distance_geodesic((0, 0), (0, 1))
def test_calculate_initial_compass_bearing_throws_type_error(self):
with pytest.raises(TypeError):
calculate_initial_compass_bearing((0, 0), (0, 1))
def test_azimuth_throws_type_error(self):
with pytest.raises(TypeError):
azimuth((0, 0), (0, 1))
| 2.578125
| 3
|
bin/changebootloader.py
|
peterferrie/anti-m
| 31
|
12783845
|
#!/usr/bin/env python3
import sys
import os.path
target_disk_image, bootloader = sys.argv[1:]
ext = os.path.splitext(target_disk_image)[-1].lower()
assert(ext in (".dsk", ".do", ".po", ".2mg"))
if ext == ".2mg":
offset = 64
else:
offset = 0
with open(bootloader, 'rb') as f:
boot = f.read()
assert(len(boot) == 512)
with open(target_disk_image, 'rb') as f:
data = bytearray(f.read())
data[offset:offset+len(boot)] = boot
with open(target_disk_image, 'wb') as f:
f.write(data)
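# Example invocation (illustration only; the file names are hypothetical):
#
#     python3 changebootloader.py game.po boot.bin
#
# This overwrites the first 512 bytes of game.po with boot.bin, or the 512 bytes
# after the 64-byte header when the target is a .2mg image.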
| 2.359375
| 2
|
reframe/core/containers.py
|
jacwah/reframe
| 0
|
12783846
|
# Copyright 2016-2022 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# ReFrame Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
import abc
import reframe.core.fields as fields
import reframe.utility.typecheck as typ
from reframe.core.exceptions import ContainerError
_STAGEDIR_MOUNT = '/rfm_workdir'
class ContainerPlatform(abc.ABC):
'''The abstract base class of any container platform.'''
#: The default mount location of the test case stage directory inside the
#: container
#: The container image to be used for running the test.
#:
#: :type: :class:`str` or :class:`None`
#: :default: :class:`None`
image = fields.TypedField(str, type(None))
#: The command to be executed within the container.
#:
#: If no command is given, then the default command of the corresponding
#: container image is going to be executed.
#:
#: .. versionadded:: 3.5.0
#: Changed the attribute name from `commands` to `command` and its type
#: to a string.
#:
#: :type: :class:`str` or :class:`None`
#: :default: :class:`None`
command = fields.TypedField(str, type(None))
_commands = fields.TypedField(typ.List[str])
#: The commands to be executed within the container.
#:
#: .. deprecated:: 3.5.0
#: Please use the `command` field instead.
#:
#: :type: :class:`list[str]`
#: :default: ``[]``
commands = fields.DeprecatedField(
_commands,
'The `commands` field is deprecated, please use the `command` field '
'to set the command to be executed by the container.',
fields.DeprecatedField.OP_SET, from_version='3.5.0'
)
#: Pull the container image before running.
#:
#: This does not have any effect for the `Singularity` container platform.
#:
#: .. versionadded:: 3.5
#:
#: :type: :class:`bool`
#: :default: ``True``
pull_image = fields.TypedField(bool)
#: List of mount point pairs for directories to mount inside the container.
#:
#: Each mount point is specified as a tuple of
#: ``(/path/in/host, /path/in/container)``. The stage directory of the
#: ReFrame test is always mounted under ``/rfm_workdir`` inside the
    #: container, independently of this field.
#:
#: :type: :class:`list[tuple[str, str]]`
#: :default: ``[]``
mount_points = fields.TypedField(typ.List[typ.Tuple[str, str]])
#: Additional options to be passed to the container runtime when executed.
#:
#: :type: :class:`list[str]`
#: :default: ``[]``
options = fields.TypedField(typ.List[str])
_workdir = fields.TypedField(str, type(None))
#: The working directory of ReFrame inside the container.
#:
#: This is the directory where the test's stage directory is mounted inside
    #: the container. This directory is always mounted regardless of whether
#: :attr:`mount_points` is set or not.
#:
#: .. deprecated:: 3.5
#: Please use the `options` field to set the working directory.
#:
#: :type: :class:`str`
#: :default: ``/rfm_workdir``
workdir = fields.DeprecatedField(
_workdir,
'The `workdir` field is deprecated, please use the `options` field to '
'set the container working directory',
fields.DeprecatedField.OP_SET, from_version='3.5.0'
)
def __init__(self):
self.image = None
self.command = None
# NOTE: Here we set the target fields directly to avoid the deprecation
# warnings
self._commands = []
self._workdir = _STAGEDIR_MOUNT
self.mount_points = []
self.options = []
self.pull_image = True
@abc.abstractmethod
def emit_prepare_commands(self, stagedir):
'''Returns commands for preparing this container for running.
Such a command could be for pulling the container image from a
repository.
.. note:
This method is relevant only to developers of new container
platform backends.
:meta private:
:arg stagedir: The stage directory of the test.
'''
@abc.abstractmethod
def launch_command(self, stagedir):
'''Returns the command for running :attr:`commands` with this container
platform.
.. note:
This method is relevant only to developers of new container
platforms.
:meta private:
:arg stagedir: The stage directory of the test.
'''
def validate(self):
if self.image is None:
raise ContainerError('no image specified')
def __str__(self):
return type(self).__name__
def __rfm_json_encode__(self):
return str(self)
class Docker(ContainerPlatform):
'''Container platform backend for running containers with `Docker
<https://www.docker.com/>`__.'''
def emit_prepare_commands(self, stagedir):
return [f'docker pull {self.image}'] if self.pull_image else []
def launch_command(self, stagedir):
super().launch_command(stagedir)
mount_points = self.mount_points + [(stagedir, _STAGEDIR_MOUNT)]
run_opts = [f'-v "{mp[0]}":"{mp[1]}"' for mp in mount_points]
run_opts += self.options
if self.command:
return (f'docker run --rm {" ".join(run_opts)} '
f'{self.image} {self.command}')
if self.commands:
return (f"docker run --rm {' '.join(run_opts)} {self.image} "
f"bash -c 'cd {self.workdir}; {'; '.join(self.commands)}'")
return f'docker run --rm {" ".join(run_opts)} {self.image}'
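# Illustration (not part of the original source): with image='ubuntu:18.04',
# command='cat /etc/os-release' and a stage directory of '/path/to/stage',
# launch_command() returns roughly
#   docker run --rm -v "/path/to/stage":"/rfm_workdir" ubuntu:18.04 cat /etc/os-release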
class Sarus(ContainerPlatform):
'''Container platform backend for running containers with `Sarus
<https://sarus.readthedocs.io>`__.'''
#: Enable MPI support when launching the container.
#:
#: :type: boolean
#: :default: :class:`False`
with_mpi = fields.TypedField(bool)
def __init__(self):
super().__init__()
self.with_mpi = False
self._command = 'sarus'
def emit_prepare_commands(self, stagedir):
        # The image reference format that Sarus uses is
# <reposerver>/<user>/<image>:<tag>. If an image was loaded
# locally from a tar file, the <reposerver> is 'load'.
if not self.pull_image or self.image.startswith('load/'):
return []
else:
return [f'{self._command} pull {self.image}']
def launch_command(self, stagedir):
super().launch_command(stagedir)
mount_points = self.mount_points + [(stagedir, _STAGEDIR_MOUNT)]
run_opts = [f'--mount=type=bind,source="{mp[0]}",destination="{mp[1]}"'
for mp in mount_points]
if self.with_mpi:
run_opts.append('--mpi')
run_opts += self.options
if self.command:
return (f'{self._command} run {" ".join(run_opts)} {self.image} '
f'{self.command}')
if self.commands:
return (f"{self._command} run {' '.join(run_opts)} {self.image} "
f"bash -c 'cd {self.workdir}; {'; '.join(self.commands)}'")
return f'{self._command} run {" ".join(run_opts)} {self.image}'
class Shifter(Sarus):
'''Container platform backend for running containers with `Shifter
<https://www.nersc.gov/research-and-development/user-defined-images/>`__.
'''
def __init__(self):
super().__init__()
self._command = 'shifter'
class Singularity(ContainerPlatform):
'''Container platform backend for running containers with `Singularity
<https://sylabs.io/>`__.'''
#: Enable CUDA support when launching the container.
#:
#: :type: boolean
#: :default: :class:`False`
with_cuda = fields.TypedField(bool)
def __init__(self):
super().__init__()
self.with_cuda = False
def emit_prepare_commands(self, stagedir):
return []
def launch_command(self, stagedir):
super().launch_command(stagedir)
mount_points = self.mount_points + [(stagedir, _STAGEDIR_MOUNT)]
run_opts = [f'-B"{mp[0]}:{mp[1]}"' for mp in mount_points]
if self.with_cuda:
run_opts.append('--nv')
run_opts += self.options
if self.command:
return (f'singularity exec {" ".join(run_opts)} '
f'{self.image} {self.command}')
if self.commands:
return (f"singularity exec {' '.join(run_opts)} {self.image} "
f"bash -c 'cd {self.workdir}; {'; '.join(self.commands)}'")
return f'singularity run {" ".join(run_opts)} {self.image}'
class ContainerPlatformField(fields.TypedField):
def __init__(self, *other_types):
super().__init__(ContainerPlatform, *other_types)
def __set__(self, obj, value):
if isinstance(value, str):
try:
value = globals()[value]()
except KeyError:
raise ValueError(
f'unknown container platform: {value}') from None
super().__set__(obj, value)
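# A minimal usage sketch (not part of the original module; the image name and
# stage directory are illustrative): instantiate a backend directly and
# inspect the commands it would emit.
if __name__ == '__main__':
    _demo = Docker()
    _demo.image = 'ubuntu:18.04'
    _demo.command = 'cat /etc/os-release'
    _demo.validate()
    print(_demo.emit_prepare_commands('/path/to/stage'))
    print(_demo.launch_command('/path/to/stage'))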
| 1.898438
| 2
|
salesforce_client/util.py
|
pylab-bd/salesforce-api-client
| 0
|
12783847
|
# NOTE: assumed import location -- these exception classes mirror the
# simple-salesforce exception hierarchy and are expected to be defined in this
# package's exceptions module.
from .exceptions import (
    SalesforceMoreThanOneRecord,
    SalesforceMalformedRequest,
    SalesforceExpiredSession,
    SalesforceRefusedRequest,
    SalesforceResourceNotFound,
    SalesforceGeneralError,
)
def date_to_iso8601(date):
"""Returns an ISO8601 string from a date"""
datetimestr = date.strftime('%Y-%m-%dT%H:%M:%S')
timezone_sign = date.strftime('%z')[0:1]
timezone_str = '%s:%s' % (
date.strftime('%z')[1:3],
date.strftime('%z')[3:5],
)
return f'{datetimestr}{timezone_sign}{timezone_str}'.replace(
':', '%3A'
).replace(
'+', '%2B'
)
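# Example (illustrative): datetime(2021, 1, 2, 3, 4, 5, tzinfo=timezone.utc)
# is rendered as '2021-01-02T03%3A04%3A05%2B00%3A00', i.e. the ISO8601 string
# '2021-01-02T03:04:05+00:00' with ':' and '+' percent-encoded for use in URLs.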
def exception_handler(result, name=""):
"""Exception router. Determines which error to raise for bad results
Arguments:
        result {requests.Response} -- the response returned by the API call
    Keyword Arguments:
        name {str} -- name of the Salesforce object being accessed (default: {""})
"""
    try:
        response_content = result.json()
    # pylint: disable=broad-except
    except Exception:
        response_content = result.text
exc_map = {
300: SalesforceMoreThanOneRecord,
400: SalesforceMalformedRequest,
401: SalesforceExpiredSession,
403: SalesforceRefusedRequest,
404: SalesforceResourceNotFound,
}
exc_cls = exc_map.get(result.status_code, SalesforceGeneralError)
raise exc_cls(result.url, result.status_code, name, response_content)
def call_salesforce(url, method, session, headers, **kwargs):
"""Utility method for performing HTTP call to Salesforce.
Returns a `requests.result` object.
"""
additional_headers = kwargs.pop('additional_headers', dict())
headers.update(additional_headers or dict())
result = session.request(method, url, headers=headers, **kwargs)
if result.status_code >= 300:
exception_handler(result)
return result
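# A minimal usage sketch (not part of the original module; the URL and token
# are placeholders): call_salesforce() wraps requests.Session.request() and
# raises one of the Salesforce* exceptions for any status code >= 300.
if __name__ == '__main__':
    import requests
    session = requests.Session()
    result = call_salesforce(
        url='https://example.my.salesforce.com/services/data/v52.0/sobjects/',
        method='GET',
        session=session,
        headers={'Authorization': 'Bearer <access token>'},
    )
    print(result.json())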
| 3.328125
| 3
|
fast_tmp/depends/pageing.py
|
Chise1/fast-tmp2
| 1
|
12783848
|
# -*- encoding: utf-8 -*-
"""
@File : pageing.py
@Time : 2021/1/18 10:11
@Author : chise
@Email : <EMAIL>
@Software: PyCharm
@info    : pagination
"""
from pydantic.main import BaseModel
class PageDepend(BaseModel):  # pagination parameters
perPage: int = 10
page: int = 1
def page_depend(perPage: int = 10, page: int = 1) -> PageDepend:
return PageDepend(page=page, perPage=perPage)
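# Usage sketch (not part of the original module; the app and route below are
# illustrative): the dependency maps the `page`/`perPage` query parameters to
# a PageDepend instance, e.g.
#
#   from fastapi import Depends, FastAPI
#   app = FastAPI()
#
#   @app.get("/items")
#   async def list_items(paging: PageDepend = Depends(page_depend)):
#       offset = (paging.page - 1) * paging.perPage
#       return {"offset": offset, "limit": paging.perPage}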
| 2.0625
| 2
|
application/__init__.py
|
stadibo/friseur-manager
| 0
|
12783849
|
import os
from flask import Flask
from flask_bcrypt import Bcrypt
from flask_wtf.csrf import CSRFProtect
from flask_bootstrap import Bootstrap
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager, current_user
from functools import wraps
from config import Config
app = Flask(__name__)
app.config.from_object(Config)
bcrypt = Bcrypt(app)
csrf = CSRFProtect(app)
bootstrap = Bootstrap(app)
db = SQLAlchemy(app)
# Login functionality
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = "auth_login"
login_manager.login_message = "Please log in to use this functionality."
@login_manager.user_loader
def load_user(user_id):
return User.query.get(user_id)
def login_required(role="ANY"):
def wrapper(fn):
@wraps(fn)
def decorated_view(*args, **kwargs):
if not current_user.is_authenticated:
return login_manager.unauthorized()
unauthorized = False
if role != "ANY":
unauthorized = True
user_role = current_user.role.name
if user_role == role:
unauthorized = False
if unauthorized:
return login_manager.unauthorized()
return fn(*args, **kwargs)
return decorated_view
return wrapper
# application
from application import views
from application.auth import views
from application.auth import models
from application.account import views
from application.appointments import models
from application.appointments import views
from application.work_days import models
from application.work_days import views
from application.auth.models import User, Role
# database table creation (errors are ignored if the tables already exist)
try:
    db.create_all()
except Exception:
    pass
try:
from application.auth.models import Role
role = Role.query.filter_by(name='USER').first()
if not role:
role = Role('USER')
db.session().add(role)
db.session().commit()
role = Role.query.filter_by(name='FRISEUR').first()
if not role:
role = Role('FRISEUR')
db.session().add(role)
db.session().commit()
role = Role.query.filter_by(name='ADMIN').first()
if not role:
role = Role('ADMIN')
db.session().add(role)
db.session().commit()
except Exception:
    pass
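# Usage sketch (not part of the original module; the route is illustrative):
# the role-aware decorator wraps a normal Flask view, e.g.
#
#   @app.route("/admin/reports")
#   @login_required(role="ADMIN")
#   def admin_reports():
#       return "only users with the ADMIN role reach this view"
#
# The default role="ANY" only requires that the user is authenticated.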
| 2.15625
| 2
|
osiris/urls.py
|
fboerman/coursebrowser
| 0
|
12783850
|
from django.urls import path, re_path, include
from . import views
app_name = 'osiris'
url_patterns_v2 = [
path('courses/all/', views.get_all_courses, name='getallcourses'),
]
urlpatterns = [
path('', views.index, name='index'),
path('api/unicodes/', views.unicodes, name='unicodes'),
path('api/<slug:uni>/<int:year>/course/<slug:code>/header/', views.get_course_header, name='getcourseheader'),
# path('api/<slug:uni>/<int:year>/course/<slug:code>/info/', views.getCourseInfo, name='getcourseinfo'),
path('api/<slug:uni>/faculties/', views.get_departments, name='faculties'),
path('api/<slug:uni>/types/', views.get_type_names, name='types'),
path('api/<slug:uni>/studies/', views.get_studies, name='studies'),
# path('api/<slug:uni>/faculty/courses/<slug:faculty>/<slug:type>/', views.getCoursesFromFaculty, name='getcoursesfromfaculty'),
re_path(r'^api/(?P<uni>[\w|\W]+)/(?P<year>[\d]+)/faculty/courses/(?P<department>[\w|\W]+)/(?P<type_shortname>[\w|\W]+)/$',
views.get_courses_from_faculty, name='getcoursesfromfaculty'),
path('api/v2/<slug:uni>/<int:year>/', include(url_patterns_v2)),
]
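# Usage sketch (not part of the original module; the argument values are
# illustrative): because app_name is set to 'osiris', these routes are
# reversed by namespaced name, e.g.
#
#   from django.urls import reverse
#   reverse('osiris:getallcourses', kwargs={'uni': 'tue', 'year': 2021})
#   reverse('osiris:getcourseheader',
#           kwargs={'uni': 'tue', 'year': 2021, 'code': '2imv10'})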
| 2.109375
| 2
|