content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
import math
def createCylinder( basePoint=(0,-1,0), tipPoint=(0,1,0), radius = 1.0, colour=(0.6,0.6,0.6), samples = 20 ):
    """
    Creates the mesh for a cylinder between the two specified points.

    Parameters:
        basePoint: centre of the bottom cap (anything PyUtils.toPoint3d accepts)
        tipPoint: centre of the top cap
        radius: cylinder radius
        colour: 3-tuple (R,G,B) or a 4-tuple (R,G,B,A)
        samples: number of segments approximating the circular cross-section

    Returns the mesh produced by create(vertices, faces, colour).
    Raises ValueError when basePoint and tipPoint coincide.
    """
    basePoint = PyUtils.toPoint3d(basePoint)
    tipPoint = PyUtils.toPoint3d(tipPoint)
    baseToTipVector = Vector3d(basePoint,tipPoint)
    if baseToTipVector.isZeroVector() :
        raise ValueError( 'Invalid points for cylinder: base and tip are equal!' )
    baseToTipUnitVector = baseToTipVector.unit()
    # Build two unit vectors spanning the plane perpendicular to the axis.
    # Each cross product is retried with a different fixed axis when the
    # first choice is near-parallel to the cylinder axis (length < 0.5).
    xUnitVector = baseToTipUnitVector.crossProductWith( Vector3d(0,0,1) )
    if xUnitVector.length() < 0.5 :
        xUnitVector = baseToTipUnitVector.crossProductWith( Vector3d(0,-1,0) )
    xUnitVector.toUnit()
    yUnitVector = baseToTipUnitVector.crossProductWith( Vector3d(-1,0,0) )
    if yUnitVector.length() < 0.5 :
        yUnitVector = baseToTipUnitVector.crossProductWith( Vector3d(0,1,0) )
    yUnitVector.toUnit()
    vertices = []
    # Ring 1: bottom-cap vertices (indices 0 .. samples-1).
    for i in range(samples):
        theta = i * 2 * math.pi / float(samples)
        vertices.append( basePoint + xUnitVector * math.cos(theta) * radius + yUnitVector * math.sin(theta) * radius )
    # Ring 2: top-cap vertices (indices samples .. 2*samples-1).
    for i in range(samples):
        theta = i * 2 * math.pi / float(samples)
        vertices.append( tipPoint + xUnitVector * math.cos(theta) * radius + yUnitVector * math.sin(theta) * radius )
    # Side vertices, interleaved (base_i, tip_i) pairs starting at index
    # 2*samples; duplicated from the cap rings, presumably so the side quads
    # can carry their own normals -- TODO confirm against create().
    for i in range(samples):
        theta = i * 2 * math.pi / float(samples)
        vertices.append( basePoint + xUnitVector * math.cos(theta) * radius + yUnitVector * math.sin(theta) * radius )
        vertices.append( tipPoint + xUnitVector * math.cos(theta) * radius + yUnitVector * math.sin(theta) * radius )
    # Two n-gon cap faces, then one quad per segment along the side.
    # NOTE(review): under Python 3 these are range objects, not lists --
    # assumes create() accepts arbitrary index sequences; confirm.
    faces = [ range(0,samples), range(samples,2*samples) ]
    for i in range(0,2*samples,2) :
        base = 2*samples
        size = 2*samples
        faces.append( (base+i, base+i+1, base+(i+3)%size, base+(i+2)%size ) )
    return create( vertices, faces, colour ) | 198a5585e5f2cdb000b4b59bfa7da8fca316fcf8 | 3,634,700 |
import os
def clojure_find(db):
    """
    Lazy find using __clojure_find and shelve.

    Looks up previously cached clojure paths under db[const.CLOJURE], keeps
    only those that still exist on disk, and falls back to a fresh
    __clojure_find() scan (re-caching the result) when none survive.
    The shelve-like `db` is always closed before returning.

    Returns the first valid path, or '' on unsupported platforms.
    """
    str_clojure = ''
    if myos.iswin() or myos.islinux():
        file = []
        valid_file = []
        if const.CLOJURE in db:
            file = db[const.CLOJURE]
        # Drop cached entries whose files were removed since the last run.
        for item in file:
            if os.path.exists(item):
                valid_file.append(item)
        if len(valid_file) == 0:
            valid_file = __clojure_find()
            db[const.CLOJURE] = valid_file
        # NOTE(review): raises IndexError if __clojure_find() also returned
        # an empty list -- confirm it always yields at least one path.
        str_clojure = valid_file[0]
    db.close()
    return str_clojure | 48ce315f56d56b65067f086a50bba1d5b8cab36d | 3,634,701 |
import torch
def _sample_rois_manually(gt_boxes_origin, fg_rois_per_image, rois_per_image, num_classes, gt_truncated, im_info):
    """Manually sample foreground, truncated and background RoIs around GT boxes.

    Args:
        gt_boxes_origin: Variable, [gt_num, 5], [x1, y1, x2, y2, class_id]
        fg_rois_per_image: int, 64
        rois_per_image: float, 256.0
        num_classes: int, 21
        gt_truncated: ndarray.bool, [gt_num]
        im_info: indexable; im_info[0] is image width, im_info[1] height
            (assumed -- TODO confirm ordering against caller)

    Returns (see trailing note below): labels, rois, roi_scores,
    bbox_targets, bbox_inside_weights -- the last three are placeholders.
    """
    fg_num = fg_rois_per_image
    rois_per_image = int(rois_per_image)
    gt_boxes_origin = gt_boxes_origin.data.cpu()
    img_width = float(im_info[0])
    img_height = float(im_info[1])
    """Remove truncated gt_boxes"""
    gt_truncated = gt_truncated.astype(int)
    gt_truncated = torch.from_numpy(gt_truncated)
    # Indices of the non-truncated ground-truth boxes.
    truncated_idx = (gt_truncated == 0).nonzero().view(-1)
    if len(truncated_idx) != 0:
        gt_boxes = torch.index_select(gt_boxes_origin, 0, truncated_idx)
        untruncted_gt_num = len(gt_boxes)
        """get width and height of every untruncated gt_box"""
        width = gt_boxes[:, 2] - gt_boxes[:, 0] # x2-x1
        height = gt_boxes[:, 3] - gt_boxes[:, 1]
        """for every untruncated gt_box:"""
        for i in range(untruncted_gt_num):
            # get the number of fg_rois that the ith gt should generate.
            # The last gt absorbs the integer-division remainder so the
            # total is exactly fg_rois_per_image.
            if i == untruncted_gt_num - 1:
                fg_num_per_gt = fg_rois_per_image - (untruncted_gt_num - 1) * int(fg_rois_per_image / untruncted_gt_num)
            else:
                fg_num_per_gt = int(fg_rois_per_image / untruncted_gt_num)
            # get the width and height delta: jitter each corner by up to
            # +/-10% of the box size.
            delta = torch.rand(fg_num_per_gt, 4) * 0.2 - 0.1  # [-0.1, 0.1)
            delta = delta * torch.FloatTensor([width[i], height[i], width[i], height[i]])
            if i == 0:
                fg_rois = delta + gt_boxes[i, :-1]
                labels = torch.ones(fg_num_per_gt) * gt_boxes[i, 4]
            else:
                fg_rois = torch.cat((fg_rois, delta + gt_boxes[i, :-1]))
                labels = torch.cat((labels, torch.ones(fg_num_per_gt) * gt_boxes[i, 4]))
        """manage the boundary"""
        # Clamp x1/x2 at 0 and y1/y2 at the image extents.
        fg_rois[:, 0] = torch.max(torch.FloatTensor([0]), fg_rois[:, 0])
        fg_rois[:, 1] = torch.min(torch.FloatTensor([img_width]), fg_rois[:, 1])
        fg_rois[:, 2] = torch.max(torch.FloatTensor([0]), fg_rois[:, 2])
        fg_rois[:, 3] = torch.min(torch.FloatTensor([img_height]), fg_rois[:, 3])
    else:
        # No untruncated gt: no foreground rois at all.
        fg_num = 0
        fg_rois = torch.FloatTensor()
        gt_boxes = torch.FloatTensor()
        labels = torch.FloatTensor()
    """v3.0: generate truncated_rois"""
    if len(gt_boxes) != 0:
        truncated_rois, truncated_label, truncated_rois_num = genarate_truncated_rois(gt_boxes, fg_rois_per_image)
    else:
        truncated_rois = torch.FloatTensor()
        truncated_label = torch.FloatTensor()
        truncated_rois_num = 0
    """ generate bg_rois """
    # Oversample 2x candidates; invalid ones are filtered out below.
    bg_num = rois_per_image - fg_num - truncated_rois_num
    x1_bg = (torch.rand(bg_num * 2) * img_width).type(torch.FloatTensor)
    y1_bg = (torch.rand(bg_num * 2) * img_height).type(torch.FloatTensor)
    if fg_num != 0:
        # Background box sizes drawn between the min and max gt sizes.
        bg_width = torch.min(width) + torch.rand(bg_num * 2) * (torch.max(width) - torch.min(width))
        bg_height = torch.min(height) + torch.rand(bg_num * 2) * (torch.max(height) - torch.min(height))
    else:
        width_origin = gt_boxes_origin[:, 2] - gt_boxes_origin[:, 0] # x2-x1
        height_origin = gt_boxes_origin[:, 3] - gt_boxes_origin[:, 1]
        bg_width = torch.min(width_origin) + torch.rand(bg_num * 2) * (
                torch.max(width_origin) - torch.min(width_origin))
        bg_height = torch.min(height_origin) + torch.rand(bg_num * 2) * (
                torch.max(height_origin) - torch.min(height_origin))
    x2_bg = x1_bg + bg_width
    y2_bg = y1_bg + bg_height
    bg_rois = torch.cat(
        (torch.unsqueeze(x1_bg, 1), torch.unsqueeze(y1_bg, 1), torch.unsqueeze(x2_bg, 1), torch.unsqueeze(y2_bg, 1)), 1)
    """cannot overlap with every gt"""
    # Keep only candidates with zero IoU against all original gt boxes.
    overlaps = bbox_overlaps(bg_rois, gt_boxes_origin[:, :-1])
    max_overlaps, _ = overlaps.max(1)
    bg_inds = (max_overlaps == 0).nonzero().view(-1)
    if len(bg_inds) != 0:
        bg_rois = bg_rois[bg_inds]
    else:  # Rare case: gt too large, no bg
        bg_rois = torch.unsqueeze(torch.FloatTensor([10, 10, 20, 20]), 0)
    # manage the bound
    bg_inds = (bg_rois[:, 0] >= 0).numpy() & (bg_rois[:, 1] <= img_width).numpy() & \
              (bg_rois[:, 2] >= 0).numpy() & (bg_rois[:, 3] <= img_height).numpy()
    # NOTE(review): max(bg_inds==0) is truthy when ANY candidate is out of
    # bounds, which then replaces ALL candidates with a single fallback box
    # -- looks overly aggressive; confirm intent.
    if max(bg_inds==0):
        bg_rois = torch.unsqueeze(torch.FloatTensor([10, 10, 20, 20]), 0)
        bg_inds = np.asarray([1])
    bg_inds = torch.FloatTensor(bg_inds.astype(float)).nonzero().view(-1)
    """select 256-64 bg randomly"""
    # Sample with replacement only when there are too few candidates.
    to_replace = bg_inds.numel() < bg_num
    bg_inds = bg_inds[
        torch.from_numpy(npr.choice(np.arange(0, bg_inds.numel()), size=int(bg_num), replace=to_replace)).long()]
    bg_rois = bg_rois[bg_inds]
    """set return vars"""
    rois = torch.cat((fg_rois, truncated_rois, bg_rois), 0)
    rois = torch.cat((torch.zeros(len(rois), 1), rois), 1)  # add 0s at first column.
    rois = Variable(rois.type(torch.cuda.FloatTensor), requires_grad=True)
    labels = torch.cat((labels, truncated_label, torch.zeros(bg_num)))
    labels = Variable(labels.type(torch.cuda.FloatTensor), requires_grad=False)
    roi_scores = Variable(torch.zeros(256,1).type(torch.cuda.FloatTensor), requires_grad=True)
    bbox_targets = torch.zeros(256, num_classes*4).type(torch.cuda.FloatTensor)
    bbox_inside_weights = torch.zeros(256, num_classes*4).type(torch.cuda.FloatTensor)
    assert len(rois)==256, "len"
    return labels, rois, roi_scores, bbox_targets, bbox_inside_weights
    """return:
    labels: Variable, torch.cuda.FloatTensor of size 256, require_grad=False
    rois: Variable, [256, 5], first column are all zeros, require_grad=True
    [x] roi_scores: no use. Variable, [256,1]
    [x] bbox_targets: no use. FloatTensor, [256, 84]
    [x] bbox_inside_weights: no use. FloatTensor, [256, 84]
    """ | 22b56b7abe2abd206d4aa69e2f412a0fc41795f8 | 3,634,702 |
def get_worker_list(AnnotationSet):
    """Return the worker IDs: every dataframe column except the first."""
    all_columns = list(AnnotationSet.dataframe.columns)
    return all_columns[1:]
def is_string(data):
    """Validate that *data* holds one or more NUL-terminated printable strings.

    Returns True when the byte sequence consists of non-empty runs of
    printable bytes (excluding CR and LF), each terminated by a NUL byte,
    with the final byte being a NUL.  Returns None otherwise.
    """
    if not data:
        return None
    if data[-1] != 0:
        return None
    allowed = printable.encode()
    forbidden = (ord('\r'), ord('\n'))
    index = 0
    total = len(data)
    while index < total:
        start = index
        # Consume one run of printable, non-newline bytes.
        while index < total:
            byte = data[index]
            if byte == 0 or byte not in allowed or byte in forbidden:
                break
            index += 1
        # The run must be non-empty and end exactly on a NUL terminator.
        if data[index] != 0 or index == start:
            return None
        index += 1
    return True
import os
def find_file_in_path(filename, search_path='.', pathsep=os.pathsep,
                      walkdir=False, path_only=False):
    """
    Find a file matching the given name in a search path of directories.

    :Parameters:
    filename: str
        The name of the file to be located.
    search_path: str
        Directories to search, joined by *pathsep*
        (e.g. '.:/bin:/local/bin').
    pathsep: str, optional (default=os.pathsep)
        Separator between the directories in *search_path*; defaults to the
        current operating system's path separator (';' on Windows, ':' on
        Linux).
    walkdir: bool, optional (default=False)
        When True, also search the whole directory tree below each listed
        directory.  N.B. this can slow the search down considerably, so
        keep the search path short when enabling it.
    path_only: bool, optional (default=False)
        When True, return the folder containing the file instead of the
        full path to the file itself.

    :Returns:
    filepath: str
        Full path (and name) of the first matching file, or an empty
        string when nothing is found.
    """
    folders = search_path.split(pathsep)
    for folder in folders:
        found = _check_folder(folder, filename, walkdir=walkdir,
                              path_only=path_only)
        if found:
            return found
    # Nothing matched in any folder.
    return ''
def Gen_DocumentTermMatrix(df_column, linguagem, stopwords,
                           quer_stemming='True'):
    """Run the full document-term-matrix generation process.

    Performs: optional stemming of the words, fitting of the
    TfidfVectorizer, and construction of the document-term matrix
    (DataFrame indexed like df_column, one column per feature).

    Params:
        df_column: text column (pandas Series) to vectorise
        linguagem: language passed to the Stemmer
        stopwords: stop-word list for the TfidfVectorizer
        quer_stemming: the *string* 'True' or 'False' (not a bool)

    Returns (data_dtm, tfidf): the matrix and the fitted vectoriser.

    NOTE(review): any quer_stemming value other than 'True'/'False' leaves
    data_dtm undefined and raises NameError at the return -- confirm callers
    only pass those two strings.
    """
    tfidf = TfidfVectorizer(stop_words=stopwords,
                            max_df=0.9, encoding='utf-8',
                            decode_error='ignore')
    if quer_stemming == 'True':
        st = Stemmer(language=linguagem)
        stemmed = st.fit_transform(df_column)
        base = tfidf.fit_transform(stemmed)
        data_dtm = pd.DataFrame(base.toarray(),
                                columns=tfidf.get_feature_names())
        data_dtm.index = df_column.index
    elif quer_stemming == 'False':
        base = tfidf.fit_transform(df_column)
        data_dtm = pd.DataFrame(base.toarray(),
                                columns=tfidf.get_feature_names())
        data_dtm.index = df_column.index
    return data_dtm, tfidf | 04fc803e2b4959133cafe1605c6625c209a00ae1 | 3,634,706 |
import json
def create_hdfs_metadata_pipeline(pipeline_builder, pipeline_title, hdfs_directory, hdfs_metadata):
    """Helper function to create and return a pipeline with HDFS File Metadata
    The Deduplicator assures there is only one ingest to HDFS. The pipeline looks like:
    dev_raw_data_source >> record_deduplicator >> hadoop_fs >= hdfs_metadata
                                               >> trash
    """
    # One JSON document per line, built from the module-level PRODUCT_DATA.
    raw_data = '\n'.join(json.dumps(product) for product in PRODUCT_DATA)
    logger.info('Pipeline will write to HDFS directory %s ...', hdfs_directory)
    dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
    dev_raw_data_source.set_attributes(data_format='JSON', raw_data=raw_data)
    record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')
    hadoop_fs = pipeline_builder.add_stage('Hadoop FS', type='destination')
    # Capping records-per-file at len(PRODUCT_DATA) forces file rollover
    # after a single batch, so the metadata event fires.
    hadoop_fs.set_attributes(data_format='JSON',
                             directory_template=hdfs_directory,
                             max_records_in_file=len(PRODUCT_DATA))
    trash = pipeline_builder.add_stage('Trash')
    # '>=' wires the hadoop_fs event stream into the hdfs_metadata stage.
    dev_raw_data_source >> record_deduplicator >> hadoop_fs >= hdfs_metadata
    record_deduplicator >> trash
    return pipeline_builder.build(title=pipeline_title) | fcf6d41365347a67e40a22ee01baceadb191d7e8 | 3,634,707 |
def dqpar_interpol(xfit, dqpars, ipkey='temperature'):
    """return interpolated parameters at temperature or exc_current
    Arguments:
    xfit -- temperature or exc_current to fit dqpars
    dqpars -- list of dict with id, iq (or i1, beta), Psid and Psiq values
    ipkey -- key (string) to interpolate

    Returns (x, fpip): the sorted ipkey values and a dict with the current
    grid, interpolated 'psid'/'psiq' arrays at xfit and, when available,
    interpolated loss components.

    Raises ValueError when the current grids (or loss speeds) of the
    dqpars entries do not agree.
    """
    # check current range
    # NOTE(review): dqtype is hard-coded to 0, so only the ('i1', 'beta')
    # variant is ever used -- confirm ('id', 'iq') support is intentional
    # dead code.
    ckeys = (('i1', 'beta'), ('id', 'iq'))
    dqtype = 0
    fpip = {k: dqpars[0][k] for k in ckeys[dqtype]}
    fpip['losses'] = dict()
    for k in ckeys[dqtype]:
        # All entries must share an identical current grid.
        curr = np.array([f[k] for f in dqpars], dtype=object)
        shape = curr.shape
        if curr.shape != (len(dqpars), len(curr[0])):
            raise ValueError("current range conflict")
        curr = curr.astype(float)
        if not np.array([np.allclose(curr[0], c)
                         for c in curr[1:]]).all():
            raise ValueError("current range conflict")
    try:
        # Losses are only comparable when measured at (nearly) one speed.
        speed = np.array([d['losses']['speed'] for d in dqpars])
        if (np.max(speed) - np.min(speed))/np.mean(speed) > 1e-3:
            raise ValueError("losses: speed conflict")
    except KeyError:
        pass
    sorted_dqpars = sorted(dqpars, key=lambda d: d[ipkey])
    x = [f[ipkey] for f in sorted_dqpars]
    for k in ('psid', 'psiq'):
        m = np.array([f[k] for f in sorted_dqpars]).T
        # Quadratic spline needs >2 support points; otherwise fall back to
        # linear interpolation with extrapolation.
        if len(x) > 2:
            fpip[k] = ip.UnivariateSpline(x, m, k=2)(xfit).T
        else:
            fpip[k] = ip.interp1d(
                x, m, fill_value='extrapolate')(xfit).T
    try:
        for k in ('styoke_hyst', 'stteeth_hyst',
                  'styoke_eddy', 'stteeth_eddy',
                  'rotor_hyst', 'rotor_eddy',
                  'magnet'):
            m = np.array([f['losses'][k] for f in sorted_dqpars]).T
            if len(x) > 2:
                fpip['losses'][k] = ip.UnivariateSpline(x, m, k=2)(xfit).T
            else:
                fpip['losses'][k] = ip.interp1d(
                    x, m, fill_value='extrapolate')(xfit).T
        fpip['losses']['speed'] = dqpars[0]['losses']['speed']
        fpip['losses']['hf'] = dqpars[0]['losses']['hf']
        fpip['losses']['ef'] = dqpars[0]['losses']['ef']
    except KeyError:
        pass
    return x, fpip | 5ddea416732e4c5602e07ab2e5e6067a05a2530e | 3,634,708 |
def visflow_par_str(direc, size, str_type="file"):
    """
    visflow_par_str()
    Returns a string with stim type, as well as size (e.g., 128, 256) and
    direction (e.g., "right", "left") parameters, unless all possible visual
    flow parameters values are passed.
    Required args:
        - direc (str or list) : visual flow direction parameter values
        - size (int or list): visual flow square size parameter values
    Optional args:
        - str_type (str) : use of output str, i.e., for a filename ("file") or
                           to print the info to console ("print")
                           default: "file"
    Returns:
        - pars (str): string containing stim type (visual flow) and parameter
                      values, unless all parameter values for visual flow are
                      passed.
    """
    if size is None or direc is None:
        raise ValueError("Must pass value for visual flow square size or "
            "direction parameter.")
    dirstr = dir_par_str(direc, str_type=str_type)
    sizestr = size_par_str(size, str_type=str_type)
    # The length thresholds (10 for "print", 8 for "file") distinguish the
    # short "all values" form from a string with a specific value appended
    # -- assumes the formats produced by dir_par_str/size_par_str; confirm.
    if str_type == "print":
        if len(dirstr) > 10: # specified direction
            if len(sizestr) > 10: # specified size
                # Merge both: strip the closing paren from the size part and
                # the "vis. flow (" prefix from the direction part.
                pars = (f"{sizestr.replace(')', '')}, "
                    f"{dirstr.replace('vis. flow (', '')}")
            else:
                pars = dirstr
        else:
            pars = sizestr
    elif str_type == "file":
        if len(dirstr) > 8: # specified direction
            if len(sizestr) > 8:
                pars = f"{sizestr}_{dirstr[8:]}"
            else:
                pars = dirstr
        else:
            pars = sizestr
    else:
        gen_util.accepted_values_error("str_type", str_type, ["print", "file"])
    return pars | f214dd87c6eece8ce7fec3bdd846c0ad969de139 | 3,634,709 |
import math as m
from math import sin, cos, atan, asin, floor
def equ2gal(equ):
    """Convert single Equatorial J2000d to Galactic coordinates.

    Parameters:
        equ: (ra, dec) pair in decimal degrees.

    Returns:
        [l, b]: galactic longitude (0..360) and latitude, decimal degrees.
    """
    ra, dec = equ
    dec = m.radians(dec)
    ra = m.radians(ra)
    # North Galactic Pole (J2000): the original comments had these two
    # swapped -- `a` is the pole's *declination*, `d` its right ascension.
    a = 27.128251   # Declination of the North Galactic Pole (degrees)
    d = 192.859481  # Right ascension of the North Galactic Pole (degrees)
    l = 32.931918   # Ascending node of the Galactic plane on the equator (degrees)
    sdec = sin(dec)
    cdec = cos(dec)
    sa = sin(m.radians(a))
    ca = cos(m.radians(a))
    # Galactic latitude: sin(b) = cos(dec)cos(dec_NGP)cos(ra - ra_NGP)
    #                             + sin(dec)sin(dec_NGP)
    GT = asin(cdec * ca * cos(ra - m.radians(d)) + sdec * sa)
    # Galactic longitude from atan, resolved into the correct quadrant via
    # the signs of numerator (TP) and denominator (BT).
    GL = m.degrees(atan((sdec - sin(GT) * sa) / (cdec * sin(ra - m.radians(d)) * ca)))
    TP = sdec - sin(GT) * sa
    BT = cdec * sin(ra - m.radians(d)) * ca
    if (BT < 0):
        GL += 180
    else:
        if (TP < 0):
            GL += 360
    GL += l
    if (GL > 360):
        GL -= 360
    GT = m.degrees(GT)
    return [GL, GT]
import torch
def pre_residual_correlation(labels, model_out, label_idx, **trash):
    """Build the initial targets used for residual correlation.

    With model outputs z and ground-truth labels y, returns a (n, c) tensor
    holding one_hot(y) - z at the labelled rows and zeros everywhere else.
    NaN labels are zeroed in place before use; extra keyword arguments are
    accepted and ignored.
    """
    labels = labels.cpu()
    labels[labels.isnan()] = 0
    labels = labels.long()
    model_out = model_out.cpu()
    label_idx = label_idx.cpu()
    num_classes = labels.max() + 1
    num_nodes = labels.shape[0]
    residual = torch.zeros((num_nodes, num_classes))
    # Residual (y - z) only at the labelled rows; zeros elsewhere.
    one_hot = F.one_hot(labels[label_idx], num_classes).float().squeeze(1)
    residual[label_idx] = one_hot - model_out[label_idx]
    return residual
def trimesh2Panda(trimesh, name = "auto"):
    """
    Convert a trimesh model to a panda model.

    :param trimesh: trimesh object providing vertices, face_normals and faces
    :param name: unused; kept for interface compatibility
    :return: whatever packpandanp_fn builds from the mesh arrays
    """
    return packpandanp_fn(trimesh.vertices, trimesh.face_normals, trimesh.faces) | 6afbbf9cc5bfeed4dbd1aa94985b246921e2aac9 | 3,634,712 |
def reachabilityquicktest_update(request, reachabilityquicktest_id, **params):
    """Update a reachability quick test.
    :param request: request context
    :param reachabilityquicktest_id: id of the quick test to update
    :param tenant_id: (optional) tenant id of the reachability test modified
    :param name: name of the reachability test
    :param src_tenant_id: tenant id of the source ip
    :param src_segment_id: segment id of the source ip
    :param src_ip: source ip of the reachability test
    :param dst_ip: destination ip of the reachability test
    :param expected_result: expected result of the reachability test
    :param run_test: boolean flag to run the test
    :return: the updated test wrapped in NeutronAPIDictWrapper
    """
    LOG.debug("reachabilityquicktest_update(): params=%s", params)
    # tenant_id and id are immutable on the server side; strip them so the
    # update request does not fail.
    if 'tenant_id' in params:
        LOG.debug("Removing tenant_id from params, it cannot be changed")
        params.pop('tenant_id')
    if 'id' in params:
        LOG.debug("Removing id from params, it cannot be changed")
        params.pop('id', None)
    body = {'reachabilityquicktest': params}
    reachabilityquicktest = neutronclient(request)\
        .update_reachabilityquicktest(reachabilityquicktest_id, body)\
        .get('reachabilityquicktest')
    return NeutronAPIDictWrapper(reachabilityquicktest) | 57a1bc4be61ba1757054f0f4d51b097968323bdc | 3,634,713 |
from sklearn.feature_extraction.text import TfidfVectorizer
from nltk import snowball, word_tokenize
def compute_similarity(left, right, tokenize=False, stop_words=None, **kwds):
    """Compute cosine similarity from tfidf-weighted matrix consisting
    of two vectors (left and right).

    :param left: first document (string)
    :param right: second document (string)
    :param tokenize: when True, use the snowball-stemming tokenizer
    :param stop_words: tokens to exclude while stemming (defaults to none)
    :param kwds: extra keyword arguments forwarded to TfidfVectorizer
    :return: None when both documents are empty, else the cosine similarity
    """
    def tokenize_and_stem(text):
        """Auxiliary function to return stemmed tokens of document."""
        # Closure: reads the enclosing stop_words, which is normalised to a
        # list below before the vectorizer ever calls this.
        return [snowball.SnowballStemmer("english").stem(t) for
                t in word_tokenize(text.lower()) if t not in stop_words]
    if not left and not right:
        return None
    if tokenize:
        kwds.update({"tokenizer": tokenize_and_stem})
        if not stop_words:
            stop_words = []
    vec = TfidfVectorizer(**kwds)
    return compute_cosine(vec.fit_transform([left, right])) | b895e3921eecc3a14481279073250b3c99a20e5e | 3,634,714 |
def solve(dataframe:pd.DataFrame):
    """Build and run a Google OR-Tools capacitated VRP over the shipments.

    :param dataframe: one row per shipment with ``latitude``, ``longitude``
        and ``pallets`` columns.
    :return: the solution produced by the GoogleORCVRP model.
    """
    # Pairwise haversine distances between all shipment points, in miles.
    distances = ovrp_haversine_distance_matrix(
        lats=dataframe.latitude.values,
        lons=dataframe.longitude.values,
        unit='mi'
    )
    # Prepend a zero-demand entry for the depot (node 0).
    demand = np.insert(dataframe.pallets.values, 0, 0)
    # Fleet of 10 identical trucks with [min, max] weight capacity in pallets.
    vehicles = [[0, 26]]*10
    cvrp = GoogleORCVRP(
        distances=distances,
        demand=demand,
        vehicles=vehicles,
        depot=0,
        max_seconds=30
    )
    cvrp.solve()
    return cvrp.solution
import yaml
import logging
import torch
import sys
def load_config(config_file_path, seed=-1, gpuid=-1):
    """
    Read config and set up the logger
    :param config_file_path: path to the config file in the yaml format
    :param seed: random seed; a negative value means "use the seed from the
        config" (key 'random.seed', default 1)
    :param gpuid: CUDA device id; a negative value means "use the config's
        'gpu.id', default 0"
    :return: config, logger
    """
    with open(config_file_path, 'r') as config_file:
        # NOTE(review): yaml.load without an explicit Loader is deprecated
        # and unsafe on untrusted files -- consider yaml.safe_load.
        config = yaml.load(config_file.read())
    config_global = config.get('global', {})
    logger = logging.getLogger(__name__)
    logger.setLevel(config['logger']['level'])
    ch = logging.StreamHandler()
    ch.setLevel(config['logger']['level'])
    logger.addHandler(ch)
    # Seed numpy and torch, preferring the explicit argument over the config.
    if seed < 0:
        np.random.seed(config_global.get('random.seed', 1))
        torch.manual_seed(config_global.get('random.seed', 1))
        logger.info("Seed: {}".format(config_global.get('random.seed', 1)))
    else:
        np.random.seed(seed)
        torch.manual_seed(seed)
        logger.info("Seed: {}".format(seed))
    logger.debug(config)
    if "entity.linking" not in config:
        logger.error("Entity linking parameters not in the config file!")
        sys.exit()
    if torch.cuda.is_available():
        logger.info("Using your CUDA device")
        if seed < 0:
            torch.cuda.manual_seed(config_global.get('random.seed', 1))
        else:
            torch.cuda.manual_seed(seed)
        if gpuid < 0:
            torch.cuda.set_device(config_global.get('gpu.id', 0))
        else:
            torch.cuda.set_device(gpuid)
        logger.info("GPU ID: {}".format(torch.cuda.current_device()))
    wdaccess.set_backend(config['wikidata']['backend'])
    # Only override entity-linking parameters that the module already knows.
    candidate_retrieval.entity_linking_p.update({k: config['entity.linking'][k] for k in config['entity.linking']
                                                 if k in candidate_retrieval.entity_linking_p})
    logger.debug(candidate_retrieval.entity_linking_p)
    return config, logger | 7b808c49385b701073370ebb9ef33a77d7bdfd87 | 3,634,716 |
def hr_max_sqi(nn_intervals):
    """Return the maximum heart rate, in beats per minute.

    Parameters
    ---------
    nn_intervals : list
        Normal to Normal intervals, in milliseconds.

    Returns
    ---------
    : int
        The maximum heart rate, rounded to the nearest integer.

    Notes
    ---------
    For HRV features, the input must be preprocessed (invalid peaks removed,
    then interpolated) normal-to-normal intervals.  For an SQI, pass the raw
    RR intervals obtained from the peak-detection algorithm.
    """
    # Convert each interval (ms) into a rate in beats per minute.
    beats_per_minute = 60000 / np.asarray(nn_intervals, dtype=float)
    fastest = np.max(beats_per_minute)
    return int(np.round(fastest))
import torch
def fft(x):
    """
    Layer that performs a fast (inverse) Fourier transformation.

    ``x`` is a batch of flattened square images: the first half of each row
    holds the real part, the second half the imaginary part.  Returns a
    tensor of shape (batch, 2, side, side) where channel 0 is the real part
    and channel 1 the imaginary part of the 2-D inverse FFT.

    NOTE: the original implementation used ``torch.ifft``, which was removed
    in PyTorch 1.8; this version uses the equivalent ``torch.fft.ifft2``.
    """
    img_size = x.size(1) // 2
    side = int(img_size ** 0.5)
    # Split the flat input into real and imaginary square images.
    arr_real = x[:, 0:img_size].reshape(-1, side, side)
    arr_imag = x[:, img_size:].reshape(-1, side, side)
    # 2-D inverse FFT with the default 1/N ("backward") normalisation,
    # matching the legacy torch.ifft behaviour.
    spectrum = torch.fft.ifft2(torch.complex(arr_real, arr_imag))
    # Re-pack as (..., 2) real/imag pairs, then apply the original layout
    # permutation so the output shape matches the legacy implementation.
    arr = torch.stack((spectrum.real, spectrum.imag), dim=-1)
    arr_fft = arr.permute(0, 3, 2, 1).transpose(2, 3)
    return arr_fft
def generate_elgamal_auxiliary_key_pair(
    owner_id: GUARDIAN_ID, sequence_order: int
) -> AuxiliaryKeyPair:
    """
    Generate auxiliary key pair using elgamal
    :param owner_id: guardian id the key pair belongs to
    :param sequence_order: guardian's sequence order
    :return: Auxiliary key pair with the secret and public keys hex-encoded
    """
    elgamal_key_pair = elgamal_keypair_random()
    return AuxiliaryKeyPair(
        owner_id,
        sequence_order,
        elgamal_key_pair.secret_key.to_hex(),
        elgamal_key_pair.public_key.to_hex(),
    ) | 18cd4fe4908181f4d38d8e31d2b3392cc7e6ce6d | 3,634,719 |
import glob
import scipy
def gene_knowledge_graph(anno_dir, data_dir):
    """
    Generate the knowledge graph (node features plus normalised adjacency)
    for the six spinal-structure classes.

    Class order in all arrays: NV: 0, ND: 1, NNF: 2, AV: 3, AD: 4, ANF: 5.

    :param anno_dir: glob pattern matching the XML annotation files
    :param data_dir: directory holding the corresponding DICOM files
    :return: dict with 'node_representation' (6x256 mean features per class)
             and 'node_edge' (6x6 symmetrically normalised adjacency)
    """
    # Accumulated feature vectors per class (averaged at the end).
    node_representation = np.zeros((6, 256))
    # Co-occurrence counts between the six structure classes.
    node_edge = np.zeros((6, 6))
    # Number of structure instances seen per class.
    structure_number = np.zeros((6))
    anno_filenames = glob.glob(anno_dir)
    for anno_filename in anno_filenames:
        data_filename = data_dir + anno_filename.split("/")[-1].split(".")[0] + '.dcm'
        labels, _, _, _, polygons, _ = get_groundtruth_from_xml(anno_filename)
        img_npy = get_image_data_from_dicom(data_filename)
        # Normalise the image into 8-bit grey levels.
        img_npy = img_npy.astype(np.float32)/img_npy.max()
        img_npy = img_npy*255
        img_npy = img_npy.astype(np.uint8)
        compute_node_edge(node_edge, polygons, labels)
        compute_node_repres(node_representation, structure_number, polygons, labels, img_npy)
    # Average the accumulated features per class.
    node_representation = np.divide(node_representation, structure_number[:, None])#TODO
    # Symmetric normalisation D^-1/2 (A + I) D^-1/2, GCN-style.
    # BUGFIX: the original used Python-2 `xrange`, a NameError on Python 3.
    degree_matrix = np.zeros((6, 6))
    for i in range(node_edge.shape[0]):
        degree_matrix[i,i] = np.sum(node_edge[i,:])
    node_edge = node_edge + np.identity(6)
    degree_matrix = np.linalg.inv(scipy.linalg.sqrtm(degree_matrix))
    node_edge = np.matmul(np.matmul(degree_matrix, node_edge), degree_matrix)
    knowledge_graph = {'node_representation': node_representation,
                       'node_edge': node_edge}
    return knowledge_graph
def benchmark_random(backtest, random_strategy, nsim=100):
    """
    Given a backtest and a random strategy, compare backtest to
    a number of random portfolios.
    The idea here is to benchmark your strategy vs a bunch of
    random strategies that have a similar structure but execute
    some part of the logic randomly - basically you are trying to
    determine if your strategy has any merit - does it beat
    randomly picking weight? Or randomly picking the selected
    securities?
    Args:
        * backtest (Backtest): A backtest you want to benchmark
        * random_strategy (Strategy): A strategy you want to benchmark
            against. The strategy should have a random component to
            emulate skilless behavior.
        * nsim (int): number of random strategies to create.
    Returns:
        RandomBenchmarkResult
    """
    # save name for future use
    if backtest.name is None:
        backtest.name = 'original'
    # run if necessary
    if not backtest.has_run:
        backtest.run()
    bts = []
    bts.append(backtest)
    data = backtest.data
    # create and run random backtests on the same data as the original
    for i in range(nsim):
        # NOTE(review): the single random_strategy instance is renamed and
        # reused for every simulation -- assumes bt.Backtest copies or
        # re-initialises it per run; confirm.
        random_strategy.name = 'random_%s' % i
        rbt = bt.Backtest(random_strategy, data)
        rbt.run()
        bts.append(rbt)
    # now create new RandomBenchmarkResult
    res = RandomBenchmarkResult(*bts)
    return res | 4c0bea5e3d434940102180c1f6caf5d685dc7ee6 | 3,634,721 |
def detachStatement(request):
    """
    Detach one e-statement from the e-statement it is attached to.
    - https://docs.popbill.com/statement/python/api#DetachStatement

    NOTE: the original Korean comments were corrupted (split mid-character
    across lines, breaking the syntax); they are reconstructed in English
    here.  All runtime values are unchanged.
    """
    try:
        # Popbill member business registration number
        CorpNum = settings.testCorpNum
        # Popbill member user ID
        UserID = settings.testUserID
        # Statement type code: 121-statement, 122-invoice, 123-estimate,
        # 124-purchase order, 125-deposit slip, 126-receipt
        ItemCode = "121"
        # Statement document management key
        MgtKey = "20211201-001"
        # Type code of the statement to detach (same code table as above)
        SubItemCode = "121"
        # Management key of the statement to detach
        SubMgtKey = "20211201-001"
        response = statementService.detachStatement(CorpNum, ItemCode, MgtKey, SubItemCode, SubMgtKey, UserID)
        return render(request, 'response.html', {'code': response.code, 'message': response.message})
    except PopbillException as PE:
        return render(request, 'exception.html', {'code': PE.code, 'message': PE.message})
def this(func, cache_obj=CACHE_OBJ, key=None, ttl=None, *args, **kwargs):
    """
    Memoise the output of *func* through *cache_obj*.

    On a cache hit the stored value is returned without calling *func*;
    on a miss the function runs and its result is upserted into the cache.

    By default the cache key combines the function name with the stringified
    positional and keyword arguments, so distinct invocations are cached
    separately.  Passing an explicit *key* makes every invocation share one
    entry -- different calls then return the same value, which may be
    unexpected, so be careful!

    If the cache is disabled, the decorated function simply runs normally.
    Unlike the other helpers in this module, a custom cache_obj must be
    passed directly (decorator.decorator behaves oddly from class methods).

    :param func: (expensive?) function to decorate
    :param cache_obj: cache to a specific object (for use from the cache object itself)
    :param key: optional key to store the value under
    :param ttl: optional expiry applied to the cached value
    :param *args: arg tuple to pass to the decorated function
    :param **kwargs: kwarg dict to pass to the decorated function
    """
    cache_key = key or (func.__name__ + str(args) + str(kwargs))
    if cache_obj.has(cache_key):
        return cache_obj.get(cache_key)
    result = func(*args, **kwargs)
    cache_obj.upsert(cache_key, result, ttl)
    return result
def get_data(table,cid,fields=None,date=None):
    """
    Fetch rows for one college from the given database table.

    :param table: name of database table
    :param cid: college_id value to filter on
    :param fields: optional list of column names to select (default: all)
    :param date: optional; when truthy, a created_at condition is appended
    :return: whatever do_esql() returns for the built query
    """
    sql = "SELECT "
    if fields:
        sql += ','.join(fields)
    else:
        sql += "*"
    # FIXME(review): table and cid are interpolated straight into the SQL
    # string -- SQL injection risk if either value comes from user input.
    # Parameterise once do_esql() supports bound parameters.
    sql += " FROM %s WHERE college_id=%s" % (table,cid)
    if date:
        # FIXME(review): this appends the literal text "created_at?" with no
        # operator, and the `date` value itself is never used -- the clause
        # as written cannot work; confirm the intended condition.
        sql += " AND created_at?"
    result = do_esql(sql)
    return result
from typing import List
from typing import Callable
from typing import Tuple
def jwt_perm_required(perms: List[str]) -> Callable:
    """
    Receives a list of permissions and only process the inner function if the jwt token
    available in the input request have the required permissions.
    """
    def process_func(func: Callable) -> Callable:
        """
        Similar to the original Django's `permission_required` but functioning on top of
        the jwt token as processed from the request. This function specifically enforces
        on views that the input token has a field inside of the claim `user` called
        `perms` containing the specified permission to process the input function.
        Args
        ----
          func: Callable
              Refers to one of the verbs a view can have, such as `POST` or `GET`.
        Returns
        -------
          Callable
              Decorator that returns function to process view if and only if the requried
              jwt tokens contains the necessary permissions.
        """
        def test_func(request: THttpRequest) -> Tuple[bool, str]:
            # Returns (passed, failure_message); consumed by jwt_passes_test.
            # It's supposed by default that `jwt_login_required` comes first. This
            # guarantees that `request.payload` will not be `None`
            if not getattr(request, 'payload', ''):
                return False, 'Login must happen before evaluating permissions.'
            payload = request.payload
            jwt_perms = payload.get('user', {}).get('perms')
            if jwt_perms:
                # Every requested permission must be present in the token.
                if not all([perm in jwt_perms for perm in perms]):
                    return False, 'Insufficient Permissions.'
                return True, ''
            return False, 'Invalid permissions for jwt token.'
        return jwt_passes_test(test_func)(func)
    return process_func | 19eb006696de38c011b9c0ad33ed3804664f2e78 | 3,634,725 |
def get_park1_function_caller(**kwargs):
    """ Returns the park1 function caller.

    The park1 benchmark is defined on the unit hypercube [0, 1]^4; its
    hard-coded optimum value 25.5872304 is forwarded to park1 and to the
    caller, while the optimum point is left unknown (None).
    """
    opt_val = 25.5872304
    opt_pt = None
    func = lambda x: park1(x, opt_val)
    domain_bounds = [[0, 1]] * 4
    return get_euc_function_caller_from_function(func, domain_bounds,
             vectorised=False, opt_val=opt_val, opt_pt=opt_pt, **kwargs) | 8cf82bb083df8eeefb83630918bf048b7fed1989 | 3,634,726 |
def univariate_analysis(groups):
    """Per-group univariate statistics: counts, central tendency, dispersion.

    :param groups: iterable of dicts with 'name' and 'vals' (numeric array)
    :return: dict with 'basic' (counts/sums/percentages),
        'central_tendencies' and 'dispersion', each keyed by group name,
        plus a synthetic 'All groups' entry over the pooled values.
    """
    groups_to_analyze = []
    basic = {}
    central_tendencies = {}
    dispersion = {}
    # Pool every group's values into one array for the 'All groups' row.
    all_vals = np.array([])
    for group in groups:
        all_vals = np.concatenate((all_vals, group['vals']), axis=0)
        groups_to_analyze.append(group)
    groups_to_analyze.append({
        'name': 'All groups',
        'vals': all_vals
    })
    for group in groups_to_analyze:
        basic[group['name']] = {
            'num_items': len(group['vals']),
            'sum': sum(group['vals']),
        }
        central_tendencies[group['name']] = get_central_tendency_breakdown(
            group['vals'])
        dispersion[group['name']] = {
            'range': np.ptp(group['vals']),
            'standard deviation': np.std(group['vals']),
        }
    # Second pass: percentages need the pooled totals computed above.
    for group in groups_to_analyze:
        basic[group['name']]['percent_of_total_items'] = basic[group['name']
                                                               ]['num_items'] / len(all_vals) * 100
        basic[group['name']]['percent_of_total_sum'] = basic[group['name']
                                                             ]['sum'] / sum(all_vals) * 100
    return {
        'basic': basic,
        'central_tendencies': central_tendencies,
        'dispersion': dispersion
    } | 0e5806d06e252a230eed60672d2e6ba63e9cd9c7 | 3,634,727 |
def create_version_dialog(request):
    """creates a create_version_dialog by using the given task

    Looks up the task from the ``tid`` matchdict entry, collects the distinct
    take names already used by versions of that task (always including the
    default take name), and returns the template context for the dialog.
    """
    logger.debug('inside create_version_dialog')
    # get logged in user
    logged_in_user = get_logged_in_user(request)

    task_id = request.matchdict.get('tid', -1)
    task = Task.query.filter(Task.task_id == task_id).first()

    # list(...) so the result supports `in` and `.append` on Python 3,
    # where map() returns a one-shot iterator.
    takes = list(map(
        lambda x: x[0],
        DBSession.query(distinct(Version.take_name))
        .filter(Version.task == task)
        .all()
    ))

    if defaults.version_take_name not in takes:
        takes.append(defaults.version_take_name)

    return {
        'has_permission': PermissionChecker(request),
        'logged_in_user': logged_in_user,
        'task': task,
        'default_take_name': defaults.version_take_name,
        # Fix: return the collected take names; previously this hard-coded
        # [defaults.version_take_name], discarding the query results above.
        'take_names': takes
    }
from pathlib import Path
import os
def create_qnode_for_causx():
    """Create a custom entity (Q-node or P-node) from the request body.

    Expects a JSON payload with at least ``label`` and ``is_property``;
    properties additionally require a valid ``data_type``.  The entity is
    persisted in the project's ``user_input_properties.tsv`` and registered
    with the wikidata provider.  If a cell ``selection`` is supplied, the new
    node is also wikified onto that cell and the qnode layers are recomputed.

    Returns a ``(response_dict, 200)`` tuple for the web framework.
    Raises ``web_exceptions.InvalidRequestException`` on a malformed payload.
    """
    project = get_project()
    request_json = request.get_json()
    try:
        label = request_json.pop("label")
        # is_prop=node_id[0].lower()=="p"
        is_prop = request_json.pop("is_property")
        if is_prop:
            data_type = request_json.pop("data_type")
            # Only wikibase-supported datatypes are accepted for properties.
            if data_type not in ["globecoordinate", "quantity", "time", "string", "monolingualtext", "externalid", "wikibaseitem", "wikibaseproperty", "url"]:
                raise web_exceptions.InvalidRequestException(
                    "Invalid data type")
    except KeyError:
        raise web_exceptions.InvalidRequestException(
            "Missing required fields in entity definition")

    filepath = Path(project.directory)/"user_input_properties.tsv"
    # Load previously created custom nodes (if any) so we append, not clobber.
    if os.path.isfile(filepath):
        custom_nodes = kgtk_to_dict(filepath)
    else:
        custom_nodes = dict()

    # Fix: use the caller-supplied id when present. Previously the explicit
    # "id" was read into a local that was never assigned to node_id, causing
    # a NameError on the first node_id use below.
    node_id = request.json.get("id", None)
    if not node_id:
        # No explicit id supplied: mint a fresh P/Q identifier for the label.
        if is_prop:
            node_id = get_Pnode(project, label)
        else:
            node_id = get_Qnode(project, label)

    entity_dict = {
        "id": node_id,
        "label": label,
    }
    if is_prop:
        entity_dict["data_type"] = data_type
    entity_dict["description"] = request_json.get("description", "")
    for key in ["P31"]:  # may add more
        if request_json.get(key, None):
            entity_dict[key] = request_json[key]
    custom_nodes[node_id] = entity_dict

    dict_to_kgtk(custom_nodes, filepath)
    project.add_entity_file(filepath)
    project.save()
    t2wml_settings.wikidata_provider.save_entry(node_id, **entity_dict)
    response = dict(entity=entity_dict, project=get_project_dict(project))

    selection = request_json.get("selection", None)
    if selection:
        # Wikify the freshly-created node onto the selected cell and
        # recompute the qnode layers for the response.
        selection = get_tuple_selection(selection)
        calc_params = get_calc_params(project)
        context = request.get_json().get("context", "")
        (col1, row1), (col2, row2) = selection
        value = calc_params.sheet[row1, col1]
        create_user_wikification(calc_params, project, selection, value,
                                 context, node_id)
        response["layers"] = get_qnodes_layer(calc_params)
    else:
        response["layers"] = {}

    return response, 200
from typing import Callable
def smape(actual_series: TimeSeries,
          pred_series: TimeSeries,
          intersect: bool = True,
          reduction: Callable[[np.ndarray], float] = np.mean) -> float:
    """ symmetric Mean Absolute Percentage Error (sMAPE).
    Given a time series of actual values :math:`y_t` and a time series of predicted values :math:`\\hat{y}_t`
    both of length :math:`T`, it is a percentage value computed as
    .. math::
        200 \\cdot \\frac{1}{T}
        \\sum_{t=1}^{T}{\\frac{\\left| y_t - \\hat{y}_t \\right|}{\\left| y_t \\right| + \\left| \\hat{y}_t \\right|} }.
    Note that it will raise a `ValueError` if :math:`\\left| y_t \\right| + \\left| \\hat{y}_t \\right| = 0`
    for some :math:`t`. Consider using the Mean Absolute Scaled Error (MASE) in these cases.
    Parameters
    ----------
    actual_series
        The series of actual values
    pred_series
        The series of predicted values
    intersect
        For time series that are overlapping in time without having the same time index, setting `intersect=True`
        will consider the values only over their common time interval (intersection in time).
    reduction
        Function taking as input a np.ndarray and returning a scalar value. This function is used to aggregate
        the metrics of different components in case of multivariate TimeSeries instances.
    Raises
    ------
    ValueError
        If the actual series and the pred series contains some zeros at the same time index.
    Returns
    -------
    float
        The symmetric Mean Absolute Percentage Error (sMAPE)
    """
    y_true, y_hat = _get_values_or_raise(actual_series, pred_series, intersect)
    y_true, y_hat = _remove_nan_union(y_true, y_hat)
    raise_if_not(np.logical_or(y_true != 0, y_hat != 0).all(),
                 'The actual series must be strictly positive to compute the sMAPE.', logger)
    # Fix: honour the ``reduction`` argument (previously ignored; np.mean was
    # hard-coded). The default, np.mean, preserves the previous behaviour.
    return 200. * reduction(np.abs(y_true - y_hat) / (np.abs(y_true) + np.abs(y_hat)))
def func_two(x):
    """
    Harder function for testing on.
    """
    shifted = 4.0 * (x - 0.25)
    return np.sin(shifted) + x + x ** 20 - 1
def random(samples, key=(0, 0), counter=(0, 0), sampler="gaussian", threads=False):
    """Generate random samples from a distribution for one stream.
    This returns values from a single stream drawn from the specified
    distribution.  The starting state is specified by the two key values and
    the two counter values.  The second value of the "counter" is used to
    represent the sample index.  If the serial option is enabled, only a
    single thread will be used.  Otherwise the stream generation is divided
    equally between OpenMP threads.
    Args:
        samples (int): The number of samples to return.
        key (tuple): Two uint64 values which (along with the counter) define
            the starting state of the generator.
        counter (tuple): Two uint64 values which (along with the key) define
            the starting state of the generator.
        sampler (string): The distribution to sample from.  Allowed values are
            "gaussian", "uniform_01", "uniform_m11", and "uniform_uint64".
        threads (bool): If True, use OpenMP threads to generate the stream
            in parallel.  NOTE: this may actually run slower for short streams
            and many threads.
    Returns:
        (Aligned array): The random values of appropriate type for the sampler.
    """
    env = Environment.get()
    nthread = env.max_threads()
    log = Logger.get()

    # Consolidated dispatch: one entry per sampler, giving the serial
    # generator, the threaded generator, and the output array type.  This
    # replaces four duplicated if/elif arms in each of the two code paths.
    dispatch = {
        "gaussian": (rng_dist_normal, rng_multi_dist_normal, AlignedF64),
        "uniform_01": (rng_dist_uniform_01, rng_multi_dist_uniform_01, AlignedF64),
        "uniform_m11": (rng_dist_uniform_11, rng_multi_dist_uniform_11, AlignedF64),
        "uniform_uint64": (rng_dist_uint64, rng_multi_dist_uint64, AlignedU64),
    }
    if sampler not in dispatch:
        msg = ("Undefined sampler. Choose among: gaussian, uniform_01, "
               "uniform_m11, uniform_uint64")
        log.error(msg)
        raise ValueError(msg)
    serial_fn, multi_fn, array_type = dispatch[sampler]

    if (not threads) or (samples < nthread):
        # Run serially
        ret = array_type(samples)
        serial_fn(key[0], key[1], counter[0], counter[1], ret)
        return ret

    # We are using threads, divide the samples up.  Each thread gets the same
    # key, and a second counter value advanced by its chunk offset so the
    # concatenated chunks reproduce the single serial stream.
    dst = distribute_uniform(samples, nthread)
    k1 = AlignedU64(nthread)
    k2 = AlignedU64(nthread)
    c1 = AlignedU64(nthread)
    c2 = AlignedU64(nthread)
    k1[:] = np.array([key[0] for x in dst], dtype=np.uint64)
    k2[:] = np.array([key[1] for x in dst], dtype=np.uint64)
    c1[:] = np.array([counter[0] for x in dst], dtype=np.uint64)
    c2[:] = np.array([counter[1] + x[0] for x in dst], dtype=np.uint64)
    lengths = [x[1] for x in dst]

    chunks = multi_fn(k1, k2, c1, c2, lengths)
    ret = array_type(samples)
    for t in range(nthread):
        offset, chunk_len = dst[t]
        ret[offset: offset + chunk_len] = chunks[t]
    return ret
def random_effect_2level_model(dataframe):
    """
    Fit a two-level mixed model for science scores with country-level
    random effects, and plot diagnostics.

    The formula ``Science ~ female + (female*ESCS | CountryID)`` uses
    ``female`` as a fixed effect and lets the ``female*ESCS`` interaction
    (plus intercept) vary randomly by country.

    :param dataframe: a data frame with student ID, school ID, country ID,
        science, math, reading, and other five selected variables as
        predictors.
    :return: the fitted Lmer model object
    """
    # Random intercept and slope two-level model:
    model_1_sci = Lmer('Science ~ female + (female*ESCS | CountryID)',
                       data=dataframe)
    # model must be fitted in order to get estimate results;
    # REML=False -> maximum likelihood, so nested models are comparable
    model_1_sci.fit(REML=False)
    # print summary since auto-generated result doesn't include fixed effects
    print(model_1_sci.summary())
    model_1_sci.plot_summary()
    # Visualizing random effect of a predictor
    # NOTE(review): ylabel says 'Predicted log_science' but the formula fits
    # 'Science' — confirm which scale is intended.
    model_1_sci.plot('female',
                     plot_ci=True,
                     ylabel='Predicted log_science')
    # Residuals vs predictor, to eyeball heteroscedasticity
    sns.regplot(x='female',
                y='residuals',
                data=model_1_sci.data,
                fit_reg=False
                )
    # Inspecting overall fit
    sns.regplot(x='fits',
                y='log_science',
                units='CountryID',
                data=model_1_sci.data,
                fit_reg=True
                )
    return model_1_sci
import os
import pandas
from colorama import Fore
from clinica.utils.exceptions import ClinicaException
def read_part_sess_long_ids_from_tsv(tsv_file):
    """Extract participant, session and longitudinal from TSV file.
    TODO: Find a way to merge with utils/filemanip.py::read_participant_tsv into one util
    """
    if not os.path.isfile(tsv_file):
        raise ClinicaException(
            "\n%s[Error] The TSV file you gave is not a file.%s\n"
            "\n%sError explanations:%s\n"
            " - Clinica expected the following path to be a file: %s%s%s\n"
            " - If you gave relative path, did you run Clinica on the good folder?" %
            (Fore.RED, Fore.RESET,
             Fore.YELLOW, Fore.RESET,
             Fore.BLUE, tsv_file, Fore.RESET)
        )

    df = pandas.read_csv(tsv_file, sep='\t')
    # Validate the mandatory columns in order, failing on the first missing one.
    for required_column in ('participant_id', 'session_id', 'long_id'):
        if required_column not in list(df.columns.values):
            raise ClinicaException(
                "\n%s[Error] The TSV file does not contain %s column (path: %s)%s" %
                (Fore.RED, required_column, tsv_file, Fore.RESET)
            )

    # Remove potential whitespace in participant, session or longitudinal ID
    return ([participant.strip(' ') for participant in list(df.participant_id)],
            [session.strip(' ') for session in list(df.session_id)],
            [long_id.strip(' ') for long_id in list(df.long_id)])
def get_model_data(model_name: str):
    """
    Query the data base for messages and return the stored model payload.

    :param model_name: name of the model to load from demisto
    :return: (model data string of encoded base 64, model type string)
    """
    res_model = demisto.executeCommand("getMLModel", {"modelName": model_name})[0]
    if is_error(res_model):
        raise DemistoException("Error reading model %s from Demisto" % model_name)
    contents = res_model['Contents']
    model_data = contents['modelData']
    try:
        return model_data, contents['model']["type"]["type"]
    except Exception:
        # Older models may not carry type metadata; fall back to a sentinel.
        return model_data, UNKNOWN_MODEL_TYPE
def get_bprop_batch_norm(self):
    """Grad definition for `BatchNorm` operation.

    Returns a bprop closure that backpropagates through BatchNorm via
    G.BatchNormGrad. In training mode the batch statistics saved by the
    forward pass (out[3], out[4]) are reused; in inference mode the
    running mean/variance inputs are used instead.
    """
    is_training = self.is_training
    input_grad = G.BatchNormGrad(is_training, self.epsilon, self.data_format)

    def bprop(x, scale, b, mean, variance, out, dout):
        if is_training:
            # Forward outputs 3 and 4 hold the reserves saved for backprop.
            saved_reserve_1 = out[3]
            saved_reserve_2 = out[4]
        else:
            saved_reserve_1 = mean
            saved_reserve_2 = variance
        out = input_grad(dout[0], x, scale, saved_reserve_1, saved_reserve_2)
        dx = out[0]
        dscale = out[1]
        dbias = out[2]
        # mean/variance inputs get zero gradients (running statistics).
        return dx, dscale, dbias, zeros_like(mean), zeros_like(variance)

    return bprop
def deg_to_num(lat_deg, lon_deg, zoom):
    """
    degree to num

    Convert a lat/lon in degrees to a slippy-map tile index plus the
    pixel offset inside that 256x256 tile at the given zoom level.
    Returns (xtile, ytile, pos_x, pos_y).
    """
    lat_rad = radians(lat_deg)
    tiles_per_axis = 2.0 ** zoom
    x_float = (lon_deg + 180.0) / 360.0 * tiles_per_axis
    y_float = (1.0 - log(tan(lat_rad) + (1 / cos(lat_rad))) / pi) / 2.0 * tiles_per_axis
    x_tile = int(x_float)
    y_tile = int(y_float)
    return (x_tile, y_tile,
            int((x_float - x_tile) * 256),
            int((y_float - y_tile) * 256))
def parse_cmdline():
    """ Parse command-line arguments
    """
    argparser = ArgumentParser(prog=__file__)
    argparser.add_argument("-i", "--indir", dest="indirname", type=str,
                           action="store", default='multiplexed_data',
                           help="Parent directory for multiplexed subfolders")
    argparser.add_argument("-o", "--outfile", dest="outfilename", type=str,
                           action="store",
                           default="multiplexed_predictions.tab",
                           help="Path to single output file of combined predictions")
    argparser.add_argument("-l", "--logfile", dest="logfile", type=str,
                           action="store", default=None,
                           help="Logfile location")
    argparser.add_argument("-v", "--verbose", dest="verbose",
                           action="store_true",
                           help="Give verbose output")
    return argparser.parse_args()
import requests
def get_google_api_books(query_dict: dict, params: dict = None, page: int = 1) -> tuple:
    """Get books from google api from given page."""
    query = google_api_query(query_dict)
    if not params:
        params = {}
    # NOTE: updates the caller-supplied dict in place (existing behaviour).
    params.update(GOOGLE_API_QUERY_PARAMS)
    params.update({"startIndex": (page - 1) * PAGINATE_BY})
    response = requests.get(f"{API}?{query}", params=params)
    books = []  # type: List[dict]
    total = 0
    if response.status_code == 200:
        payload = response.json()
        total = payload.get("totalItems", 0)
        books = [google_book_parser(item) for item in payload.get("items", [])]
    return books, total, response.status_code
def get_messages():
    """
    Query the data base for messages and returns a container of database message objects.

    Selects message rows joined with their media type, so each returned
    object carries its MediaType without a follow-up query.
    # NOTE(review): looks like a peewee-style select/join — confirm ORM.
    """
    return Messages.select(Messages, MediaType).join(MediaType)
def create_db(x, y, train_size=0.8, bs=96, random_state=42):
    """
    Take dataframe and convert to Fastai databunch.

    Splits (x, y) into train/validation sets, wraps them in TrainData
    datasets and DataLoaders (batch size clamped to the dataset size),
    and returns a DataBunch.

    :param train_size: fraction of the data used for training
    :param bs: requested batch size
    :param random_state: seed for the train/test split (reproducibility)
    """
    # Fix: forward random_state to the split; it was previously ignored,
    # so identical calls produced different splits.
    X_train, X_test, y_train, y_test = train_test_split(
        x, y, train_size=train_size, random_state=random_state)
    train_ds = TrainData(X_train, y_train)
    val_ds = TrainData(X_test, y_test)
    # Clamp batch sizes so a small dataset never yields an empty loader.
    bs = min(bs, len(train_ds))
    val_bs = min(bs, len(val_ds))
    train_dl = DataLoader(train_ds, bs)
    val_dl = DataLoader(val_ds, val_bs)
    return DataBunch(train_dl, val_dl)
def bootstrap(resolver):
    """Lookup the root nameserver addresses using the given resolver
    Return a Resolver which will eventually become a C{root.Resolver}
    instance that has references to all the root servers that we were able
    to look up.
    """
    # a.root-servers.net .. m.root-servers.net
    root_labels = [chr(ord('a') + i) for i in range(13)]
    lookups = [
        resolver.getHostByName('%s.root-servers.net' % label).addCallback(lambda r: r)
        for label in root_labels
    ]
    all_done = defer.DeferredList(lookups)
    # Keep only the successful lookups when building the final Resolver.
    all_done.addCallback(lambda results: Resolver([addr for ok, addr in results if ok]))
    return DeferredResolver(all_done)
def estimate_prior_limits(param_space, prior_limit_estimation_points, objective_weights):
    """
    Estimate the limits for the priors provided. Limits are used to normalize the priors, if prior normalization is required.
    :param param_space: Space object for the optimization problem
    :param prior_limit_estimation_points: number of points to sample to estimate the limits
    :param objective_weights: Objective weights for multi-objective optimization. Not implemented yet.
    :return: list with the estimated lower and upper limits found for the prior.
    """
    # Sample uniformly first, then from the prior (uniform if no prior is set).
    sampled_configurations = []
    for use_priors in (False, True):
        sampled_configurations += param_space.random_sample_configurations_without_repetitions(
            {}, prior_limit_estimation_points, use_priors=use_priors)
    prior_values = compute_probability_from_prior(
        sampled_configurations, param_space, objective_weights)
    return [min(prior_values), max(prior_values)]
def rotate_by_point_and_angle(vector, origin, angle):
    """
    Rotate vector about origin by angle around the Z axis.
    :param vector: DB.XYZ
    :param origin: DB.XYZ of origin
    :param angle: Angle to rotate
    :type angle: float
    :return: DB.XYZ
    """
    rotation = DB.Transform.CreateRotationAtPoint(DB.XYZ.BasisZ, angle, origin)
    return rotation.OfPoint(vector)
def load_xviii_bayer_from_binary(binary_data, image_height, image_width):
    """Read XVIII binary images into bayer array

    Each group of four pixels is packed into a 12-byte chunk; every pixel
    is a 24-bit value assembled from three bytes of the chunk in a fixed,
    non-sequential order.

    Parameters
    -----------
    binary_data : numpy.ndarray
        binary image data from XVIII
    image_height : int
        image height
    image_width : int
        image width

    Returns
    --------
    numpy.ndarray
        Bayer image (float32)
    """
    bayer_img = np.zeros((image_height, image_width), dtype=np.uint32)

    def _pack(hi, mid, lo):
        # Assemble one 24-bit pixel value from three bytes.
        return ((hi & 0xFF) << 16) | ((mid & 0xFF) << 8) | (lo & 0xFF)

    offset = 0
    for row in range(image_height):
        for col in range(0, image_width, 4):
            chunk = binary_data[offset:offset + 12]
            bayer_img[row, col] = _pack(chunk[3], chunk[2], chunk[1])
            bayer_img[row, col + 1] = _pack(chunk[0], chunk[7], chunk[6])
            bayer_img[row, col + 2] = _pack(chunk[5], chunk[4], chunk[11])
            bayer_img[row, col + 3] = _pack(chunk[10], chunk[9], chunk[8])
            offset += 12
    return bayer_img.astype(np.float32)
from typing import Union
from typing import Iterable
from typing import Optional
import ast
def apply_dialects(
    source: str, names: Union[str, Iterable[str]], filename: Optional[str] = None
) -> ast.AST:
    """Utility for applying dialect transpilers to source code."""
    reducer = dialect_reducer(names, filename)
    # Source-level pass first, then the AST-level pass on the parsed result.
    transpiled_source = reducer.transform_src(source)
    return reducer.transform_ast(ast.parse(transpiled_source))
def get_atoms(smiles):
    """
    Process a SMILES.
    SMILES string is processed to generate a sequence
    of atoms.
    Arguments:
        smiles (str): a SMILES representing a molecule.
    Returns:
        a list of atoms.
    """
    return [REVERSED_ATOM_MAPPING[token] for token in process_smiles(smiles)]
from pathlib import Path
import warnings
def clean_path(path):
    """
    cleans up paths and resolves ~ and symlinks
    """
    resolved = Path(path).expanduser().resolve()
    if resolved.exists():
        return resolved
    #FIXME this is a shitty solution, we should deal with conditional include statements
    warnings.warn(resolved.name + " doesn't exist")
    return None
from typing import Optional
from typing import List
def wide_article_preview_card(
        box: str,
        persona: Component,
        image: str,
        title: str,
        name: Optional[str] = None,
        aux_value: Optional[str] = None,
        caption: Optional[str] = None,
        items: Optional[List[Component]] = None,
        commands: Optional[List[Command]] = None,
) -> WideArticlePreviewCard:
    """Create a wide article preview card displaying a persona, image, title, caption, and optional buttons.
    Args:
        box: A string indicating how to place this component on the page.
        persona: The card's user avatar, 'size' prop is restricted to 'xs'.
        image: The cardโs image displayed on the left-hand side.
        title: The card's title on the right-hand side
        name: An identifying name for this card. Makes the card clickable, similar to a button.
        aux_value: The card's auxiliary text, displayed on the right-hand side of the header.
        caption: The card's caption, displayed below the title on the right-hand side.
        items: The card's buttons, displayed under the caption.
        commands: Contextual menu commands for this component.
    Returns:
        A `h2o_wave.types.WideArticlePreviewCard` instance.
    """
    return WideArticlePreviewCard(
        box=box,
        persona=persona,
        image=image,
        title=title,
        name=name,
        aux_value=aux_value,
        caption=caption,
        items=items,
        commands=commands,
    )
def calc_water_year(df: pd.DataFrame):
    """Calculates the water year.

    October-December dates roll forward into the next calendar year's
    water year; January-September keep the calendar year.

    Parameters
    ----------
    df : pandas.DataFrame
        Flow timeseries with a DataTimeIndex.
    Returns
    -------
    pandas.DataFrame.index
        A pandas.DataFrame index grouped by water year.
    """
    idx = df.index
    in_next_water_year = (idx.month >= 10).astype(int)
    return idx.year + in_next_water_year
def count_and_dissect_tlvs(buf):
    """
    Count and dissect TLVs. Return length of LLDP layer
    buf -- buffer to dissect
    return -- parsed_bytes_total, [(clz, bts), ...]
    """
    shift = 0
    # Non-zero dummies so the loop body runs at least once; an
    # "End of LLDPDU" TLV has type 0 and length 0, which ends the loop.
    tlv_type, tlv_len = 1, 1
    clz_bts_list = []

    while (tlv_type | tlv_len) != 0:
        type_and_len = unpack_H(buf[shift:shift + TLV_HEADER_LEN])[0]
        # get tlv length and type from the packed 16-bit header
        tlv_type = (type_and_len & TYPE_MASK) >> LENGTH_FIELD_BITS
        tlv_len = type_and_len & LENGTH_MASK

        if tlv_type != ORG_SPEC_TYPE:
            # Standard TLV: dissector class is looked up by type.
            clz = LLDP_TLV_CLS.get(tlv_type, LLDPGeneric)
        else:
            # Organizationally-specific TLV: class is keyed by (OUI, subtype).
            oui_subtype = unpack_I(buf[shift + TLV_HEADER_LEN:shift + ORG_SPEC_HEADER_LEN + TLV_HEADER_LEN])[0]
            oui = (oui_subtype & OUI_MASK) >> SUBTYPE_LEN_BITS
            subtype = oui_subtype & SUBTYPE_MASK
            clz = LLDP_ORG_SPEC_TLV_CLS.get((oui, subtype), LLDPOrgSpecGeneric)

        # get body bytes (header + content)
        tlv_body = buf[shift: tlv_len + shift + TLV_HEADER_LEN]
        # update shift to begin of next TLV (TLV_HEADER_LEN:2 + content:x)
        shift += TLV_HEADER_LEN + tlv_len
        clz_bts_list.append((clz, tlv_body))

    return shift, clz_bts_list
import json
def get_encoded_access_granter():
    """Add REMS metadata as base64 encoded json. Uses data from user session."""
    saml = session["samlUserdata"]
    first_name = saml[SAML_ATTRIBUTES["first_name"]][0]
    last_name = saml[SAML_ATTRIBUTES["last_name"]][0]
    access_granter_json = json.dumps({
        "userid": saml[SAML_ATTRIBUTES["CSC_username"]][0],
        "email": saml[SAML_ATTRIBUTES["email"]][0],
        "name": "{} {}".format(first_name, last_name),
    })
    return urlsafe_b64encode(access_granter_json.encode('utf-8'))
from typing import Dict
def output(prim_data: Dict) -> Dict:
    """ Sort the dictionary so that key (score) in descending order, value (time) in ascending order
    Args:
        prim_data: The original data where key reps score, value reps time
    Returns:
        a "sorted" dictionary that has score in descending order and time in ascending order
        {30: [6, 5], 40: [8, 2]} -> {40: [2, 8], 30: [5, 6]}
    """
    ordered = {}
    for score in msort([int(key) for key in prim_data])[::-1]:
        ascending_times = [t for t in msort(prim_data[str(score)])]
        # Only create a key when there is at least one value (preserves
        # the original behaviour for empty value lists).
        if ascending_times:
            ordered[score] = ascending_times
    return ordered
def _create_dummy_graph(triples_count):
    """This creates test data of a given size."""
    graph = rdflib.Graph()
    for idx in range(triples_count):
        triple = (
            rdflib.term.URIRef("subject:%d" % idx),
            rdflib.term.URIRef("predicate:%d" % idx),
            rdflib.term.URIRef("object:%d" % idx),
        )
        graph.add(triple)
    return graph
from ansible.constants import MAGIC_VARIABLE_MAPPING
def _get_magic_var(hostobj, varname, default=""):
    """Use Ansible coordination of inventory format versions
    :param hostobj: parsed Ansible host object
    :param varname: key of MAGIC_VARIABLE_MAPPING, representing variations of
        Ansible inventory parameter
    :param default: value, that will be returned if 'varname' is not set in
        inventory
    """
    # Return the first truthy value among the parameter's known aliases.
    for alias in MAGIC_VARIABLE_MAPPING[varname]:
        value = hostobj.vars.get(alias, "")
        if value:
            return value
    return default
from typing import Counter
import math
def sentence_bleu(hypothesis, reference, smoothing=True, order=4, **kwargs):
    """
    Compute sentence-level BLEU score between a translation hypothesis and a reference.
    :param hypothesis: list of tokens or token ids
    :param reference: list of tokens or token ids
    :param smoothing: apply smoothing (recommended, especially for short sequences)
    :param order: count n-grams up to this value of n.
    :param kwargs: additional (unused) parameters
    :return: BLEU score (float)
    """
    if not hypothesis:
        return 0
    total_log = 0.0
    for n in range(1, order + 1):
        hyp_counts = Counter(zip(*[hypothesis[k:] for k in range(n)]))
        ref_counts = Counter(zip(*[reference[k:] for k in range(n)]))
        matches = sum(min(count, ref_counts[gram]) for gram, count in hyp_counts.items())
        candidates = sum(hyp_counts.values())
        if smoothing:
            # Add-one smoothing keeps log() finite for short sequences.
            matches += 1
            candidates += 1
        precision = matches / candidates
        total_log += math.log(precision) / order if precision > 0 else float('-inf')
    # Brevity penalty discourages overly short hypotheses.
    brevity = min(1, math.exp(1 - len(reference) / len(hypothesis)))
    return math.exp(total_log) * brevity
import os
def CPU_temperature():
    """Returns the temperature of the Raspberry Pi's CPU.

    Parses the output of ``vcgencmd measure_temp`` (e.g. "temp=48.3'C")
    into a plain numeric string. Returns None when the command fails
    (e.g. not running on a Raspberry Pi).
    """
    try:
        res = os.popen('vcgencmd measure_temp').readline()
        return(res.replace("temp=","").replace("'C\n",""))
    except Exception:
        # Fix: narrowed the former bare `except: pass` (which also swallowed
        # KeyboardInterrupt/SystemExit) and made the None return explicit.
        return None
import copy
def generate_representation(coordinates, nuclear_charges,
        max_size=23, neighbors=23, cut_distance = 5.0, cell=None):
    """ Generates a representation for the FCHL kernel module.
    :param coordinates: Input coordinates.
    :type coordinates: numpy array
    :param nuclear_charges: List of nuclear charges.
    :type nuclear_charges: numpy array
    :param max_size: Max number of atoms in representation.
    :type max_size: integer
    :param neighbors: Max number of atoms within the cut-off around an atom. (For periodic systems)
    :type neighbors: integer
    :param cell: Unit cell vectors. The presence of this keyword argument will generate a periodic representation.
    :type cell: numpy array
    :param cut_distance: Spatial cut-off distance - must be the same as used in the kernel function call.
    :type cut_distance: float
    :return: FCHL representation, shape = (size,5,neighbors).
    :rtype: numpy array
    """
    size = max_size

    # Non-periodic systems: every atom can neighbour every other atom.
    if cell is None:
        neighbors=size

    L = len(coordinates)
    coords = np.asarray(coordinates)
    ocupationList = np.asarray(nuclear_charges)
    M = np.zeros((size,5,neighbors))

    if cell is not None:
        # Periodic system: replicate the cell in every direction far enough
        # that all images within cut_distance are included.
        coords = np.dot(coords,cell)
        nExtend = (np.floor(cut_distance/np.linalg.norm(cell,2,axis = 0)) + 1).astype(int)
        for i in range(-nExtend[0],nExtend[0] + 1):
            for j in range(-nExtend[1],nExtend[1] + 1):
                for k in range(-nExtend[2],nExtend[2] + 1):
                    # First image initialises the extended arrays; the rest append.
                    if i == -nExtend[0] and j == -nExtend[1] and k == -nExtend[2]:
                        coordsExt = coords + i*cell[0,:] + j*cell[1,:] + k*cell[2,:]
                        ocupationListExt = copy.copy(ocupationList)
                    else:
                        ocupationListExt = np.append(ocupationListExt,ocupationList)
                        coordsExt = np.append(coordsExt,coords + i*cell[0,:] + j*cell[1,:] + k*cell[2,:],axis = 0)
    else:
        coordsExt = copy.copy(coords)
        ocupationListExt = copy.copy(ocupationList)

    # Sentinel distance marks unused neighbor slots.
    M[:,0,:] = 1E+100

    for i in range(L):
        # Displacements from atom i to every (extended) atom.
        cD = - coords[i] + coordsExt[:]

        ocExt = np.asarray(ocupationListExt)
        D1 = np.sqrt(np.sum(cD**2, axis = 1))

        # Sort neighbors by distance, then keep those within the cut-off.
        args = np.argsort(D1)
        D1 = D1[args]
        ocExt = np.asarray([ocExt[l] for l in args])
        cD = cD[args]

        args = np.where(D1 < cut_distance)[0]
        D1 = D1[args]
        ocExt = np.asarray([ocExt[l] for l in args])
        cD = cD[args]
        # Rows: 0 = distance, 1 = nuclear charge, 2-4 = displacement vector.
        M[i,0,: len(D1)] = D1
        M[i,1,: len(D1)] = ocExt[:]
        M[i,2:5,: len(D1)] = cD.T

    return M
def standardize_selected_features(X_df, gene_features):
    """Standardize (take z-scores of) selected real-valued features.
    Note this should be done for train and test sets independently. Also note
    this doesn't necessarily preserve the order of features (this shouldn't
    matter in most cases).
    """
    selected = X_df.loc[:, gene_features]
    passthrough = X_df.loc[:, ~gene_features]
    scaled = pd.DataFrame(
        StandardScaler().fit_transform(selected),
        index=selected.index.copy(),
        columns=selected.columns.copy(),
    )
    # Scaled gene features first, untouched features after.
    return pd.concat((scaled, passthrough), axis=1)
def crop_and_align_one(image: Image.Image, polygon: np.array):
    """
    Crop and warp image so that it only contains the word selected by the bounding polygon, horizontally aligned
    :param image:
    :param polygon:
    :return:
    """
    # Minimal rotated rectangle around the polygon, corners ordered clockwise.
    corners = _sort_clockwise(
        cv2.boxPoints(cv2.minAreaRect(np.array(polygon).astype(np.float32))))
    box_width = int(np.linalg.norm(corners[0] - corners[1]) + 1)
    box_height = int(np.linalg.norm(corners[1] - corners[2]) + 1)

    # Perspective transform mapping the rotated box onto an axis-aligned one.
    target = np.float32(
        [[0, 0], [box_width, 0], [box_width, box_height], [0, box_height]])
    matrix = cv2.getPerspectiveTransform(corners, target)

    # Warp to extract the horizontally-aligned word region.
    warped = cv2.warpPerspective(
        np.array(image), matrix, (box_width, box_height),
        flags=cv2.INTER_NEAREST)
    return Image.fromarray(warped)
import os
def _calculate_file_hashes(full_path, f_hashers):
"""
Returns a dictionary of (algorithm, hexdigest) values for the provided
filename
"""
if not os.path.exists(full_path):
raise BagValidationError("%s does not exist" % full_path)
try:
with open(full_path, 'rb') as f:
while True:
block = f.read(1048576)
if not block:
break
for i in list(f_hashers.values()):
i.update(block)
except IOError as e:
raise BagValidationError("could not read %s: %s" % (full_path, str(e)))
except OSError as e:
raise BagValidationError("could not read %s: %s" % (full_path, str(e)))
return dict(
(alg, h.hexdigest()) for alg, h in list(f_hashers.items())
) | 0c678062d62a869e7f7babde08dc003e85446fab | 3,634,761 |
def get_ldap():
    """connects to ldap and returns ldap connection

    NOTE(review): the real connection logic is commented out below, so this
    is currently a stub that always returns None — confirm this is
    intentional before relying on it.
    """
    # if not hasattr(g, 'ldap'):
    # g.ldap = ldap.initialize(app.config['LDAP_URL'])
    # return g.ldap
    return None
def _sqrt_l2_prox(ww, reg):
    """The proximal operator for reg * ||ww||_2 (not squared).

    Operates column-wise and mutates ``ww`` in place (also returned).
    ``reg`` is indexed per column, so it is expected to hold one
    regularization value per column of ``ww``.
    """
    backend = get_backend()
    # Per-column Euclidean norms.
    norm_ww = backend.norm(ww, axis=0)
    mask = norm_ww == 0
    ww[:, mask] = 0
    # Block soft-thresholding: shrink each nonzero column toward zero by
    # a factor of max(0, 1 - reg / ||column||).
    ww[:, ~mask] = backend.clip(1 - reg[~mask] / norm_ww[~mask], 0,
                                None)[None] * ww[:, ~mask]
    return ww
def modify_content(request, page_id, content_type, language_id):
    """Modify the content of a page.

    Requires a POST with a non-empty 'content' field and change permission
    for the given page/language; responds 'ok' on success, 404 otherwise.
    """
    page = get_object_or_404(Page, pk=page_id)
    perm = PagePermission(request.user).check('change', page=page,
        lang=language_id, method='POST')
    if perm and request.method == 'POST':
        content = request.POST.get('content', False)
        if not content:
            raise Http404
        # Fix: reuse the page fetched above instead of re-querying it
        # (the original ran a redundant Page.objects.get(pk=page_id) here).
        if settings.PAGE_CONTENT_REVISION:
            Content.objects.create_content_if_changed(page, language_id,
                content_type, content)
        else:
            Content.objects.set_or_create_content(page, language_id,
                content_type, content)
        page.invalidate()
        # save() bumps the last modification date
        page.save()
        return HttpResponse('ok')
    raise Http404
def _extend_data_with_sampled_characteristics(df, optim_paras, options):
    """Sample initial observations from initial conditions.
    The function iterates over all state space dimensions and replaces NaNs with values
    sampled from initial conditions. In the case of an n-step-ahead simulation with
    sampling all state space dimensions are sampled. For the other two simulation
    methods, potential NaNs in the data are replaced with sampled characteristics.
    Characteristics are sampled regardless of the simulation type which keeps randomness
    across the types constant.
    Parameters
    ----------
    df : pandas.DataFrame
        A pandas DataFrame which contains only an index for n-step-ahead simulation with
        sampling. For the other simulation methods, it contains information on
        individuals which is allowed to have missing information in the first period.
    optim_paras : dict
    options : dict
    Returns
    -------
    df : pandas.DataFrame
        A pandas DataFrame with no missing values.
    """
    # Sample characteristics only for the first period.
    fp = df.query("period == 0").copy()
    index = fp.index
    # Observables: fill NaNs with draws from each observable's distribution.
    for observable in optim_paras["observables"]:
        level_dict = optim_paras["observables"][observable]
        sampled_char = _sample_characteristic(fp, options, level_dict, use_keys=False)
        fp[observable] = fp[observable].fillna(
            pd.Series(data=sampled_char, index=index), downcast="infer"
        )
    # Initial experience levels: use_keys=True means the sampled values are
    # the dict keys themselves, i.e. actual experience counts.
    for choice in optim_paras["choices_w_exp"]:
        level_dict = optim_paras["choices"][choice]["start"]
        sampled_char = _sample_characteristic(fp, options, level_dict, use_keys=True)
        fp[f"exp_{choice}"] = fp[f"exp_{choice}"].fillna(
            pd.Series(data=sampled_char, index=index), downcast="infer"
        )
    # Lagged choices, sampled from the most distant lag to the most recent.
    for lag in reversed(range(1, optim_paras["n_lagged_choices"] + 1)):
        level_dict = optim_paras[f"lagged_choice_{lag}"]
        sampled_char = _sample_characteristic(fp, options, level_dict, use_keys=False)
        fp[f"lagged_choice_{lag}"] = fp[f"lagged_choice_{lag}"].fillna(
            pd.Series(data=sampled_char, index=index), downcast="infer"
        )
    # Sample types and map them to individuals for all periods.
    if optim_paras["n_types"] >= 2:
        level_dict = optim_paras["type_prob"]
        types = _sample_characteristic(fp, options, level_dict, use_keys=False)
        fp["type"] = fp["type"].fillna(
            pd.Series(data=types, index=index), downcast="infer"
        )
    # Update data in the first period with sampled characteristics.
    df = df.combine_first(fp)
    # Types are invariant and we have to fill the DataFrame for one-step-ahead.
    if optim_paras["n_types"] >= 2:
        df["type"] = df["type"].fillna(method="ffill", downcast="infer")
    state_space_columns = create_state_space_columns(optim_paras)
    df = df[state_space_columns].astype(DTYPE_STATES)
    return df
def needs_update(targ_capacity, curr_capacity, num_up_to_date):
    """Return whether there are more batch updates to do.

    Inputs are the target size for the group, the current size of the group,
    and the number of members that already have the latest definition.
    """
    # We are finished only when every current member is up to date AND the
    # group has already reached its target size.
    return num_up_to_date < curr_capacity or curr_capacity != targ_capacity
def build_get_array_item_empty_request(
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Get an array of array of strings [['1', '2', '3'], [], ['7', '8', '9']].
    See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
    into your code flow.
    :return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
     `send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
     incorporate this response into your code flow.
    :rtype: ~azure.core.rest.HttpRequest
    Example:
        .. code-block:: python
            # response body for status code(s): 200
            response.json() == [
                [
                    "str"  # Optional.
                ]
            ]
    """
    accept = "application/json"
    # This operation takes no URL or query parameters: the path is static.
    url = '/array/array/itemempty'
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="GET",
        url=url,
        headers=headers,
        **kwargs
    )
def get_single_image_results(gt_boxes, pred_boxes, iou_thr):
    """Calculates number of true_pos, false_pos, false_neg from single batch of boxes.
    Args:
        gt_boxes (list of list of floats): list of locations of ground truth
            objects as [xmin, ymin, xmax, ymax]
        pred_boxes (dict): dict of dicts of 'boxes' (formatted like `gt_boxes`)
            and 'scores'
        iou_thr (float): value of IoU to consider as threshold for a
            true prediction.
    Returns:
        dict: true positives (int), false positives (int), false negatives (int)
    """
    all_pred_indices = range(len(pred_boxes))
    all_gt_indices = range(len(gt_boxes))
    # No predictions: every ground-truth box is a false negative.
    if len(all_pred_indices) == 0:
        tp = 0
        fp = 0
        fn = len(gt_boxes)
        return {'true_pos': tp, 'false_pos': fp, 'false_neg': fn}
    # No ground truth: every prediction is a false positive.
    if len(all_gt_indices) == 0:
        tp = 0
        fp = len(pred_boxes)
        fn = 0
        return {'true_pos': tp, 'false_pos': fp, 'false_neg': fn}
    # Collect every (gt, pred) pair whose IoU clears the threshold.
    gt_idx_thr = []
    pred_idx_thr = []
    ious = []
    for ipb, pred_box in enumerate(pred_boxes):
        for igb, gt_box in enumerate(gt_boxes):
            iou = calc_iou_individual(pred_box, gt_box)
            if iou > iou_thr:
                gt_idx_thr.append(igb)
                pred_idx_thr.append(ipb)
                ious.append(iou)
    # Greedy one-to-one matching: walk candidate pairs by descending IoU.
    args_desc = np.argsort(ious)[::-1]
    if len(args_desc) == 0:
        # No matches
        tp = 0
        fp = len(pred_boxes)
        fn = len(gt_boxes)
    else:
        gt_match_idx = []
        pred_match_idx = []
        for idx in args_desc:
            gt_idx = gt_idx_thr[idx]
            pr_idx = pred_idx_thr[idx]
            # If the boxes are unmatched, add them to matches
            if (gt_idx not in gt_match_idx) and (pr_idx not in pred_match_idx):
                gt_match_idx.append(gt_idx)
                pred_match_idx.append(pr_idx)
        tp = len(gt_match_idx)
        fp = len(pred_boxes) - len(pred_match_idx)
        fn = len(gt_boxes) - len(gt_match_idx)
    # NOTE(review): TPS/FPS/FNS look like module-level accumulators that are
    # mutated as a side effect of each call -- confirm they are defined and
    # reset by the caller; this hidden state is not reflected in the docstring.
    TPS.append(tp)
    FPS.append(fp)
    FNS.append(fn)
    return {'true_pos': tp, 'false_pos': fp, 'false_neg': fn}
def generate_test_experiments(n=2):
    """Build an ExperimentList containing *n* identical test experiments."""
    # A minimal C-centred monoclinic crystal model shared by every experiment.
    crystal_dict = {
        "__id__": "crystal",
        "real_space_a": [1.0, 0.0, 0.0],
        "real_space_b": [0.0, 1.0, 0.0],
        "real_space_c": [0.0, 0.0, 2.0],
        "space_group_hall_symbol": " C 2y",
    }
    experiments = ExperimentList()
    for idx in range(n):
        experiment = Experiment(
            crystal=Crystal.from_dict(crystal_dict),
            scan=Scan(image_range=[1, 10], oscillation=[0.0, 1.0]),
            scaling_model=get_scaling_model(),
            identifier=str(idx),
        )
        experiments.append(experiment)
    return experiments
def MakeProjectIssueConfig(
    project_id, well_known_statuses, statuses_offer_merge, well_known_labels,
    excl_label_prefixes, templates, col_spec):
    """Return a ProjectIssueConfig with the given values."""
    # Normalize falsy arguments to safe defaults.
    well_known_statuses = well_known_statuses or []
    statuses_offer_merge = statuses_offer_merge or []
    well_known_labels = well_known_labels or []
    excl_label_prefixes = excl_label_prefixes or []
    templates = templates or []
    col_spec = col_spec or ' '
    project_config = tracker_pb2.ProjectIssueConfig()
    if project_id:  # There is no ID for harmonized configs.
        project_config.project_id = project_id
    SetConfigStatuses(project_config, well_known_statuses)
    project_config.statuses_offer_merge = statuses_offer_merge
    SetConfigLabels(project_config, well_known_labels)
    SetConfigTemplates(project_config, templates)
    project_config.exclusive_label_prefixes = excl_label_prefixes
    # Template ID 0 means "nothing specified", so hard-coded defaults apply.
    project_config.default_template_for_developers = 0
    project_config.default_template_for_users = 0
    project_config.default_col_spec = col_spec
    # Note: default project issue config has no filter rules.
    return project_config
import json
import requests
def moderate(request):
    """
    View for moderation actions on a single item.

    Expects POST data:
      * ``action``: one of ``move``, ``delete``, ``undelete``, ``approve``,
        ``pin``, ``unpin``, ``open``, ``close``.
      * ``item_type``: ``conversation``, ``event`` or ``microcosm``.
      * ``item_id``: id of the item to act on.
      * ``microcosm_id``: target microcosm for ``move``; also the redirect
        target after the action completes.
    """
    microcosm_id = request.POST.get('microcosm_id')
    if request.method == 'POST':
        if request.POST.get('action') == 'move':
            # Moving re-parents the item, so it is a full item update
            # rather than a PATCH of a single flag.
            if request.POST.get('item_type') == 'event':
                event = Event()
                event.id = int(request.POST.get('item_id'))
                event.microcosm_id = int(microcosm_id)
                event.meta = {'editReason': 'Moderator moved item'}
                event.update(request.get_host(), request.access_token)
            elif request.POST.get('item_type') == 'conversation':
                conversation = Conversation()
                conversation.id = int(request.POST.get('item_id'))
                conversation.microcosm_id = int(microcosm_id)
                conversation.meta = {'editReason': 'Moderator moved item'}
                conversation.update(request.get_host(), request.access_token)
            elif request.POST.get('item_type') == 'microcosm':
                microcosm = Microcosm()
                microcosm.id = int(request.POST.get('item_id'))
                microcosm.parent_id = int(microcosm_id)
                microcosm.meta = {'editReason': 'Moderator moved item'}
                microcosm.update(request.get_host(), request.access_token)
        else:
            # These are all PATCH requests and we need the item's URL and
            # auth headers first.
            if request.POST.get('item_type') == 'conversation':
                url, params, headers = Conversation.build_request(
                    request.get_host(),
                    request.POST.get('item_id'),
                    access_token=request.access_token
                )
            if request.POST.get('item_type') == 'event':
                url, params, headers = Event.build_request(
                    request.get_host(),
                    request.POST.get('item_id'),
                    access_token=request.access_token
                )
            if request.POST.get('item_type') == 'microcosm':
                url, params, headers = Microcosm.build_request(
                    request.get_host(),
                    request.POST.get('item_id'),
                    access_token=request.access_token
                )
            # Every remaining action toggles exactly one meta flag, so a
            # single dispatch table replaces seven near-identical branches.
            flag_for_action = {
                'delete': ('/meta/flags/deleted', True),
                'undelete': ('/meta/flags/deleted', False),
                'approve': ('/meta/flags/moderated', False),
                'pin': ('/meta/flags/sticky', True),
                'unpin': ('/meta/flags/sticky', False),
                'open': ('/meta/flags/open', True),
                'close': ('/meta/flags/open', False),
            }
            action = request.POST.get('action')
            if action in flag_for_action:
                path, value = flag_for_action[action]
                payload = json.dumps([{'op': 'replace', 'path': path, 'value': value}])
                headers['Content-Type'] = 'application/json'
                requests.patch(url, payload, headers=headers)
    return HttpResponseRedirect(reverse('single-microcosm', args=(microcosm_id,)))
from typing import Dict
from typing import Any
from datetime import datetime
import requests
def get_our_config(
        repository: str,
        default_branch: str,
        ) -> Dict[str, Dict[str, Any]]:
    """
    Parse our config from the ``pyproject.toml`` file from GitHub.
    :param repository: The repository to obtain the file from (in the form ``<user>/<repo>``).
    :param default_branch: The repository's default branch (e.g. ``'master'``).
    :returns: The file's contents, parsed as a dictionary.
    """
    # This makes another request, but I can't find a better way to lay everything out.
    # Per-repository cache file holding the parsed config plus the HTTP
    # caching validators (ETag / Expires) used for conditional revalidation.
    datafile = CACHE_DIR / repository / "dependency-dash.dat"
    datafile.parent.maybe_make(parents=True)
    url = f"https://raw.githubusercontent.com/{repository}/{default_branch}/pyproject.toml"
    etag: str
    expires: datetime
    try:
        # Cache hit path: reuse the stored config and validators (else block).
        data: Dict[str, Any] = datafile.load_json()
    except FileNotFoundError:
        # Cache miss: fetch and parse the file from scratch.
        response = requests.get(url, timeout=10)
        if response.status_code != 200:
            raise requests.HTTPError # TODO: better error
        etag = response.headers["etag"]
        expires = datetime.strptime(response.headers["expires"], "%a, %d %b %Y %H:%M:%S %Z")
        config = dom_toml.loads(response.text)
        if "dependency-dash" not in config.get("tool", {}):
            raise KeyError
        files = config["tool"]["dependency-dash"]
    else:
        etag = data["etag"]
        expires = datetime.fromisoformat(data["expires"])
        files = data["files"]
    # NOTE(review): this expiry check also runs right after a fresh fetch, so
    # a still-valid Expires header returns here *before* the cache file is
    # written below -- confirm that early return is intended.
    if expires > datetime.utcnow():
        # Nothing changed
        return files
    else:
        # Stale cache (or short-lived fresh headers): revalidate with a
        # conditional request; 304 means the cached copy is still current.
        response = requests.get(url, timeout=10, headers={"If-None-Match": etag})
        if response.status_code not in (200, 304):
            raise requests.HTTPError # TODO: better error
        if response.status_code == 200:
            # File changed upstream; re-parse and refresh the validators.
            config = dom_toml.loads(response.text)
            if "dependency-dash" not in config.get("tool", {}):
                raise KeyError
            files = config["tool"]["dependency-dash"]
            etag = response.headers["etag"]
            expires = datetime.strptime(response.headers["expires"], "%a, %d %b %Y %H:%M:%S %Z")
    # Persist the (possibly refreshed) config and caching validators.
    data = {
        "etag": etag,
        "expires": expires.isoformat(),
        "files": files,
    }
    datafile.dump_json(data)
    return files
def private_invite_code(invite_code_key, invite_code):
    """
    Validate a private-beta invite code.

    :param invite_code_key: cache key for the invite code (currently unused;
        kept for interface compatibility with the captcha validators)
    :param invite_code: the code submitted by the user
    :return: 0 when the code is valid, otherwise a dict of error messages
    """
    # NOTE: the original messages were mojibake (UTF-8 Chinese decoded with
    # the wrong codec, which also broke the string literals across lines);
    # the strings below are the reconstructed Chinese text.
    error_dict = 0
    if not invite_code:
        # "The beta invite code must not be blank."
        error_dict = {'captcha_not_blank': ['内测邀请码不能为空']}
    else:
        # Redis-backed validation is disabled; compare against the
        # hard-coded beta invite code instead.
        server_invite = '616833686'
        if server_invite != invite_code:
            # "The beta invite code is wrong or has expired."
            error_dict = {'captcha_error': ['内测邀请码有误或已过期']}
    return error_dict
def build_collector_url(webmap=None, center=None, feature_layer=None, fields=None, search=None, portal=None, action=None, geometry=None, callback=None, callback_prompt=None):
    """
    Creates a url that can be used to open Collector for ArcGIS
    ================== ====================================================================
    **Argument** **Description**
    ------------------ --------------------------------------------------------------------
    webmap Optional :class:`String`, :class:`~arcgis.mapping.WebMap`, :class:`~arcgis.gis.Item`.
    The item id, webmap, or item representing the map to open in Collector.
    ------------------ --------------------------------------------------------------------
    center Optional :class:`String`, :class:`list`, :class:`tuple`.
    The "lat,long" in WGS84 of where to center the map
    ------------------ --------------------------------------------------------------------
    feature_layer Optional :class:`String` or :class:`~arcgis.features.FeatureLayer`.
    The feature layer url as string or the feature layer representing the layer to open
    for collection.
    ------------------ --------------------------------------------------------------------
    fields Optional :class:`Dict`. The feature attributes dictionary {"field":"value"}
    ------------------ --------------------------------------------------------------------
    search Optional :class:`String` An address, place, coordinate, or feature to search for
    Requires webmap and action=search to be set.
    Value must be URL encoded
    ------------------ --------------------------------------------------------------------
    portal Optional :class:`String`, :class:`~arcgis.gis.GIS`.
    The URL of the portal the mobile worker must be connected to.
    ------------------ --------------------------------------------------------------------
    action Optional :class:`String` What the app should do, if anything, once open
    and the user is signed in.
    The following values are supported: addFeature, center, open, search.
    ------------------ --------------------------------------------------------------------
    geometry Optional :class:`String`. Defines the location for the newly collectoed
    or edited feature
    Requires webmap, action=addFeature, and feature_layer.
    Value is a coordinate containing x, y (z if available)
    ------------------ --------------------------------------------------------------------
    callback Optional :class:`String`. The URL to call when capturing the asset or
    observation is complete.
    Requires webmap, action=addFeature, and feature_layer to be set.
    Optionally, before calling the URL provide a prompt for the user,
    specified with the callback_prompt parameter.
    ------------------ --------------------------------------------------------------------
    callback_prompt Optional :class:`String`. Prompt the mobile worker before executing the callback,
    and display this value in the prompt as where the mobile worker will be taken.
    Requires webmap, action=addFeature, feature_layer, and callback to be specified.
    Value must be URL encoded
    ================== ====================================================================
    Additional info can be found here: https://github.com/Esri/collector-integration
    :return: :class:`String`
    """
    params = []
    # Branch out based on the version of Collector.
    if portal or action:
        # New Collector scheme: universal link with explicit portal/action.
        url = "https://collector.arcgis.app"
        if portal:
            if isinstance(portal,arcgis.gis.GIS):
                portal = portal.url
            params.append("portalURL=" + portal)
        if action:
            params.append("referenceContext=" + action)
        if not webmap:
            raise ValueError("Invalid parameters -- Must specify a webmap")
        else:
            # Normalize webmap to its item id regardless of input type.
            item_id = webmap
            if isinstance(item_id, arcgis.mapping.WebMap):
                item_id = item_id.item.id
            elif isinstance(item_id, arcgis.gis.Item):
                item_id = item_id.id
            params.append("itemID=" + item_id)
        # Dispatch table: each helper appends the action-specific parameters.
        # NOTE(review): an unrecognized action makes actions.get(action)
        # return None and raises TypeError here -- confirm that is intended.
        actions = {'open': lambda: _build_collector_url_for_open_action(params),
                   'center': lambda: _build_collector_url_for_center_action(params, center),
                   'search': lambda: _build_collector_url_for_search_action(params, search),
                   'addFeature': lambda: _build_collector_url_for_addFeature_action(params, feature_layer, geometry, fields, callback, callback_prompt)}
        params = actions.get(action)()
    # Collector Classic app integration logic.
    else:
        url = "arcgis-collector://"
        _validate_collector_url(webmap, center, feature_layer, fields)
        item_id = webmap
        # webmap falsy bug #1244
        if webmap is not None:
            if isinstance(webmap, arcgis.mapping.WebMap):
                item_id = webmap.item.id
            elif isinstance(webmap, arcgis.gis.Item):
                item_id = webmap.id
            params.append("itemID=" + item_id)
        if center:
            # Accept "lat,long" strings or (lat, long) sequences.
            if isinstance(center,(list, tuple)):
                center = '{},{}'.format(center[0],center[1])
            params.append("center=" + center)
        if feature_layer:
            feature_source_url = feature_layer
            if isinstance(feature_layer, arcgis.features.FeatureLayer):
                feature_source_url = feature_layer.url
            params.append("featureSourceURL=" + feature_source_url)
        if fields:
            attributes = []
            # unencoded format is featureAttributes={"fieldName":"value","fieldName2":"value2"}
            for k, v in fields.items():
                attributes.append(_encode_string('"{}":"{}"'.format(k, v)))
            # %7B / %7D are the URL-encoded braces of the JSON object.
            params.append("featureAttributes=%7B" + ",".join(attributes) + "%7D")
    if params:
        url += "?" + "&".join(params)
    return url
def load_creator(_, context):
    """Load the record creator.

    Prefer the creator recorded on an existing record; otherwise fall back
    to the current user id from the context.
    """
    previous = context.get('record')
    if not previous:
        # TODO a validation error must be raised in each case
        return context.get('user_id', missing)
    return previous.get('created_by', missing)
def summary(state, figsize=(11, 7), hemisphere="both", center_lon=180, pv_cmap="viridis",
            pv_max=None, v_max=None):
    """4-panel plot showing the model state in terms of vorticity and wind."""
    grid = state.grid
    # roll shifts fields so center_lon sits at the plot centre; the returned
    # helper configures the matching longitude axis for an Axes object.
    roll, configure_lon_x = roll_lons(grid.lons, center_lon)
    # Scale PV to 10e-4 1/s
    pv = 10000 * state.pv
    if pv_max is not None:
        pv_max = pv_max * 10000
    # Plot 2 rows with 2 panels each
    fig, ((ax11, ax12), (ax21, ax22)) = plt.subplots(2, 2, figsize=figsize, gridspec_kw={
        "width_ratios": (4, 10)
    })
    # Panel: zonal mean vorticity line plot
    ax11.vlines([0], -90, 90, linestyle="--", linewidth=0.5, color="#666666")
    # Planetary vorticity
    # NOTE(review): _ZONAL is assumed to be the module-level axis index of
    # the zonal (longitude) dimension -- confirm.
    zmpv = np.mean(grid.fcor * 10000, axis=_ZONAL)
    ax11.plot(zmpv, grid.lats, color="#999999", label="pla.")
    # Zonal mean relative vorticity
    zmrv = np.mean(state.vorticity * 10000, axis=_ZONAL)
    ax11.plot(zmrv, grid.lats, color="#006699", label="rel.")
    # Zonal mean absolute (=potential) vorticity
    zmav = np.mean(pv, axis=_ZONAL)
    ax11.plot(zmav, grid.lats, color="#000000", label="pot.")
    configure_lat_y(ax11, hemisphere)
    ax11.legend(loc="upper left")
    ax11.set_title("zonal mean vort. [$10^{-4} \\mathrm{s}^{-1}$]", loc="left")
    # Panel: PV and wind vectors
    pv_levels = symmetric_levels(pv, 11 if hemisphere == "both" else 17, ext=pv_max)
    pvc = ax12.contourf(grid.lons, grid.lats, roll(pv), cmap=pv_cmap, levels=pv_levels, extend="both")
    fig.colorbar(pvc, ax=ax12)
    n_vectors = 13 if hemisphere == "both" else 21
    # Skip the quiver plot entirely when the wind field is identically zero.
    if not (np.linalg.norm(state.u) == 0. and np.linalg.norm(state.v) == 0.):
        ax12.quiver(*reduce_vectors(grid.lon, grid.lat, state.u, state.v, n_vectors))
    configure_lon_x(ax12)
    configure_lat_y(ax12, hemisphere)
    ax12.set_title("PV [$10^{-4} \\mathrm{s}^{-1}$] and wind vectors", loc="left")
    set_title_time(ax12, state.time)
    # Panel: Zonal mean zonal wind line plot
    ax21.vlines([0], -90, 90, linestyle="--", linewidth=0.5, color="#666666")
    zmu = np.mean(state.u, axis=_ZONAL)
    ax21.plot(zmu, grid.lats, color="#000000")
    ax21.set_title("mean zonal wind [$\\mathrm{m} \\mathrm{s}^{-1}$]", loc="left")
    configure_lat_y(ax21, hemisphere)
    # Panel: Meridional wind and streamfunction
    v_levels = symmetric_levels(state.v, 10, ext=v_max)
    pvc = ax22.contourf(grid.lons, grid.lats, roll(state.v), levels=v_levels, cmap="RdBu_r", extend="both")
    fig.colorbar(pvc, ax=ax22)
    psi = state.streamfunction
    psi_levels = np.linspace(np.min(psi), np.max(psi), 10 if hemisphere == "both" else 14)
    # A constant streamfunction would produce identical levels; skip contours then.
    if psi_levels[0] != psi_levels[-1]:
        ax22.contour(grid.lons, grid.lats, roll(psi), levels=psi_levels, linestyles="-", colors="k")
    configure_lon_x(ax22)
    configure_lat_y(ax22, hemisphere)
    ax22.set_title("meridional wind [$\\mathrm{m} \\mathrm{s}^{-1}$] and streamfunction", loc="left")
    set_title_time(ax22, state.time)
    fig.tight_layout()
    return fig
from typing import List
def savings_initial_sol(dist: np.array, nodes: List[dict], vehicles: int, clients: int, limit:int, penalidade:float=0.2) -> List[np.array]:
    """
    Build an initial CVRP solution with the savings (Clarke-Wright) method,
    allowing merged routes to exceed the capacity limit by a penalty fraction.
    """
    # Compute the savings value for every pair of clients.
    best_savings = calculate_savings(nodes, dist, clients)
    # Sort the savings by value, largest first.
    best_savings = sort_with_key(best_savings, 'saving')
    # Start from n singleton routes, one per client.
    solution = create_n_routes(nodes, clients)
    count = 0
    # Merge routes until the route count equals the vehicle count; merged
    # routes may exceed the capacity limit by up to `penalidade` (fraction).
    while len(solution) > vehicles and len(best_savings) > 0:
        # Savings entry: its value and the pair of cities it would join.
        saving_tuple = best_savings.pop(0)
        count += 1
        # The two cities of this savings entry.
        i = saving_tuple['cities'][0]
        j = saving_tuple['cities'][1]
        # Routes containing city i / city j (False until found).
        route_i = False
        route_j = False
        # Scan the solution for the routes that contain cities i and j.
        index = 0
        while index < len(solution) and not (route_i and route_j):
            route = solution[index]['route']
            if i in route:
                route_i = (route, index)
            if j in route:
                route_j = (route, index)
            index += 1
        # Skip when cities i and j already belong to the same route.
        # NOTE(review): if either city was not found, route_i/route_j stays
        # False and the [1] lookups below would raise TypeError -- confirm
        # every city is always present in some route.
        if route_i[1] == route_j[1]:
            continue
        # Merge the two routes at cities i and j.
        route = join_route(route_i[0], route_j[0], i, j)
        aux_capacity = sum_route_capacity(route, nodes)
        # Reject the merge if it produced an empty route or exceeds the
        # capacity limit by more than the penalty fraction.
        if not len(route) or aux_capacity > (limit * (1 + penalidade)):
            continue
        # Remove both old routes (highest index first so the second pop's
        # index stays valid) and append the merged route.
        if route_i[1] > route_j[1]:
            solution.pop(route_i[1])
            solution.pop(route_j[1])
        else:
            solution.pop(route_j[1])
            solution.pop(route_i[1])
        solution.append({'route': route, 'capacity': aux_capacity})
    return solution
def features_to_matrix(features):
    """
    features_to_matrix(features)
    This function takes a list of feature matrices as argument and returns
    a single concatenated feature matrix and the respective class labels.
    ARGUMENTS:
        - features:        a list of feature matrices (one per class), each
                           of shape (n_samples_i, n_features)
    RETURNS:
        - feature_matrix:  a concatenated matrix of features with shape
                           (sum(n_samples_i), n_features)
        - labels:          a 1-D vector of class indices; labels[k] == i
                           when row k of feature_matrix came from features[i]
    """
    # Handle the empty input explicitly instead of raising inside vstack.
    if not len(features):
        return np.array([]), np.array([])
    feature_matrix = np.vstack(features)
    # Labels are built as a flat 1-D vector.  The previous implementation
    # returned a (n, 1) column for a single input matrix but a flat vector
    # for several (np.append flattens its arguments); this makes the shape
    # consistent regardless of the number of classes.
    labels = np.concatenate([i * np.ones(len(f)) for i, f in enumerate(features)])
    return feature_matrix, labels
def union_all(*selects, **kwargs):
    """Return a ``UNION ALL`` of multiple selectables.

    The returned object is an instance of
    :class:`.CompoundSelect`.

    A similar :func:`union_all()` method is available on all
    :class:`.FromClause` subclasses.

    :param selects:
      a list of :class:`.Select` instances.

    :param kwargs:
      available keyword arguments are the same as those of
      :func:`select`.
    """
    return CompoundSelect(CompoundSelect.UNION_ALL, *selects, **kwargs)
from autode.species import Species
from autode.calculation import Calculation
from autode.exceptions import CouldNotGetProperty
def run_autode(configuration, max_force=None, method=None, n_cores=1):
    """
    Run an orca or xtb calculation
    --------------------------------------------------------------------------
    :param configuration: (gaptrain.configurations.Configuration)
    :param max_force: (float) or None. Must be None: optimisation to a
                      force threshold is not implemented, only single-point
                      gradient evaluations.
    :param method: (autode.wrappers.base.ElectronicStructureMethod)
    :param n_cores: (int) number of cores for the calculation
    :return: the configuration, updated in place with energy (eV),
             forces (eV/Å) and partial charges
    """
    if method.name == 'orca' and GTConfig.orca_keywords is None:
        raise ValueError("For ORCA training GTConfig.orca_keywords must be"
                         " set. e.g. "
                         "GradientKeywords(['PBE', 'def2-SVP', 'EnGrad'])")
    # optimisation is not implemented, needs a method to run
    # NOTE(review): assert-based validation is stripped under `python -O` --
    # consider raising explicitly instead.
    assert max_force is None and method is not None
    species = Species(name=configuration.name,
                      atoms=configuration.atoms,
                      charge=configuration.charge,
                      mult=configuration.mult)
    # allow for an ORCA calculation to have non-default keywords.. not the
    # cleanest implementation..
    kwds = GTConfig.orca_keywords if method.name == 'orca' else method.keywords.grad
    calc = Calculation(name='tmp',
                       molecule=species,
                       method=method,
                       keywords=kwds,
                       n_cores=n_cores)
    calc.run()
    # Hartree -> electronvolt conversion factor.
    ha_to_ev = 27.2114
    try:
        # Forces are the negative energy gradient, converted to eV units.
        configuration.forces = -ha_to_ev * calc.get_gradients()
    except CouldNotGetProperty:
        logger.error('Failed to set forces')
    configuration.energy = ha_to_ev * calc.get_energy()
    configuration.partial_charges = calc.get_atomic_charges()
    return configuration
def set_count_and_flavor_params(role, baremetal_client, compute_client):
    """Returns the parameters for role count and flavor.

    The parameter names are derived from the role name:
        <camel case role name, no hyphens>Count
        Overcloud<camel case role name, no hyphens>Flavor
    Exceptions from this rule (the control and object-storage roles) are
    defined in the PARAM_EXCEPTIONS dict.
    """
    node_count = get_node_count(role, baremetal_client)
    # A role with no nodes gets the placeholder 'baremetal' flavor.
    flavor = 'baremetal' if node_count == 0 else get_flavor(role, compute_client)
    try:
        exception = PARAM_EXCEPTIONS[role]
    except KeyError:
        return {
            _get_count_key(role): node_count,
            _get_flavor_key(role): flavor,
        }
    return {
        exception['count']: node_count,
        exception['flavor']: flavor,
    }
import sys
import linecache
def extract_tb(tb, limit=None):
    """Stolen from the traceback module but respects ``__traceback_hide__``.

    Walks the traceback chain, skipping frames flagged by
    ``_should_skip_frame``, and returns the usual list of
    ``(filename, lineno, name, line)`` tuples.
    """
    if limit is None:
        limit = getattr(sys, "tracebacklimit", None)
    entries = []
    depth = 0
    while tb is not None and (limit is None or depth < limit):
        frame = tb.tb_frame
        if not _should_skip_frame(frame):
            lineno = tb.tb_lineno
            code = frame.f_code
            filename = code.co_filename
            linecache.checkcache(filename)
            source_line = linecache.getline(filename, lineno, frame.f_globals)
            source_line = source_line.strip() if source_line else None
            entries.append((filename, lineno, code.co_name, source_line))
        tb = tb.tb_next
        depth += 1
    return entries
def remove_common(counter):
    """
    Remove a small curated set of filler words from a word counter.

    The counter is modified in place and also returned.

    :param counter: Counter containing words used in the chat
    :return: counter with the filler words removed
    """
    # TODO fix remove common it removes stuff it shouldn't - maybe smaller curated list
    # The previous "top 10000 common English words" lists removed words they
    # should not have, so only this hand-picked set is filtered now.
    filler_words = ["I'm", "It's", "Don't", "That's", "Should", "About"]
    for word in filler_words:
        # Counter.__delitem__ is a no-op for missing keys, so deleting a
        # word that never occurred is safe.
        del counter[word.capitalize()]
    return counter
from pathlib import Path
def is_dir_exist(path):
    """Return True when *path* refers to an existing directory."""
    return Path(path).is_dir()
def m1m2count(data):
    """
    Read the counts of epitopes for -1 and -2 mutations by HLA in a dataset.

    Returns per-HLA counts for the two mutation classes plus the sorted HLA
    and mutation-ID lists used to compute them.
    """
    hlas = list(data["HLA"].sort_values().unique())
    all_ids = list(data["ID"].sort_values().unique())
    # Split mutation IDs by their frameshift suffix.
    m_1 = [mutation for mutation in all_ids if mutation.endswith("_m1")]
    m_2 = [mutation for mutation in all_ids if mutation.endswith("_m2")]
    m_1_counts = count_by_hla_by_mutation(data, hlas, m_1)
    m_2_counts = count_by_hla_by_mutation(data, hlas, m_2)
    return m_1_counts, m_2_counts, hlas, m_1, m_2
def get_pyextension_imports(modname):
    """
    Return list of modules required by binary (C/C++) Python extension.
    Python extension files ends with .so (Unix) or .pyd (Windows).
    It's almost impossible to analyze binary extension and its dependencies.
    Module cannot be imported directly.
    Let's at least try import it in a subprocess and get the difference
    in module list from sys.modules.
    This function could be used for 'hiddenimports' in PyInstaller hooks files.
    """
    # The snippet below is executed in a fresh interpreter via eval_statement;
    # it prints the set of modules newly registered by importing `modname`.
    # NOTE(review): the statement text is indented here -- confirm that
    # eval_statement dedents it before exec, otherwise it would raise an
    # IndentationError.
    statement = """
    import sys
    # Importing distutils filters common modules, especiall in virtualenv.
    import distutils
    original_modlist = set(sys.modules.keys())
    # When importing this module - sys.modules gets updated.
    import %(modname)s
    all_modlist = set(sys.modules.keys())
    diff = all_modlist - original_modlist
    # Module list contain original modname. We do not need it there.
    diff.discard('%(modname)s')
    # Print module list to stdout.
    print(list(diff))
    """ % {'modname': modname}
    module_imports = eval_statement(statement)
    if not module_imports:
        logger.error('Cannot find imports for module %s' % modname)
        return []  # Means no imports found or looking for imports failed.
    #module_imports = filter(lambda x: not x.startswith('distutils'), module_imports)
    return module_imports
import vedo
import numpy as np
def density_map(points: PointsData,
                radius:float = 50) -> ImageData:
    """
    Generate a density map from points data.
    Parameters
    ----------
    points : PointsData
        Array of point coordinates, one row per point (2-D or 3-D).
    radius : float, optional
        The local neighborhood is specified as the radius around each sample
        position (each voxel). The default is 50.
    Returns
    -------
    ImageData
        Density volume as a numpy array (2-D input yields a 2-D array).
    See also
    --------
    https://vedo.embl.es/autodocs/content/vedo/pointcloud.html
    """
    pointcloud = vedo.pointcloud.Points(points)
    # Grid dimensions are derived from the points' maximum coordinates.
    vol = pointcloud.density(radius=radius,
                             dims=np.max(points, axis=0).astype(int))
    ndims = points.shape[1]
    # Somehow vedo returns a 3D volume with XYZ for 2D data
    if ndims == 2:
        vol = vol.tonumpy()[..., 0]
    else:
        vol = vol.tonumpy()
    return vol
def compute_permutation_sample(perm_num, all_conditions_power, trial_indices,
                               permutation_indices, times, freqs, chs, config,
                               comp, exp):
    """ Helper function to compute the permuted toi band power difference for
    a particular permutation of trials between two conditions.

    This function takes in the tfr power data for two conditions, permutes
    the trial membership between the two conditions according to the given
    permutation index, and then computes the baseline-normalized band power
    averaged across the first recording array and a post-stimulation time
    period of interest for each condition and returns their difference.

    Args:
        perm_num: The permutation number used to index a particular
            permutation index and sub-sample index. Pass -1 to compute the
            un-permuted (observed) difference.
        all_conditions_power: Dictionary containing the tfr power data for
            each condition being tested.
        trial_indices: The pre-computed sub-sampling indices.
        permutation_indices: The pre-computed permutation indices.
        times: List of time labels.
        freqs: List of frequency labels.
        chs: List of channel names.
        config: Dictionary containing experiment wide configuration info. In
            this case it contains the baseline period to normalize to, bad chs
            to ignore, time period to average over, and the frequency ranges
            for alpha and beta band.
        comp: List of the two conditions to compare.
        exp: Experiment name used to look up bad channels via
            config['%s_bad_chs' % exp].

    Returns:
        List of two numbers representing the permuted difference between
        the two conditions for the alpha and beta bands.
    """
    # collect power across conditions into single array
    # we downsample to match trial sizes
    power = []
    for c in comp:
        if perm_num != -1 and c != 'Brain' and 'Brain' in comp:
            trial_ix = trial_indices[c][perm_num, :]
            power.append(all_conditions_power[c][trial_ix, :, :, :].squeeze())
        else:
            power.append(all_conditions_power[c])
    power = np.vstack(power)

    # permute the data
    if perm_num != -1:
        perm_ix = permutation_indices[perm_num, :]
        power = power[perm_ix, :, :, :].squeeze()

    # baseline normalize each condition separately
    if perm_num != -1:
        # Bug fix: use floor division. In Python 3, `/` yields a float,
        # and floats are not valid slice indices (TypeError below).
        cond_len = power.shape[0] // 2
    else:
        cond_len = all_conditions_power[comp[0]].shape[0]
    tmp = []
    tmp.append(baseline_normalize(power[:cond_len, :], config['baseline'],
                                  times))
    tmp.append(baseline_normalize(power[cond_len:, :], config['baseline'],
                                  times))
    power = tmp

    # reduce over array
    power[0] = reduce_array_power(power[0], chs, config['%s_bad_chs' % exp],
                                  '1', axis=0)
    power[1] = reduce_array_power(power[1], chs, config['%s_bad_chs' % exp],
                                  '1', axis=0)

    # compute toi band power difference
    diffs = []
    for band in [config['alpha'], config['beta']]:
        # reduce to band
        c1_power = reduce_band_power(power[0], freqs, band, axis=0)
        c2_power = reduce_band_power(power[1], freqs, band, axis=0)

        # reduce over time
        c1_power = reduce_toi_power(c1_power, times, config['toi'], axis=0)
        c2_power = reduce_toi_power(c2_power, times, config['toi'], axis=0)

        diffs.append(c1_power - c2_power)

    return diffs
def get_users():
    """Return a JSON payload listing every user.

    The endpoint is for now publicly available.

    Returns:
        JSON of all users
    """
    all_users = User.query.all()  # no need to order
    return jsonify(users=[u.to_dict() for u in all_users])
def _lower_neighbours(G, u):
"""Given a graph `G` and a vertex `u` in `G`, we return a list with the
vertices in `G` that are lower than `u` and are connected to `u` by an
edge in `G`.
Parameters
----------
G : :obj:`Numpy Array(no. of edges, 2)`
Matrix storing the edges of the graph.
u : int
Vertex of the graph.
Returns
-------
lower_neighbours : :obj:`list`
List of lower neighbours of `u` in `G`.
"""
lower_neighbours = []
for e in G:
if max(e) == u:
lower_neighbours.append(min(e))
return np.unique(lower_neighbours) | c51b681a7c0361cd1768791dce4d7d63fb54ab44 | 3,634,790 |
def find_f_include_statement(line):
    """
    Determine whether line contains a Fortran INCLUDE statement. If so,
    return the file name being INCLUDE'd; otherwise return None.

    line is a line of code from the Fortran file being processed.

    If the INCLUDE'd file is in the exclusion set (e.g. external headers
    like mpif.h that should not become prerequisites), None is returned
    as well.
    """
    excluded = get_cli_exclude_files('finc')
    match = f_include_pattern.match(line)
    if not match:
        return None
    inc_file = match.group(2)
    return None if inc_file in excluded else inc_file
def get_phase_from_ephemeris_file(mjdstart, mjdstop, parfile,
                                  ntimes=1000, ephem="DE405",
                                  return_pint_model=False):
    """Get a pulse-phase interpolator from a pulsar parameter file.

    Parameters
    ----------
    mjdstart, mjdstop : float
        Start and end of the time interval where we want the phase solution
    parfile : str
        Any parameter file understood by PINT (Tempo or Tempo2 format)

    Other parameters
    ----------------
    ntimes : int
        Number of time intervals to use for interpolation. Default 1000
    ephem : str
        Solar-system ephemeris passed to the TOA preparation. Default "DE405"
    return_pint_model : bool
        If True, also return the PINT timing model. Default False

    Returns
    -------
    correction_mjd : function
        Function that accepts times in MJDs and returns the absolute pulse
        phase at those times (extrapolating outside the sampled range).
    model
        The PINT timing model; only returned when ``return_pint_model``
        is True.
    """
    mjds = np.linspace(mjdstart, mjdstop, ntimes)
    toalist = prepare_TOAs(mjds, ephem)
    m = get_model(parfile)
    # Absolute phase is returned split into integer and fractional parts.
    phase_int, phase_frac = np.array(m.phase(toalist, abs_phase=True))
    phases = phase_int + phase_frac

    correction_mjd_rough = \
        interp1d(mjds, phases,
                 fill_value="extrapolate")
    if return_pint_model:
        # Bug fix: this flag used to be accepted but silently ignored.
        return correction_mjd_rough, m
    return correction_mjd_rough
def create_hash(ls):
    """Take in the treecolor matrix and return a tuple of the hash code and
    the hash vector that uniquely identifies a particular combination of
    colors."""
    indexes = list(range(20))  # vector of the indexes
    hash_vector = matrixvector_multiply(modify_treecolormatrix(ls), indexes)
    hash_code = ''.join(str(code) for code in hash_vector)
    return (hash_code[:10], hash_vector)
import argparse
def arguments() -> argparse.Namespace:
    """Parse and return the command-line arguments.

    Returns:
        argparse.Namespace with the attributes ``SONG_NAME``, ``quiet``,
        ``url``, ``setup`` and ``nolocal``.
    """
    parser = argparse.ArgumentParser()
    # SONG_NAME is optional (nargs='?'); a missing name leaves it as None.
    parser.add_argument('SONG_NAME', help="Name of the song to download.",
                        default=None, nargs='?', type=str)
    # NOTE(review): the backslash-continued help strings below embed long
    # runs of spaces in the literal; argparse collapses whitespace when
    # rendering --help, so the displayed output is unaffected.
    parser.add_argument('-q', '--quiet',
                        help="Don't ask the user to select songs\
                        if more than one search result.\
                        The first result in each case will be considered.",
                        action='store_true')
    parser.add_argument('--version', action='version', version='v0.2-r3',
                        help='show the program version number and exit')
    parser.add_argument('--url',
                        help="Youtube song link.")
    parser.add_argument('-s', '--setup',
                        help='Setup the config file',
                        action='store_true')
    parser.add_argument('--nolocal',
                        help='Dont search locally for the song before\
                        downloading.',
                        action='store_true')
    args = parser.parse_args()
    return args
import logging
import copy
def dict_inject(base_dct, injection_dct, add_keys=True):
    """ Recursively inject inject_dict into base_dict. Recurses down into dicts nested
    to an arbitrary depth, updating keys.

    Will not alter base_dct or injection_dct, but return a deep copy without references to any of the former.

    The optional argument ``add_keys``, determines whether keys which are
    present in ``injection_dct`` but not ``base_dict`` should be included in the
    new dict.

    Args:
        base_dct (dict): inject injection_dct into base_dct
        injection_dct (dict):
        add_keys (bool): whether to add new keys

    Returns:
        dct: constructed merge dict
    """
    logger = logging.getLogger(__name__)
    logger.debug("Inject 'injection_dct'...")
    _log_nested_dict(logger.debug, injection_dct)
    logger.debug("... into 'base_dct'...")
    _log_nested_dict(logger.debug, base_dct)

    if isinstance(injection_dct, dict) and isinstance(base_dct, dict):
        logger.debug("Treating 'base_dct' and 'injection_dct' as parallel dicts...")
        dct = copy.deepcopy(base_dct)
        for k, v in injection_dct.items():
            if k in base_dct and isinstance(base_dct[k], dict) and isinstance(v, dict):
                logger.debug("Descending into key '{}' for further injection.".format(k))
                dct[k] = dict_inject(base_dct[k], v, add_keys=add_keys)
            elif k in base_dct:
                logger.debug("Replacing dict item '{}: {}' with injection '{}'.".format(k, dct[k], injection_dct[k]))
                dct[k] = copy.deepcopy(v)
            elif add_keys:
                logger.debug("Inserting injection '{}' at key '{}'.".format(injection_dct[k], k))
                dct[k] = copy.deepcopy(v)
            else:
                # Bug fix: previously `add_keys` was ignored and new keys
                # were always inserted, contradicting the documented contract.
                logger.debug("Skipping new key '{}' because add_keys=False.".format(k))
    elif isinstance(injection_dct, list) and isinstance(base_dct, list) and (len(injection_dct) == len(base_dct)):
        logger.debug("Treating 'base_dct' and 'injection_dct' as parallel lists...")
        # in this case base_dct and injecion_dct must have same length
        dct = []
        for base, injection in zip(base_dct, injection_dct):
            if isinstance(base, dict) and isinstance(injection, dict):
                logger.debug("Descending into list item '{}' and injection '{}' for further injection.".format(
                    base, injection))
                dct.append(dict_inject(base, injection, add_keys=add_keys))
            else:
                logger.debug("Replacing list item '{}' with injection '{}'.".format(base, injection))
                dct.append(copy.deepcopy(injection))
    else:  # arrived at leaf, inject
        logger.debug("Treating 'base_dct' and 'injection_dct' as values.")
        logger.debug("Replacing '{}' with injection '{}'.".format(base_dct, injection_dct))
        dct = copy.deepcopy(injection_dct)

    return dct
from typing import List
from typing import Union
def create_pruning_param_scorer(
    params: List[Parameter],
    score_type: Union[str, MFACOptions],
) -> PruningParamsScorer:
    """
    Build a PruningParamsScorer of the requested type.

    :param params: List of Parameters for the created PruningParamsScorer to track
    :param score_type: String name of scoring type to use. Valid options are
        'magnitude', 'movement', or 'MFAC'. For MFAC pruning, passing in an
        MFACOptions object valid and is preferred.
    :return: a PruningParamsScorer of the requested type tracking params
    :raises ValueError: if score_type is neither a known scorer name nor an
        MFACOptions object
    """
    scorer_name_to_constructor = {
        scorer.get_name(): scorer for scorer in AVALIABLE_SCORER_CLASSES
    }

    if isinstance(score_type, str):
        if score_type not in scorer_name_to_constructor:
            raise ValueError(
                f"Invalid score_type {score_type}. Valid score types include "
                f"{list(scorer_name_to_constructor.keys())}"
            )
        return scorer_name_to_constructor[score_type](params)

    if isinstance(score_type, MFACOptions):
        return MFACPruningParamsScorer(params, mfac_options=score_type)

    # Typo fix in the user-facing message: "Recieved" -> "Received".
    raise ValueError(
        f"Received unsupported type for score_type: {type(score_type)} "
        "expected string or MFACOptions object"
    )
from datetime import datetime
def calculate_new_age(table):
    """Calculate each person's current age and add it to the dataframe.

    Args:
        table: pandas DataFrame with a 'birthday' column of datetimes
            (assumed column name -- TODO confirm against callers).

    Returns:
        The same DataFrame with a new 'age' column in whole years
        (None where the birthday is missing/falsy).
    """
    date_now = datetime.now()

    def get_age(birthday):
        # Falsy birthday yields None instead of raising.
        if birthday:
            return relativedelta(date_now, birthday).years

    # Bug fix: the original `table.apply(lambda birthday: get_age)` stored
    # the function object itself in every cell instead of calling it.
    # Apply get_age element-wise to the birthday column.
    table['age'] = table['birthday'].apply(get_age)
    return table
def get_wl(full_dir):
    """
    Get the NIR wavelength used for this Video session.

    :param full_dir: directory containing SVM/SVR files
    :return: a string containing the wavelength
    """
    channel_key = str(get_channel(full_dir))
    return channels[channel_key]
def calculate_v(nfs):
    """Calculate V(n+1/n) values. Useful for establishing the quality of
    your normalization regime. See Vandesompele 2002 for advice on
    interpretation.

    :param DataFrame nfs: A matrix of all normalization factors, produced by
        `calculate_all_nfs`. Columns must be named 1..k in order.
    :return: a Series of values [V(2/1), V(3/2), V(4/3), ...].
    """
    if (nfs.columns != range(1, nfs.columns[-1] + 1)).any():
        raise ValueError("Column names invalid in nf_v_frame")
    leading_cols = nfs.columns[:-1]
    values = [std(log2(nfs[n] / nfs[n + 1]), ddof=1) for n in leading_cols]
    return pd.Series(values, index=leading_cols)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.