| content (string, length 35-762k) | sha1 (string, length 40) | id (int64, 0-3.66M) |
|---|---|---|
import sys
def has_newer_fw( current_fw, bundled_fw ):
"""
:param current_fw: current FW version of a device
:param bundled_fw: bundled FW version of the same device
:return: True if the bundled version is newer than the current one
"""
current_fw_digits = current_fw.split( '.' )
bundled_fw_digits = bundled_fw.split( '.' )
if len( current_fw_digits ) != len( bundled_fw_digits ):
log.e( "Either the devices FW (", current_fw, ") or the bundled FW(", bundled_fw, ") was of an invalid format")
sys.exit(1)
for curr, bundled in zip( current_fw_digits, bundled_fw_digits ):
if int(bundled) > int(curr):
return True
if int(bundled) < int(curr):
return False
return False
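
# Illustrative usage (added sketch, not part of the original module): versions are
# compared field by field, so a larger value in any earlier field wins.
assert has_newer_fw("5.12.7.100", "5.12.8.0") is True    # 8 > 7 in the third field
assert has_newer_fw("5.12.7.100", "5.12.7.100") is False  # identical versions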
|
22daa7346981bdeed394518993dcbbb6b7835c23
| 3,644,800
|
def is_idaq(*args):
"""
is_idaq() -> bool
Returns True or False depending if IDAPython is hosted by IDAQ
"""
return _ida_kernwin.is_idaq(*args)
|
5d18067b31be9c165a847815eb0bab92f89b0381
| 3,644,801
|
import requests
import yaml
def get_stats_yaml():
"""grab national stats yaml from scorecard repo"""
nat_dict = {}
try:
nat_yaml = requests.get(COLLEGE_CHOICE_NATIONAL_DATA_URL)
if nat_yaml.ok and nat_yaml.text:
nat_dict = yaml.safe_load(nat_yaml.text)
except AttributeError: # If response.text has no value
return nat_dict
except requests.exceptions.ConnectionError: # If requests can't connect
return nat_dict
else:
return nat_dict
|
045eeba3bfc42fa9e1821728260fd4d33e216731
| 3,644,802
|
import numpy as np
import scipy.interpolate
def signal_interpolate(x_values, y_values, desired_length, method="quadratic"):
"""Interpolate a signal.
Interpolate (fills the values between data points) a signal using different methods.
Parameters
----------
x_values : list, array or Series
The samples corresponding to the values to be interpolated.
y_values : list, array or Series
The values to be interpolated.
desired_length : int
The amount of samples over which to interpolate the y_values.
method : str
        Method of interpolation. Can be 'linear', 'nearest', 'zero', 'slinear',
        'quadratic', 'cubic', 'previous' or 'next' ('zero', 'slinear',
        'quadratic' and 'cubic' refer to a spline interpolation of zeroth,
        first, second or third order; 'previous' and 'next' simply return the
        previous or next value of the point), or an integer specifying the
        order of the spline interpolator to use.
Returns
-------
array
Vector of interpolated samples.
Examples
--------
>>> import numpy as np
>>> import pandas as pd
>>> import neurokit2 as nk
>>> import matplotlib.pyplot as plt
>>>
>>> signal = np.cos(np.linspace(start=0, stop=20, num=10))
>>> zero = nk.signal_interpolate(signal, desired_length=1000, method="zero")
>>> linear = nk.signal_interpolate(signal, desired_length=1000, method="linear")
>>> quadratic = nk.signal_interpolate(signal, desired_length=1000, method="quadratic")
>>> cubic = nk.signal_interpolate(signal, desired_length=1000, method="cubic")
>>> nearest = nk.signal_interpolate(signal, desired_length=1000, method="nearest")
>>>
>>> plt.plot(np.linspace(0, 1, num=len(zero)), zero, 'y',
np.linspace(0, 1, num=len(linear)), linear, 'r',
np.linspace(0, 1, num=len(quadratic)), quadratic, 'b',
np.linspace(0, 1, num=len(cubic)), cubic, 'g',
np.linspace(0, 1, num=len(nearest)), nearest, 'm',
np.linspace(0, 1, num=len(signal)), signal, 'ko')
>>>
    >>> # Use x-axis and new x-axis
>>> x_axis = np.linspace(start=10, stop=30, num=10)
>>> signal = np.cos(x_axis)
>>> new_x = np.linspace(start=0, stop=40, num=1000)
>>> interpolated = nk.signal_interpolate(signal,
desired_length=1000,
x_axis=x_axis,
new_x=new_x)
>>> plt.plot(new_x, interpolated, '-',
x, signal, 'o')
"""
# Sanity checks
if len(x_values) != len(y_values):
raise ValueError("NeuroKit error: signal_interpolate(): x_values and y_values "
"must be of the same length.")
if desired_length is None or len(x_values) == desired_length:
return y_values
# Create interpolation function
interpolation_function = scipy.interpolate.interp1d(x_values,
y_values,
kind=method,
bounds_error=False,
fill_value=([y_values[0]], [y_values[-1]]))
new_x = np.linspace(x_values[0], x_values[-1], desired_length)
interpolated = interpolation_function(new_x)
return interpolated
|
f3b20589591d2fed6054bbfc236894be70ddb598
| 3,644,803
|
from PIL.features import modules  # feature-name -> (module, version-attr) registry, not sys.modules
def check_module(feature):
"""
Checks if a module is available.
:param feature: The module to check for.
:returns: ``True`` if available, ``False`` otherwise.
:raises ValueError: If the module is not defined in this version of Pillow.
"""
if not (feature in modules):
raise ValueError(f"Unknown module {feature}")
module, ver = modules[feature]
try:
__import__(module)
return True
except ImportError:
return False
|
c00680a135a2464cfb9a04ebae348c74d3c80271
| 3,644,804
|
import numpy as np


def get_original(N: int = 64) -> np.ndarray:
"""radontea logo base image"""
x = np.linspace(-N / 2, N / 2, N, endpoint=False)
X = x.reshape(1, -1)
Y = x.reshape(-1, 1)
z = logo(X, Y, N)
return np.array((z) * 255, dtype=np.uint16)
|
2bab08961d444f6ecfa097258872d02ae185944b
| 3,644,805
|
from typing import List
def get_sql_update_by_ids(table: str, columns: List[str], ids_length: int):
"""
获取添加数据的字符串
:param table:
:param columns:
:param ids_length:
:return:
"""
# 校验数据
if not table:
raise ParamError(f"table 参数错误:table={table}")
if not columns or not isinstance(columns, List):
raise ParamError(f"columns 参数错误:columns={columns}")
if not ids_length or not isinstance(ids_length, int):
raise ParamError(f"ids_length 参数错误:ids_length={ids_length}")
# 准备参数
kvs = [f"{columns[i]}=%s" for i in range(len(columns))]
kvs_str = ", ".join(kvs)
ids = ["%s" for _ in range(ids_length)]
ids_str = ", ".join(ids)
# 准备sql
s = f"update {table} set {kvs_str} where id in ({ids_str});"
return s
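
# Illustrative usage (added sketch; the table and column names are made up):
# the call below returns
#   "update user set name=%s, age=%s where id in (%s, %s, %s);"
example_sql = get_sql_update_by_ids("user", ["name", "age"], 3)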
|
ac70aa43aea4fad06ac2fd521239687040143b28
| 3,644,806
|
import os
import nibabel as nib
import numpy as np
import torch
def extract_roi(
input_img,
masks_location,
mask_pattern,
cropped_input,
roi_list,
uncrop_output,
):
"""Extracts regions of interest defined by masks
This function extracts regions of interest from preprocessed nifti images.
The regions are defined using binary masks that must be located in the CAPS
at `masks/tpl-<template>`.
Args:
input_img: nifti format MRI image.
masks_location: path to the masks
mask_pattern: pattern to identify the masks
cropped_input: if the input is cropped or not (contains desc-Crop)
roi_list: list of the names of the regions that will be extracted.
uncrop_output: if True, the final region is not cropped.
Returns:
file: multiple tensors saved on the disk, suffixes corresponds to
indexes of the patches. Same location than input file.
"""
image_array = nib.load(input_img).get_fdata(dtype="float32")
image_tensor = torch.from_numpy(image_array).unsqueeze(0).float()
input_img_filename = os.path.basename(input_img)
sub_ses_prefix = "_".join(input_img_filename.split("_")[0:3:])
if not sub_ses_prefix.endswith("_T1w"):
sub_ses_prefix = "_".join(input_img_filename.split("_")[0:2:])
input_suffix = input_img_filename.split("_")[-1].split(".")[0]
output_roi = []
for index_roi, roi in enumerate(roi_list):
# read mask
mask_path, _ = find_mask_path(masks_location, roi, mask_pattern, cropped_input)
mask_np = nib.load(mask_path).get_fdata()
if len(mask_np.shape) == 3:
mask_np = mask_np[np.newaxis, :]
extracted_roi = image_tensor * mask_np
if not uncrop_output:
extracted_roi = extracted_roi[
np.ix_(
mask_np.any((1, 2, 3)),
mask_np.any((0, 2, 3)),
mask_np.any((0, 1, 3)),
mask_np.any((0, 1, 2)),
)
]
extracted_roi = extracted_roi.float()
# save into .pt format
output_pattern = compute_output_pattern(mask_path, not uncrop_output)
output_roi.append(
(
f"{sub_ses_prefix}_{output_pattern}_{input_suffix}.pt",
extracted_roi.clone(),
)
)
return output_roi
|
434e7115032c7b4575b5fe8f046df4a6d3c49db8
| 3,644,807
|
import random
import sympy
def add_X_to_both_sides(latex_dict: dict) -> str:
"""
https://docs.sympy.org/latest/gotchas.html#double-equals-signs
https://stackoverflow.com/questions/37112738/sympy-comparing-expressions
Given a = b
add c to both sides
get a + c = b + c
>>> latex_dict = {}
>>> latex_dict['input'] = [{'LHS': parse_latex('a'), 'RHS': parse_latex('b')}]
>>> latex_dict['feed'] = [parse_latex('c')]
>>> latex_dict['output'] = [{'LHS': parse_latex('a + c'), 'RHS': parse_latex('b + c')}]
>>> add_X_to_both_sides(latex_dict)
    'valid'
"""
trace_id = str(random.randint(1000000, 9999999))
logger.info("[trace start " + trace_id + "]")
d1 = sympy.simplify(
sympy.Add(latex_dict["input"][0]["LHS"], latex_dict["feed"][0])
- latex_dict["output"][0]["LHS"]
)
d2 = sympy.simplify(
sympy.Add(latex_dict["input"][0]["RHS"], latex_dict["feed"][0])
- latex_dict["output"][0]["RHS"]
)
if (d1 == 0) and (d2 == 0):
logger.info("[trace end " + trace_id + "]")
return "valid"
else:
logger.info("[trace end " + trace_id + "]")
return "LHS diff is " + str(d1) + "\n" + "RHS diff is " + str(d2)
|
2ab0af9acbb09dcace00575a58afb66cebf2a07c
| 3,644,808
|
import numpy as np
from mindspore import Parameter, Tensor
from mindspore.common.initializer import initializer, Uniform


def init_var_dict(init_args, var_list):
    """Init var with different methods.

    `ms_type` and `np_type` are assumed to be module-level dtype constants.
    """
var_map = {}
_, max_val = init_args
for i, _ in enumerate(var_list):
key, shape, method = var_list[i]
if key not in var_map.keys():
if method in ['random', 'uniform']:
var_map[key] = Parameter(initializer(Uniform(max_val), shape, ms_type), name=key)
elif method == "one":
var_map[key] = Parameter(initializer("ones", shape, ms_type), name=key)
elif method == "zero":
var_map[key] = Parameter(initializer("zeros", shape, ms_type), name=key)
elif method == 'normal':
var_map[key] = Parameter(Tensor(np.random.normal(loc=0.0, scale=0.01, size=shape).
astype(dtype=np_type)), name=key)
return var_map
|
05a3bece9598426010466c27ce794eb7d2aea937
| 3,644,809
|
def get_member_name(refobject):
""" return the best readable name
"""
try:
member_name = refobject.__name__
except AttributeError:
member_name = type(refobject).__name__
except Exception as error:
logger.debug('get_member_name :'+str(error))
member_name = str(refobject)
return member_name
|
103dfb1110ef8372e76b5ef734e842528d2b8f16
| 3,644,810
|
import os
import warnings
def _check_path(path=None):
"""
Returns the absolute path corresponding to ``path`` and creates folders.
Parameters
----------
path : None, str or list(str)
Absolute path or subfolder hierarchy that will be created and returned.
If None, os.getcwd() is used.
"""
if path is None:
return os.getcwd()
if isinstance(path, str):
if not os.path.isabs(path):
path = os.path.join(os.getcwd(), path)
if not os.path.isdir(path):
os.mkdir(path)
return path
elif isinstance(path, list):
abs_path = ''
for partial_path in path:
abs_path = _check_path(os.path.join(abs_path, partial_path))
return abs_path
else:
        message = 'Variable ``path`` is neither a string nor a list of strings.'
warnings.warn(message, UserWarning)
|
2a1b18ac39cfd2573432911cd1aa6dfa5a740709
| 3,644,811
|
import warnings

import numpy as np
from scipy import interpolate, stats
def _eval_bernstein_1d(x, fvals, method="binom"):
"""Evaluate 1-dimensional bernstein polynomial given grid of values.
experimental, comparing methods
Parameters
----------
x : array_like
Values at which to evaluate the Bernstein polynomial.
fvals : ndarray
Grid values of coefficients for Bernstein polynomial basis in the
weighted sum.
method: "binom", "beta" or "bpoly"
Method to construct Bernstein polynomial basis, used for comparison
of parameterizations.
- "binom" uses pmf of Binomial distribution
- "beta" uses pdf of Beta distribution
- "bpoly" uses one interval in scipy.interpolate.BPoly
Returns
-------
Bernstein polynomial at evaluation points, weighted sum of Bernstein
polynomial basis.
"""
k_terms = fvals.shape[-1]
xx = np.asarray(x)
k = np.arange(k_terms).astype(float)
n = k_terms - 1.
if method.lower() == "binom":
# Divide by 0 RuntimeWarning here
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
poly_base = stats.binom.pmf(k, n, xx[..., None])
bp_values = (fvals * poly_base).sum(-1)
elif method.lower() == "bpoly":
bpb = interpolate.BPoly(fvals[:, None], [0., 1])
bp_values = bpb(x)
elif method.lower() == "beta":
# Divide by 0 RuntimeWarning here
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
poly_base = stats.beta.pdf(xx[..., None], k + 1, n - k + 1) / (n + 1)
bp_values = (fvals * poly_base).sum(-1)
else:
raise ValueError("method not recogized")
return bp_values
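
# Illustrative check (added sketch): with coefficients k/n the Bernstein polynomial
# reproduces the identity on [0, 1], so the output should match the evaluation points.
_x = np.linspace(0.0, 1.0, 5)
_fvals = np.linspace(0.0, 1.0, 4)   # 4 coefficients -> degree-3 polynomial
_bp = _eval_bernstein_1d(_x, _fvals, method="binom")   # approximately equal to _x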
|
5561d4099bd07b0fc75dcbf47c53f5ff589e2d9d
| 3,644,812
|
def exp_bar(self, user, size=20):
"""\
Returns a string visualizing the current exp of the user as a bar.
"""
bar_length = user.exp * size // exp_next_lvl(user.lvl)
space_length = size - bar_length
bar = '#' * bar_length + '.' * space_length
return '[' + bar + ']'
|
575d475d602d0fdd4ded9eb2a139484c5d78887e
| 3,644,813
|
import tensorflow as tf  # TF1-style API assumed (tf.variable_scope / tf.get_variable)


def linear(input_, output_size, scope=None, stddev=0.02, with_w=False):
    """Define linear activation function used for fc layer.
    Args:
        input_: An input tensor for activation function.
        output_size: An output tensor size after passing through linearity.
scope: variable scope, if None, used independently.
stddev : user defined standard deviation for initialization.
with_w: if the weight is also needed as output.
Returns: logits of weights and biases.
"""
shape = input_.get_shape().as_list()
with tf.variable_scope(scope or "Linear"):
matrix = tf.get_variable("Matrix", [shape[1], output_size], tf.float32,
tf.random_normal_initializer(stddev=stddev))
bias = tf.get_variable("bias", [output_size], initializer=tf.constant_initializer(0.0))
if with_w:
return tf.matmul(input_, matrix) + bias, matrix, bias
else:
return tf.matmul(input_, matrix) + bias
|
8a5a4b06598d9c3c799c4a82d07a9d3d11962f23
| 3,644,814
|
from pathlib import Path
import json
import hashlib
import math
from os import makedirs
from os.path import basename, normpath, join, isdir

import numpy as np
from numpy.random import shuffle as np_random_shuffle  # assumed alias for the shuffle used below
def generate_patches(patch_cache_location,
axis,
image_input_channels,
brain_mask_channel,
classification_mask,
patch_size,
k_fold_count,
patients=None,
excluded_patients=None):
"""Generate new patch sets for testing and training for given input channels"""
if excluded_patients is not None:
excluded_patients = np.array(excluded_patients)
patient_nrs = None
if patients: # patient override
print('Patient override:\n')
print(patients)
patient_nrs = np.array(patients)
else: # loop over patient nrs in input channel dirs
for input_channel in image_input_channels:
# get all dirs in given input channel path
input_channel_path = Path(input_channel['path'])
dirs = [f for f in input_channel_path.iterdir() if f.is_dir()]
# get all patient ids listed in input channel
new_patients = []
for pat_dir in dirs:
pat_id = basename(normpath(pat_dir))
new_patients.append(pat_id)
# calculate intersect in arrays so final patient nrs list only contains patients
# which are in all of the given input channels
if patient_nrs is not None:
patient_nrs = np.intersect1d(patient_nrs, np.array(new_patients))
else:
patient_nrs = np.array(new_patients)
patient_nrs.sort()
patient_nrs = np.array(patient_nrs)
if excluded_patients is not None:
excluded_indices = np.isin(patient_nrs, excluded_patients)
patient_nrs = np.delete(patient_nrs, excluded_indices.nonzero(), 0)
patient_shuffle = np.arange(patient_nrs.shape[0])
np_random_shuffle(patient_shuffle)
patient_nrs = patient_nrs[patient_shuffle]
del patient_shuffle
json_image_channels = json.dumps(image_input_channels, sort_keys=True).encode('utf-8')
input_channel_hash = str(hashlib.md5(json_image_channels).hexdigest())
pat_size_hashed_cache_path = join(patch_cache_location, input_channel_hash)
if not isdir(pat_size_hashed_cache_path):
makedirs(pat_size_hashed_cache_path)
with open(join(patch_cache_location,
input_channel_hash,
'_image_channels.json'), 'w') as o_file:
json.dump(image_input_channels, o_file)
fold_data_sets = []
fold_size = patient_nrs.shape[0] / k_fold_count
start = 0
for fold in range(k_fold_count):
fold_patients = patient_nrs[start:start+math.ceil(fold_size)]
start += math.ceil(fold_size)
if fold < (k_fold_count - 1):
fold_size = (patient_nrs.shape[0] - start) / (k_fold_count - (fold + 1))
fold_patches, fold_labels = patients_patches(fold_patients,
pat_size_hashed_cache_path,
image_input_channels,
brain_mask_channel,
classification_mask,
patch_size,
axis)
perm0 = np.arange(fold_patches.shape[0])
np_random_shuffle(perm0)
fold_patches = fold_patches[perm0]
fold_labels = fold_labels[perm0]
fold_data_set = DataWrapper(fold_patches,
fold_labels,
reshape=False,
patients=fold_patients)
fold_data_sets.append(fold_data_set)
print('Fetched all patient data')
for fold in range(k_fold_count):
print('\nFold {} Patches'.format(fold))
print(fold_data_sets[fold].images.shape)
print(fold_data_sets[fold].labels.shape)
return fold_data_sets
|
d8dc0d1312acff05bfdbc56192ee3c7caeb65c86
| 3,644,815
|
def _parse_locals_to_data_packet(locals_dict):
"""
    Takes the locals object (i.e. function inputs as a dict) and maps keys
    through the module-level ``param_map``, dropping ``None`` values.
    TODO retire this function, it's pretty hacky
:param locals_dict:
:return: parsed locals object
"""
if 'self' in locals_dict:
locals_dict.pop('self')
if 'kwargs' in locals_dict:
kwargs = locals_dict.pop('kwargs')
locals_dict.update(kwargs)
return {(param_map[k] if k in param_map else k): v for k, v in locals_dict.items() if v is not None}
|
1d7c6e3bcc3ee86d42717690d3739cc624279bb6
| 3,644,816
|
from typing import Union
from typing import Sequence
from typing import List
def query_user_joins(user_group: Union[User, Sequence[User], None]) \
-> List[JoinRecord]:
"""
:param user_group: User or user group as an iterable of users.
    :return: List of JoinRecord results, ordered by timestamp.
"""
# Input validation
user_list = [user_group] if isinstance(user_group, User) else user_group
# Query
query = session.query(JoinRecord)
if user_list:
# noinspection PyUnresolvedReferences
query = query.filter(JoinRecord.user_id.in_(u.user_id for u in user_list))
results = query.order_by(JoinRecord.timestamp).all()
logger.info("query_user_joins: "
"Found {:d} records for user group: {!r}".format(len(results), user_group))
return results
|
5481e4512b7b28b0832f9fec00ef0cf4e7cfd5de
| 3,644,817
|
import os
def is_running(process):
"""Returns True if the requested process looks like it's still running"""
if not process[0]:
return False # The process doesn't exist
if process[1]:
        return process[1].poll() is None
try:
# check if the process is active by sending a dummy signal
os.kill(process[0]['pid'], 0)
except ProcessLookupError:
return False
return True
|
7dc002da5bbd87c5d8d8745fd49e6723478186c4
| 3,644,818
|
from functools import wraps


def rec_test(test_type: str):
"""
Rec test decorator
"""
def decorator(f):
@wraps(f)
def w(*args, **kwargs):
return f(*args, **kwargs)
# add attributes to f
w.is_test = True
w.test_type = test_type
try:
w.test_desc = f.__doc__.lstrip().rstrip()
except:
w.test_desc = ""
try:
# python 3
w.name = w.__name__
except:
# python 2
w.name = w.__func__.func_name
return w
return decorator
|
94eca60bd4d3f96fd3346da5bcc2b70c3a167ace
| 3,644,819
|
import numpy as np
import matplotlib.pyplot as plt


def display_convw(w, s, r, c, fig, vmax=None, vmin=None, dataset='mnist', title='conv_filters'):
"""
w2 = np.zeros(w.shape)
d = w.shape[1]/3
print w.shape
for i in range(w.shape[0]):
for j in range(w.shape[1]/3):
w2[i, j] = w[i, 3*j]
w2[i, j + d] = w[i, 3*j+1]
w2[i, j + 2*d] = w[i, 3*j+2]
w = w2
"""
numhid = w.shape[0]
size_x = s
size_y = s # For now.
  num_channels = w.shape[1] // (size_x*size_y)
  assert num_channels == 3
  assert w.shape[1] % (size_x*size_y) == 0
if isinstance(w, np.ndarray):
vh = w.reshape(size_x*numhid*num_channels, size_y)
else:
vh = w.asarray().reshape(size_x*numhid*num_channels, size_y)
pvh = np.zeros((size_x*r, size_y*c, num_channels))
for i in range(r):
for j in range(c):
for ch in range(num_channels):
pvh[i*size_x:(i+1)*size_x, j*size_y:(j+1)*size_y, ch] = \
vh[(num_channels*(i*c+j)+ch)*size_x:(num_channels*(i*c+j)+ch+1)*size_x,:]
# pvh /= np.std(pvh)
plt.figure(fig)
plt.clf()
plt.title(title)
plt.imshow(pvh, vmax=vmax, vmin=vmin)
scale = 1
xmax = size_x*c
ymax = size_y*r
color = 'k'
for x in range(0, c):
plt.axvline(x=x*size_x/scale, ymin=0,ymax=ymax/scale, color = color)
for y in range(0, r):
plt.axhline(y=y*size_y/scale, xmin=0,xmax=xmax/scale, color = color)
plt.draw()
return pvh
|
87742ea0831f731e800385134379ce1b786b834f
| 3,644,820
|
def get_optional_list(all_tasks=ALL_TASKS, grade=-1, *keys) -> list:
"""获取可选的任务列表
:param keys: 缩小范围的关键字,不定长,定位第一级有一个键,要定位到第二级就应该有两个键
:param all_tasks: dict,两级, 所有的任务
:param grade: 字典层级 第0层即为最外层,依次向内层嵌套,默认值-1层获取所有最内层的汇总列表
:return:
"""
optional_list = []
# 按照指定层级获取相应的可选任务列表
if grade == -1:
# 获取最内层所有的具体任务
for key_grade_1 in all_tasks.keys():
for key_grade_2 in all_tasks[key_grade_1].keys():
optional_list.extend(all_tasks[key_grade_1][key_grade_2])
elif grade == 0:
# 获取最外层的宽泛任务
optional_list.extend(all_tasks.keys())
elif grade == 1:
key_grade_1 = keys[0] # 需取第一层级的值,就必须提供第0层的key
optional_list.extend(all_tasks[key_grade_1].keys())
elif grade == 2:
key_grade_1, key_grade_2 = keys[0], keys[1] # 需取第二层级的值,就必须提供第0层和第1层的key
optional_list.extend(all_tasks[key_grade_1][key_grade_2])
else:
print("超出任务字典的层级范围了哦")
return optional_list
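
# Illustrative usage (added sketch; the task dict is made up, and ALL_TASKS is
# assumed to be defined at module level so the default argument resolves):
_demo_tasks = {
    "daily": {"cleanup": ["sweep", "mop"], "care": ["water plants"]},
    "weekly": {"review": ["summary"]},
}
_all_leaves = get_optional_list(_demo_tasks)                      # every innermost task
_outer = get_optional_list(_demo_tasks, 0)                        # ["daily", "weekly"]
_daily = get_optional_list(_demo_tasks, 1, "daily")               # ["cleanup", "care"]
_cleanup = get_optional_list(_demo_tasks, 2, "daily", "cleanup")  # ["sweep", "mop"]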
|
ee54e65e724520d8ed9e3d994811c26ed2205add
| 3,644,821
|
def process_genotypes(filepath, snp_maf, snp_list=None, **kwargs):
"""
Process genotype file.
:param filepath:
:param snp_maf:
:param snp_list: get specified snp if provided
:param bool genotype_label: True if first column is the label of specimen, default False
:param bool skip_none_rs: True if skip None genotype, default True
:param bool fill_none: True if auto fill None genotype with most frequent genotype by MAF, default True
:return:
"""
conf = dict({
'genotype_label': False,
'skip_none_rs': True
}, **kwargs)
with open(filepath, encoding='utf-8') as fh:
if conf['genotype_label']:
df = genotype_with_label(fh, snp_maf=snp_maf, snp_list=snp_list, **conf)
else:
df = genotype_without_label(fh, snp_maf=snp_maf, snp_list=snp_list, **conf)
return df
|
501aa7b648d970b21dff1a4bd98102680e5ea774
| 3,644,822
|
import sys
import subprocess
def check_output(*cmd):
"""Log and run the command, raising on errors, return output"""
    print('Run:', cmd, file=sys.stderr)
return subprocess.check_output(cmd)
|
e7108876e45a59a80785b9be696c71f1b4a5fe1e
| 3,644,823
|
def table_exists(conn, table_name, schema=False):
"""Checks if a table exists.
Parameters
----------
conn
A Psycopg2 connection.
table_name : str
The table name.
    schema : str or False
        The schema to which the table belongs (False skips the schema filter).
"""
cur = conn.cursor()
table_exists_sql = ('select * from information_schema.tables '
f'where table_name={table_name!r}')
if schema:
table_exists_sql += f' and table_schema={schema!r}'
cur.execute(table_exists_sql)
return bool(cur.rowcount)
|
c9b698afbe795a6a73ddfb87b2725c3c4205f35e
| 3,644,824
|
import re
def _dict_from_dir(previous_run_path):
"""
build dictionary that maps training set durations to a list of
training subset csv paths, ordered by replicate number
factored out as helper function so we can test this works correctly
Parameters
----------
previous_run_path : str, Path
path to directory containing dataset .csv files
that represent subsets of training set, created by
a previous run of ``vak.core.learncurve.learning_curve``.
Typically directory will have a name like ``results_{timestamp}``
and the actual .csv splits will be in sub-directories with names
corresponding to the training set duration
Returns
-------
train_dur_csv_paths : dict
where keys are duration in seconds of subsets taken from training data,
and corresponding values are lists of paths to .csv files containing
those subsets
"""
train_dur_csv_paths = {}
train_dur_dirs = previous_run_path.glob("train_dur_*s")
for train_dur_dir in train_dur_dirs:
train_dur = re.findall(TRAIN_DUR_PAT, train_dur_dir.name)
if len(train_dur) != 1:
raise ValueError(
f"did not find just a single training subset duration in filename:\n"
f"{train_subset_path}\n"
f"Instead found: {train_dur}"
)
train_dur = int(train_dur[0])
# sort by increasing replicate number -- numerically, not alphabetically
replicate_dirs = sorted(
train_dur_dir.glob("replicate_*"),
key=lambda dir_path: int(dir_path.name.split("_")[-1]),
)
train_subset_paths = []
for replicate_dir in replicate_dirs:
train_subset_path = sorted(replicate_dir.glob("*prep*csv"))
if len(train_subset_path) != 1:
raise ValueError(
f"did not find just a single training subset .csv in replicate directory:\n"
f"{replicate_dir}\n"
f"Instead found: {train_subset_path}"
)
train_subset_path = train_subset_path[0]
train_subset_paths.append(train_subset_path)
train_dur_csv_paths[train_dur] = train_subset_paths
return train_dur_csv_paths
|
32d49b6ec6a8472a3864fc95cc52502a63038cdc
| 3,644,825
|
from math import ceil, floor


def aggregate_pixel(arr, x_step, y_step):
"""Aggregation code for a single pixel"""
# Set x/y to zero to mimic the setting in a loop
# Assumes x_step and y_step in an array-type of length 2
x = 0
y = 0
# initialize sum variable
s = 0.0
# sum center pixels
left = int(ceil(x_step[x]))
right = int(floor(x_step[x+1]))
top = int(ceil(y_step[y]))
bottom = int(floor(y_step[y+1]))
s += arr[left:right,top:bottom].sum()
# Find edge weights
wl = left - x_step[x]
wr = x_step[x+1] - right
wt = top - y_step[y]
wb = y_step[y+1] - bottom
# sum edges - left
s += arr[left-1:left,top:bottom].sum() * wl
# sum edges - right
s += arr[right:right+1,top:bottom].sum() * wr
# sum edges - top
s += arr[left:right,top-1:top].sum() * wt
# sum edges - bottom
s += arr[left:right,bottom:bottom+1].sum() * wb
# sum corners ...
# ul
s += arr[left-1:left,top-1:top].sum() * wl * wt
# ur
s += arr[right:right+1,top-1:top].sum() * wr * wt
# ll
s += arr[left-1:left,bottom:bottom+1].sum() * wl * wb
# lr
s += arr[right:right+1,bottom:bottom+1].sum() * wr * wb
# calculate weight
weight = (x_step[x+1]-x_step[x])*(y_step[y+1]-y_step[y])
return s/float(weight)
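
# Illustrative usage (added sketch): with integer step bounds the edge and corner
# weights are all zero, so the result is just the mean of the interior block arr[1:3, 1:3].
import numpy as np

_arr = np.arange(16, dtype=float).reshape(4, 4)
_val = aggregate_pixel(_arr, [1.0, 3.0], [1.0, 3.0])   # == 7.5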
|
d9cdad36c7eeff3581310d13bedce204e7431560
| 3,644,826
|
import numpy as np


def simplify_datatype(config):
""" Converts ndarray to list, useful for saving config as a yaml file """
for k, v in config.items():
if isinstance(v, dict):
config[k] = simplify_datatype(v)
elif isinstance(v, tuple):
config[k] = list(v)
elif isinstance(v, np.ndarray):
config[k] = v.tolist()
else:
config[k] = v
return config
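
# Illustrative usage (added sketch): tuples and ndarrays become plain lists,
# which makes the config safe to dump as YAML.
_clean = simplify_datatype({"lr": 0.1, "shape": (3, 3), "w": np.zeros(2)})
# -> {"lr": 0.1, "shape": [3, 3], "w": [0.0, 0.0]}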
|
f3e8ae76e04479ed9b1b5fbd450edec20342e5a9
| 3,644,827
|
def _strict_random_crop_image(image,
boxes,
labels,
is_crowd,
difficult,
masks=None,
sem_seg=None,
min_object_covered=1.0,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.1, 1.0),
overlap_thresh=0.3):
"""Performs random crop.
Note: boxes will be clipped to the crop. Keypoint coordinates that are
outside the crop will be set to NaN, which is consistent with the original
keypoint encoding for non-existing keypoints. This function always crops
the image and is supposed to be used by `random_crop_image` function which
sometimes returns image unchanged.
Args:
image: rank 3 float32 tensor containing 1 image -> [height, width, channels]
with pixel values varying between [0, 1].
boxes: rank 2 float32 tensor containing the bounding boxes with shape
[num_instances, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
    labels: rank 1 int32 tensor containing the object classes.
    is_crowd: rank 1 boolean tensor containing the crowd flag of each box.
    difficult: rank 1 tensor containing the difficult flag of each box.
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks. The masks
      are of the same height, width as the input `image`.
    sem_seg: (optional) rank 2 tensor containing a semantic segmentation map of
      the same height, width as the input `image`.
min_object_covered: the cropped image must cover at least this fraction of
at least one of the input bounding boxes.
aspect_ratio_range: allowed range for aspect ratio of cropped image.
area_range: allowed range for area ratio between cropped image and the
original image.
overlap_thresh: minimum overlap thresh with new cropped
image to keep the box.
Returns:
image: image which is the same rank as input image.
boxes: boxes which is the same rank as input boxes.
Boxes are in normalized form.
labels: new labels.
If masks is not None, the function also returns:
masks: rank 3 float32 tensor with shape [num_instances, height, width]
containing instance masks.
"""
with tf.name_scope('RandomCropImage', values=[image, boxes]):
image_shape = tf.shape(image)
# boxes are [N, 4]. Lets first make them [1, N, 4].
boxes_expanded = tf.expand_dims(
tf.clip_by_value(
boxes, clip_value_min=0.0, clip_value_max=1.0), 0)
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
image_shape,
bounding_boxes=boxes_expanded,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=100,
use_image_if_no_bounding_boxes=True)
im_box_begin, im_box_size, im_box = sample_distorted_bounding_box
new_image = tf.slice(image, im_box_begin, im_box_size)
new_image.set_shape([None, None, image.get_shape()[2]])
# [1, 4]
im_box_rank2 = tf.squeeze(im_box, squeeze_dims=[0])
# [4]
im_box_rank1 = tf.squeeze(im_box)
boxlist = box_list.BoxList(boxes)
boxlist.add_field('labels', labels)
boxlist.add_field('is_crowd', is_crowd)
boxlist.add_field('difficult', difficult)
if masks is not None:
boxlist.add_field('masks', masks)
im_boxlist = box_list.BoxList(im_box_rank2)
# remove boxes that are outside cropped image
boxlist, inside_window_ids = box_list_ops.prune_completely_outside_window(
boxlist, im_box_rank1)
# remove boxes that are outside image
overlapping_boxlist, keep_ids = box_list_ops.prune_non_overlapping_boxes(
boxlist, im_boxlist, overlap_thresh)
# change the coordinate of the remaining boxes
new_boxlist = box_list_ops.change_coordinate_frame(overlapping_boxlist,
im_box_rank1)
new_boxes = new_boxlist.boxes
new_boxes = tf.clip_by_value(
new_boxes, clip_value_min=0.0, clip_value_max=1.0)
new_boxes.set_shape([None, 4])
result = [
new_image,
new_boxes,
overlapping_boxlist.get_field('labels'),
overlapping_boxlist.get_field('is_crowd'),
overlapping_boxlist.get_field('difficult'),
]
if masks is not None:
masks_of_boxes_inside_window = tf.gather(masks, inside_window_ids)
masks_of_boxes_completely_inside_window = tf.gather(
masks_of_boxes_inside_window, keep_ids)
masks_box_begin = [0, im_box_begin[0], im_box_begin[1]]
masks_box_size = [-1, im_box_size[0], im_box_size[1]]
new_masks = tf.slice(
masks_of_boxes_completely_inside_window,
masks_box_begin, masks_box_size)
result.append(new_masks)
if sem_seg is not None:
sem_seg = tf.expand_dims(sem_seg, axis=-1)
new_sem_seg = tf.slice(sem_seg, im_box_begin, im_box_size)
new_sem_seg = tf.squeeze(new_sem_seg, axis=-1)
new_sem_seg.set_shape([None, None])
result.append(new_sem_seg)
return tuple(result)
|
749107213a8bf34d2b159d38657a9c63af6699c3
| 3,644,828
|
def aggregate_by_player_id(statistics, playerid, fields):
"""
Inputs:
statistics - List of batting statistics dictionaries
playerid - Player ID field name
fields - List of fields to aggregate
Output:
Returns a nested dictionary whose keys are player IDs and whose values
are dictionaries of aggregated stats. Only the fields from the fields
input will be aggregated in the aggregated stats dictionaries.
"""
players = {}
# create nested dict with outer keys of player ids and inner dict of fields
for dic in statistics:
if dic[playerid] not in players:
players[dic[playerid]] = {playerid: dic[playerid]}
for field in fields:
players[dic[playerid]][field] = 0
# loop through statistics again, incrementing field values
for dic in statistics:
for field in fields:
players[dic[playerid]][field] += int(dic[field])
return players
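
# Illustrative usage (added sketch; the field names are made up):
_stats = [
    {"player": "p1", "H": "2", "AB": "4"},
    {"player": "p1", "H": "1", "AB": "3"},
    {"player": "p2", "H": "0", "AB": "2"},
]
_totals = aggregate_by_player_id(_stats, "player", ["H", "AB"])
# -> {"p1": {"player": "p1", "H": 3, "AB": 7},
#     "p2": {"player": "p2", "H": 0, "AB": 2}}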
|
c137fc8820f8898ebc63c54de03be5b919fed97a
| 3,644,829
|
import pickle
def loadStatesFromFile(filename):
"""Loads a list of states from a file."""
try:
with open(filename, 'rb') as inputfile:
result = pickle.load(inputfile)
except:
result = []
return result
|
cc2f64a977ff030ec6af94d3601c094e14f5b584
| 3,644,830
|
import tkinter
def get_configuration_item(configuration_file, item, default_values):
"""Return configuration value on file for item or builtin default.
configuration_file Name of configuration file.
    item Item in configuration file whose value is required.
default_values dict of default values for items.
Return "" if configuration file cannot be opened or read, after showing
a dialogue to tell the user.
Return "" if the item exists but has no value.
Return default value if the item does not exist and a default value exists.
Return "" if the item does not exist and a default value does not exist.
Return the item value if there is one.
    Items occupy a single line formatted as (?P<item>[^\s]*)\s*(?P<value>.*)
"""
try:
of = open(configuration_file)
try:
config_text = of.read()
except Exception as exc:
tkinter.messagebox.showinfo(
parent=parent,
message="".join(
(
"Unable to read from\n\n",
configuration_file,
"\n\n",
str(exc),
'\n\n"" will be returned as value of ',
item,
)
),
title="Read File",
)
return ""
finally:
of.close()
except Exception as exc:
tkinter.messagebox.showinfo(
parent=parent,
message="".join(
(
"Unable to open\n\n",
configuration_file,
"\n\n",
str(exc),
'\n\n"" will be returned as value of ',
item,
)
),
title="Open File",
)
return ""
key = None
for i in config_text.splitlines():
i = i.split(maxsplit=1)
if not i:
continue
if i[0].startswith("#"):
continue
if i[0] != item:
continue
key = item
if len(i) == 1:
value = ""
else:
value = i[1].strip()
if key is None:
        for k, v in default_values.items():
if k == item:
key = item
value = v
if key is None:
value = ""
return value
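
# Illustrative usage (added sketch): each configuration line is "item value" and
# lines starting with "#" are ignored. A temporary file keeps the sketch
# self-contained; the item name and path are made up.
import os
import tempfile

_fd, _cfg = tempfile.mkstemp(text=True)
with os.fdopen(_fd, "w") as _f:
    _f.write("# engines\nengine_path /usr/local/bin/stockfish\n")
_value = get_configuration_item(_cfg, "engine_path", {"engine_path": ""})
# _value == "/usr/local/bin/stockfish"
os.remove(_cfg)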
|
c077989d2d90468a80b27f32a68b827fbdb49b92
| 3,644,831
|
import os
import logging

import numpy as np
import tensorflow.compat.v1 as tf  # TF1-compat API assumed (reset_default_graph, ConfigProto, ...)
def tflite_stream_state_external_model_accuracy(
flags,
folder,
tflite_model_name='stream_state_external.tflite',
accuracy_name='tflite_stream_state_external_model_accuracy.txt',
reset_state=False):
"""Compute accuracy of streamable model with external state using TFLite.
Args:
flags: model and data settings
folder: folder name where model is located
tflite_model_name: file name with tflite model
accuracy_name: file name for storing accuracy in path + accuracy_name
reset_state: reset state between testing sequences.
If True - then it is non streaming testing environment: state will be
reseted in the beginning of every test sequence and will not be
transferred to another one (as it is done in real streaming).
Returns:
accuracy
"""
tf.reset_default_graph()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
tf.keras.backend.set_session(sess)
path = os.path.join(flags.train_dir, folder)
logging.info('tflite stream model state external with reset_state %d',
reset_state)
audio_processor = input_data.AudioProcessor(flags)
set_size = audio_processor.set_size('testing')
interpreter = tf.lite.Interpreter(
model_path=os.path.join(path, tflite_model_name))
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
inputs = []
for s in range(len(input_details)):
inputs.append(np.zeros(input_details[s]['shape'], dtype=np.float32))
total_accuracy = 0.0
count = 0.0
inference_batch_size = 1
for i in range(0, set_size, inference_batch_size):
test_fingerprints, test_ground_truth = audio_processor.get_data(
inference_batch_size, i, flags, 0.0, 0.0, 0, 'testing', 0.0, 0.0, sess)
# before processing new test sequence we can reset model state
# if we reset model state then it is not real streaming mode
if reset_state:
for s in range(len(input_details)):
inputs[s] = np.zeros(input_details[s]['shape'], dtype=np.float32)
if flags.preprocess == 'raw':
out_tflite = inference.run_stream_inference_classification_tflite(
flags, interpreter, test_fingerprints, inputs)
out_tflite_argmax = np.argmax(out_tflite)
else:
for t in range(test_fingerprints.shape[1]):
# get new frame from stream of data
stream_update = test_fingerprints[:, t, :]
stream_update = np.expand_dims(stream_update, axis=1)
# [batch, time=1, feature]
stream_update = stream_update.astype(np.float32)
# set input audio data (by default input data at index 0)
interpreter.set_tensor(input_details[0]['index'], stream_update)
# set input states (index 1...)
for s in range(1, len(input_details)):
interpreter.set_tensor(input_details[s]['index'], inputs[s])
# run inference
interpreter.invoke()
# get output: classification
out_tflite = interpreter.get_tensor(output_details[0]['index'])
# get output states and set it back to input states
# which will be fed in the next inference cycle
for s in range(1, len(input_details)):
# The function `get_tensor()` returns a copy of the tensor data.
# Use `tensor()` in order to get a pointer to the tensor.
inputs[s] = interpreter.get_tensor(output_details[s]['index'])
out_tflite_argmax = np.argmax(out_tflite)
total_accuracy = total_accuracy + (
test_ground_truth[0] == out_tflite_argmax)
count = count + 1
if i % 200 == 0 and i:
logging.info(
'tflite test accuracy, stream model state external = %f %d out of %d',
*(total_accuracy * 100 / count, i, set_size))
total_accuracy = total_accuracy / count
logging.info(
'tflite Final test accuracy, stream model state external = %.2f%% (N=%d)',
*(total_accuracy * 100, set_size))
with open(os.path.join(path, accuracy_name), 'wt') as fd:
fd.write('%f on set_size %d' % (total_accuracy * 100, set_size))
return total_accuracy * 100
|
d0209ef72ea6f29f5410b2c493f5286685b88e53
| 3,644,832
|
from astropy import coordinates, units


def sexa2deg(ra, dec):
"""Convert sexagesimal to degree; taken from ryan's code"""
ra = coordinates.Angle(ra, units.hour).degree
dec = coordinates.Angle(dec, units.degree).degree
return ra, dec
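
# Illustrative usage (added sketch): 12h30m of right ascension is 187.5 degrees.
_ra_deg, _dec_deg = sexa2deg('12:30:00', '-45:00:00')   # -> (187.5, -45.0)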
|
3a016b1163c6ceda403cfe5c8d24467d1646c7aa
| 3,644,833
|
from theano import compile, shared
import theano.tensor
from theano.tensor import as_tensor_variable, TensorType

import numpy

# Note: this is Python 2-era Theano code (it still uses `xrange` and `__builtin__`).
def verify_grad(fun, pt, n_tests=2, rng=None, eps=None,
out_type=None, abs_tol=None,
rel_tol=None, mode=None, cast_to_output_type=False):
"""Test a gradient by Finite Difference Method. Raise error on failure.
Example:
>>> verify_grad(theano.tensor.tanh,
(numpy.asarray([[2,3,4], [-1, 3.3, 9.9]]),),
rng=numpy.random)
Raises an Exception if the difference between the analytic gradient and
numerical gradient (computed through the Finite Difference Method) of a
random projection of the fun's output to a scalar exceeds the given
tolerance.
:param fun: a Python function that takes Theano variables as inputs,
and returns a Theano variable. For instance, an Op instance with
a single output.
:param pt: the list of numpy.ndarrays to use as input values.
These arrays must be either float32 or float64 arrays.
:param n_tests: number of times to run the test
:param rng: random number generator used to sample u, we test gradient
of sum(u * fun) at pt
:param eps: stepsize used in the Finite Difference Method (Default
None is type-dependent)
Raising the value of eps can raise or lower the absolute and
relative errors of the verification depending on the
Op. Raising eps does not lower the verification quality. It
is better to raise eps than raising abs_tol or rel_tol.
:param out_type: dtype of output, if complex (i.e. 'complex32' or
'complex64')
:param abs_tol: absolute tolerance used as threshold for gradient
comparison
:param rel_tol: relative tolerance used as threshold for gradient
comparison
:note: WARNING to unit-test writers: if `op` is a function that builds
a graph, try to make it a SMALL graph. Often verify grad is run
in debug mode, which can be very slow if it has to verify a lot of
intermediate computations.
:note: This function does not support multiple outputs. In
tests/test_scan.py there is an experimental verify_grad that
covers that case as well by using random projections.
"""
# The import is here to prevent circular import.
assert isinstance(pt, (list, tuple))
pt = [numpy.array(p) for p in pt]
for i, p in enumerate(pt):
if p.dtype not in ('float32', 'float64'):
raise TypeError(('verify_grad can work only with floating point '
'inputs, but input %i has dtype "%s".') % (i, p.dtype))
_type_tol = dict( # relative error tolerances for different types
float32=1e-2,
float64=1e-4)
if abs_tol is None:
abs_tol = __builtin__.max(_type_tol[str(p.dtype)] for p in pt)
if rel_tol is None:
rel_tol = __builtin__.max(_type_tol[str(p.dtype)] for p in pt)
if rng is None:
raise TypeError(('rng should be a valid instance of '
'numpy.random.RandomState. You may '
'want to use theano.tests.unittest'
'_tools.verify_grad instead of '
'theano.gradient.verify_grad.'))
# We allow input downcast in function, because numeric_grad works in the
# most precise dtype used among the inputs, so we may need to cast some.
def function(inputs, output):
if mode is None:
f = compile.function(inputs, output, accept_inplace=True,
allow_input_downcast=True,
on_unused_input='ignore')
else:
f = compile.function(inputs, output, accept_inplace=True,
allow_input_downcast=True, mode=mode,
on_unused_input='ignore')
return f
tensor_pt = [TensorType(
as_tensor_variable(p).dtype,
as_tensor_variable(p).broadcastable)(name='input %i' % i)
for i, p in enumerate(pt)]
# fun can be either a function or an actual Op instance
o_output = fun(*tensor_pt)
if isinstance(o_output, list):
raise NotImplementedError(('cant (yet) autotest gradient of fun '
'with multiple outputs'))
# we could make loop over outputs making random projections R for each,
# but this doesn't handle the case where not all the outputs are
# differentiable... so I leave this as TODO for now -JB.
o_fn = function(tensor_pt, o_output)
o_fn_out = o_fn(*[p.copy() for p in pt])
if isinstance(o_fn_out, tuple) or isinstance(o_fn_out, list):
raise TypeError('It seems like you are trying to use verify_grad '
'on an op or a function which outputs a list: there should'
' be a single (array-like) output instead')
# random_projection should not have elements too small,
# otherwise too much precision is lost in numerical gradient
def random_projection():
plain = rng.rand(*o_fn_out.shape) + 0.5
if cast_to_output_type:
return numpy.array(plain, o_output.dtype)
return plain
t_r = shared(random_projection())
t_r.name = 'random_projection'
# random projection of o onto t_r
# This sum() is defined above, it's not the builtin sum.
cost = theano.tensor.sum(t_r * o_output)
cost_fn = function(tensor_pt, cost)
symbolic_grad = grad(cost, tensor_pt,
disconnected_inputs='ignore')
grad_fn = function(tensor_pt, symbolic_grad)
for test_num in xrange(n_tests):
try:
num_grad = numeric_grad(cost_fn, [p.copy() for p in pt],
eps, out_type)
analytic_grad = grad_fn(*[p.copy() for p in pt])
# Since `tensor_pt` is a list, `analytic_grad` should be one too.
assert isinstance(analytic_grad, list)
max_arg, max_err_pos, max_abs_err, max_rel_err = num_grad.max_err(
analytic_grad, abs_tol, rel_tol)
if max_abs_err > abs_tol and max_rel_err > rel_tol:
raise verify_grad.E_grad(max_arg, max_err_pos,
max_abs_err, max_rel_err,
abs_tol, rel_tol)
# get new random projection for next test
if test_num < n_tests - 1:
t_r.set_value(random_projection(), borrow=True)
        except Exception as e:
e.args += ("\nThe error happened with the following inputs:", pt,
"\nThe value of eps is:", eps,
"\nThe out_type is:", out_type)
raise
|
9fe5d2a8605b29d97f40d6830efeca6542a98603
| 3,644,834
|
import os
def get_filenames():
""" get file names given path """
files = []
for file in os.listdir(cwd):
if file.endswith(".vcf"):
fullPath = cwd + file
files.append(fullPath)
return files
|
4f18e104f21e284e603d88fc96f2407932908356
| 3,644,835
|
import re
def is_mismatch_before_n_flank_of_read(md, n):
"""
Returns True if there is a mismatch before the first n nucleotides
of a read, or if there is a mismatch before the last n nucleotides
of a read.
:param md: string
:param n: int
:return is_mismatch: boolean
"""
is_mismatch = False
flank_mm_regex = r"^(\d+).*[ACGT](\d+)$"
flank_mm = re.findall(flank_mm_regex,md)
if flank_mm:
flank_mm = flank_mm[0]
if flank_mm[1]:
if int(flank_mm[1]) < n:
is_mismatch = True
if flank_mm[0]:
if int(flank_mm[0]) < n:
is_mismatch = True
return is_mismatch
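
# Illustrative usage (added sketch): the MD tag "2A35" has a mismatch 2 nt from the
# read start and "35A2" has one 2 nt from the read end; "18A18" has neither within
# the first or last 5 nt.
assert is_mismatch_before_n_flank_of_read("2A35", 5) is True
assert is_mismatch_before_n_flank_of_read("35A2", 5) is True
assert is_mismatch_before_n_flank_of_read("18A18", 5) is False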
|
1e41c67e29687d93855ed212e2d9f683ef8a88d7
| 3,644,836
|
from typing import Dict
def get_county() -> Dict:
"""Main method for populating county data"""
api = SocrataApi('https://data.marincounty.org/')
notes = ('This data only accounts for Marin residents and does not '
'include inmates at San Quentin State Prison. '
'The tests timeseries only includes the number of tests '
'performed and not how many were positive or negative. '
'Demographic breakdowns for testing are not available.')
return {
'name': 'Marin',
'update_time': get_latest_update(api).isoformat(),
# The county's data dashboard is at:
# https://coronavirus.marinhhs.org/surveillance
# Which links to the data portal category with the data sets we
# actually use at:
# https://data.marincounty.org/browse?q=covid
'source_url': 'https://coronavirus.marinhhs.org/surveillance',
'meta_from_source': '',
'meta_from_baypd': notes,
'series': {
'cases': get_timeseries_cases(api),
'deaths': get_timeseries_deaths(api),
'tests': get_timeseries_tests(api),
},
'case_totals': get_case_totals(api),
'death_totals': get_death_totals(api),
# Marin does not currently provide demographic breakdowns for
# testing, so no test totals right now.
}
|
62fd267141e3cdcb3f5b81b78be2aafb1322335b
| 3,644,837
|
from typing import List
import logging

import mip
def optimize_player_strategy(
player_cards: List[int], opponent_cards: List[int], payoff_matrix: Matrix
) -> Strategy:
"""
Get the optimal strategy for the player, by solving
a simple linear program based on payoff matrix.
"""
lp = mip.Model("player_strategy", solver_name=mip.CBC)
lp.verbose = False # the problems are simple and we don't need to see the output
x = [lp.add_var(f"x_{card}", var_type=mip.CONTINUOUS) for card in player_cards]
v = lp.add_var("v", var_type=mip.CONTINUOUS, lb=-mip.INF)
for opponent_card in opponent_cards:
transposed_row = [
payoff_matrix[(player_card, opponent_card)] for player_card in player_cards
]
constraint = (
mip.xsum(transposed_row[i] * x_i for i, x_i in enumerate(x)) - v >= 0
)
lp += constraint, f"strategy_against_{opponent_card}"
logging.debug(f"constraint={constraint}")
lp += mip.xsum(x) == 1, "probability_distribution"
lp.objective = mip.maximize(v)
# all variables are continuous so we only need to solve relaxed problem
lp.optimize(max_seconds=30, relax=True)
if lp.status is not mip.OptimizationStatus.OPTIMAL:
logging.error(f"lp.status={lp.status}")
raise RuntimeError(
f"Solver couldn't optimize the problem and returned status {lp.status}"
)
strategy = Strategy(
card_probabilities={
card: lp.var_by_name(f"x_{card}").x for card in player_cards
},
expected_value=lp.var_by_name("v").x,
)
logging.debug(f"strategy.expected_value={strategy.expected_value}")
logging.debug("\n")
return strategy
|
49e04138daea3c78f117e2372e54419384c70810
| 3,644,838
|
import traceback
def address_book(request):
"""
This Endpoint is for getting contact
details of all people at a time.
We will paginate this for 10 items at a time.
"""
try:
paginator = PageNumberPagination()
paginator.page_size = 10
persons = Person.objects.all()
paginated_persons = paginator.paginate_queryset(persons, request)
serializer = PersonDetailSerializer(paginated_persons, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
except:
print(traceback.format_exc())
return Response(status=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
88ec5613a7433128a2d06665319a6e3fd83f870f
| 3,644,839
|
def decrement_items(inventory, items):
"""
:param inventory: dict - inventory dictionary.
:param items: list - list of items to decrement from the inventory.
:return: dict - updated inventory dictionary with items decremented.
"""
    return add_or_decrement_items(inventory, items, 'minus')
|
253339e3a8f9ff49e69372dc99d8b8f626a3b98b
| 3,644,840
|
import torch as th


def global_ave_pool(x):
    """Global Average pooling of convolutional layers over the spatial dimensions.
Results in 2D tensor with dimension: (batch_size, number of channels) """
return th.mean(x, dim=[2, 3])
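
# Illustrative usage (added sketch): a (batch, channels, H, W) activation map
# is reduced to shape (batch, channels).
_pooled = global_ave_pool(th.randn(8, 16, 32, 32))   # _pooled.shape == (8, 16)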
|
3f681e39041762ee2ca8bc52c542952eebd9b97c
| 3,644,841
|
import joblib
from ast import literal_eval
from sklearn.model_selection import RandomizedSearchCV
def train_models(models, train_data, target, logger, dask_client=None, randomized_search=False, scoring_metric=None):
"""Trains a set of models on the given training data/labels
:param models: a dictionary of models which need to be trained
:param train_data: a dataframe containing all possible features ( actual features used are specific to model) of train data
:param target: the column which contains the actual labels for training data
:param logger: logger object passed from the calling module
    :param dask_client: dask client to use for training/testing, defaults to None
:param randomized_search: flag specifying to tune params by randomized search
:param scoring_metric: scoring metric to be used for randomized hyper param search
:return: a dictionary of models after fitting training params on the data
"""
for modelkey, model_metadata in models.items():
logger.info("Training started for " + modelkey)
#model_metadata["MRR"] = 0
# resolve feature data/params
features = model_metadata["features"]
X_train = train_data[features]
y_train = train_data[target]
model_params = model_metadata["param_grid"]
model_type = model_metadata["type"]
if model_type == "classical":
model_pipeline = get_prediction_pipeline(model_metadata)
if dask_client:
with joblib.parallel_backend("dask"):
model_pipeline.fit(X_train, y_train)
else:
if randomized_search and modelkey not in ['LinReg']:
# randomized_search if time permits fix bugs
search = RandomizedSearchCV(estimator = model_pipeline,
param_distributions = model_params,
n_iter = 50,
cv = 5,
verbose=10,
random_state=35,
n_jobs = -1,
scoring = scoring_metric
)
try:
search.fit(X_train, y_train)
best_params = str(search.best_params_)
model_pipeline.set_params(**literal_eval(best_params))
model_pipeline.fit(X_train, y_train)
except Exception as e:
logger.info(" Exception {} while param search for {} switching to default fit".format(e, modelkey))
model_pipeline.fit(X_train, y_train)
else:
model_pipeline.fit(X_train, y_train)
if model_type == "DL":
model_fitted_params = fit_dl_model(
X_train,
y_train,
param_dict=model_params,
logger=logger,
scoring_metric = scoring_metric
)
for key in model_fitted_params:
models[modelkey][key] = model_fitted_params[key]
logger.info("Training ended for " + modelkey)
return models
|
619565639eb4e59d5b639e3f687b43002c716800
| 3,644,842
|
import operator

import numpy as np
def get_output(interpreter, top_k=1, score_threshold=0.0):
"""Returns no more than top_k classes with score >= score_threshold."""
scores = output_tensor(interpreter)
classes = [
Class(i, scores[i])
for i in np.argpartition(scores, -top_k)[-top_k:]
if scores[i] >= score_threshold
]
return sorted(classes, key=operator.itemgetter(1), reverse=True)
|
69c4e956cee796384fa74d12338f3fb2cc90ba31
| 3,644,843
|
from sklearn.feature_extraction.text import CountVectorizer


def bag_of_words_features(data, binary=False):
"""Return features using bag of words"""
vectorizer = CountVectorizer(
ngram_range=(1, 3), min_df=3, stop_words="english", binary=binary
)
return vectorizer.fit_transform(data["joined_lemmas"])
|
55ed963df31c2db79eaab58b585ad264a257c241
| 3,644,844
|
import time
def duration(func):
"""
    Timing decorator.
"""
def wrapper(*args, **kwargs):
print('2')
start = time.time()
f = func(*args, **kwargs)
print(str("扫描完成, 用时 ") + str(int(time.time()-start)) + "秒!")
return f
return wrapper
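
# Illustrative usage (added sketch): decorating a function prints the elapsed
# time once the wrapped call returns.
@duration
def _scan():
    time.sleep(0.1)

_scan()   # prints the elapsed-time message from the wrapper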
|
c55a941574a92cbe70c9b265eaa39563b91ab45a
| 3,644,845
|
def enumerate_assignments(max_context_number):
"""
enumerate all possible assignments of contexts to clusters for a fixed
number of contexts. Has the hard assumption that the first context belongs
to cluster #1, to remove redundant assignments that differ in labeling.
:param max_context_number: int
    :return: list of dicts, each mapping a context id number to a cluster id
        number
"""
cluster_assignments = [{}] # context 0 is always in cluster 1
for contextNumber in range(0, max_context_number):
cluster_assignments = augment_assignments(cluster_assignments, contextNumber)
return cluster_assignments
|
881723e2ca6a663821979a9029e03bb4f35195dc
| 3,644,846
|
import tensorflow as tf


def KL_monte_carlo(z, mean, sigma=None, log_sigma=None):
"""Computes the KL divergence at a point, given by z.
Implemented based on https://www.tensorflow.org/tutorials/generative/cvae
This is the part "log(p(z)) - log(q(z|x)) where z is sampled from
q(z|x).
Parameters
----------
z : (B, N)
mean : (B, N)
sigma : (B, N) | None
log_sigma : (B, N) | None
Returns
-------
KL : (B,)
"""
if log_sigma is None:
log_sigma = tf.math.log(sigma)
zeros = tf.zeros_like(z)
log_p_z = log_multivar_gaussian(z, mean=zeros, log_sigma=zeros)
log_q_z_x = log_multivar_gaussian(z, mean=mean, log_sigma=log_sigma)
return log_q_z_x - log_p_z
|
6d509607b3d4d6c248544330af06f2ef92fc3739
| 3,644,847
|
import numpy as np


def get_order_discrete(p, x, x_val, n_full=None):
""" Calculate the order of the discrete features according to the alt/null ratio
Args:
p ((n,) ndarray): The p-values.
        x ((n,) ndarray): The covariates. The data is assumed to have been preprocessed.
x_val ((n_val,) ndarray): All possible values for x, sorted in ascending order.
n_full (int): Total number of hypotheses before filtering.
Returns:
x_order ((d,) ndarray): the order (of x_val) from smallest alt/null ratio to
the largest.
"""
n_val = x_val.shape[0]
# Separate the null and the alt proportion.
_, t_BH = bh_test(p, alpha=0.1, n_full=n_full)
x_null, x_alt = x[p>0.75], x[p<t_BH]
    # Calculate the alt/null ratio.
cts_null = np.zeros([n_val], dtype=int)
cts_alt = np.zeros([n_val], dtype=int)
for i,val in enumerate(x_val):
cts_null[i] = np.sum(x_null==val)+1
cts_alt[i] = np.sum(x_alt==val)+1
p_null = cts_null/np.sum(cts_null)
p_alt = cts_alt/np.sum(cts_alt)
p_ratio = p_alt/p_null
# Calculate the order of x_val based on the ratio.
x_order = p_ratio.argsort()
return x_order
|
de8f05d7a882c2917e618bf315a45969f55dbd16
| 3,644,848
|
def _read_txt(file_path: str) -> str:
"""
Read specified file path's text.
Parameters
----------
file_path : str
Target file path to read.
Returns
-------
txt : str
Read txt.
"""
with open(file_path) as f:
txt: str = f.read()
return txt
|
5f0657ee223ca9f8d96bb612e35304a405d2339e
| 3,644,849
|
import os
def init_statick():
"""Fixture to initialize a Statick instance."""
args = Args("Statick tool")
return Statick(args.get_user_paths(["--user-paths", os.path.dirname(__file__)]))
|
11c7c4a0ddfc0dcb0d4838aaabb6f130ecc6b11d
| 3,644,850
|
import thefuzz.process
from thefuzz import fuzz  # THRESHOLD is assumed to be a module-level constant


def dedupe(entries):
"""
Uses fuzzy matching to remove duplicate entries.
"""
return thefuzz.process.dedupe(entries, THRESHOLD, fuzz.token_set_ratio)
|
d5d56f2acc25a107b5f78eefc4adc71676712f98
| 3,644,851
|
import binascii

# cryptography imports inferred from usage; project helpers (log, apis, get_length,
# replace_bytes, write_refkey_to_file) are assumed to be imported elsewhere.
from cryptography.hazmat.backends import default_backend, openssl
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives.serialization import Encoding, PrivateFormat, NoEncryption
def generate_openssl_rsa_refkey(key_pub_raw, # pylint: disable=too-many-locals, too-many-branches, too-many-arguments, too-many-statements
keyid_int, refkey_file,
key_size, encode_format="", password="nxp",
cert=""):
"""
Generate rsa reference key using openssl
:param key_pub_raw: Retrieved public key
:param keyid_int: Key index
:param refkey_file: File name to store reference key
:param key_size: RSA key size
:param encode_format: Encode format to store file
:param password: Password for encryption of pkcs12 reference key
:param cert: Input certificate
:return: Status
"""
# generate rsa key pair
key_openssl = rsa.generate_private_key(public_exponent=65537, key_size=key_size,
backend=default_backend())
key_prv_bytes = key_openssl.private_bytes(encoding=Encoding.DER,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption())
key_openssl_hex = binascii.hexlify(key_prv_bytes)
key_openssl_list = list()
for k in range(0, len(key_openssl_hex), 2):
key_openssl_list.append(key_openssl_hex[k:k + 2])
# convert the retrieved public key to hex format
key_pub_list = list(key_pub_raw)
# trim the header of public key
if key_size == 1024:
key_pub_no_header_list = key_pub_list[25:]
elif key_size in [2048, 3072, 4096]:
key_pub_no_header_list = key_pub_list[28:]
else:
log.error("key size: %s is not supported. Should be one of 1024, 2048, 3072, 4096",
(str(key_size),))
return apis.kStatus_SSS_Fail
key_pub_str_list = list()
for key_pub_no_header_item in key_pub_no_header_list:
key_pub_no_header_item = format(key_pub_no_header_item, 'x')
if len(key_pub_no_header_item) == 1:
key_pub_no_header_item = "0" + key_pub_no_header_item
key_pub_str_list.append(key_pub_no_header_item)
openssl_index = 7
# Public Key section
retrieved_pub_len = get_length(key_pub_str_list)
openssl_pub_len = get_length(key_openssl_list[openssl_index:])
key_openssl_list = replace_bytes(key_openssl_list, openssl_pub_len, openssl_index,
key_pub_str_list, retrieved_pub_len)
openssl_index += retrieved_pub_len
# publicExponent section
openssl_index += get_length(key_openssl_list[openssl_index:])
# Private key Exponent section
openssl_index += get_length(key_openssl_list[openssl_index:])
# prime1 section
magic_prime1_data = ['02', '01', '01']
openssl_prime1_len = get_length(key_openssl_list[openssl_index:])
key_openssl_list = replace_bytes(key_openssl_list, openssl_prime1_len, openssl_index,
magic_prime1_data, len(magic_prime1_data))
openssl_index += len(magic_prime1_data)
# convert keyID to hex format and add TLV
keyid_str = format("%08x" % keyid_int)
key_id_list = ['02']
if len(keyid_str) < 31:
key_id_len = int(len(keyid_str) / 2)
key_id_len_hex = format("%x" % key_id_len)
if len(key_id_len_hex) == 1:
key_id_len_hex = "0" + key_id_len_hex
key_id_list.append(key_id_len_hex)
for i in range(0, len(keyid_str), 2):
key_id_list.append(keyid_str[i:i + 2])
# prime 2 section
openssl_prime2_len = get_length(key_openssl_list[openssl_index:])
key_openssl_list = replace_bytes(key_openssl_list, openssl_prime2_len,
openssl_index, key_id_list, len(key_id_list))
openssl_index += len(key_id_list)
# exponent1 section
openssl_index += get_length(key_openssl_list[openssl_index:])
# exponent2 section
openssl_index += get_length(key_openssl_list[openssl_index:])
# coefficient section
magic_mod_p = ['02', '04', 'a5', 'a6', 'b5', 'b6']
openssl_coefficient_len = get_length(key_openssl_list[openssl_index:])
key_openssl_list = replace_bytes(key_openssl_list, openssl_coefficient_len,
openssl_index, magic_mod_p,
len(magic_mod_p))
# Recalculate total length of the key
key_openssl_len = len(key_openssl_list) - 4
key_openssl_len_str = format("%04x" % key_openssl_len)
total_len_list = []
for i in range(0, len(key_openssl_len_str), 2):
total_len_list.append(key_openssl_len_str[i:i + 2])
key_openssl_list[2] = total_len_list[0]
key_openssl_list[3] = total_len_list[1]
# convert key to der or pem format
key_der_hex = ""
for key_openssl_item in key_openssl_list:
if isinstance(key_openssl_item, bytes):
key_der_hex += bytes.decode(key_openssl_item)
else:
key_der_hex += key_openssl_item
key_der = binascii.unhexlify(key_der_hex)
key_pem_obj = openssl.backend.load_der_private_key(key_der, None)
key_pem = key_pem_obj.private_bytes(Encoding.PEM,
PrivateFormat.TraditionalOpenSSL,
NoEncryption())
status = write_refkey_to_file(refkey_file, password,
key_pem, key_der, cert, encode_format)
return status
|
ca3acdcf4fe615378f2f7088d015a7acbc58b7ff
| 3,644,852
|
from sqlalchemy import select
async def fetch_ongoing_alerts(
requester=Security(get_current_access, scopes=[AccessType.admin, AccessType.user]),
session=Depends(get_session)
):
"""
Retrieves the list of ongoing alerts and their information
"""
if await is_admin_access(requester.id):
query = (
alerts.select().where(
alerts.c.event_id.in_(
select([events.c.id])
.where(events.c.end_ts.is_(None))
)))
return await crud.base.database.fetch_all(query=query)
else:
retrieved_alerts = (session.query(models.Alerts)
.join(models.Events)
.filter(models.Events.end_ts.is_(None))
.join(models.Devices)
.join(models.Accesses)
.filter(models.Accesses.group_id == requester.group_id))
retrieved_alerts = [x.__dict__ for x in retrieved_alerts.all()]
return retrieved_alerts
|
721deaac7cca5f6589417f07d66a83111a062134
| 3,644,853
|
def breweryBeers(id):
    """Finds the beers that belong to the brewery with the given id.
    id: string (with a trailing '-' marker appended by the caller)
    return: list of beer objects, or the queried endpoint string on failure
    """
    try:
        # The id has a '-' appended to mark it for this method;
        # [:-1:] strips that last character before building the endpoint.
        return BreweryDb.brewery(id[:-1:] + "/beers")['data']
    except Exception:
        return id[:-1:] + "/beers"
|
f2d8824ad49ffeeec68077cb5e0ed143f4603d4e
| 3,644,854
|
def min_max_date(rdb, patient):
""" Returns min and max date for selected patient """
sql = """SELECT min_date,max_date FROM patient WHERE "Name"='{}'""".format(patient)
try:
df = pd.read_sql(sql, rdb)
min_date, max_date = df['min_date'].iloc[0].date(), df['max_date'].iloc[0].date()
    except Exception:
min_date, max_date = '', ''
return min_date, max_date
|
7f08f42bd7dd9742bef300f5f7009807e47b7f23
| 3,644,855
|
from math import sqrt
def integrate(f, a, b, N, method):
    """
    @param f: function to integrate
    @param a: initial point
    @param b: end point
    @param N: number of intervals for precision
    @param method: trapeze, rectangle, Simpson, Gauss2
    @return: integral from a to b of f(x)
    """
    h = (b-a)/N
    Lhf = 0
    if method == "trapeze":
        for i in range(N):
            xi = a+i*h
            Lhf += f(xi)+f(xi+h)
        Lhf *= h/2
    elif method == "rectangle":
        # midpoint rule: evaluate f at the centre of each interval
        for i in range(N):
            xi = a+i*h
            Lhf += f(xi+h/2)
        Lhf *= h
    elif method == "Simpson":
        for i in range(N):
            xi = a+i*h
            Lhf += f(xi)+4*f(xi+h/2)+f(xi+h)
        Lhf *= h/6
    elif method == "Gauss2":
        # two-point Gauss-Legendre quadrature on each interval
        for i in range(N):
            xi = a+i*h
            Lhf += f(xi+h*(1/2)*(1-(1/sqrt(3))))+f(xi+h*(1/2)*(1+(1/sqrt(3))))
        Lhf *= h/2
    return Lhf
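# Hedged usage sketch (illustration only, not part of the original snippet):
# integrates sin(x) over [0, pi], whose exact value is 2, using the composite
# Simpson rule defined above.
def _demo_integrate():
    from math import sin, pi
    return integrate(sin, 0.0, pi, 1000, "Simpson")  # close to 2.0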
|
e716733160fd46943de3518e573215b3cf058113
| 3,644,856
|
def sum_naturals(n):
"""Sum the first N natural numbers.
>>> sum_naturals(5)
15
"""
total, k = 0, 1
while k <= n:
total, k = total + k, k + 1
return total
|
0ef1ff7e8f0f2df522c73d6d4affc890ba4ad2fa
| 3,644,857
|
def load_data(data_map,config,log):
"""Collect data locally and write to CSV.
:param data_map: transform DataFrame map
:param config: configurations
:param log: logger object
:return: None
"""
for key,df in data_map.items():
(df
.coalesce(1)
.write
.csv(f'{config["output"]}/{key}', mode='overwrite', header=True))
return None
|
2b690c4f5970df7f9e98ce22970ce3eb892f15bc
| 3,644,858
|
import yaml
import os
import time
import torch
def get_config(config_file, exp_dir=None, is_test=False):
""" Construct and snapshot hyper parameters """
config = edict(yaml.load(open(config_file, 'r'), Loader=yaml.FullLoader))
# create hyper parameters
config.run_id = str(os.getpid())
config.exp_name = '_'.join([
config.model.model_name, config.dataset.dataset_name,
time.strftime('%Y-%b-%d-%H-%M-%S'), config.run_id
])
if config.train.is_resume and not is_test:
config.save_dir = config.train.resume_dir
save_name = os.path.join(config.save_dir, 'config_resume_{}.yaml'.format(config.run_id))
else:
config.save_dir = os.path.join(config.exp_dir, config.exp_name)
save_name = os.path.join(config.save_dir, 'config.yaml')
mkdir(config.exp_dir)
mkdir(config.save_dir)
yaml.dump(edict2dict(config), open(save_name, 'w'), default_flow_style=False)
#Seed and GPU
np.random.seed(config.seed)
torch.manual_seed(config.seed)
torch.cuda.manual_seed_all(config.seed)
config.use_gpu = config.use_gpu and torch.cuda.is_available()
return config
|
69d57ecf8538e1ca89124b148b068ec58098e046
| 3,644,859
|
import logging
def _filter_credential_warning(record) -> bool:
"""Rewrite out credential not found message."""
if (
not record.name.startswith("azure.identity")
or record.levelno != logging.WARNING
):
return True
message = record.getMessage()
if ".get_token" in message:
if message.startswith("EnvironmentCredential"):
print("Attempting to sign-in with environment variable credentials...")
if message.startswith("AzureCliCredential"):
print("Attempting to sign-in with Azure CLI credentials...")
if message.startswith("ManagedIdentityCredential"):
print("Attempting to sign-in with Managed Instance credentials...")
print("Falling back to interactive logon.")
return not message
|
bc9d2a96ccadfbdb297af86bbdf0f80ab8d2dafa
| 3,644,860
|
import importlib
def import_module_from_path(mod_name, mod_path):
"""Import module with name `mod_name` from file path `mod_path`"""
spec = importlib.util.spec_from_file_location(mod_name, mod_path)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
return mod
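# Hedged usage sketch (illustration only): the module name and file path below
# are hypothetical placeholders, not values from the original source.
def _demo_import_module_from_path():
    mod = import_module_from_path("my_plugin", "/tmp/my_plugin.py")
    return mod.__name__  # "my_plugin", assuming that file exists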
|
18891db514b4f1e41bce6de69f5b66fbf51d06e5
| 3,644,861
|
def preprocessing(text, checkpoint_dir, minocc):
"""
This time, we cannot leave the file as it is. We have to modify it first.
- replace "\n" by " \n " -> newline is a word
- insert space between punctuation and last word of sentence
- create vocab, but only for those words that occur more than once
- replace all words that occur too seldomly with "<unk>"
returns the list of integers we will use as the dataset as well as char2idx and idx2char
"""
splitted = prepare_text(text)
print("Total number of words:",len(splitted))
occurences = dict()
for word in splitted:
        if word in occurences:
occurences[word] += 1
else:
occurences[word] = 1
vocab = ["<unk>"]
    for word, count in occurences.items():
        if count > minocc:
            vocab.append(word)
    splitted = remove_unknowns(vocab, splitted)  # replace words occurring at most minocc times with "<unk>"
print(splitted[0:250])
print("Number of unique relevant words:", len(vocab))
char2idx = {u:i for i, u in enumerate(vocab)}
idx2char = np.array(vocab)
pickle_rick(checkpoint_dir, char2idx, 'char2idx')
pickle_rick(checkpoint_dir, idx2char, 'idx2char')
pickle_rick(checkpoint_dir, splitted, 'dataset')
return splitted, char2idx, idx2char
|
f3dd597ac144d1c52ca2a65852ef59f2cee63d8b
| 3,644,862
|
def dwave_chimera_graph(
m,
n=None,
t=4,
draw_inter_weight=draw_inter_weight,
draw_intra_weight=draw_intra_weight,
draw_other_weight=draw_inter_weight,
seed=0,
):
"""
Generate DWave Chimera graph as described in [1] using dwave_networkx.
Parameters
----------
m: int
Number of cells per column
n: int
Number of cells per row
t: int
Number of nodes on each side of a bipartite cell subgraph
draw_inter_weight: function (seed) -> number
Function to call for weights of inter-cell edges
draw_intra_weight: function (seed) -> number
Function to call for weights of intra-cell edges
draw_other_weight: function (seed) -> number
Function to call for weights of intra-cell edges
seed: integer, random_state, or None
Indicator of random number generation state
Returns
-------
graph: nx.Graph
The generated Chimera graph
References
----------
..[1] https://docs.ocean.dwavesys.com/en/latest/concepts/topology.html
"""
if not n:
n = m
g = dwave.chimera_graph(m, n, t)
_initialize_weights_chimera(
chimera_graph=g,
size=m,
draw_inter_weight=lambda: draw_inter_weight(seed),
draw_intra_weight=lambda: draw_intra_weight(seed),
draw_other_weight=lambda: draw_other_weight(seed),
)
return g
|
cec6232d1f3413b6cedd74d909e8d9fa03d9b43f
| 3,644,863
|
def extract_first_value_in_quotes(line, quote_mark):
"""
Extracts first value in quotes (single or double) from a string.
Line is left-stripped from whitespaces before extraction.
:param line: string
:param quote_mark: type of quotation mark: ' or "
:return: Dict: 'value': extracted value;
'remainder': the remainder after extraction
'error' empty string if success or 'syntax' otherwise;
"""
line = line.lstrip()
result = {'value': '', 'remainder': line, 'error': 'syntax'}
if len(line) < 2:
return result
if line[0] != quote_mark:
return result
next_qm_pos = line.find(quote_mark, 1)
if next_qm_pos == -1:
return result
result['value'] = line[1:next_qm_pos]
result['remainder'] = line[next_qm_pos + 1:]
result['error'] = ''
return result
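# Hedged usage sketch (illustration only): shows the returned dict for a
# well-formed and a malformed input line.
def _demo_extract_first_value_in_quotes():
    ok = extract_first_value_in_quotes('  "hello" rest of line', '"')
    # ok == {'value': 'hello', 'remainder': ' rest of line', 'error': ''}
    bad = extract_first_value_in_quotes('no quotes here', '"')
    # bad == {'value': '', 'remainder': 'no quotes here', 'error': 'syntax'}
    return ok, bad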
|
4f614cbbb3a1a04ece0b4da63ea18afb32c1c86b
| 3,644,864
|
def dynamic(graph):
"""Returns shortest tour using dynamic programming approach.
The idea is to store lengths of smaller sub-paths and re-use them
to compute larger sub-paths.
"""
adjacency_M = graph.adjacency_matrix()
tour = _dynamic(adjacency_M, start_node=0)
return tour
|
06d1adcadc6456aa29a7c0d176329f9d1569bf58
| 3,644,865
|
import yaml
def read_login_file():
"""
Parse the credentials file into username and password.
Returns
-------
dict
"""
with open('.robinhood_login', 'r') as login_file:
credentials = yaml.safe_load(login_file)
return credentials
|
16ef8a74c9523ac0809e80995069c3bbc0e8f8c0
| 3,644,866
|
from itertools import chain
def flatten(ls):
    """
    Flatten a list of lists into a single list.
    """
    return list(chain.from_iterable(ls))
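# Hedged usage sketch (illustration only).
def _demo_flatten():
    return flatten([[1, 2], [3], [4, 5]])  # [1, 2, 3, 4, 5]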
|
afab4515644ce340a73f5a5cf9f97e59fa8c4d7e
| 3,644,867
|
def gaussian_kernel(size, size_y=None):
""" Gaussian kernel.
"""
size = int(size)
if not size_y:
size_y = size
else:
size_y = int(size_y)
x, y = np.mgrid[-size:size+1, -size_y:size_y+1]
g = np.exp(-(x**2/float(size)+y**2/float(size_y)))
fwhm = size
fwhm_aper = photutils.CircularAperture((frame_center(g)), fwhm/2.)
fwhm_aper_phot = photutils.aperture_photometry(g, fwhm_aper)
g_norm = g/np.array(fwhm_aper_phot['aperture_sum'])
return g_norm/g_norm.max()
|
6752c4fc9355507d3b411515b8c687dc02b81d2b
| 3,644,868
|
from typing import Any
def parse_property_value(prop_tag: int, raw_values: list, mem_id: int = 0) -> Any:
"""
Parse property raw values
:param prop_tag: The property tag, see 'PropertyTag' enum
:param raw_values: The property values
:param mem_id: External memory ID (default: 0)
"""
if prop_tag not in PROPERTIES.keys():
return None
cls = PROPERTIES[prop_tag]['class'] # type: ignore
kwargs = PROPERTIES[prop_tag]['kwargs'] # type: ignore
kwargs['mem_id'] = mem_id # type: ignore
return cls(prop_tag, raw_values, **kwargs)
|
fc8d54a3f8b8ca762acdc5f6123749236e4eaeb3
| 3,644,869
|
from typing import Optional
from typing import Iterator
from typing import List
from typing import Tuple
def scan_stanzas_string(
s: str,
*,
separator_regex: Optional[RgxType] = None,
skip_leading_newlines: bool = False,
) -> Iterator[List[Tuple[str, str]]]:
"""
.. versionadded:: 0.4.0
Scan a string for zero or more stanzas of RFC 822-style header fields and
return a generator of lists of ``(name, value)`` pairs, where each list
represents a stanza of header fields in the input.
The stanzas are terminated by blank lines. Consecutive blank lines between
stanzas are treated as a single blank line. Blank lines at the end of the
input are discarded without creating a new stanza.
.. deprecated:: 0.5.0
Use `scan_stanzas()` instead
:param s: a string which will be broken into lines on CR, LF, and CR LF
boundaries and passed to `scan_stanzas()`
:param kwargs: Passed to the `Scanner` constructor
:rtype: generator of lists of pairs of strings
:raises ScannerError: if the header section is malformed
"""
return scan_stanzas( # pragma: no cover
s,
separator_regex=separator_regex,
skip_leading_newlines=skip_leading_newlines,
)
|
f68694ce344b738f23b689b74d92f7ab4c20b237
| 3,644,870
|
def format_dependency(dependency: str) -> str:
"""Format the dependency for the table."""
return "[coverage]" if dependency == "coverage" else f"[{dependency}]"
|
981a38074dbfb1f332cc49bce2c6d408aad3e9e2
| 3,644,871
|
def _addSuffixToFilename(suffix, fname):
"""Add suffix to filename, whilst preserving original extension, eg:
'file.ext1.ext2' + '_suffix' -> 'file_suffix.ext1.ext2'
"""
head = op.split(fname)[0]
fname, ext = _splitExts(fname)
return op.join(head, fname + suffix + ext)
|
2fc0a16f6f8b8be1f27fd7ff32673ed79f84fccb
| 3,644,872
|
import re
def parse_into_tree(abbr, doc_type = 'html'):
"""
    Converts an abbreviation into a tree of elements
    @param abbr: Abbreviation
    @type abbr: str
    @param doc_type: Document type (xsl, html)
    @type doc_type: str
    @return: Tag
"""
root = Tag('', 1, doc_type)
parent = root
last = None
token = re.compile(r'([\+>])?([a-z][a-z0-9:\!\-]*)(#[\w\-\$]+)?((?:\.[\w\-\$]+)*)(?:\*(\d+))?', re.IGNORECASE)
def expando_replace(m):
ex = m.group(1)
if 'expandos' in zen_settings[doc_type] and ex in zen_settings[doc_type]['expandos']:
return zen_settings[doc_type]['expandos'][ex]
else:
return ex
    # replace expandable elements
abbr = re.sub(r'([a-z][a-z0-9]*)\+$', expando_replace, abbr)
def token_expander(operator, tag_name, id_attr, class_name, multiplier):
multiplier = multiplier and int(multiplier) or 1
current = is_snippet(tag_name, doc_type) and Snippet(tag_name, multiplier, doc_type) or Tag(tag_name, multiplier, doc_type)
if id_attr:
current.add_attribute('id', id_attr[1:])
if class_name:
current.add_attribute('class', class_name[1:].replace('.', ' '))
        # move deeper into the tree
if operator == '>' and token_expander.last:
token_expander.parent = token_expander.last;
token_expander.parent.add_child(current)
token_expander.last = current;
return '';
token_expander.parent = root
token_expander.last = None
abbr = re.sub(token, lambda m: token_expander(m.group(1), m.group(2), m.group(3), m.group(4), m.group(5)), abbr)
    # if abbr is now an empty string, the whole abbreviation was successfully
    # converted into a tree; otherwise the abbreviation was not valid
return not abbr and root or None;
|
8bb0ecaa9b2a2e9ce41882b8f140442f28f3c922
| 3,644,873
|
import os
import csv
def map_pao1_genes(gene_list):
"""Takes a list of PAO1 genes and returns the corresponding PA14 names."""
pa14_pao1_mapping = dict()
mapping_path = os.path.join(os.getcwd(), 'data', 'ortholuge_pa14_to_pao1_20190708.tsv')
with open(mapping_path) as mapping:
reader = csv.reader(mapping, delimiter='\t')
for row in reader:
pa14_pao1_mapping[row[4]] = row[10]
pa14_genes = [pa14_pao1_mapping[gene] for gene in gene_list if gene in pa14_pao1_mapping.keys()]
return pa14_genes
|
675cf26d259bee1f6ff148f1a4ad2a71b8253ef5
| 3,644,874
|
def banner():
"""Verify banner in HTML file match expected."""
def match(path, expected_url=None, expected_base=None):
"""Assert equals and return file contents.
:param py.path.local path: Path to file to read.
:param str expected_url: Expected URL in <a href="" /> link.
:param str expected_base: Expected base message.
:return: File contents.
:rtype: str
"""
contents = path.read()
actual = RE_BANNER.findall(contents)
if not expected_url and not expected_base:
assert not actual
else:
assert actual == [(expected_url, expected_base)]
return contents
return match
|
54777fe767075561cbb20c3e7ab88ca209fa8c87
| 3,644,875
|
from tqdm import tqdm
import operator
def rerank(x2ys, x2cnt, x2xs, width, n_trans):
"""Re-rank word translations by computing CPE scores.
See paper for details about the CPE method."""
x2ys_cpe = dict()
for x, ys in tqdm(x2ys.items()):
cntx = x2cnt[x]
y_scores = []
for y, cnty in sorted(ys.items(), key=operator.itemgetter(1), reverse=True)[:width]:
ts = cnty / float(cntx) # translation score: initial value
if x in x2xs:
for x2, cntx2 in x2xs[x].items(): # Collocates
p_x_x2 = cntx2 / float(cntx)
p_x2_y2 = 0
if x2 in x2ys:
p_x2_y2 = x2ys[x2].get(y, 0) / float(x2cnt[x2])
ts -= (p_x_x2 * p_x2_y2)
y_scores.append((y, ts))
_ys_ = sorted(y_scores, key=lambda y_score: y_score[1], reverse=True)[:n_trans]
_ys_ = [each[0] for each in _ys_]
x2ys_cpe[x] = _ys_
return x2ys_cpe
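# Hedged usage sketch (illustration only): toy co-occurrence counts in the
# dict shapes the function expects; the words and counts are made up.
def _demo_rerank():
    x2ys = {"chat": {"cat": 8, "chat": 2}, "noir": {"black": 5, "cat": 1}}
    x2cnt = {"chat": 10, "noir": 6}
    x2xs = {"chat": {"noir": 4}}
    return rerank(x2ys, x2cnt, x2xs, width=5, n_trans=1)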
|
57d9c5012341acf89e92ffd6df29688af5d6965f
| 3,644,876
|
def ParallelTempering(num_sweeps=10000, num_replicas=10,
max_iter=None, max_time=None, convergence=3):
"""Parallel tempering workflow generator.
Args:
num_sweeps (int, optional):
Number of sweeps in the fixed temperature sampling.
num_replicas (int, optional):
Number of replicas (parallel states / workflow branches).
max_iter (int/None, optional):
Maximum number of iterations of the update/swaps loop.
max_time (int/None, optional):
Maximum wall clock runtime (in seconds) allowed in the update/swaps
loop.
convergence (int/None, optional):
Number of times best energy of the coldest replica has to repeat
before we terminate.
Returns:
Workflow (:class:`~hybrid.core.Runnable` instance).
"""
# expand single input state into `num_replicas` replica states
preprocess = SpawnParallelTemperingReplicas(num_replicas=num_replicas)
# fixed temperature sampling on all replicas in parallel
update = hybrid.Map(FixedTemperatureSampler(num_sweeps=num_sweeps))
# replica exchange step: do the top-down sweep over adjacent pairs
# (good hot samples sink to bottom)
swap = SwapReplicasDownsweep()
# loop termination key function
def key(states):
if states is not None:
return states[-1].samples.first.energy
# replicas update/swap until Loop termination criteria reached
loop = hybrid.Loop(
update | swap,
max_iter=max_iter, max_time=max_time, convergence=convergence, key=key)
# collapse all replicas (although the bottom one should be the best)
postprocess = hybrid.MergeSamples(aggregate=True)
workflow = preprocess | loop | postprocess
return workflow
|
48b62b2814f67b66823fc1c35024eaab6cde7591
| 3,644,877
|
def get_document_info(file):
"""
Scrape document information using ChemDataExtractor Scrapers
:param file: file path to target article
:type file: str
:return: list of dicts containing the document information
"""
if file.endswith('.html'):
file_type = 'html'
elif file.endswith('.xml'):
file_type = 'xml'
else:
return
print("file type", file_type)
f = open(file, 'rb').read()
sel = Selector.from_text(f)
# Determine publishers, use the RSC scraper by default
publisher = detect_publisher(f)
if publisher == 'acs':
document_info = AcsHtmlDocument(sel)
elif publisher == 'rsc':
document_info = RscHtmlDocument(sel)
elif publisher == 'elsevier' and file_type == 'html':
document_info = ElsevierHtmlDocument(sel)
elif publisher == 'elsevier' and file_type == 'xml':
document_info = ElsevierXmlDocument(sel)
elif publisher == 'springer' and file_type == 'html':
document_info = SpringerHtmlDocument(sel)
else:
print('Unknown Journal for file' + file + 'using RSC HTML formatting by default')
document_info = RscHtmlDocument(sel)
return document_info
|
5d5697ce9a7916920c938a3cff17fdeda8b5f81b
| 3,644,878
|
def qlog(q):
"""
Compute logarithm of a unit quaternion (unit norm is important here).
Let q = [a, qv], where a is the scalar part and qv is the vector part.
qv = sin(phi/2)*nv, where nv is a unit vector. Then
ln(q) = ln(||q||) + qv / ||qv|| * arccos(a / ||q||)
Therefore for a unit quaternion, the scalar part of ln(q) is zero
and the vector part of ln(q) is 1/2 * phi * nv,
i.e. half of rotation vector rv = phi * nv because
a = cos(phi/2) in attitude quaternion (see quatRotVec())
Reference: https://en.wikipedia.org/wiki/Quaternion
NOTE 1: due to existing implementation in C++, this function
returns just the vector part of ln(q)
NOTE 2: According to Wiki description, ln(q)_v should be a
half of rotation vector. However the previous
implementation computed the full rotation vector.
So, using the rotation vector for now until cleared up.
"""
rv = quatRotVec(q)
return rv
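# Hedged sketch (illustration only): a standalone computation of the rotation
# vector rv = phi * nv from a unit quaternion q = [cos(phi/2), sin(phi/2)*nv],
# which is what quatRotVec() above is assumed to return.
import numpy as np

def _rot_vec_from_unit_quat(q):
    a = q[0]
    qv = np.asarray(q[1:], dtype=float)
    norm_qv = np.linalg.norm(qv)
    if norm_qv < 1e-12:
        return np.zeros(3)
    phi = 2.0 * np.arctan2(norm_qv, a)  # phi/2 = atan2(sin(phi/2), cos(phi/2))
    return phi * qv / norm_qv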
|
80e01568cc5fe2ab2c7d11bdd642906374992985
| 3,644,879
|
from datetime import datetime
def trx():
"""Response from ADN about current transaction APPROVED/DECLINED and showing Receipt of transaction"""
trx = web.trxs[-1]
trx.shoppingCartUuid = request.args.get('shoppingCartUuid', default = "", type = str)
trx.mediaType = request.args.get('mediaType', default = "", type = str)
trx.correlationId = request.args.get('correlationId', default = "", type = str)
trx.trxId = request.args.get('payId', default = "", type = str)
trx.maskedMediaId = request.args.get('maskedMediaId', default = "", type = str)
trx.status = request.args.get('status', default = "", type = str)
trx.author_time = datetime.now().strftime("%d.%m.%Y %H:%M:%S")
web.logger.info(f"ShoppingCart {trx.shoppingCartUuid} Transaction {trx.trxId} {trx.mediaType} {trx.maskedMediaId} {trx.status}")
return render_template('trx.html', trx=trx)
|
4ffa01c2d6682a6320870ac158f564c37aa5a32e
| 3,644,880
|
def get_counts_by_domain(df):
"""
Parameters:
df (pandas.Dataframe) - form of `get_counts_df` output
Returns:
pandas.Dataframe
"""
columns = ['study', 'study_label', 'domain_code', 'domain_label']
df2 = df.groupby(columns, as_index=False)[["count", "subjects"]].max()
return df2
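# Hedged usage sketch (illustration only): a minimal frame in the
# `get_counts_df` shape assumed by the function; column values are made up.
def _demo_get_counts_by_domain():
    import pandas as pd
    df = pd.DataFrame({
        "study": ["s1", "s1"], "study_label": ["Study 1", "Study 1"],
        "domain_code": ["DM", "DM"], "domain_label": ["Demographics", "Demographics"],
        "count": [10, 12], "subjects": [5, 6],
    })
    return get_counts_by_domain(df)  # one row per study/domain with the max counts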
|
544aaa734858209c36c84d87bb6beb05761a5194
| 3,644,881
|
def batch_cosine_similarity(x1, x2):
""" https://en.wikipedia.org/wiki/Cosine_similarity """
mul = np.multiply(x1, x2)
s = np.sum(mul, axis=1)
return s
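# Hedged usage sketch (illustration only): the row-wise dot product above
# equals cosine similarity only when the rows are L2-normalized, so the demo
# normalizes them first.
def _demo_batch_cosine_similarity():
    import numpy as np
    x1 = np.array([[1.0, 0.0], [1.0, 1.0]])
    x2 = np.array([[1.0, 0.0], [1.0, 0.0]])
    x1 = x1 / np.linalg.norm(x1, axis=1, keepdims=True)
    x2 = x2 / np.linalg.norm(x2, axis=1, keepdims=True)
    return batch_cosine_similarity(x1, x2)  # approx [1.0, 0.707]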
|
6ed5e4ca426cc61d25dd272f92ba9220186bfd8e
| 3,644,882
|
def plot(ax, x, y):
"""Plot """
return ax._plot(x, y)
|
90cc2616d21e3c1239524437f653f85602c1984b
| 3,644,883
|
import os
def concatenatePDFs(filelist, pdfname, pdftk='pdftk', gs='gs', cleanup=False,
                    quiet=False):
    """
    Takes a list or a string list of PDF filenames (space-delimited), and an
    output name, and concatenates them.
    It first tries pdftk (better quality), and if that fails, it tries
    ghostscript (more commonly installed).
    Todd Hunter
    """
    if (type(filelist) == list):
        filelist = ' '.join(filelist)
    cmd = '%s %s cat output %s' % (pdftk, filelist, pdfname)
    if not quiet: print("Running command = %s" % (cmd))
    mystatus = os.system(cmd)
    if (mystatus != 0):
        print("status = ", mystatus)
        cmd = '%s -q -sPAPERSIZE=letter -dNOPAUSE -dBATCH -sDEVICE=pdfwrite -sOutputFile=%s %s' % (gs,pdfname,filelist)
        print("Running command = %s" % (cmd))
        mystatus = os.system(cmd)
        if (mystatus != 0):
            gs = '/opt/local/bin/gs'
            cmd = '%s -q -sPAPERSIZE=letter -dNOPAUSE -dBATCH -sDEVICE=pdfwrite -sOutputFile=%s %s' % (gs,pdfname,filelist)
            print("Running command = %s" % (cmd))
            mystatus = os.system(cmd)
            if (mystatus != 0):
                print("Both pdftk and gs are missing, no PDF created.")
                cleanup = False
    if (cleanup):
        os.system('rm %s' % filelist)
    return (mystatus)
|
3e138e84db9650af3afbbab4d904dc3a4cb581c9
| 3,644,884
|
def get_module_offset(
process_id: int,
process_name: str
) -> Address:
"""Returns an Adress with the base offset of the process.
Args:
process_id (int): PID
process_name (str): Name of the process. Case does not matter.
Returns:
Address: Adress with the base offset of the process.
"""
flag = TH32CS_SNAPMODULE | TH32CS_SNAPMODULE32
snap = CreateToolhelp32Snapshot(flag, process_id)
me32 = MODULEENTRY32()
me32.dwSize = sizeof(MODULEENTRY32)
Module32First(snap, byref(me32))
while True:
name = me32.szModule.decode("ascii")
if process_name.lower() in name.lower():
base_addr = me32.modBaseAddr
addr = Address(addressof(base_addr.contents))
CloseHandle(snap)
return addr
if not Module32Next(snap, byref(me32)):
break
CloseHandle(snap)
|
09e0775213e4a32f1ea786ad9d1184e7f4dbd7cf
| 3,644,885
|
from typing import Sequence
def sequence_to_header(sequence: Sequence[Bytes]) -> Header:
"""
Build a Header object from a sequence of bytes. The sequence should be
containing exactly 15 byte sequences.
Parameters
----------
sequence :
The sequence of bytes which is supposed to form the Header
object.
Returns
-------
header : `Header`
The obtained `Header` object.
"""
ensure(len(sequence) == 15)
ensure(len(sequence[12]) <= 32)
return Header(
parent_hash=Hash32(sequence[0]),
ommers_hash=Hash32(sequence[1]),
coinbase=Address(sequence[2]),
state_root=Root(sequence[3]),
transactions_root=Root(sequence[4]),
receipt_root=Root(sequence[5]),
bloom=Bloom(sequence[6]),
difficulty=Uint.from_be_bytes(sequence[7]),
number=Uint.from_be_bytes(sequence[8]),
gas_limit=Uint.from_be_bytes(sequence[9]),
gas_used=Uint.from_be_bytes(sequence[10]),
timestamp=U256.from_be_bytes(sequence[11]),
extra_data=sequence[12],
mix_digest=Hash32(sequence[13]),
nonce=Bytes8(sequence[14]),
)
|
b1c4040b216162777e33bbbab0f7774b8b02af91
| 3,644,886
|
def makeASdef(isd_id, as_id_tail, label, public_ip, is_core=False, is_ap=False):
""" Helper for readable ASdef declaration """
return ASdef(isd_id, _expand_as_id(as_id_tail), label, public_ip, is_core, is_ap)
|
19bc51a648ac558f524f29744e1574a245e50cf2
| 3,644,887
|
from netneurotools.utils import check_fs_subjid
from netneurotools.datasets import fetch_fsaverage
from netneurotools.datasets.utils import _get_data_dir
import os
def _get_fs_subjid(subject_id, subjects_dir=None):
"""
Gets fsaverage version `subject_id`, fetching if required
Parameters
----------
subject_id : str
FreeSurfer subject ID
subjects_dir : str, optional
Path to FreeSurfer subject directory. If not set, will inherit from
the environmental variable $SUBJECTS_DIR. Default: None
Returns
-------
subject_id : str
FreeSurfer subject ID
subjects_dir : str
Path to subject directory with `subject_id`
"""
# check for FreeSurfer install w/fsaverage; otherwise, fetch required
try:
subject_id, subjects_dir = check_fs_subjid(subject_id, subjects_dir)
except FileNotFoundError:
if 'fsaverage' not in subject_id:
raise ValueError('Provided subject {} does not exist in provided '
'subjects_dir {}'
.format(subject_id, subjects_dir))
fetch_fsaverage(subject_id)
subjects_dir = os.path.join(_get_data_dir(), 'tpl-fsaverage')
subject_id, subjects_dir = check_fs_subjid(subject_id, subjects_dir)
return subject_id, subjects_dir
|
ce4599ab875c7a33aa71cb9bc07143a04b6b2643
| 3,644,888
|
def EnableTrt(mod, params=None, trt_version=None):
"""Converts the "main" function in the module into one that can be executed using
TensorRT. If any of the operators are not supported by the TensorRT
conversion, the unmodified program will be returned instead.
Parameters
----------
mod: Module
The original module.
params : dict of str to NDArray
Input parameters to the graph that do not change
during inference time. Used for constant folding.
trt_version : Optional[Tuple[int]]
Which version of TensorRT to target for partitioning as a tuple of
(major, minor, patch). If not specified, will attempt to get using
GetTrtVersion.
Returns
-------
mod: Module
The modified module which will use the TensorRT runtime if compatible.
"""
if not trt_version:
trt_version = GetTrtVersion()
# If TVM wasn't built against TRT, default to target TRT 6. Since the
# actual conversion to TRT is done at runtime, building against TRT is
# not required for compilation.
if not trt_version:
trt_version = (6, 0, 1)
assert isinstance(trt_version, (list, tuple))
assert len(trt_version) == 3
# Apply passes required for TRT
mod = relay.transform.RemoveUnusedFunctions()(mod)
mod = relay.transform.InferType()(mod)
mod = relay.transform.ConvertLayout('NCHW')(mod)
mod = PreprocessForTrt(mod)
if params:
# Bind params so that we can use FoldConstant.
mod['main'] = _bind_params(mod['main'], params)
mod = relay.transform.FoldConstant()(mod)
return _transform.EnableTrt(*trt_version)(mod)
|
c3cac75de48e2c2a9af30ce427bc57d86a56dbc4
| 3,644,889
|
import cupy
def _setup_cuda_fft_resample(n_jobs, W, new_len):
"""Set up CUDA FFT resampling.
Parameters
----------
n_jobs : int | str
If n_jobs == 'cuda', the function will attempt to set up for CUDA
FFT resampling.
W : array
The filtering function to be used during resampling.
If n_jobs='cuda', this function will be shortened (since CUDA
assumes FFTs of real signals are half the length of the signal)
and turned into a gpuarray.
new_len : int
The size of the array following resampling.
Returns
-------
n_jobs : int
Sets n_jobs = 1 if n_jobs == 'cuda' was passed in, otherwise
original n_jobs is passed.
cuda_dict : dict
Dictionary with the following CUDA-related variables:
use_cuda : bool
Whether CUDA should be used.
fft_plan : instance of FFTPlan
FFT plan to use in calculating the FFT.
ifft_plan : instance of FFTPlan
FFT plan to use in calculating the IFFT.
x_fft : instance of gpuarray
Empty allocated GPU space for storing the result of the
frequency-domain multiplication.
x : instance of gpuarray
Empty allocated GPU space for the data to resample.
Notes
-----
This function is designed to be used with fft_resample().
"""
cuda_dict = dict(use_cuda=False, rfft=rfft, irfft=irfft)
rfft_len_x = len(W) // 2 + 1
# fold the window onto inself (should be symmetric) and truncate
W = W.copy()
W[1:rfft_len_x] = (W[1:rfft_len_x] + W[::-1][:rfft_len_x - 1]) / 2.
W = W[:rfft_len_x]
if n_jobs == 'cuda':
n_jobs = 1
init_cuda()
if _cuda_capable:
try:
# do the IFFT normalization now so we don't have to later
W = cupy.array(W)
logger.info('Using CUDA for FFT resampling')
except Exception:
logger.info('CUDA not used, could not instantiate memory '
'(arrays may be too large), falling back to '
'n_jobs=1')
else:
cuda_dict.update(use_cuda=True,
rfft=_cuda_upload_rfft,
irfft=_cuda_irfft_get)
else:
logger.info('CUDA not used, CUDA could not be initialized, '
'falling back to n_jobs=1')
cuda_dict['W'] = W
return n_jobs, cuda_dict
|
34a949250239b5334650b89d6566b81460079591
| 3,644,890
|
import nltk
def sentensize(text):
"""Break a text into sentences.
Args:
text (str): A text containing sentence(s).
Returns:
list of str: A list of sentences.
"""
return nltk.tokenize.sent_tokenize(text)
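# Hedged usage sketch (illustration only): requires the NLTK "punkt" sentence
# model to have been downloaded, e.g. via nltk.download("punkt").
def _demo_sentensize():
    return sentensize("First sentence. Second one!")  # two sentences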
|
ae16aff476842c8e0fc2fa2506b68ad60dc603f0
| 3,644,891
|
def tokenize(texts, context_length=77):
"""
Returns the tokenized representation of given input string(s)
Parameters
----------
texts : Union[str, List[str]]
An input string or a list of input strings to tokenize
context_length : int
The context length to use; all CLIP models use 77 as the context length
Returns
-------
A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]
"""
if isinstance(texts, str):
texts = [texts]
sot_token = tokenizer.encoder["<|startoftext|>"]
eot_token = tokenizer.encoder["<|endoftext|>"]
all_tokens = [[sot_token] +
tokenizer.encode(text) + [eot_token] for text in texts]
result = paddle.zeros((len(all_tokens), context_length), dtype='int64')
for i, tokens in enumerate(all_tokens):
if len(tokens) > context_length:
raise RuntimeError(
f"Input {texts[i]} is too long for context length {context_length}")
result[i, :len(tokens)] = paddle.to_tensor(tokens)
return result
|
1fe73425cb30f0f6fbce6caa740f118ee9591347
| 3,644,892
|
def _int64_feature_list(values):
"""Wrapper for inserting an int64 FeatureList into a SequenceExample proto,
e.g, sentence in list of ints
"""
return tf.train.FeatureList(feature=[_int64_feature(v) for v in values])
|
edf4605c1dd9ad45d3a2508122b85213657f56cb
| 3,644,893
|
def read_relative_pose(object_frame_data: dict) -> tf.Transform:
"""
Read the pose of an object relative to the camera, from the frame data.
For reasons (known only to the developer), these poses are in OpenCV convention.
So x is right, y is down, z is forward.
Scale is still 1cm, so we divide by 100 again.
see
https://github.com/jskinn/Dataset_Synthesizer/blob/local-devel/Source/Plugins/NVSceneCapturer/Source/NVSceneCapturer/Private/NVSceneFeatureExtractor_DataExport.cpp#L143
:param object_frame_data: The frame data dict from the matching object in the objects array
:return: The relative pose of the object, as a Transform
"""
tx, ty, tz = object_frame_data['location']
qx, qy, qz, qw = object_frame_data['quaternion_xyzw']
return tf.Transform(
location=(tz / 100, -tx / 100, -ty / 100),
rotation=(qw, qz, -qx, -qy),
w_first=True
)
|
dae13aa0a10db2133f87c399ec90113ef157a210
| 3,644,894
|
from sqlmodel import Session, select
def upsert_task(task_uuid: str, task: Task) -> Task:
"""Upsert a task.
It is used to create a task in the database if it does not already exists,
else it is used to update the existing one.
Args:
task_uuid:
The uuid of the task to upsert.
task:
The task data.
Returns:
The upserted task.
"""
with Session(engine) as session:
# check if the task exists
statement = select(Task).where(Task.uuid == task_uuid)
result = session.exec(statement).first()
# if not, create it
if result is None:
result = task
# sync the data
for key, value in task.dict(exclude_unset=True).items():
setattr(result, key, value)
# persist the data to the database
session.add(result)
session.commit()
session.refresh(result)
return result
|
7fbf296377fb1e4e59b7c9884c6191ff2b0a273b
| 3,644,895
|
def shuffle_entries(x, entry_cls, config=None, value_type=sgf2n, reverse=False, perm_size=None):
""" Shuffle a list of ORAM entries.
Randomly permutes the first "perm_size" entries, leaving the rest (empty
entry padding) in the same position. """
n = len(x)
l = len(x[0])
if n & (n-1) != 0:
raise CompilerError('Entries must be padded to power of two length.')
if perm_size is None:
perm_size = n
xarrays = [Array(n, value_type.reg_type) for i in range(l)]
for i in range(n):
for j,value in enumerate(x[i]):
if isinstance(value, MemValue):
xarrays[j][i] = value.read()
else:
xarrays[j][i] = value
if config is None:
config = config_shuffle(perm_size, value_type)
for xi in xarrays:
shuffle(xi, config, value_type, reverse)
for i in range(n):
x[i] = entry_cls(xarrays[j][i] for j in range(l))
return config
|
827506de7e572b1df1b210ccfb990db5839b5273
| 3,644,896
|
import os
import random
import logging
import sys
def file(input_file):
"""Import colorscheme from json file."""
theme_name = ".".join((input_file, "json"))
user_theme_file = os.path.join(CONF_DIR, "colorschemes", theme_name)
theme_file = os.path.join(MODULE_DIR, "colorschemes", theme_name)
util.create_dir(os.path.dirname(user_theme_file))
# Find the theme file.
if os.path.isfile(input_file):
theme_file = input_file
elif os.path.isfile(user_theme_file):
theme_file = user_theme_file
elif input_file == "random":
themes = [theme.path for theme in list_themes()]
random.shuffle(themes)
theme_file = themes[0]
# Parse the theme file.
if os.path.isfile(theme_file):
logging.info("Set theme to \033[1;37m%s\033[0m.",
os.path.basename(theme_file))
return parse(theme_file)
else:
logging.error("No colorscheme file found.")
sys.exit(1)
|
9439f44c6d71b52d800fd95f0269e46f0185a8fa
| 3,644,897
|
import json
def entities(request):
"""Get entities for the specified project, locale and paths."""
try:
project = request.GET['project']
locale = request.GET['locale']
paths = json.loads(request.GET['paths'])
except MultiValueDictKeyError as e:
log.error(str(e))
return HttpResponse("error")
try:
project = Project.objects.get(slug=project)
    except Project.DoesNotExist as e:
log.error(str(e))
return HttpResponse("error")
try:
locale = Locale.objects.get(code__iexact=locale)
except Locale.DoesNotExist as e:
log.error(str(e))
return HttpResponse("error")
search = None
if request.GET.get('keyword', None):
search = request.GET
entities = Entity.for_project_locale(project, locale, paths, search)
return JsonResponse(entities, safe=False)
|
686f9298302d30e89ad0d34ed4c0c96d22fd455d
| 3,644,898
|
import json
def info(request, token):
"""
Return the HireFire json data needed to scale worker dynos
"""
if not settings.HIREFIRE_TOKEN:
return HttpResponseBadRequest(
"Hirefire not configured. Set the HIREFIRE_TOKEN environment variable on the app to use Hirefire for dyno scaling"
)
if token != settings.HIREFIRE_TOKEN:
raise PermissionDenied("Invalid token")
current_tasks = 0
queues = []
for index, config in enumerate(QUEUES_LIST):
queue = get_queue_by_index(index)
connection = queue.connection
# Only look at the default queue
if queue.name != "default":
continue
queue_data = {
"name": queue.name,
"jobs": queue.count,
"index": index,
"connection_kwargs": connection.connection_pool.connection_kwargs,
}
connection = get_connection(queue.name)
all_workers = Worker.all(connection=connection)
queue_workers = [worker for worker in all_workers if queue in worker.queues]
queue_data["workers"] = len(queue_workers)
finished_job_registry = FinishedJobRegistry(queue.name, connection)
started_job_registry = StartedJobRegistry(queue.name, connection)
deferred_job_registry = DeferredJobRegistry(queue.name, connection)
queue_data["finished_jobs"] = len(finished_job_registry)
queue_data["started_jobs"] = len(started_job_registry)
queue_data["deferred_jobs"] = len(deferred_job_registry)
current_tasks += queue_data["jobs"]
current_tasks += queue_data["started_jobs"]
queues.append(queue_data)
payload = [{"quantity": current_tasks, "name": "worker"}]
payload = json.dumps(payload)
return HttpResponse(payload, content_type="application/json")
|
7164d7f19b14ef601480484d6182f4b62cc250bf
| 3,644,899
|