content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
import logging
def auth(options) -> bool:
    """Set up auth and nothing else."""
    logger = logging.getLogger("pantri")
    object_store = FB_ObjectStore(options)
    if options.get("test", False):
        # Test mode: only report whether the cached token is still valid,
        # without acquiring a new one.
        cached_token = object_store.get_cached_auth_token()
        if cached_token and object_store.validate_auth_token(cached_token):
            logger.info("Auth token is valid.")
            return True
        logger.info("Auth token is invalid.")
        return False
    # Normal mode: (re)acquire a token.
    object_store.get_auth_token()
    logger.info("Auth token is valid.")
    return True
def _parse_detector(detector):
"""
Check and fix detector name strings.
Parameters
----------
detector : `str`
The detector name to check.
"""
oklist = ['n0', 'n1', 'n2', 'n3', 'n4', 'n5', 'n6', 'n7', 'n8', 'n9',
'n10', 'n11']
altlist = [str(i) for i in range(12)]
if detector in oklist:
return detector
elif detector in altlist:
return 'n' + detector
else:
raise ValueError('Detector string could not be interpreted') | f78d7eb5004b3cb6d3276b0c701263c71668e36e | 29,701 |
def mobius_area_correction_spherical(tria, mapping):
    r"""
    Find an optimal Mobius transformation for reducing the area distortion of a spherical conformal parameterization
    using the method in [1].
    Input:
    tria : TriaMesh (vertices, triangle) of genus-0 closed triangle mesh
    mapping: nv x 3 vertex coordinates of the spherical conformal parameterization
    Output:
    map_mobius: nv x 3 vertex coordinates of the updated spherical conformal parameterization
    x: the optimal parameters for the Mobius transformation, where
       f(z) = \frac{az+b}{cz+d}
            = ((x(1)+x(2)*1j)*z+(x(3)+x(4)*1j))/((x(5)+x(6)*1j)*z+(x(7)+x(8)*1j))
    If you use this code in your own work, please cite the following paper:
    [1] G. P. T. Choi, Y. Leung-Liu, X. Gu, and L. M. Lui,
    "Parallelizable global conformal parameterization of simply-connected surfaces via partial welding."
    SIAM Journal on Imaging Sciences, 2020.
    Adopted by Martin Reuter from Matlab code at
    https://github.com/garyptchoi/spherical-conformal-map
    with this
    Copyright (c) 2019-2020, Gary Pui-Tung Choi
    https://scholar.harvard.edu/choi
    and has been distributed with the Apache 2 License
    """
    # Compute the tria areas with normalization
    area_t = tria.tria_areas()
    area_t = area_t / area_t.sum()
    # Project the sphere onto the plane
    z = stereographic(mapping)
    def area_map(xx):
        # Apply the Mobius transform with parameters xx in the plane, lift back
        # to the sphere, and return the normalized triangle areas there.
        v = inverse_stereographic(((xx[0]+xx[1]*1j)*z+(xx[2]+xx[3]*1j))/((xx[4]+xx[5]*1j)*z+(xx[6]+xx[7]*1j)))
        area_v = TriaMesh(v, tria.t).tria_areas()
        return area_v / area_v.sum()
    # objective function: mean(abs(log(area_map/area_t)))
    def d_area(xx):
        # Non-finite entries (degenerate/zero-area triangles) are excluded
        # from the mean rather than poisoning the objective.
        a = np.abs(np.log(area_map(xx)/area_t))
        return (a[np.isfinite(a)]).mean()
    # Optimization setup
    x0 = np.array([1, 0, 0, 0, 0, 0, 1, 0])  # initial guess = identity Mobius map
    # lower and upper bounds
    bnds = ((-100, 100), (-100, 100), (-100, 100), (-100, 100),
            (-100, 100), (-100, 100), (-100, 100), (-100, 100))
    # Optimization (may further supply gradients for better result, not yet implemented)
    # options = optimoptions('fmincon','Display','iter');
    # x = fmincon(d_area,x0,[],[],[],[],lb,ub,[],options);
    options = {"disp": True}
    result = minimize(d_area, x0, bounds=bnds, options=options)
    x = result.x
    # obtain the conformal parameterization with area distortion corrected
    fz = ((x[0]+x[1]*1j)*z+(x[2]+x[3]*1j))/((x[4]+x[5]*1j)*z+(x[6]+x[7]*1j))
    map_mobius = inverse_stereographic(fz)
    return map_mobius, result
def rel_ordered(x1,x2,x3,x4):
    """
    given 4 collinear points, return true if the direction
    from x1->x2 is the same as x3=>x4
    requires x1!=x2, and x3!=x4
    """
    # Pick an axis along which the first segment actually varies.
    axis = 0 if x1[0] != x2[0] else 1
    assert x1[axis] != x2[axis]
    assert x3[axis] != x4[axis]
    # Directions agree iff both segments increase (or both decrease) on that axis.
    return (x1[axis] < x2[axis]) == (x3[axis] < x4[axis])
def min_rl(din):
    """
    A MIN function should "go high" when any of its
    inputs arrives. Thus, OR gates are all that is
    needed for its implementation.
    Input: a list of 1-bit WireVectors
    Output: a 1-bit WireVector
    """
    # Recursively OR the head with the MIN of the tail; a single wire is
    # returned unchanged.
    if len(din) > 1:
        return din[0] | min_rl(din[1:])
    return din[0]
def fetch(u, data, indices, indptr, lmbda):
    """Fetch row ``u``'s values and column indices from a CSR triplet.

    When ``lmbda <= 0`` empty arrays are returned and ``is_skip`` is 1;
    an empty row (no stored entries) also leaves ``is_skip`` at 1.
    Returns (val, ind, is_skip).
    """
    skip = 1
    if lmbda > 0:
        lo, hi = indptr[u], indptr[u + 1]
        val, ind = data[lo:hi], indices[lo:hi]
        if hi > lo:
            skip = 0
    else:
        val = np.empty(0, dtype=data.dtype)
        ind = np.empty(0, dtype=np.int32)
    return val, ind, skip
from typing import List
import re
def _gcc_parse_params(gcc: Gcc) -> List[Option]:
    """Parse the param help string from the GCC binary to find options.

    Runs ``gcc --help=param -Q`` and parses each line into either a
    GccParamEnumOption or a GccParamIntOption, returning them sorted by name.
    """
    # Pretty much identical to _gcc_parse_optimize
    logger.debug("Parsing GCC param space")
    result = gcc("--help=param", "-Q", timeout=60)
    out = result.split("\n")[1:]
    # Four formats observed across GCC versions:
    #   --param=name=[a|b]          (enum)
    #   --param=name=<min,max>      (bounded int)
    #   --param=name=               (unbounded int)
    #   name default N minimum N maximum N   (older GCC layout)
    param_enum_pat = re.compile("--param=([-a-zA-Z0-9]+)=\\[([-A-Za-z_\\|]+)\\]")
    param_interval_pat = re.compile("--param=([-a-zA-Z0-9]+)=<(-?[0-9]+),([0-9]+)>")
    param_number_pat = re.compile("--param=([-a-zA-Z0-9]+)=")
    param_old_interval_pat = re.compile(
        "([-a-zA-Z0-9]+)\\s+default\\s+(-?\\d+)\\s+minimum\\s+(-?\\d+)\\s+maximum\\s+(-?\\d+)"
    )
    params = {}
    def add_gcc_param_enum(name: str, values: List[str]):
        # Enum param.
        opt = params.get(name)
        assert not opt
        params[name] = GccParamEnumOption(name, values)
    def add_gcc_param_int(name: str, min: int, max: int):
        # Int flag.
        opt = params.get(name)
        assert not opt
        params[name] = GccParamIntOption(name, min, max)
    def is_int(s: str) -> bool:
        try:
            int(s)
            return True
        except ValueError:
            return False
    def parse_line(line: str):
        # Classify one help line against the patterns above; unknown lines
        # are logged and skipped.
        bits = line.split()
        if not bits:
            return
        # TODO(hugh): Not sure what the correct behavior is there.
        if len(bits) <= 1:
            return
        spec = bits[0]
        default = bits[1]
        # --param=name=[a|b]
        m = param_enum_pat.fullmatch(spec)
        if m:
            name = m.group(1)
            values = m.group(2).split("|")
            assert not default or default in values
            add_gcc_param_enum(name, values)
            return
        # --param=name=<min,max>
        m = param_interval_pat.fullmatch(spec)
        if m:
            name = m.group(1)
            min = int(m.group(2))
            max = int(m.group(3))
            if is_int(default):
                assert not default or min <= int(default) <= max
            add_gcc_param_int(name, min, max)
            return
        # --param=name=
        m = param_number_pat.fullmatch(spec)
        if m:
            name = m.group(1)
            min = 0
            # NOTE(review): `2 << 31 - 1` parses as `2 << 30` == 2**31 due to
            # operator precedence; if INT_MAX was intended this should be
            # `(1 << 31) - 1` -- confirm before changing.
            max = 2 << 31 - 1
            if is_int(default):
                dflt = int(default)
                # Widen the lower bound so the reported default is in range.
                min = min if dflt >= min else dflt
            add_gcc_param_int(name, min, max)
            return
        # name default num minimum num maximum num
        m = param_old_interval_pat.fullmatch(line)
        if m:
            name = m.group(1)
            default = int(m.group(2))
            min = int(m.group(3))
            max = int(m.group(4))
            if min <= default <= max:
                # For now we will only consider fully described params
                add_gcc_param_int(name, min, max)
            return
        logger.warning("Unknown GCC param flag spec, '%s'", line)
    # breakpoint()
    for line in out:
        parse_line(line.strip())
    return list(map(lambda x: x[1], sorted(list(params.items()))))
def pgrrec(CONST_STRING, lon, lat, alt, re, f):
    """pgrrec(ConstSpiceChar * CONST_STRING, SpiceDouble lon, SpiceDouble lat, SpiceDouble alt, SpiceDouble re, SpiceDouble f)"""
    # Thin pass-through to the SWIG-generated CSPICE binding; presumably
    # converts planetographic (lon, lat, alt) coordinates to rectangular given
    # equatorial radius `re` and flattening `f` -- see the CSPICE pgrrec_ docs.
    return _cspyce0.pgrrec(CONST_STRING, lon, lat, alt, re, f)
from operator import add
def set(isamAppliance, name, properties, attributes=None, description=None, type="JavaScript", new_name=None,
        check_mode=False, force=False):
    """
    Creating or Modifying a JavaScript PIP

    Upsert semantics: searches for an existing PIP by name and either adds a
    new one or updates the existing one.
    NOTE(review): this function shadows the builtin ``set`` and the parameter
    ``type`` shadows the builtin -- kept as-is for caller compatibility.
    """
    ret_obj = search(isamAppliance, name=name)
    id = ret_obj['data']
    if id == {}:
        # If no id was found, Force the add
        return add(isamAppliance, name=name, properties=properties, attributes=attributes, description=description,
                   type=type, check_mode=check_mode, force=force)
    else:
        # Update PIP
        return update(isamAppliance, name=name, properties=properties, attributes=attributes, description=description,
                      type=type, new_name=new_name, check_mode=check_mode, force=force)
import logging
def init_model(model, opt, argv):
    """select the network initialization method"""
    # Read the option once; absent attribute behaves like an unknown value.
    weight_init = getattr(opt, 'weight_init', None)
    if weight_init == 'xavier':
        network_weight_xavier_init(model)
    elif weight_init == 'MSRAPrelu':
        network_weight_MSRAPrelu_init(model)
    elif weight_init == 'stupid':
        network_weight_stupid_init(model)
    elif weight_init == 'zero':
        network_weight_zero_init(model)
    elif weight_init == '01':
        network_weight_01_init(model)
    elif weight_init == 'custom':
        # Model supplies its own initializer.
        assert hasattr(model, 'init_parameters')
        model.init_parameters()
    elif weight_init == 'None':
        logging.info('Warning!!! model loaded without initialization !')
    else:
        raise ValueError('Unknown weight_init')
    # Optionally override BatchNorm hyper-parameters across the whole model.
    bn_momentum = getattr(opt, 'bn_momentum', None)
    if bn_momentum is not None:
        for layer in model.modules():
            if isinstance(layer, nn.BatchNorm2d):
                layer.momentum = bn_momentum
    bn_eps = getattr(opt, 'bn_eps', None)
    if bn_eps is not None:
        for layer in model.modules():
            if isinstance(layer, nn.BatchNorm2d):
                layer.eps = bn_eps
    return model
def get_bash_commands(root_parser, root_prefix, choice_functions=None):
    """
    Recursive subcommand parser traversal, returning lists of information on
    commands (formatted for output to the completions script).
    printing bash helper syntax.
    Returns:
    subparsers : list of subparsers for each parser
    option_strings : list of options strings for each parser
    compgens : list of shtab `.complete` functions corresponding to actions
    choices : list of choices corresponding to actions
    nargs : list of number of args allowed for each action (if not 0 or 1)
    """
    # Map Choice types to their bash completion functions, with optional
    # caller-supplied overrides.
    choice_type2fn = {k: v["bash"] for k, v in CHOICE_FUNCTIONS.items()}
    if choice_functions:
        choice_type2fn.update(choice_functions)
    def get_option_strings(parser):
        """Flattened list of all `parser`'s option strings."""
        return sum(
            (opt.option_strings for opt in parser._get_optional_actions() if opt.help != SUPPRESS),
            [],
        )
    def recurse(parser, prefix):
        """recurse through subparsers, appending to the return lists"""
        subparsers = []
        option_strings = []
        compgens = []
        choices = []
        nargs = []
        # temp lists for recursion results
        # (kept separate so the current parser's entries come before all of
        # its descendants' entries in the final output)
        sub_subparsers = []
        sub_option_strings = []
        sub_compgens = []
        sub_choices = []
        sub_nargs = []
        # positional arguments
        discovered_subparsers = []
        for i, positional in enumerate(parser._get_positional_actions()):
            if positional.help == SUPPRESS:
                continue
            if hasattr(positional, "complete"):
                # shtab `.complete = ...` functions
                compgens.append(u"{}_pos_{}_COMPGEN={}".format(
                    prefix, i, complete2pattern(positional.complete, "bash", choice_type2fn)))
            if positional.choices:
                # choices (including subparsers & shtab `.complete` functions)
                log.debug("choices:{}:{}".format(prefix, sorted(positional.choices)))
                this_positional_choices = []
                for choice in positional.choices:
                    if isinstance(choice, Choice):
                        # append special completion type to `compgens`
                        # NOTE: overrides `.complete` attribute
                        log.debug("Choice.{}:{}:{}".format(choice.type, prefix, positional.dest))
                        compgens.append(u"{}_pos_{}_COMPGEN={}".format(
                            prefix, i, choice_type2fn[choice.type]))
                    elif isinstance(positional.choices, dict):
                        # subparser, so append to list of subparsers & recurse
                        log.debug("subcommand:%s", choice)
                        public_cmds = get_public_subcommands(positional)
                        if choice in public_cmds:
                            discovered_subparsers.append(str(choice))
                            this_positional_choices.append(str(choice))
                            (
                                new_subparsers,
                                new_option_strings,
                                new_compgens,
                                new_choices,
                                new_nargs,
                            ) = recurse(
                                positional.choices[choice],
                                prefix + "_" + wordify(choice),
                            )
                            sub_subparsers.extend(new_subparsers)
                            sub_option_strings.extend(new_option_strings)
                            sub_compgens.extend(new_compgens)
                            sub_choices.extend(new_choices)
                            sub_nargs.extend(new_nargs)
                        else:
                            log.debug("skip:subcommand:%s", choice)
                    else:
                        # simple choice
                        this_positional_choices.append(str(choice))
                if this_positional_choices:
                    choices.append(u"{}_pos_{}_choices='{}'".format(
                        prefix, i, " ".join(this_positional_choices)))
                # skip default `nargs` values
                if positional.nargs not in (None, "1", "?"):
                    nargs.append(u"{}_pos_{}_nargs={}".format(prefix, i, positional.nargs))
        if discovered_subparsers:
            subparsers.append(u"{}_subparsers=('{}')".format(prefix,
                                                             "' '".join(discovered_subparsers)))
            log.debug("subcommands:{}:{}".format(prefix, discovered_subparsers))
        # optional arguments
        option_strings.append(u"{}_option_strings=('{}')".format(
            prefix, "' '".join(get_option_strings(parser))))
        for optional in parser._get_optional_actions():
            if optional == SUPPRESS:
                continue
            for option_string in optional.option_strings:
                if hasattr(optional, "complete"):
                    # shtab `.complete = ...` functions
                    compgens.append(u"{}_{}_COMPGEN={}".format(
                        prefix, wordify(option_string),
                        complete2pattern(optional.complete, "bash", choice_type2fn)))
                if optional.choices:
                    # choices (including shtab `.complete` functions)
                    this_optional_choices = []
                    for choice in optional.choices:
                        # append special completion type to `compgens`
                        # NOTE: overrides `.complete` attribute
                        if isinstance(choice, Choice):
                            log.debug("Choice.{}:{}:{}".format(choice.type, prefix, optional.dest))
                            compgens.append(u"{}_{}_COMPGEN={}".format(
                                prefix, wordify(option_string), choice_type2fn[choice.type]))
                        else:
                            # simple choice
                            this_optional_choices.append(str(choice))
                    if this_optional_choices:
                        choices.append(u"{}_{}_choices='{}'".format(
                            prefix, wordify(option_string), " ".join(this_optional_choices)))
                # Check for nargs.
                if optional.nargs is not None and optional.nargs != 1:
                    nargs.append(u"{}_{}_nargs={}".format(prefix, wordify(option_string),
                                                          optional.nargs))
        # append recursion results
        subparsers.extend(sub_subparsers)
        option_strings.extend(sub_option_strings)
        compgens.extend(sub_compgens)
        choices.extend(sub_choices)
        nargs.extend(sub_nargs)
        return subparsers, option_strings, compgens, choices, nargs
    return recurse(root_parser, root_prefix)
import math
def shoulders_up(x, y, max_angle=10):
    """
    1:"Neck",
    2:"RShoulder",
    5:"LShoulder".
    looks at line from left shoulder to neck, and
    line from right shoulder to neck
    if either are not straight returns 1
    if both are flat (slope of 0 or close to 0) returns 1
    """
    # Angle (degrees) of each shoulder-to-neck segment.
    # NOTE(review): the comparison below is signed, so steeply *negative*
    # angles also pass the <= max_angle test -- confirm that is intended.
    left_deg = math.degrees(math.atan2(y[5] - y[1], x[5] - x[1]))
    right_deg = math.degrees(math.atan2(y[1] - y[2], x[1] - x[2]))
    # Slope of the line joining the two shoulders.
    shoulder_slope = (y[5] - y[2]) / (x[5] - x[2])
    posture_ok = (left_deg <= max_angle
                  and right_deg <= max_angle
                  and shoulder_slope <= 0.25)
    return left_deg, right_deg, (0.0 if posture_ok else 1.0)
def str(x: i32) -> str:
    """
    Return the string representation of an integer `x`.
    """
    # NOTE: intentionally shadows the builtin ``str`` -- this is an
    # LPython-style intrinsic implementation restricted to 32-bit ints,
    # hence the explicit local declarations below.
    if x == 0:
        return '0'
    result: str
    result = ''
    if x < 0:
        result += '-'
        x = -x
    rev_result: str
    rev_result = ''
    rev_result_len: i32
    rev_result_len = 0
    pos_to_str: list[str]
    pos_to_str = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
    while x > 0:
        # x - floordiv(x, 10)*10 == x % 10: extract the last decimal digit.
        rev_result += pos_to_str[x - _lpython_floordiv(x, 10)*10]
        rev_result_len += 1
        x = _lpython_floordiv(x, 10)
    pos: i32
    # Digits were accumulated least-significant first; emit them reversed.
    for pos in range(rev_result_len - 1, -1, -1):
        result += rev_result[pos]
    return result
def grad_spsp_mult_entries_x_reverse(b_out, entries_a, indices_a, entries_x, indices_x, N):
    """ Now we wish to do the gradient with respect to the X matrix in AX=B
    Instead of doing it all out again, we just use the previous grad function on the transpose equation X^T A^T = B^T
    """
    entries_b, indices_b = b_out
    # Transpose the index lists of A, X and B.
    indices_a_t = transpose_indices(indices_a)
    indices_x_t = transpose_indices(indices_x)
    indices_b_t = transpose_indices(indices_b)
    # Build the vjp for A'X'=B' under the substitution A'=X^T, X'=A^T, B'=B^T.
    vjp_transposed = grad_spsp_mult_entries_a_reverse(
        (entries_b, indices_b_t), entries_x, indices_x_t, entries_a, indices_a_t, N)
    # The backprop vector is simply routed through the transposed vjp.
    return lambda v: vjp_transposed(v)
import os
def start_replica_cmd(builddir, replica_id):
    """
    Return a command that starts an skvbc replica when passed to
    subprocess.Popen.
    Note each arguments is an element in a list.
    """
    status_timer_ms = "500"
    view_change_timeout_ms = "10000"
    replica_binary = os.path.join(builddir, "tests", "simpleKVBC",
                                  "TesterReplica", "skvbc_replica")
    # Time service is opt-in via environment variable (case-insensitive).
    ts_env = os.environ.get('TIME_SERVICE_ENABLED', default="FALSE")
    time_service_enabled = "1" if ts_env.lower() == "true" else "0"
    batch_size = "1"
    logging_props = os.path.join(builddir, "tests", "simpleKVBC",
                                 "scripts", "logging.properties")
    return [replica_binary,
            "-k", KEY_FILE_PREFIX,
            "-i", str(replica_id),
            "-s", status_timer_ms,
            "-v", view_change_timeout_ms,
            "-l", logging_props,
            "-f", time_service_enabled,
            "-b", "2",
            "-q", batch_size,
            "-h", "3",
            "-j", str(DB_CHECKPOINT_WIN_SIZE),
            "-o", builddir + "/operator_pub.pem"]
def execute_until_false(method, interval_s):
    """Executes a method forever until the method returns a false value.
    Args:
      method: The callable to execute.
      interval_s: The number of seconds to start the execution after each method
        finishes.
    Returns:
      An Interval object.
    """
    repeating_timer = Interval(method, stop_if_false=True)
    repeating_timer.start(interval_s)
    return repeating_timer
def vecnorm(a):
    """Return a/|a|"""
    # Scale `a` to unit length using the package's Euclidean norm helper;
    # no zero-vector guard -- division by zero propagates to the caller.
    return a / mdamath.norm(a)
def count_qubits(operator):
    """Compute the minimum number of qubits on which operator acts.
    Args:
    operator: FermionOperator, QubitOperator, DiagonalCoulombHamiltonian,
    or PolynomialTensor.
    Returns:
    num_qubits (int): The minimum number of qubits on which operator acts.
    Raises:
    TypeError: Operator of invalid type.
    """
    # Handle FermionOperator.
    if isinstance(operator, FermionOperator):
        # Track the highest mode index touched by any ladder operator.
        num_qubits = 0
        for term in operator.terms:
            for ladder_operator in term:
                if ladder_operator[0] + 1 > num_qubits:
                    num_qubits = ladder_operator[0] + 1
        return num_qubits
    # Handle QubitOperator.
    elif isinstance(operator, QubitOperator):
        num_qubits = 0
        for term in operator.terms:
            if term:
                # Terms are index-sorted, so the last factor has the max qubit.
                if term[-1][0] + 1 > num_qubits:
                    num_qubits = term[-1][0] + 1
        return num_qubits
    # Handle DiagonalCoulombHamiltonian
    elif isinstance(operator, DiagonalCoulombHamiltonian):
        return operator.one_body.shape[0]
    # Handle PolynomialTensor
    elif isinstance(operator, PolynomialTensor):
        return operator.n_qubits
    # Raise for other classes.
    else:
        raise TypeError('Operator of invalid type.')
def datenum_to_date(date_num):
    """Transform date_num to datetime object.
    Returns pd.NaT on invalid input"""
    # MATLAB datenum counts days from year 0; Python's datetime starts at
    # year 1, hence the 366-day shift plus one extra subtracted day.
    try:
        whole_seconds = round(dt.timedelta(days=date_num - 366).total_seconds())
        base = dt.datetime(1, 1, 1)
        return base + dt.timedelta(seconds=whole_seconds) - dt.timedelta(days=1)
    except OverflowError:
        # Out-of-range datenums map to pandas' missing-timestamp sentinel.
        return pd.NaT
def save_ground_truth_part(name, tuple_path, mean, sem, std, sestd):
    """Saves a ground truth part to strings.
    This is meant to be called with outputs of
    `nest.flatten_with_tuple_paths(ground_truth_mean)`.
    Args:
    name: Python `str`. Name of the sample transformation.
    tuple_path: Tuple path of the part of the ground truth we're saving. See
    `nest.flatten_with_tuple_paths`.
    mean: Ground truth mean, or `None` if it is absent.
    sem: Ground truth stadard error of the mean, or `None` if it is absent.
    std: Ground truth standard deviation, or `None` if it is absent.
    sestd: Ground truth standard error of the standard deviation, or `None` if
    it is absent.
    Returns:
    array_strs: Python list of strings, representing the encoded arrays (that
    were present). Typically these would be joined with a newline and written
    out to a module, which can then be passed to `load_ground_truth_part`.
    """
    names = _get_global_variable_names(name, tuple_path)
    # Encode each present statistic under its corresponding variable name,
    # preserving the (mean, sem, std, sestd) output order.
    array_strs = [
        array_to_source.array_to_source(var_name, value)
        for var_name, value in zip(names, (mean, sem, std, sestd))
        if value is not None
    ]
    return array_strs
def parse_cpe(cpe):
    """
    Split the given CPE name into its components.
    :param cpe: CPE name.
    :type cpe: str
    :returns: CPE components.
    :rtype: list(str)
    """
    ver = get_cpe_version(cpe)
    if ver == "2.2":
        # 'cpe:/' prefix (5 chars) stripped; unquote each colon-separated part.
        parsed = [cpe22_unquote(x.strip()) for x in cpe[5:].split(":")]
        if len(parsed) < 11:
            # Pad to 11 components with "*" wildcards; note extend() over a
            # string of '*' appends one '*' element per character -- works here
            # because each character is itself the wanted "*" component.
            parsed.extend( "*" * (11 - len(parsed)) )
    elif ver == "2.3":
        # 'cpe:2.3:' prefix (8 chars) stripped; 2.3 names must be exactly 11 parts.
        parsed = [x.strip() for x in _cpe23_split.split(cpe[8:])]
        if len(parsed) != 11:
            raise ValueError("Not a valid CPE 2.3 name: %s" % cpe)
    else:
        raise ValueError("Not a valid CPE 2.2 or 2.3 name: %s" % cpe)
    return parsed
def draw_points(xs, ys, covs, M):
    """
    Resample a set of points M times, adding noise according to their covariance matrices.
    Returns
    -------
    x_samples, y_samples : np.array
        Every column j, is x[j] redrawn M times.
        Has M rows, and every row is a realization of xs or ys.
    """
    # store the samples as follows
    # col0 = all resamplings of x0
    # -> each row is a different realization of our 75 sightlines
    # rescale data to avoid numerical problems
    factor_x = 1 / np.std(xs)
    factor_y = 1 / np.std(ys)
    xyr, covr = rescale.rescale_data(
        np.column_stack((xs, ys)), covs, factor_x, factor_y
    )
    N = len(xyr)
    x_samples = np.zeros((M, N))
    y_samples = np.zeros((M, N))
    # Draw M correlated (x, y) samples per point from its 2x2 covariance.
    for j in range(N):
        samples = RNG.multivariate_normal(mean=xyr[j], cov=covr[j], size=M)
        x_samples[:, j] = samples[:, 0]
        y_samples[:, j] = samples[:, 1]
    # unscale the data again before returning
    return x_samples / factor_x, y_samples / factor_y
def k_folds_split(raw_indexes, n_splits, labels=default_pars.validation_pars_labels,
                  shuffle=default_pars.validation_pars_shuffle, random_state=default_pars.random_state,
                  return_original_indexes=default_pars.validation_pars_return_original_indexes):
    """Splits a raw set of indexes into k train and k dev subsets using k-folding.
    There are k (given by 'n_splits') folds. Each of the folds uses the entire raw set of indexes (either for train or
    for dev). The k dev sets do not overlap, and together they cover the entire raw set. For each fold, the train set is
    made by all examples that are not in the dev set. Hence all train sets of different folds do overlap.
    Parameters
    ----------
    raw_indexes : array_like
        Indexes of data (e.g. data.index, assuming data is a pandas dataframe).
    n_splits : int
        Number of folds.
    labels : list or None
        If not None, the k-folding is stratified; if None, labels are ignored.
    shuffle : bool
        True to shuffle indexes before splitting; False to keep original order.
    random_state : int or None
        Random state for shuffling; Ignored if 'shuffle' is False (in which case, 'random_state' can be set to None).
    return_original_indexes : bool
        True to return original indexes (as given by 'raw_indexes'); False to return new integer indexes (that go from 0
        to the number of elements in raw_indexes).
    Returns
    -------
    parts : list
        K different parts (folds). Each part contains a tuple with:
        (array of indexes in train set for this part, array of indexes in dev set for this part)
    """
    raw_indexes_array = np.array(raw_indexes)
    # To avoid warnings, impose random_state None if there is no shuffling.
    if not shuffle:
        random_state = None
    # Split a data set into n parts without overlap, and optionally stratified.
    if labels is None:
        split_method = KFold
    else:
        split_method = StratifiedKFold
    parts = list(split_method(n_splits=n_splits, random_state=random_state, shuffle=shuffle).
                 split(raw_indexes_array, labels))
    if return_original_indexes:
        # The splitters yield positional indexes; map them back to the
        # caller-supplied labels.
        parts = [(raw_indexes_array[part[0]], raw_indexes_array[part[1]]) for part in parts]
    return parts
def element_wise_entropy(px):
    """
    Returns a numpy array with element wise entropy calculated as -p_i*log_2(p_i).
    Params
    ------
    px (np.array)
        Array of individual probabilities, i.e. a probability vector or distribution.
    Returns
    -------
    entropy (np.array)
        Array of element-wise entropies.
    """
    if isinstance(px, list):
        px = np.array(px)
    # Work on a copy so the caller's array is never mutated.
    result = px.copy()
    # Zero-probability entries contribute zero entropy and would make log2
    # blow up, so only transform the nonzero support.
    support = np.nonzero(result)
    result[support] = -result[support] * np.log2(result[support])
    return result
def score_page(preds, truth):
    """
    Scores a single page.
    Args:
        preds: prediction string of labels and center points.
        truth: ground truth string of labels and bounding boxes.
    Returns:
        Tuple of (mask of unmatched predictions, dict with true/false positive
        and false negative counts for the page).
    """
    tp = 0
    fp = 0
    fn = 0
    # Field layout within each whitespace-separated record.
    truth_indices = {
        'label': 0,
        'X': 1,
        'Y': 2,
        'Width': 3,
        'Height': 4
    }
    preds_indices = {
        'label': 0,
        'X': 1,
        'Y': 2
    }
    # Degenerate pages: missing predictions and/or missing ground truth.
    if pd.isna(truth) and pd.isna(preds):
        return np.array([]), {'tp': tp, 'fp': fp, 'fn': fn}
    if pd.isna(truth):
        # Every prediction on an empty page is a false positive.
        fp += len(preds.split(' ')) // len(preds_indices)
        return np.array([]), {'tp': tp, 'fp': fp, 'fn': fn}
    if pd.isna(preds):
        # Every ground-truth box with no predictions is a false negative.
        fn += len(truth.split(' ')) // len(truth_indices)
        return np.array([]), {'tp': tp, 'fp': fp, 'fn': fn}
    truth = truth.split(' ')
    if len(truth) % len(truth_indices) != 0:
        raise ValueError('Malformed solution string')
    # Strided slices pull one field per record out of the flat token list.
    truth_label = np.array(truth[truth_indices['label']::len(truth_indices)])
    truth_xmin = np.array(truth[truth_indices['X']::len(truth_indices)]).astype(float)
    truth_ymin = np.array(truth[truth_indices['Y']::len(truth_indices)]).astype(float)
    truth_xmax = truth_xmin + np.array(truth[truth_indices['Width']::len(truth_indices)]).astype(float)
    truth_ymax = truth_ymin + np.array(truth[truth_indices['Height']::len(truth_indices)]).astype(float)
    preds = preds.split(' ')
    if len(preds) % len(preds_indices) != 0:
        raise ValueError('Malformed prediction string')
    preds_label = np.array(preds[preds_indices['label']::len(preds_indices)])
    preds_x = np.array(preds[preds_indices['X']::len(preds_indices)]).astype(float)
    preds_y = np.array(preds[preds_indices['Y']::len(preds_indices)]).astype(float)
    preds_unused = np.ones(len(preds_label)).astype(bool)
    ok_array = []
    for xmin, xmax, ymin, ymax, label in zip(truth_xmin, truth_xmax, truth_ymin, truth_ymax, truth_label):
        # Matching = point inside box & character same & prediction not already used
        matching = (xmin < preds_x) & (xmax > preds_x) & (ymin < preds_y) & (ymax > preds_y) & (preds_label == label) & preds_unused
        if matching.sum() == 0:
            fn += 1
        else:
            tp += 1
            # Greedily consume the first matching prediction.
            preds_unused[np.argmax(matching)] = False
    # Any prediction never matched to a box counts as a false positive.
    fp += preds_unused.sum()
    return preds_unused, {'tp': tp, 'fp': fp, 'fn': fn}
import re
def _validate_eida_token(token):
"""
Just a basic check if the string contains something that looks like a PGP
message
"""
if re.search(pattern='BEGIN PGP MESSAGE', string=token,
flags=re.IGNORECASE):
return True
return False | 746fbd011b38abab43be983a1a054505526dcf78 | 29,725 |
def dn_histogram_mode_5(y, y_min, y_max):
    """
    Mode of z-scored distribution (5-bin histogram)
    """
    # Thin wrapper: delegates to the generic helper with a fixed bin count of 5.
    return histogram_mode(y, y_min, y_max, 5)
def _make_element(spec, parent, attributes=None):
    """Helper function to generate the right kind of Element given a spec."""
    # Worldbody elements, and sites directly under a (world)body, can have
    # other models attached to them.
    if (spec.name == constants.WORLDBODY
            or (spec.name == constants.SITE
                and (parent.tag == constants.BODY
                     or parent.tag == constants.WORLDBODY))):
        return _AttachableElement(spec, parent, attributes)
    elif isinstance(parent, _AttachmentFrame):
        return _AttachmentFrameChild(spec, parent, attributes)
    elif spec.name == constants.DEFAULT:
        return _DefaultElement(spec, parent, attributes)
    elif spec.name == constants.ACTUATOR:
        return _ActuatorElement(spec, parent, attributes)
    else:
        # Fallback: generic element with no special attachment semantics.
        return _ElementImpl(spec, parent, attributes)
import argparse
def create_hook_mock() -> TmTcHookBase:
    """Create simple minimal hook mock using the MagicMock facilities by unittest
    :return:
    """
    tmtc_hook_base = TmTcHookBase()
    # Stub out the globals hooks so they are no-ops that report success (0).
    tmtc_hook_base.add_globals_pre_args_parsing = MagicMock(return_value=0)
    tmtc_hook_base.add_globals_post_args_parsing = MagicMock(return_value=0)
    # Argument parsing always yields a fixed namespace: service 17, idle mode.
    tmtc_hook_base.custom_args_parsing = MagicMock(
        return_value=argparse.Namespace(service=17, mode=CoreModeList.IDLE)
    )
    return tmtc_hook_base
def read_sounding(timestamp, index_col=0, caching=True, **kws):
    """read wyoming sounding with optional caching (default)

    Downloads (or reads from cache) the sounding table for `timestamp`,
    drops incomplete rows and the trailing summary row, and returns a
    float-typed DataFrame.
    """
    if caching:
        if in_cache(timestamp, **kws):
            data = cache_read(timestamp, **kws)
            # TODO: index col swapping
            return data
    # Rows 0-6 and 8-9 are header/units lines; row 7 holds the column names.
    skiprows = (0, 1, 2, 3, 4, 5, 6, 8, 9)
    url = sounding_url(timestamp)
    # `sep=r'\s+'` replaces the deprecated `delim_whitespace=True` flag.
    data = pd.read_table(url, sep=r'\s+', index_col=index_col,
                         skiprows=skiprows).dropna()
    # `np.float` was removed in NumPy 1.20+/1.24; the builtin `float` is the
    # exact equivalent (both mean float64 here).
    data = data.drop(data.tail(1).index).astype(float)
    data.index = data.index.astype(float)
    if caching:
        cache_write(timestamp, data, **kws)
    return data
def nlf_css(parser, token):
    """Newsletter friendly CSS

    Template tag: parses ``key="value"`` arguments into a css dict (with the
    original argument order kept in ``css_order``), then captures the enclosed
    template nodes up to ``end_nlf_css``.
    """
    args = token.split_contents()
    css = {}
    css_order = []
    for item in args[1:]:
        tag, value = item.split("=")
        tag, value = tag.strip('"'), value.strip('"')
        css[tag] = value
        css_order.append(tag)
    nodelist = parser.parse(('end_nlf_css',))
    # Consume the closing end_nlf_css token so parsing resumes after it.
    token = parser.next_token()
    return NewsletterFriendlyCssNode(nodelist, css, css_order)
def data_context_connectivity_context_connectivity_serviceuuid_requested_capacity_bandwidth_profile_peak_information_rate_put(uuid, tapi_common_capacity_value=None):  # noqa: E501
    """data_context_connectivity_context_connectivity_serviceuuid_requested_capacity_bandwidth_profile_peak_information_rate_put
    creates or updates tapi.common.CapacityValue # noqa: E501
    :param uuid: Id of connectivity-service
    :type uuid: str
    :param tapi_common_capacity_value: tapi.common.CapacityValue to be added or updated
    :type tapi_common_capacity_value: dict | bytes
    :rtype: None
    """
    # Generated server stub: deserializes the JSON body into the model object,
    # but the actual update logic is not implemented yet.
    if connexion.request.is_json:
        tapi_common_capacity_value = TapiCommonCapacityValue.from_dict(connexion.request.get_json())  # noqa: E501
    return 'do some magic!'
import logging
def _get_default_dataset_statistics(
    statistics: statistics_pb2.DatasetFeatureStatisticsList
) -> statistics_pb2.DatasetFeatureStatistics:
  """Gets the DatasetFeatureStatistics to use for validation.
  If there is a single DatasetFeatureStatistics, this function returns that. If
  there are multiple DatasetFeatureStatistics, this function attempts to find
  the one that corresponds to the default slice. If found, this function returns
  that. If not found, this function raises an error.
  Args:
    statistics: A DatasetFeatureStatisticsList protocol buffer.
  Returns:
    A DatasetFeatureStatistics protocol buffer to use for validation.
  Raises:
    ValueError: If the input statistics proto contains multiple datasets, none
      of which corresponds to the default slice.
  """
  if len(statistics.datasets) == 1:
    return statistics.datasets[0]
  # If there are multiple datasets, attempt to find the dataset for the
  # default slice (i.e., slice for all examples) from among the datasets.
  for dataset in statistics.datasets:
    if dataset.name == constants.DEFAULT_SLICE_KEY:
      logging.warning('Multiple datasets found in statistics. Using the '
                      'default slice dataset.')
      return dataset
  # If there are multiple datasets, but the default slice is not found, raise an
  # error.
  raise ValueError('Only statistics proto with one dataset or the default '
                   'slice (i.e., "All Examples" slice) is currently supported.')
from typing import Callable
from re import A
def foldr(_folder: Callable[[A, B], B], _init: B, _linked_list: LinkedList[A]) -> B:
    """Right-fold `_linked_list` with `_folder`, seeded by `_init`."""
    content = _linked_list.content
    if content is None:
        return _init
    head, tail = content
    # Recurse on the tail first, then combine with the head (right fold).
    return _folder(head, foldr(_folder, _init, tail))
def get_per_lane_sample_dist_plot(sample_data: pd.DataFrame) -> dict:
    """
    A function for returning sample distribution plots

    :param sample_data: A Pandas DataFrame with columns 'Lane', 'Sample_ID',
                        'Sample_Project' and 'PF Clusters'
    :returns: A dictionary mapping each lane id to a payload with 'labels'
              (sample ids in the lane) and 'datasets' (one entry per project
              with that project's PF cluster counts aligned to the labels)
    :raises ValueError: If the expected columns are missing or grouping fails
    """
    try:
        lane_plots = dict()
        for lane_id, l_data in sample_data.groupby('Lane'):
            lane_samples = l_data['Sample_ID'].values.tolist()
            datasets = list()
            for project_id, p_data in l_data.groupby('Sample_Project'):
                # Align each project's counts to the full lane sample list so
                # every dataset has one value per sample (0 when absent).
                pf_counts = \
                    p_data.\
                    set_index('Sample_ID')['PF Clusters'].\
                    reindex(lane_samples).\
                    fillna(0).\
                    values.tolist()
                datasets.append({
                    "label": project_id,
                    "data": pf_counts})
            lane_plots[lane_id] = {
                "labels": lane_samples,
                "datasets": datasets}
        return lane_plots
    except Exception as e:
        # Chain the original exception so the underlying cause is preserved.
        raise ValueError(
            "Failed to get sample distribution data, error: {0}".format(e)) from e
def mark_volatile(obj):
    """DEPRECATED(Jiayuan Mao): mark_volatile has been deprecated and will be removed by 10/23/2018; please use torch.no_grad instead."""
    # Structure-preserving map over obj, applying _mark_volatile to each leaf.
    marked = stmap(_mark_volatile, obj)
    return marked
def list_keys(bucket):
    """
    Lists all the keys in a bucket.

    :param bucket: (string) A bucket name.
    :return: (string list) Keys in the bucket.
    :raises: Whatever _check_bucket raises for a missing/invalid bucket.
    """
    _check_bucket(bucket)
    bucket_path = _bucket_path(bucket)
    # Each entry directly under the bucket directory is one key.
    return [key_path.name for key_path in bucket_path.iterdir()]
def h6(content, accesskey:str ="", class_: str ="", contenteditable: str ="",
       data_key: str="", data_value: str="", dir_: str="", draggable: str="",
       hidden: str="", id_: str="", lang: str="", spellcheck: str="",
       style: str="", tabindex: str="", title: str="", translate: str=""):
    """
    Build and return an <h6> heading element string.\n
    `content`: The text of the heading.\n
    The remaining keyword arguments correspond to the standard HTML global
    attributes and are rendered via `global_args`.
    """
    attributes = global_args(accesskey, class_, contenteditable, data_key,
                             data_value, dir_, draggable, hidden, id_, lang,
                             spellcheck, style, tabindex, title, translate)
    return f"<h6 {attributes}>{content}</h6>\n"
def extractUniversesWithMeaning(item):
    """Parse a release title into a release message for known OEL series."""
    title = item['title']
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(title)
    # Skip previews and titles with neither a chapter nor a volume number.
    if not (chp or vol) or 'preview' in title.lower():
        return None
    for series in ('Angel of Death', 'In The Name Of God'):
        if series in title:
            return buildReleaseMessageWithType(item, series, vol, chp, frag=frag, postfix=postfix, tl_type='oel')
    return False
def get_provenance(message):
    """Given a message with results, find the source of the edges"""
    # qedge id -> {source -> number of bound knowledge-graph edges}
    provenance_counts = defaultdict(lambda: defaultdict(int))
    knowledge_edges = message['message']['knowledge_graph']['edges']
    for result in message['message']['results']:
        for qg_edge, bound_edges in result['edge_bindings'].items():
            for bound in bound_edges:
                for attribute in knowledge_edges[bound['id']]['attributes']:
                    if attribute['attribute_type_id'] == 'MetaInformation:Provenance':
                        provenance_counts[qg_edge][attribute['value']] += 1
    # Flatten the nested counts into (edge, source, count) rows.
    rows = [(qg_edge, source, count)
            for qg_edge, per_source in provenance_counts.items()
            for source, count in per_source.items()]
    return pd.DataFrame(rows, columns=["QG Edge", "Source", "Count"])
def bricklinkColorToLEGO(colornum):
    """
    Get the LEGO equivalent to a bricklink color number if it exists
    :param colornum:
    :return:
    """
    # Fallback entry used for negative or unrecognised color numbers.
    fallback = {"name": "Unknown", "Lego": colornum, "BrickLink": colornum}
    if colornum < 0:
        return fallback
    return next(
        (color_data[name] for name in color_data
         if color_data[name]["BrickLink"] == colornum),
        fallback)
def bounds(gdf):
    """Calculates the bounding coordinates (left, bottom, right, top) in the given GeoDataFrame.

    Args:
        gdf: A GeoDataFrame containing the input points.

    Returns:
        An array [minx, miny, maxx, maxy] denoting the spatial extent.
    """
    return gdf.total_bounds
from typing import OrderedDict
def controls_receptor():
    """Control for receptor network.

    TODO: remain to be added to manuscript
    """
    config = FullConfig()
    # Base configuration shared by every run in this control sweep.
    base_settings = {
        'data_dir': './datasets/proto/standard',
        'max_epoch': 100,
        'train_pn2kc': False,
        'receptor_layer': True,
        'or2orn_normalization': True,
        'pn_norm_pre': 'batch_norm',
        'ORN_NOISE_STD': .4,
        'replicate_orn_with_tiling': True,
    }
    for name, value in base_settings.items():
        setattr(config, name, value)
    # Ranges of hyperparameters to loop over (order matters for vary_config).
    config_ranges = OrderedDict([
        ('N_ORN_DUPLICATION', [1, 3, 10, 30, 100]),
        ('or2orn_normalization', [False, True]),
        ('pn_norm_pre', ['None', 'batch_norm']),
    ])
    return vary_config(config, config_ranges, mode='control')
def powerport_get_row_boot_strap (port_id, raw = False):
    """Return the boot-strap setting derived from the PDU port status."""
    # Boot-strap ports live 24 slots above the base port id.
    status = powerport_get_port_status(port_id + 24, "pdu", raw)
    state = status.pop("Port State", None)
    if state:
        status["Boot Strap"] = "Normal" if state == "Off" else "Network"
    return status
def inv_ltri(ltri, det):
    """Lower triangular inverse"""
    # Adjugate of the packed 2x2 lower triangle, divided by the determinant.
    adjugate = np.array((ltri[2], -ltri[1], ltri[0]), dtype=ltri.dtype)
    return adjugate / det
def create_dummy_review(user, title='Review 1'):
    """Simple function for creating reviews of a user"""
    review_fields = dict(
        reviewer=user,
        title=title,
        rating=5,
        summary='This is my first review!!!',
        ip='190.190.190.1',
        company='Test Company',
    )
    return Review.objects.create(**review_fields)
import os
import time
def checkOneFile(path):
    """Process one file and return analysis result as element object"""
    # Create output elementtree object
    # NOTE(review): element helpers like appendChildTagWithText are presumably
    # added to ElementTree elsewhere in this module — confirm.
    if config.inputRecursiveFlag or config.inputWrapperFlag:
        # Name space already declared in results element, so no need to do it
        # here
        root = ET.Element('jpylyzer')
    else:
        root = ET.Element(
            'jpylyzer', {'xmlns': nsString,
                         'xmlns:xsi': xsiNsString,
                         'xsi:schemaLocation': locSchemaString})
    # Create elements for storing tool, file and status meta info
    toolInfo = ET.Element('toolInfo')
    fileInfo = ET.Element('fileInfo')
    statusInfo = ET.Element('statusInfo')
    # File name and path
    fileName = os.path.basename(path)
    filePath = os.path.abspath(path)
    # If file name / path contain any surrogate pairs, remove them to
    # avoid problems when writing to XML
    fileNameCleaned = stripSurrogatePairs(fileName)
    filePathCleaned = stripSurrogatePairs(filePath)
    # Produce some general tool and file meta info
    toolInfo.appendChildTagWithText("toolName", scriptName)
    toolInfo.appendChildTagWithText("toolVersion", __version__)
    fileInfo.appendChildTagWithText("fileName", fileNameCleaned)
    fileInfo.appendChildTagWithText("filePath", filePathCleaned)
    fileInfo.appendChildTagWithText(
        "fileSizeInBytes", str(os.path.getsize(path)))
    try:
        lastModifiedDate = time.ctime(os.path.getmtime(path))
    except ValueError:
        # Dates earlier than 1 Jan 1970 can raise ValueError on Windows
        # Workaround: replace by lowest possible value (typically 1 Jan 1970)
        lastModifiedDate = time.ctime(0)
    fileInfo.appendChildTagWithText(
        "fileLastModified", lastModifiedDate)
    # Initialise success flag
    success = True
    try:
        # Contents of file to memory map object
        fileData = fileToMemoryMap(path)
        isValidJP2, tests, characteristics = bv.BoxValidator("JP2", fileData).validate()
        # NOTE(review): comparing a memory map to "" looks like a guard for the
        # empty-file case where fileToMemoryMap returns "" — confirm.
        if fileData != "":
            fileData.close()
        # Generate property values remap table
        remapTable = generatePropertiesRemapTable()
        # Create printable version of tests and characteristics tree
        tests.makeHumanReadable()
        characteristics.makeHumanReadable(remapTable)
    except Exception as ex:
        # Any validation failure is downgraded to a "not valid" result with a
        # coarse failure message, so one bad file cannot abort a batch run.
        isValidJP2 = False
        success = False
        exceptionType = type(ex)
        if exceptionType == MemoryError:
            failureMessage = "memory error (file size too large)"
        elif exceptionType == IOError:
            failureMessage = "I/O error (cannot open file)"
        elif exceptionType == RuntimeError:
            failureMessage = "runtime error (please report to developers)"
        else:
            failureMessage = "unknown error (please report to developers)"
        shared.printWarning(failureMessage)
        tests = ET.Element("tests")
        characteristics = ET.Element('properties')
    # Add status info
    statusInfo.appendChildTagWithText("success", str(success))
    if not success:
        statusInfo.appendChildTagWithText("failureMessage", failureMessage)
    # Append all results to root
    root.append(toolInfo)
    root.append(fileInfo)
    root.append(statusInfo)
    root.appendChildTagWithText("isValidJP2", str(isValidJP2))
    root.append(tests)
    root.append(characteristics)
    return root
def check_user(update):
    """Return True if the telegram user in *update* has signed up, else False.

    :param update: incoming telegram update carrying ``message.from_user``
    """
    user = update.message.from_user
    # tel_get_user returns None for unknown users; map that to a boolean
    # directly instead of the redundant if/else returning True/False.
    return db.tel_get_user(user.id) is not None
def acmg():
    """Calculate an ACMG classification from submitted criteria."""
    submitted_criteria = request.args.getlist("criterion")
    return jsonify({'classification': get_acmg(submitted_criteria)})
def import_bin(filename, **kwargs):
    """
    Read a .bin file generated by the IRIS Instruments Syscal Pro System and
    return a curated dataframe for further processing. This dataframe contains
    only information currently deemed important. Use the function
    reda.importers.iris_syscal_pro_binary._import_bin to extract ALL
    information from a given .bin file.

    Parameters
    ----------
    filename : string
        path to input filename
    x0 : float, optional
        position of first electrode. If not given, then use the smallest
        x-position in the data as the first electrode.
    spacing : float
        electrode spacing. This is important if not all electrodes are used in
        a given measurement setup. If not given, then the smallest distance
        between electrodes is assumed to be the electrode spacing. Naturally,
        this requires measurements (or injections) with subsequent electrodes.
    reciprocals : int, optional
        if provided, then assume that this is a reciprocal measurements where
        only the electrode cables were switched. The provided number N is
        treated as the maximum electrode number, and denotations are renamed
        according to the equation :math:`X_n = N - (X_a - 1)`
    check_meas_nums : bool
        if True, then check that the measurement numbers are consecutive. Don't
        return data after a jump to smaller measurement numbers (this usually
        indicates that more data points were downloaded than are part of a
        specific measurement. Default: True
    skip_rows : int
        Ignore this number of rows at the beginning, e.g., because they were
        inadvertently imported from an earlier measurement. Default: 0

    Returns
    -------
    data : :py:class:`pandas.DataFrame`
        Contains the measurement data
    electrodes : :py:class:`pandas.DataFrame`
        Contains electrode positions (None at the moment)
    topography : None
        No topography information is contained in the text files, so we always
        return None
    """
    # metadata is currently unused here but returned by the low-level reader.
    metadata, data_raw = _import_bin(filename)
    skip_rows = kwargs.get('skip_rows', 0)
    if skip_rows > 0:
        data_raw.drop(data_raw.index[range(0, skip_rows)], inplace=True)
        data_raw = data_raw.reset_index()
    if kwargs.get('check_meas_nums', True):
        # check that first number is 0
        if data_raw['measurement_num'].iloc[0] != 0:
            print('WARNING: Measurement numbers do not start with 0 ' +
                  '(did you download ALL data?)')
        # check that all measurement numbers increase by one
        # NOTE(review): "np.all(np.diff(...)) == 1" compares the *bool* result
        # of np.all to 1; likely intended np.all(np.diff(...) == 1) — confirm.
        if not np.all(np.diff(data_raw['measurement_num'])) == 1:
            logger.warning(' '.join((
                'WARNING',
                'Measurement numbers are not consecutive.',
                'Perhaps the first measurement belongs to another'
                'measurement?',
                'Use the skip_rows parameter to skip those measurements'
            )))
        # import IPython
        # IPython.embed()
        # now check if there is a jump in measurement numbers somewhere
        # ignore first entry as this will always be nan
        diff = data_raw['measurement_num'].diff()[1:]
        jump = np.where(diff != 1)[0]
        if len(jump) > 0 and not np.all(data_raw['measurement_num'] == 0):
            logger.warning(
                'WARNING: One or more jumps in measurement numbers detected')
            logger.warning('The jump indices are:')
            for jump_nr in jump:
                logger.warning(jump_nr)

            logger.info('Removing data points subsequent to the first jump')
            # Keep everything up to and including the row before the jump.
            data_raw = data_raw.iloc[0:jump[0] + 1, :]
    if data_raw.shape[0] == 0:
        # no data present, return a bare DataFrame
        return pd.DataFrame(columns=['a', 'b', 'm', 'n', 'r']), None, None
    data = _convert_coords_to_abmn_X(
        data_raw[['x_a', 'x_b', 'x_m', 'x_n']],
        **kwargs
    )
    # [mV] / [mA]
    data['r'] = data_raw['vp'] / data_raw['Iab']
    data['Vmn'] = data_raw['vp']
    data['vab'] = data_raw['vab']
    data['Iab'] = data_raw['Iab']
    data['mdelay'] = data_raw['mdelay']
    data['Tm'] = data_raw['Tm']
    data['Mx'] = data_raw['Mx']
    data['chargeability'] = data_raw['m']
    data['q'] = data_raw['q']
    # rename electrode denotations
    rec_max = kwargs.get('reciprocals', None)
    if rec_max is not None:
        logger.info('renumbering electrode numbers')
        data[['a', 'b', 'm', 'n']] = rec_max + 1 - data[['a', 'b', 'm', 'n']]
    # print(data)
    return data, None, None
def get_messages(receive_address, send_address, offset, count):
    """ return most recent messages from offset to count from both communicators

    :param receive_address: address of one side of the conversation
    :param send_address: address of the other side
    :param offset: number of most-recent rows to skip per direction
    :param count: maximum number of rows to return per direction
    :return: list of message rows ordered by id
    """
    conn = connect_db()
    try:
        cur = conn.cursor()
        try:
            cur.execute(
                "(SELECT * FROM message WHERE (recvAddress=%s AND sendAddress=%s) ORDER by id DESC LIMIT %s OFFSET %s) "
                "UNION "
                "(SELECT * FROM message WHERE (recvAddress=%s AND sendAddress=%s) ORDER by id DESC LIMIT %s OFFSET %s) "
                "ORDER by id;",
                (receive_address, send_address, count, offset, send_address, receive_address, count, offset))
            return [row for row in cur]
        finally:
            cur.close()
    finally:
        # Close the connection even when the query fails; the previous
        # version leaked both cursor and connection on any exception.
        conn.close()
def get_test_node(context, **kw):
    """Return a Node object with appropriate attributes.

    NOTE: The object leaves the attributes marked as changed, such
    that a create() could be used to commit it to the DB.
    """
    db_node = db_utils.get_test_node(**kw)
    if 'id' not in kw:
        # Let the DB generate the id unless one was requested explicitly.
        db_node.pop('id')
    node = objects.Node(context)
    for field, value in db_node.items():
        setattr(node, field, value)
    return node
from typing import OrderedDict
def GlobalAttributes(ds, var):
    """
    Creates the global attributes for the netcdf file that is being written
    these attributes come from :
    https://www.unidata.ucar.edu/software/thredds/current/netcdf-java/metadata/DataDiscoveryAttConvention.html
    args:
        ds      xarray Dataset being saved; its 'time.year' coordinate and
                'history' attribute are read here
        var     (str) name of the climate variable, used in the summary text
    returns:
        attributes Ordered Dictionary containing the attribute information
    """
    # ========== Create the ordered dictionary ==========
    attr = OrderedDict()

    # fetch the references for my publications
    # pubs = puplications()

    # ========== Fill the Dictionary ==========

    # ++++++++++ Highly recomended ++++++++++
    attr["title"] = "Trend in Climate Variable"
    attr["summary"] = "Annual and season trends in %s" % var
    attr["Conventions"] = "CF-1.7"

    # ++++++++++ Data Provinance ++++++++++
    attr["history"] = "%s: Netcdf file created using %s (%s):%s by %s" % (
        str(pd.Timestamp.now()), __title__, __file__, __version__, __author__)
    attr["history"] += ds.history

    attr["creator_name"] = __author__
    attr["creator_url"] = "ardenburrell.com"
    attr["creator_email"] = __email__
    attr["institution"] = "University of Leicester"
    attr["date_created"] = str(pd.Timestamp.now())

    # ++++++++++ Netcdf Summary infomation ++++++++++
    # NOTE(review): dt.datetime expects plain ints; assumes ds['time.year']
    # .min()/.max() coerce to int here — confirm.
    attr["time_coverage_start"] = str(dt.datetime(ds['time.year'].min(), 1, 1))
    attr["time_coverage_end"] = str(dt.datetime(ds['time.year'].max() , 12, 31))
    # Note. Maybe ad some geographich infomation here

    # Add publication references
    # attr["references"] = "Results are described in: %s \n TSS-RESTREND method is described in: %s" % (
    # 	pubs["p%d" % ensinfo.paper], pubs["p1"])
    # ++++++++++ Infomation unique to TSS-RESREND ensembles ++++++++++
    # attr["package_version"] = ",".join(runinfo.iloc[ensinfo.run]["TSS.version"].unique().tolist())
    # attr["package_url"] = "https://cran.r-project.org/web/packages/TSS.RESTREND/index.html"
    # attr["Vegetation"] = ",".join(runinfo.iloc[ensinfo.run]["VI.type"].unique().tolist())
    # attr["Precipitation"] = ",".join(runinfo.iloc[ensinfo.run]["rf.type"].unique().tolist())
    # ===== Check and see if temperature is included =====
    # if not all(pd.isnull(runinfo.iloc[ensinfo.run].Temperature.tolist())):
    # 	# +++++ contains temperature +++++
    # 	# List of temperature infomation
    # 	temp = ([fnm.split("/")[3].split(".")[1]
    # 		for fnm in runinfo.iloc[ensinfo.run].Temperature.unique().tolist()])
    # 	# join list to attr
    # 	attr["Temperature"] = ",".join(temp)
    # 	# ===== add infomation about CO2 fertilisation =====
    # 	if (ensinfo.paper == 3) or (ensinfo.paper>=5):
    # 		attr["CO2_method"] = "Franks et al., (2013) CO2 correction"
    # 		attr["CO2_data"] = "CMIP5 rcp8.5 forcing data"
    return attr
from typing import List
from typing import Tuple
def extend_path(path: List[Tuple[MerkleTree, hash.HashTriple]],
                tree: MerkleTree,
                ) -> List[List[Tuple[MerkleTree, hash.HashTriple]]]:
    """Extend path if possible."""
    # One extended path per existing child subtree (left and/or right).
    return [path + [(child, get_hash_triple(child))]
            for child in (tree.left, tree.right)
            if child is not None]
import os
def main(argv):
    """Cleans the generated Amplitude binary assets.

    Returns:
        Returns 0 on success.
    """
    project_path = common.get_o3de_project_path()
    amplitude_dir = os.path.join(project_path, "sounds", "amplitude_project")
    try:
        common.clean_flatbuffer_binaries(
            common.get_conversion_data(amplitude_dir))
        print("Amplitude binary assets cleaned successfully.")
    except common.BuildError as error:
        common.handle_build_error(error)
        return 1
    return 0
def calculate_debt(runsdim, runstot, low_bound, high_bound):
    """ calculates tech debt and bugs """
    # Draw one uniform factor per run, scale by the dimension, round to ints.
    factors = np.random.uniform(low_bound, high_bound, runstot)
    rounded = np.rint(runsdim * factors)
    return rounded.astype(np.int64)
def load_dictionary(loc):
    """
    Load a pickled dictionary.

    :param loc: path to the pickle file
    :return: the unpickled dictionary
    """
    # Pickle data is binary: opening in text mode ('r') raises under
    # Python 3, so the file must be opened in 'rb'.
    with open(loc, 'rb') as f:
        worddict = pkl.load(f)
    return worddict
import json
def get_specific_post(post_id):
    """Get specific post"""
    post = posts.get(post_id)
    if post:
        return json.dumps(post), 200
    return json.dumps({"error": "Post Not Found"}), 404
def first_half(dayinput):
    """
    first half solver: count the distinct houses visited.

    :param dayinput: iterable of movement characters fed to change_coords
    :return: number of distinct positions visited (including the start)
    """
    coords = [0, 0]
    # Only distinct positions matter for the answer, so a set is enough;
    # the old dict tracked per-house visit counts that were never used.
    visited = {(0, 0)}
    for move in dayinput:
        coords = change_coords(move, coords)
        visited.add((coords[0], coords[1]))
    return len(visited)
def compute_cluster_metrics(neighbor_mat, max_k=10, included_fovs=None,
                            fov_col='SampleID'):
    """Produce k-means clustering metrics to help identify optimal number of clusters

    Args:
        neighbor_mat (pandas.DataFrame):
            a neighborhood matrix, created from create_neighborhood_matrix
            the matrix should have the label col droppped
        max_k (int):
            the maximum k we want to generate cluster statistics for, must be at least 2
        included_fovs (list):
            patient labels to include in analysis. If argument is none, default is all labels used.
        fov_col (str):
            the name of the column in neighbor_mat determining the fov

    Returns:
        xarray.DataArray:
            an xarray with dimensions (num_k_values) where num_k_values is the range
            of integers from 2 to max_k included, contains the metric scores for each value
            in num_k_values
    """
    # Reject degenerate k before doing any data work.
    if max_k < 2:
        raise ValueError("Invalid k provided for clustering")

    all_fovs = neighbor_mat[fov_col].unique()
    if included_fovs is None:
        included_fovs = all_fovs

    # Validate the requested fovs against what is actually present.
    misc_utils.verify_in_list(fov_names=included_fovs,
                              unique_fovs=all_fovs)

    # Restrict to the requested fovs and drop the fov column before scoring.
    subset = neighbor_mat[neighbor_mat[fov_col].isin(included_fovs)]
    subset = subset.drop(fov_col, axis=1)

    return spatial_analysis_utils.compute_kmeans_cluster_metric(
        neighbor_mat_data=subset, max_k=max_k
    )
def map_flat_line(x, y, data, linestyles='--', colors='k', ax=None, **kwargs):
    """Plot a flat line across every axis in a FacetGrid.

    For use with seaborn's map_dataframe, this will plot a horizontal or
    vertical line across all axes in a FacetGrid.

    Parameters
    ----------
    x, y : str, float, or list of floats
        One of these must be a float (or list of floats), one a str. The str
        must correspond to a column in the mapped dataframe, and we plot the
        line from the minimum to maximum value from that column. If the axes
        x/ylim looks very different than these values (and thus we assume this
        was a seaborn categorical plot), we instead map from 0 to
        data[x/y].nunique()-1
        The float
        corresponds to the x/y value of the line; if a list, then we plot
        multiple lines.
    data : pd.DataFrame
        The mapped dataframe
    linestyles, colors : str, optional
        The linestyles and colors to use for the plotted lines.
    ax : axis or None, optional
        The axis to plot on. If None, we grab current axis.
    kwargs :
        Passed to plt.hlines / plt.vlines.

    Returns
    -------
    lines : matplotlib.collections.LineCollection
        Artists for the plotted lines

    """
    if ax is None:
        ax = plt.gca()
    # we set color with the colors kwarg, don't want to confuse it.
    # NOTE(review): pop without a default assumes 'color' is always present in
    # kwargs (seaborn's map_dataframe supplies it); it would KeyError otherwise.
    kwargs.pop('color')
    if isinstance(x, str) and not isinstance(y, str):
        try:
            xmin, xmax = data[x].min(), data[x].max()
        except KeyError:
            # it looks like the above works with catplot / related functions
            # (i.e., when seaborn thought the data was categorical), but not
            # when it's relplot / related functions (i.e., when seaborn thought
            # data was numeric). in that case, the columns have been renamed to
            # 'x', 'y', etc.
            xmin, xmax = data['x'].min(), data['x'].max()
        # then this looks like a categorical plot
        # NOTE(review): heuristic ratio test; divides by xmax, so xmax == 0
        # would raise ZeroDivisionError — confirm inputs exclude that case.
        if (ax.get_xlim()[-1] - xmax) / xmax > 5:
            xmin = 0
            xmax = data[x].nunique()-1
        lines = ax.hlines(y, xmin, xmax, linestyles=linestyles, colors=colors,
                          **kwargs)
    elif isinstance(y, str) and not isinstance(x, str):
        try:
            ymin, ymax = data[y].min(), data[y].max()
        except KeyError:
            # it looks like the above works with catplot / related functions
            # (i.e., when seaborn thought the data was categorical), but not
            # when it's relplot / related functions (i.e., when seaborn thought
            # data was numeric). in that case, the columns have been renamed to
            # 'x', 'y', etc.
            ymin, ymax = data['y'].min(), data['y'].max()
        # then this looks like a categorical plot
        if (ax.get_ylim()[-1] - ymax) / ymax > 5:
            ymin = 0
            ymax = data[y].nunique()-1
        lines = ax.vlines(x, ymin, ymax, linestyles=linestyles, colors=colors,
                          **kwargs)
    else:
        raise Exception("Exactly one of x or y must be a string!")
    return lines
from sys import api_version
def validate_api_version():
    """
    A decorator that validates the requested API version on a route
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            requested = kwargs.get("api_version", 1)
            # Versions run from 1 up to the currently served api_version.
            if not 1 <= requested <= api_version:
                raise NotFound("The requested API version is not available")
            return func(*args, **kwargs)
        return wrapper
    return decorator
def build_dictionary(sentences, size):
    """
    Create dictionary containing most frequent words in the sentences

    :param sentences: sequence of sentence that contains words
        Caution: the sequence might be exhausted after calling this function!
    :param size: size of dictionary you want
    :return: dictionary that maps word to index (starting from 1)
    """
    counts = defaultdict(int)
    for sentence in sentences:
        for token in sentence:
            counts[token] += 1
    frequent_pairs = nlargest(size, counts.items(), itemgetter(1))
    # Build the mapping directly from the (word, count) pairs; the previous
    # zip(*frequent_pairs) unpacking crashed with ValueError on empty input.
    return {word: rank for rank, (word, _freq) in enumerate(frequent_pairs, start=1)}
import re
def simplestr(text):
    """convert a string into a scrubbed lower snakecase. Intended use is converting
    human typed field names deterministically into a string that can be used for a
    key lookup.

    :param text: type str text to be converted
    :return: lowercase snake_case string with non-word characters removed
    """
    text = text.strip()
    text = text.replace(' ', '_')
    text = text.lower()
    # Raw string fixes the invalid "\W" escape (SyntaxWarning on 3.12+);
    # \W keeps underscores because '_' counts as a word character.
    return re.sub(r'\W+', '', text)
def lti_launch(request):
    """
    This method is here to build the LTI_LAUNCH dictionary containing all
    the LTI parameters and place it into the session. This is nessesary as we
    need to access these parameters throughout the application and they are only
    available the first time the application loads.
    """
    # Guard clauses: unauthenticated users and missing LTI params each get
    # their own error page; only fully valid launches reach the main view.
    if not request.user.is_authenticated():
        return render(request, 'student_locations/error.html', {'message': 'Error: user is not authenticated!'})
    if not validaterequiredltiparams(request):
        return render(request, 'student_locations/error.html', {'message': 'Error: The LTI parameter lis_course_offering_sourcedid is required by this LTI tool.'})
    return redirect('sl:main')
import os
import subprocess
def compress_file(filename):
    """
    Compresses file with gzip

    :param filename: path to file to be compressed
    :type filename: str
    :rtype: str or None
    :raises ONEFluxError: if the file is missing or gzip cannot be executed
    """
    if not os.path.isfile(filename):
        msg = "File not found or cannot be accessed: '{f}'".format(f=filename)
        _log.critical(msg)
        raise ONEFluxError(msg)
    try:
        r = subprocess.call(['gzip', '-f', filename])
        _log.debug("Compressing file '{d}'. Result: {o}".format(d=filename, o=('success' if r == 0 else "fail ({r})".format(r=r))))
    except OSError as e:
        # subprocess.call never raises CalledProcessError (only check_call /
        # run(check=True) do), so the old handler was dead code; OSError
        # covers a missing gzip binary or an exec failure.
        msg = "Problems compressing file '{d}'. Error: '{e}'".format(d=filename, e=str(e))
        _log.critical(msg)
        raise ONEFluxError(msg)
    return filename + '.gz'
def cartesian_to_altaz(x):
    """ Converts local Cartesian coordinates to Alt-az,
        inverting altaz_to_cartesian
    """
    # Unpack into distinct names instead of shadowing the parameter.
    cx, cy, cz = x
    altitude = np.arcsin(cz)
    azimuth = np.arctan2(-cy, cx)
    return altitude, azimuth
import sys
import re
import subprocess
def main():
    """Patch kernel headers for C++ compatibility and install them.

    Expects argv: <kernel_path> <install_path> <arch>. Registers a set of
    patch rules and then runs file_patch_and_install over every header
    found under kernel_path.
    """
    argv = sys.argv
    assert len(argv) == 4, 'Invalid arguments'
    global kernel_path
    global install_path
    global arch
    kernel_path = argv[1]
    install_path = argv[2]
    arch = argv[3]
    # avoid the conflict with the 'new' operator in C++
    patch_rule_append('new', 'anew')
    # TODO: Add "extern "C"" to function declaration in string_64.h
    # while we want to compile module with C++ code.
    if 'x86' in arch:
        patch_rule_append('void \*memset\(void \*s, int c, size_t n\)\;',
                'extern \"C\" {\nvoid *memset(void *s, int c, size_t n);')
        patch_rule_append('int strcmp\(const char \*cs, const char \*ct\);',
                'int strcmp(const char *cs, const char *ct);}')
    # wrap the declaration of extern function with extern "C"
    # e.g. extern void func(void); => extern "C" {void func(void);}
    def wrapped_with_externC(matched):
        func = matched.group(0).split('extern')[1]
        return 'extern \"C\" {' + func + '}'
    pattern = re.compile(r'^extern\s*[\w_][\w\d_]*[\s\*]*[\w_][\w\d_]*\(.*\);$')
    patch_rule_append(pattern, wrapped_with_externC)
    # avoid duplicated keyword definition
    # e.g. typedef _Bool bool;
    #      => #ifndef __cplusplus
    #         typedef _Bool bool;
    #         #endif
    def wrapped_with_ifndef_cpluscplus_macro(matched):
        line = matched.group(0)
        return '#ifndef __cplusplus\n' + line + '\n#endif\n'
    pattern = re.compile(r'^\s*typedef.*\s*(false|true|bool);$')
    patch_rule_append(pattern, wrapped_with_ifndef_cpluscplus_macro)
    pattern = re.compile(r'^\s*(false|true|bool)\s*=.*$')
    patch_rule_append(pattern, wrapped_with_ifndef_cpluscplus_macro)
    # Use find command to find out all headers
    # NOTE(review): shell=True with a concatenated path is shell-injection
    # prone if kernel_path is untrusted — confirm callers control argv.
    find_cmd = 'find -L ' + kernel_path + ' -name *.h'
    proc = subprocess.Popen(find_cmd, shell = True, stdout = subprocess.PIPE)
    lines = proc.stdout.readlines()
    # NOTE(review): on Python 3 the pipe yields bytes, so the '' comparison
    # and '\n' replace below assume Python 2 (str) — confirm target runtime.
    for line in lines:
        if line == '':
            break
        # Remove the newline character
        src = line.replace('\n', "")
        file_patch_and_install(src)
def discount_opex(opex, global_parameters, country_parameters):
    """
    Discount opex based on return period.

    Parameters
    ----------
    opex : float
        Annual operational expenditure (financial cost).
    global_parameters : dict
        All global model parameters ('return_period' in years and
        'discount_rate' as a percentage).
    country_parameters : dict
        All country specific parameters ('financials'['wacc'] as a
        percentage).

    Returns
    -------
    discounted_cost : float
        The discounted cost over the desired time period, uplifted by the
        weighted average cost of capital (WACC).

    """
    return_period = global_parameters['return_period']
    discount_rate = global_parameters['discount_rate'] / 100
    wacc = country_parameters['financials']['wacc']

    # Sum the present value of each year's opex over the return period.
    discounted_cost = round(sum(
        opex / (1 + discount_rate) ** i for i in range(return_period)
    ))

    # Uplift by the WACC (given as a percentage).
    return discounted_cost * (1 + (wacc / 100))
import os
import logging
def load_dataset_from_path(path):
    """
    Build a dataset in the form of a pandas dataframe where
    the schema is: <LYRIC_PATH, EMOTION>

    The emotion label is the text before the first underscore in each file
    name; files are discovered recursively under *path*.
    """
    rows = list()
    # Traverse the dataset directory
    for root, dirs, files in os.walk(path):
        for f in files:
            fields = f.split('_')
            if not fields[0]:
                # No emotion label before the first underscore: skip just
                # this file (the old 'break' aborted the remaining files).
                logging.warning('Could not process file: %s', f)
                continue
            # Join against *root* (the directory actually containing f);
            # joining against *path* produced wrong paths for files that
            # live in subdirectories.
            rows.append([os.path.abspath(os.path.join(root, f)), fields[0]])
    return pd.DataFrame(rows, columns=['Lyric_Path', 'Emotion'])
import torch
def permute(x, perm):
    """Permutes the last three dimensions of the input Tensor or Array.

    Args:
        x (Tensor or Array): Input to be permuted.
        perm (tuple or list): Permutation of the last three axes.

    Note:
        If the input has fewer than three dimensions a copy is returned.
    """
    if is_tensor(x):
        if x.dim() < 3:
            return x.data.clone()
        axes = tuple(range(0, x.dim()))
        # Leading axes stay put; only the last three are reordered.
        new_axes = axes[:-3] + tuple(axes[-3:][i] for i in perm)
        return x.permute(*new_axes).contiguous()
    if is_array(x):
        if x.ndim < 3:
            return x.copy()
        axes = tuple(range(0, x.ndim))
        new_axes = axes[:-3] + tuple(axes[-3:][i] for i in perm)
        # Copying to get rid of negative strides
        return np.transpose(x, new_axes).copy()
    raise TypeError(f'Uknown type {torch.typename(x)} encountered.')
def is_hr_between(time: int, time_range: tuple) -> bool:
    """
    Calculate if hour is within a range of hours

    Example: is_hr_between(4, (24, 5)) will match hours from 24:00:00 to 04:59:59
    """
    start, end = time_range
    if end < start:
        # Range wraps past midnight: match either side of the wrap.
        return time >= start or time <= end
    return start <= time <= end
def calc_Topt(sur,obs,Tvals,objfunc='nse'):
    """
    Function to calibrate the T parameter using a brute-force method

    Args: sur (pandas.Series): pandas series of the surface soil moisture
          obs (pandas.Series): pandas series of the soil moisture at layer x to calibrate
          Tvals (list,tuple,set,np.array): sequence of values to test for optimal value
    Kwargs: objfunc (string): objective function used to search for optimal value;
            options: "nse","rmse","bias",and "r"; default: "nse"
    Returns: out (dict): dictionary with the optimal T value keyed at 'T' and the
             objective function value keyed at 'objval'
    """
    objectiveFunc = dict(nse=nse, rmse=rmse, bias=bias, r=r, ubrmse=ubRmse)[objfunc]
    # Pair the two series and drop time steps missing either value.
    paired = pd.concat([sur, obs], axis=1)
    paired.columns = ('surface', 'depth')
    paired.dropna(inplace=True)
    scores = []
    for candidate in Tvals:
        # Propagate the surface signal down with the exponential filter.
        filtered = expFilter(paired['surface'], T=candidate)
        simObs = pd.concat([filtered, paired['depth']], axis=1)
        simObs.columns = ('simulation', 'observation')
        scores.append(objectiveFunc(simObs))
    # NSE and r are "higher is better"; the error metrics are minimized.
    if objfunc in ('nse', 'r'):
        bestIdx = np.array(scores).argmax()
        bestScore = np.nanmax(scores)
    else:
        bestIdx = np.array(scores).argmin()
        bestScore = np.nanmin(scores)
    return dict(T=Tvals[bestIdx], objval=bestScore)
def share_file(service, file_id):
    """Make files public

    For a given file-id, sets role 'reader' to 'anyone'. Returns public
    link to file.

    :param service: authorized Google Drive API service object
    :param: file_id (string)
    :return: (string) url to shared file
    """
    permission = {
        'type': "anyone",
        'role': "reader",
        'withLink': True
    }
    try:
        service.permissions().insert(
            fileId=file_id, body=permission).execute()
    # Fix: ``except X, e`` is Python-2-only syntax; ``as`` works on 2.6+ and 3.
    except errors.HttpError as error:
        print('An error occured while sharing: %s' % file_id)
    try:
        file = service.files().get(fileId=file_id).execute()
    except errors.HttpError as error:
        # NOTE(review): if this lookup fails, `file` is unbound below and a
        # NameError is raised -- consider re-raising here instead.
        print('Error occured while fetch public link for file: %s' % file_id)
    print("Uploaded to %s" % file['webContentLink'])
    return file['webContentLink']
import os
def getlocal():
    """Get the path to the local directory where OCRopus data is
    installed. Checks OCROPUS_DATA in the environment first,
    otherwise defaults to /usr/local/share/ocropus."""
    env_path = os.getenv("OCROPUS_DATA")
    if env_path:
        return env_path
    return modeldir
import data
def convert_image_for_visualization(image_data, mean_subtracted=True):
    """
    Convert image data from tensorflow to displayable format
    """
    img = image_data
    if mean_subtracted:
        # Undo the per-channel mean subtraction applied during preprocessing.
        img = img + np.asarray(data.IMAGE_BGR_MEAN, np.float32)
    if FLAGS.image_channel_order == 'BGR':
        # Reverse the channel axis: BGR => RGB
        img = img[:, :, ::-1]
    return np.floor(img).astype(np.uint8)
def mld(returns_array, scale=252):
    """
    Maximum Loss Duration

    Maximum number of *consecutive* time steps when the returns were below 0.

    :param returns_array: array of investment returns
    :param scale: number of days required for normalization. By default in a year there are 252 trading days.
    :return: MLD (longest losing streak divided by ``scale``)
    """
    # Bug fix: the original elif-chain only extended a streak when the
    # *previous* return was already negative, so a streak starting right
    # after a gain (e.g. [1, -1, -1, -1]) was under-counted by one.
    max_loss = 0
    curr = 0
    for ret in returns_array:
        if ret < 0:
            # Extend the current losing streak.
            curr += 1
            if curr > max_loss:
                max_loss = curr
        else:
            # A non-negative return ends the streak.
            curr = 0
    # Normalize over the number of trading days in a year
    return max_loss / scale
from typing import Union
from typing import List
from typing import Dict
from typing import Any
def json2geodf(
    content: Union[List[Dict[str, Any]], Dict[str, Any]],
    in_crs: str = DEF_CRS,
    crs: str = DEF_CRS,
) -> gpd.GeoDataFrame:
    """Create GeoDataFrame from (Geo)JSON.

    Parameters
    ----------
    content : dict or list of dict
        A (Geo)JSON dictionary e.g., r.json() or a list of them.
    in_crs : str
        CRS of the content, defaults to ``epsg:4326``.
    crs : str, optional
        The target CRS of the output GeoDataFrame, defaults to ``epsg:4326``.

    Returns
    -------
    geopandas.GeoDataFrame
        Generated geo-data frame from a GeoJSON

    Raises
    ------
    InvalidInputType
        If ``content`` is neither a dict nor a list of dicts.
    """
    if not isinstance(content, (list, dict)):
        # Fix: message now matches the accepted types ("dict or list of dict",
        # as documented above), instead of the wrong "list or list of dict".
        raise InvalidInputType("content", "dict or list of dict ((geo)json)")
    content = content if isinstance(content, list) else [content]
    try:
        geodf = gpd.GeoDataFrame.from_features(content[0], crs=in_crs)
    except TypeError:
        # Fall back to ArcGIS JSON: convert each item to GeoJSON first.
        content = [arcgis2geojson(c) for c in content]
        geodf = gpd.GeoDataFrame.from_features(content[0], crs=in_crs)
    if len(content) > 1:
        geodf = geodf.append([gpd.GeoDataFrame.from_features(c, crs=in_crs) for c in content[1:]])
    if in_crs != crs:
        # Reproject to the requested target CRS.
        geodf = geodf.to_crs(crs)
    return geodf
from typing import Optional
from typing import List
from typing import Tuple
from typing import Any
import logging
def build_block_specs(
    block_specs: Optional[List[Tuple[Any, ...]]] = None) -> List[BlockSpec]:
  """Builds the list of BlockSpec objects for SpineNet.

  Falls back to the default SPINENET_BLOCK_SPECS when none (or an empty
  sequence) is given.
  """
  specs = block_specs or SPINENET_BLOCK_SPECS
  logging.info('Building SpineNet block specs: %s', specs)
  return [BlockSpec(*spec) for spec in specs]
def est_agb(dbh_mat, sp_list):
    """
    Estimate above ground biomass using the allometric equation in Ishihara et al. 2015.

    :param dbh_mat: per-species sequence of DBH (diameter at breast height)
        arrays, iterated in parallel with ``sp_list`` -- units assumed cm,
        TODO confirm against the biomass() helper
    :param sp_list: species codes used to look up wood density and
        functional type in the module-level ``dict_sp`` table
    :return: biomass matrix in Mg (computed in kg, then divided by 1000)
    """
    # wood density (None when the species is unknown or has no recorded value)
    wd_list = [
        dict_sp[sp]["wood_density"]
        if sp in dict_sp and dict_sp[sp]["wood_density"]
        else None
        for sp in sp_list
    ]
    # functional type (life form)
    # NOTE: unknown species fall back to the most frequent functional type
    ft_categ = {1: "EG", 4: "DA", 5: "EA"}
    ft_list = [
        ft_categ[dict_sp[sp]["categ2"]]
        if sp in dict_sp and dict_sp[sp]["categ2"]
        else "NA"
        for sp in sp_list
    ]
    # Replace "NA" entries with the most common functional type observed.
    ft_u, ft_c = np.unique([i for i in ft_list], return_counts=True)
    ft_list = [ft_u[np.argmax(ft_c)] if i == "NA" else i for i in ft_list]
    # Apply the allometric model element-wise per species row.
    w_mat = np.array(
        [
            np.vectorize(biomass)(dbh, wd=wd, ft=ft)
            for dbh, wd, ft in zip(dbh_mat, wd_list, ft_list)
        ]
    ).astype("float64")
    w_mat = w_mat / 1000  # kg to Mg
    return w_mat
from typing import Optional
def parse_json_and_match_key(line: str) -> Optional[LogType]:
    """Parse a line as JSON string and check if it a valid log."""
    log = parse_json(line)
    if not log:
        # Unparseable (or empty) input is passed through unchanged.
        return log
    # Keep the record only when it is explicitly marked as a config log.
    if "logbook_type" in log and log["logbook_type"] == "config":
        return log
    return None
def nearDistance(img, centers):
    """
    Get the blob nearest to the image center, which is probably the blob for cells, using euclidian distance.

    Parameters: img, image with labels defined;
                centers, list of label' centers of mass.
    Returns:    nearestLabel, label nearest to the image center.
    """
    rows, cols = img.shape
    imgCenter = np.array([rows // 2, cols // 2])
    # Euclidean distance from each center of mass to the image center.
    distances = [np.linalg.norm(imgCenter - np.array(c)) for c in centers]
    return np.argmin(distances)
from flask_swagger import swagger
import json
def create_app(config_object=ProdConfig):
    """This function is an application factory.
    As explained here: http://flask.pocoo.org/docs/patterns/appfactories/.
    :param config_object: The configuration object to use.
    :return: the fully configured Flask application instance
    """
    # Use the root package name so templates/blueprints resolve correctly.
    app = Flask(__name__.split('.')[0])
    app.config.from_object(config_object)
    register_blueprints(app)
    register_extensions(app)
    register_errorhandlers(app)
    register_shellcontext(app)
    register_commands(app)
    register_admin(app)
    @app.context_processor
    def inject_fb_app_ID():
        # Values injected into every rendered template.
        return dict(
            fb_app_id=app.config['FB_APP_ID'],
            stripe_pub_key=app.config['STRIPE_PUBLISHABLE_KEY'],
            embed_entity=json.dumps(g.embed_entity),
            current_user_json=json.dumps(g.current_user_json),
            mixpanel_enabled=app.config['MIXPANEL_ENABLED'],
            mixpanel_id=app.config['MIXPANEL_ID'],
        )
    @app.before_request
    def embed_entity():
        """Embed the entity based on the request."""
        # NOTE: this relies pretty heavily on the fact that the before_request
        # runs before the context_processor. If that's ever False, we'll have
        # to change how this works.
        assign_requested_entity()
    @app.before_request
    def set_sentry_user_context():
        """Add the user to the sentry context."""
        sentry.user_context({'id': getattr(current_user, 'id', None)})
    @app.route("/swagger/spec")
    def spec():
        # Swagger/OpenAPI spec auto-generated from route docstrings.
        swag = swagger(app)
        swag['info']['version'] = "1.0"
        swag['info']['title'] = "Ceraon API"
        return jsonify(swag)
    @app.route("/swagger/docs")
    def api_doct():
        # Interactive Swagger UI page.
        return render_template('swagger-index.html')
    return app
def get_f1_dist1(y_true, y_pred, smooth=default_smooth):
    """Helper to turn the F1 score into a loss"""
    f1 = get_f1_score1(y_true, y_pred, smooth)
    return 1 - f1
def get_date_info_for_pids_tables(project_id, client):
    """
    Loop through tables within all datasets and determine if the table has an end_date date or a date field. Filtering
    out the person table and keeping only tables with PID and an upload or start/end date associated.

    :param project_id: bq name of project_id
    :param client: bq client object
    :return: filtered dataframe which includes the following columns for each table in each dataset with a person_id
    'project_id', 'dataset_id', 'table', 'date_column', 'start_date_column', 'end_date_column'
    """
    # Create empty df to append to for final output
    date_fields_info_df = pd.DataFrame()
    # Loop through datasets
    LOGGER.info(
        "Looping through datasets to filter and create dataframe with correct date field to determine retraction"
    )
    dataset_obj = client.list_datasets(project_id)
    datasets = [d.dataset_id for d in dataset_obj]
    # Remove synthetic data, vocabulary, curation sandbox and previous naming convention datasets
    prefixes = ('SR', 'vocabulary', 'curation', 'combined', '2018', 'R2018',
                'rdr')
    datasets = [x for x in datasets if not x.startswith(prefixes)]
    for dataset in datasets:
        LOGGER.info(f'Starting to iterate through dataset: {dataset}')
        # Get table info for tables with pids
        pids_tables_df = get_pids_table_info(project_id, dataset, client)
        # Check to see if dataset is empty, if empty break out of loop
        if pids_tables_df.empty:
            LOGGER.info(
                f'No tables in dataset:{dataset}, skipping over dataset')
            continue
        # Keep only records with datatype of 'DATE'
        date_fields_df = pids_tables_df[pids_tables_df['data_type'] == 'DATE']
        # Create empty df to append to, keeping only one record per table
        df_to_append = pd.DataFrame(columns=[
            'project_id', 'dataset_id', 'table', 'date_column',
            'start_date_column', 'end_date_column'
        ])
        df_to_append['project_id'] = date_fields_df['table_catalog']
        df_to_append['dataset_id'] = date_fields_df['table_schema']
        df_to_append['table'] = date_fields_df['table_name']
        df_to_append = df_to_append.drop_duplicates()
        # Create new df to loop through date time fields
        df_to_iterate = pd.DataFrame(
            columns=['project_id', 'dataset_id', 'table', 'column'])
        df_to_iterate['project_id'] = date_fields_df['table_catalog']
        df_to_iterate['dataset_id'] = date_fields_df['table_schema']
        df_to_iterate['table'] = date_fields_df['table_name']
        df_to_iterate['column'] = date_fields_df['column_name']
        # Remove person table
        df_to_append = df_to_append[~df_to_append.table.str.contains('person')]
        df_to_iterate = df_to_iterate[~df_to_iterate.table.str.contains('person'
                                                                       )]
        # Filter through date columns and append to the appropriate column
        for _, row in df_to_iterate.iterrows():
            column = getattr(row, 'column')
            table = getattr(row, 'table')
            if 'start_date' in column:
                df_to_append.loc[df_to_append.table == table,
                                 'start_date_column'] = column
            elif 'end_date' in column:
                df_to_append.loc[df_to_append.table == table,
                                 'end_date_column'] = column
            else:
                # Any remaining DATE column is treated as the table's plain date.
                df_to_append.loc[df_to_append.table == table,
                                 'date_column'] = column
        # NOTE(review): DataFrame.append is deprecated in newer pandas;
        # pd.concat is the replacement.
        date_fields_info_df = date_fields_info_df.append(df_to_append)
        LOGGER.info(f'Iteration complete through dataset: {dataset}')
    return date_fields_info_df
def preprocess_sent(sentence):
    """input: a string containing multiple sentences;
    output: a list of tokenized sentences"""
    sentence = fill_whitespace_in_quote(sentence)
    output = tokenizer0(sentence)
    # tokens = [token.text for token in tokenizer.tokenize(sentence)]
    tokens = list(map(lambda x: x.text, output))
    ret_sentences = []
    st = 0
    # fix for ','
    # Split tokens ending in a comma into <word> plus ',' so the comma is
    # its own token. NOTE(review): rstrip(',') removes *all* trailing
    # commas, not just one -- confirm that is intended.
    new_tokens = []
    for i, token in enumerate(tokens):
        if token.endswith(','):
            new_tokens += [token.rstrip(','), ',']
        else:
            new_tokens += [token]
    tokens = new_tokens
    # Cut the token stream into sentences at tokens ending with '.'.
    # NOTE(review): strip('.') removes all leading/trailing dots (so '...'
    # collapses to ''), and any tokens after the final '.' are silently
    # dropped -- confirm both behaviors are intended.
    for i, token in enumerate(tokens):
        if token.endswith('.'):
            ret_sentences.append(tokens[st: i] + [token.strip('.')])
            st = i + 1
    return ret_sentences
import os
def save_attention_visualization(source_img_path, att_map, dest_name):
    """
    Visualize the attention map on the image and save the visualization.
    """
    src = cv2.imread(source_img_path)  # cv2.imread does auto-rotate
    # Downsample the source image to the working resolution.
    src = downsample_image(src)
    img_h, img_w, img_c = src.shape
    _, att_h, att_w = att_map.shape
    att = att_map.reshape((att_h, att_w))
    # Upsample the attention map to the image size (order=3: bicubic) and
    # normalize to [0, 1] so it can serve as an alpha channel.
    heat = resize(att, (img_h, img_w), order=3)
    heat = heat / heat.max()
    # Build an RGBA image: color channels from the image, alpha from attention.
    rgba = np.zeros((img_h, img_w, img_c + 1))
    rgba[..., 0:img_c] = src
    rgba[..., 3] = heat
    out_path = os.path.join(VIZ_FOLDER, dest_name + '.png')
    cv2.imwrite(out_path, rgba * 255.0)
    return out_path
def average_slope_intercept(image, lines):
    """
    This function combines line segments into one or two lane lines
    """
    left_fit = []  # contains the coordinate of on the line in the left
    right_fit = []
    if lines is None:
        return None
    # now loop through very line we did previously
    for line in lines:
        for x1, y1, x2, y2 in line:
            # Fit a 1st-degree polynomial (y = m*x + b) through the segment.
            fit = np.polyfit((x1, x2), (y1, y2), 1)
            slope = fit[0]
            intercept = fit[1]
            if slope < 0:  # y is reversed in image
                left_fit.append((slope, intercept))
            else:
                right_fit.append((slope, intercept))
    """now What polyfit will do for us is it will fit a first degree polynomial,
    which would simply be a linear function of Y= mx + b it's going to fit this polynomial to our X and Y
    points and return a vector of coefficients which describe the slope and Y intercept.
    """
    # add more weight to longer lines
    # NOTE(review): np.average over an empty left_fit/right_fit yields nan
    # (with a RuntimeWarning); make_coordinates presumably must cope with
    # that -- confirm.
    left_fit_average = np.average(left_fit, axis=0)
    right_fit_average = np.average(right_fit, axis=0)
    left_line = make_coordinates(image, left_fit_average)
    right_line = make_coordinates(image, right_fit_average)
    averaged_lines = [left_line, right_line]
    return averaged_lines
def build_upper_limb_roll_jnts(main_net, roll_jnt_count=3):
    """Add roll jnts, count must be at least 1

    Builds twist/roll joints between each upper-arm and upper-leg joint
    pair on both sides, driven by an ikSCsolver "driver" chain.

    :param main_net: rig network node exposing ``arms`` and ``legs`` lists
    :param roll_jnt_count: number of roll joints per limb segment (>= 1)
    """
    # Fractional weight step between consecutive roll joints.
    increment = 1.0/float(roll_jnt_count)
    def create_joints(jnt_a, jnt_b, net, lower_limb=False, up_axis='-Z'):
        """
        :param jnt_a: Start Joint
        :param jnt_b: End Joint
        :param net: Limb Network node
        :param lower_limb: This uses an aim constraint method for end driven rotation, the default uses a ikSc Solver
        for upper arm and upper leg rotation that is driven by the parent jnt.
        :param up_axis: For placement of the up locator, must be specified as positive or negative. '+Z', '-X', '+Y'
        :return:
        """
        driver_a, driver_b = create_driver_rig(jnt_a, jnt_b, net, reverse=lower_limb, up_axis=up_axis)
        info = naming_utils.ItemInfo(jnt_a)
        # Create Roll Joint
        for roll_idx in range(roll_jnt_count):
            # Blend weights shift from jnt_a toward jnt_b along the segment.
            weight_b = increment * roll_idx
            weight_a = 1 - weight_b
            name = naming_utils.concatenate([info.side, info.base_name, info.joint_name, 'Roll', consts.INDEX[roll_idx]])
            type = naming_utils.concatenate([info.base_name, info.joint_name, 'Roll', consts.INDEX[roll_idx]])
            dup_jnt = pymel.duplicate(jnt_a, name=name)[0]
            dup_jnt.radius.set(8)
            dup_jnt.setAttr('otherType', type)
            naming_utils.add_tags(dup_jnt, {'_skin': 'True'})
            pymel.delete(dup_jnt.getChildren())
            # Parent Roll joint to Jnt A
            dup_jnt.setParent(jnt_a)
            naming_utils.add_tags(dup_jnt, {'Network': net.name(), 'Utility': 'Roll'})
            point_con = pymel.pointConstraint([jnt_a, jnt_b, dup_jnt])
            # Weighting toward child
            point_con.w0.set(weight_a)
            point_con.w1.set(weight_b)
            # Multi Node: scales the driver's twist for this roll joint.
            name = naming_utils.concatenate([info.side, info.base_name, info.joint_name, 'Multi', consts.INDEX[roll_idx]])
            multi_utility = pymel.shadingNode('multiplyDivide', name=name, asUtility=True)
            naming_utils.add_tags(multi_utility, {'Network': net.name(), 'Utility': 'Roll'})
            # Using jnt_a for Shoulder and Upper Leg
            driver_a.rotateX.connect(multi_utility.input1X)
            multi_utility.input2X.set(weight_a)
            multi_utility.outputX.connect(dup_jnt.rotateX)
    def create_driver_rig(jnt_a, jnt_b, net, reverse=False, up_axis=None):
        # Builds the ikSCsolver driver chain that measures the twist of
        # jnt_a's segment. NOTE(review): `reverse` and `up_axis` are
        # accepted but not used below -- confirm whether that is intended.
        info = naming_utils.ItemInfo(jnt_a)
        # Driver Group
        grp_name = naming_utils.concatenate([info.side, info.base_name, info.joint_name, 'Roll', 'GRP'])
        grp = virtual_classes.TransformNode(name=grp_name)
        naming_utils.add_tags(grp, {'Network': net.name()})
        pymel.parentConstraint([jnt_a, grp])
        grp.setParent(grp.limb_grp)
        # Driver A
        new_name = naming_utils.concatenate([info.side, info.base_name, info.joint_name, 'Driver', 'A'])
        driver_a = pymel.duplicate(jnt_a, name=new_name)[0]
        pymel.delete(driver_a.getChildren())
        driver_a.setTranslation(jnt_b.getTranslation(worldSpace=True), worldSpace=True)
        driver_a.setParent(grp)
        # Driver B
        new_name = naming_utils.concatenate([info.side, info.base_name, info.joint_name, 'Driver', 'B'])
        driver_b = pymel.duplicate(jnt_a, name=new_name)[0]
        pymel.delete(driver_b.getChildren())
        driver_b.setParent(driver_a)
        ikhandle_name = naming_utils.concatenate([info.side, info.base_name, info.joint_name, 'Roll', 'IK'])
        ikhandle = pymel.ikHandle(startJoint=driver_a, endEffector=driver_b, name=ikhandle_name, solver='ikSCsolver')[0]
        ikhandle.setParent(grp)
        pymel.parentConstraint([jnt_a.getParent(), ikhandle], maintainOffset=True)
        return driver_a, driver_b
    # Add Upper Arm Roll
    # idx[0] LEFT Side, idx[1] Right Side
    for idx in range(2):
        # UpperArm
        create_joints(main_net.arms[idx].jnts[0], main_net.arms[idx].jnts[1], main_net.arms[idx])
        # UpperLeg
        create_joints(main_net.legs[idx].jnts[0], main_net.legs[idx].jnts[1], main_net.legs[idx])
import os
import json
import re
def upload():
    """UEditor file-upload endpoint.

    ``CONFIG``: parsed UEditor configuration file;
    ``result``: the JSON (or JSONP) response returned to the editor client.
    """
    mimetype = 'application/json'
    result = {}
    action = request.args.get('action')
    # Parse the JSON-format configuration file
    with open(os.path.join(app.static_folder, 'ueditor', 'php',
                           'config.json')) as fp:
        try:
            # Strip the comments between `/**/` before parsing
            CONFIG = json.loads(re.sub(r'\/\*.*\*\/', '', fp.read()))
        # NOTE(review): bare except silently swallows all errors here.
        except:
            CONFIG = {}
    if action == 'config':
        # On initialization, return the configuration to the client
        result = CONFIG
    elif action in ('uploadimage', 'uploadfile', 'uploadvideo'):
        # Image, file and video uploads
        if action == 'uploadimage':
            fieldName = CONFIG.get('imageFieldName')
            config = {
                "pathFormat": CONFIG['imagePathFormat'],
                "maxSize": CONFIG['imageMaxSize'],
                "allowFiles": CONFIG['imageAllowFiles']
            }
        elif action == 'uploadvideo':
            fieldName = CONFIG.get('videoFieldName')
            config = {
                "pathFormat": CONFIG['videoPathFormat'],
                "maxSize": CONFIG['videoMaxSize'],
                "allowFiles": CONFIG['videoAllowFiles']
            }
        else:
            fieldName = CONFIG.get('fileFieldName')
            config = {
                "pathFormat": CONFIG['filePathFormat'],
                "maxSize": CONFIG['fileMaxSize'],
                "allowFiles": CONFIG['fileAllowFiles']
            }
        if fieldName in request.files:
            field = request.files[fieldName]
            uploader = Uploader(field, config, app.static_folder)
            result = uploader.getFileInfo()
        else:
            result['state'] = '上传接口出错'
    elif action in ('uploadscrawl'):
        # Scrawl (base64 doodle) upload
        fieldName = CONFIG.get('scrawlFieldName')
        config = {
            "pathFormat": CONFIG.get('scrawlPathFormat'),
            "maxSize": CONFIG.get('scrawlMaxSize'),
            "allowFiles": CONFIG.get('scrawlAllowFiles'),
            "oriName": "scrawl.png"
        }
        if fieldName in request.form:
            field = request.form[fieldName]
            uploader = Uploader(field, config, app.static_folder, 'base64')
            result = uploader.getFileInfo()
        else:
            result['state'] = '上传接口出错'
    elif action in ('catchimage'):
        # Fetch remote images and store them locally.
        config = {
            "pathFormat": CONFIG['catcherPathFormat'],
            "maxSize": CONFIG['catcherMaxSize'],
            "allowFiles": CONFIG['catcherAllowFiles'],
            "oriName": "remote.png"
        }
        fieldName = CONFIG['catcherFieldName']
        if fieldName in request.form:
            # Oddly, the remote-catch request does not submit under this
            # form field name...
            source = []
        elif '%s[]' % fieldName in request.form:
            # ...but under this one (PHP-style array field).
            source = request.form.getlist('%s[]' % fieldName)
        _list = []
        for imgurl in source:
            uploader = Uploader(imgurl, config, app.static_folder, 'remote')
            info = uploader.getFileInfo()
            _list.append({
                'state': info['state'],
                'url': info['url'],
                'original': info['original'],
                'source': imgurl,
            })
        result['state'] = 'SUCCESS' if len(_list) > 0 else 'ERROR'
        result['list'] = _list
    elif action in ('listimage', 'listfile'):
        # Paginated listing of previously uploaded files/images.
        allowFiles = []
        listSize = 20
        path = ""
        if (action == 'listfile'):
            allowFiles = CONFIG['fileManagerAllowFiles']
            listSize = CONFIG['fileManagerListSize']
            path = CONFIG['fileManagerListPath']
        else:
            allowFiles = CONFIG['imageManagerAllowFiles']
            listSize = CONFIG['imageManagerListSize']
            path = CONFIG['imageManagerListPath']
        size = int(request.args.get('size', listSize))
        start = int(request.args.get('start', 0))
        end = start + size
        path = app.static_folder + path
        files = getfiles(app.root_path, path, allowFiles, [])
        lens = len(files)
        # reverse order
        # files.reverse()
        i = min(end, lens) - 1
        list = []
        for index in range(len(files)):
            if (i < lens and i >= 0 and i >= start):
                list.append(files[i])
            i = i - 1
        files = []
        # min = min(end, lens)
        # list = files[:min(end, lens)]
        result["state"] = "SUCCESS"
        result["list"] = list
        result["start"] = start
        result["total"] = lens
    else:
        result['state'] = '请求地址出错'
    result = json.dumps(result)
    if 'callback' in request.args:
        # JSONP support: wrap the JSON in the (validated) callback name.
        callback = request.args.get('callback')
        if re.match(r'^[\w_]+$', callback):
            result = '%s(%s)' % (callback, result)
            mimetype = 'application/javascript'
        else:
            result = json.dumps({'state': 'callback参数不合法'})
    res = make_response(result)
    res.mimetype = mimetype
    res.headers['Access-Control-Allow-Origin'] = '*'
    res.headers['Access-Control-Allow-Headers'] = 'X-Requested-With,X_Requested_With'
    return res
def create_one(**kwargs):
    """ Create a Prediction object with the given fields.

    Args:
        Named arguments.
        date: Date object. Date of the predicted changes.
        profitable_change: Decimal. Predicted profitable change in pips.
        instrument: Instrument object.
        score: Float. The cross validation score of this prediction.
        predictor: Predictor object.

    Returns:
        Predicton object with the given fields.
    """
    prediction = Prediction(**kwargs)
    return prediction
def get_lane_lines(img_parms):
    """
    Main jumping into function

    Runs the full lane-detection pipeline described by ``img_parms``
    (keys used: 'image', 'maskings', 'roi', 'll') and returns the Hough
    line overlay image. Intermediate images are displayed/saved based on
    the module-level ``debug`` level.
    """
    titles = list()
    images = list()
    cmaps = list()
    print("Starting Lane Detection")
    print("************************")
    img_in = mpimg.imread(img_parms["image"])
    if debug >= 3:
        make_save_dir(img_parms["image"])
    images.append(img_in)
    titles.append("Original")
    cmaps.append(None)
    if debug >= 2 : display_image(images[-1],titles[-1],cmaps[-1])
    print("1/5 - Masking Unnecessary Colors")
    img_filtered = color_filter(img_in, img_parms["maskings"])
    images.append(img_filtered)
    titles.append("Color_Filtered")
    cmaps.append(None)
    if debug >= 2 : display_image(images[-1],titles[-1],cmaps[-1])
    print("2/5 - Isolate region of interest")
    img_roi = roi(img_filtered,img_parms["roi"])
    images.append(img_roi)
    titles.append("Isolated_ROI")
    cmaps.append(None)
    if debug >= 2 : display_image(images[-1],titles[-1],cmaps[-1])
    print("3/5 - Edge Detection")
    img_canny = canny(img_roi)
    images.append(img_canny)
    titles.append("Canny_Edges")
    cmaps.append('gray')
    if debug >= 2 : display_image(images[-1],titles[-1],cmaps[-1])
    print("4/5 - Draw and shade lane lines")
    img_hough, img_hough_lines = detect_lane_lines(img_parms["ll"], img_canny)
    images.append(img_hough_lines)
    titles.append("Hough_Lines")
    cmaps.append(None)
    if debug >= 2 : display_image(images[-1],titles[-1],cmaps[-1])
    print("5/5 - Final composite image")
    # Blend the detected lines back over the original frame.
    img_result = weightSum(img_hough, img_in)
    images.append(img_result)
    titles.append("Final_Composite")
    cmaps.append(None)
    if debug >= 2 : display_image(images[-1],titles[-1],cmaps[-1])
    if debug : display_images(images,titles,cmaps)
    return img_hough
import math
def get_distance_wgs84(lon1, lat1, lon2, lat2):
    """
    Great-circle distance on the WGS-84 sphere, following the algorithm
    from https://github.com/googollee/eviltransform.

    :param lon1: longitude of point 1 (degrees)
    :param lat1: latitude of point 1 (degrees)
    :param lon2: longitude of point 2 (degrees)
    :param lat2: latitude of point 2 (degrees)
    :return: distance in meters
    """
    earth_radius = 6378137.0
    deg2rad = math.pi / 180
    phi1 = lat1 * deg2rad
    phi2 = lat2 * deg2rad
    cos_part = (math.cos(phi1) * math.cos(phi2) *
                math.cos((lon1 - lon2) * deg2rad))
    sin_part = math.sin(phi1) * math.sin(phi2)
    # Clamp to [-1, 1] to guard acos against floating-point rounding.
    s = max(-1.0, min(1.0, cos_part + sin_part))
    return math.acos(s) * earth_radius
def make_uuid(ftype, size=6):
    """
    Unique id for a type.
    """
    # Take the first `size` characters of a random UUID4 as the suffix.
    suffix = str(uuid4())[:size]
    return '{}-{}'.format(ftype, suffix)
def region_of_interest(img, vertices):
    """
    Applies an image mask.

    Only keeps the region of the image defined by the polygon
    formed from `vertices`. The rest of the image is set to black.
    `vertices` should be a numpy array of integer points.

    Args:
        img (image): Mask this image.
        vertices (numpy array of integers): A polygon to use for the mask.

    Returns:
        image: The masked image.
    """
    mask = np.zeros_like(img)
    # White fill color matching the image's channel count (3/4-channel
    # color image vs. single-channel grayscale).
    if len(img.shape) > 2:
        fill_color = (255,) * img.shape[2]
    else:
        fill_color = 255
    # Paint the polygon onto the mask, then keep only the covered pixels.
    cv2.fillPoly(mask, vertices, fill_color)
    return cv2.bitwise_and(img, mask)
import pytz
def _adjust_utc_datetime_to_phone_datetime(value, phone_tz):
    """
    adjust a UTC datetime so that it's comparable with a phone datetime
    (like timeEnd, modified_on, etc.)

    returns a timezone-aware date
    """
    phone_tz = _soft_assert_tz_not_string(phone_tz)
    assert value.tzinfo is None
    if not phone_timezones_have_been_processed():
        # Legacy data: shift into the phone's timezone before comparing.
        return _adjust_utc_datetime_to_timezone(value, phone_tz)
    # Already-processed data is stored in UTC; just attach the tzinfo.
    return value.replace(tzinfo=pytz.utc)
def cache_function(length=CACHE_TIMEOUT):
    """
    A variant of the snippet posted by Jeff Wheeler at
    http://www.djangosnippets.org/snippets/109/

    Caches a function, using the function and its arguments as the key, and the return
    value as the value saved. It passes all arguments on to the function, as
    it should.

    The decorator itself takes a length argument, which is the number of
    seconds the cache will keep the result around.

    It will put a temp value in the cache while the function is
    processing. This should not matter in most cases, but if the app is using
    threads, you won't be able to get the previous value, and will need to
    wait until the function finishes. If this is not desired behavior, you can
    remove the first two lines after the ``else``.
    """
    def decorator(func):
        def inner_func(*args, **kwargs):
            if not cache_enabled():
                # Caching disabled: always call straight through.
                value = func(*args, **kwargs)
            else:
                try:
                    value = cache_get('func', func.__name__, func.__module__, args, kwargs)
                except NotCachedError as e:
                    # This will set a temporary value while ``func`` is being
                    # processed. When using threads, this is vital, as otherwise
                    # the function can be called several times before it finishes
                    # and is put into the cache.
                    funcwrapper = CacheWrapper(".".join([func.__module__, func.__name__]), inprocess=True)
                    cache_set(e.key, value=funcwrapper, length=length, skiplog=True)
                    value = func(*args, **kwargs)
                    cache_set(e.key, value=value, length=length)
                except MethodNotFinishedError as e:
                    # Another thread is still computing this entry: recompute
                    # locally instead of waiting on the cache.
                    value = func(*args, **kwargs)
            return value
        return inner_func
    return decorator
import os
import yaml
import logging
def get_yaml_config(file_name):
    """Return a dict parsed from a YAML file within the config directory.

    Args:
        file_name: String that defines the config file name.

    Returns:
        A dict with the parsed YAML content from the config file or
        an empty dict if the file is not found or YAML was unable
        to parse it.
    """
    config_dir = os.path.join(os.path.sep, 'etc', 'timesketch')
    if not os.path.isdir(config_dir):
        return {}
    config_path = os.path.join(config_dir, file_name)
    if not os.path.isfile(config_path):
        return {}
    with open(config_path, 'r') as config_file:
        try:
            return yaml.safe_load(config_file)
        except yaml.parser.ParserError as exception:
            # pylint: disable=logging-format-interpolation
            logging.warning((
                'Unable to read in YAML config file, '
                'with error: {0!s}').format(exception))
            return {}
def plot_contour_1d(X_grid, Y_grid, data,
                    xlabel, ylabel, xticks, yticks,
                    metric_bars, fillblack=True):
    """Create contour plots and return the figure and the axes.

    One filled-contour subplot is drawn per metric, with its colorbar in a
    short second row below it.

    :param X_grid: 2-D x-coordinate grid (as from np.meshgrid)
    :param Y_grid: 2-D y-coordinate grid, same shape as ``X_grid``
    :param data: array of shape (*X_grid.shape, len(metric_bars))
    :param metric_bars: objects supplying ``levels``, ``cmap`` and a
        ``colorbar`` method for each subplot
    :param fillblack: if True, fill the plot background black first
    :return: (fig, axes) tuple
    """
    n = len(metric_bars)
    assert data.shape == (*X_grid.shape, n), (
        "data shape must be (X, Y, M), where (X, Y) is the shape of both X_grid and Y_grid, "
        "and M is the number of metrics"
    )
    # Second row is kept short; it only holds the horizontal colorbars.
    gs_kw = dict(height_ratios=[1, 0.25])
    fig, axes = plt.subplots(ncols=n, nrows=2, gridspec_kw=gs_kw)
    for i in range(n):
        metric_bar = metric_bars[i]
        ax = axes[0, i]
        plt.sca(ax)
        style_plot_limits(xticks, yticks)
        # Only the leftmost subplot keeps its y tick labels.
        yticklabels = i == 0
        xticklabels = True
        if fillblack: fill_plot()
        cp = plt.contourf(X_grid, Y_grid, data[:, :, i],
                          levels=metric_bar.levels,
                          cmap=metric_bar.cmap)
        style_axis(ax, xticks, yticks, xticklabels, yticklabels)
        cbar_ax = axes[1, i]
        plt.sca(cbar_ax)
        metric_bar.colorbar(fig, cbar_ax, cp, shrink=0.8, orientation='horizontal')
        plt.axis('off')
    set_axes_labels(axes[:-1], xlabel, ylabel)
    plt.subplots_adjust(hspace=0.1, wspace=0.1)
    return fig, axes
def dataset_to_rasa(dataset: JsonDict) -> JsonDict:
    """Convert dataset to RASA format, ignoring entities

    See: "https://rasa.com/docs/nlu/dataformat/"
    """
    examples = []
    for intent, text in _dataset_intent_iterator(dataset):
        # Entities are intentionally left empty.
        examples.append({"text": text, "intent": intent, "entities": []})
    return {"rasa_nlu_data": {"common_examples": examples}}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.