content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
import os
def load_tab_c(tables, fname):
    """Load table C (operators) from file 'fname' into 'tables.tab_c'.

    Each non-comment CSV line is expected as:
        Edition, FXY, OperatorName_en, OperationDefinition_en
    The FXY code becomes the integer key; codes ending in "YYY" are
    keyed by their first three digits only.  Returns True on success.

    Raises BufrTableError if 'fname' does not exist.
    """
    if not os.path.exists(fname):
        raise BufrTableError(_text_file_not_found % fname)
    with open(fname, "r") as fh:
        for line in fh:
            # Skip comment lines and lines too short to carry any data.
            if line.startswith('#') or len(line) < 3:
                continue
            el = line.rstrip().split(',')
            # 0 1 2 3
            # Edition, FXY, OperatorName_en, OperationDefinition_en
            d = el[1]
            e = (el[2], el[3])
            if d.endswith("YYY"):
                # Operator with variable YYY part: key on the FXX prefix.
                tables.tab_c[int(d[0:3])] = e
            else:
                tables.tab_c[int(d)] = e
    return True
def create_new_dataset(data, num_schedules):
    """
    Create a dataset where each row is one of the twenty timesteps of a
    schedule alongside the chosen task, plus an index of schedule bounds.

    :param data: pickled source data, passed through to the helpers.
    :param num_schedules: number of schedules to extract.
    :return: (X, Y, schedule_array) where schedule_array[i] holds the
        inclusive [start, end] row indices of schedule i within X / Y.
    """
    X = []
    Y = []
    schedule_array = []
    for i in range(0, num_schedules):
        rand_schedule = i
        timesteps_where_things_scheduled = find_nums_with_task_scheduled_pkl(data, rand_schedule)  # should be of size 20
        # Malformed schedules are skipped entirely so X/Y stay aligned
        # with schedule_array.
        if len(timesteps_where_things_scheduled) != 20:
            print('schedule wrong size, WHY?')
            continue
        if i == 0:
            start = 0
        else:
            start = schedule_array[-1][1] + 1  # end of previous list + 1
        end = start + len(timesteps_where_things_scheduled) - 1
        schedule_array.append([start, end])
        for each_timestep in timesteps_where_things_scheduled:
            input_nn, output = rebuild_input_output_from_pickle(data, i, each_timestep)
            X.append(input_nn)
            Y.append(output)
    return X, Y, schedule_array
import http
import os
def replication_load(options, args):
    """%(prog)s load <server:port> <path>
    Load the contents of a local directory into glance.
    server:port: the location of the glance instance.
    path: a directory on disk containing the data.
    """
    # Make sure server and path are provided
    if len(args) < 2:
        raise TypeError(_("Too few arguments."))
    path = args.pop()
    server, port = utils.parse_valid_host_port(args.pop())
    imageservice = get_image_service()
    client = imageservice(http.HTTPConnection(server, port),
                          options.targettoken)
    updated = []
    # Each UUID-named file in 'path' holds image metadata (JSON); the
    # matching '<uuid>.img' file holds the image payload.
    for ent in os.listdir(path):
        if uuidutils.is_uuid_like(ent):
            image_uuid = ent
            LOG.info(_LI('Considering: %s'), image_uuid)
            meta_file_name = os.path.join(path, image_uuid)
            with open(meta_file_name) as meta_file:
                meta = jsonutils.loads(meta_file.read())
            # Remove keys which don't make sense for replication
            for key in options.dontreplicate.split(' '):
                if key in meta:
                    LOG.debug('Stripping %(header)s from saved '
                              'metadata', {'header': key})
                    del meta[key]
            if _image_present(client, image_uuid):
                # NOTE(mikal): Perhaps we just need to update the metadata?
                # Note that we don't attempt to change an image file once it
                # has been uploaded.
                LOG.debug('Image %s already present', image_uuid)
                headers = client.get_image_meta(image_uuid)
                for key in options.dontreplicate.split(' '):
                    if key in headers:
                        LOG.debug('Stripping %(header)s from target '
                                  'metadata', {'header': key})
                        del headers[key]
                # Only push metadata when it actually differs from the target.
                if _dict_diff(meta, headers):
                    LOG.info(_LI('Image %s metadata has changed'), image_uuid)
                    headers, body = client.add_image_meta(meta)
                    _check_upload_response_headers(headers, body)
                    updated.append(meta['id'])
            else:
                if not os.path.exists(os.path.join(path, image_uuid + '.img')):
                    LOG.debug('%s dump is missing image data, skipping',
                              image_uuid)
                    continue
                # Upload the image itself
                with open(os.path.join(path, image_uuid + '.img')) as img_file:
                    try:
                        headers, body = client.add_image(meta, img_file)
                        _check_upload_response_headers(headers, body)
                        updated.append(meta['id'])
                    except exc.HTTPConflict:
                        # Lost a race with another replicator; not fatal.
                        LOG.error(_LE(IMAGE_ALREADY_PRESENT_MESSAGE)
                                  % image_uuid)  # noqa
    return updated
def init_language():
    """Return the configured language prefix, defaulting to English.

    Reads LANGUAGE from the '!default' section of config.yaml.  A
    trailing backslash is appended so the value can be used directly as
    a path prefix; an empty string means "no language folder".
    """
    yaml_data = read_yaml("config.yaml", '!default')
    if yaml_data:
        lang = yaml_data.get('LANGUAGE')
        return lang + "\\" if lang else ""
    return ""
import tempfile
import os
def temp_checkout(svnrepos, path, rev):
    """Check out one file revision from Subversion into a temporary file.

    :param svnrepos: repository wrapper providing _getroot().
    :param path: repository path of the file.
    :param rev: revision to check out.
    :return: filesystem path of the temporary file holding the contents.
    """
    fd, temp = tempfile.mkstemp()
    fp = os.fdopen(fd, 'wb')
    try:
        root = svnrepos._getroot(rev)
        stream = fs.file_contents(root, path)
        try:
            # Copy the svn stream chunk by chunk until exhausted.
            while 1:
                chunk = core.svn_stream_read(stream, core.SVN_STREAM_CHUNK_SIZE)
                if not chunk:
                    break
                fp.write(chunk)
        finally:
            core.svn_stream_close(stream)
    finally:
        fp.close()
    return temp
def __clean_term__(term, convert_letter = True, w_space = True, is_url=True):
    """
    Prepares an input term to be queried in a url

    Input
    ----------------------------------------------------------------
    term : str
        term to clean
    convert_letter : bool (default True)
        Whether or not to convert letter to greek representation
    w_space : bool (default True)
        keep space in term, removes spaces if false
    is_url : bool (default True)
        Replaces spaces with %20 if true (only when w_space is True)

    Returns
    ----------------------------------------------------------------
    term : str
        same term as input after cleaning process
    """
    term = term.lower().strip()
    term = __greek_letter_converter__(term, convert_letter=convert_letter)
    if w_space:
        if is_url:
            term = term.replace(' ', '%20')  # URL-encode spaces for requests
    else:
        term = term.replace(' ', '')
    return term
from typing import Counter
def merge_vocabs(vocabs, vocab_size=None):
    """
    Merge individual vocabularies (assumed to be generated from disjoint
    documents) into a larger vocabulary.

    Args:
        vocabs: `torchtext.vocab.Vocab` vocabularies to be merged
        vocab_size: `int` the final vocabulary size. `None` for no limit.
    Return:
        `torchtext.vocab.Vocab`
    """
    # Summing Counters adds the per-vocab frequencies together.
    merged = sum([vocab.freqs for vocab in vocabs], Counter())
    return torchtext.vocab.Vocab(merged,
                                 specials=[PAD_WORD, BOS_WORD, EOS_WORD],
                                 max_size=vocab_size)
def find_local_minimum(data, threshold=None):
    """
    Find local minima in data.

    :param data: 1-D array-like input data.
    :param threshold: (optional) local minima whose value is not less than
        threshold won't be selected.
    :return: indices of the selected local minima (1-D array, or a list
        when a threshold is applied).
    """
    local_min_idx = argrelextrema(data, np.less)[0]
    # Compare against None explicitly: a threshold of 0 is a valid value
    # and must not be treated as "no threshold" (the old truthiness test
    # silently ignored threshold=0).
    if threshold is not None:
        local_min_idx = [idx for idx in local_min_idx if data[idx] < threshold]
    return local_min_idx
def mouse_annotations(mouse_file):
    """
    Update and return the bulk-indexing payload for mouse annotations.

    :param mouse_file: path to the newline-delimited annotations file.
    :return: annotations wrapped by prepare_for_bulk_indexing().
    """
    zipped_rows = get_rows_from_file(mouse_file, '\n')
    # Too many processes causes the http requests causes the remote to
    # respond with error, so keep the pool at a single worker.
    pool = mp.Pool(processes=1)
    annotations = pool.map(mouse_single_annotation, zipped_rows)
    return prepare_for_bulk_indexing(annotations)
def thomsen_parameters(vp, vs, rho, lb, dz):
    """
    Thomsen anisotropy parameters from Backus-averaged elastic moduli.

    Liner, C, and T Fei (2006). Layer-induced seismic anisotropy from
    full-wave sonic logs: Theory, application, and validation.
    Geophysics 71 (6), p D183-D190. DOI:10.1190/1.2356997

    Args:
        vp (ndarray): P-wave interval velocity.
        vs (ndarray): S-wave interval velocity.
        rho (ndarray): Bulk density.
        lb (float): The Backus averaging length in m.
        dz (float): The depth sample interval in m.

    Returns:
        namedtuple: delta, epsilon and gamma.
    """
    A, C, F, L, M = backus_parameters(vp, vs, rho, lb, dz)
    delta = ((F + L)**2.0 - (C - L)**2.0) / (2.0 * C * (C - L))
    epsilon = (A - C) / (2.0 * C)
    gamma = (M - L) / (2.0 * L)
    # Field names use the Greek symbols from the paper (valid Py3 identifiers).
    ThomsenParameters = namedtuple('ThomsenParameters', ['δ', 'ε', 'γ'])
    return ThomsenParameters(delta, epsilon, gamma)
def replace_last(source_string, replace_what, replace_with):
    """Replace the last occurrence of a substring in a string.

    :param source_string: the source string
    :type source_string: str
    :param replace_what: the substring to be replaced
    :type replace_what: str
    :param replace_with: the string to be inserted
    :type replace_with: str
    :returns: string with the replacement
    :rtype: str

    :Example:
        >>> import chana.lemmatizer
        >>> chana.lemmatizer.replace_last('piati','ti','ra')
        'piara'
    """
    # rpartition splits on the last occurrence; when there is no match,
    # both 'before' and the separator are empty, so the replacement ends
    # up prepended to the untouched source string.
    before, _found, after = source_string.rpartition(replace_what)
    return ''.join((before, replace_with, after))
def unescape(text):
    """Remove backslash escapes from *text*.

    A backslash suppresses itself and keeps the following character
    verbatim; a lone trailing backslash is dropped.

    >>> unescape(u'abc')
    u'abc'
    >>> unescape(u'\\abc')
    u'abc'
    >>> unescape(u'\\\\abc')
    u'\\abc'
    """
    # Note: We can ditch this and do it in tokenizing if tokenizing
    # returned typed tokens rather than a list of strings.
    chars = iter(text)
    pieces = []
    for ch in chars:
        if ch == u'\\':
            # Consume the escape and emit the next character as-is
            # (nothing if the backslash ends the string).
            pieces.append(next(chars, u''))
        else:
            pieces.append(ch)
    return u''.join(pieces)
def patched_novoed_tasks(mocker):
    """Patched novoed-related tasks.

    :param mocker: pytest-mock style fixture exposing .patch().
    :return: the mock object that replaces applications.models.novoed_tasks.
    """
    return mocker.patch("applications.models.novoed_tasks")
def cart2spherical(x, y, z):
    """
    Converts to spherical coordinates.

    :param x: x-component of the vector
    :param y: y-component of the vector
    :param z: z-component of the vector
    :return: tuple with (r, phi, theta)-coordinates; both angles are
        defined as 0 wherever r == 0
    """
    vec = np.array([x, y, z])
    r = np.sqrt((vec ** 2).sum(0))
    theta = np.arccos(vec[2] / r)
    phi = np.arctan2(vec[1], vec[0])
    # The angles are undefined at the origin; pin them to zero there.
    if vec.ndim == 1:
        if r == 0:
            phi = 0
            theta = 0
    else:
        at_origin = (r == 0)
        phi[at_origin] = 0
        theta[at_origin] = 0
    return r, phi, theta
def get_JD(year=None, month=None, day=None, hour=None, min=None, sec=None,
           string=None, format='yyyy-mm-dd hh:mm:ss', rtn='jd'):
    """compute the Julian Date for the given time input

    :param year: given year between 1901 and 2099
    :param month: month 1-12
    :param day: days
    :param hour: hours
    :param min: minutes
    :param sec: seconds
    :param string: date string with format referencing "format" input;
                   overrides the individual components when given
    :param format: format of string input; currently accepts:
                   'yyyy-mm-dd hh:mm:ss'
                   'dd mmm yyyy hh:mm:ss'
    :param rtn: optional return parameter; jd or mjd (modified julian)
                default=jd
    :return: Julian date, or modified Julian date when rtn='mjd'
    """
    if string:
        # Fixed-position parsing: the string must match the declared format.
        if format == 'yyyy-mm-dd hh:mm:ss':
            year = float(string[:4])
            month = float(string[5:7])
            day = float(string[8:10])
            hour = float(string[11:13])
            min = float(string[14:16])
            sec = float(string[17:19])
        elif format == 'dd mmm yyyy hh:mm:ss':
            months = {'Jan':1, 'Feb':2, 'Mar':3, 'Apr':4, 'May':5,
                      'Jun':6, 'Jul':7, 'Aug':8, 'Sep':9, 'Oct':10,
                      'Nov':11, 'Dec':12}
            year = float(string[7:11])
            month = float(months[string[3:6]])
            day = float(string[:2])
            hour = float(string[12:14])
            min = float(string[15:17])
            sec = float(string[18:20])
    # compute julian date (algorithm valid for years 1901-2099)
    jd = 1721013.5 + 367*year - int(7/4*(year+int((month+9)/12))) \
        + int(275*month/9) + day + (60*hour + min + sec/60)/1440
    if rtn == 'mjd':
        # modified julian date offsets the epoch to 1858-11-17 00:00 UT
        return jd - 2400000.5
    else:
        return jd
def get_final_feature(feature_1, feature_2, metric_list):
    """Get the difference between two features.

    :param feature_1: the first feature
    :type feature_1: numpy array
    :param feature_2: the second feature
    :type feature_2: numpy array
    :param metric_list: the metrics which will be used to compare two
        feature vectors; None means element-wise absolute difference
    :type metric_list: list or None
    :return: the difference between two features, or None if either
        feature is missing
    :rtype: numpy array or None
    """
    if feature_1 is None or feature_2 is None:
        return None
    if metric_list is None:
        return np.abs(feature_1 - feature_2)
    final_feature_list = []
    for metric in metric_list:
        # Distance between the two stacked rows under this metric;
        # [0, 1] picks the off-diagonal (feature_1 vs feature_2) entry.
        distance_matrix = pairwise_distances(np.vstack((feature_1, feature_2)),
                                             metric=metric)
        final_feature_list.append(distance_matrix[0, 1])
    return np.array(final_feature_list)
def removeZeros(infile, outfile, prop=0.5, genecols=2):
    """Remove lines from `infile` in which the proportion of zeros is equal
    to or higher than `prop`. `genecols` is the number of columns containing
    gene identifiers at the beginning of each row. Writes filtered lines
    (and the header, always) to `outfile`.

    :param infile: tab-delimited input file whose first line is a header.
    :param outfile: path the surviving lines are written to.
    :param prop: maximum allowed proportion of zero values (exclusive).
    :param genecols: number of leading identifier columns to skip.
    :return: tuple (rows read, rows written), header excluded.
    """
    nin = 0
    nout = 0
    with open(infile, "r") as f:
        hdr = f.readline()
        ncols = len(hdr.split("\t")) - genecols
        maxzeros = ncols * prop
        with open(outfile, "w") as out:
            out.write(hdr)
            for line in f:
                nin += 1
                fields = line.rstrip("\r\n").split("\t")
                nzeros = sum(1 for v in fields[genecols:] if float(v) == 0)
                if nzeros < maxzeros:
                    out.write(line)
                    nout += 1
    return (nin, nout)
def cuboid(origin, bounds, direction, color=Vec4(1), normal_as_color=NAC):
    """
    Return GeomNode of the cuboid.

    Args:
        origin: center of the cuboid
        bounds: 3-Tuple of length, width and height (half-extents)
        direction: normal vector of the up face
        color: Vec4
        normal_as_color: whether to use vertex normal as color
    """
    # Eight corners: d/u = down/up, f/b = front/back, l/r = left/right.
    dfl = Vec3(-bounds[0], -bounds[1], -bounds[2])
    dfr = Vec3(bounds[0], -bounds[1], -bounds[2])
    dbr = Vec3(bounds[0], bounds[1], -bounds[2])
    dbl = Vec3(-bounds[0], bounds[1], -bounds[2])
    ufl = Vec3(-bounds[0], -bounds[1], bounds[2])
    ufr = Vec3(bounds[0], -bounds[1], bounds[2])
    ubr = Vec3(bounds[0], bounds[1], bounds[2])
    ubl = Vec3(-bounds[0], bounds[1], bounds[2])
    faces = [
        (ufl, ufr, ubr, ubl),  # Up
        (dfl, dbl, dbr, dfr),  # Down
        (dfr, dbr, ubr, ufr),  # Right
        (dfl, ufl, ubl, dbl),  # Left
        (dfl, dfr, ufr, ufl),  # Front
        (dbl, ubl, ubr, dbr),  # Back
    ]
    # NOTE(review): D appears to be a module-level turtle-like placement
    # helper (setup/set_pos_hp_d/pos) -- confirm its contract elsewhere.
    D.setup(origin, direction)
    m = mesh.Mesh('cuboid')
    for f in faces:
        pts = []
        for p in f:
            D.set_pos_hp_d(p.x, p.y, p.z, 0, 0, 0)
            pts.append(m.add_vertex(D.pos, color))
        # Two triangles per quad face; reversed order fixes the winding.
        m.add_triangle(*reversed(pts[:3]))
        m.add_triangle(pts[0], *reversed(pts[2:]))
    return m.export(normal_as_color=normal_as_color)
def fireball_get_HS_dat(cd, fname='HS.dat'):
    """Parse a Fireball HS.dat file from directory `cd`.

    File layout: the first line gives the number of data lines; each data
    line holds 4 orbital indices, a Hamiltonian element, an overlap
    element and 3 Cartesian coordinates.

    :param cd: directory containing the file.
    :param fname: file name (default 'HS.dat').
    :return: (i2aoao, i2h, i2s, i2x) with shapes (n,4), (n,), (n,), (n,3).
    """
    # 'with' guarantees the handle is closed even if parsing fails
    # (the previous explicit open/close leaked on error).
    with open(os.path.join(cd, fname), "r") as f:
        nlines = int(f.readline())
        s = f.readlines()
    assert nlines == len(s)
    i2aoao = np.zeros((nlines, 4), dtype=int)
    i2h = np.zeros((nlines))
    i2s = np.zeros((nlines))
    i2x = np.zeros((nlines, 3))
    for i, line in enumerate(s):
        lspl = line.split()
        i2aoao[i] = list(map(int, lspl[0:4]))
        i2h[i] = float(lspl[4])
        i2s[i] = float(lspl[5])
        i2x[i] = list(map(float, lspl[6:]))
    return i2aoao, i2h, i2s, i2x
def create(db: Session, request: UsersRequestSchema):
    """Creates and persists a new user.

    :param db: active database session.
    :param request: validated payload providing username and group.
    :return: UserModel -- the refreshed instance, including any
        database-generated fields.
    """
    user = UserModel(
        username=request.username,
        group=request.group
    )
    db.add(user)
    db.commit()
    # Refresh so server-side defaults (e.g. the primary key) are populated.
    db.refresh(user)
    return user
def format_timedelta(time):
    """Format a timedelta for use in a columnar format. This just
    tweaks stuff like ``'3 days, 9:00:00'`` to line up with
    ``'3 days, 10:00:00'``
    """
    result = str(strip_microseconds(time))
    parts = result.split()
    # A 7-character time field ('9:00:00') is one column narrower than an
    # 8-character one ('10:00:00'); pad with an extra space so columns
    # align.  (The previous single-space format string was a no-op and
    # never achieved the alignment described above.)
    if len(parts) == 3 and len(parts[-1]) == 7:
        return '%s %s  %s' % tuple(parts)
    else:
        return result
def describe_protein(s1, s2, codon_table=1):
    """Describe the protein-level differences between sequences s1 and s2.

    Runs the C extractor in protein mode and converts each non-identity
    variant (plus any adjacent frame-shift variants) into a ProteinVar,
    collecting them into a ProteinAllele.

    :param s1: reference protein sequence.
    :param s2: sample protein sequence.
    :param codon_table: numeric codon table id passed to the extractor.
    :return: ProteinAllele describing the differences; a ProteinAllele
        containing a single empty ProteinVar when there are none.
    """
    codons = util.codon_table_string(codon_table)
    description = ProteinAllele()
    s1_swig = util.swig_str(s1)
    s2_swig = util.swig_str(s2)
    codons_swig = util.swig_str(codons)
    extracted = extractor.extract(s1_swig[0], s1_swig[1],
        s2_swig[0], s2_swig[1], extractor.TYPE_PROTEIN, codons_swig[0])
    variants = extracted.variants
    #for variant in variants:
    #    print_var(variant)
    #print()
    index = 0
    while index < len(variants):
        if variants[index].type != extractor.IDENTITY:
            variant = variants[index]
            index += 1
            seq_list = AISeqList()
            # NOTE: This is for filling.
            if index < len(variants):
                last_end = variants[index].reference_start
            else:
                last_end = 1000000
            # Fold consecutive frame-shift variants into one sequence list,
            # filling any gaps between them with the literal sample text.
            while (index < len(variants) and
                    variants[index].type & extractor.FRAME_SHIFT):
                if last_end < variants[index].sample_start:
                    seq_list.append(AISeq(
                        s2[last_end:variants[index].sample_start]))
                last_end = variants[index].sample_end
                seq_list.append(AISeq(
                    s2[variants[index].sample_start:
                        variants[index].sample_end],
                    start=variants[index].reference_start + 1,
                    end=variants[index].reference_end,
                    sample_start=variants[index].sample_start + 1,
                    sample_end=variants[index].sample_end,
                    frames=get_frames(variants[index].type)))
                # NOTE: Perhaps use trans_open, trans_close to ...
                index += 1
            if last_end < variant.sample_end:
                seq_list.append(AISeq(s2[last_end:variant.sample_end]))
            var = var_to_protein_var(s1, s2, variant, seq_list,
                                     weight_position=extracted.weight_position)
            description.append(var)
        else:
            index += 1
    if not description:
        return ProteinAllele([ProteinVar()])
    return description
from typing import Any
from typing import Mapping
import logging
import itertools
import inspect
def _convert_gradient_function(
    proto: tf.compat.v1.NodeDef,
    graph: Any,
    library: Mapping[str, _LibraryFunction],
) -> Mapping[str, _LibraryFunction]:
  """Convert a custom_gradient function.

  Looks up the registered TF gradient for the op in `proto`, traces it to
  a concrete function, and converts it (plus any gradients it needs in
  turn) to JAX, returning the new library entries keyed by gradient name.
  """
  op = graph.as_graph_element(proto.name)
  input_specs = tuple([tf.TensorSpec.from_tensor(v) for v in op.inputs])
  grad_fn_name = str(proto.attr["_gradient_op_type"].s, "utf-8")
  # Already converted earlier -- nothing to add.
  if grad_fn_name in library:
    return {}
  @tf.function
  def tf_grad_fn(*grad_args, **grad_kwargs):
    fn = tf_ops.gradient_registry.lookup(grad_fn_name)
    return fn(None, *grad_args, **grad_kwargs)
  concrete_tf_grad_fn = tf_grad_fn.get_concrete_function(*input_specs)
  # Recurse first: the gradient graph may itself contain custom gradients.
  grad_lib = _convert_all_gradient_functions(concrete_tf_grad_fn.graph, library)
  logging.info("Converting gradient function %s", grad_fn_name)
  grad_inputs = concrete_tf_grad_fn.inputs
  grad_captured_inputs = concrete_tf_grad_fn.captured_inputs
  num_flat_args = len(grad_inputs) - len(grad_captured_inputs)
  func_variables = {v.handle.ref(): v for v in concrete_tf_grad_fn.variables}
  # Gradient function can capture tensors in the outer function. Move them
  # into the arguments of the gradient function for conversion to JAX.
  variable_map = {}
  constant_map = {}
  external_capture_specs = []
  internal_capture_names = []
  for inp, cap in zip(grad_inputs[num_flat_args:], grad_captured_inputs):
    if cap.dtype == tf.resource:
      # Resource capture -> a tf.Variable handled via variable_map.
      variable_map[inp.op.name] = func_variables[cap.ref()]
      internal_capture_names.append(inp.op.name)
    elif hasattr(cap, "numpy"):
      # Eager constant -> baked into constant_map.
      constant_map[inp.op.name] = cap.numpy()
      internal_capture_names.append(inp.op.name)
    else:
      # Symbolic tensor -> becomes an extra (external) argument.
      external_capture_specs.append(tf.TensorSpec.from_tensor(cap))
  structured_grad_input_specs = tree.map_structure(tf.TensorSpec.from_tensor,
                                                   concrete_tf_grad_fn.inputs)
  structured_grad_input_specs = (structured_grad_input_specs, {})
  grad_input_specs = input_specs + tuple(external_capture_specs)
  grad_structured_outputs = tuple(
      itertools.dropwhile(lambda x: x is None,
                          concrete_tf_grad_fn.structured_outputs))
  grad_output_specs = tuple([
      tf.TensorSpec.from_tensor(x) for x in grad_structured_outputs
  ])
  # Nones correspond to the outputs of the original function.
  num_fn_outputs = (
      len(concrete_tf_grad_fn.structured_outputs) -
      len(grad_structured_outputs))
  signature = inspect.Signature(
      (inspect.Parameter("grad_args", inspect.Parameter.VAR_POSITIONAL),))
  jax_grad_fn, jax_grad_params = _convert(
      concrete_tf_grad_fn.graph.as_graph_def(),
      signature,
      structured_grad_input_specs,
      grad_output_specs,
      captured_input_names=tuple(internal_capture_names),
      variable_map=variable_map,
      constants=constant_map,
      # Note that dict(**a, **b) will raise TypeError on dupliates, unlike {}.
      library=dict(**library, **grad_lib),
  )
  grad_fn = _LibraryFunction(jax_grad_fn, False, jax_grad_params,
                             grad_input_specs, grad_output_specs,
                             grad_output_specs[:num_fn_outputs])
  return dict(**grad_lib, **{grad_fn_name: grad_fn})
def generate_warp_function(chromatic_consts=None,
                           drift=None,
                           n_dim=3,
                           verbose=True):
    """Generate a spot-translating function.

    :param chromatic_consts: per-dimension polynomial coefficients of the
        chromatic correction; defaults to zeros (no correction).
    :param drift: constant drift vector (first n_dim entries used);
        defaults to zeros.
    :param n_dim: number of spatial dimensions.
    :param verbose: unused here; kept for interface compatibility.
    :return: function mapping coords (n_spots x n_dim, or an n_spots x 11
        3-D fitting result) to corrected coordinates of the same shape.
    """
    ## check inputs
    if chromatic_consts is None:
        _ch_consts = np.zeros([n_dim, 1])
    else:
        _ch_consts = chromatic_consts
    if drift is None:
        _drift = np.zeros(n_dim)
    else:
        _drift = drift[:n_dim]
    def _shift_function(coords, _ch_consts=_ch_consts,
                        _drift=_drift):
        if np.shape(coords)[1] == n_dim:
            _coords = np.array(coords).copy()
        elif np.shape(coords)[1] == 11:  # this means 3d fitting result
            _coords = np.array(coords).copy()[:, 4-n_dim:4]
        else:
            raise ValueError(f"Wrong input coords")
        _shifts = []
        for _idim in range(n_dim):
            _consts = np.array(_ch_consts[_idim])
            # Recover the 2-D polynomial order from the coefficient count.
            # (builtin int: np.int was removed in NumPy 1.24.)
            _ft_order = int(np.sqrt(len(_consts)*2 + 0.25) - 1.5)
            _corr_data = []
            for _order in range(_ft_order + 1):
                for _p in range(_order + 1):
                    _corr_data.append(_coords[:, -2]**_p
                                      * _coords[:, -1]**(_order - _p))
            _shifts.append(np.dot(np.array(_corr_data).transpose(), _consts))
        # generate corrected coordinates
        _corr_coords = _coords - np.stack(_shifts).transpose() - _drift
        # return in the same layout as the input
        if np.shape(coords)[1] == n_dim:
            _output_coords = _corr_coords
        elif np.shape(coords)[1] == 11:  # this means 3d fitting result
            _output_coords = np.array(coords).copy()
            _output_coords[:, 4-n_dim:4] = _corr_coords
        return _output_coords
    # return function
    return _shift_function
import pdb
def Dfunc(sign,k,N,dphi,si,sd,xF=[],F=[],beta=np.pi/2):
    """ UTD wedge diffraction coefficient (Kouyoumjian-Pathak).

    Parameters
    ----------
    sign : int
        +1 | -1
    k : wave number
    N : wedge parameter
    dphi : phi-phi0 or phi+phi0
    si : distance source-D
    sd : distance D-observation
    beta : skew incidence angle
    xF : array
        support of Fresnel function (log10 abscissae).
    F : array
        Values of Fresnel function in regard of support
        if F =[], fresnel function is computed
        otherwise the passed interpolation F is used.
        (xF and F defaults are mutable but never mutated here, so the
        shared-default pitfall does not apply.)

    Reference
    ---------
    [1] KOUYOUMJIAN-PATHAK a uniform geometrical theory of diffraction for an edge
    in a perfectly conducting surface" IEEE AP nov 74 vol 62 N11

    Notes
    -----
            e-jnp.pi/4           1
    Di= ------------------ * ----------- * F(kla)    ([1] eq 25)
        2n*racine(2*np.pi*k)  np.tan(dphi/n)sin(beta)
    """
    cste = (1.0-1.0*1j)*(1.0/(4.0*N*np.sqrt(k*np.pi)*np.sin(beta)))
    rnn = (dphi+np.pi*sign)/(2.0*N*np.pi)
    # nn is the integer that most nearly satisfies 2*pi*N*nn - dphi = +/- pi
    nn = np.zeros(np.shape(rnn))
    nn[rnn>0.5] = 1
    nn[rnn>1.5] = 2
    nn[rnn<-0.5] = -1
    nn[rnn<-1.5] = -2
    # KLA ref[1] eq 27
    L = ((si*sd)*np.sin(beta)**2)/(1.*(si+sd))
    AC = np.cos( (2.0*N*nn*np.pi-dphi) / 2.0 )
    A = 2*AC**2
    KLA = k * L * A
    epsi = AC*2.0
    angle = (np.pi+sign*dphi)/(2.0*N)
    tan = np.tan(angle)
    Di = np.empty(KLA.shape)
    if len(F) == 0:
        Fkla,ys,yL = FreF(KLA)
    else :
        # Interpolate the precomputed transition function on a log10 grid.
        #pxF = 10**xF
        #uF = (np.abs(KLA[:,:]-pxF[:,None,None])).argmin(axis=0)
        val = np.maximum(np.log10(np.abs(KLA))-xF[0,None,None],0)
        uF2 = (len(F)-1)*(val)/(xF[-1,None,None]-xF[0,None,None])
        uF2_int = np.floor(uF2).astype('int')
        Fkla = F[uF2_int]
        #if np.max(Fkla) > 1:
        #    Warning('diffRT : Fkla tab probably wrong')
    # 4.56 Mac Namara
    try:
        Di = -cste*Fkla/tan
    except:
        print('tan=0 : It can happen')
        pdb.set_trace()
    # Near the shadow/reflection boundary tan -> 0; use the limiting form.
    c5 = np.where(np.abs(tan)<1e-9)
    BL = np.ones(Di.shape)*L
    Di[:,c5] = 0.5*np.sqrt(BL[c5])
    # if np.isinf(Di).any():
    #     pdb.set_trace()
    return(Di)
def deprecated(since=nicos_version, comment=''):
    """This is a decorator which can be used to mark functions as deprecated.

    It will result in a warning being emitted when the function is used.

    The parameter ``since`` should contain the NICOS version number on which
    the deprecation starts.

    The ``comment`` should contain a hint to the user, what should be used
    instead.
    """
    def deco(f):
        msg = '%r is deprecated since version %r.' % (f.__name__, since)
        @wraps(f)
        def new_func(*args, **options):
            for l in [msg, comment]:
                session.log.warning(l)
            return f(*args, **options)
        # Functions without a docstring have __doc__ == None; appending to
        # None raised TypeError before, so handle that case explicitly.
        suffix = ' %s %s' % (msg, comment)
        if new_func.__doc__ is None:
            new_func.__doc__ = suffix.strip()
        else:
            new_func.__doc__ += suffix
        return new_func
    return deco
import resource
def create_rlimits():
    """
    Create a list of resource limits for our jailed processes.

    :return: list of (resource.RLIMIT_*, (soft, hard)) tuples suitable
        for resource.setrlimit, driven by the module-level LIMITS dict.
    """
    rlimits = []
    # Allow a small number of subprocess and threads. One limit controls both,
    # and at least OpenBLAS (imported by numpy) requires threads.
    nproc = LIMITS["NPROC"]
    if nproc:
        rlimits.append((resource.RLIMIT_NPROC, (nproc, nproc)))
    # CPU seconds, not wall clock time.
    cpu = LIMITS["CPU"]
    if cpu:
        # Set the soft limit and the hard limit differently. When the process
        # reaches the soft limit, a SIGXCPU will be sent, which should kill the
        # process. If you set the soft and hard limits the same, then the hard
        # limit is reached, and a SIGKILL is sent, which is less distinctive.
        rlimits.append((resource.RLIMIT_CPU, (cpu, cpu+1)))
    # Total process virtual memory.
    vmem = LIMITS["VMEM"]
    if vmem:
        rlimits.append((resource.RLIMIT_AS, (vmem, vmem)))
    # Size of written files. Can be zero (nothing can be written).
    fsize = LIMITS["FSIZE"]
    rlimits.append((resource.RLIMIT_FSIZE, (fsize, fsize)))
    return rlimits
import pathlib
def validate_file(file_path):
    """Coerce `file_path` to a pathlib.Path and verify it is an existing file.

    :param file_path: path string or pathlib.Path.
    :return: the validated pathlib.Path.
    :raises TypeError: if the path does not point to an existing file.
    """
    if not isinstance(file_path, pathlib.Path):
        file_path = pathlib.Path(file_path)
    if not file_path.is_file():
        raise TypeError(
            f"{file_path} is not a valid file. Check path specification."
        )
    return file_path
def save_processed_image_uuid(username, uuid):
    """Updates existing user by adding the uuid of the processed image.

    :param username: user email as string type which serves as user id
    :param uuid: UUID4 of processed image
    :returns: None in all cases; on success the uuid is persisted on the
        user document, on an unknown user nothing is changed
    """
    try:
        user = User.objects.raw({'_id': username}).first()
        user.processed_image = uuid
        user.save()
    except DoesNotExist:
        # Unknown username: silently report failure via None.
        return None
def web_request():
    """Mock web request for views testing.

    :return: whatever web_request_func() produces (a request double).
    """
    return web_request_func()
def _NormalizeDiscoveryUrls(discovery_url):
"""Expands a few abbreviations into full discovery urls."""
if discovery_url.startswith('http'):
return [discovery_url]
elif '.' not in discovery_url:
raise ValueError('Unrecognized value "%s" for discovery url')
api_name, _, api_version = discovery_url.partition('.')
return [
'https://www.googleapis.com/discovery/v1/apis/%s/%s/rest' % (
api_name, api_version),
'https://%s.googleapis.com/$discovery/rest?version=%s' % (
api_name, api_version),
] | f361d01006a6e7f7487e06db375ae703ffde0021 | 31,030 |
def computeAirmass(dec, ha, lat=config['observatory']['latitude'],
                   correct=[75., 10.]):
    """Calculates the airmass for a given declination and HA (in degrees).

    By default, assumes that the latitude of the observation is the one set
    in the configuration file. If correct is defined, abs(HA) angles greater
    than correct[0] are given a flat value correct[1].
    """
    dec = np.atleast_1d(dec)
    ha = np.atleast_1d(ha) % 360.
    # Map hour angles into (-180, 180].  The previous scalar-style test
    # ('if ha > 180:') raised "truth value of an array is ambiguous" for
    # inputs with more than one element; use a boolean mask instead.
    ha[ha > 180] -= 360
    airmass = (np.sin(lat * np.pi / 180.) * np.sin(dec * np.pi / 180.) +
               np.cos(lat * np.pi / 180.) * np.cos(dec * np.pi / 180.) *
               np.cos(ha * np.pi / 180.)) ** (-1)
    if correct is not None:
        airmass[np.abs(ha) > correct[0]] = correct[1]
    # Preserve scalar-in / scalar-out behaviour.
    if len(airmass) == 1:
        return airmass[0]
    else:
        return airmass
def metric_op(metric):
    """Converts Keras metrics into a metric op tuple.

    NOTE: If this method is called in for loop, the runtime is O(n^2). However
    the number of eval metrics at any given time should be small enough that
    this does not affect performance. Any impact is only during graph construction
    time, and therefore has no effect on steps/s.

    Args:
      metric: Either a `tf.keras.metric.Metric` instance or a tuple of Tensor
        value and update op.

    Returns:
      A tuple of metric Tensor value and update op.
    """
    # Already a (value, update_op) tuple: pass through unchanged.
    if not isinstance(metric, tf.keras.metrics.Metric):
        return metric
    vars_to_add = set()
    vars_to_add.update(metric.variables)
    metric = (metric.result(), metric.updates[0])
    # Register the metric's variables so they are initialized/reported.
    _update_variable_collection(tf.GraphKeys.LOCAL_VARIABLES, vars_to_add)
    _update_variable_collection(tf.GraphKeys.METRIC_VARIABLES, vars_to_add)
    return metric
def get_geostore(geostore_id, format='esri'):
    """Make a request to the geostore microservice for a given geostore ID.

    :param geostore_id: identifier of the geostore object.
    :param format: response format requested from the service.
    :return: the microservice response.
    """
    config = {
        'uri': '/geostore/{}?format={}'.format(geostore_id, format),
        'method': 'GET',
    }
    return request_to_microservice(config)
def srmi(df, n=9):
    """
    SRMI / MI 修正指标 srmi(n) -- modified momentum indicator.

    For each row, compare close with the close n rows earlier:
      close > old close -> (close - old close) / close
      close < old close -> (close - old close) / old close
      equal (or no history yet) -> 0
    """
    previous = df.close.shift(n)
    delta = df.close - previous

    def _one(close, prev, d):
        # NaN deltas (first n rows) fail both comparisons and yield 0,
        # matching the "equal" branch.
        if d > 0:
            return d / close
        if d < 0:
            return d / prev
        return 0

    result = pd.DataFrame()
    result['date'] = df.date
    result['srmi'] = [_one(c, p, d)
                      for c, p, d in zip(df.close, previous, delta)]
    return result
def define_pairs(grid: 'np.ndarray'):
    """Take a sequence grid and return all pairs of neighbours.

    Returns a list of dictionaries containing the indices of the pairs
    (4-connected neighbours only), and the corresponding sequence numbers
    (corresponding to the image array).  Each unordered pair appears
    twice, once per direction.
    """
    nx, ny = grid.shape
    # 4-connectivity footprint (no diagonals).
    footprint = np.array([[0, 1, 0],
                          [1, 0, 1],
                          [0, 1, 0]])
    shape = np.array(footprint.shape)
    assert shape[0] == shape[1], 'Axes must be equal'
    assert shape[0] % 2 == 1, 'Axis length must be odd'
    center = shape // 2
    # Offsets of the connected cells relative to the footprint center.
    connected = np.argwhere(footprint == 1) - center
    pairs = []
    for idx0, i0 in np.ndenumerate(grid):
        neighbours = connected + idx0
        for neighbour in neighbours:
            neighbour = tuple(neighbour)
            # Skip neighbours that fall outside the grid bounds.
            if neighbour[0] < 0 or neighbour[0] >= nx:
                pass
            elif neighbour[1] < 0 or neighbour[1] >= ny:
                pass
            else:
                assert i0 == grid[idx0]  # sanity check on ndenumerate
                d = {
                    'seq0': grid[idx0],
                    'seq1': grid[neighbour],
                    'idx0': idx0,
                    'idx1': neighbour,
                }
                pairs.append(d)
    return pairs
def _length_normalization(length_norm_power, length, dtype=tf.float32):
    """Returns the length normalization factor ((5 + length) / 6) ** power.

    This matches the common GNMT-style length penalty used in beam search.
    """
    return tf.pow(((5. + tf.cast(length, dtype)) / 6.), length_norm_power)
def unregistercls(self, schemacls=None, data_types=None):
    """Unregister schema class or associated data_types.

    :param type schemacls: sub class of Schema.
    :param list data_types: data_types to unregister.
    :return: result of the global registry's unregistercls call.
    """
    # Delegates to the module-level registry singleton.
    return _REGISTRY.unregistercls(schemacls=schemacls, data_types=data_types)
def get_relation_mapper() -> RelationMapper:
    """Get the relation mapper. Create and load if necessary.

    Lazily loads the 'reltoid' mapping file into a module-level singleton,
    generating the mapping first when the file does not exist yet.
    """
    global _relation_mapper
    if _relation_mapper is not None:
        return _relation_mapper
    path = mapping_root.joinpath("reltoid")
    if not path.is_file():
        create_mapping()
    _relation_mapper = RelationMapper(__load_mapping(path))
    return _relation_mapper
def griffin_lim(stftm_matrix, max_iter=100):
    """Griffin-Lim: iteratively estimate phases for a magnitude spectrogram.

    :param stftm_matrix: magnitude spectrogram |STFT|.
    :param max_iter: number of refinement iterations.
    :return: time-domain signal whose STFT magnitude approximates the input.
    """
    # Random initial values stand in for the unknown phases.
    stft_matrix = np.random.random(stftm_matrix.shape)
    # NOTE(review): hop_size / window_size / fft_size are module-level
    # globals here, and the positional librosa stft/istft signatures look
    # like an old API version -- verify against the installed librosa.
    y = librosa.core.istft(stft_matrix, hop_size, window_size)
    for i in range(max_iter):
        stft_matrix = librosa.core.stft(y, fft_size, hop_size, window_size)
        # Keep the target magnitudes, retain only the estimated phases.
        stft_matrix = stftm_matrix * stft_matrix / np.abs(stft_matrix)
        y = librosa.core.istft(stft_matrix, hop_size, window_size)
    return y
import os
def strip_pathname1(name):
    """remove ../ and ./ and leading/trailing blanks and path separators
    from input string ``name``, replace any remaining path separator
    with '/', keep only the last part of the path, and strip common
    data-folder decorations (data/Data/DATA, archive suffixes, the
    extraction-folder prefix)."""
    # Drop relative-path fragments and surrounding blanks/separators,
    # then keep only the final path component.
    name = name.replace('..' + os.sep, '').replace('.' + os.sep, '')
    name = name.strip().strip(os.sep).split(os.sep)[-1]
    # Remove decorations in the same order as the original chain.
    for noise in ('data', 'Data', 'DATA', '.tar.gz', '.tgz', '.tar',
                  genericsettings.extraction_folder_prefix):
        name = name.replace(noise, '')
    return name.strip(os.sep).replace(os.sep, '/')
from typing import Union
from typing import Optional
def parse_subtitle_stream_id(input_file: str, input_sid: Union[int, str, None]) -> Optional[int]:
    """Translate the CLI `-s` parameter into a stream index suitable for subtitle_options().

    ``input_sid`` may be None (auto-select the first stream when any
    subtitles exist), an integer index, or a language code matched
    against the file's embedded subtitle streams.
    """
    subtitle_streams = tuple(list_subtitle_streams(input_file))
    external_sub_file = find_sub_file(input_file)
    # Default: pick the first stream if any subtitles (embedded or external) exist.
    if input_sid is None:
        if subtitle_streams or external_sub_file:
            return 0
        return None
    # Numeric selection: non-negative indices pass straight through.
    try:
        stream_index = int(input_sid)
    except ValueError:
        pass  # not a number -- treat it as a language code below
    else:
        if stream_index >= 0:
            return stream_index
        return None
    language = str(input_sid)
    if external_sub_file:
        # external subtitles don't have the necessary metadata
        raise ValueError("matching external subtitles to a language code is not supported")
    ordered_streams = sorted(subtitle_streams, key=itemgetter("index"))
    for index, stream in enumerate(ordered_streams):
        if stream_matches_language(stream, language):
            return index
    raise ValueError("no subtitles found for language: %s" % language)
from web3.middleware import geth_poa_middleware
import os
def setup_web3(config_file, _logger=None):
    """
    Build a Web3 instance connected to the RPC endpoint from EVENTS_RPC.

    :param config_file: unused in this function body -- kept for
        interface compatibility (TODO confirm against callers)
    :param _logger: Logger instance
    :return: web3 instance
    """
    # BUG FIX: the fallback was "http:127.0.0.1:8545" (missing "//"),
    # which is not a valid URL for an RPC provider.
    network_rpc = os.environ.get("EVENTS_RPC", "http://127.0.0.1:8545")
    if _logger:
        _logger.info(
            f"EventsMonitor: starting with the following values: rpc={network_rpc}"
        )
    provider = get_web3_connection_provider(network_rpc)
    web3 = Web3(provider)
    # Rinkeby and other PoA chains need geth's PoA middleware injected
    # at the innermost layer to handle their nonstandard block headers.
    if (
        get_bool_env_value("USE_POA_MIDDLEWARE", 0)
        or get_network_name().lower() == "rinkeby"
    ):
        web3.middleware_onion.inject(geth_poa_middleware, layer=0)
    return web3
from typing import Optional
from typing import Iterable
from typing import Dict
import json
def routes_to_geojson(
    feed: "Feed",
    route_ids: Optional[Iterable[str]] = None,
    *,
    split_directions: bool = False,
    include_stops: bool = False,
) -> Dict:
    """
    Return a GeoJSON FeatureCollection of MultiLineString features representing this Feed's routes.
    The coordinates reference system is the default one for GeoJSON,
    namely WGS84.

    If ``include_stops``, then include the route stops as Point features .
    If an iterable of route IDs is given, then subset to those routes.
    If the subset is empty, then return a FeatureCollection with an empty list of
    features.
    If the Feed has no shapes, then raise a ValueError.
    If any of the given route IDs are not found in the feed, then raise a ValueError.
    """
    # Validate the requested subset before doing any heavy work.
    if route_ids is not None:
        D = set(route_ids) - set(feed.routes.route_id)
        if D:
            raise ValueError(f"Route IDs {D} not found in feed.")
    # Get routes
    g = geometrize_routes(feed, route_ids=route_ids, split_directions=split_directions)
    if g.empty:
        collection = {"type": "FeatureCollection", "features": []}
    else:
        # GeoDataFrame -> GeoJSON string -> dict round-trip.
        collection = json.loads(g.to_json())
    # Get stops if desired
    if include_stops:
        if route_ids is not None:
            # Stops actually served by the selected routes, via stop_times/trips.
            stop_ids = (
                feed.stop_times.merge(feed.trips.filter(["trip_id", "route_id"]))
                .loc[lambda x: x.route_id.isin(route_ids), "stop_id"]
                .unique()
            )
        else:
            # None means "all stops" for stops_to_geojson.
            stop_ids = None
        stops_gj = feed.stops_to_geojson(stop_ids=stop_ids)
        collection["features"].extend(stops_gj["features"])
    return hp.drop_feature_ids(collection)
def read_input():
    """
    Read user input and return state of running the game.

    If the user presses Esc or closes the game window, the main loop
    should stop.

    Returns:
        bool: Should game still be running?
    """
    keep_running = True
    # Drain the pygame event queue and look for exit requests.
    for event in pygame.event.get():
        # event.key only exists on key events, so the type check must
        # come first (short-circuit).
        pressed_escape = event.type == pygame.KEYDOWN and event.key == K_ESCAPE
        closed_window = event.type == QUIT
        if pressed_escape or closed_window:
            keep_running = False
    return keep_running
from dateutil.relativedelta import relativedelta
def add_to_date(date, years=0, months=0, weeks=0, days=0, hours=0, minutes=0, seconds=0, as_string=False, as_datetime=False):
    """Add the given offsets to ``date`` (defaults to "now").

    Args:
        date: datetime/date object, a parseable string, or None
            (None means the current datetime).
        as_string: return a formatted string instead of a date object.
        as_datetime: together with ``as_string``, use the datetime
            format instead of the date format. Implied when ``hours``
            is non-zero or the string contains a time component.

    Returns:
        The shifted date as a datetime/date object, or a formatted
        string when ``as_string`` is set.
    """
    # Idiomatic identity test -- "== None" invokes __eq__ and can
    # misbehave on objects with custom equality.
    if date is None:
        date = now_datetime()
    if hours:
        as_datetime = True
    if isinstance(date, string_types):
        as_string = True
        if " " in date:
            as_datetime = True
        try:
            date = parser.parse(date)
        except ParserError:
            frappe.throw(frappe._("Please select a valid date filter"), title=frappe._("Invalid Date"))
    date = date + relativedelta(years=years, months=months, weeks=weeks, days=days, hours=hours, minutes=minutes, seconds=seconds)
    if as_string:
        if as_datetime:
            return date.strftime(DATETIME_FORMAT)
        else:
            return date.strftime(DATE_FORMAT)
    else:
        return date
def mandel(x, y, max_iters):
    """
    Given the real and imaginary parts of a complex number,
    determine if it is a candidate for membership in the Mandelbrot
    set given a fixed number of iterations.

    Returns the iteration index at which the orbit escaped (|z| >= 2),
    or ``max_iters`` if it never escaped.
    """
    c = complex(x, y)
    z = 0j
    for step in range(max_iters):
        z = z * z + c
        # Compare squared magnitudes to avoid a sqrt per iteration.
        magnitude_sq = z.real * z.real + z.imag * z.imag
        if magnitude_sq >= 4:
            return step
    return max_iters
def update(data: dict) -> None:
    """Update a single API key and archive its previous permissions.

    Args:
        data: Field values to apply. Must contain the key's ``token``,
            which is popped and used only for the lookup.

    Returns:
        None.
    """
    # Set up a query to get the current key object but don't fetch it
    token = data.pop("token")
    key = ApiKey.query.filter_by(token=token)
    # Run the query to get the current object and record it
    # for auditing purposes
    original_info = ApiKey.as_dict(key.first())
    original_info["key_id"] = original_info["_id"]
    del original_info["_id"]
    del original_info["desc"]
    del original_info["token"]
    del original_info["date_created"]
    db.session.add(ApiKeyHistory(**original_info))
    # BUG FIX: "_id" was deleted above, so the original f-strings using
    # original_info['_id'] raised KeyError; use the preserved "key_id".
    current_app.logger.debug(
        f"API key {original_info['key_id']} former permissions archived."
    )
    # Go back to our original object, update it with
    # the key changes, and save everything
    key.update(data)
    db.session.commit()
    current_app.logger.debug(
        f"API key {original_info['key_id']} new permissions recorded."
    )
    return None
def IntegerHeap(i):
    """Return an integer heap for 2^i-bit integers.

    We use a BitVectorHeap for small i and a FlatHeap for large i.
    Timing tests indicate that the cutoff i <= 3 is slightly
    faster than the also-plausible cutoff i <= 2, and that both
    are much faster than the way-too-large cutoff i <= 4.
    The resulting IntegerHeap objects will use 255-bit long integers,
    still small compared to the overhead of a FlatHeap."""
    return BitVectorHeap() if i <= 3 else FlatHeap(i)
def list_sensors(name_pattern=Sensor.SYSTEM_DEVICE_NAME_CONVENTION, **kwargs):
    """
    Generator that enumerates all sensors matching the given criteria.

    Parameters:
        name_pattern: pattern that device name should match, for
            example 'sensor*'. Default value: '*'.
        keyword arguments: matched against the corresponding device
            attributes, for example driver_name='lego-ev3-touch' or
            address=['in1', 'in3']; a list value matches any entry.
    """
    class_path = abspath(Device.DEVICE_ROOT_PATH + '/' + Sensor.SYSTEM_CLASS_NAME)
    matching_names = list_device_names(class_path, name_pattern, **kwargs)
    return (Sensor(name_pattern=device_name, name_exact=True)
            for device_name in matching_names)
def preprocessBoilerPower(df2, key):
    """
    calculates the average boiler power, because we have timestamps where the boiler is turned off/on often not following the capture period
    preprocessing is done as well (set index, interpolation)
    :param df2: dataframe of boiler
    :param key: sensorname
    :returns: new dataframe, in a 5 min grid
    """
    # Pending samples of the current 5-minute bin; seeded with a dummy
    # timestamp/value so the first integral has a defined left edge.
    toProcessDates = [pd.to_datetime('2017-10-15 02:15:02')]
    toProcessValues =[0]
    dataDict = {'DateTime' : [], key:[]}
    firstTime = True
    for idx, row in df2.iterrows():
        # Bin boundary: timestamp rounded up to the next 5-minute mark.
        ceiledTime = row["DateTime"].ceil("300s")
        if not (firstTime or ceiledTime == recentTime):
            #calculate integral
            # Piecewise-constant (left-value) time integral of power over the bin.
            accumVal = 0
            for i in range(1, len(toProcessDates)):
                timeDel = (toProcessDates[i] - toProcessDates[i-1]).seconds
                prevVal = toProcessValues[i-1]
                accumVal += prevVal*timeDel
            # Extend the last sample's value up to the bin's right edge.
            endInterval = toProcessDates[-1].ceil("300s")
            timeDel = (endInterval - toProcessDates[-1]).seconds
            accumVal += toProcessValues[-1]*timeDel
            accumVal /= 300 # P = W /t divide by capture period
            #append to dict for new dataframe
            dataDict["DateTime"].append(endInterval)
            dataDict[key].append(accumVal)
            # Carry the last value over as the left edge of the next bin.
            toProcessDates = [endInterval]
            toProcessValues = [toProcessValues[-1]]
            recentTime = ceiledTime
        firstTime = False
        toProcessDates.append(row["DateTime"])
        toProcessValues.append(row[key])
        recentTime = ceiledTime
    dfBoil = pd.DataFrame(dataDict)
    dfBoil = dfBoil.set_index("DateTime")
    # Re-grid to a fixed 5-minute frequency; interpolate linearly but mask
    # long gaps (mask_knans limits the gap length -- helper defined
    # elsewhere in this module; TODO confirm its semantics).
    dfBoil5min = dfBoil.asfreq("300s")
    dfBoil5min[key] = dfBoil5min[key].interpolate(method='linear').where(mask_knans(dfBoil5min[key], int(interpolationTime*60/300)))
    return dfBoil5min
def ProbLate(pmf):
    """Computes the probability of a birth in Week 41 or later.

    Args:
        pmf: Pmf object

    Returns:
        float probability
    """
    # 50 is an upper cap comfortably past the data's longest pregnancy
    # lengths; presumably ProbRange sums pmf over weeks 41..50 -- confirm
    # its bound conventions (inclusive/exclusive) in its definition.
    return ProbRange(pmf, 41, 50)
def inrange(inval, minimum=-1., maximum=1.):
    """
    Clamp values into the closed interval [minimum, maximum].

    Parameters
    ----------
    inval : array_like
        Input value(s); converted to an ndarray copy.
    minimum : float, optional
        Lower bound (default -1).
    maximum : float, optional
        Upper bound (default 1).

    Returns
    -------
    numpy.ndarray
        Copy of ``inval`` with out-of-range entries set to the bounds.
    """
    # np.clip applies both bounds in a single vectorized pass, replacing
    # the original's two np.where scans plus fancy-index assignments.
    return np.clip(np.array(inval), minimum, maximum)
def default_error(exception=None):
    """Render a plain LTI communication error response (HTTP 500).

    Default handler -- applications should override it with their own
    error page. The exception text is interpolated into the body.
    """
    # pylint: disable=unused-argument
    return HttpResponse("There was an LTI communication error: {}".format(exception), status=500)
def index_for_shop(shop_id, page):
    """List orders for that shop.

    Reads pagination, search and state filters from the request's query
    string and returns the template context for the order list view.
    """
    shop = _get_shop_or_404(shop_id)
    brand = brand_service.get_brand(shop.brand_id)
    per_page = request.args.get('per_page', type=int, default=15)
    search_term = request.args.get('search_term', default='').strip()
    # Maps the query value onto a PaymentState member; unknown or missing
    # values yield None (i.e. no payment-state filtering).
    only_payment_state = request.args.get(
        'only_payment_state', type=PaymentState.__members__.get
    )
    def _str_to_bool(value):
        # Anything other than the literal 'true' counts as False.
        valid_values = {
            'true': True,
            'false': False,
        }
        return valid_values.get(value, False)
    only_overdue = request.args.get('only_overdue', type=_str_to_bool)
    only_processed = request.args.get('only_processed', type=_str_to_bool)
    order_state_filter = OrderStateFilter.find(
        only_payment_state, only_overdue, only_processed
    )
    orders = order_service.get_orders_for_shop_paginated(
        shop.id,
        page,
        per_page,
        search_term=search_term,
        only_payment_state=only_payment_state,
        only_overdue=only_overdue,
        only_processed=only_processed,
    )
    # Attach orderer information to each order tuple for the template.
    orders.items = list(service.extend_order_tuples_with_orderer(orders.items))
    return {
        'shop': shop,
        'brand': brand,
        'per_page': per_page,
        'search_term': search_term,
        'PaymentState': PaymentState,
        'only_payment_state': only_payment_state,
        'only_overdue': only_overdue,
        'only_processed': only_processed,
        'OrderStateFilter': OrderStateFilter,
        'order_state_filter': order_state_filter,
        'orders': orders,
        'render_order_payment_method': _find_order_payment_method_label,
    }
from typing import Tuple
import requests
def create_token(author: str, password: str, token_name: str) -> Tuple[int, str]:
    """
    Create an account verification token.

    This token allows for avoiding HttpBasicAuth for subsequent calls.

    Args:
        author (`str`):
            The account name.
        password (`str`):
            The account password.
        token_name (`str`):
            The name to be given to the token.

    Returns:
        (`Tuple[int, str]`)
            Return the token id and the sha-1.

    Raises:
        (`HTTPError`)
            Raise the error in request.
    """
    url = f'https://hub.towhee.io/api/v1/users/{author}/tokens'
    data = {'name': token_name}
    r = requests.post(url, data=data, auth=HTTPBasicAuth(author, password))
    # raise_for_status raises HTTPError on 4xx/5xx responses; the old
    # "try/except HTTPError as e: raise e" wrapper was a no-op re-raise.
    r.raise_for_status()
    res = r.json()
    token_id = str(res['id'])
    token_sha1 = str(res['sha1'])
    return token_id, token_sha1
def getPositionPdf(i):
    """Return [row, column] of square ``i`` on the pdf page (5 squares per row)."""
    row = int(i / 5)
    column = i % 5
    return [row, column]
def plm_colombo(LMAX, x, ASTYPE=np.float64):
    """
    Computes fully-normalized associated Legendre Polynomials and
    their first derivative using a Standard forward column method

    Arguments
    ---------
    LMAX: Upper bound of Spherical Harmonic Degrees
    x: elements ranging from -1 to 1

    Keyword arguments
    -----------------
    ASTYPE: output variable data type

    Returns
    -------
    plms: fully-normalized Legendre polynomials
    dplms: first differentials of Legendre polynomials
    """
    #-- removing singleton dimensions of x
    x = np.atleast_1d(x).flatten().astype(ASTYPE)
    #-- length of the x array
    jm = len(x)
    #-- verify data type of spherical harmonic truncation
    #-- (np.int/np.float aliases were removed in NumPy 1.24; use builtins)
    LMAX = int(LMAX)
    #-- allocating for the plm matrix and differentials
    plm = np.zeros((LMAX+1,LMAX+1,jm))
    dplm = np.zeros((LMAX+1,LMAX+1,jm))
    #-- u is sine of colatitude (cosine of latitude) so that 0 <= s <= 1
    #-- for x=cos(th): u=sin(th)
    u = np.sqrt(1.0 - x**2)
    #-- update where u==0 to eps of data type to prevent invalid divisions
    u[u == 0] = np.finfo(u.dtype).eps
    #-- Calculating the initial polynomials for the recursion
    plm[0,0,:] = 1.0
    plm[1,0,:] = np.sqrt(3.0)*x
    plm[1,1,:] = np.sqrt(3.0)*u
    #-- calculating first derivatives for harmonics of degree 1
    dplm[1,0,:] = (1.0/u)*(x*plm[1,0,:] - np.sqrt(3)*plm[0,0,:])
    dplm[1,1,:] = (x/u)*plm[1,1,:]
    for l in range(2, LMAX+1):
        for m in range(0, l):#-- Zonal and Tesseral harmonics (non-sectorial)
            #-- Computes the non-sectorial terms from previously computed
            #-- sectorial terms.
            alm = np.sqrt(((2.0*l-1.0)*(2.0*l+1.0))/((l-m)*(l+m)))
            blm = np.sqrt(((2.0*l+1.0)*(l+m-1.0)*(l-m-1.0))/((l-m)*(l+m)*(2.0*l-3.0)))
            #-- if (m == l-1): plm[l-2,m,:] will be 0
            plm[l,m,:] = alm*x*plm[l-1,m,:] - blm*plm[l-2,m,:]
            #-- calculate first derivatives
            flm = np.sqrt(((l**2.0 - m**2.0)*(2.0*l + 1.0))/(2.0*l - 1.0))
            dplm[l,m,:] = (1.0/u)*(l*x*plm[l,m,:] - flm*plm[l-1,m,:])
        #-- Sectorial harmonics
        #-- The sectorial harmonics serve as seed values for the recursion
        #-- starting with P00 and P11 (outside the loop)
        plm[l,l,:] = u*np.sqrt((2.0*l+1.0)/(2.0*l))*np.squeeze(plm[l-1,l-1,:])
        #-- calculate first derivatives for sectorial harmonics
        #-- (np.float128 removed in NumPy 1.24; np.longdouble is the
        #-- portable extended-precision equivalent)
        dplm[l,l,:] = np.longdouble(l)*(x/u)*plm[l,l,:]
    #-- return the legendre polynomials and their first derivative
    return plm,dplm
import logging
def describe_entity_recognizer(
    describe_entity_recognizer_request: EntityRecognizer,
):
    """[Describe a Entity Recognizer Router]

    Args:
        describe_entity_recognizer_request (EntityRecognizer): [Based on Input Schema]

    Raises:
        error: [Error]

    Returns:
        [type]: [Based on Response Model]
    """
    try:
        logging.info(
            f"Describe Entity Recognizer Router: {describe_entity_recognizer_request}"
        )
        # Delegate to the controller layer; the router only logs and re-raises.
        response = ComprehendController().describe_entity_recognizer_controller(
            describe_entity_recognizer_request
        )
        return response
    except Exception as error:
        # Log the failure before propagating it to the framework's error handling.
        logging.error(f"{error=}")
        raise error
def tool_factory(clsname, command_name, base=CommandBase):
    """Factory for WESTPA commands.

    Creates a new subclass of ``base`` named ``clsname`` whose
    ``command_name`` class attribute is set to the given value.
    """
    return type(clsname, (base,), {'command_name': command_name})
from typing import Union
from typing import Sequence
from typing import Any
from typing import Tuple
from typing import cast
from typing import Dict
def plot_single_sd(
    sd: SpectralDistribution,
    cmfs: Union[
        MultiSpectralDistributions,
        str,
        Sequence[Union[MultiSpectralDistributions, str]],
    ] = "CIE 1931 2 Degree Standard Observer",
    out_of_gamut_clipping: Boolean = True,
    modulate_colours_with_sd_amplitude: Boolean = False,
    equalize_sd_amplitude: Boolean = False,
    **kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
    """
    Plot given spectral distribution.

    Parameters
    ----------
    sd
        Spectral distribution to plot.
    cmfs
        Standard observer colour matching functions used for computing the
        spectrum domain and colours. ``cmfs`` can be of any type or form
        supported by the :func:`colour.plotting.filter_cmfs` definition.
    out_of_gamut_clipping
        Whether to clip out of gamut colours otherwise, the colours will be
        offset by the absolute minimal colour leading to a rendering on
        gray background, less saturated and smoother.
    modulate_colours_with_sd_amplitude
        Whether to modulate the colours with the spectral distribution
        amplitude.
    equalize_sd_amplitude
        Whether to equalize the spectral distribution amplitude.
        Equalization occurs after the colours modulation thus setting both
        arguments to *True* will generate a spectrum strip where each
        wavelength colour is modulated by the spectral distribution amplitude.
        The usual 5% margin above the spectral distribution is also omitted.

    Other Parameters
    ----------------
    kwargs
        {:func:`colour.plotting.artist`, :func:`colour.plotting.render`},
        See the documentation of the previously listed definitions.

    Returns
    -------
    :class:`tuple`
        Current figure and axes.

    References
    ----------
    :cite:`Spiker2015a`

    Examples
    --------
    >>> from colour import SpectralDistribution
    >>> data = {
    ...     500: 0.0651,
    ...     520: 0.0705,
    ...     540: 0.0772,
    ...     560: 0.0870,
    ...     580: 0.1128,
    ...     600: 0.1360
    ... }
    >>> sd = SpectralDistribution(data, name='Custom')
    >>> plot_single_sd(sd)  # doctest: +ELLIPSIS
    (<Figure size ... with 1 Axes>, <...AxesSubplot...>)

    .. image:: ../_static/Plotting_Plot_Single_SD.png
        :align: center
        :alt: plot_single_sd
    """
    _figure, axes = artist(**kwargs)
    cmfs = cast(
        MultiSpectralDistributions, first_item(filter_cmfs(cmfs).values())
    )
    # Work on a copy: the interpolator is replaced below.
    sd = cast(SpectralDistribution, sd.copy())
    sd.interpolator = LinearInterpolator
    # Restrict to the wavelength range common to both the CMFS and the SD.
    wavelengths = cmfs.wavelengths[
        np.logical_and(
            cmfs.wavelengths
            >= max(min(cmfs.wavelengths), min(sd.wavelengths)),
            cmfs.wavelengths
            <= min(max(cmfs.wavelengths), max(sd.wavelengths)),
        )
    ]
    values = as_float_array(sd[wavelengths])
    RGB = XYZ_to_plotting_colourspace(
        wavelength_to_XYZ(wavelengths, cmfs),
        CCS_ILLUMINANTS["CIE 1931 2 Degree Standard Observer"]["E"],
        apply_cctf_encoding=False,
    )
    if not out_of_gamut_clipping:
        # Offsetting by the absolute minimum keeps negative (out of gamut)
        # components, giving a smoother, grayer rendering.
        RGB += np.abs(np.min(RGB))
    RGB = normalise_maximum(RGB)
    if modulate_colours_with_sd_amplitude:
        # Safe division guards against a zero maximum amplitude.
        with sdiv_mode():
            RGB *= cast(NDArray, sdiv(values, np.max(values)))[..., np.newaxis]
    RGB = CONSTANTS_COLOUR_STYLE.colour.colourspace.cctf_encoding(RGB)
    if equalize_sd_amplitude:
        values = ones(values.shape)
    margin = 0 if equalize_sd_amplitude else 0.05
    x_min, x_max = min(wavelengths), max(wavelengths)
    y_min, y_max = 0, max(values) + max(values) * margin
    # Invisible polygon under the curve, used below as a clip path so the
    # colour bars only fill the area beneath the distribution.
    polygon = Polygon(
        np.vstack(
            [
                (x_min, 0),
                tstack([wavelengths, values]),
                (x_max, 0),
            ]
        ),
        facecolor="none",
        edgecolor="none",
        zorder=CONSTANTS_COLOUR_STYLE.zorder.background_polygon,
    )
    axes.add_patch(polygon)
    # Slight horizontal overlap between bars hides hairline gaps.
    padding = 0.1
    axes.bar(
        x=wavelengths - padding,
        height=max(values),
        width=1 + padding,
        color=RGB,
        align="edge",
        clip_path=polygon,
        zorder=CONSTANTS_COLOUR_STYLE.zorder.background_polygon,
    )
    axes.plot(
        wavelengths,
        values,
        color=CONSTANTS_COLOUR_STYLE.colour.dark,
        zorder=CONSTANTS_COLOUR_STYLE.zorder.midground_line,
    )
    settings: Dict[str, Any] = {
        "axes": axes,
        "bounding_box": (x_min, x_max, y_min, y_max),
        "title": f"{sd.strict_name} - {cmfs.strict_name}",
        "x_label": "Wavelength $\\lambda$ (nm)",
        "y_label": "Spectral Distribution",
    }
    settings.update(kwargs)
    return render(**settings)
def dur_attributes_to_dur(d_half, d_semiqvr):
    """
    Combine the duration attributes into a single duration value:
    d = 8 * d_hlf + d_sqv -- see eq. (2) of the paper.
    """
    return 8 * d_half + d_semiqvr
import shutil
import traceback
def copy_dir(source, destination):
    """
    Copy a directory tree (skipping '.svn' folders) and return the
    destination path.

    Parameters:
        source (string): source containing root directory path
        destination (string): target root directory path

    Returns:
        destination (string): copied destination path, or None when the
        copy fails (the traceback is printed instead of raised).
    """
    try:
        ignored = shutil.ignore_patterns('.svn')
        shutil.copytree(source, destination, ignore=ignored)
    except Exception:
        # Best-effort: report the failure without propagating it.
        print(traceback.format_exc())
    else:
        return destination
def polyder_vec(p, m):
    """Vectorized polyder: differentiate many same-degree polynomials at once.

    Parameters
    ----------
    p : ndarray, shape(N,M)
        polynomial coefficients. Each row is 1 polynomial, in descending
        powers of x, each column is a power of x
    m : int >=0
        order of derivative

    Returns
    -------
    der : ndarray, shape(N,M)
        polynomial coefficients for derivative in descending order
    """
    order = jnp.asarray(m, dtype=int)
    coeffs = jnp.atleast_2d(p)
    degree = coeffs.shape[1] - 1
    powers = jnp.arange(degree, -1, -1)
    # Falling factorial: x^k -> k!/(k-m)! x^(k-m). Entries with k < m
    # divide by factorial of a negative number (zero) and are masked below.
    scale = factorial(powers) / factorial(powers - order)
    shifted = jnp.roll(scale * coeffs, order, axis=1)
    column_index = jnp.arange(shifted.shape[1])
    # Zero out the leading m columns produced by the roll.
    return jnp.where(column_index < order, 0, shifted)
def normalize_data(data: np.ndarray) -> np.ndarray:
    """
    Subtract the zero point of the time array and remove non-finite samples.

    :param data: Dataset: ``data[0]`` is the time axis, ``data[1]`` the values.
    :return: 2xM array with time shifted to start at 0 and every sample
        whose y value is inf, -inf or NaN dropped.
    """
    x = np.asarray(data[0])
    y = np.asarray(data[1])
    # BUG FIX: the old loop tested "np.nan in y", which is always False
    # because NaN != NaN, so NaNs were never removed despite the
    # docstring. np.isfinite handles inf, -inf and NaN in one pass.
    finite = np.isfinite(y)
    x = x[finite]
    y = y[finite]
    return np.array(((x - x[0]), y))
def _scale_fct_fixed(*args, scale=0):
"""
This is a helper function that is necessary because multiprocessing requires
a picklable (i.e. top-level) object for parallel computation.
"""
return scale | 75eb728f37466aee8664d5fe435d379cf5d7c6f2 | 31,065 |
def authorize(vendor_api_key, user_api_key, client_class=Client):
    """Authorize use of the Leaf Data Systems API
    using an API key and MME (licensee) code.

    This is a shortcut function which instantiates ``client_class``;
    by default :class:`cannlytics.traceability.leaf.Client` is used.

    Returns: `client_class` instance.
    """
    return client_class(vendor_api_key, user_api_key)
import six
def isclass(obj):
    # type: (Any) -> bool
    """
    Evaluate an object for :class:`class` type (ie: class definition, not an instance nor any other type).
    """
    # six.class_types covers old-style classes on Python 2; on Python 3
    # it is simply (type,).
    candidate_types = (type,) + tuple(six.class_types)
    return isinstance(obj, candidate_types)
def filter_score_grouped_pair(post_pair):
    """
    Keep posts whose score is strictly positive.

    :param post_pair: pair of post_id, dict with score, text blocks, and comments
    :return: truthy iff the post has a present, positive score
    """
    post_dict = post_pair[1]
    post_score = post_dict['score']
    # Short-circuits on missing/None/empty scores exactly like the
    # original "score and int(score) > 0" expression.
    return post_score and int(post_score) > 0
def post_nodes():
    """
    .. :quickref: Dev API; Update cluster nodes

    **Developer documentation**

    *Requires admin user.*

    Update the status of cluster nodes specified in the request. The endpoint can be used to notify CC-Server after a
    dead cluster node has been repaired.

    **Example request**

    .. sourcecode:: http

        POST /nodes HTTP/1.1

        {
            "nodes": [{
                "name": "cc-node2"
            }]
        }

    **Example response**

    .. sourcecode:: http

        HTTP/1.1 200 OK
        Vary: Accept
        Content-Type: application/json

        {}
    """
    # Delegate the actual node-state update to the shared request handler.
    return request_handler.post_nodes()
import torch
def to_one_hot(y_tensor, n_dims=None):
    """
    Convert an integer label tensor to its one-hot representation.

    Args:
        y_tensor: integer labels of shape (1, h, w) -- the leading
            dimension is discarded (assumed singleton; TODO confirm).
        n_dims: number of classes; defaults to max(label) + 1.

    Returns:
        Float tensor of shape (1, n_dims, h, w).
    """
    # Resolve the class count once (the original computed it twice).
    if n_dims is None:
        n_dims = int(y_tensor.max() + 1)
    _, h, w = y_tensor.size()
    flat_labels = y_tensor.long().view(-1, 1)
    # Scatter 1.0 at each label's column to build (h*w, n_dims) one-hots.
    one_hot = torch.zeros(flat_labels.size(0), n_dims).scatter_(1, flat_labels, 1)
    # Reshape back to spatial layout and move channels first.
    return one_hot.view(h, w, n_dims).permute(2, 0, 1).unsqueeze(0)
def names():
    """List the names of the available satellites

    Returns:
        List: List of strings with the names of the available satellites
    """
    # Iterating the dict directly yields its keys.
    return sorted(satellites())
import re
def read_exechours(filename, verbose = False):
    """
    Read exechours_SEMESTER.txt file and return columns as '~astropy.table.Table'.

    Parameters
    ----------
    filename : string
        program exec hours text file name.

    Returns
    -------
    progtable : '~astropy.table.Table'
        Program data table

    Columns
    -------
    'prog_ref' : str
        program references
    'alloc_time' : str
        number of hours allocated to program
    'elaps_time' : str
        number of hours of elapsed time
    'notcharged_time' : str
        number of hours not charged
    'partner_time' : str
        number of hours charged to partner
    'prog_time' : str
        number of hours charged to program
    """
    filetext = []
    # Read the file into memory: split lines on commas, strip newlines.
    # (A plain loop replaces the original side-effect list comprehension;
    # the redundant close() inside the with-block is gone too.)
    with open(filename, 'r') as readtext:
        for line in readtext:
            filetext.append(re.sub('\n', '', line).split(','))
    if verbose:
        for line in filetext:
            print(line)
    # The first 3 lines are header text; the rest are data rows.
    rows = np.array(filetext[3:])
    columns = ['prog_ref', 'alloc_time', 'elaps_time', 'notcharged_time', 'partner_time', 'prog_time']
    exechourstable = Table()
    for i, column_name in enumerate(columns):
        exechourstable[column_name] = rows[:, i]
    if verbose:
        print(exechourstable)
    return exechourstable
def is_cond_comment(soup):
    """Test whether an element is an (IE) conditional comment; returns a
    boolean-ish value (False, None, or a regex match object).

    :param soup: a BeautifulSoup of the code to reduce
    :type soup: bs4.BeautifulSoup
    """
    if not isinstance(soup, bs4.element.Comment):
        return False
    return re_cond_comment.search(soup.string)
def sequences_end_with_value(sequences, value, axis=-1):
    """Tests if `sequences` end with `value` along `axis`.

    The (sequence == value) indicator must be non-decreasing along the
    axis: once ``value`` appears, it must continue to the end. Note that
    a sequence not containing ``value`` at all also passes (its
    indicator is constant zero).

    Args:
      sequences: A matrix of integer-encoded sequences.
      value: An integer value.
      axis: Axis of `sequences` to test.

    Returns:
      A boolean `np.ndarray` with one entry per sequence.
    """
    indicator = (np.asarray(sequences) == value).astype(np.int8)
    return np.all(np.diff(indicator, axis=axis) >= 0, axis)
import os
import tty
def _read_config_file(filename, schema):
    """Read a YAML configuration file.

    Returns the parsed (and schema-validated) data, or None when the
    file does not exist or parses to an empty document. Raises
    ConfigFileError when the path is not a readable regular file, the
    YAML is malformed, or the file cannot be read.
    """
    # Ignore nonexisting files.
    if not os.path.exists(filename):
        return None
    elif not os.path.isfile(filename):
        raise ConfigFileError(
            "Invalid configuration. %s exists but is not a file." % filename)
    elif not os.access(filename, os.R_OK):
        raise ConfigFileError("Config file is not readable: %s" % filename)
    try:
        tty.debug("Reading config file %s" % filename)
        with open(filename) as f:
            # _mark_overrides presumably tags override keys in the parsed
            # tree -- defined elsewhere in this module; confirm semantics.
            data = _mark_overrides(syaml.load(f))
            # Empty documents parse to a falsy value and skip validation.
            if data:
                validate(data, schema)
            return data
    except MarkedYAMLError as e:
        # Wrap parser errors with their source position.
        raise ConfigFileError(
            "Error parsing yaml%s: %s" % (str(e.context_mark), e.problem))
    except IOError as e:
        raise ConfigFileError(
            "Error reading configuration file %s: %s" % (filename, str(e)))
def shortest_path_search(start, successors, is_goal):
    """Breadth-first search: find the shortest path from ``start`` to a
    state for which ``is_goal(state)`` is true.

    ``successors(state)`` maps each reachable next state to the action
    that reaches it. The returned path alternates states and actions:
    [state0, action1, state1, ...].
    """
    if is_goal(start):
        return [start]
    explored = set()      # states already queued (start itself excluded)
    frontier = [[start]]  # FIFO of partial paths, shortest first
    while frontier:
        path = frontier.pop(0)
        current = path[-1]
        for next_state, action in successors(current).items():
            if next_state in explored:
                continue
            explored.add(next_state)
            extended = path + [action, next_state]
            # Goal test at generation time keeps the path shortest.
            if is_goal(next_state):
                return extended
            frontier.append(extended)
    # NOTE(review): "Fail" looks like a module-level sentinel from the
    # original course code (Fail = []); as a bare expression the function
    # actually returns None when no path exists -- confirm intent.
    Fail
from typing import Union
from typing import Optional
def to_tensor(X: Union[np.ndarray, tf.Tensor], **kwargs) -> Optional[tf.Tensor]:
    """
    Convert the input to a tf.Tensor.

    Returns
    -------
    tf.Tensor conversion, or None when ``X`` is None.
    """
    if X is None:
        return None
    if isinstance(X, tf.Tensor):
        return X
    return tf.constant(X)
def get_explicit_positions(parsed_str_format):
    """
    Collect the explicit numeric argument positions of a parsed format string.

    >>> parsed = parse_str_format("all/{}/is/{2}/position/{except}{this}{0}")
    >>> get_explicit_positions(parsed)
    {0, 2}
    """
    positions = set()
    for item in parsed_str_format:
        field = item[1]
        # Only purely numeric field names are explicit positions.
        if isinstance(field, str) and field.isnumeric():
            positions.add(int(field))
    return positions
from typing import Optional
import os
from pathlib import Path
def get_git_repo(path_to_repo: OptionalPath=None) -> Optional[git.Repo]:
    """
    Return a git.Repo object. If path_to_repo is None,
    check the environment variable MHEALTH_ROOT, then fall back to
    this source file's great-grandparent directory.

    Returns None (with a warning printed) when no valid repository
    can be located.
    """
    if not path_to_repo:
        path_to_repo = os.getenv("MHEALTH_ROOT")
    if not path_to_repo:
        candidate = Path(__file__).parent.parent.parent.parent
        # BUG FIX: ".exists" without parentheses is a bound method and is
        # always truthy, so the candidate was taken unconditionally.
        if (candidate / ".git").exists():
            path_to_repo = candidate
    if path_to_repo is None:
        print("WARN: The environment variable MHEALTH_ROOT is not set.")
        print("WARN: Some features may not be working properly.")
        return None
    path_to_repo = Path(path_to_repo)
    if path_to_repo.is_file():
        path_to_repo = path_to_repo.parent
    if not path_to_repo.is_dir():
        print("WARN: This is not a valid path: %s" % path_to_repo)
        return None
    try:
        repo = git.Repo(path_to_repo)
    except git.exc.InvalidGitRepositoryError:
        print("WARN: This is not a valid repository: %s" % path_to_repo)
        repo = None
    return repo
def msg_parser_disable(id):
    """
    Disable a Parser
        - Disconnect a Parser from a Channel

    CLI API for shell scripts & to be called by S3Method

    Returns a short status string describing the outcome.
    """
    db = current.db
    s3db = current.s3db
    table = s3db.msg_parser
    record = db(table.id == id).select(table.id, # needed for update_record
                                       table.enabled,
                                       table.channel_id,
                                       table.function_name,
                                       limitby = (0, 1)
                                       ).first()
    if record.enabled:
        # Flag it as disabled
        record.update_record(enabled = False)
    # Do we have an existing Task?
    # (scheduler task args encode the channel id and parser function name)
    ttable = db.scheduler_task
    args = '[%s, "%s"]' % (record.channel_id, record.function_name)
    query = ((ttable.function_name == "msg_parse") & \
             (ttable.args == args) & \
             (ttable.status.belongs(["RUNNING", "QUEUED", "ALLOCATED"])))
    exists = db(query).select(ttable.id,
                              limitby = (0, 1)
                              ).first()
    if exists:
        # Disable all
        db(query).update(status="STOPPED")
        return "Parser disabled"
    else:
        return "Parser already disabled"
def filter_is(field, target):
    """
    Check if a log field equals the specified target value.

    Only the field is lower-cased before comparing (the target is
    expected to already be lower-case), so the check is
    case-insensitive on the field side only. A boolean is returned.
    """
    return field.lower() == target
async def get_server_port(node: Node) -> int:
    """
    Return the port the rosbridge WebSocket server is running on.

    Queries the ``actual_port`` parameter of the running
    ``/rosbridge_websocket`` node; the temporary parameter client is
    destroyed before returning, even on failure.
    """
    client = node.create_client(GetParameters, "/rosbridge_websocket/get_parameters")
    try:
        if not client.wait_for_service(5):
            raise RuntimeError("GetParameters service not available")
        request = GetParameters.Request(names=["actual_port"])
        response = await client.call_async(request)
        return response.values[0].integer_value
    finally:
        node.destroy_client(client)
import sqlite3
def search_vac(search_phrase):
    """Return an HTML link list (as a string) for vacancies matching a phrase.

    Matches ``search_phrase`` as a substring of the stored JSON of the
    100 newest vacancies and builds hh.ru links from their ids.
    Note: LIKE wildcards ('%', '_') inside the phrase are still
    interpreted by SQLite.
    """
    # Parameterized query: interpolating the phrase into the SQL string
    # allowed SQL injection and broke on quotes in the search text.
    sql = ('SELECT * FROM vacancies WHERE json LIKE ? '
           'ORDER BY id DESC LIMIT 100;')
    con = sqlite3.connect("testdb.db")
    try:
        cur = con.cursor()
        cur.execute(sql, ("%{}%".format(search_phrase),))
        vac = cur.fetchall()
    finally:
        # Close even if the query fails (the old code leaked on error).
        con.close()
    data_list = []
    for i in vac:
        data_list.append('<a href="https://hh.ru/vacancy/' + str(i[0]) + '">' + str(i[0]) + '</a>')
    return str(data_list)
from dso.task.regression.regression import RegressionTask
from dso.task.control.control import ControlTask
def make_task(task_type, **config_task):
    """
    Factory function for Task object.

    Parameters
    ----------
    task_type : str
        Type of task:

        "regression" : Symbolic regression task.
        "control" : Episodic reinforcement learning task.
        Any other value is treated as an import path to a custom Task.

    config_task : kwargs
        Task-specific arguments. See specifications of task_dict.

    Returns
    -------
    task : Task
        Task object.
    """
    builtin_tasks = {
        "regression": RegressionTask,
        "control": ControlTask,
    }
    task_class = builtin_tasks.get(task_type)
    if task_class is None:
        # Custom task import
        task_class = import_custom_source(task_type)
        assert issubclass(task_class, Task), \
            "Custom task {} must subclass dso.task.Task.".format(task_class)
    return task_class(**config_task)
def detectar_lenguaje(texto, devolver_proba=False):
    """
    Identify the language the input text is written in.

    :param texto: Input text.
    :type texto: str
    :param devolver_proba: Whether to also return the confidence of the
        identified language. Defaults to ``False``.
    :type devolver_proba: bool, optional
    :return: (str) ISO 639-1 code
        (`ISO 639-1 <https://es.wikipedia.org/wiki/ISO_639-1>`_) of the
        identified language. When ``devolver_proba = True`` a
        ``(code, probability)`` tuple is returned instead.
    """
    identificador = LanguageIdentifier.from_modelstring(model, norm_probs=True)
    resultado = identificador.classify(texto)
    return resultado if devolver_proba else resultado[0]
def get_tablenames():
    """Return the names of all tables in the application database."""
    connection = get_db()
    cursor = connection.cursor()
    select_tn_cmd = "SELECT name FROM sqlite_master WHERE type='table';"
    rows = cursor.execute(select_tn_cmd).fetchall()
    # Each row supports mapping access; pull out the table name column.
    return [row['name'] for row in rows]
def expando_distinct_values(model_class, field_name):
    """Return every distinct value stored for a given expando field.

    Useful for populating search-form widgets.
    """
    content_type = ContentType.objects.get_for_model(model_class)
    matches = Expando.objects.filter(content_type=content_type, key=field_name)
    return matches.distinct().values_list('value', flat=True)
def oconner(w, r):
"""
Optimistic for low reps. Between Lombardi and Brzycki for high reps.
"""
return w * (1 + r/40) | cdcdc44a06e44910361c55217366e3891e76b6a5 | 31,088 |
def unionPart(* partsList):
    """Finds the union of several partitions.

    Concatenates all partitions into one adjacency structure, computes
    its connected components, then strips the integer partition ids that
    part2graph() inserts as bookkeeping nodes.
    """
    mat = part2graph(util.concat(* partsList))
    parts = graph.connectedComponents(mat.keys(), lambda x: mat[x].keys())

    # BUG FIX: on Python 3, map()/filter() return lazy iterators, so the
    # old `map(lambda part: filter(...), parts)` produced a map object of
    # filter objects instead of a list of lists. List comprehensions
    # materialize the result and behave identically on Python 2 and 3.
    parts = [[x for x in part if not isinstance(x, int)] for part in parts]
    return parts
def add_upper_log_level(logger, method_name, event_dict):
    """
    Structlog-style processor: store the calling method's name,
    upper-cased, under the ``"level"`` key of the event dict.
    """
    level = method_name.upper()
    event_dict["level"] = level
    return event_dict
def tanh_cl(a):
    """ Element-wise hyperbolic tangent of a GPUArray.

    Parameters
    ----------
    a : gpuarray
        GPUArray with elements to be operated on.

    Returns
    -------
    gpuarray
        tanh applied to every element of ``a``.

    Examples
    --------
    >>> a = tanh_cl(give_cl(queue, [0, pi/4]))
    [ 0., 0.6557942]

    >>> type(a)
    <class 'pyopencl.array.Array'>

    """
    # Delegates to pyopencl's C-level elementwise tanh kernel.
    result = pyopencl.clmath.tanh(a)
    return result
import logging
import os
def get_logger(log_file=None):
    """
    Initialize global logger and return it.

    Configures the *root* logger with up to three handlers: a rotating
    file handler (only when ``log_file`` is given), a stdout stream
    handler, and an SMTP handler that e-mails ERROR-level records.

    NOTE(review): the SMTP host/credentials below are "..." placeholders
    and must be filled in before the mail handler can actually send.

    :param log_file: log to this file, or to standard output if None
    :return: created logger
    """
    formatter = logging.Formatter(
        fmt='%(asctime)s p%(process)s {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S')
    # Root logger: this configuration is process-global.
    log = logging.getLogger()
    # Guard against repeated calls: if at least two handlers are already
    # attached, assume configuration ran before and return unchanged.
    if len(log.handlers) >= 2:
        return log
    log.setLevel(logging.INFO)
    if log_file is not None:
        os.makedirs(os.path.dirname(log_file), exist_ok=True)
        # Rotate at 30 MB, keeping 3 backups.
        # NOTE(review): assumes RotatingFileHandler is imported from
        # logging.handlers elsewhere in this module -- confirm.
        handler = RotatingFileHandler(
            log_file,
            maxBytes=1024*1024*30,
            backupCount=3)
        handler.setFormatter(formatter)
        log.addHandler(handler)
        # The file handler records DEBUG and above (stdout stays at INFO).
        handler.setLevel(logging.DEBUG)
    handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    handler.setLevel(logging.INFO)
    log.addHandler(handler)
    # NOTE(review): TlsSMTPHandler appears to be a project-local
    # SMTPHandler subclass doing STARTTLS -- confirm its import/signature.
    mail_handler = TlsSMTPHandler(("...", 587), '...', ['...'], '.', ('...', '...'))
    mail_handler.setLevel(logging.ERROR)
    mail_handler.setFormatter(formatter)
    log.addHandler(mail_handler)
    return log
import sys
def local_execute_from_command_line(argv=None, **sandbox_overrides):
    """Execute commands in the local sandbox.

    Falls back to ``sys.argv`` when *argv* is None (or otherwise falsy).
    """
    arguments = argv or sys.argv
    return _execute_from_command_line(sandbox.LOCAL, arguments, **sandbox_overrides)
import time
def sweep_depth(sketch_class):
    """Return a table of shape (len(depth_values), 2) counting number of times the sketch correctly found the heaviest key."""
    # Column 0 counts the median estimator, column 1 the sign-alignment
    # estimator; both are filled in-place by sweep_depth_trial().
    print(sketch_class.__name__)
    successes = np.zeros((len(depth_values), 2))
    deepest = np.max(depth_values)
    print("Depth values", depth_values)
    for trial in range(num_trials):
        trial_seed = rnd_seed + 53 * trial
        t0 = time.perf_counter()
        # Build the sketch at the maximum depth; shallower depths are
        # evaluated by the trial helper.
        sketch = sketch_class(trial_seed, width, deepest, num_keys)
        sweep_depth_trial(sketch, successes)
        print("Depth trial #", trial, "time", time.perf_counter() - t0)
    return successes
def plot_ensemble_results(model, ensemble, expts = None,
                          style='errorbars',
                          show_legend = True, loc = 'upper left',
                          plot_data = True, plot_trajectories = True):
    """
    Plot the fits to the given experiments over an ensemble.

    Note that this recalculates the cost for every member of the ensemble, so
    it may be very slow. Filtering correlated members from the ensemble is
    strongly recommended.

    Inputs:
     model: Model whose results to plot
     ensemble: Parameter ensemble
     expts: List of experiment IDs to plot, if None is specified, all
            experiments are plotted
     style: Style of plot. Currently supported options are:
             'errorbars': Plots points and bars for each data point
             'lines': Plots a continuous line for the data
     show_legend: Boolean that control whether or not to show the legend
     loc: Location of the legend. See help(Plotting.legend) for options.
     plot_data: Boolean that controls whether the data is plotted
     plot_trajectories: Boolean that controls whether the trajectories are
                        plotted

    Returns (lines, labels) for the plotted artists.
    """
    exptColl = model.get_expts()
    nets = model.get_calcs()

    if expts is None:
        expts = exptColl.keys()

    lines, labels = [], []
    cW = ColorWheel()

    # First cost evaluation establishes trajectories; then request a
    # dense 1000-point time grid for the band plots below.
    Network_mod.Network.pretty_plotting()
    model.cost(ensemble[0])

    timepoints = {}
    for netId, net in nets.items():
        traj = getattr(net, 'trajectory', None)
        if traj is not None:
            net.times_to_add = scipy.linspace(traj.timepoints[0],
                                              traj.timepoints[-1], 1000)
    Network_mod.Network.full_speed()

    # Collect scaled trajectories for every ensemble member:
    # results[exptId][netId][dataId] -> list of arrays (one per member).
    results = {}
    for params in ensemble:
        model.cost(params)
        for exptId in expts:
            expt = exptColl[exptId]
            results.setdefault(exptId, {})
            dataByCalc = expt.GetData()
            for netId in dataByCalc.keys():
                results[exptId].setdefault(netId, {})
                # Pull the trajectory from that calculation, defaulting to None
                # if it doesn't exist.
                net = nets.get(netId)
                traj = net.trajectory
                for dataId in dataByCalc[netId].keys():
                    results[exptId][netId].setdefault(dataId, [])
                    scaleFactor = model.GetScaleFactors()[exptId][dataId]
                    result = scaleFactor*traj.get_var_traj(dataId)
                    results[exptId][netId][dataId].append(result)

    for exptId in expts:
        expt = exptColl[exptId]
        dataByCalc = expt.GetData()
        # We sort the calculation names for easier comparison across plots.
        # (sorted() replaces keys().sort(), which fails on Python 3 views.)
        sortedCalcIds = sorted(dataByCalc.keys())
        for netId in sortedCalcIds:
            for dataId, dataDict in dataByCalc[netId].items():
                color, sym, dash = cW.next()
                if plot_data:
                    # Pull the data out of the dictionary and into an array
                    d = scipy.array([[t, v, e] for (t, (v, e))
                                     in dataDict.items()])
                    # BUG FIX: compare strings with ==, not the identity
                    # operator `is`, which is unreliable for str literals.
                    if style == 'errorbars':
                        l = errorbar(d[:,0], d[:,1], yerr=d[:,2], fmt='o',
                                     color=color, markerfacecolor=color,
                                     marker=sym, ecolor='k', capsize=6)[0]
                    elif style == 'lines':
                        # Make sure we order the data before plotting
                        order = scipy.argsort(d[:,0], 0)
                        d = scipy.take(d, order, 0)
                        l = plot(d[:,0], d[:,1], color=color,
                                 linestyle=dash)
                    lines.append(l)

                if plot_trajectories:
                    times = model.get_calcs().get(netId).trajectory.get_times()
                    mean_vals = scipy.mean(results[exptId][netId][dataId], 0)
                    std_vals = scipy.std(results[exptId][netId][dataId], 0)

                    lower_vals = mean_vals - std_vals
                    upper_vals = mean_vals + std_vals

                    # Plot the mean +/- one std-dev band as a filled polygon.
                    xpts = scipy.concatenate((times, times[::-1]))
                    ypts = scipy.concatenate((lower_vals, upper_vals[::-1]))

                    fill(xpts, ypts, fc=color, alpha=0.4)

                # Let's print the pretty name for our variable if we can.
                # BUG FIX: look the network up by the current netId here;
                # previously this reused whichever `net` was left over from
                # the results-collection loop above.
                name = nets.get(netId).get_component_name(dataId)
                labels.append('%s in %s for %s' % (name, netId, exptId))

    for netId, net in nets.items():
        del net.times_to_add

    if show_legend:
        legend(lines, labels, loc=loc)

    for net in nets.values():
        net.times_to_add = None

    return lines, labels
import math
import tqdm
def generate_synthetic_gaps(
    mean: np.ndarray,
    covariance: np.ndarray,
    size: int,
    chunk_size: int,
    threshold: int,
    seed: int
) -> np.ndarray:
    """Return the coordinates of the gaps in a synthetic matrix mask.

    The work is partitioned into chunks of at most ``chunk_size`` rows;
    each chunk is rendered in a worker process with a deterministic seed.

    Parameters
    ------------------
    mean: np.ndarray,
        Mean of biological gaps in considered windows.
    covariance: np.ndarray,
        Covariance of biological gaps in considered windows.
    size: int,
        Total number of rows to generate.
    chunk_size: int,
        Size of the chunk to process per sub-process step.
    threshold: int,
        Threshold used to convert the multivariate gaussian
        distribution to a multivariate binomial.
    seed: int,
        The initial seed to use to render the gaps.

    Returns
    ------------------
    Numpy array of shape (n, 2) containing the gap coordinates of the
    generated matrix mask.
    """
    chunk_count = math.ceil(size / chunk_size)
    tasks = []
    for chunk_index in range(chunk_count):
        offset = chunk_size * chunk_index
        tasks.append({
            "mean": mean,
            "covariance": covariance,
            # The last chunk may be smaller than chunk_size.
            "size": min(chunk_size, size - offset),
            "offset": offset,
            "threshold": threshold,
            "seed": chunk_index + seed
        })
    with Pool(cpu_count()) as pool:
        chunk_results = list(tqdm(
            pool.imap(_generate_synthetic_gaps_wrapper, tasks),
            total=len(tasks),
            desc="Generating synthetic gaps",
            leave=False,
            dynamic_ncols=True
        ))
        indices = np.vstack(chunk_results)
        pool.close()
        pool.join()
    return indices
import argparse
import getpass
def get_args():
    """Get command line args from the user.

    Prompts interactively for the password when -p/--password is not
    supplied on the command line.

    :return: argparse.Namespace with host, port, user, password, name
             and workdir attributes.
    """
    parser = argparse.ArgumentParser(
        description='arguments for interacting with vsphere')

    parser.add_argument('-s', '--host',
                        required=True,
                        action='store',
                        help='vSphere service to connect to')

    parser.add_argument('-o', '--port',
                        type=int,
                        default=443,
                        action='store',
                        help='Port to connect on.')

    parser.add_argument('-u', '--user',
                        required=True,
                        action='store',
                        help='User name to use when connecting to host')

    parser.add_argument('-p', '--password',
                        required=False,
                        action='store',
                        help='Password to use when connecting to host')

    parser.add_argument('-n', '--name',
                        required=True,
                        action='store',
                        help='The name of the VM to export.')

    parser.add_argument('-w', '--workdir',
                        required=True,
                        action='store',
                        help='Working directory. Must have write permission.')

    args = parser.parse_args()

    if not args.password:
        # BUG FIX: `getpass` is the imported *module* here; calling it
        # raised TypeError. The prompt function is getpass.getpass().
        args.password = getpass.getpass(prompt='Enter password: ')

    return args
def ulmfit_document_classifier(*, model_type, pretrained_encoder_weights, num_classes,
                               spm_model_args=None, fixed_seq_len=None,
                               with_batch_normalization=False, activation='softmax'):
    """
    Document classification head as per the ULMFiT paper:
    - AvgPool + MaxPool + Last hidden state (concat pooling)
    - BatchNorm (optional, controlled by `with_batch_normalization`)
    - 2 FC layers

    Returns a (keras model, hub object) tuple; the hub object is None
    unless the encoder was restored from a serialized SavedModel.
    """
    ######## VERSION 1: ULMFiT last state built from Python code - pass the path to a weights directory
    if model_type == 'from_cp':
        ulmfit_rnn_encoder = ulmfit_rnn_encoder_native(pretrained_weights=pretrained_encoder_weights,
                                                       spm_model_args=spm_model_args,
                                                       fixed_seq_len=fixed_seq_len,
                                                       also_return_spm_encoder=False)
        hub_object=None
    ######## VERSION 2: ULMFiT last state built from a serialized SavedModel - pass the path to a directory containing 'saved_model.pb'
    elif model_type == 'from_hub':
        ulmfit_rnn_encoder, hub_object = ulmfit_rnn_encoder_hub(pretrained_weights=pretrained_encoder_weights,
                                                                spm_model_args=None,
                                                                fixed_seq_len=fixed_seq_len,
                                                                also_return_spm_encoder=False)
    else:
        # BUG FIX: this branch referenced an undefined name `args`,
        # raising NameError instead of the intended ValueError.
        raise ValueError(f"Unknown model type {model_type}")
    # Ragged tensors are used for variable-length input; fixed_seq_len
    # switches to the dense pooler.
    if fixed_seq_len is None:
        rpooler = RaggedConcatPooler(name="RaggedConcatPooler")(ulmfit_rnn_encoder.output)
    else:
        rpooler = ConcatPooler(name="ConcatPooler")(ulmfit_rnn_encoder.output)
    if with_batch_normalization is True:
        bnorm1 = tf.keras.layers.BatchNormalization(epsilon=1e-05, momentum=0.1, scale=False, center=False)(rpooler)
        drop1 = tf.keras.layers.Dropout(0.4)(bnorm1)
    else:
        drop1 = tf.keras.layers.Dropout(0.4)(rpooler)
    fc1 = tf.keras.layers.Dense(50, activation='relu')(drop1)
    if with_batch_normalization is True:
        bnorm2 = tf.keras.layers.BatchNormalization(epsilon=1e-05, momentum=0.1, scale=False, center=False)(fc1)
        drop2 = tf.keras.layers.Dropout(0.1)(bnorm2)
    else:
        drop2 = tf.keras.layers.Dropout(0.1)(fc1)
    fc_final = tf.keras.layers.Dense(num_classes, activation=activation)(drop2)
    document_classifier_model = tf.keras.models.Model(inputs=ulmfit_rnn_encoder.inputs, outputs=fc_final)
    return document_classifier_model, hub_object
def is_zero_dict(dict):
    """
    Return True when the feature vector has no truthy feature values.

    An empty dict counts as a zero vector. (The parameter shadows the
    builtin `dict`; the name is kept for interface compatibility.)
    """
    # any() short-circuits on the first truthy value, matching the
    # original accumulate-with-`or` loop.
    return not any(dict.values())
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.