content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
from typing import Dict
def _codify_quantitative_input_by_abs_val(
    df: pd.DataFrame,
    threshold: float,
    p_value: float,
) -> Dict[str, int]:
    """Codify nodes with | logFC | if they pass threshold, otherwise score is 0.

    Mutates ``df`` in place by writing the SCORE column, then returns a
    mapping of node label -> score.

    NOTE(review): scores are |logFC| floats (or 0), so the annotated
    ``Dict[str, int]`` return type looks inaccurate -- confirm and consider
    ``Dict[str, float]``.
    """
    # Codify nodes with | logFC | if they pass threshold
    # (.loc assignment aligns the full |logFC| series on the masked index)
    df.loc[(df[LOG_FC]).abs() >= threshold, SCORE] = (df[LOG_FC]).abs()
    # Codify nodes with score 0 if it falls below threshold
    df.loc[(df[LOG_FC]).abs() < threshold, SCORE] = 0
    # LogFC and adjusted p-values are provided in dataset
    if P_VALUE in df.columns:
        # Disregard entities if logFC adjusted p-value is not significant
        return _remove_non_significant_entities(df, p_value)
    # No p-value column: map each node label directly to its score.
    return df.set_index(LABEL)[SCORE].to_dict() | 0baaf3a58539f5be2a34d41e553b356e0b4df883 | 30,300 |
from typing import Optional
def labor_day(date: dt.date) -> Optional[str]:
"""First Monday in September"""
if not is_nth_day(date, 0, 0, 9):
return None
return "Happy Memorial Day. You can wear white again" | f03746c741ba60c18fa6d9254b1a8d80a7aa3437 | 30,301 |
def vgg16(reparametrized=False, **kwargs):
    """VGG 16-layer model (configuration "D")

    Args:
        reparametrized (bool): If True, build the layers in their
            reparametrized form (forwarded to ``make_layers``).
        **kwargs: Additional keyword arguments forwarded to the ``VGG``
            constructor.

    Returns:
        The constructed ``VGG`` model instance (no pre-trained weights are
        loaded here).
    """
    # NOTE(review): the previous docstring documented a ``pretrained`` flag
    # that this function does not accept.
    model = VGG(make_layers(cfg['D'], reparametrized=reparametrized), **kwargs)
    return model | b1c7e4b98fdb25bce70e33865405232b25e17118 | 30,302 |
def logout(request):
    """
    Log the current user out and redirect to the site root.

    :param request: the incoming HTTP request carrying the session to end.
    :return: an HTTP redirect response to '/'.
    """
    auth_logout(request)
    return redirect('/') | d5d85ff49c36e81557bee83403046e5ec22f78ee | 30,303 |
def vec3d_rand_corners(corner1, corner2):
    """ Sample one R3 point from the AABB
    defined by 'corner1' and 'corner2' """
    # Edge vector of the box along each axis (may be negative when the
    # corners are not min/max ordered; the sample still lies between them).
    span = np.subtract(corner2, corner1)
    # vec_random(3) presumably returns three uniform samples in [0, 1)
    # -- TODO confirm against its definition.
    sample = vec_random(3)
    # Per-axis linear interpolation: corner1 + t * (corner2 - corner1).
    return [corner1[0]+span[0]*sample[0],
            corner1[1]+span[1]*sample[1],
            corner1[2]+span[2]*sample[2]] | b8e87a869545476d8fce25bfd74c4d29138c93d8 | 30,304 |
import sys
def exception_in_stack():
    """Return true if we are currently in the process of handling an exception, ie one has been caught in a try block.
    https://docs.python.org/3/library/sys.html#sys.exc_info
    """
    # sys.exc_info() is (None, None, None) outside exception handling, so a
    # non-None first element (the exception type) means we are inside one.
    return sys.exc_info()[0] is not None | 71f2076c956fa3bb92751778c29537df7bceac35 | 30,305 |
import os
import csv
import re
def validate_terminology(table, gecko_labels):
    """Validate an IHCC mapping table.

    Parameters
    ----------
    table : str
        Path to a tab-separated mapping table whose header row must match
        the module-level ``expected_headers``.
    gecko_labels : collection
        The set of valid GECKO category labels.

    Returns
    -------
    tuple
        ``(lines, problems)`` where ``lines`` is the list of parsed row
        dicts and ``problems`` is a list of problem dicts (table, cell,
        level, rule, message, ...). Returns ``(None, problems)`` when the
        headers are invalid and validation cannot continue.
    """
    basename = os.path.splitext(os.path.basename(table))[0].capitalize()
    problems = []
    # NOTE(review): problem_count mirrors len(problems) but is never read
    # or returned -- confirm whether it can be dropped.
    problem_count = 0
    # label -> locs
    labels = {}
    # loc -> parent_term
    parent_terms = {}
    lines = []
    with open(table, "r") as f:
        reader = csv.DictReader(f, delimiter="\t")
        # Validate headers
        headers = reader.fieldnames
        headers_valid = True
        col_idx = 0
        for h in headers:
            if col_idx < len(expected_headers):
                matching_header = expected_headers[col_idx]
                if h != matching_header:
                    headers_valid = False
                    problem_count += 1
                    problems.append(
                        {
                            "table": basename,
                            "cell": idx_to_a1(1, col_idx + 1),
                            "level": "error",
                            "rule": "Invalid header",
                            "suggestion": matching_header,
                            "message": f"This column ({h}) should be '{matching_header}'",
                        }
                    )
            else:
                # More columns than expected: every extra column is an error.
                headers_valid = False
                problem_count += 1
                problems.append(
                    {
                        "table": basename,
                        "cell": idx_to_a1(1, col_idx + 1),
                        "level": "error",
                        "rule": "Invalid header",
                        "message": f"This column ({h}) should be empty",
                    }
                )
            col_idx += 1
        if not headers_valid:
            print(
                "\nERROR: Unable to complete validation due to invalid headers\n"
            )
            return None, problems
        # Validate contents
        # Row numbering starts at 2 because row 1 is the header row.
        row_idx = 2
        for row in reader:
            lines.append(row)
            # Validate that the term ID exists and matches a numeric pattern
            term_id = row["Term ID"]
            if not term_id or term_id.strip() == "":
                problem_count += 1
                problems.append(
                    {
                        "table": basename,
                        "cell": idx_to_a1(row_idx, 1),
                        "level": "error",
                        "rule": "Missing term ID",
                        "message": "run the automated_mapping script to assign term IDs",
                    }
                )
            elif not re.match(r"[A-Z]+:[0-9]{7}", term_id):
                # NOTE(review): re.match anchors only at the start, so IDs
                # with trailing characters (e.g. "FOO:00000201x" or 8+
                # digits) still pass -- consider re.fullmatch if the ID must
                # be exactly COHORT:NNNNNNN.
                problem_count += 1
                problems.append(
                    {
                        "table": basename,
                        "cell": idx_to_a1(row_idx, 1),
                        "level": "error",
                        "rule": "Invalid term ID",
                        "message": "the term ID must follow the pattern COHORT:num_id where "
                        "num_id has 7 digits (e.g., FOO:0000020)",
                    }
                )
            # Add label to labels map
            label = row["Label"]
            if label in labels:
                locs = labels[label]
            else:
                locs = []
            locs.append(idx_to_a1(row_idx, 2))
            labels[label] = locs
            # Add parent to parent_terms map
            parent_terms[idx_to_a1(row_idx, 3)] = row["Parent Term"].strip()
            # Check that GECKO category is valid
            gecko_cat = row["GECKO Category"].strip()
            if gecko_cat != "":
                # Multiple categories may be pipe-separated; validate each.
                for gc in gecko_cat.split("|"):
                    if gc not in gecko_labels:
                        problem_count += 1
                        problems.append(
                            {
                                "table": basename,
                                "cell": idx_to_a1(row_idx, 5),
                                "level": "error",
                                "rule": "Invalid GECKO category",
                                "message": "select a valid GECKO category",
                            }
                        )
            row_idx += 1
    # Validate labels: each duplicated label is reported once per occurrence,
    # cross-referencing the other cells holding the same label.
    duplicates = {k: v for k, v in labels.items() if len(v) > 1}
    if duplicates:
        for label, locs in duplicates.items():
            for loc in locs:
                problem_count += 1
                other_locs = ", ".join([x for x in locs if x != loc])
                problems.append(
                    {
                        "table": basename,
                        "cell": loc,
                        "level": "error",
                        "rule": "Duplicate label",
                        "message": f"update this label ({label}) & label(s) in cell(s): {other_locs}",
                    }
                )
    # Validate parent terms: every (pipe-separated) parent must be a label
    # defined somewhere in this same table.
    for loc, parent_term in parent_terms.items():
        if parent_term == "":
            continue
        for pt in parent_term.split("|"):
            if pt not in labels.keys():
                problem_count += 1
                problems.append(
                    {
                        "table": basename,
                        "cell": loc,
                        "level": "error",
                        "rule": "Invalid parent term",
                        "message": f"make sure that the parent term ({pt}) is a label listed "
                        "in the label column of this table",
                    }
                )
    return lines, problems | 5a3bfc7337ad489acd08f0af64a089848149b0e8 | 30,306 |
def meta_body():
    """Ugoira page data.

    Canned Pixiv ugoira_meta API response (a raw JSON string) used as a test
    fixture: 77 frames at 70 ms each plus the 600x600 and 1920x1080 zip URLs.

    NOTE(review): the literal below appears line-wrapped mid-token by the
    extraction process; kept byte-identical here -- confirm against the
    original source before running.
    """
    return '{"error":false,"message":"","body":{"src":"https:\/\/i.pximg.net\/img-zip-ugoira\/img\/2019\/04\/29\/16\/09\/38\/74442143_ugoira600x600.zip","originalSrc":"https:\/\/i.pximg.net\/img-zip-ugoira\/img\/2019\/04\/29\/16\/09\/38\/74442143_ugoira1920x1080.zip","mime_type":"image\/jpeg","frames":[{"file":"000000.jpg","delay":70},{"file":"000001.jpg","delay":70},{"file":"000002.jpg","delay":70},{"file":"000003.jpg","delay":70},{"file":"000004.jpg","delay":70},{"file":"000005.jpg","delay":70},{"file":"000006.jpg","delay":70},{"file":"000007.jpg","delay":70},{"file":"000008.jpg","delay":70},{"file":"000009.jpg","delay":70},{"file":"000010.jpg","delay":70},{"file":"000011.jpg","delay":70},{"file":"000012.jpg","delay":70},{"file":"000013.jpg","delay":70},{"file":"000014.jpg","delay":70},{"file":"000015.jpg","delay":70},{"file":"000016.jpg","delay":70},{"file":"000017.jpg","delay":70},{"file":"000018.jpg","delay":70},{"file":"000019.jpg","delay":70},{"file":"000020.jpg","delay":70},{"file":"000021.jpg","delay":70},{"file":"000022.jpg","delay":70},{"file":"000023.jpg","delay":70},{"file":"000024.jpg","delay":70},{"file":"000025.jpg","delay":70},{"file":"000026.jpg","delay":70},{"file":"000027.jpg","delay":70},{"file":"000028.jpg","delay":70},{"file":"000029.jpg","delay":70},{"file":"000030.jpg","delay":70},{"file":"000031.jpg","delay":70},{"file":"000032.jpg","delay":70},{"file":"000033.jpg","delay":70},{"file":"000034.jpg","delay":70},{"file":"000035.jpg","delay":70},{"file":"000036.jpg","delay":70},{"file":"000037.jpg","delay":70},{"file":"000038.jpg","delay":70},{"file":"000039.jpg","delay":70},{"file":"000040.jpg","delay":70},{"file":"000041.jpg","delay":70},{"file":"000042.jpg","delay":70},{"file":"000043.jpg","delay":70},{"file":"000044.jpg","delay":70},{"file":"000045.jpg","delay":70},{"file":"000046.jpg","delay":70},{"file":"000047.jpg","delay":70},{"file":"000048.jpg","delay":70},{"file":"000049.jpg","delay":70},{"file":"000050.jpg","delay":70},{"file":"000051.j
pg","delay":70},{"file":"000052.jpg","delay":70},{"file":"000053.jpg","delay":70},{"file":"000054.jpg","delay":70},{"file":"000055.jpg","delay":70},{"file":"000056.jpg","delay":70},{"file":"000057.jpg","delay":70},{"file":"000058.jpg","delay":70},{"file":"000059.jpg","delay":70},{"file":"000060.jpg","delay":70},{"file":"000061.jpg","delay":70},{"file":"000062.jpg","delay":70},{"file":"000063.jpg","delay":70},{"file":"000064.jpg","delay":70},{"file":"000065.jpg","delay":70},{"file":"000066.jpg","delay":70},{"file":"000067.jpg","delay":70},{"file":"000068.jpg","delay":70},{"file":"000069.jpg","delay":70},{"file":"000070.jpg","delay":70},{"file":"000071.jpg","delay":70},{"file":"000072.jpg","delay":70},{"file":"000073.jpg","delay":70},{"file":"000074.jpg","delay":70},{"file":"000075.jpg","delay":70},{"file":"000076.jpg","delay":70}]}}' | abf9e01371938467b12721373a0e5fc8fb926016 | 30,307 |
def anatomical_traverse_bids(bids_layout,
                             modalities='anat',
                             subjects=None,
                             sessions=None,
                             extension=('nii', 'nii.gz', 'json'),
                             param_files_required=False,
                             **kwargs):
    """
    Builds a convenient dictionary of usable anatomical subjects/sessions.

    Parameters
    ----------
    bids_layout : presumably a pybids ``BIDSLayout`` -- TODO confirm.
    modalities : datatype filter passed to ``bids_layout.get`` ('anat').
    subjects, sessions : optional subject/session filters.
    extension : file extensions queried.
    param_files_required : bool
        When True, subjects/sessions without a sidecar .json are skipped.
    **kwargs : extra filters merged into the layout query.

    Returns
    -------
    dict
        Maps subject (or 'subject_session') id to
        ``{'params': <json path or 'None'>, 'image': <nii/nii.gz path>}``.
        NOTE(review): returns the tuple ``(None, None)`` when the initial
        query is empty but a single dict otherwise -- inconsistent return
        arity; confirm what callers expect.
    """
    meta_types = {'datatype' : modalities,
                  'extension' : extension,
                  'subjects' : subjects,
                  'sessions' : sessions}
    meta_types.update(kwargs)
    # Drop falsy filters so the layout query only constrains what was given.
    non_empty_types = {type_: values for type_, values in meta_types.items() if values}
    # __FIELDS_TO_IGNORE__ = ('filename', 'modality', 'type')
    # __TYPES__ = ['subjects', 'sessions',]
    results = bids_layout.get(**non_empty_types)
    if len(results) < 1:
        print('No results found!')
        return None, None
    all_subjects = bids_layout.get_subjects()
    all_sessions = bids_layout.get_sessions()
    # NOTE(review): a dataset with exactly one session is treated as
    # session-less here (> 1, not >= 1) -- confirm that is intended.
    if len(all_sessions) > 1:
        sessions_exist = True
        combinations = product(all_subjects, all_sessions)
    else:
        sessions_exist = False
        combinations = all_subjects
    reqd_exts_params = ('.json', )
    named_exts_params = ('params', )
    # splitext('x.nii.gz') yields '.gz', so '.gz' stands in for .nii.gz here.
    reqd_exts_images = ('.nii', '.gz')
    # named_exts_images = ('image', 'image')
    files_by_id = dict()
    for sub in combinations:
        if sessions_exist:
            # sub is a tuple of subject,session
            results = bids_layout.get(subject=sub[0], session=sub[1],
                                      datatype='anat')
            final_sub_id = '_'.join(sub)
        else:
            results = bids_layout.get(subject=sub, datatype='anat')
            final_sub_id = sub
        # Map last extension component -> absolute file path.
        temp = {splitext(file.filename)[-1] : realpath(file.path)
                for file in results}
        param_files_exist = all([file_ext in temp for file_ext in reqd_exts_params])
        image_files_exist = any([file_ext in temp for file_ext in reqd_exts_images])
        if param_files_required and (not param_files_exist):
            print('parameter files are required, but do not exist for {}'
                  ' - skipping it.'.format(sub))
            continue
        if not image_files_exist:
            print('Image file is required, but does not exist for {}'
                  ' - skipping it.'.format(sub))
            continue
        files_by_id[final_sub_id] = dict()
        # only when all the files required exist, do we include it for review
        # adding parameter files, only if they exist
        if param_files_exist:
            files_by_id[final_sub_id] = {new_ext: temp[old_ext]
                                         for old_ext, new_ext in
                                         zip(reqd_exts_params, named_exts_params)}
        else:
            # Sentinel string 'None' (not the None object) marks missing params.
            files_by_id[final_sub_id]['params'] = 'None'
        # adding the image file
        files_by_id[final_sub_id]['image'] = \
            temp['.nii'] if '.nii' in temp else temp['.gz']
    return files_by_id | cb48c4af0a4cf2969cbb291daf980d0556989e85 | 30,308 |
def get_email_subscriptions(email):
    """Verifies which email subscriptions exist for the provided email

    Parameters
    ----------
    email : str
        The email to check subscriptions for

    Returns
    -------
    dict
        {'queries': [(english description, query type, query hash), ...],
         'models': [(model id, human readable name), ...]}
    """
    user_queries = db.get_subscribed_queries(email)
    user_models = db.get_user_models(email)
    # Cache model id -> human readable name so each S3 config is loaded once.
    model_full_names = {}
    for qo, mid, dh in user_queries:
        if mid not in model_full_names:
            config = load_config_from_s3(mid)
            model_full_names[mid] = config.get('human_readable_name', mid)
    for mid in user_models:
        if mid not in model_full_names:
            config = load_config_from_s3(mid)
            model_full_names[mid] = config.get('human_readable_name', mid)
    results = {
        'queries': [(qo.to_english() + f' for model {model_full_names[mid]}',
                     f'{qo.get_type()}'.replace('_', ' '), qh)
                    for qo, mid, qh in user_queries],
        'models': [(mid, model_full_names[mid]) for mid in user_models]
    }
    return results | 84961b40512005a73b78d28feefcc424385bef8f | 30,309 |
import re
def format_comments(text="default", line_size=90):
    """
    Takes a string of text and formats it based on rule 1 (see docs).

    Lines consisting only of hashes are padded to ``line_size`` hashes;
    banner lines like '#### Title ###' are rewritten as
    '#### Title ######...' right-padded with hashes to ``line_size`` chars.
    """
    # rules to detect fancy comments, if not text
    regex1 = r"^ *?####*$"
    # rules to detect fancy comments, if text
    regex2 = r"^ *?####*([^#\n\r]+)#*"
    # if detected pattern 1, replace with this
    subst1 = "#"*line_size
    # if detected pattern 2, replace with this
    def subst2(match_obj):
        fix_pad = 4 + 2 # 4 hashes on left plus two spaces
        cap_group = match_obj.group(1).strip()
        # '#### ' (5) + title + ' ' (1) + trailing hashes == line_size chars.
        return '#### ' + cap_group + ' ' + '#'*(line_size-fix_pad-len(cap_group))
    text = re.sub(regex1, subst1, text, 0, re.MULTILINE)
    text = re.sub(regex2, subst2, text, 0, re.MULTILINE)
    # formatted text to return
    return text | 6eba4539aa7128d5654ddab7fe08a2e9df6dc738 | 30,310 |
def get_kernel_versions_async(loop=None):
    """
    Execute dpkg commands asynchronously.

    Args:
        loop: asyncio event loop (optional)

    Returns:
        [DpkgCommandResult]: stats from the executed dpkg commands
    """
    # Thin wrapper: delegates argument building and output parsing to the
    # module-level helpers via the shared subprocess workflow.
    return subprocess_workflow.exec_and_parse_subprocesses_async(
        [DpkgCommandParams()],
        _get_dpkg_command_args_list,
        parse_dpkg_output,
        loop,
    ) | 425eac2d2ef7e00512b04ed41f3269e094762557 | 30,311 |
import math
def autoencoder(
    input_shape,
    encoding_dim=512,
    n_base_filters=16,
    batchnorm=True,
    batch_size=None,
    name="autoencoder",
):
    """Instantiate Autoencoder Architecture.

    Parameters
    ----------
    input_shape: list or tuple of four ints, the shape of the input data. Should be
        scaled to [0,1]. Omit the batch dimension, and include the number of channels.
        Currently, only squares and cubes supported.
    encoding_dim: int, the dimensions of the encoding of the input data. This would
        translate to a latent code of dimensions encoding_dimx1.
    n_base_filters: int, number of base filters the models first convolutional layer.
        The subsequent layers have n_filters which are multiples of n_base_filters.
    batchnorm: bool, whether to use batch normalization in the network.
    batch_size: int, number of samples in each batch. This must be set when training on
        TPUs.
    name: str, name to give to the resulting model object.
        NOTE(review): this argument is currently never passed to the Model
        constructor below, so it has no effect -- confirm intent.

    Returns
    -------
    Model object.
    """
    conv_kwds = {"kernel_size": 4, "activation": None, "padding": "same", "strides": 2}
    conv_transpose_kwds = {
        "kernel_size": 4,
        "strides": 2,
        "activation": None,
        "padding": "same",
    }
    # Spatial dims only (channels stripped); must all be equal (square/cube).
    dimensions = input_shape[:-1]
    n_dims = len(dimensions)
    if not (n_dims in [2, 3] and dimensions[1:] == dimensions[:-1]):
        raise ValueError("Dimensions should be of square or cube!")
    Conv = getattr(layers, "Conv{}D".format(n_dims))
    ConvTranspose = getattr(layers, "Conv{}DTranspose".format(n_dims))
    # One stride-2 (de)conv per halving; int() truncates, so non-power-of-two
    # sizes will not reduce fully to 1 -- presumably inputs are powers of two.
    n_layers = int(math.log(dimensions[0], 2))
    # Input layer
    inputs = x = layers.Input(shape=input_shape, batch_size=batch_size, name="inputs")
    # Encoder
    for i in range(n_layers):
        # Filter count doubles per level, capped at encoding_dim.
        n_filters = min(n_base_filters * (2 ** (i)), encoding_dim)
        x = Conv(n_filters, **conv_kwds)(x)
        if batchnorm:
            x = layers.BatchNormalization()(x)
        x = layers.ReLU()(x)
    # Encoding of the input image
    x = layers.Flatten(name="Encoding")(x)
    # Decoder
    x = layers.Reshape((1,) * n_dims + (encoding_dim,))(x)
    for i in range(n_layers)[::-1]:
        n_filters = min(n_base_filters * (2 ** (i)), encoding_dim)
        x = ConvTranspose(n_filters, **conv_transpose_kwds)(x)
        if batchnorm:
            x = layers.BatchNormalization()(x)
        x = layers.LeakyReLU()(x)
    # Output layer
    outputs = Conv(1, 3, activation="sigmoid", padding="same")(x)
    return models.Model(inputs=inputs, outputs=outputs) | dbb1983cb3b6adfcde823e6a2013e5517b57044f | 30,312 |
import numpy
def SHAPER(B, D, LA):
    """Wrap ``ER.SHAPER``, allocating its work arrays from array sizes.

    NOTE(review): semantics inferred from argument names only -- B appears
    to be an input wavelet, D the desired output, and LA the requested
    filter length; confirm against the ER.SHAPER documentation.

    Returns
    -------
    tuple
        (A, C, INDEX, ERRORS): filter coefficients A (length LA), shaped
        output C (length LB + LA + LD - 2), and diagnostics from ER.SHAPER.
    """
    LB = B.size
    LD = D.size
    A = numpy.zeros(LA)
    LC = LB + LA - 1
    LCD = LC + LD - 1
    C = numpy.zeros(LCD)
    INDEX = 0
    ERRORS = numpy.zeros(LCD)
    SPACE = numpy.zeros(3 * LA)
    # NOTE(review): LC and S from the wrapped call are discarded below.
    (A, LC, C, INDEX, ERRORS, S) = ER.SHAPER(LB, B, LD, D, LA, A, LC, C, INDEX, ERRORS, SPACE)
    return (A, C, INDEX, ERRORS) | cbe86b69c073c36e0f5d97616c9de59f2b4c2652 | 30,313 |
from typing import Tuple
def _get_efron_values_single(
    X: pd.DataFrame,
    T: pd.Series,
    E: pd.Series,
    weights: pd.Series,
    entries: None,
    beta: np.ndarray
) -> Tuple[np.ndarray, np.ndarray, float]:
    """
    Calculates the first and second order vector differentials, with respect to beta.
    Note that X, T, E are assumed to be sorted on T!

    A good explanation for Efron. Consider three of five subjects who fail at the time.
    As it is not known a priori that who is the first to fail, so one-third of
    (φ1 + φ2 + φ3) is adjusted from sum_j^{5} φj after one fails. Similarly two-third
    of (φ1 + φ2 + φ3) is adjusted after first two individuals fail, etc.

    From https://cran.r-project.org/web/packages/survival/survival.pdf:
    "Setting all weights to 2 for instance will give the same coefficient estimate but halve the variance. When
    the Efron approximation for ties (default) is employed replication of the data will not give exactly the same coefficients as the
    weights option, and in this case the weighted fit is arguably the correct one."

    Parameters
    ----------
    X: array
        (n,d) numpy array of observations.
    T: array
        (n) numpy array representing observed durations.
    E: array
        (n) numpy array representing death events.
    weights: array
        (n) an array representing weights per observation.
    entries:
        unused by this implementation; presumably accepted for signature
        parity with a variant supporting late entry -- TODO confirm.
    beta: array
        (1, d) numpy array of coefficients.

    Returns
    -------
    hessian:
        (d, d) numpy array,
    gradient:
        (1, d) numpy array
    log_likelihood: float
    """
    # NOTE(review): bare names zeros/exp/dot/multiply/arange/einsum/log are
    # presumably numpy functions imported at module level -- confirm.
    X = X.values
    T = T.values
    E = E.values
    weights = weights.values
    n, d = X.shape # n: samples; d: variables
    hessian = zeros((d, d))
    gradient = zeros((d,))
    log_lik = 0
    # Init risk and tie sums to zero
    x_death_sum = zeros((d,))
    risk_phi, tie_phi = 0, 0
    risk_phi_x, tie_phi_x = zeros((d,)), zeros((d,))
    risk_phi_x_x, tie_phi_x_x = zeros((d, d)), zeros((d, d))
    # Init number of ties and weights
    weight_count = 0.0
    tied_death_counts = 0
    scores = weights * exp(dot(X, beta))
    phi_x_is = scores[:, None] * X
    phi_x_x_i = np.empty((d, d))
    # Iterate backwards to utilize recursive relationship
    for i in range(n - 1, -1, -1): # i = n-1, n-2, n-3, ..., 3, 2, 1, 0
        # Doing it like this to preserve shape
        ti = T[i]
        ei = E[i]
        xi = X[i]
        w = weights[i]
        # Calculate phi values
        phi_i = scores[i]
        phi_x_i = phi_x_is[i]
        # https://stackoverflow.com/a/51481295/1895939
        phi_x_x_i = multiply.outer(xi, phi_x_i)
        # Calculate sums of Risk set
        risk_phi = risk_phi + phi_i
        risk_phi_x = risk_phi_x + phi_x_i
        risk_phi_x_x = risk_phi_x_x + phi_x_x_i
        # Calculate sums of Ties, if this is an event
        if ei:
            x_death_sum = x_death_sum + w * xi
            tie_phi = tie_phi + phi_i
            tie_phi_x = tie_phi_x + phi_x_i
            tie_phi_x_x = tie_phi_x_x + phi_x_x_i
            # Keep track of count
            tied_death_counts += 1
            weight_count += w
        if i > 0 and T[i - 1] == ti:
            # There are more ties/members of the risk set
            continue
        elif tied_death_counts == 0:
            # Only censored with current time, move on
            continue
        # There was at least one event and no more ties remain. Time to sum.
        # This code is near identical to the _batch algorithm below. In fact, see _batch for comments.
        weighted_average = weight_count / tied_death_counts
        if tied_death_counts > 1:
            increasing_proportion = arange(tied_death_counts) / tied_death_counts
            denom = 1.0 / (risk_phi - increasing_proportion * tie_phi)
            numer = risk_phi_x - multiply.outer(increasing_proportion, tie_phi_x)
            a1 = einsum("ab,i->ab", risk_phi_x_x, denom) - einsum("ab,i->ab", tie_phi_x_x, increasing_proportion * denom)
        else:
            denom = 1.0 / np.array([risk_phi])
            numer = risk_phi_x
            a1 = risk_phi_x_x * denom
        summand = numer * denom[:, None]
        a2 = summand.T.dot(summand)
        gradient = gradient + x_death_sum - weighted_average * summand.sum(0)
        log_lik = log_lik + dot(x_death_sum, beta) + weighted_average * log(denom).sum()
        hessian = hessian + weighted_average * (a2 - a1)
        # reset tie values
        tied_death_counts = 0
        weight_count = 0.0
        x_death_sum = zeros((d,))
        tie_phi = 0
        tie_phi_x = zeros((d,))
        tie_phi_x_x = zeros((d, d))
    return hessian, gradient, log_lik | 2d6a049e6894f3be6e002d22cc1c2b7d4705a66f | 30,314 |
import os
import scipy
def Interp_photometry_nosum(grid, wteff, wlogg, wmu, jteff, jlogg, jmu, area, val_mu):
    """
    Simple interpolation of an atmosphere grid having axes (logtemp, logg, mu).

    Note: As opposed to Interp_photometry, this function does not sum
    the surface elements.

    Parameters
    ----------
    The interpolation takes a set of points to be interpolated.
    grid : ndarray
        Atmosphere grid, with dimensions (logtemp, logg, mu, wav).
    wteff, wlogg, wmu : ndarray
        Weights of the temperature, logg, mu.
    jteff, jlogg, jmu : ndarray
        Fractional position of the temperature, logg, mu.
    area : ndarray
        Area (i.e. weight) of each surface element for the summation.
    val_mu : ndarray
        Value of the cross-section visible to us.

    Returns
    -------
    flux : ndarray
        Flux _not_ integrated over the surface.
    """
    # NOTE(review): this relies on scipy.weave (Python 2 era, removed from
    # SciPy after 0.18); it cannot run on modern SciPy without porting the
    # inline C below.
    code = """
    #pragma omp parallel shared(grid,wteff,wlogg,wmu,jteff,jlogg,jmu,area,val_mu,nsurf,fl) default(none)
    {
    double w1teff, w0teff, w1logg, w0logg, w1mu, w0mu, tmp_fl;
    int j0teff, j1teff, j0logg, j1logg, j0mu, j1mu;
    #pragma omp for
    for (int i=0; i<nsurf; i++) {
        w1teff = wteff(i);
        w0teff = 1.-w1teff;
        j0teff = jteff(i);
        j1teff = 1.+j0teff;
        w1logg = wlogg(i);
        w0logg = 1.-w1logg;
        j0logg = jlogg(i);
        j1logg = 1.+j0logg;
        w1mu = wmu(i);
        w0mu = 1.-w1mu;
        j0mu = jmu(i);
        j1mu = 1.+j0mu;
        tmp_fl = w1mu*(w0logg*(w0teff*grid(j0teff,j0logg,j1mu) + w1teff*grid(j1teff,j0logg,j1mu)) \
                    + w1logg*(w0teff*grid(j0teff,j1logg,j1mu) + w1teff*grid(j1teff,j1logg,j1mu))) \
                + w0mu*(w0logg*(w0teff*grid(j0teff,j0logg,j0mu) + w1teff*grid(j1teff,j0logg,j0mu)) \
                    + w1logg*(w0teff*grid(j0teff,j1logg,j0mu) + w1teff*grid(j1teff,j1logg,j0mu)));
        fl(i) = exp(tmp_fl) * area(i) * val_mu(i);
    }
    }
    """
    # weave's blitz converters require C-contiguous arrays.
    grid = np.ascontiguousarray(grid)
    wteff = np.ascontiguousarray(wteff)
    wlogg = np.ascontiguousarray(wlogg)
    wmu = np.ascontiguousarray(wmu)
    jteff = np.ascontiguousarray(jteff)
    jlogg = np.ascontiguousarray(jlogg)
    jmu = np.ascontiguousarray(jmu)
    area = np.ascontiguousarray(area)
    val_mu = np.ascontiguousarray(val_mu)
    nsurf = jteff.size
    # Output buffer the C code fills in place.
    fl = np.zeros(nsurf, dtype=float)
    try:
        if os.uname()[0] == 'Darwin':
            extra_compile_args = extra_link_args = ['-O3']
        else:
            extra_compile_args = extra_link_args = ['-O3 -fopenmp']
        get_flux = scipy.weave.inline(code, ['grid', 'wteff', 'wlogg', 'wmu', 'jteff', 'jlogg', 'jmu', 'area', 'val_mu', 'nsurf', 'fl'], type_converters=scipy.weave.converters.blitz, compiler='gcc', extra_compile_args=extra_compile_args, extra_link_args=extra_link_args, headers=['<omp.h>','<cmath>'], libraries=['m'], verbose=2)
    except:
        # Deliberate best-effort fallback: if the OpenMP build fails (e.g.
        # no libgomp), recompile single-threaded. NOTE(review): the bare
        # except also hides unrelated errors -- consider narrowing.
        get_flux = scipy.weave.inline(code, ['grid', 'wteff', 'wlogg', 'wmu', 'jteff', 'jlogg', 'jmu', 'area', 'val_mu', 'nsurf', 'fl'], type_converters=scipy.weave.converters.blitz, compiler='gcc', extra_compile_args=['-O3'], extra_link_args=['-O3'], headers=['<cmath>'], libraries=['m'], verbose=2)
    # NOTE(review): tmp is never used; the inline call mutates fl in place.
    tmp = get_flux
    return fl | 96e1d8e2fff960b6d8f528b413da5fee83dbebf4 | 30,315 |
import re
from datetime import datetime
def get_and_save_data(_=None):
    """Download data from John Hopkins, do some processing, and save as pickles

    Builds four pickles: per-state and per-county daily new-case frames, and
    matching "map" frames holding 7-day averages, per-100k rates, and hover
    text.

    Args:
        _: Empty variable. Was needed for the Google Cloud Function to work
    """
    tot_deaths_df = load_raw_covid_file(DEATHS_FILE)
    tot_cases_df = load_raw_covid_file(CASES_FILE)
    # Population lives only in the deaths file; join it onto cases by uid.
    uid_pop = tot_deaths_df[['uid', 'pop']].set_index('uid', drop=True)
    tot_cases_df = tot_cases_df.join(uid_pop, on='uid')
    state_cases_df = tot_cases_df.drop(
        ['uid', 'fips', 'county'], axis='columns')
    state_cases_df = state_cases_df.groupby(['state']).sum()
    # Drop cruise ships and non-state territories, then add a USA total row.
    state_cases_df = state_cases_df.drop(
        ['Diamond Princess', 'Guam', 'American Samoa', 'Grand Princess',
         'Northern Mariana Islands', 'Virgin Islands'], axis='rows')
    state_cases_df.loc['USA'] = state_cases_df.sum()
    def new_cases(df):
        # Convert cumulative date columns (m/d/yy headers) to daily diffs.
        # NOTE(review): the pattern is a non-raw string; '\d' works today but
        # is a deprecated escape -- prefer r'\d*/\d*/\d\d'.
        date_cols_bool = [bool(re.match('\d*/\d*/\d\d', c)) for c in df.columns]
        df = df.iloc[:, date_cols_bool].T
        df = df.diff()[1:]
        df = df.clip(lower=0) #FIXME: Remove positive tests from previous day instead?
        df.index = pd.to_datetime(df.index)
        # Only keep data from Feb 24 on
        slice_i = datetime(year=2020, month=2, day=24)
        return df[slice_i:]
    states_map_df = state_cases_df['pop'].to_frame('pop')
    state_cases_df = state_cases_df.drop('pop', axis='columns')
    states_df = new_cases(state_cases_df)
    counties_df = tot_cases_df.dropna().set_index('fips', drop=True)
    counties_df = counties_df[~(counties_df['county'] == 'Unassigned')]
    counties_df = counties_df[~(counties_df['county'].str.contains('Out of'))]
    counties_df = new_cases(counties_df)
    def make_map_df(df, map_df):
        # Attach latest 7-day average and per-100k rate to the map frame.
        loc_pop_dict = map_df['pop'].to_dict()
        ave_df = df.rolling(7, ).mean().dropna()
        ave_rate_df = ave_df.apply(lambda s: s / loc_pop_dict[s.name] * 100000)
        map_df['week_ave'] = ave_df.iloc[-1]
        map_df['ave_rate'] = ave_rate_df.iloc[-1]
        return map_df.reset_index()
    counties_map_df = tot_deaths_df[['pop', 'county', 'state', 'fips']]
    counties_map_df = counties_map_df.set_index('fips', drop=True)
    counties_map_df = make_map_df(counties_df, counties_map_df)
    states_map_df = make_map_df(states_df, states_map_df)
    def custom_number_str(num, max_val_for_decimals=10):
        # One decimal for small values, integer formatting otherwise.
        if num > max_val_for_decimals:
            return str(int(round(num, 0)))
        else:
            return str(round(num, 1))
    counties_map_df['text'] = [
        '<b>{} County, {}</b><br>Avg. Daily Cases: {}<br>  Per 100k: {}'.format(
            tup.county,
            tup.state,
            custom_number_str(tup.week_ave),
            custom_number_str(tup.ave_rate)
        ) for tup in counties_map_df.itertuples()]
    states_map_df['text'] = [
        '<b>{}</b><br>Avg. Daily Cases: {}<br>  Per 100k: {}'.format(
            tup.state,
            custom_number_str(tup.week_ave),
            custom_number_str(tup.ave_rate)
        ) for tup in states_map_df.itertuples()]
    DataHandler.save_pkl_file(counties_df, 'counties_df')
    DataHandler.save_pkl_file(counties_map_df, 'counties_map_df')
    DataHandler.save_pkl_file(states_df, 'states_df')
    DataHandler.save_pkl_file(states_map_df, 'states_map_df')
    return f'Completed' | 9f5533c4843a77d05c0a5a3f8bee5fbf120d37da | 30,316 |
import re
def get_battery_information():
    """Return the device's battery level and temperature.

    Parses ``dumpsys battery`` output from adb.

    Returns:
        dict: {'level': int (percent), 'temperature': float (degrees C)},
        or None when either field cannot be parsed.
    """
    output = adb.run_adb_shell_command(['dumpsys', 'battery'])
    # Get battery level.
    m_battery_level = re.match(r'.*level: (\d+).*', output, re.DOTALL)
    if not m_battery_level:
        logs.log_error('Error occurred while getting battery status.')
        return None
    # Get battery temperature.
    m_battery_temperature = re.match(r'.*temperature: (\d+).*', output, re.DOTALL)
    if not m_battery_temperature:
        logs.log_error('Error occurred while getting battery temperature.')
        return None
    level = int(m_battery_level.group(1))
    # dumpsys reports tenths of a degree Celsius.
    temperature = float(m_battery_temperature.group(1)) / 10.0
    return {'level': level, 'temperature': temperature} | dba773386e88728b3a1cf752c6b3bfa74b38963d | 30,317 |
def _tensor_setitem_by_tuple_with_tuple(data, tuple_index, value):
    """
    Tensor assignment.

    Note:
        Syntax support: A[B, C, D] = U.
        Restraint condition: 1) A is a Tensor, and B, C, D are index Tensors.
                             2) A B and C could be broadcast.
                             3) U is a Tensor.
    Inputs:
        data (Tensor): Assigned tensor.
        index (Tuple): A tuple of tensor, these tensor could be broadcast.
        value (Tensor): Assignment tensor, should has the same data type as 'data'.
    Outputs:
        Tensor, element type and shape is same as data.
    """
    indexes_types = compile_utils.hyper_map(F.typeof, tuple_index)
    index_elements_type = const_utils.tuple_index_elements_type(indexes_types, const_utils.TENSOR_SETITEM)
    # All-tensor indices take the fast path; mixed tuples (tensors plus
    # slices/ints/etc.) need the more general conversion.
    if index_elements_type == const_utils.ALL_TENSOR:
        indices = compile_utils.generate_indices_from_tuple_of_tensor(data,
                                                                      tuple_index,
                                                                      const_utils.TENSOR_SETITEM)
    else:
        indices = compile_utils.generate_indices_from_tuple_of_mixed_tensors(data,
                                                                             tuple_index,
                                                                             const_utils.TENSOR_SETITEM)
    # Broadcast `value` to the update shape implied by `indices`, then
    # scatter the updates into `data`.
    updates = compile_utils.generate_updates_from_tuple(data,
                                                        indices,
                                                        value,
                                                        const_utils.SET_ITEM_BY_TUPLE_OF_TENSOR)
    return F.scatter_nd_update(data, indices, updates) | d00d08cb1391c96938bf5390c5e7f58bac6724a5 | 30,318 |
def templates_global_context(request):
    """
    Return context for use in all templates.

    A Django context processor: exposes the module-level ``constants``
    values (DDD, ESTADO, MUNICIPIO, CEP, PAIS, CURRENT_YEAR) to every
    template under ``constant_*`` names. ``request`` is unused but required
    by the context-processor signature.
    """
    global_context = {
        'constant_ddd': constants.DDD,
        'constant_estado': constants.ESTADO,
        'constant_municipio': constants.MUNICIPIO,
        'constant_cep': constants.CEP,
        'constant_pais': constants.PAIS,
        'constant_current_year': constants.CURRENT_YEAR,
    }
    return global_context | 500ce9eaf26631fdeaa48c4d9001847e713262f5 | 30,319 |
import warnings
from typing import Concatenate
def doubleunet(num_classes,
input_shape=(224, 224, 3),
model_weights=None,
num_blocks=5,
encoder_one_type='Default',
encoder_one_weights=None,
encoder_one_freeze=False,
encoder_one_filters=[32, 64, 128, 256, 512],
dspp_one_filters=256,
decoder_one_type='upsampling',
num_decoder_one_block_conv_layers=1,
decoder_one_filters=[512, 256, 128, 64, 32],
decoder_one_activation=None,
decoder_one_use_skip_connection=True,
decoder_one_use_batchnorm=True,
decoder_one_dropout_rate=0,
output_one_activation=None,
encoder_two_type='Default',
encoder_two_weights=None,
encoder_two_freeze=False,
encoder_two_filters=[32, 64, 128, 256, 512],
dspp_two_filters=256,
decoder_two_type='upsampling',
decoder_two_filters=[512, 256, 128, 64, 32],
num_decoder_two_block_conv_layers=1,
decoder_two_activation=None,
decoder_two_use_skip_connection=True,
decoder_two_use_batchnorm=True,
decoder_two_dropout_rate=0,
output_two_activation=None):
"""
Merge the doubleunet_encoder and doubleunet_decoder functions to instantiate
the doubleunet architecture for semantic segmantation tasks.
Args:
num_classes: number of the segmentation classes.
input_shape: a tuple containing image height, width and channels
respectively. Default to (224,224,3).
model_weights: (optional) link to pre-trained weights.
num_blocks: (optional) number of encoder and decoder blocks.
Default to 5.
############################ Encoder Blocks ########################
encoder_one_type & encoder_two_type:
type of model to build upon. One of 'Default',
'DenseNet121', 'DenseNet169' 'EfficientNetB0',
'EfficientNetB1', 'EfficientNetB2', 'EfficientNetB3',
'EfficientNetB4', 'EfficientNetB5', 'EfficientNetB6',
'EfficientNetB7', 'MobileNet', 'MobileNetV2',
'ResNet50', 'ResNet101', 'ResNet152',
'ResNet50V2', 'ResNet101V2', 'ResNet152V2',
'VGG16', 'VGG19'. Default encoder type is 'Default'.
encoder_one_weights & encoder_two_weights:
(optional) pre-trained weights for encoder function.
One of None (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded
encoder_one_freeze & encoder_two_freeze:
(optional) boolean to specify whether to train
encoder model parameters or not. Default is False.
encoder_one_filters & encoder_two_filters:
(optional) a list containing number of filters to use
for each encoder convolution blocks.
Default to [32, 64, 128, 256, 512].
############################ DSPP Blocks ###########################
dspp_one_filters & dspp_two_filters:
(optional) a list containing number of filters to use
for each DSSP block. Default to 256.
############################# Decoder Blocks #######################
decoder_one_type & decoder_two_type:
(optional) one of 'transpose' (to use Conv2DTanspose
operation for upsampling operation) or 'upsampling' (to
use UpSampling2D operation for upsampling operation).
Default to upsampling.
decoder_one_filters & decoder_two_filters:
(optional) a list containing number of filters to use
for each decoder convolution blocks.
Default to [512, 256, 128, 64, 32].
num_decoder_one_blocks & num_decoder_two_blocks:
(optional) number of decoder blocks to use. Default to 5.
decoder_one_filters & decoder_two_filters:
(optional) a list containing filter sizes for each
decoder block. Default to [32, 64, 128, 256, 512].
num_decoder_one_block_conv_layers & num_decoder_two_block_conv_layers:
(optional) number of convolution layers for each decoder
block (i.e. number of Conv2D layers after upsampling
layers). Default is 1.
decoder_one_activation & decoder_two_activation:
(optional) decoder activation name or function.
decoder_one_use_skip_connection & decoder_two_use_skip_connection:
(optional) one of True (to use residual/skip connections)
or False (not to use residual/skip connections).
Default to True.
decoder_use_batchnorm:
(optional) boolean to specify whether decoder layers
should use BatchNormalization or not.
Default to False.
decoder_dropout_rate:
(optional) dropout rate. Float between 0 and 1.
output_activation:
(optional) activation for output layer.
Default is either 'sigmoid' or 'softmax' based on
the value of the 'num_classes' argument.
Returns:
model: keras double-unet segmentation model
"""
#--------------------------------------------------------------------------#
# Validate and preprocess arguments
#--------------------------------------------------------------------------#
# 1. num_classes - check doubleunet_decoder functon
# 2. encoder_type - check doubleunet_encoder functon
# 3. input_shape - check doubleunet_encoder functon
# 2. input_shape
if not isinstance(input_shape, tuple):
raise ValueError("The `input_shape` argument should a tuple containing "
"the image width, height and channels respectively.")
if not len(input_shape) == 3:
warnings.warn("The `input_shape` argument should be a tuple containing "
"three integer values for each of the image width, "
"height, and channels respectively.")
# 4. model_weights
if not (model_weights in {None} or file_io.file_exists_v2(model_weights)):
warnings.warn('The `model_weights` argument should either be '
'`None` (random initialization), '
'or the path to the weights file to be loaded.')
# 5. encoder_weights - check doubleunet_encoder functon
# 6. encoder_freeze - check doubleunet_encoder functon
# 7. num_bottleneck_conv_layers - check doubleunet_bottleneck functon
# 8. num_bottleneck_conv_filters - check doubleunet_bottleneck functon
# 9. bottleneck_use_batchnorm - check doubleunet_bottleneck functon
# 10. num_decoder_blocks - check doubleunet_decoder functon
# 11. decoder_type - check doubleunet_decoder functon
# 12. decoder_filters - check doubleunet_decoder functon
# 13. num_decoder_block_conv_layers - check doubleunet_decoder functon
# 14. decoder_activation - check doubleunet_decoder functon
# 15. decoder_use_skip_connection - check doubleunet_decoder functon
# 16. decoder_use_batchnorm - check doubleunet_decoder functon
# 17. decoder_dropout_rate - check doubleunet_decoder functon
# 18. output_activation - check doubleunet_decoder functon
#--------------------------------------------------------------------------#
# Build Model
#--------------------------------------------------------------------------#
# Network 1
#--------------------------------------------------------------------------#
# 1. Get the encoder model, model output layer and skip connection layers
input_1 = Input(shape=(input_shape), name='input_1')
encoder_model_1, encoder_model_output_1, skip_connection_layers_1 = encoder(
encoder_type=encoder_one_type,
input_tensor=input_1,
encoder_weights=encoder_one_weights,
encoder_freeze=encoder_one_freeze,
num_blocks=num_blocks,
encoder_filters=encoder_one_filters
)
# 2. Get the ASPP/DSPP block output layer
dspp_output_1 = DilatedSpatialPyramidPooling(
dspp_input=encoder_model_output_1,
num_filters=dspp_one_filters
)
# 3. Decoder blocks
# Extend the model by adding the decoder blocks
output_1 = decoder(
num_classes=num_classes,
decoder_input=dspp_output_1,
skip_connection_layers_1 = skip_connection_layers_1,
skip_connection_layers_2= None,
decoder_type=decoder_one_type,
num_blocks=num_blocks,
decoder_filters=decoder_one_filters,
num_decoder_block_conv_layers=num_decoder_one_block_conv_layers,
decoder_activation=decoder_one_activation,
decoder_use_skip_connection=decoder_one_use_skip_connection,
decoder_use_batchnorm=decoder_one_use_batchnorm,
decoder_dropout_rate=decoder_one_dropout_rate,
output_activation=output_one_activation)
# Rename encoder model one layer names to avoid none of the layers from
# encoders one and two are the same.
enc_1_layers = [layer for layer in
Model(encoder_model_1.inputs, output_1).layers]
for layer in enc_1_layers:
layer._name = layer._name + str("_a")
#--------------------------------------------------------------------------#
# Network 2
#--------------------------------------------------------------------------#
input_2 = Concatenate(axis=-1, name='input_2')([output_1, input_1])
# 1. Get the encoder model, model output layer and skip connection layers
encoder_model_2, encoder_model_output_2, skip_connection_layers_2 = encoder(
encoder_type=encoder_two_type,
input_tensor=input_2,
encoder_weights=encoder_two_weights,
encoder_freeze=encoder_two_freeze,
num_blocks=num_blocks,
encoder_filters=encoder_two_filters
)
# 2. Get the ASPP/DSPP block output layer
dspp_output_2 = DilatedSpatialPyramidPooling(
dspp_input=encoder_model_output_2,
num_filters=dspp_two_filters
)
# 3. Decoder blocks
# Extend the model by adding the decoder blocks
output_2 = decoder(
num_classes=num_classes,
decoder_input=dspp_output_2,
skip_connection_layers_1 = skip_connection_layers_1,
skip_connection_layers_2 = skip_connection_layers_2,
decoder_type=decoder_two_type,
num_blocks=num_blocks,
decoder_filters=decoder_two_filters,
num_decoder_block_conv_layers=num_decoder_two_block_conv_layers,
decoder_activation=decoder_two_activation,
decoder_use_skip_connection=decoder_two_use_skip_connection,
decoder_use_batchnorm=decoder_two_use_batchnorm,
decoder_dropout_rate=decoder_two_dropout_rate,
output_activation=output_two_activation
)
# Rename encoder model two layer names if both encoder one and two are the same
enc_1_layers = [layer for layer in
Model(encoder_model_1.inputs, output_1).layers]
enc_2_layers = [layer for layer in
Model(encoder_model_2.inputs, output_2).layers
if layer not in enc_1_layers]
for layer in enc_2_layers:
layer._name = layer._name + str("_b")
outputs = Add()([output_1, output_2])
inputs = encoder_model_1.inputs
## Image Segmentation Model
model = Model(inputs, outputs)
return model | cf50030dfe2ace708b7ee192aa7e4631af2a5e2c | 30,320 |
def get_calendar_name(request):
    """Return the user's saved calendar name, falling back to the session.

    The per-user setting ``request.user.options.calendar_name`` wins when
    present and non-empty; anonymous users (or users without an options
    record) fall back to the ``calendar_name`` stored in the session.
    Returns ``None`` when neither is set.
    """
    # TODO: Try-except should be replaced
    try:
        saved_name = request.user.options.calendar_name
    except AttributeError:
        saved_name = None
    if saved_name:
        return saved_name
    # Fall back to the session-stored name (None when absent).
    return request.session.get('calendar_name', None)
def get_lldp_neighbors(dut, interface=None):
    """
    Get LLDP Neighbours Info
    Author: Prudvi Mangadu (prudvi.mangadu@broadcom.com)
    :param dut:
    :param interface: localport; when given, restrict output to that port
    :return:
    """
    base_command = "show lldp neighbors"
    # Append the interface filter only when one was requested.
    full_command = "{} {}".format(base_command, interface) if interface else base_command
    return st.show(dut, full_command)
def encode_onehot(batch_inputs, max_len=None):
    """One-hot encode a batch of strings into a (batch, max_len, vocab) array."""
    max_len = get_max_input_len() if max_len is None else max_len

    def encode_str(s):
        tokens = CTABLE.encode(s)
        if len(tokens) > max_len:
            raise ValueError(f'Sequence too long ({len(tokens)}>72,850): \'{s}\'')
        # Right-pad with zeros up to the fixed length before one-hot encoding.
        tokens = np.pad(tokens, [(0, max_len - len(tokens))], mode='constant')
        return jax.nn.one_hot(tokens, CTABLE.vocab_size, dtype=jnp.float32)

    return np.array([encode_str(s) for s in batch_inputs])
def plot_confusion_matrix(cm,
                          normalize=True,
                          title=None,
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    plt.show() must be run to view the plot, this is not done
    by this function.
    Parameters
    ----------
    cm : np.ndarray
        the confusion matrix to plot
    normalize : bool, optional
        whether to normalise the matrix, by default True
    title : string, optional
        title of the plot, otherwise a sensible title is generated. By default None
    cmap : matplotlib.colormap, optional
        matplotlib colormap, by default plt.cm.Blues
    Returns
    -------
    matplotlib.Axes
        axes object of the plot generated
    """
    if not title:
        title = 'Normalized confusion matrix' if normalize else 'Confusion matrix, without normalization'
    if normalize:
        # Row-normalize so each row (true class) sums to 1.
        # NOTE(review): a row summing to zero produces NaNs here -- confirm
        # inputs always have at least one sample per true class.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    fig, ax = plt.subplots(figsize=(4,2))
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    ax.figure.colorbar(im, ax=ax)
    # We want to show all ticks...
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           # ... and label them with the respective list entries
           # (CLASSES is a module-level list of class names).
           xticklabels=CLASSES, yticklabels=CLASSES,
           title=title,
           ylabel='True label',
           xlabel='Predicted label')
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # Loop over data dimensions and create text annotations.
    fmt = '.2f' if normalize else 'd'
    # Cells darker than half the colormap maximum get white text for contrast.
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            ax.text(j, i, format(cm[i, j], fmt),
                    ha="center", va="center",
                    color="white" if cm[i, j] > thresh else "black")
    fig.tight_layout()
    return ax
def get_capped_cluster(atoms, folder_path, file_name, save_traj, EF_O_index):
    """ #TODO: check whether capping is necessary
    Inconsistent capping (remove all caps for now, does not need this cluster to be physical)
    Possible fix: change mult in neighbor list
    Extract smaller cluster containing the extra-framework atoms and cap all the O. Then the capped cluster is moved
    to the center of the cell to avoid boundary issue.
    Save cluster in both .traj file and .pdb format.
    :param atoms: full zeolite structure to extract the cluster from
        (presumably an ase.Atoms object -- confirm with callers).
    :param folder_path: output directory for the .pdb (and optional .traj) files.
    :param file_name: base name, without extension, for the saved files.
    :param save_traj: if True, save clusters into .traj as well, for later comparison and trouble shooting
    :param EF_O_index: if not none, will use this value, else, will find the index using Extraframework code
    :return: 1. EF-cluster including 13 atoms, index of the EF atoms in original zeolite, index of the EF atoms in
    the current cluster (the later two output index lists share the ordering)
    """
    EFMaker = ExtraFrameworkAnalyzer(atoms)
    cluster = atoms[[index for index in EFMaker.get_extraframework_cluster(EF_O_index)]]
    cluster_EF_index = get_EF_atom_indices(cluster)
    # Recenter on the last EF atom so the cluster sits away from cell boundaries.
    centering_pos = cluster.get_positions()[cluster_EF_index[-1]]
    recentered_cluster = EFMaker.recentering_atoms(cluster, centering_pos)[0]
    # FIXME: recentering doesn't work well for very small unit cells. eg. SOD
    # cluster = Zeolite(cluster).cap_atoms()
    proteindatabank.write_proteindatabank(folder_path + '/%s.pdb' % file_name, recentered_cluster)
    if save_traj is True:
        write(folder_path + '/%s.traj' % file_name, recentered_cluster)
    return cluster, EFMaker.get_extraframework_cluster(EF_O_index), cluster_EF_index
def detect_id_type(sid):
    """Method that tries to infer the type of abstract ID.
    Parameters
    ----------
    sid : str
        The ID of an abstract on Scopus.
    Raises
    ------
    ValueError
        If the ID type cannot be inferred.
    Notes
    -----
    PII usually has 17 chars, but in Scopus there are valid cases with only
    16 for old converted articles.
    Scopus ID contains only digits, but it can have leading zeros.  If ID
    with leading zeros is treated as a number, SyntaxError can occur, or the
    ID will be rendered invalid and the type will be misinterpreted.
    """
    sid = str(sid)
    if sid.isnumeric():
        # All-digit IDs: short ones are PubMed IDs, long ones Scopus IDs
        # (leading zeros are preserved because sid stays a string).
        return 'pubmed_id' if len(sid) < 10 else 'scopus_id'
    if sid.startswith('2-s2.0-'):
        return 'eid'
    if '/' in sid or '.' in sid:
        return 'doi'
    if 16 <= len(sid) <= 17:
        return 'pii'
    # Early returns replace the original UnboundLocalError-based control
    # flow; unmatched IDs now reach this raise directly.
    raise ValueError('ID type detection failed for \'{}\'.'.format(sid))
def config_to_dict(plato_config):
    """Convert a (possibly nested) plato config instance to a plain dict.

    The config is a namedtuple-like object whose fields may themselves be
    namedtuple-like objects or lists containing them.  Every object that
    exposes ``_asdict()`` is converted recursively; other values are kept
    as-is.

    Replaces the original bare ``except:`` clauses (which silently swallowed
    every error) with an explicit ``_asdict`` capability check, and also
    recurses into nested lists.
    """
    def _to_plain(value):
        # Namedtuples (and namedtuple-like configs) expose _asdict().
        if hasattr(value, '_asdict'):
            return {key: _to_plain(item) for key, item in value._asdict().items()}
        if isinstance(value, list):
            return [_to_plain(item) for item in value]
        return value

    return _to_plain(plato_config)
def analyze_single_user_info(result=load_data()):
    """Summarize how often each food was eaten per meal type for one user.

    NOTE(review): the default argument ``load_data()`` is evaluated once at
    import time, not once per call -- consider ``result=None`` plus a lazy
    load inside the function.

    :param result: iterable of rows (user_id, user_name, food_code,
        food_name, meal_type, eat_time).
    :return: example: {user_id: 1, meal_info: {breakfast: {food_name: <dish
        name>, times: <count>}, early_dinner: {...}, supper: {...}}}
    """
    result = pd.DataFrame(result, columns=['user_id', 'user_name', 'food_code', 'food_name', 'meal_type', 'eat_time'])
    user_id = set(result['user_id'])
    for i in user_id:
        user_info = result[result['user_id'] == i]
        breakfast_info = user_info[user_info['meal_type'] == MEAL_TYPE[0]]
        early_dinner_info = user_info[user_info['meal_type'] == MEAL_TYPE[1]]
        # NOTE(review): this reuses MEAL_TYPE[0] (same filter as breakfast);
        # supper was probably meant to use MEAL_TYPE[2] -- confirm and fix.
        dinner_info = user_info[user_info['meal_type'] == MEAL_TYPE[0]]
        def analyze_meal_info(meal_info):
            """Count how many times each distinct food appears in one meal subset."""
            food_name = set(meal_info['food_name'])
            result_set = []
            for name in food_name:
                tmp = {'food_name': name, 'times': len(meal_info[meal_info['food_name'] == name])}
                result_set.append(tmp)
            return result_set
        meal_type = {'breakfast': analyze_meal_info(breakfast_info),
                     'early_dinner': analyze_meal_info(early_dinner_info),
                     'supper': analyze_meal_info(dinner_info)}
        user_dict = {'user_id': i, 'meal_info': meal_type}
        # NOTE(review): returning inside the loop means only the first user
        # (in arbitrary set order) is ever analyzed -- confirm intent.
        return user_dict
def mergeSort(data):
    """Sort *data* in ascending order using merge sort.

    Returns a new sorted list; the input list is not modified.

    Fixes over the original: the base case is ``n <= 1`` instead of
    ``n == 1`` (the original recursed forever on an empty list), the
    midpoint uses integer floor division, and the merge step is a local
    helper instead of the external ``mergeHalves``.
    """
    n = len(data)
    if n <= 1:
        # Empty or single-element lists are already sorted.
        return data
    mid = n // 2
    left = mergeSort(data[:mid])
    right = mergeSort(data[mid:])
    return _merge_sorted(left, right)


def _merge_sorted(left, right):
    """Merge two sorted lists into one sorted list (stable)."""
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        # <= keeps the merge stable (equal keys keep their original order).
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    # One of these is empty; extend with whatever remains.
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
def compute_gradient_logistic(y, tx, w):
    """Compute the gradient of the logistic-regression loss at w.

    Args:
        y (numpy array): Matrix output of size N x 1.
        tx (numpy array): Matrix input of size N x D.
        w (numpy array): Matrix weight (parameters of the model) of size D x 1.

    Returns:
        numpy array: gradient of size D x 1, i.e. tx^T (sigmoid(tx w) - y).
    """
    predictions = sigmoid(tx.dot(w))
    residual = predictions - y
    return tx.T.dot(residual)
import re
def parse_py(fname):
    """Look for links in a .py file and return them as a list.

    A URL found on a ``# `` comment line is joined with the text of the
    following comment lines (the URL may be wrapped across several
    comment lines).
    """
    with open(fname) as f:
        lines = f.readlines()
    urls = set()
    for i, line in enumerate(lines):
        for url in find_urls(line):
            # comment block: append continuation text from following lines
            if line.lstrip().startswith('# '):
                subidx = i + 1
                # Bounds check fixes an IndexError when the comment block
                # runs to the very end of the file.
                while subidx < len(lines):
                    nextline = lines[subidx].strip()
                    if re.match('^# .+', nextline):
                        url += nextline[1:].strip()
                    else:
                        break
                    subidx += 1
            urls.add(url)
    return list(urls)
def lwp_cookie_str(cookie):
    """Serialize *cookie* into one line of the LWP cookie file format.

    Actually, the format is extended a bit -- see module docstring.
    """
    # Order matters: name=value first, then the standard attributes in the
    # format's fixed order, then non-standard attributes, then the version.
    attrs = [(cookie.name, cookie.value),
             ("path", cookie.path),
             ("domain", cookie.domain)]
    if cookie.port is not None:
        attrs.append(("port", cookie.port))
    if cookie.path_specified:
        attrs.append(("path_spec", None))
    if cookie.port_specified:
        attrs.append(("port_spec", None))
    if cookie.domain_initial_dot:
        attrs.append(("domain_dot", None))
    if cookie.secure:
        attrs.append(("secure", None))
    if cookie.expires:
        attrs.append(("expires", time2isoz(float(cookie.expires))))
    if cookie.discard:
        attrs.append(("discard", None))
    if cookie.comment:
        attrs.append(("comment", cookie.comment))
    if cookie.comment_url:
        attrs.append(("commenturl", cookie.comment_url))
    if cookie.rfc2109:
        attrs.append(("rfc2109", None))
    # Non-standard attributes are emitted in sorted key order so the
    # output is deterministic.
    for key in sorted(cookie.nonstandard_attr_keys()):
        attrs.append((key, str(cookie.get_nonstandard_attr(key))))
    attrs.append(("version", str(cookie.version)))
    return join_header_words([attrs])
def fiscalyear():
    """Retrieve Fiscal Years and display for selection by user."""
    cascs = db.session.query(casc).order_by(casc.name).all()
    cascs_and_fys = {}
    # Per-request dynamic form subclass: one BooleanField per fiscal year
    # is attached below via setattr, so the class cannot be module-level.
    class F(FyForm):
        pass
    list_fy = []
    for curr_casc in cascs:
        cascs_and_fys[curr_casc.name] = {}
        cascs_and_fys[curr_casc.name]["id"] = curr_casc.id
        fys = db.session.query(FiscalYear).order_by(
            FiscalYear.name).filter(
                FiscalYear.casc_id == curr_casc.id).all()
        cascs_and_fys[curr_casc.name]["fiscal_years"] = []
        for fy in fys:
            fiscal_year = {}
            # Field names are "fy<id>" so the numeric id can be recovered
            # from the field name on submit.
            list_fy.append("fy" + str(fy.id))
            fiscal_year["id"] = fy.id
            fiscal_year["name"] = fy.name
            cascs_and_fys[curr_casc.name]["fiscal_years"].append(fiscal_year)
            setattr(F, "fy" + str(fy.id), BooleanField(fy.name))
    form = F()
    if form.validate_on_submit():
        id_list = []
        projects = []
        # Collect the ids of every fiscal year whose checkbox was ticked.
        for fy in list_fy:
            fy_attr = getattr(form, fy)
            selected = fy_attr.data
            if selected:
                id_list.append(fy.replace("fy", ""))
        print('length of id_list:', len(id_list))
        for i in id_list:
            fy_model = db.session.query(FiscalYear).get(i)
            for proj in fy_model.projects:
                project_dict = {}
                project_dict['fy_id'] = i
                project_dict['casc_id'] = fy_model.casc_id
                project_dict['proj_id'] = proj.id
                projects.append(project_dict)
        # Stash the selection in the session for the report view.
        session["projects"] = projects
        print('length of projects:', len(projects))
        return redirect(url_for('main.report'))
    elif request.method == 'GET':
        pass
    # NOTE(review): the page is rendered with HTTP status 400 even on a
    # plain GET -- confirm this status code is intentional.
    return render_template('fiscalYears.html',
                           form=form,
                           cascs_and_fys=cascs_and_fys,
                           title="Select Fiscal Years"), 400
def forbidden(error):
    """Return a JSON 403 response for a forbidden resource.

    :param error: the exception raised by Flask/werkzeug; its
        ``description`` is echoed back to the client.
    :return: tuple of (JSON response body, 403 status code).
        (The previous ``-> str`` annotation was wrong: the handler
        returns a (response, status) tuple, not a string.)
    """
    return jsonify({"error": error.description}), 403
def get_parameter(model, name):
    """Find and return the named parameter within the given model.

    Raises LookupError when no parameter with that name exists.
    """
    # Sentinel distinguishes "not found" from a legitimately falsy value.
    _missing = object()
    found = next(
        (param for param_name, param in model.named_parameters()
         if param_name == name),
        _missing,
    )
    if found is _missing:
        raise LookupError(name)
    return found
def _update_method(oldmeth, newmeth):
    """Update a method object in place and return it.

    Swaps the code of the old method's underlying function for the new
    one's (via the sibling ``_update`` helper), so existing references to
    the old method pick up the new implementation.

    NOTE(review): ``im_func`` is the Python 2 attribute name; on Python 3
    this would be ``__func__`` -- confirm the target runtime.
    """
    # XXX What if im_func is not a function?
    _update(oldmeth.im_func, newmeth.im_func)
    return oldmeth
import sys
def romanize(string, system):
    """
    Transliterate Burmese text with latin letters.
    >>> romanize("ကွန်ပျူတာ", IPA)
    'kʊ̀ɴpjùtà'
    >>> romanize("ပဒေသရာဇာ", MLC)
    'padezarājā'
    >>> romanize("ဘင်္ဂလားအော်", BGN_PCGN)
    'bin-gala-aw'
    """
    romans = []
    for syllable in PhonemicSyllableBreak(string, UnicodeEncoding()):
        phoneme = syllable['syllable']
        # TODO add more normalization
        # Rewrite a stacked (virama) consonant with the asat sign so the
        # phoneme matches the keys used in the transliteration table.
        if "virama_stack" in syllable:
            phoneme = phoneme.replace(SIGN_VIRAMA, SIGN_ASAT)
        length = len(phoneme)
        string = ""
        scan = 0
        while scan < length:
            # longest matching: candidate substrings from the longest
            # remaining suffix down to a single character.
            matches = [phoneme[scan:length - i] for i in range(length)]
            for match in matches:
                if match in system.table:
                    string += system.table[match]
                    scan += len(match)
                    break
            else:
                # No table entry matched: warn and copy the character through.
                sys.stderr.write("Unable to romanize " + phoneme[scan])
                string += phoneme[scan]
                scan += 1
        if string: # if not empty
            # normalize() may adjust the romanization based on the
            # previously emitted syllable (e.g. joining rules).
            last = romans[-1] if romans else None # prev is last here
            roman = system.normalize(string, last)
            romans.append(roman)
    return "".join(romans)
def get_stats_asmmemmgr(space):
    """Returns the raw memory currently used by the JIT backend,
    as a pair (total_memory_allocated, memory_in_use)."""
    allocated = jit_hooks.stats_asmmemmgr_allocated(None)
    used = jit_hooks.stats_asmmemmgr_used(None)
    # Wrap both counters as interpreter-level ints inside a tuple object.
    return space.newtuple([space.newint(allocated), space.newint(used)])
def parse_function(filename):
    """Load and decode the face image at *filename*.

    Parameters
    ----------
    filename : str
        Path to the faces image.

    Returns
    -------
    tensorflow.Tensor
        Decoded 3-channel uint8 image tensor.

    Notes
    -----
    ``tf.image.decode_image`` is avoided because it leaves the output
    shape undefined; ``decode_jpeg`` is used explicitly instead.
    Conversion to float values in [0, 1] is deliberately *not* done here
    (normalization happens later in the pipeline), so
    ``tf.image.convert_image_dtype`` is not called.
    """
    raw_bytes = tf.io.read_file(filename)
    decoded_image = tf.image.decode_jpeg(raw_bytes, channels=3)
    return decoded_image
def calc_final_speed(v_i, a, d):
    """
    Computes the final speed given an initial speed, distance travelled,
    and a constant acceleration, via v_f^2 = v_i^2 + 2 a d.
    :param:
        v_i: initial speed (m/s)
        a: acceleration (m/s^2)
        d: distance to be travelled (m)
    :return:
        v_f: the final speed (m/s); 0 when the kinematic discriminant is
        non-positive (i.e. the vehicle would stop before covering d).
    """
    discriminant = v_i ** 2 + 2 * d * a
    if discriminant <= 0:
        return 0
    return np.sqrt(discriminant)
def build_samples(ctx, version="DEBUG", filter="", delphi_version=DEFAULT_DELPHI_VERSION):
    """Builds samples.

    :param ctx: invoke task context used to run the build commands.
    :param version: build configuration passed to the build, e.g. "DEBUG".
    :param filter: optional filter forwarded to the project-list build
        ("" presumably builds all sample projects -- confirm in
        build_delphi_project_list). Note: name shadows the builtin.
    :param delphi_version: Delphi toolchain version to build with.
    :return: result of build_delphi_project_list over the sample projects.
    """
    init_build(version)
    delphi_projects = get_delphi_projects_to_build('samples', delphi_version)
    return build_delphi_project_list(ctx, delphi_projects, version, filter, delphi_version)
def write_table(fh, data, samples=None, tree=None, rankdic=None, namedic=None,
                name_as_id=False):
    """Write a profile to a tab-delimited file.
    Parameters
    ----------
    fh : file handle
        Output file.
    data : dict
        Profile data.
    samples : list, optional
        Ordered sample ID list.
    tree : dict, optional
        Taxonomic tree, to inform "Lineage" column.
    rankdic : dict, optional
        Rank dictionary, to inform "Rank" column.
    namedic : dict, optional
        Taxon name dictionary, to inform "Name" column.
    name_as_id : bool, optional
        Replace feature IDs with names. It applies to row headers and "Lineage"
        column, and removes "Name" column.
    Returns
    -------
    int
        Number of samples in the table.
    int
        Number of features in the table.
    Notes
    -----
    The output table will have columns as samples and rows as features.
    Optionally, three metadata columns, "Name", "Rank" and "Lineage" will be
    appended to the right of the table.
    """
    if samples:
        # Keep the caller's ordering but drop samples absent from the data.
        samples = [x for x in samples if x in data]
    else:
        samples = sorted(data)
    # table header
    header = ['#FeatureID'] + samples
    if namedic and not name_as_id:
        header.append('Name')
    if rankdic:
        header.append('Rank')
    if tree:
        header.append('Lineage')
    print('\t'.join(header), file=fh)
    # table body
    nrow = 0
    # allkeys: union of feature keys across all samples (helper defined
    # elsewhere in this module).
    for key in sorted(allkeys(data)):
        # stratification: keys may be plain features or (stratum, feature)
        stratum, feature = key if isinstance(key, tuple) else (None, key)
        # get feature name
        name = namedic[feature] if namedic and feature in namedic else None
        # fill row header (feature Id or name)
        head = name if name_as_id and name else feature
        row = [f'{stratum}|{head}'] if stratum else [head]
        # fill cell values (feature counts)
        for sample in samples:
            row.append(str(data[sample][key]) if key in data[sample] else '0')
        # fill name column
        if namedic and not name_as_id:
            row.append(name or '')
        # fill rank column
        if rankdic:
            row.append(rankdic[feature] if feature in rankdic else '')
        # fill lineage column
        if tree:
            row.append(get_lineage_gg(
                feature, tree, namedic if name_as_id else None))
        # print row
        print('\t'.join(row), file=fh)
        nrow += 1
    return len(samples), nrow
def data_context_notification_context_notif_subscriptionuuid_notificationnotification_uuid_changed_attributesvalue_name_get(uuid, notification_uuid, value_name):  # noqa: E501
    """Look up one changed attribute of a notification.

    returns tapi.notification.NameAndValueChange # noqa: E501

    :param uuid: Id of notif-subscription
    :type uuid: str
    :param notification_uuid: Id of notification
    :type notification_uuid: str
    :param value_name: Id of changed-attributes
    :type value_name: str
    :rtype: TapiNotificationNameAndValueChange
    """
    # Auto-generated server stub: no real lookup is implemented yet.
    placeholder = 'do some magic!'
    return placeholder
def angular_misalignment_loss_db(n, w, theta, lambda0):
    """
    Calculate the loss due to angular fiber misalignment.
    See Ghatak eqn 8.75: loss = 4.34 * (pi * w * theta * n / lambda0)^2.
    Args:
        n: index between fiber ends [-]
        w: mode field radius [m]
        theta: angular misalignment [radians]
        lambda0: wavelength in vacuum [m]
    Returns:
        angular misalignment loss in dB [-]
    """
    normalized_tilt = np.pi * w * theta * n / lambda0
    return 4.34 * normalized_tilt ** 2
def replace_number(token):
    """Replaces a number and returns a list of one or multiple tokens."""
    # Non-number tokens pass through unchanged.
    if not number_match_re.match(token):
        return token
    # Wrap each number group in @-delimiters (module-level regexes).
    return number_split_re.sub(r' @\1@ ', token)
import timeit
from typing import DefaultDict
def dnscl_rpz(
    ip_address: str,
    filename: str = FILENAME,
    tail_num: int = 0,
    quiet_mode: bool = False,
) -> int:
    """Return rpz names queried by a client IP address.

    Scans the (tail of the) DNS log file for RPZ QNAME entries from the
    given client, prints a per-domain query count, and returns the total
    number of matching queries.
    """
    start_time = timeit.default_timer()
    rpz_dict: DefaultDict = defaultdict(int)
    line_count = 0
    # Anchor on "IP#" so a shorter address does not match a longer one
    # sharing the same prefix.
    # NOTE(review): assumes the log writes clients as "IP#port" (BIND
    # style) -- confirm against the actual log format.
    ip_address_search = ip_address + "#"
    if tail_num:
        syslog = tail(filename, tail_num)
    else:
        syslog = tail(filename)
    for line in syslog:
        # tail() yields bytes; decode before string matching.
        line = line.decode("utf-8")
        if ip_address_search in line:
            # Keep RPZ QNAME entries, skip SOA records.
            if "QNAME" in line and "SOA" not in line:
                fields = line.strip().split(" ")
                rpz_domain_fields = find_rpz_domain_field(fields).split("/")
                rpz_domain = rpz_domain_fields[0]
                # Guard against truncated/malformed log lines.
                if len(fields) > 11:
                    rpz_dict[rpz_domain] += 1
                    line_count += 1
    rpz_list_sorted = sort_dict(rpz_dict)
    elapsed_time = timeit.default_timer() - start_time
    print(f"{ip_address} total queries: {line_count}")
    print("queries: ")
    for domain_name, query_count in rpz_list_sorted:
        print(query_count, "\t", domain_name)
    if not quiet_mode:
        print(
            f"\nSummary: Searched {ip_address} and found {line_count}",
            f"queries for {len(rpz_dict)} rpz names.",
        )
        print(f"Query time: {round(elapsed_time, 2)} seconds")
    return line_count
def sweep_centroids(nrays, rscale, nbins, elangle):
    """Construct sweep centroids native coordinates.
    Parameters
    ----------
    nrays : int
        number of rays
    rscale : float
        length [m] of a range bin
    nbins : int
        number of range bins
    elangle : float
        elevation angle [radians]
    Returns
    -------
    coordinates : 3d array
        array of shape (nrays,nbins,3) containing native centroid radar
        coordinates (slant range, azimuth, elevation)
    """
    # Bin/ray centers sit half a step beyond the leading edge.
    azimuth_step = 2 * np.pi / nrays
    azimuths = azimuth_step / 2. + np.linspace(0, 2 * np.pi, nrays,
                                               endpoint=False)
    ranges = np.arange(nbins) * rscale + rscale / 2.
    coordinates = np.empty((nrays, nbins, 3), dtype=float)
    # Broadcast ranges along rows, azimuths along columns.
    coordinates[..., 0] = ranges[np.newaxis, :]
    coordinates[..., 1] = azimuths[:, np.newaxis]
    coordinates[..., 2] = elangle
    return coordinates
import time
def toc():
    """
    Equivalent of MATLAB's ``toc``: print and return the seconds elapsed
    since the matching ``tic`` call, which stores its start time in the
    module-level global ``tt``.

    NOTE(review): ``time.clock`` was removed in Python 3.8, so this
    crashes on modern interpreters; migrate both this and the paired
    ``tic`` to ``time.perf_counter()`` together.

    :return: elapsed time in seconds.
    """
    t = time.clock() - globals()['tt']
    print('\nElapsed time: %.8f seconds\n' % t)
    return t
import logging
def dashboard():
    """
    This function deals with the server side of the flask application
    :return: render the html page
    """
    logging.info("client accessed either route")
    deaths, hospital_cases, nat_week_ava = covid_API_request('England', 'Nation')
    local_average = covid_API_request()[2]
    logging.info("covid data API requests made")
    update_schedules = []
    # NOTE(review): these build *tuples* ("label", value), not strings --
    # was string concatenation ("label" + value) intended?
    hospital_cases = "National hospital cases:", str(hospital_cases)
    deaths = "National death total: ", str(deaths)
    # load the page initially
    # NOTE(review): form fields are read from the query string on GET; the
    # commented-out POST branch below suggests this behaviour changed.
    if request.method == 'GET':
        # if a form has been submitted
        # if request.method == 'POST':
        text_field = request.args.get('two')
        if text_field:
            update_time = request.args.get('update')
            covid_data = request.args.get('covid-data')
            news_update = request.args.get('news')
            repeat = request.args.get('repeat')
            # schedules = {}
            # schedules.update(update_time,covid_data,news_update,repeat)
            # updateSchedules.append(schedules)
            # use workshop 4 to convert to hhmmss, and to find the current time
            logging.debug("schedule to be update at: "+update_time+" ,Name: "+text_field+" To update: " +
                          covid_data+" " + news_update+" "+repeat)
            # schedule_add_news
    return render_template('index.html', title='Smart Covid Dashboard', news_articles=news, deaths_total=deaths,
                           hospital_cases=hospital_cases, national_7day_infections=nat_week_ava,
                           local_7day_infections=local_average, updates=update_schedules, image='covidPic.jpg',
                           notif=removing_news())
def read_images_binary(path_to_model_file):
    """
    see: src/base/reconstruction.cc
        void Reconstruction::ReadImagesBinary(const std::string& path)
        void Reconstruction::WriteImagesBinary(const std::string& path)

    Parses a COLMAP binary images file and returns a dict mapping
    image_id -> Image record (pose, camera, name, 2D points).
    """
    images = {}
    with open(path_to_model_file, "rb") as fid:
        # File starts with the number of registered images (uint64).
        num_reg_images = read_next_bytes(fid, 8, "Q")[0]
        for _ in range(num_reg_images):
            # Fixed 64-byte record: image_id (int), qvec (4 doubles),
            # tvec (3 doubles), camera_id (int).
            binary_image_properties = read_next_bytes(
                fid, num_bytes=64, format_char_sequence="idddddddi")
            image_id = binary_image_properties[0]
            qvec = np.array(binary_image_properties[1:5])
            tvec = np.array(binary_image_properties[5:8])
            camera_id = binary_image_properties[8]
            # Image name is a NUL-terminated byte string.
            image_name = ""
            current_char = read_next_bytes(fid, 1, "c")[0]
            while current_char != b"\x00":   # look for the ASCII 0 entry
                image_name += current_char.decode("utf-8")
                current_char = read_next_bytes(fid, 1, "c")[0]
            num_points2D = read_next_bytes(fid, num_bytes=8,
                                           format_char_sequence="Q")[0]
            # Each 2D point is packed as (x, y, point3D_id): two doubles
            # plus an int64, flattened into one sequence.
            x_y_id_s = read_next_bytes(fid, num_bytes=24*num_points2D,
                                       format_char_sequence="ddq"*num_points2D)
            xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])),
                                   tuple(map(float, x_y_id_s[1::3]))])
            point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))
            images[image_id] = Image(
                id=image_id, qvec=qvec, tvec=tvec,
                camera_id=camera_id, name=image_name,
                xys=xys, point3D_ids=point3D_ids)
    return images
import re
import os
def Filter_Readouts_by_RNAfold(cand_readout_file='selected_candidates_genome.fasta',
                               rnafold_exe=r'E:\Shared_Apps\ViennaRNA\RNAfold',
                               energy_th =-6.0, readout_folder=_readout_folder,
                               make_plot=False, verbose=True):
    """Filter Readouts by energy of secondary structure generated by RNAfold.

    Runs the external RNAfold binary on *cand_readout_file*, keeps only
    records whose predicted folding energy is above *energy_th*, and
    writes them to ``<input>_structure.fasta``.

    Inputs:
        cand_readout_file: candidate readouts fasta (absolute path, or a
            name resolved inside *readout_folder*).
        rnafold_exe: path to the RNAfold executable.
        energy_th: energy threshold; records with energy <= threshold are
            discarded (units presumably kcal/mol -- confirm with RNAfold docs).
        readout_folder: folder used to resolve relative file names.
        make_plot: if True, show a histogram of the parsed energies.
        verbose: currently unused -- NOTE(review): either use it or drop it.
    Outputs:
        kept_records: list of SeqRecord objects that passed the filter.
    """
    if not os.path.isfile(cand_readout_file):
        cand_readout_file = os.path.join(readout_folder, cand_readout_file)
        if not os.path.isfile(cand_readout_file):
            raise IOError(f"Wrong input candidate readout file:{cand_readout_file}, not exist.")
    elif '.fasta' not in cand_readout_file:
        raise IOError(f"Wrong input file type for {cand_readout_file}")
    # run RNAfold
    # NOTE(review): paths are interpolated unquoted into a shell command;
    # spaces or shell metacharacters in paths will break this (or worse).
    _rnafold_output = cand_readout_file.replace('.fasta', '_structreport.txt')
    os.system(f"{rnafold_exe} < {cand_readout_file} > {_rnafold_output}")
    # load RNAfold result and read it
    structure_dics = []
    energy_list = []
    with open(_rnafold_output, 'r') as handle:
        structure_reports = handle.read()
    structure_reports = structure_reports.split('>')[1:]
    for structure_report in structure_reports:
        lines = structure_report.split('\n')[:-1]
        barcode_id = lines[0].split(' ')[0]
        barcode_description = lines[0].split(' ')[1]
        # Third line looks like "....((..)).. ( -6.40)"; capture the energy.
        re_result = re.match('(\S+)\s\(\s*?([0-9\-\+\.]+)\)', lines[2])
        barcode_energy = float(re_result.group(2))
        structure_dic = {'id':barcode_id, 'description':barcode_description, 'energy':barcode_energy}
        # structure_dics is collected but never returned -- kept for
        # debugging parity only (NOTE(review)).
        structure_dics.append(structure_dic)
        energy_list.append(barcode_energy)
    # whether keep each record:
    structure_keeps = np.array(energy_list) > energy_th
    # extract kept records
    kept_records = []
    # NOTE(review): this re-reads the hard-coded default file instead of
    # cand_readout_file -- likely a bug when a different input was given.
    with open(os.path.join(readout_folder, 'selected_candidates_genome.fasta'), "r") as handle:
        for _i, record in enumerate(SeqIO.parse(handle, "fasta")):
            if structure_keeps[_i]:
                kept_records.append(record)
    # save selected records
    _save_filename = cand_readout_file.replace('.fasta', '_structure.fasta')
    with open(_save_filename, "w") as output_handle:
        SeqIO.write(kept_records, output_handle, "fasta")
    if make_plot:
        f1 = plt.figure()
        plt.hist(energy_list)
        plt.show()
    return kept_records
def __no_conflicts(items):
    """Return True if each possible pair, from a list of items, has no conflicts."""
    for first, second in it.combinations(items, 2):
        if not __no_conflict(first, second):
            return False
    return True
import os
def _demo_home(options):
"""For convenience demo home is in the same folder as jar file"""
bp, fn = os.path.split(options.jar_file)
demo_home = os.path.join(bp, 'demo')
assert os.path.isdir(demo_home), 'Folder does not exist: "%s"' % demo_home
return demo_home | 97cdb9a36e56539719cf3aab1e7a5d0c95d87a1d | 30,353 |
def valid_tetrodes(tetrode_ids, tetrode_units):
    """
    Keep only tetrodes that carry at least one neuron unit, so that
    corresponding spike train data exists.
    :param tetrode_ids: (list) of tetrode ids in the order of LFP data
    :param tetrode_units: (dict) number of neuron units on each tetrode
    :return: (list) of tetrode ids with neuron units
    """
    kept = []
    for tid in tetrode_ids:
        if tetrode_units[tid] > 0:
            kept.append(tid)
    return kept
def PreviewApply(source,
                 deployment_full_name,
                 stage_bucket,
                 messages,
                 location,
                 ignore_file,
                 source_git_subdir='.',
                 preview_format=_PREVIEW_FORMAT_TEXT,
                 config_controller=None):
  """Executes preview of a deployment.

  Bundles parameters for creating/updating a deployment.

  Args:
    source: string, either a local path, a GCS bucket, or a Git repo.
    deployment_full_name: string, the fully qualified name of the deployment,
      e.g. "projects/p/locations/l/deployments/d".
    stage_bucket: an optional string. When not provided, the default staging
      bucket will be used. This is of the format "gs://bucket-name/".
    messages: ModuleType, the messages module that lets us form blueprints API
      messages based on our protos.
    location: string, a region like "us-central1".
    ignore_file: optional string, a path to a gcloudignore file.
    source_git_subdir: optional string. If "source" represents a Git repo, then
      this argument represents the directory within that Git repo to use.
    preview_format: output format for preview results. Either "text" or "json".
    config_controller: optional string, the fully qualified name of the
      config-controller instance to use. Only valid for previewing without an
      existing deployment. e.g.
      "projects/{project}/locations/{location}/krmApiHosts/{instance}".

  Returns:
    Returns a messages.Preview that contains preview results.
  """
  # Resolve the target region into a full location resource reference.
  location_ref = resources.REGISTRY.Create(
      collection='config.projects.locations',
      projectsId=properties.VALUES.core.project.GetOrFail(),
      locationsId=location)
  blueprint = _CreateBlueprint(messages, source, source_git_subdir,
                               stage_bucket, ignore_file)
  # Check if a deployment with the given name already exists. If it does, we'll
  # update that deployment. If not, we'll create it.
  try:
    existing_deployment = blueprints_util.GetDeployment(deployment_full_name)
  except apitools_exceptions.HttpNotFoundError:
    existing_deployment = None
  is_creating_deployment = existing_deployment is None
  # An empty 'deployment' field marks this as a create-style preview.
  preview = messages.Preview(
      applyInput=messages.ApplyInput(
          blueprint=blueprint,
          deployment='' if is_creating_deployment else deployment_full_name))
  if is_creating_deployment:
    preview.applyInput.configController = _GetOrCreateConfigControllerInstance(
        config_controller, deployment_full_name, async_=False)
  # This just allows --config-controller to be set as a 'passthrough' as long
  # as it matches the value on the existing deployment. The value of the flag
  # is not used.
  elif config_controller != existing_deployment.configController:
    msg = ('--config-controller cannot differ from existing Deployment when '
           'previewing an update.')
    if existing_deployment.configController:
      msg += ' Existing deployment has config_controller: [{}].'.format(
          existing_deployment.configController)
    raise c_exceptions.InvalidArgumentException('config-controller', msg)
  # Kick off the long-running preview operation and block until it finishes.
  op = blueprints_util.CreatePreview(preview, location_ref.RelativeName())
  log.debug('LRO: %s', op.name)
  preview_result = blueprints_util.WaitForApplyPreviewOperation(op)
  _PrintPreview(messages, preview_result, preview_format)
  return preview_result
import datetime
def get_us_week(date):
    """Determine the US (North American) week number of *date*.

    US weeks run Sunday-Saturday; a date's week is identified by the
    Saturday that closes it, and weeks are counted within that
    Saturday's year.

    :param date: a ``datetime.date``
    :return: the 1-based US week number
    """
    # Bug fix: the original imported the ``datetime`` *class*
    # (``from datetime import datetime``) yet accessed
    # ``datetime.timedelta`` / ``datetime.date``, which raises
    # AttributeError.  Importing the module restores the intended lookups.
    # Each date belongs to some week. Each week has a Saturday. The
    # week_sat_offset is the number of days between the Saturday and the date:
    week_sat_offset = (12 - date.weekday()) % 7
    week_sat = date + datetime.timedelta(days=week_sat_offset)
    week_year = week_sat.year
    # Offset of the first Saturday of the week's year from January 1st.
    frst_sat_offset = (12 - datetime.date(week_year, 1, 1).weekday()) % 7
    frst_sat = datetime.date(week_year, 1, 1) + datetime.timedelta(days=frst_sat_offset)
    return (((date - frst_sat).days - 1) // 7) + 2
def is_street_name(elem):
    """Check whether *elem* carries a street-address attribute.

    True when the element's "k" attribute is "addr:street" or
    "addr:street_1".
    Adapted from https://classroom.udacity.com/nanodegrees/nd002/parts/0021345404/modules/316820862075461/lessons/5436095827/concepts/54446302850923
    """
    return elem.attrib["k"] in ("addr:street", "addr:street_1")
from typing import List, Tuple
import os
def run_fast_scandir(path: str, ext: List[str]) -> Tuple[List[str], List[str]]:
    """
    Recursively collect sub-folders and files with the given extensions.

    Adapted from a stack overflow answer
    (https://stackoverflow.com/a/59803793/9163028).

    :param path: directory to scan.
    :param ext: extensions to keep; entries are compared against the
        lower-cased file extension, so they should be lower case
        (e.g. ``[".txt"]``).
    :return: ``(subfolders, files)`` tuple of path lists covering the whole
        tree below *path*.
    """
    # Fixes: the original shadowed the *path* parameter inside the
    # recursion loop and reused ``i`` both as a DirEntry and as the
    # recursive file list; the return annotation was also not valid typing.
    subfolders: List[str] = []
    files: List[str] = []
    for entry in os.scandir(path):
        if entry.is_dir():
            subfolders.append(entry.path)
        elif entry.is_file() and os.path.splitext(entry.name)[1].lower() in ext:
            files.append(entry.path)
    # Iterate over a snapshot: `subfolders` grows while walking the tree.
    for folder in list(subfolders):
        sub_folders, sub_files = run_fast_scandir(folder, ext)
        subfolders.extend(sub_folders)
        files.extend(sub_files)
    return subfolders, files
def read_youtube_urls():
    """
    Ask the user for a txt file and read YouTube URLs from it.

    Required format of the txt file containing the youtube urls:
        url_1
        url_2
        .
        .
        .
        url_n

    Blank lines are skipped; lines that do not match ``YOUTUBE_URL_REGEX``
    are reported via ``show_error_message`` and skipped.

    :return: list of cleaned YouTube URL strings (empty if the user cancels)
    """
    yt_urls = []
    # askopenfile returns None when the user cancels the dialog.
    file_to_read = askopenfile(mode="r", filetypes=[("Text file", "*.txt")])
    if file_to_read is None:
        return yt_urls
    # Fix: the original never closed the file handle; the context manager
    # guarantees it is released.
    with file_to_read:
        for curr_url in file_to_read:
            cleaned_curr_url = curr_url.strip().rstrip("\n").strip("\r").strip("\t")
            if not cleaned_curr_url:
                continue
            if YOUTUBE_URL_REGEX.findall(cleaned_curr_url):
                yt_urls.append(cleaned_curr_url)
            else:
                show_error_message(
                    f'"{cleaned_curr_url}" IS NOT A VALID YOUTUBE URL. SKIPPED.'
                )
    return yt_urls
def _get_search_direction(state):
  """Computes the search direction to follow at the current state.

  On the `k`-th iteration of the main L-BFGS algorithm, the state has collected
  the most recent `m` correction pairs in position_deltas and gradient_deltas,
  where `k = state.num_iterations` and `m = min(k, num_correction_pairs)`.

  Assuming these, the code below is an implementation of the L-BFGS two-loop
  recursion algorithm given by [Nocedal and Wright(2006)][1]:

  ```None
  q_direction = objective_gradient
  for i in reversed(range(m)):  # First loop.
    inv_rho[i] = gradient_deltas[i]^T * position_deltas[i]
    alpha[i] = position_deltas[i]^T * q_direction / inv_rho[i]
    q_direction = q_direction - alpha[i] * gradient_deltas[i]

  kth_inv_hessian_factor = (gradient_deltas[-1]^T * position_deltas[-1] /
                            gradient_deltas[-1]^T * gradient_deltas[-1])
  r_direction = kth_inv_hessian_factor * I * q_direction

  for i in range(m):  # Second loop.
    beta = gradient_deltas[i]^T * r_direction / inv_rho[i]
    r_direction = r_direction + position_deltas[i] * (alpha[i] - beta)

  return -r_direction  # Approximates - H_k * objective_gradient.
  ```

  Args:
    state: A `LBfgsOptimizerResults` tuple with the current state of the
      search procedure.

  Returns:
    A real `Tensor` of the same shape as the `state.position`. The direction
    along which to perform line search.
  """
  # The number of correction pairs that have been collected so far.
  num_elements = tf.minimum(
      state.num_iterations,
      distribution_util.prefer_static_shape(state.position_deltas)[0])

  def _two_loop_algorithm():
    """L-BFGS two-loop algorithm."""
    # Correction pairs are always appended to the end, so only the latest
    # `num_elements` vectors have valid position/gradient deltas.
    position_deltas = state.position_deltas[-num_elements:]
    gradient_deltas = state.gradient_deltas[-num_elements:]

    # Pre-compute all `inv_rho[i]`s.
    inv_rhos = tf.reduce_sum(
        input_tensor=gradient_deltas * position_deltas, axis=1)

    def first_loop(acc, args):
      _, q_direction = acc
      position_delta, gradient_delta, inv_rho = args
      alpha = tf.reduce_sum(input_tensor=position_delta * q_direction) / inv_rho
      return (alpha, q_direction - alpha * gradient_delta)

    # Run first loop body computing and collecting `alpha[i]`s, while also
    # computing the updated `q_direction` at each step.  `reverse=True`
    # walks the pairs from newest to oldest, as the recursion requires.
    zero = tf.zeros_like(inv_rhos[0])
    alphas, q_directions = tf.scan(
        first_loop, [position_deltas, gradient_deltas, inv_rhos],
        initializer=(zero, state.objective_gradient), reverse=True)

    # We use `H^0_k = gamma_k * I` as an estimate for the initial inverse
    # hessian for the k-th iteration; then `r_direction = H^0_k * q_direction`.
    gamma_k = inv_rhos[-1] / tf.reduce_sum(input_tensor=gradient_deltas[-1] *
                                           gradient_deltas[-1])
    r_direction = gamma_k * q_directions[0]

    def second_loop(r_direction, args):
      alpha, position_delta, gradient_delta, inv_rho = args
      beta = tf.reduce_sum(input_tensor=gradient_delta * r_direction) / inv_rho
      return r_direction + (alpha - beta) * position_delta

    # Finally, run second loop body computing the updated `r_direction` at each
    # step.
    r_directions = tf.scan(
        second_loop, [alphas, position_deltas, gradient_deltas, inv_rhos],
        initializer=r_direction)
    return -r_directions[-1]

  # With no correction pairs collected yet, fall back to steepest descent.
  return prefer_static.cond(tf.equal(num_elements, 0),
                            (lambda: -state.objective_gradient),
                            _two_loop_algorithm)
import os
import json
async def get_config(guildid):
    """
    :param guildid:
    :return: Guild-Config as Json
    """
    config_path = os.path.join("data", "configs", f"{guildid}.json")
    with open(config_path, "r") as config_file:
        return json.load(config_file)
def basevectors_sm(time, dipole=None):
    """
    Computes the unit base vectors of the SM coordinate system with respect to
    the standard geographic coordinate system (GEO).

    Parameters
    ----------
    time : float or ndarray, shape (...)
        Time given as modified Julian date, i.e. with respect to the date 0h00
        January 1, 2000 (mjd2000).
    dipole : ndarray, shape (3,), optional
        Dipole spherical harmonics :math:`g_1^0`, :math:`g_1^1` and
        :math:`h_1^1`. Defaults to ``basicConfig['params.dipole']``.

    Returns
    -------
    sm_1, sm_2, sm_3 : ndarray, shape (..., 3)
        SM unit base vectors. The leading dimension agrees with the shape of
        ``time``, while the last dimension contains the unit vector
        components in terms of GEO.
    """
    if dipole is None:
        dipole = basicConfig['params.dipole']
    dipole_unit = _dipole_to_unit(dipole)
    # Sun position at the requested times, converted to cartesian GEO.
    theta_sun, phi_sun = sun_position(time)
    x_sun, y_sun, z_sun = spherical_to_cartesian(1, theta_sun, phi_sun)
    # Stack the sun direction components into the trailing dimension.
    sun_vec = np.empty(x_sun.shape + (3,))
    sun_vec[..., 0] = x_sun
    sun_vec[..., 1] = y_sun
    sun_vec[..., 2] = z_sun
    # The third SM base vector is the dipole unit vector.
    sm_3 = np.empty(x_sun.shape + (3,))
    sm_3[..., 0] = dipole_unit[0]
    sm_3[..., 1] = dipole_unit[1]
    sm_3[..., 2] = dipole_unit[2]
    # Second base vector: normalized cross product of the dipole axis and
    # the sun direction.
    sm_2 = np.cross(sm_3, sun_vec)
    sm_2 = sm_2 / np.linalg.norm(sm_2, axis=-1, keepdims=True)
    # First base vector completes the right-handed triad.
    sm_1 = np.cross(sm_2, sm_3)
    return sm_1, sm_2, sm_3
def rewrite_metadata(content, dic):
    """From content, which is the old text with the metadata and dic which has the new data, return new_txt which has data replaced by dic content, with relevant headers added """
    # First paragraph holds the headers; the rest is the body.
    paragraphs = content.split("\n\n")
    rebuilt_lines = []
    for line in paragraphs[0].split("\n"):
        replaced = False
        # Replace the line when it starts with one of the new keys;
        # consumed keys are removed from dic (which is mutated).
        for key in list(dic.keys()):
            if line.startswith(key):
                rebuilt_lines.append(key + ": " + str(dic[key]))
                del dic[key]
                replaced = True
        if not replaced:
            rebuilt_lines.append(line)
    # Append any keys that had no matching header line.
    for key in list(dic.keys()):
        rebuilt_lines.append(key + ": " + str(dic[key]))
    paragraphs[0] = "\n".join(rebuilt_lines) + "\n"
    return "\n\n".join(paragraphs)
def rotate_points(points, axis, angle, origin=None):
    """Rotate points about an arbitrary axis in 3D (angle in radians).

    Parameters:
        points (sequence of sequence of float): XYZ coordinates of the points.
        axis (sequence of float): The rotation axis.
        angle (float): the angle of rotation in radians.
        origin (sequence of float): Optional. The origin of the rotation axis.
            Default is ``[0.0, 0.0, 0.0]``.

    Returns:
        list: the rotated points

    Notes:
        For background see https://en.wikipedia.org/wiki/Rotation_matrix.
    """
    # Build the rotation matrix once, then apply it to all points.
    return transform(points, rotation_matrix(angle, axis, origin))
def resolve_translation(instance, info, language_code):
    """Get translation object from instance based on language code."""
    loader = TYPE_TO_TRANSLATION_LOADER_MAP.get(type(instance))
    if not loader:
        raise TypeError(f"No dataloader found to {type(instance)}")
    return loader(info.context).load((instance.pk, language_code))
def reverse_dict_old(dikt):
    """
    Build a new dict with the old values as keys and the old keys grouped
    into lists as values.

    example
    reverse_dict_old({'AB04a': 'b', 'AB04b': 'b', 'AB04c': 'b', 'CC04x': 'c'})
    will return
    {'b': ['AB04a', 'AB04b', 'AB04c'], 'c': ['CC04x']}

    Note: every value is a list, even for a single occurrence (the previous
    docstring example incorrectly showed a bare string for 'c').
    """
    new_dikt = {}
    for k, v in dikt.items():
        # setdefault creates the group list on first sight of a value.
        new_dikt.setdefault(v, []).append(k)
    return new_dikt
def get_active_user(request):
    """
    Endpoint for getting the active user
    through the authtoken
    """
    serializer = UserSerializer(request.user, context={'is_public_view': False})
    return Response(serializer.data, status=status.HTTP_200_OK)
import joblib
import os
import inspect
import shutil
import json
import subprocess
from functools import wraps
def subprocessdec(func, jobtype="source", cache_dir="."):
    """Calls function as subprocess (allows for nested multiprocessing).

    The wrapped call serializes its arguments to JSON, writes a small
    runner script next to a copy of the function's source module, executes
    it with ``python`` and streams the subprocess output.  The subprocess's
    return value is discarded; ``subprocess.CalledProcessError`` is raised
    on a non-zero exit code.

    Args:
        func: function to wrap; its source file must be importable.
        jobtype: "source" copies the function's source file under a hashed
            module name; "module" reuses ``func.__module__`` directly.
        cache_dir: directory under which the temporary run files live.

    Returns:
        The wrapped function.
    """
    # Fix: ``wraps`` was used without being imported; ``from functools
    # import wraps`` (above) makes the decorator self-contained.
    @wraps(func)
    def nfunc(*args, **kwds):
        # collapse args into kwds to dump to json
        nkwds = genkwds(func, *args, **kwds)
        kwds_hash = joblib.hash(nkwds)
        # handle different jobtypes
        if jobtype == "source":
            mod_lab = "mod_%s" % kwds_hash
        elif jobtype == "module":
            mod_lab = func.__module__
        else:
            raise ValueError("Unknown jobtype: %s" % jobtype)
        # prep file names
        loc = os.path.join(cache_dir, ".labbot", "subprocessdec_tmp_files")
        os.makedirs(loc, exist_ok=True)
        run_f = "run_%s.py" % kwds_hash
        mod_f = "%s.py" % mod_lab
        json_f = "%s.json" % kwds_hash
        # create module file containing code from func
        mod_script = inspect.getfile(func)
        shutil.copy(mod_script, os.path.join(loc, mod_f))
        # prep run script: imports the copied module, loads the JSON kwargs
        # and calls the function.
        fn_name = func.__name__
        with open(os.path.join(loc, run_f), "w") as fd:
            fd.write("from %s import %s as func\n" % (mod_lab, fn_name))
            fd.write("import json\n")
            fd.write("\n")
            fd.write("json_file = '%s'\n" % json_f)
            fd.write("\n")
            fd.write("with open(json_file, 'r') as fd:\n")
            fd.write("    func_kwds = json.load(fd)\n")
            fd.write("\n")
            fd.write("func(**func_kwds)\n")
        # prep kwds/json
        with open(os.path.join(loc, json_f), "w") as fd:
            json.dump(nkwds, fd)
        # run code
        process = subprocess.Popen(["python", run_f], stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT, cwd=loc)
        # while there is output coming from the process write
        # it to the logger
        with process.stdout as pipe:
            for line in iter(pipe.readline, b""):
                print("subprocess output: %s" % line)
        # catch the return code, if it is non-zero something has
        # gone wrong and we need to throw a CalledProcessError
        retcode = process.wait()
        if retcode:
            raise subprocess.CalledProcessError(retcode, "python %s" % run_f)
        # remove script files
        os.remove(os.path.join(loc, mod_f))
        os.remove(os.path.join(loc, json_f))
        os.remove(os.path.join(loc, run_f))
    return nfunc
def upsert_website(admin_id, root, data, force_insert=False):
    """Method to update and insert new website to live streaming.

    Args:
        admin_id (str): Admin privileges flag.
        root (str): Root privileges activation flag.
        data (dict): website data structure.
        force_insert (bool): Force insert Flag for updating the website information.
    """
    try:
        # Root access requires admin privileges AND an explicit truthy flag.
        has_root = is_admin(admin_id) and root in ["true", "True"]
        if has_root:
            return seer.online_streamer.add_website(
                data["name"], data["url"], data["server"],
                force_insert=force_insert)
        return False
    except Exception:
        # Best-effort: any failure is reported as an unsuccessful upsert.
        return False
def rest_query_object_by_id(bc_app, url, obj_id, json_obj_name, object_type, para_query_mode=False):
    """
    Query a single object by its id.

    :param bc_app: used to attach app sign
    :param url: do NOT contain params at the end
    :param obj_id: object id
    :param json_obj_name: key of the object in the response, like 'plan' for plan query
    :param object_type: object type like beecloud.entity.BCPlan
    :param para_query_mode: true if query string is para={}, else k1=v1&k2=v2
    :return: beecloud.entity.BCResult
    """
    signed_query = _TmpObject()
    attach_app_sign(signed_query, BCReqType.QUERY, bc_app)
    if para_query_mode:
        target_url = url + '/' + obj_id + '?para=' + obj_to_quote_str(signed_query)
        raw_resp = http_get(target_url, bc_app.timeout)
    else:
        raw_resp = http_get(url + '/' + obj_id, bc_app.timeout, obj_to_dict(signed_query))
    # raw_resp[0] is falsy when the HTTP request itself failed; raw_resp[1]
    # then already carries the error result.
    if not raw_resp[0]:
        return raw_resp[1]
    resp_dict = raw_resp[1]
    bc_result = BCResult()
    set_common_attr(resp_dict, bc_result)
    # A falsy result_code indicates success: attach the parsed object.
    if not bc_result.result_code:
        setattr(bc_result, json_obj_name, parse_dict_to_obj(resp_dict.get(json_obj_name), object_type))
    return bc_result
import re
import unicodedata
def bert_clean_text(text):
    """Performs invalid character removal and whitespace cleanup on text."""
    # Collapse long runs of underscores/em-dashes/dots, then NFKC-normalize.
    text = unicodedata.normalize("NFKC", re.sub('[_—.]{4,}', '__', text))
    # Drop NUL, the Unicode replacement character and control characters.
    kept_chars = []
    for char in text:
        code_point = ord(char)
        if code_point in (0, 0xFFFD) or _is_control(char):
            continue
        kept_chars.append(char)
    return "".join(kept_chars)
import sys
import time
import os
def _fail_callback(die_lock_file: str,
                   actor_rank: int = 0,
                   fail_iteration: int = 6):
    """Returns a callback to cause an Xgboost actor to fail training.

    Args:
        die_lock_file (str): A file lock used to prevent race conditions
            when causing the actor to fail.
        actor_rank (int): The rank of the actor to fail.
        fail_iteration (int): The iteration after which the training for
            the specified actor fails.
    """

    class _FailCallback(TrainingCallback):
        def after_iteration(self, model, epoch, evals_log):
            # Record (epoch, wall-clock time) from the chosen actor so the
            # surrounding test can inspect progress.
            if get_actor_rank() == actor_rank:
                put_queue((epoch, time.time()))
            # Only fail once: the lock file is created before exiting so a
            # restarted actor skips this branch.
            if get_actor_rank() == actor_rank and \
                    epoch == fail_iteration and \
                    not os.path.exists(die_lock_file):
                with open(die_lock_file, "wt") as fp:
                    fp.write("")
                time.sleep(2)
                print(f"Testing: Rank {get_actor_rank()} will now fail.")
                sys.exit(1)

    return _FailCallback()
import random
def get_successors(curr_seq):
    """Generate a list of 100 random successor sequences by swapping cities.

    The first and last city remain unchanged since the traveller starts
    and ends in the same city, so swap indices are drawn from the interior.

    Parameters
    ----------
    curr_seq : [list]
        [list of cities]

    Returns
    -------
    [list of list]
        [list of list of random cities]
    """
    successors = []
    upper = len(curr_seq) - 2
    for _ in range(100):
        candidate = curr_seq[:]
        first_idx = random.randint(1, upper)
        second_idx = random.randint(1, upper)
        successors.append(swap(candidate, first_idx, second_idx))
    return successors
def Q(lambda_0, lambda_, eps_c, Delta, norm_zeta2, nu):
    """
    Quadratic upper bound of the duality gap function initialized at lambda_0
    """
    ratio = lambda_ / lambda_0
    residual = 1. - ratio
    return ratio * eps_c + Delta * residual + 0.5 * nu * norm_zeta2 * residual ** 2
def solve(equation):
    """
    Solves equation using shunting-yard algorithm
    :param equation: string
        equation to be solved
    :return: float
        result of equation
    """
    # Convert to reverse Polish notation, then evaluate it.
    return shunting_yard(rpn(equation))
def get_project_url():  # pragma no cover
    """Open .git/config file and get the url from it.

    Returns:
        dict with keys 'url', 'project_user', 'project_name' and
        'project_user_profile_url'.  Placeholder strings are returned when
        no usable .git/config exists in the current working directory.
    """
    # Fixes: removed a duplicated condition whose first split result was
    # immediately discarded; replaced the obscure
    # ``url.replace(url, 'https://' + url)`` with plain prefixing; and
    # guarded against a NameError when no recognizable remote line exists.
    placeholder = {
        'url': "YOUR PROJECT URL HERE",
        'project_user': "YOUR NAME HERE",
        'project_name': "YOUR PROJECT NAME HERE",
        'project_user_profile_url': "YOUR USER PROFILE URL HERE",
    }
    project_info = {}
    try:
        with open('./.git/config', 'r') as git_config:
            for line in git_config:
                if "url = git@" in line:
                    # ssh style remote: "url = git@host:user/repo.git"
                    _, need = line.split('@')
                    url = need.strip()
                    url = url.replace(':', '/')
                    url = url.replace('.git', '')
                    url = 'https://' + url
                    project_info['url'] = url
                    break
                elif "url = https://github.com" in line:
                    # https style remote: "url = https://github.com/user/repo.git"
                    _, need = line.split(' = ')
                    url = need.strip()
                    url = url.replace(".git", '')
                    project_info['url'] = url
                    break
    except FileNotFoundError:
        return placeholder
    if 'url' not in project_info:
        # No recognizable remote url line was found.
        return placeholder
    project_user = get_user_name(url)
    project_info['project_user'] = project_user
    project_name = get_project_name(url)
    project_info['project_name'] = project_name
    project_user_profile_url = get_user_profile_url(project_user)
    project_info['project_user_profile_url'] = project_user_profile_url
    return project_info
from datetime import datetime
def sign_out(entry, time_out=None, forgot=False):
    """Sign out of an existing entry in the timesheet.

    If the user forgot to sign out, the entry is flagged as forgotten
    instead of receiving a sign-out time.

    :param entry: `models.Entry` object. The entry to sign out.
    :param time_out: (optional) `datetime.time` object. Specify the sign out time.
    :param forgot: (optional) If true, user forgot to sign out. Entry will be flagged as forgotten.
    :return: The signed out entry.
    """
    effective_time = datetime.today().time() if time_out is None else time_out
    if forgot:
        entry.forgot_sign_out = True
        logger.info(
            '{} forgot to sign out on {}.'.format(entry.user_id, entry.date)
        )
    else:
        entry.time_out = effective_time
        logger.info('{} ({}) signed out.'.format(entry.user_id, entry.user_type))
    return entry
def get_button_write(deck_id: str, page: int, button: int) -> str:
    """Returns the text to be produced when the specified button is pressed"""
    state = _button_state(deck_id, page, button)
    return state.get("write", "")
def opensafety_a(data: bytes) -> int:
    """
    Compute the CRC-16 checksum of *data* with the opensafety_a algorithm.

    :param bytes data: The data to be computed
    :return: The checksum
    :rtype: int
    :raises TypeError: if the data is not a bytes-like object
    """
    _ensure_bytes(data)  # validate the input type before computing
    return _crc_16_opensafety_a(data)
def EM_frac(pdf, iters=30, EPS=1E-12, verbose=True):
    """ EM-algorithm for unknown integrated class fractions

    Args:
        pdf     : (n x K) density (pdf) values for n measurements, K classes
        iters   : Number of iterations
        EPS     : numerical regularizer added to each row sum
        verbose : print NLL and fractions after every iteration
    Returns:
        frac : Integrated class fractions, shape (K,)
    """
    # The per-observation Python loop of the original is replaced by
    # equivalent vectorized NumPy operations (same elementwise math,
    # same row-wise reductions) for clarity and speed.
    n, K = pdf.shape
    frac = np.ones(K) / K
    for k in range(iters):
        # E-step: responsibilities, normalized per observation.
        P = pdf * frac
        P /= np.sum(P, axis=1, keepdims=True) + EPS
        # M-step: update fractions by averaging over observations.
        frac = np.sum(P, axis=0) / n
        if verbose:
            print(f'EM_frac: iter {k:4}, NLL = {mixture_nll(pdf,frac):.3f}, frac = {frac}')
    return frac
import os
def get_files(extensions, args):
    """Generates a list of paths whose boilerplate should be verified.

    If a list of file names has been provided on the command line, it will be
    treated as the initial set to search. Otherwise, all paths within rootdir
    will be discovered and used as the initial set.

    Once the initial set of files is identified, it is normalized via
    normalize_files() and further stripped of any file name whose extension is
    not in extensions.

    Args:
        extensions: a list of file extensions indicating which file types
            should have their boilerplate verified

    Returns:
        A list of absolute file paths
    """
    if args.filenames:
        candidates = args.filenames
    else:
        candidates = []
        for root, dirs, walkfiles in os.walk(args.rootdir):
            # Pruning skipped dirs here is purely a performance win;
            # normalize_files() would filter them out later anyway, but
            # skipping them cuts down the filesystem walking.
            dirs[:] = [d for d in dirs if d not in SKIPPED_PATHS]
            for name in walkfiles:
                candidates.append(os.path.join(root, name))
    candidates = normalize_files(candidates, args)
    outfiles = []
    for pathname in candidates:
        basename, extension = get_file_parts(pathname)
        if args.force_extension or extension in extensions or basename in extensions:
            outfiles.append(pathname)
    return outfiles
def gaussian_loss(y_true, y_pred, interval, eta):
    """ non zero mean absolute loss for one batch

    This function parameterizes a loss of the form
        Loss = - exp(- x ^ 2 / 2*sigma ^ 2)
    where x = y_true - y_pred and
        sigma = eta * y_true
    and eta is a constant, generally much less than 1

    Args:
        y_true: true depth
        y_pred: predicted depth
        interval: depth interval used
        eta: multiplictive constant appearing in standard deviations of gaussian loss

    Returns:
        A ``(loss, op)`` tuple: the loss averaged over valid (non-zero)
        pixels, and a ``tf.no_op`` placeholder.
    """
    # NOTE(review): the name scope 'MAE' looks inherited from an earlier
    # mean-absolute-error loss — confirm it is intentional.
    with tf.name_scope('MAE'):
        shape = tf.shape(y_pred)
        interval = tf.reshape(interval, [shape[0]])
        # mask_true is a tensor of 0's and 1's, where 1 is for valid pixels and 0 for invalid pixels
        mask_true = tf.cast(tf.not_equal(y_true, 0.0), dtype='float32')
        # The number of valid pixels in the depth map -- used for taking the average over only valid pixels
        num_valid_pixels = tf.abs(tf.reduce_sum(
            mask_true, axis=[1, 2, 3])) + 1e-6
        # The standard deviation used in the gaussian is of the form eta * y_true
        # with a small offset to prevent division by zero on invalid pixels
        sigma = eta * y_true + 1e-6
        # Below we assume the random error in y_true used to regularize the divergence
        # increases linearly with distance
        error = y_true - y_pred
        error = error*mask_true
        x = - tf.math.pow(error / sigma, 2.0) / 2.0
        loss = - tf.math.exp(x)
        # Average over the number of valid pixels
        loss = tf.reduce_sum(loss) / num_valid_pixels
        return loss, tf.no_op()
import time
def create(params):
    """
    Create Neo4j Docker instance

    :param params: dict — expects at least 'dbname', 'dbtype', 'dbuser',
        'dbuserpass', 'db_vol', 'owner' and 'contact' keys.
    :return: Help message for end user
    """
    con_name = params['dbname']
    config_dat = Config.info[params['dbtype']]
    # Substitute the placeholder volume names with the real host paths.
    volumes = config_dat['volumes']
    for vol in volumes:
        if vol[0] == 'DBVOL': vol[0] = params['db_vol']
        if vol[0] == 'BAKVOL': vol[0] = Config.backup_vol
    if container_util.container_exists(con_name):
        return "Container name %s already in use" % con_name
    used_ports = container_util.get_ports()
    # find two consecutive ports that are not in usebolt_port
    # NOTE(review): max(used_ports) raises ValueError when used_ports is
    # empty — confirm get_ports() always returns at least one port.
    for port in range(Config.base_port, max(used_ports) + 2):
        if port not in used_ports and (
           (port + 1) not in used_ports):
            break
    # Map each published container port to a consecutive host port.
    Ports = {}
    for p in config_dat['pub_ports']:
        Ports[int(p)] = (Config.container_ip, port)
        port += 1
    params['port_bindings'] = Ports
    bolt_port = params['port'] = Ports[7687][1]
    print("Ports: ", Ports)
    env = {'NEO4J_dbms_memory_pagecache_size': '4G',
           'NEO4J_AUTH': 'neo4j/changeme',
           'DB_USER': params['dbuser']}
    # create container
    container_util.make_dirs(con_name, volumes)
    command = config_dat['command']
    (c_id, con) = container_util.create_con(params, env, args=command)
    print('waiting for container to startup...')
    time.sleep(5)
    # Reset the default password, then create the user account; retry up
    # to 6 times because the container may still be starting up.
    status = reset_neo4j_password(params['port'])
    if status:
        stat2 = create_account(params['dbuser'], params['dbuserpass'],
                               params['port'])
    badness = 0
    while status is not True and badness < 6:
        print('create_account failed: %d' % badness)
        time.sleep(3)
        status = reset_neo4j_password(params['port'])
        if status:
            stat2 = create_account(params['dbuser'],
                                   params['dbuserpass'],
                                   params['port'])
        badness += 1
    if status is not True:
        print('DEBUG very bad')
        return "Failed: neo4j unable to create accounts"
    # Build the human-readable summary returned to the end user.
    https_port = Ports[7473][1]
    res = "Your neo4j container %s " % con_name
    res += "has been created.\n\n"
    res += "neo4j HTTPS interface: %d\n" % https_port
    res += "neo4j Bolt interface:  %d\n" % bolt_port
    res += '\n'
    res += 'Web access: https://%s:%d' % (Config.container_host, https_port)
    res += '\n\n'
    res += 'Note: Web interface will display the default Bolt port of 7687. '
    res += 'Change the Bolt port number from 7687 to %s ' % bolt_port
    res += 'before loginging in.\n\n'
    res += 'bolt://%s:%d' % (Config.FQDN_host, bolt_port)
    msg = 'Neo4j created: %s\n' % params['dbname']
    msg += 'Created by: %s <%s>\n' % (params['owner'], params['contact'])
    send_mail("DB4SCI: created neo4j", msg)
    return res
import logging
import argparse
def str_to_verbosity(verbosity_str: str) -> int:
    """
    Return a logging level from a string (compared in lower case).

    - `DEBUG` <=> {`debug`, `d`, `10`}
    - `INFO` <=> {`info`, `i`, `20`}
    - `WARNING` <=> {`warning`, `w`, `warn`, `30`}
    - `ERROR` <=> {`error`, `e`, `err`, `40`}

    .. code-block:: python

        >>> str_to_verbosity("d") == logging.DEBUG  # 'debug', 'd', '10', 10 (any letter case)
        True
        >>> str_to_verbosity("i") == logging.INFO  # 'info', 'i', '20', 20 (any letter case)
        True
        >>> str_to_verbosity("w") == logging.WARNING  # 'warning', 'w', 'warn', '30', 30 (any letter case)
        True
        >>> str_to_verbosity("e") == logging.ERROR  # 'error', 'e', 'err', '40', 40 (any letter case)
        True

    Args:
        verbosity_str (str): String to be converted

    Returns:
        logging level: Logging level (INFO, DEBUG, WARNING, ERROR)
    """
    # Fixes: the previous docstring doctests referenced a non-existent
    # ``str_to_bool``; and the documented numeric string forms ("10", ...)
    # were not actually accepted — both str and int forms now match.
    debug_str = ("debug", "d", "10", 10)
    info_str = ("info", "i", "20", 20)
    warn_str = ("warning", "w", "warn", "30", 30)
    err_str = ("error", "e", "err", "40", 40)
    if isinstance(verbosity_str, str):
        verbosity_str = verbosity_str.lower()
    if verbosity_str in info_str:
        verbosity = logging.INFO
    elif verbosity_str in debug_str:
        verbosity = logging.DEBUG
    elif verbosity_str in warn_str:
        verbosity = logging.WARNING
    elif verbosity_str in err_str:
        verbosity = logging.ERROR
    else:
        raise argparse.ArgumentTypeError(
            f"Incorrect logging level value: {verbosity_str}, "
            f"should be {info_str}, {debug_str}, {warn_str} or {err_str}"
        )
    return verbosity
import copy
def create_registration_data(legal_type, identifier='FM1234567', tax_id=None):
    """Test data for registration.

    Builds a business, a registration filing, and a party with the role
    appropriate for the legal type ('proprietor' for SP, 'partner' otherwise).
    Returns (filing_id, business_id).
    """
    mailing_address = {
        'streetAddress': 'mailing_address - address line one',
        'streetAddressAdditional': '',
        'addressCity': 'mailing_address city',
        'addressCountry': 'CA',
        'postalCode': 'H0H0H0',
        'addressRegion': 'BC'
    }
    # Officer payload differs by legal type: a person for sole proprietors,
    # an organization for everything else.
    person_officer = {
        'id': 2,
        'firstName': 'Peter',
        'lastName': 'Griffin',
        'middleName': '',
        'partyType': 'person'
    }
    org_officer = {
        'id': 2,
        'organizationName': 'Xyz Inc.',
        'identifier': 'BC1234567',
        'taxId': '123456789',
        'email': 'peter@email.com',
        'partyType': 'organization'
    }
    is_sole_prop = legal_type == 'SP'
    party_json = {
        'officer': person_officer if is_sole_prop else org_officer,
        'mailingAddress': dict(mailing_address)
    }
    business = create_business(identifier,
                               legal_type=legal_type,
                               legal_name='test-reg-' + legal_type)
    if tax_id:
        business.tax_id = tax_id
    json_filing = {
        'filing': {
            'header': {
                'name': 'registration'
            },
            'registration': {
            }
        }
    }
    filing = create_filing(json_filing=json_filing)
    party = create_party(party_json)
    create_party_role(business, party, ['proprietor' if is_sole_prop else 'partner'])
    business.save()
    filing.business_id = business.id
    filing.save()
    return filing.id, business.id
def proctored_exam_results_csv(entry_id, xmodule_instance_args):
    """
    Compute proctored exam results report for a course and upload the
    CSV for download.
    """
    # Bind the xmodule args now; run_main_task supplies the remaining arguments.
    report_task = partial(upload_proctored_exam_results_report, xmodule_instance_args)
    return run_main_task(entry_id, report_task, 'generating_proctored_exam_results_report')
def get_twiter_position(twit, market):
    """
    Get's Vicki's position on the appropriate stock
    :param twit: Twitter API Object
    :param market: The market pair which to observe
    :type twit: twitter
    :type market: str
    :return: String contining Vicki's position on the relavant market
    :rtype : str
    """
    statuses = fetch_timeline(twit)
    try:
        # Most recent tweet mentioning the market; empty string when none found.
        latest_tweet = next(
            (status.text for status in statuses if market in status.text), ""
        )
        if 'long' in latest_tweet:
            return 'long'
        if 'short' in latest_tweet:
            return 'short'
    # Generic exceptor used to catch all posible error types when itterating through tweets
    except Exception as e:
        error_handler('Error itterating through tweets in statuses', e)
def build_new_devices_list(module):
    """
    Build List of new devices to register in CV.
    Structure output:
    >>> configlets_get_from_facts(cvp_device)
    {
        [
            {
                "name": "veos01",
                "configlets": [
                    "cv_device_test01",
                    "SYS_TelemetryBuilderV2_172.23.0.2_1",
                    "veos01-basic-configuration",
                    "SYS_TelemetryBuilderV2"
                ],
                "cv_configlets": [],
                "parentContainerName": "DC1_VEOS",
                "imageBundle": []
            }
        ]
    }
    Parameters
    ----------
    module : AnsibleModule
        Ansible module.
    Returns
    -------
    list
        List of new devices to provision on CV.
    """
    # Get variable from module
    devices_filter = module.params["device_filter"]
    devices_ansible = module.params["devices"]
    devices_info = []
    # Loop in Input devices to see if it is part of CV Facts
    for ansible_device_hostname, ansible_device in devices_ansible.items():
        if is_in_filter(
            hostname_filter=devices_filter, hostname=ansible_device_hostname
        ):
            cvp_device = device_get_from_facts(
                module=module, device_name=ansible_device_hostname
            )
            # A device is "new" only when facts were found for it (non-empty)
            # AND it still sits in the undefined container, i.e. it has not
            # been provisioned yet.  (The original test was `>= 0`, which is
            # always true and let empty fact lookups fall through.)
            if len(cvp_device) > 0:
                if is_in_container(device=cvp_device, container="undefined_container"):
                    device_info = {
                        "name": ansible_device_hostname,
                        "parentContainerName": ansible_device["parentContainerName"],
                        "configlets": ansible_device["configlets"],
                        "cv_configlets": [],
                        "imageBundle": ansible_device["imageBundle"],
                        "message": "Device will be provisionned",
                    }
                    devices_info.append(device_info)
    return devices_info
from typing import Tuple
def preprocess(input_path: str, image_size: int) -> Tuple[pd.Series, np.ndarray]:
    """
    Preprocss imager data into a depth, image tuple.
    Image is resized to a given width.
    Additionally, to avoid floating point difficulties, depth measurements are converted
    to centimeters and integer type.
    Parameters
    ----------
    input_path : str
        path to an imager file in a RawImagerData format
    image_size : int
        resulted image width
    Returns
    -------
    Tuple[pd.Series, np.ndarray]
        depth measurements, image
    """
    raw = pd.read_csv(input_path, **RawImagerData.STORAGE_FORMAT['read_params']).dropna()
    imager = RawImagerData.convert(raw)
    # First column holds depth; the remaining columns are the image pixels.
    pixels = Image.fromarray(imager.df.iloc[:, 1:].values)
    result_img = np.asarray(pixels.resize((image_size, pixels.size[1])))
    depth_scale = 10 ** RawImagerData.DEPTH_PRECISION_DIGITS
    depth = (imager.df.depth * depth_scale).astype(np.int64)
    return depth, result_img
from typing import List
import subprocess
def import_template_ids() -> List[str]:
    """Return a list of all the supported template IDs."""
    # `meme -list-templates` prints one template ID per line.
    raw_output = subprocess.check_output(["meme", "-list-templates"])
    return raw_output.decode("utf-8").splitlines()
def _scale_pot(pot, scale_coeff, numtors):
""" Scale the potential
"""
print('scale_coeff test 0:', scale_coeff, numtors)
scale_factor = scale_coeff**(2.0/numtors)
print('scale_coeff test:', scale_coeff, numtors, scale_factor)
new_pot = {}
for idx, val in pot.items():
new_pot[idx] = pot[idx] * scale_factor
return new_pot | 0e634b7766a5822d3b2e80fffa0b56dccee125ab | 30,391 |
import pkg_resources
def get_substation_file():
    """Return the default substation file for the CONUS."""
    # Resolve the packaged data file shipped with the `cerf` distribution.
    resource_path = 'data/hifld_substations_conus_albers.zip'
    return pkg_resources.resource_filename('cerf', resource_path)
def configure_assignment_caller(context, pyramid_request, parsed_params=None):
    """
    Call BasicLTILaunchViews.configure_assignment().
    Set up the appropriate conditions and then call
    BasicLTILaunchViews.configure_assignment(), and return whatever
    BasicLTILaunchViews.configure_assignment() returns.
    """
    # These three parsed params are always present when configure_assignment()
    # is called; ConfigureAssignmentSchema guarantees it.
    defaults = {
        "document_url": "TEST_DOCUMENT_URL",
        "resource_link_id": "TEST_RESOURCE_LINK_ID",
        "tool_consumer_instance_guid": "TEST_TOOL_CONSUMER_INSTANCE_GUID",
    }
    pyramid_request.parsed_params = defaults
    if parsed_params:
        pyramid_request.parsed_params.update(parsed_params)
    return BasicLTILaunchViews(context, pyramid_request).configure_assignment()
import types
import pandas
import numpy
def hpat_pandas_series_isna(self):
    """
    Pandas Series method :meth:`pandas.Series.isna` and :meth:`pandas.Series.isnull` implementation.
    .. only:: developer
    Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_isna1
    Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_str_isna1
    Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_isnull1
    Parameters
    -----------
    self : :obj:`pandas.Series` object
        input argument
    Returns
    -------
    :obj:`pandas.Series`
        returns :obj:`pandas.Series` object
    """
    # NOTE: this is a numba typing-time overload: `self` is a *type*
    # (SeriesType), and the function returns an implementation to be compiled,
    # chosen by the series' element dtype.
    _func_name = 'Method isna/isnull().'
    if not isinstance(self, SeriesType):
        raise TypingError(
            '{} The object must be a pandas.series. Given self: {}'.format(_func_name, self))
    if isinstance(self.data.dtype, (types.Integer, types.Float)):
        # Numeric data: NaN detection maps directly onto numpy.isnan.
        def hpat_pandas_series_isna_impl(self):
            return pandas.Series(data=numpy.isnan(self._data), index=self._index, name=self._name)
        return hpat_pandas_series_isna_impl
    if isinstance(self.data.dtype, types.UnicodeType):
        # String data: missing values are tracked in the StringArrayType
        # null bitmap, one bit per element (LSB-first within each byte).
        def hpat_pandas_series_isna_impl(self):
            result = numpy.empty(len(self._data), numpy.bool_)
            byte_size = 8
            # iterate over bits in StringArrayType null_bitmap and fill array indicating if array's element are NaN
            for i in range(len(self._data)):
                bmap_idx = i // byte_size
                bit_idx = i % byte_size
                bmap = self._data.null_bitmap[bmap_idx]
                bit_value = (bmap >> bit_idx) & 1
                # A cleared bit marks a null (NaN) element.
                result[i] = bit_value == 0
            return pandas.Series(result, index=self._index, name=self._name)
        return hpat_pandas_series_isna_impl
    # NOTE(review): unsupported dtypes implicitly return None here, which numba
    # reports as "no implementation found" — presumably intentional; confirm.
from typing import Optional
def _filter_stmts(base_node: nodes.NodeNG, stmts, frame, offset):
    """Filter the given list of statements to remove ignorable statements.
    If base_node is not a frame itself and the name is found in the inner
    frame locals, statements will be filtered to remove ignorable
    statements according to base_node's location.
    :param base_node: The node whose frame and line position drive the
        filtering (the "lookup point").
    :type base_node: nodes.NodeNG
    :param stmts: The statements to filter.
    :type stmts: list(nodes.NodeNG)
    :param frame: The frame that all of the given statements belong to.
    :type frame: nodes.NodeNG
    :param offset: The line offset to filter statements up to.
    :type offset: int
    :returns: The filtered statements.
    :rtype: list(nodes.NodeNG)
    """
    # if offset == -1, my actual frame is not the inner frame but its parent
    #
    # class A(B): pass
    #
    # we need this to resolve B correctly
    if offset == -1:
        myframe = base_node.frame().parent.frame()
    else:
        myframe = base_node.frame()
    # If the frame of this node is the same as the statement
    # of this node, then the node is part of a class or
    # a function definition and the frame of this node should be the
    # the upper frame, not the frame of the definition.
    # For more information why this is important,
    # see Pylint issue #295.
    # For example, for 'b', the statement is the same
    # as the frame / scope:
    #
    # def test(b=1):
    #     ...
    if (
        base_node.parent
        and base_node.statement(future=True) is myframe
        and myframe.parent
    ):
        myframe = myframe.parent.frame()
    mystmt: Optional[nodes.Statement] = None
    if base_node.parent:
        mystmt = base_node.statement(future=True)
    # line filtering if we are in the same frame
    #
    # take care node may be missing lineno information (this is the case for
    # nodes inserted for living objects)
    if myframe is frame and mystmt and mystmt.fromlineno is not None:
        assert mystmt.fromlineno is not None, mystmt
        mylineno = mystmt.fromlineno + offset
    else:
        # disabling lineno filtering
        mylineno = 0
    # Accumulators: surviving candidate nodes and, in parallel, the parents of
    # their statements (used to detect same-block shadowing below).
    _stmts = []
    _stmt_parents = []
    # Pair each candidate node with its enclosing statement (helper defined
    # elsewhere in this module).
    statements = _get_filtered_node_statements(base_node, stmts)
    for node, stmt in statements:
        # line filtering is on and we have reached our location, break
        if stmt.fromlineno and stmt.fromlineno > mylineno > 0:
            break
        # Ignore decorators with the same name as the
        # decorated function
        # Fixes issue #375
        if mystmt is stmt and _is_from_decorator(base_node):
            continue
        if node.has_base(base_node):
            break
        if isinstance(node, nodes.EmptyNode):
            # EmptyNode does not have assign_type(), so just add it and move on
            _stmts.append(node)
            continue
        assign_type = node.assign_type()
        _stmts, done = assign_type._get_filtered_stmts(base_node, node, _stmts, mystmt)
        if done:
            break
        optional_assign = assign_type.optional_assign
        if optional_assign and assign_type.parent_of(base_node):
            # we are inside a loop, loop var assignment is hiding previous
            # assignment
            _stmts = [node]
            _stmt_parents = [stmt.parent]
            continue
        if isinstance(assign_type, nodes.NamedExpr):
            # If the NamedExpr is in an if statement we do some basic control flow inference
            if_parent = _get_if_statement_ancestor(assign_type)
            if if_parent:
                # If the if statement is within another if statement we append the node
                # to possible statements
                if _get_if_statement_ancestor(if_parent):
                    optional_assign = False
                    _stmts.append(node)
                    _stmt_parents.append(stmt.parent)
                # If the if statement is first-level and not within an orelse block
                # we know that it will be evaluated
                elif not if_parent.is_orelse:
                    _stmts = [node]
                    _stmt_parents = [stmt.parent]
                # Else we do not known enough about the control flow to be 100% certain
                # and we append to possible statements
                else:
                    _stmts.append(node)
                    _stmt_parents.append(stmt.parent)
            else:
                _stmts = [node]
                _stmt_parents = [stmt.parent]
        # XXX comment various branches below!!!
        try:
            pindex = _stmt_parents.index(stmt.parent)
        except ValueError:
            pass
        else:
            # we got a parent index, this means the currently visited node
            # is at the same block level as a previously visited node
            if _stmts[pindex].assign_type().parent_of(assign_type):
                # both statements are not at the same block level
                continue
            # if currently visited node is following previously considered
            # assignment and both are not exclusive, we can drop the
            # previous one. For instance in the following code ::
            #
            #   if a:
            #     x = 1
            #   else:
            #     x = 2
            #   print x
            #
            # we can't remove neither x = 1 nor x = 2 when looking for 'x'
            # of 'print x'; while in the following ::
            #
            #   x = 1
            #   x = 2
            #   print x
            #
            # we can remove x = 1 when we see x = 2
            #
            # moreover, on loop assignment types, assignment won't
            # necessarily be done if the loop has no iteration, so we don't
            # want to clear previous assignments if any (hence the test on
            # optional_assign)
            if not (optional_assign or nodes.are_exclusive(_stmts[pindex], node)):
                del _stmt_parents[pindex]
                del _stmts[pindex]
        # If base_node and node are exclusive, then we can ignore node
        if nodes.are_exclusive(base_node, node):
            continue
        # An AssignName node overrides previous assignments if:
        #   1. node's statement always assigns
        #   2. node and base_node are in the same block (i.e., has the same parent as base_node)
        if isinstance(node, (nodes.NamedExpr, nodes.AssignName)):
            if isinstance(stmt, nodes.ExceptHandler):
                # If node's statement is an ExceptHandler, then it is the variable
                # bound to the caught exception. If base_node is not contained within
                # the exception handler block, node should override previous assignments;
                # otherwise, node should be ignored, as an exception variable
                # is local to the handler block.
                if stmt.parent_of(base_node):
                    _stmts = []
                    _stmt_parents = []
                else:
                    continue
            elif not optional_assign and mystmt and stmt.parent is mystmt.parent:
                _stmts = []
                _stmt_parents = []
        elif isinstance(node, nodes.DelName):
            # Remove all previously stored assignments
            _stmts = []
            _stmt_parents = []
            continue
        # Add the new assignment
        _stmts.append(node)
        if isinstance(node, nodes.Arguments) or isinstance(
            node.parent, nodes.Arguments
        ):
            # Special case for _stmt_parents when node is a function parameter;
            # in this case, stmt is the enclosing FunctionDef, which is what we
            # want to add to _stmt_parents, not stmt.parent. This case occurs when
            # node is an Arguments node (representing varargs or kwargs parameter),
            # and when node.parent is an Arguments node (other parameters).
            # See issue #180.
            _stmt_parents.append(stmt)
        else:
            _stmt_parents.append(stmt.parent)
    return _stmts
def lcp_coordinate_conversion(start_coords, end_coords, crs, transform):
    """
    Convert lon/lat start and end coordinates to integer pixel coordinates.

    Parameters:
        - 'start_coords' is a list of tuples (lon,lat)
        - 'end_coords' is a list of lists of tuples.  Each list of end points
          corresponds to a start point, so len(start_coords) must equal
          len(end_coords), although each list of end points can be of any
          length one or greater.
        - 'crs' is a Coordinate Reference System of the type returned by
          rasterio (or neilpy).
        - 'transform' is an Affine transformation matrix as returned by
          rasterio (or neilpy).

    Output:
        - 'converted_start_coords' is a list of tuples of PIXEL coordinates.
        - 'converted_end_coords' is a list of lists of tuples of pixel coordinates.
    """
    converted_start_coords = []
    converted_end_coords = []
    for start, ends in zip(start_coords, end_coords):
        # lat/lon (EPSG:4326) -> map coordinates in the target CRS
        start_map = coord_transform(*start, 4326, crs)
        ends_map = [coord_transform(*pt, 4326, crs) for pt in ends]
        # map coordinates -> pixel coordinates (inverse affine, then swap
        # to (row, col) order)
        start_px = (~transform * start_map)[::-1]
        ends_px = [(~transform * pt)[::-1] for pt in ends_map]
        # Round to integer pixel indices
        converted_start_coords.append(tuple(np.round(start_px).astype(np.uint32)))
        converted_end_coords.append(
            [tuple(pt) for pt in np.round(ends_px).astype(np.uint32)]
        )
    return converted_start_coords, converted_end_coords
def create_large_map(sharing_model):
    """
    Create larger map with 7 BS that are arranged in a typical hexagonal structure.
    :returns: Tuple(map, bs_list)
    """
    map = Map(width=230, height=260)
    # (name, x, y): 'A' is the center; 'B'..'G' run counter-clockwise
    # starting from the top left.
    layout = [
        ('A', 115, 130),
        ('B', 30, 80),
        ('C', 115, 30),
        ('D', 200, 80),
        ('E', 200, 180),
        ('F', 115, 230),
        ('G', 30, 180),
    ]
    bs_list = [
        Basestation(name, Point(x, y), get_sharing_for_bs(sharing_model, i))
        for i, (name, x, y) in enumerate(layout)
    ]
    return map, bs_list
def np_scatter_add(input, axis, index, src):
    """ numpy wrapper for scatter_add """
    # Wrap the numpy arrays as CPU torch tensors, scatter-add along `axis`,
    # then convert the result back to a numpy array.
    tensors = [th.as_tensor(arr, device="cpu") for arr in (input, index, src)]
    return th.scatter_add(tensors[0], axis, tensors[1], tensors[2]).numpy()
def netmiko_connect(device_name, device):
    """
    Open a Netmiko SSH connection to a network device.

    Successful connection returns: (True, connect_obj)
    Failed authentication returns: (False, None)

    :param device_name: human-readable device name used in log messages
    :param device: Netmiko device dict; must contain "host", "port" defaults to 22
    """
    hostname = device["host"]
    port = device.get("port", 22)
    msg = ""
    try:
        net_connect = ConnectHandler(**device)
        msg = f"Netmiko connection succesful to {hostname}:{port}"
        logger.info(msg)
        return (True, net_connect)
    except NetmikoAuthenticationException:
        msg = f"Authentication failure to: {device_name} {hostname}:{port}"
    except NetmikoTimeoutException as e:
        if "DNS failure" in str(e):
            msg = (
                f"Device {device_name} failed due to a DNS failure, hostname {hostname}"
            )
        elif "TCP connection to device failed" in str(e):
            msg = f"Netmiko was unable to reach the provided host and port: {hostname}:{port}"
        else:
            # Fallback so an unrecognized timeout cause never logs an empty
            # message (previously `msg` stayed "" in this case).
            msg = f"Connection timeout to: {device_name} {hostname}:{port} ({e})"
    logger.error(msg)
    return (False, None)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.