content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def help_package_prompt(help_list, on_select, on_cancel=None):
    """
    Prompt the user to select one of the loaded help packages.

    ``on_select`` is invoked with the name of the selected package, while
    ``on_cancel`` is invoked if the user cancels the selection. When no
    packages with help are installed, a status message is logged instead.
    """
    if not help_list:
        return log("No packages with help are installed", status=True)

    sorted_keys = sorted(help_list)
    captions = [[help_list[key].package, help_list[key].description]
                for key in sorted_keys]

    def pick_package(index):
        # index < 0 signals that the quick panel was dismissed.
        if index < 0:
            return on_cancel() if on_cancel is not None else None
        chosen = captions[index][0]
        return on_select(chosen) if on_select is not None else None

    sublime.active_window().show_quick_panel(captions, on_select=pick_package)
def exponential_function(X):
    """Evaluate the benchmark exponential f(x) = exp(-3x) elementwise."""
    exponent = -3 * X
    return np.exp(exponent)
import re
import os
def _get_firebase_db_url():
    """Grab the databaseURL from the Firebase config snippet.

    The regex simply pulls the 'databaseURL' field out of the Firebase
    javascript snippet stored in templates/<_FIREBASE_CONFIG>.

    Returns:
        str: the database URL.

    Raises:
        ValueError: if no databaseURL is present in the snippet.
    """
    regex = re.compile(r'\bdatabaseURL\b.*?["\']([^"\']+)')
    cwd = os.path.dirname(__file__)
    with open(os.path.join(cwd, 'templates', _FIREBASE_CONFIG)) as f:
        for line in f:
            # BUGFIX: run the regex once per line (previously searched twice
            # per line via a generator condition plus next()).
            match = regex.search(line)
            if match:
                return match.group(1)
    raise ValueError(
        'Error parsing databaseURL. Please copy Firebase web snippet '
        'into templates/{}'.format(_FIREBASE_CONFIG))
def get_user_by_name(name):
    """Look up a user object by name/oname (used when importing commenters;
    Comment only accepts User objects).

    Args:
        name: a natural person's ``name`` or an organization's ``oname``.

    Returns:
        tuple: (user object, user type constant UTYPE_PER/UTYPE_ORG) on a
        match; implicitly returns None (after printing a warning) when the
        name matches neither.
    """
    # BUGFIX: narrowed from bare 'except:' so SystemExit/KeyboardInterrupt
    # are no longer swallowed.
    try:
        return NaturalPerson.objects.get(name=name).person_id, UTYPE_PER
    except Exception:
        pass
    try:
        return Organization.objects.get(oname=name).organization_id, UTYPE_ORG
    except Exception:
        pass
    print(f"{name} is neither natural person nor organization!")
import random
def gen_html(length=10, include_tags=True):
    """Return a random string made up of html characters.
    :param int length: Length for random data.
    :returns: A random string made up of html characters.
    :rtype: str
    """
    random.seed()
    html_tag = random.choice(HTML_TAGS)
    if not include_tags:
        if length < 8:
            raise ValueError(u'Cannot generate html with less than 7 chars')
        # Subtract the space taken by the tag markup from the content length.
        inner_len = length - len('<{0}></{0}>'.format(html_tag))
        if inner_len <= 0:
            # Tag name too long for the budget: fall back to the short <a> tag.
            length -= 7
            html_tag = 'a'
        else:
            length = inner_len
    # Both branches of the original produced this same markup; build it once.
    return u'<{0}>{1}</{0}>'.format(html_tag, gen_string('alpha', length))
from typing import Dict
import itertools
def format_coco(chip_dfs: Dict, patch_size: int, row_name: str):
    """
    Format train and test chip geometries to COCO json format.

    Args:
        chip_dfs: mapping of chip key -> {"chip_df": dataframe}; each row
            carries a shapely-like ``geometry`` plus the original multiclass
            label in column ``row_name``.
        patch_size: side length in pixels of the (square) chips.
        row_name: dataframe column holding the original class name.

    Returns:
        dict: COCO-format dictionary with "info", "licenses", "categories",
        "images" and "annotations" keys.
    """
    chip_height, chip_width = patch_size, patch_size
    cocojson = {
        "info": {},
        "licenses": [],
        "categories": [
            {
                "supercategory": "Burned Areas",
                "id": 1,  # id needs to match category_id.
                "name": "agfields_singleclass",
            }
        ],
    }
    # BUGFIX: COCO requires globally unique annotation ids; previously every
    # annotation of a chip reused the chip's image id.
    annotation_id = 0
    for key_idx, key in enumerate(chip_dfs.keys()):
        key_image = {
            "file_name": f"{key}.jpg",
            "id": int(key_idx),
            "height": chip_width,
            "width": chip_height,
        }
        cocojson.setdefault("images", []).append(key_image)
        for row_idx, row in chip_dfs[key]["chip_df"].iterrows():
            # Convert geometry to COCO segmentation format:
            # From shapely POLYGON ((x y, x1 y2, ..)) to COCO [[x, y, x1, y1, ..]].
            coco_xy = list(
                itertools.chain.from_iterable(
                    (x, y) for x, y in zip(*row.geometry.exterior.coords.xy)
                )
            )
            coco_xy = [round(coords, 2) for coords in coco_xy]
            # COCO bbox format: [minx, miny, width, height]
            bounds = row.geometry.bounds
            coco_bbox = [
                bounds[0],
                bounds[1],
                bounds[2] - bounds[0],
                bounds[3] - bounds[1],
            ]
            coco_bbox = [round(coords, 2) for coords in coco_bbox]
            key_annotation = {
                "id": annotation_id,
                "image_id": int(key_idx),
                "category_id": 1,  # with multiple classes use "category_id": row.reclass_id
                "mycategory_name": "agfields_singleclass",
                "old_multiclass_category_name": row[row_name],
                "bbox": coco_bbox,
                "area": row.geometry.area,
                "iscrowd": 0,
                "segmentation": [coco_xy],
            }
            cocojson.setdefault("annotations", []).append(key_annotation)
            annotation_id += 1
    return cocojson
def robust_optimize(ydata, fitfunc, arg_dict, maxiter=10, inmask=None, invvar=None,
                    lower=None, upper=None, maxdev=None, maxrej=None, groupdim=None,
                    groupsize=None, groupbadpix=False, grow=0, sticky=True, use_mad=False,
                    verbose=False,
                    **kwargs_optimizer):
    """
    A routine to perform robust optimization. It is completely analogous
    to :func:`robust_polyfit_djs`, but is more general in that it allows
    one to fit a more general model using the optimizer of the users
    choice. If you are fitting simple functions like Chebyshev or
    Legendre polynomials using a linear least-squares algorithm, you
    should use :func:`robust_polyfit_djs` instead of this function.

    Args:
        ydata (`numpy.ndarray`_):
            Data to fit.
        fitfunc (callable):
            The callable object used to perform the fitting. The
            calling sequence must be::

                ret_tuple = fitfunc(ydata, inmask, arg_dict, **kwargs_optimizer)

            See the descriptions of `ydata`, `inmask`, `arg_dict`, and
            `kwargs_optimizer`. The returned object ret_tuple can
            have two or three elements. If it has two elements (result,
            ymodel):

                - `result`: Object returned by the specific
                  scipy.optimize method used to perform the fit.
                - `ymodel`: A `numpy.ndarray` with the model fit to
                  `ydata` and with the same shape.

            If it has three elements (result, ymodel, newivar):

                - `newivar`: new inverse variance for the ydata ymodel
                  comparison, in other words chi = (ydata -
                  ymodel)*np.sqrt(newivar). This functionality allows
                  one to deal with cases where the noise of the
                  data-model comparison is model dependent.
        arg_dict (:obj:`dict`):
            Dictionary containing the other variables needed to evaluate
            the model fit.
        maxiter (:obj:`int`, optional):
            Maximum number of rejection iterations. Set this to zero to
            disable rejection and simply do a fit.
        inmask (`numpy.ndarray`_, optional):
            Input mask. Bad points are marked with a value that
            evaluates to `False`. Must have the same number of
            dimensions as `ydata`. Points masked as `False` in `inmask`
            will also always evaluate to `False` in the output mask.
        invvar (:obj:`float`, `numpy.ndarray`_, optional):
            Inverse variance of the data, used to reject points based on
            the values of `upper` and `lower`. This can either be a
            single float for the entire yarray or a ndarray with the
            same shape as the yarray.
        lower (:obj:`int`, :obj:`float`, optional):
            If set, reject points with ``data < model - lower * sigma``, where
            ``sigma = 1/sqrt(invvar)``
        upper (:obj:`int`, :obj:`float`, optional):
            If set, reject points with ``data > model + upper * sigma``, where
            ``sigma = 1/sqrt(invvar)``.
        maxdev (:obj:`int` or :class:`float`, optional):
            If set, reject points with ``abs(data-model) > maxdev``. It
            is permitted to set all three of `lower`, `upper` and
            `maxdev`.
        maxrej (:obj:`int`, `numpy.ndarray`_, optional):
            Maximum number of points to reject in this iteration. If
            `groupsize` or `groupdim` are set to arrays, this should be
            an array, as well.
        groupdim (:obj:`int`, optional):
            Dimension along which to group the data. Set to 1 to group
            along the 1st dimension, 2 for the 2nd dimension, etc. For
            example, if data has shape [100,200], then setting
            `groupdim=2` is equivalent to grouping the data with
            `groupsize=100`. In either case, there are 200 groups,
            specified by `[*,i]`. This functionality is **not well
            tested in python**!
        groupsize (:obj:`int`, optional):
            If this and `maxrej` are set, then reject a maximum of
            `maxrej` points per group of `groupsize` points. If
            `groupdim` is also set, then this specifies sub-groups
            within that. This functionality is **not well tested in
            python**!
        groupbadpix (:obj:`bool`, optional):
            If `True`, consecutive sets of bad pixels are considered
            groups, overriding the values of `groupsize`.
        grow (:obj:`int`, optional):
            If set to a non-zero integer, N, the N nearest neighbors of
            rejected pixels will also be rejected.
        sticky (:obj:`bool`, optional):
            If `True`, pixels rejected in one iteration remain rejected
            in subsequent iterations, even if the model changes.
        use_mad (:obj:`bool`, optional):
            It `True`, compute the median of the maximum absolute
            deviation between the data and use this for the rejection
            instead of the default, which is to compute the standard
            deviation of `ydata - modelfit`. Note that it is not
            possible to specify `use_mad=True` and also pass in a value for
            `invvar`, and the code will return an error if this is done.
        **kwargs_optimizer:
            Optional parameters passed to the optimizer.

    Returns:
        Three objects are returned:
            - The object returned by the `scipy.optimize` function used
              by the fitter. See `fitfunc`.
            - A `numpy.ndarray`_ with the model value fit to `ydata` and
              has its same shape.
            - Boolean `numpy.ndarray`_ with the same shape as data
              indicating which pixels were masked in the final fit.
              Convention is that `True` are good values where `False`
              indicates bad values.
    """
    # Setup the initial mask
    if inmask is None:
        inmask = np.ones(ydata.size, dtype=bool)
    nin_good = np.sum(inmask)
    iter = 0  # NOTE(review): shadows the builtin 'iter' within this function
    qdone = False
    thismask = np.copy(inmask)
    while (not qdone) and (iter < maxiter):
        # Fit with the current mask; fitfunc may return (result, ymodel) or
        # (result, ymodel, newivar) -- see the docstring.
        ret_tuple = fitfunc(ydata, thismask, arg_dict, **kwargs_optimizer)
        if (len(ret_tuple) == 2):
            result, ymodel = ret_tuple
            invvar_use = invvar
        elif (len(ret_tuple) == 3):
            result, ymodel, invvar_use = ret_tuple
        else:
            msgs.error('Invalid return value from fitfunc')
        thismask_iter = thismask.copy()
        # Reject outliers against the current model; qdone becomes True when
        # no new pixels were rejected in this iteration.
        thismask, qdone = pydl.djs_reject(ydata, ymodel, outmask=thismask, inmask=inmask, invvar=invvar_use,
                                          lower=lower, upper=upper, maxdev=maxdev, maxrej=maxrej,
                                          groupdim=groupdim, groupsize=groupsize, groupbadpix=groupbadpix, grow=grow,
                                          use_mad=use_mad, sticky=sticky)
        nrej = np.sum(thismask_iter & np.invert(thismask))  # newly rejected this iteration
        nrej_tot = np.sum(inmask & np.invert(thismask))     # rejected overall
        if verbose:
            msgs.info(
                'Iteration #{:d}: nrej={:d} new rejections, nrej_tot={:d} total rejections out of ntot={:d} '
                'total pixels'.format(iter, nrej, nrej_tot, nin_good))
        iter += 1
    # '&' is a bitwise AND of two bools here; equivalent to 'and' for bools.
    if (iter == maxiter) & (maxiter != 0):
        msgs.warn('Maximum number of iterations maxiter={:}'.format(maxiter) + ' reached in robust_optimize')
    outmask = np.copy(thismask)
    if np.sum(outmask) == 0:
        msgs.warn('All points were rejected!!! The fits will be zero everywhere.')
    # Perform a final fit using the final outmask if new pixels were rejected on the last iteration
    if qdone is False:
        ret_tuple = fitfunc(ydata, outmask, arg_dict, **kwargs_optimizer)
    # NOTE(review): the returned tuple has 3 elements when fitfunc returns 2,
    # and 4 elements when fitfunc also returns a new inverse variance.
    return ret_tuple + (outmask,)
    #return result, ymodel, outmask
def DAGcon(G, N):
    """Construct a Directed Acyclic Graph from the subsumption set.

    Parameters
    ----------
    G : The subsumption set (mapping of node pairs to scores).
    N : The number of nodes.

    Returns
    -------
    tuple: (adjacency list of sets, cache list of sets of skipped edges).
    """
    adjacency = [set() for _ in range(N)]
    caches = [set() for _ in range(N)]
    # Process candidate edges from highest to lowest score.
    ordered_pairs = sorted(G, key=lambda pair: G[pair], reverse=True)
    for idx, pair in enumerate(ordered_pairs):
        if idx % 10000 == 0:
            print(idx)
        a, b = pair[0], pair[1]
        if b in caches[a] or a in caches[b]:
            # Already decided for this pair.
            continue
        if checkpath(adjacency, b, a):
            # Edge already implied by an existing path.
            caches[b].add(a)
        elif checkpath(adjacency, a, b):
            # Reverse path exists; adding the edge would create a cycle.
            caches[a].add(b)
        else:
            #print(a,b)
            caches[b].add(a)
            adjacency[b].add(a)
    return adjacency, caches
def authenticate_owner_password(password : 'bytes', encryption_dict : 'dict', id_array : 'list'):
    """
    Authenticate the owner password.

    Parameters
    ----------
    password : bytes
        The password to be authenticated as owner password.
    encryption_dict : dict
        The dictionary containing all the information about the encryption procedure.
    id_array : list
        The two elements array ID, contained in the trailer dictionary.

    Returns
    -------
    The encryption key if the owner password is valid, None otherwise.

    Raises
    ------
    ValueError
        If the declared key length is not a multiple of 8 bits.
    """
    Length = encryption_dict.get("Length", 40)
    if Length % 8 != 0:
        # BUGFIX: raise a specific, informative exception instead of a bare
        # Exception() with no message.
        raise ValueError(
            "Invalid /Length value in encryption dictionary: {} is not a "
            "multiple of 8".format(Length))
    Length = Length // 8  # key length in bytes
    R = encryption_dict["R"]
    O = encryption_dict["O"]
    # /O may be stored as a literal string or hex-encoded.
    O = O.value if isinstance(O, PDFLiteralString) else unhexlify(O.value)
    # Derive the RC4 key from the padded password (PDF spec, Algorithm 3.7).
    input_to_md5 = bytearray()
    input_to_md5.extend((password + PASSWORD_PADDING)[:32])
    input_to_md5 = md5(input_to_md5).digest()
    if R >= 3:
        # Revision 3+: re-hash 50 times.
        for i in range(50):
            input_to_md5 = md5(input_to_md5).digest()
    encryption_key = input_to_md5[:Length]
    if R == 2:
        decrypted = rc4(O, encryption_key)
    else:
        # Revision 3+: 20 RC4 passes with XOR-modified keys, in reverse.
        decrypted = O
        for i in range(19, -1, -1):
            decrypted = rc4(decrypted, bytes(x ^ i for x in encryption_key))
    # The decrypted /O entry is the user password; validate it.
    return authenticate_user_password(decrypted, encryption_dict, id_array)
def next_neighbors(p, ps, k):
    """
    Find the k nearest neighbors of a point in a non-periodic setup.

    Args:
        p: the current point
        ps (np.ndarray): the grid with the potential neighbors
        k (int): number of neighbors to find

    Returns:
        list: indices into ``ps`` of the k nearest neighbors, sorted in
        ascending index order. Ties in distance are broken by the smaller
        index (stable sort), matching the original sorted() behavior.
    """
    distance_to_p = np.abs(np.asarray(ps) - p)
    # kind='stable' preserves index order among equal distances, exactly
    # like the original stable sorted() over (distance, index) pairs.
    nearest = np.argsort(distance_to_p, kind='stable')[:k]
    return sorted(int(i) for i in nearest)
from datetime import datetime
def parse_date(datestring):
    """
    Return a timezone-aware (UTC) datetime for a date string.

    Tries the fine-grained format first and falls back to the coarse one.

    Raises:
        ValueError: if the string matches neither TIME_FORMAT_FINE nor
            TIME_FORMAT_COARSE.
    """
    # BUGFIX: 'timezone' was used below but never imported in this scope.
    from datetime import timezone

    try:
        dt = datetime.strptime(datestring, TIME_FORMAT_FINE)
    except ValueError:
        # Fall back to the coarser format; let ValueError propagate if this
        # one fails as well.
        dt = datetime.strptime(datestring, TIME_FORMAT_COARSE)
    return dt.replace(tzinfo=timezone.utc)
def eigval2(k,aee,aeiaie,aii,see,sei,sii,tau=1,alpha=0):
    """Smaller eigenvalue of a recurrently connected network of E,I units,
    determined analytically.

    input:
    k: spatial frequency
    aee: amplitude E to E connectivity
    aeiaie: product of amplitudes of E to I and I to E connectivity
    aii: amplitude I to I connectivity
    see: standard deviation/width of E to E connectivity
    sei: standard deviation/width of E to I connectivity
    sii: standard deviation/width of I to I connectivity
    tau: ratio of excitatory to inhibitory time constant, default is 1
    alpha: float, 0<=alpha<=1, strength of self-inhibitory connections
    """
    trace = tracek(k,aee,aii,see,sii,tau,alpha)
    discriminant = trace**2 - 4*detk(k,aee,aeiaie,aii,see,sei,sii,tau,alpha)
    # Where the discriminant is negative, take sqrt of its absolute value
    # and attach the imaginary unit so complex eigenvalues come out right.
    negative = discriminant < 0
    sign = np.where(negative, -1.0, 1.0)
    factor = np.where(negative, 1j, 1.0 + 0j)
    return trace/2. - factor * 0.5 * np.sqrt(discriminant * sign)
def randomize_censored_values(x,
                              lower_bound=None, lower_threshold=None,
                              upper_bound=None, upper_threshold=None,
                              inplace=False, inverse=False,
                              seed=None, lower_power=1., upper_power=1.):
    """
    Randomizes censored values in x or de-randomizes such formerly
    randomized values.

    Parameters
    ----------
    x : array
        Time series to be (de-)randomized.
    lower_bound : float, optional
        Lower bound of values in time series.
    lower_threshold : float, optional
        Lower threshold of values in time series.
    upper_bound : float, optional
        Upper bound of values in time series.
    upper_threshold : float, optional
        Upper threshold of values in time series.
    inplace : boolean, optional
        If True, change x in-place. If False, change a copy of x.
    inverse : boolean, optional
        If True, values below lower_threshold are set to lower_bound and
        values above upper_threshold are set to upper_bound.
        If False, censored values are randomized: values at or below
        lower_bound are replaced by random numbers from the interval
        [lower_bound, lower_threshold), and values at or above upper_bound
        are replaced by random numbers from the interval
        (upper_threshold, upper_bound].
    seed : int, optional
        Used to seed the random number generator before replacing censored
        values.
    lower_power : float, optional
        Numbers for randomizing values at the lower bound are drawn from a
        uniform distribution and then taken to this power.
    upper_power : float, optional
        Numbers for randomizing values at the upper bound are drawn from a
        uniform distribution and then taken to this power.

    Returns
    -------
    x : array
        Randomized or de-randomized time series.
    """
    y = x if inplace else x.copy()
    if seed is not None:
        np.random.seed(seed)
    # randomize lower values
    if lower_bound is not None and lower_threshold is not None:
        if inverse:
            y[y < lower_threshold] = lower_bound
        else:
            i_lower = y <= lower_bound
            n_lower = np.sum(i_lower)
            if n_lower:
                p = np.power(np.random.uniform(0, 1, n_lower), lower_power)
                y[i_lower] = lower_bound + p * (lower_threshold - lower_bound)
    # randomize upper values
    if upper_bound is not None and upper_threshold is not None:
        if inverse:
            y[y > upper_threshold] = upper_bound
        else:
            i_upper = y >= upper_bound
            n_upper = np.sum(i_upper)
            if n_upper:
                p = np.power(np.random.uniform(0, 1, n_upper), upper_power)
                y[i_upper] = upper_bound - p * (upper_bound - upper_threshold)
    return y
def read_fits(filename, params):
    """
    Read the polynomial-fit fits file (saved from Extract_Orders.py) and
    return the per-order polynomial coefficients plus x extraction limits.

    :param filename: string, location and file name of the file containing
        the polynomial fits (params['poly_fits_file'] by default)
    :param params: dict, parameter dictionary, must include:
        - fitmax: int, highest order polynomial the orders were fit to
          (default = 3)

    The fits table columns are 'Order', one column per polynomial power
    (highest first, as used by np.polyval: p[0]*x**(N-1) + ... + p[N-1]),
    and 'low_x'/'high_x' extraction limits.

    :return: (pfits1, xls, xhs) - list of coefficient lists, list of lower
        x limits (int), list of upper x limits (int), one entry per order.
    """
    atable = Table.read(filename)
    orders = np.array(atable['Order'], dtype=int)
    # Polynomial powers from highest to lowest, matching np.polyval ordering.
    powers = list(range(params['fitmax'] + 1))[::-1]
    pfits1, xls, xhs = [], [], []
    for order in orders:
        row = order - 1  # table rows are indexed by (order number - 1)
        pfits1.append([atable[str(p)][row] for p in powers])
        xls.append(int(atable['low_x'][row]))
        xhs.append(int(atable['high_x'][row]))
    return pfits1, xls, xhs
def IntCurve_PConicTool_NbSamples(*args):
    """
    SWIG-generated wrapper with two overloaded call signatures:

    :param C:
    :type C: IntCurve_PConic &
    :rtype: int

    :param C:
    :type C: IntCurve_PConic &
    :param U0:
    :type U0: float
    :param U1:
    :type U1: float
    :rtype: int
    """
    # Delegate directly to the native extension module; overload resolution
    # happens inside the SWIG-generated C++ layer.
    return _IntCurve.IntCurve_PConicTool_NbSamples(*args)
def nearest_neighbor(graph, restarts=10, weight='weight'):
    """
    Recursive Nearest-Neighbor algorithm to solve the traveling salesman problem.
    These are the steps of the algorithm for each restart:
    #. Start on a random node as current node;
    #. Find the shortest edge connecting the current node with an unvisited node;
    #. Mark the found node as current and visited;
    #. Go to step 2 until all nodes are visited

    Parameters
    ----------
    graph: NetworkX graph
    restarts: int, optional (default=10)
        Number of times the algorithm will try a different, randomly selected, node
        as starting point
    weight: string, optional (default='weight')
        Edge data key corresponding to the edge weight

    Returns
    -------
    best_tour: list
        The sequence for visiting all the nodes
    """
    # NOTE(review): this implementation targets Python 2 (xrange) and
    # NetworkX 1.x APIs (neighbors_iter, g.edge); it will not run unmodified
    # on Python 3 / NetworkX >= 2.
    restarts = min(restarts, graph.number_of_nodes())
    nodelist = graph.nodes()
    tried_nodes = []
    min_cost = float('inf')
    for _ in xrange(restarts):
        # Pick a random starting node that has not been tried yet.
        while True:
            current_node = np.random.choice(nodelist)
            if current_node not in tried_nodes:
                tried_nodes.append(current_node)
                break
        g = graph.copy()
        tour = [current_node]
        while g.number_of_nodes() > 1:
            # Greedy step: move to the nearest remaining neighbor.
            min_weight = float('inf')
            for neighbor in g.neighbors_iter(current_node):
                w = g.edge[current_node][neighbor][weight]
                if w < min_weight:
                    nn = neighbor
                    min_weight = w
            # NOTE(review): 'nn' is unbound if current_node has no remaining
            # neighbors (non-complete graph) -- would raise NameError.
            g.remove_node(current_node)
            tour.append(nn)
            current_node = nn
        cost = compute_tour_cost(graph, tour)
        if cost < min_cost:
            min_cost = cost
            best_tour = list(tour)
    return best_tour
import time
def update_metrics(metrics, losses, mode, src_seq, tracking_loss=None, batch_level=True):
    """
    Records relevant metrics in the metrics data structure while training.

    If batch_level is true, the loss for the current batch is recorded in
    addition to the running epoch loss.

    Parameters
    ----------
    metrics : dict
        Nested metric store; mutated in place and also returned.
    losses : dict
        Mapping of loss names (e.g. "drmsd-full") to tensor scalars.
    mode : str
        Sub-dictionary of ``metrics`` to update (e.g. train/validation).
    src_seq : tensor
        Source sequence batch; non-padding entries are counted for speed.
    tracking_loss : optional
        If given, appended to the batch history as a float.
    batch_level : bool
        Whether to record per-batch values alongside epoch accumulators.
    """
    drmsd, ln_drmsd, mse, combined, rmsd = losses["drmsd-full"], losses["lndrmsd-full"], losses["mse-full"], losses["combined-full"], losses["rmsd-full"]
    # Update loss values
    if batch_level:
        metrics["n_batches"] += 1
        metrics[mode]["batch-drmsd-full"] = drmsd.item()
        metrics[mode]["batch-lndrmsd-full"] = ln_drmsd.item()
        metrics[mode]["batch-mse-full"] = mse.item()
        metrics[mode]["batch-combined-full"] = combined.item()
        if rmsd: metrics[mode]["batch-rmsd-full"] = rmsd.item()
        metrics[mode]["batch-drmsd-bb"] = losses["drmsd-bb"].item()
        metrics[mode]["batch-mse-bb"] = losses["mse-bb"].item()
        metrics[mode]["batch-mse-sc"] = losses["mse-sc"].item()
        metrics[mode]["batch-lndrmsd-bb"] = losses["lndrmsd-bb"].item()
    metrics[mode]["epoch-drmsd-full"] += drmsd.item()
    metrics[mode]["epoch-lndrmsd-full"] += ln_drmsd.item()
    metrics[mode]["epoch-mse-full"] += mse.item()
    metrics[mode]["epoch-combined-full"] += combined.item()
    if rmsd: metrics[mode]["epoch-rmsd-full"] += rmsd.item()
    # BUGFIX: these epoch accumulators previously used plain assignment,
    # overwriting (instead of accumulating) the backbone/side-chain totals,
    # inconsistently with the "-full" accumulators above.
    metrics[mode]["epoch-drmsd-bb"] += losses["drmsd-bb"].item()
    metrics[mode]["epoch-mse-bb"] += losses["mse-bb"].item()
    metrics[mode]["epoch-mse-sc"] += losses["mse-sc"].item()
    metrics[mode]["epoch-lndrmsd-bb"] += losses["lndrmsd-bb"].item()
    # Compute and update speed (non-padding residues processed per second)
    num_res = (src_seq != VOCAB.pad_id).sum().item()
    metrics[mode]["speed"] = num_res / (time.time() - metrics[mode]["batch-time"])
    if "speeds" not in metrics[mode].keys():
        metrics[mode]["speeds"] = []
    metrics[mode]["speeds"].append(metrics[mode]["speed"])
    metrics[mode]["batch-time"] = time.time()
    metrics[mode]["speed-history"].append(metrics[mode]["speed"])
    if tracking_loss:
        metrics[mode]["batch-history"].append(float(tracking_loss))
    return metrics
def dmet_low_rdm(active_fock, number_active_electrons):
    """Construct the one-particle RDM from a low-level calculation.

    Args:
        active_fock (numpy.array): Fock matrix from the low-level calculation (float64).
        number_active_electrons (int): Number of electrons in the entire system.

    Returns:
        numpy.array: One-particle RDM of the low-level calculation (float64),
        built by doubly occupying the lowest-energy orbitals.
    """
    # Closed-shell assumption: two electrons per occupied orbital.
    # Integer division replaces the previous float-divide-then-truncate.
    num_occ = number_active_electrons // 2
    eigvals, eigvecs = np.linalg.eigh(active_fock)
    # Sort orbitals by ascending energy and occupy the lowest num_occ.
    order = eigvals.argsort()
    occ = eigvecs[:, order][:, :num_occ]
    # RDM = 2 * C_occ C_occ^T (factor 2 for double occupancy).
    onerdm = np.dot(occ, occ.T) * 2
    return onerdm
def reversed_complement(string):
    """Find the reverse complement of a DNA string.
    Given: A DNA string Pattern.
    Return: Pattern, the reverse complement of Pattern."""
    # Watson-Crick base pairing; unknown bases raise KeyError, as before.
    pairs = {'A': 'T',
             'C': 'G',
             'G': 'C',
             'T': 'A'}
    return "".join(pairs[base] for base in reversed(string))
import os
import pickle
def create_service(scopes):
    """
    Creates a Gmail service based on the credentials.json found in the current directory.

    Loads cached OAuth credentials from modules/token.pickle when present;
    otherwise refreshes them (if expired but refreshable) or runs the
    installed-app OAuth flow against modules/credentials.json, then caches
    the resulting credentials for the next run.

    :param scopes: OAuth scopes to request.
    :return: an authorized Gmail API (v1) service object.
    """
    creds = None
    if os.path.exists("modules/token.pickle"):
        # Reuse previously cached credentials.
        with open("modules/token.pickle", "rb") as token:
            creds = pickle.load(token)
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            # Expired but refreshable: refresh silently, no user interaction.
            creds.refresh(Request())
        else:
            # No usable credentials: run the interactive OAuth flow.
            flow = InstalledAppFlow.from_client_secrets_file(
                "modules/credentials.json", scopes)
            creds = flow.run_local_server(port=0)
        # Cache the (new) credentials for next time.
        with open("modules/token.pickle", "wb") as token:
            pickle.dump(creds, token)
    service = build("gmail", "v1", credentials=creds)
    return service
def package_positions_images(image, position_arr):
    """
    Crop the backpack slot images at the given positions.

    :param image: source image providing ``.crop((x1, y1, x2, y2))``; when
        None, each region is captured live from the screen instead.
    :param position_arr: iterable of (x1, x2, y1, y2) tuples.
    :return: list of cropped/captured region images, one per position.
    """
    if image is None:
        # No source image supplied: grab each region from the screen.
        return [image_util.capture((x1, y1, x2, y2))
                for x1, x2, y1, y2 in position_arr]
    return [image.crop((x1, y1, x2, y2))
            for x1, x2, y1, y2 in position_arr]
def save(operator, context, filepath, apply_modifiers, separator,
         default_texture_flag, flip_uv, alt_color,
         matrix, scale, f15_rot_space, obj=None, **kwargs):
    """
    Export the given (or active) object to a binary model file at filepath.

    :param bpy.types.Operator operator:
    :param context:
    :param str filepath:
    :param bool apply_modifiers:
    :param str separator:
    :param int default_texture_flag: 1|2|4|...|64
    :param bool flip_uv:
    :param int alt_color: -2(random 0-255)|-1(random 32-175)|0-255
    :param mathutils.Matrix matrix: global matrix
    :param int scale:
    :param str f15_rot_space: 'local'|'world'
    :param bpy.types.Object obj: object to export; defaults to the active object
    :return: Blender operator status set ({'FINISHED'})
    """
    exporter = Exporter(apply_modifiers, separator,
                        default_texture_flag, flip_uv, alt_color,
                        matrix, scale, f15_rot_space, **kwargs)
    # Only write the file when the model could actually be built.
    if exporter.build_model(obj or context.active_object):
        with open(filepath, 'wb') as f:
            f.write(exporter.model.to_bytes())
    return {'FINISHED'}
import os
import sys
def get_checkpoint_path(train_dir, num_iters_ckpt):
    """ Finds the checkpoint path that corresponds to the num_iters_ckpt.

    Args:
        train_dir: string: path to the directory where the checkpoints files are
            saved.
        num_iters_ckpt: int: number of training iterations performed when the
            checkpoint was saved. Negative means "latest available"; zero
            means "no checkpoint".

    Returns:
        string: path to the corresponding checkpoint with num_iters_ckpt
            iterations, or None.
    """
    # NOTE(review): 'osp' is presumably 'os.path' imported elsewhere in this
    # module, and 'tf' is TensorFlow (v1 checkpoint APIs) -- confirm.
    ckpt_path = None
    if osp.exists(train_dir):
        ckpt = tf.train.get_checkpoint_state(train_dir)
        # Negative request + valid checkpoint state: take the latest directly.
        if num_iters_ckpt < 0 and ckpt and ckpt.model_checkpoint_path:
            ckpt_path = ckpt.model_checkpoint_path
        elif num_iters_ckpt == 0:
            ckpt_path = None
        else:
            # Enumerate '.ckpt' files and index them by iteration number
            # parsed from the 'name-<iters>' stem.
            ckpt_files = [f for f in os.listdir(train_dir) if '.ckpt' in f]
            ckpt_files = [f[:f.rfind('.')] for f in ckpt_files]
            ckpt_files = sorted(list(set(ckpt_files)))
            ckpt_num = [int(f.split('.')[0].split('-')[-1])
                        for f in ckpt_files]
            ckpt_dict = {n: f for n, f in zip(ckpt_num, ckpt_files)}
            if num_iters_ckpt < 0 and len(ckpt_files) == 0:
                ckpt_path = None
            else:
                if num_iters_ckpt < 0:
                    # Fall back to the highest available iteration count.
                    num_iters_ckpt = sorted(ckpt_num)[-1]
                ckpt_name = ckpt_dict.get(num_iters_ckpt)
                if ckpt_name is None:
                    # Requested iteration unavailable: report options and exit.
                    ckpt_num_str = sorted(list(set(ckpt_num)))
                    ckpt_num_str = [str(x) for x in ckpt_num_str]
                    print('Checkpoint with ' + str(num_iters_ckpt) +
                          ' iterations cannot be found, the available ' +
                          'values are: {' + ', '.join(ckpt_num_str) + '}')
                    sys.exit()
                ckpt_path = osp.join(train_dir, ckpt_name)
    return ckpt_path
def merge_masks(masks):
    """
    Merge per-class binary masks into one label volume. Overlapping pixels
    are overridden by the later class value (class i+1 beats class i).

    :param masks: array of shape [num_class, D, H, W]
    :return: uint8 label volume of shape [D, H, W] with values 1..num_class
        (0 = background)
    """
    num_class, depth, height, width = masks.shape
    merged = np.zeros((depth, height, width), dtype=np.uint8)
    for label in range(1, num_class + 1):
        merged[masks[label - 1] > 0] = label
    return merged
def print_subheader(object_type):
    """Return a decorated subheader line for a text file."""
    return f"""
    #################################################################
    # {object_type}
    #################################################################
    """
def get_virtualenv_version(args, python):  # type: (EnvironmentConfig, str) -> t.Optional[t.Tuple[int, ...]]
    """Get the virtualenv version for the given python interpreter, if available, otherwise return None."""
    # Memoize results on the function object itself, keyed by interpreter path.
    try:
        cache = get_virtualenv_version.cache
    except AttributeError:
        cache = get_virtualenv_version.cache = {}
    if python not in cache:
        try:
            stdout = run_command(args, [python, '-m', 'virtualenv', '--version'], capture=True)[0]
        except SubprocessError as ex:
            # virtualenv missing or broken for this interpreter; treat as absent.
            stdout = ''
            if args.verbosity > 1:
                display.error(ex)
        version = None
        if stdout:
            # noinspection PyBroadException
            try:
                version = str_to_version(stdout.strip())
            except Exception:  # pylint: disable=broad-except
                # Unparseable version output also counts as "not available".
                pass
        cache[python] = version
    version = cache[python]
    return version
def require_data(PWState, TheAnalyzer):
    """Defines the transition targets for each involved state.

    Registers, via variable_db, the generated-code variables that the path
    walker state needs: the lexatom sequences of its paths, per-path base
    pointers, the door-address table (when doors are not uniform), and the
    path reference used to recover the state offset from path_iterator.
    """
    variable_db.require("path_iterator")

    def __door_adr_sequences(PWState):
        # Build the initializer of the door-address array: per path, one
        # padding element followed by the routed door addresses.
        result = ["{\n"]
        length = 0
        for path_id, door_id_sequence in enumerate(PWState.door_id_sequence_list):
            # NOTE: For all states in the path the 'from_state_index, to_state_index' can
            #       be determined, **except** for the FIRST state in the path. Thus for
            #       this state the 'door' cannot be determined in case that it is
            #       "not uniform_doors_f()".
            #
            #       However, the only occasion where the FIRST state in the path may be
            #       used is reload during the FIRST state. The reload adapts the positions
            #       and acceptances are not changed. So, we can use the common entry
            #       to the first state as a reference here.
            result.append(" ")
            result.append("/* Padding */0x0, ")
            for door_id in door_id_sequence:
                PWState.entry.dial_db.mark_address_as_routed(door_id.related_address)
                result.append("%s, " % Lng.ADRESS_LABEL_REFERENCE(door_id.related_address))
            result.append("\n")
            length += len(door_id_sequence) + 1  # 1 padding element
        result.append(" }");
        return length, result

    def __character_sequences(PathList):
        # Build the initializer of the lexatom array: each path's triggers
        # followed by the path termination code.
        result = ["{\n"]
        offset = 0
        for path_id, step_list in enumerate(PathList):
            ## Commenting the transition sequence is not dangerous. 'COMMENT' eliminates
            ## comment delimiters if they would appear in the sequence_str.
            ## sequence_str = imap(lambda x: Interval(x[1]).get_utf8_string(), step_list[:-1])
            ## memory.append(Lng.COMMENT("".join(sequence_str)) + "\n")
            result.append(" ")
            result.extend("%i, " % x.trigger for x in step_list[:-1])
            result.append("QUEX_SETTING_BUFFER_LEXATOM_PATH_TERMINATION,")
            result.append("\n")
            offset += len(step_list)
        result.append(" }")
        return offset, result

    # (*) Path Walker Basis
    # The 'base' must be defined before all --> PriorityF (see table in variable_db)
    element_n, character_sequence_str = __character_sequences(PWState.path_list)
    offset = 0
    for path_id, step_list in enumerate(PWState.path_list):
        # Each path gets a pointer into the shared base array.
        variable_db.require("path_walker_%i_path_%i",
                            Initial = "&path_walker_%i_path_base[%i]" % (PWState.index, offset),
                            Index = (PWState.index, path_id))
        offset += len(step_list)
    variable_db.require_array("path_walker_%i_path_base",
                              ElementN = element_n,
                              Initial = character_sequence_str,
                              Index = PWState.index)
    # (*) The DFA_State Information for each path step
    if PWState.uniform_door_id is None:
        element_n, door_adr_sequence_str = __door_adr_sequences(PWState)
        variable_db.require_array("path_walker_%i_state_base",
                                  ElementN = element_n,
                                  Initial = door_adr_sequence_str,
                                  Index = PWState.index)
    # The path_iterator is incremented before the 'goto', thus
    # 'path_iterator - (path_base + 1)' gives actually the correct offset.
    # We define a variable for that, for elegance.
    variable_db.require("path_walker_%i_reference",
                        Initial = "path_walker_%i_path_base" % PWState.index,
                        Index = (PWState.index))
def pad_batch_data(insts,
                   pad_idx=0,
                   return_pos=False,
                   return_input_mask=False,
                   return_max_len=False,
                   return_num_token=False):
    """
    Pad every instance of the batch up to the longest sequence length and
    optionally produce position ids, an attention input mask, the maximum
    length and the total token count alongside the padded data.

    Returns a list of the requested arrays/values, or the padded data
    alone when nothing else was requested.
    """
    outputs = []
    max_len = max(len(inst) for inst in insts)
    # Padding with any in-vocabulary id is safe: padded positions are
    # masked out by weights, so they never influence the gradients.
    padded = np.array(
        [list(inst) + [pad_idx] * (max_len - len(inst)) for inst in insts])
    outputs.append(padded.astype("int64").reshape([-1, max_len]))
    if return_pos:
        # position ids, padded with pad_idx as well
        positions = np.array(
            [list(range(len(inst))) + [pad_idx] * (max_len - len(inst))
             for inst in insts])
        outputs.append(positions.astype("int64").reshape([-1, max_len]))
    if return_input_mask:
        # 1 for real tokens, 0 for padding; blocks attention on paddings
        mask = np.array(
            [[1] * len(inst) + [0] * (max_len - len(inst)) for inst in insts])
        outputs.append(np.expand_dims(mask, axis=-1).astype("float32"))
    if return_max_len:
        outputs.append(max_len)
    if return_num_token:
        outputs.append(sum(len(inst) for inst in insts))
    return outputs if len(outputs) > 1 else outputs[0]
from typing import Optional
from typing import List
import torch
def push2d(inp, g, shape: Optional[List[int]], bound: List[Bound],
           extrapolate: int = 1):
    """
    Splat (push) input values into a 2D output grid with bilinear weights.

    Each input value is distributed to the four integer neighbours of its
    sampling coordinate in `g`, weighted by the bilinear weights — this
    looks like the adjoint of bilinear grid sampling (TODO confirm).

    inp: (B, C, iX, iY) tensor
    g: (B, iX, iY, 2) tensor
    shape: List{2}[int], optional
    bound: List{2}[Bound] tensor
    extrapolate: ExtrapolateType
    returns: (B, C, *shape) tensor
    """
    dim = 2
    boundx, boundy = bound
    if inp.shape[-dim:] != g.shape[-dim-1:-1]:
        raise ValueError('Input and grid should have the same spatial shape')
    ishape = list(inp.shape[-dim:])
    # flatten spatial dimensions so scatter_add_ can work on a single axis
    g = g.reshape([g.shape[0], 1, -1, dim])
    gx, gy = torch.unbind(g, -1)
    inp = inp.reshape(list(inp.shape[:2]) + [-1])
    batch = max(inp.shape[0], gx.shape[0])
    channel = inp.shape[1]
    if shape is None:
        shape = ishape
    shape = list(shape)
    nx, ny = shape
    # mask of inbounds voxels
    mask = inbounds_mask_2d(extrapolate, gx, gy, nx, ny)
    # corners
    # (upper weight, lower corner, upper corner, lower sign, upper sign)
    gx, gx0, gx1, signx0, signx1 = get_weights_and_indices(gx, nx, boundx)
    gy, gy0, gy1, signy0, signy1 = get_weights_and_indices(gy, ny, boundy)
    # scatter
    out = torch.zeros([batch, channel, nx*ny],
                      dtype=inp.dtype, device=inp.device)
    # Each of the four corner blocks below accumulates:
    #   inp * boundary_sign * inbounds_mask * bilinear_weight
    # into the linearised output at the corner's flat index.
    # - corner 00
    idx = sub2ind_list([gx0, gy0], shape)
    idx = idx.expand([batch, channel, idx.shape[-1]])
    out1 = inp.clone()
    sign = make_sign([signx0, signy0])
    if sign is not None:
        out1 *= sign
    if mask is not None:
        out1 *= mask
    out1 *= (1 - gx) * (1 - gy)
    out.scatter_add_(-1, idx, out1)
    # - corner 01
    idx = sub2ind_list([gx0, gy1], shape)
    idx = idx.expand([batch, channel, idx.shape[-1]])
    out1 = inp.clone()
    sign = make_sign([signx0, signy1])
    if sign is not None:
        out1 *= sign
    if mask is not None:
        out1 *= mask
    out1 *= (1 - gx) * gy
    out.scatter_add_(-1, idx, out1)
    # - corner 10
    idx = sub2ind_list([gx1, gy0], shape)
    idx = idx.expand([batch, channel, idx.shape[-1]])
    out1 = inp.clone()
    sign = make_sign([signx1, signy0])
    if sign is not None:
        out1 *= sign
    if mask is not None:
        out1 *= mask
    out1 *= gx * (1 - gy)
    out.scatter_add_(-1, idx, out1)
    # - corner 11
    idx = sub2ind_list([gx1, gy1], shape)
    idx = idx.expand([batch, channel, idx.shape[-1]])
    out1 = inp.clone()
    sign = make_sign([signx1, signy1])
    if sign is not None:
        out1 *= sign
    if mask is not None:
        out1 *= mask
    out1 *= gx * gy
    out.scatter_add_(-1, idx, out1)
    # restore the 2D spatial layout of the output
    out = out.reshape(list(out.shape[:2]) + shape)
    return out
import time
def condor_tables(sqlContext,
                  hdir='hdfs:///project/monitoring/archive/condor/raw/metric',
                  date=None, verbose=False):
    """Read raw HTCondor JSON records from HDFS into a Spark DataFrame.

    Records live under ``<hdir>/<YYYY/MM/DD>``.  When *date* is not
    given, yesterday's partition (GMT) is used.  The resulting
    DataFrame is registered as the temporary table ``condor_df`` and
    returned in a dict keyed by that table name.  The schema is
    printed as a side effect.
    """
    if not date:
        # default to yesterday's partition, dates computed in GMT
        date = time.strftime("%Y/%m/%d", time.gmtime(time.time()-60*60*24))
    hpath = '%s/%s' % (hdir, date)
    # build a Spark DataFrame straight from the JSON records
    records = sqlContext.read.json(hpath)
    records.registerTempTable('condor_df')
    records.printSchema()
    return {'condor_df': records}
from typing import Callable
from typing import List
from typing import Tuple
def _contiguous_groups(
length: int,
comparator: Callable[[int, int], bool]
) -> List[Tuple[int, int]]:
"""Splits range(length) into approximate equivalence classes.
Args:
length: The length of the range to split.
comparator: Determines if two indices have approximately equal items.
Returns:
A list of (inclusive_start, exclusive_end) range endpoints. Each
corresponds to a run of approximately-equivalent items.
"""
result = []
start = 0
while start < length:
past = start + 1
while past < length and comparator(start, past):
past += 1
result.append((start, past))
start = past
return result | fc25e286a2b6ec9ab7de15146e8b26922ea56e6b | 3,634,630 |
def datatype_derive(times, series):
    """
    returns series converted to datatype derive

    store only differences between two subsequent values;
    the first element is always 0.0 since it has no predecessor

    parameters:
    times <tuple> of <float> (unused, kept for interface compatibility)
    series <tuple> of <float>

    returns:
    <list> of <float>, same length as series ([] for empty input)
    """
    if not series:
        # fix: the original returned [0.0] for an empty series, producing
        # an output longer than its input
        return []
    return [0.0] + [series[i] - series[i - 1] for i in range(1, len(series))]
def ajax_delete_entry(request):
    """Asynchronously deletes a single TrackingEntry.

    Reads 'hidden-id' (entry primary key) and 'entry_date' (yyyy-mm-dd)
    from the request. When an id is supplied, the entry belonging to the
    requesting user is deleted and the calendar for that date's year/month
    is regenerated so the client can refresh its view.

    NOTE(review): 'user_id' is read from the merged form data below even
    though it is not pre-declared in `form` -- presumably supplied by
    get_request_data/session; confirm. No exceptions are caught here,
    so missing keys or lookup failures propagate to the caller.

    :param request: :class:`HttpRequest`
    :returns: dict with 'success', 'error' and 'calendar' keys (serialized
              to json by the surrounding ajax machinery, presumably)
    """
    form = {
        'hidden-id': None,
        'entry_date': None
    }

    # get the form data from the request object
    form.update(get_request_data(form, request))

    # create our json structure
    json_data = {
        'success': False,
        'error': '',
        'calendar': ''
    }

    if form['hidden-id']:
        # get the user and make sure that the user
        # assigned to the TrackingEntry is the same
        # as what's requesting the deletion
        user = Tbluser.objects.get(id__exact=form['user_id'])
        entry = TrackingEntry.objects.get(id=form['hidden-id'],
                                          user=user)
        entry.delete()

        # regenerate the calendar for the month the entry belonged to
        year, month, day = map(int,
                               form['entry_date'].split("-")
                               )
        calendar = gen_calendar(year, month, day,
                                user=form['user_id'])

        # if all went well
        json_data['success'] = True
        json_data['calendar'] = calendar

    return json_data
def mni152_to_civet(img, civet_density='41k', method='linear'):
    """
    Projects `img` in MNI152 space to CIVET surface

    Parameters
    ----------
    img : str or os.PathLike or niimg_like
        Image in MNI152 space to be projected
    civet_density : {'41k'}, optional
        Desired output density of CIVET surface. Default: '41k'.
        The '164k' density is not supported yet.
    method : {'nearest', 'linear'}, optional
        Method for projection. Specify 'nearest' if `img` is a label image.
        Default: 'linear'

    Returns
    -------
    civet : (2,) tuple-of-nib.GiftiImage
        Projected `img` on CIVET surface
    """
    # registration fusion to the high-resolution CIVET mesh is unsupported
    if civet_density == '164k':
        raise NotImplementedError('Cannot perform registration fusion to '
                                  'CIVET 164k space yet.')
    return _vol_to_surf(img, 'civet', civet_density, method)
def get_rotational_part(trans):
    """
    Get the :math:`d×d` rotational part of a :math:`(d+1)×(d+1)` transformation matrix.

    Parameters
    ----------
    trans : array_like
        The given transformation matrix.

    Returns
    -------
    numpy.ndarray
        The rotational part, with potential scaling removed.

    Raises
    ------
    ValueError
        If the transformation matrix is not square.
    """
    trans = np.asarray(trans)
    # validate explicitly: ``assert`` would be stripped under ``python -O``
    if trans.shape[0] != trans.shape[1]:
        raise ValueError(
            "transformation matrix must be square, got shape %s" % (trans.shape,))
    ndim = trans.shape[0] - 1
    return remove_scaling(trans[:ndim, :ndim])
def imitation_terminal_condition(env,
                                 dist_fail_threshold=1.0,
                                 rot_fail_threshold=0.5 * np.pi):
  """A terminal condition for motion imitation task.

  The episode ends when the reference motion is over, when a non-foot
  link of the robot touches the ground, or when the simulated root
  drifts too far (in position or rotation) from the reference root.

  Args:
    env: An instance of MinitaurGymEnv
    dist_fail_threshold: Max distance the simulated character's root is allowed
      to drift from the reference motion before the episode terminates.
    rot_fail_threshold: Max rotational difference between simulated character's
      root and the reference motion's root before the episode terminates.

  Returns:
    A boolean indicating if episode is over.
  """
  pyb = env._pybullet_client
  task = env._task

  motion_over = task.is_motion_over()
  foot_links = env.robot.GetFootLinkIDs()
  ground = env.get_ground()

  contact_fall = False
  # sometimes the robot can be initialized with some ground penetration
  # so do not check for contacts until after the first env step.
  if env.env_step_counter > 0:
    # NOTE(review): env.pybullet_client vs env._pybullet_client above --
    # presumably aliases of the same client; confirm.
    robot_ground_contacts = env.pybullet_client.getContactPoints(
        bodyA=env.robot.quadruped, bodyB=ground)

    # any contact through a non-foot link counts as a fall
    for contact in robot_ground_contacts:
      if contact[3] not in foot_links:
        contact_fall = True
        break

  root_pos_ref, root_rot_ref = pyb.getBasePositionAndOrientation(
      task.get_ref_model())
  root_pos_sim, root_rot_sim = pyb.getBasePositionAndOrientation(
      env.robot.quadruped)

  # squared-distance check avoids a sqrt
  root_pos_diff = np.array(root_pos_ref) - np.array(root_pos_sim)
  root_pos_fail = (
      root_pos_diff.dot(root_pos_diff) >
      dist_fail_threshold * dist_fail_threshold)

  # relative rotation ref * conj(sim), reduced to a normalized angle
  root_rot_diff = transformations.quaternion_multiply(
      np.array(root_rot_ref),
      transformations.quaternion_conjugate(np.array(root_rot_sim)))
  _, root_rot_diff_angle = pose3d.QuaternionToAxisAngle(
      root_rot_diff)
  root_rot_diff_angle = motion_util.normalize_rotation_angle(
      root_rot_diff_angle)
  root_rot_fail = (np.abs(root_rot_diff_angle) > rot_fail_threshold)

  done = motion_over \
      or contact_fall \
      or root_pos_fail \
      or root_rot_fail

  return done
import subprocess
def iproute2_is_vrf_capable():
    """
    Checks if the iproute2 version installed on the system is capable of
    handling VRFs by interpreting the output of the 'ip' utility found in PATH.

    Returns True if capability can be detected, returns False otherwise.
    """
    if is_linux():
        try:
            subp = subprocess.Popen(
                ["ip", "route", "show", "vrf"],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                stdin=subprocess.PIPE,
                # decode output to str: without this, under Python 3 the
                # comparison below was bytes != str, which is always True,
                # so any stderr output was mistaken for VRF capability
                universal_newlines=True,
            )
            iproute2_err = subp.communicate()[1].splitlines()[0].split()[0]
            if iproute2_err != "Error:":
                return True
        except Exception:
            # best-effort probe: any failure (no 'ip' binary, empty
            # stderr, ...) means "not capable"
            pass
    return False
def cleanRepl(matchobj):
    """
    Clean up a directory name so that it can be written to a
    matplotlib title without encountering LaTeX escape sequences

    Backslash pairs and forward slashes map to '/', underscores
    (subscript in LaTeX) become escaped underscores, and anything
    else is dropped.
    """
    # dispatch table replacing the original if/elif chain
    substitutions = {
        r'\\': '/',
        r'_': r'\_',
        r'/': '/',
    }
    return substitutions.get(matchobj.group(0), '')
def bitcoinAddress2bin(btcAddress):
    """convert a bitcoin address to binary data capable of being put in a CScript

    Addresses containing ':' are treated as cashaddr (prefix:payload);
    anything else is decoded as legacy base58check.

    :raises InvalidAddress: if the cashaddr checksum does not verify
    """
    # chop the version and checksum out of the bytes of the address
    if ":" in btcAddress:
        pfx, addr = btcAddress.split(":")
        decoded = cashaddrutil.b32decode(addr)
        if not cashaddrutil.verify_checksum(pfx, decoded):
            raise InvalidAddress('Bad cash address checksum')
        # regroup 5-bit symbols into bytes
        converted = cashaddrutil.convertbits(decoded, 5, 8)
        return bytes(converted[1:21])  # 0 is address type, last 6 are checksum
    else:
        # legacy base58: drop 1 version byte and 4 checksum bytes
        return decodeBase58(btcAddress)[1:-4]
def StartITMAgent(TargetServerIP,TargetServerUsername,TargetServerPasswd,StartCommand=r'/itm/bin/itmcmd agent start ux um'):
    """
    Start ITM Agent by running StartCommand on the target host via telnet.

    NOTE(review): the telnet port comes from the module-level name
    ``TargetServerPort`` (not a parameter) -- confirm it is defined
    wherever this function is imported.

    return:
        exitCode:
            0: success
            1: connection error
            2: command error
        commandOutput: output
    """
    return BaseAdapter.ExecuteCMDviaTelnet(TargetServerIP,
                                           TargetServerUsername,
                                           TargetServerPasswd,
                                           StartCommand,
                                           port=TargetServerPort)
import argparse
def parse_arguments():
    """
    Parse the command-line arguments used to configure the DNN coverage run.

    :return: a dictionary comprising the command-line arguments
    """
    # define the program description
    text = 'Coverage Analyzer for DNNs'
    # initiate the parser
    parser = argparse.ArgumentParser(description=text)
    # add new command-line arguments
    parser.add_argument("-V", "--version", help="show program version",
                        action="version", version="DeepFault %f" % __version__)
    parser.add_argument("-M", "--model", help="Path to the model to be loaded.\
                        The specified model will be used.") # , required=True)
    # choices=['lenet1','lenet4', 'lenet5'], required=True)
    parser.add_argument("-DS", "--dataset", help="The dataset to be used (mnist\
                        or cifar10).", choices=["mnist", "cifar10"]) # , required=True)
    parser.add_argument("-A", "--approach", help="the approach to be employed \
                        to measure coverage", choices=['idc', 'nc', 'kmnc',
                        'nbc', 'snac', 'tknc', 'ssc', 'lsa', 'dsa'])
    parser.add_argument("-C", "--class", help="the selected class", type=int)
    parser.add_argument("-Q", "--quantize", help="quantization granularity for \
                        combinatorial other_coverage_metrics.", type=int)
    parser.add_argument("-L", "--layer", help="the subject layer's index for \
                        combinatorial cov. NOTE THAT ONLY TRAINABLE LAYERS CAN \
                        BE SELECTED", type=int)
    parser.add_argument("-KS", "--k_sections", help="number of sections used in \
                        k multisection other_coverage_metrics", type=int)
    parser.add_argument("-KN", "--k_neurons", help="number of neurons used in \
                        top k neuron other_coverage_metrics", type=int)
    parser.add_argument("-RN", "--rel_neurons", help="number of neurons considered\
                        as relevant in combinatorial other_coverage_metrics", type=int)
    parser.add_argument("-AT", "--act_threshold", help="a threshold value used\
                        to consider if a neuron is activated or not.", type=float)
    parser.add_argument("-LOG", "--logfile", help="path to log file")
    parser.add_argument("-ADV", "--advtype", help="path to log file")
    parser.add_argument("-S", "--seed", help="seed ot random", type=int)
    # parse and return the arguments as a plain dict
    args = parser.parse_args()
    return vars(args)
def cost_matrix(gdf, dist_3d_matrix, line_bc,resolution,Rivers_option):
    """
    Creates the cost matrix in €/km by finding the average weight between
    two points and then multiplying by the distance and the line base cost.

    :param gdf: Geodataframe being analyzed
    :param dist_3d_matrix: 3D distance matrix of all points [meters]
    :param line_bc: line base cost for line deployment [€/km]
    :param resolution: grid resolution, forwarded to the river-crossing check
    :param Rivers_option: when truthy, add a river-crossing penalty weight
    :return value: Cost matrix of all the points present in the gdf [€]
    """
    weights = gdf['Weight'].values
    # pairwise mean weight (w_i + w_j) / 2 via broadcasting instead of
    # explicit np.repeat along both axes
    pair_weight = (weights[:, np.newaxis] + weights[np.newaxis, :]) / 2
    if Rivers_option:
        # penalize links that cross rivers
        pair_weight = pair_weight + river_intersection(gdf, resolution)
    # weighted 3D distance, scaled from €/km to € (distances are in meters)
    return (dist_3d_matrix * pair_weight) * line_bc / 1000
def home():
    """
    The home page that asks for user input.

    :returns: the rendered 'index.html' template
    """
    return render_template('index.html')
def tanh(x):
    """
    Computes hyperbolic tangent of x element-wise.

    Parameters
    ----------
    x : tensor
        Must be one of the following types: bfloat16, half, float32, float64, complex64, complex128.

    Returns
    -------
    A Tensor. Has the same type as x.
    """
    # instantiate the MindSpore op and apply it in one expression
    return ms.ops.Tanh()(x)
def all_directions():
    """
    :return: Returns all the available directions.

    NOTE(review): this is a live dict-keys view over the module-level
    ``dt.groups`` mapping, not a list -- callers needing a list should
    wrap it.
    """
    return dt.groups.keys()
def determine_result(image, reference, result):
    """
    Determine a test result against a reference and thresholds.

    The PSNR delta (result - reference) is compared against a FAIL
    threshold (a looser one for 3D images) and then a WARN threshold.

    Args:
        image (TestImage): The image being compressed.
        reference (Record): The reference result to compare against.
        result (Record): The test result.

    Returns:
        Result: The result code.
    """
    delta = result.psnr - reference.psnr
    # 3D images use their own, dedicated failure threshold
    fail_limit = RESULT_THRESHOLD_3D_FAIL if image.is3D else RESULT_THRESHOLD_FAIL
    if delta < fail_limit:
        return trs.Result.FAIL
    if delta < RESULT_THRESHOLD_WARN:
        return trs.Result.WARN
    return trs.Result.PASS
def str2period(x, tostring=False):
    """
    Convert a ``yyyymmdd`` integer into a daily pandas.Period
    ex) 99991231 -> 9999-12-31

    ``None`` passes through unchanged; when *tostring* is set, the
    period is returned in its string form instead.
    """
    if x is None:
        return None
    # split yyyymmdd into its components arithmetically
    period = pd.Period(year=x // 10000, month=x // 100 % 100, day=x % 100,
                       freq='D')
    return str(period) if tostring else period
def webauthn_begin_assertion():
    """
    This url is called when the authentication process begins.

    Validates the submitted username, generates a fresh challenge
    (stored in the session, padding stripped), collects all registered
    credentials for the user, and returns the WebAuthn assertion
    options as JSON. The user's id is stashed in the session for the
    completion step.
    """
    username = request.form.get("login_username")
    if not util.validate_username(username):
        return make_response(jsonify({"fail": "Invalid username."}), 401)
    credentials = database.get_credentials(username)
    user = database.get_user(username)
    if not user:
        return make_response(jsonify({"fail": "User does not exist."}), 401)

    # replace any stale challenge with a fresh one; base64 padding ('=')
    # is stripped before storing
    session.pop("challenge", None)
    challenge = util.generate_challenge(32)
    session["challenge"] = challenge.rstrip("=")

    # build one WebAuthnUser per registered credential
    webauthn_users = []
    for credential in credentials:
        webauthn_users.append(
            webauthn.WebAuthnUser(
                credential.ukey,
                credential.username,
                credential.display_name,
                "",
                credential.credential_id,
                credential.pub_key,
                credential.sign_count,
                credential.rp_id,
            )
        )
    webauthn_assertion_options = webauthn.WebAuthnAssertionOptions(
        webauthn_users, challenge
    )
    # remember which user is logging in for the verification step
    session.pop("login_username", None)
    session["login_username"] = user.id
    return jsonify(webauthn_assertion_options.assertion_dict)
from datetime import datetime
def add_new_repository(user, full_name, githubprofile_service, commit_service):
    """
    Register a new repository and all its commits from the last 30 days.

    Raises:
        github.UnknownObjectException
        GitHubProfile.DoesNotExists
        RepositoryNotBelongToUserException

    Returns: (Repository) created
    """
    # fix: the module imports ``from datetime import datetime``, which does
    # not provide ``datetime.timedelta``; import the class we actually need
    from datetime import timedelta

    LOGGER.info(f'Registering a new Repository for "{full_name}"')
    github_api = Github(user.githubprofile.access_token)
    git_repository = github_api.get_repo(full_name)
    owner = get_repository_owner(user, full_name, githubprofile_service)
    # only the owner may register the repository
    if owner != user:
        raise RepositoryNotBelongToUserException
    repository = Repository.objects.create(
        user=user,
        github_id=git_repository.id,
        owner=owner,
        name=git_repository.name,
        full_name=git_repository.full_name,
        description=git_repository.description or '',
        language=git_repository.language or '',
        stargazers_count=git_repository.stargazers_count,
        archived=git_repository.archived,
        # disabled=git_repository.disabled,  # TODO: find where this field actually lives
        clone_url=git_repository.clone_url or '',
        git_url=git_repository.git_url or '',
        created_at=make_aware(git_repository.created_at),
        updated_at=make_aware(git_repository.updated_at),
    )
    total_commits_created = commit_service.update_repository_commits(
        user,
        repository,
        since=timezone.now() - timedelta(days=30)
    )
    LOGGER.info(f'{total_commits_created} Commits created for {full_name}')
    return repository
def case_copy(request):
    """
    Copy an existing test case.

    Requires a logged-in user; otherwise redirects to the login page
    (remembering '/base/case/' as the post-login destination). On GET,
    duplicates the Case identified by 'case_id', appending 'copy' to
    its name, and redirects back to the case list.

    NOTE(review): the final redirect target "base/case/" lacks the
    leading slash used elsewhere ('/base/case/') and is therefore a
    relative redirect -- confirm whether that is intentional.

    :param request:
    :return:
    """
    user_id = request.session.get('user_id', '')
    if not get_user(user_id):
        request.session['login_from'] = '/base/case/'
        return HttpResponseRedirect('/login/')
    else:
        if request.method == 'GET':
            case_id = request.GET.get('case_id', '')
            case_ = Case.objects.get(case_id=case_id)
            # new case keeps everything but gets a 'copy' suffix on the name
            case_name = case_.case_name + 'copy'
            content = case_.content
            project = case_.project
            description = case_.description
            username = request.session.get('user', '')
            case = Case(case_name=case_name, project=project, description=description, update_time=timezone.now(),
                        content=content, update_user=username)
            case.save()
            log.info('copy case {} success. case info: {} // {} '.format(case_name, project, content))
        return HttpResponseRedirect("base/case/")
def get_node_config(node_dir, key):
    """
    This function retrieves a setting from the indigo node configuration.

    :param node_dir: directory of the node whose configuration is queried
    :param key: name of the configuration setting to read
    :return: whatever exec_node_command returns for 'config get <key>'
    """
    return exec_node_command(node_dir, "config", "get", key)
def get_argparser():
    """
    Returns an argument parser for this script.

    Note: argparse applies ``type`` only to command-line strings, never to
    ``default`` values, so integer defaults must already be ints -- the
    previous ``default=5e5`` / ``default=5e4`` silently produced floats.
    """
    parser = ArgumentParser(description='Fit a U-Time model defined in'
                                        ' a project folder. Invoke '
                                        '"ut init" to start a new project.')
    parser.add_argument("--num_GPUs", type=int, default=1,
                        help="Number of GPUs to use for this job (default=1)")
    parser.add_argument("--force_GPU", type=str, default="")
    parser.add_argument("--continue_training", action="store_true",
                        help="Continue the last training session")
    parser.add_argument("--initialize_from", type=str, default=None,
                        help="Path to a model weights file to initialize from.")
    parser.add_argument("--log_file_prefix", type=str,
                        help="Optional prefix for logfiles.", default="")
    parser.add_argument("--overwrite", action='store_true',
                        help='overwrite previous training session in the '
                             'project path')
    parser.add_argument("--just_one", action="store_true",
                        help="For testing purposes, run only on the first "
                             "training and validation samples.")
    parser.add_argument("--no_val", action="store_true",
                        help="For testing purposes, do not perform validation.")
    parser.add_argument("--max_train_samples_per_epoch", type=int,
                        default=500000,  # was 5e5 (a float, despite type=int)
                        help="Maximum number of sleep stages to sample in each"
                             "epoch. (defaults to 5e5)")
    parser.add_argument("--val_samples_per_epoch", type=int,
                        default=50000,  # was 5e4 (a float, despite type=int)
                        help="Number of sleep stages to sample in each"
                             "round of validation. (defaults to 5e4)")
    parser.add_argument("--n_epochs", type=int, default=None,
                        help="Overwrite the number of epochs specified in the"
                             " hyperparameter file with this number (int).")
    parser.add_argument("--channels", nargs='*', type=str, default=None,
                        help="A list of channels to use instead of those "
                             "specified in the parameter file.")
    parser.add_argument("--final_weights_file_name", type=str,
                        default="model_weights.h5")
    parser.add_argument("--train_on_val", action="store_true",
                        help="Include the validation set in the training set."
                             " Will force --no_val to be active.")
    return parser
def _deep_different(left, right, entry):
    """
    checks that entry is identical between ZipFile instances left and
    right

    Chunks of the entry are compared pairwise; unequal lengths show up
    as a None-vs-chunk mismatch via izip_longest.
    """
    lchunks = chunk_zip_entry(left, entry)
    rchunks = chunk_zip_entry(right, entry)
    return any(a != b for a, b in izip_longest(lchunks, rchunks))
def test_xscov_asymmetric(text_cov_lb5_asym):
    """Check that `XsCov` raises error because matrix non symmetric"""
    tape = sandy.formats.endf6.Endf6.from_text(text_cov_lb5_asym)
    # NOTE(review): the `return` inside pytest.raises never yields a usable
    # value -- the block is expected to raise before returning
    with pytest.raises(Exception):
        return sandy.XsCov.from_endf6(tape)
def segment(img, img_bg, img0_sigma=5, img0_min_size=300, img0_min_distance=50, img0_thresh_bg=500, img0_min_size_bg=10_000, img0_dilation=5):
    """Standard DAPI / nuclear based cellular segmentation.

    Pipeline: smooth + Otsu-threshold the nuclear channel, seed from
    distance-transform maxima, threshold the cytoplasmic channel at a
    fixed intensity, then watershed the inverted cytoplasm from the
    nuclear seeds.

    Args:
        img (np.array): Image array with nuclear labeling.
        img_bg (np.array): Image to be used for cytoplasmic extraction.
        img0_sigma (int): Size of gaussian smoothing kernel.
        img0_min_size (int): Minimum size of objects (removed below).
        img0_min_distance (int): Minimum distance between two nuclei.
        img0_thresh_bg (int): Thresholding of background in img_bg
            (fixed intensity cutoff, not adaptive).
        img0_min_size_bg (int): Minimum size of cytoplasm (removed below).
        img0_dilation (int): Size of dilation kernel (expand cytoplasm).

    Returns:
        img0_seg_clean (np.array): Segmented cells with consecutive labels.
        img0_nuclei (np.array): Corresponding nuclear labels (boolean mask).
    """

    # Gauss-smoothening
    img0_smooth = ndi.filters.gaussian_filter(img, img0_sigma)

    # Thresholding and removal of small objects
    img0_thresh = filters.threshold_otsu(img0_smooth)
    img0_smooth_thresh = img0_smooth>img0_thresh
    img0_smooth_thresh_fill = ndi.binary_fill_holes(img0_smooth_thresh).astype(bool)
    img0_nuclei = morphology.remove_small_objects(img0_smooth_thresh_fill, img0_min_size)

    # Labeling, euclidean distance transform, smoothing
    img0_dist_trans = ndi.distance_transform_edt(img0_nuclei)
    img0_dist_trans_smooth = ndi.filters.gaussian_filter(img0_dist_trans, sigma=img0_sigma)

    # Seeding (and dilating for visualization)
    img0_seeds = feature.peak_local_max(img0_dist_trans_smooth, indices=False, min_distance=img0_min_distance)
    img0_seeds_labeled, _ = ndi.label(img0_seeds)

    # Treshold background (in img_bg)
    img0_smooth_thresh_bg = img_bg>img0_thresh_bg

    # Remove small objects and dilute
    img0_smooth_objects = morphology.remove_small_objects(img0_smooth_thresh_bg, min_size=img0_min_size_bg)
    img0_kernel = morphology.selem.diamond(img0_dilation)
    img0_dil = convolve2d(img0_smooth_objects.astype(int), img0_kernel.astype(int), mode='same').astype(bool)
    img1_dil = img0_dil * img_bg

    # Inverted watershed for segmentation
    img0_seg = segmentation.watershed(~img1_dil, img0_seeds_labeled)

    # Add small objects again (watershed covers full image)
    # masking by the dilated cytoplasm restricts labels to cell area
    img0_seg_clean = img0_seg * img0_dil

    return img0_seg_clean, img0_nuclei
from typing import Optional
from typing import Callable
def get_parsing_function(input_format: Optional[str], filename: str) -> Callable:
    """Return appropriate parser function based on input format of file.

    :param input_format: File format; inferred from the extension of
        *filename* when ``None``
    :param filename: Filename
    :raises ValueError: Unknown file format
    :return: Appropriate 'read' function
    """
    if input_format is None:
        input_format = get_file_extension(filename)
    if input_format == "tsv":
        return read_sssom_table
    elif input_format == "rdf":
        return read_sssom_rdf
    elif input_format == "json":
        return read_sssom_json
    elif input_format == "alignment-api-xml":
        return read_alignment_xml
    elif input_format == "obographs-json":
        return read_obographs_json
    else:
        # raise a specific exception type instead of bare ``Exception`` so
        # callers can handle bad formats without masking unrelated errors;
        # ValueError is still caught by existing ``except Exception`` handlers
        raise ValueError(f"Unknown input format: {input_format}")
from typing import Iterable
def process_proto_file(proto_file) -> Iterable[OutputFile]:
    """Generates code for a single .proto file.

    Builds the package node tree, derives the generated header name from
    the proto filename, emits the package code followed by its stubs, and
    returns the single resulting output file.
    """
    _, package_root = build_node_tree(proto_file)
    output_filename = _proto_filename_to_generated_header(proto_file.name)
    generator = RawCodeGenerator(output_filename)
    codegen.generate_package(proto_file, package_root, generator)
    # stubs are appended to the same output after the package body
    codegen.package_stubs(package_root, generator.output, StubGenerator())
    return [generator.output]
def custom_resampler(array_like):
    """Heat-index helper: sum of (value / 5.0) ** 1.514 over monthly temperatures."""
    scaled = np.divide(array_like, 5.0)
    return np.sum(scaled ** 1.514)
def make_nhwc(batch, c=3):
    """Makes a NxHxW(x1) tensor NxHxWxC, written in graph mode.

    Args:
        batch: Rank-3 (N, H, W) or rank-4 (N, H, W, 1) tensor.
        c: Number of channels to tile the single input channel into.

    Returns:
        A rank-4 (N, H, W, c) tensor whose last axis repeats the input
        channel c times. The embedded tf.debugging.Assert ops fail at run
        time if the input is not rank 3/4 or has more than one channel.
    """
    # Assert 3D or 4D
    n_dims = tf.rank(batch)
    assert_op = tf.debugging.Assert(
        tf.logical_or(tf.equal(n_dims, 3),
                      tf.equal(n_dims, 4)), [n_dims])
    # If necessary, 3D to 4D
    with tf.control_dependencies([assert_op]):
        batch = tf.cond(
            tf.equal(n_dims, 4),
            true_fn=lambda: batch,
            false_fn=lambda: tf.expand_dims(batch, -1))
    # Repeat the last channel Cx, after asserting #channels is 1
    shape = tf.shape(batch)
    assert_op = tf.debugging.Assert(tf.equal(shape[3], 1), [shape])
    with tf.control_dependencies([assert_op]):
        return tf.tile(batch, (1, 1, 1, c))
from typing import Union
from typing import Tuple
from typing import List
def word_window(sequence: str, target: str, size: int) -> Union[Tuple[List[str], List[str]], None]:
    """
    Collect up to `size` tokens on each side of `target` within `sequence`.
    A size of 0 means the entire sequence is used as the window.
    Returns None when the target token does not occur.
    """
    assert size >= 0
    words = word_tokenize(sequence.lower())
    if target not in words:
        return None
    pos = words.index(target)
    if size > 0:
        lo, hi = max(pos - size, 0), pos + size + 1
    else:
        lo, hi = 0, len(words)
    return words[lo:pos], words[pos + 1:hi]
def get_pheonix_restaurants():
    """
    Fetch every Phoenix restaurant.
    (Function name keeps the historical "pheonix" spelling for callers.)

    Returns:
        A list of restaurant dictionary objects for Phoenix.
    """
    city = "Phoenix"
    return get_restaurants(city, PHOENIX_RESTAURANTS_PATH)
def get_p_detect_small_jurisdictions(end_date):
    """
    Daily case ascertainment rate (CAR) for the small jurisdictions.

    NOTE(review): the original docstring describes a decline from 0.75 to
    0.5 around 15/12/2021, but the code only returns the flat 0.75
    baseline for every day from third_start_date through end_date —
    confirm whether the transition was ever meant to be applied here.
    """
    dates = pd.date_range(third_start_date, end_date)
    # Flat baseline detection probability, one entry per day.
    return np.full(dates.shape, 0.75)
def f(x):
    """
    Evaluate the function sin(x) (element-wise for array input).
    """
    result = np.sin(x)
    return result
def _arnoldi_step(j, V_v, H_s, A, A_inv, precision):
    """ Performs an iteration of the Arnoldi process:
    - A new Krylov vector new_v = (A @ A_inv) @ V_v[:, j] is computed.
    - new_v is orthogonalized against the columns of V_v, yielding
      the orthogonalized vector orth_v along with new overlaps.
    - orth_v is stored in V_v[:, j+1] and the overlaps in H_s[:, j].
    We should have
      `A @ V_v[:, :j] = V_v[:, :j+1] @ H_s[:j+1, :j]`
    along with
      `H_s[:j+1, :j] = V_v[:, :j]^H @ A @ V_v[:, :j]`.
    """
    # Start from the j-th Krylov basis vector.
    new_v = vops.get_columns(V_v, j, 1)
    # Optional preconditioning with A_inv, applied before A (at default
    # precision, deliberately independent of the `precision` argument).
    if A_inv is not None:
        new_v = vops.matvec(A_inv, new_v, precision=lax.Precision.DEFAULT)
    new_v = vops.matvec(A, new_v, precision=precision)
    # Classical Gram-Schmidt against the existing basis columns.
    orth_v, overlaps, v_norm = cgs(new_v, V_v, precision=precision)
    # Write orth_v into V_v[:, j+1] and the overlaps/norm into H_s[:, j].
    V_v, H_s = _update_arnoldi(V_v, H_s, j, orth_v, overlaps, v_norm)
    return V_v, H_s
def get_papers():
    """Get papers discussing COVID-19 treatments

    Returns
    -------
    DataFrame
        dataframe of treatment documents
    """
    def _treatment_mentions(frame):
        # TODO: normalize by text length?
        return frame["text"].str.count(RE_TOPIC)

    # Keep documents with at least 10 treatment-topic mentions.
    return filter_docs_by_count(_treatment_mentions, 10)
def calculate_speed(ds):
    """Calculate speed on the central (T) grid.

    U and V are first interpolated onto the central grid, then combined as
    sqrt(U**2 + V**2).

    Parameters
    ----------
    ds : xarray dataset
        A grid-aware dataset as produced by `xorca.lib.preprocess_orca`.

    Returns
    -------
    speed : xarray data array
        A grid-aware data array with the speed in `[m/s]`.
    """
    grid = xgcm.Grid(ds, periodic=["Y", "X"])
    u_centered = grid.interp(ds.vozocrtx, "X", to="center")
    v_centered = grid.interp(ds.vomecrty, "Y", to="center")
    return (u_centered**2 + v_centered**2)**0.5
def image_preprocessing(image_path, preprocess_input, target_size):
    """
    Read an image from disk, add a batch axis, and run the given
    preprocessing function over it.

    Args:
        image_path (str): path to the image.
        preprocess_input (function): a preprocessing function.
        target_size (tuple): image target size.

    Returns:
        np.ndarray: the preprocessed image with a leading batch dimension.
    """
    loaded = keras.preprocessing.image.load_img(
        image_path, target_size=target_size
    )
    arr = keras.preprocessing.image.img_to_array(loaded)
    batch = np.expand_dims(arr, axis=0)
    return preprocess_input(batch)
def get_ndjson(obj, jobType):
    """
    Given an S3 object that points to a JSON file,
    read it into memory and return an ndjson string
    representation (one JSON object per line).
    """
    raw_records = json.loads(obj.get()['Body'].read().decode('utf-8'))
    lines = []
    for raw in raw_records:
        title = raw['jobTitle']
        cleaned_title = title
        pay_type = "annual"
        # Hourly roles encode the rate type in the title, e.g. "X - Hourly".
        if "hourly" in cleaned_title.lower():
            cleaned_title = title.split("-")[0]
            pay_type = "hourly"
        # Key order matters: it determines the serialized line layout.
        shaped = {
            "rawTitle": title,
            "title": cleaned_title.strip().title(),
            "salaryType": pay_type.strip().title(),
            "salary": raw['meanPay'],
            "company": raw['company'].strip().title(),
            "jobType": jobType.strip().title(),
        }
        lines.append(json.dumps(shaped))
    return '\n'.join(lines)
import json
import http
def render_to_json(data, is_json=False):
    """Create a JSON response from a data dictionary and return a
    Django response object.

    :param data: Serializable object, or an already-serialized JSON string
        when ``is_json`` is True.
    :param is_json: Skip serialization when the caller passes raw JSON.
    :return: HttpResponse with a UTF-8 JSON content type.
    """
    js = data if is_json else json.dumps(data, cls=DjangoJSONEncoder)
    # (The original also bound an unused `mimetype` alias here.)
    mime = "application/json;charset=utf-8"
    return http.HttpResponse(enc(js), content_type=mime)
def get_ctd_from_txt(fname, summary, source, sea_name, p_lat, p_lon,
                     p_time, ca=[]):
    """
    Create an ambient.Profile object from a text file of ocean property data

    Read the CTD and current data in the given filename (fname) and use that
    data to create an ambient.Profile object for use in TAMOC. The ascii
    file must store its data in columns reporting depth (m), temperature
    (deg C), salinity (psu), and the u- and v-components of velocity (m/s).

    Parameters
    ----------
    fname : str
        Relative path to the water column data file.
    summary : str
        Description of the simulation for which this data will be used.
    source : str
        Documentation of the source of the ambient ocean data provided.
    sea_name : str
        NC-compliant name for the ocean water body as a string.
    p_lat : float
        Latitude (deg)
    p_lon : float
        Longitude, negative is west of 0 (deg)
    p_time : netCDF4 time format
        Date and time of the CTD data using netCDF4.date2num().
    ca : list, default=[]
        Dissolved atmospheric gases to include as derived concentrations;
        choices are 'nitrogen', 'oxygen', 'argon', and 'carbon_dioxide'.

    Returns
    -------
    profile : ambient.Profile
        ambient.Profile object for manipulating ambient water column data
        in TAMOC.
    """
    # Load the columnar data, skipping '#' comment lines
    data = np.loadtxt(fname, comments='#')

    # Metadata describing each stored column
    labels = ['z', 'temperature', 'salinity', 'ua', 'va']
    units = ['m', 'deg C', 'psu', 'm/s', 'm/s']
    comments = ['modeled'] * len(labels)

    # Name the netCDF4 dataset after the text file, swapping the extension
    base = '.'.join(fname.split('.')[:-1])
    nc_name = base + '.nc'

    # Build and return the ambient.Profile object
    return create_ambient_profile(data, labels, units, comments, nc_name,
        summary, source, sea_name, p_lat, p_lon, p_time, ca)
import scipy
def get_ndimage_module(*args):
    """
    Pick the ndimage implementation: cupyx.scipy.ndimage is returned as
    soon as any argument lives on the GPU, otherwise scipy.ndimage.
    """
    on_gpu = any(is_on_gpu(a) for a in args)
    return cupyx.scipy.ndimage if on_gpu else scipy.ndimage
def sensitivity_plot_comparison(n_bins_energy, energy, sensitivity):
    """
    Main sensitivity plot.
    We plot the sensitivity achieved, MAGIC sensitivity and Crab SEDs

    Parameters
    --------
    n_bins_energy: `int` number of bins in energy (not used in the body;
        kept for interface compatibility)
    energy: `numpy.ndarray` sensitivity array
    sensitivity: `numpy.ndarray` sensitivity array (bins of energy,
        gammaness and theta2)

    Returns
    --------
    fig_sens: `matplotlib.pyplot.figure` Figure containing sensitivity plot
    """
    # Final sensitivity plot
    fig_sens, ax = plt.subplots()
    plot_sensitivity(energy, sensitivity, ax)
    # Overlay Crab SEDs at 100%, 1% and 10% between 10 GeV and 100 TeV
    plot_Crab_SED(ax, 100, 10**1. * u.GeV, 10**5 * u.GeV, label = r'Crab')
    plot_Crab_SED(ax, 1, 10**1. * u.GeV, 10**5 * u.GeV, ls = 'dotted',label = '1% Crab')
    plot_Crab_SED(ax, 10, 10**1. * u.GeV, 10**5 * u.GeV, ls = '-.',label = '10% Crab')
    # Reference MAGIC sensitivity curve for comparison
    plot_MAGIC_sensitivity(ax)
    format_axes_sensitivity(ax)
    ax.legend(numpoints = 1, prop = {'size':9}, ncol = 2, loc = 'upper right')
    return fig_sens
def factor(from_units, to_units, units_class=None):
    """
    Return a conversion factor:

    >>> value_in_cm = 25
    >>> value_in_cm * factor('cm', 'mm')
    250

    class: If specified, the class of the units must match the class provided.
    """
    # Nothing to convert when both unit names are absent (None or empty).
    if not from_units and not to_units:
        return 1.0
    if from_units == to_units:
        return 1.0
    # Units must belong to the same dimension class.
    if _ureg[from_units][0] != _ureg[to_units][0]:
        raise ValueError(
            "Can't convert between apples and oranges (%s and %s)"
            % (from_units, to_units)
        )
    if units_class and _ureg[from_units][0] != units_class:
        raise ValueError(
            "Units class must be %s, but got %s" % (units_class, _ureg[from_units][0])
        )
    return _ureg[from_units][1] / _ureg[to_units][1]
def fibonacci_index(index):
    """
    Returns the fibonacci sequence with the given index being the last value.

    Raises TypeError for strings, floats, zero, negative numbers, and any
    other non-integer input. Returns an explanatory string for indexes 1
    and 2 (the seed sequence is already that long).
    """
    # Reject non-integers and values below 1 up front. Incomparable types
    # (lists, dicts, ...) are caught by the isinstance check, which replaces
    # the original's raise-then-catch-own-TypeError control flow.
    if not isinstance(index, int) or index < 1:
        raise TypeError('Please enter a positive whole number.')
    if index < 3:
        return f"""[0, 1]
            For getting better results enter a whole number bigger than 2."""
    sequence = [0, 1]
    while len(sequence) < index:
        sequence.append(sequence[-2] + sequence[-1])
    return sequence
from pathlib import Path
def get_history_file_path():
    """Locate the training command history file next to this module."""
    return Path(__file__).with_name('history')
def var_is_protein_effecting(variant_data):
    """Check if variant has a MED or HIGH impact

    :param variant_data: A GeminiRow for a single variant.
    :type variant_data: GeminiRow.
    :returns: bool -- True unless impact_severity is annotated "LOW".
        A missing annotation (.get() yields None) counts as True, as before.
    """
    return variant_data.INFO.get('impact_severity') != "LOW"
def center_embeddings(X, Y):
    """Mean-center both embedding matrices in place and return them.

    (Copied from Alvarez-Melis & Jaakkola (2018).)
    """
    for mat in (X, Y):
        mat -= mat.mean(axis=0)  # in-place: callers observe the shift too
    return X, Y
def verification_code_form(request):
    """
    form to enter the verification code
    """
    # Non-POST requests just render the empty form context.
    if request.method != 'POST':
        return {}
    return _verify_code(request, request.POST['code'])
import os
def _ProcessGccConfig(target, output_dir):
    """Do what gcc-config would have done"""
    binpath = '/bin'
    envd = os.path.join(output_dir, 'etc', 'env.d', 'gcc', '*')
    srcpath = _EnvdGetVar(envd, 'GCC_PATH')
    for prog in os.listdir(output_dir + srcpath):
        # Skip binaries already wrapped.
        if prog.endswith('.real') or prog.endswith('.elf'):
            continue
        # Only wrap programs prefixed with the target tuple.
        if not prog.startswith(target):
            continue
        GeneratePathWrapper(output_dir, os.path.join(binpath, prog),
                            os.path.join(srcpath, prog))
    return srcpath
from typing import Union
from datetime import datetime
def symbol_directory(date: Union[str, datetime.date, None] = None, filter: str = ''):
    """
    Return all IEX-listed securities and their corresponding data fields.
    The IEX-Listed Symbol Directory Daily List is generated and posted to
    the IEX website at 8:30 p.m. ET before each trading day, then hourly
    from 9 p.m. until 6 p.m. ET the following day.

    Args:
        date: Effective date
        filter: https://iextrading.com/developer/docs/#filter-results

    Returns:
        dict: result

    See: https://iextrading.com/developer/docs/#iex-listed-symbol-directory
    """
    endpoint = 'ref-data/daily-list/symbol-directory'
    if date:
        endpoint += '/' + string_or_date(date)
    return get_json(endpoint, filter)
import re
def read_requirements(*parts):
    """
    Return requirements from parts.

    Given a requirements.txt (or similar style file),
    returns a list of requirements.
    Assumes anything after a single '#' on a line is a comment, and ignores
    empty lines.

    :param parts: list of filenames which contain the installation "parts",
        i.e. submodule-specific installation requirements
    :returns: A compiled list of requirements.
    """
    # Regexes stripped from each line, in order.
    strip_patterns = (
        r"(\s*)?#.*$",  # inline comment plus the whitespace before it
        r"-r.*$",       # link to another requirement file
        r"-e \..*$",    # link to editable install
    )
    requirements = []
    for raw_line in read(*parts).splitlines():
        cleaned = raw_line
        for pattern in strip_patterns:
            cleaned = re.sub(pattern, "", cleaned)
        if cleaned:  # i.e. we have a non-zero-length string
            requirements.append(cleaned)
    return requirements
def build_persistence(config):
    """
    Factory method to build a Persistence object from the given config
    """
    try:
        if config.getboolean("taky", "redis"):
            return RedisPersistence(config.get("taky", "hostname"))
        return Persistence()
    except (AttributeError, ValueError):
        # "redis" is not a boolean (or config lacks getboolean) -- fall
        # through and treat the value as a connection string instead.
        pass
    conn_str = config.get("taky", "redis")
    if conn_str:
        return RedisPersistence(config.get("taky", "hostname"), conn_str)
    return Persistence()
def EulerP_G(e0, e1, e2, e3):
    """ Angular velocity matrix such that omega_global = G theta_dot for Euler parameters
    G = 2 E
    Shabana (2.35, 2.54)
    """
    E = np.array([
        [-e1,  e0, -e3,  e2],
        [-e2,  e3,  e0, -e1],
        [-e3, -e2,  e1,  e0]])
    return 2 * E
def scale_range(x, x_range, y_range=(0.0, 1.0)):
    """
    Linearly map the number x from x_range onto y_range.

    :param x: the number to scale
    :type x: float
    :param x_range: the number range that x belongs to
    :type x_range: tuple
    :param y_range: the number range to convert x to, defaults to (0.0, 1.0)
    :type y_range: tuple
    :return: the scaled value
    :rtype: float
    """
    x_lo, x_hi = x_range
    y_lo, y_hi = y_range
    # Same operation order as the classic (y_span * offset / x_span) + y_lo
    scaled = (y_hi - y_lo) * (x - x_lo) / (x_hi - x_lo)
    return scaled + y_lo
import math
def circular_difference(num1, num2):
    """Circular difference num1 - num2 on a ring of size 2**M.

    Note: when num1 == num2 the full ring size (2**M) is returned, not 0,
    preserving the original wrap-around convention.

    Arguments:
        num1 {Integer}
        num2 {Integer}
    Returns:
        Integer -- Circular Difference in (0, 2**M]
    """
    global M
    # 1 << M is exact integer arithmetic; the previous int(math.pow(2, M))
    # went through a float and would raise OverflowError for very large M.
    diff = num1 - num2
    return diff if diff > 0 else (1 << M) + diff
import os
def find_free_port(ports_socket, name):
    """Retrieve a free TCP port from test server."""
    # Make the request name unique per client process so concurrent test
    # runs don't collide on the allocator side.
    request_name = '-'.join((name, str(os.getpid())))
    while True:
        port = test_server_request(ports_socket, request_name, GETPORT)
        # The allocator may hand out a port something else has since bound;
        # verify it is actually free before returning, otherwise retry.
        if not tcp_listening(port):
            return port
        error('port %u is busy, try another' % port)
def make_leaderboard() -> dict:
    """
    make a leaderboard from the data.

    :rtype: dict
    :return: mapping of rank name ('first'..'fifth') to a dict with the
        user's id, display name and wallet score. Slots beyond the number
        of known users keep their placeholder values.
    """
    sync_data()
    # Sort (user_id, record) pairs by wallet balance, richest first.
    ranked = sorted(users.items(), key=lambda item: item[1]['wallet'], reverse=True)
    # ranks dict (placeholders survive when fewer than five users exist)
    names = {
        'first': {'id': 0, 'name': "None", 'score': 0},
        'second': {'id': 0, 'name': "None", 'score': 0},
        'third': {'id': 0, 'name': "None", 'score': 0},
        'fourth': {'id': 0, 'name': "None", 'score': 0},
        'fifth': {'id': 0, 'name': "None", 'score': 0}
    }
    # Fill the five rank slots directly from the sorted pairs. The original
    # recovered the id by scanning str((user_id, record)) for a quote
    # character, which broke on ids containing quotes; use the key itself.
    for (user_id, record), rank in zip(ranked, names):
        slot = names[rank]
        slot['id'] = user_id
        slot['name'] = record['name']
        slot['score'] = record['wallet']
    return names
from typing import Union
from typing import Optional
import os
def resolve_value_descriptor(value_descriptor: Union[str, dict]) -> Optional[str]:
    """
    Resolves the value of a value descriptor, which may be an environment
    variable name, or a map with keys `env` (the environment variable name)
    and `value` (the value to use if `env` is not specified or if the
    environment variable is unset).

    Args:
        value_descriptor: Environment variable name, or a
            {'env': ..., 'value': ...} mapping.

    Returns:
        The resolved value, or None when nothing is set.
    """
    if isinstance(value_descriptor, str):
        return os.environ.get(value_descriptor)
    if "env" in value_descriptor:
        # Fall back to the inline 'value' when the variable is unset.
        return os.environ.get(
            value_descriptor["env"], value_descriptor.get("value")
        )
    return value_descriptor.get("value")
def simple_rouse_mid_msd(t, b, N, kbT=1, xi=1, num_modes=1000):
    """
    modified from Weber Phys Rev E 2010, Eq. 24.
    """
    # Sum contributions of the even Rouse modes (2p for p = 1..num_modes).
    mode_sum = 0
    for mode in range(1, num_modes + 1):
        k_even = rouse_mode_coef(2 * mode, b, N, kbT)
        mode_sum = mode_sum + 12 * kbT / k_even * (1 - np.exp(-k_even * t / (N * xi)))
    # Add the center-of-mass diffusion term.
    return mode_sum + 6 * kbT / xi / N * t
def get_output(img):
    """
    Render a binary image (pixels equal to 255) onto a Canvas, write the
    frame to "out.txt", and return the frame string.

    Input: Image as numpy array.
    """
    rows, cols = img.shape
    canvas = Canvas()
    with open("out.txt", 'w') as sink:
        for y in range(rows):
            for x in range(cols):
                if img[y, x] == 255:
                    canvas.set(x, y)
        sink.write(canvas.frame())
        sink.write("\n")
    return canvas.frame()
def threshold_binarize(x, threshold=0.5):
    """ Binarize a tensor: entries >= threshold become 1, the rest 0. """
    mask = tf.greater_equal(x, tf.constant(threshold))
    ones = tf.ones_like(x)
    zeros = tf.zeros_like(x)
    return tf.where(mask, x=ones, y=zeros)
from scipy.stats import linregress
from scipy.spatial import ConvexHull
from scipy.interpolate import interp1d
def dir_io_surface(links, nodes, dims):
    """
    Set directionality by first building a "DEM surface" where inlets are "hills"
    and outlets are "depressions," then setting links such that they flow
    downhill.

    Parameters
    ----------
    links : dict
        Network links; requires 'id', 'idx', 'wid_adj', 'guess' and
        'guess_alg' entries. 'slope' and 'slope2' are added on return.
    nodes : dict
        Network nodes; requires 'id', 'idx', 'conn', 'inlets', 'outlets'.
    dims : tuple
        (rows, cols) shape of the underlying raster.

    Returns
    -------
    links, nodes : dict
        Inputs with per-link upstream-node guesses (algorithm id 3) and
        fitted slopes appended.
    """
    def hull_coords(xy):
        # Find the convex hull of a set of coordinates, then order them clockwisely
        # and remove the longest edge
        hull_verts = ConvexHull(np.transpose(np.vstack((xy[0], xy[1])))).vertices
        hull_coords = np.transpose(np.vstack((xy[0][hull_verts], xy[1][hull_verts])))
        hull_coords = np.reshape(np.append(hull_coords, [hull_coords[0,:]]), (int((hull_coords.size+2)/2), 2))
        # Find the biggest gap between hull points
        dists = np.sqrt((np.diff(hull_coords[:,0]))**2 + np.diff(hull_coords[:,1])**2)
        maxdist = np.argmax(dists) + 1
        first_part = hull_coords[maxdist:,:]
        second_part = hull_coords[0:maxdist,:]
        if first_part.size == 0:
            hull_coords = second_part
        elif second_part.size == 0:
            hull_coords = first_part
        else:
            hull_coords = np.concatenate((first_part, second_part))
        return hull_coords

    alg = 3  # algorithm id recorded alongside each direction guess

    # Create empty image to store surface. (np.float was removed from
    # NumPy; the builtin float is the documented float64 equivalent.)
    I = np.zeros(dims, dtype=float) + 1

    # Get row,col coordinates of outlet nodes, arrange them in a clockwise order
    outs = [nodes['idx'][nodes['id'].index(o)] for o in nodes['outlets']]
    outsxy = np.unravel_index(outs, I.shape)
    hc = hull_coords(outsxy)

    # Burn the outlet hull into the surface
    for i in range(len(hc)-1):
        linterp = interp1d(hc[i:i+2,0], hc[i:i+2,1])
        xinterp = np.arange(np.min(hc[i:i+2,0]), np.max(hc[i:i+2,0]), .1)
        yinterp = linterp(xinterp)
        for x,y in zip(xinterp,yinterp):
            I[int(round(x)), int(round(y))] = 0
    # Normalized distance away from the outlet hull (outlets are low)
    Iout = distance_transform_edt(I)
    Iout = (Iout - np.min(Iout)) / (np.max(Iout) - np.min(Iout))

    # Get coordinates of inlet nodes; use only the widest inlet and any inlets within 25% of its width
    ins = [nodes['idx'][nodes['id'].index(i)] for i in nodes['inlets']]
    in_wids = []
    for i in nodes['inlets']:
        linkid = nodes['conn'][nodes['id'].index(i)][0]
        linkidx = links['id'].index(linkid)
        in_wids.append(links['wid_adj'][linkidx])
    maxwid = max(in_wids)
    keep = [ii for ii, iw in enumerate(in_wids) if abs((iw - maxwid)/maxwid) < .25]
    ins_wide_enough = [ins[k] for k in keep]
    insxy = np.unravel_index(ins_wide_enough, dims)
    # A hull needs at least three points; otherwise use the inlets directly
    if len(insxy[0]) < 3:
        hci = np.transpose(np.vstack((insxy[0], insxy[1])))
    else:
        hci = hull_coords(insxy)

    # Burn the inlet hull into a fresh surface
    I = np.zeros(dims, dtype=float) + 1
    if hci.shape[0] == 1:
        I[hci[0][0], hci[0][1]] = 0
    else:
        for i in range(len(hci)-1):
            linterp = interp1d(hci[i:i+2,0], hci[i:i+2,1])
            xinterp = np.arange(np.min(hci[i:i+2,0]), np.max(hci[i:i+2,0]), .1)
            yinterp = linterp(xinterp)
            for x,y in zip(xinterp,yinterp):
                I[int(round(x)), int(round(y))] = 0
    # Inverted, normalized distance away from the inlet hull (inlets are high)
    Iin = distance_transform_edt(I)
    Iin = np.max(Iin) - Iin
    Iin = (Iin - np.min(Iin)) / (np.max(Iin) - np.min(Iin))

    # Compute the final surface by adding the inlet and outlet images
    Isurf = Iout + Iin

    # Determine the flow direction of each link by regressing surface
    # elevation against along-link distance; flow goes downhill.
    slopes = []
    slopes2 = []
    for lid in links['id']:
        linkidx = links['id'].index(lid)
        lidcs = links['idx'][linkidx][:]
        rc = np.unravel_index(lidcs, dims)
        dists_temp = np.cumsum(np.sqrt(np.diff(rc[0])**2 + np.diff(rc[1])**2))
        dists_temp = np.insert(dists_temp, 0, 0)
        elevs = Isurf[rc[0], rc[1]]
        linreg = linregress(dists_temp, elevs)
        # Make sure slope is negative, else flip direction
        if linreg.slope > 0:
            usnode = nodes['id'][nodes['idx'].index(lidcs[-1])]
        else:
            usnode = nodes['id'][nodes['idx'].index(lidcs[0])]
        # Store guess
        links['guess'][linkidx].append(usnode)
        links['guess_alg'][linkidx].append(alg)
        # Store both the regression slope and the simple endpoint slope
        slopes.append(linreg.slope)
        slopes2.append((elevs[-1]-elevs[0])/ dists_temp[-1])

    links['slope'] = slopes
    links['slope2'] = slopes2

    return links, nodes
def guardian_join(team):
    """Returns a string of all of the parent guardians on the team joined together"""
    all_guardians = [g for player in team['team_players'] for g in player['guardians']]
    return ", ".join(all_guardians)
def LCF_graph(n, shift_list, repeats, create_using=None):
    """
    Return the cubic graph specified in LCF notation.

    LCF notation (LCF=Lederberg-Coxeter-Fruchte) is a compressed
    notation used in the generation of various cubic Hamiltonian
    graphs of high symmetry. See, for example, dodecahedral_graph,
    desargues_graph, heawood_graph and pappus_graph below.

    n (number of nodes)
      The starting graph is the n-cycle with nodes 0,...,n-1.
      (The null graph is returned if n <= 0.)

    shift_list = [s1,s2,..,sk], a list of integer shifts mod n,

    repeats
      integer specifying the number of times that shifts in shift_list
      are successively applied to each v_current in the n-cycle
      to generate an edge between v_current and v_current+shift mod n.

    For v1 cycling through the n-cycle a total of k*repeats
    with shift cycling through shiftlist repeats times connect
    v1 with v1+shift mod n

    The utility graph K_{3,3}

    >>> G=nx.LCF_graph(6,[3,-3],3)

    The Heawood graph

    >>> G=nx.LCF_graph(14,[5,-5],7)

    See http://mathworld.wolfram.com/LCFNotation.html for a description
    and references.
    """
    if create_using is not None and create_using.is_directed():
        raise NetworkXError("Directed Graph not supported")

    if n <= 0:
        return empty_graph(0, create_using)

    # start with the n-cycle
    G = cycle_graph(n, create_using)
    G.name = "LCF_graph"
    # Materialize the node list: G.nodes() is only positionally indexable
    # on networkx 1.x, while list(G) works on every networkx version.
    nodes = list(G)

    n_extra_edges = repeats * len(shift_list)
    # edges are added n_extra_edges times (not all of these need be new)
    if n_extra_edges < 1:
        return G

    for i in range(n_extra_edges):
        shift = shift_list[i % len(shift_list)]  # cycle through shift_list
        v1 = nodes[i % n]  # cycle repeatedly through nodes
        v2 = nodes[(i + shift) % n]
        G.add_edge(v1, v2)
    return G
from typing import ByteString
def is_prefix_of(prefix: ByteString, label: ByteString) -> bool:
    """ Whether label starts with prefix """
    if len(prefix) > len(label):
        return False
    # Compare element-wise; zip stops at the (shorter) prefix length.
    return all(p == c for p, c in zip(prefix, label))
from ..architectures import create_unet_model_3d
from ..utilities import get_pretrained_network
from ..utilities import get_antsxnet_data
def lung_extraction(image,
                    modality="proton",
                    antsxnet_cache_directory=None,
                    verbose=None):

    """
    Perform proton or ct lung extraction using U-net.

    Arguments
    ---------
    image : ANTsImage
        input image

    modality : string
        Modality image type.  Options include "ct" and "proton".

    antsxnet_cache_directory : string
        Destination directory for storing the downloaded template and model weights.
        Since these can be resused, if is None, these data will be downloaded to a
        ~/.keras/ANTsXNet/.

    verbose : boolean
        Print progress to the screen.

    Returns
    -------
    Dictionary of ANTs segmentation and probability images.

    Example
    -------
    >>> output = lung_extraction(lung_image, modality="proton")
    """

    if image.dimension != 3:
        raise ValueError( "Image dimension must be 3." )

    if antsxnet_cache_directory == None:
        antsxnet_cache_directory = "ANTsXNet"

    # Per-modality configuration; the pipeline below is identical for both
    # (the original duplicated ~60 lines across the two branches).
    if modality == "proton":
        network_name = "protonLungMri"
        template_name = "protonLungTemplate"
        classes = ("background", "left_lung", "right_lung")
        unet_parameters = {"number_of_layers": 4,
                           "number_of_filters_at_base_layer": 16,
                           "dropout_rate": 0.0,
                           "convolution_kernel_size": (7, 7, 5),
                           "deconvolution_kernel_size": (7, 7, 5)}
    elif modality == "ct":
        network_name = "ctHumanLung"
        template_name = "ctLungTemplate"
        classes = ("background", "left_lung", "right_lung", "trachea")
        unet_parameters = {"number_of_layers": 4,
                           "number_of_filters_at_base_layer": 8,
                           "dropout_rate": 0.0,
                           "convolution_kernel_size": (3, 3, 3),
                           "deconvolution_kernel_size": (2, 2, 2)}
    else:
        # Previously an unknown modality silently returned None.
        raise ValueError("Unrecognized modality: " + str(modality))

    image_mods = [modality]
    channel_size = len(image_mods)
    number_of_classification_labels = len(classes)

    # The original ct branch omitted antsxnet_cache_directory here; pass it
    # for both modalities so weights cache consistently.
    weights_file_name = get_pretrained_network(network_name,
        antsxnet_cache_directory=antsxnet_cache_directory)

    reorient_template_file_name_path = get_antsxnet_data(template_name,
        antsxnet_cache_directory=antsxnet_cache_directory)
    reorient_template = ants.image_read(reorient_template_file_name_path)
    resampled_image_size = reorient_template.shape

    unet_model = create_unet_model_3d((*resampled_image_size, channel_size),
        number_of_outputs=number_of_classification_labels, **unet_parameters)
    unet_model.load_weights(weights_file_name)

    if verbose == True:
        print("Lung extraction:  normalizing image to the template.")

    # Rigidly align the image to the template via centers of mass.
    center_of_mass_template = ants.get_center_of_mass(reorient_template * 0 + 1)
    center_of_mass_image = ants.get_center_of_mass(image * 0 + 1)
    translation = np.asarray(center_of_mass_image) - np.asarray(center_of_mass_template)
    xfrm = ants.create_ants_transform(transform_type="Euler3DTransform",
        center=np.asarray(center_of_mass_template), translation=translation)
    warped_image = ants.apply_ants_transform_to_image(xfrm, image, reorient_template)

    # Standardize intensities and run the network on a single-image batch.
    batchX = np.expand_dims(warped_image.numpy(), axis=0)
    batchX = np.expand_dims(batchX, axis=-1)
    batchX = (batchX - batchX.mean()) / batchX.std()

    predicted_data = unet_model.predict(batchX, verbose=0)

    origin = warped_image.origin
    spacing = warped_image.spacing
    direction = warped_image.direction

    probability_images_array = list()
    for i in range(number_of_classification_labels):
        probability_images_array.append(
            ants.from_numpy(np.squeeze(predicted_data[0, :, :, :, i]),
                origin=origin, spacing=spacing, direction=direction))

    if verbose == True:
        print("Lung extraction:  renormalize probability mask to native space.")

    # Map each probability image back into the native image space.
    for i in range(number_of_classification_labels):
        probability_images_array[i] = ants.apply_ants_transform_to_image(
            ants.invert_ants_transform(xfrm), probability_images_array[i], image)

    # Hard segmentation = argmax over the per-class probabilities.
    image_matrix = ants.image_list_to_matrix(probability_images_array, image * 0 + 1)
    segmentation_matrix = np.argmax(image_matrix, axis=0)
    segmentation_image = ants.matrix_to_images(
        np.expand_dims(segmentation_matrix, axis=0), image * 0 + 1)[0]

    return {'segmentation_image': segmentation_image,
            'probability_images': probability_images_array}
def adjective_to_verb(sentence, index):
    """
    Extract the word at `index` from `sentence` and change the adjective
    to a verb by appending "en".

    :param sentence: str that uses the word in sentence
    :param index: index of the word to extract once the sentence is split
    :return: str word that changes the extracted adjective to a verb.

    The original only handled index == -1 (returning None otherwise, which
    contradicted its docstring) and blindly chopped the last character,
    assuming a trailing period. Any index is supported now, and trailing
    punctuation is stripped instead of an unconditional character removal.
    """
    word = sentence.split()[index]
    return word.strip('.,!?;:') + "en"
async def async_setup_entry(hass, config_entry):
    """Load the saved entities."""
    # Announce startup; lazy %-style args keep logging cheap.
    _LOGGER.info(
        "Version %s is starting, if you have any issues please report them here: %s",
        VERSION,
        ISSUE_URL,
    )

    # Mirror the stored data into the options and watch for option changes.
    config_entry.options = config_entry.data
    config_entry.add_update_listener(update_listener)

    # Forward the entry to the sensor platform without blocking setup.
    forward_setup = hass.config_entries.async_forward_entry_setup(
        config_entry, "sensor"
    )
    hass.async_create_task(forward_setup)
    return True
import re
def entry(request):
    """WeChat callback entry point (original docstring: 微信处理入口).

    GET requests are WeChat's URL-verification handshake: the ``echostr``
    challenge is decrypted with the configured token/AES key/corp id and
    the plaintext is echoed back.

    POST requests carry an encrypted XML message.  Only plain-text
    messages matching ``"<action> <obj_id>"`` with action ``TY`` or
    ``BH`` (presumably approve/reject -- confirm against
    ``Approve.callback``) are processed, and only when a matching
    ``Approve`` record is still ``WAITING`` and the sender is one of its
    designated approvers.

    NOTE(review): every rejection path returns ``HttpResponse('403')`` /
    ``('500')`` -- the status-code string is the response *body*; the
    HTTP status itself stays 200.
    """
    # Crypto helper built from site settings; used both for the GET
    # handshake (VerifyURL) and for POST message decryption (DecryptMsg).
    msg_crypt = WXBizMsgCrypt(
        Conf.get('WECHAT_TOKEN'),
        Conf.get('WECHAT_ENCODING_AES_KEY'),
        Conf.get('WECHAT_CORPID'))
    msg_signature = request.GET.get('msg_signature')
    timestamp = request.GET.get('timestamp')
    nonce = request.GET.get('nonce')
    if request.method == 'GET':
        # URL-verification handshake: result is a (errcode, plaintext)
        # pair -- the plaintext echostr is returned to WeChat verbatim.
        echostr = request.GET.get('echostr')
        result = msg_crypt.VerifyURL(msg_signature, timestamp, nonce, echostr)
        return HttpResponse(result[1])
    else:
        logger.info('wechat request raw body: %s' % request.body)
        # DecryptMsg returns (errcode, decrypted_xml); 0 means success
        # (established by the result[0] != 0 check below).
        result = msg_crypt.DecryptMsg(request.body, msg_signature, timestamp, nonce)
        logger.info('wechat request decrypt body: %s, %s' % result)
        if result[0] != 0:
            return HttpResponse('403')
        json_obj = xmltodict.parse(result[1])
        msg_type = json_obj['xml']['MsgType']
        # Only handle 'text' messages (original comment: 只处理text请求).
        if msg_type != 'text':
            return HttpResponse('403')
        # from_user = json_obj['xml']['FromUserName']
        # Maps the WeChat account name to an internal user id -- TODO
        # confirm what user_transfer does; it replaced the raw
        # FromUserName lookup commented out above.
        from_user = user_transfer(json_obj['xml']['FromUserName'])
        content = json_obj['xml']['Content']
        # Expected payload: "<action> <obj_id>" where obj_id looks like
        # "123_456"; anything else is rejected.
        match = re.match(r'(?P<action>\w+) (?P<obj_id>\d+_\d+)$', content)
        if not match:
            return HttpResponse('403')
        action, obj_id = match.groups()
        if action not in ['TY', 'BH']:
            return HttpResponse('403')
        # Only a still-pending approval may be acted on.
        obj = Approve.objects.filter(obj_id=obj_id, status='WAITING').first()
        if not obj:
            return HttpResponse('403')
        # The sender must be among the approval's recipients.
        if from_user not in obj.to_users:
            return HttpResponse('403')
        try:
            obj.callback(action, from_user)
        except Exception as error:
            # Log message translates to "WeChat approval error: %s".
            logger.error(u"微信审批异常: %s" % error)
            return HttpResponse('500')
        return HttpResponse('200') | 4069690213d923bd3b516f9c83322f3d1107084d | 3634698
from typing import Union
def unit2internal(src_unit: Union[str, float]):
    """Convert a unit to the internal unit system defined above.

    Args:
        src_unit (str, float): Name of the unit (or a numeric factor).

    Returns:
        float: Conversion factor from the external to the internal
        unit system.
    """
    factor = _parse_unit(src_unit, conversion_factor=_conversion_factor_internal)
    return factor
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.