content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def stringify_array(v,
                    maxDepth=None,
                    maxItems=-1,
                    maxStrlen=-1):
    """Convert a data structure to a string representation.

    Parameters:
        v : the data to convert
        maxDepth (int|None): if > 0, then ellipsise structures deeper than this
        maxItems (int|-1): if > 0, then ellipsise lists longer than this or
            dicts with more than this many items
        maxStrlen (int|-1): if > 0, then ellipsise strings longer than this
    Returns:
        tuple(depth:int, str): the depth (explored) of the structure and the
        string representation of the data
    """
    # Thin public wrapper: forward every option to the private implementation.
    return _stringify_array(v,
                            maxDepth=maxDepth,
                            maxItems=maxItems,
                            maxStrlen=maxStrlen)
import pickle
def rpickle(picke_file, state=None):
    """
    Restore the saved state of the treated gps file.
    """
    # NOTE(review): `state` is accepted but never used here — confirm callers.
    logger.warning('Running rpickle ...')
    if not picke_file.isfile():
        return []
    with open(picke_file, 'rb') as read_pickle:
        return [] + pickle.load(read_pickle)
import jsonschema
import json
import pkg_resources
import os
def test_pydist():
    """Make sure pydist.json exists and validates against our schema."""
    # XXX this test may need manual cleanup of older wheels
    from zipfile import ZipFile  # BUG FIX: ZipFile was used but never imported

    def open_json(filename):
        # Context manager so the file handle is released deterministically.
        with open(filename, 'rb') as f:
            return json.loads(f.read().decode('utf-8'))

    # BUG FIX: `resource_filename` was referenced unqualified (NameError at
    # runtime); qualify it with the pkg_resources module actually imported.
    pymeta_schema = open_json(pkg_resources.resource_filename('wheel.test',
                                                              'pydist-schema.json'))
    valid = 0
    for dist in ("simple.dist", "complex-dist"):
        basedir = pkg_resources.resource_filename('wheel.test', dist)
        for (dirname, subdirs, filenames) in os.walk(basedir):
            for filename in filenames:
                if filename.endswith('.whl'):
                    whl = ZipFile(os.path.join(dirname, filename))
                    for entry in whl.infolist():
                        if entry.filename.endswith('/metadata.json'):
                            pymeta = json.loads(whl.read(entry).decode('utf-8'))
                            jsonschema.validate(pymeta, pymeta_schema)
                            valid += 1
    assert valid > 0, "No metadata.json found"
def should_print(test_function):
    """Decorator that captures stdout for tests of printing code.

    Given a function such as::

        def hello(name):
            print('Hello,', name)

    you can verify what it prints by decorating a test and asserting on the
    injected ``output`` argument::

        @should_print
        def test_hello_nate(output):
            hello("Nate")
            assert output == "Hello, Nate"

    Usage checklist:
    - Put ``@should_print`` directly above the test function.
    - Add an ``output`` parameter to the test function.
    - Assert against ``output``.
    """
    # Patch sys.stdout with a FakeStringIO; mock passes the fake stream to
    # the wrapped function as its extra positional argument.
    patcher = mock.patch("sys.stdout", new_callable=FakeStringIO)
    return patcher(test_function)
def fista(y, A, At, reg_weight, noise_eng, max_iter=100, update_reg=False, **kwargs):
    """
    The FISTA algorithm for the ell1 minimisation problem:
        min_x |y - Ax|^2 + reg * |x|_1
    :param y: the given measurements (here it is the Fourier transform at certain frequencies)
    :param A: the mapping from the sparse signal x to the measurements y
    :param At: the mapping from the measurements y to the sparse signal x
    :param reg_weight: regularisation weight for the ell1-norm of x
    :param noise_eng: noise energy, i.e., |y - Ax|^2
    :param max_iter: maximum number of FISTA iterations
    :param update_reg: whether to update the regularisation weight or not
    :param max_iter_reg: maximum number of iterations used to update the regularisation weight
    :return: tuple ``(x, reg_weight)`` — the recovered signal and the
        (possibly updated) regularisation weight
    """
    # With no reg-weight updating requested, run the FISTA inner loop once.
    if not update_reg:
        max_iter_reg = 1
    else:
        max_iter_reg = kwargs['max_iter_reg']
    # initialise
    x = At(y)
    AtA = lambda input_arg: np.real(At(A(input_arg)))
    # Lipschitz constant for 2 * A^H Ax
    # (power_method estimates the largest eigenvalue; 1.01 adds a safety margin)
    L = 1.01 * 2. * np.real(power_method(AtA, x.shape, 100))
    # print repr(L) # for debug purposes
    for reg_loop in range(max_iter_reg):
        # Restart FISTA from scratch for each candidate reg_weight.
        x = At(y)
        beta = x
        t_new = 1.
        for fista_loop in range(max_iter):
            x_old = x
            t_old = t_new
            # gradient step
            beta = beta - 2. / L * At(A(beta) - y)
            # soft-thresholding
            x = soft(beta, reg_weight / L)
            # update t and beta
            t_new = (1. + np.sqrt(1. + 4. * t_old ** 2)) / 2.
            beta = x + (t_old - 1.) / t_new * (x - x_old)
        # Rescale reg_weight so the residual energy approaches noise_eng.
        reg_weight *= (noise_eng / linalg.norm(y - A(x)) ** 2)
    return x, reg_weight
def produce_segmentation(indices: list[list[int]], wav_name: str) -> list[dict]:
    """Build the segmentation-yaml content from probabilistic_dac indices.

    Args:
        indices (list[list[int]]): output of the probabilistic_dac function
        wav_name (str): the name of the wav file (with the .wav suffix)
    Returns:
        list[dict]: the content of the segmentation yaml
    """
    segments = []
    for chunk in indices:
        duration = len(chunk) / TARGET_SAMPLE_RATE
        # Chunks shorter than the noise threshold are discarded.
        if duration >= NOISE_THRESHOLD:
            offset = chunk[0] / TARGET_SAMPLE_RATE
            segments.append({
                "duration": round(duration, 6),
                "offset": round(offset, 6),
                "rW": 0,
                "uW": 0,
                "speaker_id": "NA",
                "wav": wav_name,
            })
    return segments
def trac_get_tracs_for_object(obj, user=None, trac_type=None):
    """
    Return the Trac queryset attached to a specific object, optionally
    narrowed to one user and/or one trac type.
    """
    ctype = ContentType.objects.get_for_model(type(obj))
    tracs = Trac.objects.filter(content_type=ctype, object_id=obj.pk)
    # Apply the optional narrowing filters only when provided.
    if user:
        tracs = tracs.filter(user=user)
    if trac_type:
        tracs = tracs.filter(trac_type=trac_type)
    return tracs
def size_from_ftp(ftp, url):
    """Get size of a file on an FTP server.

    Parameters
    ----------
    ftp : FTP
        An open ftplib FTP session.
    url : str
        File URL.

    Returns
    -------
    int
        Size in bytes.
    """
    parsed = urlparse(url)
    return ftp.size(parsed.path)
def set_name_line(hole_lines, name):
    """Define the label of each line of the hole

    Parameters
    ----------
    hole_lines: list
        a list of line object of the slot
    name: str
        the name to give to the line

    Returns
    -------
    hole_lines: list
        List of line object with label
    """
    # Label each line with the base name plus its position index.
    for index, line in enumerate(hole_lines):
        line.label = name + "_" + str(index)
    return hole_lines
import torch
def dist_to_boxes(points, boxes):
    """
    Calculates combined distance for each point to all boxes
    :param points: (N, 3)
    :param boxes: (N, 7) [x, y, z, h, w, l, ry]
    :return: distances_array: (M) torch.Tensor of [(N), (N), ...] distances
    """
    distances_array = torch.Tensor([])
    # Convert each 7-dof box to its 8 corner coordinates.
    # NOTE(review): assumes boxes3d_to_corners3d returns one (8, 3) corner
    # array per box — confirm against kitti_utils.
    box_corners = kitti_utils.boxes3d_to_corners3d(boxes)
    for box in box_corners:
        # Axis-aligned extents of the box's corners.
        minX = min(box[:, 0])
        minY = min(box[:, 1])
        minZ = min(box[:, 2])
        maxX = max(box[:, 0])
        maxY = max(box[:, 1])
        maxZ = max(box[:, 2])
        # Centroid of the axis-aligned bounding region.
        centroid = np.array([(maxX + minX) / 2, (maxY + minY) / 2, (maxZ + minZ) / 2])
        # One row of per-point distances to this box's centroid.
        dists_to_curr_box = dist_to_box_centroid(torch.from_numpy(points), torch.from_numpy(centroid)).reshape(1, len(points))
        # Stack results row-wise: one row per box.
        distances_array = torch.cat((distances_array.float(), dists_to_curr_box.float()), 0)
    return distances_array
import logging
import os
def print_listdir(x):
    """Log a marker, print the task id with the current directory listing,
    and return both.

    :param x: opaque task identifier, echoed back to the caller
    :return: tuple of (x, list of entries in the current working directory)
    """
    log = logging.getLogger('SIP.workflow.function')
    log.info('HERE A')
    # List the directory once and reuse it, so the printed value and the
    # returned value cannot disagree (the original called os.listdir twice).
    entries = os.listdir('.')
    print('Task id = {} {}'.format(x, entries))
    return x, entries
def blackwhite2D(data, xsize=None, ysize=None, show=1):
    """blackwhite2D(data,xsize=None,ysize=None,show=1) - display list or array
    data as a black/white (greyscale) image.

    :param data: 2-D list or array of pixel values
    :param xsize: popup window width in pixels (defaults to 300)
    :param ysize: popup window height in pixels (defaults to 300)
    :param show: if truthy, display the image in a resized popup window
    :return: the constructed PIL Image ('L' mode)
    """
    # Accept plain nested lists by converting them to an array first.
    if isinstance(data, list):          # was: type(data) == type([])
        data = array(data)
    w, h = data.shape[1], data.shape[0]
    d = preprocess(data)
    im = Image.new('L', (w, h))
    for j in range(h):
        for i in range(w):
            # (removed unused flat index `ij = i + j * w`)
            im.putpixel((i, j), d[j][i])
    if show:
        # Use identity comparison for None (PEP 8), not `== None`.
        if xsize is None:
            xsize = 300
        if ysize is None:
            ysize = 300
        resizeImage(im, xsize, ysize)
    return im
def contains_digit(s):
    """Return True when at least one character of *s* is a decimal digit."""
    return any(ch.isdigit() for ch in s)
def to_signed(dtype):
    """
    Return a dtype that can hold data of the passed dtype but is signed.
    Raise ValueError if no such dtype exists.

    Parameters
    ----------
    dtype : `numpy.dtype`
        dtype whose values the new dtype needs to be able to represent.

    Returns
    -------
    `numpy.dtype`
    """
    # Signed (or non-integer) dtypes already qualify; pass them through.
    if dtype.kind != "u":
        return np.dtype(dtype)
    if dtype.itemsize == 8:
        raise ValueError("Cannot losslessly convert uint64 to int.")
    # Double the width (capped at 64 bits) so every unsigned value fits.
    bits = min(dtype.itemsize * 2 * 8, 64)
    return np.dtype("int{:d}".format(bits))
def listen_for_wakeword():
    """Continuously detect the appearance of the wakeword in the audio
    stream.  Higher priority than the listen() function.

    Returns:
        (bool): True if the wakeword was detected, False otherwise.
    """
    # Delegate detection to the core engine and propagate its verdict.
    return core.listen_for_wakeword()
import sys
def import_string(import_name):
    """Returns a callable for a given setuptools style import string

    :param import_name: A console_scripts style import string
    """
    # Normalise "pkg.module:attr" into plain dotted form "pkg.module.attr".
    import_name = str(import_name).replace(":", ".")
    try:
        # NOTE(review): import_module is presumably importlib.import_module
        # imported elsewhere in this module — confirm.
        import_module(import_name)
    except ImportError:
        if "." not in import_name:
            # this is a case like "import name", where continuing to the
            # next style of import would not improve the situation, so
            # we raise here.
            raise
    else:
        # The whole dotted path was itself an importable module.
        return sys.modules[import_name]
    # this is a case where the previous attempt may have failed due to
    # not being importable. ("not a package", etc)
    # Split off the final component and treat it as an attribute of the
    # remaining module path.
    module_name, obj_name = import_name.rsplit(".", 1)
    try:
        module = __import__(module_name, None, None, [obj_name])
    except ImportError:
        # Recurse to support importing modules not yet set up by the parent module
        # (or package for that matter)
        module = import_string(module_name)
    try:
        return getattr(module, obj_name)
    except AttributeError as e:
        # Surface a missing attribute as an ImportError for a uniform API.
        raise ImportError(e)
from scipy.stats import kurtosis
def kurtosis(x, y):
    """
    Calculate kurtosis of the probability distribution of the forecast
    error if an observation and forecast vector are given.  Both vectors
    must have the same length, so pairs of elements with the same index
    are compared.

    Description:
        Kurtosis is a measure of the magnitude of the peak of the
        distribution (the fourth standardized moment); the difference from
        the normal distribution's kurtosis is the excess kurtosis, which is
        what is reported here.  A positive value (leptokurtic) indicates a
        peaked distribution — many very small forecast errors — while a
        negative value (platykurtic) indicates a flat distribution.

    :param x: vector of observations
    :param y: vector of forecasts
    :returns: Kurtosis (excess kurtosis of the error x - y)
    """
    # BUG FIX: this function shadows the module-level
    # `from scipy.stats import kurtosis`, so the original
    # `return kurtosis(x - y)` called *itself* and recursed forever.
    # Re-import the scipy implementation under an alias and delegate to it.
    from scipy.stats import kurtosis as _scipy_kurtosis
    return _scipy_kurtosis(x - y)
def preprocess_text(sentence):
    """Handle some weird edge cases in parsing, like 'i' needing to be
    capitalized to be correctly identified as a pronoun."""
    # Map the lowercase pronoun forms to their capitalised spellings;
    # everything else passes through unchanged.
    fixes = {'i': 'I', "i'm": "I'm"}
    return ' '.join(fixes.get(word, word) for word in sentence.split(' '))
import pkgutil
import os
def _GetModuleFromPathViaPkgutil(module_path, name_to_give):
    """Loads module by using pkgutil.get_importer mechanism.

    Args:
        module_path: filesystem path of the module to load.
        name_to_give: name to register the loaded module under.

    Raises:
        ImportError: if no importer can locate the module.
    """
    # Ask pkgutil for the importer responsible for the containing directory.
    importer = pkgutil.get_importer(os.path.dirname(module_path))
    if importer:
        if hasattr(importer, '_par'):
            # par zipimporters must have full path from the zip root.
            # pylint:disable=protected-access
            module_name = '.'.join(
                module_path[len(importer._par._zip_filename) + 1:].split(os.sep))
        else:
            # Ordinary importers resolve by bare module name.
            module_name = os.path.basename(module_path)
        if importer.find_module(module_name):
            return _LoadModule(importer, module_path, module_name, name_to_give)
    raise ImportError('{0} not found'.format(module_path))
def weighted_l2_loss(gt_value, pred_value, weights):
    """Computers an l2 loss given broadcastable weights and inputs."""
    error = pred_value - gt_value
    squared_error = error * error
    # Floats have no shape; report them as scalar-like [1].
    gt_shape = [1] if isinstance(gt_value, float) else gt_value.get_shape().as_list()
    weight_shape = [1] if isinstance(weights, float) else weights.get_shape().as_list()
    tf.logging.info('gt vs pred vs weights shape: %s vs %s vs %s', str(gt_shape),
                    str(pred_value.get_shape().as_list()), str(weight_shape))
    # TODO(kgenova) Consider using tf.losses.mean_squared_error. But need to
    # be careful about reduction method. Theirs is probably better since the
    # magnitude of the loss isn't affected by the weights. But it would need
    # hparam tuning, so it's left out in the first pass.
    log.info('Weights: {}'.format(weights))
    return weights * squared_error
def csr_scale_rows(*args):
    """Dispatch to the native ``_csr.csr_scale_rows`` implementation.

    SWIG-generated overload dispatcher.  Every overload has the shape::

        csr_scale_rows(n_row, n_col, Ap, Aj, Ax, Xx)

    and overloads exist for each pairing of index type (``npy_int32`` or
    ``npy_int64``) with every supported value type: bool, signed and
    unsigned integers of all widths, ``float``/``double``/``long double``
    and their complex (``npy_cfloat``/``npy_cdouble``/``npy_clongdouble``)
    counterparts.  ``Ap``/``Aj``/``Ax`` describe the CSR matrix; ``Ax`` is
    mutable while ``Xx`` is read-only (presumably one scale factor per
    row — see the scipy sparsetools documentation).
    """
    # Overload resolution and argument checking happen inside the C extension.
    return _csr.csr_scale_rows(*args)
import os
def get_homography_calibration_files(fullpath=True):
    """
    Returns a list of the homography calibration yaml files in the
    homographies directory of the mct configuration.
    """
    file_list = os.listdir(homographies_dir)
    # Exclude the calibrator parameters file from the listing.
    params_file = os.path.basename(homography_calibrator_params_file)
    file_list.remove(params_file)
    if fullpath:
        return [os.path.join(homographies_dir, name) for name in file_list]
    return file_list
def complexity_hjorth(signal):
    """**Hjorth's Complexity and Parameters**

    Compute Hjorth's Complexity of a time series and return the Mobility
    and Activity parameters alongside it.  Hjorth Parameters were
    introduced by Hjorth (1970) to describe the general characteristics of
    an EEG trace in a few quantitative terms, but apply to any time series.

    * **Activity** is the variance of the signal, corresponding to its mean
      power when the mean is 0.
    * **Mobility** is the square root of the variance of the first
      derivative divided by the variance of the signal, representing the
      mean frequency / proportion of standard deviation of the power
      spectrum.
    * **Complexity** estimates the bandwidth of the signal, i.e. its
      similarity to a pure sine wave (value converges to 1): the ratio of
      the mobility of the first derivative of the signal to the mobility
      of the signal.

    See Also
    --------
    .fractal_petrosian

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.

    Returns
    -------
    hjorth : float
        Hjorth's Complexity.
    info : dict
        A dictionary containing the additional Hjorth parameters, such as
        ``"Mobility"`` and ``"Activity"``.

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      signal = nk.signal_simulate(duration=2, frequency=5)
      complexity, info = nk.complexity_hjorth(signal)
      complexity
      info

    References
    ----------
    * Hjorth, B (1970) EEG Analysis Based on Time Domain Properties.
      Electroencephalography and Clinical Neurophysiology, 29, 306-310.
      http://dx.doi.org/10.1016/0013-4694(70)90143-4
    """
    # Sanity check: only 1-D input is supported.
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )
    # First and second discrete derivatives of the signal.
    first_deriv = np.diff(signal)
    second_deriv = np.diff(first_deriv)
    # Variances: the signal's own variance is the Activity parameter.
    activity = np.var(signal)
    first_var = np.var(first_deriv)
    second_var = np.var(second_deriv)
    # Mobility and Complexity per Hjorth (1970).
    mobility = np.sqrt(first_var / activity)
    complexity = np.sqrt(second_var / first_var) / mobility
    return complexity, {"Mobility": mobility, "Activity": activity}
import pandas
def load_gecko():
    """
    Load the GeCKO off-target datasets and return the non-essential rows.

    The target variable is the column "A375 Percent rank".
    """
    data_nonessential = pandas.read_excel(settings.pj(settings.offtarget_data_dir, 'GeCKOv2_Non_essentials_Achilles_A375_complete.xls'))  # (4697, 31)
    # BUG FIX: a misplaced parenthesis passed sep="\t" to settings.pj()
    # instead of read_csv(), so the tab-separated file was parsed with the
    # default comma separator.
    data_all_A375 = pandas.read_csv(
        settings.pj(settings.offtarget_data_dir, 'GeckoAvanaSameUnits/GeCKOv2_DMSO_lentiGuide_A375.txt'),
        sep="\t")  # (121964, 25)
    guides = data_nonessential['sgRNA Sequence'].values
    # NOTE(review): this indexes data_nonessential with a boolean mask built
    # from data_all_A375 (different length) — verify this is intentional.
    data = data_nonessential[data_all_A375["sgRNA Sequence"].isin(guides)]
    #missing_guides = set(guides).difference(set(data_all_A375["sgRNA Sequence"].values))
    #tmp = set(data_all_A375["sgRNA Sequence"].values).difference(set(guides))
    return data
def get_sample_eclat(name):
    """Read a tweet sample from a sample file and return it in a format
    eclat can process.

    :param name: path of the sample file; records are whitespace-separated
        rows followed by 8 padding lines each
    :return: tuple (X, Y) where X is a list of item lists and Y the labels
    """
    # BUG FIX: the file was opened but never closed; use a context manager
    # so the handle is released even if parsing raises.
    X = []
    Y = []
    with open(name) as sample_file:
        line = sample_file.readline()
        while line != '':
            row = line.split()
            Y.append(int(row[0]))
            x = []
            # Bucket the follower count (column 3) into labelled ranges.
            if int(row[3]) < 50:
                x.append('#followers: 0-49')
            elif int(row[3]) < 100:
                x.append('#followers: 50-99')
            elif int(row[3]) < 500:
                x.append('#followers: 100-499')
            elif int(row[3]) < 1000:
                x.append('#followers: 500-999')
            elif int(row[3]) < 5000:
                x.append('#followers: 1000-4999')
            elif int(row[3]) < 10000:
                x.append('#followers: 5000-9999')
            else:
                x.append('#followers: 10000+')
            # Binary feature flags map to names from the module-level `cols`.
            for i in range(4, 12):
                if int(row[i]):
                    x.append(cols[i - 3])
            # Column 12 encodes sentiment: 0 negative, 1 neutral, else positive.
            if int(row[12]) == 0:
                x.append('Sentiment: Negative')
            elif int(row[12]) == 1:
                x.append('Sentiment: Neutral')
            else:
                x.append('Sentiment: Positive')
            x.append('Topic: ' + row[13])
            X.append(x)
            # Skip the 8 padding lines between records.
            for _ in range(8):
                sample_file.readline()
            line = sample_file.readline()
    return X, Y
from datetime import datetime
def submission_storage_path(instance, filename):
    """Build the storage path for a submission upload.

    Path layout:
    ``submissions/<user_nick>/<question_level>/<question_level_id>/<timestamp><filename>``

    :param instance: submission instance providing submission_user and
        submission_question
    :param filename: original name of the uploaded file
    :return: relative storage path string
    """
    string = '/'.join(['submissions',
                       instance.submission_user.user_nick,
                       str(instance.submission_question.question_level),
                       str(instance.submission_question.question_level_id)])
    # BUG FIX: the module does `from datetime import datetime`, so the class
    # itself is in scope — `datetime.datetime.now()` raised AttributeError.
    string += '/' + datetime.now().strftime("%I:%M%p-%m-%d-%Y")
    string += filename
    return string
def distance_to_center(n):
    """Return Manhattan distance to center of spiral of length <n>."""
    # Advance the generator past the first n-1 distances, then yield the nth.
    gen = distances_to_center()
    for _skip in range(n - 1):
        next(gen)
    return next(gen)
import logging as log
def detect_tachycardia(heart_rate, age):
    """
    This function makes best guess as to whether tachycardia is being exhibited

    :param float heart_rate: heart rate in bpm
    :param int age: age of user/patient
    :return bool tachycardia: whether or not tachycardia detected
    """
    log.debug("Checking for tachycardia.\n")
    # Age-adjusted maximum heart rate (Gellish formula), rounded to 2 dp.
    max_heart_rate = round(207 - (0.7 * age), 2)
    return heart_rate > max_heart_rate
from typing import List
from typing import Mapping
from typing import Any
from typing import Optional
import inspect
async def _assert_preconditions_async(preconditions: List[List[Contract]],
                                      resolved_kwargs: Mapping[str, Any]) -> Optional[BaseException]:
    """Assert that the preconditions of an async function hold.

    Returns the violation error of the last unsatisfied group, or None when
    some group of preconditions was fully satisfied.
    """
    exception = None  # type: Optional[BaseException]
    # Assert the preconditions in groups. This is necessary to implement "require else" logic when a class
    # weakens the preconditions of its base class.
    for group in preconditions:
        exception = None
        for contract in group:
            assert exception is None, "No exception as long as pre-condition group is satisfiable."
            # Pick out only the kwargs this contract's condition accepts.
            condition_kwargs = select_condition_kwargs(contract=contract, resolved_kwargs=resolved_kwargs)
            # The condition may be a coroutine function, a plain function, or
            # a plain function that returns a coroutine — await as needed.
            if inspect.iscoroutinefunction(contract.condition):
                check = await contract.condition(**condition_kwargs)
            else:
                check_or_coroutine = contract.condition(**condition_kwargs)
                if inspect.iscoroutine(check_or_coroutine):
                    check = await check_or_coroutine
                else:
                    check = check_or_coroutine
            if not_check(check=check, contract=contract):
                # First failing contract invalidates this whole group.
                exception = _create_violation_error(contract=contract, resolved_kwargs=resolved_kwargs)
                break
        # The group of preconditions was satisfied, no need to check the other groups.
        if exception is None:
            break
    return exception
import re
def moveGeneratorFromStrList (betaStringList, string_mode = True):
    """ generate the final output of move sequence as a list of dictionary.
    Input :
    ['F5-LH', 'F5-RH', 'E8-LH', 'H10-RH', 'E13-LH', 'I14-RH', 'E15-LH', 'G18-RH']
    Length of the list: how many moves in this climb to the target hold. Target holds run from the third order hold to the last hold
    Dictionary involves all information needed to evaluate grade/ analyze style for human. This is a basic building block of the route.
    TargetHoldString : "A10" for example
    TargetHoldHand: "RH" for example
    TargetHoldScore: the difficulty to hold on the target hold applying the "RH" operation
    RemainingHoldString : "A10" for example
    RemainingHoldHand:
    RemainingHoldScore
    MovingHoldString : A10 for example
    MovingHoldHand:
    MovingHoldScore:
    dxdyMtoT: vector Target - moving hand. This distance's physical meaning is the real hand traveling range during the move
    dxdyRtoT: vector Target - Remaining hand. This distance's physical meaning is the inter distance between two remaining hand after finish the move
    FootPlacement: [0,0,0,0,1,1,0] means there is hold on region 5 and 6.
    MoveSuccessRate: estimation of how easy of this move
    if coordinate_mode = True, String will be coordinate form and
    """
    # From List of string to hand sequence and op sequence
    # NOTE(review): handSequence is initialised but never populated — confirm
    # whether it is dead code.
    handSequence = []
    handOperatorSequence = []
    xSequence = []
    ySequence = []
    for hold in betaStringList:
        # Split "F5-LH" into the hold name ("F5") and the hand operator ("LH"),
        # then split the name into its letter and number parts.
        characterAndNum = [re.findall(r'(\w+?)(\d+)', hold.split("-")[0])[0]]
        handOp = hold.split("-")[1]
        # Column letter -> 0-based x index; row number -> 0-based y index.
        alphabateList = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K"]
        handOperatorSequence.append(handOp)
        xSequence.append(alphabateList.index(characterAndNum[0][0]) )
        ySequence.append(int(characterAndNum[0][1]) - 1)
    outputDictionaryList = []
    numOfMoves = len(handOperatorSequence) - 2 # calculate from the third hold to the end hold (no final match)
    # loop over holds from third one to the finish hold (rank from 3 to end). In each move, this is the hold defined as target hold
    for rank in range(2, len(handOperatorSequence)):
        # Renew a dictionary
        moveDictionary = {}
        # Define target hold
        targetHoldHand = handOperatorSequence[rank]
        coordinateOfTarget = (xSequence[rank], ySequence[rank])
        if string_mode == False:
            moveDictionary["TargetHoldString"] = coordinateOfTarget
            if targetHoldHand == "LH":
                moveDictionary["TargetHoldHand"] = 0 # LH ->0
            else: moveDictionary["TargetHoldHand"] = 1   # RH -> 1
        else:
            moveDictionary["TargetHoldString"] = coordinateToString(coordinateOfTarget)
            moveDictionary["TargetHoldHand"] = targetHoldHand
        moveDictionary["TargetHoldScore"] = holdScoreUseCordAndOp(coordinateOfTarget, targetHoldHand)  # Could you file I/O excile file L/R hand difficulty?
        # Define remaining hold
        listBeforeTargetHold = handOperatorSequence[0:rank]
        remainingHoldHand = oppositehand(targetHoldHand)
        order = int(''.join(listBeforeTargetHold).rindex(remainingHoldHand)/2) # remaining hold is the last hold with opposite hand in the sequence before Target hand
        coordinateOfRemaining = (xSequence[order], ySequence[order])
        if string_mode == False:
            moveDictionary["RemainingHoldString"] = coordinateOfRemaining
            moveDictionary["RemainingHoldHand"] = 1 - moveDictionary["TargetHoldHand"]
        else:
            moveDictionary["RemainingHoldString"] = coordinateToString(coordinateOfRemaining)
            moveDictionary["RemainingHoldHand"] = remainingHoldHand
        moveDictionary["RemainingHoldScore"] = holdScoreUseCordAndOp(coordinateOfRemaining, remainingHoldHand)
        moveDictionary["dxdyRtoT"] = (coordinateOfTarget[0] - coordinateOfRemaining[0], coordinateOfTarget[1] - coordinateOfRemaining[1])
        # Define moving hold
        movingHoldHand = targetHoldHand
        order = int(''.join(listBeforeTargetHold).rindex(movingHoldHand)/2) # remaining hold is the last hold with opposite hand in the sequence before Target hand
        coordinateOfMoving = (xSequence[order], ySequence[order])
        if string_mode == False:
            moveDictionary["MovingHoldString"] = coordinateOfMoving
            moveDictionary["MovingHoldHand"] = moveDictionary["TargetHoldHand"]
        else:
            moveDictionary["MovingHoldString"] = coordinateToString(coordinateOfMoving)
            moveDictionary["MovingHoldHand"] = movingHoldHand
        moveDictionary["MovingHoldScore"] = holdScoreUseCordAndOp(coordinateOfMoving, movingHoldHand)
        moveDictionary["dxdyMtoT"] = (coordinateOfTarget[0] - coordinateOfMoving[0], coordinateOfTarget[1] - coordinateOfMoving[1])
        # Define foot region location
        # Seven rectangular regions below the remaining hand where feet can be
        # placed; each is a list of (x, y) cells.
        x0, y0 = int(coordinateOfRemaining[0]), int(coordinateOfRemaining[1])
        region0 = [(x,y) for x in range(x0 - 4, x0 - 1) for y in range(y0 - 3, y0 - 1)]
        region1 = [(x,y) for x in range(x0 - 1, x0 + 2) for y in range(y0 - 3, y0 - 1)]
        region2 = [(x,y) for x in range(x0 + 2, x0 + 5) for y in range(y0 - 3, y0 - 1)]
        region3 = [(x,y) for x in range(x0 - 5, x0 - 1) for y in range(y0 - 6, y0 - 3)]
        region4 = [(x,y) for x in range(x0 - 1, x0 + 2) for y in range(y0 - 6, y0 - 3)]
        region5 = [(x,y) for x in range(x0 + 2, x0 + 6) for y in range(y0 - 6, y0 - 3)]
        region6 = [(x,y) for x in range(x0 - 2, x0 + 3) for y in range(y0 - 9, y0 - 6)]
        # check is there foot holds in the region
        footholdList = [0] * 7
        regionList = [region0, region1, region2, region3, region4, region5, region6]
        for holdx, holdy in zip(xSequence, ySequence):
            for i in range(7):
                if (holdx, holdy) in regionList[i]:
                    footholdList[i] = 1
        # deal with additional footholds
        if region1[0][1] < 0:  # if the lowest hold in region1 is < 0, we can use additional footholds (region's first element start from the lowest)
            footholdList[0] = 1
            footholdList[1] = 1
            footholdList[2] = 1
        elif region4[0][1] < 0:  # if the lowest hold in region1 is < 0, we can use additional footholds
            footholdList[3] = 1
            footholdList[4] = 1
            footholdList[5] = 1
        elif region6[0][1] < 0:  # if the lowest hold in region1 is < 0, we can use additional footholds
            footholdList[6] = 1
        moveDictionary["FootPlacement"] = footholdList
        # Add the singlemoveSuccessRate
        if coordinateOfMoving == coordinateOfRemaining: ## If start from the match position
            pass  # May need special consideration when match hand
        # NOTE(review): makeGaussian is scored with the *opposite* hand of the
        # target — presumably it models reach from the remaining hand; confirm.
        if targetHoldHand == "RH":
            scoreFromDistance = makeGaussian(coordinateOfTarget, 3, coordinateOfRemaining, "LH")
        if targetHoldHand == "LH":
            scoreFromDistance = makeGaussian(coordinateOfTarget, 3, coordinateOfRemaining, "RH")
        # Halve the score when no foot region contains a hold.
        scoreFromfoot = 1
        if sum(footholdList) < 1: scoreFromfoot = 0.5
        moveSuccessRate = moveDictionary["RemainingHoldScore"] * moveDictionary["TargetHoldScore"] * scoreFromDistance * scoreFromfoot
        moveDictionary["MoveSuccessRate"] = moveSuccessRate
        # Finish fill in all components of a move
        outputDictionaryList.append(moveDictionary)
    return outputDictionaryList
from datetime import datetime
def generateVtBar(row):
    """Build a VtBarData bar from one raw data row.

    The bar timestamp is shifted back by exactly one minute (seconds are
    kept), wrapping minute 00 to 59 of the previous hour and hour 00 to 23.
    """
    bar = VtBarData()
    raw_symbol, raw_exchange = row['symbol'].split('.')
    bar.symbol = raw_symbol
    bar.exchange = exchangeMapReverse[raw_exchange]
    # Stock exchanges keep the exchange suffix in the vt symbol.
    if bar.exchange in ['SSE', 'SZSE']:
        bar.vtSymbol = '.'.join([bar.symbol, bar.exchange])
    else:
        bar.vtSymbol = bar.symbol
    for field in ('open', 'high', 'low', 'close', 'volume'):
        setattr(bar, field, row[field])
    bar.date = str(row['date'])
    bar.time = str(row['time']).rjust(6, '0')
    # Shift the bar time one minute earlier, keeping the seconds part.
    hh, mm, ss = bar.time[0:2], bar.time[2:4], bar.time[4:6]
    if mm == "00":
        mm = "59"
        # Hour 00 wraps to 23 of the previous day (date itself is unchanged).
        hh = str((int(hh) or 24) - 1).rjust(2, '0')
    else:
        mm = str(int(mm) - 1).rjust(2, '0')
    bar.time = hh + mm + ss
    bar.datetime = datetime.strptime(' '.join([bar.date, bar.time]), '%Y%m%d %H%M%S')
    return bar
import sqlite3
def index_with_links():
    """Handle the shortener form POST request.

    Generates a random short id, persists the (id, link) mapping in the
    SQLite database, and returns the shortened URL plus a CSRF tag for the
    template.

    :return: dict with ``short_link`` and ``csrf_tag`` for rendering
    """
    link = request.forms.get('link')
    generated_id = gen_id()
    db = sqlite3.connect('link_shortner.db')
    try:
        # Parameterized query: safe against SQL injection from the form value.
        db.execute("INSERT INTO links values (?, ?)", (generated_id, link))
        db.commit()
    finally:
        # Close even when the INSERT fails so the connection is never leaked.
        db.close()
    shortened = app.config.get('info.hostname', 'localhost:8080') + '/' + generated_id
    return dict(short_link=shortened, csrf_tag=csrf.csrf_tag())
import torch
import os
def SinGAN_generate(Gs, Zs, reals, styles, NoiseAmp, opt, in_s=None, scale_v=1, scale_h=1, n=0, gen_start_scale=0, num_samples=10):
    """
    Generate image with the given parameters.

    Walks the generator pyramid from coarse to fine: at each scale the
    previous scale's sample is upsampled, mixed with fresh noise, and fed
    through that scale's generator. At the finest scale every sample is
    saved to disk.

    Parameters (per-scale lists are coarse-to-fine and iterated together):
        Gs, Zs, NoiseAmp, styles : generators, fixed noise maps, noise
            amplitudes and style tensors — assumed to be equal length; TODO confirm
        reals : real image downsampled to each scale (used for output sizes)
        opt : options namespace (device, ker_size, num_layer, nc_z,
            scale_factor, mode, out, content)
        in_s : optional starting image for the coarsest scale (zeros if None)
        scale_v, scale_h : vertical/horizontal scaling of the output canvas
        n : index of the first scale being processed
        gen_start_scale : scales below this reuse the fixed noise Z_opt
        num_samples : number of random samples to generate

    Returns:
        I_curr(torch.cuda.FloatTensor) : Current Image (finest-scale output
        of the last generated sample)
    """
    #if torch.is_tensor(in_s) == False:
    if in_s is None:
        # Start from a zero image shaped like the coarsest real image.
        in_s = torch.full(reals[0].shape, 0, device=opt.device)
    images_cur = []
    for G, Z_opt, noise_amp, style in zip(Gs, Zs, NoiseAmp, styles):
        # Padding added around inputs to compensate the conv stack's shrink.
        pad1 = ((opt.ker_size - 1) * opt.num_layer) / 2
        m = nn.ZeroPad2d(int(pad1))
        # Unpadded noise spatial size, scaled by the requested canvas size.
        nzx = (Z_opt.shape[2] - pad1*2) * scale_v
        nzy = (Z_opt.shape[3] - pad1*2) * scale_h
        images_prev = images_cur
        images_cur = []
        m_style = m(style)
        for i in range(0,num_samples,1):
            if n == 0:
                # Coarsest scale: single-channel noise broadcast to 3 channels.
                z_curr = generate_noise([1,nzx,nzy], device=opt.device)
                z_curr = z_curr.expand(1,3,z_curr.shape[2],z_curr.shape[3])
                z_curr = m(z_curr)
            else:
                z_curr = generate_noise([opt.nc_z,nzx,nzy], device=opt.device)
                z_curr = m(z_curr)
            if images_prev == []:
                I_prev = m(in_s)
            else:
                # Upsample the previous-scale sample to this scale's size.
                I_prev = images_prev[i]
                I_prev = imresize(I_prev,1/opt.scale_factor, opt)
                I_prev = I_prev[:, :, 0:round(scale_v * reals[n].shape[2]), 0:round(scale_h * reals[n].shape[3])]
                I_prev = m(I_prev)
                # Crop/resize so the previous image aligns with the noise map.
                I_prev = I_prev[:,:,0:z_curr.shape[2],0:z_curr.shape[3]]
                I_prev = upsampling(I_prev,z_curr.shape[2],z_curr.shape[3])
            if n < gen_start_scale:
                # Below the start scale: reproduce the training image exactly.
                z_curr = Z_opt
            z_in = noise_amp*(z_curr)+I_prev
            I_curr = G(z_in.detach(),I_prev, m_style.detach())
            if n == len(reals)-1:
                # Finest scale reached: persist each generated sample to disk.
                if opt.mode == 'train':
                    dir2save = '%s/RandomSamples/%s/gen_start_scale=%d' % (opt.out, opt.content[:-4], gen_start_scale)
                else:
                    dir2save = generate_dir2save(opt)
                try:
                    os.makedirs(dir2save)
                except OSError:
                    # Output directory already exists.
                    pass
                plt.imsave('%s/%d.png' % (dir2save, i), convert_image_np(I_curr.detach()), vmin=0, vmax=1)
                #plt.imsave('%s/%d_%d.png' % (dir2save,i,n),functions.convert_image_np(I_curr.detach()), vmin=0, vmax=1)
                #plt.imsave('%s/in_s.png' % (dir2save), functions.convert_image_np(in_s), vmin=0,vmax=1)
            images_cur.append(I_curr)
        n += 1
    return I_curr.detach()
def divide_blend(img_x: np.ndarray, img_y: np.ndarray) -> np.ndarray:
    """
    Blend image x and y in 'divide' mode.

    Per-pixel rules (checked in this order):
      * top pixel 0   -> 255 where the bottom pixel is non-zero, else 0
      * top pixel 255 -> bottom pixel unchanged
      * equal pixels  -> 255
      * otherwise     -> bottom / top * 255, saturated at 255

    :param img_x: input grayscale image on top (uint8)
    :param img_y: input grayscale image at bottom (uint8), same shape
    :return: blended grayscale image as uint8
    """
    x = img_x.astype(np.float64)
    y = img_y.astype(np.float64)
    # Safe division: positions where x == 0 are overridden by the first mask
    # in np.select below, so the placeholder zeros are never observed.
    ratio = np.divide(y * 255.0, x, out=np.zeros_like(y), where=x != 0)
    result = np.select(
        [img_x == 0, img_x == 255, img_x == img_y],
        [np.where(img_y != 0, 255.0, 0.0), y, 255.0],
        default=ratio,
    )
    # Saturate instead of letting the uint8 conversion wrap around when the
    # bottom pixel is brighter than the top one (ratio > 255).
    return np.clip(result, 0, 255).astype(np.uint8)
from typing import Hashable
from typing import Optional
from typing import Tuple
from typing import Any
def table_to_bipartite_graph(
    table: Tabular,
    first_part_col: Hashable,
    second_part_col: Hashable,
    *,
    node_part_attr: str = "part",
    edge_weight_attr: str = "weight",
    first_part_data: Optional[RowDataSpec] = None,
    second_part_data: Optional[RowDataSpec] = None,
    first_part_name: Optional[Hashable] = None,
    second_part_name: Optional[Hashable] = None,
    disjoint_keys: bool = False,
) -> AnyGraph:
    """
    Function creating a bipartite graph from the given tabular data.
    Args:
        table (Iterable[Indexable] or pd.DataFrame): input tabular data. It can
            be a large variety of things as long as it is 1. iterable and 2.
            yields indexable values such as dicts or lists. This can for instance
            be a list of dicts, a csv.DictReader stream etc. It also supports
            pandas DataFrame if the library is installed.
        first_part_col (Hashable): the name of the column containing the
            value representing a node in the resulting graph's first part.
            It could be the index if your rows are lists or a key if your rows
            are dicts instead.
        second_part_col (Hashable): the name of the column containing the
            value representing a node in the resulting graph's second part.
            It could be the index if your rows are lists or a key if your rows
            are dicts instead.
        node_part_attr (str, optional): name of the node attribute containing
            the part it belongs to. Defaults to "part".
        edge_weight_attr (str, optional): name of the edge attribute containing
            its weight, i.e. the number of times it was found in the table.
            Defaults to "weight".
        first_part_data (Sequence or Callable, optional): sequence (i.e. list, tuple etc.)
            of column from rows to keep as node attributes for the graph's first part.
            Can also be a function returning a dict of those attributes.
            Note that the first row containing a given node will take precedence over
            subsequent ones regarding data to include.
            Defaults to None.
        second_part_data (Sequence or Callable, optional): sequence (i.e. list, tuple etc.)
            of column from rows to keep as node attributes for the graph's second part.
            Can also be a function returning a dict of those attributes.
            Note that the first row containing a given node will take precedence over
            subsequent ones regarding data to include.
            Defaults to None.
        first_part_name (Hashable, optional): can be given to rename the first part.
            Defaults to None.
        second_part_name (Hashable, optional): can be given to rename the second part.
            to display as graph's second part's name.
            Defaults to None.
        disjoint_keys (bool, optional): set this to True as an optimization
            mechanism if you know your part keys are disjoint, i.e. if no
            value for `first_part_col` can also be found in `second_part_col`.
            If you enable this option wrongly, the result can be incorrect.
            Defaults to False.
    Raises:
        TypeError: if both part columns are identical, or if a row lacks a
            value for either part column.
    Returns:
        nx.AnyGraph: the bipartite graph.
    """
    if first_part_col == second_part_col:
        raise TypeError("first_part_col and second_part_col must be different")
    if first_part_name is None:
        first_part_name = first_part_col
    if second_part_name is None:
        second_part_name = second_part_col
    # Normalizes a pandas DataFrame into a plain row iterator (no-op otherwise).
    table = iterator_from_dataframe(table)
    graph = nx.Graph()
    # Assigns compact integer ids to (column, label) pairs so that identical
    # labels appearing in both columns still become two distinct nodes.
    node_id = IncrementalIdRegister[Tuple[Hashable, Any]]()
    for i, row in enumerate(table):
        try:
            label1 = row[first_part_col]
            label2 = row[second_part_col]
        except (IndexError, KeyError):
            raise TypeError(
                'row %i lacks the "%s" or the "%s" value'
                % (i, first_part_col, second_part_col)
            )
        if disjoint_keys:
            # Keys are guaranteed unique across parts: use the raw labels as
            # node ids and skip the (column, label) lookups entirely.
            n1 = label1
            n2 = label2
        else:
            # TODO: possibility to save lookups for sorted data
            n1 = node_id[first_part_col, label1]
            n2 = node_id[second_part_col, label2]
        if n1 not in graph:
            # First occurrence of this node wins: its row provides the data.
            node_attr = {node_part_attr: first_part_name, "label": str(label1)}
            if first_part_data:
                node_attr.update(collect_row_data(first_part_data, row))
            graph.add_node(n1, **node_attr)
        if n2 not in graph:
            node_attr = {node_part_attr: second_part_name, "label": str(label2)}
            if second_part_data:
                node_attr.update(collect_row_data(second_part_data, row))
            graph.add_node(n2, **node_attr)
        if graph.has_edge(n1, n2):
            # Repeated co-occurrence: bump the edge weight.
            graph[n1][n2][edge_weight_attr] += 1
        else:
            edge_attr = {edge_weight_attr: 1}
            graph.add_edge(n1, n2, **edge_attr)
    return graph
def init_wavefunction(n_sites,bond_dim,**kwargs):
    """
    A function that initializes the coefficients of a wavefunction for L sites (from 0 to L-1) and arranges
    them in a tensor of dimension n_0 x n_1 x ... x n_L for L sites. SVD
    is applied to this tensor iteratively to obtain the matrix product state.
    Parameters
    ----------
    n_sites : int
        Number of sites.
    bond_dim : int
        Initial (maximum) bond dimension of the site tensors.
    kwargs
    ----------
    conserve_n : boolean
        True for conservation of number of particles.
    num_e : int
        Number of electrons
    Returns
    -------
    mps : tensornetwork
        Matrix Product State, right-canonicalized and normalized, with the
        virtual bonds between neighboring sites connected.
    """
    # t1 = time.time()
    # Edge sites get rank-2 tensors (physical x bond); bulk sites rank-3
    # (physical x left-bond x right-bond). Physical dimension is 2.
    mps = [ \
        tn.Node( block(2, bond_dim),axis_names=["n_0","i_0"] )] + \
        [tn.Node( block(2, bond_dim, bond_dim),axis_names=["n_{}".format(l),"i_{}".format(l-1),"i_{}".format(l)]) for l in range(1,n_sites-1)] + \
        [tn.Node( block(2, bond_dim),axis_names=["n_{}".format(n_sites-1),"i_{}".format(n_sites-2)] ) \
        ]
    #Right Canonicalize
    # Sweep from the last site to the first: split each tensor with a full
    # SVD, keep the right-isometric factor on the site, and absorb U*S into
    # the neighbor to the left.
    for i in range(n_sites-1,0,-1):
        if i == n_sites-1:
            redges = [mps[i]["n_{}".format(i)]]
        else:
            redges = [mps[i]["i_{}".format(i)],mps[i]["n_{}".format(i)]]
        ledges = [mps[i]["i_{}".format(i-1)]]
        u,s,v,_ = tn.split_node_full_svd(mps[i], left_edges=ledges, right_edges=redges,\
                  left_edge_name="d_{}".format(i-1), right_edge_name="i_{}".format(i-1),\
#                  max_singular_values=bond_dim)
                  max_truncation_err=1e-5)
        if i == n_sites-1:
            reord_edges=[v["n_{}".format(i)],v["i_{}".format(i-1)]]
        else:
            reord_edges=[v["n_{}".format(i)],v["i_{}".format(i-1)],v["i_{}".format(i)]]
        v.reorder_edges(reord_edges)
        # Contract U*S into the left neighbor so site i stays right-isometric.
        if i == 1:
            mps[i-1].tensor = tn.ncon([mps[i-1].tensor, u.tensor, s.tensor],[(-1,'k'),('k','l'),('l',-2)])
        else:
            mps[i-1].tensor = tn.ncon([mps[i-1].tensor, u.tensor, s.tensor],[(-1,-2,'k'),('k','l'),('l',-3)])
        mps[i].tensor = v.tensor
    #connect edges to build mps
    connected_edges=[]
    conn=mps[0]["i_0"]^mps[1]["i_0"]
    connected_edges.append(conn)
    for k in range(1,n_sites-1):
        conn=mps[k]["i_{}".format(k)]^mps[k+1]["i_{}".format(k)]
        connected_edges.append(conn)
    # Normalize: in right-canonical form the norm lives entirely in site 0.
    mod = np.linalg.norm(mps[0].tensor)
    mps[0].tensor /= mod
    # t2 = time.time()
    #print("MPS CONSTRUCTION TIME=",t2-t1)
    return mps
#NOW FOR SVD | 8f1a4d456945d9a345f560ee3d87dadbf353e7d3 | 3,638,435 |
def num_channels_to_num_groups(num_channels):
    """Returns number of groups to use in a GroupNorm layer with a given number
    of channels. Note that these choices are hyperparameters.

    Args:
        num_channels (int): Number of channels.
    """
    # Exclusive upper bounds paired with the group count to use below them.
    for bound, groups in ((8, 1), (32, 2), (64, 4), (128, 8), (256, 16)):
        if num_channels < bound:
            return groups
    # 256 channels or more always use 32 groups.
    return 32
def response_message(status, message, status_code):
    """Build a JSON API response carrying a status/message pair.

    :param status: short status label for the response body
    :param message: human-readable message for the response body
    :param status_code: HTTP status code to return alongside the body
    :return: (flask JSON response, status_code) tuple
    """
    payload = {"status": status, "message": message}
    return jsonify(payload), status_code
def glDrawBuffers( baseOperation, n=None, bufs=None ):
    """glDrawBuffers( bufs ) -> bufs
    Wrapper will calculate n from dims of bufs if only
    one argument is provided...
    """
    if bufs is None:
        # Single-argument call: the value passed as n is really the buffers.
        n, bufs = None, n
    bufs = arrays.GLenumArray.asArray(bufs)
    if n is None:
        # Derive the buffer count from the array itself.
        n = arrays.GLenumArray.arraySize(bufs)
    return baseOperation(n, bufs)
def compare_rendered(obj1, obj2):
    """
    Return True/False if the normalized rendered version of
    two folium map objects are the equal or not.
    """
    left = normalize(obj1)
    right = normalize(obj2)
    return left == right
async def _get_device_client_adapter(settings_object):
"""
get a device client adapter for the given settings object
"""
if not settings_object.device_id and not settings_object.id_scope:
return None
adapter = adapters.create_adapter(settings_object.adapter_address, "device_client")
adapter.device_id = settings_object.device_id
return adapter | 411b52a4e916d55b46933afbfa4e8513243b4397 | 3,638,440 |
def is_reserved(word):
    """
    Determines if word is reserved.

    :param word: String representing the variable
    :return: True if word is reserved and False otherwise
    """
    # Set literal membership instead of scanning a list.
    return word in {'define', 'define-struct'}
def verify_figure_hash(name, figure=None):
    """
    Verifies whether a figure has the same hash as the named hash in the current hash library.
    If the hash library does not contain the specified name, the hash is added to the library.
    Parameters
    ----------
    name : string
        The identifier for the hash in the hash library
    figure : matplotlib.figure.Figure
        If None is specified, the current figure is used (as determined by matplotlib.pyplot.gcf())
    Returns
    -------
    out : bool
        False if the figure's hash does not match the named hash, otherwise True
    """
    current_hash = hash_figure(figure)
    if name in hash_library:
        return hash_library[name] == current_hash
    # First time we see this name: record the hash and treat it as a match.
    hash_library[name] = current_hash
    return True
def x_span_contains_y(x_spans, y_spans):
    """
    Return whether every span in y_spans is contained by some span in x_spans.

    A span (i, j) is contained by (m, n) when m <= i and j <= n. An empty
    y_spans is vacuously contained; a non-empty y_spans can never be
    contained by an empty x_spans.

    :param x_spans: iterable of (start, end) pairs acting as containers
    :param y_spans: iterable of (start, end) pairs that must each be covered
    :return: True if each y span fits inside at least one x span
    """
    # Materialize once so x_spans may be any iterable, reused per y span.
    containers = list(x_spans)
    return all(
        any(m <= i and j <= n for m, n in containers)
        for i, j in y_spans
    )
import argparse
def parse_args():
    """Parse command line arguments.

    :returns: argparse.Namespace holding the parsed options
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-c', '--config',
        required=True,
        help='configuration file')
    return parser.parse_args()
import time
def log(fn):
    """
    Logging decorator for the REST method calls.

    Records request/response metadata (via ``extractLogData``), the call's
    wall-clock duration and HTTP status, and writes one log record per call.
    Failed calls are re-raised after logging; failures with a non-2xx status
    are logged at ERROR level, everything else at INFO.
    """
    def build_log_data(start):
        # Log record shared by the success and failure paths.
        logData = extractLogData(ctx)
        # time.time(), not time(): `time` is the imported module.
        logData['duration'] = time.time() - start
        logData['httpCode'] = ctx.status
        logData['responseHeader'] = dumps(ctx.headers)
        return logData

    def wrapped(self, *args):
        start = time.time()
        try:
            ret = fn(self, *args)
        except Exception:
            logData = build_log_data(start)
            if ctx.status[0] == '2':
                logger.info('', extra=logData)
            else:
                logger.error('', extra=logData)
            raise
        logger.info('', extra=build_log_data(start))
        return ret
    return wrapped
def deal_line(text_str1, text_str2, para_bound=None):
    """Merge continuation lines and split paragraphs.

    Appends the next raw line ``text_str2`` to the pending text ``text_str1``
    unless a separator line is detected; separators are written straight into
    the module-level ``result_text`` buffer.

    :param text_str1: text accumulated so far for the current paragraph
    :param text_str2: the next raw line to merge in
    :param para_bound: optional paragraph bound forwarded to split_paragraph
    :return: the pending (possibly merged/split) text, or "" after a separator
    """
    global result_text
    text_str2 = text_str2.strip()
    len_text_str2 = len(text_str2)
    if len_text_str2 > 3 and len(set(text_str2)) == 1:  # handle separator lines made of one repeated char, e.g. *****
        st = list(set(text_str2))[0]
        # Emit a normalized 24-char separator instead of the original length.
        # new_file.write(' ' + st * 24 + '\n')
        result_text += HEAD_SPACE + st * 24 + '\n'
        return ""
    if len_text_str2 > 3 and str(text_str2[0:3]) == str(text_str2[-3:]):  # handle separators like ***Text***
        # Flush both the pending text and the decorated line as-is.
        # new_file.write(' ' + text_str1 + '\n')
        # new_file.write(' ' + text_str2 + '\n')
        result_text += HEAD_SPACE + text_str1 + '\n'
        result_text += HEAD_SPACE + text_str2 + '\n'
        return ""
    else:
        if isparagraph_break(text_str1):
            # Pending text ends a paragraph: flush it and start fresh.
            # new_file.write(' ' + text_str1 + '\n')
            result_text += HEAD_SPACE + text_str1 + '\n'
            text_str1 = text_str2
        else:
            # Continuation line: join it onto the pending text.
            text_str1 += text_str2
    if para_bound:
        return split_paragraph(text_str1, para_bound)
    else:
        return text_str1
def resized_image(image: np.ndarray, max_size: int) -> np.ndarray:
    """Resize image to feature_process_size."""
    h, w = image.shape[:2]
    longest = max(w, h)
    # No-op when resizing is disabled (max_size <= 0) or already small enough.
    if max_size <= 0 or longest <= max_size:
        return image
    new_w = w * max_size // longest
    new_h = h * max_size // longest
    return cv2.resize(image, dsize=(new_w, new_h), interpolation=cv2.INTER_AREA)
def load_folder_list(args, ndict):
    """
    Load every pickle referenced by ``ndict`` into one sorted DataFrame.

    Args:
        args: parsed options forwarded to load_pickle_to_dataframe
        ndict : "name_run" -> path
    Returns:
        pandas.DataFrame concatenating all loaded frames, sorted by name_run.
    """
    frames = []
    for p in ndict:
        print("loading %s" % p)
        frames.append(load_pickle_to_dataframe(args, p))
    d = pd.concat(frames)
    d = d.sort_values("name_run")
    # Print a numbered index of the available dataset columns.
    print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
    print("Datasets:")
    print("=========")
    for n, name in enumerate(d.columns):
        print(f"{n} -> {name}")
    print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
    print()
    return d
def charge_initial():
    """
    Not currently in use, parking spot id gets passed in and it carries over
    and passes it into the stripe charge view.
    """
    spot = AddressEntry.query.get(int(request.args.get('id')))
    return render_template(
        'users/charge_initial.html',
        key=stripe_keys['publishable_key'],
        price=spot.price,
    )
import csv
def read_csv_from_file(file):
    """
    Reads the CSV data from the open file handle and returns a list of dicts.
    Assumes the CSV data includes a header row and uses that header row as
    fieldnames in the dict. The following fields are required and are
    case-sensitive:
    - ``artist``
    - ``song``
    - ``submitter``
    - ``seed``
    Other fields are ultimately preserved untouched in the output CSV.
    If the CSV doesn't have a header row, uses the following hardcoded list:
    - ``order``
    - ``seed``
    - ``submitter``
    - ``year``
    - ``song``
    - ``artist``
    - ``link``
    If a tab character is present in the first row, assumes the data is
    tab-delimited, otherwise assumes comma-delimited.
    :returns: All parsed data from the already-opened CSV file given, as a list
        of dicts as generated by `csv.DictReader`. Empty input yields an
        empty list.
    """
    data = list(file)
    if not data:
        # An empty file has no first row to sniff; just return no rows.
        return []
    delimiter = "\t" if "\t" in data[0] else ","
    # Parse only the first row to decide whether it is a header row.
    first_row = next(csv.reader([data[0]], delimiter=delimiter))
    headers = None
    for col in first_row:
        try:
            int(col)
        except ValueError:
            continue
        # Found an integer, so this is a data row: use the hardcoded headers.
        headers = ["order", "seed", "submitter", "year", "song", "artist", "link"]
        break
    if headers is None:
        # No integer found anywhere: treat the first row as the header row
        # and drop it from the data before parsing.
        data.pop(0)
        headers = first_row
    return list(csv.DictReader(data, fieldnames=headers, delimiter=delimiter))
def findOutliers(time, flux, gap=None,
                 threshold_sigma=4,
                 precision_days=0.0205,
                 maxClusterLen = 2
                 ):
    """
    Identify single point outliers.
    Preserves consecutive outliers, and those that are evenly spaced in
    time. This protects short duration transits.
    Inputs:
    ------------
    time, flux
        (1d numpy array) Input data. Flux should have mean (or median) value
        of zero. Units of time are assumed to be days.
    Optional Inputs
    ------------------
    gap
        (1d boolean numpy array) True marks cadences to ignore; defaults to
        all-False when None.
    precision_days
        (float) Points that are evenly spaced to within this precision
        are considered periodic, and not marked as outliers. Setting
        this to zero turns off the search of periodicity.
    threshold_sigma
        (float) Points more than this many sigma from zero are considered
        potential outliers.
    maxClusterLen
        (int) Outliers are not marked if they are part of
        a contiguous cluster at least this long.
    Returns:
    ------------
    An array of indices indicating which points are single point
    outliers. The length of this array is equal to the number of outlier
    points flagged. The length of this array is NOT equal to the length
    of the flux array.
    Notes
    ----------
    `precision_days` should be set to a value comparable to the cadence time.
    For Kepler long cadence, this is 29.52 minutes = 0.0205 days.
    If `time` is not in units of days, set the value of precision in the
    same units.
    """
    assert not np.all(gap), "Can't find outliers if all data is gapped"
    if gap is None:
        gap = np.zeros_like(flux, dtype=bool)
    indices = np.zeros_like(gap)
    #Remove as much signal as possible from the data
    # First differences act as a crude high-pass filter here.
#    fluxDetrended = medianDetrend(flux, 3)
    fluxDetrended = np.diff(flux)
    fluxDetrended = np.append(fluxDetrended, [0])  #Keep the length the same
    #debug()
    assert len(fluxDetrended) == len(flux)
    #Find outliers as too far away from the mean.
    # sqrt(2) compensates for the variance doubling introduced by np.diff.
    rms = robustStd(fluxDetrended[~gap])
    threshold_counts = threshold_sigma * rms / np.sqrt(2)
    # plateau() presumably returns (start, end) index pairs of contiguous
    # runs above threshold — TODO confirm against its definition.
    rawOutliers = plateau(np.fabs(fluxDetrended), threshold_counts)
    if len(rawOutliers) == 0:
        return indices
    # Remove clusters of 2 or more consectutive outliers
    #singleOutlierIndices = np.sort(outliers[(outliers[:,1] - outliers[:,0] <= 2)][:,0])
#    debug()
    span = rawOutliers[:,1] - rawOutliers[:,0]
    outliers = rawOutliers[span < maxClusterLen+2]
    for p1, p2 in outliers:
        indices[p1+1 :p2] = True
    #Check for periodicities in outliers
    # Evenly spaced flagged points may be a real short transit: unflag them.
    if precision_days > 0:
        notOutliers = findPeriodicOutliers(time, indices, precision_days)
        indices[notOutliers] = False
    return indices
def identify_denonavr_receivers():
    """
    Identify DenonAVR using SSDP and SCPD queries.
    Returns a list of dictionaries which includes all discovered Denon AVR
    devices with keys "host", "modelName", "friendlyName", "presentationURL".
    """
    receivers = []
    # Broadcast SSDP, then probe each responder's SCPD description to decide
    # whether it actually is a Denon AVR.
    for device in send_ssdp_broadcast():
        try:
            receiver = evaluate_scpd_xml(device["URL"])
        except ConnectionError:
            # Device went away or refused the query: skip it.
            continue
        if receiver:
            receivers.append(receiver)
    return receivers
def get_index_settings(index):
    """Returns ES settings for this index"""
    response = get_es().indices.get_settings(index=index)
    return response.get(index, {}).get('settings', {})
import hashlib
import six
def make_hashkey(seed):
    """
    Generate a string key by hashing.

    :param seed: any object; its str() representation is hashed
    :return: 32-character hexadecimal MD5 digest
    """
    # six.b() was a Python-2 compatibility shim; encode directly instead.
    # (utf-8 rather than six's latin-1 — identical for ASCII seeds, and it
    # cannot fail on non-Latin-1 input.)
    return hashlib.md5(str(seed).encode("utf-8")).hexdigest()
import asyncio
async def report(database, year, month, limit):
    """Get a report.

    Builds a monthly activity summary by running five aggregate queries
    concurrently against the matches/players tables.

    :param database: async database handle with fetch_one/fetch_all
    :param year: report year (matched against matches.played)
    :param month: report month (matched against matches.played)
    :param limit: maximum entries for the ranked sub-lists
    :return: dict with totals, per-player rankings, popular maps and the
        ids of the longest matches
    """
    # Total number of matches played in the month.
    matches_query = """
        select count(*) as count
        from matches
        where extract(year from played)=:year and extract(month from played)=:month
    """
    # Number of distinct users who played at least one match.
    players_query = """
        select count(distinct players.user_id) as count
        from matches join players on matches.id=players.match_id
        where extract(year from played)=:year and extract(month from played)=:month
    """
    # Users ranked by number of matches played (anonymous players excluded).
    most_matches_query = """
        select players.user_id, players.platform_id, players.user_name, count(matches.id) as count
        from players join matches on players.match_id=matches.id
        where players.user_id != '' and
            extract(year from matches.played)=:year and extract(month from matches.played)=:month
        group by players.user_id, players.platform_id, players.user_name
        order by count(matches.id) desc
        limit :limit
    """
    # Map usage counts (limit applied later, after compute_map_data).
    popular_maps_query = """
        select map_name as name, count(map_name) as count
        from matches
        where extract(year from played)=:year and extract(month from played)=:month
        group by map_name
        order by count(map_name) desc
    """
    # Ids of the longest matches by duration.
    longest_matches_query = """
        select id
        from matches
        where extract(year from played)=:year and extract(month from played)=:month
        order by duration desc
        limit :limit
    """
    # Run all five queries concurrently.
    total_matches, total_players, most_matches, popular_maps, longest_matches = await asyncio.gather(
        database.fetch_one(matches_query, values={'year': year, 'month': month}),
        database.fetch_one(players_query, values={'year': year, 'month': month}),
        database.fetch_all(most_matches_query, values={'limit': limit, 'year': year, 'month': month}),
        database.fetch_all(popular_maps_query, values={'year': year, 'month': month}),
        database.fetch_all(longest_matches_query, values={'limit': limit, 'year': year, 'month': month}),
    )
    return {
        'year': year,
        'month': month,
        'total_matches': total_matches['count'],
        'total_players': total_players['count'],
        'most_matches': [dict(
            user=dict(id=m['user_id'], platform_id=m['platform_id'], name=m['user_name']),
            rank=i + 1,
            count=m['count']
        ) for i, m in enumerate(most_matches)],
        'popular_maps': compute_map_data(popular_maps)[:limit],
        'longest_match_ids': list(map(lambda m: m['id'], longest_matches))
    }
import sys
def compose_ntx_graph(input_file=None, delimiter=None, weighted=None):
    """
    This function creates a networkx graph from provided file
    :param input_file: Input file path
    :param delimiter: separator for the column of the input file
    :param weighted: Simple yes/no if the input file is weighted or not
    :return: networkx graph

    Exits the process (status 1) when the sanity check fails or the edge
    list cannot be parsed, matching command-line tool behaviour.
    """
    # Check sanity status of input
    sanity_status = file_operations.sanity_check(input_file, delimiter, weighted)
    # Get data for weighted networkx graph
    file_is_weighted = file_operations.is_weighted(weighted)
    if sanity_status != 1:
        print('Sanity check failed!', log_type='error', color='red')
        sys.exit(1)
    # The two branches only differed in the reader function and wording, so
    # pick those once instead of duplicating the try/except block.
    kind = 'weighted' if file_is_weighted else 'unweighted'
    reader = nx.read_weighted_edgelist if file_is_weighted else nx.read_edgelist
    print('Creating Networkx {} graph.....'.format(kind), log_type='info')
    try:
        # Return graph
        return reader(input_file, delimiter=delimiter, nodetype=int)
    except Exception as e:
        print('Can not create {} networkx graph. ERROR: {}'.format(kind, e), color='red', log_type='error')
        sys.exit(1)
def dy3(vector, g, m1, m2, L1, L2):
    """
    Abbreviations
        M = m0 + m1
        S = sin(y1 - y2)
        C = cos(y1 - y2)
        s1 = sin(y1)
        s2 = sin(y2)
    Equation
              g*[m2 * C * s2 - M * s1] - S*m2*[L1 * y3^2 * C + L2*y4^2]
        y3' = -------------------------------------------------------------
                              L1*[M - m2*C^2]
    """
    y1, y2, y3, y4 = vector
    M, S, C, s1, s2 = abbreviate(m1, m2, y1, y2)
    # Assemble the equation in named pieces for readability.
    gravity_term = g * (m2 * C * s2 - M * s1)
    coupling_term = S * m2 * (L1 * C * y3 ** 2 + L2 * y4 ** 2)
    denominator = L1 * (M - m2 * C ** 2)
    return (gravity_term - coupling_term) / denominator
from typing import Any
def getType(resp: falcon.Response, class_type: str, method: str) -> Any:
    """Return the @type of object allowed for POST/PUT."""
    operations = get_doc(resp).parsed_classes[class_type]["class"].supportedOperation
    for operation in operations:
        if operation.method == method:
            # NOTE: replace (not split) so every 'vocab:' occurrence is removed.
            return operation.expects.replace("vocab:", "")
def full_data_numeric():
    """Return a small all-numeric DataFrame fixture (10 rows, columns a/c/e)."""
    return pd.DataFrame({
        'a': [2, 2, 2, 3, 4, 4, 7, 8, 8, 8],
        'c': [1, 2, 3, 4, 4, 4, 7, 9, 9, 9],
        'e': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
    })
def fix(x):
    """
    Replaces spaces with tabs, removes spurious newlines, and lstrip()s each
    line. Makes it really easy to create BED files on the fly for testing and
    checking.
    """
    out_lines = []
    for raw_line in x.splitlines():
        stripped = raw_line.lstrip()
        if not stripped:
            # Drop blank (or whitespace-only) lines entirely.
            continue
        # A trailing tab is meaningful in BED-ish data: preserve it.
        trailing = '\t' if stripped.endswith('\t') else ''
        out_lines.append('\t'.join(stripped.split()) + trailing + '\n')
    return ''.join(out_lines)
def get_collect_method(collect_method_name):
    """Return the collect method matching the given name.

    :raises RuntimeError: when no collect method with that name exists
    """
    try:
        return CollectMethod.get(name=collect_method_name)
    except ValueError:
        raise RuntimeError(f'Collect Method {collect_method_name} not found!')
def TriangleBackwardSub(U,b):
    """C = TriangleBackwardSub(U,b)
    Solve linear system UC = b

    NOTE(review): despite the name, this delegates to a general dense solver
    (`solve`, presumably numpy/scipy's linalg.solve from this module's
    imports — confirm) and does not exploit the triangular structure of U.
    Verify whether a true O(n^2) back-substitution was intended.

    :param U: coefficient matrix, assumed upper-triangular — TODO confirm
    :param b: right-hand-side vector
    :return: solution vector C
    """
    C = solve(U,b)
    return C
from unittest.mock import patch
def test_binance_query_balances_unknown_asset(function_scope_binance):
    """Test that if a binance balance query returns unknown asset no exception
    is raised and a warning is generated. Same for unsupported asset."""
    binance = function_scope_binance

    def mock_unknown_asset_return(url):  # pylint: disable=unused-argument
        return MockResponse(200, BINANCE_BALANCES_RESPONSE)

    with patch.object(binance.session, 'get', side_effect=mock_unknown_asset_return):
        balances, msg = binance.query_balances()

    # Only the two known, supported assets survive the query.
    assert msg == ''
    assert len(balances) == 2
    assert balances[A_BTC]['amount'] == FVal('4723846.89208129')
    assert balances[A_ETH]['amount'] == FVal('4763368.68006011')
    # The unknown and the unsupported asset each produce one warning.
    warnings = binance.msg_aggregator.consume_warnings()
    assert len(warnings) == 2
    assert 'unknown binance asset IDONTEXIST' in warnings[0]
    assert 'unsupported binance asset ETF' in warnings[1]
def petsc_to_stencil(x, Xh):
    """Convert a PETSc vector into StencilVector or BlockVector format on Xh."""
    raw_values = x.array
    return array_to_stencil(raw_values, Xh)
from typing import Optional
import contextlib
def index_internal_txs_task(self) -> Optional[int]:
    """
    Find and process internal txs for monitored addresses

    Presumably runs as a bound Celery task (it takes ``self``) — confirm
    against the task registration. Uses a distributed lock so only one
    instance runs at a time.

    :return: Number of addresses processed, or None when another instance
        already holds the lock
    """
    # If the lock is already taken, LockError is suppressed and the task
    # returns None without doing any work.
    with contextlib.suppress(LockError):
        with only_one_running_task(self):
            logger.info("Start indexing of internal txs")
            number_traces = InternalTxIndexerProvider().start()
            logger.info("Find internal txs task processed %d traces", number_traces)
            if number_traces:
                # New traces were indexed: kick off async decoding of them.
                logger.info("Calling task to process decoded traces")
                process_decoded_internal_txs_task.delay()
            return number_traces
import math
def make_lagrangian(func, equality_constraints):
    """Make a Lagrangian function from an objective function `func` and `equality_constraints`

    NOTE(review): `math.pytree_dot` is not part of the stdlib `math` module;
    `math` here must be a local module providing pytree utilities — confirm
    this file's imports.

    Args:
        func (callable): Unary callable with signature `f(x, *args, **kwargs)`
        equality_constraints (callable): Unary callable with signature `h(x, *args, **kwargs)`
    Returns:
        tuple: Triple of callables (init_multipliers, lagrangian, get_params)
    """
    def init_multipliers(params, *args, **kwargs):
        """Initialize a zero multiplier pytree shaped like h(params)."""
        # eval_shape avoids actually evaluating the constraints.
        h = jax.eval_shape(equality_constraints, params, *args, **kwargs)
        multipliers = tree_util.tree_map(lambda x: np.zeros(x.shape, x.dtype), h)
        return params, multipliers
    def lagrangian(params, multipliers, *args, **kwargs):
        """Evaluate -f(params) + <multipliers, h(params)>."""
        h = equality_constraints(params, *args, **kwargs)
        return -func(params, *args, **kwargs) + math.pytree_dot(multipliers, h)
    def get_params(opt_state):
        """Extract the primal parameters from the (params, multipliers) state."""
        return opt_state[0]
    return init_multipliers, lagrangian, get_params
import argparse
def render_task(task: todotxt.Task, namespace: argparse.Namespace, level: int = 0) -> str:
    """Render one task."""
    # Nested tasks get a bullet prefix indented one space per extra level.
    if level:
        prefix = (level - 1) * " " + "- "
    else:
        prefix = ""
    body = colorize(reference(task, namespace), namespace)
    children = render_blocked_tasks(task, namespace, level)
    return prefix + body + children
def ungroup(expr):
    """Helper to undo pyparsing's default grouping of And expressions,
    even if all but one are non-empty.
    """
    # Flatten the result by keeping only the first (and only) token.
    return TokenConverter(expr).addParseAction(lambda toks: toks[0])
import os
def remove_tmp_directories():
    """
    Remove the temporary directories registered in ``tmp_directories``.

    Returns:
        True
    """
    import shutil  # local import so no file-level import changes are needed

    for tmp_dir in tmp_directories:
        # os.remove() raises OSError on directories; rmtree deletes the
        # whole directory tree as the function name promises.
        shutil.rmtree(tmp_dir)
    return True
from typing import Type
from typing import List
from typing import cast
def get_actual_type(arg_type: Type, kind: int,
                    tuple_counter: List[int]) -> Type:
    """Return the type of an actual argument with the given kind.

    If the argument is a *arg, return the individual argument item.

    Args:
        arg_type: Declared type of the actual argument expression.
        kind: Argument kind constant (``nodes.ARG_STAR``, ``nodes.ARG_STAR2``,
            or a plain positional/named kind).
        tuple_counter: Single-element mutable counter of how many items of a
            tuple ``*arg`` have been consumed so far; incremented here as a
            side effect so successive calls walk through the tuple items.
    """
    if kind == nodes.ARG_STAR:
        if isinstance(arg_type, Instance):
            if arg_type.type.fullname() == 'builtins.list':
                # List *arg.
                return arg_type.args[0]
            elif arg_type.args:
                # TODO try to map type arguments to Iterable
                return arg_type.args[0]
            else:
                # Instance without type arguments: nothing to extract.
                return AnyType()
        elif isinstance(arg_type, TupleType):
            # Get the next tuple item of a tuple *arg.
            tuplet = cast(TupleType, arg_type)
            tuple_counter[0] += 1
            # Counter was just advanced, so -1 selects the current item.
            return tuplet.items[tuple_counter[0] - 1]
        else:
            return AnyType()
    elif kind == nodes.ARG_STAR2:
        if isinstance(arg_type, Instance) and (
                (cast(Instance, arg_type)).type.fullname() == 'builtins.dict'):
            # Dict **arg. TODO more general (Mapping)
            return (cast(Instance, arg_type)).args[1]
        else:
            return AnyType()
    else:
        # No translation for other kinds.
        return arg_type
def test_qnn_legalize():
    """Test directly replacing an operator with a new one.

    Registers a QNN legalization hook for ``qnn.requantize`` (NOTE: the
    registration is global and persists after this test returns), then checks
    that the generic Relay Legalize pass leaves the graph unchanged while the
    QNN Legalize pass rewrites it through the registered hook.
    """
    def before():
        # Original graph: a single int8 requantize of the input var.
        x = relay.var("x", shape=(1, 64, 56, 56), dtype='int8')
        y = relay.qnn.op.requantize(x,
                                    input_scale=1,
                                    input_zero_point=0,
                                    output_scale=1,
                                    output_zero_point=0,
                                    out_dtype='int8')
        y = relay.Function([x], y)
        return y
    @register_qnn_legalize("qnn.requantize", level=100)
    def legalize_qnn_requantize(attrs, inputs, types):
        # Hook: prepend an add of constant 0 before the requantize so the
        # rewrite is observable in the graph structure.
        data = inputs[0]
        data = relay.add(relay.const(0, 'int8'), data)
        y = relay.qnn.op.requantize(data,
                                    input_scale=1,
                                    input_zero_point=0,
                                    output_scale=1,
                                    output_zero_point=0,
                                    out_dtype='int8')
        return y
    def expected():
        # Graph expected after QNN legalization: add(0, x) feeding requantize.
        x = relay.var("x", shape=(1, 64, 56, 56), dtype='int8')
        y = relay.add(relay.const(0, 'int8'), x)
        z = relay.qnn.op.requantize(y,
                                    input_scale=1,
                                    input_zero_point=0,
                                    output_scale=1,
                                    output_zero_point=0,
                                    out_dtype='int8')
        z = relay.Function([x], z)
        return z
    a = before()
    # Check that Relay Legalize does not change the graph.
    a = run_opt_pass(a, relay.transform.Legalize())
    b = run_opt_pass(before(), transform.InferType())
    assert analysis.alpha_equal(a, b), "Actual = \n" + str(a)
    # Check that QNN Legalize modifies the graph.
    a = run_opt_pass(a, relay.qnn.transform.Legalize())
    b = run_opt_pass(expected(), transform.InferType())
    assert analysis.alpha_equal(a, b), "Actual = \n" + str(a)
import yaml
def load(data, schema, yamlLoader=yaml.UnsafeLoader):
    """
    Parse *data* and validate it against *schema*.

    Data must be either JSON or YAML; it may be a dictionary, a path, or a
    string of JSON. Schema must be JSON: a dictionary, a path, or a string
    of JSON. JSON input is dispatched to the JSON loader, everything else
    goes through the YAML loader.
    """
    if isJson(data):
        result = loadAndValidateJson(data, schema)
    else:
        result = loadAndValidateYaml(data, schema, yamlLoader=yamlLoader)
    return result
from typing import List
def calculate_slice_rotations(im_stack: np.ndarray, max_rotation: float = 45) -> List[float]:
    """Calculate the rotation angle to align each slice so the
    object's long axis is aligned with the horizontal axis.

    Parameters
    ----------
    im_stack : np.ndarray
        A stack of images with shape (z, y, x). The images should be binary
        or label images; regions are found and processed with the
        scikit-image label/regionprops functions.
    max_rotation : float
        The maximum allowed rotation between consecutive slices in degrees.
        If this value is exceeded, it is assumed that the opposite
        orientation was found and the angle is flipped by 180 degrees.
        The default value is 45.

    Returns
    -------
    rotations : List[float]
        The rotation for each slice in degrees.
    """
    # NOTE: the previous version also accumulated `previous_values` and
    # `rotations_raw`, which were never read; both have been removed.
    rotations: List[float] = []
    previous_angle = 0.0
    for index, im in enumerate(im_stack):
        props = regionprops(im.astype(int))
        if len(props) > 0:
            # regionprops orientation is in radians; convert to degrees and
            # offset by 90 so 0 means the long axis is horizontal.
            angle_in_degrees = props[0].orientation * (180 / np.pi) + 90
        else:
            # No labelled region in this slice: fall back to no rotation.
            angle_in_degrees = 0
        if index > 0 and abs(previous_angle - angle_in_degrees) > max_rotation:
            # Jump larger than max_rotation: assume the flipped orientation
            # was detected and use the complementary angle instead.
            angle_in_degrees = -1 * (180 - angle_in_degrees)
        previous_angle = angle_in_degrees
        rotations.append(angle_in_degrees)
    return rotations
def calc_rest_interval(data):
    """
    SubTool for Investigate: after median_deviation filters through all the
    points, run entropy on the remaining non-rest points. This will filter
    the close-but-could-still-be-rest points.

    Each candidate is swapped out for the median of ``data`` and the entropy
    of the modified list is compared with the original; a drop below 80% of
    the original entropy marks the candidate as rest.

    Returns:
        tuple: (result, average, rest) — the surviving non-rest points, the
        median of ``data``, and the accumulated rest points.
    """
    # NOTE: the previous version defined `maximum = 0.0` and `array = lst`,
    # neither of which was meaningfully used; both have been removed.
    lst, rest = median_deviation(data)
    average = median(data)
    st_entropy = entropy(lst)
    result = []
    for instance in lst:
        # Replace the candidate (first occurrence) with the median and
        # re-measure the entropy of the perturbed list.
        temp_array = list(lst)
        temp_array.remove(instance)
        temp_array.insert(0, average)
        ne_entropy = entropy(temp_array)
        if ne_entropy < (st_entropy - st_entropy / 5):
            rest.append(instance)
        else:
            result.append(instance)
    return result, average, rest
def get_objanno(fin_anno, godag, namespace='all'):
    """Build an annotation object for the given annotation file."""
    return get_objanno_factory(get_anno_fullname(fin_anno), godag=godag, namespace=namespace)
def create_table_descriptives(datasets):
    """Merge per-dataset descriptive tables into a single frame.

    Each element of *datasets* must be readable by ``pd.read_json`` with
    ``orient="index"``; the resulting frames are stacked row-wise and the
    combined index is named ``dataset_name``.
    """
    frames = [pd.read_json(dataset, orient="index") for dataset in datasets]
    merged = pd.concat(frames, axis=0)
    merged.index.name = "dataset_name"
    return merged
def get_ngram_universe(sequence, n):
    """
    Compute the universe of possible ngrams given a sequence: the number of
    distinct symbols raised to the power n. Where n equals the sequence
    length, the result is the sequence universe.

    Example
    --------
    >>> sequence = [2,1,1,4,2,2,3,4,2,1,1]
    >>> ps.get_ngram_universe(sequence, 3)
    64
    """
    # If recurrence is possible, the universe is k^t (SSA pg 68). Guard
    # against enormous powers by returning a sentinel string instead.
    alphabet_size = len(set(sequence))
    if alphabet_size > 10 and n > 10:
        return 'really big'
    return alphabet_size ** n
from typing import Union
from typing import Iterable
def as_nested_dict(
    obj: Union[DictLike, Iterable[DictLike]], dct_class: type = DotDict
) -> Union[DictLike, Iterable[DictLike]]:
    """
    Recursively convert an object formatted as a dictionary (and any nested
    dictionaries it contains) into the provided dct_class.

    Args:
        - obj (Any): An object that is formatted as a `dict`
        - dct_class (type): the `dict` class to use (defaults to DotDict)

    Returns:
        - A `dct_class` representation of the object passed in
    """
    if isinstance(obj, (list, tuple, set)):
        # Preserve the container type while converting each element.
        return type(obj)(as_nested_dict(item, dct_class) for item in obj)
    if isinstance(obj, Box):
        # Recursing into __dict__ would pull out Box's "private" keys, so
        # special-case Box: plain dict() conversion handles nesting itself.
        return dict(obj)
    if isinstance(obj, (dict, DotDict)):
        # DotDicts may have keys shadowing `update`/`items`; go through
        # __dict__ when it exists instead of calling those methods.
        source = getattr(obj, "__dict__", obj)
        return dct_class(
            {key: as_nested_dict(value, dct_class) for key, value in source.items()}
        )
    return obj
def twoindices_positive_up_to(n, m):
    """
    Build all 2D integer index pairs (i, j) with 0 <= i <= n and 0 <= j <= m.

    Args:
        n (int): positive inclusive upper bound of the first index.
        m (int): positive inclusive upper bound of the second index.

    Returns:
        np.ndarray: array of shape ((n + 1) * (m + 1), 2) scanning the first
        index fastest: (0, 0), (1, 0), ..., (n, 0), (0, 1), ...

    Raises:
        ValueError: if n or m is not a positive integer.
    """
    # Validate both bounds; previously only n was checked, so a bad m
    # silently produced a degenerate or malformed grid.
    for bound in (n, m):
        if not isinstance(bound, int) or bound <= 0:
            raise ValueError("%s is not a positive integer" % str(bound))
    nbpos_n = n + 1
    nbpos_m = m + 1
    # Complex step in mgrid means "number of points" (inclusive endpoints).
    gripos = np.mgrid[: n : nbpos_n * 1j, : m : nbpos_m * 1j]
    indices_pos = np.reshape(gripos.T, (nbpos_n * nbpos_m, 2))
    return indices_pos
import os
import glob
def get_data_lists(data, MOT=False):
    """ Prepare rolo data for SORT

    Arguments:
        data: config of the following form:
            {
                'image_folder': data_folder + 'images/train/',
                'annot_folder': data_folder + 'annotations/train/',
                'detected_folder': data_folder + 'detected/train/',
                'sort_det_folder': data_folder + 'sort/train/'
            }
        MOT: if True, detection files are assumed to already be MOT-format
            .txt files in 'sort_det_folder'; otherwise the .npy detections
            are converted via change_box_format.

    Returns:
        video_annots: list of annotation file paths
        video_folders: list of video folder paths
        det_list : path name list of detected results
    """
    if not os.path.exists(data['image_folder']):
        raise IOError("Wrong image folder path:", data['image_folder'])
    else:
        print("Data folder:", data['image_folder'])
    if not os.path.exists(data['annot_folder']):
        raise IOError("Wrong annotation folder path:", data['annot_folder'])
    else:
        print("Annotations folder:", data['annot_folder'])
    # Get the annotations as a list: [video1ann.txt, video2ann.txt, video3ann.txt, ...]
    video_annots = sorted(glob.glob((data['annot_folder'] + "*")))
    sort_nicely(video_annots)
    if not os.path.exists(data['detected_folder']):
        os.makedirs(data['detected_folder'])
    else:
        print("Detected folder:", data['detected_folder'])
    # NOTE(review): when detections are missing for some videos, this prints
    # the counts and terminates the whole process via exit().
    if len(glob.glob((data['detected_folder'] + "*"))) < len(video_annots):
        print(len(glob.glob((data['detected_folder'] + "*"))))
        print(len(video_annots))
        exit()
    video_folders_list = sorted(glob.glob((data['image_folder'] + '*')))
    sort_nicely(video_folders_list)
    # Run (or refresh) detection for every video before building the lists.
    detect_videos(video_annots, video_folders_list, data['detected_folder'])
    video_folders = []
    det_list = []
    for i, annot_path in enumerate(video_annots):
        video_name = splitext(basename(annot_path))[0]  # Get the file name from its full path
        video_folder = os.path.join(data['image_folder'], video_name)
        if not os.path.exists(video_folder):
            raise IOError("Video folder does not exit:", video_folder)
        video_folders.append(video_folder)
        detected_name = os.path.join(data['detected_folder'], video_name + '.npy')
        if not os.path.exists(detected_name):
            raise IOError("Detected file does not exit:", detected_name)
        if not os.path.exists(data['sort_det_folder']):
            os.makedirs(data['sort_det_folder'])
        if MOT:
            mot_det_path = data['sort_det_folder'] + video_name + '.txt'
        else:
            # Convert the .npy detections to a MOT-style .txt file.
            mot_det_path = change_box_format(detected_name, data['sort_det_folder'], video_name)
        det_list.append(mot_det_path)
    return video_annots, video_folders, det_list
def ban_user(request, user):
    """Ban the user with the given username.

    Looks the user up by name (404 if absent). No-op bans and self bans are
    refused with a flash message; otherwise the ban is performed through
    admin_utils and the caller is redirected to the request's next URL
    (falling back to the admin bans page).
    """
    target = User.query.filter_by(username=user).first()
    if target is None:
        raise NotFound()
    next = request.next_url or url_for('admin.bans')
    if target.is_banned:
        request.flash(_(u'The user is already banned.'))
        return redirect(next)
    if target == request.user:
        request.flash(_(u'You cannot ban yourself.'), error=True)
        return redirect(next)
    admin_utils.ban_user(target)
    request.flash(_(u'The user “%s” was successfully banned and notified.') %
                  target.username)
    return redirect(next)
def lastDate():
    """Return the total revenue of the most recent day, rounded to 2 decimals."""
    latest_row = totalDate().tail(1)
    return latest_row.iloc[0]['total'].round(2)
from libmkMeteo import mkmeteo4lingrars
from liblingraRS import lingrars
def processlingrarow(col, rows, pixelWidth, pixelHeight, xO, yO, plot, netcdffile, rsdir, becsmosdir):
    """
    Launch LingraRS processing for every pixel of one raster column.

    :param col: column index in the Grassland raster file
    :param rows: total rows in the Grassland raster file
    :param pixelWidth: Projected pixel width size
    :param pixelHeight: Projected pixel height size
    :param xO: Projected X origin
    :param yO: Projected Y Origin
    :param plot: Do you plot graphs of the run (False or True)
    :param netcdffile: ERA5 netcdf file to extract weather data from
    :param rsdir: RS data directory
    :param becsmosdir: RS data directory for BEC-SMOS 1km soil moisture
    :return: list with one record per row:
        [col, row, tiller*1000, yielD*1000, wlvg*1000, wlvd1*1000, wa*1000,
        grass*1000, tracu, evacu] (values scaled by 1000 for INT32 maps)
    """
    # Import local libraries
    # import main lingraRS library
    result = []
    for row in range(rows):
        # Convert (col, row) raster indices to projected coordinates.
        longitude = col * pixelWidth + xO
        latitude = yO - row * pixelHeight
        # print(col, row, longitude, latitude, data)
        # Create the Meteo and RS data parameterisation for lingraRS
        meteolist = mkmeteo4lingrars(netcdffile, rsdir, becsmosdir, longitude, latitude)
        # Run the model
        (tiller, yielD, wlvg, wlvd1, wa, grass, tracu, evacu) = lingrars(latitude, meteolist, plot)
        # exit() TODO plot the graphs and check if all ok
        # Let the pixels fit into each map (*1000 bc INT32 maps)
        # TODO check values out for print("parcu=", parcu)
        result.append([col, row, tiller * 1000, yielD * 1000, wlvg * 1000, wlvd1 * 1000, wa * 1000, grass * 1000, tracu, evacu])
    return result
import json
def multitask_result(request):
    """Return the per-host logs of a multi-host task as a JSON list."""
    task = models.Task.objects.get(id=request.GET.get('task_id'))
    log_values = task.tasklog_set.values(
        'id', 'status',
        'host_user_bind__host__hostname',
        'host_user_bind__host__ip_addr',
        'result',
    )
    return HttpResponse(json.dumps(list(log_values)))
from typing import Dict
def process_xpath_list(node, property_manifest: Dict):
    """
    Evaluate every XPath expression of a property against an input node and
    return the processed values.

    :param node: Input node (a falsy node yields an empty list)
    :param property_manifest: Manifest snippet of the property; its "xpath"
        entry is a list of expressions, where "." denotes the node itself
    :return: Flat list of processed values, one per matched child node
    """
    if not node:
        return []

    def resolve(current_node, path):
        # "." means the current node itself; anything else is a real query.
        return [current_node] if path == "." else current_node.xpath(path)

    values = []
    for path in property_manifest["xpath"]:
        for child_node in resolve(node, path):
            values.append(process_property_value(child_node, property_manifest))
    return values
def collect_username_and_password(db: Session) -> UserCreate:
    """Prompt for a username and password, validate them, and return the data.

    The password must be entered twice and match, and the username must not
    already exist in the database.

    Raises:
        ValueError: if the passwords do not match or the username is taken.
            (Previously a bare ``Exception`` was raised, which callers could
            not catch specifically; ValueError remains catchable by any
            existing ``except Exception`` handler.)
    """
    username = get_username("Enter your username: ")
    password = get_password("Enter your password: ")
    verify_pass = get_password("Enter your password again: ")
    if password != verify_pass:
        raise ValueError("Passwords do not match.")
    user_data = UserCreate(username=username, password=password)
    existing = FidesopsUser.get_by(db, field="username", value=user_data.username)
    if existing:
        raise ValueError(f"User with username '{username}' already exists.")
    return user_data
def replace_header(input_df):
    """Promote the first row of the dataframe to be its column headers."""
    header_row = input_df.iloc[0]
    body = input_df[1:]
    body.columns = header_row
    return body
def cartToRadiusSq(cartX, cartY):
    """Return the squared radius for the given Cartesian coordinates."""
    return cartX * cartX + cartY * cartY
def unique_entity_id(entity):
    """
    :param entity: django model
    :return: unique token combining the model type and id for use in HTML
    """
    return f"{type(entity).__name__}-{entity.id}"
def normalize_tuple(value, n, name):
    """Transforms a single int or iterable of ints into an int tuple.

    # Arguments
        value: The value to validate and convert. Could be an int, or any iterable
            of ints.
        n: The size of the tuple to be returned.
        name: The name of the argument being validated, e.g. `strides` or
            `kernel_size`. This is only used to format error messages.

    # Returns
        A tuple of n integers.

    # Raises
        ValueError: If something else than an int/long or iterable thereof was
            passed.
    """
    if isinstance(value, int):
        # A bare int is broadcast to all n positions.
        return (value,) * n
    try:
        value_tuple = tuple(value)
    except TypeError:
        raise ValueError('The `{}` argument must be a tuple of {} '
                         'integers. Received: {}'.format(name, n, value))
    if len(value_tuple) != n:
        raise ValueError('The `{}` argument must be a tuple of {} '
                         'integers. Received: {}'.format(name, n, value))
    for single_value in value_tuple:
        try:
            int(single_value)
        # int(None) and similar raise TypeError rather than ValueError;
        # catch both so callers always see the documented ValueError.
        except (ValueError, TypeError):
            raise ValueError('The `{}` argument must be a tuple of {} '
                             'integers. Received: {} including element {} '
                             'of type {}'.format(name, n, value, single_value,
                                                 type(single_value)))
    return value_tuple
def load_user(user_id):
    """
    Flask-Login user loader: given the session-stored user_id, fetch the
    matching User record from the database and return it.
    """
    numeric_id = int(user_id)
    return User.query.get(numeric_id)
def replaceall(table, a, b):
    """
    Convenience function to replace all instances of `a` with `b` under all
    fields. See also :func:`convertall`.

    .. versionadded:: 0.5
    """
    mapping = {a: b}
    return convertall(table, mapping)
def get_job_metadata(ibs, jobid):
    """
    Web call that returns the metadata of a job

    Args:
        ibs: controller object whose ``job_manager.jobiface`` proxies the
            background job engine.
        jobid: identifier of the job to look up (unknown ids are handled by
            the job engine itself; see the example below).

    Returns:
        the metadata reported by the job engine for ``jobid``

    CommandLine:
        # Run Everything together
        python -m wbia.web.job_engine --exec-get_job_metadata
        # Start job queue in its own process
        python -m wbia.web.job_engine job_engine_tester --bg
        # Start web server in its own process
        ./main.py --web --fg
        pass
        # Run foreground process
        python -m wbia.web.job_engine --exec-get_job_metadata:0 --fg
    Example:
        >>> # xdoctest: +REQUIRES(--web-tests)
        >>> # xdoctest: +REQUIRES(--slow)
        >>> # xdoctest: +REQUIRES(--job-engine-tests)
        >>> # xdoctest: +REQUIRES(--web-tests)
        >>> from wbia.web.job_engine import *  # NOQA
        >>> import wbia
        >>> with wbia.opendb_bg_web('testdb1', managed=True) as web_ibs:  # , domain='http://52.33.105.88')
        ...     # Test get metadata of a job id that does not exist
        ...     response = web_ibs.send_wbia_request('/api/engine/job/metadata/', jobid='badjob')
    """
    # Pure delegation to the job engine interface; no local caching.
    status = ibs.job_manager.jobiface.get_job_metadata(jobid)
    return status
import math
def create_learning_rate_scheduler(max_learn_rate, end_learn_rate, warmup_proportion, n_epochs):
    """Learning rate scheduler: linear warm-up then exponential decay.

    The rate increases linearly within the warm-up epochs and then decays
    exponentially towards end_learn_rate.

    Args:
        max_learn_rate: Float. Maximum learning rate.
        end_learn_rate: Float. Scheduler converges to this value.
        warmup_proportion: Float. How many epochs to increase linearly, before decaying.
        n_epochs: Float. Maximum number of epochs training will run.

    Returns:
        Keras learning rate scheduler
    """
    warmup_epoch_count = int(warmup_proportion * n_epochs)
    # Loop-invariant decay constant: log of the total decay ratio.
    decay_log_ratio = math.log(end_learn_rate / max_learn_rate)

    def lr_scheduler(epoch):
        if epoch < warmup_epoch_count:
            # Linear ramp; epoch 0 starts at max/warmup rather than zero.
            lr = (max_learn_rate / warmup_epoch_count) * (epoch + 1)
        else:
            progress = (epoch - warmup_epoch_count + 1) / (n_epochs - warmup_epoch_count + 1)
            lr = max_learn_rate * math.exp(decay_log_ratio * progress)
        return float(lr)

    return tf.keras.callbacks.LearningRateScheduler(lr_scheduler, verbose=1)
def matchyness(section, option):
    """Assign a numerical 'matchyness' value between target and value.

    Parameters:
        section -- target value
        option -- proposed match
    """
    if section != option:
        return _hc.NEQ
    section_is_ph = isinstance(section, rt.flask_placeholder)
    option_is_ph = isinstance(option, rt.flask_placeholder)
    if section_is_ph:
        # Placeholder target: placeholder-placeholder vs placeholder-string.
        return _hc.PP if option_is_ph else _hc.PS
    if option.value == section.value:
        return _hc.SS  # String - string
    if option_is_ph:
        return _hc.SP  # String - placeholder
    return _hc.NEQ
def log_sum_exp_elem(*a):
    """Numerically stable log-sum-exp over the given elements.

    :param a: elements
    :return: (a[0].exp() + a[1].exp() + ...).log(), computed by shifting all
        elements by the (detached) maximum so no single exp overflows.
    """
    shift = max(a).detach()
    total = sum((elem - shift).exp() for elem in a)
    return shift + total.log()
def car_portrayal(agent):
    """Visualise a car for the Mesa webserver.

    :return: Dictionary containing the drawing settings of an agent,
        or None when no agent is given."""
    if agent is None:
        return
    portrayal = {}
    if isinstance(agent, CarAgent):
        # Cars spawned from a traffic light render as squares, others as circles.
        if agent.is_from_traffic_light:
            portrayal["Shape"] = "rect"
            portrayal["w"] = .7
            portrayal["h"] = .7
        else:
            portrayal["Shape"] = "circle"
            portrayal["r"] = .9
        portrayal["Layer"] = 0
        portrayal["Filled"] = "true"
        # The colour encodes the agent's current velocity.
        portrayal["Color"] = colour_spectrum[agent.velocity - 1]
    return portrayal
import re
from typing import OrderedDict
def _load_spc_format_type_a(filepath: str):
    """Load A(w,k) in the spc format type a.

    Args:
        filepath (str): spc file to read.

    Returns:
        np.ndarray, np.ndarray, np.ndarray, np.ndarray, OrderedDict:
        kcrt (indices of high-symmetry k-points), Awk, kdist, energy,
        kpath (index -> k-point label; labels are None for the short
        "(a1)" format, which carries no names).
    """
    # NOTE: an `if False:` block parsing emin/emax/ne/nhighsymkp and an
    # unused `kpoints = None` have been removed; behavior is unchanged.
    with open(filepath) as f:
        lines = [line.strip() for line in f.readlines()]
    lines_iter = iter(lines)
    line = next(lines_iter)
    s = line.strip().split()
    internal_format_type = s[-1]
    if internal_format_type == "(a)":
        # Skip the header line (emin/emax/ne/nhighsymkp); only the k-point
        # label line that follows is consumed here.
        next(lines_iter)
        line = next(lines_iter).strip()
        kstr_list = re.split(" +", line)
        kpath = OrderedDict()
        kcrt = []
        for kstr in kstr_list[1:]:
            s = kstr.split()
            idx = int(s[0])
            idx -= 1  # index convert from fortran to Python
            name = " ".join(s[1:]).replace("'", "")
            kpath[idx] = name
            kcrt.append(idx)
        kcrt = np.array(kcrt)
    elif internal_format_type == "(a1)":  # short format without kpoints
        line = next(lines_iter).strip()
        kstr_list = line.split()
        kpath = OrderedDict()
        kcrt = []
        for kstr in kstr_list[2:]:
            s = kstr.split()
            idx = int(s[0])
            idx -= 1  # index convert from fortran to Python
            kpath[idx] = None  # short format carries no k-point labels
            kcrt.append(idx)
        kcrt = np.array(kcrt)
    # Skip the remainder of the header, then one separator line.
    while True:
        line = next(lines_iter)
        if line.startswith("### end of header"):
            break
    next(lines_iter)
    Awk, kdist, energy = _load_spc_format_type_a_Awk(lines_iter)
    return kcrt, Awk, kdist, energy, kpath
def variable_to_json(var):
    """Converts a Variable object to dict/json struct"""
    return {'x': var.x, 'y': var.y, 'name': var.name}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.