content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def fake_map_matrix_T_without_enemy(map, mySide):
    """
    Forge a map type matrix with the opposing side's tanks removed.

    WARNING:
        Check that the field really holds the opponent's tank first, because
        the opponent may already be dead, or both sides' tanks may overlap.
        If our own tank happens to sit on that cell, blindly clearing it would
        erase our tank and break later assertions.

    :param map: the game map object (exposes ``matrix_T`` and ``tanks``)
    :param mySide: index of our side (0 or 1)
    :return: a copy of ``map.matrix_T`` with enemy tank cells set to EMPTY
    """
    map_ = map
    oppSide = 1 - mySide
    cMatrixMap = map_.matrix_T.copy()
    for oppTank in map_.tanks[oppSide]:
        if (cMatrixMap[oppTank.xy] == Field.TANK + 1 + oppSide
            or cMatrixMap[oppTank.xy] == Field.MULTI_TANK  # overlapping tanks
            ):
            cMatrixMap[oppTank.xy] = Field.EMPTY
    return cMatrixMap
def dict_factory(cursor, row):
    """
    Factory function to convert a sqlite3 result row into a dictionary.

    :param cursor: sqlite3 cursor object (provides ``.description`` metadata)
    :param row: a row tuple as returned by the cursor
    :return: dict mapping column names to the row's values
    """
    # cursor.description is a sequence of 7-tuples whose first item is the
    # column name (PEP 249); zip pairs each column with its row value.
    return {col[0]: value for col, value in zip(cursor.description, row)}
def ndcgREval(data, gt, gf):
    """
    Compute NDCG@R, where R is the number of relevant documents.

    :param data: ranked result list to score
    :param gt: cutoff rank used for both the result list and the ideal
        list -- presumably the number of relevant documents; TODO confirm
    :param gf: passed through to generate_ideal
    :return: DCG of the top-``gt`` results normalised by the ideal DCG
    """
    ideal = generate_ideal(gt, gf)
    dcgScore = dcg(data[:(gt)])
    norm = dcg(ideal[:(gt)][:len(data)])
    # NOTE(review): when norm == 0 this only prints a warning and then still
    # raises ZeroDivisionError on the division below.
    if (norm == 0): print('norm=0 in ndcgREval')
    return dcgScore / norm
def create_modifiers(y_position):
    """Create one box per creatable modifier class and lay them out.

    Modifier classes that cannot be instantiated (raise RuntimeError) are
    silently skipped.

    :param y_position: vertical layout offset passed to layout_objects
    :return: whatever layout_objects returns for the created boxes
    """
    def create():
        # Generator: yield a fresh box for every modifier that instantiates.
        for mod in rt.modifier.classes:
            try:
                created = mod()
                print(created)
                box = create_box()
                rt.addModifier(box, created)
                yield box
            except RuntimeError:
                # Some modifier classes are abstract / non-creatable.
                pass
    print("-- Modifiers")
    return layout_objects("Modifiers", create(), y_position)
from typing import List
def queue_busy_workers(queue: str) -> List[str]:
    """
    Return the names of the busy workers attached to the given queue.

    :param queue: name of the RQ queue to inspect
    :return: list of worker names whose state is "busy"
    """
    return [
        worker.name
        for worker in rq.Worker.all(queue=queue)
        if worker.state == "busy"
    ]
def coste(theta, X, Y):
    """
    Logistic-regression cost function.

    Computes J(theta) = -(1/m) * [log(H)^T Y + log(1-H)^T (1-Y)]
    for the given dataset, where H = sigmoid(X @ theta).

    :param theta: parameter vector
    :param X: design matrix, shape (m, n)
    :param Y: label vector, shape (m,)
    :return: scalar cost J(theta)
    """
    m = np.shape(X)[0]
    H = sigmoid(np.dot(X, theta))
    J = -1 / m * (np.log(H).transpose().dot(Y)
                  + np.log(1 - H).transpose().dot(1 - Y))
    return J
import torch
def test_simhash_table():
    """Tests the HashCounter with a deterministic mock hash function."""
    # pylint: disable=too-many-locals,unused-variable
    num_hash_buckets = 4
    debug = True
    class HashFn(object):
        """A mock hash function. Big-endian."""
        codes = None
        buckets = None
        @staticmethod
        def _i2b(i):
            # Big-endian binary representation of i, zero-padded to the
            # bucket bit width.
            bitwidth = int(torch.np.log2(num_hash_buckets))
            bin_rep = list(map(int, bin(i)[2:]))
            return [0]*(bitwidth - len(bin_rep)) + bin_rep
        def set_codes(self, bin_counts):
            """Sets the big-endian binary codes that the HashFn will return."""
            codes = []
            buckets = []
            for i, count in enumerate(bin_counts):
                codes.extend([self._i2b(i)]*count)
                buckets.extend([i]*count)
            # Shuffle codes/buckets in lockstep so ordering is not relied on.
            rp = torch.randperm(len(codes))
            self.codes = torch.FloatTensor(codes)[rp]
            self.buckets = torch.LongTensor(buckets)[rp]
        def __call__(self, _):
            return Variable(self.codes)
    hash_fn = HashFn()
    simhash_table = HashCounter(**locals())
    expected_counts_train = [1, 2, 0, 4]
    hash_fn.set_codes(expected_counts_train)
    toks = Variable(torch.LongTensor(sum(expected_counts_train), 4))
    assert (simhash_table(toks, 'counts2') == hash_fn.buckets).all()
    assert (simhash_table.counts2.numpy() == expected_counts_train).all()
    assert (simhash_table.counts == 0).all()
    expected_counts_test = [4, 3, 2, 1]
    hash_fn.set_codes(expected_counts_test)
    toks = Variable(torch.LongTensor(sum(expected_counts_test), 4))
    assert (simhash_table(toks) == hash_fn.buckets).all()
    assert (simhash_table.counts2.numpy() == expected_counts_train).all()
    assert (simhash_table.counts.numpy() == expected_counts_test).all()
import re
def replace_php_define(text, define, value):
    """
    Replaces a named constant (define) in PHP code.
    Args:
        text (str) : The PHP code to process.
        define (str) : Name of the named constant to modify.
        value (int,str) : Value to set the 'define' to.
    Returns:
        The modified PHP code.
    Raises:
        RuntimeError: if the value type is unsupported or the constant is
            not found in the given code.
    """
    # Raw strings: '\g' and '\s' are regex escapes, not string escapes.
    if isinstance(value, str):
        replacement = r"\g<1>'{0}'\g<2>".format(value)
    elif isinstance(value, int):
        replacement = r'\g<1>{0}\g<2>'.format(value)
    else:
        raise RuntimeError('Datatype is not supported.')
    regex = r"^(\s*define\s*\(\s*'{0}'\s*,\s*).*(\s*\)\s*;.*)".format(re.escape(define))
    # Replace only the first occurrence, line by line.
    text, substitutions = re.subn(regex, replacement, text, 1, re.MULTILINE | re.UNICODE)
    if substitutions == 0:
        raise RuntimeError("Named constant '{0}' is not part of the specified php code.".format(define))
    return text
import logging
def setup_logging(name: str, level=logging.DEBUG, json_logging=True) -> Logger:
"""
Sets up root logger.
"""
if json_logging:
setup_json_logging(level)
else:
setup_plain_logging(level)
# disable useless logging from flask
logging.getLogger('werkzeug').setLevel(logging.WARNING)
return logging.getLogger(name) | 2ba66ceaeac1ee04d403df576ba67781c7b87d2d | 3,631,608 |
def data_method(func):
    """
    Decorate object methods by tagging them as data methods.
    The generated data class will have the decorated methods in them.
    .. code:: python
        >>> from objetto.applications import Application
        >>> from objetto.objects import Object, attribute, data_method
        >>> class MyObject(Object):
        ...     value = attribute(int, default=0)
        ...
        ...     @data_method
        ...     def get_double(self):
        ...         return self.value * 2
        ...
        >>> app = Application()
        >>> my_obj = MyObject(app)
        >>> my_obj.value = 42
        >>> my_obj.get_double()
        84
        >>> my_obj.data.get_double()
        84
    :param func: The method function.
    :type func: function
    :return: Decorated method function.
    :rtype: function
    """
    @wraps(func)
    @simplify_exceptions
    def decorated(*args, **kwargs):
        # Thin pass-through wrapper; the tag below is what matters.
        return func(*args, **kwargs)
    setattr(decorated, DATA_METHOD_TAG, True)
    return decorated
def get_frontend_names():
    """Return the names of all supported frontends.

    Returns
    -------
    list : list of str
        A list of frontend names as strings.
    """
    return [frontend.name() for frontend in ALL_FRONTENDS]
import os
import inspect
def get_spec_file_path(step_class):
    """
    Given a Step (sub)class, divine and return the full path to the
    corresponding spec file. Use the fact that by convention, the spec file is
    in the same directory as the `step_class` source file. It has the name of
    the Step (sub)class and extension .spec.
    """
    step_source_file = os.path.abspath(inspect.getfile(step_class))
    # Since `step_class` could be defined in a file called whatever, we need
    # the source file basedir and the class name.
    # (renamed from `dir`, which shadowed the builtin)
    class_dir = os.path.dirname(step_source_file)
    return os.path.join(class_dir, step_class.__name__ + '.spec')
from gkutils.commonutils import getColour, getColourStats
import json
def getColourPlotData(g, r, i, z, y):
    """Collect the colour info from input filter data for plotting.

    :param g, r, i, z, y: per-filter photometry data (``y`` is currently
        unused here)
    :return: tuple (colours, colourPlotLimits, colourPlotLabelsJSON) where
        ``colours`` holds the g-r, r-i and i-z colour curves,
        ``colourPlotLimits`` the axis bounds, and ``colourPlotLabelsJSON``
        the JSON-encoded plot labels (with mean/trend appended when known)
    """
    colourPlotLimits = {}
    coloursJSON = []
    grColour = []
    riColour = []
    izColour = []
    colourPlotLabels = [{'label': 'g-r', 'color': 0, 'display': True},
                        {'label': 'r-i', 'color': 1, 'display': True},
                        {'label': 'i-z', 'color': 2, 'display': True}]
    meangr = None
    meanri = None
    meaniz = None
    grEvolution = None
    riEvolution = None
    izEvolution = None
    # Each colour needs both contributing filters; stats need >1 point.
    if g and r:
        grColour = getColour(g, r, FILTER_RECURRENCE_PERIOD_GR)
        if grColour and len(grColour) > 1:
            meangr, grEvolution = getColourStats(grColour)
            colourPlotLabels[0]["label"] += (' (avg = %4.2f mag,' % meangr) + (' trend = %5.3f mag/d)' % grEvolution)
    if r and i:
        riColour = getColour(r, i, FILTER_RECURRENCE_PERIOD_RI)
        if riColour and len(riColour) > 1:
            meanri, riEvolution = getColourStats(riColour)
            colourPlotLabels[1]["label"] += (' (avg = %4.2f mag,' % meanri) + (' trend = %5.3f mag/d)' % riEvolution)
    if i and z:
        izColour = getColour(i, z, FILTER_RECURRENCE_PERIOD_IZ)
        if izColour and len(izColour) > 1:
            meaniz, izEvolution = getColourStats(izColour)
            colourPlotLabels[2]["label"] += (' (avg = %4.2f mag,' % meaniz) + (' trend = %5.3f mag/d)' % izEvolution)
    xMin, xMax, yMin, yMax = getLimitsFromLCData([grColour, riColour, izColour])
    colourPlotLimits["xmin"] = xMin
    colourPlotLimits["xmax"] = xMax
    colourPlotLimits["ymin"] = yMin
    colourPlotLimits["ymax"] = yMax
    colours = [grColour, riColour, izColour]
    colourPlotLabelsJSON = [json.dumps(colourPlotLabels[0]), json.dumps(colourPlotLabels[1]), json.dumps(colourPlotLabels[2])]
    return colours, colourPlotLimits, colourPlotLabelsJSON
def show(*args, **kwargs):
    """Wrapper for make_figure(); forwards all arguments unchanged."""
    return make_figure(*args, **kwargs)
def set_title(node, title):
    """Sets the title of a link or image node.

    Args:
        node (cmark_node): The node to set the title attribute on
        title (string): Title as string
    Returns:
        int: 0 on failure, 1 on success
    """
    # cmark expects a C string, so convert before crossing the FFI boundary.
    title = to_c_string(title)
    return _cmark.node_set_title(node, title)
def get_image(filename, convert_rgb=True):
    """Return an image file's pixel data as a numpy array.

    :param filename: path of the image to load
    :param convert_rgb: convert to 3-channel RGB when True, otherwise to
        single-channel grayscale ("L")
    :return: numpy array of pixel values
    """
    image = Image.open(filename)
    # Source images are sometimes grayscale; normalise the mode explicitly.
    if convert_rgb:
        image = image.convert("RGB")
    else:
        image = image.convert("L")
    return np.array(image)
def find_order_to_apply_confirmation(domain, location):
    """
    Tries to find the EmergencyOrder that the receipt confirmation applies to.

    :param domain: the domain to search
    :param location: the SQLLocation that the confirmation is coming from
    :return: the most recent matching EmergencyOrder, or None if none found
    """
    # Most recent order first; LIMIT 1 keeps the query cheap.
    result = EmergencyOrder.objects.filter(
        domain=domain,
        location=location
    ).order_by('-timestamp')[:1]
    result = list(result)
    if len(result) > 0:
        return result[0]
    return None
def tiny(tmpdir):
    """Create a tiny fake brain and return (brain, device pixel ratio)."""
    # This is a minimal version of what we need for our viz-with-timeviewer
    # support currently
    subject = 'test'
    subject_dir = tmpdir.mkdir(subject)
    surf_dir = subject_dir.mkdir('surf')
    rng = np.random.RandomState(0)
    rr = rng.randn(4, 3)
    tris = np.array([[0, 1, 2], [2, 1, 3]])
    curv = rng.randn(len(rr))
    # Hand-write a FreeSurfer curvature file (magic bytes + header + data).
    with open(surf_dir.join('lh.curv'), 'wb') as fid:
        fid.write(np.array([255, 255, 255], dtype=np.uint8))
        fid.write(np.array([len(rr), 0, 1], dtype='>i4'))
        fid.write(curv.astype('>f4'))
    write_surface(surf_dir.join('lh.white'), rr, tris)
    write_surface(surf_dir.join('rh.white'), rr, tris)  # needed for vertex tc
    vertices = [np.arange(len(rr)), []]
    data = rng.randn(len(rr), 10)
    stc = SourceEstimate(data, vertices, 0, 1, subject)
    brain = stc.plot(subjects_dir=tmpdir, hemi='lh', surface='white',
                     size=_TINY_SIZE)
    # in principle this should be sufficient:
    #
    # ratio = brain.mpl_canvas.canvas.window().devicePixelRatio()
    #
    # but in practice VTK can mess up sizes, so let's just calculate it.
    sz = brain.plotter.size()
    sz = (sz.width(), sz.height())
    sz_ren = brain.plotter.renderer.GetSize()
    ratio = np.median(np.array(sz_ren) / np.array(sz))
    return brain, ratio
import glob
def find_exp_parameters(cfg, logger):
""" Extracts experimental parameters. """
hemi, space = cfg['hemi'], cfg['space']
space_idf = f'hemi-{hemi}*.func.gii' if 'fs' in space else 'desc-preproc_bold.nii.gz'
# Use all possible participants if not provided
if cfg['subject'] is None:
cfg['subject'] = [
op.basename(s).split('-')[1] for s in
sorted(glob(op.join(cfg['fprep_dir'], 'sub-*')))
if op.isdir(s)
]
logger.info(
f"Found {len(cfg['subject'])} participant(s) {cfg['subject']}")
else:
# Use a list by default
cfg['subject'] = [cfg['subject']]
# Use all sessions if not provided
if cfg['session'] is None:
cfg['session'] = []
for this_sub in cfg['subject']:
these_ses = [
op.basename(s).split('-')[1] for s in
sorted(
glob(op.join(cfg['fprep_dir'], f'sub-{this_sub}', 'ses-*')))
if op.isdir(s)
]
logger.info(
f"Found {len(these_ses)} session(s) for sub-{this_sub} {these_ses}")
these_ses = [None] if not these_ses else these_ses
cfg['session'].append(these_ses)
else:
cfg['session'] = [[cfg['session']]] * len(cfg['subject'])
# Use all tasks if no explicit task is provided
if cfg['task'] is None:
cfg['task'] = []
for this_sub, these_ses in zip(cfg['subject'], cfg['session']):
these_task = []
for this_ses in these_ses:
if this_ses is None: # only single session!
tmp = glob(op.join(
cfg['fprep_dir'],
f'sub-{this_sub}',
'func',
f"*space-{cfg['space']}*_{space_idf}"
))
else:
tmp = glob(op.join(
cfg['fprep_dir'],
f'sub-{this_sub}',
f'ses-{this_ses}',
'func',
f"*space-{cfg['space']}*_{space_idf}"
))
these_ses_task = list(set(
[op.basename(f).split('task-')[1].split('_')[0]
for f in tmp]
))
these_task.append(these_ses_task)
to_add = "" if this_ses is None else f"and ses-{this_ses}"
msg = f"Found {len(these_ses_task)} task(s) for sub-{this_sub} {to_add} {these_ses_task}"
logger.info(msg)
cfg['task'].append(these_task)
else:
all_ses_tasks = []
for this_sub, these_ses in zip(cfg['subject'], cfg['session']):
these_task = []
for this_ses in these_ses:
if this_ses is None:
tmp = glob(op.join(
cfg['fprep_dir'],
f'sub-{this_sub}',
'func',
f"*task-{cfg['task']}_*_space-{cfg['space']}*_{space_idf}"
))
else:
tmp = glob(op.join(
cfg['fprep_dir'],
f'sub-{this_sub}',
f'ses-{this_ses}',
'func',
f"*task-{cfg['task']}_*_space-{cfg['space']}*_{space_idf}"
))
if tmp:
these_task.append([cfg['task']])
else:
these_task.append([None])
all_ses_tasks.append(these_task)
cfg['task'] = all_ses_tasks
# If --pool-sessions, then "pool" all runs in a single session
# (Maybe this should be default, anyway.)
if cfg['pool_sessions']:
cfg['session'] = [[None] for _ in range(len(cfg['subject']))]
cfg['task'] = [[[t for t in task[0]]] for task in cfg['task']]
return cfg | a72a4c84cc1660005c31ecf65978c07cc5795420 | 3,631,618 |
import sys
def create_universe(code, infilepath_or_buffer=None, sids=None, from_universes=None,
exclude_delisted=False, append=False, replace=False):
"""
Create a universe of securities.
Parameters
----------
code : str, required
the code to assign to the universe (lowercase alphanumerics and hyphens only)
infilepath_or_buffer : str or file-like object, optional
create the universe from the sids in this file (specify '-' to read file from stdin)
sids : list of str, optional
create the universe from these sids
from_universes : list of str, optional
create the universe from these existing universes
exclude_delisted : bool
exclude delisted securities and expired contracts that would otherwise be
included (default is not to exclude them)
append : bool
append to universe if universe already exists (default False)
replace : bool
replace universe if universe already exists (default False)
Returns
-------
dict
status message
Examples
--------
Create a universe called 'nyse-stk' from a CSV file:
>>> create_universe("usa-stk", "nyse_securities.csv")
Create a universe from a DataFrame of securities:
>>> securities = get_securities(exchanges="TSEJ")
>>> create_universe("japan-stk", sids=securities.index.tolist())
"""
if append and replace:
raise ValueError("append and replace are mutually exclusive")
params = {}
if from_universes:
params["from_universes"] = from_universes
if exclude_delisted:
params["exclude_delisted"] = exclude_delisted
if replace:
params["replace"] = replace
if sids:
params["sids"] = sids
url = "/master/universes/{0}".format(code)
if append:
method = "PATCH"
else:
method = "PUT"
if infilepath_or_buffer == "-":
response = houston.request(method, url, params=params, data=to_bytes(sys.stdin))
elif infilepath_or_buffer and hasattr(infilepath_or_buffer, "read"):
if infilepath_or_buffer.seekable():
infilepath_or_buffer.seek(0)
response = houston.request(method, url, params=params, data=to_bytes(infilepath_or_buffer))
elif infilepath_or_buffer:
with open(infilepath_or_buffer, "rb") as f:
response = houston.request(method, url, params=params, data=f)
else:
response = houston.request(method, url, params=params)
houston.raise_for_status_with_json(response)
return response.json() | cc265828146ccd3d625a487f5fb8ad3d04a55d9f | 3,631,619 |
def onroot_vc(t, y, solver):
    """
    onroot function to reset the solver back at the start, but keep the
    current velocity, as long as the time is less than a given amount.

    :return: 1 to stop the integration, 0 to continue
    """
    if t > 28:  # we have found 4 interruption points, so we stop
        return 1
    # NOTE(review): Y0 is a module-level constant (initial position) --
    # confirm it is defined in this module's scope.
    solver.reinit_IC(t, [Y0, y[1]])
    return 0
def _only_one_selected(*args):
"""Test if only one item is True."""
return sum(args) == 1 | 9966cc7c2cde16c689f29ba2add80b2cddce56e7 | 3,631,621 |
import re
def get_org_files(rcfile):
"""Get a list of org files from a 'vimrc' file."""
with open(rcfile, 'r') as vimrc:
data = vimrc.read()
orgfiles = re.search(r'org_agenda_files\s=.*?\[.*?\]', data, re.DOTALL).group()
orgfiles = orgfiles.split('[')[1].split(', ')
orgfiles = [slugify(x) for x in orgfiles]
return orgfiles | 0ca189535490a56b986060ab2634597aa40c1442 | 3,631,622 |
def transform_and_crop_coordinate(coordinate, transform=None, offset=None, theta=None, inverse=True, name=None):
    """ Transforms a single coordinate then applies a crop offset.
    You probably don't need to use this, just call random_projection_transform()
    and then transform_and_crop_image().
    Please note that when calling this function you should be very careful
    about your transform matrix, because the projection function expects the matrix
    to be inverted relative to what you would normally expect. Instead of taking a
    point in the current frame and projecting it out to the final frame it does the inverse.
    # Arguments
        coordinate: A 2D image coordinate.
        transform: A 3x3 homogenous 2D image transform matrix stored in shape [1, 8].
        offset: A crop offset which is the location of (0,0) in the post-crop image.
            offset is in y, x order.
        theta: An optional orientation angle for the 2D image coordinate.
        inverse: Invert the projection matrix before applying it (default True).
        name: Optional name scope for the created ops.
    # Returns
        The coordinate after a tranform and crop is applied
        (and the transformed theta, if theta was given).
    """
    with tf.name_scope(name, "transform_and_crop_coordinate",
                       [coordinate, transform, offset, theta]) as name:
        if transform is not None:
            projection_matrix = _flat_transforms_to_matrices(transform)
            # TODO(ahundt) replace above with the following once flat_transforms_to_matrices becomes public in tf
            # projection_matrix = tf.contrib.image._flat_transforms_to_matrices(transform)
            # Very important: most TF code expects y, x coordinates
            # but the projection matrix is generated uing x, y coordinates
            # so we swap it from (y,x) to (x,y) here.
            if theta is None:
                if not tf.contrib.framework.is_tensor(coordinate):
                    coordinate = tf.transpose(tf.convert_to_tensor(
                        [coordinate[1],
                         coordinate[0],
                         1]
                    ))
                else:
                    coordinate = tf.stack([tf.reshape(coordinate[1], (1,)),
                                           tf.reshape(coordinate[0], (1,)),
                                           tf.constant([1], tf.float32)], axis=-1)
                    coordinate = tf.transpose(coordinate)
                if inverse:
                    projection_matrix = tf.matrix_inverse(projection_matrix)
                projection_matrix = tf.squeeze(projection_matrix)
                coordinate = tf.matmul(projection_matrix,
                                       coordinate)
                coordinate = tf.squeeze(tf.transpose(coordinate))
                coordinate = tf.stack([coordinate[1], coordinate[0]])
                # Very important: most TF code expects y, x coordinates
                # but the projection matrix is generated uing x, y coordinates
                # so we swap it back from (x,y) to (y,x) here.
            else:
                if not tf.contrib.framework.is_tensor(coordinate):
                    raise NotImplementedError
                else:
                    # TODO(ahundt) should theta be positive or negative?
                    # Build a homogeneous transform embedding the rotation
                    # theta and the point as a translation, then compose.
                    oriented_coordinate_matrix = array_ops.concat(
                        values=[
                            math_ops.cos(theta),
                            -math_ops.sin(theta),
                            coordinate[1],  # x_offset
                            math_ops.sin(theta),
                            math_ops.cos(theta),
                            coordinate[0],  # y_offset
                            array_ops.zeros((1, 2), dtypes.float32),
                        ],
                        axis=1)
                    oriented_coordinate_matrix = tf.contrib.image.compose_transforms([transform, oriented_coordinate_matrix])
                    # Very important: most TF code expects y, x coordinates
                    # but the projection matrix is generated uing x, y coordinates
                    # so we swap it back from (x,y) to (y,x) here.
                    coordinate = tf.stack([oriented_coordinate_matrix[5], oriented_coordinate_matrix[2]])
                    theta = tf.atan2(oriented_coordinate_matrix[3], oriented_coordinate_matrix[4])
        if offset is not None:
            if isinstance(offset, list):
                offset = tf.constant([[offset[0]], [offset[1]]], tf.float32)
            coordinate = coordinate - tf.cast(offset[:2], tf.float32)
        if theta is None:
            return coordinate
        else:
            return coordinate, theta
from typing import Dict
from typing import Any
def parse_direct_to_payload(
    logger: OchronaLogger, direct: str, config: OchronaConfig
) -> Dict[str, Any]:
    """
    Parses direct input string as PEP-508 compliant file and outputs a JSON payload.
    :param logger: A configured `OchronaLogger` instance
    :param direct: input string
    :param config: An instance of `OchronaConfig`
    :return: JSON payload
    """
    # (removed a dead `dependencies = []` that was immediately overwritten)
    parsers = Parsers()
    dependencies = parsers.requirements.direct_parse(direct=direct)
    logger.debug(f"Discovered dependencies: {dependencies}")
    return {"dependencies": dependencies, "policies": config.policies, "logger": logger}
from typing import Iterator
from typing import List
def build_uncertainty_calibrator(
    calibration_method: str,
    uncertainty_method: str,
    regression_calibrator_metric: str,
    interval_percentile: int,
    calibration_data: MoleculeDataset,
    calibration_data_loader: MoleculeDataLoader,
    models: Iterator[MoleculeModel],
    scalers: Iterator[StandardScaler],
    num_models: int,
    dataset_type: str,
    loss_function: str,
    uncertainty_dropout_p: float,
    dropout_sampling_size: int,
    spectra_phase_mask: List[List[bool]],
) -> UncertaintyCalibrator:
    """
    Function that chooses the subclass of :class: `UncertaintyCalibrator`
    based on the provided arguments and returns an instance of that class.
    """
    # Pick a sensible default calibration method per dataset type.
    if calibration_method is None:
        if dataset_type == "regression":
            if regression_calibrator_metric == "stdev":
                calibration_method = "zscaling"
            else:
                calibration_method = "zelikman_interval"
        if dataset_type in ["classification", "multiclass"]:
            # BUG FIX: this was `calibration_method == "isotonic"`
            # (a no-op comparison) instead of an assignment.
            calibration_method = "isotonic"
    supported_calibrators = {
        "zscaling": ZScalingCalibrator,
        "tscaling": TScalingCalibrator,
        "zelikman_interval": ZelikmanCalibrator,
        "mve_weighting": MVEWeightingCalibrator,
        "platt": PlattCalibrator,
        "isotonic": IsotonicCalibrator
        if dataset_type == "classification"
        else IsotonicMulticlassCalibrator,
    }
    calibrator_class = supported_calibrators.get(calibration_method, None)
    if calibrator_class is None:
        raise NotImplementedError(
            f"Calibrator type {calibration_method} is not currently supported. Avalable options are: {list(supported_calibrators.keys())}"
        )
    else:
        calibrator = calibrator_class(
            uncertainty_method=uncertainty_method,
            regression_calibrator_metric=regression_calibrator_metric,
            interval_percentile=interval_percentile,
            calibration_data=calibration_data,
            calibration_data_loader=calibration_data_loader,
            models=models,
            scalers=scalers,
            num_models=num_models,
            dataset_type=dataset_type,
            loss_function=loss_function,
            uncertainty_dropout_p=uncertainty_dropout_p,
            dropout_sampling_size=dropout_sampling_size,
            spectra_phase_mask=spectra_phase_mask,
        )
    return calibrator
def generate_new_diversity_plots(otu_table_fs, gg_f, mapping_f,
mapping_category='Sample_Type',
min_num_samples=11,
category_values_to_exclude=None,
verbose=False):
"""Will exclude 'NA' category value by default if this parameter is not
provided"""
if category_values_to_exclude is None:
category_values_to_exclude = ['NA']
mapping_dict, mapping_comments = parse_mapping_file_to_dict(mapping_f)
sample_type_map = {}
for samp_id in mapping_dict:
sample_type_map[samp_id] = mapping_dict[samp_id][mapping_category]
gg_otus = [seq_id.split()[0] for seq_id, s in MinimalFastaParser(gg_f)]
# Track by sample ID, which allows multiple OTU tables (even with
# overlapping sample IDs) to be supported.
success_counts = defaultdict(int)
failure_counts = defaultdict(int)
new_otus = defaultdict(list)
processed_count = 0
for otu_table_f in otu_table_fs:
otu_table = parse_biom_table(otu_table_f)
novel_otus = set(otu_table.ObservationIds) - set(gg_otus)
for counts, otu_id, md in otu_table.iterObservations():
if otu_id in novel_otus:
for samp_id, count in zip(otu_table.SampleIds, counts):
failure_counts[samp_id] += count
if count > 0:
new_otus[samp_id].append(otu_id)
else:
for samp_id, count in zip(otu_table.SampleIds, counts):
success_counts[samp_id] += count
processed_count += 1
if verbose:
print "Processed %d OTU tables.\n" % processed_count
percent_failures_result = defaultdict(list)
num_new_otus_result = defaultdict(list)
for samp_id in set(success_counts.keys() + failure_counts.keys()):
try:
samp_type = sample_type_map[samp_id]
except KeyError:
samp_type = 'Unknown'
failure_count = failure_counts[samp_id]
success_count = success_counts[samp_id]
percent_failures = (failure_count /
(success_count + failure_count)) * 100.0
percent_failures_result[samp_type].append(percent_failures)
num_new_otus_result[samp_type].append(len(set(new_otus[samp_id])))
percent_failures_data = [(median(v), '%s (n=%d)' % (k, len(v)), v)
for k, v in percent_failures_result.items()
if k != 'Unknown' and k not in
category_values_to_exclude and
len(v) >= min_num_samples]
percent_failures_data.sort()
percent_failures_plot = create_plot(percent_failures_data,
mapping_category, '% Novel Seqs', '%% Novel Seqs by %s' %
mapping_category)
num_new_otus_data = [(median(v), '%s (n=%d)' % (k, len(v)), v)
for k, v in num_new_otus_result.items()
if k != 'Unknown' and k not in
category_values_to_exclude and
len(v) >= min_num_samples]
num_new_otus_data.sort()
num_new_otus_plot = create_plot(num_new_otus_data,
mapping_category, 'Number of Novel OTUs',
'Number of Novel OTUs by %s' % mapping_category)
return percent_failures_data, percent_failures_plot, num_new_otus_data, \
num_new_otus_plot | 02632d4e22c5c4740ebb536e182f7bb24820c170 | 3,631,626 |
def tile_data3d(data,(lentZ,lentY,lentX)):
"""
Tile sparky data into 1D numpy array
Parameters:
* data Three-dimensional data array
* lentZ Z (w1) dimention tile size
* lentY Y (w2) dimention tile size
* lentX X (w3) dimention tile size
Returns 1D numpy array of floats
"""
# determind the number of tiles in data
ttX = np.ceil(data.shape[2] / float(lentX)) # total tiles in X dim
ttY = np.ceil(data.shape[1] / float(lentY)) # total tiles in Y dim
ttZ = np.ceil(data.shape[0] / float(lentZ)) # total tiles in Z dim
tt = ttX*ttY*ttZ # total number of tiles
# calc some basic parameter
tsize = lentX*lentY*lentZ # number of points in one tile
t_tup = (lentZ,lentY,lentX) # tile size tuple
# create an empty array to store file data
out = np.empty( (tt*tsize),dtype="float32")
for i in xrange(int(tt)):
out[i*tsize:(i+1)*tsize] = find_tilen_3d(data,i,t_tup)
return out | 9544fe1ac4a42588bc143d6aaf55be31d382b34e | 3,631,627 |
from typing import Optional
from typing import Iterable
from typing import Dict
import itertools
def do_lock(
    project: Project,
    strategy: str = "all",
    tracked_names: Optional[Iterable[str]] = None,
    requirements: Optional[Dict[str, Dict[str, Requirement]]] = None,
) -> Dict[str, Candidate]:
    """Performs the locking process and updates the lockfile.

    :param project: the project instance
    :param strategy: update strategy: reuse/eager/all
    :param tracked_names: required when using eager strategy
    :param requirements: An optional dictionary of requirements, read from pyproject
        if not given.
    :return: mapping of resolved candidates keyed by name
    """
    check_project_file(project)
    # TODO: multiple dependency definitions for the same package.
    repository = project.get_repository()
    requirements = requirements or project.all_dependencies
    allow_prereleases = project.allow_prereleases
    requires_python = project.python_requires
    if strategy == "all":
        provider = BaseProvider(repository, requires_python, allow_prereleases)
    else:
        # reuse/eager strategies seed the resolver with current pins.
        provider_class = (
            ReusePinProvider if strategy == "reuse" else EagerUpdateProvider
        )
        preferred_pins = project.get_locked_candidates("__all__")
        provider = provider_class(
            preferred_pins,
            tracked_names or (),
            repository,
            requires_python,
            allow_prereleases,
        )
    flat_reqs = list(
        itertools.chain(*[deps.values() for _, deps in requirements.items()])
    )
    # TODO: switch reporter at io level.
    with halo.Halo(text="Resolving dependencies", spinner="dots") as spin:
        reporter = SpinnerReporter(flat_reqs, spin)
        mapping, dependencies, summaries = resolve(
            provider, reporter, requirements, requires_python
        )
        data = format_lockfile(mapping, dependencies, summaries)
        spin.succeed("Resolution success")
    project.write_lockfile(data)
    return mapping
import scipy
def sparse_to_vector(vector: scipy.sparse.spmatrix):
    """
    Converts a one-dimensional sparse matrix to a vector array.

    :param vector: Vector as a sparse matrix (x,1) or (1,x).
    :return: Vector as a one-dimensional array (x,).
    """
    # toarray() yields a 2-D dense matrix; ravel flattens it to shape (x,).
    return np.ravel(vector.toarray())
def uncentered_operator(X, func, center=None, fill=None, **kwargs):
    """Only apply the operator on a centered patch.

    In some cases, for example symmetry, an operator might not make
    sense outside of a centered box. This operator only updates
    the portion of `X` inside the centered region.

    Parameters
    ----------
    X: array
        The parameter to update.
    func: `function`
        The function (or operator) to apply to `X`.
    center: tuple
        The location of the center of the sub-region to
        apply `func` to `X`. Defaults to the argmax of `X`.
    fill: `float`
        The value to fill the region outside of the centered
        sub-region, for example `0`. If `fill` is `None`
        then only the subregion is updated and the rest of
        `X` remains unchanged.
    """
    if center is None:
        # Default the center to the brightest pixel.
        py, px = np.unravel_index(np.argmax(X), X.shape)
    else:
        py, px = center
    cy, cx = np.array(X.shape) // 2
    if py == cy and px == cx:
        # Already centered: apply the operator to the whole array.
        return func(X, **kwargs)
    # Offsets (doubled) select the largest sub-array centered on (py, px).
    dy = int(2 * (py - cy))
    dx = int(2 * (px - cx))
    if not X.shape[0] % 2:
        dy += 1
    if not X.shape[1] % 2:
        dx += 1
    if dx < 0:
        xslice = slice(None, dx)
    else:
        xslice = slice(dx, None)
    if dy < 0:
        yslice = slice(None, dy)
    else:
        yslice = slice(dy, None)
    if fill is not None:
        _X = np.ones(X.shape, X.dtype) * fill
        _X[yslice, xslice] = func(X[yslice, xslice], **kwargs)
        X[:] = _X
    else:
        X[yslice, xslice] = func(X[yslice, xslice], **kwargs)
    return X
def _get_range_and_pstring(variable, mean_cube, tropopause=False):
"""Get range for color bar and print string."""
if variable == "Air Temperature":
print_var = "Temperature [K]"
set_range = np.linspace(180, 230, 21)
elif variable == "Geopotential Height":
print_var = "Geopotential Height [m]"
set_range = np.linspace(8000, 22000, 25)
elif variable == "Relative Humidity":
print_var = "Relative Humidity [%]"
set_range = np.linspace(0, 100, 21)
elif variable == "Specific Humidity":
print_var = "Specific Humidity [kg/kg]"
if tropopause:
set_range = np.linspace(0.1e-5, 2.5e-5, 25)
else:
logval = np.log(np.array([1e-6, 1e-5]))
set_range = np.exp(np.linspace(logval[0], logval[1], 41))
else:
print_var = mean_cube.long_name
set_range = np.linspace(np.nanmin(mean_cube.data),
np.nanmax(mean_cube.data), 21)
return {'print_var': print_var, 'set_range': set_range} | ce3370929d490ae3636d5959a6e9b6ecbfe61110 | 3,631,631 |
def merge_storage(df, cons, prod, stor):
    """Merge the positive storage part into consumption and the negative
    part into production, then drop the storage column.

    :param df: input DataFrame (not modified; a copy is returned)
    :param cons: name of the consumption column
    :param prod: name of the production column
    :param stor: name of the storage column (positive = charging)
    :return: copy of df with storage folded into cons/prod, stor dropped
    """
    _df = df.copy()
    assert not _df.isnull().values.any(), 'Include NaN values'
    _df[prod] = _df[prod] - _df[stor].clip(upper=0)
    _df[cons] = _df[cons] + _df[stor].clip(lower=0)
    # Assert that total energy remains the same. (Replaced pytest.approx with
    # math.isclose: production code should not depend on a test framework.)
    assert math.isclose((df[cons] - df[prod] + df[stor]).sum(),
                        (_df[cons] - _df[prod]).sum(), abs_tol=1e-6)
    # NOTE(review): `any` only checks that at least one value is
    # non-negative; presumably `.all()` was intended -- confirm.
    assert any(_df[cons] >= 0)
    assert any(_df[prod] >= 0)
    _df.drop(stor, axis=1, inplace=True)  # storage column no longer needed
    return _df
from typing import Mapping, TypeVar
import glob
import os

# BUG FIX: ``T`` was previously ``from re import T`` (the re.TEMPLATE
# flag), which is not a type variable at all.
T = TypeVar("T")


def load_page_data(img_dir: str, current_obj: Mapping[T, T]) -> Mapping[T, T]:
    """
    Iterate through the img directory, and retrieve the page level data

    For every file in *img_dir* the raw bytes, pixel dimensions and the
    page number (the file's basename, assumed to be an integer) are
    collected.  The list is stored on ``current_obj['page_data']``, the
    event stream is reset to ``['imagedata']`` and the mutated
    *current_obj* is returned.

    NOTE: requires Pillow's ``Image`` to be importable at call time when
    the directory is non-empty.
    """
    page_data = []
    # BUG FIX: the loop previously iterated over the undefined name
    # ``img_tmp`` and used ``os.basename`` (which does not exist) instead
    # of ``os.path.basename``.
    for f in glob.glob(f'{img_dir}/*'):
        page_obj = {}
        page_num = int(os.path.basename(f))
        img = Image.open(f)
        width, height = img.size
        with open(f, 'rb') as bimage:
            bstring = bimage.read()
        page_obj['bytes'] = bstring
        page_obj['page_width'] = width
        page_obj['page_height'] = height
        page_obj['page_num'] = page_num
        page_data.append(page_obj)
    current_obj['page_data'] = page_data
    current_obj['event_stream'] = ['imagedata']
    return current_obj  # | 6b47f944a1a5d9b6c6e6c9aa097ee812b7aff100 | 3,631,633
from calendar import isleap
from datetime import datetime
def numeric_date(dt=None):
"""
Convert datetime object to the numeric date.
The numeric date format is YYYY.F, where F is the fraction of the year passed
Parameters
----------
dt: datetime.datetime, None
date of to be converted. if None, assume today
"""
if dt is None:
dt = datetime.datetime.now()
days_in_year = 366 if isleap(dt.year) else 365
try:
res = dt.year + (dt.timetuple().tm_yday-0.5) / days_in_year
except:
res = None
return res | 1a369bb8824db3f885b1af269f0d86eba5103769 | 3,631,634 |
def specific_clean_cell_lst():
    """Clean a list of cells - column cells

    Flask view handler: reads ``cells`` (list of strings) and ``coltype``
    from the JSON request body and returns the cleaned cells produced by
    the module-level ``fix_specific`` helper.
    """
    # List of strings
    col_cells = request.json["cells"]
    col_type = request.json["coltype"]
    clean_cells = fix_specific(col_cells, col_type)
    return clean_cells | e61e17dc311516bed179cde6a4b9cecaab4420de | 3,631,635
def compress_sym(sym_expanded, make_symmetric=True):
    """Compress symmetric matrix to a vector.

    Similar to scipy.spatial.squareform, but also contains the
    diagonal.

    Parameters
    ----------
    sym_expanded : nd-array, shape (size, size)
        Input matrix to compress.

    make_symmetric : bool (default=True)
        Whether to symmetrize the input matrix before compressing.
        It is made symmetric by using
        ``sym_expanded + sym_expanded.T - np.diag(np.diag(sym_expanded))``
        This makes sense if only one of the two entries was non-zero before.

    Returns
    -------
    nd-array, shape (size * (size + 1) / 2,)
        The lower-triangular entries (diagonal included), row by row.
    """
    size = sym_expanded.shape[0]
    if make_symmetric:
        sym_expanded = (sym_expanded + sym_expanded.T -
                        np.diag(np.diag(sym_expanded)))
    # BUG FIX: the alias ``np.bool`` was removed in NumPy 1.24; the
    # builtin ``bool`` is the documented replacement for the mask dtype.
    return sym_expanded[np.tri(size, dtype=bool)]  # | f2d5b7ce91c18ae3730feda002cdcc76ad0540e7 | 3,631,636
def get_cc_biz_id_by_app(fta_application_id):
    """Look up the CC business id for the given FTA application id.

    Only enabled, non-deleted applications are considered; ``None`` is
    returned when no matching application exists.
    """
    matched = session.query(AlarmApplication).filter_by(
        app_id=fta_application_id,
        is_deleted=False,
        is_enabled=True).first()
    return matched.cc_biz_id if matched else None  # | 597bd8dd170c42d19166e9db64c80bf490fb7a2d | 3,631,637
from pathlib import Path


def get_csv_filename(folder="zips"):
    """
    Returns the Path of the first csv-file stored in the given folder
    (by default the zips-folder).

    Raises FileNotFoundError (after logging via the module-level
    ``logger``) when the folder contains no csv file.
    """
    csvs = [f for f in Path(folder).iterdir() if f.suffix == ".csv"]
    if csvs:  # idiomatic truthiness instead of ``len(csvs) >= 1``
        return csvs[0]
    # lint fix: the message had an ``f`` prefix but no placeholders
    logger.error("CSV-file missing")
    raise FileNotFoundError("CSV-file does not exist in zips-folder")  # | d7e811ef15174d29c9514c2eeaa853e9da4996e8 | 3,631,638
import random
import string


def generate_random_id(start: str = ""):
    """
    Generates a random alphabetic id.

    The id is 16 cryptographically random lowercase ASCII letters; when
    *start* is given it is prepended with a ``-`` separator.
    """
    suffix = "".join(
        random.SystemRandom().choices(string.ascii_lowercase, k=16))
    return "-".join([start, suffix]) if start else suffix  # | f818ecf7ba4296a3ad010ef20bc5e286036bb56d | 3,631,639
def add_supplementary_xml(element: etree, config: dict) -> etree:
    """Add arbitrary xml from configuration object to xml
    Args:
        element (etree): original xml document
        config (dict): standard ReadAlong-Studio configuration
    Returns:
        etree: xml with supplemental markup
    Raises:
        KeyError: when *config* has no "xml" section at all.
    """
    if "xml" not in config:
        raise KeyError(
            "Configuration tried to add supplementary xml, but no declarations were found in configuration"
        )
    # Each declaration appends its "value" XML under every node matching
    # its "xpath"; a non-matching xpath only logs a warning.
    for el in config["xml"]:
        parents = element.xpath(el["xpath"])
        if not parents:
            LOGGER.warning(
                f"No elements found at {el['xpath']}, please verify your configuration."
            )
        for parent in parents:
            parent.append(etree.XML(el["value"]))
    return element | 2b554a5de0b43731c75ccd7bf311591a3b51001d | 3,631,640
def get_client_names(worksheet) -> list:
    """Collect the unique client names from column A of an Excel worksheet.

    Rows 2..max_row are scanned (row 1 is assumed to be a header) and the
    first-seen order of the names is preserved.
    """
    seen = []
    for row_idx in range(2, worksheet.max_row + 1):
        name = worksheet.cell(row=row_idx, column=1).value
        if name not in seen:
            seen.append(name)
    return seen  # | 6da6e52ed10e84ae79119c511e063114bb61b334 | 3,631,641
def as_json(dictionary):
    """
    Object hook used in order to create the right object reading a JSON.
    :param dictionary: Dict, Dictionary to analyze.
    :return: The right object represented in the JSON.
    """
    # Dispatch on marker keys that are unique to each payload type.
    if "first_name" in dictionary:
        return User(**dictionary)
    elif "update_id" in dictionary:
        return Update(**dictionary)
    elif "result" in dictionary:
        return Response(**dictionary)
    else:
        # 'from' is a Python keyword so it cannot be a keyword argument;
        # rename it.  NOTE: this mutates the input dict in place.
        dictionary["message_from"] = dictionary["from"]
        del dictionary["from"]
        return Message(**dictionary) | ed5f386e3c37a363cbec0e026960628d827462a4 | 3,631,642
def coefficient_map(cv: xr.DataArray) -> xr.DataArray:
    """
    Return the coefficient map

    :param cv: cost volume
    :type cv: xarray.Dataset, with the data variables cost_volume 3D xarray.DataArray (row, col, disp)
    :return: the coefficient map
    :rtype : 2D DataArray (row, col)
    """
    row = cv.coords['row']
    col = cv.coords['col']
    # Create the coefficient map: for every (row, col) pick the cost at
    # the disparity selected in 'disp_indices', i.e. the matching cost of
    # the winning disparity.
    coeff_map = xr.DataArray(cv['cost_volume'].sel(disp=cv['disp_indices']).astype(np.float32),
                             coords=[('row', row), ('col', col)])
    coeff_map.name = 'Coefficient Map'
    # Propagate the cost-volume metadata onto the map.
    coeff_map.attrs = cv.attrs
    return coeff_map | 32179fff71635283394226ffac2ee9c0ba7f0f3d | 3,631,643
def transpose(a, axes=None):
    """
    Reverse or permute the axes of an array; returns the modified array.

    For an array with two axes this gives the matrix transpose.

    Parameters
    ----------
    a : array_like
        Input array.
    axes : tuple or list of ints, optional
        A permutation of ``[0, 1, ..., N-1]``; axis ``i`` of the result
        corresponds to axis ``axes[i]`` of the input.  Defaults to
        ``range(a.ndim)[::-1]``, which reverses the axis order.

    Returns
    -------
    p : ndarray
        `a` with its axes permuted.  A view is returned whenever
        possible.

    See Also
    --------
    moveaxis, argsort

    Notes
    -----
    Use ``transpose(a, argsort(axes))`` to invert a transposition when
    using the `axes` keyword argument.  Transposing a 1-D array returns
    an unchanged view of the original array.

    Examples
    --------
    >>> np.transpose(np.arange(4).reshape((2, 2)))
    array([[0, 2],
           [1, 3]])
    >>> np.transpose(np.ones((1, 2, 3)), (1, 0, 2)).shape
    (2, 1, 3)
    """
    # Delegate to the array's own ``transpose`` method (with a generic
    # fallback for array-likes).
    result = _wrapfunc(a, 'transpose', axes)
    return result  # | d51bc442e71f52c08b38cff98275540528e178be | 3,631,644
def post_token():
    """
    <url>/notifications/api/PushToken
    Get Device Tokens for notifications

    Reads ``token`` (user auth token) and ``deviceId`` (push notification
    token) from the JSON body and upserts the device record for that
    user.

    NOTE(review): if no user matches ``token``, ``user`` is None and the
    next line raises AttributeError -- confirm unknown tokens cannot
    reach this endpoint.
    """
    token = request.json.get('token')
    deviceId = request.json.get('deviceId')
    user = User.query.filter_by(token=token).first()
    device = DevicesNotificationHandlers.query.filter_by(user_id=user.id).first()
    if device is None:
        # Device doesn't exist for that user create a new one
        device = DevicesNotificationHandlers(user_id=user.id, notificationToken=deviceId)
    else:
        # Device does Exist for that user update it to use the latest device
        device.notificationToken = deviceId
    db.session.add(device)
    db.session.commit()
    return jsonify({'code': 'Success'}) | 3e287f339500c02e91e5a6512878178686ec3a22 | 3,631,645
from datetime import datetime, timezone


def get_timestamp() -> str:
    """
    Return the current UTC timestamp as ``YYYY.MM.DD HH:MM:SS +0000``.
    """
    # BUG FIX: only the ``datetime`` class is imported, so the original
    # ``datetime.datetime.now(...)`` raised AttributeError.  The stdlib
    # ``timezone.utc`` also replaces the third-party pytz dependency and
    # produces the same ``+0000`` offset via ``%z``.
    now = datetime.now(timezone.utc)
    return now.strftime('%Y.%m.%d %H:%M:%S %z').strip()  # | 093b2275d5dc1381eb69a3b845702715e5b8281f | 3,631,646
from brambox.boxes.annotations import Annotation
def as_anno(class_id, x_center, y_center, w, h, Win, Hin):
    """
    Construct an BramBox annotation using the basic YOLO box format

    YOLO boxes are centre-based and normalised to [0, 1]; ``Win`` and
    ``Hin`` are the image width/height in pixels used to rescale the box
    to absolute top-left / width / height coordinates.
    """
    anno = Annotation()
    anno.class_id = class_id
    # convert the normalised centre to an absolute top-left corner
    anno.x_top_left = (x_center - w / 2) * Win
    anno.y_top_left = (y_center - h / 2) * Hin
    anno.width, anno.height = w * Win, h * Hin
    return anno | e46f6b626bf500da0b2c63a4d093ad392e9a9b3a | 3,631,647
def parse_testcase_xml(testcase):
    """
    Flatten fields of interest from a TestCase XML element into a dict,
    where anything not found is None

    Keys produced: name, time (whole seconds, int), stdout, stderr,
    skipped (bool), skip-msg, failed (bool), fail-txt, fail-msg.

    NOTE(review): Python 2 only -- relies on the builtin ``unicode``.
    """
    # We need to emit only Unicode things, but we may get str or Unicode
    # depending on if the parser thinks we have UTF-8 or ASCII data in a field.
    tc = dict()
    tc['name'] = unicode(testcase.get('name'))
    # 'time' may be a float string; truncate to whole seconds.
    tc['time'] = int(float(testcase.get('time', 0)))
    if testcase.find('system-out') is not None:
        tc['stdout'] = unicode(testcase.find('system-out').text)
    else:
        tc['stdout'] = None
    if testcase.find('system-err') is not None:
        tc['stderr'] = unicode(testcase.find('system-err').text)
    else:
        tc['stderr'] = None
    if testcase.find('skipped') is not None:
        tc['skipped'] = True
        tc['skip-msg'] = unicode(testcase.find('skipped').text)
    else:
        tc['skipped'] = False
        tc['skip-msg'] = None
    failure = testcase.find('failure')
    if failure is not None:
        tc['fail-txt'] = unicode(failure.text)
        tc['fail-msg'] = unicode(failure.get('message'))
        tc['failed'] = True
    else:
        tc['fail-txt'] = None
        tc['fail-msg'] = None
        tc['failed'] = False
    return tc | e93d629f0953cecd30ce4a7241a8f6a1db76fb06 | 3,631,648
def build_coco_results(dataset, image_ids, rois, class_ids, scores):
    """Arrange results to match COCO specs in http://cocodataset.org/#format
    rois: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates.
    image_ids: [num_instances]
    class_ids: [num_instances]
    scores: (optional) confidence scores for each box

    NOTE(review): every roi is emitted once per entry of image_ids, so
    passing more than one image id duplicates all detections across the
    images -- confirm callers pass a single-image batch.
    """
    # If no results, return an empty list
    if rois is None:
        return []
    results = []
    for image_id in image_ids:
        # Loop through detections
        for i in range(rois.shape[0]):
            class_id = class_ids[i]
            score = scores[i]
            # COCO expects [x, y, width, height] rather than corners.
            bbox = np.around(rois[i], 1)
            result = {
                "image_id": image_id,
                "category_id": dataset.get_source_class_id(class_id, "PanorAMS"),
                "bbox": [bbox[1], bbox[0], bbox[3] - bbox[1], bbox[2] - bbox[0]],
                "score": score
            }
            results.append(result)
    return results | ac25c6e6a4ed45b976ff06369edaa13c8f5fcdb7 | 3,631,649
import requests
def get_instance_ip(compute_url, instance_id, token):
    """ Retrieve the IPs of the running instance

    Queries ``<compute_url>/servers/<instance_id>/ips`` with the given
    auth token and returns the parsed JSON body; implicitly returns
    ``None`` on any non-200 response.
    """
    url = "%s/servers/%s/ips" % (compute_url, instance_id)
    headers = {"X-Auth-Token": "%s" % token, "Content-type": "application/json"}
    curl = requests.get(url=url, headers=headers)
    if curl.status_code == 200:
        data = curl.json()
        return data | 224e916f574869023e540d6a4913acd7541c7942 | 3,631,650
def _domain_map(z, satu, mapType=0):
    """Domain color the array `z` using saturation `satu`.

    Hue encodes the argument of ``z`` (via ``_hue``), saturation is the
    constant *satu*, and value encodes the magnitude (via
    ``_absolute_map``); the HSV stack is then converted to RGB.
    Currently there is only one domain coloring type, so ``mapType`` is
    unused.
    """
    h = _hue(z)
    # BUG FIX: the alias ``np.float`` was removed in NumPy 1.24; the
    # builtin ``float`` is the documented replacement.
    s = satu * _np.ones_like(h, float)
    v = _absolute_map(_np.absolute(z))
    hsv_map = _np.dstack((h, s, v))
    rgb_map = _mplc.hsv_to_rgb(hsv_map)
    return rgb_map  # | a422d2b463a97afdcae18f820f2ac7462f54ce91 | 3,631,651
from typing import Callable
def provider(provided_dependency_name: _Name = None,
             **named_dependencies: _Name) -> Callable[[_ProviderMethod], _ProviderMethod]:
    """
    Method decorator for instance provider methods in a module class. The provider method can take
    parameters representing dependencies, just like the "@inject()" decorator, and named
    dependencies can be specified in the call to "@provider(...)". For details on how this works,
    see the "inject" function's documentation, or look at the README file.

    This decorator does not change the behavior of the method, but it marks it as being a provider
    (and also stores this in its parent class).

    Note that you must call this decorator like a function (i.e. like "@provider()" or
    "@provider( ... )", NOT just "@provider").
    """
    def handle(provider_method: _ProviderMethod) -> _ProviderMethod:
        # Validate the declared named dependencies against the method's
        # actual parameters before accepting it as a provider.
        err = _check_dependencies(provider_method, named_dependencies)
        if err is not None:
            raise BadProviderError("Provider \"%s\" %s" % (provider_method.__name__, err))
        provided_dependency_type = _get_provider_return_type(provider_method, "Provider")
        # Stash the provider metadata on the method itself; the module
        # class later collects it from this attribute.
        # noinspection PyCallingNonCallable
        setattr(provider_method, _PYPROVIDE_PROPERTIES_ATTR,
                _ProviderDecoratorProperties(named_dependencies,
                                             provided_dependency_type,
                                             provided_dependency_name,
                                             False))
        return provider_method
    return handle | 52ef1a7a65b93b498ecfbb1b3173effc91de8e67 | 3,631,652
def _num_to_words(num):
"""
Turkish converter
Params:
num(int/long): number to be converted
Returns:
wordString
"""
units = ['', u'bir', u'iki', u'üç', u'dört', u'beş', u'altı', u'yedi', u'sekiz', u'dokuz']
teens = ['', u'onbir', u'oniki', u'onüç', u'ondört', u'onbeş', u'onaltı', u'onyedi', u'onsekiz', u'ondokuz']
tens = ['', u'on', u'yirmi', u'otuz', u'kırk', u'elli', u'altmış', u'yetmiş', u'seksen', u'doksan']
thousands = ['', u'bin', u'milyon', u'milyar', u'trilyon', u'katrilyon', u'kentilyon']
words = []
if num==0: words.append(u'sıfır')
else:
# Convert num to string
numStr = '%d'%num
numStrLen = len(numStr)
# Get the number of group with 3 digits
groups = (numStrLen+2)//3
if groups>(len(thousands)):
return ''
# Pad the zereos to the missing digits
numStr = numStr.zfill(groups*3)
for i in range(0,groups*3,3):
h,t,u = int(numStr[i]),int(numStr[i+1]),int(numStr[i+2])
g = groups-(i//3+1)
# Add hundreds
if h>=1:
# In order not to say 'bir yüz'
if h!=1:
words.append(units[h])
words.append(u'yüz')
# Add tens
if t>1:
words.append(tens[t])
if u>=1: words.append(units[u])
# Add teens
elif t==1:
if u>=1: words.append(teens[u])
else: words.append(tens[t])
# If second digit is zero
else:
# In order not to say 'bir bin'
if g!=1 or u!=1:
if u>=1: words.append(units[u])
# Add thousands
if (g>=1) and ((h+t+u)>0): words.append(thousands[g])
return ' '.join(words) | 14adb62d17f2089127ca9b90f1d884063c028adf | 3,631,653 |
def get_vehicle_txn(session, vehicle_id):
    """
    For when you just want a single vehicle.

    Arguments:
        session {.Session} -- The active session for the database connection.
        vehicle_id {String} -- The vehicle's `id` column.

    Returns:
        {dict} or {None} -- Contains vehicle information for the vehicle
                            queried, or None of no vehicle found.
    """
    v = aliased(Vehicle)  # vehicles AS v
    l = aliased(LocationHistory)  # location_history as l
    g = find_most_recent_timestamp_subquery(session)
    # SELECT columns
    # BUG FIX: ``serial_number`` is read from the row below but was not
    # among the selected columns, so every successful lookup raised
    # AttributeError.
    vehicle = session.query(v.id, v.in_use, v.vehicle_info, v.battery,
                            v.serial_number, l.longitude, l.latitude, l.ts). \
        filter(l.vehicle_id == v.id). \
        filter(l.vehicle_id == vehicle_id). \
        join(g). \
        filter(g.c.vehicle_id == l.vehicle_id). \
        filter(g.c.max_ts == l.ts).order_by(v.id). \
        first()  # LIMIT 1;
    # Return the row as a dictionary for flask to populate a page.
    if vehicle is None:
        return None
    return {'id': str(vehicle.id), 'last_longitude': vehicle.longitude,
            'last_latitude': vehicle.latitude, 'last_checkin': vehicle.ts,
            'in_use': vehicle.in_use, 'battery': vehicle.battery,
            'vehicle_info': vehicle.vehicle_info, 'serial_number': vehicle.serial_number}  # | 5f7f3c773e40f567a060015f2c8e5c043b6cb1f5 | 3,631,654
import six
def slugify(value):
    """
    Slugify a string (even if it contains non-ASCII chars)

    Order of operations: substitute strings from OSCAR_SLUG_MAP, run the
    configured slugify function on the unidecoded text, then strip
    blacklisted stopwords.
    """
    # Re-map some strings to avoid important characters being stripped. Eg
    # remap 'c++' to 'cpp' otherwise it will become 'c'.
    for k, v in settings.OSCAR_SLUG_MAP.items():
        value = value.replace(k, v)
    # Allow an alternative slugify function to be specified
    # Recommended way to specify a function is as a string
    slugifier = getattr(settings, 'OSCAR_SLUG_FUNCTION', default_slugifier)
    if isinstance(slugifier, six.string_types):
        slugifier = import_string(slugifier)
    # Use unidecode to convert non-ASCII strings to ASCII equivalents where
    # possible.
    value = slugifier(unidecode(six.text_type(value)))
    # Remove stopwords
    for word in settings.OSCAR_SLUG_BLACKLIST:
        value = value.replace(word + '-', '')
        value = value.replace('-' + word, '')
    return value | 53273bd3f6ae2418736a22a2780581d1974dc92b | 3,631,655
def upper(value: str):  # Only one argument.
    """Return *value* converted to all uppercase."""
    return str.upper(value)  # | 8ec4c4ed284bc8d823e356db7749a4c98a00b194 | 3,631,656
from typing import Dict
from typing import List


def _create_sorted_hash_list(data: Dict, hash_function: str) -> List[Dict]:
    """Create a sorted sha256 hash list.

    Each object in *data* is hashed with the module-level
    ``_create_json_hash`` helper and the digests are returned in
    ascending order.

    NOTE(review): the annotations look off -- *data* is iterated like a
    sequence of objects and the result is a list of digest strings, not
    dicts; confirm before tightening the types.
    """
    # ``sorted`` + generator replaces the manual append/sort loop; the
    # old loop also shadowed the builtin ``hash``.
    return sorted(
        _create_json_hash(obj, hash_function=hash_function) for obj in data
    )  # | 2efbac4652bf2db975513610956445c932543d11 | 3,631,657
def disease_function_subset(ipa, network_dir, printing=False):
    """
    Returns a disease subset of functions. A function is considered a
    disease if its lowercase name is the same as its class and its name is
    not a function category. Build must be run first

    NOTE(review): Python 2 only (print statements).
    """
    disease_names = set()
    for function in ipa.functions:
        if function.name.lower() == function.function_class.lower():
            disease_names.add(function.name)
    # Drop diseases explicitly excluded by the network configuration.
    diseases_to_remove = read_diseases_to_remove(network_dir)
    disease_names -= diseases_to_remove
    disease_functions = {ipa.name_to_function[disease] for disease in disease_names}
    print len(disease_functions), 'diseases'
    ## print random sample of removed function names
    omitted_functions = {function.name for function in ipa.functions - disease_functions}
    if printing:
        for function_name in random.sample(omitted_functions, 20):
            print function_name
    return disease_functions | 83370f5dd6a6245d4fc2dc988c43beec110a3f48 | 3,631,658
import torch
def map_tensor(x, func):
    """
    Apply function @func to torch.Tensor objects in a nested dictionary or
    list or tuple.

    Args:
        x (dict or list or tuple): a possibly nested dictionary or list or tuple
        func (function): function to apply to each tensor

    Returns:
        y (dict or list or tuple): new nested dict-list-tuple
    """
    return recursive_dict_list_tuple_apply(
        x,
        {
            # Tensors are transformed; ``None`` leaves pass through as-is.
            torch.Tensor: func,
            type(None): lambda x: x,
        }
    ) | 38675f836fcb462946e03054b74051e7ccea882e | 3,631,659
import os
import json
def get_keyname_to_fingerprint():
    """Get a map of pub key filename to fingerprint

    Loads the JSON mapping stored next to the trusted keys and validates
    every (keyname, fingerprint) pair before returning the dict.
    """
    trusted_keys_dir = get_trusted_keys_dir()
    filepath = os.path.join(trusted_keys_dir, _KEYNAME_TO_FINGERPRINT_FILE)
    with open(filepath, "r") as fi:
        keyname_to_fingerprint = json.load(fi)
    # Fail fast on malformed entries rather than returning bad data.
    for keyname, fingerprint in keyname_to_fingerprint.items():
        _validate_keyname_to_fingerprint_item(keyname, fingerprint)
    return keyname_to_fingerprint | 77a04a1c7f9d53846a6e6fc11652508b5f406be6 | 3,631,660
def xor(a, b):
    """Return the character-wise XOR of two strings of the same length.

    Raises AssertionError when the lengths differ.
    """
    assert len(a) == len(b)
    return ''.join(chr(ord(x) ^ ord(y)) for x, y in zip(a, b))  # | cbe3d32883dc5516821711181c7f5d52194d89de | 3,631,661
def wmts2twmsbox_scale(scale_denominator, col, row):
    """
    Returns TWMS equivalent bounding box based on TILECOL and TILEROW.
    Arguments:
        scale_denominator -- WMTS scale denominator value from getCapabilities.
        col -- WMTS TILECOL value.
        row -- WMTS TILEROW value.

    NOTE(review): Python 2 only (print statements); also depends on the
    module-level globals ``tilesize``, ``units`` and ``pixelsize``.
    """
    print "Scale Denominator:",str(scale_denominator)
    print "TILECOL="+str(col)
    print "TILEROW="+str(row)
    # Tile edge length in map units at this scale.
    size = ((tilesize*2)*scale_denominator/units)*(pixelsize/2)
    # set top_left values
    top_left_minx = -180
    top_left_maxy = 90
    # calculate additional top_left values for reference
    top_left_maxx = -180 + size
    top_left_miny = 90 - size
    print "Top Left BBOX: " + str(top_left_minx)+","+str(top_left_miny)+","+str(top_left_maxx)+","+str(top_left_maxy)
    # calculate new bounding box based on col and row
    request_minx = top_left_minx + (col*size)
    request_miny = top_left_maxy - (row*size) - size
    request_maxx = top_left_minx + (col*size) + size
    request_maxy = top_left_maxy - (row*size)
    return "Request BBOX: " + str(round(request_minx,10))+","+str(round(request_miny,10))+","+str(round(request_maxx,10))+","+str(round(request_maxy,10)) | 9660f5a1b3b9eecf5623d70c9b32861ab2e8dd88 | 3,631,662
import hashlib
import yaml


def get_hash(x, length=16):
    """Return a short, stable hash of x: the first *length* hex digits of
    the sha224 digest of ``{"key": x}`` serialised as YAML."""
    payload = yaml.dump(dict(key=x)).encode()
    return hashlib.sha224(payload).hexdigest()[:length]  # | e13c278ef649e2d8c213580d5ccc27ae64d72027 | 3,631,663
def make_unhealthy():
    """Sets the server to simulate an 'unhealthy' status.

    Flips the module-level ``_is_healthy`` flag, re-renders the index
    page and redirects the browser back to ``/``.
    """
    global _is_healthy
    _is_healthy = False
    template = render_template('index.html',
                               hostname=gethostname(),
                               zone=_get_zone(),
                               template=_get_template(),
                               healthy=False,
                               working=_is_working())
    # 302 + Location header: redirect back to the front page.
    response = make_response(template, 302)
    response.headers['Location'] = '/'
    return response | c379b14b1a924bc81c31b74a322d6ddde67421d8 | 3,631,664
def filtreDonner(liste) :
    """
    Filter a list of strings.

    Intended to drop uninteresting data (overly frequent words, ...).

    NOTE(review): currently a stub -- the input list is returned
    unchanged; no filtering is performed yet.

    param : liste[string] -> list of strings to filter
    return : liste[string] -> filtered list of strings
    """
    return liste | b7e5f04a6645895a16c44f3f477ecc9d9a8ecef1 | 3,631,665
import os.path


def get_include():
    """
    Return the directory that contains the dpctl *.h header files.

    Extension modules that need to be compiled against dpctl should use
    this function to locate the appropriate include directory.
    """
    package_dir = os.path.dirname(__file__)
    return os.path.join(package_dir, "include")  # | 0d63b857071ce118dd8206187fddbf59d2d86583 | 3,631,666
def get_problem_set(dataset,LABELNAME,labels,i2s):
    """Aggregate labels and associated article/domain information by chosen classification task

    Arguments
    - dataset: a list of article text (the corpus)
    - LABELNAME: a string given by user input identifying the classification task
    - labels: a dict mapping domain names to bias and credibility label information
    - i2s: a dict mapping integer vertex labels to string representations (domain names)

    Returns:
    - article_ids: a list of article ID's included in the classification task's problem set
    - associated_labels: a list of labels corresponding to the article ID's in the classification task's problem set
    - associated_domains: a list of domains corresponding to the article ID's in the classification task's problem set
    - i2l: a dict mapping integer vertex labels to integer representations (binary labels)

    NOTE(review): the final 'cred' branch references a name ``flag`` that
    is not defined in this function -- confirm it exists at module scope,
    otherwise that branch raises NameError.
    """
    # Per task: [class-0 labels, class-1 labels, labels to skip].
    test_labels = {'bias':[['L','LC'],['R','RC'],['na','C']],'cred':[['LOW','VERY LOW'],['HIGH', 'VERY HIGH'],['na','MIXED']]}
    article_ids = []
    associated_labels = []
    associated_domains = []
    i2l = {}
    for i,x in enumerate(dataset):
        sdom = i2s[i]
        if labels[sdom][LABELNAME] not in test_labels[LABELNAME][2]:
            if labels[sdom][LABELNAME] in test_labels[LABELNAME][0]:
                associated_labels.append(0)
                article_ids.append(i)
                i2l[i] = 0
                associated_domains.append(sdom)
            elif labels[sdom][LABELNAME] in test_labels[LABELNAME][1]:
                associated_labels.append(1)
                article_ids.append(i)
                i2l[i] = 1
                associated_domains.append(sdom)
            elif LABELNAME == 'cred' and labels[sdom]['flag'] in flag:
                associated_labels.append(0)
                article_ids.append(i)
                i2l[i] = 0
                associated_domains.append(sdom)
    return article_ids, associated_labels, associated_domains, i2l | 3f65f75e1d8fcde54babd5d236dc179955fb3fe7 | 3,631,667
def hello_world(text: str) -> str:
    """Write *text* to stdout and hand it back unchanged."""
    print(text)
    return text  # | 7bfcb8e9cfccdf5fad8c702f97f6b7c4e56c7682 | 3,631,668
def NestedGroupKFold(model, X, y, parameter_grid, groups, class_weights, scorer=make_scorer(accuracy_score),
                     inner_cv=GroupKFold(n_splits=4), outer_cv=GroupKFold(n_splits=4)):
    """
    Implements a nested version of GroupKFold cross-validation using GridSearchCV to evaluate models
    that need hyperparameter tuning in settings where different groups exist in the available data.
    Dependencies: sklearn.model_selection, sklearn.metrics, numpy
    Input:
        - X, y: features and labels (must be NumPy arrays).
        - model, parameter_grid: the model instance and its parameter grid to be optimized.
        - groups: the groups to use in both inner- and outer loop.
        - scorer: the scoring to use in inner loop (default: accuracy).
        - inner_cv, outer_cv: the iterators for both CV-loops (default: GroupKFold(n_splits=4))
        - class_weights: class weights to account for class imbalance in performance measurements
    Output: average of [accuracy, recall, precision, f1] over all CV-runs.

    NOTE(review): the default ``scorer``/``inner_cv``/``outer_cv`` objects
    are created once at import time and shared across calls -- confirm
    that is intended.
    """
    # define empty matrix to store performances (n CV runs and four performance metrics)
    n_splits_outer = outer_cv.get_n_splits()
    performances = np.zeros((n_splits_outer, 4))
    # define outer loop
    loop = 0
    for train_outer, test_outer in outer_cv.split(X, y, groups):
        X_train, X_test = X[train_outer], X[test_outer]
        y_train, y_test = y[train_outer], y[test_outer]
        groups_train, groups_test = groups[train_outer], groups[test_outer]
        # define inner loop (in GridSearchCV)
        tuned_model = GridSearchCV(model, cv=inner_cv, param_grid=parameter_grid, scoring=scorer)
        tuned_model.fit(X_train, y_train, groups=groups_train)
        # make predictions for test set (outer loop)
        y_pred = tuned_model.predict(X_test)
        # evaluate performance (factoring in class imbalance)
        recall_list = list(recall_score(y_test, y_pred, average=None))
        precision_list = list(precision_score(y_test, y_pred, average=None))
        f1_list = list(f1_score(y_test, y_pred, average=None))
        accuracy = accuracy_score(y_test, y_pred)
        # class-weighted averages of the per-class metrics
        recall = sum([a*b for a,b in zip(recall_list, class_weights)])/sum(class_weights)
        precision = sum([a*b for a,b in zip(precision_list, class_weights)])/sum(class_weights)
        f1 = sum([a*b for a,b in zip(f1_list, class_weights)])/sum(class_weights)
        performances[loop,:] = [accuracy, recall, precision, f1]
        # next loop
        loop += 1
    average_performances = performances.mean(0)
    return average_performances | 7fec0ff05ee002212432cb6fee414013ba079e6d | 3,631,669
def create_vespa_query(query, text_processor, number_videos):
    """
    Create the body of a Vespa query.
    :param query: a string representing the query.
    :param text_processor: an instance of `TextProcessor` to convert string to embedding.
    :param number_videos: Number of videos to return.
    :return: body of a Vespa query request.
    """
    valid_vespa_model_name = translate_model_names_to_valid_vespa_field_names(
        text_processor.model_name
    )
    # Field and rank-profile names are derived from the sanitised model name.
    image_field_name = valid_vespa_model_name + "_image"
    text_field_name = valid_vespa_model_name + "_text"
    ranking_name = valid_vespa_model_name + "_similarity"
    return {
        "yql": 'select * from sources * where ({{"targetNumHits":100}}nearestNeighbor({},{})) | all(group(video_file_name) max({}) order(-max(relevance())) each( max(1) each(output(summary())) as(frame)) as(video))'.format(
            image_field_name, text_field_name, number_videos
        ),
        "hits": 0,
        # Query embedding is computed on the fly from the free-text query.
        "ranking.features.query({})".format(text_field_name): text_processor.embed(
            query
        ),
        "ranking.profile": ranking_name,
        "timeout": 10,
    } | b5d5ead2b31244220a41474758b463910e9d8e9a | 3,631,670
def _thumb_from_pixel_clusters(images, mask, h, w, use_distance_from_centroid=False):
    """ Alternate implementation. Results are sharper, but noisier

    Clusters the unmasked pixels with k-means, assigns each cluster one
    of a fixed set of RGBA colors and scales the color by a per-pixel
    intensity; returns an (h, w, 4) float32 image whose alpha channel is
    the mask.
    """
    # 6-color
    # colors = np.array([[1, 0, 1, 0], [1, 0, 0, 0], [1, 1, 0, 0],
    # [0, 1, 0, 0], [0, 1, 1, 0], [0, 0, 1, 0]],
    # dtype=np.float32)
    colors = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]], dtype=np.float32)
    num_clusters = len(colors)
    pixels = images.transpose()[np.flatnonzero(mask)] # Only include unmasked pixels
    kmeans = KMeans(num_clusters)
    kmeans.fit(pixels)
    labels = kmeans.labels_
    # Boolean matrix: labels_mask[c, p] is True when pixel p is in cluster c.
    labels_mask = labels[np.newaxis, :] == np.arange(num_clusters)[:, np.newaxis]
    if use_distance_from_centroid:
        # Get pixel intensity from inverse distance from centroid
        pixel_centroids = kmeans.cluster_centers_[labels]
        pixel_distance_from_centroid = np.sum((pixels - pixel_centroids) ** 2, axis=1) ** 0.5
        cluster_radii = np.average(
            pixel_distance_from_centroid[np.newaxis, :] * labels_mask, weights=labels_mask, axis=1
        )
        pixel_cluster_radii = cluster_radii[labels]
        pixel_intensity = 1 - pixel_distance_from_centroid / pixel_cluster_radii
    else:
        pixel_intensity = np.mean(pixels, axis=1)
    # Sort colors by total intensity for consistency, and normalize each label
    label_intensity = np.average(
        pixel_intensity[np.newaxis, :] * labels_mask, weights=labels_mask, axis=1
    )
    sorted_colors = colors[np.argsort(label_intensity)]
    pixel_vals = sorted_colors[labels] * pixel_intensity[:, np.newaxis]
    pixel_vals[:, :3] /= np.max(pixel_vals, axis=0)[:3]
    # Scatter the colored pixels back to the full image; alpha = mask.
    unmasked_pixel_vals = np.zeros((w * h, 4), dtype=np.float32)
    unmasked_pixel_vals[np.flatnonzero(mask)] = pixel_vals
    unmasked_pixel_vals[:, 3] = mask.ravel()
    return unmasked_pixel_vals.reshape((h, w, 4)) | a91440006ac42208ffaca71f6889f5b7ca1c9dcf | 3,631,671
def get_rtClock():
    """
    Instantiate and return a DS3231 real-time-clock object from the
    ``rtc`` hardware module.
    """
    return rtc.SDL_DS3231() | 9a854aba4e6986f7ed56fb058a368ad629d5c3e1 | 3,631,672
def create_draft(service, user_id, message_body):
    """Create and insert a draft email. Print the returned draft's message and id.

    Args:
        service: Authorized Gmail API service instance.
        user_id: User's email address. The special value "me"
        can be used to indicate the authenticated user.
        message_body: The body of the email message, including headers.

    Returns:
        Draft object, including draft id and message meta data, or None
        when the API call fails with an HttpError.
    """
    try:
        message = {'message': message_body}
        draft = service.users().drafts().create(userId=user_id, body=message).execute()
        print('Draft id: %s\nDraft message: %s' % (draft['id'], draft['message']))
        return draft
    except errors.HttpError as error:
        # Best-effort: log the failure and signal it with None.
        print('An error occurred: %s' % error)
        return None | 6ea595383349b74d5265b569b25a6e10b9748c6c | 3,631,673
def analyze_files(file_list):
    """return info for each file in a list that
    if it passed in analyzedir? passed in basedir? the results are the same?

    NOTE(review): relies on the module-level names ``analyze``,
    ``analyzedir`` and ``basedir``; under Python 3 the returned ``map``
    object is lazy and single-use -- confirm callers expect that.
    """
    result = map(lambda f: (f,)+analyze(f, analyzedir, basedir), file_list)
    return result | bd8be9c6a9047921670546eea5667c312e5fe26d | 3,631,674
from . import default_logger
import time
def profiling(func):
    """Decorator to mark a function for profiling. The time and memory usage will be recorded and printed.

    Example:
    .. highlight:: python
    .. code-block:: python

        @profiling
        def foo():
            print(1)

    """
    @wraps(func)
    def arg_wrapper(*args, **kwargs):
        # Snapshot wall-clock time and memory before the call ...
        start_t = time.perf_counter()
        start_mem = used_memory(unit=1)
        r = func(*args, **kwargs)
        # ... and again afterwards to report the deltas.
        elapsed = time.perf_counter() - start_t
        end_mem = used_memory(unit=1)
        # level_prefix = ''.join('-' for v in inspect.stack() if v and v.index is not None and v.index >= 0)
        level_prefix = ''
        mem_status = f'memory Δ {get_readable_size(end_mem - start_mem)} {get_readable_size(start_mem)} -> {get_readable_size(end_mem)}'
        default_logger.info(f'{level_prefix} {func.__qualname__} time: {elapsed}s {mem_status}')
        return r
    return arg_wrapper | 36c83e39743336be49436f4cfcc8891040288f54 | 3,631,675
import psutil
import math
def find_no_of_workers(maxworkers, sys_share=0):
    """
    Find the optimal number of workers for MP such that system does not crash.

    Parameters
    ----------
    maxworkers : int
        Maximum number of workers allowed.
    sys_share : float, optional
        Requested share of system memory to keep free (halved internally).

    Returns
    -------
    int
        Number of workers to use (between 1 and maxworkers).

    NOTE(review): the final ``else`` branch maps every computed value in
    [1.9, maxworkers] to ``maxworkers``, which makes the memory-based
    estimate irrelevant in that range -- confirm this is intended.
    """
    share_used = getattr(psutil.virtual_memory(), 'percent') / 100
    if sys_share >= share_used:
        sys_share = 0.9 * share_used
    sys_share = sys_share / 2
    # Estimate how many workers fit in the remaining memory budget.
    workers = (1-sys_share) / (share_used-sys_share)
    if workers > maxworkers:
        workers = maxworkers
    elif workers < 1.9:
        workers = 1
    else:
        workers = maxworkers
    # 1e-15 guards against floating-point values just below an integer.
    workers = math.floor(workers + 1e-15)
    return workers | ebb5140d6099ef6600a2373a12e18747ccdaddbd | 3,631,676
from typing import Callable
from typing import Dict
from typing import Any
import tqdm
from typing import Literal
def from_rdflib(
    graph: Graph,
    literal_cleaning_func: Callable = None,
    kg_name: str = None,
    multi_value: Callable = None,
) -> KG:
    """Create forayer knowledge graph object from rdflib graph.

    Parameters
    ----------
    graph : rdflib.Graph
        Rdflib graph to transform.
    literal_cleaning_func: Callable
        Function to preprocess literals,
        if None will simply cast to python types.
    kg_name : str
        How to name the knowledge graph object.
    multi_value : Callable
        How to handle multiple attribute values for an
        entity, attribute name combination.
        Default creates a set and adds to it

    Returns
    -------
    KG
        the transformed kg object
    """
    if literal_cleaning_func is None:
        literal_cleaning_func = cast_to_python_type
    if multi_value is None:
        multi_value = add_multi_value
    entities: Dict[str, Dict[str, Any]] = defaultdict(dict)
    rel: Dict[str, Dict[str, Any]] = defaultdict(dict)
    for stmt in tqdm(graph, desc="Transforming graph", total=len(graph)):
        s, p, o = stmt
        if isinstance(o, Literal):
            # Literal objects become (cleaned) attribute values ...
            value = literal_cleaning_func(o)
            if str(p) in entities[str(s)]:
                value = multi_value(entities[str(s)][str(p)], value)
            entities[str(s)][str(p)] = value
        else:
            # ... while resource objects become relations.
            rel[str(s)][str(o)] = str(p)
    return KG(entities=entities, rel=rel, name=kg_name) | a3dae2f4586603de9a5b52c45715591c31d386fc | 3,631,677
def video_player(obj):
    """
    Receives object with 'video' FileField and returns HTML5 player.
    """
    # Template context: expose the object under the key 'object'.
    return dict(object=obj)
def wait_for(scope, prompt):
    """
    Waits until the response of the remote host contains the given pattern.

    :type prompt: regex
    :param prompt: The prompt pattern.
    """
    connection = scope.get('__connection__')
    # Block until the remote output matches the prompt pattern.
    connection.expect(prompt)
    # Publish what was received into the calling scope.
    scope.define(__response__=connection.response)
    return True
from typing import Callable
from re import T
import requests
import time
def retry_temporary_errors(
    download_func: Callable[[SelfWithConfig, DownloadRequest], T]
) -> Callable[[SelfWithConfig, DownloadRequest], T]:
    """Decorator that retries a download on temporary network/server errors.

    Retries up to ``self.config.max_download_attempts`` times, sleeping
    ``self.config.download_sleep_time`` seconds before the first retry and
    tripling the wait after each failed attempt (exponential backoff).
    """
    backoff_coefficient = 3

    def new_download_func(self: SelfWithConfig, request: DownloadRequest) -> T:
        max_attempts = self.config.max_download_attempts
        wait_seconds = self.config.download_sleep_time
        for attempt_index in range(max_attempts):
            try:
                return download_func(self, request)
            except requests.RequestException as exception:
                # Only connection problems / timeouts and 5xx server errors
                # are considered retryable; everything else propagates.
                is_server_error = (
                    isinstance(exception, requests.HTTPError)
                    and exception.response.status_code >= requests.status_codes.codes.INTERNAL_SERVER_ERROR
                )
                if not (_is_temporary_problem(exception) or is_server_error):
                    raise exception from exception
                attempts_left = max_attempts - attempt_index - 1
                if attempts_left == 0:
                    message = _create_download_failed_message(exception, request.url)
                    raise DownloadFailedException(message, request_exception=exception) from exception
                LOGGER.debug(
                    "Download attempt failed: %s\n%d attempts left, will retry in %ds",
                    exception,
                    attempts_left,
                    wait_seconds,
                )
                time.sleep(wait_seconds)
                wait_seconds *= backoff_coefficient
        raise DownloadFailedException(
            "No download attempts available - configuration parameter max_download_attempts should be greater than 0"
        )

    return new_download_func
def get_first_order_indices(param_names, sim_id_params, all_results):
    """
    Sobol method - First Order Indices
    Equations from https://en.wikipedia.org/wiki/Variance-based_sensitivity_analysis
    TODO: CS: This is exploration and is not complete (or even correct).
    """
    # Pair each simulation's parameter grid with its scalar outcome
    # (mean of log "bg" values).
    outcomes = [
        (sim_id_params[sim_id], np.mean(np.log(all_results[sim_id]["bg"])))
        for sim_id in all_results.keys()
    ]
    first_order_variances = []
    for name in param_names:
        grouped = defaultdict(list)
        for grid, outcome in outcomes:
            grouped[grid[name]].append(outcome)
        # Variance of the conditional means E[Y | X_i = x].
        first_order_variances.append(np.var([np.mean(ys) for ys in grouped.values()]))
    total_variance = np.sum(first_order_variances)
    sensitivities = [variance / total_variance for variance in first_order_variances]
    return list(zip(param_names, sensitivities))
def render_list_categories():
    """
    METHOD=GET.
    Renders Category Page.

    Fetches all categories sorted alphabetically by name and renders
    the category list template with them.
    """
    all_categories = session.query(Category).order_by(asc(Category.name)).all()
    return render_template('categories/list.html', categories=all_categories)
import time
def perform_install(pspec, is_upgrade=False, force=False, quiet=False):
    """Install or upgrade the package described by ``pspec``.

    Args:
        pspec (PackageSpec): Package spec to install
        is_upgrade (bool): If True, intent is an upgrade (not a new install)
        force (bool): If True, check latest version even if recently checked
        quiet (bool): If True, don't chatter

    Returns:
        (pickley.TrackedManifest): Manifest is successfully installed (or was already up-to-date)
    """
    # Serialize concurrent installs/upgrades of the same package.
    with SoftLock(pspec):
        started = time.time()
        pspec.resolve()
        skip_reason = pspec.skip_reason(force)
        if skip_reason:
            inform("Skipping installation of %s: %s" % (pspec.dashed, runez.bold(skip_reason)))
            return None
        manifest = pspec.get_manifest()
        # Upgrading something that was never installed is an error (unless quiet).
        if is_upgrade and not manifest and not quiet:
            abort("'%s' is not installed" % runez.red(pspec))
        if not pspec.version:
            # No explicit version requested: resolve the latest desired one.
            desired = pspec.get_desired_version_info(force=force)
            if desired.problem:
                action = "upgrade" if is_upgrade else "install"
                abort("Can't %s %s: %s" % (action, pspec, runez.red(desired.problem)))
            pspec.version = desired.version
        # Short-circuit when already installed at the desired version and healthy.
        if not force and manifest and manifest.version == pspec.version and pspec.is_healthily_installed():
            if not quiet:
                status = "up-to-date" if is_upgrade else "installed"
                inform("%s v%s is already %s" % (pspec.dashed, runez.bold(pspec.version), status))
            pspec.groom_installation()
            return manifest
        setup_audit_log()
        manifest = PACKAGER.install(pspec)
        if manifest and not quiet:
            # Report what happened, including elapsed time; in dry-run mode
            # the message is prefixed to make clear nothing was changed.
            note = " in %s" % runez.represented_duration(time.time() - started)
            action = "Upgraded" if is_upgrade else "Installed"
            if runez.DRYRUN:
                action = "Would state: %s" % action
            inform("%s %s v%s%s" % (action, pspec.dashed, runez.bold(pspec.version), runez.dim(note)))
        # Dev-mode installs are not groomed (keeps the working copy intact).
        if not pspec._pickley_dev_mode:
            pspec.groom_installation()
        return manifest
import re
def tokens_to_str(message, section='body'):
    """ Takes one section of a message as specified by key param and
    returns it in string format to be joined with other messages
    for summarization, printing, id creation (future).
    """
    body = message[section]
    new_mess = ''
    # Case 1: body is a list of sentences (each sentence a list of tokens):
    # flatten to space-separated text, then normalize punctuation spacing.
    if isinstance(body[0], list):
        for sentence in body:
            for word in sentence:
                new_mess += (word + ' ')
        # put chars in list for easy processing
        interim_mess = []
        for char in new_mess:
            interim_mess.append(char)
        # push some chars to the left (drop the space before closing punctuation)
        # NOTE(review): popping from interim_mess while enumerate() iterates it
        # skips the character after each removal — confirm this is intended.
        for i, char in enumerate(interim_mess):
            if i>0:
                match = re.match('[!.,?)]', char)
                if match and interim_mess[i-1] == ' ':
                    interim_mess.pop(i-1)
        # push some chars to the right (drop the space after opening chars)
        for i, char in enumerate(interim_mess):
            if i>0:
                match = re.match('[$(]', interim_mess[i-1])
                if match and char == ' ':
                    interim_mess.pop(i)
    # Case 2: tags section — render as '#tag1 #tag2 ...'.
    elif section == 'tags':
        interim_mess = ' #'.join(body)
        interim_mess = '#' + interim_mess
    # Case 3: flat token list — simple space join.
    else:
        # put chars in list for easy processing
        interim_mess = ' '.join(body)
    return ''.join(interim_mess)
def azip(*aiterables):
    """async version of izip with parallel iteration

    Thin wrapper around ``_azip``: no fill value is used and iteration
    stops as soon as ANY input iterable is exhausted (``stop_any=True``),
    mirroring built-in ``zip`` semantics for async iterables.
    """
    return _azip(*aiterables, fillvalue=None, stop_any=True)
def collect_data(
    bids_dir,
    participant_label,
    bids_validate=True,
    bids_filters=None,
):
    """
    Uses pybids to retrieve the input data for a given participant

    Parameters
    ----------
    bids_dir : str or BIDSLayout
        Root of the BIDS dataset, or an already-indexed layout.
    participant_label : str
        Subject label (without the ``sub-`` prefix).
    bids_validate : bool
        Whether to run the BIDS validator when indexing ``bids_dir``.
    bids_filters : dict or None
        Extra BIDS entity filters, keyed by datatype (e.g. ``{'t1w': {'run': 1}}``).

    Examples
    --------
    >>> bids_root, _ = collect_data(str(datadir / 'ds054'), '100185',
    ...                             bids_validate=False)
    >>> bids_root['fmap']  # doctest: +ELLIPSIS
    ['.../ds054/sub-100185/fmap/sub-100185_magnitude1.nii.gz', \
'.../ds054/sub-100185/fmap/sub-100185_magnitude2.nii.gz', \
'.../ds054/sub-100185/fmap/sub-100185_phasediff.nii.gz']
    >>> bids_root['bold']  # doctest: +ELLIPSIS
    ['.../ds054/sub-100185/func/sub-100185_task-machinegame_run-01_bold.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-02_bold.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-03_bold.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-04_bold.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-05_bold.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-06_bold.nii.gz']
    >>> bids_root['sbref']  # doctest: +ELLIPSIS
    ['.../ds054/sub-100185/func/sub-100185_task-machinegame_run-01_sbref.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-02_sbref.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-03_sbref.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-04_sbref.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-05_sbref.nii.gz', \
'.../ds054/sub-100185/func/sub-100185_task-machinegame_run-06_sbref.nii.gz']
    >>> bids_root['t1w']  # doctest: +ELLIPSIS
    ['.../ds054/sub-100185/anat/sub-100185_T1w.nii.gz']
    >>> bids_root['t2w']  # doctest: +ELLIPSIS
    []
    >>> bids_root, _ = collect_data(str(datadir / 'ds051'), '01',
    ...                             bids_validate=False, bids_filters={'t1w':{'run': 1}})
    >>> bids_root['t1w']  # doctest: +ELLIPSIS
    ['.../ds051/sub-01/anat/sub-01_run-01_T1w.nii.gz']
    """
    # Accept a pre-built layout to avoid re-indexing the dataset.
    if isinstance(bids_dir, BIDSLayout):
        layout = bids_dir
    else:
        layout = BIDSLayout(str(bids_dir), validate=bids_validate)
    # Base BIDS query per datatype; bids_filters may narrow these further.
    queries = {
        "fmap": {"datatype": "fmap"},
        "sbref": {"datatype": "dwi", "suffix": "sbref"},
        "dwi": {"datatype": "dwi", "suffix": "dwi"},
        "flair": {"datatype": "anat", "suffix": "FLAIR"},
        "t2w": {"datatype": "anat", "suffix": "T2w"},
        "t1w": {"datatype": "anat", "suffix": "T1w"},
        "roi": {"datatype": "anat", "suffix": "roi"},
    }
    bids_filters = bids_filters or {}
    # NOTE(review): a bids_filters key that is not one of the datatypes
    # above raises KeyError here — confirm whether that is intended.
    for acq, entities in bids_filters.items():
        queries[acq].update(entities)
    # Run every query, returning sorted lists of NIfTI file paths.
    subj_data = {
        dtype: sorted(
            layout.get(
                return_type="file",
                subject=participant_label,
                extension=["nii", "nii.gz"],
                **query,
            )
        )
        for dtype, query in queries.items()
    }
    return subj_data, layout
def filter_features(input_features, **kwargs):
    """
    Filter a GeoJSON feature collection.

    Args:
        input_features: A Geojson feature collection (a dict with a
            non-empty "features" key).
    Returns:
        A json of two geojson feature collections: passed and failed;
        ``None`` when the input is not a dict or has no features.
    """
    # isinstance() replaces the Python-2-only ``type(x) is DictType`` check
    # (types.DictType no longer exists in Python 3) and also accepts dict
    # subclasses such as OrderedDict.
    if isinstance(input_features, dict):
        if input_features.get("features"):
            return iterate_geojson(input_features, **kwargs)
        # Dict without features: nothing to filter.
        return None
    print("The input_features are in a format {}, "
          "which is not compatible with filter_features. Should be dict.".format(type(input_features)))
    return None
import functools
def polygon_wrapper(func):
    """
    Decorator that brackets a polygon-drawing function with Context
    setup and teardown.

    Keyword arguments:
    func (function) -- the function to draw the polygon.
    """
    @functools.wraps(func)
    def wrapped(self, *args, **kwargs):
        """Prepare the Context, draw the polygon with its attributes,
        outline/fill it, then restore the Context."""
        # Remember the Context state so it can be restored afterwards.
        self.context.save()
        # Apply the polygon's attributes before drawing.
        self._init_attributes(**kwargs)
        outcome = func(self, *args, **kwargs)
        # Fill first (preserving the path) so the outline is stroked on top.
        if self.fill:
            self.context.fill_preserve()
        # Outline in the configured line color.
        self.calling_surface._set_color(self.line_color)
        self.context.stroke()
        # Put the Context back the way we found it.
        self.context.restore()
        return outcome
    return wrapped
def compose_base_find_query(user_id: str, administrator: bool, groups: list):
    """
    Compose a query for filtering reference search results based on user read rights.

    :param user_id: the id of the user requesting the search
    :param administrator: the administrator flag of the user requesting the search
    :param groups: the id group membership of the user requesting the search
    :return: a valid MongoDB query
    """
    # Administrators see everything: an empty query matches all documents.
    if administrator:
        return {}
    # Otherwise the user must be a group member, a listed user, or the owner.
    return {
        "$or": [
            {"groups.id": {"$in": groups}},
            {"users.id": user_id},
            {"user.id": user_id},
        ]
    }
def test_prevent_links():
    """Returning None from any callback should remove links or prevent them
    from being created."""
    def no_new_links(attrs, new=False):
        # Veto only freshly-created links.
        return None if new else attrs
    def no_old_links(attrs, new=False):
        # Veto only pre-existing links.
        return attrs if new else None
    def noop(attrs, new=False):
        return attrs
    source = 'a ex.mp <a href="http://example.com">example</a>'
    stripped = 'a <a href="http://ex.mp">ex.mp</a> example'
    cases = (
        ([noop], ('a <a href="http://ex.mp">ex.mp</a> '
                  '<a href="http://example.com">example</a>'), 'noop'),
        ([no_new_links, noop], source, 'no new, noop'),
        ([noop, no_new_links], source, 'noop, no new'),
        ([no_old_links, noop], stripped, 'no old, noop'),
        ([noop, no_old_links], stripped, 'noop, no old'),
        ([no_old_links, no_new_links], 'a ex.mp example', 'no links'),
    )
    def _check(callbacks, expected, msg):
        eq_(expected, linkify(source, callbacks), msg)
    # nose-style generator test: yield one check per callback combination.
    for callbacks, expected, msg in cases:
        yield _check, callbacks, expected, msg
def intersection(l, r):
    """Compute intersection of lists `l` and `r`, keeping `l`'s order.

    >>> intersection (range (4), range (2,5))
    [2, 3]
    """
    # A set makes each membership test O(1).
    members = set(r)
    return [item for item in l if item in members]
def filter_df_on_ncases(df, case_id_glue="case:concept:name", max_no_cases=1000):
    """
    Filter a dataframe keeping only the specified maximum number of cases

    Parameters
    -----------
    df
        Dataframe
    case_id_glue
        Case ID column in the CSV
    max_no_cases
        Maximum number of cases to keep

    Returns
    ------------
    df
        Filtered dataframe
    """
    # value_counts() is sorted by frequency (descending), so slicing its
    # index keeps the max_no_cases most frequent cases.
    case_frequencies = df[case_id_glue].value_counts()
    cases_to_keep = list(case_frequencies.index[:max_no_cases])
    return df[df[case_id_glue].isin(cases_to_keep)]
def welcome():
    """List all available api routes."""
    routes = (
        "/api/v1.0/precipitation",
        "/api/v1.0/stations",
        "/api/v1.0/tobs",
        "/api/v1.0/start",
        "/api/v1.0/start/end",
    )
    # Join with <br/> so each route renders on its own line in the browser.
    return "<br/>".join(routes)
import ftplib
import os
def pdbDownload(
    file_list, hostname=HOSTNAME, directory=DIRECTORY, prefix=PREFIX, suffix=SUFFIX
):
    """
    Download all pdb files in file_list from an FTP server and unzip them.

    Strips any ``.pdb`` extension from the entries of ``file_list`` in place,
    downloads ``<directory>/<prefix><name><suffix>`` for each entry, unzips
    it to ``<name>.pdb``, and returns True only if every file was retrieved.
    """
    success = True
    # Log into server
    print("Connecting...")
    ftp = ftplib.FTP()
    ftp.connect(hostname)
    ftp.login()
    # Remove .pdb extensions from file_list (mutates the caller's list,
    # matching the original behaviour).
    for file_index, file in enumerate(file_list):
        try:
            file_list[file_index] = file[: file.index(".pdb")]
        except ValueError:
            pass
    # Download all files in file_list
    to_get = ["%s/%s%s%s" % (directory, prefix, f, suffix) for f in file_list]
    to_write = ["%s%s" % (f, suffix) for f in file_list]
    for i in range(len(to_get)):
        try:
            # ``with`` guarantees the local file handle is closed even when
            # the transfer fails part-way (previously it leaked).
            with open(to_write[i], "wb") as handle:
                ftp.retrbinary("RETR %s" % to_get[i], handle.write)
            final_name = "%s.pdb" % to_write[i][: to_write[i].index(".")]
            unZip(to_write[i], final_name)
            print("%s retrieved successfully." % final_name)
        except ftplib.error_perm:
            # Remote file unavailable: drop the (now closed) partial file.
            os.remove(to_write[i])
            print("ERROR! %s could not be retrieved!" % file_list[i])
            success = False
    # Log out
    ftp.quit()
    return success
def decode(s):
    """Decode bytes *s*: try utf-8 (BOM-aware), then utf-16, else latin-1."""
    for codec in ("utf-8-sig", "utf-16"):
        try:
            return s.decode(codec)
        except UnicodeDecodeError:
            pass
    # latin-1 maps every possible byte, so this final attempt cannot fail.
    return s.decode("latin-1")
def Quantum_Vibrational_S(Temperature, wavenumbers):
    """
    Quantum (harmonic) vibrational entropy at a given temperature.

    **Required Inputs
    Temperature = single temperature in Kelvin to determine the vibrational entropy (does not work at 0 K)
    wavenumbers = array of wavenumber (in order with the first three being 0 cm**-1 for the translational modes)
    """
    c = 2.998 * 10 ** 10  # Speed of light in cm/s
    h = 2.520 * 10 ** (-38)  # Reduced Plank's constant in cal*s
    k = 3.2998 * 10 ** (-27)  # Boltzmann constant in cal*K
    Na = 6.022 * 10 ** 23  # Avogadro's number
    beta = 1 / (k * Temperature)
    # Skip the three (sorted, zero) translational modes and any
    # non-positive wavenumbers; sum per-mode entropy contributions.
    ordered = np.sort(wavenumbers)
    contributions = [
        (k * h * w * c / (Temperature * (np.exp(h * w * c * beta) - 1))
         - k * np.log(1 - np.exp(-h * w * c * beta))) * Na
        for w in ordered[3:]
        if w > 0
    ]
    return sum(contributions)
def _open(full_path, state, year, variety, database='SID'):
    """Returns a handle using python's builtin open() function; however, it
    will skip past any non-content rows as specified within.

    This is necessary because HCUP files very occasionally have bonus
    content, typically a data use notice, and it mucks up parsing.
    """
    handle = open(full_path)
    # kludgy workaround for None values passed to _open()
    if state is None:
        state = ''
    if year is None:
        year = 0
    if variety is None:
        variety = ''
    # Normalized lookup key into the SKIP_ROWS table.
    screen = (str(database).upper(), str(state).upper(), int(year), str(variety).upper())
    if screen in SKIP_ROWS:
        # ``range`` replaces Python-2-only ``xrange`` (NameError on Python 3);
        # a plain loop also avoids building a throwaway list of skipped lines.
        for _ in range(SKIP_ROWS[screen]):
            handle.readline()
    return handle
def summon(self: Client, entity: str, pos: Vec3 = None,
           nbt: dict = None) -> str:
    """Summons an entity.

    Runs the ``summon`` command via ``self.run`` and returns its textual
    response.

    :param entity: identifier of the entity to summon.
    :param pos: optional spawn position (passed through as-is).
    :param nbt: optional NBT data for the new entity.
    """
    return self.run('summon', entity, pos, nbt)
def get_inner_html(node):
    """Gets the inner HTML of a node, including tags."""
    # Serialize every child element (tags included) and concatenate.
    serialized_children = ''.join(
        etree.tostring(child).decode('utf-8') for child in node
    )
    if node.text is None:
        return serialized_children
    # Prepend the node's own leading text, when present.
    return node.text + serialized_children
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.