| content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M) |
|---|---|---|
def validate_email_address(
value=_undefined,
allow_unnormalized=False,
allow_smtputf8=True,
required=True,
):
"""
Checks that a string represents a valid email address.
By default, only email addresses in fully normalized unicode form are
accepted.
    Validation logic is based on the well-written and thoroughly researched
[email-validator](https://pypi.org/project/email-validator/) library.
By default, `validate_email_address` will only accept email addresses in
the normalized unicode form returned by `email_validator.validate_email`.
Despite the conflict with this library's naming convention, we recommend
that you use `email-validator` for validation and sanitisation of untrusted
input.
:param str value:
The value to be validated.
:param bool allow_unnormalized:
Whether or not to accept addresses that are not completely normalized.
Defaults to False, as in most cases you will want equivalent email
addresses to compare equal.
:param bool allow_smtputf8:
Whether or not to accept email addresses with local parts that can't be
encoded as plain ascii. Defaults to True, as such email addresses are
now common and very few current email servers do not support them.
:param bool required:
Whether the value can be `None`. Defaults to `True`.
:raises TypeError:
If the value is not a unicode string.
:raises ValueError:
If the value is not an email address, or is not normalized.
"""
validate = _email_address_validator(
allow_unnormalized=allow_unnormalized,
allow_smtputf8=allow_smtputf8,
required=required,
)
if value is not _undefined:
validate(value)
else:
return validate
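
# A minimal usage sketch (assumption: the module-level helpers `_undefined`
# and `_email_address_validator` referenced above are available): passing a
# value validates it immediately, while omitting the value returns a reusable
# validator, e.g.
#
#     validate_email_address("user@example.com")
#     check = validate_email_address(required=False)
#     check(None)  # passes because required=False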
|
85b590186f2e147c6c19e91bace33f0301115d0e
| 3,641,100
|
import numpy
import math
def rotation_matrix_from_quaternion(quaternion):
"""Return homogeneous rotation matrix from quaternion."""
q = numpy.array(quaternion, dtype=numpy.float64)[0:4]
nq = numpy.dot(q, q)
if nq == 0.0:
return numpy.identity(4, dtype=numpy.float64)
q *= math.sqrt(2.0 / nq)
q = numpy.outer(q, q)
return numpy.array((
(1.0-q[1,1]-q[2,2], q[0,1]-q[2,3], q[0,2]+q[1,3], 0.0),
( q[0,1]+q[2,3], 1.0-q[0,0]-q[2,2], q[1,2]-q[0,3], 0.0),
( q[0,2]-q[1,3], q[1,2]+q[0,3], 1.0-q[0,0]-q[1,1], 0.0),
( 0.0, 0.0, 0.0, 1.0)
), dtype=numpy.float64)
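
# A quick sanity check (assumption: the element indexing above implies an
# (x, y, z, w) quaternion ordering; adjust if your convention differs).
# A 90-degree rotation about the z axis should map the x axis onto the y axis.
if __name__ == "__main__":
    s = math.sqrt(0.5)
    R = rotation_matrix_from_quaternion([0.0, 0.0, s, s])  # (x, y, z, w)
    assert numpy.allclose(R[:3, :3].dot([1.0, 0.0, 0.0]), [0.0, 1.0, 0.0])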
|
51b169ffa702e3798f7a6138271b415b369566ba
| 3,641,101
|
import os
from urllib.parse import urlparse


def get_woosh_dir(url, whoosh_base_dir):
    """
    Generate the path of the whoosh index directory for this bed file,
    based on the bigbed url and the base whoosh directory from settings.
    """
path = urlparse(url).path
filename = path.split('/')[-1]
whoosh_dir = os.path.join(whoosh_base_dir, filename)
return whoosh_dir
|
d1429daaf419937cd751514520117acd51cfa91c
| 3,641,102
|
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry


def get_metadata(doi):
    """Extract additional metadata of a paper based on its DOI."""
    headers = {"accept": "application/x-bibtex"}
    title, year, journal = '', '', ''
    session = requests.Session()
    retry = Retry(connect=3, backoff_factor=0.5)
    adapter = HTTPAdapter(max_retries=retry)
    session.mount('http://', adapter)
    session.mount('https://', adapter)
    try:
        # Use the session so that the retry adapter configured above is applied.
        response = session.get("http://dx.doi.org/" + doi, headers=headers)
    except requests.exceptions.ConnectionError:
        print("ConnectionError")
        return title, year, journal
    if response.status_code != 200:
        print('Did not find ' + doi + ' article, error code ' + str(response.status_code))
    else:
        # The BibTeX response separates fields with newline + tab; drop the entry header.
        fields = response.text.split('\n\t')[1:]
        for field in fields:
            if field.startswith("year ="):
                year = field[7:-1]
            if field.startswith("title ="):
                title = field[9:-2]
            if field.startswith("journal ="):
                journal = field[11:-3]
    return title, year, journal
|
8b7fee95ca247b0ffebfa704628be8a4659dd008
| 3,641,103
|
def format_channel(channel):
""" Returns string representation of <channel>. """
if channel is None or channel == '':
return None
elif type(channel) == int:
return 'ch{:d}'.format(channel)
elif type(channel) != str:
raise ValueError('Channel must be specified in string format.')
elif 'ch' in channel:
return channel
elif channel.lower() in 'rgb':
return format_channel('rgb'.index(channel.lower()))
elif channel.lower() in ('red', 'green', 'blue'):
return format_channel('rgb'.index(channel.lower()[0]))
else:
raise ValueError('Channel string not recognized.')
|
4eeb42899762d334599df831b7520a956998155a
| 3,641,104
|
def download_emoji_texture(load=True): # pragma: no cover
"""Download emoji texture.
Parameters
----------
load : bool, optional
Load the dataset after downloading it when ``True``. Set this
to ``False`` and only the filename will be returned.
Returns
-------
pyvista.Texture or str
DataSet or filename depending on ``load``.
Examples
--------
>>> from pyvista import examples
>>> dataset = examples.download_emoji_texture()
>>> dataset.plot(cpos="xy")
"""
return _download_and_read('emote.jpg', texture=True, load=load)
|
3ff7805e6ab4f18d0064938f8863cbbe395a7c78
| 3,641,105
|
import random
def shuffle_sequence(sequence: str) -> str:
"""Shuffle the given sequence.
Randomly shuffle a sequence, maintaining the same composition.
Args:
sequence: input sequence to shuffle
Returns:
tmp_seq: shuffled sequence
"""
tmp_seq: str = ""
while len(sequence) > 0:
max_num = len(sequence)
rand_num = random.randrange(max_num)
tmp_char = sequence[rand_num]
tmp_seq += tmp_char
tmp_str_1 = sequence[:rand_num]
tmp_str_2 = sequence[rand_num + 1:]
sequence = tmp_str_1 + tmp_str_2
return tmp_seq
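
# A small illustration of the documented invariant: shuffling changes the
# order but preserves the composition of the sequence.
if __name__ == "__main__":
    from collections import Counter
    original = "AACGTTGA"
    shuffled = shuffle_sequence(original)
    assert Counter(shuffled) == Counter(original)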
|
9e833aed9e5a17aeb419a77176713e76566d2d06
| 3,641,106
|
import numpy as np


def bayesian_twosample(countsX, countsY, prior=None):
"""
Calculates a Bayesian-like two-sample test between `countsX` and `countsY`.
The idea is taken from [1]_. We assume the counts are generated IID. Then
we use Dirichlet prior to infer the underlying discrete distribution.
    In the null hypothesis, we say that `countsX` and `countsY` were generated
    from the same underlying distribution q. The alternative hypothesis is that
    `countsX` and `countsY` were generated by different distributions.
A log Bayes factor is calculated:
\chi = log P(X, Y | H_1) - log P(X, Y | H_0)
If \chi is greater than 0, then reject the null hypothesis.
To calculate P(X, Y | H_1), we calculate the product of evidences from
two independent Bayesian inferences. That is, P(X)P(Y). To calculate
P(X, Y | H_0), we combine the counts and calculate the evidence from a
single Bayesian inference.
Parameters
----------
countsX : array-like, shape (n,)
The counts for X.
countsY : array-like, shape (n,)
The counts for Y.
prior : array-like, shape (n,)
The Dirichlet hyper-parameters to use during inference. If `None`, we
use Jeffrey's prior.
Returns
-------
reject : bool
If `True`, then the null hypothesis is rejected and the counts should
be considered as generated from different distributions.
chi : float
The base-2 logarithm of the evidence ratio. If this value is greater
than 0, then we reject the null hypothesis.
Examples
--------
>>> bayesian_twosample([1,10], [2,3])
(True, 0.11798407303051839)
>>> bayesian_twosample([1,30], [20,30])
(True, 9.4347501426274931)
References
----------
.. [1] Karsten M. Borgwardt and Zoubin Ghahramani, "Bayesian two-sample
tests". http://arxiv.org/abs/0906.4032
"""
if prior is None:
# Use Jeffrey's prior for Dirichlet distributions.
prior = np.ones(len(countsX)) * 0.5
countsX = np.asarray(countsX)
countsY = np.asarray(countsY)
chi = log_evidence(countsX, prior) + log_evidence(countsY, prior)
chi -= log_evidence(countsX + countsY, prior)
reject = chi > 0
return reject, chi
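
# The snippet relies on a `log_evidence` helper that is not shown. A possible
# definition (an assumption, not the original code) is the Dirichlet marginal
# likelihood log2[B(counts + prior) / B(prior)], where B is the multivariate
# beta function; this is consistent with the chi values quoted in the
# docstring examples above.
def _log_evidence_sketch(counts, prior):
    from scipy.special import gammaln
    a = np.asarray(counts, dtype=float) + np.asarray(prior, dtype=float)
    log_beta_posterior = gammaln(a).sum() - gammaln(a.sum())
    log_beta_prior = gammaln(np.asarray(prior, dtype=float)).sum() - gammaln(np.sum(prior))
    return (log_beta_posterior - log_beta_prior) / np.log(2.0)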
|
fc64ba0d6e64ffcd7d57d51b3ea0f4967d4e5096
| 3,641,107
|
import torch
from PIL import Image
def load_image(image_path):
"""
loads an image from the specified image path
:param image_path:
:return: the loaded image
"""
image = Image.open(image_path)
image = loader(image)
image = image.unsqueeze(0)
image = image.to(device, torch.float)
return image
|
064f9a4a71fdf9347a455269bf673bd2952dd9b2
| 3,641,108
|
import json
def feed_reader(url):
"""Returns json from feed url"""
content = retrieve_feed(url)
d = feed_parser(content)
json_string = json.dumps(d, ensure_ascii=False)
return json_string
|
47726236afe429ab29720e917238960b17891938
| 3,641,109
|
def create_app(env_name):
"""
Create app
"""
    # app initialization
app = Flask(__name__)
app.config.from_object(app_config[env_name])
cors = CORS(app)
# initializing bcrypt and db
bcrypt.init_app(app)
db.init_app(app)
app.register_blueprint(book_blueprint, url_prefix='/api/v1/books')
@app.route('/', methods=['GET'])
def index():
"""
example endpoint
"""
return 'Congratulations! Your part 2 endpoint is working'
return app
|
b694e8867b0bd3efefff0f7cae753b0ede91a36c
| 3,641,110
|
def get_shed_tool_conf_dict( app, shed_tool_conf ):
"""Return the in-memory version of the shed_tool_conf file, which is stored in the config_elems entry in the shed_tool_conf_dict associated with the file."""
for index, shed_tool_conf_dict in enumerate( app.toolbox.shed_tool_confs ):
if shed_tool_conf == shed_tool_conf_dict[ 'config_filename' ]:
return index, shed_tool_conf_dict
else:
file_name = strip_path( shed_tool_conf_dict[ 'config_filename' ] )
if shed_tool_conf == file_name:
return index, shed_tool_conf_dict
|
21eae3a037498758425b29f10110f8e4e8ad24ff
| 3,641,111
|
import time

import numpy as np
def get_column_interpolation_dust_raw(key, h_in_gp1, h_in_gp2, index,
                                      mask1, mask2, step1_a, step2_a,
                                      target_a, dust_factors,
                                      kdtree_index=None,
                                      luminosity_factors=None,
                                      cache=False, snapshot=False):
    """This function returns the interpolated quantity between two
    timesteps, from step1 to step2. Some galaxies are masked out: any
    galaxy that doesn't pass the mask in step1 (mask1), any galaxy
    that doesn't have a descendant in step2, and any galaxy whose
    descendant doesn't pass the step2 mask (mask2).
    """
print("\tLoading key: {}".format(key))
# if luminosity_factors is None:
# print("\t\tluminosity factors is none")
#print("dust_factors: ", dust_factors)
t1 = time.time()
step_del_a = step2_a - step1_a
target_del_a = target_a - step1_a
##=========DEBUG==========
# print("step del_a {:.3f} - {:.3f} = {:.3f}".format(step2_a, step1_a, step_del_a))
# print("target_del_a {:.3f} - {:.3f} = {:.3f}".format(target_a, step1_a, target_del_a))
##=========DEBUG==========
    # Mask out all galaxies that fail galmatcher's requirements at
    # step1, galaxies that don't have a descendant, and galaxies whose
    # descendant at step2 doesn't pass galmatcher's requirements.
    # If index is set to None, we aren't doing interpolation at all; we
    # are just using the step1 values.
if not snapshot:
mask_tot = mask1 & (index != -1) & mask2[index]
if (key in no_slope_var) or any(ptrn in key for ptrn in no_slope_ptrn):
#print('\t\tno interpolation')
data = h_in_gp1[key].value[mask_tot]
if kdtree_index is None:
val_out = data
else:
val_out = data[kdtree_index]
elif ":dustAtlas" in key:
#print('\t\tinterpolation with dust')
key_no_dust = key.replace(":dustAtlas","")
val1_no_dust = h_in_gp1[key_no_dust].value[mask_tot]
val1_dust = h_in_gp1[key].value[mask_tot]
if index is not None:
val2_no_dust = h_in_gp2[key].value[index][mask_tot]
else:
val2_no_dust = val1_no_dust
# val1_no_dust_lg = np.log(val1_no_dust)
# val1_dust_lg = np.log(val1_dust)
# val2_no_dust_lg = np.log(val2_no_dust)
dust_effect = val1_dust/val1_no_dust
dust_effect[val1_no_dust == 0] = 1
slope = (val2_no_dust - val1_no_dust)/step_del_a
slope[step_del_a ==0] =0
# slope_mag = (val2_no_dust_lg - val1_no_dust_lg)/step_del_a
# slope_mag[step_del_a == 0] = 0
##=======DEBUG=======
# def print_vals(label, data):
# print("\t\t{} below/zero/size: {}/{}/{}".format(label, np.sum(data<0), np.sum(data==0), data.size))
# print_vals("val1_no_dust", val1_no_dust)
# print_vals("val2_no_dust", val2_no_dust)
# print_vals("val1_dust", val1_dust)
##=======DEBUG=======
if kdtree_index is None:
tot_dust_effect = dust_effect**dust_factors
slct = dust_effect > 1.0
tot_dust_effect[slct] = dust_effect[slct]
val_out = (val1_no_dust + slope*target_del_a)*tot_dust_effect
#val_out = np.exp((val1_no_dust_lg + slope_mag*target_del_a)*tot_dust_effect)
else:
tot_dust_effect = (dust_effect[kdtree_index]**dust_factors)
slct = dust_effect[kdtree_index] > 1.0
tot_dust_effect[slct] = dust_effect[kdtree_index][slct]
val_out = (val1_no_dust[kdtree_index] + slope[kdtree_index]*target_del_a)*tot_dust_effect
#val_out = np.exp((val1_no_dust_lg[kdtree_index] + slope_mag[kdtree_index]*target_del_a)*tot_dust_effect)
else:
#print('\t\tinerpolation without dust')
val1_data = h_in_gp1[key].value[mask_tot]
val2_data = h_in_gp2[key].value[index][mask_tot]
# val1_data_lg = np.log(val1_data)
# val2_data_lg = np.log(val2_data)
slope = (val2_data - val1_data)/step_del_a
slope[step_del_a==0]=0
# slope_mag = (val1_data_lg-val2_data_lg)/step_del_a
# slope_mag[step_del_a==0]=0
if kdtree_index is None:
val_out = val1_data + slope*target_del_a
#val_out = np.log(val1_data_lg + slope_mag*target_del_a)
else:
val_out = val1_data[kdtree_index] + slope[kdtree_index]*target_del_a
#val_out = np.log(val1_data_lg[kdtree_index] + slope_mag[kdtree_index]*target_del_a)
#print('\t\t',val_out.dtype)
# If running on snapshot, we don't need to interpolate
else:
mask_tot = mask1
val1_data = h_in_gp1[key].value[mask_tot]
# reorder the data if it's a post-matchup index
if kdtree_index is None:
val_out = val1_data
else:
val_out = val1_data[kdtree_index]
if not(luminosity_factors is None):
if(any(l in key for l in luminosity_factors_keys)):
#print("\t\tluminosity adjusted")
val_out = val_out*luminosity_factors
elif('Luminosities' in key or 'Luminosity' in key):
#print("\t\tluminosity adjusted 2")
val_out = val_out*luminosity_factors
else:
pass
#print("\t\tluminosity untouched")
if np.sum(~np.isfinite(val_out))!=0:
        print(key, "has a non-finite value")
print("{:.2e} {:.2e}".format(np.sum(~np.isfinite(val_out)), val_out.size))
if ":dustAtlas" in key:
print(np.sum(~np.isfinite(val1_no_dust)))
print(np.sum(~np.isfinite(dust_effect)))
slct = ~np.isfinite(dust_effect)
print(val1_no_dust[slct])
print(val1_dust[slct])
            print(np.sum(~np.isfinite(slope)))
print(np.sum(~np.isfinite(target_del_a)))
if "emissionLines" in key:
print("overwriting non-finite values with 0")
val_out[~np.isfinite(val_out)]=0.0
else:
raise
#print("\t\toutput size: {:.2e}".format(val_out.size))
print("\t\t mask size:{:.1e}/{:.1e} data size:{:.1e} read + format time: {}".format(np.sum(mask_tot), mask_tot.size, val_out.size, time.time()-t1))
##=======DEBUG======
# print("\t\t non-finite: {}/{}/{}".format(np.sum(~np.isfinite(val_out)), np.sum(val_out<0), val_out.size))
# print("\t\t below/zero/size: {}/{}/{}".format(np.sum(val_out<0), np.sum(val_out==0), val_out.size))
##=======DEBUG======
return val_out
|
d7f0c221cac7c2d72352db61f04c7b7efdd994e1
| 3,641,112
|
import os
def _get_next_foldername_index(name_to_check,dir_path):
"""Finds folders with name_to_check in them in dir_path and extracts which one has the hgihest index.
Parameters
----------
name_to_check : str
The name of the network folder that we want to look repetitions for.
dir_path : str
The folder where we want to look for network model repetitions.
Returns
-------
str
If there are no name matches, it returns the string '1'. Otherwise, it returns str(highest index found + 1)
"""
dir_content = os.listdir(dir_path)
    dir_name_indexes = [int(item.split('.')[-1]) for item in dir_content
                        if os.path.isdir(os.path.join(dir_path, item)) and name_to_check in item]  # extract the counter in the folder name so we can find the maximum
if len(dir_name_indexes) == 0:
return '1'
else:
highest_idx = max(dir_name_indexes)
return str(highest_idx + 1)
#find all folders that have name_to_check in them:
|
c8239f8ae8a34c2ae8432e7a9482f169ce0962ce
| 3,641,113
|
def oio_make_subrequest(env, method=None, path=None, body=None, headers=None,
agent='Swift', swift_source=None,
make_env=oio_make_env):
"""
Same as swift's make_subrequest, but let some more headers pass through.
"""
return orig_make_subrequest(env, method=method, path=path, body=body,
headers=headers, agent=agent,
swift_source=swift_source,
make_env=make_env)
|
f454b027243f2e72b9e59c50dfc3819a22b887d6
| 3,641,114
|
from typing import Callable

from easydict import EasyDict
from torch.utils.data import DataLoader, Dataset
def offline_data_fetcher(cfg: EasyDict, dataset: Dataset) -> Callable:
"""
Overview:
The outer function transforms a Pytorch `Dataset` to `DataLoader`. \
The return function is a generator which each time fetches a batch of data from the previous `DataLoader`.\
Please refer to the link https://pytorch.org/tutorials/beginner/basics/data_tutorial.html \
and https://pytorch.org/docs/stable/data.html for more details.
Arguments:
- cfg (:obj:`EasyDict`): Config which should contain the following keys: `cfg.policy.learn.batch_size`.
- dataset (:obj:`Dataset`): The dataset of type `torch.utils.data.Dataset` which stores the data.
"""
# collate_fn is executed in policy now
dataloader = DataLoader(dataset, batch_size=cfg.policy.learn.batch_size, shuffle=True, collate_fn=lambda x: x)
def _fetch(ctx: "OfflineRLContext"):
"""
Overview:
Every time this generator is iterated, the fetched data will be assigned to ctx.train_data. \
After the dataloader is empty, the attribute `ctx.train_epoch` will be incremented by 1.
Input of ctx:
- train_epoch (:obj:`int`): Number of `train_epoch`.
Output of ctx:
- train_data (:obj:`List[Tensor]`): The fetched data batch.
"""
while True:
for i, data in enumerate(dataloader):
ctx.train_data = data
yield
ctx.train_epoch += 1
# TODO apply data update (e.g. priority) in offline setting when necessary
return _fetch
|
88d7a8d58bb9a77e4656c1b1294e793e6d32f829
| 3,641,115
|
from typing import Optional

import pulumi
def get_channel(id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetChannelResult:
"""
Resource schema for AWS::MediaPackage::Channel
:param str id: The ID of the Channel.
"""
__args__ = dict()
__args__['id'] = id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws-native:mediapackage:getChannel', __args__, opts=opts, typ=GetChannelResult).value
return AwaitableGetChannelResult(
arn=__ret__.arn,
description=__ret__.description,
egress_access_logs=__ret__.egress_access_logs,
hls_ingest=__ret__.hls_ingest,
ingress_access_logs=__ret__.ingress_access_logs)
|
28ca816326203ca37cf514dade19ce9b6205144d
| 3,641,116
|
import torch
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return torch.nn.functional.relu
if activation == "gelu":
return torch.nn.functional.gelu
if activation == "glu":
return torch.nn.functional.glu
raise RuntimeError(F"activation should be relu/gelu, not {activation}.")
|
ecc690e9b9ec6148b6ea8df4bd08ff2d0c1c322e
| 3,641,117
|
def make_05dd():
"""移動ロック終了(イベント終了)"""
return ""
|
bb85e01a4a4515ac88688690cacd67e7c9351034
| 3,641,118
|
import json
def lambda_handler(request, context):
"""Main Lambda handler.
Since you can expect both v2 and v3 directives for a period of time during the migration
and transition of your existing users, this main Lambda handler must be modified to support
both v2 and v3 requests.
"""
try:
logger.info("Directive:")
logger.info(json.dumps(request, indent=4, sort_keys=True))
logger.info("Received v3 directive!")
if request["directive"]["header"]["name"] == "Discover":
response = handle_discovery_v3(request)
else:
response = handle_non_discovery_v3(request)
logger.info("Response:")
logger.info(json.dumps(response, indent=4, sort_keys=True))
#if version == "3":
#logger.info("Validate v3 response")
#validate_message(request, response)
return response
except ValueError as error:
logger.error(error)
raise
|
8eddd8ac2a47ecbc9c15f2644da6a2c7c6575371
| 3,641,119
|
import numpy as np
import theano.tensor as T
def batch_flatten(x):
"""Turn a n-D tensor into a 2D tensor where
the first dimension is conserved.
"""
y = T.reshape(x, (x.shape[0], T.prod(x.shape[1:])))
if hasattr(x, '_keras_shape'):
if None in x._keras_shape[1:]:
y._keras_shape = (x._keras_shape[0], None)
else:
y._keras_shape = (x._keras_shape[0], np.prod(x._keras_shape[1:]))
return y
|
6543f3056d9e08b382cbd9e0ac1df6df84c59716
| 3,641,120
|
import typing
def msg_constant_to_behaviour_type(value: int) -> typing.Any:
"""
Convert one of the behaviour type constants in a
:class:`py_trees_ros_interfaces.msg.Behaviour` message to
a type.
Args:
value: see the message definition for details
Returns:
a behaviour class type (e.g. :class:`py_trees.composites.Sequence`)
Raises:
TypeError: if the message type is unrecognised
"""
if value == py_trees_ros_interfaces.msg.Behaviour.SEQUENCE:
return py_trees.composites.Sequence
elif value == py_trees_ros_interfaces.msg.Behaviour.SELECTOR:
return py_trees.composites.Selector
elif value == py_trees_ros_interfaces.msg.Behaviour.PARALLEL:
return py_trees.composites.Parallel
elif value == py_trees_ros_interfaces.msg.Behaviour.DECORATOR:
return py_trees.decorators.Decorator
elif value == py_trees_ros_interfaces.msg.Behaviour.BEHAVIOUR:
return py_trees.behaviour.Behaviour
else:
raise TypeError("invalid type specified in message [{}]".format(value))
|
bdb2342ad9abbbc6db51beebcb82313799bc110e
| 3,641,121
|
from scipy.stats import skew


def calc_skewness(sig):
    """Compute the skewness of the given signal.
    Parameters
    ----------
    sig : ndarray
        Input from which the skewness is computed.
    Returns
    -------
    s : float
        Skewness result.
    """
    return skew(sig)
|
cad86d0a358a9bfe1891411522a6281e1eb291ed
| 3,641,122
|
import os
from aiida.common import UniquenessError, NotExistent
from aiida.orm.querybuilder import QueryBuilder
from .otfg import OTFGGroup
def upload_usp_family(folder,
group_label,
group_description,
stop_if_existing=True):
"""
Upload a set of usp/recpot files in a give group
:param folder: a path containing all UPF files to be added.
Only files ending in .usp/.recpot are considered.
:param group_label: the name of the group to create. If it exists and is
non-empty, a UniquenessError is raised.
:param group_description: a string to be set as the group description.
Overwrites previous descriptions, if the group was existing.
:param stop_if_existing: if True, check for the md5 of the files and,
if the file already exists in the DB, raises a MultipleObjectsError.
If False, simply adds the existing UPFData node to the group.
"""
#from aiida.common import aiidalogger
files = [
os.path.realpath(os.path.join(folder, i)) for i in os.listdir(folder)
if os.path.isfile(os.path.join(folder, i)) and (
i.lower().endswith('.usp') or i.lower().endswith('recpot')
or i.lower().endswith('.uspcc'))
]
nfiles = len(files)
try:
group = OTFGGroup.get(label=group_label)
group_created = False
except NotExistent:
group = OTFGGroup(label=group_label, )
group_created = True
    # Update the description even if the group already existed
group.description = group_description
pseudo_and_created = [] # A list of records (UspData, created)
for f in files:
md5sum = md5_file(f)
qb = QueryBuilder()
qb.append(UspData, filters={'attributes.md5': {'==': md5sum}})
existing_usp = qb.first()
        # Add the file if it is not already in the database
if existing_usp is None:
pseudo, created = UspData.get_or_create(f,
use_first=True,
store_usp=False)
pseudo_and_created.append((pseudo, created))
# The same file is there already
else:
if stop_if_existing:
raise ValueError("A usp/recpot with identical MD5 to"
" {} cannot be added with stop_if_existing"
"".format(f))
existing_usp = existing_usp[0]
pseudo_and_created.append((existing_usp, False))
# Check for unique per element
elements = [(i[0].element, i[0].md5sum) for i in pseudo_and_created]
# Check if we will duplicate after insertion
if not group_created:
for aiida_n in group.nodes:
if not isinstance(aiida_n, UspData):
continue
elements.append((aiida_n.element, aiida_n.md5sum))
# Discard duplicated pairs
elements = set(elements)
elements_names = [e[0] for e in elements]
# Check the uniqueness of the complete group
if not len(elements_names) == len(set(elements_names)):
duplicates = set(
[x for x in elements_names if elements_names.count(x) > 1])
dup_string = ", ".join(duplicates)
raise UniquenessError(
"More than one usp/recpot found for the elements: " + dup_string +
".")
if group_created:
group.store()
# Save the usp in the database if necessary and add them to the group
for pseudo, created in pseudo_and_created:
if created:
pseudo.store()
#aiidalogger.debug("New node {} created for file {}".format(
# pseudo.uuid, pseudo.filename))
else:
#aiidalogger.debug("Reusing node {} for file {}".format(
# pseudo.uuid, pseudo.filename))
pass
    nodes_new = [
        pseudo for pseudo, created in pseudo_and_created if created is True
    ]
    nodes_add = [pseudo for pseudo, created in pseudo_and_created]
group.add_nodes(nodes_add)
return nfiles, len(nodes_new)
|
3a51d7bdedec47fc0334ee9295d2b95d5f518eb1
| 3,641,123
|
import pandas as pd
import pkgutil
def check_is_pandas_dataframe(log):
"""
Checks if a log object is a dataframe
Parameters
-------------
log
Log object
Returns
-------------
boolean
Is dataframe?
"""
if pkgutil.find_loader("pandas"):
return type(log) is pd.DataFrame
return False
|
93fa02445302cf695fd86beb3e4836d58660e376
| 3,641,124
|
def mk_creoson_post_sessionId(monkeypatch):
"""Mock _creoson_post return dict."""
def fake_func(client, command, function, data=None, key_data=None):
return "123456"
monkeypatch.setattr(
creopyson.connection.Client, '_creoson_post', fake_func)
|
c8f5fcec40d55e14a7c955a90f32eccf597179f3
| 3,641,125
|
def isUsernameFree(name):
"""Checks to see if the username name is free for use."""
global username_array
global username
for conn in username_array:
if name == username_array[conn] or name == username:
return False
return True
|
6a30766e35228e1ebea47c7f6d4f7f4f2832572d
| 3,641,126
|
import numpy as np


def get_quadrangle_dimensions(vertices):
"""
:param vertices:
A 3D numpy array which contains a coordinates of a quadrangle, it should look like this:
D---C
| |
A---B
[ [[Dx, Dy]], [[Cx, Cy]], [[Bx, By]], [[Ax, Ay]] ].
:return:
width, height (which are integers)
"""
temp = np.zeros((4, 2), dtype=int)
for i in range(4):
temp[i] = vertices[i, 0]
delta_x = temp[0, 0]-temp[1, 0]
delta_y = temp[0, 1]-temp[1, 1]
width1 = int((delta_x**2 + delta_y**2)**0.5)
delta_x = temp[1, 0] - temp[2, 0]
delta_y = temp[1, 1] - temp[2, 1]
width2 = int((delta_x**2 + delta_y**2)**0.5)
delta_x = temp[2, 0] - temp[3, 0]
delta_y = temp[2, 1] - temp[3, 1]
height1 = int((delta_x**2 + delta_y**2)**0.5)
delta_x = temp[3, 0] - temp[0, 0]
delta_y = temp[3, 1] - temp[0, 1]
height2 = int((delta_x**2 + delta_y**2)**0.5)
width = max(width1, width2)
height = max(height1, height2)
return width, height
|
25e74cf63237d616c1fd0b1b3428dd0763a27fba
| 3,641,127
|
from collections import defaultdict


def optimal_r(points, range_min, range_max):
    """
    Computes the optimal Vietoris-Rips parameter r for the given list of points.
    The parameter needs to be as small as possible while the VR complex still has exactly 1 component.
    :param points: list of tuples
    :param range_min: lower bound of the r values to try
    :param range_max: upper bound of the r values to try
    :return: the optimal r parameter, the corresponding VR complex, and a list of (r, n_components) tuples
    """
step = (range_max - range_min) / 100
components_for_r = []
vr = defaultdict(list)
r = range_min
while r < range_max:
vr = vietoris(points, r)
comps = findComponents([s[0] for s in vr[0]], vr[1])
print("\rr=",r,"components=",len(comps), end="")
components_for_r.append((r, len(comps)))
if (len(comps) == 1):
# we found the solution with smallest r
print("\rDone, r=", r, "n components=", len(comps))
return r, vr, components_for_r
r += step
# ideal solution not found, return the last complex
print("\rNo ideal r found, returning the last one")
return r, vr, components_for_r
|
950c8cc40de9ff9fbb0bd142461fc2710b54ead9
| 3,641,128
|
from pathlib import Path
def get_root_path():
"""
this is to get the root path of the code
:return: path
"""
path = str(Path(__file__).parent.parent)
return path
|
bc01b4fb15569098286fc24379a258300ff2dfa0
| 3,641,129
|
def _handle_requirements(hass: core.HomeAssistant, component,
name: str) -> bool:
"""Install the requirements for a component."""
if hass.config.skip_pip or not hasattr(component, 'REQUIREMENTS'):
return True
for req in component.REQUIREMENTS:
if not pkg_util.install_package(req, target=hass.config.path('deps')):
_LOGGER.error('Not initializing %s because could not install '
'dependency %s', name, req)
return False
return True
|
6608228271fe37d607e7980d6c4cfe876b45b853
| 3,641,130
|
from PIL import Image


def check_image(filename):
""" Check if filename is an image """
try:
im = Image.open(filename)
im.verify() # is it an image?
return True
except OSError:
return False
|
b2854195c83cc6ab20e967e25f3892a46d3c146f
| 3,641,131
|
import numpy as np
import torch


def encode_save(sig=np.random.random([1, nfeat, nsensors]), name='simpleModel_ini', dir_path="../src/vne/models"):
"""
This function will create a model, test it, then save a persistent version (a file)
Parameters
__________
sig:
a numpy array with shape (nsamples, nfeatures, nsensors). in general a single neural signal may contain multiple channels.
the multi-channel nature of the neural activations is a feature of the vne.
typically shaped with size (1,1,S) where S is number os sensors
the vne will map all the different channels to a single scalar encoded signal.
name:
string with the filename to save the model under
    dir_path:
        the local directory to save the model
Returns
--------
model:
A copy of the encoder model generated by the function
"""
model = simpleModel().eval()
model.apply(init_weights_simple)
sig = torch.tensor(sig.astype(np.float32)).to('cpu')
enc = model(sig)
print("signal={}".format(sig))
print("encoded={}".format(enc))
# save the model
model.apply(vne.init_weights_simple)
save_model(encoder=model, name=name, dir_path=dir_path)
return model
|
96fbeb853c66afd7f4a6c3a1a087faee64ec76d0
| 3,641,132
|
from typing import Set
from typing import Tuple
from typing import Counter
def _imports_to_canonical_import(
split_imports: Set[Tuple[str, ...]],
parent_prefix=(),
) -> Tuple[str, ...]:
"""Extract the canonical import name from a list of imports
We have two rules.
1. If you have at least 4 imports and they follow a structure like
'a', 'a.b', 'a.b.c', 'a.b.d'
this is treated as a namespace package with a canonical import of `a.b`
2. If you have fewer imports but they have a prefix that is found in
KNOWN_NAMESPACE_PACKAGES
you are also treated as a namespace package
3. Otherwise return the commonprefix
"""
prefix: Tuple[str, ...] = commonprefix(list(split_imports))
c = Counter(len(imp) for imp in split_imports)
if (
len(prefix) == 1
and c.get(1) == 1
and (
(len(split_imports) > 3)
or (parent_prefix + prefix in KNOWN_NAMESPACE_PACKAGES)
)
):
ns_prefix = _imports_to_canonical_import(
split_imports={imp[1:] for imp in split_imports if len(imp) > 1},
parent_prefix=parent_prefix + prefix,
)
if prefix and ns_prefix:
return prefix + ns_prefix
return prefix
|
e9cfd11b5837576ccc6f380463fe4b8ce9f4a63d
| 3,641,133
|
import os
def _bin_file(script):
"""Return the absolute path to scipt in the bin directory"""
return os.path.abspath(os.path.join(__file__, "../../../bin", script))
|
3b7dbe4061ff88bd42da91f854f561715df101df
| 3,641,134
|
import numpy as np


def score_hmm_logprob(bst, hmm, normalize=False):
"""Score events in a BinnedSpikeTrainArray by computing the log
probability under the model.
Parameters
----------
bst : BinnedSpikeTrainArray
hmm : PoissonHMM
normalize : bool, optional. Default is False.
If True, log probabilities will be normalized by their sequence
lengths.
Returns
-------
logprob : array of size (n_events,)
Log probabilities, one for each event in bst.
"""
logprob = np.atleast_1d(hmm.score(bst))
if normalize:
logprob = np.atleast_1d(logprob) / bst.lengths
return logprob
|
d20e5e75c875602c7ac2e3b0dadc5adcae45406d
| 3,641,135
|
def rotate_2d_list(squares_list):
"""
http://stackoverflow.com/questions/8421337/rotating-a-two-dimensional-array-in-python
"""
return [x for x in zip(*squares_list[::-1])]
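
# For example, rotating [[1, 2], [3, 4]] clockwise by 90 degrees gives
# [(3, 1), (4, 2)]; note that the rows come back as tuples.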
|
a6345ce6954643b8968ffb4c978395baf777fca4
| 3,641,136
|
def next_ticket(ticket):
"""Return the next ticket for the given ticket.
Args:
ticket (Ticket): an arbitrary ticket
Returns:
the next ticket in the chain of tickets, having the next
pseudorandom ticket number, the same ticket id, and a
generation number that is one larger.
Example:
>>> next_ticket(Ticket(ticket_number='0.26299714122838008416507544297546663599715395525154425586041245287750224561854', id='AB-130', generation=1))
Ticket(ticket_number='0.8232357229934205790595761924514048157652891124687533667363938813600770093316', id='AB-130', generation=2)
"""
return Ticket(next_fraction(ticket.ticket_number),
ticket.id,
ticket.generation+1)
|
7f5cfbd9992322e394a9e3f4316d654ef9ad6749
| 3,641,137
|
def icecreamParlor4(m, arr):
"""I forgot about Python's nested for loops - on the SAME array - and how
that's actually -possible-, and it makes things so much simplier.
It turns out, that this works, but only for small inputs."""
# Augment the array of data, so that it not only includes the item, but
# also the item's index into the array.
decorated_arr = []
for index, item in enumerate(arr):
decorated_arr.append((item, index))
# Iterate each combination of index, and see if conditions are met with
# them. There are 2 things we care about: both amounts equal to m, and
# they both aren't the same item.
for i in decorated_arr:
for j in decorated_arr:
            if (i[0] + j[0]) == m and i[1] != j[1]:
return [i[1] + 1, j[1] + 1]
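
# Example from the classic problem statement: with m = 4 and costs
# [1, 4, 5, 3, 2], the flavors costing 1 and 3 sum to m, so the (1-based)
# answer is [1, 4]:
#     icecreamParlor4(4, [1, 4, 5, 3, 2])  # -> [1, 4]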
|
6af2da037aa3e40c650ac48ebeb931399f1a6eaa
| 3,641,138
|
from torchvision import datasets, transforms
def get_data(filepath, transform=None, rgb=False):
"""
Read in data from the given folder.
Parameters
----------
filepath: str
Path for the file e.g.: F'string/containing/filepath'
transform: callable
A function which tranforms the data to the required format
rgb: bool
Image type for different channel types
Returns
-------
torchvision.datasets.folder.ImageFolder
Required data after read in
"""
if transform is None:
transform = transforms.Compose([transforms.ToTensor()])
if rgb:
# will read the data into 3 channels,
# majority of images are 1 channel only however
xray_data = datasets.ImageFolder(root=filepath,
transform=transform)
else:
# read in all images as one channel
xray_data = datasets.ImageFolder(root=filepath, transform=transform)
return xray_data
|
99eb506c9033e259ad5769d8eff10b7fd985a1d6
| 3,641,139
|
from ozmo import EcoVacsAPI, VacBot
def setup(hass, config):
"""Set up the Ecovacs component."""
_LOGGER.debug("Creating new Ecovacs component")
hass.data[ECOVACS_DEVICES] = []
hass.data[ECOVACS_CONFIG] = []
ecovacs_api = EcoVacsAPI(
ECOVACS_API_DEVICEID,
config[DOMAIN].get(CONF_USERNAME),
EcoVacsAPI.md5(config[DOMAIN].get(CONF_PASSWORD)),
config[DOMAIN].get(CONF_COUNTRY),
config[DOMAIN].get(CONF_CONTINENT),
)
devices = ecovacs_api.devices()
_LOGGER.debug("Ecobot devices: %s", devices)
for device in devices:
_LOGGER.info(
"Discovered Ecovacs device on account: %s with nickname %s",
device["did"],
device["nick"],
)
vacbot = VacBot(
ecovacs_api.uid,
ecovacs_api.REALM,
ecovacs_api.resource,
ecovacs_api.user_access_token,
device,
config[DOMAIN].get(CONF_CONTINENT).lower(),
monitor=True,
)
hass.data[ECOVACS_DEVICES].append(vacbot)
def stop(event: object) -> None:
"""Shut down open connections to Ecovacs XMPP server."""
for device in hass.data[ECOVACS_DEVICES]:
_LOGGER.info(
"Shutting down connection to Ecovacs device %s", device.vacuum["did"]
)
device.disconnect()
# Listen for HA stop to disconnect.
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop)
if hass.data[ECOVACS_DEVICES]:
_LOGGER.debug("Starting vacuum components")
dconfig = config[DOMAIN]
if len(dconfig.get(CONF_SUPPORTED_FEATURES)) == 0:
dconfig[CONF_SUPPORTED_FEATURES] = STRING_TO_SERVICE.keys()
if CONF_UNSUPPORTED_FEATURES in dconfig:
filtered_features = []
for supported_feature in dconfig.get(CONF_SUPPORTED_FEATURES):
if supported_feature not in dconfig.get(CONF_UNSUPPORTED_FEATURES):
filtered_features.append(supported_feature)
dconfig[CONF_SUPPORTED_FEATURES] = filtered_features
_LOGGER.debug("SUPPORTED FEATURES")
_LOGGER.debug(dconfig.get(CONF_SUPPORTED_FEATURES))
deebot_config = {
CONF_SUPPORTED_FEATURES: strings_to_services(dconfig.get(CONF_SUPPORTED_FEATURES), STRING_TO_SERVICE)
}
hass.data[ECOVACS_CONFIG].append(deebot_config)
_LOGGER.debug(hass.data[ECOVACS_CONFIG])
discovery.load_platform(hass, "vacuum", DOMAIN, {}, config)
return True
|
11c51d640f4e12f520a2ca7f8c3c4b8279392a7b
| 3,641,140
|
def read_input_files(input_file: str) -> frozenset[IntTuple]:
"""
Extracts an initial pocket dimension
which is a set of active cube 3D coordinates.
"""
with open(input_file) as input_fobj:
pocket = frozenset(
(x, y)
for y, line in enumerate(input_fobj)
for x, char in enumerate(line.strip())
if char == '#'
)
return pocket
|
c44e85e0c4f998e04b3d5e8b1e4dc30260102fce
| 3,641,141
|
def resnet101(pretrained=False, root='./pretrain_models', **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
|
ba77d75d52aa5ce92cc5a8bec09347c799ebc02a
| 3,641,142
|
def insert_left_side(left_side, board_string):
"""
Replace the left side of the Sudoku board 'board_string' with 'left_side'.
"""
# inputs should match in upper left corner
assert(left_side[0] == board_string[0])
# inputs should match in lower left corner
assert(left_side[8] == low_left_digit(board_string))
as_list = list(board_string)
for idx in range(9):
as_list[idx*9] = left_side[idx]
return "".join(as_list)
|
da607155e5c82a597f1ec62e6def3fbe31119c36
| 3,641,143
|
from tqdm import tqdm
import uuid
def collect_story_predictions(story_file, policy_model_path, nlu_model_path,
max_stories=None, shuffle_stories=True):
"""Test the stories from a file, running them through the stored model."""
def actions_since_last_utterance(tracker):
actions = []
for e in reversed(tracker.events):
if isinstance(e, UserUttered):
break
elif isinstance(e, ActionExecuted):
actions.append(e.action_name)
actions.reverse()
return actions
if nlu_model_path is not None:
interpreter = RasaNLUInterpreter(model_directory=nlu_model_path)
else:
interpreter = RegexInterpreter()
agent = Agent.load(policy_model_path, interpreter=interpreter)
stories = _get_stories(story_file, agent.domain,
max_stories=max_stories,
shuffle_stories=shuffle_stories)
preds = []
actual = []
logger.info("Evaluating {} stories\nProgress:".format(len(stories)))
for s in tqdm(stories):
sender = "default-" + uuid.uuid4().hex
dialogue = s.as_dialogue(sender, agent.domain)
actions_between_utterances = []
last_prediction = []
for i, event in enumerate(dialogue.events[1:]):
if isinstance(event, UserUttered):
p, a = _min_list_distance(last_prediction,
actions_between_utterances)
preds.extend(p)
actual.extend(a)
actions_between_utterances = []
agent.handle_message(event.text, sender=sender)
tracker = agent.tracker_store.retrieve(sender)
last_prediction = actions_since_last_utterance(tracker)
elif isinstance(event, ActionExecuted):
actions_between_utterances.append(event.action_name)
if last_prediction:
preds.extend(last_prediction)
preds_padding = len(actions_between_utterances) - \
len(last_prediction)
preds.extend(["None"] * preds_padding)
actual.extend(actions_between_utterances)
actual_padding = len(last_prediction) - \
len(actions_between_utterances)
actual.extend(["None"] * actual_padding)
return actual, preds
|
d47d83c62d119f99c5c3c74b490866ffc64dc3ed
| 3,641,144
|
def get_plaintext_help_text(testcase, config):
"""Get the help text for this testcase for display in issue descriptions."""
# Prioritize a HELP_FORMAT message if available.
formatted_help = get_formatted_reproduction_help(testcase)
if formatted_help:
return formatted_help
# Show a default message and HELP_URL if only it has been supplied.
help_url = get_reproduction_help_url(testcase, config)
if help_url:
return 'See %s for instructions to reproduce this bug locally.' % help_url
return ''
|
6ad30b86d75c694f4027aacb7a49607af02819d2
| 3,641,145
|
from select import select
def sniff(store=False, prn=None, lfilter=None,
count=0,
stop_event=None, refresh=.1, *args, **kwargs):
"""Sniff packets
sniff([count=0,] [prn=None,] [store=1,] [offline=None,] [lfilter=None,] + L2ListenSocket args)
    store: whether to store sniffed packets or discard them
    prn: function to apply to each packet. If something is returned,
         it is displayed.
         ex: prn = lambda x: x.summary()
lfilter: python function applied to each packet to determine
if further action may be done
ex: lfilter = lambda x: x.haslayer(Padding)
stop_event: Event that stops the function when set
refresh: check stop_event.set() every refresh seconds
"""
s = conf.L2listen(type=ETH_P_ALL, *args, **kwargs)
n = 0
lst = []
try:
while True:
if stop_event and stop_event.is_set():
break
sel = select([s], [], [], refresh)
if s in sel[0]:
p = s.recv(MTU)
if p is None:
break
if lfilter and not lfilter(p):
continue
if store:
lst.append(p)
if prn:
r = prn(p)
if r is not None:
print(r)
n += 1
if count and n == count:
break
except KeyboardInterrupt:
pass
finally:
s.close()
return plist.PacketList(lst, "Sniffed")
|
06309d056c0a68046025b806d91b7c7f0c5fdeb6
| 3,641,146
|
def generate_margined_binary_data ( num_samples, count, limits, rng ):
"""
Draw random samples from a linearly-separable binary model
with some non-negligible margin between classes. (The exact
form of the model is up to you.)
# Arguments
num_samples: number of samples to generate
(ie, the number of rows in the returned X
and the length of the returned y)
count: the number of feature dimensions
limits: a tuple (low, high) specifying the value
range of all the features x_i
rng: an instance of numpy.random.Generator
from which to draw random numbers
# Returns
X: a matrix of sample vectors, where
the samples are the rows and the
features are the columns
ie, its size should be:
num_samples x count
y: a vector of num_samples binary labels
"""
# TODO: implement this
return None, None
|
5345be6c21cedfb62bf33a5c3a3b6ff75f7be892
| 3,641,147
|
from copy import deepcopy

import numpy as np


def _brighten_images(images: np.ndarray, brightness: int = BRIGHTNESS) -> np.ndarray:
    """
    Adjust the brightness of all input images
    :param images: The original images of shape [H, W, D].
    :param brightness: The amount the brightness should be raised or lowered
    :return: Images with adjusted brightness
    """
brighten_images = deepcopy(images)
for image in brighten_images:
_brighten_image(image, brightness)
return brighten_images
|
b2bc8c801e459b5f913a2bf725709cbc80df128a
| 3,641,148
|
import os
import sys
def findBaseDir(basename, max_depth=5, verbose=False):
"""
    Get relative path to a BASEDIR.
    :param basename: Name of the basedir to path to
    :type basename: str
    :param max_depth: Maximum number of directory levels to walk up before giving up.
    :type max_depth: int
    :param verbose: If True, print progress at each level.
    :type verbose: bool
    :return: Relative path to base directory.
    :rtype: str
"""
MAX_DEPTH = max_depth
BASEDIR = os.path.abspath(os.path.dirname(__file__))
print("STARTING AT: %s\n Looking for: %s" % (BASEDIR, basename))
for level in range(MAX_DEPTH):
if verbose:
print('LEVEL %d: %s\n Current basename: %s' %
(level, BASEDIR, os.path.basename(BASEDIR)))
if os.path.basename(BASEDIR) == basename:
break
else:
BASEDIR = os.path.abspath(os.path.dirname(BASEDIR))
if level == MAX_DEPTH - 1:
sys.exit('Could not find correct basedir\n Currently at %s' % BASEDIR)
return os.path.relpath(BASEDIR)
|
75d11503ef02cb0671803ab7b13dcd6d77322330
| 3,641,149
|
import os


def CreateMovie(job_name, input_parameter, view_plane, plot_type):
"""encoder = os.system("which ffmpeg")
print encoder
if(len(encoder) == 0):
feedback['error'] = 'true'
feedback['message'] = ('ERROR: Movie create encoder not found')
print feedback
return
"""
movie_name = job_name + '_' + input_parameter+ '_' + view_plane + '_'+ plot_type + "_movie.mp4"
cmd = "ffmpeg -qscale 1 -r 3 -b 3000k -i " + input_parameter + '_' + view_plane + '_'+ plot_type + "_image_%01d.png " + movie_name;
    print(cmd)
os.system(cmd)
return movie_name
|
ff518f485db1de7a2d00af2984f5829cfe4279dc
| 3,641,150
|
def describe_recurrence(recur, recurrence_dict, connective="and"):
"""Create a textual description of the recur set.
Arguments:
recur (Set): recurrence pattern as set of day indices eg Set(["1","3"])
recurrence_dict (Dict): map of strings to recurrence patterns
connective (Str): word to connect list of days, default "and"
Returns:
Str: List of days as a human understandable string
"""
day_list = list(recur)
day_list.sort()
days = " ".join(day_list)
for recurrence in recurrence_dict:
if recurrence_dict[recurrence] == days:
return recurrence # accept the first perfect match
# Assemble a long desc, e.g. "Monday and Wednesday"
day_names = []
for day in days.split(" "):
for recurrence in recurrence_dict:
if recurrence_dict[recurrence] == day:
day_names.append(recurrence)
break
return join_list(day_names, connective)
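
# A small illustration (hypothetical recurrence_dict): with
#     recurrence_dict = {"Weekdays": "1 2 3 4 5", "Monday": "1", "Wednesday": "3"}
# and recur = {"1", "3"}, no entry matches "1 3" exactly, so the function
# falls back to assembling "Monday" and "Wednesday" via join_list.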
|
0189fa88f64a284367829c3f95ecd056aec7bfa8
| 3,641,151
|
def route_image_zoom_in():
"""
Zooms in.
"""
result = image_viewer.zoom_in()
return jsonify({'zoom-in' : result})
|
758acf681718cc1c2be2c3bac112f4fc83243cf9
| 3,641,152
|
def evaluate_flow_file(gt_file, pred_file):
"""
evaluate the estimated optical flow end point error according to ground truth provided
:param gt_file: ground truth file path
:param pred_file: estimated optical flow file path
:return: end point error, float32
"""
# Read flow files and calculate the errors
gt_flow = read_flow(gt_file) # ground truth flow
eva_flow = read_flow(pred_file) # predicted flow
# Calculate errors
average_pe = flow_error(gt_flow[:, :, 0], gt_flow[:, :, 1],
eva_flow[:, :, 0], eva_flow[:, :, 1])
return average_pe
|
ecbe960cd011f4282758a3f6bf4f345853f9a49d
| 3,641,153
|
def get_heavy_load_rses(threshold, session=None):
"""
Retrieve heavy load rses.
:param threshold: Threshold as an int.
:param session: Database session to use.
    :returns: A list of dicts with keys 'rse_id' and 'load' for RSEs whose load is at or above the threshold, or None if there are no sources.
"""
try:
results = session.query(models.Source.rse_id, func.count(models.Source.rse_id).label('load'))\
.filter(models.Source.is_using == true())\
.group_by(models.Source.rse_id)\
.all()
if not results:
return
result = []
for t in results:
if t[1] >= threshold:
t2 = {'rse_id': t[0], 'load': t[1]}
result.append(t2)
return result
except IntegrityError as error:
raise RucioException(error.args)
|
52b44132c1680e0ca9ba64a0afb2938abcab1228
| 3,641,154
|
import operator
def facetcolumns(table, key, missing=None):
"""
Like :func:`petl.util.materialise.columns` but stratified by values of the
given key field. E.g.::
>>> import petl as etl
>>> table = [['foo', 'bar', 'baz'],
... ['a', 1, True],
... ['b', 2, True],
... ['b', 3]]
>>> fc = etl.facetcolumns(table, 'foo')
>>> fc['a']
{'foo': ['a'], 'bar': [1], 'baz': [True]}
>>> fc['b']
{'foo': ['b', 'b'], 'bar': [2, 3], 'baz': [True, None]}
"""
fct = dict()
it = iter(table)
hdr = next(it)
flds = list(map(text_type, hdr))
indices = asindices(hdr, key)
assert len(indices) > 0, 'no key field selected'
getkey = operator.itemgetter(*indices)
for row in it:
kv = getkey(row)
if kv not in fct:
cols = dict()
for f in flds:
cols[f] = list()
fct[kv] = cols
else:
cols = fct[kv]
for f, v in izip_longest(flds, row, fillvalue=missing):
if f in cols:
cols[f].append(v)
return fct
|
bec3419a22128c2863032022c5e9e507c9791f1b
| 3,641,155
|
def divide_set(vectors, labels, column, value):
"""
Divide the sets into two different sets along a specific dimension and value.
"""
set_1 = [(vector, label) for vector, label in zip(vectors, labels) if split_function(vector, column, value)]
set_2 = [(vector, label) for vector, label in zip(vectors, labels) if not split_function(vector, column, value)]
vectors_set_1 = [element[0] for element in set_1]
vectors_set_2 = [element[0] for element in set_2]
label_set_1 = [element[1] for element in set_1]
label_set_2 = [element[1] for element in set_2]
return vectors_set_1, label_set_1, vectors_set_2, label_set_2
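
# The `split_function` helper is not shown in this snippet. A common
# definition for this kind of decision-tree split (an assumption, not the
# original code): numeric columns split on >=, other columns on equality.
def _split_function_sketch(vector, column, value):
    if isinstance(value, (int, float)) and not isinstance(value, bool):
        return vector[column] >= value
    return vector[column] == value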
|
dd99c4d700ad8294ce2b2a33b18feb79165487fb
| 3,641,156
|
def execute_freezerc(dict, must_fail=False, merge_stderr=False):
"""
:param dict:
:type dict: dict[str, str]
:param must_fail:
:param merge_stderr:
:return:
"""
return execute([FREEZERC] + dict_to_args(dict), must_fail=must_fail,
merge_stderr=merge_stderr)
|
596490d1fe0ae90a807c6553674273e470165e57
| 3,641,157
|
def corField2D_vector(field):
"""
2D correlation field of a vector field. Correlations are calculated with
use of Fast Fourier Transform.
Parameters
----------
field : (n, n, 2) shaped array like
Vector field to extract correlations from.
Points are supposed to be uniformly distributed.
Returns
-------
C : 2D numpy array
Unnormalised correlation field.
C[0, 0] is the origin, points are uniformly distributed.
xCL : float
Unnormalised longitudinal correlation of field projected on the first
direction of space at distance equal to field grid spacing.
yCL : float
Unnormalised longitudinal correlation of field projected on the second
direction of space at distance equal to field grid spacing.
xCT : float
Unnormalised transversal correlation of field projected on the first
direction of space at distance equal to field grid spacing.
yCT : float
Unnormalised transversal correlation of field projected on the second
direction of space at distance equal to field grid spacing.
Norm : float
Norm of correlation field.
"""
xfield = field[:, :, 0] # projection of field on the first direction of space
xC, xNorm = corField2D_scalar(xfield) # unnormalised correlation field and its norm associated to field projection on the first direction of space
yfield = field[:, :, 1] # projection of field on the second direction of space
yC, yNorm = corField2D_scalar(yfield) # unnormalised correlation field and its norm associated to field projection on the second direction of space
C = xC + yC # correlation field of field
xCL, yCL = xC[0, 1], yC[1, 0] # longitudinal correlations in first and second directions of space
xCT, yCT = xC[1, 0], yC[0, 1] # transversal correlations in first and second directions of space
Norm = xNorm + yNorm # norm of correlation field
return C, xCL, yCL, xCT, yCT, Norm
|
35b4c1dc646c30b9fdae7c1ad1095ae67ea1356d
| 3,641,158
|
import json
def load_json_fixture(filename: str):
"""Load stored JSON data."""
return json.loads((TEST_EXAMPLES_PATH / filename).read_text())
|
1021975d2111a9391a93ff9bd757479a8c9663f6
| 3,641,159
|
from typing import List
from typing import Optional
def stat_list_card(
box: str,
title: str,
items: List[StatListItem],
name: Optional[str] = None,
subtitle: Optional[str] = None,
commands: Optional[List[Command]] = None,
) -> StatListCard:
"""Render a card displaying a list of stats.
Args:
box: A string indicating how to place this component on the page.
title: The title.
items: The individual stats to be displayed.
name: An optional name for this item.
subtitle: The subtitle, displayed below the title.
commands: Contextual menu commands for this component.
Returns:
A `h2o_wave.types.StatListCard` instance.
"""
return StatListCard(
box,
title,
items,
name,
subtitle,
commands,
)
|
fa9036442fdad3028d40918741e91ca5077925fb
| 3,641,160
|
def test_DataGeneratorAllSpectrums_fixed_set():
"""
Test whether use_fixed_set=True toggles generating the same dataset on each epoch.
"""
# Get test data
binned_spectrums, tanimoto_scores_df = create_test_data()
# Define other parameters
batch_size = 4
dimension = 88
# Create normal generator
normal_generator = DataGeneratorAllSpectrums(binned_spectrums=binned_spectrums[:8],
reference_scores_df=tanimoto_scores_df,
dim=dimension, batch_size=batch_size,
use_fixed_set=False)
# Create generator that generates a fixed set every epoch
fixed_generator = DataGeneratorAllSpectrums(binned_spectrums=binned_spectrums[:8],
reference_scores_df=tanimoto_scores_df,
dim=dimension, batch_size=batch_size,
num_turns=5, use_fixed_set=True)
def collect_results(generator):
n_batches = len(generator)
X = np.zeros((batch_size, dimension, 2, n_batches))
y = np.zeros((batch_size, n_batches))
for i, batch in enumerate(generator):
X[:, :, 0, i] = batch[0][0]
X[:, :, 1, i] = batch[0][1]
y[:, i] = batch[1]
return X, y
first_X, first_y = collect_results(normal_generator)
second_X, second_y = collect_results(normal_generator)
assert not np.array_equal(first_X, second_X)
assert first_y.shape == (4, 2), "Expected different number of labels"
first_X, first_y = collect_results(fixed_generator)
second_X, second_y = collect_results(fixed_generator)
assert np.array_equal(first_X, second_X)
assert first_y.shape == (4, 10), "Expected different number of labels"
# Create another fixed generator based on the same dataset that should generate the same
# fixed set
fixed_generator2 = DataGeneratorAllSpectrums(binned_spectrums=binned_spectrums[:8],
reference_scores_df=tanimoto_scores_df,
dim=dimension, batch_size=batch_size,
num_turns=5, use_fixed_set=True)
first_X, first_y = collect_results(fixed_generator)
second_X, second_y = collect_results(fixed_generator2)
assert np.array_equal(first_X, second_X)
|
95d8a13f1eaa5d7ef93936a8f9e224a56119e6af
| 3,641,161
|
import math
def inverse_gamma(data, alpha=0.1, beta=0.1):
    """
    Inverse gamma distribution density
    :param data: Data value
    :param alpha: alpha value
    :param beta: beta value
    :return: Inverse gamma density at the data value
    """
    return (pow(beta, alpha) / math.gamma(alpha)) *\
        pow(data, -alpha - 1) * math.exp(-beta/data)
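
# Sanity check: with alpha = beta = 1 the density reduces to x**(-2) * exp(-1/x),
# so inverse_gamma(1.0, alpha=1, beta=1) should be about exp(-1) ~= 0.368.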
|
c13f5e4a05e111ae0082b7e69ef5b31498d2c221
| 3,641,162
|
def query(queryid):
"""
Dynamic Query View.
Must be logged in to access this view, otherwise redirected to login page.
A unique view is generated based off a query ID.
A page is only returned if the query ID is associated with a logged in user.
Otherwise a logged in user will be redirected to a 404 error page.
"""
query = models.SpellChecks.query.get(queryid)
if query is not None and ((g.user.is_admin) or (g.user.username == query.username)):
render = make_response(render_template('spellcheck/history_s_query.html', query=query))
render.headers.set('Content-Security-Policy', "default-src 'self'")
render.headers.set('X-Content-Type-Options', 'nosniff')
render.headers.set('X-Frame-Options', 'SAMEORIGIN')
render.headers.set('X-XSS-Protection', '1; mode=block')
return render
else:
abort(404)
|
22c77dc122b15d8bb6589a823a8f3904f189371a
| 3,641,163
|
from subprocess import PIPE, Popen


def get_ssid() -> str:
"""Gets SSID of the network connected.
Returns:
str:
Wi-Fi or Ethernet SSID.
"""
process = Popen(
['/System/Library/PrivateFrameworks/Apple80211.framework/Versions/Current/Resources/airport', '-I'],
stdout=PIPE)
out, err = process.communicate()
if error := process.returncode:
logger.error(f"Failed to fetch SSID with exit code: {error}\n{err}")
# noinspection PyTypeChecker
return dict(map(str.strip, info.split(': ')) for info in out.decode('utf-8').splitlines()[:-1] if
len(info.split()) == 2).get('SSID')
|
def6c485099219cd0894cde636ac93dc1b130a98
| 3,641,164
|
def train_predict(clf, X_train, X_test, y_train, y_test):
"""Train clf on <X_train, y_train>, predict <X_test, y_test>; return y_pred."""
print("Training a {}...".format(clf.__class__.__name__))
get_ipython().run_line_magic('time', 'clf.fit(X_train, y_train)')
print(clf)
print("Predicting test labels...")
y_pred = clf.predict(X_test)
return y_pred
|
eda266df2cea704d557e4de4f7600b62c533f362
| 3,641,165
|
import subprocess
def FindFilesWithContents(string_a, string_b):
"""Returns list of paths of files that contain |string_a| or |string_b|.
Uses --name-only to print the file paths. The default behavior of git grep
is to OR together multiple patterns.
Args:
string_a: A string to search for (not a regular expression).
string_b: As above.
Returns:
A list of file paths as strings.
"""
matching_files = subprocess.check_output([
'git', 'grep', '--name-only', '--fixed-strings', '-e', string_a,
'-e', string_b])
files_list = matching_files.split('\n')
# The output ends in a newline, so slice that off.
files_list = files_list[:-1]
return files_list
|
d7b36ae2aaac98b79276f9bbd1b82dc7172520a6
| 3,641,166
|
def get_executable_choices(versions):
"""
Return available Maya releases.
"""
return [k for k in versions if not k.startswith(Config.DEFAULTS)]
|
df0c253b8ca29b42f7863a8944d21678847b7c9a
| 3,641,167
|
def list_songs():
"""
Lists all the songs in your media server
Can do this without a login
"""
# # Check if the user is logged in, if not: back to login.
# if('logged_in' not in session or not session['logged_in']):
# return redirect(url_for('login'))
page['title'] = 'List Songs'
# Get a list of all songs from the database
allsongs = None
allsongs = database.get_allsongs()
# Data integrity checks
    if allsongs is None:
allsongs = []
return render_template('listitems/listsongs.html',
session=session,
page=page,
user=user_details,
allsongs=allsongs)
|
bbeb4e11404dcce582ed86cc711508645399a7cd
| 3,641,168
|
import statistics
def linear_regression(xs, ys):
"""
Computes linear regression coefficients
https://en.wikipedia.org/wiki/Simple_linear_regression
Returns a and b coefficients of the function f(y) = a * x + b
"""
x_mean = statistics.mean(xs)
y_mean = statistics.mean(ys)
num, den = 0.0, 0.0
for x, y in zip(xs, ys):
num += (x - x_mean) * (y - y_mean)
den += (x - x_mean) * (x - x_mean)
a = num / den
b = y_mean - a * x_mean
return a, b
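
# Quick sanity check: points that lie exactly on y = 2x + 1 should recover
# a = 2 and b = 1.
if __name__ == "__main__":
    a, b = linear_regression([1, 2, 3, 4], [3, 5, 7, 9])
    assert abs(a - 2.0) < 1e-9 and abs(b - 1.0) < 1e-9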
|
6b6ecbd31262e5fe61f9cf7793d741a874327598
| 3,641,169
|
import numpy as np


def calcR1(n_hat):
"""
Calculate the rotation matrix that would rotate the
position vector x_ae to the x-y plane.
Parameters
----------
n_hat : `~numpy.ndarray` (3)
Unit vector normal to plane of orbit.
Returns
-------
    R1 : `~numpy.ndarray` (3, 3)
Rotation matrix.
"""
n_hat_ = np.ascontiguousarray(n_hat)
# Find the rotation axis v
v = np.cross(n_hat_, Z_AXIS)
# Calculate the cosine of the rotation angle, equivalent to the cosine of the
# inclination
c = np.dot(n_hat_, Z_AXIS)
# Compute the skew-symmetric cross-product of the rotation axis vector v
vp = np.array([[0, -v[2], v[1]],
[v[2], 0, -v[0]],
[-v[1], v[0], 0]])
# Calculate R1
R1 = np.identity(3) + vp + np.linalg.matrix_power(vp, 2) * (1 / (1 + c))
return R1
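# Minimal usage sketch (an addition, not part of the original module): Z_AXIS is
# assumed here to be the unit z-axis vector referenced by calcR1. Rotating the
# +y normal maps it onto the z-axis, i.e. the corresponding orbital plane onto
# the x-y plane.
import numpy as np
Z_AXIS = np.array([0.0, 0.0, 1.0])  # assumed module-level constant
n_hat = np.array([0.0, 1.0, 0.0])
R1 = calcR1(n_hat)
print(R1 @ n_hat)  # -> [0. 0. 1.]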
|
c3202c477eefe43648e693316079f9e2c6ac0aa0
| 3,641,170
|
import pandas as pd
def get_arb_info(info, n=1000):
"""
Example: info := {'start':1556668800, 'period':300, 'trading_pair':'eth_btc', 'exchange_id':'binance'}
"""
assert {'exchange_id', 'trading_pair', 'period', 'start'}.issubset(info.keys())
info['n'] = n
q = """with sub as (
select * from candlesticks
where trading_pair=%(trading_pair)s and period=%(period)s and timestamp>=%(start)s
),
thing as (
select "timestamp", avg(close) from sub
group by (timestamp)
)
select exchange,trading_pair, thing.timestamp, "period", "avg", "close"-"avg" as arb_diff, ("close"-"avg")/"avg" as arb_signal from
(sub inner join thing on sub.timestamp = thing.timestamp)
where exchange=%(exchange_id)s
order by thing.timestamp
limit %(n)s;
"""
results = safe_qall(q, info)
if results is not None:
# arb_signal is more interpretable than arb_diff but the signal is the same
df = pd.DataFrame(results, columns=["exchange", "trading_pair", "timestamp", "period", "avg", "arb_diff", "arb_signal"])
return d.fix_df(df)
|
20177d71b0a816031aded0151e91711f729fe0bf
| 3,641,171
|
from typing import List
def parse_nested_root(stream: TokenStream) -> AstRoot:
"""Parse nested root."""
with stream.syntax(colon=r":"):
colon = stream.expect("colon")
if not consume_line_continuation(stream):
exc = InvalidSyntax("Expected non-empty block.")
raise set_location(exc, colon)
level, command_level = stream.indentation[-2:]
commands: List[AstCommand] = []
with stream.intercept("newline"), stream.provide(
scope=(),
line_indentation=command_level,
):
while True:
commands.append(delegate("command", stream))
# The command parser consumes the trailing newline so we need to rewind
# to be able to use "consume_line_continuation()".
while (token := stream.peek()) and not token.match("newline", "eof"):
stream.index -= 1
with stream.provide(multiline=True, line_indentation=level):
if not consume_line_continuation(stream):
break
node = AstRoot(commands=AstChildren(commands))
return set_location(node, commands[0], commands[-1])
|
ea559c4c2592b4ecabfb10df39059391799f4462
| 3,641,172
|
def wg_config_write():
""" Write configuration file. """
global wg_config_file
return weechat.config_write(wg_config_file)
|
add032c3447d2c68005bbc1cc33006ba452afaa3
| 3,641,173
|
def is_palindrome_permutation(phrase):
"""checks if a string is a permutation of a palindrome"""
table = [0 for _ in range(ord("z") - ord("a") + 1)]
countodd = 0
for c in phrase:
x = char_number(c)
if x != -1:
table[x] += 1
if table[x] % 2:
countodd += 1
else:
countodd -= 1
return countodd <= 1
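# The snippet relies on a char_number() helper that is not shown here. Below is
# a plausible sketch of it (an assumption, not the original implementation):
# lowercase letters map to 0-25 and everything else returns -1, so spaces and
# punctuation are ignored.
def char_number(c):
    c = c.lower()
    if "a" <= c <= "z":
        return ord(c) - ord("a")
    return -1

print(is_palindrome_permutation("Tact Coa"))  # True ("taco cat" is a palindrome)
print(is_palindrome_permutation("hello"))     # False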
|
dac9d0fc67f628cb22213d5fcad947baa3d2d8f1
| 3,641,174
|
import json
def get_stored_username():
"""Get Stored Username"""
filename = 'numbers.json'
try:
with open(filename) as file_object:
username = json.load(file_object)
except FileNotFoundError:
return None
else:
return username
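# Brief usage sketch (an addition): the file is expected to hold a single
# JSON-encoded value, e.g. written as below. The 'numbers.json' filename is
# kept as-is from the snippet above.
with open('numbers.json', 'w') as file_object:
    json.dump('erin', file_object)
print(get_stored_username())  # erin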
|
3471369cfc147dd9751cedb7cde92ceb5d69e908
| 3,641,175
|
import posixpath
def post_process_symbolizer_image_file(file_href, dirs):
""" Given an image file href and a set of directories, modify the image file
name so it's correct with respect to the output and cache directories.
"""
# support latest mapnik features of auto-detection
# of image sizes and jpeg reading support...
# http://trac.mapnik.org/ticket/508
mapnik_auto_image_support = (MAPNIK_VERSION >= 701)
mapnik_requires_absolute_paths = (MAPNIK_VERSION < 601)
file_href = urljoin(dirs.source.rstrip('/')+'/', file_href)
scheme, n, path, p, q, f = urlparse(file_href)
if scheme in ('http','https'):
scheme, path = '', locally_cache_remote_file(file_href, dirs.cache)
if scheme not in ('file', '') or not systempath.exists(un_posix(path)):
raise Exception("Image file needs to be a working, fetchable resource, not %s" % file_href)
if not mapnik_auto_image_support and not Image:
raise SystemExit('PIL (Python Imaging Library) is required for handling image data unless you are using PNG inputs and running Mapnik >=0.7.0')
img = Image.open(un_posix(path))
if mapnik_requires_absolute_paths:
path = posixpath.realpath(path)
else:
path = dirs.output_path(path)
msg('reading symbol: %s' % path)
image_name, ext = posixpath.splitext(path)
if ext in ('.png', '.tif', '.tiff'):
output_ext = ext
else:
output_ext = '.png'
# new local file name
dest_file = un_posix('%s%s' % (image_name, output_ext))
if not posixpath.exists(dest_file):
img.save(dest_file,'PNG')
msg('Destination file: %s' % dest_file)
return dest_file, output_ext[1:], img.size[0], img.size[1]
|
42026823be510b5ffbb7404bd24809a1232e8206
| 3,641,176
|
import torch
def collate_fn_feat_padded(batch):
"""
Sort a data list by frame length (descending order)
batch : list of tuple (feature, label). len(batch) = batch_size
- feature : torch tensor of shape [1, 40, 80] ; variable size of frames
- labels : torch tensor of shape (1)
ex) samples = collate_fn([batch])
batch = [dataset[i] for i in batch_indices]. ex) [Dvector_train_dataset[i] for i in [0,1,2,3,4]]
batch[0][0].shape = torch.Size([1,64,774]). "774" is the number of frames per utterance.
"""
batch.sort(key=lambda x: x[0].shape[2], reverse=True)
feats, labels = zip(*batch)
# Merge labels => torch.Size([batch_size,1])
labels = torch.stack(labels, 0)
labels = labels.view(-1)
# Merge frames
lengths = [feat.shape[2] for feat in feats] # in decreasing order
max_length = lengths[0]
# features_mod.shape => torch.Size([batch_size, n_channel, dim, max(n_win)])
padded_features = torch.zeros(len(feats), feats[0].shape[0], feats[0].shape[1], feats[0].shape[2]).float() # convert to FloatTensor (it should be!). torch.Size([batch, 1, feat_dim, max(n_win)])
for i, feat in enumerate(feats):
end = lengths[i]
num_frames = feat.shape[2]
while max_length > num_frames:
feat = torch.cat((feat, feat[:,:,:end]), 2)
num_frames = feat.shape[2]
padded_features[i, :, :, :] = feat[:,:,:max_length]
return padded_features, labels
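# Small usage sketch (an addition, not part of the original code): two
# utterances with different frame counts are padded (by tiling) up to the
# longest one, and the labels are merged into a 1-D tensor.
batch = [
    (torch.randn(1, 40, 90), torch.tensor([3])),
    (torch.randn(1, 40, 70), torch.tensor([1])),
]
features, labels = collate_fn_feat_padded(batch)
print(features.shape)  # torch.Size([2, 1, 40, 90])
print(labels)          # tensor([3, 1])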
|
b901b6c3eacfd4d0bac93e1569d59ad944365fd2
| 3,641,177
|
import unicodedata
import re
def slugify(value, allow_unicode=False):
"""
Taken from https://github.com/django/django/blob/master/django/utils/text.py
Convert to ASCII if 'allow_unicode' is False. Convert spaces or repeated
dashes to single dashes. Remove characters that aren't alphanumerics,
underscores, or hyphens. Convert to lowercase. Also strip leading and
trailing whitespace, dashes, and underscores.
"""
value = str(value)
if allow_unicode:
value = unicodedata.normalize('NFKC', value)
else:
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
value = re.sub(r'[^\w\s-]', '', value.lower())
return re.sub(r'[-\s]+', '-', value).strip('-_')
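# Quick usage sketch (an addition): punctuation is stripped, whitespace
# collapses to single dashes, and the result is lowercased.
print(slugify("Hello, World!"))                      # hello-world
print(slugify(" Crème brûlée "))                     # creme-brulee
print(slugify("Crème brûlée", allow_unicode=True))   # crème-brûlée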
|
39f7ed291e6a2cec5111ba979c35a6aaa8e521c0
| 3,641,178
|
import keras
import numpy as np
def average_gradient_norm(model, data):
""" Computes the average gradient norm for a keras model """
# just checking if the model was already compiled
if not hasattr(model, "train_function"):
raise RuntimeError("You must compile your model before using it.")
weights = model.trainable_weights # weight tensors
get_gradients = model.optimizer.get_gradients(model.total_loss, weights) # gradient tensors
input_tensors = [
# input data
model.inputs[0],
# how much to weight each sample by
model.sample_weights[0],
# labels
model.targets[0],
# train or test mode
keras.backend.learning_phase()
]
grad_fct = keras.backend.function(inputs=input_tensors, outputs=get_gradients)
steps = 0
total_norm = 0
s_w = None
while steps < data.steps_per_epoch:
X, y = next(data)
# set sample weights to one
# for every input
if s_w is None:
s_w = np.ones(X.shape[0])
gradients = grad_fct([X, s_w, y, 0])
total_norm += np.sqrt(np.sum([np.sum(np.square(g)) for g in gradients]))
steps += 1
return total_norm / float(steps)
|
84f5feb894f72856ef43eea8befc048f216020bd
| 3,641,179
|
import logging
def string_regex_matcher(input_str: str, regex: str, replacement_str=""):
"""Python version of StringRegexMatcher in mlgtools.
Replaces all substring matched with regular expression (regex) with replacement string (replacement_str).
Args:
input_str (str): input string to match
regex (str): regular expression to match
replacement_str (str): replacement string for string matched with regex
Returns:
        str: the input string with each regex match replaced by replacement_str, or the original string if regex is None or empty
"""
# log error if regex is None or empty
if not regex:
log(logging.INFO, DataCategory.PUBLIC,
'_string_regex_matcher: regex is None or empty. Returning original sentence.')
return input_str
# Compile the regular expression
regex_compiled = check_and_compile_regular_expression(regex)
# Return the string with replacing matched substrings with replacement_str
return regex_compiled.sub(replacement_str, input_str)
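# check_and_compile_regular_expression() (and the log/DataCategory helpers) are
# project utilities not shown in this snippet. A plausible minimal sketch of
# the compile helper (an assumption, not the mlgtools implementation):
import re

def check_and_compile_regular_expression(regex: str):
    try:
        return re.compile(regex)
    except re.error as exc:
        raise ValueError(f"Invalid regular expression {regex!r}: {exc}") from exc

print(string_regex_matcher("hello   world", r"\s+", " "))  # "hello world"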
|
c813447a1453347a1b263f603970d8ff4f709696
| 3,641,180
|
import matplotlib.pyplot as plt
import pandas as pd
def cont_scatterplot(data: pd.DataFrame,
x: str,
y: str,
z: str or None,
label: str,
cmap: str,
size: int or str or None,
fig: plt.Figure,
cbar_kwargs: dict,
**kwargs):
"""
Scatterplot with continuous label
Parameters
----------
data: Pandas.DataFrame
x: str
y: str
z: str, optional
label: str
cmap: str
size: int or str, optional
fig: Matplotlib.Figure
cbar_kwargs: dict
Keyword arguments passed to colorbar
kwargs:
Additional keyword arguments passed to Matplotlib.Axes.scatter call
Returns
-------
Matplotlib.Axes
"""
if isinstance(size, str):
size = data[size].values
if z is not None:
ax = fig.add_subplot(111, projection="3d")
im = ax.scatter(data[x].values,
data[y].values,
data[z].values,
c=data[label].values,
s=size,
cmap=cmap,
**kwargs)
else:
ax = fig.add_subplot(111)
im = ax.scatter(data[x].values,
data[y].values,
c=data[label].values,
s=size,
cmap=cmap,
**kwargs)
fig.colorbar(im, ax=ax, **cbar_kwargs)
return ax
|
e6c85d1fafa93c9000c807c7ec82ac7851082aec
| 3,641,181
|
import numpy as np
def performance(problem, W, H, C, R_full):
"""Compute the performance of the IMC estimates."""
assert isinstance(problem, IMCProblem), \
"""`problem` must be an IMC problem."""
assert W.ndim == H.ndim, """Mismatching dimensionality."""
if W.ndim < 3:
W, H = np.atleast_3d(W, H)
n_iterations = W.shape[-1]
assert W.shape[-1] == H.shape[-1], """Mismatching number of iterations."""
    # sparsity coefficients
sparsity_W = np.isclose(W, 0).mean(axis=(0, 1))
sparsity_H = np.isclose(H, 0).mean(axis=(0, 1))
# Regularization -- components
reg_ridge = (0.5 * np.linalg.norm(W, "fro", axis=(0, 1))**2 +
0.5 * np.linalg.norm(H, "fro", axis=(0, 1))**2)
reg_group = (np.linalg.norm(W, 2, axis=1).sum(axis=0) +
np.linalg.norm(H, 2, axis=1).sum(axis=0))
reg_lasso = (np.linalg.norm(W.reshape(-1, W.shape[-1]), 1, axis=0) +
np.linalg.norm(H.reshape(-1, H.shape[-1]), 1, axis=0))
# Regularization -- full
C_lasso, C_group, C_ridge = C
regularizer_value = (C_group * reg_group +
C_lasso * reg_lasso +
C_ridge * reg_ridge)
    # sequential Frobenius norm of the matrices
div_W = np.r_[np.linalg.norm(np.diff(W, axis=-1),
"fro", axis=(0, 1)), 0]
div_H = np.r_[np.linalg.norm(np.diff(H, axis=-1),
"fro", axis=(0, 1)), 0]
# Objective value on the train data
v_val_train = np.array([problem.value(W[..., i], H[..., i])
for i in range(n_iterations)])
# Objective on the full matrix (expensive!)
v_val_full = np.array([
problem.loss(problem.prediction(W[..., i], H[..., i]).ravel(),
R_full.ravel()).sum() for i in range(n_iterations)])
# Score on the full matrix (expensive!)
score_full = np.array([
problem.score(problem.prediction(W[..., i], H[..., i]).ravel(),
R_full.ravel()) for i in range(n_iterations)])
metrics = np.stack([v_val_train, regularizer_value,
score_full, v_val_full,
sparsity_W, sparsity_H,
div_W, div_H], axis=0)
titles = ['Observed Elements', 'Regularization', 'Score',
'Full Matrix', 'Zero Values of W', 'Zero Values of H',
'L2-Norm Variation W', 'L2-Norm Variation H']
units = ['L2-Loss', 'L2-Loss', 'Score', 'L2-Loss',
'%', '%', 'L2-Norm', 'L2-Norm']
return metrics, titles, units
|
79635826cc72633a0024df29980ecc678f20e32d
| 3,641,182
|
import datetime
import icalendar
import pytz
from django.utils import timezone
def courseschedules_to_private_ical_feed(user):
"""
Generate an ICAL feed for all course schedules associated with the given user.
The IDs given for each event are sequential, unique only amongst the results of this particular query, and not
guaranteed to be consistent across calls.
:param user: The user to generate an ICAL feed for.
:return: An ICAL string of all the user's course schedules.
"""
calendar = _create_calendar(user)
events = []
for course in Course.objects.for_user(user.pk).iterator():
events += coursescheduleservice.course_schedules_to_events(course, course.schedules)
timezone.activate(pytz.timezone(user.settings.time_zone))
for event in events:
calendar_event = icalendar.Event()
calendar_event["UID"] = f"he-{user.pk}-{event.pk}"
calendar_event["SUMMARY"] = event.title
calendar_event["DTSTAMP"] = icalendar.vDatetime(timezone.localtime(event.created_at))
if not event.all_day:
calendar_event["DTSTART"] = icalendar.vDatetime(timezone.localtime(event.start))
calendar_event["DTEND"] = icalendar.vDatetime(timezone.localtime(event.end))
else:
calendar_event["DTSTART"] = icalendar.vDate(event.start)
calendar_event["DTEND"] = icalendar.vDate((event.end + datetime.timedelta(days=1)))
calendar_event["DESCRIPTION"] = _create_event_description(event)
calendar.add_component(calendar_event)
timezone.deactivate()
return calendar.to_ical()
|
1d1ae7f650416eb240d0ce5240523d8c66067389
| 3,641,183
|
def AuxStream_Cast(*args):
"""
Cast(BaseObject o) -> AuxStream
AuxStream_Cast(Seiscomp::Core::BaseObjectPtr o) -> AuxStream
"""
return _DataModel.AuxStream_Cast(*args)
|
a38248e5476273aa6b18da5017a7ca0a033fd0a8
| 3,641,184
|
def BPNet(tasks, bpnet_params):
"""
BPNet architecture definition
Args:
tasks (dict): dictionary of tasks info specifying
'signal', 'loci', and 'bias' for each task
bpnet_params (dict): parameters to the BPNet architecture
The keys include (all are optional)-
'input_len': (int)
'output_profile_len': (int),
'motif_module_params': (dict) -
'filters' (list)
'kernel_sizes' (list)
'padding' (str)
'syntax_module_params': (dict) -
'num_dilation_layers' (int)
'filters' (int)
'kernel_size' (int)
'padding': (str)
'pre_activation_residual_unit' (boolean)
'profile_head_params': (dict) -
'filters' (int)
'kernel_size' (int)
'padding' (str)
'counts_head_params': (dict) -
'units' (int)
'profile_bias_module_params': (dict) -
'kernel_sizes' (list)
'counts_bias_module_params': (dict) - N/A
'use_attribution_prior': (boolean)
'attribution_prior_params': (dict) -
'frequency_limit' (int)
'limit_softness' (float)
'grad_smooth_sigma' (int)
'profile_grad_loss_weight' (float)
'counts_grad_loss_weight' (float)
'loss_weights': (list)
Returns:
tensorflow.keras.layers.Model
"""
# load params from json file
(input_len,
output_profile_len,
motif_module_params,
syntax_module_params,
profile_head_params,
counts_head_params,
profile_bias_module_params,
counts_bias_module_params,
use_attribution_prior,
attribution_prior_params) = load_params(bpnet_params)
# Step 1 - sequence input
one_hot_input = layers.Input(shape=(input_len, 4), name='sequence')
# Step 2 - Motif module (one or more conv layers)
motif_module_out = motif_module(
one_hot_input, motif_module_params['filters'],
motif_module_params['kernel_sizes'], motif_module_params['padding'])
# Step 3 - Syntax module (all dilation layers)
syntax_module_out = syntax_module(
motif_module_out, syntax_module_params['num_dilation_layers'],
syntax_module_params['filters'], syntax_module_params['kernel_size'],
syntax_module_params['padding'],
syntax_module_params['pre_activation_residual_unit'])
# Step 4.1 - Profile head (large conv kernel)
# Step 4.1.1 - get total number of output tracks across all tasks
num_tasks = len(list(tasks.keys()))
total_tracks = 0
for i in range(num_tasks):
total_tracks += len(tasks[i]['signal']['source'])
# Step 4.1.2 - conv layer to get pre bias profile prediction
profile_head_out = profile_head(
syntax_module_out, total_tracks,
profile_head_params['kernel_size'], profile_head_params['padding'])
# first let's figure out if bias input is required based on
# tasks info, this also affects the naming of the profile head
# and counts head layers
# total number of bias tasks in the tasks_info dictionary
total_bias_tracks = 0
# number of bias tracks in each task
task_bias_tracks = {}
for i in range(num_tasks):
task_bias_tracks[i] = _get_num_bias_tracks_for_task(tasks[i])
total_bias_tracks += task_bias_tracks[i]
# Step 4.1.3 crop profile head to match output_len
if total_bias_tracks == 0:
profile_head_name = 'profile_predictions'
else:
profile_head_name = 'profile_head_cropped'
crop_size = int_shape(profile_head_out)[1] // 2 - output_profile_len // 2
profile_head_out = layers.Cropping1D(
crop_size, name=profile_head_name)(profile_head_out)
# Step 4.2 - Counts head (global average pooling)
if total_bias_tracks == 0:
counts_head_name = 'logcounts_predictions'
else:
counts_head_name = 'counts_head'
counts_head_out = counts_head(
syntax_module_out, counts_head_name, total_tracks)
# Step 5 - Bias Input
# if the tasks have no bias tracks then profile_head and
# counts_head are the outputs of the model
inputs = [one_hot_input]
if total_bias_tracks == 0:
# we need to first rename the layers to correspond to what
# the batch generator sends
# At this point, since there is no bias the two outputs
# are called 'profile_head_cropped' & 'counts_head'
print("renaming layers")
profile_head_out._name = 'profile_predictions'
counts_head_out._name = 'logcounts_predictions'
profile_outputs = profile_head_out
logcounts_outputs = counts_head_out
else:
if num_tasks != len(profile_bias_module_params['kernel_sizes']):
raise NoTracebackException(
"Length on 'kernel_sizes' in profile_bias_module_params "
"must match #tasks")
# Step 5.1 - Define the bias input layers
profile_bias_inputs = []
counts_bias_inputs = []
for i in range(num_tasks):
if task_bias_tracks[i] > 0:
# profile bias input for task i
profile_bias_inputs.append(layers.Input(
shape=(output_profile_len, task_bias_tracks[i]),
name="profile_bias_input_{}".format(i)))
# counts bias input for task i
counts_bias_inputs.append(layers.Input(
shape=(task_bias_tracks[i]),
name="counts_bias_input_{}".format(i)))
# append to inputs
inputs.append(profile_bias_inputs[i])
inputs.append(counts_bias_inputs[i])
else:
profile_bias_inputs.append(None)
counts_bias_inputs.append(None)
# Step 5.2 - account for profile bias
profile_outputs = profile_bias_module(
profile_head_out, profile_bias_inputs, tasks,
kernel_sizes=profile_bias_module_params['kernel_sizes'])
# Step 5.3 - account for counts bias
logcounts_outputs = counts_bias_module(
counts_head_out, counts_bias_inputs, tasks)
if use_attribution_prior:
# instantiate attribution prior Model with inputs and outputs
return AttributionPriorModel(
attribution_prior_params['frequency_limit'],
attribution_prior_params['limit_softness'],
attribution_prior_params['grad_smooth_sigma'],
attribution_prior_params['profile_grad_loss_weight'],
attribution_prior_params['counts_grad_loss_weight'],
inputs=inputs,
outputs=[profile_outputs, logcounts_outputs])
else:
# instantiate keras Model with inputs and outputs
return Model(
inputs=inputs, outputs=[profile_outputs, logcounts_outputs])
|
cfffac8da543241160b6fc1963b8b19df7fa2b02
| 3,641,185
|
def getCubePixels(cubeImages):
"""
Returns a list containing the raw pixels from the `bpy.types.Image` images
in the list `cubeImages`. Factoring this functionality out into its own
function is useful for performance profiling.
"""
return [face.pixels[:] for face in cubeImages]
|
cdb2ba02ce9466e1b92a683dbea409e66b60c8da
| 3,641,186
|
from sqlalchemy import select
async def get_round_details(round_id):
"""
Get details for a given round (include snapshot)
"""
query = (
select(detail_columns)
.select_from(select_from_default)
.where(rounds_table.c.id == round_id)
) # noqa: E127
result = await conn.fetch_one(query=query)
return result and dict(result)
|
b7d64f52a8dff1a68a327651653f1f217f688146
| 3,641,187
|
import random
import plotly.graph_objs as go
from plotly.offline import iplot
# NOTE: this snippet also expects a module-level `text_source` string used as
# the default annotation text in make_anno() below.
def dens_hist_plot(**kwargs):
"""
plot prediction probability density histogram
Arguments:
df: classification prediction probability in pandas datafrane
"""
data = {'top1prob' : random.sample(range(1, 100), 5),
'top2prob' : random.sample(range(1, 100), 5)
}
def_vals = {"df" : data
} # default parameters value
for k, v in def_vals.items():
kwargs.setdefault(k, v)
df = kwargs['df']
x = df['top1prob']
y = df['top2prob']
def make_anno(x=1, y=1, text=text_source):
return go.Annotation(
text=text, # annotation text
showarrow=False, # remove arrow
xref='paper', # use paper coords
yref='paper', # for both coordinates
xanchor='right', # x-coord line up with right end of text
yanchor='bottom', # y-coord line up with bottom end of text
x=x, # position's x-coord
y=y # and y-coord
)
title = 'Prediction Result<br>\
Top1, Top2' # plot's title
x_title = 'Top1 Probability'#.format(site1) # x and y axis titles
y_title = 'Top2 Probability'
# Make a layout object
layout1 = go.Layout(
title=title, # set plot's title
font=dict(
family='PT Sans Narrow', # global font
size=13
),
xaxis1=go.XAxis(
title=x_title, # set x-axis title
#range=xy_range, # x-axis range
zeroline=False # remove x=0 line
),
annotations=go.Annotations([ # add annotation citing the data source
make_anno()
]),
showlegend=True, # remove legend
autosize=False, # custom size
width=980, # set figure width
height=880, # and height
margin=dict(l=100,
r=50,
b=100,
t=50
)
)
trace1 = go.Scatter(
x=x, y=y, mode='markers', name='points',
marker=dict(color='rgb(102,0,0)', size=2, opacity=0.4)
)
trace2 = go.Histogram2dContour(
x=x, y=y, name='density', ncontours=20,
colorscale='Hot', reversescale=True, showscale=False
)
trace3 = go.Histogram(
x=x, name='x density',
marker=dict(color='rgb(102,0,0)'),
yaxis='y2'
)
trace4 = go.Histogram(
y=y, name='y density', marker=dict(color='rgb(102,100,200)'),
xaxis='x2'
)
data = [trace1, trace2, trace3, trace4]
layout = go.Layout(
showlegend=False,
autosize=False,
xaxis=dict(
domain=[0, 0.85],
showgrid=False,
zeroline=False, title = x_title
),
yaxis=dict(
domain=[0, 0.85],
showgrid=False,
zeroline=False, title = y_title
),
margin=dict(
t=50
),
hovermode='closest',
bargap=0,
xaxis2=dict(
domain=[0.85, 1],
showgrid=False,
zeroline=False
),
yaxis2=dict(
domain=[0.85, 1],
showgrid=False,
zeroline=False
)
)
fig = go.Figure(data=data, layout=layout)
fig.update(layout = layout1)
fig['layout'].update(images= [dict(
source= "image/0016_Blue_horizon.svg",
xref= "paper",
yref= "paper", xanchor="left", yanchor="bottom",
x= 0,
y= 0,
sizex= 0.1,
sizey= 0.1,
sizing= "stretch",
opacity= 0.5,
layer= "above")])
iplot(fig, show_link=False, config={'modeBarButtonsToRemove': ['sendDataToCloud'], 'showLink': False, 'displaylogo' : False})
#plot(fig, filename='network_predic.html', show_link=False, config={'modeBarButtonsToRemove': ['sendDataToCloud'], 'showLink': False, 'displaylogo' : False})
|
9b9e65f4aef04c2676b48737f4b9bf525172b6ea
| 3,641,188
|
import time
def run_query(run_id, athena_client, query, athena_database_name, wait_to_finish):
""" Run the given Athena query
Arguments:
run_id {string} -- run_id for the current Step Function execution
athena_client {boto3.client} -- Boto3 Athena client
query {string} -- Athena query to execute
athena_database_name {string} -- Athena database to use for query execution
wait_to_finish {boolean} -- Should method wait for the Athena query to finish?
Raises:
utility.S3InsightsException: when Athena query fails
Returns:
string -- Athena execution id
"""
output_location = {
'OutputLocation': 's3://{0}/{1}'.format(
config.DeploymentDetails.consolidated_inventory_bucket_name,
get_s3_output_location_prefix(run_id)),
}
if athena_database_name is not None:
query_response = athena_client.start_query_execution(
QueryString=query,
QueryExecutionContext={
'Database': athena_database_name
},
ResultConfiguration=output_location)
else:
query_response = athena_client.start_query_execution(
QueryString=query,
ResultConfiguration=output_location)
execution_id = query_response['QueryExecutionId']
if wait_to_finish:
for attempt_count in range(1, 10):
query_status = athena_client.get_query_execution(
QueryExecutionId=execution_id)
query_execution_status = query_status['QueryExecution']['Status']['State']
if utility.compare_strings(query_execution_status, 'succeeded'):
break
elif utility.compare_strings(query_execution_status, 'failed'):
raise utility.S3InsightsException('Athena query failed for unknown reasons')
time.sleep(30)
return execution_id
|
0ee4618de6df9e32bbe3255f7a423dd838a564f1
| 3,641,189
|
from packaging.version import Version
def next_version(v: str) -> str:
"""
If ``v`` is a prerelease version, returns the base version. Otherwise,
returns the next minor version after the base version.
"""
vobj = Version(v)
if vobj.is_prerelease:
return str(vobj.base_version)
vs = list(vobj.release)
vs[1] += 1
vs[2:] = [0] * len(vs[2:])
s = ".".join(map(str, vs))
if vobj.epoch:
s = f"{vobj.epoch}!{s}"
return s
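# Usage sketch (an addition): a prerelease collapses to its base version, while
# a final release bumps the minor component and zeroes everything after it.
print(next_version("1.2.3"))     # 1.3.0
print(next_version("2.0.0rc1"))  # 2.0.0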
|
aea76df6874e368494ca630d594bee978ffc7e08
| 3,641,190
|
def sample_translate_text(text, target_language, project_id):
"""
Translating Text
Args:
text The content to translate in string format
target_language Required. The BCP-47 language code to use for translation.
"""
client = translate.TranslationServiceClient()
# TODO(developer): Uncomment and set the following variables
# text = 'Text you wish to translate'
# target_language = 'fr'
# project_id = '[Google Cloud Project ID]'
contents = [text]
parent = client.location_path(project_id, "global")
response = client.translate_text(
parent=parent,
contents=contents,
mime_type='text/plain', # mime types: text/plain, text/html
source_language_code='vi',
target_language_code=target_language)
# Display the translation for each input text provided
for translation in response.translations:
print(u"Translated text: {}".format(translation.translated_text))
return response
|
968e4f739f801aea0692db721cafb7cbd8d1e36e
| 3,641,191
|
def temp_obs():
"""Return a list of tobs from the 2016-08-24 to 2017-08-23"""
# Query temperature data with date
temp_query = session.query(Measurement.date, Measurement.tobs).filter(Measurement.date > year_start_date).all()
# Create a dictionary from the row data and append to a list of all temperature data
temp_data = []
for date, tobs in temp_query:
temp_dict = {}
temp_dict["Date"] = date
temp_dict["Temperature"] = tobs
temp_data.append(temp_dict)
# Return data in json format
return jsonify(temp_data)
|
6a74ad37cb95d1103943f15dfcac82e39c38620d
| 3,641,192
|
def pcmh_5_5c__3():
"""ER/IP discharge log"""
er_ip_log_url = URL('init', 'word', 'er_ip_log.doc',
vars=dict(**request.get_vars),
hmac_key=MY_KEY, salt=session.MY_SALT, hash_vars=["app_id", "type"])
# er_ip_log tracking chart
er_ip_log = MultiQNA(
1, float('inf'), True,
'er_ip_log',
"Please fill out <a href='{url}'>this ER/IP log</a> with at least 4 months of past data. Continue to maintain "
"this log permanently as part of your PCMH transformation. <b>Please make sure all the patients in this log "
"have their discharge summary in their patient record!</b>"
.format(url=er_ip_log_url)
)
er_ip_log.set_template("{choose_file}")
return dict(documents=[
dict(
description="Emergency Room / Inpatient Tracking Log",
url=er_ip_log_url,
permissions=["IS_TEAM"]
),
])
|
968bcc181cf3ec4513cfeb73004cdd3336f62003
| 3,641,193
|
def get_largest_component_size(component_size_distribution):
"""Finds the largest component in the given component size distribution.
Parameters
----------
component_size_distribution : dict
The component size distribution. See the function get_component_size_dist
Returns
-------
The largest component size : int
"""
return max(component_size_distribution.keys())
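# Quick usage sketch (an addition): keys are component sizes, values are how
# many components have that size.
size_distribution = {1: 40, 2: 11, 5: 3, 17: 1}
print(get_largest_component_size(size_distribution))  # 17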
|
2f922d465a61b4c65eacfdb3d5284e97d23f9659
| 3,641,194
|
import os
def bin_regions_parallel(
bed_files,
out_dir,
chromsizes,
bin_size=200,
stride=50,
final_length=1000,
parallel=12):
"""bin in parallel
"""
split_queue = setup_multiprocessing_queue()
for bed_file in bed_files:
prefix = os.path.basename(bed_file).split(".narrowPeak")[0].split(".bed")[0]
split_args = [
bed_file,
"{}/{}".format(out_dir, prefix),
bin_size,
stride,
final_length,
chromsizes,
"naive"]
split_queue.put([bin_regions_sharded, split_args])
# run the queue
run_in_parallel(split_queue, parallel=parallel, wait=True)
return None
|
634a0977918b25da3898f24bcf5d4027a11576a3
| 3,641,195
|
import json
def is_jsonable(data):
"""
Check is the data can be serialized
Source: https://stackoverflow.com/a/53112659/8957978
"""
try:
json.dumps(data)
return True
except (TypeError, OverflowError):
return False
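# Usage sketch (an addition): plain dicts, lists and strings serialize, while
# objects such as sets raise TypeError and report False.
print(is_jsonable({"a": [1, 2, 3]}))  # True
print(is_jsonable({1, 2, 3}))         # False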
|
acfc697025a8597fdd8043fe3245a339b27051c4
| 3,641,196
|
def check_ref_type(ref, allowed_types, ws_url):
"""
Validates the object type of ref against the list of allowed types. If it passes, this
returns True, otherwise False.
Really, all this does is verify that at least one of the strings in allowed_types is
a substring of the ref object type name.
Ex1:
ref = "KBaseGenomes.Genome-4.0"
allowed_types = ["assembly", "KBaseFile.Assembly"]
returns False
Ex2:
ref = "KBaseGenomes.Genome-4.0"
allowed_types = ["assembly", "genome"]
returns True
"""
obj_type = get_object_type(ref, ws_url).lower()
for t in allowed_types:
if t.lower() in obj_type:
return True
return False
|
263cf56124fb466544dfba8879a55d1f09160d35
| 3,641,197
|
from typing import Dict
from typing import List
def check_infractions(
infractions: Dict[str, List[str]],
) -> int:
"""
Check infractions.
:param infractions: the infractions dict {commit sha, infraction explanation}
:return: 0 if no infractions, non-zero otherwise
"""
if len(infractions) > 0:
logger.print('Missing sign-off(s):')
logger.print()
for commit_sha, commit_infractions in infractions.items():
logger.print('\t' + commit_sha)
for commit_infraction in commit_infractions:
logger.print('\t\t' + commit_infraction)
return 1
logger.print('All good!')
return 0
|
954f5f80dbfdd2a834f56662f4d04c1f788bb9ef
| 3,641,198
|
from collections import defaultdict
import numpy as np
from scipy.spatial import distance_matrix
def get_particle_tracks(_particle_tracker, particle_detects, time, is_last, debug_axis=None, metrics=None):
"""This module constructs the particle tracks.
WARNING: No examples for Finishing. No error warning.
Parameters
----------
_particle_tracker: dict
A particle tracker dictionary
particle_detects: list
A list of detected particle positions.
time: int
Current time.
is_last: bool
Last frame. Boolean.
debug_axis: ax
Matplotlib axis for debug plotting. Defaults to None.
metrics: dict
output parameter for track filtering metrics
Returns
-------
(A list of track dictionaries, Updated particle tracker dictionary)
"""
if not metrics:
metrics = {}
# Projecting the particle in the 'Current_Tracks' list.
projection = np.zeros(2)
projected_points = []
for track in _particle_tracker['Current_Tracks']:
projection = project(track['Particles_Estimated_Position'][-1],
track['Particles_Estimated_Velocity'][-1],
track['Particles_Estimated_Acceleration'][-1],
_particle_tracker['Use_Acceleration'],
delta_t=1.0)
projection_copy = projection.copy() # This prevents duplication in Projection_List
projected_points.append(projection_copy)
# NOTE: DEBUG plot projected particles
if len(projected_points) and debug_axis is not None:
projected_points = np.array(projected_points)
debug_axis.scatter(projected_points[:,1], projected_points[:,0], marker='o', s=2, color='cyan', label='path projections')
assignments = defaultdict(list)
assignment_dists = defaultdict(list)
unassigned = []
# If no detection, set it to empty list
if not particle_detects:
particle_detects = []
# Find the distances from the detections to projections and assign to most probable track
if len(projected_points) == 0 or len(particle_detects) == 0:
unassigned = particle_detects
else:
pairwise_distances = distance_matrix(particle_detects, projected_points)
for index, particle in enumerate(particle_detects):
# Get associated track index
assign_index = np.argmin(pairwise_distances[index])
# Get assignment distance
min_assign_dist = np.min(pairwise_distances[index])
# Get length of associated track
curr_track_len = len(_particle_tracker['Current_Tracks'][assign_index]['Particles_Position'])
if curr_track_len == 1 and min_assign_dist > _particle_tracker['Max_Init_Dist']:
# Second point in a track, but too far from first point
unassigned.append(particle)
elif curr_track_len > 1 and min_assign_dist > _particle_tracker['Max_Assignment_Dist']:
# Future points in a track, but too far from projection
unassigned.append(particle)
else:
# Detection is close enough to be associated to track
assignments[assign_index].append(particle)
if not _particle_tracker['Allow_Cluster_Aggregation']:
min_dist = pairwise_distances[index][assign_index]
assignment_dists[assign_index].append(min_dist)
finished_tracks = []
current_tracks = []
# Updating the tracks by aggregation
for i, _ in enumerate(_particle_tracker['Current_Tracks']):
positions = np.array(assignments[i])
aggregated_position = None
if len(positions) > 0:
if _particle_tracker['Allow_Cluster_Aggregation']:
aggregated_position = aggregate(positions)
else:
best_index = np.argmin(assignment_dists[i])
aggregated_position = positions[best_index]
track = _particle_tracker['Current_Tracks'][i]
finished = True
if (aggregated_position is not None
or track['Projected_Frames'] < _particle_tracker['Max_Projected_Frames']):
finished = False
track = update(time, aggregated_position, track, _particle_tracker['Use_Acceleration'])
current_tracks.append(track)
if is_last or finished:
track_length = sum(p is not None for p in track['Particles_Position'])
if track_length >= _particle_tracker['Min_Track_Obs']:
finished_track = finish(track, _particle_tracker['Last_ID'] + 1)
_particle_tracker['Last_ID'] = _particle_tracker['Last_ID'] + 1
finished_tracks.append(finished_track)
# Adding new tracks from the Unassigned list to the Current_Tracks
for particle in unassigned:
_particle_tracker['Temp_ID'] += 1
track = particle_track(time,
particle,
VELOCITY,
ACCELERATION,
_particle_tracker['Temp_ID'])
current_tracks.append(track)
if 'tracks_started' not in metrics:
metrics['tracks_started'] = 0
metrics['tracks_started'] += 1
_particle_tracker['Current_Tracks'] = current_tracks
return finished_tracks, _particle_tracker
|
57525a488b12cd3b5024879f66cd8e9904e2d71f
| 3,641,199
|