| content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M) |
|---|---|---|
def layer_prepostprocess(previous_value,
x,
sequence,
dropout_rate,
norm_type,
depth,
epsilon,
default_name,
name=None,
dropout_broadcast_dims=None,
layer_collection=None):
"""Apply a sequence of functions to the input or output of a layer.
The sequence is specified as a string which may contain the following
characters:
a: add previous_value
n: apply normalization
d: apply dropout
z: zero add
For example, if sequence=="dna", then the output is
previous_value + normalize(dropout(x))
Args:
previous_value: A Tensor, to be added as a residual connection ('a')
x: A Tensor to be transformed.
sequence: a string.
dropout_rate: a float
norm_type: a string (see apply_norm())
depth: an integer (size of last dimension of x).
epsilon: a float (parameter for normalization)
default_name: a string
name: a string
dropout_broadcast_dims: an optional list of integers less than 3
specifying in which dimensions to broadcast the dropout decisions.
saves memory.
layer_collection: A tensorflow_kfac.LayerCollection. Only used by the
KFAC optimizer. Default is None.
Returns:
a Tensor
"""
with tf.variable_scope(name, default_name=default_name):
if sequence == "none":
return x
for c in sequence:
if c == "a":
x += previous_value
elif c == "z":
x = zero_add(previous_value, x)
elif c == "n":
x = apply_norm(
x, norm_type, depth, epsilon, layer_collection=layer_collection)
else:
assert c == "d", ("Unknown sequence step %s" % c)
x = dropout_with_broadcast_dims(
x, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims)
return x
|
f983888739fa04d0c086e276997cec3919cf3e24
| 3,650,000
|
def build_norm_layer(cfg, num_channels, postfix=''):
""" Build normalization layer
Args:
Returns:
layer (fluid.dygrah.Layer): created norm layer
"""
assert isinstance(cfg, dict) and 'type' in cfg
cfg_ = cfg.copy()
layer_type = cfg_.pop('type')
if layer_type not in norm_cfg:
raise KeyError('Unrecognized norm type {}'.format(layer_type))
else:
abbr, norm_layer = norm_cfg[layer_type]
if norm_layer is None:
raise NotImplementedError
assert isinstance(postfix, (int, str))
name = abbr + str(postfix)
stop_gradient = cfg_.pop('stop_gradient', False)
cfg_.setdefault('epsilon', 1e-5)
layer = norm_layer(num_channels=num_channels, **cfg_)
# for param in layer.parameters():
# param.stop_gradient = stop_gradient
return name, layer
|
d29437854587f7aeaac3b97c2e98d70b56369402
| 3,650,001
|
import torchvision
def get_split_cifar100_tasks(num_tasks, batch_size):
"""
Returns data loaders for all tasks of split CIFAR-100
:param num_tasks:
:param batch_size:
:return:
"""
datasets = {}
# convention: tasks starts from 1 not 0 !
# task_id = 1 (i.e., first task) => start_class = 0, end_class = 4
cifar_transforms = torchvision.transforms.Compose([torchvision.transforms.ToTensor(),])
cifar_train = torchvision.datasets.CIFAR100('./data/', train=True, download=True, transform=cifar_transforms)
cifar_test = torchvision.datasets.CIFAR100('./data/', train=False, download=True, transform=cifar_transforms)
for task_id in range(1, num_tasks+1):
train_loader, test_loader = get_split_cifar100(task_id, batch_size, cifar_train, cifar_test)
datasets[task_id] = {'train': train_loader, 'test': test_loader}
return datasets
|
85c06c07682c74554aa11826431e5fdbd7eb84c8
| 3,650,002
|
import re
def _find_loose_date(date_string):
"""Look for four digit numbers in the string. If there's only one, return it."""
if re.search(r'digit', date_string):
# Might be something like "digitized 2010", which we want to avoid.
return None
# find all the (unique) four digit numbers in the date_string.
matches = set(re.findall(r'\b\d{4}\b', date_string))
if len(matches) != 1:
return None
year = list(matches)[0]
if is_valid_year(int(year)):
LOG.debug('Parsed %s from "%s" as a loose date.' % (year, date_string))
return (year, year)
|
9e946af42ad28c26bed4b347a04a1fd2a49ea104
| 3,650,003
|
import subprocess
import time
import syslog
import sys
import select
def invoke(command_requested, timeout=DEFAULT_TIMEOUT):
"""
"""
p = subprocess.Popen(command_requested, shell=True, stdin=subprocess.PIPE, stdout=None, stderr=None)
p.stdin.close() # since we do not allow invoked processes to listen to stdin, we close stdin to the subprocess
try:
return_code = p.wait(timeout)
except TypeError:
# most likely this is a python version lower than 3.3
start = time.time()
while p.poll() is None and (time.time() - start) < timeout:
time.sleep(0.1) # nicer than a pass, which would cause a lot of CPU usage
return_code = p.poll()
if return_code is None:
return_code = -1
            syslog.syslog('Invocation timed out')
except subprocess.TimeoutExpired: # works on python < 3.3 because a known exception is caught first
return_code = -1
syslog.syslog('Invocation timed out')
# We are also interested in data sent to stdin. This isn't allowed, and may
# be interesting for forensics
# Beware, this code is a bit iffy. In fact, reading from stdin seems iffy
# to me.
if sys.stdin in select.select([sys.stdin], [], [], 0)[0]:
stdin_lines = sys.stdin.readline()
else:
stdin_lines = []
return return_code, stdin_lines
|
ef121fea037b39cd5d9b19e782257b0c8e8fad8c
| 3,650,004
|
from pathlib import Path
from tempfile import gettempdir
import os
import shutil
import re
import zipfile
import bpy  # Blender Python API; bpy.app.version is used below
# clean_file() is a helper defined elsewhere in the same package (not shown here)
def zip_addon(addon: str, addon_dir: str):
""" Zips 'addon' dir or '.py' file to 'addon.zip' if not yet zipped, then moves the archive to 'addon_dir'.
:param addon: Absolute or relative path to a directory to zip or a .zip file.
:param addon_dir: Path to Blender's addon directory to move the zipped archive to.
:return (bpy_module, zip_file) Tuple of strings - an importable module name, an addon zip file path.
"""
addon_path = Path(addon).resolve()
addon_basename = addon_path.name
# Check if addon is already zipped
already_zipped = False
if addon_basename.endswith(".zip"):
already_zipped = True
# Delete target addon dir if exists
if os.path.isdir(addon_dir):
shutil.rmtree(addon_dir)
os.mkdir(addon_dir)
print(f"Addon dir is - {os.path.realpath(addon_dir)}")
if not already_zipped: # Zip the addon
# Get bpy python module from addon file name
bpy_module = re.sub(".py", "", addon_basename)
# Create zip archive using the module name
zfile = Path(f"{bpy_module}.zip").resolve()
print(f"Future zip path is - {zfile}")
print(f"Zipping addon - {bpy_module}")
# Zip addon content
# -------------------
zf = zipfile.ZipFile(zfile, "w")
if addon_path.is_dir(): # Addon is a directory, zip hierarchy
cwd = os.getcwd()
temp_dir = Path(gettempdir(), "blender_addon_tester")
# Clean temp dir if already exists
if temp_dir.is_dir():
shutil.rmtree(temp_dir)
# Creating the addon under the temp dir with its hierarchy
shutil.copytree(addon_path, temp_dir.joinpath(addon_path.relative_to(addon_path.anchor)))
# Move to temp dir
os.chdir(temp_dir)
# Clear python cache
if os.path.isdir("__pycache__"):
shutil.rmtree("__pycache__")
# Write addon content into archive
for dirname, subdirs, files in os.walk(addon_path):
for filename in files:
filename = os.path.join(dirname, filename)
# Clean file
clean_file(filename)
# Write file into zip under its hierarchy
zf.write(filename, arcname=os.path.relpath(filename, addon_path.parent))
# Go back to start dir
os.chdir(cwd)
# Remove temp dir
shutil.rmtree(temp_dir)
else: # Addon is a file, zip only the file
# Clean file
#y = addon_path.as_posix()
y = addon_basename
#print(y)
clean_file(y)
# Write single addon file into zip
zf.write(y)
# End zip building
zf.close()
else: # Addon is already zipped, take it as it is
zfile = addon_path
print(f"Detected zip path is - {zfile}. No need to zip the addon beforehand.")
# Get bpy python module from zip file name
bpy_module = addon_basename.split(".zip")[0]
# Copy zipped addon with name extended by blender revision number
bl_revision = f"{bpy.app.version[0]}.{bpy.app.version[1]}"
bfile = f"{zfile.stem}_{bl_revision}.zip"
shutil.copy(zfile, bfile)
return bpy_module, bfile
|
4eb4731fd69dd9dc234640386c39b5494d013877
| 3,650,005
|
def set_simulation_data(
state_energies, T_array, state1_index, state2_index
):
"""
Create and set SimulationData objects for a pair of specified states
"""
# Set default UnitData object
default_UnitData = UnitData(
kb=kB.value_in_unit(unit.kilojoule_per_mole/unit.kelvin),
energy_conversion=1,
length_conversion=1,
volume_conversion=1,
temperature_conversion=1,
pressure_conversion=1,
time_conversion=1,
energy_str='KJ/mol',
length_str='nm',
volume_str='nm^3',
temperature_str='K',
pressure_str='bar',
time_str='ps'
)
# State 1
sim_data1 = SimulationData()
sim_data1.observables = ObservableData(
potential_energy=state_energies[state1_index,:],
)
sim_data1.ensemble = EnsembleData(
ensemble='NVT',
energy=state_energies[state1_index,:],
temperature=T_array[state1_index]
)
sim_data1.units = default_UnitData
# State 2
sim_data2 = SimulationData()
sim_data2.observables = ObservableData(
potential_energy=state_energies[state2_index,:],
)
sim_data2.ensemble = EnsembleData(
ensemble='NVT',
energy=state_energies[state2_index,:],
temperature=T_array[state2_index]
)
sim_data2.units = default_UnitData
return sim_data1, sim_data2
|
edff2bd66a359da10f64c175aa125f8749a2064d
| 3,650,006
|
import numpy as np
def park2_euc(x):
    """ Computes the park2 function """
max_val = 5.925698
x1 = x[0]
x2 = x[1]
x3 = x[2]
x4 = x[3]
ret = (2.0/3.0) * np.exp(x1 + x2) - x4*np.sin(x3) + x3
return min(ret, max_val)
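# Minimal usage sketch (assumes numpy is available): a hypothetical query point;
# the return value is capped at max_val = 5.925698.
print(park2_euc([0.5, 0.25, 0.75, 0.1]))  # about 2.09, well below the cap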
|
96448c502867d360010238526791144fdc1e7581
| 3,650,007
|
def num_compositions_jit(m, n):
"""
Numba jit version of `num_compositions`. Return `0` if the outcome
exceeds the maximum value of `np.intp`.
"""
return comb_jit(n+m-1, m-1)
|
40562a1ee1564e7b2015f5b8e5d2298a18644493
| 3,650,008
|
def fake_get_vim_object(arg):
"""Stubs out the VMwareAPISession's get_vim_object method."""
return fake_vmware_api.FakeVim()
|
ee7c7b0331f344b1428e48da38d185dc01bf11d9
| 3,650,009
|
import os
import re
def get_version(*file_paths):
"""Retrieves the version from replicat_documents/__init__.py"""
filename = os.path.join(os.path.dirname(__file__), *file_paths)
version_file = open(filename).read()
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
|
b94580d378b4e35777f9719fe898fd134aba5170
| 3,650,010
|
import json
def get_old_ids(title):
"""
Returns all the old ids of a particular site given the title of the
Wikipedia page
"""
raw_data = json.loads( readInDataFromURL("https://en.wikipedia.org/w/api.php?action=query&prop=revisions&format=json&rvlimit=100000&titles=" + title) )
old_ids = dict() # initialize
    for page_id, revisions in raw_data['query']['pages'].items():
print(revisions)
# for revision in revisions:
# old_ids[revision.]
# try:
# for extlink in page['extlinks']:
# # print to the output file
# print('%s\t%s\t%s'%(page_id, name, extlink['*']), file=outputfile)
# except:
# if options.verbose:
# print('Page %s does not have any external links...'%name)
# print(data)
return old_ids
|
9e9bc37ac51d7b3a8491fa41db5867943a170e1e
| 3,650,011
|
import numpy as np
from functools import partial
def max_expectation_under_constraint(f: np.ndarray, q: np.ndarray, c: float, eps: float = 1e-2,
display: bool = False) -> np.ndarray:
"""
Solve the following constrained optimisation problem:
max_p E_p[f] s.t. KL(q || p) <= c
:param f: an array of values f(x), np.array of size n
:param q: a discrete distribution q(x), np.array of size n
:param c: a threshold for the KL divergence between p and q.
:param eps: desired accuracy on the constraint
:param display: plot the function
:return: the argmax p*
"""
np.seterr(all="warn")
if np.all(q == 0):
q = np.ones(q.size) / q.size
x_plus = np.where(q > 0)
x_zero = np.where(q == 0)
p_star = np.zeros(q.shape)
lambda_, z = None, 0
q_p = q[x_plus]
f_p = f[x_plus]
f_star = np.amax(f)
theta = partial(theta_func, q_p=q_p, f_p=f_p, c=c)
d_theta_dl = partial(d_theta_dl_func, q_p=q_p, f_p=f_p)
if f_star > np.amax(f_p):
theta_star = theta_func(f_star, q_p=q_p, f_p=f_p, c=c)
if theta_star < 0:
lambda_ = f_star
z = 1 - np.exp(theta_star)
p_star[x_zero] = 1.0 * (f[x_zero] == np.amax(f[x_zero]))
p_star[x_zero] *= z / p_star[x_zero].sum()
if lambda_ is None:
if np.isclose(f_p, f_p[0]).all():
return q
else:
# Binary search seems slightly (10%) faster than newton
# lambda_ = binary_search(theta, eps, a=f_star, display=display)
lambda_ = newton_iteration(theta, d_theta_dl, eps, x0=f_star + 1, a=f_star, display=display)
# numba jit binary search is twice as fast as python version
# lambda_ = binary_search_theta(q_p=q_p, f_p=f_p, c=c, eps=eps, a=f_star)
beta = (1 - z) / (q_p @ (1 / (lambda_ - f_p)))
if beta == 0:
x_uni = np.where((q > 0) & (f == f_star))
if np.size(x_uni) > 0:
p_star[x_uni] = (1 - z) / np.size(x_uni)
else:
p_star[x_plus] = beta * q_p / (lambda_ - f_p)
return p_star
|
88a67ae4eece82c08bc683dc015904f2d307c54f
| 3,650,012
|
def payload_to_plain(payload=None):
"""
Converts the myADS results into the plain text message payload
:param payload: list of dicts
:return: plain text formatted payload
"""
formatted = u''
for p in payload:
formatted += u"{0} ({1}) \n".format(p['name'], p['query_url'].format(p['qtype'], p['id']))
for r in p['results']:
first_author = _get_first_author_formatted(r)
if type(r.get('title', '')) == list:
title = r.get('title')[0]
else:
title = r.get('title', '')
formatted += u"\"{0},\" {1} ({2})\n".format(title, first_author, r['bibcode'])
formatted += u"\n"
return formatted
|
53050791335dd8d259bf6b55bd36d3e8bc3f5fb0
| 3,650,013
|
import json
def get_credentials_from_request(cloud, request):
"""
Extracts and returns the credentials from the current request for a given
cloud. Returns an empty dict if not available.
"""
if request.META.get('HTTP_CL_CREDENTIALS_ID'):
return get_credentials_by_id(
cloud, request, request.META.get('HTTP_CL_CREDENTIALS_ID'))
# In case a base class instance is sent in, attempt to retrieve the actual
# subclass.
if type(cloud) is models.Cloud:
cloud = models.Cloud.objects.get_subclass(slug=cloud.slug)
if isinstance(cloud, models.OpenStack):
os_username = request.META.get('HTTP_CL_OS_USERNAME')
os_password = request.META.get('HTTP_CL_OS_PASSWORD')
if os_username or os_password:
os_project_name = request.META.get('HTTP_CL_OS_PROJECT_NAME')
os_project_domain_name = request.META.get(
'HTTP_CL_OS_PROJECT_DOMAIN_NAME')
os_user_domain_name = request.META.get(
'HTTP_CL_OS_USER_DOMAIN_NAME')
d = {'os_username': os_username, 'os_password': os_password}
if os_project_name:
d['os_project_name'] = os_project_name
if os_project_domain_name:
d['os_project_domain_name'] = os_project_domain_name
if os_user_domain_name:
d['os_user_domain_name'] = os_user_domain_name
return d
else:
return {}
elif isinstance(cloud, models.AWS):
aws_access_key = request.META.get('HTTP_CL_AWS_ACCESS_KEY')
aws_secret_key = request.META.get('HTTP_CL_AWS_SECRET_KEY')
if aws_access_key or aws_secret_key:
return {'aws_access_key': aws_access_key,
'aws_secret_key': aws_secret_key,
}
else:
return {}
elif isinstance(cloud, models.Azure):
azure_subscription_id = request.META.get(
'HTTP_CL_AZURE_SUBSCRIPTION_ID')
azure_client_id = request.META.get('HTTP_CL_AZURE_CLIENT_ID')
azure_secret = request.META.get('HTTP_CL_AZURE_SECRET')
azure_tenant = request.META.get('HTTP_CL_AZURE_TENANT')
azure_resource_group = request.META.get('HTTP_CL_AZURE_RESOURCE_GROUP')
azure_storage_account = request.META.get(
'HTTP_CL_AZURE_STORAGE_ACCOUNT')
azure_vm_default_username = request.META.get(
'HTTP_CL_AZURE_VM_DEFAULT_USERNAME')
if (azure_subscription_id and azure_client_id and azure_secret and
azure_tenant):
return {'azure_subscription_id': azure_subscription_id,
'azure_client_id': azure_client_id,
'azure_secret': azure_secret,
'azure_tenant': azure_tenant,
'azure_resource_group': azure_resource_group,
'azure_storage_account': azure_storage_account,
'azure_vm_default_username': azure_vm_default_username
}
else:
return {}
elif isinstance(cloud, models.GCP):
gcp_credentials_json = request.META.get('HTTP_CL_GCP_CREDENTIALS_JSON')
if gcp_credentials_json:
return json.loads(gcp_credentials_json)
else:
return {}
else:
raise Exception("Unrecognised cloud provider: %s" % cloud)
|
29fa45d17a0473643b2db448dfbe2de7837c4dd7
| 3,650,014
|
from functools import reduce
def nCr(n, r):
    """n-choose-r.
    Thanks for the "compact" solution go to:
    http://stackoverflow.com/questions/2096573/counting-combinations-and-permutations-efficiently
    """
    # Multiply and divide in lockstep; every intermediate result is an exact integer.
    return reduce(
        lambda x, y: x * y[0] // y[1],
        zip(range(n - r + 1, n + 1),
            range(1, r + 1)),
        1)
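# Usage sketch: a couple of spot checks of the reduce-based formula.
print(nCr(5, 2))   # 10
print(nCr(52, 5))  # 2598960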
|
06ab7a4e12a35cf49f7ddf3e75780576d3b8972c
| 3,650,015
|
from pythia.pyre.inventory import facility
from pylith.bc.DirichletTimeDependent import DirichletTimeDependent
def bcFactory(name):
"""Factory for boundary condition items.
"""
return facility(name, family="boundary_condition", factory=DirichletTimeDependent)
|
65bb203b901c1648ee504bfd5bfd0956e9f849d4
| 3,650,016
|
def decode(value):
"""Decode utf-8 value to string.
Args:
value: String to decode
Returns:
result: decoded value
"""
# Initialize key variables
result = value
# Start decode
if value is not None:
        if isinstance(value, bytes):
result = value.decode('utf-8')
# Return
return result
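# Usage sketch: bytes are decoded, anything else passes through unchanged.
print(decode(b'caf\xc3\xa9'))      # -> 'café'
print(decode('already a str'))     # -> 'already a str'
print(decode(None))                # -> None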
|
9704678f6ff96de3b711758922c28f5ecbd11bc7
| 3,650,017
|
import numpy as np
def sequence_rec_sqrt(x_init, iter, dtype=int):
"""
Mathematical sequence: x_n = x_{n-1} * sqrt(n)
:param x_init: initial values of the sequence
:param iter: iteration until the sequence should be evaluated
    :param dtype: data type to cast to (either int or float)
:return: element at the given iteration and array of the whole sequence
"""
# exponential growth
def iter_function(x_seq, i, x_init):
return x_seq[i - 1, :] * np.sqrt(i + 1) # i+1 because sqrt(1) = 1
return sequence(x_init, iter, iter_function, dtype)
|
a7e695ee605caad5cef7881a2eeafbee8a25bf15
| 3,650,018
|
def convert_string_to_type(string_value, schema_type):
"""
Attempts to convert a string value into a schema type.
This method may evaluate code in order to do the conversion
and is therefore not safe!
"""
# assume that the value is a string unless otherwise stated.
if schema_type == "float":
evaluated_value = float(string_value)
elif schema_type == "int":
evaluated_value = int(string_value)
elif schema_type == "bool":
if string_value == "False":
evaluated_value = False
elif string_value == "True":
evaluated_value = True
else:
raise TankError("Invalid boolean value %s! Valid values are True and False" % string_value)
elif schema_type == "list":
evaluated_value = eval(string_value)
elif schema_type == "dict":
evaluated_value = eval(string_value)
else:
# assume string-like
evaluated_value = string_value
return evaluated_value
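# Usage sketch; note the "list"/"dict" branches go through eval() and are therefore
# unsafe for untrusted input, as the docstring warns.
print(convert_string_to_type("3.14", "float"))      # 3.14
print(convert_string_to_type("True", "bool"))       # True
print(convert_string_to_type("[1, 2, 3]", "list"))  # [1, 2, 3]
print(convert_string_to_type("hello", "str"))       # 'hello'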
|
4d99470f7094a36567851bb23c1edd49686149cf
| 3,650,019
|
import os
import glob
def counter(path):
"""Get number of files by searching directory recursively"""
if not os.path.exists(path):
return 0
count = 0
for r, dirs, files in os.walk(path):
for dr in dirs:
count += len(glob.glob(os.path.join(r, dr + "/*")))
return count
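# Usage sketch with a throwaway directory tree; only entries located inside
# subdirectories of `path` are matched by the glob, so the expected count here is 2.
import tempfile
with tempfile.TemporaryDirectory() as root:
    os.makedirs(os.path.join(root, "sub"))
    for fname in ("a.txt", "b.txt"):
        open(os.path.join(root, "sub", fname), "w").close()
    print(counter(root))  # 2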
|
e2d0498d1156a10a4ee98a3ad6701030af68e594
| 3,650,020
|
def get_local_coordinate_system(time_dep_orientation: bool, time_dep_coordinates: bool):
"""
Get a local coordinate system.
Parameters
----------
time_dep_orientation :
If True, the coordinate system has a time dependent orientation.
time_dep_coordinates :
If True, the coordinate system has a time dependent coordinates.
Returns
-------
weldx.transformations.LocalCoordinateSystem:
A local coordinate system
"""
if not time_dep_coordinates:
coords = Q_(np.asarray([2.0, 5.0, 1.0]), "mm")
else:
coords = Q_(
np.asarray(
[[2.0, 5.0, 1.0], [1.0, -4.0, 1.2], [0.3, 4.4, 4.2], [1.1, 2.3, 0.2]]
),
"mm",
)
if not time_dep_orientation:
orientation = tf.rotation_matrix_z(np.pi / 3)
else:
orientation = tf.rotation_matrix_z(np.pi / 2 * np.array([1, 2, 3, 4]))
if not time_dep_orientation and not time_dep_coordinates:
return tf.LocalCoordinateSystem(orientation=orientation, coordinates=coords)
time = pd.DatetimeIndex(["2000-01-01", "2000-01-02", "2000-01-03", "2000-01-04"])
return tf.LocalCoordinateSystem(
orientation=orientation, coordinates=coords, time=time
)
|
daa8259e92a31884d798915522d4e538f316fc91
| 3,650,021
|
def _get_tooltip(tooltip_col, gpd):
"""Show everything or columns in the list."""
if tooltip_col is not None:
tooltip = folium.GeoJsonTooltip(fields=tooltip_col)
else:
tooltip = tooltip_col
return tooltip
|
8a2dc564ef65aa0eaf8a9e85457876ad0e6989ec
| 3,650,022
|
def encryption(message: bytes, key: bytes) -> bytes:
    """Return the ciphertext by XORing the message with a repeating key"""
return b"".join(
[bytes([message[i] ^ key[i % len(key)]]) for i in range(len(message))]
)
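# Usage sketch with a hypothetical plaintext/key pair; XORing twice with the same
# repeating key restores the original message.
msg = b"attack at dawn"
key = b"key"
ciphertext = encryption(msg, key)
assert encryption(ciphertext, key) == msg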
|
674e4a27491a9f6c918f2129276349ba426cd676
| 3,650,023
|
def data_fun(times):
"""Generate time-staggered sinusoids at harmonics of 10Hz"""
global n
n_samp = len(times)
window = np.zeros(n_samp)
start, stop = [int(ii * float(n_samp) / (2 * n_dipoles))
for ii in (2 * n, 2 * n + 1)]
window[start:stop] = 1.
n += 1
data = 1e-7 * np.sin(2. * np.pi * 10. * times)
data *= window
return data
|
edbdf5e059b8f4c3559386497961a1c65133a80b
| 3,650,024
|
def var(x, axis=None, ddof=0, keepdims=False):
"""
Computes the variance along the specified axis.
The variance is the average of the squared deviations from the mean, i.e.,
:math:`var = mean(abs(x - x.mean())**2)`.
Returns the variance, which is computed for the flattened array by default,
otherwise over the specified axis.
Note:
Numpy arguments `dtype`, `out` and `where` are not supported.
Args:
x (Tensor): A Tensor to be calculated.
axis (Union[None, int, tuple(int)]): Axis or axes along which the variance is computed.
The default is to compute the variance of the flattened array. Default: `None`.
ddof (int): Means Delta Degrees of Freedom. Default: 0.
The divisor used in calculations is :math:`N - ddof`, where :math:`N` represents the number of elements.
keepdims (bool): If this is set to True, the axes which are reduced are left in the result as
dimensions with size one. With this option, the result will broadcast correctly against the input array.
If the default value is passed, then keepdims will not be passed through to the var method of
sub-classes of tensor, however any non-default value will be. If the sub-class’ method does not
implement keepdims any exceptions will be raised. Default: `False`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Returns:
        Variance tensor.
Examples:
>>> import mindspore.numpy as np
>>> input_x = np.array([1., 2., 3., 4.])
>>> output = np.var(input_x)
>>> print(output)
1.25
"""
x = _to_tensor(x)
return x.var(axis, ddof, keepdims)
|
b39bf29caf4f47882fb3be900c2924a90b25a880
| 3,650,025
|
def check_inputs(supplied_inputs):
"""Check that the inputs are of some correct type and returned as AttributeDict."""
inputs = None
if supplied_inputs is None:
inputs = AttributeDict()
else:
if isinstance(supplied_inputs, DataFactory('dict')):
inputs = AttributeDict(supplied_inputs.get_dict())
elif isinstance(supplied_inputs, dict):
inputs = AttributeDict(supplied_inputs)
elif isinstance(supplied_inputs, AttributeDict):
inputs = supplied_inputs
else:
            raise ValueError(f'The supplied type {type(supplied_inputs)} of inputs is not supported. Supply a dict, Dict or an AttributeDict.')
return inputs
|
a5369767c23a96b44da2bff2c0ac49456e3452f1
| 3,650,026
|
def _parse_none(arg, fn=None):
"""Parse arguments with support for conversion to None.
Args:
arg (str): Argument to potentially convert.
fn (func): Function to apply to the argument if not converted to None.
Returns:
Any: Arguments that are "none" or "0" are converted to None;
otherwise, returns the original value.
"""
if arg.lower() in ("none", "0"):
return None
return arg if fn is None else fn(arg)
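# Usage sketch: "none"/"0" collapse to None, other values pass through `fn` when given.
print(_parse_none("None"))     # -> None
print(_parse_none("0"))        # -> None
print(_parse_none("42", int))  # -> 42
print(_parse_none("hello"))    # -> 'hello'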
|
4ebd283eb9e2218e523ba185c4500c9879d5719d
| 3,650,027
|
def generate_constraint(category_id, user):
"""
generate the proper basic data structure to express a constraint
based on the category string
"""
return {'year': category_id}
|
f55151a5b4b17bbf6eb697e1b1489ee4897f5db0
| 3,650,028
|
def get_RIB_IN_capacity(cvg_api,
multipath,
start_value,
step_value,
route_type,
port_speed,):
"""
Args:
cvg_api (pytest fixture): snappi API
temp_tg_port (pytest fixture): Ports mapping info of T0 testbed
multipath: ecmp value for BGP config
start_value: Start value of the number of BGP routes
step_value: Step value of the number of BGP routes to be incremented
route_type: IPv4 or IPv6 routes
port_speed: speed of the port used in test
"""
def tgen_capacity(routes):
conv_config = cvg_api.convergence_config()
config = conv_config.config
for i in range(1, 3):
config.ports.port(name='Test_Port_%d' % i, location=temp_tg_port[i-1]['location'])
c_lag = config.lags.lag(name="lag%d" % i)[-1]
lp = c_lag.ports.port(port_name='Test_Port_%d' % i)[-1]
lp.ethernet.name = 'lag_eth_%d' % i
if len(str(hex(i).split('0x')[1])) == 1:
m = '0'+hex(i).split('0x')[1]
else:
m = hex(i).split('0x')[1]
lp.protocol.lacp.actor_system_id = "00:10:00:00:00:%s" % m
lp.ethernet.name = "lag_Ethernet %s" % i
lp.ethernet.mac = "00:10:01:00:00:%s" % m
config.devices.device(name='Topology %d' % i)
config.options.port_options.location_preemption = True
layer1 = config.layer1.layer1()[-1]
layer1.name = 'port settings'
layer1.port_names = [port.name for port in config.ports]
layer1.ieee_media_defaults = False
layer1.auto_negotiation.rs_fec = True
layer1.auto_negotiation.link_training = False
layer1.speed = port_speed
layer1.auto_negotiate = False
def create_v4_topo():
eth = config.devices[0].ethernets.add()
eth.port_name = config.lags[0].name
eth.name = 'Ethernet 1'
eth.mac = "00:00:00:00:00:01"
ipv4 = eth.ipv4_addresses.add()
ipv4.name = 'IPv4 1'
ipv4.address = temp_tg_port[0]['ip']
ipv4.gateway = temp_tg_port[0]['peer_ip']
ipv4.prefix = int(temp_tg_port[0]['prefix'])
rx_flow_name = []
for i in range(2, 3):
if len(str(hex(i).split('0x')[1])) == 1:
m = '0'+hex(i).split('0x')[1]
else:
m = hex(i).split('0x')[1]
ethernet_stack = config.devices[i-1].ethernets.add()
ethernet_stack.port_name = config.lags[i-1].name
ethernet_stack.name = 'Ethernet %d' % i
ethernet_stack.mac = "00:00:00:00:00:%s" % m
ipv4_stack = ethernet_stack.ipv4_addresses.add()
ipv4_stack.name = 'IPv4 %d' % i
ipv4_stack.address = temp_tg_port[i-1]['ip']
ipv4_stack.gateway = temp_tg_port[i-1]['peer_ip']
ipv4_stack.prefix = int(temp_tg_port[i-1]['prefix'])
bgpv4 = config.devices[i-1].bgp
bgpv4.router_id = temp_tg_port[i-1]['peer_ip']
bgpv4_int = bgpv4.ipv4_interfaces.add()
bgpv4_int.ipv4_name = ipv4_stack.name
bgpv4_peer = bgpv4_int.peers.add()
bgpv4_peer.name = 'BGP %d' % i
bgpv4_peer.as_type = BGP_TYPE
bgpv4_peer.peer_address = temp_tg_port[i-1]['peer_ip']
bgpv4_peer.as_number = int(TGEN_AS_NUM)
route_range = bgpv4_peer.v4_routes.add(name="Network_Group%d" % i) #snappi object named Network Group 2 not found in internal db
route_range.addresses.add(address='200.1.0.1', prefix=32, count=number_of_routes)
as_path = route_range.as_path
as_path_segment = as_path.segments.add()
as_path_segment.type = as_path_segment.AS_SEQ
as_path_segment.as_numbers = aspaths
rx_flow_name.append(route_range.name)
return rx_flow_name
def create_v6_topo():
eth = config.devices[0].ethernets.add()
eth.port_name = config.lags[0].name
eth.name = 'Ethernet 1'
eth.mac = "00:00:00:00:00:01"
ipv6 = eth.ipv6_addresses.add()
ipv6.name = 'IPv6 1'
ipv6.address = temp_tg_port[0]['ipv6']
ipv6.gateway = temp_tg_port[0]['peer_ipv6']
ipv6.prefix = int(temp_tg_port[0]['ipv6_prefix'])
rx_flow_name = []
for i in range(2, 3):
if len(str(hex(i).split('0x')[1])) == 1:
m = '0'+hex(i).split('0x')[1]
else:
m = hex(i).split('0x')[1]
ethernet_stack = config.devices[i-1].ethernets.add()
ethernet_stack.port_name = config.lags[i-1].name
ethernet_stack.name = 'Ethernet %d' % i
ethernet_stack.mac = "00:00:00:00:00:%s" % m
ipv6_stack = ethernet_stack.ipv6_addresses.add()
ipv6_stack.name = 'IPv6 %d' % i
ipv6_stack.address = temp_tg_port[i-1]['ipv6']
ipv6_stack.gateway = temp_tg_port[i-1]['peer_ipv6']
ipv6_stack.prefix = int(temp_tg_port[i-1]['ipv6_prefix'])
bgpv6 = config.devices[i-1].bgp
bgpv6.router_id = temp_tg_port[i-1]['peer_ip']
bgpv6_int = bgpv6.ipv6_interfaces.add()
bgpv6_int.ipv6_name = ipv6_stack.name
bgpv6_peer = bgpv6_int.peers.add()
bgpv6_peer.name = 'BGP+_%d' % i
bgpv6_peer.as_type = BGP_TYPE
bgpv6_peer.peer_address = temp_tg_port[i-1]['peer_ipv6']
bgpv6_peer.as_number = int(TGEN_AS_NUM)
route_range = bgpv6_peer.v6_routes.add(name="Network Group %d" % i)
route_range.addresses.add(address='3000::1', prefix=64, count=number_of_routes)
as_path = route_range.as_path
as_path_segment = as_path.segments.add()
as_path_segment.type = as_path_segment.AS_SEQ
as_path_segment.as_numbers = aspaths
rx_flow_name.append(route_range.name)
return rx_flow_name
conv_config.rx_rate_threshold = 90/(multipath)
if route_type == 'IPv4':
rx_flows = create_v4_topo()
flow = config.flows.flow(name='IPv4_Traffic_%d' % routes)[-1]
elif route_type == 'IPv6':
rx_flows = create_v6_topo()
flow = config.flows.flow(name='IPv6_Traffic_%d' % routes)[-1]
else:
raise Exception('Invalid route type given')
flow.tx_rx.device.tx_names = [config.devices[0].name]
flow.tx_rx.device.rx_names = rx_flows
flow.size.fixed = 1024
flow.rate.percentage = 100
flow.metrics.enable = True
flow.metrics.loss = True
return conv_config
def run_traffic(routes):
logger.info('|-------------------- RIB-IN Capacity test, No.of Routes : {} ----|'.format(routes))
conv_config = tgen_capacity(routes)
cvg_api.set_config(conv_config)
""" Starting Protocols """
logger.info("Starting all protocols ...")
cs = cvg_api.convergence_state()
cs.protocol.state = cs.protocol.START
cvg_api.set_state(cs)
wait(TIMEOUT, "For Protocols To start")
""" Starting Traffic """
logger.info('Starting Traffic')
cs = cvg_api.convergence_state()
cs.transmit.state = cs.transmit.START
cvg_api.set_state(cs)
wait(TIMEOUT, "For Traffic To start")
try:
for j in range(start_value, 100000000000, step_value):
tx_frate, rx_frate = [], []
run_traffic(j)
flow_stats = get_flow_stats(cvg_api)
logger.info('Loss% : {}'.format(flow_stats[0].loss))
for flow in flow_stats:
tx_frate.append(flow.frames_tx_rate)
rx_frate.append(flow.frames_rx_rate)
logger.info("Tx Frame Rate : {}".format(tx_frate))
logger.info("Rx Frame Rate : {}".format(rx_frate))
if float(flow_stats[0].loss) > 0.001:
if j == start_value:
raise Exception('Traffic Loss Encountered in first iteration, reduce the start value and run the test')
                logger.info('Loss greater than 0.001 occurred')
logger.info('Reducing the routes and running test')
b = j-step_value
logger.info('Stopping Traffic')
cs = cvg_api.convergence_state()
cs.transmit.state = cs.transmit.STOP
cvg_api.set_state(cs)
wait(TIMEOUT-20, "For Traffic To stop")
break
logger.info('Stopping Traffic')
cs = cvg_api.convergence_state()
cs.transmit.state = cs.transmit.STOP
cvg_api.set_state(cs)
wait(TIMEOUT-20, "For Traffic To stop")
l = []
l.append(b+int(step_value/8))
l.append(b+int(step_value/4))
l.append(b+int(step_value/2))
l.append(b+step_value-int(step_value/4))
l.append(b+step_value-int(step_value/8))
for i in range(0,len(l)):
run_traffic(l[i])
flow_stats = get_flow_stats(cvg_api)
logger.info('Loss% : {}'.format(flow_stats[0].loss))
if float(flow_stats[0].loss) <= 0.001:
max_routes = start_value
pass
else:
max_routes = l[i]-int(step_value/8)
break
logger.info('Stopping Traffic')
cs = cvg_api.convergence_state()
cs.transmit.state = cs.transmit.STOP
cvg_api.set_state(cs)
wait(TIMEOUT-20, "For Traffic To stop")
""" Stopping Protocols """
logger.info("Stopping all protocols ...")
cs = cvg_api.convergence_state()
cs.protocol.state = cs.protocol.STOP
cvg_api.set_state(cs)
wait(TIMEOUT-20, "For Protocols To STOP")
except Exception as e:
logger.info(e)
finally:
columns = ['Test Name', 'Maximum no. of Routes']
logger.info("\n%s" % tabulate([['RIB-IN Capacity Test',max_routes]], headers=columns, tablefmt="psql"))
|
e13c85d9e6ebdbfba84e20a81324da8156e7c934
| 3,650,029
|
import os
def input_file_exists(filepath):
""" Return True if the file path exists, or is the stdin marker. """
return (filepath == '-') or os.path.exists(filepath)
|
5f6a0c2195ce90ba551679d516ebee0e593184c8
| 3,650,030
|
from typing import List
from typing import Set
def ladder_length(beginWord: str, endWord: str, wordList: List[str]) -> int:
"""
    Bidirectional BFS: expand alternately from both ends toward the target level, taking the level with more nodes as the target level.
:param beginWord:
:param endWord:
:param wordList:
:return:
>>> ladder_length('hit', 'cog', ["hot","dot","dog","lot","log","cog"])
5
>>> ladder_length('hit', 'cog', ["hot","dot","dog","lot","log"])
0
>>> ladder_length("hit","cog",["hot","dot","dog","lot","log"])
"""
if not beginWord or not endWord or endWord not in wordList:
return 0
all_chars: List[str] = [chr(i) for i in range(ord('a'), ord('z') + 1)]
    curr_word_set: Set[str] = {beginWord}  # nodes in the current level
    end_word_set: Set[str] = {endWord}  # nodes in the target level
    word_set: Set[str] = set(wordList)  # set for fast checks of whether a word is in the dictionary
level: int = 1
while curr_word_set:
        # avoid treating nodes of the same level as adjacent
level += 1
for cw in curr_word_set:
            # beginWord does not reappear in wordList (word_set)
if cw != beginWord:
word_set.remove(cw)
tmp_set: Set[str] = set()
for curr_word in curr_word_set:
for i, w in enumerate(curr_word):
for letter in all_chars:
if w == letter:
continue
changed: str = curr_word[:i] + letter + curr_word[i + 1:]
if changed in end_word_set:
return level
if changed in word_set:
tmp_set.add(changed)
        # let the level with more nodes act as the target level
if len(tmp_set) <= len(end_word_set):
curr_word_set = tmp_set
else:
            # reverse the search direction
curr_word_set = end_word_set
end_word_set = tmp_set
return 0
|
020f3ffd2e009b682a47ff9aad8d1d6025c29f37
| 3,650,031
|
def setup_option(request):
"""Создаем объект для удобство работы с переменными в тестовых методах
"""
setup_parameters = {}
if request.config.getoption('--site_url'):
setup_parameters['site_url'] = request.config.getoption('--site_url')
return setup_parameters
|
49908ee8e1422cc4fd05c6d93a96c00d734cf6d1
| 3,650,032
|
import time
import torch
def train_one_epoch(img_input,model,optimizer,writer,epoch,args):
"""
Finish
1.train for one epoch
2.print process, total loss, data time in terminal
3.save loss, lr, output img in tensorboard
Note
1.you can change the save frequency
"""
loss_train = 0
model.train()
length = len(img_input)
print("iteration:",length)
train_time = time.time()
begin = time.time()
'''loss control'''
loss_for_control = torch.zeros([6,args.paf_num+args.heatmap_num])
weight_con = torch.ones([1,args.paf_num+args.heatmap_num])
weight_con = weight_con.cuda()
'''start training'''
for each_batch, (img, target_heatmap, target_paf) in enumerate(img_input):
data_time = time.time() - begin
img = img.cuda()
target_heatmap = target_heatmap.cuda()
target_paf = target_paf.cuda()
# heat_mask = heat_mask.cuda()
# paf_mask = paf_mask.cuda()
_, saved_for_loss = model(img)
#loss = CMUnet_loss.get_loss(saved_for_loss,target_heatmap,target_paf,args,weight_con)
loss = resnet_loss.get_loss(saved_for_loss,target_heatmap,target_paf,args,weight_con)
# for i in range(args.paf_stage):
# for j in range(args.paf_num):
# loss_for_control[i][j] += loss['stage_{0}_{1}'.format(i,j)]
# for i in range(len(saved_for_loss)-args.paf_stage):
# for j in range(args.heatmap_num):
# loss_for_control[i][j] += loss['stage_{0}_{1}'.format(i,j)]
optimizer.zero_grad()
loss["final"].backward()
optimizer.step()
loss_train += loss["final"]
if each_batch % args.print_fre == 0:
print_to_terminal(epoch,each_batch,length,loss,loss_train,data_time)
#print_to_terminal(epoch,each_batch,length,loss,loss_train,data_time)
#writer.add_scalar("train_loss_iterations", loss_train, each_batch + epoch * length)
begin = time.time()
'''for short test'''
# if each_batch == 5:
# break
#weight_con = Online_weight_control(loss_for_control)
loss_train /= length
train_time = time.time() - train_time
print('total training time:',train_time)
return loss_train
|
b26e2933dd3575e45c33ba6bf801f5a92fc72ab7
| 3,650,033
|
def get_unique_tokens(texts):
"""
Returns a set of unique tokens.
    >>> sorted(get_unique_tokens([['oeffentl', 'ist', 'oeffentl']]))
    ['ist', 'oeffentl']
"""
unique_tokens = set()
for text in texts:
for token in text:
unique_tokens.add(token)
return unique_tokens
|
f9c174b264082b65a328fd9edf9421e7ff7808a2
| 3,650,034
|
def _symmetric_difference(provided: dict, chosen: dict) -> dict:
"""
Returns the fields that are not in common between provided and chosen JSON schema.
:param provided: the JSON schema to removed the chosen schema from.
:param chosen: the JSON schema to remove from the provided schema.
:return: a JSON schema with the chosen JSON schema removed.
"""
remove_keys = []
for k, vp in provided.items():
vc = chosen.get(k)
if vc is not None:
if isinstance(vp, dict):
vc = chosen.get(k, {})
assert isinstance(vc, dict), type_not_matching_str
provided[k] = _symmetric_difference(vp, vc)
elif isinstance(vp, list):
vc = chosen.get(k, [])
assert isinstance(vc, list), type_not_matching_str
provided[k] = [i for i in vp if i not in vc] # quadratic performance, optimize
else:
remove_keys.append(k)
for k in remove_keys:
provided.pop(k)
return provided
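# Usage sketch with two hypothetical schemas; `type_not_matching_str` is a module-level
# message that is only evaluated if an assertion fails, so this happy path does not need it.
provided = {"type": "object", "title": "User", "required": ["id", "email"]}
chosen = {"type": "object", "required": ["id"]}
print(_symmetric_difference(provided, chosen))
# -> {'title': 'User', 'required': ['email']}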
|
5900c6de35c0665ab2c0ec10c4df4dc87b75483a
| 3,650,035
|
import subprocess
import re
def check_pv_name_in_rados(arg, image_id, pvc_name, pool_name):
"""
validate pvc information in rados
"""
omapkey = 'csi.volume.%s' % pvc_name
cmd = ['rados', 'getomapval', 'csi.volumes.default',
omapkey, "--pool", pool_name]
    if arg.userkey:
        cmd += ["--id", arg.userid, "--key", arg.userkey]
if arg.toolboxdeployed is True:
tool_box_name = get_tool_box_pod_name(arg)
kube = [arg.command]
if arg.kubeconfig != "":
if arg.command == "oc":
kube += ["--config", arg.kubeconfig]
else:
kube += ["--kubeconfig", arg.kubeconfig]
kube += ['exec', '-it', tool_box_name, '-n',
arg.rooknamespace, '--']
cmd = kube+cmd
out = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = out.communicate()
if stderr is not None:
return False
name = ''
    lines = [x.strip() for x in stdout.decode().split("\n")]
for line in lines:
if ' ' not in line:
continue
if 'value' in line and 'bytes' in line:
continue
part = re.findall(r'[A-Za-z0-9\-]+', line)
if part:
name += part[-1]
if name != image_id:
if arg.debug:
print("expected image Id %s found Id in rados %s" %
(image_id, name))
return False
return True
|
11d530ed1047064367a36af3fbf8652b1e0f60a8
| 3,650,036
|
def moved_in(nn_orig, nn_proj, i, k):
"""Determine points that are neighbours in the projection space,
but were not neighbours in the original space.
nn_orig
neighbourhood matrix for original data
nn_proj
neighbourhood matrix for projection data
i
index of the point considered
k
size of the neighbourhood considered
Return a list of indices for points which are 'moved in' to point i
"""
pp = list(nn_proj[i, 1:k + 1])
oo = list(nn_orig[i, 1:k + 1])
for j in oo:
if (j in oo) and (j in pp):
pp.remove(j)
return pp
|
b63a9b0f53554032fc920aeaf6d3d76b93dd8ab3
| 3,650,037
|
import re
def _get_lines_changed(line_summary):
"""
Parse the line diff summary into a list of numbers representing line numbers added or changed
:param line_summary: the summary from a git diff of lines that have changed (ex: @@ -1,40 +1,23 @@)
:return: a list of integers indicating which lines changed for that summary
"""
lines = re.search(r"\@\@.*?\+(.+?) \@\@", line_summary).group(1)
if "," in lines:
start, count = [int(x) for x in lines.split(",")]
return list(range(start, start + count))
return [int(lines)]
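# Usage sketch parsing two hypothetical hunk headers.
print(_get_lines_changed("@@ -1,40 +1,23 @@"))  # lines 1..23 of the new file
print(_get_lines_changed("@@ -10 +12 @@"))      # [12]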
|
01d1b51ef480a0d7dcdc916fe68aac08ce81d23f
| 3,650,038
|
def tj_agri_sup():
"""
Real Name: b'Tj Agri Sup'
Original Eqn: b'MIN(Tj Agri Dem *Agri Tajan Dam Coef, (Tj Outflow-Tj Dom Sup-Tj Env Sup-Tj Ind Sup))'
Units: b''
Limits: (None, None)
Type: component
b''
"""
return np.minimum(tj_agri_dem() * agri_tajan_dam_coef(),
(tj_outflow() - tj_dom_sup() - tj_env_sup() - tj_ind_sup()))
|
07c6029dc062f20756b3f72289640a29526c41bf
| 3,650,039
|
import tensorflow as tf
def correlation_coefficient(y_true, y_pred):
"""The CC, is the Pearson’s correlation coefficient and treats the saliency
and ground truth density maps, as random variables measuring the linear
relationship between them.Values are first divided by their sum for each
image to yield a distribution that adds to 1.
Args:
y_true (tensor, float32): A 4d tensor that holds the ground truth
saliency maps with values between 0 and 255.
y_pred (tensor, float32): A 4d tensor that holds the predicted saliency
maps with values between 0 and 1.
Returns:
tensor, float32: A 0D tensor that holds the averaged error.
"""
sum_y_true = tf.reduce_sum(y_true, axis=[1, 2, 3], keep_dims=True)
sum_y_pred = tf.reduce_sum(y_pred, axis=[1, 2, 3], keep_dims=True)
y_true /= (sum_y_true + 1e-7)
y_pred /= (sum_y_pred + 1e-7)
N = shape_r_out * shape_c_out
sum_prod = tf.reduce_sum(y_true * y_pred, axis=[1, 2, 3])
sum_x = tf.reduce_sum(y_true, axis=[1, 2, 3])
    sum_y = tf.reduce_sum(y_pred, axis=[1, 2, 3])
sum_x_square = tf.reduce_sum(tf.square(y_true), axis=[1, 2, 3])
sum_y_square = tf.reduce_sum(tf.square(y_pred), axis=[1, 2, 3])
num = sum_prod - ((sum_x * sum_y) / N)
den = tf.sqrt((sum_x_square - tf.square(sum_x) / N) * (sum_y_square - tf.square(sum_y) / N))
return -tf.reduce_mean(num / den)
|
9d0f7825219a5957edfbf464ca9b62182b81bb3c
| 3,650,040
|
def init_args():
"""Init command line args used for configuration."""
parser = init_main_args()
return parser.parse_args()
|
c2939b8d6fbefa7a6b792d13c98a805a3e53785f
| 3,650,041
|
import warnings
def _fit_binary(estimator, X, y, classes=None, **kwargs):
"""Fit a single binary estimator with kwargs."""
unique_y = np.unique(y)
if len(unique_y) == 1:
if classes is not None:
if y[0] == -1:
c = 0
else:
c = y[0]
warnings.warn("Label %s is present in all training examples." % str(classes[c]))
estimator = _ConstantPredictor().fit(X, unique_y)
else:
estimator = clone(estimator)
estimator.fit(X, y, **kwargs)
return estimator
|
24e37aa50cada6cce4ab52c1be85cace3ad4c417
| 3,650,042
|
import csv
def data_index(person, dim):
"""
Output sequence of eye gaze (x, y) positions from the dataset for a person and a dimension of that person (task, session, etc)
Index starts at 0.
The vectors are [x, y, flag], flag being if it's null
"""
session = "S1" if dim % 2 == 0 else "S2"
# S1_Balura_Game S1_Fixations S1_Horizontal_Saccades S1_Random_Saccades S1_Reading S1_Video_1 S1_Video_2
for exc in exceptions:
person += (exc-1 <= person)
num = str(person+1).rjust(3, "0")
#global info, tasks, tasks_code
dir = "data/Round_1/id_1" + num + "/" + session + "/" + session + tasks[dim//2] + \
"/S_1" + num + "_" + session + "_" + tasks_code[dim//2] + \
".csv"
pos = []
mask = []
with open(dir) as csvfile:
spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')
vecs = []
pads = []
for i, row in enumerate(spamreader):
if i < 1:
continue
row = ''.join(row).split(",")
if (i-1) % config['Hz'] == 0 and (i-1) != 0:
vecs = np.stack(vecs)
pads = np.stack(pads)
pos.append(vecs)
mask.append(pads)
vecs = []
pads = []
if (i-1) % (config['Hz'] // config['second_split']) == 0:
flag = (row[1] == 'NaN' or row[2] == 'NaN')
arr = np.array([0, 0, flag]) if flag else np.array([float(row[1]), float(row[2]), flag])
vecs.append(arr)
arr2 = np.array([0]*(info.feature_size-1)+[info.feature_size]) if flag else np.ones(info.feature_size)
# the info.feature_size instead of 1 is to rescale and give it equal "weight"
pads.append(arr2)
pos=np.stack(pos)
mask=np.stack(mask)
return pos, mask, [tasks[dim//2]]
|
e8b37aaeb2c228f0749aece26609fb04e0d4a226
| 3,650,043
|
def getStatic():
"""
These are "static" params for a smoother application flow and fine tuning of some params
Not all functions are implemented yet
Returns the necessary Params to run this application
"""
VISU_PAR = {
# =============================================================================
# More general Params
# =============================================================================
# does not consider samples which are longer than this value in [s]
"delteSampleAbove[s]": 5,
        # flag for extracting/considering long Samples
        "extractLongs" : False,
        # does not consider samples which are shorter than this value in [s]
        "toShort[s]": 0.003,
        # flag for extracting/considering too short Samples
"extractShort" : False,
# this might indicate a loop !!
"bpmConfidence" : 1,
# flag for extractring/considering potential Loops
"extractLoops" : False,
#compress all features to a range from (0,..,1) ->getFeatureStack()
"compress": True,
# invert all negative feature values with a total negative correlation ->getPandasCorrelation()
"invNegative" : True,
# =============================================================================
# Application Modes
# =============================================================================
# scriptMode := ("clustering", "get_N_Closest", "analyseWithGT", "optimizer")
# "clustering" := group samples into 'n Cluster' not regarding their GT
# "get_N_Closest" := select N most similar samples to a reference sample not regarding their GT
# requires path of a JSON file which contains the features of one sample (compareFilePath)
# requires a number (N) (n_mostSimilar)
# "analyseWithGT" := analyse a set of features and evaluate with GT-Labels
# it is still possible to cluster within this option and save a landmap and restructure files
# "optimizer" := trys a new subset of features and save the new subset, Needs GTs
#
# the hiearchy of the application mode is: analyseWithGT (when true, most params below are usefull)
# clustering (There will be no option to select features compared to GT)
# get_N_Closest There will be no option to select features compared to GT)
# -> The best Features calculated and saved will be used ->(getBestFile,getBestFeatureSelektion)
"scriptMode" : "get_N_Closest",
#for get_N_Closest -> This should only contain one file and only the features for one Sample,
"compareFilePath" : "../json_data/singleFile/Dirt-SamplesSingle2020-10-06.17:26:55.json",
"n_mostSimilar": 25,
# path to json files
"dirName" : "../json_data/",
# saved Features of a sample-library
"fileName": "Dirt-Samples2020-09-14.20:53:18.json",
# =============================================================================
# Feature selection and Feature subset creation modes
# =============================================================================
# A fixed set of Features to select by (the names my vary from old JSON-Files to new ones)
"predefinedFeatures" : False,
        # You can select Features by yourself if you want. It refers to the predefined features
        # the default set can be generated from the Dirt-Samples with suboptimalSearch's default values.
"defineYoureOwnFeatureSet" : ['Har-Log_-FACM_10', 'MFCC-4', 'MFCC-7', 'Har-RecChr_-FACM_12','TriChr_Centroid', 'ZeroCrossingRate', 'MFCC-8'],
# "defineYoureOwnFeatureSet" : ["Har-TriChr_-FACM_12", "MFCC-10"],
# Select all features with correlation > suboptimalSearch.second to GT-Labels
# And discard all features with cross correlation > suboptimalSearch.third
"suboptimalSearch" : (True,0.3, 0.8),
# Only take the nBest Features from suboptimaSearch (-1 := all)
"nBest" : 7,
# Consider all Features or take an approach of above.
"calcAllFeatures": False,
#("HillClimber", "Random") optimize features with a) hillclimber b) totaly random
# maxxHill is the maximum iterationof the hillclimber/ max repeat for Random
# probHill is the probability for each individual feature to get selected
# modeHill := ("small", "big", "medium") affects HillClimber
# small -> small steps (1-2 changes at a time)
# big -> every permutation has equal probability
# bigChoice -> bigger steps than "small" but not everything possibe like "big"
"optimizer" : "HillClimber",
"maxHill" : 500,
"probHill": 0.0000001,
"modeHill" : "medium",
        # number of clusters to consider with Hierarch
"nCluster" : 40,
# (Hierarch/OPTICS/AffinityPropagation/SpectralClustering) 1st is hierarchial clustering, 2nd is Density based->getClusteringLabels()
"clusterAlgo" : "Hierarch",
# The mode for hierarchichal clustering. ward = minimum variance, average = minimum of average, complete = maximum of each cluster, single = minimum of each cluster
"hierarchMode" : "average",
# =============================================================================
# Output Params (save files to folder | draw landmap)
# =============================================================================
# save folder for copying all audio files
"saveFolder" : '../estimateSongs/',
# restructure all files within their new assigned cluster Group/
# if mode is n_mostSimilar, it is an folder which contains the n_mostSimilar samples
"copyFilesToFolder" : True,
# draw a distance landmap with graphviz.
"graphviz": False,
# graphvizMode := ("clusterBased", "oneFilePerCluster", "minimalSpan") :
# "minimalSpan" = draw one big landmap without clusters as minimal span tree (not recommended for all Files)
# "clusterBased" = draw seperate clusters in one big landmap |
# "oneFilePerCluster" = generate one landmap file per cluster)
"graphvizMode" : "minimalSpan"
}
# Same Params for Spectral Clustering. This approach be will not be taken further
SpectralClusterParam = {"assign_labels":"kmeans", #{‘kmeans’, ‘discretize’} default kmeans,
"eigen_solver": "amg",
}
VISU_PAR = {**VISU_PAR, **SpectralClusterParam}
return VISU_PAR
|
f82ed9c4156b8199be924fc1ed62398fcbad9e0c
| 3,650,044
|
def current_device():
"""Return the index of the current active device.
Returns
-------
int
The index of device.
"""
return dragon.cuda.GetDevice()
|
453b81673e198ddd3a5870843d16b9cc395802d4
| 3,650,045
|
import logging
import time
from aiohttp import web
LOGGER = logging.getLogger(__name__)
async def access_logger(app, handler):
"""Simple logging middleware to report info about each request/response.
"""
async def logging_handler(request):
start_time = time.time()
request_name = hex(int(start_time * 10000))[-6:]
client_ip, _ = request.transport.get_extra_info(
'peername', ('UNKNOWN', None))
# log request
LOGGER.info(
'Request %s: "%s %s" from %s',
request_name,
request.method,
request.rel_url,
client_ip)
def log_response(response):
# pylint: disable=protected-access
content_length = response._headers.get('Content-Length',
'UNKNOWN')
if content_length == 'UNKNOWN':
LOGGER.info(
'Response %s: %s status, %s size, in %.3fs',
request_name,
response._status,
content_length,
time.time() - start_time)
else:
LOGGER.info(
'Response %s: %s status, %sB size, in %.3fs',
request_name,
response._status,
content_length,
time.time() - start_time)
try:
response = await handler(request)
log_response(response)
return response
except web.HTTPError as e:
log_response(e)
raise e
return logging_handler
|
55d4ac318a65d6f4256467f7909b5a6ee2115a6d
| 3,650,046
|
from typing import Tuple
import astroid
def main(source: str) -> Tuple[astroid.Module, TypeInferer]:
"""Parse a string representing source text, and perform a typecheck.
Return the astroid Module node (with the type_constraints attribute set
on all nodes in the tree) and TypeInferer object.
"""
module = astroid.parse(source)
type_inferer = TypeInferer()
type_inferer.environment_transformer().visit(module)
type_inferer.type_inference_transformer().visit(module)
return module, type_inferer
|
f8e9b9a0ac9ff4334cce9ca7c888d3ff11570661
| 3,650,047
|
import ruamel.yaml.scalarstring
def to_literal_scalar(a_str):
"""Helper function to enforce literal scalar block (ruamel.yaml)."""
return ruamel.yaml.scalarstring.LiteralScalarString(a_str)
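# Usage sketch: dumping the wrapped string with ruamel.yaml's round-trip API renders it
# as a literal block scalar ("|").
import sys
from ruamel.yaml import YAML
YAML().dump({"script": to_literal_scalar("echo hello\necho world\n")}, sys.stdout)
# script: |
#   echo hello
#   echo world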
|
7cdb3d37bad184b7c6e68b374d1b6fd7e4c744c4
| 3,650,048
|
from typing import Optional
def get_first_free_address(subnet_id: Optional[int] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetFirstFreeAddressResult:
"""
Use this data source to access information about an existing resource.
"""
__args__ = dict()
__args__['subnetId'] = subnet_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('phpipam:index/getFirstFreeAddress:getFirstFreeAddress', __args__, opts=opts, typ=GetFirstFreeAddressResult).value
return AwaitableGetFirstFreeAddressResult(
id=__ret__.id,
ip_address=__ret__.ip_address,
subnet_id=__ret__.subnet_id)
|
ea4a599a7f3ac65e296cce4c8fc3a764202bba26
| 3,650,049
|
def pagenav(object_list, base_url, order_by, reverse, cur_month, is_paginated, paginator):
"""Display page navigation for given list of objects"""
return {'object_list': object_list,
'base_url': base_url,
'order_by': order_by,
'reverse': reverse,
'cur_month': cur_month,
'is_paginated': is_paginated,
'paginator': paginator}
|
eb61fb76dd32b8d0b3e264e77ce912766d3e38da
| 3,650,050
|
def read_input(path: str):
"""
Read game board file from path.
Return list of str.
>>> read_input("skyscrapers1.txt")
['***21**', '412453*', '423145*', '*543215', '*35214*', '*41532*', '*2*1***']
"""
with open(path, 'r') as f:
game_lst = f.readlines()
for idx, line in enumerate(game_lst):
game_lst[idx] = line.strip('\n')
return game_lst
|
a4bf08525ca3fe4b0b1efab1901830b4d7c45f05
| 3,650,051
|
def run_tweeter():
""" Captures image and sends tweet """
capture_image_and_tweet()
return schedule.CancelJob
|
2cf3895270e5f5f64ecb2e943548f0a290c35b02
| 3,650,052
|
import time
from functools import reduce
from operator import add
def get_retro_results(
outdir,
recos_basedir,
events_basedir,
recompute_estimate=False,
overwrite=False,
):
"""Extract all rectro reco results from a reco directory tree, merging with original
event information from correspoding source events directory tree. Results are
populated to a Pandas DataFrame, saved to disk, and this is returned to the user.
Parameters
----------
outdir : string
recos_basedir : string
events_basedir : string
recompute_estimate : bool, optional
overwrite : bool, optional
"""
t0 = time.time()
outdir = abspath(expand(outdir))
if not isdir(outdir):
mkdir(outdir)
outfile_path = join(outdir, 'reconstructed_events.feather')
if not overwrite and isfile(outfile_path):
raise IOError('Output file path already exists at "{}"'.format(outfile_path))
cluster = LocalCluster(threads_per_worker=1, diagnostics_port=None)
client = Client(cluster)
try:
# Walk directory hierarchy
futures = []
for reco_dirpath, _, files in walk(recos_basedir, followlinks=True):
is_leafdir = False
for f in files:
if f[-3:] == 'pkl' and f[:3] in ('slc', 'evt'):
is_leafdir = True
break
if not is_leafdir:
continue
rel_dirpath = relpath(path=reco_dirpath, start=recos_basedir)
if events_basedir is not None:
event_dirpath = join(events_basedir, rel_dirpath)
if not isdir(event_dirpath):
raise IOError('Event directory does not exist: "{}"'
.format(event_dirpath))
abs_reco_dirpath = abspath(reco_dirpath)
filenum = basename(abs_reco_dirpath)
flavdir = basename(dirname(abs_reco_dirpath))
futures.append(
client.submit(
extract_from_leaf_dir,
recodir=reco_dirpath,
eventdir=event_dirpath,
flavdir=flavdir,
filenum=filenum,
recompute_estimate=recompute_estimate,
)
)
results = [f.result() for f in as_completed(futures)]
finally:
cluster.close()
client.close()
del client
del cluster
# Convert to a single list containing all events
all_events = reduce(add, results, [])
# Convert to pandas DataFrame
all_events = pd.DataFrame(all_events)
# Save to disk
all_events.to_feather(outfile_path)
print('\nAll_events saved to "{}"\n'.format(outfile_path))
nevents = len(all_events)
dt = time.time() - t0
print('\nTook {:.3f} s to extract {} events'.format(dt, nevents))
return all_events
|
e3753b86ed4efa60057f1e3a0c70c34193447718
| 3,650,053
|
import copy
def split_surface_v(obj, t, **kwargs):
""" Splits the surface at the input parametric coordinate on the v-direction.
This method splits the surface into two pieces at the given parametric coordinate on the v-direction,
generates two different surface objects and returns them. It does not modify the input surface.
:param obj: surface
:type obj: BSpline.Surface or NURBS.Surface
:param t: parametric coordinate on the v-direction
:type t: float
:return: a list of surface as the split pieces of the initial surface
:rtype: Multi.MultiSurface
"""
# Validate input
if not isinstance(obj, Abstract.Surface):
raise TypeError("Input shape must be an instance of any Surface class")
if t == 0.0 or t == 1.0:
raise ValueError("Cannot split on the corner points")
utilities.check_uv(t)
# Keyword arguments
span_func = kwargs.get('find_span_func', helpers.find_span_linear)
# Find multiplicity of the knot
ks = span_func(obj.degree_v, obj.knotvector_v, obj.ctrlpts_size_v, t) - obj.degree_v + 1
s = helpers.find_multiplicity(t, obj.knotvector_v)
r = obj.degree_v - s
# Create backups of the original surface
temp_obj = copy.deepcopy(obj)
# Split the original surface
temp_obj.insert_knot(v=t, rv=r, check_r=False)
# Knot vectors
knot_span = span_func(temp_obj.degree_v, temp_obj.knotvector_v, temp_obj.ctrlpts_size_v, t) + 1
surf1_kv = list(temp_obj.knotvector_v[0:knot_span])
surf1_kv.append(t)
surf2_kv = list(temp_obj.knotvector_v[knot_span:])
for _ in range(0, temp_obj.degree_v + 1):
surf2_kv.insert(0, t)
# Control points
surf1_ctrlpts = []
for v_row in temp_obj.ctrlpts2d:
temp = v_row[0:ks + r]
surf1_ctrlpts.append(temp)
surf2_ctrlpts = []
for v_row in temp_obj.ctrlpts2d:
temp = v_row[ks + r - 1:]
surf2_ctrlpts.append(temp)
# Create a new surface for the first half
surf1 = temp_obj.__class__()
surf1.degree_u = temp_obj.degree_u
surf1.degree_v = temp_obj.degree_v
surf1.ctrlpts2d = surf1_ctrlpts
surf1.knotvector_v = surf1_kv
surf1.knotvector_u = temp_obj.knotvector_u
    # Create another surface for the second half
surf2 = temp_obj.__class__()
surf2.degree_u = temp_obj.degree_u
surf2.degree_v = temp_obj.degree_v
surf2.ctrlpts2d = surf2_ctrlpts
surf2.knotvector_v = surf2_kv
surf2.knotvector_u = temp_obj.knotvector_u
# Create a MultiSurface
ret_val = Multi.MultiSurface()
ret_val.add(surf1)
ret_val.add(surf2)
# Return the new surfaces
return ret_val
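# Minimal usage sketch (hypothetical surface `surf`; assumes a geomdl BSpline.Surface
# or NURBS.Surface whose degrees, control points and knot vectors are already set):
#
#     pieces = split_surface_v(surf, 0.3)
#     lower_half, upper_half = pieces[0], pieces[1]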
|
6603fb5e4c45fa60817168d776ac005475bd37a5
| 3,650,054
|
from collections import OrderedDict
def oidc_userprofile_test(request):
"""
OIDC-style userinfo
"""
user = request.user
profile, g_o_c = UserProfile.objects.get_or_create(user=user)
data = OrderedDict()
data['sub'] = user.username
data['name'] = "%s %s" % (user.first_name, user.last_name)
data['nickname'] = profile.nickname
data['given_name'] = user.first_name
data['family_name'] = user.last_name
data['email'] = user.email
data['email_verified'] = profile.email_verified
data['phone_number'] = profile.mobile_phone_number
data['phone_verified'] = profile.phone_verified
data['picture'] = profile.picture_url
data['gender'] = profile.gender
data['birthdate'] = str(profile.birth_date)
data['patient'] = get_fhir_id(user)
data['iat'] = user.date_joined
data['call_member'] = settings.CALL_MEMBER
    data['call_member_plural'] = settings.CALL_MEMBER_PLURAL
data['call_organization'] = settings.CALL_ORGANIZATION
data['call_organization_plural'] = settings.CALL_ORGANIZATION_PLURAL
data['ial'] = profile.identity_assurance_level
return JsonResponse(data)
|
aeae1962615ac9894b1b555814851c33efa85b45
| 3,650,055
|
import numpy as np
def split_idx(idx, a, b):
    """
    Shuffle a list of indexes (in place) and split it into train/test/validation subsets.
    idx: list of indices to split
    a: number of tenths of the data assigned to training
    b: number of tenths of the data assigned to testing; the remainder goes to validation
    Returns (train_idx, val_idx, test_idx).
    Note: no random seed is fixed here, so repeated calls give different splits.
    """
    rs = np.random.RandomState()
    rs.shuffle(idx)
    start = int(a / 10. * len(idx))
    end = int((b + a) / 10. * len(idx))
    train_idx = idx[0:start]
    test_idx = idx[start:end]
    val_idx = idx[end:]
    return train_idx, val_idx, test_idx
# return train_idx, test_idx
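# Minimal usage sketch: split 100 indices into roughly 70% train, 10% validation
# and 20% test (the assignment is random, the subset sizes are deterministic).
example_idx = list(range(100))
train_idx, val_idx, test_idx = split_idx(example_idx, 7, 2)
print(len(train_idx), len(val_idx), len(test_idx))  # 70 10 20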
|
e5c9850a0bbcdb187d12dff4cd9df6c9faddfacc
| 3,650,056
|
def scale(val, src, dst):
"""
Scale the given value from the scale of src to the scale of dst.
val: float or int
src: tuple
dst: tuple
example: print(scale(99, (0.0, 99.0), (-1.0, +1.0)))
"""
return (float(val - src[0]) / (src[1] - src[0])) * (dst[1] - dst[0]) + dst[0]
|
26cfaccaeea861ccecb36697838710c0ab706520
| 3,650,057
|
def add(c1, c2):
"""Add two encrypted counters"""
a1, b1 = c1
a2, b2 = c2
return (a1 + a2, b1 + b2)
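# Minimal usage sketch: the two ciphertext components are added component-wise,
# so (a1, b1) + (a2, b2) -> (a1 + a2, b1 + b2).
print(add((1, 2), (3, 4)))  # (4, 6)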
|
d3e519524fac558622f692a46ffb8fed9899176f
| 3,650,058
|
async def wait_all_tasks_blocked(cushion=0.0):
"""Block until there are no runnable tasks.
This is useful in testing code when you want to give other tasks a
chance to "settle down". The calling task is blocked, and doesn't wake
up until all other tasks are also blocked for at least ``cushion``
seconds. (Setting a non-zero ``cushion`` is intended to handle cases
like two tasks talking to each other over a local socket, where we
want to ignore the potential brief moment between a send and receive
when all tasks are blocked.)
Note that ``cushion`` is measured in *real* time, not the Trio clock
time.
If there are multiple tasks blocked in :func:`wait_all_tasks_blocked`,
then the one with the shortest ``cushion`` is the one woken (and
this task becoming unblocked resets the timers for the remaining
tasks). If there are multiple tasks that have exactly the same
``cushion``, then all are woken.
You should also consider :class:`trio.testing.Sequencer`, which
provides a more explicit way to control execution ordering within a
test, and will often produce more readable tests.
Example:
Here's an example of one way to test that Trio's locks are fair: we
take the lock in the parent, start a child, wait for the child to be
blocked waiting for the lock (!), and then check that we can't
release and immediately re-acquire the lock::
async def lock_taker(lock):
await lock.acquire()
lock.release()
async def test_lock_fairness():
lock = trio.Lock()
await lock.acquire()
async with trio.open_nursery() as nursery:
nursery.start_soon(lock_taker, lock)
# child hasn't run yet, we have the lock
assert lock.locked()
assert lock._owner is trio.lowlevel.current_task()
await trio.testing.wait_all_tasks_blocked()
# now the child has run and is blocked on lock.acquire(), we
# still have the lock
assert lock.locked()
assert lock._owner is trio.lowlevel.current_task()
lock.release()
try:
# The child has a prior claim, so we can't have it
lock.acquire_nowait()
except trio.WouldBlock:
assert lock._owner is not trio.lowlevel.current_task()
print("PASS")
else:
print("FAIL")
"""
locals()[LOCALS_KEY_KI_PROTECTION_ENABLED] = True
try:
return await GLOBAL_RUN_CONTEXT.runner.wait_all_tasks_blocked(cushion)
except AttributeError:
raise RuntimeError("must be called from async context")
|
35b144f4a214cb1f02bb1448f78a54ed93ac66aa
| 3,650,059
|
import numpy as np
def get_chisq_grid(data, type, forecast=False, errors=None):
"""
Generates 2d meshgrid for chisq values of a given type (i.e. BBN, CMB etc)
"""
masses = np.unique(data['mass'])
omegabs = np.unique(data['OmegaB'])
MASS, OMEGAB = np.meshgrid(masses, omegabs)
OMEGABDAT = data['OmegaB'].reshape(len(masses), -1).T
YP = data['Yp'].reshape(len(masses), -1).T
DH = data['D/H'].reshape(len(masses), -1).T
NEFF = data['Neff'].reshape(len(masses), -1).T
return chisq(YP, DH, OMEGABDAT, NEFF, type, forecast, errors)
|
3ea6fcf16d6c506733f5164e8808c5d5dce6c969
| 3,650,060
|
import os
import random
def collect_samples_clouds_video(upsampling, opt, deferred_shading=False):
"""
Collect samples of cloud videos.
opt is expected to be a dict.
Output: DatasetData
- samples: list of Sample
- images_high: num_frames x output_channels x H*upsampling x W*upsampling
- images_low: num_frames x input_channels x H x W
- flow_low: num_frames x 2 x H x W
"""
number_of_samples = opt['samples']
number_of_images = opt['numberOfImages']
use_input_depth = deferred_shading or opt['useInputDepth']
use_input_normal = deferred_shading or opt['useInputNormal']
    INPUT_PATH_SHADED = opt['inputPathShaded'] or '../../data/clouds/rendering_video/'
INPUT_PATH_UNSHADED = opt['inputPathUnshaded'] or '../../data/clouds/rendering_video3/'
inputPath = INPUT_PATH_UNSHADED if deferred_shading else INPUT_PATH_SHADED
inputExtension = '.exr'
if load_arrays and deferred_shading:
# load directly from numpy arrays
def get_image_name(i,mode,p):
if mode=='high':
return os.path.join(p, "high_%05d.npy" % i)
if mode=='low':
return os.path.join(p, "low_%05d.npy" % i)
elif mode=='flow':
return os.path.join(p, "flow_%05d.npy" % i)
# Collect number of images and paths
image_paths = []
print("dataset path:", inputPath)
if os.path.isfile(inputPath):
# input path points to a file where each line is a subdirectory of sets
with open(inputPath, 'r') as fp:
while True:
line = fp.readline()
if line is None or len(line)==0: break
p = os.path.join(os.path.dirname(inputPath), line[:-1])
print("Check path '%s'"%p)
num_images = 0
while True:
if not os.path.exists(get_image_name(num_images, 'low', p)):
break
image_paths.append((
get_image_name(num_images, 'high', p),
get_image_name(num_images, 'low', p),
get_image_name(num_images, 'flow', p)
))
num_images += 1
else:
# input path is directly a folder
num_images = 0
while True:
if not os.path.exists(get_image_name(num_images, 'low', inputPath)):
break
image_paths.append((
get_image_name(num_images, 'high', inputPath),
get_image_name(num_images, 'low', inputPath),
get_image_name(num_images, 'flow', inputPath)
))
num_images += 1
num_images = len(image_paths)
if num_images==0:
raise ValueError("No image found")
num_frames = np.load(image_paths[0][1]).shape[0]
print('Number of images found: %d, each with %d frames' % (num_images, num_frames))
if number_of_images is not None and number_of_images>0:
num_images = min(num_images, number_of_images)
print('But limited to %d images'%number_of_images)
# load all images
pg = ProgressBar(num_images, 'Load all images (npy)', length=50)
images_high = [None]*num_images
images_low = [None]*num_images
flow_low = [None]*num_images
for i in range(num_images):
pg.print_progress_bar(i)
images_high[i] = np.load(image_paths[i][0])
images_low[i] = np.load(image_paths[i][1])
flow_low[i] = np.load(image_paths[i][2])
pg.print_progress_bar(num_images)
input_channels = 5
output_channels = images_high[0].shape[1]
else:
        # old version, load images separately
def get_image_name(i,j,mode):
if mode=='high':
return os.path.join(inputPath, "high_%05d_%05d%s" % (i, j, inputExtension))
if mode=='highdn':
return os.path.join(inputPath, "high_%05d_%05d_depth%s" % (i, j, inputExtension))
elif mode=='low':
return os.path.join(inputPath, "low_%05d_%05d%s" % (i, j, inputExtension))
elif mode=='dn':
return os.path.join(inputPath, "low_%05d_%05d_depth%s" % (i, j, inputExtension))
elif mode=='flow':
return os.path.join(inputPath, "low_%05d_%05d_flow%s" % (i, j, inputExtension))
# Collect number of images
num_images = 0
num_frames = 0
while True:
if not os.path.exists(get_image_name(num_images, 0, 'low')):
break
num_images += 1
while True:
if not os.path.exists(get_image_name(0, num_frames, 'low')):
break
num_frames += 1
print('Number of images found: %d, each with %d frames' % (num_images, num_frames))
if number_of_images is not None and number_of_images>0:
num_images = min(num_images, number_of_images)
print('But limited to %d images'%number_of_images)
# load all images
#print('Load all images')
pg = ProgressBar(num_images, 'Load all images', length=50)
images_high = [None]*num_images
images_low = [None]*num_images
flow_low = [None]*num_images
output_channels = 3
for i in range(num_images):
pg.print_progress_bar(i)
high = [None]*num_frames
low = [None]*num_frames
flow = [None]*num_frames
for j in range(num_frames):
if not deferred_shading:
high[j] = np.clip(np.asarray(imageio.imread(get_image_name(i, j, 'high'))).transpose((2, 0, 1)), 0, 1)
else:
high_rgb = np.clip(np.asarray(imageio.imread(get_image_name(i, j, 'high'))).transpose((2, 0, 1)), 0, 1)
high_dn = np.asarray(imageio.imread(get_image_name(i, j, 'highdn'))).transpose((2, 0, 1))
high[j] = np.concatenate((high_rgb, high_dn), axis=0)
low_rgb = np.clip(np.asarray(imageio.imread(get_image_name(i, j, 'low'))).transpose((2, 0, 1)), 0, 1)
if use_input_depth or use_input_normal:
low_dn = np.asarray(imageio.imread(get_image_name(i, j, 'dn'))).transpose((2, 0, 1))
if use_input_depth and use_input_normal:
low[j] = np.concatenate((low_rgb, low_dn), axis=0)
elif use_input_depth: #not use_input_normal
low[j] = np.concatenate((low_rgb, low_dn[3:4,:,:]), axis=0)
elif use_input_normal: #not use_input_depth
low[j] = np.concatenate((low_rgb, low_dn[0:3,:,:]), axis=0)
else:
low[j] = low_rgb
flow_xy = imageio.imread(get_image_name(i, j, 'flow'))[:,:,0:2]
flow_inpaint = np.stack((
cv.inpaint(flow_xy[:,:,0], np.uint8(low_rgb[3,:,:]==0), 3, cv.INPAINT_NS),
cv.inpaint(flow_xy[:,:,1], np.uint8(low_rgb[3,:,:]==0), 3, cv.INPAINT_NS)), axis=0)
low[j][3,:,:] = low[j][3,:,:] * 2 - 1 # transform mask to [-1,1]
high[j][3,:,:] = high[j][3,:,:] * 2 - 1
if deferred_shading:
channel_mask = [3, 4, 5, 6, 7] # mask, normal x, y, z, depth
low[j] = low[j][channel_mask,:,:]
high[j] = high[j][channel_mask,:,:]
flow[j] = flow_inpaint
images_high[i] = np.stack(high, axis=0)
images_low[i] = np.stack(low, axis=0)
flow_low[i] = np.stack(flow, axis=0)
pg.print_progress_bar(num_images)
if deferred_shading:
input_channels = 5
output_channels = 5
else:
input_channels = 4
if use_input_depth:
input_channels += 1
if use_input_normal:
input_channels += 3
# find crops
def randomPointOnSphere():
vec = np.random.randn(3)
vec /= np.linalg.norm(vec)
        return vec
print('Find crops')
fill_ratio = 0.5 * video_crop_size * video_crop_size # at least 50% of that crop has to be filled
samples = [None]*number_of_samples
sample = 0
while sample < number_of_samples:
while True:
index = random.randint(0, num_images-1)
w = images_low[index].shape[2]
h = images_low[index].shape[3]
x = random.randint(0, w - video_crop_size - 1)
y = random.randint(0, h - video_crop_size - 1)
# check if it is filled
crop_mask1 = (images_low[index][0,0,x:x+video_crop_size,y:y+video_crop_size]
+ images_low[index][0,1,x:x+video_crop_size,y:y+video_crop_size]
+ images_low[index][0,2,x:x+video_crop_size,y:y+video_crop_size]) > 0
crop_mask2 = (images_low[index][num_frames-1,0,x:x+video_crop_size,y:y+video_crop_size]
+ images_low[index][num_frames-1,1,x:x+video_crop_size,y:y+video_crop_size]
+ images_low[index][num_frames-1,2,x:x+video_crop_size,y:y+video_crop_size]) > 0
if np.sum(crop_mask1) >= fill_ratio and np.sum(crop_mask2) >= fill_ratio:
# we found our sample
samples[sample] = Sample(
index=index,
crop_low=(x,x+video_crop_size,y,y+video_crop_size),
crop_high=(upsampling*x,upsampling*(x+video_crop_size),upsampling*y,upsampling*(y+video_crop_size)),
augmentation=np.random.randint(MAX_AUGMENTATION_MODE),
ambient_color=np.array([random.uniform(0.05,0.2)]*3), # color + light only needed for deferred shading
diffuse_color=np.array([random.uniform(0.4,1.0)]*3),
                    light_direction=np.array([0, 0, 1]) if random.uniform(0, 1) < 0.5 else randomPointOnSphere()  # single 3-vector, matching randomPointOnSphere()
)
#print(samples[sample])
sample += 1
break
    # sort samples by image index for proper separation between test and training
samples.sort(key = lambda s: s.index)
print('All samples collected')
return DatasetData(samples=samples,
images_high=images_high,
images_low=images_low,
flow_low=flow_low,
input_channels=input_channels,
output_channels=output_channels,
crop_size = video_crop_size,
num_frames = num_frames)
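# Minimal configuration sketch for the `opt` dict read above (the values shown are
# illustrative only; the keys are the ones this function actually looks up):
#
#     opt = {
#         'samples': 500,            # number of crops to draw
#         'numberOfImages': None,    # None/0 -> use every image found
#         'useInputDepth': False,
#         'useInputNormal': False,
#         'inputPathShaded': None,   # None -> fall back to the built-in default path
#         'inputPathUnshaded': None,
#     }
#     data = collect_samples_clouds_video(upsampling=4, opt=opt)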
|
aab7be545d3b1561c7677bd9d370f8b5562d0c4f
| 3,650,061
|
import ctypes
def spectrl2(units, location, datetime, weather, orientation,
atmospheric_conditions, albedo):
"""
Calculate solar spectrum by calling functions exported by
:data:`SPECTRL2DLL`.
:param units: set ``units`` = 1 for W/m\ :sup:`2`/micron
:type units: int
:param location: latitude, longitude and UTC-timezone
:type location: float
:param datetime: year, month, day, hour, minute and second
:type datetime: int
:param weather: ambient-pressure [mB] and ambient-temperature [C]
:type weather: float
:param orientation: tilt and aspect [degrees]
:type orientation: float
:param atmospheric_conditions: alpha, assym, ozone, tau500 and watvap
:type atmospheric_conditions: float
:param albedo: 6 wavelengths and 6 reflectivities
:type albedo: float
:returns: spectral decomposition, x-coordinate
:rtype: float
:raises: :exc:`~solar_utils.exceptions.SPECTRL2_Error`,
:exc:`~solar_utils.exceptions.SOLPOS_Error`
Returns the diffuse, direct, extraterrestrial and global spectral components
on the tilted surface in as a function of x-coordinate specified by units.
===== ===============================================================
units output units
===== ===============================================================
1 irradiance (W/sq m/micron) per wavelength (microns)
2 photon flux (10.0E+16 /sq cm/s/micron) per wavelength (microns)
3 photon flux density (10.0E+16 /sq cm/s/eV) per energy (eV)
===== ===============================================================
See
`NREL SPECTRL2 Documentation <http://rredc.nrel.gov/solar/models/spectral/spectrl2/documentation.html>`_
for more detail.
.. seealso::
:func:`solposAM`
**Examples:**
>>> units = 1
>>> location = [33.65, -84.43, -5.0]
>>> datetime = [1999, 7, 22, 9, 45, 37]
>>> weather = [1006.0, 27.0]
>>> orientation = [33.65, 135.0]
>>> atmospheric_conditions = [1.14, 0.65, -1.0, 0.2, 1.36]
>>> albedo = [0.3, 0.7, 0.8, 1.3, 2.5, 4.0] + ([0.2] * 6)
    >>> (specdif, specdir, specetr, specglo,
    ...  specx) = spectrl2(units, location, datetime, weather, orientation,
    ...                    atmospheric_conditions, albedo)
"""
# load the DLL
ctypes.cdll.LoadLibrary(SOLPOSAMDLL) # requires 'solpos.dll'
spectrl2_dll = ctypes.cdll.LoadLibrary(SPECTRL2DLL)
_spectrl2 = spectrl2_dll.spectrl2
# cast Python types as ctypes
_location = (ctypes.c_float * 3)(*location)
_datetime = (ctypes.c_int * 6)(*datetime)
_weather = (ctypes.c_float * 2)(*weather)
_orientation = (ctypes.c_float * 2)(*orientation)
_atmospheric_conditions = (ctypes.c_float * 5)(*atmospheric_conditions)
_albedo = (ctypes.c_float * 12)(*albedo)
# allocate space for results
specdif = (ctypes.c_float * 122)()
specdir = (ctypes.c_float * 122)()
specetr = (ctypes.c_float * 122)()
specglo = (ctypes.c_float * 122)()
specx = (ctypes.c_float * 122)()
angles = (ctypes.c_float * 2)()
airmass = (ctypes.c_float * 2)()
settings = (ctypes.c_int * 2)()
shadowband = (ctypes.c_float * 3)()
# call DLL
err_code = _spectrl2(
units, _location, _datetime, _weather, _orientation,
_atmospheric_conditions, _albedo, specdif, specdir, specetr, specglo,
specx, angles, airmass, settings, shadowband
)
# return results if successful, otherwise raise exception
if err_code == 0:
return specdif, specdir, specetr, specglo, specx
elif err_code < 0:
data = {'units': units,
'tau500': atmospheric_conditions[3],
'watvap': atmospheric_conditions[4],
'assym': atmospheric_conditions[1]}
raise SPECTRL2_Error(err_code, data)
else:
# convert err_code to bits
_code = _int2bits(err_code)
data = {'location': location,
'datetime': datetime,
'weather': weather,
'angles': angles,
'airmass': airmass,
'settings': settings,
'orientation': orientation,
'shadowband': shadowband}
raise SOLPOS_Error(_code, data)
|
aa8bd3878bc3f230d89e1d9545621a43e2d2fa6c
| 3,650,062
|
def setup_transition_list():
"""
Creates and returns a list of Transition() objects to represent state
transitions for an unbiased random walk.
Parameters
----------
(none)
Returns
-------
xn_list : list of Transition objects
List of objects that encode information about the link-state transitions.
Notes
-----
State 0 represents fluid and state 1 represents a particle (such as a
sediment grain, tea leaf, or solute molecule).
The states and transitions are as follows:
Pair state Transition to Process Rate (cells/s)
========== ============= ======= ==============
0 (0-0) (none) - -
1 (0-1) 2 (1-0) left/down motion 10.0
2 (1-0) 1 (0-1) right/up motion 10.0
3 (1-1) (none) - -
"""
# Create an empty transition list
xn_list = []
# Append two transitions to the list.
# Note that the arguments to the Transition() object constructor are:
# - Tuple representing starting pair state
# (left/bottom cell, right/top cell, orientation)
# - Tuple representing new pair state
# (left/bottom cell, right/top cell, orientation)
# - Transition rate (cells per time step, in this case 1 sec)
# - Name for transition
xn_list.append(Transition((0, 1, 0), (1, 0, 0), 10.0, "left/down motion"))
xn_list.append(Transition((1, 0, 0), (0, 1, 0), 10.0, "right/up motion"))
return xn_list
|
702c7a7083546797578e5463841c7b59548dcca2
| 3,650,063
|
def error_message(error, text):
"""
Gives default or custom text for the error.
--------------------
Inputs <datatype>:
- error <Error Object>: The error code
- text <string>: Custom error text if error has no message
Returns <datatype>:
- error description <string>: The custom error description or default
"""
try:
return error.description['message']
except TypeError:
return text
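# Minimal usage sketch with throwaway stand-in error objects:
class _ErrorWithMessage:
    description = {'message': 'Invalid token'}
class _ErrorWithoutMessage:
    description = None
print(error_message(_ErrorWithMessage(), "Something went wrong"))     # Invalid token
print(error_message(_ErrorWithoutMessage(), "Something went wrong"))  # Something went wrong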
|
466fec2d2abefc9f05a3f0adf569fba1c63ea4c1
| 3,650,064
|
from scipy import ndimage
def maskguard(maskarray, niter=1, xyonly=False, vonly=False):
"""
Pad a mask by specified number of pixels in all three dimensions.
Parameters
----------
maskarray : `~numpy.ndarray`
The 3-D mask array with 1s for valid pixels and 0s otherwise.
niter : int, optional
Number of iterations for expanding mask by binary dilation.
Default: 1
xyonly : boolean, optional
Whether to expand only in the two sky coordinates
Default: False
vonly : boolean, optional
Whether to expand only in the spectral coordinate
Default: False (ignored if xyonly==True)
Returns
-------
maskarray : `~numpy.ndarray`
A copy of the input maskarray after padding.
"""
s = ndimage.generate_binary_structure(3, 1)
if xyonly:
s[0,:] = False
s[2,:] = False
elif vonly:
        s[1] = s[0]
maskarray = ndimage.binary_dilation(maskarray, structure=s, iterations=niter)
return maskarray
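# Minimal usage sketch: pad a single-voxel mask by one cell in the sky plane only.
import numpy as np
_mask = np.zeros((3, 5, 5), dtype=bool)
_mask[1, 2, 2] = True
print(maskguard(_mask, niter=1, xyonly=True).sum())  # 5: the voxel plus its 4 in-plane neighbours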
|
098964878e313b08c73f1a3c1a66a2b7f1664090
| 3,650,065
|
def validdest(repo, old, new):
"""Is the new bookmark destination a valid update from the old one"""
repo = repo.unfiltered()
if old == new:
# Old == new -> nothing to update.
return False
elif not old:
# old is nullrev, anything is valid.
# (new != nullrev has been excluded by the previous check)
return True
elif repo.obsstore:
return new.node() in obsolete.foreground(repo, [old.node()])
else:
# still an independent clause as it is lazier (and therefore faster)
return old.descendant(new)
|
8206b1ec130582864979ea9fb617c60b6175deff
| 3,650,066
|
def no_rbac_suffix_in_test_filename(filename):
"""Check that RBAC filenames end with "_rbac" suffix.
P101
"""
if "patrole_tempest_plugin/tests/api" in filename:
if filename.endswith('rbac_base.py'):
return
if not filename.endswith('_rbac.py'):
return 0, "RBAC test filenames must end in _rbac suffix"
|
6ebfcede8b6e30f24f5ecc1f9d3f0985bd4c44fa
| 3,650,067
|
def import_results(results_file, valid_codes=None, session=None):
"""Take a iterable which yields result lines and add them to the database.
If session is None, the global db.session is used.
If valid_codes is non-None, it is a set containing the party codes which are
allowed in this database. If None, this set is queried from the database.
.. note::
This can take a relatively long time when adding several hundred
results. Should this become a bottleneck, there are some optimisation
opportunities.
"""
session = session if session is not None else db.session
valid_codes = (
valid_codes if valid_codes is not None else
_query_valid_party_codes(session)
)
diagnostics = []
# This is a relatively straightforward but sub-optimal way to implement a
# bulk insert. The main issue is that the DB is queried once per result to
# see if the constituency exists. It would be preferable to do a single
# query over all of the given constituency names to determine which ones are
# present. This would make the flow of this function less obvious. For the
# moment, leave the sub-optimal implementation but should we need to
# re-visit this function as we deal with greater numbers of results the
# strategy above should be tried.
for line_idx, line in enumerate(results_file):
try:
add_constituency_result_line(
line, valid_codes=valid_codes, session=session)
except ValueError as e:
diagnostics.append(Diagnostic(
line, e.args[0] % e.args[1:], line_idx + 1
))
# Log the fact that this import happened
log('\n'.join([
'Imported {} result line(s), {} diagnostic(s)'.format(
line_idx+1, len(diagnostics)),
] + [str(d) for d in diagnostics]))
return diagnostics
|
e7faea1b78418b6fdb599612fdc72fe20fe45bc6
| 3,650,068
|
def fit_lens_data_with_tracer(lens_data, tracer, padded_tracer=None):
"""Fit lens data with a model tracer, automatically determining the type of fit based on the \
properties of the galaxies in the tracer.
Parameters
-----------
lens_data : lens_data.LensData or lens_data.LensDataHyper
        The lens data that is fitted.
tracer : ray_tracing.AbstractTracerNonStack
The tracer, which describes the ray-tracing and strong lens configuration.
padded_tracer : ray_tracing.Tracer or None
A tracer with an identical strong lens configuration to the tracer above, but using the lens data's \
padded grid_stack such that unmasked model-images can be computed.
"""
if tracer.has_light_profile and not tracer.has_pixelization:
return LensProfileFit(lens_data=lens_data, tracer=tracer, padded_tracer=padded_tracer)
elif not tracer.has_light_profile and tracer.has_pixelization:
return LensInversionFit(lens_data=lens_data, tracer=tracer)
elif tracer.has_light_profile and tracer.has_pixelization:
return LensProfileInversionFit(lens_data=lens_data, tracer=tracer,
padded_tracer=padded_tracer)
else:
raise exc.FittingException('The fit routine did not call a Fit class - check the '
'properties of the tracer')
|
c94454462e4e9fd770eebf39a9574daa0e6a9025
| 3,650,069
|
def sround(a, *ndigits):
"""Termwise round(a) for an iterable.
An optional second argument is supported, and passed through to the
built-in ``round`` function.
As with the built-in, rounding is correct taking into account the float
representation, which is base-2.
https://docs.python.org/3/library/functions.html#round
"""
op = _make_termwise_stream_unop(round, ndigits[0]) if ndigits else _round
return op(a)
|
ee75d82fa3bdfb50afb279cce87d6d6ec6120adf
| 3,650,070
|
def part_b(lines):
""" For each valid line consider the stack of opening characters that didn't get closed.
Compute a score for each line per the question, then return the median value of these scores.
"""
scores = []
for line in lines:
is_line_valid, stack = assess_line(line)
if is_line_valid:
scores.append(score_completion(stack))
scores.sort()
return scores[len(scores) // 2]
|
e745a3be40f5a83f0e8ce3de4c647bd5984e7511
| 3,650,071
|
def _get_activation(
spec):
"""Get a rematlib Layer corresponding to a given activation function."""
if spec == mobile_search_space_v3.RELU:
result = layers.ReLU()
elif spec == mobile_search_space_v3.RELU6:
result = layers.ReLU6()
elif spec == mobile_search_space_v3.SWISH6:
result = layers.Swish6()
elif spec == mobile_search_space_v3.SIGMOID:
result = layers.Sigmoid()
else:
raise ValueError('Unrecognized activation function: {}'.format(spec))
return result
|
d2e67564eb366128b6dfe9f0c1c919ceb0e949ac
| 3,650,072
|
def addUpdateCarrierGroups():
"""
Add or Update a group of carriers
"""
db = DummySession()
try:
if not session.get('logged_in'):
return redirect(url_for('index'))
if (settings.DEBUG):
debugEndpoint()
db = SessionLoader()
form = stripDictVals(request.form.to_dict())
gwgroup = form['gwgroup']
name = form['name']
new_name = form['new_name'] if 'new_name' in form else ''
authtype = form['authtype'] if 'authtype' in form else ''
r_username = form['r_username'] if 'r_username' in form else ''
auth_username = form['auth_username'] if 'auth_username' in form else ''
auth_password = form['auth_password'] if 'auth_password' in form else ''
auth_domain = form['auth_domain'] if 'auth_domain' in form else settings.DEFAULT_AUTH_DOMAIN
auth_proxy = form['auth_proxy'] if 'auth_proxy' in form else ''
# format data
if authtype == "userpwd":
auth_domain = safeUriToHost(auth_domain)
if auth_domain is None:
raise http_exceptions.BadRequest("Auth domain hostname/address is malformed")
if len(auth_proxy) == 0:
auth_proxy = auth_domain
auth_proxy = safeFormatSipUri(auth_proxy, default_user=r_username)
if auth_proxy is None:
raise http_exceptions.BadRequest('Auth domain or proxy is malformed')
if len(auth_username) == 0:
auth_username = r_username
# Adding
if len(gwgroup) <= 0:
Gwgroup = GatewayGroups(name, type=settings.FLT_CARRIER)
db.add(Gwgroup)
db.flush()
gwgroup = Gwgroup.id
# Add auth_domain(aka registration server) to the gateway list
if authtype == "userpwd":
Uacreg = UAC(gwgroup, r_username, auth_password, realm=auth_domain, auth_username=auth_username, auth_proxy=auth_proxy,
local_domain=settings.EXTERNAL_IP_ADDR, remote_domain=auth_domain)
Addr = Address(name + "-uac", auth_domain, 32, settings.FLT_CARRIER, gwgroup=gwgroup)
db.add(Uacreg)
db.add(Addr)
# Updating
else:
# config form
if len(new_name) > 0:
Gwgroup = db.query(GatewayGroups).filter(GatewayGroups.id == gwgroup).first()
gwgroup_fields = strFieldsToDict(Gwgroup.description)
old_name = gwgroup_fields['name']
gwgroup_fields['name'] = new_name
Gwgroup.description = dictToStrFields(gwgroup_fields)
Addr = db.query(Address).filter(Address.tag.contains("name:{}-uac".format(old_name))).first()
if Addr is not None:
addr_fields = strFieldsToDict(Addr.tag)
                    addr_fields['name'] = '{}-uac'.format(new_name)
Addr.tag = dictToStrFields(addr_fields)
# auth form
else:
if authtype == "userpwd":
# update uacreg if exists, otherwise create
if not db.query(UAC).filter(UAC.l_uuid == gwgroup).update(
{'l_username': r_username, 'r_username': r_username, 'auth_username': auth_username,
'auth_password': auth_password, 'r_domain': auth_domain, 'realm': auth_domain,
'auth_proxy': auth_proxy, 'flags': UAC.FLAGS.REG_ENABLED.value}, synchronize_session=False):
Uacreg = UAC(gwgroup, r_username, auth_password, realm=auth_domain, auth_username=auth_username,
auth_proxy=auth_proxy, local_domain=settings.EXTERNAL_IP_ADDR, remote_domain=auth_domain)
db.add(Uacreg)
# update address if exists, otherwise create
if not db.query(Address).filter(Address.tag.contains("name:{}-uac".format(name))).update(
{'ip_addr': auth_domain}, synchronize_session=False):
Addr = Address(name + "-uac", auth_domain, 32, settings.FLT_CARRIER, gwgroup=gwgroup)
db.add(Addr)
else:
# delete uacreg and address if they exist
db.query(UAC).filter(UAC.l_uuid == gwgroup).delete(synchronize_session=False)
db.query(Address).filter(Address.tag.contains("name:{}-uac".format(name))).delete(synchronize_session=False)
db.commit()
globals.reload_required = True
return displayCarrierGroups()
except sql_exceptions.SQLAlchemyError as ex:
debugException(ex)
error = "db"
db.rollback()
db.flush()
return showError(type=error)
except http_exceptions.HTTPException as ex:
debugException(ex)
error = "http"
db.rollback()
db.flush()
return showError(type=error)
except Exception as ex:
debugException(ex)
error = "server"
db.rollback()
db.flush()
return showError(type=error)
finally:
db.close()
|
570374d0c42e89c2cd57b628345e88e7e5680d90
| 3,650,073
|
import numpy as np
import quaternion  # numpy-quaternion package
def RT2tq(poses, square=False):
    """
    !!NOT TESTED!!
    :param poses: N x 3 x 4, (R|T)
    :return: (N, 7) array of [tx, ty, tz, qw, qx, qy, qz] per pose
    """
N,_,_ = poses.shape
R = poses[:,:,:3]
T = poses[:,:,3:] # Nx3x1
q = quaternion.as_float_array(quaternion.from_rotation_matrix(R)) #Nx4
    t = T.squeeze(-1)
tq = np.concatenate([t,q], axis=-1)
return tq
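# Minimal usage sketch (requires numpy and the numpy-quaternion package, both
# imported above): two identity poses with zero translation.
_poses = np.tile(np.hstack([np.eye(3), np.zeros((3, 1))]), (2, 1, 1))
print(RT2tq(_poses).shape)  # (2, 7)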
|
5241aa7110df8074fe203b1cbe33cb7bf509c2f3
| 3,650,074
|
import json
def make_callback(subscription_path, project_id):
"""Return a callback closure"""
def callback(message):
"""Handle Pub/Sub resurrection message.
Ignore (and ACK) messages that are not well-formed.
Try handle any other message, ACKing it eventually (always).
"""
logger.info('Handling message from subscription "%s"', subscription_path)
# parse the message, ACK on failure to avoid duplicate deliveries
try:
instance_desc = json.loads(message.data)
        except Exception:
logger.exception('Failed parsing JSON message - ignoring it\n%s', message)
else:
resurrect_instance(project_id, instance_desc)
finally:
logger.info('ACKing message\n%s', message)
message.ack()
return callback
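# Minimal wiring sketch (assumes google-cloud-pubsub is installed and credentials
# are configured; the project and subscription names are illustrative only):
#
#     from google.cloud import pubsub_v1
#     subscriber = pubsub_v1.SubscriberClient()
#     path = subscriber.subscription_path("my-project", "resurrect-sub")
#     future = subscriber.subscribe(path, callback=make_callback(path, "my-project"))
#     future.result()  # block and handle messages until cancelled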
|
ac16d67ee9e7b89d69b79702e4121b1983df2bb8
| 3,650,075
|
import os
def make_partition_table(target_device, part_name, **kwargs):
"""
Create new GUID partition table on ``target_device``, with two partitions:
1) GRUB second stage partition with type 0xEF02
2) size of rest of the disk with name ``part_name``
Returns path to the boot partition,
which would eventually contain GRUB and ISO's
"""
delete_parttable = 'sgdisk --zap-all ' + target_device
create_grub_partition = 'sgdisk --new=1:0:+1M --typecode=1:ef02 ' + target_device
    create_boot_partition = 'sgdisk --new=2:0:0 --change-name=2:"' + part_name + '" '
create_boot_partition = create_boot_partition + target_device
print(delete_parttable)
print(create_grub_partition)
print(create_boot_partition)
    if not __debug__:
os.system(delete_parttable)
os.system(create_grub_partition)
os.system(create_boot_partition)
return target_device + '2'
|
c40fc6c256ce2f94f4a909b633a47e07443940f0
| 3,650,076
|
def data_to_bytes(data, encoding):
"""\
Converts the provided data into bytes. If the data is already a byte
sequence, it will be left unchanged.
This function tries to use the provided `encoding` (if not ``None``)
or the default encoding (ISO/IEC 8859-1). It uses UTF-8 as fallback.
Returns the (byte) data, the data length and the encoding of the data.
:param data: The data to encode
:type data: str or bytes
:param encoding: str or ``None``
:rtype: tuple: data, data length, encoding
"""
if isinstance(data, bytes):
return data, len(data), encoding or consts.DEFAULT_BYTE_ENCODING
data = str(data)
if encoding is not None:
# Use the provided encoding; could raise an exception by intention
data = data.encode(encoding)
else:
try:
# Try to use the default byte encoding
encoding = consts.DEFAULT_BYTE_ENCODING
data = data.encode(encoding)
except UnicodeError:
try:
# Try Kanji / Shift_JIS
encoding = consts.KANJI_ENCODING
data = data.encode(encoding)
except UnicodeError:
# Use UTF-8
encoding = 'utf-8'
data = data.encode(encoding)
return data, len(data), encoding
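# Minimal usage sketch (assumes `consts` defines the usual defaults, i.e. an
# ISO/IEC 8859-1 default byte encoding and a Shift_JIS Kanji encoding):
#
#     data_to_bytes('abc', None)    # -> (b'abc', 3, <default byte encoding>)
#     data_to_bytes(b'abc', None)   # -> (b'abc', 3, <default byte encoding>)
#     data_to_bytes('abc', 'utf-8') # -> (b'abc', 3, 'utf-8')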
|
78d0813075c24d2a85412648fa45d720227ae853
| 3,650,077
|
def get_session_store(state: State = Depends(get_app_state)) -> SessionStore:
"""Get a singleton SessionStore to keep track of created sessions."""
session_store = getattr(state, _SESSION_STORE_KEY, None)
if session_store is None:
session_store = SessionStore()
setattr(state, _SESSION_STORE_KEY, session_store)
return session_store
|
4204371079babbfdc15327bb62b3c1c306e27f39
| 3,650,078
|
import os
def get_comments(filename):
"""
Get Julia, Python, R comments.
"""
comments = []
try:
with open(filename, 'r', encoding='utf8') as fp:
filename = os.path.basename(filename)
for comment, start, end in getcomments.get_comment_blocks(fp):
comments.append({ "ln%s" % (start[0]) : comment.rstrip()})
except Exception as e:
print(e)
return comments
|
a3944acc98914c3d245fe62a03b825790dad8565
| 3,650,079
|
def extractCurrentlyTLingBuniMi(item):
"""
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
if item['title'].startswith('[BNM]'):
return buildReleaseMessageWithType(item, 'Bu ni Mi wo Sasagete Hyaku to Yonen. Elf de Yarinaosu Musha Shugyou', vol, chp, frag=frag, postfix=postfix)
if item['title'].startswith('[DD]'):
return buildReleaseMessageWithType(item, 'Doll Dungeon', vol, chp, frag=frag, postfix=postfix)
if item['title'].startswith('[HCLS]'):
return buildReleaseMessageWithType(item, 'High Comprehension Low Strength', vol, chp, frag=frag, postfix=postfix)
tagmap = [
('Abyss Domination', 'Abyss Domination', 'translated'),
('Nine Yang Sword Saint', 'Nine Yang Sword Saint', 'translated'),
('Mysterious World Beast God', 'Mysterious World Beast God', 'translated'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
9b91a6a2329cb4e2572f181b16ddc6b2f0fb3553
| 3,650,080
|
from dif import dif_stats as _dif_stats
def dif_stats(filename, # [<'my/file.txt',...> => name of scored data file]
student_id = 'Student_ID', # [<'Student_ID', ...> => student id column label]
group = ['Sex', {'focal':0, 'ref':1}], # [<e.g.'Sex', {'focal':'female', 'ref':'male'}]> => column label with assignment to focal and reference]
raw_score = 'RawScore', # [<'RawScore',...> => raw score column label]
items = 'All', # [<'All', ['item1', 'item3',...]> => items for which to get stats]
stats = 'All', # [<'All', [see list in docs]> => desired statistics]
strata = ('all_scores', 4), # [(<'all_scores', int>, int) => number of raw score strata, with backup if insufficient]
getrows = None, # [<None, {'Get':_,'Labels':_,'Rows':_}> => select rows using extract() syntax]
getcols = None, # [<None, {'Get':_,'Labels':_,'Cols':_}> => select cols using extract() syntax]
delimiter = '\t', # [<',', '\t'> => column delimiter]
):
"""Calculate DIF stats for each in a range of items.
Returns
-------
dif() returns an item by statistic Damon object with
a column containing number of score categories. Display
results using:
>>> print tabulate(dif(...).whole, 'firstrow')
Comments
--------
"dif" (DIF) stands for "differential item functioning" and reflects
the degree to which items have different difficulties for two
groups of persons, a "focal" and a "reference" group, after
adjusting for the ability of each person. It is used to flag
items that "play favorites" with student groups, e.g., that are
easy for girls and hard for boys even though the two groups
otherwise have similar ability.
There are a profusion of DIF statistics, organized mainly by whether
they are intended for dichotomous or polytomous items. The Rasch
model has its own way of estimating DIF (not included in this
function) which yields similar results. dif() supports three
categories of DIF statistics plus related variances, z-scores,
chi-squares and so on. Any number of combinations of these statistics
have been proposed for flagging DIF items.
'MH' => Mantel-Haenszel, for dichotomous data
'M' => Mantel, for dichotomous and polytomous data
'SMD' => standardized mean difference, usually for polytomous
Formulas are pulled from Zwick & Thayer (1996) and Wood (2011).
A commonly used statistic is the 'Flag' statistic, which gives a code
for whether an item should be flagged. ETS's a, b, c DIF flags
are reported numerically as 0, 1, 2. See discussion below.
The dif_stats() function applies only to unidimensional data.
Multidimensional DIF can be evaluated in Damon to a limited
degree using the "stability" statistic in conjunction with
coord()'s seed parameters.
    dif_stats() requires a student-by-item data file or array with a group
membership column and a column of student raw scores. Thus, column
headers should contain a student id column, a group column, a raw score
column, and a series of item columns. Any other columns in your
dataset should be filtered out using the getcols parameter.
References
----------
Zwick, R., Thayer, D. (Autumn, 1996). "Evaluating the Magnitude of Differential
Item Functioning in Polytomous Items". Journal of Educational and
Behavioral Statistics, Vol. 21, No. 3, pp 187-201.
http://www.jstor.org/stable/1165267
Wood, S. W. (2011). "Differential item functioning procedures for polytomous
items when examinee sample sizes are small." doctoral PhD diss, University
of Iowa, 2011.
http://ir.uiowa.edu/etd/1110.
Parameters
----------
"filename" is the string name of a person x item file containing
integer scores of how each student did on each item, a column
containing test-level raw scores for each student, and a column
assigning each student to a group. All non-numerical cells are
treated as missing. All numerical scores are treated as valid.
Numerical scores must be integers whose minimum value is zero.
Data must be tabular and field-delimited.
filename = '/path/to/my_file.txt'
=> file is 'my_file.txt'
-----------
"student_id' is the header label of the column containing unique
student identifiers.
student_id = 'Student_ID' => Student identifiers are in the
column labels 'Student_ID'.
-----------
"group" contains the header label of the group column and
assigns one group to be "focal" and the other to be the "reference".
group = ['Sex', {'focal':'female', 'ref':'male'}]
=> Student gender identifiers are
in the column labeled 'Sex'.
Students labeled "female" will
be the focal group. Students
labeled "male" will be the
reference group.
Note: As is typical with DIF statistics, while there can be
more than two groups, only two are compared at a time.
-----------
"raw_score" is the header label of the raw score column.
raw_score = 'RawScore' => Test-level student raw scores
are in the column labeled
'RawScore'
-----------
"items" is the list of items for which DIF statistics should be
calculated.
items = 'All' => Calculate DIF for all items
in the dataset.
items = ['item1', 'item5'] => Calculate DIF for only items
1 and 5.
-----------
"stats" is the list of DIF stats to be calculated for each
item. If a given statistic cannot be calculated for a given
item, the cell is left blank.
stats = 'All' => Calculate all possible DIF
statistics for all items (see
list below).
stats = ['MH_d-dif', 'MH_z', 'M_z', 'SMD_z']
=> Calculate just the Mantel-Haenszel
delta-DIF (defined by ETS), the
Mantel-Haenszel z statistic (both
for dichotomous items), the Mantel
z-statistic (for dichotomous and
polytomous items), and the
standardized mean difference
z-statistic.
List of available DIF-related statistics ("MH" means Mantel-
Haenszel, "M" means Mantel, "SMD" means standardized mean difference.
Mantel-Haenszel (dichotomous data)
'MH_alpha' => odds ratio, dich, 0 -> +inf
'MH_dif' => log-odds ratio, dich, -inf -> +inf
'MH_d-dif' => delta-DIF = -2.35*log-odds, dich, -inf -> +inf,
negative implies bias toward reference group.
(d-dif > 1.5 implies DIF)
'MH_var' => variance of MH_dif (SE = sqrt(var))
'MH_d-var' => variance of MH_d-dif
'MH_z' => absolute z-statistic (dif/sqrt(var)), z > 2.0 => p < 0.05
'MH_pval' => p-value associated with z, pval < 0.05 => significance
'MH_chisq' => chi-square = z^2. chisq > 3.84 => p < 0.05
'MH_chisq_pval' => p-value associated with chisq, pval < 0.05 => significance
Mantel (dichotomous and polytomous data)
'M_dif' => observed - expected frequencies
'M_var' => variance of M_diff (SE = sqrt(var))
'M_z' => signed z-statistic, dif/sqrt(var), z > 2.0 => p < 0.05
'M_pval' => p-value associated with z, pval < 0.05 => significance
'M_chisq' => chi-square = z^2. chisq > 3.84 => p < 0.05
'M_chisq_pval' => p-value associated with chisq, pval < 0.05 => significance
Standardized mean difference (mainly for polytomous data)
'SMD_dif' => difference between reference and focal groups
'SMD_var' => variance of SMD_dif (SE = sqrt(var))
'SMD_z' => signed z-statistic, dif/sqrt(var), z > 2.0 => p < 0.05
'SMD_pval' => p-value associated with z, pval < 0.05 => significance
'SMD_chisq' => chi-square = z^2. chisq > 3.84 => p < 0.05
'SMD_chisq_pval'=> p-value associated with chisq, pval < 0.05 => significance
Other stats
'SD' => standard deviation of person scores for that item
'SMD/SD' => absolute SMD/SD > 0.25 implies DIF if SMD_chisq_pval < 0.05
'Flag' => flag a DIF item based on the rules described below.
'Counts' => Count valid scores for each item, overall and by group.
As mentioned, all statistics that are dependent on sample size (e.g., z,
chi-square) will show larger values as sample size increases and their
standard errors go to zero. Therefore, DIF decisions should be based
on other considerations.
One useful rule suggested by Zwick, Thayer, and Mazzeo and used by
ETS is as follows. Flag DIF:
for dichotomous items:
Flag = 2 if:
'MH_d-dif' is greater than 1.5 and significantly greater than 1.0.
Flag = 0 if:
'MH_d-dif' is less than 1.0 or the p-value is greater than 0.05.
Flag = 1, otherwise.
These correspond to ETS a, b, c DIF flags:
'a'=>0, 'b'=>1, 'c'=>2
for polytomous items:
Flag = 2 if:
'SMD/SD' is greater than 0.25 and 'M_chisq_pval' is less than 0.05.
Flag = 0, otherwise.
There is no flag = 1 here.
(Note: Zwick refers to this as a Mantel-Haenszel chi-square p-value
but the formula resembles the polytomous Mantel chi-square p-value,
which is what is used here.)
-----------
"strata" is the number of ability strata or levels into which
to divide student test raw scores for purposes of matching
students of similar abilities. If the number of strata do
not divide evenly into the number of potential raw scores,
the remainder are stuck in the lowest stratum. "strata" requires
a backup strata specification in case the primary specification
leads to a count of one or less for a given item:
strata = (primary, backup)
Examples:
strata = ('all_scores', 4) => Let each possible raw
score be its own stratum.
This is desirable so long as
the sample of persons is large
enough that all cells in
the resulting stratum x score
table have fairly large counts.
If 'all_scores' yields insufficient
data for a given item, use a
stratum of 4 for that item.
strata = (20, 10) => Divide the raw scores into
20 strata and match students
who belong to the same stratum.
If this leads to insufficient data,
use 10 for that item.
Some DIF programs allow no more than five or so stratification
levels in order to avoid insufficient counts. This degrades the
DIF statistics a little, but not generally enough to be a problem.
-----------
"getrows" controls the rows that are loaded from the datafile,
making it possible to filter out unneeded rows, e.g., to get a
student subsample. The syntax is drawn from Damon's extract()
method and can be a bit fancy. To get a full description of
what you can do with getrows, see:
>>> help(core.Damon.extract)
Simple examples:
getrows = None => Retain all rows as they are.
Non-intuitively, this really means
"get all rows".
getrows = {'Get':'AllExcept','Labels':'key','Rows':['row_x', 'row_y']}
=> Extract all rows except those
labeled 'row_x' and 'row_y'.
getrows = {'Get':'NoneExcept','Labels':'index','Rows':[range(1, 20, 2)]}
=> Extract only row 1 up to, but not
including, row 20. 2 is a step parameter, and
means get every other row within the range.
Counting starts from 0. The 'index' parameter
means 'Rows' refers to positions, not 'keys'.
-----------
"getcols" controls the columns that are loaded from the datafile,
making it possible to filter out unneeded columns, e.g., data
columns that are not items or the student raw score. The syntax
is drawn from Damon's extract() method and can be a bit fancy.
To get a full description of what you can do with getcols, see:
>>> help(core.Damon.extract)
Simple examples:
getcols = None => Retain all columns as they are.
Non-intuitively, this really means
"get all columns".
getcols = {'Get':'AllExcept','Labels':'key','Cols':['col_x', 'col_y']}
=> Extract all columns except those
labeled 'col_x' and 'col_y'.
getcols = {'Get':'NoneExcept','Labels':'index','Cols':[range(2, 41)]}
=> Extract only columns 2 up to, but not
including, 41. Counting starts from 0.
Note the 'index' parameter.
-----------
"delimiter" is the character used to delimit columns in
the dataset.
delimiter = ',' => File is comma-delimited.
delimiter = '\t' => File is tab-delimited.
Examples
--------
[under construction]
Paste Function
--------------
dif_stats(filename, # [<'my/file.txt',...> => name of scored data file]
student_id = 'Student_ID', # [<'Student_ID', ...> => student id column label]
group = ['Sex', {'focal':0, 'ref':1}], # [<e.g.'Sex', {'focal':'female', 'ref':'male'}]> => column label with assignment to focal and reference]
raw_score = 'RawScore', # [<'RawScore',...> => raw score column label]
items = 'All', # [<'All', ['item1', 'item3',...]> => items for which to get stats]
stats = 'All', # [<'All', [see list in docs]> => desired statistics]
strata = ('all_scores', 4), # [(<'all_scores', int>, int) => number of raw score strata, with backup if insufficient]
getrows = None, # [<None, {'Get':_,'Labels':_,'Rows':_}> => select rows using extract() syntax]
getcols = None, # [<None, {'Get':_,'Labels':_,'Cols':_}> => select cols using extract() syntax]
delimiter = '\t', # [<',', '\t'> => column delimiter]
)
"""
    args = locals()
    # Delegate to the underlying implementation; the import above is aliased
    # so that this wrapper does not shadow and recursively call itself.
    return _dif_stats(**args)
|
0ed6b94e63d5eacc40aeaf4f2181012ef8aacc22
| 3,650,081
|
def delete_all_devices_for_user():
"""
delete all active devices for the given user
"""
try:
username = get_jwt_identity()
with session_scope() as session:
user = user_service.get_user(username, session)
device_count = user.devices.count()
if device_count == 0:
resp = {
"status": "error",
"msg": "no devices found for '%s'" % username
}
return make_response(jsonify(resp), status.HTTP_404_NOT_FOUND)
LOGGER.info("Deleting all devices for '%s'" % username)
for device in user.devices:
device_service.delete_device(user.username, device.device_id, session)
LOGGER.info("Deleted " + device.device_name + ", with device id = " + device.device_id + "!")
LOGGER.info("Deleted all devices for '%s'" % username)
resp = {
"status": "success",
"msg": "deleted %d devices for '%s'" % (device_count, username)
}
return make_response(jsonify(resp), status.HTTP_200_OK)
except Exception as e:
resp = {
"status": "error",
"msg": "%s" % str(e)
}
return make_response(jsonify(resp), status.HTTP_500_INTERNAL_SERVER_ERROR)
|
2ac4c0f40e72dc54ca78a109ead9d09f15481b92
| 3,650,082
|
def _GetNormalizationTuple(url):
"""Parse a URL into a components tuple.
Parse a URL into 6 components:
<scheme>://<netloc>/<path>;<params>?<query>#<fragment>
Args:
url:A URL string.
Returns:
A 6-tuple: (scheme, netloc, path, params, query, fragment).
"""
url = encoding_util.EncodeToAscii(url)
up = urlparse(url, 'http')
authority = up[1]
path = up[2]
if not authority:
end_index = path.find('/')
if end_index == -1:
end_index = len(path)
authority = path[:end_index]
path = path[end_index:]
path = path.rstrip('/') # Ignore trailing slashes on the path.
return (up[0], authority, path, up[3], up[4], up[5])
|
cbc8dad95202a9a17f75dac754b6ec00e3efcdfd
| 3,650,083
|
def gCallback(dataset, geneid, colors):
"""Callback to set initial value of green slider from dict.
Positional arguments:
dataset -- Currently selected dataset.
geneid -- Not needed, only to register input.
colors -- Dictionary containing the color values.
"""
colorsDict = colors
try:
colorVal = colorsDict[dataset][4:-1].split(',')[1]
return int(colorVal)
except KeyError:
return 0
|
5a97fd16ea362b3b53f33f52a449c4dccc617e44
| 3,650,084
|
import numpy as np
import tecplot as tp
def intForcesMoments(sliceZnes, method, direction):
"""
Loops over the sliceZnes and performs an integration of Forces and moments
for each slice (Scalar integrals, variables are depending on the method).
Returns a ([dir, dirNormalized,fxNr,fyNr,fzNr,mxNr,myNr,mzNr]*Nslices array)
"""
#direction, norm_direction, fx,fy,fz,mx,my,mz
forcesMoments=np.zeros((8,len(sliceZnes)))
ds = sliceZnes[0].dataset
fr = ds.frame
#Retrieves Forces and Moments variables
xAxisNr=ds.variable(direction).index
if method == "Pressure":
fxNr=ds.variable('px').index+1
fyNr=ds.variable('py').index+1
fzNr=ds.variable('pz').index+1
else:
fxNr=ds.variable('taux').index+1
fyNr=ds.variable('tauy').index+1
fzNr=ds.variable('tauz').index+1
mxNr=ds.variable('mx').index+1
myNr=ds.variable('my').index+1
mzNr=ds.variable('mz').index+1
#Populates the returned array with the direction and integrated values
for i,slc in enumerate(sliceZnes):
forcesMoments[(0,i)]= slc.values(xAxisNr)[0]
for j,v in enumerate([fxNr,fyNr,fzNr,mxNr,myNr,mzNr]):
intCmde=("Integrate ["+"{}".format(slc.index + 1)+"] VariableOption='Scalar'"\
+ " XOrigin=0 YOrigin=0 ZOrigin=0"\
+" ScalarVar=" + "{}".format(v)\
+ " Absolute='F' ExcludeBlanked='F' XVariable=1 YVariable=2 ZVariable=3 "\
+ "IntegrateOver='Cells' IntegrateBy='Zones'"\
+ "IRange={MIN =1 MAX = 0 SKIP = 1}"\
+ " JRange={MIN =1 MAX = 0 SKIP = 1}"\
+ " KRange={MIN =1 MAX = 0 SKIP = 1}"\
+ " PlotResults='F' PlotAs='Result' TimeMin=0 TimeMax=0")
tp.macro.execute_extended_command(command_processor_id='CFDAnalyzer4',
command=intCmde)
forcesMoments[(j+2,i)]=fr.aux_data['CFDA.INTEGRATION_TOTAL']
#Normalized direction:
forcesMoments[1]=(forcesMoments[0]-forcesMoments[0].min())/(forcesMoments[0].max()-forcesMoments[0].min())
return (forcesMoments)
|
16e0a3adc3a3b171fd02b07f241ed8623b16c7e3
| 3,650,085
|
from typing import List
def _other_members(other_members: List[parser.MemberInfo], title: str):
"""Returns "other_members" rendered to markdown.
`other_members` is used for anything that is not a class, function, module,
or method.
Args:
other_members: A list of `MemberInfo` objects.
title: Title of the table.
Returns:
A markdown string
"""
items = []
for other_member in other_members:
description = [other_member.doc.brief]
for doc_part in other_member.doc.docstring_parts:
if isinstance(doc_part, parser.TitleBlock):
# Use list_view here because description will be part of a table.
description.append(str(doc_part))
else:
description.append(doc_part)
items.append(
parser.ITEMS_TEMPLATE.format(
name=other_member.short_name,
anchor=f'<a id="{other_member.short_name}"></a>',
description='\n'.join(description),
))
return '\n' + parser.TABLE_TEMPLATE.format(
title=title, text='', items=''.join(items)) + '\n'
|
77c02e8532dd01bab0b9ea0f9d14634dc3523cd2
| 3,650,086
|
def full_url(parser, token):
"""Spits out the full URL"""
url_node = url(parser, token)
f = url_node.render
url_node.render = lambda context: _get_host_from_context(context) + f(context)
return url_node
|
d54e9cf5acee1b6283f3166e9479e8c9e8bb5047
| 3,650,087
|
import numpy as np
from scipy.linalg import cholesky, solve_triangular
def Chi2CoupleDiffFunc(nzbins, nzcorrs, ntheta, mask,
                       data1, xi_obs_1, xi_theo_1,
                       data2, xi_obs_2, xi_theo_2,
                       inDir_cov12, file_name_cov12):
    """
    Estimate the chi^2 for the difference between two data vectors.
    Note: this assumes the two data vectors have separate covariance matrices,
    that their cross-covariance is also supplied,
    and that the masks for the two data vectors are identical.
    """
# load the full covariance matrix:
covmat_block_1 = io_cs.LoadCovarianceFunc(data1, nzbins, nzcorrs, xi_theo_1)
covmat_block_2 = io_cs.LoadCovarianceFunc(data2, nzbins, nzcorrs, xi_theo_2)
covmat_block_12 = io_cs.LoadCrossCovarianceFunc(inDir_cov12, file_name_cov12, ntheta, nzbins, nzcorrs, xi_theo_1, xi_theo_2)
# build a combined cov-mat
covmat = covmat_block_1 + covmat_block_2 - covmat_block_12 - covmat_block_12.transpose()
# trim covariance matrix to chosen scales:
mask_indices = np.where(mask == 1)[0]
covmat = covmat[np.ix_(mask_indices, mask_indices)]
# precompute Cholesky transform for chi^2 calculation:
# don't invert that matrix...
# use the Cholesky decomposition instead:
cholesky_transform = cholesky(covmat, lower=True)
vec = (xi_theo_1[mask_indices] - xi_obs_1[mask_indices]) - (xi_theo_2[mask_indices] - xi_obs_2[mask_indices])
yt = solve_triangular(cholesky_transform, vec, lower=True)
chi2 = yt.dot(yt)
return chi2, len(vec)
|
c0cd8a683447b0572a93914e633fb8f770c3a6fd
| 3,650,088
|
def minimax(just_mapping, mapping):
"""
Scale the mapping to minimize the maximum error from just intonation.
"""
least_error = float("inf")
best_mapping = mapping
for i in range(len(just_mapping)):
for j in range(i+1, len(just_mapping)):
candidate = mapping / (mapping[i] + mapping[j]) * (just_mapping[i] + just_mapping[j])
error = abs(just_mapping - candidate).max()
if error < least_error:
least_error = error
best_mapping = candidate
return best_mapping
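# Minimal usage sketch: pick the rescaling of a 12-TET mapping (in octave
# fractions) whose worst deviation from the just-intonation targets is smallest.
import numpy as np
_just = np.log2([1.0, 5 / 4, 3 / 2, 2.0])          # unison, major third, fifth, octave
_tempered = np.array([0.0, 4.0, 7.0, 12.0]) / 12.0
print(minimax(_just, _tempered))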
|
b2226de7a916e3075327cd30c64e7412e186027d
| 3,650,089
|
import datetime
from pytz import UTC  # assumed source of the UTC timezone object used below
def app_used_today():
"""Check the session and the backend database for a record of app use from the last 24 hours."""
now = UTC.localize(datetime.datetime.utcnow())
last_app_use = get_last_app_use_date()
day_length_in_seconds = 60 * 60 * 24
if last_app_use and (last_app_use.timestamp() + day_length_in_seconds) > now.timestamp():
return True
return False
|
290bb4b87e74f5134effeb37da36cedcca05c4aa
| 3,650,090
|
def search_by_pattern(pattern, limit=20):
"""Perform a search for pattern."""
pattern_ = normalize_pattern(pattern)
db = get_db()
results = db.execute(
"""
SELECT json FROM places
WHERE document MATCH ?
ORDER BY rank DESC
LIMIT ?;
""",
(fts_pattern(pattern_), limit)
).fetchall()
return "[{}]".format(','.join([doc['json'] for doc in results]))
|
467b85c850bb27ac1ed9a6e7fff6bb969a5f84e0
| 3,650,091
|
def gcd(a, b):
"""Greatest common divisor"""
return _gcd_internal(abs(a), abs(b))
|
886d366893a0215ccf0208af56c9c45037ad9549
| 3,650,092
|
def exp_create_database(db_name, demo, lang, user_password='admin', login='admin', country_code=None, phone=None):
""" Similar to exp_create but blocking."""
_logger.info('Create database `%s`.', db_name)
_create_empty_database(db_name)
_initialize_db(id, db_name, demo, lang, user_password, login, country_code, phone)
return True
|
b1d956628d864e0aa3998c00fd6a0b7cfb3ba411
| 3,650,093
|
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline
def fix_cr(data):
    """Cosmic ray fixing function.
    Args:
        data (:class:`numpy.ndarray`): Input image data.
    Returns:
        :class:`numpy.ndarray`: Fixed image data.
    """
m = data.mean(dtype=np.float64)
s = data.std(dtype=np.float64)
_mask = data > m + 3.*s
if _mask.sum()>0:
x = np.arange(data.size)
f = InterpolatedUnivariateSpline(x[~_mask], data[~_mask], k=3)
return f(x)
else:
return data
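# Minimal usage sketch: spike one sample of a smooth signal and repair it.
_x = np.linspace(0, 1, 100)
_signal = np.sin(2 * np.pi * _x)
_signal[50] += 50.0                      # fake cosmic-ray hit
print(abs(fix_cr(_signal)[50]) < 0.2)    # True: the spike is replaced by an interpolated value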
|
40702ccc9400f4ba5f10cad1f376b83eac487876
| 3,650,094
|
def iou(box1, box2, iouType='segm'):
"""Compute the Intersection-Over-Union of two given boxes.
or the Intersection-Over box2.
Args:
box1: array of 4 elements [cx, cy, width, height].
box2: same as above
iouType: The kind of intersection it will compute.
'keypoints' is for intersection over box2 area.
Returns:
iou: a float number in range [0, 1]. iou of the two boxes.
"""
lr = min(box1[0]+0.5*box1[2], box2[0]+0.5*box2[2]) - \
max(box1[0]-0.5*box1[2], box2[0]-0.5*box2[2])
if lr > 0:
tb = min(box1[1]+0.5*box1[3], box2[1]+0.5*box2[3]) - \
max(box1[1]-0.5*box1[3], box2[1]-0.5*box2[3])
if tb > 0:
intersection = tb*lr
else:
intersection = 0
        if iouType == 'keypoints':
box2_area = box2[2] * box2[3]
return intersection/box2_area
else:
union = box1[2]*box1[3]+box2[2]*box2[3]-intersection
return intersection/union
return 0
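# Minimal usage sketch: two unit boxes whose centres are half a width apart.
_box_a = [0.5, 0.5, 1.0, 1.0]   # cx, cy, w, h
_box_b = [1.0, 0.5, 1.0, 1.0]
print(iou(_box_a, _box_b))      # 0.5 / (1 + 1 - 0.5) = 1/3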
|
42ef4689c977e4ccbdbb987ff3ae63b265d3c42d
| 3,650,095
|
def transform_color(color1, color2, skipR=1, skipG=1, skipB=1):
"""
transform_color(color1, color2, skipR=1, skipG=1, skipB=1)
This function takes 2 color1 and color2 RGB color arguments, and then returns a
list of colors in-between the color1 and color2
eg- tj.transform_color([0,0,0],[10,10,20]) returns a list:-
[[0, 0, 0], [1, 1, 1], [2, 2, 2] ... [9, 9, 9], [10, 10, 10], [10, 10, 11] ... [10, 10, 20]]
This function is very useful for creating color fade or color transition effects in pygame.
There are 3 optional arguments, which are skip arguments set to 1 by default.
"""
L = []
if (color1[0] < color2[0]):
i = list(range(color1[0],
color2[0] + 1,
skipR))
else:
i = list(range(color2[0], color1[0] + 1, skipR))[::-1]
if i == []:
i = [color1[0]]
if (color1[1] < color2[1]):
j = list(range(color1[1],
color2[1] + 1,
skipG))
else:
j = list(range(color2[1], color1[1] + 1, skipG))[::-1]
if j == []:
j = [color1[1]]
if (color1[2] < color2[2]):
k = list(range(color1[2],
color2[2] + 1,
skipB))
else:
k = list(range(color2[2], color1[2] + 1, skipB))[::-1]
if k == []:
k = [color1[2]]
x = max(len(i), len(j), len(k))
for m in range(len(i), x):
i += [i[-1]]
for m in range(len(j), x):
j += [j[-1]]
for m in range(len(k), x):
k += [k[-1]]
for m in range(x):
l = [i[m], j[m], k[m]]
L += [l]
return L
|
5f04daa951c59b0445387b2dc988ab7efb98aff4
| 3,650,096
|
def sandwich(func):
"""Write a decorator that prints UPPER_SLICE and
LOWE_SLICE before and after calling the function (func)
that is passed in (@wraps is to preserve the original
func's docstring)
"""
@wraps(func)
def wrapped(*args, **kwargs):
print(UPPER_SLICE)
func(*args, **kwargs)
print(LOWE_SLICE)
return wrapped
|
167e1a753b7ba1f0d42732e12c5b37e0b0670f1b
| 3,650,097
|
def as_dict(bdb_path, compact=True):
"""Get the state of a minter BerkeleyDB as a dict. Only the fields used by EZID are
included.
"""
with nog.bdb_wrapper.BdbWrapper(bdb_path, dry_run=False) as w:
return w.as_dict(compact)
|
dab02d671c099bd726839dc40167632cab812015
| 3,650,098
|
import numpy as np
def _ecg_findpeaks_ssf(signal, sampling_rate=1000, threshold=20, before=0.03, after=0.01):
"""From https://github.com/PIA-
Group/BioSPPy/blob/e65da30f6379852ecb98f8e2e0c9b4b5175416c3/biosppy/signals/ecg.py#L448.
- W. Zong, T. Heldt, G.B. Moody, and R.G. Mark. An open-source algorithm to detect onset of arterial
blood pressure pulses. In Computers in Cardiology, 2003, pages 259–262, 2003.
"""
    # TODO: Doesn't really seem to work
# convert to samples
winB = int(before * sampling_rate)
winA = int(after * sampling_rate)
Rset = set()
length = len(signal)
# diff
dx = np.diff(signal)
dx[dx >= 0] = 0
dx = dx ** 2
# detection
(idx,) = np.nonzero(dx > threshold)
idx0 = np.hstack(([0], idx))
didx = np.diff(idx0)
# search
sidx = idx[didx > 1]
for item in sidx:
a = item - winB
if a < 0:
a = 0
b = item + winA
if b > length:
continue
r = np.argmax(signal[a:b]) + a
Rset.add(r)
# output
rpeaks = list(Rset)
rpeaks.sort()
rpeaks = np.array(rpeaks, dtype="int")
return rpeaks
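# Minimal usage sketch (note the TODO above): two artificial unit spikes in an
# otherwise flat signal are recovered at their sample indices.
_sig = np.zeros(2000)
_sig[500] = 1.0
_sig[1500] = 1.0
print(_ecg_findpeaks_ssf(_sig, sampling_rate=1000, threshold=0.5))  # [ 500 1500]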
|
d7527db71724d8208a3438f8f959d23e82c89d6a
| 3,650,099
|