content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
import csv
def snp2dict(snpfile):
    """Read a .snp file exported from the save&restore app into a dict.

    Parameters
    ----------
    snpfile : str
        Filename of snp file exported from save&restore app.

    Returns
    -------
    r : dict
        Dict of pairs of PV name and setpoint value (values are the raw
        strings from the file).
    """
    with open(snpfile, 'r') as stream:
        rows = csv.reader(stream, delimiter=',', skipinitialspace=True)
        next(rows)  # discard the export banner line
        column_names = next(rows)
        pv_col = column_names.index('PV')
        val_col = column_names.index('VALUE')
        result = {}
        for row in rows:
            # Skip blank lines that csv.reader yields as empty lists.
            if row:
                result[row[pv_col]] = row[val_col]
    return result
import numpy
def _handle_zeros_in_scale(scale, copy=True, constant_mask=None):
"""
Set scales of near constant features to 1.
The goal is to avoid division by very small or zero values.
Near constant features are detected automatically by identifying
scales close to machine precision unless they are precomputed by
the caller and passed with the `constant_mask` kwarg.
Typically for standard scaling, the scales are the standard
deviation while near constant features are better detected on the
computed variances which are closer to machine precision by
construction.
Parameters
----------
scale : array
Scale to be corrected.
copy : bool
Create copy.
constant_mask : array
Masking array.
Returns
-------
scale : array
Corrected scale.
"""
# if we are fitting on 1D arrays, scale might be a scalar
if numpy.isscalar(scale):
if scale == .0:
scale = 1.
return scale
elif isinstance(scale, numpy.ndarray):
if constant_mask is None:
# Detect near constant values to avoid dividing by a very small
# value that could lead to suprising results and numerical
# stability issues.
constant_mask = scale < 10 * numpy.finfo(scale.dtype).eps
if copy:
# New array to avoid side-effects
scale = scale.copy()
scale[constant_mask] = 1.0
return scale | 129f28dbf74f04929fcbccf8de42ee1c8dd5a09e | 30,801 |
def ST_LineStringFromText(geos):
    """
    Transform the representation of a linestring from WKT to WKB.

    :type geos: WKT
    :param geos: Linestring in WKT form, e.g. 'LINESTRING (0 0, 0 1, 1 1, 1 0)'.
    :rtype: WKB
    :return: Linestring in WKB form.
    """
    # Delegates to the generic WKT parser; linestrings need no extra handling.
    geometry_wkb = arctern.ST_GeomFromText(geos)
    return geometry_wkb
def get_container_state(pod, name):
    """Return the state of container ``name`` in ``pod``.

    One of ``waiting``, ``running``, ``terminated`` or ``unknown``.
    """
    # A pending pod has no per-container state yet.
    if pod["status"].get("phase", "Unknown") == "Pending":
        return "waiting"
    status = get_container_status(pod, name)
    if status is None:
        return "unknown"
    # The "state" mapping holds a single key naming the current state.
    return next(iter(status["state"]))
def implicit_ecp(
        objective, equality_constraints, initial_values, lr_func, max_iter=500,
        convergence_test=default_convergence_test, batched_iter_size=1, optimizer=optimizers.sgd,
        tol=1e-6):
    """Use implicit differentiation to solve a nonlinear equality-constrained program of the form:

        max f(x, θ) subject to h(x, θ) = 0 .

    We perform a change of variable via the implicit function theorem and obtain the unconstrained
    program:

        max f(φ(θ), θ) ,

    where φ is an implicit function of the parameters θ such that h(φ(θ), θ) = 0.

    Args:
        objective (callable): Binary callable with signature `f(x, θ)`
        equality_constraints (callble): Binary callable with signature `h(x, θ)`
        initial_values (tuple): Tuple of initial values `(x_0, θ_0)`
        lr_func (scalar or callable): The step size used by the unconstrained optimizer. This can
            be a scalar ora callable taking in the current iteration and returning a scalar.
        max_iter (int, optional): Maximum number of outer iterations. Defaults to 500.
        convergence_test (callable): Binary callable with signature `callback(new_state, old_state)`
            where `new_state` and `old_state` are tuples of the form `(x_k^*, θ_k)` such that
            `h(x_k^*, θ_k) = 0` (and with `k-1` for `old_state`). The default convergence test
            returns `true` if both elements of the tuple have not changed within some tolerance.
        batched_iter_size (int, optional): The number of iterations to be
            unrolled and executed per iterations of the `while_loop` op for the forward iteration
            and the fixed-point adjoint iteration. Defaults to 1.
        optimizer (callable, optional): Unary callable waking a `lr_func` as a argument and
            returning an unconstrained optimizer. Defaults to `jax.experimental.optimizers.sgd`.
        tol (float, optional): Tolerance for the forward and backward iterations. Defaults to 1e-6.

    Returns:
        fax.loop.FixedPointSolution: A named tuple containing the solution `(x, θ)` as as the
            `value` attribute, `converged` (a bool indicating whether convergence was achieved),
            `iterations` (the number of iterations used), and `previous_value`
            (the value of the solution on the previous iteration). The previous value satisfies
            `sol.value=func(sol.previous_value)` and allows us to log the size
            of the last step if desired.
    """
    # The optimizer is a minimizer, so negate the objective to maximize f.
    def _objective(*args):
        return -objective(*args)

    # Recast h(x, θ) = 0 as the fixed point x = x + h(x, θ) so it can be
    # solved by (and differentiated through) fixed-point iteration.
    def make_fp_operator(params):
        def _fp_operator(i, x):
            del i  # iteration index unused by this operator
            return x + equality_constraints(x, params)
        return _fp_operator

    # Forward solver finds x*(θ); adjoint solver backpropagates through it
    # via the implicit function theorem (vector-Jacobian product).
    constraints_solver = make_forward_fixed_point_iteration(
        make_fp_operator, default_max_iter=max_iter, default_batched_iter_size=batched_iter_size,
        default_atol=tol, default_rtol=tol)
    adjoint_iteration_vjp = make_adjoint_fixed_point_iteration(
        make_fp_operator, default_max_iter=max_iter, default_batched_iter_size=batched_iter_size,
        default_atol=tol, default_rtol=tol)
    opt_init, opt_update, get_params = optimizer(step_size=lr_func)
    # Gradients w.r.t. both x (arg 0) and θ (arg 1) are needed below.
    grad_objective = grad(_objective, (0, 1))

    def update(i, values):
        # One outer step: solve the constraint for x*, then take an
        # implicit-gradient step on θ.
        old_xstar, opt_state = values
        old_params = get_params(opt_state)
        forward_solution = constraints_solver(old_xstar, old_params)
        grads_x, grads_params = grad_objective(forward_solution.value, get_params(opt_state))
        # ybar carries dL/dθ contributions that flow through x*(θ).
        ybar, _ = adjoint_iteration_vjp(
            grads_x, forward_solution, old_xstar, old_params)
        # Total gradient = direct ∂f/∂θ + implicit contribution through x*.
        implicit_grads = tree_util.tree_multimap(
            lax.add, grads_params, ybar)
        opt_state = opt_update(i, implicit_grads, opt_state)
        # Warm-start the next constraint solve from the current x*.
        return forward_solution.value, opt_state

    def _convergence_test(new_state, old_state):
        # Unpack (x, opt_state) into (x, θ) before delegating to the
        # user-supplied test.
        x_new, params_new = new_state[0], get_params(new_state[1])
        x_old, params_old = old_state[0], get_params(old_state[1])
        return convergence_test((x_new, params_new), (x_old, params_old))

    x0, init_params = initial_values
    opt_state = opt_init(init_params)
    solution = fixed_point_iteration(init_x=(x0, opt_state),
                                     func=update,
                                     convergence_test=jit(_convergence_test),
                                     max_iter=max_iter,
                                     batched_iter_size=batched_iter_size,
                                     unroll=False)
    # Expose (x, θ) to the caller instead of the raw optimizer state.
    return solution._replace(
        value=(solution.value[0], get_params(solution.value[1])),
        previous_value=(solution.previous_value[0], get_params(solution.previous_value[1])),
    )
from .modules import utils, concat
def concatenate(input_files, output_file):
    """
    Concatenates the input files into the single output file.

    In debug mode this function adds a comment with the filename
    before the contents of each file.
    """
    if not isinstance(input_files, (list, tuple)):
        raise RuntimeError('Concatenate takes a list of input files.')
    # Build the compilation spec consumed by the pipeline runner.
    return dict(
        dependencies_fn=utils.no_dependencies,
        compiler_fn=concat.concatenate_input_files,
        input=input_files,
        output=output_file,
        kwargs={},
    )
def get_detection_probability_Braun2008(filename, index, TS_threshold):
    """
    Find the detection probability as a function
    of the expected number of source counts in
    a detector.
    Returns Nsrc_list and Pdet

    :param filename: Filename
    :param index: spectral index
    :param TS_threshold: TS <=> 5sigma threshold
    """
    # Load the simulated test-statistic (TS) distributions for each Nsrc
    # from the HDF5 file, grouped by spectral index.
    with h5py.File(filename, "r") as f:
        Nsrc_list = f["Nsrc_list"][()]
        folder = f["index_%.2f" % index]
        TS = []
        for Nsrc in Nsrc_list:
            TS.append(folder["TS_" + str(Nsrc)][()])
    # Find Pdet for each expected Nsrc
    Pdet_at_Nsrc = []
    for i, Nsrc in enumerate(Nsrc_list):
        # Drop failed trials (NaN TS values) before thresholding.
        idx = np.where(~np.isnan(TS[i]))
        ts = TS[i][idx]
        # Fraction of trials whose TS exceeds the detection threshold.
        P = len(ts[ts > TS_threshold]) / len(ts)
        Pdet_at_Nsrc.append(P)
    # Weight by poisson probability
    # NOTE(review): the Poisson weight poisson(Nsrc).pmf(i) is paired with the
    # i-th entry of Pdet_at_Nsrc — this assumes Nsrc_list is exactly
    # 0, 1, ..., N-1 so that index == count; confirm against the file layout.
    # `poisson` is presumably scipy.stats.poisson — verify the import site.
    Pdet = []
    for Nsrc in Nsrc_list:
        P = sum([w * poisson(Nsrc).pmf(i) for i, w in enumerate(Pdet_at_Nsrc)])
        Pdet.append(P)
    return Nsrc_list, Pdet
from datetime import datetime, timezone
def device_create(db: Session,
                  name: str,
                  device_type: DeviceType,
                  activation_token: str) -> Device:
    """
    Create and persist a new Device.

    Args:
        db: Active database session used for the insert.
        name: Human-readable device name.
        device_type: Type of the device being registered.
        activation_token: Token the device will use to activate itself.

    Returns:
        The freshly persisted Device, refreshed so server-generated
        fields (e.g. the primary key) are populated.
    """
    device = Device(
        name=name,
        device_type=device_type,
        activation_token=activation_token,
        # Bug fix: `timezone` was never imported, so this raised NameError
        # at call time; it is now imported alongside `datetime`.
        created_on=datetime.now(timezone.utc),
    )
    db.add(device)
    db.commit()
    # Reload server-generated columns (id, defaults) onto the instance.
    db.refresh(device)
    return device
def xy_potential(_):
    """
    Potential for the square XY model with periodic boundary conditions.
    """
    def potential(_, passive_rates):
        # Negative total passive rate, summed over all sites and directions.
        return -passive_rates.sum(dim=(-1, -2, -3))
    return potential
def distribute_srcs_2D(X, Y, n_src, ext_x, ext_y, R_init):
    """Distribute n_src's in the given area evenly

    Parameters
    ----------
    X, Y : np.arrays
        points at which CSD will be estimated
    n_src : int
        demanded number of sources to be included in the model
    ext_x, ext_y : floats
        how should the sources extend the area X, Y
    R_init : float
        demanded radius of the basis element

    Returns
    -------
    X_src, Y_src : np.arrays
        positions of the sources
    R : float
        effective radius of the basis element
    """
    Lx = np.max(X) - np.min(X)
    Ly = np.max(Y) - np.min(Y)
    # Extend the estimation area on each side before placing sources.
    Lx_n = Lx + (2 * ext_x)
    Ly_n = Ly + (2 * ext_y)
    [nx, ny, Lx_nn, Ly_nn, ds] = get_src_params_2D(Lx_n, Ly_n, n_src)
    ext_x_n = (Lx_nn - Lx) / 2
    ext_y_n = (Ly_nn - Ly) / 2
    # A complex "step" in np.mgrid means "number of points" with inclusive
    # endpoints. Bug fix: use the builtin complex() — the deprecated
    # np.complex alias was removed in NumPy 1.24 and raised AttributeError.
    X_src, Y_src = np.mgrid[(np.min(X) - ext_x_n):(np.max(X) + ext_x_n):
                            complex(0, nx),
                            (np.min(Y) - ext_y_n):(np.max(Y) + ext_y_n):
                            complex(0, ny)]
    # d = round(R_init / ds)
    R = R_init  # R = d * ds
    return X_src, Y_src, R
def _setBlueprintNumberOfAxialMeshes(meshPoints, factor):
"""
Set the blueprint number of axial mesh based on the axial mesh refinement factor.
"""
if factor <= 0:
raise ValueError(
"A positive axial mesh refinement factor "
f"must be provided. A value of {factor} is invalid."
)
if factor != 1:
runLog.important(
"An axial mesh refinement factor of {} is applied "
"to blueprint based on setting specification.".format(factor),
single=True,
)
return int(meshPoints) * factor | 6c477b0e55e996009158fa34c06d3c642e4691c4 | 30,810 |
from typing import Mapping
from typing import Any
import os
import logging
import time
def run_benchmark(
    execution_mode: str,
    params: config_definitions.ExperimentConfig,
    model_dir: str,
    distribution_strategy: tf.distribute.Strategy = None
) -> Mapping[str, Any]:
    """Runs benchmark for a specific experiment.

    Args:
        execution_mode: A 'str', specifying the mode. Can be 'accuracy',
            'performance', or 'tflite_accuracy'.
        params: ExperimentConfig instance.
        model_dir: A 'str', a path to store model checkpoints and summaries.
        distribution_strategy: A tf.distribute.Strategy to use. If specified,
            it will be used instead of inferring the strategy from params.

    Returns:
        benchmark_data: returns benchmark data in dict format.

    Raises:
        NotImplementedError: If try to use unsupported setup.
    """
    # For GPU runs, allow option to set thread mode
    if params.runtime.gpu_thread_mode:
        os.environ['TF_GPU_THREAD_MODE'] = params.runtime.gpu_thread_mode
        logging.info('TF_GPU_THREAD_MODE: %s', os.environ['TF_GPU_THREAD_MODE'])
    # Sets mixed_precision policy. Using 'mixed_float16' or 'mixed_bfloat16'
    # can have significant impact on model speeds by utilizing float16 in case of
    # GPUs, and bfloat16 in the case of TPUs. loss_scale takes effect only when
    # dtype is float16
    if params.runtime.mixed_precision_dtype:
        performance.set_mixed_precision_policy(params.runtime.mixed_precision_dtype)
    # An explicitly supplied strategy wins over the one described in params.
    strategy = distribution_strategy or distribute_utils.get_distribution_strategy(
        distribution_strategy=params.runtime.distribution_strategy,
        all_reduce_alg=params.runtime.all_reduce_alg,
        num_gpus=params.runtime.num_gpus,
        tpu_address=params.runtime.tpu)
    # Model/task construction must happen under the strategy scope so
    # variables are created on the right devices.
    with strategy.scope():
        task = task_factory.get_task(params.task, logging_dir=model_dir)
        trainer = train_utils.create_trainer(
            params,
            task,
            train=True,
            evaluate=(execution_mode == 'accuracy'))
        # Initialize the model if possible, e.g., from a pre-trained checkpoint.
        trainer.initialize()
    # Performance modes use a short fixed loop; accuracy modes honor config.
    steps_per_loop = params.trainer.steps_per_loop if (
        execution_mode in ['accuracy', 'tflite_accuracy']) else 100
    controller = orbit.Controller(
        strategy=strategy,
        trainer=trainer,
        evaluator=trainer if (execution_mode == 'accuracy') else None,
        global_step=trainer.global_step,
        steps_per_loop=steps_per_loop)
    logging.info('Starts to execute execution mode: %s', execution_mode)
    with strategy.scope():
        # Training for one loop, first loop time includes warmup time.
        first_loop_start_time = time.time()
        controller.train(steps=steps_per_loop)
        first_loop_time = time.time() - first_loop_start_time
        # Training for second loop.
        second_loop_start_time = time.time()
        # controller.train trains up to an absolute step count, hence 2x.
        controller.train(steps=2*steps_per_loop)
        second_loop_time = time.time() - second_loop_start_time
        if execution_mode == 'accuracy':
            controller.train(steps=params.trainer.train_steps)
            # NOTE(review): first_loop_time is a *duration*, not a timestamp,
            # so this wall_time value is wrong — but it is unconditionally
            # overwritten below before use; confirm and clean up.
            wall_time = time.time() - first_loop_time
            eval_logs = trainer.evaluate(
                tf.convert_to_tensor(params.trainer.validation_steps))
            benchmark_data = {'metrics': eval_logs}
        elif execution_mode == 'performance':
            benchmark_data = {}
        elif execution_mode == 'tflite_accuracy':
            eval_logs = tflite_utils.train_and_evaluate(
                params, task, trainer, controller)
            benchmark_data = {'metrics': eval_logs}
        else:
            raise NotImplementedError(
                'The benchmark execution mode is not implemented: %s' %
                execution_mode)
        # First training loop time contains startup time plus training time, while
        # second training loop time is purely training time. Startup time can be
        # recovered by subtracting second trianing loop time from first training
        # loop time.
        startup_time = first_loop_time - second_loop_time
        wall_time = time.time() - first_loop_start_time
        examples_per_second = steps_per_loop * params.task.train_data.global_batch_size / second_loop_time
        benchmark_data.update(
            dict(
                examples_per_second=examples_per_second,
                wall_time=wall_time,
                startup_time=startup_time))
    return benchmark_data
def make_fun(f, *args, **kwargs):
    """
    Call the function ``f`` while working around some limitations of the
    pandas UDF support:
    - support for keyword arguments
    - support for scalar values (as long as they are picklable)
    - support for type hints and input checks.

    :param f: callable carrying ``sig_args``, ``sig_kwargs`` and ``sig_return``
        signature attributes
    :param args: positional arguments (Columns or plain picklable values)
    :param kwargs: keyword arguments (Columns or plain picklable values)
    :return: the result of ``f`` directly when no argument is a Column,
        otherwise a Column produced by a pandas UDF wrapping ``f``
    """
    sig_args = f.sig_args  # type: typing.List[X]
    final_args = []
    col_indexes = []
    frozen_args = []  # None for columns or the value for non-columns
    for (idx, (arg, sig_arg)) in enumerate(zip(args, sig_args)):
        arg2 = _check_compatible(arg, sig_arg)
        if isinstance(arg2, (Column,)):
            col_indexes.append(idx)
            frozen_args.append(None)
        else:
            frozen_args.append(arg2)
        final_args.append(arg2)
    sig_kwargs = f.sig_kwargs  # type: typing.Dict[str, X]
    final_kwargs = {}
    col_keys = []
    frozen_kwargs = {}  # Value is none for kwargs that are columns, and the value otherwise
    # Bug fix: iterating a dict yields only its keys; `.items()` is required
    # to obtain (key, value) pairs. The previous `for (key, arg) in kwargs:`
    # raised as soon as any keyword argument was supplied.
    for (key, arg) in kwargs.items():
        sig_arg = sig_kwargs[key]
        arg2 = _check_compatible(arg, sig_arg)
        final_kwargs[key] = arg2
        if isinstance(arg2, (Column,)):
            col_keys.append(key)
            frozen_kwargs[key] = None
        else:
            frozen_kwargs[key] = arg2
    if not col_keys and not col_indexes:
        # No argument is related to spark
        # The function is just called through without other considerations.
        return f(*args, **kwargs)
    # We detected some columns. They need to be wrapped in a UDF to spark.
    # Only handling the case of columns for now.
    ret_type = f.sig_return
    assert isinstance(ret_type, _Column), ret_type
    spark_ret_type = ret_type.inner
    # Spark UDFs do not handle extra data that is not a column.
    # We build a new UDF that only takes arguments from columns, the rest is
    # sent inside the closure into the function.
    all_indexes = col_indexes + col_keys  # type: typing.List[typing.Union[int, str]]

    def clean_fun(*args2):
        assert len(args2) == len(all_indexes),\
            "Missing some inputs:{}!={}".format(all_indexes, [str(c) for c in args2])
        # Rebuild the full argument lists, splicing the column values back
        # into the positions recorded in all_indexes.
        full_args = list(frozen_args)
        full_kwargs = dict(frozen_kwargs)
        for (arg, idx) in zip(args2, all_indexes):
            if isinstance(idx, int):
                full_args[idx] = arg
            else:
                assert isinstance(idx, str), str(idx)
                full_kwargs[idx] = arg
        return f(*full_args, **full_kwargs)

    udf = pandas_udf(clean_fun, returnType=spark_ret_type)
    wrapped_udf = udf  # udf #_wrap_callable(udf)
    # Feed the UDF the column arguments in the same order as all_indexes.
    col_args = []
    for idx in col_indexes:
        col_args.append(final_args[idx])
    for key in col_keys:
        col_args.append(final_kwargs[key])
    col = wrapped_udf(*col_args)
    # TODO: make more robust
    col._spark_ref_dataframe = col_args[0]._spark_ref_dataframe
    return col
def _get_filter_syntax(_filter_info, _prefix=True):
"""This function retrieves the proper filter syntax for an API call."""
if type(_filter_info) != tuple and type(_filter_info) != list:
raise TypeError("Filter information must be provided as a tuple (element, criteria) or a list of tuples.")
elif type(_filter_info) == tuple:
_filter_info = [_filter_info]
_syntax = ""
if len(_filter_info[0]) > 0:
_define_prefix = {True: '&', False: ''}
_syntax_prefix = _define_prefix.get(_prefix)
for _filter_tuple in _filter_info:
_element, _criteria = _filter_tuple
_syntax = f"{_syntax_prefix}filter={_element}({_criteria})&"
_syntax = _syntax[:-1]
return _syntax | b1817a2a3f004ba2bd44a8f8f272ad685e4d5ebe | 30,813 |
import logging
def pad_and_crop(ndarray, target_shape=(10, 10, 10)):
    """
    Center pad and crop a np.ndarray with any shape to a given target shape.

    In this implementation the pad and crop is invertible; floor/+1 respects
    uneven shapes:
        pad  = floor(x), floor(x)+1
        crop = floor(x)+1, floor(x)

    Parameters
    ----------
    ndarray : numpy.ndarray - of any shape
    target_shape : tuple - must have the same length as ndarray.ndim

    Returns np.ndarray with each axis either padded or cropped.
    Note: the result is allocated with np.zeros, so its dtype is float64
    regardless of the input dtype.
    -------
    """
    empty = np.zeros(target_shape)
    target_shape = np.array(target_shape)
    logging.debug('input shape, crop_and_pad: {}'.format(ndarray.shape))
    logging.debug('target shape, crop_and_pad: {}'.format(target_shape))
    # Per-axis difference: negative -> pad that axis, positive -> crop it.
    diff = ndarray.shape - target_shape
    # divide into summands to work with odd numbers
    # take the same numbers for left or right padding/cropping if the difference is dividable by 2
    # else take floor(x),floor(x)+1 for PAD (diff<0)
    # else take floor(x)+1, floor(x) for CROP (diff>0)
    d = list(
        (int(x // 2), int(x // 2)) if x % 2 == 0 else (int(np.floor(x / 2)), int(np.floor(x / 2) + 1)) if x < 0 else (
            int(np.floor(x / 2) + 1), int(np.floor(x / 2))) for x in diff)
    # replace the second slice parameter if it is None, which slice until end of ndarray
    # (a right margin of 0 cannot be expressed as slice(a, -0), hence the None)
    d = list((abs(x), abs(y)) if y != 0 else (abs(x), None) for x, y in d)
    # create a bool list, negative numbers --> pad, else --> crop
    pad_bool = diff < 0
    crop_bool = diff > 0
    # create one slice obj for cropping and one for padding
    # axes that need no change get the full slice (None, None)
    pad = list(i if b else (None, None) for i, b in zip(d, pad_bool))
    crop = list(i if b else (None, None) for i, b in zip(d, crop_bool))
    # Create one tuple of slice calls per pad/crop
    # crop or pad from dif:-dif if second param not None, else replace by None to slice until the end
    # slice params: slice(start,end,steps)
    pad = tuple(slice(i[0], -i[1]) if i[1] != None else slice(i[0], i[1]) for i in pad)
    crop = tuple(slice(i[0], -i[1]) if i[1] != None else slice(i[0], i[1]) for i in crop)
    # crop and pad in one step: write the cropped source region into the
    # padded destination region of the zero-filled target array
    empty[pad] = ndarray[crop]
    return empty
import math
def pol2cart(r, theta):
    """
    Translate from polar coordinates (radius, angle in degrees) to
    cartesian coordinates (x, y).
    """
    # Hoist the degree-to-radian conversion so it is done once.
    angle = float(theta) / 180 * math.pi
    return (r * math.cos(angle), r * math.sin(angle))
import math
def mass2mk_ben(m):
    """mass2mk_ben - mass to M_K, Benedict et al. (2016) double exponential.

    Usage:
        mk = mass2mk_ben(mass)
    Where mk is absolute 2MASS K magnitude and mass is in solar masses.

    This is the original double-exponential "forward model" (mass to
    absolute magnitude) from the paper.

    NOTE: the range of the parameters is not checked to ensure the
    relation is used within the domain of applicability; this is left to
    the user.

    References:
        Benedict et al. (2016) AJ 152 141
    """
    # Offset from the relation's pivot mass, then sum the two exponentials.
    offset = m - _x0
    term_one = _a1 * math.exp(-_k1 * offset)
    term_two = _a2 * math.exp(-_k2 * offset)
    return term_one + term_two + _y0
def apply_and_concat_one_nb(n, apply_func_nb, *args):  # numba doesn't accepts **kwargs
    """A Numba-compiled version of `apply_and_concat_one`.

    Calls `apply_func_nb(i, *args)` for i in 0..n-1 and stacks the 2D
    results horizontally into one array.

    !!! note
        * `apply_func_nb` must be Numba-compiled
        * `*args` must be Numba-compatible
        * No support for `**kwargs`
    """
    # Probe call: run i=0 once to learn the per-call output shape and dtype.
    # NOTE(review): the preallocation assumes every call returns the same
    # number of columns as the i=0 call — confirm for your apply_func_nb.
    output_0 = to_2d_one_nb(apply_func_nb(0, *args))
    # Preallocate room for n horizontally-stacked outputs.
    output = np.empty((output_0.shape[0], n * output_0.shape[1]), dtype=output_0.dtype)
    for i in range(n):
        if i == 0:
            # Reuse the probe call's result instead of recomputing it.
            outputs_i = output_0
        else:
            outputs_i = to_2d_one_nb(apply_func_nb(i, *args))
        # Write this call's columns into the i-th horizontal slot.
        output[:, i * outputs_i.shape[1]:(i + 1) * outputs_i.shape[1]] = outputs_i
    return output
def convertCovariance2Dto3D(covariance2d):
    """ convert the covariance from [x, y, theta] to [x, y, z, roll, pitch, yaw]

    :param covariance2d: covariance matrix in 3x3 format.
        each row and column corresponds to [x, y, theta]
    :return: covariance matrix in 6x6 format. each row and column corresponds to
        [x, y, z, roll, pitch, yaw],
        where z, roll and pitch values are padded with 0.
    """
    covariance3d = np.zeros([6, 6])
    covariance2d = np.array(covariance2d)
    # Bug fix: the slices previously read 0:1, which copied only the single
    # x element and dropped the y variance and the x-y covariance entirely.
    # The full 2x2 [x, y] block must be copied, theta maps to yaw (index 5).
    covariance3d[0:2, 0:2] = covariance2d[0:2, 0:2]
    covariance3d[5, 0:2] = covariance2d[2, 0:2]
    covariance3d[0:2, 5] = covariance2d[0:2, 2]
    covariance3d[5, 5] = covariance2d[2, 2]
    return covariance3d
import torch
import random
def fit_gan_wasserstein(nb_epoch: int, x_LS: np.array, y_LS: np.array, x_VS: np.array, y_VS: np.array, x_TEST: np.array, y_TEST: np.array, gen, dis, opt_gen, opt_dis, n_discriminator: int, batch_size: int = 100, wdb: bool = False, gpu: bool = True):
    """
    Fit GAN with discriminator using the Wasserstein distance estimate.

    Trains `gen`/`dis` on the learning set (LS) and logs Wasserstein losses
    on the validation (VS) and test (TEST) sets every epoch.

    Returns a (nb_epoch, 6) array of per-epoch losses
    [D_LS, G_LS, D_VS, G_VS, D_TEST, G_TEST], plus the trained gen and dis.
    """
    # to assign the data to GPU with .to(device) on the data
    if gpu:
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    else:
        device = "cpu"
    # Assign models and data to gpu
    gen.to(device)
    dis.to(device)
    x_VS_gpu = torch.tensor(x_VS).to(device).float()
    y_VS_gpu = torch.tensor(y_VS).to(device).float()
    x_TEST_gpu = torch.tensor(x_TEST).to(device).float()
    y_TEST_gpu = torch.tensor(y_TEST).to(device).float()
    loss_list = []
    time_tot = 0.
    # WARNING: batch size = 10 % #LS
    # NOTE(review): the `batch_size` parameter is ignored — it is overwritten
    # here with 10% of the training set size; confirm this is intended.
    batch_size = int(0.1 * y_LS.shape[0])
    for epoch in range(nb_epoch):
        start = timer()
        # Shuffle the data randomly at each epoch
        # (`shuffle` is presumably sklearn.utils.shuffle — verify import site)
        seed = random.randint(0, 2000)
        x_LS_shuffled, y_LS_shuffled = shuffle(x_LS, y_LS, random_state=seed)
        batch_dis_idx = 0
        batch_gen_idx = 0
        loss_D_batch = 0
        loss_G_batch = 0
        # Training batch loop: split points at every batch_size boundary.
        batch_list = [i for i in range(batch_size, batch_size * y_LS.shape[0] // batch_size, batch_size)]
        for y_batch, x_batch in zip(np.split(y_LS_shuffled, batch_list), np.split(x_LS_shuffled, batch_list)):
            y_batch_LS = torch.tensor(y_batch).to(device).float()
            x_batch_LS = torch.tensor(x_batch).to(device).float()
            bs = x_batch_LS.shape[0]
            # 1. Train the Discriminator
            # Critic wants to maximize : E(C(x)) - E(C(G(z)))
            # <~> maximize : mean(C(x)) - mean(C(G(z)))
            # <-> minimize : -{mean(C(x)) - mean(C(G(z)))}
            # Generated samples
            G_LS_samples = gen(noise=torch.randn(bs, gen.latent_s).to(device), context=x_batch_LS)
            # Compute Discriminator's loss
            loss_D = dis.loss(generated_samples=G_LS_samples, true_samples=y_batch_LS, context=x_batch_LS)
            loss_D_batch += loss_D.detach()
            # Update critic's weight
            opt_dis.zero_grad()
            loss_D.backward()
            opt_dis.step()
            # N_CRITIC update for discriminator while one for generator
            # 2. Train the Generator
            if ((batch_dis_idx + 1) % n_discriminator) == 0:
                # Train Generator
                # Generator has the opposed objective that of critic :
                # wants to minimize : E(C(x)) - E(C(G(z)))
                # <-> minimize : - E(C(G(z)))
                # <-> minimize : -(mean(C(G(z)))
                # Generated samples
                G_LS_samples = gen(noise=torch.randn(bs, gen.latent_s).to(device), context=x_batch_LS)
                D_LS = dis(input=G_LS_samples, context=x_batch_LS)
                # Compute generator's loss
                lossG = -torch.mean(D_LS)
                loss_G_batch += lossG.detach()
                # Update generator's weight
                opt_gen.zero_grad()
                lossG.backward()
                opt_gen.step()
                batch_gen_idx += 1
            batch_dis_idx += 1
        # LS loss is the average over all the batch
        loss_D_LS = loss_D_batch / batch_dis_idx
        loss_G_LS = loss_G_batch / batch_gen_idx
        # VS loss
        # D
        G_VS_samples = gen(noise=torch.randn(y_VS_gpu.shape[0], gen.latent_s).to(device), context=x_VS_gpu)
        loss_D_VS = dis.loss(generated_samples=G_VS_samples, true_samples=y_VS_gpu, context=x_VS_gpu).detach()
        # G
        D_VS = dis(input=G_VS_samples, context=x_VS_gpu)
        loss_G_VS = -torch.mean(D_VS).detach()
        # TEST loss
        # D
        G_TEST_samples = gen(noise=torch.randn(y_TEST.shape[0], gen.latent_s).to(device), context=x_TEST_gpu)
        loss_D_TEST = dis.loss(generated_samples=G_TEST_samples, true_samples=y_TEST_gpu, context=x_TEST_gpu).detach()
        # G
        D_TEST = dis(input=G_TEST_samples, context=x_TEST_gpu)
        loss_G_TEST = -torch.mean(D_TEST).detach()
        # Save NF model when the VS loss is minimal
        # NOTE(review): the comment above is stale — no model is saved here,
        # only the per-epoch losses are recorded.
        loss_list.append([loss_D_LS, loss_G_LS, loss_D_VS, loss_G_VS, loss_D_TEST, loss_G_TEST])
        end = timer()
        time_tot += end - start
        if wdb:
            wandb.log({"D ls loss": loss_D_LS})
            wandb.log({"G ls loss": loss_G_LS})
            wandb.log({"D vs loss": loss_D_VS})
            wandb.log({"G vs loss": loss_G_VS})
            wandb.log({"D test loss": loss_D_TEST})
            wandb.log({"G test loss": loss_G_TEST})
        if epoch % 10 == 0:
            print("Epoch {:.0f} Approximate time left : {:2f} min - D LS loss: {:4f} G LS loss: {:4f} D VS loss: {:4f} G VS loss: {:4f}".format(epoch, time_tot / (epoch + 1) * (nb_epoch - (epoch + 1)) / 60, loss_D_LS, loss_G_LS, loss_D_VS, loss_G_VS), end="\r", flush=True)
    print('Fitting time_tot %.0f min' %(time_tot/60))
    # Move the recorded losses back to the CPU as a plain numpy array.
    return np.asarray(torch.tensor(loss_list, device='cpu')), gen, dis
def manifest_file_registration(workspace_id):
    """Register manifest template files.

    Args:
        workspace_id (int): workspace id

    Returns:
        response: HTTP Response
    """
    globals.logger.debug("CALL manifest_file_registration:{}".format(workspace_id))
    try:
        # The registration payload comes straight from the request body.
        specification = request.json
        registered_rows = []
        with dbconnector() as db, dbcursor(db) as cursor:
            for manifest_spec in specification['manifests']:
                # Insert the manifest and re-read the stored row so the
                # response reflects exactly what was persisted.
                new_manifest_id = da_manifest.insert_manifest(cursor, workspace_id, manifest_spec)
                globals.logger.debug('insert manifest_id:{}'.format(str(new_manifest_id)))
                fetched = da_manifest.select_manifest_id(cursor, workspace_id, new_manifest_id)
                registered_rows.append(fetched[0])
        return jsonify({"result": "200", "rows": registered_rows})
    except Exception as e:
        return common.serverError(e)
import math
def pad(image_array, final_dims_in_pixels, zero_fill_mode=False):
    """
    Center-pad image data to final_dims_in_pixels.

    Generalized to arrays of any rank (previously only 2D and 3D inputs
    were handled; other ranks silently returned an all-zero array).

    Attributes:
        image_array (float, np.array): numpy array containing image data (any rank)
        final_dims_in_pixels (list): Final number of pixels per dimension. Example: [256, 256, 80]
        zero_fill_mode (bool): If True, returns array filled with zeros

    Returns:
        padded_image_array (arr): Resized array containing image data

    Raises:
        ValueError: if the dimensionality differs, or any target dimension
            is smaller than the corresponding input dimension.
    """
    dims = len(final_dims_in_pixels)
    original_dims_in_pixels = [image_array.shape[d] for d in range(len(image_array.shape))]
    # test if input and output dimensions match
    if dims != len(original_dims_in_pixels):
        raise ValueError("Dimensions of the input (" + str(len(image_array.shape)) +
                         ") do not match those of output (" + str(len(final_dims_in_pixels)) + ")")
    # test if desired final image is larger than original
    if any(final_dims_in_pixels[d] < original_dims_in_pixels[d] for d in range(dims)):
        raise ValueError("Final dimensions are smaller than original. Did you mean to `crop`?")
    padded_image_array = np.zeros(final_dims_in_pixels)
    if zero_fill_mode:
        # Caller only wants an empty array of the target shape.
        return padded_image_array
    # Center placement. ceil puts the extra pixel of an odd difference on the
    # low-index (left/top) side, matching the original 2D/3D behavior.
    placement = []
    for dim in range(dims):
        start = int(math.ceil((final_dims_in_pixels[dim] - original_dims_in_pixels[dim]) / 2))
        placement.append(slice(start, start + original_dims_in_pixels[dim]))
    padded_image_array[tuple(placement)] = image_array
    return padded_image_array
def save_and_plot(canddatalist):
    """ Converts a canddata list into a plots and a candcollection.

    Calculates candidate features from CandData instance(s).
    Returns structured numpy array of candidate features labels defined in
    st.search_dimensions.
    Generates png plot for peak cands, if so defined in preferences.
    """
    # Normalize the input: a single CandData becomes a one-element list.
    if isinstance(canddatalist, CandData):
        canddatalist = [canddatalist]
    elif isinstance(canddatalist, list):
        if not len(canddatalist):
            return CandCollection()
    else:
        # NOTE(review): this branch only warns and then falls through to use
        # the (invalid) argument anyway — confirm whether it should return.
        # logger.warn is also deprecated in favor of logger.warning.
        logger.warn("argument must be list of CandData object")
    logger.info('Calculating features for {0} candidate{1}.'
                .format(len(canddatalist), 's'[not len(canddatalist)-1:]))
    # All candidates are assumed to share the state of the first one.
    st = canddatalist[0].state
    # One feature list per feature name, each holding one value per candidate.
    featurelists = []
    for feature in st.features:
        ff = []
        for i, canddata in enumerate(canddatalist):
            ff.append(canddata_feature(canddata, feature))
        featurelists.append(ff)
    kwargs = dict(zip(st.features, featurelists))
    candlocs = []
    for i, canddata in enumerate(canddatalist):
        candlocs.append(canddata_feature(canddata, 'candloc'))
    kwargs['candloc'] = candlocs
    # `canddata` here is the last element of the loop above; clustering info
    # is assumed present on all candidates if present on the last one.
    if canddata.cluster is not None:
        clusters = []
        clustersizes = []
        for i, canddata in enumerate(canddatalist):
            clusters.append(canddata_feature(canddata, 'cluster'))
            clustersizes.append(canddata_feature(canddata, 'clustersize'))
        kwargs['cluster'] = clusters
        kwargs['clustersize'] = clustersizes
    candcollection = make_candcollection(st, **kwargs)
    if (st.prefs.savecands or st.prefs.saveplots) and len(candcollection.array):
        # With multiple candidates, pass all SNRs so plots can show context.
        if len(candcollection) > 1:
            snrs = candcollection.array['snr1'].flatten()
        elif len(candcollection) == 1:
            snrs = None
        # save cc and save/plot each canddata
        for i, canddata in enumerate(canddatalist):
            if st.prefs.savecands:
                save_cands(st, canddata=canddata)
            if st.prefs.saveplots:
                # NOTE(review): `clusters` is only defined when
                # canddata.cluster was not None above — this raises NameError
                # for unclustered candidates with saveplots enabled; confirm.
                candplot(canddata, cluster=(clusters[i], clustersizes[i]), snrs=snrs)
    return candcollection
def GetReviewers(host, change):
    """Gets information about all reviewers attached to a change."""
    # Gerrit REST endpoint: changes/<change-id>/reviewers
    return _SendGerritJsonRequest(host, 'changes/%s/reviewers' % change)
def plot_single_points(xs, ys, color=dark_color, s=50, zorder=1e6, edgecolor='black', **kwargs):
    """Scatter-plot individual points and return the resulting artist.

    When *xs* is None, points are placed at consecutive integer
    x-positions 0..len(ys)-1.
    """
    x_values = tuple(range(len(ys))) if xs is None else xs
    return plt.scatter(x_values, ys, marker='o', s=s, color=color,
                       zorder=zorder, edgecolor=edgecolor, **kwargs)
import _ast
def find_imports(source: str, filename=constants.DEFAULT_FILENAME, mode='exec'):
    """Return a list of all module names required by the given source code.

    Only raw source text is accepted: passing an AST would not make sense,
    because an AST we produced already has its imports resolved, and an AST
    parsed from fix_syntax output is internal.
    """
    repaired_source = _fix_syntax(source, filename=filename)
    syntax_tree = _ast.parse(repaired_source, filename, mode)
    return _find_imports(syntax_tree, filename=filename)
def generate_character_data(sentences_train, sentences_dev, sentences_test, max_sent_length, char_embedd_dim=30):
    """
    Generate character-level index tensors for the train/dev/test splits.

    :param sentences_train: training sentences (each a sequence of words)
    :param sentences_dev: dev sentences, same structure
    :param sentences_test: test sentences, same structure
    :param max_sent_length: number of word slots per sentence in the output tensors
    :param char_embedd_dim: width of the randomly initialised character embedding table
    :return: C_train, C_dev, C_test, (char_embedd_table, char_freqs)
    """
    def get_character_indexes(sentences):
        # Map every character to its alphabet index.  Words are truncated to
        # MAX_CHAR_LENGTH characters, but max_length tracks the *untruncated*
        # longest word seen in this split.
        index_sentences = []
        max_length = 0
        for words in sentences:
            index_words = []
            for word in words:
                index_chars = []
                if len(word) > max_length:
                    max_length = len(word)
                for char in word[:MAX_CHAR_LENGTH]:
                    char_id = char_alphabet.get_index(char)
                    index_chars.append(char_id)
                index_words.append(index_chars)
            index_sentences.append(index_words)
        return index_sentences, max_length
    def construct_tensor_char(index_sentences):
        # Build a [num_sentences, max_sent_length, max_char_length] int32 tensor;
        # short words are padded with the word-end id, short sentences with zeros.
        C = np.empty([len(index_sentences), max_sent_length, max_char_length], dtype=np.int32)
        word_end_id = char_alphabet.get_index(word_end)
        for i in range(len(index_sentences)):
            words = index_sentences[i]
            sent_length = len(words)
            for j in range(sent_length):
                chars = words[j]
                char_length = len(chars)
                for k in range(char_length):
                    cid = chars[k]
                    C[i, j, k] = cid
                # fill index of word end after the end of word
                C[i, j, char_length:] = word_end_id
            # Zero out C after the end of the sentence
            C[i, sent_length:, :] = 0
        return C
    def build_char_embedd_table():
        # Uniform init in [-sqrt(3/dim), sqrt(3/dim)], one row per alphabet entry.
        scale = np.sqrt(3.0 / char_embedd_dim)
        char_embedd_table = np.random.uniform(-scale, scale, [char_alphabet.size(), char_embedd_dim]).astype(theano.config.floatX)
        char_freqs = char_alphabet.get_vocab_freqs()
        return (char_embedd_table, char_freqs)
    char_alphabet = Alphabet('character')
    # Reserve an index for the word-end padding symbol before indexing any text.
    char_alphabet.get_index(word_end)
    index_sentences_train, max_char_length_train = get_character_indexes(sentences_train)
    index_sentences_dev, max_char_length_dev = get_character_indexes(sentences_dev)
    index_sentences_test, max_char_length_test = get_character_indexes(sentences_test)
    # close character alphabet
    char_alphabet.close()
    logger.info("character alphabet size: %d" % (char_alphabet.size() - 1))
    # Cap the character dimension at MAX_CHAR_LENGTH even if longer words exist.
    max_char_length = min(MAX_CHAR_LENGTH, max(max_char_length_train, max_char_length_dev, max_char_length_test))
    logger.info("Maximum character length of training set is %d" % max_char_length_train)
    logger.info("Maximum character length of dev set is %d" % max_char_length_dev)
    logger.info("Maximum character length of test set is %d" % max_char_length_test)
    logger.info("Maximum character length used for training is %d" % max_char_length)
    # fill character tensor
    C_train = construct_tensor_char(index_sentences_train)
    C_dev = construct_tensor_char(index_sentences_dev)
    C_test = construct_tensor_char(index_sentences_test)
    return C_train, C_dev, C_test, build_char_embedd_table()
def draw_pie_distribution_of_elements(Genome_EP, ChIP_EP, gprom=(1000, 2000, 3000), gdown=(1000, 2000, 3000), prom=(1000,2000,3000), down=(1000,2000,3000)):
    """Draw the pie charts of the overall distributions of ChIP regions and genome background.

    :param Genome_EP: genome-wide element proportions, read as Genome_EP['whole'][key]
    :param ChIP_EP: ChIP-region element proportions, same layout as Genome_EP
    :param gprom: promoter distance cut-offs (bp) for the genome-chart legend
    :param gdown: downstream distance cut-offs (bp) for the genome-chart legend
    :param prom: promoter distance cut-offs (bp) for the ChIP-chart legend
    :param down: downstream distance cut-offs (bp) for the ChIP-chart legend
    :return: an R script (string) rendering both pie charts with their legends
    """
    # get the labels (legend) for the genome pie chart
    gnames = ["Promoter (<=%d bp)" %gprom[0]]
    gnames += ["Promoter (%d-%d bp)" %(p1, p2) for p1, p2 in zip(gprom[:-1], gprom[1:])]
    gnames += ["Downstream (<=%d bp)" %gdown[0]]
    gnames += ["Downstream (%d-%d bp)" %(d1, d2) for d1, d2 in zip(gdown[:-1], gdown[1:])]
    gnames += ["5'UTR","3'UTR", "Coding exon", "Intron", "Distal intergenic"]
    # get the labels (legend) for the ChIP pie chart
    names = ["Promoter (<=%d bp)" %prom[0]]
    names += ["Promoter (%d-%d bp)" %(p1, p2) for p1, p2 in zip(prom[:-1], prom[1:])]
    names += ["Downstream (<=%d bp)" %down[0]]
    names += ["Downstream (%d-%d bp)" %(d1, d2) for d1, d2 in zip(down[:-1], down[1:])]
    names += ["5'UTR","3'UTR", "Coding exon", "Intron", "Distal intergenic"]
    # get the ChIP proportions to draw (order matches the legend labels above)
    x = ChIP_EP['whole']['promoter'] + ChIP_EP['whole']['downstream'] + ChIP_EP['whole']['gene'] + [ChIP_EP['whole']['enhancer']]
    x_percent = _percent_str([100.0*a for a in x])
    names_w_percent_x = list(map(lambda x, y: x + ': ' + y, names, x_percent))
    # make x values less than .1% .5% because they are too small to see in the pie chart. But x_percent does not change
    x = list(map(max, x, [0.01]*len(x)))
    # get the genome proportions to draw
    y = Genome_EP['whole']['promoter'] + Genome_EP['whole']['downstream'] + Genome_EP['whole']['gene'] + [Genome_EP['whole']['enhancer']]
    y_percent = _percent_str([100.0*a for a in y])
    names_w_percent_y = list(map(lambda x, y: x + ': ' + y, gnames, y_percent))
    # make y values less than .1% .5% because they are too small to see in the pie chart. But y_percent does not change
    y = list(map(max, y, [0.01]*len(y)))
    #
    # producing R script return
    #
    # put header
    rscript = '\n'
    rscript += R.comment('')
    rscript += R.comment('Distribution of Genome and ChIP regions over cis-regulatory element')
    rscript += R.comment('Note that the x may be modified for better graphics in case a value is too small')
    rscript += R.comment('Thus, look at the labels of the pie chart to get the real percentage values' )
    rscript += R.comment('')
    rscript += '\n'
    # some graphical parameters
    init_angle = 90
    density = 100
    main_x = 'ChIP'
    main_y = 'Genome'
    # pie chart colors
    cols = ["#445FA2","#EB9D86","#799F7A","#6C527F","#5FA1C1","#E8BB77","#A8C5EF","#FDCDB9","#C6E6B5","#F1D5EE","#B4E1F6"]
    # NOTE(review): "mar = mar=[...]" is a redundant chained assignment --
    # harmless, but likely a leftover typo.
    mar = mar=[3,3,4,2.8]
    oma=[4,2,4,2]
    mfcol = [2, 2]
    rscript += R.par(mar=mar, oma=oma, mfcol=mfcol)
    # genome pie chart plus a dummy plot used only to host its legend
    rscript += R.pie(y, labels=y_percent, main=main_y, col=cols,clockwise=True, radius=0.9,init_angle=init_angle, cex=0.8, density=density)
    rscript += R.plot([0,1],[0,1], tp="n", axes=False, xlab="", ylab="", main="", frame=False)
    rscript += R.legend(x='top', legend=names_w_percent_y, pch=15, col=cols, bty="n")
    # ChIP pie chart plus its legend host plot
    rscript += R.pie(x, labels=x_percent, main=main_x, col=cols,clockwise=True, radius=0.9,init_angle=init_angle, cex=0.8, density=density)
    rscript += R.plot([0,1],[0,1], tp="n", axes=False, xlab="", ylab="", main="", frame=False)
    rscript += R.legend(x='top', legend=names_w_percent_x, pch=15, col=cols, bty="n")
    return rscript
def get_neighborhood(leaflet, mdsys):
    """Build a Neighborhood object for the given leaflet.

    Pairwise distances between leaflet positions are computed under the
    periodic box taken from the first three box dimensions of *mdsys*.
    """
    box = mdsys.dimensions[:3]
    pair_distances = distances.distance_array(leaflet.positions, leaflet.positions, box)
    return Neighborhood(leaflet.positions, pair_distances, box)
def clean_title(title: str) -> str:
    """Strip unwanted additional text (feature credits etc.) from a title.

    Scans for segments introduced by "(", "[", or a dash; when such a
    segment mentions a featuring/explicit marker, everything from the
    segment's separator onward is dropped.

    Args:
        title: Raw track title.

    Returns:
        The cleaned title with surrounding whitespace stripped.
    """
    # Fix: the original list repeated " (" and " [", which only caused
    # redundant identical re-scans; duplicates removed, order preserved.
    for splitter in (" (", " [", " - ", "-"):
        if splitter not in title:
            continue
        for title_part in title.split(splitter):
            # Trim anything after a closing bracket so the marker check
            # only sees the segment's own text.
            for end_splitter in (")", "]"):
                if end_splitter in title_part:
                    title_part = title_part.split(end_splitter)[0]
            for ignore_str in ("feat.", "featuring", "ft.", "with ", "explicit"):
                if ignore_str in title_part.lower():
                    return title.split(splitter + title_part)[0].strip()
    return title.strip()
def get_dates():
    """Fetch the ``date`` column from the tweets table.

    :return: result of ``cx.read_sql`` for the query (all tweet dates)
    """
    query = "SELECT date FROM tweets"
    return cx.read_sql(db, query)
def _convert_to_dict(best_param):
"""
Utiliy method for converting best_param string to dict
Args:
:best_param: the best_param string
Returns:
a dict with param->value
"""
best_param_dict = {}
for hp in best_param:
hp = hp.split('=')
best_param_dict[hp[0]] = hp[1]
return best_param_dict | 318ed529b0f411b1b671de34a4b0f4ecf3dc9780 | 30,831 |
import os
def read_in(file_index, normalized, train, ratio):
    """
    Reads in a file and can toggle between normalized and original files
    :param file_index: patient number as string
    :param normalized: binary that determines whether the files should be normalized or not
    :param train: int that determines whether or not we are reading in data to train the model (1),
    for encoding (0), or for creating noisy train data (2)
    :param ratio: ratio to split the files into train and test
    :return: returns npy array of patient data across 4 leads
    """
    # filepath = os.path.join("Working_Data", "Normalized_Fixed_Dim_HBs_Idx" + file_index + ".npy")
    # filepath = os.path.join("Working_Data", "1000d", "Normalized_Fixed_Dim_HBs_Idx35.npy")
    filepath = "Working_Data/Normalized_Fixed_Dim_HBs_Idx" + str(file_index) + ".npy"
    if normalized == 1:
        if train == 1:
            # Plain train/test split of the normal heartbeats (abnormal is discarded).
            normal_train, normal_test, abnormal = patient_split_train(filepath, ratio)
            # noise_factor = 0.5
            # noise_train = normal_train + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=normal_train.shape)
            return normal_train, normal_test
        elif train == 0:
            training, test, full = patient_split_all(filepath, ratio)
            return training, test, full
        elif train == 2:
            train_, test, full = patient_split_all(filepath, ratio)
            # Triple the training data by appending two Gaussian-noised copies
            # (the old "4x" comment overstated this; a third copy is commented out).
            noise_factor = 0.5
            noise_train = train_ + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=train_.shape)
            noise_train2 = train_ + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=train_.shape)
            # noise_train3 = train_ + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=train_.shape)
            train_ = np.concatenate((train_, noise_train, noise_train2))
            return train_, test, full
        # NOTE(review): normalized == 1 with any other `train` value falls
        # through and implicitly returns None — confirm callers never do this.
    else:
        # Un-normalized path; NOTE(review): uses `file_index` without str()
        # unlike the normalized path above — confirm file_index is a string here.
        data = np.load(os.path.join("Working_Data", "Fixed_Dim_HBs_Idx" + file_index + ".npy"))
        return data
def _expand_currency(data: dict) -> str:
    """
    Verbalizes currency tokens.
    Args:
        data: detected data; expected keys: 'currency', 'integral', and
            optionally 'fractional' and 'magnitude'
    Returns string
    """
    currency = _currency_dict[data['currency']]
    quantity = data['integral'] + ('.' + data['fractional'] if data.get('fractional') else '')
    magnitude = data.get('magnitude')
    # remove commas from quantity to be able to convert to numerical
    quantity = quantity.replace(',', '')
    # check for million, billion, etc...
    if magnitude is not None and magnitude.lower() in _magnitudes:
        if len(magnitude) == 1:
            # single-letter magnitude (e.g. 'M') -> expand via lookup table
            magnitude = _magnitudes_dict[magnitude.lower()]
        return "{} {} {}".format(_expand_hundreds(quantity), magnitude, currency + 's')
    parts = quantity.split('.')
    if len(parts) > 2:
        return quantity + " " + currency + "s"  # Unexpected format
    dollars = int(parts[0]) if parts[0] else 0
    # NOTE(review): the fractional part is read as an integer cent count, so
    # "1.5" becomes 5 cents rather than 50 — confirm upstream always emits
    # two-digit fractions.
    cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
    if dollars and cents:
        # e.g. "three dollars, fifty cents"; singular unit when count is 1
        dollar_unit = currency if dollars == 1 else currency + 's'
        cent_unit = 'cent' if cents == 1 else 'cents'
        return "{} {}, {} {}".format(
            _expand_hundreds(dollars), dollar_unit, _inflect.number_to_words(cents), cent_unit
        )
    elif dollars:
        dollar_unit = currency if dollars == 1 else currency + 's'
        return "{} {}".format(_expand_hundreds(dollars), dollar_unit)
    elif cents:
        cent_unit = 'cent' if cents == 1 else 'cents'
        return "{} {}".format(_inflect.number_to_words(cents), cent_unit)
    else:
        return 'zero' + ' ' + currency + 's'
def getCampaignID(title):
    """Return the id of a campaign with the given title.

    Args:
        title: Campaign title to look up.

    Returns:
        The campaign id, or -1 if no campaign with that title exists.
    """
    conn = connectToDB()
    try:
        cur = conn.cursor()
        print(title)
        # mogrify parameterises the title, so no SQL-injection risk here.
        query = cur.mogrify('select id from campaigns where title = %s;', (title,))
        print(query)
        cur.execute(query)
        results = cur.fetchone()
        return results[0] if results else -1
    finally:
        # Fix: the original leaked the DB connection on every call.
        conn.close()
import time
def calculate_exe_time(input_function):
    """
    Decorator that takes in a function and reports its execution time.

    :param input_function: the callable to be wrapped.
    :return process_time: wrapper that forwards all arguments, prints the
        elapsed wall-clock time, and returns the wrapped callable's result.
    """
    from functools import wraps

    @wraps(input_function)
    def process_time(*args, **kwargs):
        # Fixes: the original called the `time` module itself (TypeError
        # under `import time`), dropped keyword arguments, and discarded
        # the wrapped function's return value.
        start = time.time()
        result = input_function(*args, **kwargs)
        end = time.time()
        print(f"Execution time: {end-start} secs")
        return result
    return process_time
import torch
def load_xyz_from_txt(file_name):
    """Load xyz poses from txt. Each line is: x,y,z
    Args:
        file_name (str): txt file path
    Returns:
        torch.Tensor: Trajectory in the form of homogenous transformation matrix. Shape [N,4,4]
    """
    # Uses the module-level `device` to place the tensor on CPU/GPU.
    global device
    poses = np.genfromtxt(file_name, delimiter=',')
    poses = torch.Tensor(poses).to(device)
    # NOTE(review): tgm.rtvec_to_pose presumably converts an Rt vector into a
    # 4x4 pose; here it receives only xyz values, so the rotation part is
    # presumably identity/zero — confirm against torchgeometry docs.
    poses = tgm.rtvec_to_pose(poses)  # [n,4,4]
    return poses
def category_add():
    """Handle the add-category form: GET renders it, a valid POST saves.

    On a successful insert the user is redirected to the product search;
    any other case re-renders the form.
    """
    form = CategoryForm(request.form)
    submitted = request.method == "POST" and form.validate()
    if submitted:
        new_name = form.name.data.lower()
        if category_check(new_name):
            # Persist the new category and notify the user.
            mongo.db.categories.insert_one({"name": new_name})
            flash(
                "Category " + new_name +
                " succesfully added", "success")
            return redirect(url_for('products.search'))
    # Plain GET, invalid submission, or duplicate name: show the form again.
    return render_template("category_add.html", form=form)
import requests
def generate_text(input: TextGenerationInput) -> TextGenerationOutput:
    """Call the remote text-generation endpoint with the given prompt settings."""
    request_body = {
        "text": input.text,
        "temperature": input.temperature,
        "min_length": input.min_length,
        "max_length": input.max_length,
        "do_sample": input.do_sample,
    }
    response = requests.post(API_ENDPOINT, json=request_body)
    body = response.json()
    print(body)
    return TextGenerationOutput(generated_text=body["generated_text"])
def file_content_hash(file_name, encoding, database=None):
    """
    Return the hash of the contents of the file.

    Delegates to _file_content_hash, which uses the database to keep a
    persistent cache of the last content hash.
    """
    return _file_content_hash(file_name, encoding, database)[1]
import inspect
def pass_multiallelic_sites(mqc):
    """
    The number of PASS multiallelic sites.
    Source: count_variants.py (bcftools view)

    Args:
        mqc: MultiQC data dict; the count is read from the first entry of
            ``mqc["multiqc_npm_count_variants"]``.

    Returns:
        Tuple of (metric name, count); count is "NA" when unavailable.
    """
    # Metric name is taken from this function's own name.
    k = inspect.currentframe().f_code.co_name
    try:
        d = next(iter(mqc["multiqc_npm_count_variants"].values()))
        v = int(d["pass_multiallelic_sites"])
    except (KeyError, StopIteration, TypeError, ValueError):
        # Missing section, empty section, non-dict input, or a non-numeric
        # value all mean the metric is unavailable.  The original caught
        # only KeyError and crashed on the other cases.
        v = "NA"
    return k, v
import json
def read_dialog_file(json_filename: str) -> list[Message]:
    """
    Read messages from the dialog file.

    @return: list of Message objects (without intent)
    """
    with open(json_filename, encoding="utf8") as dialog_json:
        raw_messages = json.load(dialog_json)
    return [
        Message(is_bot=entry["is_bot"], text=entry["text"])
        for entry in raw_messages
    ]
def dtwavexfm3(X, nlevels=3, biort=DEFAULT_BIORT, qshift=DEFAULT_QSHIFT,
               include_scale=False, ext_mode=4, discard_level_1=False):
    """Perform an *nlevels*-deep 3D dual-tree complex wavelet decomposition of *X*.

    :param X: 3D real array-like object.
    :param nlevels: number of levels of wavelet decomposition.
    :param biort: level-1 wavelet spec — a name passed to
        :py:func:`dtcwt.coeffs.biort`, or a tuple of filter coefficient
        vectors (h0o, g0o, h1o, g1o).
    :param qshift: level >= 2 wavelet spec — a name passed to
        :py:func:`dtcwt.coeffs.qshift`, or a tuple
        (h0a, h0b, g0a, g0b, h1a, h1b, g1a, g1b).
    :param include_scale: when True, additionally return the per-level scales.
    :param ext_mode: extension mode, 4 or 8; controls the divisibility checks
        and edge padding applied at each level (4: level-1 sizes divisible by
        2 and later levels padded to multiples of 4; 8: divisible by 4 and
        padded to multiples of 8).
    :param discard_level_1: when True, the level-1 highpass bands are never
        computed and ``Yh[0]`` is None — 1:1 instead of 8:1 redundancy, at
        the cost of perfect reconstruction (:py:func:`dtwaveifm3` treats the
        missing level as zero).
    :returns: ``(Yl, Yh)`` — the final-level real lowpass image and a tuple
        of 4D complex highpass arrays, one per level, with 28 directions in
        the last axis; with *include_scale*, ``(Yl, Yh, Yscale)``.
    """
    xfm = Transform3d(biort, qshift, ext_mode)
    decomposition = xfm.forward(X, nlevels, include_scale, discard_level_1)
    if include_scale:
        return decomposition.lowpass, decomposition.highpasses, decomposition.scales
    return decomposition.lowpass, decomposition.highpasses
def character_regions(img, line_regs, bg_thresh=None, **kwargs):
    """
    Find the characters in an image given the regions of lines of text.

    Args:
        img (numpy.ndarray): Grayscaled image.
        line_regs (list[tuple[int, int]]): Row ranges of the text lines.
        bg_thresh (Optional[int]): Background threshold up to which a pixel
            is considered text and not part of the background. If not
            provided, a default background threshold is calculated for each
            line region in the image and used instead.
        **kwargs: Keyword arguments passed to text_regions.

    Returns:
        list: Per-line character regions as returned by text_regions.
    """
    assert len(img.shape) == 2
    regions = []
    w = img.shape[1]
    for start, end in line_regs:
        sub_img = img[start:end+1, :]
        # Fix: the original wrote the computed default back onto bg_thresh,
        # so every line after the first silently reused the first line's
        # threshold instead of getting its own (contradicting the docstring).
        if bg_thresh is None:
            line_thresh = default_background_threshold(sub_img)
        else:
            line_thresh = bg_thresh
        # Sanity check
        assert w == sub_img.shape[1]
        pixels = colored_pixels(sub_img, line_thresh)
        x_distr, y_distr = zip(*pixels)
        char_regions = text_regions(x_distr, w, **kwargs)
        regions.append(char_regions)
    return regions
def cache_clear(request):
    """Clear the cache directory and report the resulting cache size."""
    Core.get_instance().clear_cache()
    size_after = Core.get_instance().get_cache_size()
    return {'size': size_after}
def sort_crp_tables(tables):
    """Order CRP tables largest-first and flatten their members.

    Tables are sorted by (size, minimum member) descending; returns the
    concatenated members in that order together with the cumulative table
    sizes marking each table's end index.
    """
    def table_rank(name):
        members = tables[name]
        return (len(members), min(members))

    ordered = sorted(tables, key=table_rank, reverse=True)
    items = []
    sizes = []
    for name in ordered:
        members = tables[name]
        items.extend(members)
        sizes.append(len(members))
    return (items, np.cumsum(sizes))
def sum_category_hours(day, now, timelog=TIMELOG, category_hours=False):
    """Accumulate per-category durations for the activities of *day*.

    When *category_hours* is falsy a fresh dict is used; otherwise the
    given mapping is extended and returned.
    """
    totals = category_hours if category_hours else {}
    for activity in get_rows(day, timelog):
        cat = activity.category
        dur = activity.get_duration(now)
        if cat in totals:
            totals[cat] += dur
        else:
            totals[cat] = dur
    return totals
def run_factory(
    factory,  # type: LazyFactory
    args=None,  # type: Optional[Iterable[Any]]
    kwargs=None,  # type: Optional[Mapping[str, Any]]
):
    # type: (...) -> Any
    """
    Import a lazy factory and invoke it.

    .. code:: python

        >>> from objetto.utils.factoring import run_factory
        >>> bool(run_factory("re|match", (r"^[a-z]+$", "abc")))
        True

    :param factory: Lazy factory.
    :type factory: str or function or collections.abc.Callable or None
    :param args: Positional arguments forwarded to the factory function.
    :param kwargs: Keyword arguments forwarded to the factory function.
    :return: Whatever the factory returns, or None when there is no factory.
    """
    resolved = import_factory(factory)
    if resolved is None:
        return None
    call_args = args or ()
    call_kwargs = kwargs or {}
    return resolved(*call_args, **call_kwargs)
def steps_smoother(steps, resolution):
    """Linearly interpolate between consecutive gait steps (wrapping around).

    :param steps: sequence of steps; each step holds [joint0, joint1]
        positions for each of the 4 feet
    :param resolution: number of interpolated sub-steps per original step
    :return: smoothed sequence with len(steps) * resolution entries
    """
    n_steps = len(steps)
    smoothed = []
    for idx in range(n_steps):
        current = steps[idx]
        upcoming = steps[(idx + 1) % n_steps]
        for sub in range(resolution):
            frame = []
            for foot in range(4):
                now_pos = current[foot]
                next_pos = upcoming[foot]
                joint0 = now_pos[0] + sub * ((next_pos[0] - now_pos[0]) / resolution)
                joint1 = now_pos[1] + sub * ((next_pos[1] - now_pos[1]) / resolution)
                frame.append([joint0, joint1])
            smoothed.append(frame)
    return smoothed
from typing import Optional
def get_instance_server(name: Optional[str] = None,
                        server_id: Optional[str] = None,
                        zone: Optional[str] = None,
                        opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetInstanceServerResult:
    """
    Gets information about an instance server.
    ## Example Usage
    ```python
    import pulumi
    import pulumi_scaleway as scaleway
    my_key = scaleway.get_instance_server(server_id="11111111-1111-1111-1111-111111111111")
    ```
    :param name: The server name. Only one of `name` and `server_id` should be specified.
    :param server_id: The server id. Only one of `name` and `server_id` should be specified.
    :param zone: `zone`) The zone in which the server exists.
    :param opts: Options to control the invoke behavior; a default is created
        and the provider version filled in when not supplied.
    """
    # Build the invoke arguments; unset values stay None and are ignored server-side.
    __args__ = dict()
    __args__['name'] = name
    __args__['serverId'] = server_id
    __args__['zone'] = zone
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronously invoke the provider function and unwrap the typed result.
    __ret__ = pulumi.runtime.invoke('scaleway:index/getInstanceServer:getInstanceServer', __args__, opts=opts, typ=GetInstanceServerResult).value
    # Re-wrap every field into the awaitable result type expected by callers.
    return AwaitableGetInstanceServerResult(
        additional_volume_ids=__ret__.additional_volume_ids,
        boot_type=__ret__.boot_type,
        bootscript_id=__ret__.bootscript_id,
        cloud_init=__ret__.cloud_init,
        enable_dynamic_ip=__ret__.enable_dynamic_ip,
        enable_ipv6=__ret__.enable_ipv6,
        id=__ret__.id,
        image=__ret__.image,
        ip_id=__ret__.ip_id,
        ipv6_address=__ret__.ipv6_address,
        ipv6_gateway=__ret__.ipv6_gateway,
        ipv6_prefix_length=__ret__.ipv6_prefix_length,
        name=__ret__.name,
        organization_id=__ret__.organization_id,
        placement_group_id=__ret__.placement_group_id,
        placement_group_policy_respected=__ret__.placement_group_policy_respected,
        private_ip=__ret__.private_ip,
        private_networks=__ret__.private_networks,
        project_id=__ret__.project_id,
        public_ip=__ret__.public_ip,
        root_volumes=__ret__.root_volumes,
        security_group_id=__ret__.security_group_id,
        server_id=__ret__.server_id,
        state=__ret__.state,
        tags=__ret__.tags,
        type=__ret__.type,
        user_data=__ret__.user_data,
        zone=__ret__.zone)
def leaper(x, y, int1, int2):
    """Return the eight (int1, int2)-leap squares around (x, y).

    Applies every sign combination and swap of the two offsets (e.g. the
    knight's moves for offsets 1, 2); used for no_conflict tests.
    """
    jumps = []
    for dx, dy in ((int1, int2), (int2, int1)):
        jumps.append((x + dx, y + dy))
        jumps.append((x - dx, y + dy))
        jumps.append((x + dx, y - dy))
        jumps.append((x - dx, y - dy))
    return jumps
def calc_offsets(obj):
    """
    Map highlighted words in a search hit to their box-selector coordinates.

    The hit's 'fullsnip' annotation is the full text of the indexable
    resource with <start_sel>/<end_sel> wrapping each highlighted word.
    If the indexable has a selector with a 'box-selector' list, the xywh
    entry at each highlighted word's index is collected and returned.
    Returns None when there is no snippet, no highlights, no selector
    boxes, or no matching coordinates.
    """
    if not hasattr(obj, "fullsnip"):
        return None
    tokens = obj.fullsnip.split(" ")
    hit_positions = [
        idx for idx, token in enumerate(tokens)
        if "<start_sel>" in token and "<end_sel>" in token
    ]
    if not hit_positions:
        return None
    if not obj.selector:
        return None
    boxes = obj.selector.get("box-selector")
    if boxes is None:
        return None
    coords = []
    for pos in hit_positions:
        try:
            coords.append(boxes[pos])
        except (IndexError, ValueError):
            # Highlight index beyond the available boxes: skip it.
            pass
    return coords if coords else None
import argparse
import sys
def parse_args():
    """
    Parse command-line arguments for the fibermorph CLI.

    Exactly one module flag (raw2gray / curvature / section / demo_*) must be
    given; non-demo modules additionally require both --input_directory and
    --output_directory, otherwise the process exits with an error message.

    Returns
    -------
    Parser argument namespace
    """
    parser = argparse.ArgumentParser(description="Fibermorph")
    # Shared I/O and imaging options
    parser.add_argument(
        "--output_directory", default=None,
        help="Required. Full path to and name of desired output directory. "
             "Will be created if it doesn't exist.")
    parser.add_argument(
        "--input_directory", default=None,
        help="Required. Full path to and name of desired directory containing "
             "input files.")
    parser.add_argument(
        "--resolution_mm", type=int, default=132,
        help="Integer. Number of pixels per mm.")
    parser.add_argument(
        "--resolution_mu", type=float, default=4.25,
        help="Float. Number of pixels per micron.")
    parser.add_argument(
        "--file_extension", type=str, default=".RW2",
        help="Optional. String. Extension of input files to use in input_directory when using raw2gray function. Default is .RW2.")
    parser.add_argument(
        "--jobs", type=int, default=1,
        help="Integer. Number of parallel jobs to run. Default is 1.")
    parser.add_argument(
        "--window_size", type=float, default=1.0,
        help="Float. Desired size for window of measurement in mm. Default is 1.0.")
    parser.add_argument(
        "--minsize", type=int, default=20,
        help="Integer. Minimum diameter in microns for sections. Default is 20.")
    parser.add_argument(
        "--maxsize", type=int, default=150,
        help="Integer. Maximum diameter in microns for sections. Default is 150.")
    parser.add_argument(
        "--save_image", type=bool, default=False,
        help="Boolean. Default is False. Whether the curvature function should save images for intermediate image "
             "processing steps.")
    # Create mutually exclusive flags for each of fibermorph's modules
    module_group = parser.add_mutually_exclusive_group(required=True)
    module_group.add_argument(
        "--raw2gray", action="store_true", default=False,
        help="")
    module_group.add_argument(
        "--curvature", action="store_true", default=False,
        help="")
    module_group.add_argument(
        "--section", action="store_true", default=False,
        help="")
    module_group.add_argument(
        "--demo_real_curv", action="store_true", default=False,
        help="")
    module_group.add_argument(
        "--demo_real_section", action="store_true", default=False,
        help="")
    module_group.add_argument(
        "--demo_dummy_curv", action="store_true", default=False,
        help="")
    module_group.add_argument(
        "--demo_dummy_section", action="store_true", default=False,
        help="")
    module_group.add_argument(
        "--demo_teardown_data", action="store_true", default=False,
        help="")
    module_group.add_argument(
        "--demo_delete_results_cache", action="store_true", default=False,
        help="")
    args = parser.parse_args()
    # Validate arguments: demo modules need no I/O directories, the rest do.
    demo_mods = [
        args.demo_real_curv,
        args.demo_real_section,
        args.demo_dummy_curv,
        args.demo_dummy_section,
        args.demo_teardown_data,
        args.demo_delete_results_cache]
    if any(demo_mods) is False:
        if args.input_directory is None and args.output_directory is None:
            sys.exit("ExitError: need both --input_directory and --output_directory")
        if args.input_directory is None:
            sys.exit("ExitError: need --input_directory")
        if args.output_directory is None:
            sys.exit("ExitError: need --output_directory")
    return args
def spread(self, value="", **kwargs):
    """Turn on a dashed tolerance curve for the subsequent curve plots.

    APDL Command: SPREAD

    Parameters
    ----------
    value
        Amount of tolerance. For example, 0.1 is ± 10%.
    """
    command = "SPREAD,%s" % (str(value))
    return self.run(command, **kwargs)
def _ValidateCandidateImageVersionId(current_image_version_id,
                                     candidate_image_version_id):
    """Report whether the candidate image version is a valid upgrade.

    A candidate is rejected when it equals the current version, or when
    either its (non-aliased) Composer or Airflow component is not
    upgrade-compatible with the current one.
    """
    if current_image_version_id == candidate_image_version_id:
        return False
    current = _ImageVersionItem(image_ver=current_image_version_id)
    candidate = _ImageVersionItem(image_ver=candidate_image_version_id)
    # Aliased components skip the compatibility check entirely.
    composer_ok = (candidate.composer_contains_alias or
                   _IsComposerVersionUpgradeCompatible(current.composer_ver,
                                                       candidate.composer_ver))
    if not composer_ok:
        return False
    airflow_ok = (candidate.airflow_contains_alias or
                  _IsAirflowVersionUpgradeCompatible(current.airflow_ver,
                                                     candidate.airflow_ver))
    if not airflow_ok:
        return False
    return True
import random
def run_mc_sim(lattice, num_lattice_steps, data_dict, io_dict, simsetup, exosome_string=EXOSTRING,
exosome_remove_ratio=0.0, ext_field_strength=FIELD_SIGNAL_STRENGTH, app_field=None,
app_field_strength=FIELD_APPLIED_STRENGTH, beta=BETA, plot_period=LATTICE_PLOT_PERIOD,
flag_uniplots=False, state_int=False, meanfield=MEANFIELD):
"""
Form of data_dict:
{'memory_proj_arr':
{memory_idx: np array [N x num_steps] of projection each grid cell onto memory idx}
'grid_state_int': n x n x num_steps of int at each site
(int is inverse of binary string from state)
Notes:
-can replace update_with_signal_field with update_state to simulate ensemble
of non-interacting n**2 cells
"""
def input_checks(app_field):
n = len(lattice)
assert n == len(lattice[0]) # work with square lattice for simplicity
num_cells = n * n
assert SEARCH_RADIUS_CELL < n / 2.0 # to prevent signal double counting
if app_field is not None:
if len(app_field.shape) > 1:
assert app_field.shape[0] == simsetup['N']
assert len(app_field[1]) == num_lattice_steps
else:
app_field = np.array([app_field for _ in range(num_lattice_steps)]).T
app_field_step = app_field[:, 0]
else:
app_field_step = None
return n, num_cells, app_field, app_field_step
def update_datadict_timestep_cell(lattice, loc, memory_idx_list, timestep_idx):
cell = lattice[loc[0]][loc[1]]
# store the projections
proj = cell.get_memories_projection(simsetup['A_INV'], simsetup['XI'])
for mem_idx in memory_idx_list:
data_dict['memory_proj_arr'][mem_idx][loc_to_idx[loc], timestep_idx] = proj[mem_idx]
# store the integer representation of the state
if state_int:
data_dict['grid_state_int'][loc[0], loc[1], timestep_idx] = cell.get_current_label()
return proj
def update_datadict_timestep_global(lattice, timestep_idx):
data_dict['lattice_energy'][timestep_idx, :] = calc_lattice_energy(
lattice, simsetup, app_field_step, app_field_strength, ext_field_strength, SEARCH_RADIUS_CELL,
exosome_remove_ratio, exosome_string, meanfield)
data_dict['compressibility_full'][timestep_idx, :] = calc_compression_ratio(
get_state_of_lattice(lattice, simsetup, datatype='full'),
eta_0=None, datatype='full', elemtype=np.int, method='manual')
def lattice_plot_init(lattice, memory_idx_list):
lattice_projection_composite(lattice, 0, n, io_dict['latticedir'], simsetup, state_int=state_int)
reference_overlap_plotter(lattice, 0, n, io_dict['latticedir'], simsetup, state_int=state_int)
if flag_uniplots:
for mem_idx in memory_idx_list:
lattice_uniplotter(lattice, 0, n, io_dict['latticedir'], mem_idx, simsetup)
def meanfield_global_field():
    """Build the initial global mean-field signal seen by every cell.

    Sums all cell states, converts spins to 0/1 occupancy, and applies the
    paracrine send matrix. Returns the global field vector (length N).
    """
    # TODO careful: not clear best way to update exo field as cell state changes in a time step,
    #  refactor exo fn?
    assert exosome_string == 'no_exo_field'
    print('Initializing mean field...')
    # TODO decide if want scale factor to be rescaled by total popsize (i.e. *mean*field or total field?)
    state_total = np.zeros(simsetup['N'])
    field_global = np.zeros(simsetup['N'])
    # TODO ok that cell is neighbour with self as well? should remove diag
    neighbours = [[a, b] for a in range(len(lattice[0])) for b in range(len(lattice))]
    if simsetup['FIELD_SEND'] is not None:
        for loc in neighbours:
            state_total += lattice[loc[0]][loc[1]].get_current_state()
        # map spin sum in [-num_cells, num_cells] to 0/1 occupancy counts
        state_total_01 = (state_total + num_cells) / 2
        field_paracrine = np.dot(simsetup['FIELD_SEND'], state_total_01)
        field_global += field_paracrine
    # dead branch under the assert above; kept for when exosomes are re-enabled
    if exosome_string != 'no_exo_field':
        field_exo, _ = lattice[0][0].\
            get_local_exosome_field(lattice, None, None, exosome_string=exosome_string,
                                    exosome_remove_ratio=exosome_remove_ratio, neighbours=neighbours)
        field_global += field_exo
    return field_global
def parallel_block_update_lattice(J_block, s_block_current, applied_field_block, total_spins):
    """Synchronously update all spins of the block state vector (Glauber rule).

    Args:
        J_block: (total_spins, total_spins) multicell coupling matrix.
        s_block_current: current +/-1 spin vector; mutated in place.
        applied_field_block: optional external field vector, or None.
        total_spins: length of the spin vector.

    Returns:
        The (in-place updated) spin vector.
    """
    # TODO (determ vs 0 temp?)
    total_field = np.dot(J_block, s_block_current)
    # TODO deal with time dependent applied field on the whole lattice here (e.g. if using
    #  housekeeping or other)
    if applied_field_block is not None:
        # closure: app_field_strength scales the external field contribution
        total_field = total_field + app_field_strength * applied_field_block
    # probability that site i will be "up" after the timestep (closure: beta)
    prob_on_after_timestep = 1 / (1 + np.exp(-2 * beta * total_field))
    rsamples = np.random.rand(total_spins)
    # vectorized form of the original per-index loop: same draws, same rule
    s_block_current[:] = np.where(prob_on_after_timestep > rsamples, 1.0, -1.0)
    return s_block_current
def build_block_matrices_from_search_radius(n, num_cells, search_radius, gamma,
                                            aotocrine=AUTOCRINE, plot_adjacency=False):
    """Build the multicell coupling matrix J_block and the cell adjacency matrix.

    J_block = kron(I, J [+ gamma*W]) + gamma * (kron(A_low, (gamma*W).T) + kron(A_up, gamma*W))
    where A is the Chebyshev-distance adjacency within `search_radius`.

    NOTE(review): W_scaled already carries a factor of gamma, and the
    off-diagonal blocks are multiplied by gamma again below -- confirm the
    effective gamma**2 on cell-cell terms is intended.
    NOTE(review): parameter name `aotocrine` looks like a typo for `autocrine`;
    renaming would break callers, so it is left as-is.
    """
    W_scaled = gamma * simsetup['FIELD_SEND']
    # Term A: self interactions for each cell (diagonal blocks of multicell J_block)
    if aotocrine:
        # autocrine signalling: a cell also receives its own sent field
        J_diag_blocks = np.kron(np.eye(num_cells), simsetup['J'] + W_scaled)
    else:
        J_diag_blocks = np.kron(np.eye(num_cells), simsetup['J'])
    # Term B:
    adjacency_arr_uptri = np.zeros((num_cells, num_cells))
    # build only upper diagonal part of A
    for a in range(num_cells):
        grid_loc_a = lattice_square_int_to_loc(a, n)  # map cell a & b index to grid loc (i, j)
        arow, acol = grid_loc_a[0], grid_loc_a[1]
        arow_low = arow - search_radius
        arow_high = arow + search_radius
        acol_low = acol - search_radius
        acol_high = acol + search_radius
        for b in range(a+1, num_cells):
            grid_loc_b = lattice_square_int_to_loc(b, n)  # map cell a & b index to grid loc (i, j)
            # neighbour test: within `search_radius` in both row and column
            if (arow_low <= grid_loc_b[0] <= arow_high) and (acol_low <= grid_loc_b[1] <= acol_high):
                adjacency_arr_uptri[a, b] = 1
    adjacency_arr_lowtri = adjacency_arr_uptri.T
    adjacency_arr = adjacency_arr_lowtri + adjacency_arr_uptri
    # Term 2 of J_multicell (cell-cell interactions)
    J_offdiag_blocks = np.kron(adjacency_arr_lowtri, W_scaled.T) \
                       + np.kron(adjacency_arr_uptri, W_scaled)
    # build final J multicell matrix
    J_block = J_diag_blocks + gamma * J_offdiag_blocks
    if plot_adjacency:
        plt.imshow(adjacency_arr)
        plt.show()
    return J_block, adjacency_arr
def build_block_state_from_lattice(lattice, n, num_cells, simsetup):
    """Flatten every cell's spin state into one concatenated block vector.

    Cell `a` (row-major grid order) occupies slots [a*N, (a+1)*N).
    """
    genes = simsetup['N']
    state_block = np.zeros(num_cells * genes)
    for cell_idx in range(num_cells):
        row, col = lattice_square_int_to_loc(cell_idx, n)
        offset = cell_idx * genes
        state_block[offset: offset + genes] = np.copy(
            lattice[row][col].get_current_state())
    return state_block
def update_lattice_using_state_block(lattice, n, num_cells, simsetup, s_block):
    """Write a block state vector back into the lattice's cell objects.

    For each cell: sets `.state`, appends the new state as a fresh column of
    `.state_array`, and bumps `.steps`. Returns the mutated lattice.
    """
    genes = simsetup['N']
    for cell_idx in range(num_cells):
        row, col = lattice_square_int_to_loc(cell_idx, n)
        cell = lattice[row][col]
        new_state = np.copy(s_block[cell_idx * genes: (cell_idx + 1) * genes])
        # update cell state specifically
        cell.state = new_state
        # grow the state history by one column holding the new state
        history = np.zeros((genes, np.shape(cell.state_array)[1] + 1))
        history[:, :-1] = cell.state_array  # TODO: make sure don't need array copy
        history[:, -1] = new_state
        cell.state_array = history
        # update steps attribute
        cell.steps += 1
    return lattice
# input processing
n, num_cells, app_field, app_field_step = input_checks(app_field)
cell_locations = get_cell_locations(lattice, n)
loc_to_idx = {pair: idx for idx, pair in enumerate(cell_locations)}
memory_idx_list = list(data_dict['memory_proj_arr'].keys())
# assess & plot initial state
for loc in cell_locations:
    update_datadict_timestep_cell(lattice, loc, memory_idx_list, 0)
update_datadict_timestep_global(lattice, 0)    # measure initial state
lattice_plot_init(lattice, memory_idx_list)    # plot initial state
# special update method for meanfield case (infinite search radius)
if meanfield:
    state_total, field_global = meanfield_global_field()
if BLOCK_UPDATE_LATTICE:
    assert not meanfield  # TODO how does this flag interact with meanfield flag?
    # Psuedo 1: build J = I dot J0 + A dot W
    # I is M x M, A determined by the type of graph (could explore other, non-lattice, types)
    total_spins = num_cells * simsetup['N']
    J_block, adjacency_arr = build_block_matrices_from_search_radius(
        n, num_cells, SEARCH_RADIUS_CELL, ext_field_strength, aotocrine=AUTOCRINE)
    # Pseudo 2: store lattice state as blocked vector s_hat
    state_block = build_block_state_from_lattice(lattice, n, num_cells, simsetup)
    # Pseudo 3: applied_field_block timeseries or None
    # TODO
for turn in range(1, num_lattice_steps):
    print('Turn ', turn)
    if BLOCK_UPDATE_LATTICE:
        # TODO applied field block
        # block update rule for the lattice (represented by state_block)
        state_block = parallel_block_update_lattice(J_block, state_block, None, total_spins)
        # TODO applied field block
        # TODO better usage of the lattice object, this refilling is inefficient
        #  especially the state array part
        # fill lattice object based on updated state_block
        # NOTE(review): per-cell data_dict projections are only recorded in the
        # sequential branch below -- confirm that is intended for block mode
        lattice = update_lattice_using_state_block(lattice, n, num_cells, simsetup, state_block)
    else:
        # sequential asynchronous update in randomized cell order
        random.shuffle(cell_locations)
        for idx, loc in enumerate(cell_locations):
            cell = lattice[loc[0]][loc[1]]
            if app_field is not None:
                app_field_step = app_field[:, turn]
            if meanfield:
                cellstate_pre = np.copy(cell.get_current_state())
                cell.update_with_meanfield(
                    simsetup['J'], field_global, beta=beta, app_field=app_field_step,
                    field_signal_strength=ext_field_strength, field_app_strength=app_field_strength)
                # TODO update field_avg based on new state TODO test
                state_total += (cell.get_current_state() - cellstate_pre)
                state_total_01 = (state_total + num_cells) / 2
                field_global = np.dot(simsetup['FIELD_SEND'], state_total_01)
                print(field_global)
                print(state_total)
            else:
                cell.update_with_signal_field(
                    lattice, SEARCH_RADIUS_CELL, n, simsetup['J'], simsetup, beta=beta,
                    exosome_string=exosome_string, exosome_remove_ratio=exosome_remove_ratio,
                    field_signal_strength=ext_field_strength, field_app=app_field_step,
                    field_app_strength=app_field_strength)
            # update cell specific datdict entries for the current timestep
            cell_proj = update_datadict_timestep_cell(lattice, loc, memory_idx_list, turn)
            if turn % (120*plot_period) == 0:  # proj vis of each cell (slow; every k steps)
                fig, ax, proj = cell.\
                    plot_projection(simsetup['A_INV'], simsetup['XI'], proj=cell_proj,
                                    use_radar=False, pltdir=io_dict['latticedir'])
    # compute lattice properties (assess global state)
    # TODO 1 - consider lattice energy at each cell update (not lattice update)
    # TODO 2 - speedup lattice energy calc by using info from state update calls...
    update_datadict_timestep_global(lattice, turn)
    if turn % plot_period == 0:  # plot the lattice
        lattice_projection_composite(
            lattice, turn, n, io_dict['latticedir'], simsetup, state_int=state_int)
        reference_overlap_plotter(
            lattice, turn, n, io_dict['latticedir'], simsetup, state_int=state_int)
        #if flag_uniplots:
        #    for mem_idx in memory_idx_list:
        #        lattice_uniplotter(lattice, turn, n, io_dict['latticedir'], mem_idx, simsetup)
return lattice, data_dict, io_dict
def collisional_loss(electron_energy):
    """
    Energy-dependent part of the collisional energy-loss rate for energetic
    electrons.

    Parameters
    ----------
    electron_energy : `numpy.array`
        Electron energies at which to evaluate the loss.

    Returns
    -------
    `numpy.array`
        Energy loss rate.

    Notes
    -----
    Ported from the SSW routine
    `Brm_ELoss <https://hesperia.gsfc.nasa.gov/ssw/packages/xray/idl/brm/brm_eloss.pro>`_
    """
    rest_mass = const.get_constant('mc2')  # electron rest-mass energy
    lorentz_gamma = electron_energy / rest_mass + 1.0
    beta = np.sqrt(1.0 - 1.0 / lorentz_gamma ** 2)  # v/c
    # TODO figure out what the 6.9447e+9 constant is (Coulomb-log related?)
    return np.log(6.9447e+9 * electron_energy) / beta
import re
def getOrdererIPs():
    """Return the container IPs of all running orderer containers.

    Scans running Docker containers whose names match ``orderer<k>`` (k >= 1)
    and reads each container's own address from the last line of its
    /etc/hosts.

    Returns:
        list of ip addr strings, one per matching container.
    """
    client = docker.from_env()
    orderer_ip_list = []
    try:
        for container in client.containers.list():
            if re.search("^orderer[1-9][0-9]*", container.name):
                out = container.exec_run("awk 'END{print $1}' /etc/hosts", stdout=True)
                orderer_ip_list.append(out.output.decode().split("\n")[0])
    finally:
        # always release the docker client, even if a container call raises
        # (the original leaked the client on any exception before close())
        client.close()
    return orderer_ip_list
import re
def repair_attribute_name(attr):
    """Remove "weird" characters from attribute names.

    Keeps letters, digits, '-', '_', '/' and '*'; strips everything else.

    Args:
        attr (str): raw attribute name.

    Returns:
        str: *attr* with all disallowed characters removed.
    """
    # Raw string avoids the invalid escape sequences (\/ and \*) that the
    # previous non-raw pattern relied on -- those emit SyntaxWarning on
    # modern Python. The character class is unchanged.
    return re.sub(r'[^a-zA-Z\-_/0-9*]', '', attr)
def get_xsd_file(profile_name, profile_version):
    """Returns path to installed XSD, or local if no installed one exists."""
    lookup_key = profile_name.lower()
    if lookup_key not in XSD_LOOKUP_MAP:
        raise ValueError(
            'Profile %s did not match a supported profile: %s.\n'
            % (profile_name, sorted(XSD_FILES.keys())))
    # Ensure we have the correct case.
    canonical_name = XSD_LOOKUP_MAP[lookup_key]
    versions = XSD_FILES[canonical_name]
    if profile_version not in versions:
        raise ValueError(
            'Profile Version %s did not match a supported version: %s.\n'
            % (profile_version, sorted(versions.keys())))
    return versions[profile_version]
def get_response(url: str) -> HTMLResponse:
    """
    Issue an HTTP GET request to the given URL.

    :param url: target URL
    :return: the response for the URL
    """
    return HTMLSession().get(url)
def minutesBetween(date_1, date_2):
    """Calculates the number of whole minutes between two dates.

    Args:
        date_1 (Date): The first date to use.
        date_2 (Date): The second date to use.

    Returns:
        int: An integer that is representative of the difference between
            two dates.
    """
    delta = date_2 - date_1
    # timedelta is normalized so 0 <= seconds < 86400; the sub-second part
    # can never push the count past a whole minute, so it is ignored.
    return delta.days * 1440 + delta.seconds // 60
from typing import Dict
import pkgutil
import sys
import importlib
def find_whatrecord_submodules() -> Dict[str, ModuleType]:
    """Find all whatrecord submodules, as a dictionary of name to module."""
    found: Dict[str, ModuleType] = {}
    package_root = str(MODULE_PATH.parent)
    for info in pkgutil.walk_packages(path=[package_root], prefix="whatrecord."):
        name = info.name
        if name.endswith("__main__"):
            continue
        if name in sys.modules:
            # already imported somewhere -- reuse it
            found[name] = sys.modules[name]
            continue
        # Submodules may not yet be imported; do that here.
        try:
            found[name] = importlib.import_module(name, package="whatrecord")
        except Exception:
            logger.exception("Failed to import %s", name)
    return found
import asyncio
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Unload FireServiceRota config entry."""
    domain_data = hass.data[DOMAIN]
    # stop the websocket listener off the event loop (it blocks)
    await hass.async_add_executor_job(
        domain_data[entry.entry_id].websocket.stop_listener
    )
    unload_results = await asyncio.gather(
        *(
            hass.config_entries.async_forward_entry_unload(entry, platform)
            for platform in PLATFORMS
        )
    )
    unload_ok = all(unload_results)
    if unload_ok:
        del domain_data[entry.entry_id]
    return unload_ok
def HasPositivePatterns(test_filter):
    """Returns True if test_filter contains a positive pattern, else False

    Args:
        test_filter: test-filter style string
    """
    if not test_filter:
        return False
    return test_filter[0] != '-'
import math
def get_lr(lr_init, lr_end, lr_max, total_epochs, warmup_epochs,
           pretrain_epochs, steps_per_epoch, lr_decay_mode):
    """
    generate learning rate array

    Args:
       lr_init(float): init learning rate
       lr_end(float): end learning rate
       lr_max(float): max learning rate
       total_epochs(int): total epoch of training
       warmup_epochs(int): number of warmup epochs
       pretrain_epochs(int): number of pretrain epochs
       steps_per_epoch(int): steps of one epoch
       lr_decay_mode(string): learning rate decay mode,
                              including steps, poly, linear or cosine

    Returns:
       np.array, learning rate array (pretrain steps are trimmed from the front)

    Raises:
       NotImplementedError: if lr_decay_mode is not one of the four modes.
    """
    lr_each_step = []
    total_steps = steps_per_epoch * total_epochs
    warmup_steps = steps_per_epoch * warmup_epochs
    pretrain_steps = steps_per_epoch * pretrain_epochs
    decay_steps = total_steps - warmup_steps
    if lr_decay_mode == 'steps':
        # piecewise-constant: drop lr by 10x at 30%, 60% and 80% of training
        # (note: warmup is not applied in this mode)
        decay_epoch_index = [
            0.3 * total_steps, 0.6 * total_steps, 0.8 * total_steps
        ]
        for i in range(total_steps):
            if i < decay_epoch_index[0]:
                lr = lr_max
            elif i < decay_epoch_index[1]:
                lr = lr_max * 0.1
            elif i < decay_epoch_index[2]:
                lr = lr_max * 0.01
            else:
                lr = lr_max * 0.001
            lr_each_step.append(lr)
    elif lr_decay_mode == 'poly':
        # quadratic polynomial decay from lr_max to 0 after linear warmup
        for i in range(total_steps):
            if i < warmup_steps:
                lr = linear_warmup_lr(i, warmup_steps, lr_max, lr_init)
            else:
                base = (1.0 - (i - warmup_steps) / decay_steps)
                lr = lr_max * base * base
            lr_each_step.append(lr)
    elif lr_decay_mode == 'linear':
        # straight-line decay from lr_max to lr_end after linear warmup
        for i in range(total_steps):
            if i < warmup_steps:
                lr = linear_warmup_lr(i, warmup_steps, lr_max, lr_init)
            else:
                lr = lr_max - (lr_max - lr_end) * (i -
                                                   warmup_steps) / decay_steps
            lr_each_step.append(lr)
    elif lr_decay_mode == 'cosine':
        # linear * cosine decay after warmup; the 2 * 0.47 factor stretches
        # the cosine period and 0.00001 keeps lr strictly positive
        # NOTE(review): origin of the 0.47 constant is not documented here
        for i in range(total_steps):
            if i < warmup_steps:
                lr = linear_warmup_lr(i, warmup_steps, lr_max, lr_init)
            else:
                linear_decay = (total_steps - i) / decay_steps
                cosine_decay = 0.5 * (
                    1 + math.cos(math.pi * 2 * 0.47 *
                                 (i - warmup_steps) / decay_steps))
                decayed = linear_decay * cosine_decay + 0.00001
                lr = lr_max * decayed
            lr_each_step.append(lr)
    else:
        raise NotImplementedError(
            'Learning rate decay mode [{:s}] cannot be recognized'.format(
                lr_decay_mode))
    lr_each_step = np.array(lr_each_step).astype(np.float32)
    # skip the pretrain portion of the schedule
    learning_rate = lr_each_step[pretrain_steps:]
    return learning_rate
def comptineN2():
    """Generate the midi file of the comptine d'un autre été.

    Assembles a left-hand track (8 repeats of phrase pairs) and a right-hand
    track from the comp_* phrase helpers, then applies the per-note volume
    envelope from generate_vol(). Returns a mido.MidiFile.
    """
    mid = MidiFile()
    # left hand: repeating accompaniment pattern
    trackl = MidiTrack()
    trackl.name = "Left hand"
    for i in range(8):
        trackl = comp_lh1(trackl)
        trackl = comp_lh1(trackl)
        trackl = comp_lh2(trackl)
        trackl = comp_lh2(trackl)
    # closing note (E3)
    trackl.append(Message('note_on', note=52))
    trackl.append(Message('note_off', note=52, time=200))
    mid.tracks.append(trackl)
    # right hand: melody; starts with a silent note to delay the entry
    trackr = MidiTrack()
    trackr.name = 'Right hand'
    trackr.append(Message('note_on', note=67, velocity=0, time=3200))
    trackr = comp_rh1(trackr)
    trackr = comp_rh2(trackr)
    trackr = comp_rh2(trackr)
    trackr = comp_rh3(trackr)
    trackr = comp_rh3(trackr, end=True)
    trackr = comp_rh4(trackr)
    # closing note (B4)
    trackr.append(Message('note_on', note=71))
    trackr.append(Message('note_off', note=71, time=200))
    mid.tracks.append(trackr)
    mid.ticks_per_beat = 100
    # apply dynamics
    vols = generate_vol()
    mid = volume(mid, vols)
    return mid
from xenserver import tasks
from xenserver.tests.helpers import XenServerHelper
def xs_helper(monkeypatch):
    """
    Provide a XenServerHelper instance and monkey-patch xenserver.tasks to use
    sessions from that instance instead of making real API calls.
    """
    helper = XenServerHelper()
    monkeypatch.setattr(tasks, 'getSession', helper.get_session)
    return helper
def highest_palindrome_product(digits):
    """Returns the highest palindrome number resulting from the
    multiplication of two numbers with the given amount of digits.

    Args:
        digits (int): number of digits of each factor (>= 1).

    Returns:
        int: the largest palindromic product of two `digits`-digit numbers.
    """
    def is_palindrome(number):
        """Returns True if number reads the same forwards and backwards."""
        text = str(number)
        return text == text[::-1]

    lowest = 10 ** (digits - 1)   # smallest number with `digits` digits
    highest = 10 ** digits - 1    # largest number with `digits` digits
    best = 0
    # Enumerate unordered factor pairs (a >= b) from the top down, pruning
    # whole branches that cannot beat the best palindrome found so far.
    for a in range(highest, lowest - 1, -1):
        if a * highest <= best:
            break  # no smaller `a` can produce a larger product
        for b in range(a, lowest - 1, -1):
            product = a * b
            if product <= best:
                break  # products only shrink as b decreases
            if is_palindrome(product):
                best = product
    return best
def is_ladder(try_capture, game_state, candidate,
              ladder_stones=None, recursion_depth=50):
    """Ladders are played out in reversed roles, one player tries to capture,
    the other to escape. We determine the ladder status by recursively calling
    is_ladder in opposite roles, providing suitable capture or escape candidates.

    Arguments:
        try_capture: boolean flag to indicate if you want to capture or escape the ladder
        game_state: current game state, instance of GameState
        candidate: a move that potentially leads to escaping the ladder or capturing it, instance of Move
        ladder_stones: the stones to escape or capture, list of Point. Will be inferred if not provided.
        recursion_depth: when to stop recursively calling this function, integer valued.

    Returns True if game state is a ladder and try_capture is true (the ladder captures)
    or if game state is not a ladder and try_capture is false (you can successfully escape)
    and False otherwise.
    """
    # illegal move or recursion budget exhausted -> this line does not work
    if not game_state.is_valid_move(Move(candidate)) or not recursion_depth:
        return False
    next_player = game_state.next_player
    capture_player = next_player if try_capture else next_player.other
    escape_player = capture_player.other
    if ladder_stones is None:
        ladder_stones = guess_ladder_stones(game_state, candidate, escape_player)
    for ladder_stone in ladder_stones:
        current_state = game_state.apply_move(candidate)
        if try_capture:
            candidates = determine_escape_candidates(
                game_state, ladder_stone, capture_player)
            attempted_escapes = [  # now try to escape
                is_ladder(False, current_state, escape_candidate,
                          ladder_stone, recursion_depth - 1)
                for escape_candidate in candidates]
            if not any(attempted_escapes):
                return True  # if at least one escape fails, we capture
        else:
            # escaping: 3+ liberties means the chase is broken
            if count_liberties(current_state, ladder_stone) >= 3:
                return True  # successful escape
            if count_liberties(current_state, ladder_stone) == 1:
                continue  # failed escape, others might still do
            candidates = liberties(current_state, ladder_stone)
            attempted_captures = [  # now try to capture
                is_ladder(True, current_state, capture_candidate,
                          ladder_stone, recursion_depth - 1)
                for capture_candidate in candidates]
            if any(attempted_captures):
                continue  # failed escape, try others
            return True  # candidate can't be caught in a ladder, escape.
    return False
def instances(request, compute_id):
    """Django view: list/manage virtual machine instances on one compute host.

    :param request: HttpRequest; POST triggers instances_actions
    :param compute_id: primary key of the Compute host
    :return: rendered instances.html (or the instances_actions response)
    """
    all_host_vms = {}
    error_messages = []
    compute = get_object_or_404(Compute, pk=compute_id)
    if not request.user.is_superuser:
        # non-admins only see their own instances
        all_user_vms = get_user_instances(request)
    else:
        try:
            all_host_vms = get_host_instances(request, compute)
        except libvirtError as lib_err:
            error_messages.append(lib_err)
    if request.method == 'POST':
        try:
            return instances_actions(request)
        except libvirtError as lib_err:
            error_messages.append(lib_err)
            addlogmsg(request.user.username, request.POST.get("name", "instance"), lib_err.message)
    # NOTE(review): locals() exposes every local to the template; an explicit
    # context dict would be safer -- confirm template variable usage first.
    return render(request, 'instances.html', locals())
def seresnet101b_cub(classes=200, **kwargs):
    """
    SE-ResNet-101 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation
    Networks,' https://arxiv.org/abs/1709.01507.

    Parameters
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_seresnet(
        blocks=101,
        conv1_stride=False,
        model_name="seresnet101b_cub",
        classes=classes,
        **kwargs)
import os
def get_template_dir(format):
    """
    Given a format string return the corresponding standard template
    directory.
    """
    package_dir = os.path.dirname(__file__)
    return os.path.join(package_dir, 'templates', format)
from pathlib import Path
import shutil
def dst(request):
    """Return a real temporary folder path which is unique to each test
    function invocation. This folder is deleted after the test has finished.
    """
    target = Path(mkdtemp()).resolve()

    def _cleanup():
        shutil.rmtree(str(target), ignore_errors=True)

    request.addfinalizer(_cleanup)
    return target
import string
def getSentencesFromReview(reviewContent):
    """
    INPUT: a single review consist of serveral sentences
    OUTPUT: a list of single sentences

    Uses NLTK's punkt tokenizer, then additionally splits agglomerated
    sentences like "bad.Good" where a period is immediately followed by a
    capitalized word.
    """
    sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
    sentences = sent_detector.tokenize(reviewContent)
    # split agglomerated sentences
    for m in range(len(sentences)):
        subsentences = sentences[m].split('.')
        new_sentences = []
        new_subsen = subsentences[0]
        for n in range(1, len(subsentences)):
            if subsentences[n] and (subsentences[n][0] in string.ascii_uppercase):
                # capitalized fragment starts a new sentence
                new_subsen += '.'
                new_sentences.append(new_subsen)
                new_subsen = subsentences[n]
            else:
                # not a sentence boundary: re-attach the period
                new_subsen += '.' + subsentences[n]
        new_sentences.append(new_subsen)
        sentences[m] = new_sentences
    # collect all the single sentence into final_sentence list
    final_sentences = []
    for sentence in sentences:
        # NOTE(review): every element was replaced by a list above, so the
        # else-branch looks unreachable -- confirm before removing it.
        if isinstance(sentence, list):
            final_sentences.extend(sentence)
        else:
            final_sentences.append(sentence)
    return final_sentences
import math
def gamma_vector_neutrino(m_med, g_l=0.0):
    """Calculate the neutrino partial width of a vector mediator.

    :param m_med: mediator mass
    :type m_med: float
    :param g_l: lepton coupling, defaults to 0.0
    :type g_l: float, optional
    """
    coupling_factor = 3 * g_l**2 / (24 * math.pi)
    return coupling_factor * m_med
from typing import Dict
from typing import Callable
import importlib
import sys
def load_debugtalk_functions() -> Dict[Text, Callable]:
    """ load project debugtalk.py module functions

    debugtalk.py should be located in project root directory.

    Returns:
        dict: debugtalk module functions mapping
        {
            "func1_name": func1,
            "func2_name": func2
        }
    """
    # load debugtalk.py module; abort the run if it is broken
    try:
        debugtalk_module = importlib.import_module("debugtalk")
    except Exception as ex:
        logger.error(f"error occurred in debugtalk.py: {ex}")
        sys.exit(1)
    # reload to refresh previously loaded module
    refreshed = importlib.reload(debugtalk_module)
    return load_module_functions(refreshed)
def fifo_cdc(glbl, emesh_i, emesh_o):
    """
    Map the packet interfaces to the FIFO interface (clock-domain crossing
    via an async FIFO between emesh_i.clock and emesh_o.clock).
    """
    # NOTE(review): `wr` and `rd` are not defined in this function's visible
    # scope -- presumably module-level signals; confirm they exist upstream.
    fifo_intf = FIFOBus(size=16, width=len(emesh_i.bits))

    @always_comb
    def rtl_assign():
        # write when input access asserted and FIFO has space
        wr.next = emesh_i.access and not fifo_intf.full
        # read when FIFO has data and downstream is not stalling
        rd.next = not fifo_intf.empty and not emesh_i.wait
        emesh_o.wait.next = fifo_intf.full

    @always(emesh_o.clock.posedge)
    def rtl_access():
        if not emesh_i.wait:
            emesh_o.access.next = fifo_intf.rd

    # assign signals to the FIFO interface
    fifo_intf.wdata = emesh_i.bits
    fifo_intf.rdata = emesh_o.bits
    g_fifo = cores.fifo.fifo_async(glbl.reset, emesh_i.clock,
                                   emesh_o.clock, fifo_intf)
    return rtl_assign, rtl_access, g_fifo
def eudora_bong(update, context):  # 1.2.1
    """Show the matched-lawyer result card for Eudora Bong, then offer a
    Yes/No restart choice. Returns the FIRST conversation state."""
    query = update.callback_query
    bot = context.bot
    # restart prompt buttons plus a Back navigation row
    keyboard = [
        [InlineKeyboardButton("Yes", callback_data='0'),
         InlineKeyboardButton("No", callback_data='00')],
        [InlineKeyboardButton("Back", callback_data='1.2')]
    ]
    reply_markup = InlineKeyboardMarkup(keyboard)
    # replace the originating message with the match announcement
    bot.edit_message_text(
        chat_id=query.message.chat_id,
        message_id=query.message.message_id,
        text="""We have found a lawyer that suits your needs!""",
    )
    bot.send_photo(
        chat_id=query.message.chat_id,
        photo=open("female.jpg", 'rb')
    )
    bot.send_message(
        chat_id=query.message.chat_id,
        text="""Name: Eudora Bong \nCompany: Chee Chong LLP \nYears of Experience: 9""",
    )
    bot.send_message(
        chat_id=query.message.chat_id,
        text="""See more on our website: https://eldoraboo.github.io/PairALegal/eudora-bong"""
    )
    bot.send_message(
        chat_id=query.message.chat_id,
        text="""Thank you for using Pair-A-Legal bot. \nWould you like to restart?""",
        reply_markup=reply_markup
    )
    return FIRST
import os
def get_wildcard_dir(path):
    """If given path is a dir, make it a wildcard so the JVM will include all JARs in the directory."""
    if os.path.isdir(path):
        return [os.path.join(path, "*")]
    if os.path.exists(path):
        return [path]
    return []
def scan_setup_py():
    """Validate the contents of setup.py against Versioneer's expectations."""
    # NOTE: vendored Versioneer boilerplate -- kept byte-identical to upstream
    # to ease future re-vendoring diffs.
    found = set()
    setters = False
    errors = 0
    with open("setup.py", "r") as f:
        for line in f.readlines():
            if "import versioneer" in line:
                found.add("import")
            if "versioneer.get_cmdclass()" in line:
                found.add("cmdclass")
            if "versioneer.get_version()" in line:
                found.add("get_version")
            if "versioneer.VCS" in line:
                setters = True
            if "versioneer.versionfile_source" in line:
                setters = True
    # all three of import/cmdclass/get_version must be present
    if len(found) != 3:
        print("")
        print("Your setup.py appears to be missing some important items")
        print("(but I might be wrong). Please make sure it has something")
        print("roughly like the following:")
        print("")
        print(" import versioneer")
        print(" setup( version=versioneer.get_version(),")
        print(" cmdclass=versioneer.get_cmdclass(), ...)")
        print("")
        errors += 1
    # legacy setup.py-based configuration is no longer supported
    if setters:
        print("You should remove lines like 'versioneer.VCS = ' and")
        print("'versioneer.versionfile_source = ' . This configuration")
        print("now lives in setup.cfg, and should be removed from setup.py")
        print("")
        errors += 1
    return errors
def pattern():
    """Start a pattern

    Expected arguments are: name, delay, pause
    """
    name = request.args.get('name')
    if name is None:
        return ''
    delay = float(request.args.get('delay', 0.1))
    pause = float(request.args.get('pause', 0.5))
    LightsController.start_pattern(pattern=name, delay=delay, pause=pause)
    return ''
def smow(t):
    """
    Density of Standard Mean Ocean Water (Pure Water) using EOS 1980.

    Parameters
    ----------
    t : array_like
        temperature [℃ (ITS-90)]

    Returns
    -------
    dens(t) : array_like
        density [kg m :sup:`3`]

    References
    ----------
    .. [1] Fofonoff, P. and Millard, R.C. Jr UNESCO 1983. Algorithms for
       computation of fundamental properties of seawater. UNESCO Tech. Pap. in
       Mar. Sci., No. 44, 53 pp. Eqn.(31) p.39.
       http://unesdoc.unesco.org/images/0005/000598/059832eb.pdf
    .. [2] Millero, F.J. and Poisson, A. International one-atmosphere equation
       of state of seawater. Deep-Sea Res. 1981. Vol28A(6) pp625-629.
       doi:10.1016/0198-0149(81)90122-9
    """
    t = np.asanyarray(t)
    # UNESCO polynomial coefficients, lowest order first
    a = (999.842594, 6.793952e-2, -9.095290e-3, 1.001685e-4, -1.120083e-6,
         6.536332e-9)
    T68 = T68conv(t)
    # np.polyval expects highest-order coefficient first; this performs the
    # same Horner evaluation as the original nested expression.
    return np.polyval(a[::-1], T68)
from typing import Optional
import os
def get_asgi_handler(fast_api: FastAPI) -> Optional[Mangum]:
    """Initialize an AWS Lambda ASGI handler.

    Returns a Mangum wrapper when running inside AWS Lambda (detected via the
    AWS_EXECUTION_ENV environment variable), otherwise None.
    """
    running_on_lambda = bool(os.getenv("AWS_EXECUTION_ENV"))
    if not running_on_lambda:
        return None
    return Mangum(fast_api, enable_lifespan=False)
from typing import Optional
from typing import Tuple
import torch
def generate_change_image_given_dlatent(
    dlatent: np.ndarray,
    generator: networks.Generator,
    classifier: Optional[MobileNetV1],
    class_index: int,
    sindex: int,
    s_style_min: float,
    s_style_max: float,
    style_direction_index: int,
    shift_size: float,
    label_size: int = 2,
    num_layers: int = 14
) -> Tuple[np.ndarray, float, float]:
    """Modifies an image given the dlatent on a specific S-index.

    Args:
        dlatent: The image dlatent, with shape [dlatent_size].
        generator: The generator model. Either StyleGAN or GLO.
        classifier: The classifier to visualize.
        class_index: The index of the class to visualize.
        sindex: The specific style index to visualize.
        s_style_min: The minimal value of the style index.
        s_style_max: The maximal value of the style index.
        style_direction_index: If 0 move s to it's min value otherwise to it's max
            value.
        shift_size: Factor of the shift of the style vector.
        label_size: The size of the label.
        num_layers: Number of synthesis layers the dlatent is broadcast to.

    Returns:
        The image after the style index modification, and the output of
        the classifier on this image.
    """
    # broadcast the single dlatent across all synthesis layers
    expanded_dlatent_tmp = torch.tile(dlatent.unsqueeze(1), [1, num_layers, 1])
    network_inputs = generator.synthesis.style_vector_calculator(expanded_dlatent_tmp)
    style_vector = torch.cat(generator.synthesis.style_vector_calculator(expanded_dlatent_tmp)[1], dim=1).numpy()
    orig_value = style_vector[0, sindex]
    target_value = (s_style_min if style_direction_index == 0 else s_style_max)
    # scale the shift by the distance to the target style value (unless at it)
    if target_value == orig_value:
        weight_shift = shift_size
    else:
        weight_shift = shift_size * (target_value - orig_value)
    # locate which synthesis layer / channel the flat sindex maps to
    layer_idx, in_idx = sindex_to_layer_idx_and_index(network_inputs[1], sindex)
    layer_one_hot = torch.nn.functional.one_hot(torch.Tensor([in_idx]).to(int), network_inputs[1][layer_idx].shape[1])
    # apply the shift only to the selected style channel
    network_inputs[1][layer_idx] += (weight_shift * layer_one_hot)
    svbg_new = group_new_style_vec_block(network_inputs[1])
    images_out = generator.synthesis.image_given_dlatent(expanded_dlatent_tmp, svbg_new)
    # clamp pixel values to the generator's [-1, 1] range
    images_out = torch.maximum(torch.minimum(images_out, torch.Tensor([1])), torch.Tensor([-1]))
    change_image = torch.tensor(images_out.numpy())
    result = classifier(change_image)
    change_prob = nn.Softmax(dim=1)(result).detach().numpy()[0, class_index]
    # NCHW -> NHWC for downstream visualization
    change_image = change_image.permute(0, 2, 3, 1)
    return change_image, change_prob
def count_nodes_of_type_on_path_of_type_to_label(source_name, source_label, target_label, node_label_list, relationship_label_list, node_of_interest_position, debug=False):
    """
    This function will take a source node, look for paths along given node and relationship types to a certain target node,
    and then count the number distinct number of nodes of interest that occur along that path.
    :param source_name: source node name
    :param source_label: source node label
    :param target_label: target node label
    :param node_label_list: list of node labels (eg. ['phenotypic_feature'])
    :param relationship_label_list: list of relationship types (eg. ['has_phenotype', 'has_phenotype'])
    :param node_of_interest_position: position of node to count (in this eg, node_of_interest_position = 0)
    :param debug: just print the cypher query
    :return: two dictionaries: names2counts, names2nodes (keys = target node names, counts is number of nodes of interest in the paths, nodes are the actual nodes of interest)
    """
    # NOTE(review): the Cypher query is built with %-interpolation of the
    # label/name arguments; if any of them can come from untrusted input this
    # is injectable -- confirm callers only pass trusted vocabulary terms.
    query = "MATCH (s:%s{id:'%s'})-" % (source_label, source_name)
    for i in range(len(relationship_label_list) - 1):
        if i == node_of_interest_position:
            # bind the node to count at this hop
            query += "[:%s]-(n:%s)-" % (relationship_label_list[i], node_label_list[i])
        else:
            query += "[:%s]-(:%s)-" % (relationship_label_list[i], node_label_list[i])
    query += "[:%s]-(t:%s) " % (relationship_label_list[-1], target_label)
    query += "RETURN t.id, count(distinct n.id), collect(distinct n.id)"
    if debug:
        return query
    else:
        # `driver` is a module-level neo4j driver
        with driver.session() as session:
            result = session.run(query)
            result_list = [i for i in result]
            names2counts = dict()
            names2nodes = dict()
            for i in result_list:
                names2counts[i['t.id']] = int(i['count(distinct n.id)'])
                names2nodes[i['t.id']] = i['collect(distinct n.id)']
            return names2counts, names2nodes
def _build_square(A, B, C, D):
"""Build a matrix from submatrices
A B
C D
"""
return np.vstack((
np.hstack((A, B)),
np.hstack((C, D))
)) | 510b39f433023339f977a665c055f60abe46a160 | 30,886 |
from typing import Sequence
from typing import Optional
import asyncio
import sys
import pkg_resources
import logging
def main(
    argv: Sequence[str] = sys.argv[1:], loop: Optional[asyncio.AbstractEventLoop] = None
) -> None:
    """Parse arguments and set up the main program loop.

    :param argv: command line arguments. NOTE(review): the default binds
        ``sys.argv[1:]`` once at definition time; pass ``argv`` explicitly if
        ``sys.argv`` may change after import.
    :param loop: event loop to use; a default loop is obtained when None.
    """
    args = docopt(
        __doc__, argv=argv, version=pkg_resources.require("easywave")[0].version
    )
    # docopt counts repeated flags, so -v gives INFO and -vv gives DEBUG.
    level = logging.ERROR
    if args["-v"]:
        level = logging.INFO
        if args["-v"] == 2:
            level = logging.DEBUG
    logging.basicConfig(level=level)
    if not loop:
        loop = asyncio.get_event_loop()
    conn = create_easywave_connection(
        protocol=EasywaveProtocol,
        host=args["--host"],
        port=args["--port"],
        baud=args["--baud"],
        loop=loop,
    )
    # The factory signals failure with the literal False; use an identity
    # check instead of ``== False`` (PEP 8 / E712).
    if conn is False:
        logging.warning('No connection possible')
        return None
    transport, protocol = loop.run_until_complete(conn)
    try:
        if args["<command>"]:
            # One-shot mode: send a single command and wait for its ack.
            loop.run_until_complete(protocol.send_command_ack(args["<id>"], args["<command>"]))
        else:
            loop.run_forever()
    except KeyboardInterrupt:
        # cleanup connection
        transport.close()
        loop.run_forever()
    finally:
        loop.close()
def DataFrame_to_AsciiDataTable(pandas_data_frame,**options):
    """Converts a pandas.DataFrame to an AsciiDataTable.

    Parameters
    ----------
    pandas_data_frame : pandas.DataFrame
        Frame whose column names, row values and dtype names are copied into
        the new table.
    options : dict
        Extra keyword arguments forwarded to the AsciiDataTable constructor;
        they override the (currently empty) defaults but are themselves
        overridden by the derived column_names/data/column_types keys.

    Returns
    -------
    AsciiDataTable
        A new table built from the frame's contents.
    """
    # Merge caller options over the defaults in one step; caller keys win.
    defaults = {}
    conversion_options = {**defaults, **options}
    # tolist()/values.tolist() already return fresh lists, so no extra
    # defensive slice-copy is needed.
    conversion_options["column_names"] = pandas_data_frame.columns.tolist()
    conversion_options["data"] = pandas_data_frame.values.tolist()
    conversion_options["column_types"] = [str(x) for x in pandas_data_frame.dtypes.tolist()]
    return AsciiDataTable(None, **conversion_options)
import logging
async def update_product(product_id: int, product_update: schemas.ProductPartialUpdate, db: DatabaseManagerBase = Depends(get_db)):
    """
    Patches a product, this endpoint allows to update single or multiple values of a product

    - **title**: Title of the product
    - **description**: Description of the product
    - **purch_price**: The purch price of the product
    - **sales_price**: The sales price of the product

    Raises ApiException (400) when the patch body contains no fields and
    EntityNotFoundException when no product has the given id.
    """
    logging.debug("Product: Update product")
    # exclude_unset=True keeps only fields the client actually sent; an empty
    # dict therefore means the patch body carried no updatable property.
    if not product_update.dict(exclude_unset=True):
        raise ApiException(status_code=400, code="Invalid request",
                           description="Please specify at least one property!")
    product = db.update_product(product_id, product_update)
    if not product:
        raise EntityNotFoundException(
            code="Unable to update product", description=f"Product with the id {product_id} does not exist")
    return product
def TimeDist(times, cutoff, X, e, n1, k):
    """Accumulate the serial-correlation term of a Conley-style covariance.

    Translated from the Rcpp file ConleySE.cpp.

    :param times: length-n1 vector of observation times. NOTE(review): the
        ``.copy()``/``times[i]`` usage suggests a numpy array or pandas
        Series; the KeyError handler below hints that a pandas Series with a
        non-positional index can break the ``times[i]`` lookup -- confirm.
    :param cutoff: maximum time distance at which observations get nonzero
        kernel weight.
    :param X: (n1, k) regressor matrix.
    :param e: length-n1 residual vector.
    :param n1: number of observations (asserted equal to times.shape[0]).
    :param k: number of regressors (asserted equal to X.shape[1]).
    :return: (k, k) matrix XeeXh of kernel-weighted X'ee'X contributions.
    """
    nrow = times.shape[0]
    assert n1 == nrow
    assert X.shape[1] == k
    # dmat[i, j] ends up holding the time-kernel weight between obs i and j.
    dmat = np.ones((nrow, nrow))
    v1 = np.empty(nrow)
    v2 = np.empty(nrow)
    for i in range(nrow):
        t_diff = times.copy()
        try:
            t_diff -= times[i]
        except KeyError:
            print(times)
            raise
        t_diff = np.abs(t_diff)
        for j in range(nrow):
            # Indicator: within the cutoff window.
            v1[j] = t_diff[j] <= cutoff
            # TODO: assert that we're working with integer times
            # Indicator: different time than obs i. t_diff[i] starts at 0 and
            # is overwritten with 0 at j == i (since v2[i] is 0), so this is
            # effectively t_diff[j] != 0 for every j — the in-place mutation
            # does not change the comparison's meaning.
            v2[j] = t_diff[j] != t_diff[i]
            # If comparing against the original Rcpp code, remember that
            # in arma, '*' is matrix multiplication. However, since v1[j]
            # and v2[j] are scalars, element-wise multiplication is good
            t_diff[j] = v1[j] * v2[j] * (1 - t_diff[j]) / (cutoff + 1)
        dmat[i, :] *= t_diff.T
    XeeXh = np.zeros((k, k))
    for i in range(nrow):
        # direct translation of the arma code seems silly in python,
        # but we'll do it anyway.
        e_mat = np.zeros((1, nrow))
        e_mat[:] = e[i]
        k_mat = np.ones((k, 1))
        d_row = np.ones((1, nrow))
        d_row *= dmat[i, :]
        d_row *= e.T
        # equivalently:
        # d_row = dmat[i, :] * e.T
        X_row = X[i, :].reshape(-1, 1)
        assert X_row.shape == (k, 1)
        XeeXh += (X_row @ e_mat * (k_mat @ d_row)) @ X
    return XeeXh
def generate_fps_from_reaction_products(reaction_smiles, fp_data_configs):
    """Generate fingerprints for the reactive and non-reactive substructures
    of the product molecules participating in a chemical reaction.

    :param reaction_smiles: reaction SMILES string, parsed via the project's
        parse_reaction_roles helper.
    :param fp_data_configs: iterable of configuration dicts; keys read here
        are "type" ("ecfp" selects EC fingerprints, anything else selects
        HS fingerprints), "radius", "bits" and, for HSFP only, "ext".
    :return: tuple (total_reactive_fps, total_non_reactive_fps) — one list of
        fingerprints appended per (product, config) pair, in iteration order.
    """
    # Generate the RDKit Mol representations of the product molecules and generate the reaction cores.
    reactants, _, products = parse_reaction_roles(reaction_smiles, as_what="mol_no_maps")
    reaction_cores = get_reaction_core_atoms(reaction_smiles)
    # Separate the reaction cores if they consist out of multiple non-neighbouring parts.
    separated_cores = get_separated_cores(reaction_smiles, reaction_cores)
    # Define variables which will be used for storing the results.
    total_reactive_fps, total_non_reactive_fps = [], []
    # Iterate through the product molecules and generate fingerprints for all reactive and non-reactive substructures.
    # NOTE(review): index [1] into separated_cores/reaction_cores presumably
    # selects the product side (index [0] being reactants) — confirm against
    # get_separated_cores/get_reaction_core_atoms.
    for p_ind, product in enumerate(products):
        # Iterate through all of the dataset configurations.
        for fp_config in fp_data_configs:
            reactive_fps, non_reactive_fps = [], []
            # Generate fingerprints from the reactive substructures i.e. the reaction core(s).
            for core in separated_cores[1][p_ind]:
                # Generate reactive EC fingerprints and add them to the list.
                if fp_config["type"] == "ecfp":
                    reactive_fps.append(construct_ecfp(product, radius=fp_config["radius"], bits=fp_config["bits"],
                                                       from_atoms=core, output_type="np_array", as_type="np_float"))
                # Generate reactive HS fingerprints and add them to the list.
                else:
                    reactive_fps.append(construct_hsfp(product, radius=fp_config["radius"], bits=fp_config["bits"],
                                                       from_atoms=core, neighbourhood_ext=fp_config["ext"]))
            # Generate the extended environment of the reaction core.
            extended_core_env = get_atom_environment(reaction_cores[1][p_ind], product, degree=1)
            # Generate fingerprints from the non-reactive substructures i.e. non-reaction core substructures.
            for bond in product.GetBonds():
                # Generate the extended environment of the focus bond.
                extended_bond_env = get_bond_environment(bond, product, degree=1)
                # If the extended environment of the non-reactive substructure does not overlap with the extended
                # reaction core, generate a non-reactive fingerprint representation.
                if not extended_bond_env.intersection(extended_core_env):
                    # Generate non-reactive EC fingerprints and add them to the list.
                    if fp_config["type"] == "ecfp":
                        non_reactive_fps.append(
                            construct_ecfp(product, radius=fp_config["radius"], bits=fp_config["bits"],
                                           from_atoms=[bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()],
                                           output_type="np_array", as_type="np_float"))
                    # Generate non-reactive HS fingerprints and add them to the list.
                    else:
                        non_reactive_fps.append(
                            construct_hsfp(product, radius=fp_config["radius"], bits=fp_config["bits"],
                                           from_atoms=[bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()],
                                           neighbourhood_ext=fp_config["ext"]))
            # Append the generated fingerprints to the final list.
            total_reactive_fps.append(reactive_fps)
            total_non_reactive_fps.append(non_reactive_fps)
    # Return all of the generated fingerprints and labels.
    return total_reactive_fps, total_non_reactive_fps
import torch
def count_acc(logits, label):
    """Compute classification accuracy.

    Args:
        logits: (N, C) tensor of unnormalized class scores.
        label: (N,) tensor of ground-truth class indices (same device as logits).

    Returns:
        float: fraction of samples whose predicted class matches the label.
    """
    # softmax is monotonic, so taking argmax of the raw logits yields the
    # same predictions without the extra normalization pass.
    pred = logits.argmax(dim=1)
    # .float() keeps the tensor on whatever device it already lives on, so no
    # explicit CPU/CUDA branch (the old torch.cuda.is_available() check tested
    # global availability, not the tensors' actual device) is needed.
    return (pred == label).float().mean().item()
def cuda_argmin(a, axis):
    """ Location of minimum GPUArray elements.

    Thin wrapper around ``skcuda.misc.argmin`` with ``keepdims=True``, so the
    reduced axis is retained with length 1 (see the example's column shape).
    Requires PyCUDA and scikit-cuda with an active CUDA context.

    Parameters:
        a (gpu): GPUArray with the elements to find minimum values.
        axis (int): The dimension to evaluate through.

    Returns:
        gpu: Location of minimum values.

    Examples:
        >>> a = cuda_argmin(cuda_give([[1, 2, 3], [6, 5, 4]]), axis=1)
        array([[0],
               [2]], dtype=uint32)
        >>> type(a)
        <class 'pycuda.gpuarray.GPUArray'>
    """
    return skcuda.misc.argmin(a, axis, keepdims=True)
def _process_columns(validated_data, context):
    """Process the used_columns field of a serializer.

    Verifies if each column is new or not. If not new, it verifies that it is
    compatible with the column already existing in the workflow; existing
    columns additionally get their category order synchronized (a side
    effect on the workflow).

    :param validated_data: iterable of parsed column items (dict-like with at
        least 'name' and 'is_key'; 'data_type' and 'categories' are also read)
    :param context: dictionary with additional objects for serialization;
        must contain 'workflow'
    :raises Exception: on a missing name, a non-existing key column, or an
        incompatible existing column
    :return: List of new columns (whatever _create_columns returns)
    """
    new_columns = []
    for citem in validated_data:
        cname = citem.get('name')
        if not cname:
            raise Exception(
                _('Incorrect column name {0}.').format(cname))
        # Search for the column in the workflow columns
        col = context['workflow'].columns.filter(name=cname).first()
        if not col:
            # Accumulate the new columns just in case we have to undo
            # the changes
            if citem['is_key']:
                raise Exception(
                    _('Action contains non-existing key column "{0}"').format(
                        cname))
            new_columns.append(citem)
            continue
        # Processing an existing column. Check data type compatibility
        # NOTE(review): 'data_type' is read with .get() but 'is_key' and
        # 'categories' with direct indexing — presumably the serializer
        # guarantees those keys; confirm.
        is_not_compatible = (
            col.data_type != citem.get('data_type')
            or col.is_key != citem['is_key']
            or set(col.categories) != set(citem['categories'])
        )
        if is_not_compatible:
            # The two columns are different
            raise Exception(_(
                'Imported column {0} is different from existing '
                + 'one.').format(cname))
        # Update the column categories (just in case the new one has a
        # different order)
        col.set_categories(citem['categories'])
    return _create_columns(new_columns, context)
def module_of_callable(c):
    """Find name of module where callable is defined

    Arguments:
        c {Callable} -- Callable to inspect

    Returns:
        str -- Module name (as for x.__module__ attribute)
    """
    import types

    # Plain functions defined with def or lambda carry their defining module
    # directly. An isinstance check against types.FunctionType matches the
    # same set of objects as the previous fragile string comparison
    # (type(c).__name__ == 'function') but survives aliasing/subtyping.
    if isinstance(c, types.FunctionType):
        return c.__module__
    # Some callable, probably a class instance with a __call__ method: report
    # the module of the class declaration rather than the module where the
    # instance happened to be created.
    return c.__class__.__module__
def lung_seg(input_shape, num_filters=(16, 32, 128), padding='same'):
    """Generate CN-Net model to train on CT scan images for lung seg

    Arbitrary number of input channels and output classes are supported.

    Arguments:
        input_shape - (? (number of examples),
                        input image height (pixels),
                        input image width (pixels),
                        input image features (1 for grayscale, 3 for RGB))
        num_filters - number of filters (exactly 3 should be passed; only
                      indices 0-2 are used below)
        padding - 'same' or 'valid'
    Output:
        CN-Net model expecting input shape (height, width, channels) and generates
        output with the same shape (height, width, channels).
    """
    # An immutable tuple default replaces the previous mutable-list default
    # (the classic shared-mutable-default pitfall); indexing is unchanged.
    x_input = Input(input_shape)
    ### LUNG SEGMENTATION
    # Encoder: three conv + max-pool stages with growing filter counts.
    x = Conv2D(num_filters[0], kernel_size=3, activation='relu', padding=padding)(x_input)
    x = MaxPooling2D(pool_size=2, padding=padding)(x)
    x = Conv2D(num_filters[1], kernel_size=3, activation='relu', padding=padding)(x)
    x = MaxPooling2D(pool_size=2, padding=padding)(x)
    x = Conv2D(num_filters[2], kernel_size=3, activation='relu', padding=padding)(x)
    x = MaxPooling2D(pool_size=2, padding=padding)(x)
    x = Dense(num_filters[2], activation='relu')(x)
    # Decoder: three upsampling + conv stages mirroring the encoder.
    x = UpSampling2D(size=2)(x)
    x = Conv2D(num_filters[2], kernel_size=3, activation='sigmoid', padding=padding)(x)
    x = UpSampling2D(size=2)(x)
    x = Conv2D(num_filters[1], kernel_size=3, activation='sigmoid', padding=padding)(x)
    x = UpSampling2D(size=2)(x)
    lung_seg = Conv2D(1, kernel_size=3, activation='sigmoid',
                      padding=padding)(x)  # identifying lungs
    lseg = Model(inputs=x_input, outputs=lung_seg, name='lung_seg')
    return lseg
def set_cluster_status(event, context):
    """Set the status of a cluster, ie active, inactive, maintainance_mode, etc.

    Reads ``cluster_status`` and ``cluster_name`` from the Lambda event's
    query string and writes the new status to the cluster DynamoDB table.

    :param event: API-Gateway Lambda event with 'queryStringParameters'
    :param context: Lambda context (unused)
    :return: API-Gateway-style dict with statusCode and body
    """
    # Narrowed from a bare except: a missing key raises KeyError and a
    # missing/None queryStringParameters raises TypeError; anything else is a
    # programming error that should propagate.
    try:
        cluster_status = event['queryStringParameters']['cluster_status']
    except (KeyError, TypeError):
        return {
            "statusCode": 500,
            "body": {"message": f'Must provide a status variable in uri query string'}
        }
    try:
        cluster_name = event['queryStringParameters']['cluster_name']
    except (KeyError, TypeError):
        return {
            "statusCode": 500,
            "body": {"message": f'Must provide a cluster_name variable in uri query string'}
        }
    try:
        CLUSTER_TABLE.update_item(
            Key={
                'id': cluster_name,
            },
            UpdateExpression="set cluster_status = :r",
            ExpressionAttributeValues={
                ':r': cluster_status
            },
            ReturnValues="UPDATED_NEW"
        )
        return {
            "statusCode": 200,
            "body": {"message": f'Updated cluster status for {cluster_name} to {cluster_status}'}
        }
    except Exception:
        # "Falied" typo fixed to "Failed" in both the log line and the body.
        print(f'Failed to update cluster status for {cluster_name}')
        return {
            "statusCode": 500,
            "body": {"message": f'Failed to update cluster status for {cluster_name}'}
        }
import numpy
def rep(x, n):
    """Repeat each element of x n times consecutively (interpolation helper).

    :param x: sequence of numeric values
    :param n: repetition count per element
    :return: float64 numpy array of length len(x) * n, e.g.
        rep([1, 2], 2) -> array([1., 1., 2., 2.])
    """
    # numpy.repeat replaces the hand-rolled double loop; the explicit float
    # cast preserves the original float64 output (the old code filled a
    # numpy.zeros array, which is float64 regardless of x's dtype).
    return numpy.repeat(numpy.asarray(x, dtype=float), n)
def insert_new_datamodel(database: Database, data_model):
    """Insert a new datamodel in the datamodels collection.

    Strips any existing Mongo "_id" (so the driver assigns a fresh one),
    stamps the document with the current ISO timestamp, and inserts it.
    """
    # pop with a default is the compact equivalent of "if '_id' in: del".
    data_model.pop("_id", None)
    data_model["timestamp"] = iso_timestamp()
    return database.datamodels.insert_one(data_model)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.