content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
from typing import Union
from re import T
from typing import Sequence
def inject(
    # NOTE(review): `T` is imported from `re` (a regex template flag), not a
    # typing.TypeVar, so `Union[T, str]` / `-> T` are almost certainly
    # unintended -- confirm and replace with a proper TypeVar.
    dependency: Union[T, str],
    *,
    namespace: Union[str, None] = None,
    group: Union[str, None] = None,
    exclude_groups: Union[Sequence[str], None] = None,
    lazy: bool = False,
    optional: bool = False,
) -> T:
    """
    Injects the requested dependency by instantiating a new instance of it or a
    singleton instance if specified by the injectable. Returns an instance of the
    requested dependency.
    One can use this method directly for injecting dependencies though this is not
    recommended. Use the :meth:`@autowired <injectable.autowired>` decorator and the
    :class:`Autowired <injectable.Autowired>` type annotation for dependency injection
    to be automatically wired to a function's call instead.
    Will log a warning indicating that the injection container is empty when invoked
    before :meth:`load_injection_container <injectable.load_injection_container>` is
    called.
    Raises
    :class:`InjectionError <injectable.errors.InjectionError>`
    when unable to resolve the requested dependency. This can be due to a variety of
    reasons: the requested dependency wasn't loaded into the container; the namespace
    isn't correct; the group isn't correct; there are multiple injectables for the
    dependency and none or multiple are marked as primary. When parameter ``optional``
    is ``True`` no error will be raised when no injectable that matches requested
    qualifier/class and group is found in the specified namespace though in ambiguous
    cases that resolving a primary injectable is impossible an error will still be
    raised.
    :param dependency: class, base class or qualifier of the dependency to be used for
        lookup among the registered injectables.
    :param namespace: (optional) namespace in which to look for the dependency. Defaults
        to :const:`injectable.constants.DEFAULT_NAMESPACE`.
    :param group: (optional) group to filter out other injectables outside of this
        group. Defaults to None.
    :param exclude_groups: (optional) list of groups to be excluded. Defaults to None.
    :param lazy: (optional) when True will return an instance which will automatically
        initialize itself when first used but not before that. Defaults to False.
    :param optional: (optional) when True this function returns None if no injectable
        matches the qualifier/class and group inside the specified namespace instead
        of raising an :class:`InjectionError <injectable.errors.InjectionError>`.
        Ambiguous cases where resolving a primary injectable is impossible will
        still raise :class:`InjectionError <injectable.errors.InjectionError>`.
        Defaults to False.
    Usage::
        >>> from foo import Foo
        >>> from injectable import inject
        >>>
        >>> class Bar:
        ...     def __init__(self, foo: Foo = None):
        ...         self.foo = foo or inject(Foo)
    """
    dependency_name = get_dependency_name(dependency)
    registry_type = get_dependency_registry_type(dependency)
    # Collect every injectable registered under this name in the namespace.
    matches = get_namespace_injectables(
        dependency_name, registry_type, namespace or DEFAULT_NAMESPACE
    )
    if not matches:
        if not optional:
            raise InjectionError(
                f"No injectable matches {registry_type.value} '{dependency_name}'"
            )
        return None
    # Optionally narrow the candidates by group membership/exclusion.
    if group is not None or exclude_groups is not None:
        matches = filter_by_group(matches, group, exclude_groups)
        if not matches:
            if not optional:
                raise InjectionError(
                    f"No injectable for {registry_type.value} '{dependency_name}'"
                    f" matches group '{group}'"
                )
            return None
    # Ambiguity resolution (primary injectables) happens here and may still
    # raise InjectionError even when `optional` is True.
    injectable = resolve_single_injectable(dependency_name, registry_type, matches)
    return injectable.get_instance(lazy=lazy)
import os
def load_dataset_simple(src):
    """
    Read and assemble the frontal-face dataset.

    Training: 40 * 2 frontal faces (female/male)
    Testing:  10 * 2 frontal faces (female/male)
    Train/test sets are disjoint and contain no duplicates.

    :param src: directory (with trailing separator) containing the images
    :return: dict with keys train_images, train_labels, test_images,
        test_labels; image arrays are flattened to (-1, 3072) float32
    """
    file_names = os.listdir(src)
    # Collect samples in plain Python lists; the original called .append()
    # on np.ndarray objects, which raises AttributeError (ndarray has no
    # append) and would be O(n^2) even with np.append.
    train_images, train_labels = [], []
    test_images, test_labels = [], []
    count_f = 0  # female frontal images seen so far
    count_m = 0  # male frontal images seen so far
    # File names look like CF0001_1101_00F.jpg:
    # '1' marks the first, unscaled measurement; trailing 'F' marks a frontal face.
    for file_name in file_names:
        if not (file_name[-12] == '1' and file_name[-5] == 'F'):
            continue
        # Load, force RGB, and downscale to 32x32.
        img = Image.open(src + file_name)
        img = img.convert("RGB")
        img = img.resize((32, 32))
        _data = np.array(img.getdata(), dtype=np.float32)
        # Label: 0 = female, 1 = male ('M' at index 1 of the file name).
        _label = 1 if file_name[1] == 'M' else 0
        # Dataset holds 58 female / 51 male frontal images in total:
        # first 40 of each sex go to training, the next 10 to testing.
        if _label == 0:
            if count_f < 40:
                train_images.append(_data)
                train_labels.append(_label)
            elif count_f < 50:
                test_images.append(_data)
                test_labels.append(_label)
            count_f += 1
        else:
            if count_m < 40:
                train_images.append(_data)
                train_labels.append(_label)
            elif count_m < 50:
                test_images.append(_data)
                test_labels.append(_label)
            count_m += 1
    # Flatten each 32x32x3 image into a 3072-long feature vector.
    datasets = {
        "train_images": np.reshape(np.array(train_images, dtype=np.float32), (-1, 3072)),
        "train_labels": np.array(train_labels, dtype=np.int32),
        "test_images": np.reshape(np.array(test_images, dtype=np.float32), (-1, 3072)),
        "test_labels": np.array(test_labels, dtype=np.int32),
    }
    print("--------------------\n训练图片数量: %d 验证图片数量: %d \n--------------------\n"
          % (datasets["train_images"].shape[0], datasets["test_images"].shape[0]))
    return datasets
import logging
def load2(file, collapsed=True, index=None):
"""Loads Laue diffraction data."""
if file['stacked'] is True:
files = loadstack(file)
if file['ext'] == 'h5':
vals = loadh5files(files, file['h5']['key'])
else:
if file['ext'] == 'h5':
begin, end, step = file['range']
vals = loadh5(file['path'], file['h5']['key'])[begin:step:end]
vals = np.swapaxes(vals, 0, 2)
vals = np.swapaxes(vals, 0, 1)
vals = vals.copy()
if index is None:
index = cherrypickpixels(vals, file['threshold'], file['frame'])
if collapsed is True:
vals = collapse(vals, index)
datasize = vals.shape[0] * vals.shape[1] * 4e-6 # [MB]
else:
datasize = vals.shape[0] * vals.shape[1] * vals.shape[2] * 4e-6 # [MB]
logging.info(
"Data size: {}, {:.2f} MB".format(
vals.shape, datasize))
return vals, index | f066d6e738dfd2ae4b503f8416fc5df6384f7a5d | 3,636,602 |
import logging
def get_api_user(name):
    """
    Check if the user is registered on faceit
    :returns 1 Ok
    :returns None nOk
    """
    try:
        logging.info("get_api_data_user")
        details = FaceitData(FACEIT_API).player_details(name)
    except ValueError:
        logging.error("Faceit Name is not correct !")
        return None
    # A truthy payload means the player exists on Faceit.
    return 1 if details else None
def get_storage_client():
    """Return storage client."""
    # Lazily create and memoize a single module-level storage.Client so that
    # repeated calls share one client instance.
    global _client
    if not _client:
        _client = storage.Client()
    return _client
def upload_file_to_s3(image, fileStoreObj, acl="public-read"):
    """S3 file uploader.

    :param image: file body (bytes or file-like object) to upload
    :param fileStoreObj: object providing .filename and .content_type
        (e.g. a werkzeug FileStorage)
    :param acl: canned S3 ACL applied to the uploaded object
    :return: the object's public URL string on success, or the caught
        exception object on failure.
        NOTE(review): returning the exception makes the return type
        inconsistent -- callers must type-check the result; consider raising.
    """
    app = current_app._get_current_object()
    # Credentials and bucket come from the Flask app config.
    s3 = boto3.client(
        "s3",
        aws_access_key_id=app.config['S3_KEY'],
        aws_secret_access_key=app.config['S3_SECRET']
    )
    try:
        s3.put_object(Body=image,
                      Bucket=app.config['S3_BUCKET'],
                      ACL=acl,
                      ContentType=fileStoreObj.content_type,
                      Key=fileStoreObj.filename)
    except Exception as e:
        print("An Error occurred: ", e)
        return e
    return "{}{}".format(app.config["S3_LOCATION"],
                         fileStoreObj.filename)
def vae_bc(
    transitions=None,
    # Adam optimizer settings
    lr_enc=1e-3,
    lr_dec=1e-3,
    # Training settings
    minibatch_size=100,
):
    """
    VAE Behavioral Cloning (VAE-BC) control preset.

    Args:
        transitions: dictionary of transitions generated by
            cpprb.ReplayBuffer.get_all_transitions()
        lr_enc (float): Learning rate for the encoder.
        lr_dec (float): Learning rate for the decoder.
        minibatch_size (int): Number of experiences to sample in each
            training update.
    """
    def _vae_bc(env):
        disable_on_policy_mode()
        device = get_device()
        latent_dim = env.action_space.shape[0] * 2

        # Encoder half of the VAE.
        enc_net = fc_bcq_encoder(env, latent_dim=latent_dim).to(device)
        encoder = BcqEncoder(
            model=enc_net,
            latent_dim=latent_dim,
            optimizer=Adam(enc_net.parameters(), lr=lr_enc),
            name="encoder",
        )

        # Decoder half of the VAE.
        dec_net = fc_bcq_decoder(env, latent_dim=latent_dim).to(device)
        decoder = BcqDecoder(
            model=dec_net,
            latent_dim=latent_dim,
            space=env.action_space,
            optimizer=Adam(dec_net.parameters(), lr=lr_dec),
            name="decoder",
        )

        # Pre-load the replay buffer with the offline transitions, if any.
        buffer = ExperienceReplayBuffer(1e7, env)
        if transitions is not None:
            buffer.store(buffer.samples_from_cpprb(transitions, device="cpu"))
        set_replay_buffer(buffer)

        return VaeBC(
            encoder=encoder,
            decoder=decoder,
            minibatch_size=minibatch_size,
        )
    return _vae_bc
import itertools
import six
def get_mode_fn(num_gpus, variable_strategy, num_workers):
    """Returns a model_fn for tf.estimator that builds the shadownet model.

    :param num_gpus: number of GPUs requested. NOTE(review): the body reads
        FLAGS.num_gpus instead of this argument -- confirm which is intended.
    :param variable_strategy: 'GPU' places consolidated variables/updates on
        /gpu:0, anything else uses /cpu:0.
    :param num_workers: replicas to aggregate when FLAGS.sync is set.
    """
    def _mode_fun(features, labels, mode, params):
        is_training = (mode == tf.estimator.ModeKeys.TRAIN)
        # Inputs are pre-sharded per tower: labels is
        # [score_maps, geo_maps, training_masks], each indexed by tower.
        tower_features = features
        tower_score_maps = labels[0]
        tower_geo_maps = labels[1]
        tower_training_masks = labels[2]
        tower_losses = []
        tower_gradvars = []
        tower_summaries = []
        num_devices = FLAGS.num_gpus
        device_type = 'gpu'
        reuse_variables = None
        # Build one model replica ("tower") per GPU, sharing variables.
        for i in range(num_devices):
            worker_device = '/{}:{}'.format(device_type, i)
            device_setter = local_device_setter(worker_device=worker_device)
            with tf.name_scope('tower_%d' % i) as name_scope:
                with tf.device(device_setter):
                    total_loss, gradvars, summaries = _tower_fn(
                        is_training,
                        tower_features[i],
                        tower_score_maps[i],
                        tower_geo_maps[i],
                        tower_training_masks[i],
                        reuse_variables)
                    tower_losses.append(total_loss)
                    tower_gradvars.append(gradvars)
                    tower_summaries.append(summaries)
                    # After the first tower, subsequent towers reuse variables.
                    reuse_variables = True
                    if i == 0:
                        # Only trigger batch_norm moving mean and variance update from
                        # the 1st tower. Ideally, we should grab the updates from all
                        # towers but these stats accumulate extremely fast so we can
                        # ignore the other stats from the other towers without
                        # significant detriment.
                        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,
                                                       name_scope)
        # Now compute global loss and gradients.
        gradvars = []
        with tf.name_scope('gradient_averaging'):
            all_grads = {}
            # Group every tower's gradient by the variable it belongs to.
            for grad, var in itertools.chain(*tower_gradvars):
                if grad is not None:
                    all_grads.setdefault(var, []).append(grad)
            for var, grads in six.iteritems(all_grads):
                # Average gradients on the same device as the variables
                with tf.device(var.device):
                    if len(grads) == 1:
                        avg_grad = grads[0]
                    else:
                        avg_grad = tf.multiply(tf.add_n(grads), 1. / len(grads))
                    gradvars.append((avg_grad, var))
        if FLAGS.pretrained_model_path is not None:
            # Warm-start the ResNet-50 backbone from a checkpoint.
            tf.train.init_from_checkpoint(FLAGS.pretrained_model_path, {"resnet_v1_50/":"resnet_v1_50/"})
            # restore only once
            FLAGS.pretrained_model_path = None
        # Device that runs the ops to apply global gradient updates.
        consolidation_device = '/gpu:0' if variable_strategy == 'GPU' else '/cpu:0'
        with tf.device(consolidation_device):
            global_step = tf.train.get_global_step()
            starter_learning_rate = FLAGS.learning_rate
            learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,
                                                       FLAGS.decay_steps, FLAGS.decay_rate,
                                                       staircase=True)
            loss = tf.reduce_mean(tower_losses, name='loss')
            tensors_to_log = {'global_step': global_step, 'learning_rate': learning_rate, 'loss': loss}
            logging_hook = tf.train.LoggingTensorHook(
                tensors=tensors_to_log, every_n_iter=10)
            summary_hook = tf.train.SummarySaverHook(
                save_steps=10,
                output_dir='/data/output/',
                summary_op=tower_summaries[0])
            train_hooks = [logging_hook, summary_hook]
            optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
            if FLAGS.sync:
                # Synchronous distributed training: aggregate gradients
                # across workers before applying them.
                optimizer = tf.train.SyncReplicasOptimizer(
                    optimizer, replicas_to_aggregate=num_workers)
                sync_replicas_hook = optimizer.make_session_run_hook(params.is_chief)
                train_hooks.append(sync_replicas_hook)
            # save moving average
            variable_averages = tf.train.ExponentialMovingAverage(
                FLAGS.moving_average_decay, global_step)
            variables_averages_op = variable_averages.apply(tf.trainable_variables())
            # Create single grouped train op
            train_op = [
                optimizer.apply_gradients(
                    gradvars, global_step=tf.train.get_global_step()),
                variables_averages_op
            ]
            train_op.extend(update_ops)
            train_op = tf.group(*train_op)
            return tf.estimator.EstimatorSpec(
                mode=mode,
                loss=loss,
                train_op=train_op,
                training_hooks=train_hooks)
    return _mode_fun
def link_to_profile(request):
    """
    If the user is a temporary one who was logged in via
    an institution (not through a Uniauth profile), offers
    them the choice between logging to an existing Uniauth
    account or creating a new one.
    The institution account is (eventually) linked to the
    Uniauth profile the user logged into / created.

    :param request: the incoming HttpRequest
    :return: an HttpResponseRedirect or rendered template response
    """
    next_url = request.GET.get('next')
    context = _get_global_context(request)
    if not next_url:
        next_url = get_redirect_url(request)
    params = urlencode({'next': next_url})
    context['next_url'] = next_url
    # If the user is not authenticated at all, redirect to login page
    if not request.user.is_authenticated:
        return HttpResponseRedirect(reverse('uniauth:login') + '?' + params)
    # If the user is already authenticated + verified, proceed to next page
    if not is_tmp_user(request.user) and not is_unlinked_account(request.user):
        return HttpResponseRedirect(next_url)
    # If the user is temporary, but was not logged in via an institution
    # (e.g. created through Uniauth, but not verified), redirect to signup
    if not is_unlinked_account(request.user):
        return HttpResponseRedirect(reverse('uniauth:signup') + '?' + params)
    # At this point, we've ensured the user is temporary and was
    # logged in via an institution. We just need to handle the
    # Login Form, if the user chooses to link to an existing account.
    # If it's a POST request, attempt to validate the form
    if request.method == "POST":
        form = LoginForm(request, request.POST)
        # Authentication successful
        if form.is_valid():
            # Capture the unlinked (institution) user before auth_login
            # replaces request.user with the Uniauth profile user.
            unlinked_user = request.user
            username_split = get_account_username_split(request.user.username)
            # Log in as the authenticated Uniauth user
            user = form.get_user()
            auth_login(request, user)
            # Merge the unlinked account into the logged in profile,
            # then add the institution account described by the username
            merge_model_instances(user, [unlinked_user])
            _add_institution_account(user.uniauth_profile, username_split[1],
                    username_split[2])
            slug = username_split[1]
            context['institution'] = Institution.objects.get(slug=slug)
            return render(request, 'uniauth/link-success.html', context)
        # Authentication failed: render form errors
        else:
            context['form'] = form
            return render(request, 'uniauth/link-to-profile.html', context)
    # Otherwise, render a blank Login form
    else:
        form = LoginForm(request)
        context['form'] = form
        return render(request, 'uniauth/link-to-profile.html', context)
from pathlib import Path
def obtain_fea_im_subset(Row_range, Col_range, tsList, ts_stack_foler, fC_hdr, fC_img, bandName):
"""
fea_im_subset = eng.zeros(int(tsLen),int(num_fea),int(d2),int(d1)) # this is matlab.double type
to convert matlab.double to ndarray
For one-dimensional arrays, access only the "_data" property of the Matlab array.
For multi-dimensional arrays you need to reshape the array afterwards.
np.array(x._data).reshape(x.size[::-1]).T
"""
logger.info("obtain_fea_im_subset")
d1 = Row_range[1] - Row_range[0] + 1
d2 = Col_range[1] - Col_range[0] + 1
num_band = len(bandName)
num_fea = num_band + 1
tsLen = len(tsList)
dim_fea = (d1, d2, num_fea, tsLen)
fea_im_subset = np.zeros(dim_fea)
FILL = 0
for t in range(0, tsLen):
for i in range(0, num_band):
hdrPath_t_i = ts_stack_foler + "/" + fC_hdr[bandName[i]][t]
imgPath_t_i = ts_stack_foler + "/" + fC_img[bandName[i]][t]
# print(imgPath_t_i)
# --------------------Check file existance--------------------------
if Path(hdrPath_t_i).is_file():
# logger.info(hdrPath_t_i)
info = envi.read_envi_header(hdrPath_t_i)
img = envi.open(hdrPath_t_i)
img_open = img.open_memmap(writeable=True)
im_t_i = img_open[Row_range[0] - 1:Row_range[1], Col_range[0] - 1:Col_range[1], 0]
# print(im_t_i.shape)
# im_t_i = np.copy(img_open[:Row_range[1]+1,:Col_range[1],0])
else:
logger.info("The -%d-th TS is empty!!!" % t)
print("The -%d-th TS is empty!!!" % t)
im_nan = np.zeros((d1, d2))
im_nan[im_nan == 0] = np.nan
im_t_i = im_nan
FILL = 1
# fea_im_subset[t][i][:][:]
fea_im_subset[:, :, i, t] = im_t_i
if FILL == 1:
nans, x = np.isnan(fea_im_subset), lambda z: z.nonzero()[0]
fea_im_subset[nans] = np.interp(x(nans), x(~nans),
fea_im_subset[~nans]) # linear Interpolation, in Matlab: Cubic spline
# fea_im_subset = eng.fillmissing(fea_im_subset,"spline");
for t in range(0, tsLen):
# Calculate additional features ---------------------------------------
fea_im_subset[:, :, num_band, t] = fea_im_subset[:, :, 1, t] - fea_im_subset[:, :, 0, t]
return fea_im_subset | 257156c63d3ea06f53ed904dcddcc3c60bf9d937 | 3,636,609 |
def himmelblau(individual):
    """The Himmelblau's function is multimodal with 4 defined minimums in
    :math:`[-6, 6]^2`.
    .. list-table::
       :widths: 10 50
       :stub-columns: 1
       * - Type
         - minimization
       * - Range
         - :math:`x_i \\in [-6, 6]`
       * - Global optima
         - :math:`\\mathbf{x}_1 = (3.0, 2.0)`, :math:`f(\\mathbf{x}_1) = 0`\n
           :math:`\\mathbf{x}_2 = (-2.805118, 3.131312)`, :math:`f(\\mathbf{x}_2) = 0`\n
           :math:`\\mathbf{x}_3 = (-3.779310, -3.283186)`, :math:`f(\\mathbf{x}_3) = 0`\n
           :math:`\\mathbf{x}_4 = (3.584428, -1.848126)`, :math:`f(\\mathbf{x}_4) = 0`\n
    * - Function
         - :math:`f(x_1, x_2) = (x_1^2 + x_2 - 11)^2 + (x_1 + x_2^2 -7)^2`
    .. plot:: code/benchmarks/himmelblau.py
       :width: 67 %
    """
    # Backslashes in the LaTeX markup are escaped (\\in, \\mathbf) so the
    # docstring contains no invalid escape sequences (SyntaxWarning on
    # modern Python); the intentional \n line breaks are preserved.
    x1, x2 = individual[0], individual[1]
    # Returned as a 1-tuple to match the DEAP fitness convention.
    return (x1 * x1 + x2 - 11) ** 2 + (x1 + x2 * x2 - 7) ** 2,
def get_deb_architecture():
    """
    Returns the deb architecture of the local system, e.g. amd64, i386, arm
    """
    # Runs `dpkg --print-architecture` via Fabric's local() and returns the
    # captured stdout; only meaningful on Debian-based hosts.
    return local('dpkg --print-architecture', capture=True)
def Ry(angle, degrees=False):
    """Generate the :math:`3\\times3` rotation matrix :math:`R_y(\\theta)`
    for a rotation about the :math:`y` axis by an angle :math:`\\theta`.

    Parameters
    ----------
    angle : float
        The rotation angle :math:`\\theta` in *radians*, or in *degrees*
        when `degrees=True`.
    degrees : bool, optional
        if `True`, then `angle` is converted from degrees to radians.

    Returns
    -------
    :class:`~numpy:numpy.ndarray`
        :math:`3\\times3` rotation matrix:

        .. math::
           R_y = \\begin{pmatrix}
           \\cos\\theta & 0 & \\sin\\theta\\\\
           0 & 1 & 0\\\\
           -\\sin\\theta & 0 & \\cos\\theta
           \\end{pmatrix}

    Examples
    --------
    >>> import numpy as np
    >>> from sknano.core.math import Ry
    >>> np.alltrue(Ry(np.pi/4) == Ry(45, degrees=True))
    True
    """
    theta = np.radians(angle) if degrees else angle
    c = np.cos(theta)
    s = np.sin(theta)
    rmat = np.array([[c, 0.0, s],
                     [0.0, 1.0, 0.0],
                     [-s, 0.0, c]])
    # Snap entries within machine epsilon of zero to exactly zero so that
    # e.g. Ry(pi/2) contains clean zeros instead of ~1e-17 residues.
    rmat[np.abs(rmat) <= np.finfo(float).eps] = 0.0
    return rmat
import os
import tqdm
def augment_subdataset_split(args, subfolder, id_list, data_suffixes, aug_name, augment=True, multiplier=1):
    """
    Augment a specific dataset split.

    :param args: command line arguments with data_dir and aug_data_dir
    :param subfolder: sub-dataset folder. Either hgg or lgg
    :param id_list: list of sequences
    :param data_suffixes: list of modality suffixes
    :param aug_name: name of output files
    :param augment: perform augmentation or not
    :param multiplier: dataset multiplier
    :return: list of augmented ids, corresponding brain bboxes and tumor bboxes
    """
    input_shape = read_nii_header(os.path.join(args.data_dir, subfolder), id_list[0], data_suffixes[0]).get_data().shape
    brain_bboxes = np.zeros(shape=(len(id_list) * multiplier, 2, 3), dtype=int)
    tumor_bboxes = np.zeros(shape=(len(id_list) * multiplier, 2, 3), dtype=int)
    # The module is imported as `import tqdm`, so the progress-bar callable
    # is tqdm.tqdm; calling the bare module object raises TypeError.
    for m in tqdm.tqdm(range(len(id_list) * multiplier)):
        i = m % len(id_list)
        f = id_list[i]
        data_size = config.input_modalities
        sample = np.zeros(shape=(1, data_size,) + input_shape, dtype=np.float32)
        bspline = gen_bspline(np.zeros(shape=input_shape))
        bboxes = np.zeros(shape=(len(data_suffixes),) + (2, 3))
        for j, s in enumerate(data_suffixes):
            image_handle = read_nii_header(os.path.join(args.data_dir, subfolder), f, s)
            image = image_handle.get_data().astype(np.float32)
            if augment:
                image = augment_data(image, bspline, sitk.sitkLinear)
            bboxes[j] = bbox3(image)
            mask = image > 0
            image = normalize(image, mask)
            sample[0, j] = image
        # Union of the per-modality brain bounding boxes.
        bbox_min = np.min(bboxes[:, 0, :], axis=0).ravel().astype(int)
        bbox_max = np.max(bboxes[:, 1, :], axis=0).ravel().astype(int)
        bbox = np.zeros(shape=(2, 3), dtype=int)
        bbox[0] = bbox_min
        bbox[1] = bbox_max
        brain_bboxes[m] = bbox
        sample_cropped = resample_np(sample[:, :, bbox_min[0]:bbox_max[0], bbox_min[1]:bbox_max[1], bbox_min[2]:bbox_max[2]],
                                     (1, data_size,) + config.brain_reshape_to,
                                     1)
        np.save(os.path.join(args.aug_data_dir, aug_name + str(m) + config.suffix_data), sample.astype(np.float32), allow_pickle=False)
        np.save(os.path.join(args.aug_data_dir, aug_name + str(m) + config.suffix_data_cropped), sample_cropped.astype(np.float32), allow_pickle=False)
        label_handle = read_nii_header(os.path.join(args.data_dir, subfolder), f, config.suffix_seg)
        label_data = label_handle.get_data().astype(np.float32)
        # dict.iteritems() is Python 2 only; .items() works on 2 and 3.
        for key, value in config.dataset_transform_dict.items():
            label_data[label_data == key] = value
        if augment:
            label_data = augment_data(label_data, bspline, sitk.sitkNearestNeighbor)
        tumor_bboxes[m] = bbox3(label_data > 0)
        # Express the tumor bbox relative to the brain bbox origin.
        tumor_bboxes[m, 0] = tumor_bboxes[m, 0] - bbox[0]
        tumor_bboxes[m, 1] = tumor_bboxes[m, 1] - bbox[0]
        label_data = label_data.reshape((1, 1,) + label_data.shape)
        label_data_cropped = resample_np(label_data[:, :, bbox_min[0]:bbox_max[0], bbox_min[1]:bbox_max[1], bbox_min[2]:bbox_max[2]],
                                         (1, 1,) + config.brain_reshape_to,
                                         0)
        np.save(os.path.join(args.aug_data_dir, aug_name + str(m) + config.suffix_label), label_data.astype(np.float32), allow_pickle=False)
        np.save(os.path.join(args.aug_data_dir, aug_name + str(m) + config.suffix_label_cropped), label_data_cropped.astype(np.float32), allow_pickle=False)
    return [aug_name + str(i) for i in range(len(id_list) * multiplier)], brain_bboxes, tumor_bboxes
import os
def compute_ibaq_1sample(df, organism='human'):
"""IBAQ values computed for total intensities of all proteins
Parameters
----------
df : pandas dataframe
proteomics dataset with columns as samples and rows as proteins
organism : str
organism (human, rat, or mouse) to calibrate each protein
Returns
-------
df : pandas dataframe
proteomics dataset normalized by IBAQ
"""
df_ref = pd.read_csv(
os.path.join(resource_path,
'%s_proteome_mw_peptides.csv' % organism))
num_theor_peptides, ibaq_list, log10_ibaq = [], [], []
for protein in df['Uniprot_Id'].tolist():
try:
uid = protein.strip().split('|')[1]
except IndexError:
uid = protein
num_theor_peptides.append(df_ref[
df_ref.UniprotID == uid].num_theoretical_peptides.values[0])
df['num_theoretical_peptides'] = num_theor_peptides
for id in range(len(df)):
ibaq = df['default~cq_max_sum'].iloc[id] /\
df['num_theoretical_peptides'].iloc[id]
ibaq_list.append(ibaq)
log10_ibaq.append(np.log10(ibaq))
df['IBAQ'] = ibaq_list
df['log10_IBAQ'] = log10_ibaq
return df | c6b52318e6c3a579c969a75592b4282223f51bfc | 3,636,614 |
def zhongzhuang_adjustment_reservoir():
    """
    Real Name: ZhongZhuang Adjustment Reservoir
    Original Eqn: INTEG ( IF THEN ELSE(Transfer From ZhongZhuangWeir To ZhongZhuangAdjustmentReservoir+ZhongZhuang Adjustment Reservoir\ -Transfer From ZhongZhuangAdjustmentReservoir To BanXinWPP-Transfer From ZhongZhuangAdjustmentReservoir To DaNanWPP\ -ZhongZhuangAdjustmentReservoir Transfer Loss Amount>=5.05e+006, 0 , Transfer From ZhongZhuangWeir To ZhongZhuangAdjustmentReservoir\ -Transfer From ZhongZhuangAdjustmentReservoir To BanXinWPP-Transfer From ZhongZhuangAdjustmentReservoir To DaNanWPP\ -ZhongZhuangAdjustmentReservoir Transfer Loss Amount ), 5.05e+006)
    Units: m3
    Limits: (None, None)
    Type: component
    Max Storage Valume = 5050000 m^3 (2017); general output = 24000 m^3 per day(BanXin WPP assumes 15000 m^3; DaNan WPP assumes
    9000 m^3 ; overflow height 68m, designed flood discharge 2.83CMS, water input limit
    10cms.
    """
    # PySD-generated stateful component: delegates to the integral object
    # that accumulates the reservoir volume (initial value 5.05e+006 m3).
    return integ_zhongzhuang_adjustment_reservoir()
import requests
import random
def _request_esi_status() -> requests.Response:
    """Make request to ESI about current status with retries.

    NOTE(review): on a network error this returns an ``EsiStatus`` object
    rather than a ``requests.Response``, so the return annotation does not
    cover all paths -- confirm that callers handle both types.
    """
    max_retries = 3
    retries = 0
    while True:
        try:
            r = requests.get(
                "https://esi.evetech.net/latest/status/",
                timeout=(5, 30),
                headers={"User-Agent": f"{__package__};{__version__}"},
            )
        except (requests.exceptions.Timeout, requests.exceptions.ConnectionError):
            logger.warning("Network error when trying to call ESI", exc_info=True)
            return EsiStatus(
                is_online=False, error_limit_remain=None, error_limit_reset=None
            )
        # Retry only on transient gateway/service errors.
        if r.status_code not in {
            502,  # HTTPBadGateway
            503,  # HTTPServiceUnavailable
            504,  # HTTPGatewayTimeout
        }:
            break
        else:
            retries += 1
            if retries > max_retries:
                break
            else:
                logger.warning(
                    "HTTP status code %s - Retry %s/%s",
                    r.status_code,
                    retries,
                    max_retries,
                )
                # Exponential backoff with jitter; `sleep` is presumably
                # time.sleep imported elsewhere in the module -- confirm.
                wait_secs = 0.1 * (random.uniform(2, 4) ** (retries - 1))
                sleep(wait_secs)
    return r
def RunExampleConsumer(serialized_file_graph):
  """Runs the example consumer on the serialized_file_graph.

  Args:
    serialized_file_graph: mojom_files.MojomFileGraph as output by the mojom
      parser.

  Returns:
    The integer exit code of the example consumer.
  """
  examples_dir = os.path.dirname(os.path.abspath(__file__))
  example_consumer = os.path.join(examples_dir, 'example_consumer.go')
  src_root = os.path.abspath(os.path.join(examples_dir, '../../..'))
  environ = { 'GOPATH': os.path.dirname(src_root) }
  # print() as a function: `print environ` is Python-2-only statement syntax
  # and a SyntaxError under Python 3; the call form works on both.
  print(environ)
  cmd = ['go', 'run', example_consumer]
  # Feed the serialized graph to the Go program over stdin and wait for it.
  process = subprocess.Popen(cmd, stdin=subprocess.PIPE, env=environ)
  process.communicate(serialized_file_graph)
  return process.wait()
from typing import Callable
from typing import Any
def linnworks_api_session(func: Callable) -> Callable:
    """Use a Linnworks API session as a method decorator.

    Wraps *func* so that every call runs inside a LinnworksAPISession
    context (opened before the call, closed afterwards).
    """
    from functools import wraps

    # wraps() preserves the decorated function's name/docstring/signature,
    # which the original wrapper lost.
    @wraps(func)
    def wrapper_linnapi_session(*args: Any, **kwargs: Any) -> Any:
        with LinnworksAPISession():
            return func(*args, **kwargs)
    return wrapper_linnapi_session
def series_quat2euler(q0, q1, q2, q3, msg_name=""):
    """Convert pandas quaternion series q0..q3 into roll, pitch, yaw series.

    Arguments:
    q0-q3 -- pandas Series holding the quaternion components
    Keyword arguments:
    msg_name -- prefix for the names of the returned series (default "")
    """
    # Convert each quaternion sample; tf.quat2euler yields (yaw, pitch, roll).
    angles = np.array([tf.quat2euler(list(q)) for q in zip(q0, q1, q2, q3)])
    yaw, pitch, roll = angles.T
    index = q0.index
    roll_ser = pd.Series(name=msg_name + "roll", data=roll, index=index)
    pitch_ser = pd.Series(name=msg_name + "pitch", data=pitch, index=index)
    yaw_ser = pd.Series(name=msg_name + "yaw", data=yaw, index=index)
    return roll_ser, pitch_ser, yaw_ser
def _ShiftRight(x0, xs):
  """Shifts xs[:-1] one step to the right and attaches x0 on the left."""
  # Prepend x0 as a new leading element and drop the last element of xs,
  # keeping the overall length along axis 0 unchanged.
  return tf.concat([[x0], xs[:-1]], axis=0)
def compute_connected_components(self, compute_nx=True, probed_node=None, comps_to_merge=None, current_norm_vals=None):
    """
    Computes the NORMALIZED connected components of the network.
    If compute_nx is True, actually computes components from scratch using networkx.
    Otherwise, we update self.connected_components, self.connected_component_sizes, and
    self.components based on comps_to_merge.
    UPDATE: As of 7/25, we now keep track of node_component_sizes, i.e. the size of the
    component each node is in. This is managed in BOTH update_neighbors AND
    compute_connected_components. It is an np array with entries corresponding to rows in the
    feature matrix.

    :param compute_nx: recompute components from scratch via networkx when True
    :param probed_node: node whose component absorbs comps_to_merge (incremental mode)
    :param comps_to_merge: component ids to merge into the probed node's component
    :param current_norm_vals: previously returned normalized sizes, updated in place
        in incremental mode
    Returns np array of normalized component sizes.
    """
    if compute_nx:
        # compute the nx components
        self.connected_components = {k:c for k, c in enumerate(nx.connected_components(self.G))}
        connected_components = self.connected_components
        self.components = np.zeros((len(self.node_to_row)), dtype=int)
        self.connected_component_sizes = dict()
        self.node_component_sizes = np.zeros((len(self.node_to_row)))
        # initialize min/max
        min_component_size = float('inf')
        max_component_size = 0
        # loop sets self.connected_component_sizes, self.components, max/min
        for i, component in self.connected_components.items():
            size = len(component)
            self.connected_component_sizes[i] = size
            if size < min_component_size:
                min_component_size = size
            if size > max_component_size:
                max_component_size = size
            for node in component:
                self.components[self.node_to_row[node]] = i
                self.node_component_sizes[self.node_to_row[node]] = size
    else:
        # No need to compute in this case
        connected_components = self.connected_components
        # For each component:
        min_component_size = self.min_comp_size
        max_component_size = self.max_comp_size
        probed_comp = self.components[self.node_to_row[probed_node]]
        if comps_to_merge is None:
            comps_to_merge = []
        for comp in comps_to_merge:
            # Keep probed_node's component (arbitrary choice),
            # add all of each other component's nodes + size to probed component
            self.connected_components[probed_comp].update(self.connected_components[comp])
            self.connected_component_sizes[probed_comp] = len(self.connected_components[probed_comp])
            for node in self.connected_components[comp]:
                self.components[self.node_to_row[node]] = probed_comp
            # pop the old component from the dictionaries
            self.connected_components.pop(comp)
            self.connected_component_sizes.pop(comp)
        # update probed component size across the board
        self.node_component_sizes[np.where(self.components == probed_comp)] = self.connected_component_sizes[probed_comp]
        # If the min/max size changed, will need to recompute normalized value for ALL
        # components, rather than just the probed node's component.
        new_min = min(self.connected_component_sizes.values())
        if self.connected_component_sizes[probed_comp] > max_component_size:
            new_max = self.connected_component_sizes[probed_comp]
        else:
            new_max = max_component_size
        if new_min != min_component_size or new_max != max_component_size:
            compute_nx = True # NOTE re-using this flag is a bit adhoc, but it works
            min_component_size = new_min
            max_component_size = new_max
    self.max_comp_size = max_component_size
    self.min_comp_size = min_component_size
    diff = float(self.max_comp_size - self.min_comp_size)
    # Recompute normalization
    if compute_nx:
        # if there's more than one component, compute the normalized values
        if len(connected_components) > 1 and diff > 0:
            # Calculate (mycomponent-min_component) / (max_component-min_component)
            return (self.node_component_sizes - self.min_comp_size) / diff
        else: # otherwise, everyone is in the same sized component
            return np.ones(len(self.node_to_row.keys()))
    else:
        # Incremental path: only the probed component's normalized value changed.
        if diff > 0:
            new_val = float(self.connected_component_sizes[probed_comp]-self.min_comp_size)/diff
        else:
            new_val = 1.0
        current_norm_vals[np.where(self.components == probed_comp)] = new_val
        return current_norm_vals
import sys
def process_method(oneway=False):
    """Decorator for process_xxx methods for asyncio.

    :param oneway: when True, the decorated handler sends no reply message
        (fire-and-forget thrift call).
    """
    def _decorator(func):
        def nested(self, seqid, iprot, oprot, server_ctx):
            # Derive the thrift method name from the handler name,
            # e.g. "process_ping" -> "ping".
            fn_name = func.__name__.split('_', 1)[-1]
            handler_ctx = self._event_handler.getHandlerContext(fn_name,
                                                                server_ctx)
            # Locate the generated <method>_args struct in the module that
            # defined the handler, then deserialize the request into it.
            args = getattr(sys.modules[func.__module__], fn_name + "_args")()
            reply_type = TMessageType.REPLY
            self._event_handler.preRead(handler_ctx, fn_name, args)
            args.read(iprot)
            iprot.readMessageEnd()
            self._event_handler.postRead(handler_ctx, fn_name, args)
            # Invoke the actual handler (a pre-async/await style coroutine).
            result = yield from func(self, args, handler_ctx)
            if not oneway:
                # Handlers signal failures by returning a
                # TApplicationException instead of raising.
                if isinstance(result, TApplicationException):
                    reply_type = TMessageType.EXCEPTION
                self._event_handler.preWrite(handler_ctx, fn_name, result)
                oprot.writeMessageBegin(fn_name, reply_type, seqid)
                result.write(oprot)
                oprot.writeMessageEnd()
                oprot.trans.flush()
                self._event_handler.postWrite(handler_ctx, fn_name, result)
        return nested
    return _decorator
import json
def search_salary(request):
    """Handle a salary search API request.

    Expects a GET request whose body is a JSON document with ``title`` and
    ``location`` keys; ``location`` is either ``"City"`` or ``"City, State"``.

    :param request: Django request object.
    :return: ``HttpResponse`` with a JSON body — search results on success,
        or an error payload with status 400 (bad input) / 405 (wrong method).
    """
    logger.info("Received a salary request {}".format(request.method))
    if request.method == 'GET':
        try:
            request_json_body = json.loads(request.body)
            title = request_json_body['title']
            location = request_json_body['location']
            logger.info("The request has title {} and location {}".format(title, location))
        except (ValueError, KeyError, TypeError):
            # Narrowed from a bare ``except`` so programming errors are not
            # silently reported as bad client input.
            response_body = {'Error': 'Bad Request'}
            # Fixed: the original format string had no placeholder, so the
            # request body was never logged.
            logger.info("Wrong parameters have been passed {}".format(request.body))
            return HttpResponse(status=400, content=json.dumps(response_body), content_type='application/json')
        if ',' in location:
            city, state = location.split(',')
        else:
            city = location
            try:
                state = city_to_state_dict[city.title()]
            except (KeyError, AttributeError, TypeError):
                # Fixed missing separator between the two sentences of the
                # user-facing message ("...cityCould...").
                response_body = {'Error': 'Did you spell the city name correctly? '
                                          'Could you use the closest metropolitan city? Could you add state name?'}
                logger.info("Wrong city name {} ".format(request.body))
                return HttpResponse(status=400, content=json.dumps(response_body), content_type='application/json')
        state_abbreviation = us_states[state.lower().strip()]
        query_body = query_builder(title=title, city=city, state=state_abbreviation)
        response = search_in_es(index_name=salary_index_name, query_body=query_body)
        response_body = parse_build_response(response)
        status = 200
    else:
        logger.info("WRONG METHOD for salary request {}".format(request.method))
        status = 405
        response_body = {'Error': 'Method Not Allowed'}
    return HttpResponse(status=status, content=json.dumps(response_body), content_type='application/json')
def Get_interp_header_dx2ser(df_raw, header_search, value_ask_raw, logger=mod_logger):  # Vlookup. return row (series) at [value_ask, header_search]
    """Interpolate a whole row of *df_raw* at *value_ask_raw* along *header_search*.

    Adds a row at the asked value and interpolates every other column against
    the *header_search* column. This allows non-index interpolation (useful
    for arbitrary indexes); for time series (or other non-arbitrary indexes)
    see ``Get_interp_index_df2ser``.

    USE:
        interp_ser = hp_pd.Get_interp_header_dx2ser(df_raw, header_search, value_ask_raw)
        interp_value = interp_ser[header_desire]

    INPUTS:
        df_raw: data set (with header_search in its columns)
        header_search: the header name whose column is searched for value_ask
        value_ask_raw: numeric value (on the header_search column) at which to
            interpolate the other columns

    Returns a pandas Series: the (possibly interpolated) row at
    [value_ask, header_search]. Out-of-domain requests log an error and
    return the nearest boundary row instead of raising.

    Raises TypeError for a non-DataFrame input and ValueError when the asked
    value matches more than one existing row.
    """
    #===========================================================================
    # check inputs
    #===========================================================================
    if not isinstance(df_raw, pd.core.frame.DataFrame):
        logger.error('got undexpected type on df_raw: %s'%type(df_raw))
        raise TypeError
    # drop nan values
    df_raw = df_raw.dropna(axis='index')
    value_ask = round(value_ask_raw, 2)
    # Validate that the frame is fully numeric (raises ValueError otherwise).
    # np.float was removed in NumPy 1.24; the builtin float is the equivalent.
    df = df_raw.astype(float)  # result unused beyond this validation
    # NOTE: there seems to be some problem with importing commas from excel.
    # drop=True: the original passed the string 'true', which only worked
    # because any non-empty string is truthy.
    df_sort = df_raw.sort_values(by=header_search).reset_index(drop=True)
    if value_ask < df_sort.loc[0, header_search]:
        logger.error('asked value is outside the domain')
        return df_sort.loc[0, :]
    last_index = len(df_sort.index) - 1
    if value_ask > df_sort.loc[last_index, header_search]:
        logger.error('asked value is greater than the serach domain: %.2f'%value_ask)
        return df_sort.iloc[last_index, :]  # return the last row
    # check if interpolation is even needed
    bool_row = df_raw.loc[:, header_search] == value_ask  # search for the value
    if sum(bool_row) == 1:  # found exactly one match: return it as-is
        results_ser = df_raw.loc[bool_row, :].iloc[0]
        return results_ser
    elif sum(bool_row) > 1:  # ambiguous: multiple existing matches
        df_trim = df_raw.loc[bool_row, header_search]
        logger.error('found too many existing matches in search: \n %s'%df_trim)
        raise ValueError
    # build a new df with the header_search column as the index
    index = list(df_raw.loc[:, header_search])
    bool_col = df_raw.columns != header_search  # all the remaining columns
    col = df_raw.columns[bool_col]
    data = df_raw.loc[:, bool_col].values
    df = pd.DataFrame(data=data, index=index, columns=col)
    ser = pd.Series(data=None, index=col)  # dummy all-NaN row for adding
    df.loc[value_ask, :] = ser  # add it in at the requested value
    # resort the frame so the new row sits between its neighbours
    df_interp = df.sort_index()
    # convert each column to numeric (non-parsable entries become NaN)
    for col in df_interp:
        df_interp[col] = pd.to_numeric(df_interp[col], errors='coerce')
    # interpolate the missing values
    # WARNING: all methods (except linear) interpolate based on the index
    df_new = df_interp.interpolate(method='values')
    # Extract the interpolated row.
    # .ix was removed in pandas 1.0; .loc is the label-based equivalent here.
    results_ser = df_new.loc[value_ask]
    results_ser.loc[header_search] = value_ask  # add the search value/header back
    return results_ser
def get_wordnet_pos(treebank_tag):
    """Map a Penn Treebank PoS tag onto the matching WordNet PoS constant.

    Returns ``None`` when the tag prefix has no WordNet counterpart.
    """
    prefix_to_pos = (
        ('J', wordnet.ADJ),
        ('V', wordnet.VERB),
        ('N', wordnet.NOUN),
        ('R', wordnet.ADV),
    )
    for prefix, pos in prefix_to_pos:
        if treebank_tag.startswith(prefix):
            return pos
    return None
def from_newick(string):
    """
    Build a tskit tree sequence equivalent to the given newick string.
    """
    parsed = dendropy.Tree.get(data=string, schema="newick")
    tables = tskit.TableCollection(1)
    node_ids = {}
    # Age order guarantees children are registered before their parent edge.
    for tree_node in parsed.ageorder_node_iter():
        kids = list(tree_node.child_nodes())
        if tree_node not in node_ids:
            # Leaves become sample nodes; internal nodes are plain nodes.
            # TODO derive information from the node and store it as JSON metadata.
            leaf_flags = tskit.NODE_IS_SAMPLE if len(kids) == 0 else 0
            node_ids[tree_node] = tables.nodes.add_row(flags=leaf_flags,
                                                       time=tree_node.age)
        parent_id = node_ids[tree_node]
        for kid in kids:
            tables.edges.add_row(0, 1, parent_id, node_ids[kid])
    return tables.tree_sequence()
import random
import time
def benchmark(problem_file, test_set_file):
    """ Evaluates planners with a random problem from a given problem set and world map.
    Assumes feasible paths can be calculated.
    :param problem_file: A string of map file with .map extension
    :param test_set_file: A string of problem set file with .scen extension
    :return: Returns a tuple of (results_optimal, results_random) where each element is a custom data structure
    carrying calculated path, path length and time elapsed to calculate path.
    """
    class Results(object):
        """Record of one planner run: path, its length, and wall time (ms)."""
        def __init__(self, path, path_length, time_elapsed):
            self.path = path
            self.path_length = path_length
            self.time_elapsed = time_elapsed
    world = tools.read_world_file(problem_file)
    # Read the scenario file with a context manager; the original version
    # leaked the open file handle.
    with open(test_set_file, 'r') as f:
        problems = f.readlines()
    # Pick a random problem; index 0 is excluded (presumably the .scen header
    # line — TODO confirm).
    problem_str = problems[random.randint(1, len(problems) - 1)].split()
    # Parse problem string: fields 4-7 are (col, row) pairs for start/goal.
    start_pose = int(problem_str[5]), int(problem_str[4])
    goal_pose = int(problem_str[7]), int(problem_str[6])
    # Evaluate optimal planner
    t = time.time()
    path = algorithms.planner_optimal(world, start_pose, goal_pose)
    time_ms = tools.sec_to_ms((time.time() - t))
    results_optimal = Results(path, tools.path_length(path), time_ms)
    # Evaluate random planner
    t = time.time()
    path = algorithms.planner_random(world, start_pose, goal_pose, max_step_number=100000)
    time_ms = tools.sec_to_ms((time.time() - t))
    results_random = Results(path, tools.path_length(path), time_ms)
    return results_optimal, results_random
def _recursive_pairwise_outer_join(
dataframes_to_merge, on, lsuffix, rsuffix, npartitions, shuffle
):
"""
Schedule the merging of a list of dataframes in a pairwise method. This is a recursive function that results
in a much more efficient scheduling of merges than a simple loop
from:
[A] [B] [C] [D] -> [AB] [C] [D] -> [ABC] [D] -> [ABCD]
to:
[A] [B] [C] [D] -> [AB] [CD] -> [ABCD]
Note that either way, n-1 merges are still required, but using a pairwise reduction it can be completed in parallel.
:param dataframes_to_merge: A list of Dask dataframes to be merged together on their index
:return: A single Dask Dataframe, comprised of the pairwise-merges of all provided dataframes
"""
number_of_dataframes_to_merge = len(dataframes_to_merge)
merge_options = {
"on": on,
"lsuffix": lsuffix,
"rsuffix": rsuffix,
"npartitions": npartitions,
"shuffle": shuffle,
}
# Base case 1: just return the provided dataframe and merge with `left`
if number_of_dataframes_to_merge == 1:
return dataframes_to_merge[0]
# Base case 2: merge the two provided dataframe to be merged with `left`
if number_of_dataframes_to_merge == 2:
merged_ddf = dataframes_to_merge[0].join(
dataframes_to_merge[1], how="outer", **merge_options
)
return merged_ddf
# Recursive case: split the list of dfs into two ~even sizes and continue down
else:
middle_index = number_of_dataframes_to_merge // 2
merged_ddf = _recursive_pairwise_outer_join(
[
_recursive_pairwise_outer_join(
dataframes_to_merge[:middle_index], **merge_options
),
_recursive_pairwise_outer_join(
dataframes_to_merge[middle_index:], **merge_options
),
],
**merge_options,
)
return merged_ddf | 7d65d01cce313ed0517fd685045978dee6d7cb08 | 3,636,628 |
def signum(x):
    """Return the sign of *x* as a float.

    :param x: a real number.
    :return: ``1.0`` if ``x > 0``, ``-1.0`` if ``x < 0``, ``0.0`` otherwise.

    The original mixed return types (int ``0`` vs floats) and fell through to
    an implicit ``None`` for NaN input; both cases now yield ``0.0``.
    """
    if x > 0:
        return 1.0
    if x < 0:
        return -1.0
    return 0.0
import re
def is_date(word):
    """
    Check whether *word* is formatted like a numeric date
    (``YYYY-MM-D``, ``YYYY-MM-DD``, or ``MM-DD-YYYY`` style).

    @param word. A string.
    @return the matched object when *word* is a date, otherwise None.

    >>> is_date('2015-03-1') is not None
    True
    >>> is_date('2014-02-19') is not None
    True
    >>> is_date('03-27-1995') is not None
    True
    >>> is_date('201') is not None
    False
    >>> is_date('0') is not None
    False
    """
    date_pattern = re.compile(
        r'^(\d\d\d\d-\d\d-\d|\d\d?-\d\d?-\d\d\d\d?|\d\d\d\d-\d\d?-\d\d?)$'
    )
    return date_pattern.search(word)
import torch
def test_epoch(model, base_dist, test_loader, epoch,
device=None, annealing=False):
"""Calculate validation loss.
Args:
model: instance of CVAE
base_dist: r1(z) prior distribution
test_loader: instance of pytorch DataLoader
device: device to use
annealing: whether to anneal the KL loss
Returns:
average reconstruction loss and kl loss over test_loader
"""
# KL weight annealing. This is needed to avoid posterior collapse.
if annealing:
kl_weight = torch.tensor(
kl_weight_schedule(epoch, quiet=True)).to(device)
else:
kl_weight = torch.tensor(1.0).to(device)
with torch.no_grad():
model.eval()
total_reconstruction_loss = 0.0
total_kl_loss = 0.0
for h, x in test_loader:
if device is not None:
h = h.to(device, non_blocking=True)
x = x.to(device, non_blocking=True)
# Sample a noise realization
y = h + torch.randn_like(h)
reconstruction_loss, kl_loss = model(x, y, base_dist)
# Keep track of total of each loss
total_reconstruction_loss += reconstruction_loss.sum()
total_kl_loss += kl_loss.sum()
avg_reconstruction_loss = (total_reconstruction_loss.item() /
len(test_loader.dataset))
avg_kl_loss = (total_kl_loss.item() /
len(test_loader.dataset))
avg_loss = avg_reconstruction_loss + kl_weight.item() * avg_kl_loss
print('Test set: Average Loss: {:.4f}\t=\t'
'Reconstruction loss: {:.4f}\t +'
'\t (KL weight) * KL loss: {:.4f}\n'.format(
avg_loss,
avg_reconstruction_loss, avg_kl_loss))
return avg_loss, avg_reconstruction_loss, avg_kl_loss | 36a5d2fc9d229dca98f1a36f588436ab60a92754 | 3,636,631 |
from datetime import datetime
def get_rest_value_from_path(status, device_class, path: str):
    """Extract an attribute value from a REST ``status`` payload by path.

    :param status: mapping of device status values (one or two levels deep).
    :param device_class: sensor device class; timestamp sensors get their
        value converted from "seconds since boot" to an ISO-8601 string.
    :param path: key, or ``"outer/inner"`` two-level key, into ``status``.
    :return: the looked-up (and possibly post-processed) value.
    """
    if "/" not in path:
        attribute_value = status[path]
    else:
        # Split once instead of twice; only the first two segments are used.
        outer_key, inner_key = path.split("/")[:2]
        attribute_value = status[outer_key][inner_key]
    if device_class == DEVICE_CLASS_TIMESTAMP:
        # Value is an uptime in seconds; convert to the boot timestamp.
        # NOTE(review): relies on ``timedelta`` being imported at module level.
        last_boot = datetime.utcnow() - timedelta(seconds=attribute_value)
        attribute_value = last_boot.replace(microsecond=0).isoformat()
    if "new_version" in path:
        # Version strings look like "<prefix>/<version>@<build>"; keep "<version>".
        attribute_value = attribute_value.split("/")[1].split("@")[0]
    return attribute_value
def submit(request):
    """Render the submit page with the user's GitHub repositories.

    Users who do not pass the check are redirected into the GitHub OAuth flow.
    """
    # NOTE(review): ``is_active`` is used as the auth gate here; confirm that
    # ``is_authenticated`` was not intended.
    if request.user.is_active:
        user = UserSocialAuth.objects.filter(provider='github').get(user_id=request.user.id)
        github = Github(user.tokens[u'access_token'])
        # Materialize the paginated repo iterator into a plain list.
        repos = list(github.get_user().get_repos())
        return render(request, 'base/submit.html', {'repos': repos})
    else:
        return HttpResponseRedirect(reverse('socialauth_begin', args=('github',)))
def all_combined_threshold(input_image):
    """
    Apply all thresholds to undistorted image
    :param input_image: Undistorted image
    :return: Combined binary image
    Ref: Course notes
    """
    # Apply Gaussian blur to the input image
    kernel_size = 5
    input_image = cv2.GaussianBlur(input_image, (kernel_size, kernel_size), 0)
    # Sobel kernel size
    ksize = 5  # Should be an odd number to smooth a gradient
    # Apply threshold functions
    absolute_binaryX = absolute_sobel_threshold(input_image, orient='x', thresh=(20, 255))  # thresh=(30, 255)
    absolute_binaryY = absolute_sobel_threshold(input_image, orient='y', thresh=(30, 255))  # thresh=(30, 255)
    magnitude_binary = magnitude_threshold(input_image, sobel_kernel=ksize, mag_thresh=(70, 255))  # mag_thresh=(60, 255)
    direction_binary = direction_threshold(input_image, sobel_kernel=ksize, thresh=(0.7, 1.3))
    # Combine the thresholds: gradient-x OR (gradient-y AND direction) OR magnitude.
    # NOTE: the original expression relied on ``|`` binding tighter than ``==``
    # (``x == 1 | y`` parses as ``x == (1 | y)``), which compared against the
    # wrong mask; parentheses now make each comparison explicit.
    combine_all_binary = np.zeros_like(direction_binary)
    combine_all_binary[((absolute_binaryX == 1)
                        | ((absolute_binaryY == 1) & (direction_binary == 1)))
                       | (magnitude_binary == 1)] = 1
    return combine_all_binary
from datetime import datetime
import time
import logging
import os
import sys
from contextlib import suppress
def get_root_logger(profile='panoptes', log_config=None):
    """Creates a root logger for PANOPTES used by the PanBase object.
    Loggers are cached in the module-level ``all_loggers`` dict keyed by
    (profile, serialized config), so repeated calls with the same arguments
    return the same configured logger without re-running the file/symlink
    setup below.
    Args:
        profile (str, optional): The name of the logger to use, defaults
            to 'panoptes'.
        log_config (dict|None, optional): Configuration options for the logger.
            See https://docs.python.org/3/library/logging.config.html for
            available options. Default is `None`, which then looks up the
            values in the `log.yaml` config file.
    Returns:
        logger(logging.logger): A configured instance of the logger
    """
    # Get log info from config
    log_config = log_config if log_config else load_default()
    # If we already created a logger for this profile and log_config, return that.
    logger_key = (profile, to_json(log_config, sort_keys=True))
    try:
        return all_loggers[logger_key]
    except KeyError:
        pass
    # Alter the log_config to use UTC times
    if log_config.get('use_utc', True):
        # TODO(jamessynge): Figure out why 'formatters' is sometimes
        # missing from the log_config. It is hard to understand how
        # this could occur given that none of the callers of
        # get_root_logger pass in their own log_config.
        if 'formatters' not in log_config: # pragma: no cover
            # TODO(jamessynge): Raise a custom exception in this case instead
            # of issuing a warning; after all, a standard dict will throw a
            # KeyError in the for loop below if 'formatters' is missing.
            warn('formatters is missing from log_config!')
            warn(f'log_config: {log_config!r}')
        # NOTE(review): ``datetime.datetime`` / ``datetime.time`` below assume
        # the module-level import is ``import datetime`` (the module), not
        # ``from datetime import datetime`` — confirm the file's imports.
        log_fname_datetime = datetime.datetime.utcnow().strftime('%Y%m%dT%H%M%SZ')
        # Make the log use UTC
        logging.Formatter.converter = time.gmtime
    else:
        log_fname_datetime = datetime.datetime.now().strftime('%Y%m%dT%H%M%S')
    # Setup log file names
    invoked_script = os.path.basename(sys.argv[0])
    log_dir = os.getenv('PANLOG', '')
    if not log_dir:
        log_dir = os.path.join(os.getenv('PANDIR', gettempdir()), 'logs')
    per_run_dir = os.path.join(log_dir, 'per-run', invoked_script)
    # Unique per-run file stem: script name + timestamp + pid.
    log_fname = '{}-{}-{}'.format(invoked_script, log_fname_datetime, os.getpid())
    # Create the directory for the per-run files.
    os.makedirs(per_run_dir, exist_ok=True)
    # Set log filename and rotation
    for handler in log_config.get('handlers', []):
        # Set the filename
        partial_fname = '{}-{}.log'.format(log_fname, handler)
        full_log_fname = os.path.join(per_run_dir, partial_fname)
        log_config['handlers'][handler].setdefault('filename', full_log_fname)
        # Setup the TimedRotatingFileHandler for middle of day
        log_config['handlers'][handler].setdefault('atTime', datetime.time(hour=11, minute=30))
        # Create a symlink to the log file with just the name of the script and the handler
        # (level), as this makes it easier to find the latest file.
        log_symlink = os.path.join(log_dir, '{}-{}.log'.format(invoked_script, handler))
        log_symlink_target = os.path.abspath(full_log_fname)
        with suppress(FileNotFoundError):
            os.unlink(log_symlink)
        os.symlink(log_symlink_target, log_symlink)
    # Configure the logger
    logging.config.dictConfig(log_config)
    # Get the logger and set as attribute to class
    logger = logging.getLogger(profile)
    # Set custom LogRecord
    logging.setLogRecordFactory(StrFormatLogRecord)
    logger.info('{:*^80}'.format(' Starting PanLogger '))
    # TODO(jamessynge) Output name of script, cmdline args, etc. And do son
    # when the log rotates too!
    all_loggers[logger_key] = logger
    return logger
def sbol_cds(ax, type, num, start, end, prev_end, scale, linewidth, opts):
    """ Built-in SBOL coding sequence renderer.

    Draws an arrow-shaped CDS glyph on ``ax`` after the previous part's end,
    honouring per-part rendering options.

    :param ax: matplotlib axes to draw onto.
    :param type: part type (unused here; kept for the renderer interface).
    :param num: part index (unused here; kept for the renderer interface).
    :param start: part start position (start > end means reverse strand).
    :param end: part end position.
    :param prev_end: x position where the previous part finished.
    :param scale: default drawing scale (may be overridden via ``opts``).
    :param linewidth: default outline width (may be overridden via ``opts``).
    :param opts: optional dict of overrides ('color', 'hatch', 'start_pad',
        'end_pad', 'y_extent', 'x_extent', 'arrowhead_height',
        'arrowhead_length', 'linewidth', 'scale', 'label', ...).
    :return: (prev_end, final_end) — the x-extent occupied by the glyph.
    """
    # Default options
    color = (0.7, 0.7, 0.7)
    hatch = ''
    start_pad = 1.0
    end_pad = 1.0
    y_extent = 5
    x_extent = 30
    arrowhead_height = 4
    arrowhead_length = 8
    # Apply user overrides in one pass (replaces the repetitive
    # per-key ``if 'key' in opts.keys()`` chain).
    if opts is not None:
        color = opts.get('color', color)
        hatch = opts.get('hatch', hatch)
        start_pad = opts.get('start_pad', start_pad)
        end_pad = opts.get('end_pad', end_pad)
        y_extent = opts.get('y_extent', y_extent)
        x_extent = opts.get('x_extent', x_extent)
        arrowhead_height = opts.get('arrowhead_height', arrowhead_height)
        arrowhead_length = opts.get('arrowhead_length', arrowhead_length)
        linewidth = opts.get('linewidth', linewidth)
        scale = opts.get('scale', scale)
    # Check direction and add start padding
    dir_fac = 1.0
    final_start = prev_end
    if start > end:
        # Reverse strand: arrowhead points left.
        dir_fac = -1.0
        start = prev_end+end_pad+x_extent
        end = prev_end+end_pad
        final_end = start+start_pad
    else:
        start = prev_end+start_pad
        end = start+x_extent
        final_end = end+end_pad
    # Draw the CDS symbol
    p1 = Polygon([(start, y_extent),
                  (start, -y_extent),
                  (end-dir_fac*arrowhead_length, -y_extent),
                  (end-dir_fac*arrowhead_length, -y_extent-arrowhead_height),
                  (end, 0),
                  (end-dir_fac*arrowhead_length, y_extent+arrowhead_height),
                  (end-dir_fac*arrowhead_length, y_extent)],
                 edgecolor=(0.0,0.0,0.0), facecolor=color, linewidth=linewidth,
                 hatch=hatch, zorder=11,
                 path_effects=[Stroke(joinstyle="miter")]) # This is a work around for matplotlib < 1.4.0
    ax.add_patch(p1)
    if opts is not None and 'label' in opts:
        # Centre the label within the glyph's final extent.
        if final_start > final_end:
            write_label(ax, opts['label'], final_end+((final_start-final_end)/2.0), opts=opts)
        else:
            write_label(ax, opts['label'], final_start+((final_end-final_start)/2.0), opts=opts)
    if final_start > final_end:
        return prev_end, final_start
    else:
        return prev_end, final_end
def convert_halo_to_array_form(halo, ndim):
    """
    Converts the :samp:`{halo}` argument to a :samp:`(ndim, 2)`
    shaped array of :obj:`numpy.int64` elements.
    :type halo: :samp:`None`, :obj:`int`, an :samp:`{ndim}` length sequence
       of :samp:`int` or :samp:`({ndim}, 2)` shaped array
       of :samp:`int`
    :param halo: Halo to be converted to :samp:`({ndim}, 2)` shaped array form.
    :type ndim: :obj:`int`
    :param ndim: Number of dimensions.
    :rtype: :obj:`numpy.ndarray`
    :return: A :samp:`({ndim}, 2)` shaped array of :obj:`numpy.int64` elements.
    Examples::
        >>> convert_halo_to_array_form(halo=2, ndim=4)
        array([[2, 2],
               [2, 2],
               [2, 2],
               [2, 2]])
        >>> convert_halo_to_array_form(halo=[0, 1, 2], ndim=3)
        array([[0, 0],
               [1, 1],
               [2, 2]])
        >>> convert_halo_to_array_form(halo=[[0, 1], [2, 3], [3, 4]], ndim=3)
        array([[0, 1],
               [2, 3],
               [3, 4]])
    """
    int_dtype = _np.int64
    if halo is None:
        # No halo given: all-zero lo/hi pairs.
        result = _np.zeros((ndim, 2), dtype=int_dtype)
    elif is_scalar(halo):
        # Same halo width on every axis, both sides.
        result = _np.zeros((ndim, 2), dtype=int_dtype) + halo
    elif (ndim == 1) and (_np.array(halo).shape == (2,)):
        # Single-axis (lo, hi) pair.
        result = _np.array([halo, ], copy=True, dtype=int_dtype)
    elif len(_np.array(halo).shape) == 1:
        # Per-axis width: duplicate into lo/hi columns.
        result = _np.array([halo, halo], dtype=int_dtype).T.copy()
    else:
        result = _np.array(halo, copy=True, dtype=int_dtype)
    if result.shape[0] != ndim:
        raise ValueError(
            "Got halo.shape=%s, expecting halo.shape=(%s, 2)"
            %
            (result.shape, ndim)
        )
    return result
def tran_canny(image):
    """Suppress noise with a Gaussian blur, then run Canny edge detection."""
    blurred = cv2.GaussianBlur(image, (3, 3), 0)
    return cv2.Canny(blurred, 50, 150)
import math
def draw_star(img_size, num_frames, bg_config, nb_branches=6):
    """ Draw a star and output the interest points.

    A star (centre point plus 3..nb_branches-1 branch tips sampled inside a
    random circle) is drawn over generated background frames, then rotated
    and translated per frame; events are simulated between successive frames.

    Parameters:
      img_size: (height, width) of each frame
      num_frames: number of frames to generate
      bg_config: NOTE(review) — unused in this function; confirm whether it
        should be forwarded to generate_background.
      nb_branches: upper bound (exclusive) on the number of branches
    Returns:
      (images, points, events) arrays — frames, per-frame in-image star
      points, and simulated events between consecutive frames.
    """
    images = generate_background(img_size, num_frames=num_frames)
    background_color = int(np.mean(images))
    num_branches = random_state.randint(3, nb_branches)
    min_dim = min(img_size[0], img_size[1])
    thickness = random_state.randint(min_dim * 0.01, min_dim * 0.02)
    rad = max(random_state.rand() * min_dim / 2, min_dim / 5)
    x = random_state.randint(rad, img_size[1] - rad) # select the center of a circle
    y = random_state.randint(rad, img_size[0] - rad)
    # Sample num_branches points inside the circle
    slices = np.linspace(0, 2 * math.pi, num_branches + 1)
    angles = [slices[j] + random_state.rand() * (slices[j+1] - slices[j])
              for j in range(num_branches)]
    points = np.array([[int(x + max(random_state.rand(), 0.3) * rad * math.cos(a)),
                        int(y + max(random_state.rand(), 0.3) * rad * math.sin(a))]
                       for a in angles])
    # Row 0 is the star centre; rows 1..num_branches are the branch tips.
    points = np.concatenate(([[x, y]], points), axis=0)
    color = get_random_color(background_color)
    rotation = get_random_rotation()
    speed = get_random_speed()
    pts_list = []
    img_list = []
    for i in range(num_frames):
        img = images[i]
        # NOTE(review): np.int was removed in NumPy >= 1.24; this line will
        # need np.int_ (or int) when upgrading.
        pts = np.empty((0, 2), dtype=np.int)
        # Rotate the star about its centre, then translate it by `speed`.
        center = (points[0][0], points[0][1])
        points = (np.matmul(points - center, rotation) + center + speed).astype(int)
        for j in range(1, num_branches + 1):
            cv.line(img, (points[0][0], points[0][1]),
                    (points[j][0], points[j][1]),
                    int(color[j]), thickness)
        # Keep only the points inside the image
        pts = keep_points_inside(points, img_size)
        if len(pts) == 0:
            # NOTE(review): the result of this recursive retry is discarded —
            # the regenerated frames/points are thrown away and the empty
            # `pts` is still appended below. Likely meant to return the
            # recursive result; confirm before changing.
            draw_star(img_size, num_frames, bg_config, nb_branches)
        pts_list.append(pts)
        img_list.append(img)
    images = np.array(img_list)
    points = np.array(pts_list)
    # Simulate events from the first frame, then between successive frames.
    event_sim = es.Event_simulator(images[0], 0)
    events = np.array([event_sim.simulate(img, 0) for img in images[1:]])
    return images, points, events
def forestvar(z_in):
    """ Return the intrinsic variance of the LyaF variance used for weighting.

    The fit is roughly that of McDonald et al 2006.

    Parameters
    ----------
    z_in : float or ndarray
        Redshift(s) at which to evaluate the variance.

    Returns
    -------
    fvar : float or ndarray
        Variance
    """
    growth = (1. + z_in) / (1. + 2.25)
    return 0.065 * growth**3.8
def _process_image_file(fobj):
    """Re-encode a dataset image file as JPEG.

    Some files carry GIF, PNG or BMP payloads (despite having a .jpg
    extension), or encoding options that can crash TF; decoding and
    re-encoding normalizes them.
    """
    decoded = _decode_image(fobj)
    return _encode_image(decoded, image_format="JPEG")
import collections
def compute_v2g_scores(reg, cisreg):
    """
    Score SNP-to-gene associations from the available evidence.
    Args:
    * [ Regulatory_Evidence ]
    * [ Cisregulatory_Evidence ]
    Returntype: dict(Gene: dict(string: float)), dict(Gene: float)
    """
    per_source_scores = dict()
    weighted_scores = dict()
    for gene, gene_evidence in cisreg.items():
        scores = collections.defaultdict(int)
        per_source_scores[gene] = scores
        sources_seen = set()
        for item in gene_evidence + reg:
            score = float(item.score)
            # Keep the best score seen for each evidence source.
            if item.source not in sources_seen or score > scores[item.source]:
                scores[item.source] = score
            sources_seen.add(item.source)
            if item.source == 'VEP':
                # VEP aggregate stats, tracked alongside the best score.
                scores['VEP_count'] += 1
                scores['VEP_sum'] += score
            if item.source == 'GTEx':
                # Record the per-tissue eQTL score (last one wins).
                scores[item.tissue] = score
        # Ad hoc bounds defined here:
        # PCHiC scores are capped at 1.
        scores['PCHiC'] = min(scores['PCHiC'], 1)
        # VEP mean across all VEP evidence items.
        if 'VEP' in scores:
            scores['VEP_mean'] = scores['VEP_sum'] / scores['VEP_count']
        # Weighted sum over the sources that have configured weights.
        weighted_scores[gene] = sum(
            scores[source] * postgap.Globals.EVIDENCE_WEIGHTS[source]
            for source in scores
            if source in postgap.Globals.EVIDENCE_WEIGHTS
        )
    return per_source_scores, weighted_scores
from re import T
def _old_normalize_batch_in_training(x, gamma, beta,
                                     reduction_axes, epsilon=1e-3):
    """Computes mean and std for batch then apply batch_normalization on batch.

    Returns a (normed, mean, var) tuple. When the cuDNN spatial path is
    taken, mean and var are flattened; otherwise they keep the shape
    produced by reducing over ``reduction_axes``.

    NOTE(review): this block requires ``T`` to be ``theano.tensor``; the
    nearby file-level ``from re import T`` would shadow that — verify the
    module's imports.
    """
    # Missing scale/shift default to identity (gamma=1, beta=0).
    if gamma is None:
        gamma = ones_like(x)
    if beta is None:
        beta = zeros_like(x)
    dev = theano.config.device
    # cuDNN fast path: only for <5-D inputs normalized over (batch, h, w),
    # i.e. per-channel "spatial" batch norm, and only on a GPU device.
    use_cudnn = ndim(x) < 5 and reduction_axes == [0, 2, 3] and (dev.startswith('cuda') or dev.startswith('gpu'))
    if use_cudnn:
        # Broadcast per-channel params to (1, C, 1, 1).
        broadcast_beta = beta.dimshuffle('x', 0, 'x', 'x')
        broadcast_gamma = gamma.dimshuffle('x', 0, 'x', 'x')
        try:
            normed, mean, stdinv = theano.sandbox.cuda.dnn.dnn_batch_normalization_train(
                x, broadcast_gamma, broadcast_beta, 'spatial', epsilon)
            normed = theano.tensor.as_tensor_variable(normed)
            mean = theano.tensor.as_tensor_variable(mean)
            stdinv = theano.tensor.as_tensor_variable(stdinv)
            # cuDNN returns 1/std; convert back to variance.
            var = T.inv(stdinv ** 2)
            return normed, T.flatten(mean), T.flatten(var)
        except AttributeError:
            # cuDNN op unavailable in this theano build: fall through to the
            # generic implementation below.
            pass
    var = x.var(reduction_axes)
    mean = x.mean(reduction_axes)
    # Build the broadcast shape: 1 on reduced axes, original size elsewhere.
    target_shape = []
    for axis in range(ndim(x)):
        if axis in reduction_axes:
            target_shape.append(1)
        else:
            target_shape.append(x.shape[axis])
    target_shape = T.stack(*target_shape)
    broadcast_mean = T.reshape(mean, target_shape)
    broadcast_var = T.reshape(var, target_shape)
    broadcast_beta = T.reshape(beta, target_shape)
    broadcast_gamma = T.reshape(gamma, target_shape)
    normed = batch_normalization(x, broadcast_mean, broadcast_var,
                                 broadcast_beta, broadcast_gamma,
                                 epsilon)
    return normed, mean, var
def ceil_to_batch_size(num, batch_size):
    """Round ``num`` up to the nearest multiple of ``batch_size``.

    Uses integer ceiling division instead of ``math.ceil`` on a float
    quotient, which silently loses precision once ``num`` exceeds 2**53.

    Parameters
    ----------
    num : int
        Value to round up.
    batch_size : int
        Batch size (positive) to round to a multiple of.

    Returns
    -------
    int
        The smallest multiple of ``batch_size`` that is >= ``num``.
    """
    # -(-num // batch_size) is exact ceiling division for integers.
    return int(-(-num // batch_size) * batch_size)
import torch
def quaternion_to_rotation_matrix(quaternion):
    """
    This function transforms quaternions into 3x3 rotation matrices.
    Parameters
    ----------
    :param quaternion: a quaternion or a batch of quaternions, shape (..., 4),
        ordered [scalar term, vector term]
    Returns
    -------
    :return: 3x3 rotation matrices, shape (..., 3, 3)
    """
    init_shape = list(quaternion.shape)
    # Flatten all leading batch dimensions: one quaternion per row.
    q = quaternion.view(-1, init_shape[-1])
    # Size the output by the flattened row count. (The previous code used
    # quaternion.shape[0], which under-allocates for inputs with more than
    # one batch dimension even though the final view clearly supports them.)
    # NOTE(review): relies on a module-level ``device``.
    R = torch.zeros((q.shape[0], 3, 3), dtype=quaternion.dtype).to(device)
    for i in range(R.shape[0]):
        w, x, y, z = q[i]
        R[i] = torch.tensor([[2 * (w ** 2 + x ** 2) - 1, 2 * (x * y - w * z), 2 * (x * z + w * y)],
                             [2 * (x * y + w * z), 2 * (w ** 2 + y ** 2) - 1, 2 * (y * z - w * x)],
                             [2 * (x * z - w * y), 2 * (y * z + w * x), 2 * (w ** 2 + z ** 2) - 1]],
                            dtype=quaternion.dtype)
    # Restore the original batch shape with trailing (3, 3).
    new_shape = init_shape[:-1]
    new_shape.append(3)
    new_shape.append(3)
    return R.view(new_shape)
def session_end(bot):
    """:crossed_flags: *TRPGのセッションを終わります*\n`/cc kp end`"""
    # Build a markdown table "| name | PC | notes |" mapping each player's
    # real name to their character sheet link.
    target_status = "pc_id"
    user_data = {}
    lst_player_data = get_lst_player_data(bot.team_id, bot.user_id, target_status)
    msg_return = "| 名前 | PC | 備考 |\n|--|--|--|\n"
    for player_data in lst_player_data:
        name = player_data["name"]
        user_id = player_data["user_id"]
        url = player_data["user_param"]["url"]
        user_data[user_id] = {"url": url,
                              "name": name}
    lst_users_list = get_users_list(bot.token)
    for user_id, user_datum in user_data.items():
        # N+1 lookup per user — someone should replace this with a dict
        # keyed by id (translated from the original Japanese TODO).
        player_data = list(filter(lambda x: x["id"] == user_id, lst_users_list))
        # Fixed: list(filter(...)) is never None, so the original
        # ``if player_data is None`` guard could never fire and an unmatched
        # user crashed with IndexError at ``player_data[0]`` below.
        if not player_data:
            continue
        pc_name = user_datum["name"]
        url = user_datum["url"]
        real_name = player_data[0]["real_name"]
        msg_return += f"| @{real_name} | [{pc_name}]({url}) | |\n"
    return msg_return, None
def log_json(req_context, params=None, err=None):
    """
    Build a structured log-message dict from the request context.

    :param req_context: mapping of request-scoped fields (copied, not mutated).
    :param params: optional request parameters to attach under ``'params'``.
    :param err: optional error to merge into the params via ``append_error``.
    :return: dict combining ``req_context`` with ``Util.get_context()`` (the
        ambient context wins on key clashes), plus the merged params/error
        payload when present.
    """
    context = Util.get_context()
    # Copy so the caller's mapping is not mutated; context overrides clashes.
    log_msg = dict(req_context)
    log_msg.update(context)
    if params or err:
        pe = append_error(params, err)
        if pe:
            log_msg['params'] = pe
    return log_msg
def make_input_signature(inputs, include_tensor_ranks_only,
                         encode_variables_by_resource_id):
    """Encode the function inputs into an input-signature representation.
    Args:
      inputs: The function inputs that need to be formed into a signature
      include_tensor_ranks_only: If Tensors should be considered by rank
      encode_variables_by_resource_id: If Variables should be considered by
        resource id
    Returns:
      An object representing the input signature
    """
    signature = pywrap_tfe.TFE_Py_EncodeArg(
        inputs, include_tensor_ranks_only, encode_variables_by_resource_id)
    return signature
def _activities_from_datasets_followed_by_user_query(
    user_id: str, limit: int
) -> QActivity:
    """Return a query for all activities from datasets that user_id follows."""
    followed = model.UserFollowingDataset.followee_list(user_id)
    if not followed:
        # The user follows no datasets: a query guaranteed to match nothing.
        return model.Session.query(Activity).filter(text("0=1"))
    # One limited activity query per followed dataset, unioned together.
    per_dataset_queries = []
    for follower in followed:
        dataset_query = _package_activity_query(follower.object_id)
        per_dataset_queries.append(_activities_limit(dataset_query, limit))
    return _activities_union_all(*per_dataset_queries)
def getRowType(row):
    """Infer a type name ('none', 'int', 'double', 'date' or 'string') for
    each cell of *row*.

    :param row: list of raw cell values; mutated in place (the return value
        aliases the argument).
    :return: the same list with each cell replaced by its inferred type name.
    """
    # NOTE(review): ``d`` aliases ``row`` — the caller's list is mutated.
    # Confirm this is intended before switching to a copy.
    d = row
    for col, data in enumerate(row):
        try:
            if isNone(data):
                d[col] = 'none'
            else:
                num = float(data)
                # Whole-valued floats are reported as ints.
                if num.is_integer():
                    d[col] = 'int'
                else:
                    d[col] = 'double'
        except Exception:
            # Narrowed from a bare ``except:`` (which also swallowed
            # SystemExit/KeyboardInterrupt). Not a number — try a date.
            try:
                toDate(data)
                d[col] = 'date'
            except Exception:
                d[col] = 'string'
    return d
def fetch_lawschool_gpa(subset="all", usecols=None, dropcols=None,
                        numeric_only=False, dropna=False):
    """Load the Law School GPA dataset
    Note:
        By default, the data is downloaded from tempeh. See
        https://github.com/microsoft/tempeh for details.
    Args:
        subset ({'train', 'test', or 'all'}, optional): Select the dataset to
            load: 'train' for the training set, 'test' for the test set, 'all'
            for both.
        usecols (single label or list-like, optional): Feature column(s) to
            keep. All others are dropped.
        dropcols (single label or list-like, optional): Feature column(s) to
            drop.
        numeric_only (bool): Drop all non-numeric feature columns.
        dropna (bool): Drop rows with NAs.
    Returns:
        namedtuple: Tuple containing X, y, and sample_weights for the Law School
        GPA dataset accessible by index or name.
    """
    # Bug fix: avoid mutable list default arguments (shared between calls);
    # None sentinels are normalized to fresh empty lists, preserving the
    # values previously passed downstream.
    usecols = [] if usecols is None else usecols
    dropcols = [] if dropcols is None else dropcols
    if subset not in {'train', 'test', 'all'}:
        raise ValueError("subset must be either 'train', 'test', or 'all'; "
                         "cannot be {}".format(subset))
    dataset = tc.datasets["lawschool_gpa"]()
    X_train, X_test = dataset.get_X(format=pd.DataFrame)
    y_train, y_test = dataset.get_y(format=pd.Series)
    A_train, A_test = dataset.get_sensitive_features(name='race',
                                                     format=pd.Series)
    all_train = pd.concat([X_train, y_train, A_train], axis=1)
    all_test = pd.concat([X_test, y_test, A_test], axis=1)
    if subset == "train":
        df = all_train
    elif subset == "test":
        df = all_test
    else:
        df = pd.concat([all_train, all_test], axis=0)
    return standardize_dataset(df, prot_attr=['race'], target='zfygpa',
                               usecols=usecols, dropcols=dropcols,
                               numeric_only=numeric_only, dropna=dropna)
def check_ref_exons(exon_seqs, mask_stops):
    """Check if the reference sequence is correct.

    A valid reference should start with ATG and end with a stop codon.
    mask_stops controls handling of inframe stops: when True they are
    rewritten as NNN (otherwise CESAR would show an error); when False the
    program aborts.

    Args:
        exon_seqs: dict of exon number -> exon sequence (numbered 0..N-1).
        mask_stops: whether inframe stop codons should be masked with NNN.

    Returns:
        Tuple of (exon number -> possibly stop-masked sequence,
        set of codon indexes holding TGA -- potential selenocysteine codons).
    """
    sec_codons = set()  # in case there are TGA codons in the ref seq -> collect them
    gene_seq = "".join(exon_seqs[i] for i in range(len(exon_seqs)))
    codons = parts(gene_seq, n=3)  # split a seq of letters in chunks of len == 3
    if codons[0] != "ATG":
        eprint("Input is corrupted! Reference sequence should start with ATG!")
    elif codons[-1] not in STOPS:
        eprint("Input is corrupted! Reference sequence should end with a stop codon!")
    stop_codons = [(n, c) for n, c in enumerate(codons[:-1]) if c in STOPS]
    if len(stop_codons) == 0:  # no stop codons -> nothing else to do
        return exon_seqs, set()
    # there are stop codons in reference sequence:
    eprint("Warning! There are inframe stop codons!")
    for num, codon in stop_codons:
        eprint(f"Codon num {num + 1} - {codon}")
        # Idiom fix: plain if statements instead of conditional-expression
        # statements used purely for side effects.
        if mask_stops:
            codons[num] = "NNN"
        if codon == "TGA":
            # maybe a sec codon
            sec_codons.add(num)
    if not mask_stops:
        eprint(">>>STOP_CODON>>>")
        die("Abort, there are inframe stop codons.", 0)
    # if stop codons in reference are allowed, then we need to mask them (rewrite as NNN)
    # otherwise CESAR will show an error
    safe_seq = "".join(codons)
    stop_masked = {}
    prev_index = 0
    for num, exon_seq in exon_seqs.items():
        exon_len = len(exon_seq)
        stop_masked[num] = safe_seq[prev_index: prev_index + exon_len]
        prev_index += exon_len
    return stop_masked, sec_codons
def measure_fluxline_crosstalk(
        dev, target_qubit, crosstalk_qubits, amplitudes,
        crosstalk_qubits_amplitudes=None, phases=None,
        target_fluxpulse_length=500e-9, crosstalk_fluxpulse_length=None,
        skip_qb_freq_fits=False, n_cal_points_per_state=2,
        cal_states='auto', prep_params=None, label=None, upload=True,
        analyze=True):
    """
    Applies a flux pulse on the target qubit with various amplitudes.
    Measure the phase shift due to these pulses on the crosstalk qubits which
    are measured in a Ramsey setting and fluxed to a more sensitive frequency.
    Args:
        dev: The Device object used for the measurement
        target_qubit: the qubit to which a fluxpulse with varying amplitude
            is applied
        crosstalk_qubits: a list of qubits to do a Ramsey on.
        amplitudes: A list of flux pulse amplitudes to apply to the target qubit
        crosstalk_qubits_amplitudes: A dictionary from crosstalk qubit names
            to flux pulse amplitudes that are applied to them to increase their
            flux sensitivity. Missing amplitudes are set to 0.
        phases: An array of Ramsey phases in degrees.
        target_fluxpulse_length: length of the flux pulse on the target qubit.
            Default: 500 ns.
        crosstalk_fluxpulse_length: length of the flux pulses on the crosstalk
            qubits. Default: target_fluxpulse_length + 50 ns.
        n_cal_points_per_state: Number of calibration measurements per
            calibration state. Defaults to 2.
        cal_states:
            List of qubit states to use for calibration. Defaults to 'auto'.
        prep_params: Perparation parameters dictionary specifying the type
            of state preparation.
        label: Overwrite the default measuremnt label.
        upload: Whether the experimental sequence should be uploaded.
            Defaults to True.
        analyze: Whether the analysis will be run. Defaults to True.
    """
    # Resolve defaults that depend on other arguments.
    if phases is None:
        phases = np.linspace(0, 360, 3, endpoint=False)
    if crosstalk_fluxpulse_length is None:
        crosstalk_fluxpulse_length = target_fluxpulse_length + 50e-9
    if crosstalk_qubits_amplitudes is None:
        crosstalk_qubits_amplitudes = {}
    # Accept qubit names as well as qubit objects.
    if isinstance(target_qubit, str):
        target_qubit = dev.get_qb(target_qubit)
    target_qubit_name = target_qubit.name
    crosstalk_qubits = [dev.get_qb(qb) if isinstance(qb, str) else qb
                        for qb in crosstalk_qubits]
    crosstalk_qubits_names = [qb.name for qb in crosstalk_qubits]
    MC = dev.instr_mc.get_instr()
    if label is None:
        label = f'fluxline_crosstalk_{target_qubit_name}_' + \
                ''.join(crosstalk_qubits_names)
    if prep_params is None:
        prep_params = dev.get_prep_params(crosstalk_qubits)
    # Sweep dimension 0: Ramsey phase; dimension 1: target flux amplitude.
    sweep_points = SweepPoints('phase', phases, 'deg', 'Ramsey phase')
    sweep_points.add_sweep_dimension()
    sweep_points.add_sweep_parameter('target_amp', amplitudes, 'V',
                                     'Target qubit flux pulse amplitude')
    exp_metadata = {}
    for qb in set(crosstalk_qubits) | {target_qubit}:
        qb.prepare(drive='timedomain')
    cal_states = CalibrationPoints.guess_cal_states(cal_states,
                                                    for_ef=False)
    cp = CalibrationPoints.multi_qubit(
        [qb.name for qb in crosstalk_qubits], cal_states,
        n_per_state=n_cal_points_per_state)
    operation_dict = dev.get_operation_dict()
    # We get sweep_vals for only one dimension since drive_cancellation_seq
    # turns 2D sweep points into 1D-SegmentHardSweep.
    # FIXME: in the future, this should rather be implemented via
    # sequence.compress_2D_sweep
    seq, sweep_vals = mqs.fluxline_crosstalk_seq(
        target_qubit_name, crosstalk_qubits_names,
        crosstalk_qubits_amplitudes, sweep_points, operation_dict,
        crosstalk_fluxpulse_length=crosstalk_fluxpulse_length,
        target_fluxpulse_length=target_fluxpulse_length,
        prep_params=prep_params, cal_points=cp, upload=False)
    [seq.repeat_ro(f"RO {qbn}", operation_dict)
     for qbn in crosstalk_qubits_names]
    sweep_func = awg_swf.SegmentHardSweep(
        sequence=seq, upload=upload,
        parameter_name='segment_index')
    MC.set_sweep_function(sweep_func)
    MC.set_sweep_points(sweep_vals)
    # Average over the slowest qubit so every readout has enough shots.
    det_func = get_multiplexed_readout_detector_functions(
        crosstalk_qubits,
        nr_averages=max([qb.acq_averages() for qb in crosstalk_qubits])) \
        ['int_avg_det']
    MC.set_detector_function(det_func)
    # !!! Watch out with the call below. See docstring for this function
    # to see the assumptions it makes !!!
    meas_obj_sweep_points_map = sweep_points.get_meas_obj_sweep_points_map(
        [qb.name for qb in crosstalk_qubits])
    exp_metadata.update({
        'target_qubit_name': target_qubit_name,
        'crosstalk_qubits_names': crosstalk_qubits_names,
        'crosstalk_qubits_amplitudes': crosstalk_qubits_amplitudes,
        'target_fluxpulse_length': target_fluxpulse_length,
        'crosstalk_fluxpulse_length': crosstalk_fluxpulse_length,
        'skip_qb_freq_fits': skip_qb_freq_fits,
        'preparation_params': prep_params,
        'cal_points': repr(cp),
        'sweep_points': sweep_points,
        'meas_obj_sweep_points_map': meas_obj_sweep_points_map,
        'meas_obj_value_names_map':
            get_meas_obj_value_names_map(crosstalk_qubits, det_func),
        'rotate': len(cp.states) != 0,
        'data_to_fit': {qbn: 'pe' for qbn in crosstalk_qubits_names}
    })
    MC.run(label, exp_metadata=exp_metadata)
    if analyze:
        return tda.FluxlineCrosstalkAnalysis(
            qb_names=crosstalk_qubits_names, options_dict={
                'TwoD': True,
                'skip_qb_freq_fits': skip_qb_freq_fits,
            }) | 43bd8148ccaa5625cbec91fff9fa16eb20f991d6 | 3,636,653
def resistancedistances(graph):
    """
    Returns the pairwise resistance distances on the given graph.
    Args:
        graph: networkx graph
    Returns:
        Dictionary of pairwise resistance distances,
        accessed by the (i,j) node labels
    """
    node_labels = list(graph.nodes())
    count = len(node_labels)
    index_to_label = dict(enumerate(node_labels))
    # Pseudo-inverse of the graph Laplacian gives the resistance kernel.
    pinv_lap = np.linalg.pinv(nx.laplacian_matrix(graph))
    distances = {}
    for i in range(count):
        row_label = index_to_label[i]
        distances[row_label] = {}
        for j in range(count):
            col_label = index_to_label[j]
            distances[row_label][col_label] = (
                pinv_lap[i, i] + pinv_lap[j, j]
                - pinv_lap[i, j] - pinv_lap[j, i]
            )
    return distances
def flatten_to_raster(data):
    """ Flatten numpy array of various dimensions to RGB raster image.
    :param data: numpy array of one of following sizes.
        1) H x W x C (color/gray image)
        2) N x Y x X x C (array of color/gray images)
        3) nY x nX x Y x X x C (2d array of color/gray images)
        (C has to be 1 or 3)
    Input -> Output
        1) H x W x C -> H x W x C (unchanged)
        2) N x Y x X x C -> (nY*Y) x (nX*X) x C
           where nX & nY are factors of N chosen to get as close to a square
           grid as possible (biased towards more columns than rows, so for
           12 images we get an nY x nX = 3 x 4 grid). Tiles are laid out
           row-major: image k goes to grid cell (k // nX, k % nX).
        3) nY x nX x Y x X x C -> (nY*Y) x (nX*X) x C
    :return: tuple of (flattened raster, nY, nX, n_states)
    """
    n_dim = len(data.shape)
    if n_dim == 3:
        H, W, C = data.shape
        nY, nX = 1, 1
        flattened = data
        n_states = nY * nX
    elif n_dim == 4:
        # N x Y x X x C (array of color/gray images)
        image_grid = image_array_to_grid(data)
        nY, nX, Y, X, C = image_grid.shape
        flattened = image_grid_to_raster(image_grid)  # .transpose(1,0,2,3,4)
        n_states = len(data)
    elif n_dim == 5:
        image_grid = data
        nY, nX, Y, X, C = image_grid.shape
        flattened = image_grid_to_raster(image_grid)
        n_states = nY * nX
    else:
        # Fix: raise the idiomatic ValueError instead of a bare Exception
        # (backward compatible: ValueError is a subclass of Exception).
        raise ValueError(f"data dimension {n_dim} not supported!")
    return flattened, nY, nX, n_states
def read_google(url, **kwargs):
    """
    Reads a google sheet into a DataFrame.

    Args:
        url: base URL of the google sheet, with or without trailing slash.
        **kwargs: passed through to ``pd.read_csv``.

    Returns:
        pandas.DataFrame with the sheet contents.
    """
    # Robustness fix: ``endswith`` also handles an empty string, where the
    # original ``url[-1]`` lookup raised IndexError.
    if not url.endswith('/'):
        url += '/'
    return pd.read_csv(url + 'export?gid=0&format=csv', **kwargs)
def ts_inspect_2d(target, *preds, start_date=None, freq=None):
    """
    Builds TSMetrics for point predictions only, creating internal representation for it.
    """
    def _as_xr(array_2d):
        # Shared conversion so target and predictions use identical settings.
        return xr_2d_factory(array_2d, start_date=start_date, freq=freq)

    converted_preds = [_as_xr(pred) for pred in preds]
    return TSMetrics(_as_xr(target), *converted_preds)
def adjust_age_groups(age_labels):
    """Merge consecutive pairs of age-group labels into single labels.

    For each pair of labels being aggregated, takes the lower bound of the
    first element and the upper bound of the second, e.g.
    ``["0-4", "5-10"] -> ["0-10"]``. The last resulting group is rewritten
    as open-ended ("<lower bound>+").

    Args:
        age_labels: list of "low-high" age group label strings.

    Returns:
        List of merged labels, e.g. ``["0-9", "10+"]``.
    """
    if not age_labels:
        # Robustness fix: an empty input used to raise IndexError below.
        return []
    merged = []
    pending_low = None
    for element in age_labels:
        if pending_low is None:
            pending_low = element.split('-')[0]
        else:
            merged.append(pending_low + '-' + element.split('-')[-1])
            pending_low = None
    if pending_low is not None:
        # Robustness fix: an odd trailing label was previously dropped
        # silently; keep its lower bound so it still forms the last group.
        merged.append(pending_low)
    # making the last agegroup open-ended, based on its lower bound + "+"
    merged[-1] = merged[-1].split("-")[0] + "+"
    return merged
def get_kni_ports():
    """
    Return the set of KNI port names currently present on the host.

    A KNI port is a string of format vEth0_%d where %d is the port index.
    Names are scraped from ``ifconfig`` output via a shell pipeline.
    """
    output = run_local_cmd('ifconfig | grep vEth0_ | cut -d\':\' -f1 ', get_output = True)
    # Idiom fix: set comprehension instead of set() wrapped around a list
    # comprehension; empty strings from the trailing newline are dropped.
    return {port for port in output.split('\n') if port != ''}
import re
def parse_log(file_abspath):
    """Parse warning and error info from TRNSYS generated log file.

    Parses warning and error count when simulation ends with errors.
    If simulation ends successfully, counts number of warnings and return
    successful completion message.

    Args:
        file_abspath: absolute path to result file.

    Returns:
        dict with keys 'Message', 'Warnings' and 'Errors'.

    Raises:
        IOError: problem reading out_file
    """
    runsumdict = {}
    # Raw strings avoid invalid escape sequences (e.g. "\\d") which raise
    # SyntaxWarning on recent Python versions.
    pat01 = re.compile(r'Simulation stopped with errors')
    pat02 = re.compile(r'Total Warnings\s+:\s+(\d+)')
    pat03 = re.compile(r'Total Fatal Errors\s+:\s+(\d+)')
    pat04 = re.compile(r'Warning at time')
    # Bug fix: mode 'rU' was removed in Python 3.11; plain 'r' already uses
    # universal newline translation.
    with open(file_abspath, 'r') as log_f:
        temp = log_f.read()
    match = pat01.search(temp)
    if match:
        runsumdict['Message'] = match.group()
        match = pat02.search(temp)
        runsumdict['Warnings'] = int(match.group(1))
        match = pat03.search(temp)
        runsumdict['Errors'] = int(match.group(1))
    else:
        runsumdict['Message'] = "Simulation ended successfully"
        match = pat04.findall(temp)
        runsumdict['Warnings'] = len(match)
        runsumdict['Errors'] = 0
    return runsumdict
from typing import Union
def extract_publish_info_from_issue(
    issue: "Issue", publish_type: PublishType
) -> Union[PublishInfo, MyValidationError]:
    """Extract the data required for publishing from an issue.

    Returns the validation error object itself when validation fails.
    """
    if publish_type == PublishType.BOT:
        info_cls = BotPublishInfo
    elif publish_type == PublishType.PLUGIN:
        info_cls = PluginPublishInfo
    else:
        info_cls = AdapterPublishInfo
    try:
        return info_cls.from_issue(issue)
    except MyValidationError as e:
        return e
from matplotlib.path import Path
def points_inside_poly(points, all_verts):
    """Return bool array of points inside a polygon."""
    polygon_path = Path(all_verts)
    return polygon_path.contains_points(points)
def exploration_function(q_space, x_space, index_, action_space_n, k):
    """Return the optimistic exploration value for a state.

    Each action's Q-value is boosted by an exploration bonus
    ``k / (1 + visit_count)``; the maximum over all actions is returned.
    """
    boosted = (
        q_space[index_][action] + k / (1 + x_space[index_][action])
        for action in range(action_space_n)
    )
    # default keeps the original -inf result for an empty action space
    return max(boosted, default=float('-inf'))
def get_vectors(model_dm, model_dbow):
    """
    Convert trained doc2vec models into stacked document vectors.

    :param model_dm: trained distributed-memory doc2vec model
    :param model_dbow: trained distributed-bag-of-words doc2vec model
    :return: (train_vecs, test_vecs) with DM and DBOW vectors stacked
        horizontally for each document
    """
    # NOTE(review): relies on module-level globals x_train, x_test and size,
    # as the original did -- confirm they are defined before this is called.
    def _combined(corpus):
        dm_vecs = getVecs(model_dm, corpus, size)
        dbow_vecs = getVecs(model_dbow, corpus, size)
        return np.hstack((dm_vecs, dbow_vecs))

    return _combined(x_train), _combined(x_test)
def test_profile_queue(db, test_profile, test_project_data):
    """A queue with test data, associated with the first test profile."""
    queue = add_queue(test_project_data, TEST_QUEUE_LEN, profile=test_profile)
    return queue
import sqlite3
def user_has_registered(userID):
    """Checks if a particular user has been registered in database.

    Args:
        userID: the user id to look up in the ``profile`` table.

    Returns:
        True if a matching row exists, False otherwise.
    """
    database = sqlite3.connect("users.db")
    try:
        cursor = database.cursor()
        # Security fix: parameterized query instead of interpolating userID
        # into the SQL string (SQL injection risk).
        cursor.execute("SELECT user_id FROM profile WHERE user_id = ?",
                       (userID,))
        return cursor.fetchone() is not None
    finally:
        # Bug fix: the connection was previously never closed (leak).
        database.close()
def prepare_ocp(
    biorbd_model_path: str,
    final_time: float,
    n_shooting: int,
    marker_velocity_or_displacement: str,
    marker_in_first_coordinates_system: bool,
    control_type: ControlType,
    ode_solver: OdeSolver = OdeSolver.RK4(),
) -> OptimalControlProgram:
    """
    Prepare an ocp that targets some marker velocities, either by finite differences or by jacobian
    Parameters
    ----------
    biorbd_model_path: str
        The path to the bioMod file
    final_time: float
        The time of the final node
    n_shooting: int
        The number of shooting points
    marker_velocity_or_displacement: str
        which type of tracking: finite difference ('disp') or by jacobian ('velo')
    marker_in_first_coordinates_system: bool
        If the marker to track should be expressed in the global or local reference frame
    control_type: ControlType
        The type of controls
    ode_solver: OdeSolver
        The ode solver to use
    Returns
    -------
    The OptimalControlProgram ready to be solved
    """
    biorbd_model = biorbd.Model(biorbd_model_path)
    # Add objective functions
    if marker_in_first_coordinates_system:
        # Marker should follow this segment (0 velocity when compare to this one)
        coordinates_system_idx = 0
    else:
        # Marker should be static in global reference frame
        # (-1 selects the global frame)
        coordinates_system_idx = -1
    objective_functions = ObjectiveList()
    if marker_velocity_or_displacement == "disp":
        objective_functions.add(
            ObjectiveFcn.Lagrange.MINIMIZE_MARKERS_DISPLACEMENT,
            coordinates_system_idx=coordinates_system_idx,
            index=6,
            weight=1000,
        )
    elif marker_velocity_or_displacement == "velo":
        objective_functions.add(ObjectiveFcn.Lagrange.MINIMIZE_MARKERS_VELOCITY, index=6, weight=1000)
    else:
        raise RuntimeError(
            f"Wrong choice of marker_velocity_or_displacement, actual value is "
            f"{marker_velocity_or_displacement}, should be 'velo' or 'disp'."
        )
    # Make sure the segments actually moves (in order to test the relative speed objective)
    # (negative weights reward state magnitude instead of penalizing it)
    objective_functions.add(ObjectiveFcn.Lagrange.MINIMIZE_STATE, index=6, weight=-1)
    objective_functions.add(ObjectiveFcn.Lagrange.MINIMIZE_STATE, index=7, weight=-1)
    # Dynamics
    dynamics = DynamicsList()
    dynamics.add(DynamicsFcn.TORQUE_DRIVEN)
    # Path constraint
    nq = biorbd_model.nbQ()
    x_bounds = BoundsList()
    x_bounds.add(bounds=QAndQDotBounds(biorbd_model))
    # Widen the bounds on indices nq..2*nq-1 (the generalized velocities)
    for i in range(nq, 2 * nq):
        x_bounds[0].min[i, :] = -10
        x_bounds[0].max[i, :] = 10
    # Initial guess
    x_init = InitialGuessList()
    x_init.add([1.5, 1.5, 0.0, 0.0, 0.7, 0.7, 0.6, 0.6])
    # Define control path constraint
    tau_min, tau_max, tau_init = -100, 100, 0
    u_bounds = BoundsList()
    u_bounds.add([tau_min] * biorbd_model.nbGeneralizedTorque(), [tau_max] * biorbd_model.nbGeneralizedTorque())
    u_init = InitialGuessList()
    u_init.add([tau_init] * biorbd_model.nbGeneralizedTorque())
    return OptimalControlProgram(
        biorbd_model,
        dynamics,
        n_shooting,
        final_time,
        x_init,
        u_init,
        x_bounds,
        u_bounds,
        objective_functions,
        control_type=control_type,
        ode_solver=ode_solver,
    ) | 6a991931b7cd458611a9467e6c6c7ba4f0235150 | 3,636,667
def key_gen(**kwargs):
    """
    Key generator for linux. Determines key based on
    parameters supplied in kwargs.
    Keyword Parameters:
    @keyword geounit1: portable_id of a geounit
    @keyword geounit2: portable_id of a geounit
    @keyword region: region abbreviation
    """
    if 'geounit1' in kwargs and 'geounit2' in kwargs:
        return f"adj:geounit1:{kwargs['geounit1']}:geounit2:{kwargs['geounit2']}"
    if 'region' in kwargs:
        return f"adj:region:{kwargs['region']}"
    # No recognized keywords: implicitly returns None, as before.
from typing import List
from typing import Optional
def find(bindings: List[Binding], name: str) -> Optional[Binding]:
    """
    Returns a Binding with a given name. Comparison is case-insensitive.
    :param bindings: the Bindings to find in
    :param name: the name of the Binding to find
    :return: the Binding with a given name if it exists, None otherwise
    """
    # Perf: lower-case the needle once, instead of on every iteration.
    target = name.lower()
    for b in bindings:
        if b.get_name().lower() == target:
            return b
    return None
def computeResult():
    """Allows download of asset data result file.
    ---
    tags:
      - services
    consumes:
      - application/json
    parameters:
      - name: consumerAddress
        in: query
        description: The consumer address.
        required: true
        type: string
      - name: jobId
        in: query
        description: jobId
        required: true
        type: string
      - name: index
        in: query
        description: Result index
        required: true
      - name: nonce
        in: query
        description: The UTC timestamp, used to prevent replay attacks
      - name: signature
        in: query
        description: Signature of (consumerAddress+jobId+index+nonce) to verify that the consumer has rights to download the result
    responses:
      200:
        description: Content of the result
      400:
        description: One or more of the required attributes are missing or invalid.
      404:
        description: Result not found
      503:
        description: Service Unavailable
    """
    data = get_request_data(request)
    logger.info(f"computeResult called. arguments = {data}")
    url = get_compute_result_endpoint()
    consumer_address = data.get("consumerAddress")
    job_id = data.get("jobId")
    # Provider co-signs the request so the compute backend can verify it.
    nonce, provider_signature = sign_for_compute(
        provider_wallet, consumer_address, job_id
    )
    web3 = get_web3()
    params = {
        "index": data.get("index"),
        "owner": data.get("consumerAddress"),
        "jobId": job_id,
        "consumerSignature": data.get("signature"),
        "providerSignature": provider_signature,
        "nonce": nonce,
        "chainId": web3.chain_id,
    }
    # Build the final result URL with the query parameters encoded.
    req = PreparedRequest()
    req.prepare_url(url, params)
    result_url = req.url
    logger.debug(f"Done processing computeResult, url: {result_url}")
    # Record the consumer-supplied nonce so the request cannot be replayed.
    update_nonce(data.get("consumerAddress"), data.get("nonce"))
    # NOTE(review): validate_url=False presumably because result_url is
    # built from provider configuration, not raw user input -- confirm.
    response = build_download_response(
        request, requests_session, result_url, result_url, None, validate_url=False
    )
    logger.info(f"computeResult response = {response}")
    return response | 85cf900bdde0d2e95c5c14c7a18abaaf592bce99 | 3,636,670
from typing import Dict
async def init_menu_perms(request: Request) -> Dict:
    """
    Initialize menus and permissions (delegates to the service layer).
    """
    result = await services.init_menu_perms(request)
    return result
def bert_process_sentence(example_tokens, max_seq_length, tokenizer):
    """
    Tokenization and pre-processing of text as expected by Bert.

    Truncates the token list to fit ``max_seq_length`` (reserving room for
    the [CLS] and [SEP] markers), converts tokens to ids, and zero-pads the
    outputs. Single-sequence convention:
        tokens:   [CLS] the dog is hairy . [SEP]
        type_ids: 0     0   0   0  0     0 0
    The [CLS] position is used downstream as the "sentence vector".

    Parameters
    ----------
    example_tokens
    max_seq_length
    tokenizer

    Returns
    -------
    (input_ids, input_mask, segment_ids) lists of length max_seq_length
    """
    # Account for [CLS] and [SEP] with "- 2"
    truncated = list(example_tokens[:max_seq_length - 2])
    tokens = ["[CLS]"] + truncated + ["[SEP]"]
    segment_ids = [0] * len(tokens)
    input_ids = tokenizer.convert_tokens_to_ids(tokens)
    # The mask has 1 for real tokens and 0 for padding tokens. Only real
    # tokens are attended to.
    input_mask = [1] * len(input_ids)
    # Zero-pad up to the sequence length.
    padding = [0] * (max_seq_length - len(input_ids))
    input_ids = input_ids + padding
    input_mask = input_mask + padding
    segment_ids = segment_ids + padding
    assert len(input_ids) == max_seq_length
    assert len(input_mask) == max_seq_length
    assert len(segment_ids) == max_seq_length
    return input_ids, input_mask, segment_ids
def weight_diff(w1, w2):
    """ Calculates the array of differences between the weights in arrays """
    # Flatten every layer and join them into one 1-D vector per weight set.
    flat_w1 = np.concatenate([layer.ravel() for layer in w1])
    flat_w2 = np.concatenate([layer.ravel() for layer in w2])
    return flat_w1 - flat_w2
import sys
import time
def get_root_modules():
    """
    Returns a list containing the names of all the modules available in the
    folders of the pythonpath.

    Per-path results are cached in ``XSH.modules_cache`` (except for the
    current working directory). The scan aborts and returns [] if it runs
    longer than ``TIMEOUT_GIVEUP`` seconds.
    """
    rootmodules_cache = XSH.modules_cache
    rootmodules = list(sys.builtin_module_names)
    # Bug fix: this file does ``import time``, so the timer must be
    # ``time.time()`` -- calling ``time()`` on the module raised TypeError.
    start_time = time.time()
    for path in sys.path:
        try:
            modules = rootmodules_cache[path]
        except KeyError:
            modules = module_list(path)
            try:
                modules.remove("__init__")
            except ValueError:
                pass
            if path not in ("", "."):  # cwd modules should not be cached
                rootmodules_cache[path] = modules
            if time.time() - start_time > TIMEOUT_GIVEUP:
                print("\nwarning: Getting root modules is taking too long, we give up")
                return []
        rootmodules.extend(modules)
    rootmodules = list(set(rootmodules))
    return rootmodules
def advect_salinity(vs):
    """
    Integrate salinity by advecting the current-timestep salt field.
    """
    salt_now = vs.salt[..., vs.tau]
    dsalt_now = vs.dsalt[..., vs.tau]
    return advect_tracer(vs, salt_now, dsalt_now)
import math
def lafferty_wyatt_point(lowedge, highedge, expo_slope):
    """calculates the l-w point for a bin where the true distribution is an
    exponential characterized by expo_slope.
    """
    # Mean of exp(slope*x) over the bin; divisions kept in the original
    # order so floating point results are bit-identical.
    width = highedge - lowedge
    exp_diff = math.exp(expo_slope * highedge) - math.exp(expo_slope * lowedge)
    mean_exp = exp_diff / expo_slope / width
    return math.log(mean_exp) / expo_slope
def init_critical_cases_20():
    """
    Real Name: b'init Critical Cases 20'
    Original Eqn: b'0'
    Units: b'person'
    Limits: (None, None)
    Type: constant
    b''
    """
    # Auto-generated from a Vensim model: constant initial number of
    # critical cases (stock 20) -- do not edit by hand.
    return 0 | 735a4df9c2ee5777c7c15dcf4fb4a3830cb5e0b6 | 3,636,677
def build_doc(pic_dic):
    """
    Build a Word document from ``{'image-name': ['image-path', text]}``.

    Each entry produces a heading, the picture and a text paragraph, one
    entry per page. Returns the assembled document object.
    """
    document = word_obj()
    # Centered footer with an auto-updating page number field.
    footer_par = document.sections[0].footer.paragraphs[0]
    footer_par.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
    add_page_number(footer_par.add_run())
    # Cover / starter page.
    starter_page(document)
    for pic_name, pic_details in pic_dic.items():
        section_head = document.add_heading(pic_name, 6)
        heading(section_head, document)
        document.add_picture(pic_details[0], width=Inches(6), height=Inches(4))
        body_par = document.add_paragraph(pic_details[1])
        paragraph(body_par, document)
        document.add_page_break()
    return document
def dispatch_every_hour(one_time_password):
    """Receiving point of start_every_hour's post request.

    Verifies the one time password is correct, then launches
    ``every_hour`` in a separate process.
    """
    EveryHourOTP.check_password(one_time_password)
    worker = Process(target=every_hour)
    worker.start()
    return "success"
def check_list(data):
    """Check if data is a list; if it is not, return it wrapped as [data]."""
    # Intentionally an exact ``type`` check (so list subclasses are wrapped
    # too), matching the original behavior.
    return data if type(data) is list else [data]
def nfvi_reinitialize(config):
    """
    Re-initialize the NFVI package
    """
    global _task_worker_pools
    compute_plugin_disabled = (config.get('compute_plugin_disabled',
                                          'False') in DISABLED_LIST)
    if compute_plugin_disabled:
        # Nothing to re-initialize; report success as before.
        return True
    return nfvi_compute_initialize(config, _task_worker_pools['compute'])
import re
def parse_freqs(lines, parameters):
    """Parse the basepair frequencies.

    Scans baseml (PAML) output lines and fills ``parameters`` in place with
    base frequencies, rate frequencies and branch-specific frequency data.

    Args:
        lines: iterable of baseml output lines.
        parameters: dict updated in place with the parsed values.

    Returns:
        The (mutated) ``parameters`` dict.
    """
    # Bug fix: regex patterns are now raw strings; escapes like "\d" and
    # "\#" in plain strings are invalid and raise SyntaxWarning since 3.12.
    root_re = re.compile(r"Note: node (\d+) is root.")
    branch_freqs_found = False
    base_freqs_found = False
    for line in lines:
        # Find all floating point numbers in this line
        line_floats_res = line_floats_re.findall(line)
        line_floats = [float(val) for val in line_floats_res]
        # Find base frequencies from baseml 4.3
        # Example match:
        # "Base frequencies:   0.20090  0.16306  0.37027  0.26577"
        if "Base frequencies" in line and line_floats:
            base_frequencies = {}
            base_frequencies["T"] = line_floats[0]
            base_frequencies["C"] = line_floats[1]
            base_frequencies["A"] = line_floats[2]
            base_frequencies["G"] = line_floats[3]
            parameters["base frequencies"] = base_frequencies
        # Find base frequencies from baseml 4.1:
        # Example match:
        # "base frequency parameters
        # "  0.20317  0.16768  0.36813  0.26102"
        elif "base frequency parameters" in line:
            base_freqs_found = True
        # baseml 4.4 returns to having the base frequencies on the next line
        # but the heading changed
        elif "Base frequencies" in line and not line_floats:
            base_freqs_found = True
        elif base_freqs_found and line_floats:
            base_frequencies = {}
            base_frequencies["T"] = line_floats[0]
            base_frequencies["C"] = line_floats[1]
            base_frequencies["A"] = line_floats[2]
            base_frequencies["G"] = line_floats[3]
            parameters["base frequencies"] = base_frequencies
            base_freqs_found = False
        # Find frequencies
        # Example match:
        # "freq:   0.90121  0.96051  0.99831  1.03711  1.10287"
        elif "freq: " in line and line_floats:
            parameters["rate frequencies"] = line_floats
        # Find branch-specific frequency parameters
        # Example match (note: I think it's possible to have 4 more
        # values per line, enclosed in brackets, so I'll account for
        # this):
        # (frequency parameters for branches)  [frequencies at nodes] (see Yang & Roberts 1995 fig 1)
        #
        # Node #1  ( 0.25824  0.24176  0.25824  0.24176 )
        # Node #2  ( 0.00000  0.50000  0.00000  0.50000 )
        elif "(frequency parameters for branches)" in line:
            parameters["nodes"] = {}
            branch_freqs_found = True
        elif branch_freqs_found:
            if line_floats:
                node_res = re.match(r"Node \#(\d+)", line)
                node_num = int(node_res.group(1))
                node = {"root": False}
                node["frequency parameters"] = line_floats[:4]
                if len(line_floats) > 4:
                    node["base frequencies"] = {"T": line_floats[4],
                                                "C": line_floats[5],
                                                "A": line_floats[6],
                                                "G": line_floats[7]}
                parameters["nodes"][node_num] = node
            else:
                root_res = root_re.match(line)
                if root_res is not None:
                    root_node = int(root_res.group(1))
                    parameters["nodes"][root_node]["root"] =\
                        True
                    branch_freqs_found = False
    return parameters
def student2nation(id_num):
    """
    Takes student id, returns nation id of the student
    (delegates to school2nation).
    """
    nation_id = school2nation(id_num)
    return nation_id
def get_projects_with_builds(only_public=True, only_active_versions=True):
    """Returns a queryset of Projects with active only public by default builds.

    Args:
        only_public: restrict to builds of publicly-visible versions.
        only_active_versions: restrict to builds of active versions.
    """
    # Bug fix: ``version__active=True`` was previously applied
    # unconditionally in the base queryset, so ``only_active_versions=False``
    # had no effect. The filter is now added only when requested.
    builds = Build.objects.filter(
        success=True,
        state='finished',
    )
    if only_public:
        builds = builds.filter(version__privacy_level='public')
    if only_active_versions:
        builds = builds.filter(version__active=True)
    filtered_projects = builds.values_list(
        'project',
        flat=True
    )
    return Project.objects.filter(
        pk__in=filtered_projects
    )
import base64
def encode_base64(filename):
    """Encode the contents of a file as a base64 string.

    Args
        filename: image file path.
    Returns:
        the base64-encoded file contents as a str.
    """
    with open(filename, "rb") as file_obj:
        raw_bytes = file_obj.read()
    return base64.b64encode(raw_bytes).decode()
def transfer_from_taoyuanagrichannel_to_taoyuanagriwaterdemand():
    """
    Real Name: Transfer From TaoYuanAgriChannel To TaoYuanAgriWaterDemand
    Original Eqn: (Transfer From ShiMenReservoir To HouChiWeir*Ratio AgriWater ShiMenReservoir To HouChiWeir In TaoYuanAgriChannel)*(1-Channel Transfer Loss Rate )
    Units: m3
    Limits: (None, None)
    Type: component
    Subs: None
    """
    # Auto-generated from a Vensim model: agricultural share of the
    # reservoir-to-weir transfer, reduced by channel transfer losses.
    return (
        transfer_from_shimenreservoir_to_houchiweir()
        * ratio_agriwater_shimenreservoir_to_houchiweir_in_taoyuanagrichannel()
    ) * (1 - channel_transfer_loss_rate()) | 7bd77945002ed9217485dcf1f412cf333c666e30 | 3,636,686
def Doxyfile_emitter(target, source, env):
    """
    Modify the target and source lists to use the defaults if nothing
    else has been specified.

    The Doxyfile template is prepended to the sources so the build
    re-runs whenever the template changes.
    """
    template_node = env.File(env['DOXYFILE_FILE'])
    source.insert(0, template_node)
    return target, source
def _recursive_make_immutable(o):
    """Recursively transform an object into an immutable form.

    This is a cdev core specific transformation: lists become frozensets and
    dicts become frozendicts, so the results are immutable and hashable and
    can be compared directly or used as keys in dicts and `networkx` DAGs.

    A dict with key ``id`` equal to ``"cdev_cloud_output"`` is treated as a
    Cloud Output Dict; its ``output_operations`` (when present) are loaded
    via ``_load_cloud_output_operations``.

    Note: this is designed specifically for use while loading a resource
    state, so structural errors in the data are allowed to propagate up to
    ``load_resource_state`` without local handling.

    Args:
        o (Any): original object

    Returns:
        transformed immutable object
    """
    if isinstance(o, list):
        return frozenset(_recursive_make_immutable(item) for item in o)
    if not isinstance(o, dict):
        return o
    converted = {key: _recursive_make_immutable(value) for key, value in o.items()}
    if o.get("id") == "cdev_cloud_output":
        operations = o.get("output_operations")
        if operations:
            converted["output_operations"] = _load_cloud_output_operations(operations)
        return frozendict(converted)
    return frozendict(converted)
def retry_condition(exception):
    """Decide whether a failed operation should be retried.

    Returns True (after logging the error) when ``exception`` is an
    ``HTTPError`` or ``AttributeError`` — the error types treated as
    transient here — and False for everything else.
    """
    retryable = isinstance(exception, (HTTPError, AttributeError))
    if retryable:
        print(f'HTTP error occurred: {exception}')  # Python 3.6
    return retryable
def mergesort(input_arr):
    """
    Sort the array by application of merge sort.

    Time complexity: O(n log(n))
    Space Complexity: O(n)

    Args:
        input_arr(array): Input array with numbers to be sorted
    Returns:
        sorted_arr(array): Sorted array with numbers in ascending order.
        Inputs of length <= 1 are returned as-is (same object), matching
        the original behavior.
    """
    # Local import keeps this block self-contained in the dump-style file.
    import heapq

    if len(input_arr) <= 1:
        return input_arr
    mid = len(input_arr) // 2
    left = mergesort(input_arr[:mid])
    right = mergesort(input_arr[mid:])
    # heapq.merge combines two already-sorted iterables in linear time,
    # replacing the hand-rolled _merge helper defined elsewhere in the file.
    return list(heapq.merge(left, right))
def _clean_annotated_text(text):
"""Cleans text from the format that it was presented to annotators in the
S.M.A.R.T data annotation tool. Splits the title from the abstract text
and strips any trailing whitespace.
Returns:
title (str): The project title
text (str): The project abstract
"""
text = text.split('=====')
title = text[1].strip()
abstract = text[-1].strip()
return title, abstract | 356cdf893225c41d303e83f1cf2f3418544c76ae | 3,636,691 |
def get_or_create_event_loop():
    """
    Return the current event loop, creating a new one when none exists.

    Returns
    -------
    event_loop : ``EventThread``
    """
    try:
        return get_event_loop()
    except RuntimeError:
        # No loop is bound yet; spin up a fresh non-daemon one.
        return create_event_loop(daemon=False)
def tnaming_Displace(*args):
    """
    * Apply the given Location (placement transformation) to the shapes of
    the label and of its sub-labels.
    :param label:
    :type label: TDF_Label &
    :param aLocation:
    :type aLocation: TopLoc_Location &
    :param WithOld: default value is Standard_True
    :type WithOld: bool
    :rtype: void
    """
    # Thin SWIG wrapper: delegates directly to the native OCCT TNaming binding.
    return _TNaming.tnaming_Displace(*args)
def get_capital_np(markets,signals,size,commiRate,climit = 4, wlimit = 2, op=True):
    """Numpy-based backtest of the signals' ("labels'") profit and loss.

    Walks the ticks once, opening a 1-lot position on a nonzero signal when
    flat, and closing it on a fixed take-profit (``wlimit``) or stop-loss
    (``climit``) move against/for the entry price. Commission is charged on
    both legs at close.

    NOTE(review): assumes ``markets`` is a 2-D array where column 0 is the
    price to buy at and column 1 is the price to sell at (longs enter at
    [num,0] and exit at [num,1], shorts the reverse) — confirm with caller.
    ``op`` selects which tick's signal drives entries: when True the current
    tick's signal is used, when False the previous tick's (one-tick delay).
    The original docstring's wording on the delay direction is ambiguous —
    verify intent.

    Returns:
        (pnls, actions): per-tick realized P&L and trade actions
        (+1 buy, -1 sell, 0 none), both as numpy arrays.
    """
    postions = np.zeros(len(signals))   # position held at each tick (-1/0/+1)
    actions = np.zeros(len(signals))    # trade executed at each tick
    costs = np.zeros(len(signals))      # entry price of the open position
    pnls = np.zeros(len(signals))       # realized P&L per tick (0 if no close)
    lastsignal = 0
    lastpos = 0
    lastcost = 0
    num = 0
    for num in range(1,len(signals)):
        # Carry the previous state forward by default.
        postions[num] = lastpos
        actions[num] = 0
        costs[num] = lastcost
        pnls[num] = 0
        # Take-profit / stop-loss exit
        if lastpos > 0 and \
            (markets[num,1]<=lastcost-climit or markets[num,1]>=lastcost+wlimit):
            # Close the long: sell at column-1 price, pay round-trip commission.
            postions[num] = 0
            actions[num] = -1
            costs[num] = 0
            fee = (markets[num,1]+lastcost)*size*commiRate
            pnls[num] = (markets[num,1]-lastcost)*size-fee
        elif lastpos < 0 and \
            (markets[num,0]>=lastcost+climit or markets[num,0]<=lastcost-wlimit):
            # Close the short: buy back at column-0 price.
            postions[num] = 0
            actions[num] = 1
            costs[num] = 0
            fee = (markets[num,0]+lastcost)*size*commiRate
            pnls[num] = (lastcost-markets[num,0])*size-fee
        # Position opening
        if op:
            # Use the current tick's signal for entries (no delay).
            lastsignal = signals[num]
        if lastsignal > 0 and lastpos == 0:
            postions[num] = 1
            actions[num] = 1
            costs[num] = markets[num,0]
        elif lastsignal < 0 and lastpos == 0:
            postions[num] = -1
            actions[num] = -1
            costs[num] = markets[num,1]
        # Roll state for the next tick; lastsignal here makes op=False a
        # one-tick-delayed entry on the following iteration.
        lastpos = postions[num]
        lastcost = costs[num]
        lastsignal = signals[num]
    return pnls,actions
def slicename_to_hostname(vs_name):
    """Converts a vserver slice name into a canonical FQDN.

    Slice names use a pattern like: <some site>_<some name>.

    Example:
        If vs_name is 'mlab_utility' and the system hostname is
        'mlab4.nuq01.measurement-lab.org', the result is
        'utility.mlab.mlab4.nuq01.measurement-lab.org'.

    Args:
        vs_name: str, name of a vserver slice, e.g. mlab_utility.
    Returns:
        str, the canonical FQDN based on system hostname and slice name.
    """
    site, sep, chosen = vs_name.partition('_')
    if not sep:
        # No underscore: the whole slice name is the prefix.
        prefix = vs_name
    else:
        # The part before the first '_' is the PlanetLab site name; place
        # the user-chosen remainder in front of it.
        prefix = '.'.join(chosen.split('_') + [site])
    return '%s.%s' % (prefix, _root_hostname)
from typing import Dict
def swim_for_a_day(life_counts: Dict[int, int]):
    """Advance the shoal by one day, decrementing every life counter.

    Fish at counter 0 procreate: their offspring appear at counter 8 while
    the parents reset to counter 6, joining any fish that just moved down
    from counter 7.
    """
    aged: Dict[int, int] = {}
    for timer, population in life_counts.items():
        if timer == 0:
            # Spawning day: offspring at 8, parents reset to 6.
            aged[8] = population
            aged[6] = aged.get(6, 0) + population
        else:
            aged[timer - 1] = aged.get(timer - 1, 0) + population
    return aged
def page_not_found (error):
    """
    Generic 404 handler: ignores the error details and returns a fixed
    user-facing message.
    """
    message = "Unable to find Distill."
    return message
def dict_to_obj(our_dict):
    """
    Reconstruct a custom object from a dict carrying "__module__" and
    "__class__" metadata; dicts without that metadata are returned as-is.

    The metadata keys are popped (mutating the caller's dict) so that the
    remaining entries can be passed straight to the class constructor as
    keyword arguments.

    NOTE(review): importing and instantiating classes named by the data is a
    deserialization risk if our_dict can originate from untrusted input.
    """
    if "__class__" not in our_dict:
        return our_dict
    # Strip metadata, leaving only the instance's constructor arguments.
    class_name = our_dict.pop("__class__")
    module_name = our_dict.pop("__module__")
    # __import__ is used because the module name is only known at runtime;
    # the fromlist makes it return the leaf module rather than the package.
    module = __import__(module_name, globals(), locals(), [class_name])
    target_class = getattr(module, class_name)
    return target_class(**our_dict)
def display_fips( collection_of_fips, fig, **kwargs ):
    """
    Method that is very similar to :py:meth:`display_fips_geom <covid19_stats.engine.viz.display_fips_geom>`, except this *also* displays the FIPS code of each county. For example, for `Rhode Island`_, this is.
    .. _viz_display_fips_rhodeisland:
    .. figure:: /_static/viz/viz_display_fips_rhodeisland.png
       :width: 100%
       :align: left
       Demonstration of this method showing the counties in `Rhode Island`_. The FIPS code of each county is shown in red. One can extract the patches in this object to manually change the colors of these county polygons.
    Here are the arguments.
    :param collection_of_fips: can be a :py:class:`list`, :py:class:`set`, or other iterable of FIPS codes to visualize and label.
    :param fig: the :py:class:`Figure <matplotlib.figure.Figure>` onto which to draw this :py:class:`GeoAxes <cartopy.mpl.geoaxes.GeoAxes>`.
    :rtype: :py:class:`GeoAxes <cartopy.mpl.geoaxes.GeoAxes>`
    .. _`Rhode Island`: https://en.wikipedia.org/wiki/Rhode_Island
    """
    # Collect each FIPS code's boundary polygons and the bounding box that
    # encloses all of them, then build a GeoAxes fitted to that box.
    bdict = core.get_boundary_dict( collection_of_fips )
    bbox = gis.calculate_total_bbox( chain.from_iterable( bdict.values( ) ) )
    ax = create_and_draw_fromfig( fig, bbox, **kwargs )
    # Semi-transparent fill color: matplotlib default blue at 25% alpha.
    fc = list( to_rgba( '#1f77b4' ) )
    fc[-1] = 0.25
    for fips in sorted( bdict ):
        for shape in bdict[ fips ]:
            # One dashed, black-edged patch per boundary ring of the county.
            poly = Polygon(
                shape, closed = True,
                edgecolor = 'k', linewidth = 2.0, linestyle = 'dashed',
                facecolor = tuple( fc ), alpha = 1.0, transform = ccrs.PlateCarree( ) )
            ax.add_patch( poly )
            # Label the county with its FIPS code at the ring's centroid
            # (mean of the lon/lat vertices — approximate for concave shapes).
            lng_cent = shape[:,0].mean( )
            lat_cent = shape[:,1].mean( )
            ax.text(
                lng_cent, lat_cent, fips, fontsize = 10, fontweight = 'bold', color = 'red',
                transform = ccrs.PlateCarree( ) )
    return ax
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.