content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def inject(
    dependency: Union[T, str],
    *,
    namespace: str = None,
    group: str = None,
    exclude_groups: Sequence[str] = None,
    lazy: bool = False,
    optional: bool = False,
) -> T:
    """
    Resolve and return an instance of the requested dependency — a new object,
    or a shared one when the injectable is declared as a singleton.

    Calling this directly works but is discouraged; prefer the
    :meth:`@autowired <injectable.autowired>` decorator together with the
    :class:`Autowired <injectable.Autowired>` type annotation so injection is
    wired automatically into a function call.

    A warning is logged when this runs before
    :meth:`load_injection_container <injectable.load_injection_container>`
    has populated the container.

    Raises :class:`InjectionError <injectable.errors.InjectionError>` when the
    dependency cannot be resolved: not loaded, wrong namespace, wrong group, or
    multiple candidates with no unambiguous primary. With ``optional=True`` a
    missing match returns ``None`` instead of raising, but an ambiguous
    primary resolution still raises.

    :param dependency: class, base class or qualifier used for lookup among
        the registered injectables.
    :param namespace: (optional) namespace to search. Defaults to
        :const:`injectable.constants.DEFAULT_NAMESPACE`.
    :param group: (optional) only consider injectables belonging to this
        group. Defaults to None.
    :param exclude_groups: (optional) groups to filter out. Defaults to None.
    :param lazy: (optional) when True, return an instance that initializes
        itself on first use. Defaults to False.
    :param optional: (optional) when True, return ``None`` when nothing
        matches instead of raising. Defaults to False.

    Usage::

        >>> from foo import Foo
        >>> from injectable import inject
        >>>
        >>> class Bar:
        ...     def __init__(self, foo: Foo = None):
        ...         self.foo = foo or inject(Foo)
    """
    name = get_dependency_name(dependency)
    reg_type = get_dependency_registry_type(dependency)
    candidates = get_namespace_injectables(
        name, reg_type, namespace or DEFAULT_NAMESPACE
    )
    if not candidates:
        if optional:
            return None
        raise InjectionError(
            f"No injectable matches {reg_type.value} '{name}'"
        )
    if group is not None or exclude_groups is not None:
        candidates = filter_by_group(candidates, group, exclude_groups)
        if not candidates:
            if optional:
                return None
            raise InjectionError(
                f"No injectable for {reg_type.value} '{name}'"
                f" matches group '{group}'"
            )
    resolved = resolve_single_injectable(name, reg_type, candidates)
    return resolved.get_instance(lazy=lazy)
def load_dataset_simple(src):
    """
    Read and assemble the dataset.

    Training: 40 * 2 frontal faces (male/female).
    Testing: 10 * 2 frontal faces (male/female).
    Training and test data do not overlap, nor is there duplication within
    either split.

    BUG FIX: the original initialized the splits as ``np.array`` objects and
    then called ``.append`` on them, which raises ``AttributeError``
    (ndarrays have no ``append`` method). Samples are now collected into
    plain lists and converted to arrays once at the end.
    """
    file_names = os.listdir(src)
    # Accumulate samples in lists; convert to ndarrays after the loop.
    train_images, train_labels = [], []
    test_images, test_labels = [], []
    count_f = 0
    count_m = 0
    # File name format: CF0001_1101_00F.jpg
    # '1' -> first measurement without scaling; 'F' -> frontal face.
    for file_name in file_names:
        if not (file_name[-12] == '1' and file_name[-5] == 'F'):
            continue
        # Load, force RGB, and downscale to 32x32 -> 3072 values per image.
        img = Image.open(src + file_name)
        img = img.convert("RGB")
        img = img.resize((32, 32))
        _data = np.array(img.getdata(), dtype=np.float32)
        # Label: 0 = female, 1 = male (second character of the file name).
        _label = 1 if file_name[1] == 'M' else 0
        # Dataset holds 58 female / 51 male subjects in total;
        # first 40 of each go to training, the next 10 of each to testing.
        if _label == 0:
            if count_f < 40:
                train_images.append(_data)
                train_labels.append(_label)
            elif count_f < 50:
                test_images.append(_data)
                test_labels.append(_label)
            count_f += 1
        else:
            if count_m < 40:
                train_images.append(_data)
                train_labels.append(_label)
            elif count_m < 50:
                test_images.append(_data)
                test_labels.append(_label)
            count_m += 1
    datasets = {
        "train_images": np.array(train_images, dtype=np.float32).reshape(-1, 3072),
        "train_labels": np.array(train_labels, dtype=np.int32),
        "test_images": np.array(test_images, dtype=np.float32).reshape(-1, 3072),
        "test_labels": np.array(test_labels, dtype=np.int32),
    }
    print("--------------------\n训练图片数量: %d 验证图片数量: %d \n--------------------\n"
          % (datasets["train_images"].shape[0], datasets["test_images"].shape[0]))
    return datasets
def load2(file, collapsed=True, index=None):
    """Loads Laue diffraction data.

    :param file: dict describing the source ('stacked', 'ext', 'path',
        'h5', 'range', 'threshold', 'frame' keys are read here).
    :param collapsed: when True, collapse the volume over the picked pixel
        index and report the (smaller) collapsed data size.
    :param index: pre-computed pixel index; computed from the data via
        ``cherrypickpixels`` when None.
    :returns: tuple ``(vals, index)``.
    """
    if file['stacked'] is True:
        files = loadstack(file)
        if file['ext'] == 'h5':
            vals = loadh5files(files, file['h5']['key'])
    else:
        if file['ext'] == 'h5':
            begin, end, step = file['range']
            # BUG FIX: Python slices are [start:stop:step]; the original
            # used [begin:step:end], silently selecting the wrong frames.
            vals = loadh5(file['path'], file['h5']['key'])[begin:end:step]
    # Reorder axes so the frame axis comes last, then copy to make the
    # array contiguous and independent of the memory-mapped source.
    vals = np.swapaxes(vals, 0, 2)
    vals = np.swapaxes(vals, 0, 1)
    vals = vals.copy()
    if index is None:
        index = cherrypickpixels(vals, file['threshold'], file['frame'])
    if collapsed is True:
        vals = collapse(vals, index)
        datasize = vals.shape[0] * vals.shape[1] * 4e-6  # [MB]
    else:
        datasize = vals.shape[0] * vals.shape[1] * vals.shape[2] * 4e-6  # [MB]
    logging.info(
        "Data size: {}, {:.2f} MB".format(
            vals.shape, datasize))
    return vals, index
def create_bookmark(record_settings):
    """Write record_settings to quickstreams.txt under bookmark name.

    Prompts for a new name while the chosen one is blank or already taken.
    BUG FIX: bookmarks are stored under the lower-cased name, but the
    original membership test compared the raw name, so 'Foo' would pass the
    check and silently overwrite an existing 'foo' entry. The check now
    compares lower-cased names too.
    """
    bookmark_name = record_settings['bookmark']
    # Set bookmark to None to avoid future loading attempting to bookmark
    record_settings['bookmark'] = None
    quickstreams = read_quickstreams()
    while bookmark_name.lower() in quickstreams or bookmark_name == '':
        bookmark_name = input("Name in use or blank, enter another: ")
    quickstreams[bookmark_name.lower()] = record_settings
    save_quickstreams(quickstreams)
def get_api_user(name):
    """
    Check whether ``name`` is registered on faceit.

    :returns: 1 when the player exists, None otherwise (including when the
        API lookup raises ``ValueError`` for an invalid name).
    """
    logging.info("get_api_data_user")
    try:
        details = FaceitData(FACEIT_API).player_details(name)
    except ValueError:
        logging.error("Faceit Name is not correct !")
        return None
    return 1 if details else None
def removeDeliveredUSPSTracking(context: CallbackContext) -> None:
    """Purge USPS tracking numbers delivered more than 23 hours ago.

    Removes each expired entry from both the active tracking dict and the
    delivered-timestamps dict.
    """
    job = context.job
    global deliveredUSPSTrackingNumbers
    now = datetime.now()
    cutoff_seconds = 23 * 3600
    expired = []
    # First pass: drop from the active dict and remember which entries to
    # purge from the delivered dict (can't mutate it mid-iteration).
    for number, delivered_at in deliveredUSPSTrackingNumbers.items():
        if (now - delivered_at).total_seconds() > cutoff_seconds:
            USPSTrackingNumbers.pop(number)
            expired.append(number)
    for number in expired:
        deliveredUSPSTrackingNumbers.pop(number)
def get_storage_client():
    """Return the module-cached storage client, creating it on first use."""
    global _client
    _client = _client or storage.Client()
    return _client
def upload_file_to_s3(image, fileStoreObj, acl="public-read"):
    """Upload ``image`` to the app's S3 bucket under the file's name.

    Returns the public URL on success; on failure prints and returns the
    raised exception object (callers must check the return type).
    """
    app = current_app._get_current_object()
    client = boto3.client(
        "s3",
        aws_access_key_id=app.config['S3_KEY'],
        aws_secret_access_key=app.config['S3_SECRET']
    )
    try:
        client.put_object(
            Body=image,
            Bucket=app.config['S3_BUCKET'],
            ACL=acl,
            ContentType=fileStoreObj.content_type,
            Key=fileStoreObj.filename,
        )
    except Exception as exc:
        print("An Error occurred: ", exc)
        return exc
    return "{}{}".format(app.config["S3_LOCATION"], fileStoreObj.filename)
def vae_bc(
    transitions=None,
    # Adam optimizer settings
    lr_enc=1e-3,
    lr_dec=1e-3,
    # Training settings
    minibatch_size=100,
):
    """
    VAE Behavioral Cloning (VAE-BC) control preset.

    Args:
        transitions: dictionary of transitions generated by
            cpprb.ReplayBuffer.get_all_transitions()
        lr_enc (float): Learning rate for the encoder.
        lr_dec (float): Learning rate for the decoder.
        minibatch_size (int): Number of experiences to sample in each
            training update.
    """
    def _vae_bc(env):
        disable_on_policy_mode()
        device = get_device()
        # Latent space is twice the action dimensionality.
        latent_dim = 2 * env.action_space.shape[0]

        enc_model = fc_bcq_encoder(env, latent_dim=latent_dim).to(device)
        encoder = BcqEncoder(
            model=enc_model,
            latent_dim=latent_dim,
            optimizer=Adam(enc_model.parameters(), lr=lr_enc),
            name="encoder",
        )

        dec_model = fc_bcq_decoder(env, latent_dim=latent_dim).to(device)
        decoder = BcqDecoder(
            model=dec_model,
            latent_dim=latent_dim,
            space=env.action_space,
            optimizer=Adam(dec_model.parameters(), lr=lr_dec),
            name="decoder",
        )

        # Pre-fill the replay buffer with the offline transitions, if given.
        buffer = ExperienceReplayBuffer(1e7, env)
        if transitions is not None:
            buffer.store(buffer.samples_from_cpprb(transitions, device="cpu"))
        set_replay_buffer(buffer)

        return VaeBC(
            encoder=encoder,
            decoder=decoder,
            minibatch_size=minibatch_size,
        )
    return _vae_bc
def get_mode_fn(num_gpus, variable_strategy, num_workers):
    """Returns a function that will build shadownet model.

    Args:
        num_gpus: requested GPU count. NOTE(review): the inner function
            reads FLAGS.num_gpus instead of this argument — confirm intent.
        variable_strategy: 'GPU' consolidates gradient application on
            /gpu:0, anything else on /cpu:0.
        num_workers: replicas to aggregate when FLAGS.sync is set.

    Returns:
        A model_fn compatible with tf.estimator.Estimator.
    """
    def _mode_fun(features, labels, mode, params):
        is_training = (mode == tf.estimator.ModeKeys.TRAIN)
        # labels is a 3-tuple of per-tower lists: score maps, geometry maps
        # and training masks (one entry per device).
        tower_features = features
        tower_score_maps = labels[0]
        tower_geo_maps = labels[1]
        tower_training_masks = labels[2]
        tower_losses = []
        tower_gradvars = []
        tower_summaries = []
        num_devices = FLAGS.num_gpus
        device_type = 'gpu'
        # Variables are created on the first tower and reused on the rest.
        reuse_variables = None
        for i in range(num_devices):
            worker_device = '/{}:{}'.format(device_type, i)
            device_setter = local_device_setter(worker_device=worker_device)
            with tf.name_scope('tower_%d' % i) as name_scope:
                with tf.device(device_setter):
                    total_loss, gradvars, summaries = _tower_fn(
                        is_training,
                        tower_features[i],
                        tower_score_maps[i],
                        tower_geo_maps[i],
                        tower_training_masks[i],
                        reuse_variables)
                    tower_losses.append(total_loss)
                    tower_gradvars.append(gradvars)
                    tower_summaries.append(summaries)
                    reuse_variables = True
                    if i == 0:
                        # Only trigger batch_norm moving mean and variance update from
                        # the 1st tower. Ideally, we should grab the updates from all
                        # towers but these stats accumulate extremely fast so we can
                        # ignore the other stats from the other towers without
                        # significant detriment.
                        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,
                                                       name_scope)
        # Now compute global loss and gradients.
        gradvars = []
        with tf.name_scope('gradient_averaging'):
            # Group gradients by variable, then average each group on the
            # device that owns the variable.
            all_grads = {}
            for grad, var in itertools.chain(*tower_gradvars):
                if grad is not None:
                    all_grads.setdefault(var, []).append(grad)
            for var, grads in six.iteritems(all_grads):
                # Average gradients on the same device as the variables
                with tf.device(var.device):
                    if len(grads) == 1:
                        avg_grad = grads[0]
                    else:
                        avg_grad = tf.multiply(tf.add_n(grads), 1. / len(grads))
                gradvars.append((avg_grad, var))
        if FLAGS.pretrained_model_path is not None:
            # Warm-start the ResNet-50 backbone from a checkpoint.
            tf.train.init_from_checkpoint(FLAGS.pretrained_model_path, {"resnet_v1_50/":"resnet_v1_50/"})
            # restore only once
            FLAGS.pretrained_model_path = None
        # Device that runs the ops to apply global gradient updates.
        consolidation_device = '/gpu:0' if variable_strategy == 'GPU' else '/cpu:0'
        with tf.device(consolidation_device):
            global_step = tf.train.get_global_step()
            starter_learning_rate = FLAGS.learning_rate
            # Staircase exponential decay of the learning rate.
            learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,
                                                       FLAGS.decay_steps, FLAGS.decay_rate,
                                                       staircase=True)
            loss = tf.reduce_mean(tower_losses, name='loss')
            tensors_to_log = {'global_step': global_step, 'learning_rate': learning_rate, 'loss': loss}
            logging_hook = tf.train.LoggingTensorHook(
                tensors=tensors_to_log, every_n_iter=10)
            summary_hook = tf.train.SummarySaverHook(
                save_steps=10,
                output_dir='/data/output/',
                summary_op=tower_summaries[0])
            train_hooks = [logging_hook, summary_hook]
            optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
            if FLAGS.sync:
                # Synchronous distributed training across workers.
                optimizer = tf.train.SyncReplicasOptimizer(
                    optimizer, replicas_to_aggregate=num_workers)
                sync_replicas_hook = optimizer.make_session_run_hook(params.is_chief)
                train_hooks.append(sync_replicas_hook)
            # save moving average
            variable_averages = tf.train.ExponentialMovingAverage(
                FLAGS.moving_average_decay, global_step)
            variables_averages_op = variable_averages.apply(tf.trainable_variables())
            # Create single grouped train op
            train_op = [
                optimizer.apply_gradients(
                    gradvars, global_step=tf.train.get_global_step()),
                variables_averages_op
            ]
            train_op.extend(update_ops)
            train_op = tf.group(*train_op)
            return tf.estimator.EstimatorSpec(
                mode=mode,
                loss=loss,
                train_op=train_op,
                training_hooks=train_hooks)
    return _mode_fun
def link_to_profile(request):
    """
    If the user is a temporary one who was logged in via
    an institution (not through a Uniauth profile), offers
    them the choice between logging to an existing Uniauth
    account or creating a new one.

    The institution account is (eventually) linked to the
    Uniauth profile the user logged into / created.

    :param request: the incoming Django HttpRequest.
    :returns: an HttpResponseRedirect for the guard cases, otherwise a
        rendered link-to-profile / link-success page.
    """
    # Preserve the post-login destination across redirects.
    next_url = request.GET.get('next')
    context = _get_global_context(request)
    if not next_url:
        next_url = get_redirect_url(request)
    params = urlencode({'next': next_url})
    context['next_url'] = next_url
    # If the user is not authenticated at all, redirect to login page
    if not request.user.is_authenticated:
        return HttpResponseRedirect(reverse('uniauth:login') + '?' + params)
    # If the user is already authenticated + verified, proceed to next page
    if not is_tmp_user(request.user) and not is_unlinked_account(request.user):
        return HttpResponseRedirect(next_url)
    # If the user is temporary, but was not logged in via an institution
    # (e.g. created through Uniauth, but not verified), redirect to signup
    if not is_unlinked_account(request.user):
        return HttpResponseRedirect(reverse('uniauth:signup') + '?' + params)
    # At this point, we've ensured the user is temporary and was
    # logged in via an institution. We just need to handle the
    # Login Form, if the user chooses to link to an existing account.
    # If it's a POST request, attempt to validate the form
    if request.method == "POST":
        form = LoginForm(request, request.POST)
        # Authentication successful
        if form.is_valid():
            # Capture the unlinked user BEFORE auth_login replaces
            # request.user with the authenticated profile.
            unlinked_user = request.user
            username_split = get_account_username_split(request.user.username)
            # Log in as the authenticated Uniauth user
            user = form.get_user()
            auth_login(request, user)
            # Merge the unlinked account into the logged in profile,
            # then add the institution account described by the username
            merge_model_instances(user, [unlinked_user])
            _add_institution_account(user.uniauth_profile, username_split[1],
                                     username_split[2])
            slug = username_split[1]
            context['institution'] = Institution.objects.get(slug=slug)
            return render(request, 'uniauth/link-success.html', context)
        # Authentication failed: render form errors
        else:
            context['form'] = form
            return render(request, 'uniauth/link-to-profile.html', context)
    # Otherwise, render a blank Login form
    else:
        form = LoginForm(request)
        context['form'] = form
        return render(request, 'uniauth/link-to-profile.html', context)
def obtain_fea_im_subset(Row_range, Col_range, tsList, ts_stack_foler, fC_hdr, fC_img, bandName):
    """
    Read a spatial window of every band at every time step into a 4-D
    feature array of shape (d1, d2, num_band + 1, tsLen), where the last
    feature plane is band[1] - band[0] (presumably a band-difference index
    such as NDVI — confirm with callers).

    Row_range / Col_range appear to be inclusive, 1-based (start, end)
    windows — the code subtracts 1 from the start when slicing. Missing
    time steps are filled with NaN and interpolated afterwards.

    Ported from MATLAB; original notes on converting matlab.double:
    fea_im_subset = eng.zeros(int(tsLen),int(num_fea),int(d2),int(d1))
    For one-dimensional arrays, access only the "_data" property of the
    Matlab array. For multi-dimensional arrays you need to reshape the
    array afterwards: np.array(x._data).reshape(x.size[::-1]).T
    """
    logger.info("obtain_fea_im_subset")
    d1 = Row_range[1] - Row_range[0] + 1
    d2 = Col_range[1] - Col_range[0] + 1
    num_band = len(bandName)
    num_fea = num_band + 1
    tsLen = len(tsList)
    dim_fea = (d1, d2, num_fea, tsLen)
    fea_im_subset = np.zeros(dim_fea)
    # FILL flags that at least one time step was missing and NaN-filled.
    FILL = 0
    for t in range(0, tsLen):
        for i in range(0, num_band):
            hdrPath_t_i = ts_stack_foler + "/" + fC_hdr[bandName[i]][t]
            imgPath_t_i = ts_stack_foler + "/" + fC_img[bandName[i]][t]
            # print(imgPath_t_i)
            # --------------------Check file existance--------------------------
            if Path(hdrPath_t_i).is_file():
                # logger.info(hdrPath_t_i)
                info = envi.read_envi_header(hdrPath_t_i)
                img = envi.open(hdrPath_t_i)
                img_open = img.open_memmap(writeable=True)
                im_t_i = img_open[Row_range[0] - 1:Row_range[1], Col_range[0] - 1:Col_range[1], 0]
                # print(im_t_i.shape)
                # im_t_i = np.copy(img_open[:Row_range[1]+1,:Col_range[1],0])
            else:
                # Missing file: fill this band/time slice with NaNs so it
                # can be interpolated from neighboring time steps below.
                logger.info("The -%d-th TS is empty!!!" % t)
                print("The -%d-th TS is empty!!!" % t)
                im_nan = np.zeros((d1, d2))
                im_nan[im_nan == 0] = np.nan
                im_t_i = im_nan
                FILL = 1
            # fea_im_subset[t][i][:][:]
            fea_im_subset[:, :, i, t] = im_t_i
    if FILL == 1:
        # NOTE(review): np.interp over nonzero()[0] treats the 4-D array as
        # flat and interpolates along the flattened order, not per-pixel
        # along time — verify this matches the MATLAB spline fill intent.
        nans, x = np.isnan(fea_im_subset), lambda z: z.nonzero()[0]
        fea_im_subset[nans] = np.interp(x(nans), x(~nans),
                                        fea_im_subset[~nans])  # linear Interpolation, in Matlab: Cubic spline
        # fea_im_subset = eng.fillmissing(fea_im_subset,"spline");
    for t in range(0, tsLen):
        # Calculate additional features ---------------------------------------
        fea_im_subset[:, :, num_band, t] = fea_im_subset[:, :, 1, t] - fea_im_subset[:, :, 0, t]
    return fea_im_subset
def himmelblau(individual):
    # Raw docstring: the original plain string contained LaTeX escapes like
    # "\in" and "\mathbf" which are invalid escape sequences
    # (DeprecationWarning today, SyntaxError in future Python versions).
    r"""The Himmelblau's function is multimodal with 4 defined minimums in
    :math:`[-6, 6]^2`.

    .. list-table::
       :widths: 10 50
       :stub-columns: 1

       * - Type
         - minimization
       * - Range
         - :math:`x_i \in [-6, 6]`
       * - Global optima
         - :math:`\mathbf{x}_1 = (3.0, 2.0)`, :math:`f(\mathbf{x}_1) = 0`\n
           :math:`\mathbf{x}_2 = (-2.805118, 3.131312)`, :math:`f(\mathbf{x}_2) = 0`\n
           :math:`\mathbf{x}_3 = (-3.779310, -3.283186)`, :math:`f(\mathbf{x}_3) = 0`\n
           :math:`\mathbf{x}_4 = (3.584428, -1.848126)`, :math:`f(\mathbf{x}_4) = 0`\n
       * - Function
         - :math:`f(x_1, x_2) = (x_1^2 + x_2 - 11)^2 + (x_1 + x_2^2 -7)^2`

    .. plot:: code/benchmarks/himmelblau.py
       :width: 67 %
    """
    x, y = individual[0], individual[1]
    # Single-element tuple return follows the DEAP fitness convention.
    return (x * x + y - 11) ** 2 + (x + y * y - 7) ** 2,
def get_deb_architecture():
    """Return the deb architecture of the local system (e.g. amd64, i386, arm)."""
    architecture = local('dpkg --print-architecture', capture=True)
    return architecture
def Ry(angle, degrees=False):
    """Build the :math:`3\\times3` rotation matrix :math:`R_y(\\theta)` for a
    rotation about the :math:`y` axis by an angle :math:`\\theta`.

    Parameters
    ----------
    angle : float
        Rotation angle :math:`\\theta` in *radians* unless `degrees=True`.
    degrees : bool, optional
        If `True`, `angle` is interpreted as degrees and converted.

    Returns
    -------
    :class:`~numpy:numpy.ndarray`
        The :math:`3\\times3` matrix

        .. math::
           R_y = \\begin{pmatrix}
           \\cos\\theta & 0 & \\sin\\theta\\\\
           0 & 1 & 0\\\\
           -\\sin\\theta & 0 & \\cos\\theta
           \\end{pmatrix}

        Entries within machine epsilon of zero are snapped to exactly 0.

    Examples
    --------
    >>> import numpy as np
    >>> from sknano.core.math import Ry
    >>> np.alltrue(Ry(np.pi/4) == Ry(45, degrees=True))
    True
    """
    theta = np.radians(angle) if degrees else angle
    c = np.cos(theta)
    s = np.sin(theta)
    rotation = np.array([[c, 0.0, s],
                         [0.0, 1.0, 0.0],
                         [-s, 0.0, c]])
    # Snap numerically-negligible entries (e.g. cos(pi/2)) to exact zero.
    rotation[np.abs(rotation) <= np.finfo(float).eps] = 0.0
    return rotation
def augment_subdataset_split(args, subfolder, id_list, data_suffixes, aug_name, augment=True, multiplier=1):
    """
    augment specific dataset split

    :param args: command line arguments with data_dir and aug_data_dir
    :param subfolder: sub-dataset folder. Either hgg or lgg
    :param id_list: list of sequences
    :param data_suffixes: list of modality suffixes
    :param aug_name: name of output files
    :param augment: perform augmentation of not
    :param multiplier: dataset multiplier
    :return: list of augmented ids, corresponding brain bboxes and tumor bboxes

    BUG FIX: ``dict.iteritems()`` is Python-2-only and raises AttributeError
    on Python 3; replaced with ``dict.items()``.
    """
    input_shape = read_nii_header(os.path.join(args.data_dir,subfolder), id_list[0], data_suffixes[0]).get_data().shape
    brain_bboxes = np.zeros(shape=(len(id_list) * multiplier, 2, 3), dtype=int)
    tumor_bboxes = np.zeros(shape=(len(id_list) * multiplier, 2, 3), dtype=int)
    for m in tqdm(range(len(id_list) * multiplier)):
        # Cycle through the id list `multiplier` times.
        i = m % len(id_list)
        f = id_list[i]
        data_size = config.input_modalities
        sample = np.zeros(shape=(1, data_size,) + input_shape, dtype=np.float32)
        # One shared b-spline deformation per case so all modalities and the
        # segmentation are warped consistently.
        bspline = gen_bspline(np.zeros(shape=input_shape))
        bboxes = np.zeros(shape=(len(data_suffixes),) + (2, 3))
        for j, s in enumerate(data_suffixes):
            image_handle = read_nii_header(os.path.join(args.data_dir,subfolder), f, s)
            image = image_handle.get_data().astype(np.float32)
            if augment:
                image = augment_data(image, bspline, sitk.sitkLinear)
            bboxes[j] = bbox3(image)
            mask = image > 0
            image = normalize(image, mask)
            sample[0, j] = image
        # Brain bbox = union of the per-modality bboxes.
        bbox_min = np.min(bboxes[:, 0, :], axis=0).ravel().astype(int)
        bbox_max = np.max(bboxes[:, 1, :], axis=0).ravel().astype(int)
        bbox = np.zeros(shape=(2, 3), dtype=int)
        bbox[0] = bbox_min
        bbox[1] = bbox_max
        brain_bboxes[m] = bbox
        sample_cropped = resample_np(sample[:,:,bbox_min[0]:bbox_max[0],bbox_min[1]:bbox_max[1],bbox_min[2]:bbox_max[2]],
                                     (1, data_size,)+config.brain_reshape_to,
                                     1)
        np.save(os.path.join(args.aug_data_dir,aug_name + str(m) + config.suffix_data), sample.astype(np.float32), allow_pickle=False)
        np.save(os.path.join(args.aug_data_dir,aug_name + str(m) + config.suffix_data_cropped), sample_cropped.astype(np.float32), allow_pickle=False)
        label_handle = read_nii_header(os.path.join(args.data_dir,subfolder), f, config.suffix_seg)
        label_data = label_handle.get_data().astype(np.float32)
        # Remap raw label values to the training label set.
        for key, value in config.dataset_transform_dict.items():
            label_data[label_data == key] = value
        if augment:
            # Nearest-neighbor keeps labels categorical under the warp.
            label_data = augment_data(label_data, bspline, sitk.sitkNearestNeighbor)
        tumor_bboxes[m] = bbox3(label_data > 0)
        # Express the tumor bbox relative to the brain bbox origin.
        tumor_bboxes[m,0] = tumor_bboxes[m,0] - bbox[0]
        tumor_bboxes[m,1] = tumor_bboxes[m,1] - bbox[0]
        label_data = label_data.reshape((1, 1,) + label_data.shape)
        label_data_cropped = resample_np(label_data[:,:,bbox_min[0]:bbox_max[0],bbox_min[1]:bbox_max[1],bbox_min[2]:bbox_max[2]],
                                         (1, 1,)+config.brain_reshape_to,
                                         0)
        np.save(os.path.join(args.aug_data_dir,aug_name + str(m) + config.suffix_label), label_data.astype(np.float32), allow_pickle=False)
        np.save(os.path.join(args.aug_data_dir,aug_name + str(m) + config.suffix_label_cropped), label_data_cropped.astype(np.float32), allow_pickle=False)
    return [aug_name + str(i) for i in range(len(id_list) * multiplier)], brain_bboxes, tumor_bboxes
def compute_ibaq_1sample(df, organism='human'):
    """Compute IBAQ values from the total intensities of all proteins.

    Parameters
    ----------
    df : pandas dataframe
        proteomics dataset with columns as samples and rows as proteins
    organism : str
        organism (human, rat, or mouse) used to look up the number of
        theoretical peptides per protein

    Returns
    -------
    df : pandas dataframe
        input dataframe with 'num_theoretical_peptides', 'IBAQ' and
        'log10_IBAQ' columns added
    """
    reference = pd.read_csv(
        os.path.join(resource_path,
                     '%s_proteome_mw_peptides.csv' % organism))
    # Map each protein to its theoretical peptide count; accessions may be
    # either bare UniProt IDs or pipe-delimited FASTA headers.
    peptide_counts = []
    for protein in df['Uniprot_Id'].tolist():
        try:
            uid = protein.strip().split('|')[1]
        except IndexError:
            uid = protein
        peptide_counts.append(
            reference[reference.UniprotID == uid].num_theoretical_peptides.values[0])
    df['num_theoretical_peptides'] = peptide_counts
    # IBAQ = total intensity divided by the theoretical peptide count.
    ibaq_values, log10_values = [], []
    for intensity, n_peptides in zip(df['default~cq_max_sum'],
                                     df['num_theoretical_peptides']):
        value = intensity / n_peptides
        ibaq_values.append(value)
        log10_values.append(np.log10(value))
    df['IBAQ'] = ibaq_values
    df['log10_IBAQ'] = log10_values
    return df
def zhongzhuang_adjustment_reservoir():
    """
    Real Name: ZhongZhuang Adjustment Reservoir
    Original Eqn: INTEG ( IF THEN ELSE(Transfer From ZhongZhuangWeir To ZhongZhuangAdjustmentReservoir+ZhongZhuang Adjustment Reservoir\ -Transfer From ZhongZhuangAdjustmentReservoir To BanXinWPP-Transfer From ZhongZhuangAdjustmentReservoir To DaNanWPP\ -ZhongZhuangAdjustmentReservoir Transfer Loss Amount>=5.05e+006, 0 , Transfer From ZhongZhuangWeir To ZhongZhuangAdjustmentReservoir\ -Transfer From ZhongZhuangAdjustmentReservoir To BanXinWPP-Transfer From ZhongZhuangAdjustmentReservoir To DaNanWPP\ -ZhongZhuangAdjustmentReservoir Transfer Loss Amount ), 5.05e+006)
    Units: m3
    Limits: (None, None)
    Type: component

    Auto-generated PySD accessor for a Vensim stock; it delegates to the
    integ_* element that holds the actual INTEG state.

    Notes from the original model (2017): max storage volume = 5,050,000 m^3;
    general output = 24,000 m^3/day (BanXin WPP assumes 15,000 m^3, DaNan WPP
    assumes 9,000 m^3); overflow height 68 m; designed flood discharge
    2.83 CMS; water input limit 10 CMS.
    """
    # Generated code: the stock's value lives in the paired integ_* object.
    return integ_zhongzhuang_adjustment_reservoir()
def _request_esi_status() -> "requests.Response | EsiStatus":
    """Make request to ESI about current status with retries.

    Retries up to ``max_retries`` times on gateway-style HTTP errors
    (502/503/504) with randomized exponential backoff.

    Returns:
        The HTTP response on success (or after retries are exhausted), or an
        offline ``EsiStatus`` when the network request itself failed.
        (The original annotation claimed ``requests.Response`` only, which
        was wrong for the network-error path.)
    """
    max_retries = 3
    retries = 0
    while True:
        try:
            r = requests.get(
                "https://esi.evetech.net/latest/status/",
                timeout=(5, 30),
                headers={"User-Agent": f"{__package__};{__version__}"},
            )
        except (requests.exceptions.Timeout, requests.exceptions.ConnectionError):
            logger.warning("Network error when trying to call ESI", exc_info=True)
            return EsiStatus(
                is_online=False, error_limit_remain=None, error_limit_reset=None
            )
        if r.status_code not in {
            502,  # HTTPBadGateway
            503,  # HTTPServiceUnavailable
            504,  # HTTPGatewayTimeout
        }:
            break
        retries += 1
        if retries > max_retries:
            break
        logger.warning(
            "HTTP status code %s - Retry %s/%s",
            r.status_code,
            retries,
            max_retries,
        )
        # Randomized exponential backoff: 0.1 * uniform(2, 4)^(retries-1).
        wait_secs = 0.1 * (random.uniform(2, 4) ** (retries - 1))
        sleep(wait_secs)
    return r
def RunExampleConsumer(serialized_file_graph):
    """Runs the example consumer on the serialized_file_graph.

    Args:
        serialized_file_graph: mojom_files.MojomFileGraph as output by the mojom
            parser.

    Returns:
        The integer exit code of the example consumer.
    """
    examples_dir = os.path.dirname(os.path.abspath(__file__))
    example_consumer = os.path.join(examples_dir, 'example_consumer.go')
    src_root = os.path.abspath(os.path.join(examples_dir, '../../..'))
    environ = {'GOPATH': os.path.dirname(src_root)}
    # BUG FIX: the original used the Python 2 print statement
    # (`print environ`), which is a SyntaxError on Python 3.
    print(environ)
    cmd = ['go', 'run', example_consumer]
    # Feed the serialized graph to the consumer over stdin.
    process = subprocess.Popen(cmd, stdin=subprocess.PIPE, env=environ)
    process.communicate(serialized_file_graph)
    return process.wait()
def linnworks_api_session(func: Callable) -> Callable:
    """Use a Linnworks API session as a method decorator.

    The wrapped callable runs inside a ``LinnworksAPISession`` context.
    IMPROVEMENT: apply ``functools.wraps`` so the wrapper preserves the
    decorated function's name, docstring and metadata (the original wrapper
    masked them, breaking introspection and debugging).
    """
    import functools  # local import: keeps the file's top-level imports untouched

    @functools.wraps(func)
    def wrapper_linnapi_session(*args: Any, **kwargs: Any) -> Any:
        with LinnworksAPISession():
            return func(*args, **kwargs)

    return wrapper_linnapi_session
def check_if_phone_number_is_opted_out(phoneNumber=None):
    """
    Accepts a phone number and indicates whether the phone holder has opted
    out of receiving SMS messages from your account. You cannot send SMS
    messages to a number that is opted out. To resume sending messages, you
    can opt in the number by using the OptInPhoneNumber action.

    See also: AWS API Documentation

    :example: response = client.check_if_phone_number_is_opted_out(
        phoneNumber='string'
    )
    :type phoneNumber: string
    :param phoneNumber: [REQUIRED] The phone number for which you want to
        check the opt out status.
    :rtype: dict
    :return: {'isOptedOut': True|False} where isOptedOut is:
        true -- the phone number is opted out; you cannot publish SMS
        messages to it.
        false -- the phone number is opted in; you can publish SMS messages
        to it.
    :raises:
        SNS.Client.exceptions.ThrottledException
        SNS.Client.exceptions.InternalErrorException
        SNS.Client.exceptions.AuthorizationErrorException
        SNS.Client.exceptions.InvalidParameterException
    """
    # Generated documentation stub -- no implementation.
    return None
def test_csv_to_layer_data_raises(tmp_path):
    """Test various exception raising circumstances with csv_to_layer_data."""
    temp = tmp_path / 'points.csv'

    def write_csv(header):
        # Write the given header plus three rows of random 3-column data.
        rows = [header]
        rows.extend(np.random.random((3, 3)).tolist())
        with open(temp, mode='w', newline='') as csvfile:
            csv.writer(csvfile).writerows(rows)

    # Points-shaped data is detected for require_type in {None, 'any',
    # 'points'} but raises for other layer types.
    write_csv(['index', 'axis-0', 'axis-1'])
    for require in (None, 'any', 'points'):
        assert io.csv_to_layer_data(temp, require_type=require)[2] == 'points'
    with pytest.raises(ValueError):
        io.csv_to_layer_data(temp, require_type='shapes')

    # Unrecognized data simply returns None when require_type is None,
    # but raises for 'any' and for specific layer types.
    write_csv(['some', 'random', 'header'])
    assert io.csv_to_layer_data(temp, require_type=None) is None
    for require in ('any', 'points', 'shapes'):
        with pytest.raises(ValueError):
            io.csv_to_layer_data(temp, require_type=require)
def threads():
    """Start five counter worker threads and keep the main thread alive.

    IMPROVEMENT: the original bare ``except:`` also swallowed
    ``KeyboardInterrupt``/``SystemExit``; narrowed to ``Exception``.
    """
    # (name, count, delay) configuration for each worker.
    configs = (
        ("Thread 1", 10, 2),
        ("Thread 2", 20, 0.5),
        ("Thread 3", 5, 4),
        ("Thread 4", 10, 0.1),
        ("Thread 5", 3, 10),
    )
    try:
        for config in configs:
            _thread.start_new_thread(counter, config)
    except Exception:
        print("Something goes wrong.")
    # Busy-wait so the process does not exit and kill the daemon threads
    # (``_thread`` offers no join primitive).
    while 1:
        pass
def test_case_change_remote(m: Maestral) -> None:
    """
    Tests the download sync of a remote rename which only changes the casing of the file
    name.

    Integration test: requires a live Maestral instance `m` connected to a
    Dropbox account (fixture-provided).
    """
    # Start with nested folders.
    os.mkdir(m.dropbox_path + "/folder")
    os.mkdir(m.dropbox_path + "/folder/Subfolder")
    wait_for_idle(m)
    assert_synced(m)
    # Rename the remote folder. autorename=True avoids a conflict error if
    # the server treats the case-only rename as a collision.
    m.client.move("/folder", "/FOLDER", autorename=True)
    wait_for_idle(m)
    # Check that case change was propagated to the local folder.
    # (get_metadata is case-insensitive on Dropbox, hence querying "/folder".)
    md = m.client.get_metadata("/folder")
    assert osp.isdir(m.dropbox_path + "/FOLDER")
    assert osp.isdir(m.dropbox_path + "/FOLDER/Subfolder")
    assert isinstance(md, FolderMetadata)
    assert md.name == "FOLDER", "casing was not propagated to local folder"
    assert_synced(m)
    assert_no_errors(m)
def series_quat2euler(q0, q1, q2, q3, msg_name=""):
    """Convert pandas quaternion series q0..q3 into roll/pitch/yaw series.

    Arguments:
    q0-q3 -- quaternion component series (aligned indices)

    Keyword arguments:
    msg_name -- name prefix of the message for which the euler angles should
        be computed (default "")

    Returns (roll, pitch, yaw) as pandas Series sharing q0's index.
    """
    # Convert each quaternion row-wise, then split the stacked result into
    # the three angle arrays (tf.quat2euler yields yaw, pitch, roll).
    angles = np.array(
        [tf.quat2euler([w, x, y, z]) for w, x, y, z in zip(q0, q1, q2, q3)]
    ).T
    yaw_values, pitch_values, roll_values = angles
    roll = pd.Series(name=msg_name + "roll", data=roll_values, index=q0.index)
    pitch = pd.Series(name=msg_name + "pitch", data=pitch_values, index=q0.index)
    yaw = pd.Series(name=msg_name + "yaw", data=yaw_values, index=q0.index)
    return roll, pitch, yaw
def addScaleBars(axis, dT, dF, T_value=-0.1, F_value=-0.4):
    """Draw a vertical (frequency, length dF) and a horizontal (time,
    length dT) scale bar on `axis`, anchored at (T_value, F_value)."""
    # Vertical bar: fixed x = T_value, spanning dF upward from F_value.
    axis.plot(T_value * np.ones(2), np.array([F_value, F_value + dF]),
              'k-', alpha=0.9)
    # Horizontal bar: fixed y = F_value, spanning dT rightward from T_value.
    axis.plot(np.array([T_value, T_value + dT]), F_value * np.ones(2),
              'k-', alpha=0.9)
def _ShiftRight(x0, xs):
    """Shift xs one step to the right along axis 0: drop the last element
    and prepend x0 as the new first element."""
    head = [x0]
    tail = xs[:-1]
    return tf.concat([head, tail], axis=0)
def compute_connected_components(self, compute_nx=True, probed_node=None, comps_to_merge=None, current_norm_vals=None):
    """
    Computes the NORMALIZED connected components of the network.

    If compute_nx is True, actually computes components from scratch using networkx.
    Otherwise, we update self.connected_components, self.connected_component_sizes, and
    self.components based on comps_to_merge.

    UPDATE: As of 7/25, we now keep track of node_component_sizes, i.e. the size of the
    component each node is in. This is managed in BOTH update_neighbors AND
    compute_connected_components. It is an np array with entries corresponding to rows in the
    feature matrix.

    :param compute_nx: recompute everything from scratch via networkx when True.
    :param probed_node: node whose component absorbs ``comps_to_merge``
        (only used in the incremental branch).
    :param comps_to_merge: component ids to merge into the probed node's component.
    :param current_norm_vals: previously computed normalized values, updated in
        place in the incremental branch.
    Returns np array of normalized component sizes.
    """
    if compute_nx:
        # compute the nx components from scratch
        self.connected_components = {k:c for k, c in enumerate(nx.connected_components(self.G))}
        connected_components = self.connected_components
        self.components = np.zeros((len(self.node_to_row)), dtype=int)
        self.connected_component_sizes = dict()
        self.node_component_sizes = np.zeros((len(self.node_to_row)))
        # initialize min/max
        min_component_size = float('inf')
        max_component_size = 0
        # loop sets self.connected_component_sizes, self.components, max/min
        for i, component in self.connected_components.items():
            size = len(component)
            self.connected_component_sizes[i] = size
            if size < min_component_size:
                min_component_size = size
            if size > max_component_size:
                max_component_size = size
            for node in component:
                self.components[self.node_to_row[node]] = i
                self.node_component_sizes[self.node_to_row[node]] = size
    else:
        # No need to compute in this case; update incrementally instead.
        connected_components = self.connected_components
        # For each component:
        min_component_size = self.min_comp_size
        max_component_size = self.max_comp_size
        probed_comp = self.components[self.node_to_row[probed_node]]
        if comps_to_merge is None:
            comps_to_merge = []
        for comp in comps_to_merge:
            # Keep probed_node's component (arbitrary choice),
            # add all of each other component's nodes + size to probed component
            self.connected_components[probed_comp].update(self.connected_components[comp])
            self.connected_component_sizes[probed_comp] = len(self.connected_components[probed_comp])
            for node in self.connected_components[comp]:
                self.components[self.node_to_row[node]] = probed_comp
            # pop the old component from the dictionaries
            self.connected_components.pop(comp)
            self.connected_component_sizes.pop(comp)
        # update probed component size across the board
        self.node_component_sizes[np.where(self.components == probed_comp)] = self.connected_component_sizes[probed_comp]
        # If the min/max size changed, will need to recompute normalized value for ALL
        # components, rather than just the probed node's component.
        new_min = min(self.connected_component_sizes.values())
        if self.connected_component_sizes[probed_comp] > max_component_size:
            new_max = self.connected_component_sizes[probed_comp]
        else:
            new_max = max_component_size
        if new_min != min_component_size or new_max != max_component_size:
            compute_nx = True # NOTE re-using this flag is a bit adhoc, but it works
            min_component_size = new_min
            max_component_size = new_max
    self.max_comp_size = max_component_size
    self.min_comp_size = min_component_size
    diff = float(self.max_comp_size - self.min_comp_size)
    # Recompute normalization (also taken when the incremental branch changed min/max)
    if compute_nx:
        # if there's more than one component, compute the normalized values
        if len(connected_components) > 1 and diff > 0:
            # Calculate (mycomponent-min_component) / (max_component-min_component)
            return (self.node_component_sizes - self.min_comp_size) / diff
        else: # otherwise, everyone is in the same sized component
            return np.ones(len(self.node_to_row.keys()))
    else:
        # Only the probed component's value can have changed; patch it in place.
        if diff > 0:
            new_val = float(self.connected_component_sizes[probed_comp]-self.min_comp_size)/diff
        else:
            new_val = 1.0
        current_norm_vals[np.where(self.components == probed_comp)] = new_val
        return current_norm_vals
def process_method(oneway=False):
    """Decorator for process_xxx methods for asyncio.

    Wraps a thrift handler coroutine with the read-args / invoke / write-reply
    protocol dance, firing the event-handler hooks around each phase.

    :param oneway: when True, no reply is written back to the transport.
    """
    def _decorator(func):
        # ``nested`` is a generator (it uses ``yield from``) — presumably
        # wrapped into a coroutine by the caller; confirm at the use site.
        def nested(self, seqid, iprot, oprot, server_ctx):
            # Derive the thrift method name from the handler function name,
            # e.g. "process_ping" -> "ping".
            fn_name = func.__name__.split('_', 1)[-1]
            handler_ctx = self._event_handler.getHandlerContext(fn_name,
                                                                server_ctx)
            # Instantiate the generated "<method>_args" struct from the
            # handler's module and populate it from the input protocol.
            args = getattr(sys.modules[func.__module__], fn_name + "_args")()
            reply_type = TMessageType.REPLY
            self._event_handler.preRead(handler_ctx, fn_name, args)
            args.read(iprot)
            iprot.readMessageEnd()
            self._event_handler.postRead(handler_ctx, fn_name, args)
            result = yield from func(self, args, handler_ctx)
            if not oneway:
                # An exception result switches the message type accordingly.
                if isinstance(result, TApplicationException):
                    reply_type = TMessageType.EXCEPTION
                self._event_handler.preWrite(handler_ctx, fn_name, result)
                oprot.writeMessageBegin(fn_name, reply_type, seqid)
                result.write(oprot)
                oprot.writeMessageEnd()
                oprot.trans.flush()
                self._event_handler.postWrite(handler_ctx, fn_name, result)
        return nested
    return _decorator
def search_salary(request):
    """Handle the salary search API endpoint.

    Expects a GET request whose body is a JSON document with ``title`` and
    ``location`` keys. ``location`` may be "City" or "City,State".

    :param request: Django request object.
    :return: HttpResponse with a JSON body; 400 on bad input, 405 on a
        non-GET method, 200 with the Elasticsearch results otherwise.
    """
    logger.info("Received a salary request {}".format(request.method))
    if request.method != 'GET':
        logger.info("WRONG METHOD for salary request {}".format(request.method))
        response_body = {'Error': 'Method Not Allowed'}
        return HttpResponse(status=405, content=json.dumps(response_body), content_type='application/json')
    try:
        request_json_body = json.loads(request.body)
        title = request_json_body['title']
        location = request_json_body['location']
        logger.info("The request has title {} and location {}".format(title, location))
    except (ValueError, KeyError, TypeError):
        # Malformed JSON or missing keys (was a silent bare ``except``).
        response_body = {'Error': 'Bad Request'}
        # Original format string had no placeholder, dropping the body.
        logger.info("Wrong parameters have been passed {}".format(request.body))
        return HttpResponse(status=400, content=json.dumps(response_body), content_type='application/json')
    if ',' in location:
        city, state = location.split(',')
    else:
        city = location
        try:
            state = city_to_state_dict[city.title()]
        except KeyError:
            # Original message was missing the separator between sentences.
            response_body = {'Error': 'Did you spell the city name correctly? '
                                      'Could you use the closest metropolitan city? '
                                      'Could you add state name?'}
            logger.info("Wrong city name {} ".format(request.body))
            return HttpResponse(status=400, content=json.dumps(response_body), content_type='application/json')
    state_abbreviation = us_states[state.lower().strip()]
    query_body = query_builder(title=title, city=city, state=state_abbreviation)
    response = search_in_es(index_name=salary_index_name, query_body=query_body)
    response_body = parse_build_response(response)
    return HttpResponse(status=200, content=json.dumps(response_body), content_type='application/json')
def Get_interp_header_dx2ser(df_raw, header_search, value_ask_raw, logger=mod_logger): # Vlookup. return row (series) at [value_ask, header_search]
    """Vlookup-style interpolator: return the row (Series) at
    [value_ask, header_search], interpolating all columns if needed.

    Allows non-index interpolation (useful for arbitrary indexes). For time
    series (or other non-arbitrary indexes) see Get_interp_index_df2ser.

    USE:
        interp_ser = hp_pd.Get_interp_header_dx2ser(df_raw, header_search, value_ask_raw)
        interp_value = interp_ser[header_desire]

    :param df_raw: data set (with header_search in its columns).
    :param header_search: header name whose column is searched for value_ask.
    :param value_ask_raw: numeric value (on header_search's column) at which
        to interpolate the other columns; rounded to 2 decimals.
    :param logger: logger used for error reporting.
    :return: pandas Series for the (possibly interpolated) row. When the asked
        value falls outside the column's range, the nearest boundary row is
        returned (after logging an error).
    :raises TypeError: if df_raw is not a DataFrame.
    :raises ValueError: if multiple rows already match value_ask exactly.
    """
    # -----------------------------------------------------------------------
    # check inputs
    # -----------------------------------------------------------------------
    if not isinstance(df_raw, pd.core.frame.DataFrame):
        logger.error('got undexpected type on df_raw: %s'%type(df_raw))
        raise TypeError
    # drop nan values
    df_raw = df_raw.dropna(axis='index')
    value_ask = round(value_ask_raw, 2)
    # check if this value is outside the passed column
    # (fixed: ``drop='true'`` only worked because a non-empty string is truthy;
    # also removed a dead ``astype(np.float)`` — ``np.float`` no longer exists)
    df_sort = df_raw.sort_values(by=header_search).reset_index(drop=True)
    if value_ask < df_sort.loc[0, header_search]:
        logger.error('asked value is outside the domain')
        return df_sort.loc[0, :]
    last_index = len(df_sort.index) - 1
    if value_ask > df_sort.loc[last_index, header_search]:
        logger.error('asked value is greater than the serach domain: %.2f'%value_ask)
        return df_sort.iloc[last_index, :]  # return the last row
    # check if interpolation is even needed
    bool_row = df_raw.loc[:, header_search] == value_ask  # search for value
    if sum(bool_row) == 1:  # found one exact match
        results_ser = df_raw.loc[bool_row, :].iloc[0]  # get this row
        return results_ser
    elif sum(bool_row) > 1:  # found multiple matches: ambiguous
        df_trim = df_raw.loc[bool_row, header_search]
        logger.error('found too many existing matches in search: \n %s'%df_trim)
        raise ValueError
    # build a new df with the header_search column as the index
    index = list(df_raw.loc[:, header_search])
    bool_col = df_raw.columns != header_search  # get the remaining columns
    col = df_raw.columns[bool_col]
    data = df_raw.loc[:, bool_col].values  # get all this data
    df = pd.DataFrame(data=data, index=index, columns=col)
    # add an all-NaN row at the requested position
    # (fixed: ``pd.Series(data=None)`` without a dtype is deprecated)
    df.loc[value_ask, :] = pd.Series(data=np.nan, index=col, dtype=float)
    # resort the frame
    df_interp = df.sort_index()
    # convert each value to numeric
    for c in df_interp:
        df_interp[c] = pd.to_numeric(df_interp[c], errors='coerce')
    # interpolate the missing values
    # WARNING: all methods (except linear) interpolate based on the index
    df_new = df_interp.interpolate(method='values')
    # Extract interpolated row
    # (fixed: ``.ix`` was removed from pandas; ``.loc`` does the label lookup)
    results_ser = df_new.loc[value_ask]  # get the results row
    results_ser.loc[header_search] = value_ask  # add the search value/header back
    return results_ser
def get_wordnet_pos(treebank_tag):
    """Translate a Penn TreeBank PoS tag into the matching WordNet PoS tag.

    Returns None when the tag has no WordNet equivalent.
    """
    prefix_to_pos = (
        ('J', wordnet.ADJ),
        ('V', wordnet.VERB),
        ('N', wordnet.NOUN),
        ('R', wordnet.ADV),
    )
    for prefix, pos in prefix_to_pos:
        if treebank_tag.startswith(prefix):
            return pos
    return None
def from_newick(string):
    """
    Returns a tree sequence representation of the specified newick string.
    """
    parsed_tree = dendropy.Tree.get(data=string, schema="newick")
    tables = tskit.TableCollection(1)
    node_ids = {}
    for tree_node in parsed_tree.ageorder_node_iter():
        child_list = list(tree_node.child_nodes())
        if tree_node not in node_ids:
            # Leaves (no children) are flagged as samples.
            # TODO derive information from the node and store it as JSON metadata.
            node_ids[tree_node] = tables.nodes.add_row(
                flags=tskit.NODE_IS_SAMPLE if len(child_list) == 0 else 0,
                time=tree_node.age,
            )
        parent_id = node_ids[tree_node]
        for child in child_list:
            tables.edges.add_row(0, 1, parent_id, node_ids[child])
    return tables.tree_sequence()
def benchmark(problem_file, test_set_file):
    """ Evaluates planners with a random problem from a given problem set and world map.
    Assumes feasible paths can be calculated.

    :param problem_file: A string of map file with .map extension
    :param test_set_file: A string of problem set file with .scen extension
    :return: Returns a tuple of (results_optimal, results_random) where each element is a custom data structure
        carrying calculated path, path length and time elapsed to calculate path.
    """
    class Results(object):
        """Container for one planner run: path, its length and runtime (ms)."""
        def __init__(self, path, path_length, time_elapsed):
            self.path = path
            self.path_length = path_length
            self.time_elapsed = time_elapsed

    world = tools.read_world_file(problem_file)
    # Use a context manager so the handle is always closed (it leaked before).
    with open(test_set_file, 'r') as f:
        problems = f.readlines()
    # Pick random problem; index 0 is skipped — presumably the .scen header line.
    problem_str = problems[random.randint(1, len(problems) - 1)].split()
    # Parse problem string: columns 4-7 are x/y of start and goal.
    start_pose = int(problem_str[5]), int(problem_str[4])
    goal_pose = int(problem_str[7]), int(problem_str[6])
    # Evaluate optimal planner
    t = time.time()
    path = algorithms.planner_optimal(world, start_pose, goal_pose)
    time_ms = tools.sec_to_ms((time.time() - t))
    results_optimal = Results(path, tools.path_length(path), time_ms)
    # Evaluate random planner
    t = time.time()
    path = algorithms.planner_random(world, start_pose, goal_pose, max_step_number=100000)
    time_ms = tools.sec_to_ms((time.time() - t))
    results_random = Results(path, tools.path_length(path), time_ms)
    return results_optimal, results_random
def _recursive_pairwise_outer_join(
dataframes_to_merge, on, lsuffix, rsuffix, npartitions, shuffle
):
"""
Schedule the merging of a list of dataframes in a pairwise method. This is a recursive function that results
in a much more efficient scheduling of merges than a simple loop
from:
[A] [B] [C] [D] -> [AB] [C] [D] -> [ABC] [D] -> [ABCD]
to:
[A] [B] [C] [D] -> [AB] [CD] -> [ABCD]
Note that either way, n-1 merges are still required, but using a pairwise reduction it can be completed in parallel.
:param dataframes_to_merge: A list of Dask dataframes to be merged together on their index
:return: A single Dask Dataframe, comprised of the pairwise-merges of all provided dataframes
"""
number_of_dataframes_to_merge = len(dataframes_to_merge)
merge_options = {
"on": on,
"lsuffix": lsuffix,
"rsuffix": rsuffix,
"npartitions": npartitions,
"shuffle": shuffle,
}
# Base case 1: just return the provided dataframe and merge with `left`
if number_of_dataframes_to_merge == 1:
return dataframes_to_merge[0]
# Base case 2: merge the two provided dataframe to be merged with `left`
if number_of_dataframes_to_merge == 2:
merged_ddf = dataframes_to_merge[0].join(
dataframes_to_merge[1], how="outer", **merge_options
)
return merged_ddf
# Recursive case: split the list of dfs into two ~even sizes and continue down
else:
middle_index = number_of_dataframes_to_merge // 2
merged_ddf = _recursive_pairwise_outer_join(
[
_recursive_pairwise_outer_join(
dataframes_to_merge[:middle_index], **merge_options
),
_recursive_pairwise_outer_join(
dataframes_to_merge[middle_index:], **merge_options
),
],
**merge_options,
)
return merged_ddf | 5,325,335 |
def signum(x):
    """Return the sign of ``x`` as a float.

    :param x: a number.
    :return: 1.0 if x > 0, -1.0 if x < 0, 0.0 otherwise.

    Note: the original returned the int ``0`` for zero and fell through to an
    implicit ``None`` for NaN; both now consistently return a float.
    """
    if x > 0:
        return 1.0
    if x < 0:
        return -1.0
    return 0.0
def test_nodes_controller_wrapper(_nodes_controller, _transfer_file):
    """Check nodes controller wrapper command.

    Wrapper should be a valid stringified nuke command.
    """
    wrapped_command = _nodes_controller._paste_nodes_wrapper(_transfer_file)
    assert wrapped_command == "nuke.nodePaste('{}')".format(_transfer_file)
def is_date(word):
    """
    is_date()

    Purpose: Checks if word is a date.

    @param word. A string.
    @return the matched object if it is a date, otherwise None.

    >>> is_date('2015-03-1') is not None
    True
    >>> is_date('2014-02-19') is not None
    True
    >>> is_date('03-27-1995') is not None
    True
    >>> is_date('201') is not None
    False
    >>> is_date('0') is not None
    False
    """
    date_pattern = re.compile(
        r'^(\d\d\d\d-\d\d-\d|\d\d?-\d\d?-\d\d\d\d?|\d\d\d\d-\d\d?-\d\d?)$'
    )
    return date_pattern.search(word)
def create_pg_db_and_user(context, su_passwd=None, config_types=None):
    """Create the PostgreSQL databases and users described by the configs.

    For each config type, reads ``cmds/<type>_createpg.sh`` and feeds it to
    ``psql`` as the ``postgres`` system user via sudo.

    :param context: invoke/fabric-style context providing ``sudo``.
    :param su_passwd: sudo password; prompted interactively when None.
    :param config_types: config names to process; defaults to
        ["development", "testing"]. (Was a mutable default ``[]``.)
    """
    import getpass
    if su_passwd is None:
        su_passwd = getpass.getpass("sudo password:")
    if not config_types:
        config_types = ["development", "testing"]
    for config_type in config_types:
        with open(f"cmds/{config_type}_createpg.sh") as f:
            part = f.read()
        log.info(f"正在为{config_type}配置创建rdb")
        # NOTE(review): ``part`` is interpolated unquoted into a bash -c
        # string — safe only because the scripts are local and trusted.
        context.sudo(
            f"bash -c 'psql postgres <<< $(echo {part})'",
            user="postgres",
            password=su_passwd,
        )
def test_epoch(model, base_dist, test_loader, epoch,
               device=None, annealing=False):
    """Calculate validation loss.

    Args:
        model: instance of CVAE
        base_dist: r1(z) prior distribution
        test_loader: instance of pytorch DataLoader
        epoch: current epoch number (drives the KL annealing schedule)
        device: device to use
        annealing: whether to anneal the KL loss

    Returns:
        (average total loss, average reconstruction loss, average KL loss)
        over test_loader.
    """
    # KL weight annealing. This is needed to avoid posterior collapse.
    if annealing:
        kl_weight = torch.tensor(
            kl_weight_schedule(epoch, quiet=True)).to(device)
    else:
        kl_weight = torch.tensor(1.0).to(device)
    running_reconstruction = 0.0
    running_kl = 0.0
    with torch.no_grad():
        model.eval()
        for h, x in test_loader:
            if device is not None:
                h = h.to(device, non_blocking=True)
                x = x.to(device, non_blocking=True)
            # Sample a noise realization
            y = h + torch.randn_like(h)
            reconstruction_loss, kl_loss = model(x, y, base_dist)
            # Accumulate the per-batch totals
            running_reconstruction += reconstruction_loss.sum()
            running_kl += kl_loss.sum()
    n_examples = len(test_loader.dataset)
    avg_reconstruction_loss = running_reconstruction.item() / n_examples
    avg_kl_loss = running_kl.item() / n_examples
    avg_loss = avg_reconstruction_loss + kl_weight.item() * avg_kl_loss
    print('Test set: Average Loss: {:.4f}\t=\t'
          'Reconstruction loss: {:.4f}\t +'
          '\t (KL weight) * KL loss: {:.4f}\n'.format(
              avg_loss,
              avg_reconstruction_loss, avg_kl_loss))
    return avg_loss, avg_reconstruction_loss, avg_kl_loss
def get_rest_value_from_path(status, device_class, path: str):
    """Parser for REST path from device status.

    :param status: device status dict (possibly nested one level).
    :param device_class: device class constant; timestamps get converted from
        an uptime-in-seconds value to an ISO-formatted boot time.
    :param path: attribute path, either "key" or "outer/inner".
    :return: the extracted (and possibly post-processed) attribute value.
    """
    # Split once instead of three times (was ``path.split("/")`` per access).
    parts = path.split("/")
    if len(parts) == 1:
        attribute_value = status[path]
    else:
        attribute_value = status[parts[0]][parts[1]]
    if device_class == DEVICE_CLASS_TIMESTAMP:
        # Value is uptime in seconds; report the boot moment instead.
        last_boot = datetime.utcnow() - timedelta(seconds=attribute_value)
        attribute_value = last_boot.replace(microsecond=0).isoformat()
    if "new_version" in path:
        # Version strings look like "<prefix>/<version>@<suffix>".
        attribute_value = attribute_value.split("/")[1].split("@")[0]
    return attribute_value
def submit(request):
    """ View for the submit page.

    Renders the user's GitHub repositories; anonymous users are redirected
    to the GitHub OAuth flow.
    """
    if not request.user.is_active:
        return HttpResponseRedirect(reverse('socialauth_begin', args=('github',)))
    social_account = UserSocialAuth.objects.filter(provider='github').get(user_id=request.user.id)
    github_client = Github(social_account.tokens[u'access_token'])
    repos = list(github_client.get_user().get_repos())
    return render(request, 'base/submit.html', {'repos': repos})
def all_combined_threshold(input_image):
    """
    Apply all thresholds to undistorted image
    :param input_image: Undistorted image
    :return: Combined binary image
    Ref: Course notes
    """
    # Apply Gaussian blur to the input image
    kernel_size = 5
    input_image = cv2.GaussianBlur(input_image, (kernel_size, kernel_size), 0)
    # Sobel kernel size
    ksize = 5  # Should be an odd number to smooth a gradient
    # Apply threshold functions
    absolute_binaryX = absolute_sobel_threshold(input_image, orient='x', thresh=(20, 255))  # thresh=(30, 255)
    absolute_binaryY = absolute_sobel_threshold(input_image, orient='y', thresh=(30, 255))  # thresh=(30, 255)
    magnitude_binary = magnitude_threshold(input_image, sobel_kernel=ksize, mag_thresh=(70, 255))  # mag_thresh=(60, 255)
    direction_binary = direction_threshold(input_image, sobel_kernel=ksize, thresh=(0.7, 1.3))
    # Combine the thresholds.
    # BUGFIX: the original wrote ``absolute_binaryX == 1 | (...)`` which, due
    # to operator precedence (| binds tighter than ==), reduced to just
    # ``absolute_binaryX == 1`` and silently dropped the Y/direction term.
    combine_all_binary = np.zeros_like(direction_binary)
    combine_all_binary[((absolute_binaryX == 1) | ((absolute_binaryY == 1)
                        & (direction_binary == 1))) | (magnitude_binary == 1)] = 1
    return combine_all_binary
def get_root_logger(profile='panoptes', log_config=None):
    """Creates a root logger for PANOPTES used by the PanBase object.

    Args:
        profile (str, optional): The name of the logger to use, defaults
            to 'panoptes'.
        log_config (dict|None, optional): Configuration options for the logger.
            See https://docs.python.org/3/library/logging.config.html for
            available options. Default is `None`, which then looks up the
            values in the `log.yaml` config file.

    Returns:
        logger(logging.logger): A configured instance of the logger
    """
    # Get log info from config
    log_config = log_config if log_config else load_default()
    # If we already created a logger for this profile and log_config, return that.
    # JSON-serializing the config makes the (unhashable) dict usable in a key.
    logger_key = (profile, to_json(log_config, sort_keys=True))
    try:
        return all_loggers[logger_key]
    except KeyError:
        pass
    # Alter the log_config to use UTC times
    if log_config.get('use_utc', True):
        # TODO(jamessynge): Figure out why 'formatters' is sometimes
        # missing from the log_config. It is hard to understand how
        # this could occur given that none of the callers of
        # get_root_logger pass in their own log_config.
        if 'formatters' not in log_config:  # pragma: no cover
            # TODO(jamessynge): Raise a custom exception in this case instead
            # of issuing a warning; after all, a standard dict will throw a
            # KeyError in the for loop below if 'formatters' is missing.
            warn('formatters is missing from log_config!')
            warn(f'log_config: {log_config!r}')
        log_fname_datetime = datetime.datetime.utcnow().strftime('%Y%m%dT%H%M%SZ')
        # Make the log use UTC (process-wide side effect on logging.Formatter)
        logging.Formatter.converter = time.gmtime
    else:
        log_fname_datetime = datetime.datetime.now().strftime('%Y%m%dT%H%M%S')
    # Setup log file names
    invoked_script = os.path.basename(sys.argv[0])
    log_dir = os.getenv('PANLOG', '')
    if not log_dir:
        log_dir = os.path.join(os.getenv('PANDIR', gettempdir()), 'logs')
    per_run_dir = os.path.join(log_dir, 'per-run', invoked_script)
    log_fname = '{}-{}-{}'.format(invoked_script, log_fname_datetime, os.getpid())
    # Create the directory for the per-run files.
    os.makedirs(per_run_dir, exist_ok=True)
    # Set log filename and rotation for each configured handler
    for handler in log_config.get('handlers', []):
        # Set the filename
        partial_fname = '{}-{}.log'.format(log_fname, handler)
        full_log_fname = os.path.join(per_run_dir, partial_fname)
        log_config['handlers'][handler].setdefault('filename', full_log_fname)
        # Setup the TimedRotatingFileHandler for middle of day
        log_config['handlers'][handler].setdefault('atTime', datetime.time(hour=11, minute=30))
        # Create a symlink to the log file with just the name of the script and the handler
        # (level), as this makes it easier to find the latest file.
        log_symlink = os.path.join(log_dir, '{}-{}.log'.format(invoked_script, handler))
        log_symlink_target = os.path.abspath(full_log_fname)
        # Replace any stale symlink from a previous run.
        with suppress(FileNotFoundError):
            os.unlink(log_symlink)
        os.symlink(log_symlink_target, log_symlink)
    # Configure the logger
    logging.config.dictConfig(log_config)
    # Get the logger and set as attribute to class
    logger = logging.getLogger(profile)
    # Set custom LogRecord (process-wide side effect)
    logging.setLogRecordFactory(StrFormatLogRecord)
    logger.info('{:*^80}'.format(' Starting PanLogger '))
    # TODO(jamessynge) Output name of script, cmdline args, etc. And do so
    # when the log rotates too!
    all_loggers[logger_key] = logger
    return logger
def visualize_lidar_of_sample(level5data, sample_token: str, axes_limit=80):
    """Helper to visualize sample lidar data"""
    lidar_token = level5data.get("sample", sample_token)["data"]["LIDAR_TOP"]
    level5data.render_sample_data(lidar_token, axes_limit=axes_limit)
async def run(rest_client, debug=False):
    """
    Actual runtime / loop.

    Deletes pilots that have not updated within the last 14 days, then
    reschedules itself on the IOLoop.

    Args:
        rest_client (:py:class:`iceprod.core.rest_client.Client`): rest client
        debug (bool): debug flag to propagate exceptions
    """
    start_time = time.time()
    # Pilots last updated before this cutoff are considered stale.
    time_limit = datetime.utcnow() - timedelta(days=14)
    async def reset_pilot(pilot_id):
        # "Reset" deletes the pilot record via the REST API.
        await rest_client.request('DELETE', '/pilots/{}'.format(pilot_id))
    try:
        pilots = await rest_client.request('GET', '/pilots')
        for pilot in pilots.values():
            if 'last_update' not in pilot or str2datetime(pilot['last_update']) < time_limit:
                await reset_pilot(pilot['pilot_id'])
    except Exception:
        logger.error('error cleaning pilots', exc_info=True)
        if debug:
            raise
    # Schedule the next run: roughly hourly, but never sooner than 60 s.
    # (A previous comment claimed "5 minute delay"; the math says otherwise.)
    stop_time = time.time()
    delay = max(60*60 - (stop_time-start_time), 60)
    IOLoop.current().call_later(delay, run, rest_client)
def sbol_cds (ax, type, num, start, end, prev_end, scale, linewidth, opts):
    """ Built-in SBOL coding sequence renderer.

    Draws an arrow-shaped CDS glyph on ``ax`` between the previous part's end
    and the computed end position, optionally labelled. The drawing direction
    is reversed when ``start > end``.

    :param ax: matplotlib axes to draw on.
    :param type/num: part type and index (unused here; part of the common
        renderer signature).
    :param start, end: requested part coordinates; only their order (forward
        vs reverse) is used, the actual extent comes from the options.
    :param prev_end: x position where the previous part finished.
    :param scale, linewidth: global rendering parameters (both overridable
        via ``opts``).
    :param opts: optional dict of overrides (color, hatch, paddings, extents,
        arrowhead geometry, label, ...).
    :return: (prev_end, final_end) — the part's actual start and end x range.
    """
    # Default options
    color = (0.7,0.7,0.7)
    hatch = ''
    start_pad = 1.0
    end_pad = 1.0
    y_extent = 5
    x_extent = 30
    arrowhead_height = 4
    arrowhead_length = 8
    # Reset defaults if provided
    if opts != None:
        if 'color' in opts.keys():
            color = opts['color']
        if 'hatch' in opts.keys():
            hatch = opts['hatch']
        if 'start_pad' in opts.keys():
            start_pad = opts['start_pad']
        if 'end_pad' in opts.keys():
            end_pad = opts['end_pad']
        if 'y_extent' in opts.keys():
            y_extent = opts['y_extent']
        if 'x_extent' in opts.keys():
            x_extent = opts['x_extent']
        if 'arrowhead_height' in opts.keys():
            arrowhead_height = opts['arrowhead_height']
        if 'arrowhead_length' in opts.keys():
            arrowhead_length = opts['arrowhead_length']
        if 'linewidth' in opts.keys():
            linewidth = opts['linewidth']
        if 'scale' in opts.keys():
            scale = opts['scale']
    # Check direction add start padding
    dir_fac = 1.0
    final_end = end
    final_start = prev_end
    if start > end:
        # Reverse strand: arrow points left; swap padding roles.
        dir_fac = -1.0
        start = prev_end+end_pad+x_extent
        end = prev_end+end_pad
        final_end = start+start_pad
    else:
        start = prev_end+start_pad
        end = start+x_extent
        final_end = end+end_pad
    # Draw the CDS symbol: a rectangle body with a triangular arrowhead.
    p1 = Polygon([(start, y_extent),
                  (start, -y_extent),
                  (end-dir_fac*arrowhead_length, -y_extent),
                  (end-dir_fac*arrowhead_length, -y_extent-arrowhead_height),
                  (end, 0),
                  (end-dir_fac*arrowhead_length, y_extent+arrowhead_height),
                  (end-dir_fac*arrowhead_length, y_extent)],
                 edgecolor=(0.0,0.0,0.0), facecolor=color, linewidth=linewidth,
                 hatch=hatch, zorder=11,
                 path_effects=[Stroke(joinstyle="miter")]) # This is a work around for matplotlib < 1.4.0
    ax.add_patch(p1)
    # Center the label within the part's actual x range (either direction).
    if opts != None and 'label' in opts.keys():
        if final_start > final_end:
            write_label(ax, opts['label'], final_end+((final_start-final_end)/2.0), opts=opts)
        else:
            write_label(ax, opts['label'], final_start+((final_end-final_start)/2.0), opts=opts)
    if final_start > final_end:
        return prev_end, final_start
    else:
        return prev_end, final_end
def convert_halo_to_array_form(halo, ndim):
    """
    Converts the :samp:`{halo}` argument to a :samp:`(ndim, 2)` shaped array.

    :type halo: :samp:`None`, :obj:`int`, an :samp:`{ndim}` length sequence
       of :samp:`int` or :samp:`({ndim}, 2)` shaped array of :samp:`int`
    :param halo: Halo to be converted to :samp:`({ndim}, 2)` shaped array form.
    :type ndim: :obj:`int`
    :param ndim: Number of dimensions.
    :rtype: :obj:`numpy.ndarray`
    :return: A :samp:`({ndim}, 2)` shaped array of :obj:`numpy.int64` elements.

    Examples::
       >>> convert_halo_to_array_form(halo=2, ndim=4)
       array([[2, 2],
              [2, 2],
              [2, 2],
              [2, 2]])
       >>> convert_halo_to_array_form(halo=[0, 1, 2], ndim=3)
       array([[0, 0],
              [1, 1],
              [2, 2]])
       >>> convert_halo_to_array_form(halo=[[0, 1], [2, 3], [3, 4]], ndim=3)
       array([[0, 1],
              [2, 3],
              [3, 4]])
    """
    halo_dtype = _np.int64
    # None / scalar: every axis gets the same (lo, hi) pair, so the shape is
    # (ndim, 2) by construction and no further validation is needed.
    if halo is None:
        return _np.zeros((ndim, 2), dtype=halo_dtype)
    if is_scalar(halo):
        return _np.zeros((ndim, 2), dtype=halo_dtype) + halo
    halo_shape = _np.array(halo).shape
    if (ndim == 1) and (halo_shape == (2,)):
        # A bare (lo, hi) pair for the single dimension.
        halo = _np.array([halo, ], copy=True, dtype=halo_dtype)
    elif len(halo_shape) == 1:
        # One width per dimension: duplicate into (lo, hi) columns.
        halo = _np.array([halo, halo], dtype=halo_dtype).T.copy()
    else:
        halo = _np.array(halo, copy=True, dtype=halo_dtype)
    if halo.shape[0] != ndim:
        raise ValueError(
            "Got halo.shape=%s, expecting halo.shape=(%s, 2)"
            %
            (halo.shape, ndim)
        )
    return halo
def keys(key_file,
         gen_key,
         show_vk):
    """
    This set of commands support creation of signing key (private) and showing the verification key (public)
    from a previously loaded signing key. Signing key is stored in PEM format
    """
    if not gen_key and show_vk is None:
        raise nRFException("Use either gen-key or show-vk.")
    signer = Signing()
    if gen_key:
        # Refuse to clobber an existing key without explicit confirmation.
        if os.path.exists(key_file) and not query_func(
                "File found at %s. Do you want to overwrite the file?" % key_file):
            click.echo('Key generation aborted')
            return
        signer.gen_key(key_file)
        click.echo("Generated key at: %s" % key_file)
    elif show_vk:
        if not os.path.isfile(key_file):
            raise nRFException("No key file to load at: %s" % key_file)
        signer.load_key(key_file)
        click.echo(signer.get_vk(show_vk))
def update_all_users():
    """Refresh stored tweets for every user in the user table."""
    for user_record in User.query.all():
        add_or_update_user(user_record.name)
def tran_canny(image):
    """Suppress noise with a Gaussian blur, then run Canny edge detection."""
    # 3x3 Gaussian blur to remove noise before edge detection.
    image = cv2.GaussianBlur(image, (3, 3), 0)
    # Hysteresis thresholds: 50 (low) / 150 (high).
    return cv2.Canny(image, 50, 150)
def draw_star(img_size, num_frames, bg_config, nb_branches=6):
    """ Draw a moving star on generated backgrounds and output the interest points.

    Parameters:
        img_size: (height, width) of the generated frames
        num_frames: number of frames to generate
        bg_config: background configuration, forwarded on retries
        nb_branches: upper bound on the number of branches of the star
    Returns:
        (images, points, events) arrays; points holds the per-frame interest
        points kept inside the image.
    """
    images = generate_background(img_size, num_frames=num_frames)
    background_color = int(np.mean(images))
    num_branches = random_state.randint(3, nb_branches)
    min_dim = min(img_size[0], img_size[1])
    thickness = random_state.randint(min_dim * 0.01, min_dim * 0.02)
    rad = max(random_state.rand() * min_dim / 2, min_dim / 5)
    x = random_state.randint(rad, img_size[1] - rad)  # select the center of a circle
    y = random_state.randint(rad, img_size[0] - rad)
    # Sample num_branches points inside the circle, one per angular sector
    slices = np.linspace(0, 2 * math.pi, num_branches + 1)
    angles = [slices[j] + random_state.rand() * (slices[j+1] - slices[j])
              for j in range(num_branches)]
    points = np.array([[int(x + max(random_state.rand(), 0.3) * rad * math.cos(a)),
                        int(y + max(random_state.rand(), 0.3) * rad * math.sin(a))]
                       for a in angles])
    points = np.concatenate(([[x, y]], points), axis=0)
    color = get_random_color(background_color)
    rotation = get_random_rotation()
    speed = get_random_speed()
    pts_list = []
    img_list = []
    for i in range(num_frames):
        img = images[i]
        # BUGFIX: np.int was removed from numpy; use the builtin int.
        pts = np.empty((0, 2), dtype=int)
        center = (points[0][0], points[0][1])
        # Rotate the star around its center and translate it by ``speed``.
        points = (np.matmul(points - center, rotation) + center + speed).astype(int)
        for j in range(1, num_branches + 1):
            cv.line(img, (points[0][0], points[0][1]),
                    (points[j][0], points[j][1]),
                    int(color[j]), thickness)
        # Keep only the points inside the image
        pts = keep_points_inside(points, img_size)
        if len(pts) == 0:
            # The star drifted fully out of frame: retry the whole generation.
            # BUGFIX: the original recursed here but discarded the result.
            return draw_star(img_size, num_frames, bg_config, nb_branches)
        pts_list.append(pts)
        img_list.append(img)
    images = np.array(img_list)
    points = np.array(pts_list)
    event_sim = es.Event_simulator(images[0], 0)
    events = np.array([event_sim.simulate(img, 0) for img in images[1:]])
    return images, points, events
def forestvar(z_in):
    """ Return intrinsic variance of LyaF variance for weighting. This
    estimate is roughly from McDonald et al 2006

    Parameters
    ----------
    z_in : float or ndarray

    Returns
    -------
    fvar : float or ndarray
      Variance
    """
    amplitude = 0.065
    pivot_redshift = 2.25
    exponent = 3.8
    return amplitude * ((1. + z_in) / (1. + pivot_redshift)) ** exponent
def _process_image_file(fobj):
    """Re-encode a dataset image file as JPEG.

    Some files actually contain GIF, PNG or BMP data (despite having a .jpg
    extension), and some encoding options make TF crash, so every image is
    decoded and re-encoded.
    """
    decoded = _decode_image(fobj)
    return _encode_image(decoded, image_format="JPEG")
def main() -> None:
    """Use read_orgmode_file() to read in a file, then operate on the resulting notes."""
    parser = argparse.ArgumentParser(description='Reads in an org-mode file and generates a ' +
                                     'matching Anki card with HTML formatting.')
    parser.add_argument('-i', '--input', nargs=1, type=str, required=True, dest='input_file',
                        help='Location of the org-mode file to consume.')
    parser.add_argument('-o', '--output', nargs=1, type=str, required=False, dest='output_file',
                        help='Output file where HTML-formatted Anki cards are written.')
    parser.add_argument('-a', '--append', action='store_true', help='Append to output file, ' +
                        'rather than writing to it.')
    args = parser.parse_args()
    # Argparse guarantees input_file is present; no try-except needed here.
    input_file = args.input_file[0].strip()
    if args.output_file is None:
        # Default output: the input's basename with a '.txt' suffix.
        output_file = input_file.rsplit('.', 1)[0] + '.txt'
    else:
        output_file = args.output_file[0].strip()
    try:
        cards = [format_card(note) for note in read_orgmode_file(input_file)]
        write_cards_to_file(cards, output_file, args.append)
    except UnicodeDecodeError as err:
        print('Text in the input file {} was not UTF-8 encoded.'.format(input_file), err)
        return
    print('Done! Anki cards written to "{}"'.format(output_file))
def normalize_contract_type(
    contract_type_data: Dict[str, Any],
    source_id: str,
) -> Iterable[Tuple[str, Any]]:
    """
    Yield the normalized (field, value) pairs for one contract type found
    in compiler output.
    """
    yield "abi", contract_type_data["abi"]
    yield "sourceId", source_id
    if "evm" in contract_type_data:
        evm_data = contract_type_data["evm"]
        if "bytecode" in evm_data:
            yield "deploymentBytecode", normalize_bytecode_object(
                evm_data["bytecode"]
            )
        if "deployedBytecode" in evm_data:
            yield "runtimeBytecode", normalize_bytecode_object(
                evm_data["deployedBytecode"]
            )
    for doc_field in ("devdoc", "userdoc"):
        if doc_field in contract_type_data:
            yield doc_field, contract_type_data[doc_field]
    # make sure metadata isn't an empty string in solc output
    metadata = contract_type_data.get("metadata")
    if metadata:
        yield "compiler", normalize_compiler_object(json.loads(metadata))
def test_SetView_ge(testcase, obj1, obj2, exp_gt, exp_lt, exp_ge, exp_le):
    # pylint: disable=unused-argument
    """
    Test function for SetView.__ge__()

    Only exp_ge is checked here; the other expectations are part of the
    shared parametrization and consumed by the sibling comparison tests.
    """
    # Double check they are different objects
    assert id(obj1) != id(obj2)
    # The code to be tested
    ge = (obj1 >= obj2)
    # Ensure that exceptions raised in the remainder of this function
    # are not mistaken as expected exceptions
    assert testcase.exp_exc_types is None
    assert ge == exp_ge | 5,325,357 |
def compute_v2g_scores(reg, cisreg):
    """
    Goes through evidence and scores associations to a SNP
    Args:
    * [ Regulatory_Evidence ]
    * [ Cisregulatory_Evidence ]
    Returntype: dict(Gene: dict(string: float)), dict(Gene: float)
    """
    intermediary_scores = dict()
    gene_scores = dict()
    for gene in cisreg:
        intermediary_scores[gene] = collections.defaultdict(int)
        seen = set()
        # Keep the maximum score observed per evidence source.
        for evidence in cisreg[gene] + reg:
            if evidence.source not in seen or float(evidence.score) > intermediary_scores[gene][evidence.source]:
                intermediary_scores[gene][evidence.source] = float(evidence.score)
                seen.add(evidence.source)
            # VEP stats
            if evidence.source == 'VEP':
                intermediary_scores[gene]['VEP_count'] += 1
                intermediary_scores[gene]['VEP_sum'] += float(evidence.score)
            # GTEx scores are tracked per tissue rather than per source.
            if evidence.source == 'GTEx':
                intermediary_scores[gene][evidence.tissue] = float(evidence.score)
        # Ad hoc bounds defined here:
        # PCHiC
        # NOTE(review): indexing the defaultdict inserts a 0 'PCHiC' entry
        # even when no PCHiC evidence was seen — presumably intentional.
        intermediary_scores[gene]['PCHiC'] = min(intermediary_scores[gene]['PCHiC'], 1)
        # VEP
        if 'VEP' in intermediary_scores[gene]:
            intermediary_scores[gene]['VEP_mean'] = intermediary_scores[gene]['VEP_sum'] / intermediary_scores[gene]['VEP_count']
        # Weighted sum
        gene_scores[gene] = sum(intermediary_scores[gene][source] * postgap.Globals.EVIDENCE_WEIGHTS[source] for source in intermediary_scores[gene] if source in postgap.Globals.EVIDENCE_WEIGHTS)
    return intermediary_scores, gene_scores | 5,325,358 |
def _old_normalize_batch_in_training(x, gamma, beta,
                                     reduction_axes, epsilon=1e-3):
    """Computes mean and std for batch then apply batch_normalization on batch.

    Legacy Theano implementation. Tries the cuDNN spatial batch-norm fast
    path for 4D inputs reduced over axes [0, 2, 3] on a GPU device, and
    falls back to pure-Theano math otherwise. Returns
    (normalized_x, mean, variance); on the cuDNN path mean and variance
    are flattened to 1-D.
    """
    if gamma is None:
        gamma = ones_like(x)
    if beta is None:
        beta = zeros_like(x)
    dev = theano.config.device
    use_cudnn = ndim(x) < 5 and reduction_axes == [0, 2, 3] and (dev.startswith('cuda') or dev.startswith('gpu'))
    if use_cudnn:
        broadcast_beta = beta.dimshuffle('x', 0, 'x', 'x')
        broadcast_gamma = gamma.dimshuffle('x', 0, 'x', 'x')
        try:
            normed, mean, stdinv = theano.sandbox.cuda.dnn.dnn_batch_normalization_train(
                x, broadcast_gamma, broadcast_beta, 'spatial', epsilon)
            normed = theano.tensor.as_tensor_variable(normed)
            mean = theano.tensor.as_tensor_variable(mean)
            stdinv = theano.tensor.as_tensor_variable(stdinv)
            # cuDNN returns the inverse stddev; convert to variance.
            var = T.inv(stdinv ** 2)
            return normed, T.flatten(mean), T.flatten(var)
        except AttributeError:
            # Older Theano without dnn_batch_normalization_train: fall
            # through to the generic implementation below.
            pass
    var = x.var(reduction_axes)
    mean = x.mean(reduction_axes)
    # Build a broadcastable shape: 1 along reduced axes, original size elsewhere.
    target_shape = []
    for axis in range(ndim(x)):
        if axis in reduction_axes:
            target_shape.append(1)
        else:
            target_shape.append(x.shape[axis])
    target_shape = T.stack(*target_shape)
    broadcast_mean = T.reshape(mean, target_shape)
    broadcast_var = T.reshape(var, target_shape)
    broadcast_beta = T.reshape(beta, target_shape)
    broadcast_gamma = T.reshape(gamma, target_shape)
    normed = batch_normalization(x, broadcast_mean, broadcast_var,
                                 broadcast_beta, broadcast_gamma,
                                 epsilon)
    return normed, mean, var | 5,325,359 |
def ceil_to_batch_size(num, batch_size):
    """Round ``num`` up to the nearest multiple of ``batch_size``.

    (The previous docstring said "how many full batches in num", but the
    function returns the padded item count, not the batch count.)

    Parameters
    ----------
    num : int
        Item count to round up.
    batch_size : int
        Batch size; assumed positive.

    Returns
    -------
    int
        Smallest multiple of ``batch_size`` that is >= ``num``.
    """
    # Ceiling division in integer arithmetic. The old
    # int(batch_size * ceil(num / batch_size)) went through a float and
    # could round incorrectly for nums beyond float precision.
    return int(-(-num // batch_size) * batch_size)
def write_run_record(message_text):
    """Append one line to today's run log for later auditing.

    Log files live under ./record/ and are named YYYY_MM_DD.txt; the
    directory is created on first use.

    :param message_text: text to append (a trailing newline is added).
    """
    # Make sure the log directory exists (no-op when already present).
    os.makedirs("./record/", exist_ok=True)
    # One log file per day; explicit UTF-8 so non-ASCII messages survive
    # regardless of the platform's default encoding.
    log_path = f"./record/{datetime.datetime.now().strftime('%Y_%m_%d')}.txt"
    with open(log_path, 'a', encoding='utf-8') as file_record:
        file_record.write(f"{message_text}\n")
def test_session_ca_bundle():
    """
    test salt.utils.http.session when setting ca_bundle
    (docstring previously said "https"; the code under test is
    salt.utils.http.session)
    """
    fpath = "/tmp/test_bundle"
    # Pretend the bundle file exists so session() accepts the path.
    patch_os = patch("os.path.exists", MagicMock(return_value=True))
    with patch_os:
        ret = salt.utils.http.session(ca_bundle=fpath)
    # The returned requests.Session must verify TLS against our bundle.
    assert ret.verify == fpath | 5,325,362 |
def error(msg, error_type=None):
    """Terminate the process with a qastetray-cli error message."""
    # Fall back to the translated word "error" when no type was given.
    label = error_type if error_type is not None else _("error")
    sys.exit("qastetray-cli: {}: {}".format(label, msg))
def quaternion_to_rotation_matrix(quaternion):
    """
    This function transforms a quaternion into a 3x3 rotation matrix.
    Parameters
    ----------
    :param quaternion: a quaternion or a batch of quaternions, shape
        (..., 4), ordered [scalar term, vector term] i.e. [w, x, y, z]
    Returns
    -------
    :return: 3x3 rotation matrices with shape (..., 3, 3)
    """
    init_shape = list(quaternion.shape)
    # Flatten all leading batch dimensions into a single one.
    q = quaternion.view(-1, init_shape[-1])
    # BUG FIX: size the output by the *flattened* batch count q.shape[0].
    # The old code used quaternion.shape[0], which allocated a (4, 3, 3)
    # tensor for a single (4,) quaternion and then indexed q out of range,
    # and mis-sized the output for inputs with more than one batch dim.
    R = torch.zeros((q.shape[0], 3, 3), dtype=quaternion.dtype).to(device)
    for i in range(R.shape[0]):
        w, x, y, z = q[i]
        R[i] = torch.tensor([[2 * (w ** 2 + x ** 2) - 1, 2 * (x * y - w * z), 2 * (x * z + w * y)],
                             [2 * (x * y + w * z), 2 * (w ** 2 + y ** 2) - 1, 2 * (y * z - w * x)],
                             [2 * (x * z - w * y), 2 * (y * z + w * x), 2 * (w ** 2 + z ** 2) - 1]],
                            dtype=quaternion.dtype)
    new_shape = init_shape[:-1]
    new_shape.append(3)
    new_shape.append(3)
    return R.view(new_shape)
def recreate():
    """Recreate the database by dropping it and creating it from scratch.

    WARNING: destructive — all existing data is lost.
    """
    drop()
    create() | 5,325,365 |
def test_protids():
    """Pdom r1.2: extract protein IDs from GFF3"""
    db = genhub.test_registry.genome('Pdtl')
    # Protein IDs expected to be present in the test GFF3 file.
    protids = ['PdomMRNAr1.2-08518.1', 'PdomMRNAr1.2-11420.1',
               'PdomMRNAr1.2-08519.1']
    infile = 'testdata/gff3/pdom-266.gff3'
    testids = list()
    with open(infile, 'r') as instream:
        for protid in db.gff3_protids(instream):
            testids.append(protid)
    # Order is not guaranteed by gff3_protids, so compare sorted lists.
    assert sorted(protids) == sorted(testids), \
        'protein ID mismatch: %r %r' % (protids, testids) | 5,325,366 |
def session_end(bot):
    """:crossed_flags: *TRPGのセッションを終わります*\n`/cc kp end`"""
    # Collect every registered player character for this team/user context.
    target_status = "pc_id"
    user_data = {}
    lst_player_data = get_lst_player_data(bot.team_id, bot.user_id, target_status)
    msg_return = "| 名前 | PC | 備考 |\n|--|--|--|\n"
    for player_data in lst_player_data:
        name = player_data["name"]
        user_id = player_data["user_id"]
        url = player_data["user_param"]["url"]
        user_data[user_id] = {"url": url,
                              "name": name}
    lst_users_list = get_users_list(bot.token)
    for user_id, user_datum in user_data.items():
        # TODO: N+1 scan — build a user_id -> user dict once instead of
        # filtering the full user list for every player.
        player_data = list(filter(lambda x: x["id"] == user_id, lst_users_list))
        # BUG FIX: filter() never yields None; an unmatched user produces an
        # empty list, which previously fell through to player_data[0] and
        # raised IndexError. Skip unmatched users instead.
        if not player_data:
            continue
        pc_name = user_datum["name"]
        url = user_datum["url"]
        real_name = player_data[0]["real_name"]
        msg_return += f"| @{real_name} | [{pc_name}]({url}) | |\n"
    return msg_return, None
def log_json(req_context, params=None, err=None):
    """Build a structured log-message dict.

    Merges the request context with the ambient Util context and, when
    params and/or an error are supplied, attaches them under 'params'.

    :param req_context: mapping of request-scoped fields.
    :param params: optional extra parameters to log.
    :param err: optional error folded into the params via append_error().
    :return: dict ready for the logger to serialize.
    """
    context = Util.get_context()
    # Request fields first, then ambient context (context wins on clashes);
    # the old dict-comprehension copy of the merged dict was redundant.
    log_msg = dict(req_context)
    log_msg.update(context)
    if params or err:
        pe = append_error(params, err)
        if pe:
            log_msg['params'] = pe
    return log_msg
def _anonymize_file(dicom_file_in, dicom_file_out, fields_to_keep):
    """
    Anonymize a single dicom file.

    Builds a fresh FileDataset carrying only the requested fields; pixel
    data (tag 0x7fe0,0x0010) is replaced with random noise of the same
    shape and dtype.

    :param dicom_file_in: filepath for input file
    :param dicom_file_out: filepath for output file
    :param fields_to_keep: dicom tags to keep; a value of None means
        "copy from the input", any other value overrides the field
    """
    # Default meta_fields
    # Required fields according to reference
    meta_fields = ['MediaStorageSOPClassUID',
                   'MediaStorageSOPInstanceUID',
                   'ImplementationClassUID']
    # Load dicom_file_in
    dicom_in = compressed_dicom.read_file(dicom_file_in)
    # Create new dicom file
    # Set new file meta information
    file_meta = pydicom.dataset.Dataset()
    file_meta.TransferSyntaxUID = pydicom.uid.ExplicitVRLittleEndian
    for field_key in meta_fields:
        file_meta.add(dicom_in.file_meta.data_element(field_key))
    # Create the FileDataset instance (initially no data elements, but file_meta supplied)
    dicom_out = pydicom.dataset.FileDataset(dicom_file_out, {}, file_meta=file_meta, preamble=b'\0' * 128)
    # Copy transfer syntax
    dicom_out.is_little_endian = dicom_in.is_little_endian
    dicom_out.is_implicit_VR = dicom_in.is_implicit_VR
    # Add the data elements
    for (field_key, field_value) in iteritems(fields_to_keep):
        logging.info(field_key)
        if field_key == (0x7fe0, 0x0010):
            # anonimize the dicom pixeldata
            # NOTE(review): tostring() is deprecated in modern numpy
            # (tobytes() is the replacement) — confirm numpy version.
            random_data = numpy.random.randint(0, 255, dicom_in.pixel_array.shape).astype(dicom_in.pixel_array.dtype)
            dicom_out.PixelData = random_data.tostring()  # = byte array (see pydicom docs)
            # dicom_out.PixelData = dicom_in.pixel_array.tostring() # = byte array (see pydicom docs)
            # noinspection PyPep8Naming
            dicom_out[0x7fe0, 0x0010].VR = 'OB'
        elif field_value is None:
            # None means: copy the element from the input file if present.
            try:
                if isinstance(field_key, string_types):
                    if field_key in dicom_in:
                        dicom_out.add(dicom_in.data_element(field_key))
                else:
                    if dicom_in.get(field_key) is not None:
                        dicom_out.add(dicom_in[field_key])
            except KeyError:
                logging.info('Warning: %s not found' % field_key)
        else:
            # Explicit value overrides whatever the input file contains.
            setattr(dicom_out, field_key, field_value)
    # Save dicom_file_out
    # Make sure we have a directory
    if not os.path.exists(os.path.dirname(dicom_file_out)):
        logging.info('Decompressing files')
    # Save the file
    dicom_out.is_little_endian = True
    dicom_out.is_implicit_VR = False
    dicom_out.save_as(dicom_file_out, write_like_original=False) | 5,325,369 |
def colored_print(text, use_color=True, color_name='white'):
    """Print text, optionally wrapped in ANSI color codes.

    :param text: string to print.
    :param use_color: when False, print plain text (e.g. for piped output).
    :param color_name: foreground color name understood by color().
    """
    if use_color:
        print(color(text, fg=color_name))
    else:
        print(text) | 5,325,370 |
def make_input_signature(inputs, include_tensor_ranks_only,
                         encode_variables_by_resource_id):
    """Generates an input signature representation.
    Args:
    inputs: The function inputs that need to be formed into a signature
    include_tensor_ranks_only: If Tensors should be considered by rank
    encode_variables_by_resource_id: If Variables should be considered by
    resource id
    Returns:
    An object representing the input signature
    """
    # Thin wrapper over the C++ encoder exposed through pywrap_tfe.
    return pywrap_tfe.TFE_Py_EncodeArg(
        inputs, include_tensor_ranks_only, encode_variables_by_resource_id) | 5,325,371 |
def _activities_from_datasets_followed_by_user_query(
    user_id: str, limit: int
) -> QActivity:
    """Return a query for all activities from datasets that user_id follows."""
    # Get a list of the datasets that the user is following.
    follower_objects = model.UserFollowingDataset.followee_list(user_id)
    if not follower_objects:
        # Return a query with no results.
        return model.Session.query(Activity).filter(text("0=1"))
    # Union the per-dataset activity queries, each capped at `limit` rows
    # so one very active dataset cannot crowd out the others.
    return _activities_union_all(
        *[
            _activities_limit(
                _package_activity_query(follower.object_id), limit
            )
            for follower in follower_objects
        ]
    ) | 5,325,372 |
def getRowType(row):
    """Infer a type name for each cell of a row, in place.

    Each cell is replaced by one of: 'none', 'int', 'double', 'date',
    'string'. Note the input list is mutated and also returned.

    :param row: list of raw cell values.
    :return: the same list with cells replaced by type names.
    """
    d = row  # alias: the row is rewritten in place
    for col, data in enumerate(row):
        if isNone(data):
            d[col] = 'none'
            continue
        try:
            num = float(data)
        except (TypeError, ValueError):
            # Not numeric; try date, then fall back to string. The bare
            # except clauses here previously swallowed even
            # KeyboardInterrupt/SystemExit.
            try:
                toDate(data)
                d[col] = 'date'
            except Exception:
                d[col] = 'string'
        else:
            d[col] = 'int' if num.is_integer() else 'double'
    return d
def fetch_lawschool_gpa(subset="all", usecols=None, dropcols=None,
                        numeric_only=False, dropna=False):
    """Load the Law School GPA dataset
    Note:
        By default, the data is downloaded from tempeh. See
        https://github.com/microsoft/tempeh for details.
    Args:
        subset ({'train', 'test', or 'all'}, optional): Select the dataset to
            load: 'train' for the training set, 'test' for the test set, 'all'
            for both.
        usecols (single label or list-like, optional): Feature column(s) to
            keep. All others are dropped. Defaults to keeping all columns.
        dropcols (single label or list-like, optional): Feature column(s) to
            drop. Defaults to dropping none.
        numeric_only (bool): Drop all non-numeric feature columns.
        dropna (bool): Drop rows with NAs.
    Returns:
        namedtuple: Tuple containing X, y, and sample_weights for the Law School
        GPA dataset accessible by index or name.
    """
    if subset not in {'train', 'test', 'all'}:
        raise ValueError("subset must be either 'train', 'test', or 'all'; "
                         "cannot be {}".format(subset))
    # Avoid the mutable-default-argument pitfall: materialize fresh lists
    # here instead of sharing `[]` defaults across calls.
    usecols = [] if usecols is None else usecols
    dropcols = [] if dropcols is None else dropcols
    dataset = tc.datasets["lawschool_gpa"]()
    X_train, X_test = dataset.get_X(format=pd.DataFrame)
    y_train, y_test = dataset.get_y(format=pd.Series)
    A_train, A_test = dataset.get_sensitive_features(name='race',
                                                     format=pd.Series)
    all_train = pd.concat([X_train, y_train, A_train], axis=1)
    all_test = pd.concat([X_test, y_test, A_test], axis=1)
    if subset == "train":
        df = all_train
    elif subset == "test":
        df = all_test
    else:
        df = pd.concat([all_train, all_test], axis=0)
    return standardize_dataset(df, prot_attr=['race'], target='zfygpa',
                               usecols=usecols, dropcols=dropcols,
                               numeric_only=numeric_only, dropna=dropna)
def sync(obj_prim, obj: bpy.types.Object, mesh: bpy.types.Mesh = None, **kwargs):
    """ Creates pyrpr.Shape from obj.data:bpy.types.Mesh """
    if not mesh:
        mesh = obj.data
    log("sync", mesh, obj)
    # Extract vertex/index/normal/UV arrays from the Blender mesh.
    data = MeshData.init_from_mesh(mesh, obj=obj)
    if not data:
        # Empty or un-exportable mesh: nothing to create.
        return
    stage = obj_prim.GetStage()
    usd_mesh = UsdGeom.Mesh.Define(stage, obj_prim.GetPath().AppendChild(Tf.MakeValidIdentifier(mesh.name)))
    usd_mesh.CreateDoubleSidedAttr(True)
    usd_mesh.CreatePointsAttr(data.vertices)
    usd_mesh.CreateFaceVertexIndicesAttr(data.vertex_indices)
    usd_mesh.CreateFaceVertexCountsAttr(data.num_face_vertices)
    usd_mesh.CreateSubdivisionSchemeAttr(UsdGeom.Tokens.none)
    usd_mesh.CreateNormalsAttr(data.normals)
    usd_mesh.SetNormalsInterpolation(UsdGeom.Tokens.faceVarying)
    for name, uv_layer in data.uv_layers.items():
        uv_primvar = usd_mesh.CreatePrimvar("st",  # default name, later we'll use sdf_path(name)
                                            Sdf.ValueTypeNames.TexCoord2fArray,
                                            UsdGeom.Tokens.faceVarying)
        uv_primvar.Set(uv_layer[0])
        uv_primvar.SetIndices(Vt.IntArray.FromNumpy(uv_layer[1]))
        break  # currently we use only first UV layer
    _assign_materials(obj_prim, obj.original, usd_mesh) | 5,325,375 |
def test_ba_validation_right_size_invalid_number_8_digits_and_second_digit_equal_6_7_9():
    """Test if an invalid number is really invalid with 8 digits"""
    invalid_number = '61234559'
    # `is False` instead of `== False` (flake8 E712): the validator returns
    # a bool, and identity states the intent explicitly.
    assert ba.start(invalid_number) is False
def check_ref_exons(exon_seqs, mask_stops):
    """Check that the reference sequence is a valid ORF.

    The concatenated exons should start with ATG and end with a stop codon.
    In-frame stop codons are fatal unless mask_stops is True, in which case
    they are rewritten as NNN; TGA positions are additionally collected as
    possible selenocysteine (Sec) codons.

    :param exon_seqs: dict {exon_number: sequence}, numbered 0..n-1.
    :param mask_stops: if True, mask in-frame stops instead of aborting.
    :return: (exon dict with stops masked, set of candidate Sec codon indices)
    """
    sec_codons = set()  # in case there are TGA codons in the ref seq -> collect them
    gene_seq = "".join([exon_seqs[i] for i in range(len(exon_seqs))])
    codons = parts(gene_seq, n=3)  # split a seq of letters in chunks of len == 3
    if codons[0] != "ATG":
        eprint("Input is corrupted! Reference sequence should start with ATG!")
    elif codons[-1] not in STOPS:
        eprint("Input is corrupted! Reference sequence should end with a stop codon!")
    stop_codons = [(n, c) for n, c in enumerate(codons[:-1]) if c in STOPS]
    if len(stop_codons) == 0:  # no stop codons -> nothing else to do
        return exon_seqs, set()
    # there are stop codons in reference sequence:
    eprint("Warning! There are inframe stop codons!")
    for stop in stop_codons:
        eprint(f"Codon num {stop[0] + 1} - {stop[1]}")
        if mask_stops:
            codons[stop[0]] = "NNN"
        if stop[1] == "TGA":
            # maybe a sec codon
            sec_codons.add(stop[0])
    # Side-effecting conditional expressions replaced with a plain if block.
    if not mask_stops:
        eprint(">>>STOP_CODON>>>")
        die("Abort, there are inframe stop codons.", 0)
    # if stop codons in reference are allowed, then we need to mask them (rewrite as NNN)
    # otherwise CESAR will show an error
    safe_seq = "".join(codons)
    stop_masked = {}
    prev_index = 0
    for num, exon_seq in exon_seqs.items():
        exon_len = len(exon_seq)
        stop_masked[num] = safe_seq[prev_index: prev_index + exon_len]
        prev_index += exon_len
    return stop_masked, sec_codons
def campaign(args):
    """Repeatedly inject bitflips into system and then fast-forwards."""
    args = args.strip().split(" ")
    if len(args) < 2 or len(args) > 4:
        print("usage: campaign <iterations> <mtbf> [<address>] [<bytewidth>]")
        print("bytewidth defaults to 4 bytes if address specified")
        print("if <address> starts with reg, then register injections are performed instead")
        print("specify address as reg:X to specify register X for the register injection")
        print("if <address> is 'restart' or 'restart-later', then an undefined instruction is injected")
        return
    iterations = int(args[0])
    mtbf = parse_time(args[1])
    assert mtbf > 0
    is_reg = False
    is_restart = False
    address = None
    layout = None
    if args[2:]:
        rand_addr = False
        if args[2] == "restart":
            is_restart = True
        elif args[2] == "restart-later":
            is_restart = True
            layout = ExecutableLayout(gdb.objfiles())
        elif args[2].startswith("reg") and args[2][3:4] in (":", ""):
            is_reg = True
            address = args[2][4:]
        else:
            address = int(gdb.parse_and_eval(args[2]))
        bytewidth = int(args[3]) if args[3:] else 4
        # BUG FIX: the old validation compared `address < 0` unconditionally,
        # which raised NameError in restart modes (address never bound) and
        # TypeError in register mode (address is a string). Range-check only
        # a numeric memory address.
        if iterations < 1 or mtbf <= 0 or bytewidth < 1 or (
                isinstance(address, int) and address < 0):
            print("Invalid number for argument.")
            return
    else:
        rand_addr = True
        bytewidth = 1
    for i in range(iterations):
        if is_restart:
            if layout is None:
                inject_instant_restart()
            else:
                inject_spatial_restart(layout)
        elif is_reg:
            inject_reg_internal(address)
        else:
            if rand_addr:
                address = sample_address()
            inject_bitflip(address, bytewidth)
        # Fast-forward a geometrically sampled interval before the next fault.
        ns_to_failure = sample_geo(mtbf_to_rate(mtbf))
        step_ns(ns_to_failure)
def measure_fluxline_crosstalk(
        dev, target_qubit, crosstalk_qubits, amplitudes,
        crosstalk_qubits_amplitudes=None, phases=None,
        target_fluxpulse_length=500e-9, crosstalk_fluxpulse_length=None,
        skip_qb_freq_fits=False, n_cal_points_per_state=2,
        cal_states='auto', prep_params=None, label=None, upload=True,
        analyze=True):
    """
    Applies a flux pulse on the target qubit with various amplitudes.
    Measure the phase shift due to these pulses on the crosstalk qubits which
    are measured in a Ramsey setting and fluxed to a more sensitive frequency.
    Args:
        dev: The Device object used for the measurement
        target_qubit: the qubit to which a fluxpulse with varying amplitude
            is applied
        crosstalk_qubits: a list of qubits to do a Ramsey on.
        amplitudes: A list of flux pulse amplitudes to apply to the target qubit
        crosstalk_qubits_amplitudes: A dictionary from crosstalk qubit names
            to flux pulse amplitudes that are applied to them to increase their
            flux sensitivity. Missing amplitudes are set to 0.
        phases: An array of Ramsey phases in degrees.
        target_fluxpulse_length: length of the flux pulse on the target qubit.
            Default: 500 ns.
        crosstalk_fluxpulse_length: length of the flux pulses on the crosstalk
            qubits. Default: target_fluxpulse_length + 50 ns.
        n_cal_points_per_state: Number of calibration measurements per
            calibration state. Defaults to 2.
        cal_states:
            List of qubit states to use for calibration. Defaults to 'auto'.
        prep_params: Perparation parameters dictionary specifying the type
            of state preparation.
        label: Overwrite the default measuremnt label.
        upload: Whether the experimental sequence should be uploaded.
            Defaults to True.
        analyze: Whether the analysis will be run. Defaults to True.
    """
    # Fill in defaults that depend on other arguments.
    if phases is None:
        phases = np.linspace(0, 360, 3, endpoint=False)
    if crosstalk_fluxpulse_length is None:
        crosstalk_fluxpulse_length = target_fluxpulse_length + 50e-9
    if crosstalk_qubits_amplitudes is None:
        crosstalk_qubits_amplitudes = {}
    # Accept qubits passed either as names or as qubit objects.
    if isinstance(target_qubit, str):
        target_qubit = dev.get_qb(target_qubit)
    target_qubit_name = target_qubit.name
    crosstalk_qubits = [dev.get_qb(qb) if isinstance(qb, str) else qb
                        for qb in crosstalk_qubits]
    crosstalk_qubits_names = [qb.name for qb in crosstalk_qubits]
    MC = dev.instr_mc.get_instr()
    if label is None:
        label = f'fluxline_crosstalk_{target_qubit_name}_' + \
                ''.join(crosstalk_qubits_names)
    if prep_params is None:
        prep_params = dev.get_prep_params(crosstalk_qubits)
    # Dimension 0: Ramsey phase sweep; dimension 1: target flux amplitude.
    sweep_points = SweepPoints('phase', phases, 'deg', 'Ramsey phase')
    sweep_points.add_sweep_dimension()
    sweep_points.add_sweep_parameter('target_amp', amplitudes, 'V',
                                     'Target qubit flux pulse amplitude')
    exp_metadata = {}
    for qb in set(crosstalk_qubits) | {target_qubit}:
        qb.prepare(drive='timedomain')
    cal_states = CalibrationPoints.guess_cal_states(cal_states,
                                                    for_ef=False)
    cp = CalibrationPoints.multi_qubit(
        [qb.name for qb in crosstalk_qubits], cal_states,
        n_per_state=n_cal_points_per_state)
    operation_dict = dev.get_operation_dict()
    # We get sweep_vals for only one dimension since drive_cancellation_seq
    # turns 2D sweep points into 1D-SegmentHardSweep.
    # FIXME: in the future, this should rather be implemented via
    # sequence.compress_2D_sweep
    seq, sweep_vals = mqs.fluxline_crosstalk_seq(
        target_qubit_name, crosstalk_qubits_names,
        crosstalk_qubits_amplitudes, sweep_points, operation_dict,
        crosstalk_fluxpulse_length=crosstalk_fluxpulse_length,
        target_fluxpulse_length=target_fluxpulse_length,
        prep_params=prep_params, cal_points=cp, upload=False)
    [seq.repeat_ro(f"RO {qbn}", operation_dict)
     for qbn in crosstalk_qubits_names]
    sweep_func = awg_swf.SegmentHardSweep(
        sequence=seq, upload=upload,
        parameter_name='segment_index')
    MC.set_sweep_function(sweep_func)
    MC.set_sweep_points(sweep_vals)
    det_func = get_multiplexed_readout_detector_functions(
        crosstalk_qubits,
        nr_averages=max([qb.acq_averages() for qb in crosstalk_qubits])) \
        ['int_avg_det']
    MC.set_detector_function(det_func)
    # !!! Watch out with the call below. See docstring for this function
    # to see the assumptions it makes !!!
    meas_obj_sweep_points_map = sweep_points.get_meas_obj_sweep_points_map(
        [qb.name for qb in crosstalk_qubits])
    exp_metadata.update({
        'target_qubit_name': target_qubit_name,
        'crosstalk_qubits_names': crosstalk_qubits_names,
        'crosstalk_qubits_amplitudes': crosstalk_qubits_amplitudes,
        'target_fluxpulse_length': target_fluxpulse_length,
        'crosstalk_fluxpulse_length': crosstalk_fluxpulse_length,
        'skip_qb_freq_fits': skip_qb_freq_fits,
        'preparation_params': prep_params,
        'cal_points': repr(cp),
        'sweep_points': sweep_points,
        'meas_obj_sweep_points_map': meas_obj_sweep_points_map,
        'meas_obj_value_names_map':
            get_meas_obj_value_names_map(crosstalk_qubits, det_func),
        'rotate': len(cp.states) != 0,
        'data_to_fit': {qbn: 'pe' for qbn in crosstalk_qubits_names}
    })
    MC.run(label, exp_metadata=exp_metadata)
    if analyze:
        return tda.FluxlineCrosstalkAnalysis(
            qb_names=crosstalk_qubits_names, options_dict={
                'TwoD': True,
                'skip_qb_freq_fits': skip_qb_freq_fits,
            }) | 5,325,379 |
def resistancedistances(graph):
    """
    Compute pairwise resistance distances between all nodes of a graph.

    Args:
        graph: networkx graph
    Returns:
        Nested dict of pairwise resistance distances keyed by node labels,
        i.e. result[u][v] is the resistance distance between u and v.
    """
    node_labels = list(graph.nodes())
    # Moore-Penrose pseudo-inverse of the graph Laplacian.
    lap_pinv = np.linalg.pinv(nx.laplacian_matrix(graph))
    return {
        u: {
            v: lap_pinv[i, i] + lap_pinv[j, j] - lap_pinv[i, j] - lap_pinv[j, i]
            for j, v in enumerate(node_labels)
        }
        for i, u in enumerate(node_labels)
    }
def upd_token_exp(req_token, expt=60 * 15):
    """
    Refresh a token's expiry time in Redis.

    :param req_token: request token whose TTL should be renewed
    :param expt: expiry time in seconds (default 15 minutes)
    :return: None
    """
    r = RedisCtrl()
    user_info = r.get_one(REDIS_KEY_PRE_TOKEN + req_token)
    if user_info:
        # Re-store the value with a fresh TTL only if the token still exists.
        r.set_one(REDIS_KEY_PRE_TOKEN + req_token, user_info, expt=expt) | 5,325,381 |
def flatten_to_raster(data):
    """ Flatten numpy array of various dimensions to RGB raster image.
    :param data: numpy array of one of following sizes.
    1) H x W x C (color/gray image)
    2) N x Y x X x C (array of color/gray images)
    3) nY x nX x Y x X x C (2d array of color/gray images)
    (C has to be 1 or 3)
    E.g., C = 1
    <---------------W--------------->
    ------- ------- ------- -------
    ^ | | | | |
    | Y 1 | 2 | ... | nX |
    | | | | | |
    | ---X--- ------- ------- -------
    | | | | | |
    | | 2 | | | |
    | | | | | |
    H ------- ------- ------- -------
    | | | | | |
    | | ... | | | |
    | | | | | |
    | ------- ------- ------- -------
    | | | | | |
    | | nY | | | |
    v | | | | |
    ------- ------- ------- -------
    Input -> Output
    1) H x W x C -> H x W x C
    2) N x Y x X x C -> (nY*Y) x (nX*X) x C
    where nX & nY are factors of N such that we get as close to square grid as possible
    (with bias towards having more columns than rows so for 12 images we have nY x nX = 3 x 4 grid)
    3) nH x nW x H x W x C -> (nY*Y) x (nX*X) x C

    :return: (flattened_raster, nY, nX, n_states) — the raster plus the
        grid dimensions and the number of tiles represented.
    """
    n_dim = len(data.shape)
    if n_dim == 3:
        # Case 1: already a single image; pass through unchanged.
        H, W, C = data.shape
        nY, nX = 1, 1
        flattened = data
        n_states = nY * nX
    elif n_dim == 4:
        # Case 2: N x Y x X x C (array of color/gray images)
        image_grid = image_array_to_grid(data)
        nY, nX, Y, X, C = image_grid.shape
        flattened = image_grid_to_raster(image_grid)  # .transpose(1,0,2,3,4)
        n_states = len(data)
    elif n_dim == 5:
        # Case 3: already laid out as a 2-D grid of images.
        image_grid = data
        nY, nX, Y, X, C = image_grid.shape
        flattened = image_grid_to_raster(image_grid)
        n_states = nY * nX
    else:
        raise Exception("data dimension {} not supported!".format(n_dim))
    return flattened, nY, nX, n_states | 5,325,382 |
def VMObjectWalkPages(cmd_args=None, cmd_options={}):
    """ Print the resident pages contained in the provided object. If a vm_page_t is provided as well, we
        specifically look for this page, highlighting it in the output or noting if it was not found. For
        each page, we confirm that it points to the object. We also keep track of the number of pages we
        see and compare this to the object's resident page count field.
        Usage:
            vmobjectwalkpages <vm_object_t> : Walk and print all the pages for a given object (up to 4K pages by default)
            vmobjectwalkpages <vm_object_t> -C : list pages in compressor after processing resident pages
            vmobjectwalkpages <vm_object_t> -B : Walk and print all the pages for a given object (up to 4K pages by default), traversing the memq backwards
            vmobjectwalkpages <vm_object_t> -N : Walk and print all the pages for a given object, ignore the page limit
            vmobjectwalkpages <vm_object_t> -Q : Walk all pages for a given object, looking for known signs of corruption (i.e. q_state == VM_PAGE_IS_WIRED && wire_count == 0)
            vmobjectwalkpages <vm_object_t> -P <vm_page_t> : Walk all the pages for a given object, annotate the specified page in the output with ***
            vmobjectwalkpages <vm_object_t> -P <vm_page_t> -S : Walk all the pages for a given object, stopping when we find the specified page
            vmobjectwalkpages <vm_object_t> -O <offset> : Like -P, but looks for given offset
    """
    # NOTE: Python 2 syntax (lldb kernel-debugging macro).
    if (cmd_args == None or len(cmd_args) < 1):
        raise ArgumentError("Please specify at minimum a vm_object_t and optionally a vm_page_t")
    out_string = ""
    obj = kern.GetValueFromAddress(cmd_args[0], 'vm_object_t')
    page = 0
    if "-P" in cmd_options:
        page = kern.GetValueFromAddress(cmd_options['-P'], 'vm_page_t')
    off = -1
    if "-O" in cmd_options:
        off = kern.GetValueFromAddress(cmd_options['-O'], 'vm_offset_t')
    stop = 0
    if "-S" in cmd_options:
        if page == 0 and off < 0:
            raise ArgumentError("-S can only be passed when a page is specified with -P or -O")
        stop = 1
    walk_backwards = False
    if "-B" in cmd_options:
        walk_backwards = True
    quiet_mode = False
    if "-Q" in cmd_options:
        quiet_mode = True
    if not quiet_mode:
        print VMObjectWalkPages.header
    format_string = "{0: <#10d} of {1: <#10d} {2: <#020x} {3: <#020x} {4: <#020x} {5: <#010x} {6: <#05d}\t"
    first_bitfield_format_string = "{0: <#2d}:{1: <#1d}:{2: <#1d}:{3: <#1d}:{4: <#1d}:{5: <#1d}:{6: <#1d}:{7: <#1d}\t"
    second_bitfield_format_string = "{0: <#1d}:{1: <#1d}:{2: <#1d}:{3: <#1d}:{4: <#1d}:{5: <#1d}:{6: <#1d}:"
    second_bitfield_format_string += "{7: <#1d}:{8: <#1d}:{9: <#1d}:{10: <#1d}:{11: <#1d}:{12: <#1d}:"
    second_bitfield_format_string += "{13: <#1d}:{14: <#1d}:{15: <#1d}:{16: <#1d}:{17: <#1d}:{18: <#1d}:{19: <#1d}:"
    second_bitfield_format_string += "{20: <#1d}:{21: <#1d}:{22: <#1d}:{23: <#1d}:{24: <#1d}:{25: <#1d}:{26: <#1d}\n"
    limit = 4096 #arbitrary limit of number of pages to walk
    ignore_limit = 0
    if "-N" in cmd_options:
        ignore_limit = 1
    show_compressed = 0
    if "-C" in cmd_options:
        show_compressed = 1
    page_count = 0
    res_page_count = unsigned(obj.resident_page_count)
    page_found = False
    pages_seen = set()
    # Walk the object's resident-page queue, sanity-checking each page.
    for vmp in IterateQueue(obj.memq, "vm_page_t", "vmp_listq", walk_backwards, unpack_ptr_fn=_vm_page_unpack_ptr):
        page_count += 1
        out_string = ""
        if (page != 0 and not(page_found) and vmp == page):
            out_string += "******"
            page_found = True
        if (off > 0 and not(page_found) and vmp.vmp_offset == off):
            out_string += "******"
            page_found = True
        if page != 0 or off > 0 or quiet_mode:
            if (page_count % 1000) == 0:
                print "traversed %d pages ...\n" % (page_count)
        else:
            out_string += format_string.format(page_count, res_page_count, vmp, vmp.vmp_offset, _vm_page_unpack_ptr(vmp.vmp_listq.next), _vm_page_get_phys_page(vmp), vmp.vmp_wire_count)
            out_string += first_bitfield_format_string.format(vmp.vmp_q_state, vmp.vmp_in_background, vmp.vmp_on_backgroundq, vmp.vmp_gobbled, vmp.vmp_laundry, vmp.vmp_no_cache,
                                                              vmp.vmp_private, vmp.vmp_reference)
            if hasattr(vmp,'slid'):
                vmp_slid = vmp.slid
            else:
                vmp_slid = 0
            out_string += second_bitfield_format_string.format(vmp.vmp_busy, vmp.vmp_wanted, vmp.vmp_tabled, vmp.vmp_hashed, vmp.vmp_fictitious, vmp.vmp_clustered,
                                                               vmp.vmp_pmapped, vmp.vmp_xpmapped, vmp.vmp_wpmapped, vmp.vmp_free_when_done, vmp.vmp_absent,
                                                               vmp.vmp_error, vmp.vmp_dirty, vmp.vmp_cleaning, vmp.vmp_precious, vmp.vmp_overwriting,
                                                               vmp.vmp_restart, vmp.vmp_unusual, 0, 0,
                                                               vmp.vmp_cs_validated, vmp.vmp_cs_tainted, vmp.vmp_cs_nx, vmp.vmp_reusable, vmp.vmp_lopage, vmp_slid,
                                                               vmp.vmp_written_by_kernel)
        # Corruption checks: cycles, wrong back-pointer, bad wired state,
        # non-zero padding bits. Each aborts the walk with a diagnostic.
        if (vmp in pages_seen):
            print out_string + "cycle detected! we've seen vm_page_t: " + "{0: <#020x}".format(unsigned(vmp)) + " twice. stopping...\n"
            return
        if (_vm_page_unpack_ptr(vmp.vmp_object) != unsigned(obj)):
            print out_string + " vm_page_t: " + "{0: <#020x}".format(unsigned(vmp)) + " points to different vm_object_t: " + "{0: <#020x}".format(unsigned(_vm_page_unpack_ptr(vmp.vmp_object)))
            return
        if (vmp.vmp_q_state == VM_PAGE_IS_WIRED) and (vmp.vmp_wire_count == 0):
            print out_string + " page in wired state with wire_count of 0\n"
            print "vm_page_t: " + "{0: <#020x}".format(unsigned(vmp)) + "\n"
            print "stopping...\n"
            return
        if ((vmp.vmp_unused_page_bits != 0) or (vmp.vmp_unused_object_bits != 0)):
            print out_string + " unused bits not zero for vm_page_t: " + "{0: <#020x}".format(unsigned(vmp)) + " unused__pageq_bits: %d unused_object_bits : %d\n" % (vmp.vmp_unused_page_bits,
                                                                                                                                                                     vmp.vmp_unused_object_bits)
            print "stopping...\n"
            return
        pages_seen.add(vmp)
        # Disabled hash-bucket cross-check (kept for manual debugging).
        if False:
            hash_id = _calc_vm_page_hash(obj, vmp.vmp_offset)
            hash_page_list = kern.globals.vm_page_buckets[hash_id].page_list
            hash_page = _vm_page_unpack_ptr(hash_page_list)
            hash_page_t = 0
            while (hash_page != 0):
                hash_page_t = kern.GetValueFromAddress(hash_page, 'vm_page_t')
                if hash_page_t == vmp:
                    break
                hash_page = _vm_page_unpack_ptr(hash_page_t.vmp_next_m)
            if (unsigned(vmp) != unsigned(hash_page_t)):
                print out_string + "unable to find page: " + "{0: <#020x}".format(unsigned(vmp)) + " from object in kernel page bucket list\n"
                print lldb_run_command("vm_page_info %s 0x%x" % (cmd_args[0], unsigned(vmp.vmp_offset)))
                return
        if (page_count >= limit and not(ignore_limit)):
            print out_string + "Limit reached (%d pages), stopping..." % (limit)
            break
        print out_string
        if page_found and stop:
            print("Object reports resident page count of: %d we stopped after traversing %d and finding the requested page.\n" % (unsigned(obj.res_page_count), unsigned(page_count)))
            return
    if (page != 0):
        print("page found? : %s\n" % page_found)
    if (off > 0):
        print("page found? : %s\n" % page_found)
    print("Object reports resident page count of %d, we saw %d pages when we walked the resident list.\n" % (unsigned(obj.resident_page_count), unsigned(page_count)))
    # Optionally enumerate compressed slots held by the compressor pager.
    if show_compressed != 0 and obj.pager != 0 and unsigned(obj.pager.mo_pager_ops) == unsigned(addressof(kern.globals.compressor_pager_ops)):
        pager = Cast(obj.pager, 'compressor_pager *')
        chunks = pager.cpgr_num_slots / 128
        pagesize = kern.globals.page_size
        page_idx = 0
        while page_idx < pager.cpgr_num_slots:
            if chunks != 0:
                chunk = pager.cpgr_slots.cpgr_islots[page_idx / 128]
                slot = chunk[page_idx % 128]
            elif pager.cpgr_num_slots > 2:
                slot = pager.cpgr_slots.cpgr_dslots[page_idx]
            else:
                slot = pager.cpgr_slots.cpgr_eslots[page_idx]
            if slot != 0:
                print("compressed page for offset: %x slot %x\n" % ((page_idx * pagesize) - obj.paging_offset, slot))
            page_idx = page_idx + 1 | 5,325,383 |
def read_google(url, gid=0, **kwargs):
    """Read a published Google Sheet into a pandas DataFrame.

    Appends the CSV-export suffix to the sheet URL and delegates to
    :func:`pandas.read_csv`.

    :param url: base URL of the Google sheet, with or without a trailing '/'.
    :param gid: (optional) worksheet id to export; defaults to 0 (first
        sheet), which matches the previous hard-coded behavior.
    :param kwargs: extra keyword arguments forwarded to ``pd.read_csv``.
    :return: ``pandas.DataFrame`` with the sheet contents.
    """
    # endswith() also handles an empty url, where url[-1] would raise.
    if not url.endswith('/'):
        url += '/'
    return pd.read_csv(url + 'export?gid=%d&format=csv' % gid, **kwargs)
def ts_inspect_2d(target, *preds, start_date=None, freq=None):
    """Build a TSMetrics object for point predictions only.

    Converts the target and each prediction into the internal 2-D
    representation before constructing the metrics object.
    """
    def as_internal(series):
        # Shared conversion so target and predictions get identical handling.
        return xr_2d_factory(series, start_date=start_date, freq=freq)

    converted_preds = [as_internal(pred) for pred in preds]
    return TSMetrics(as_internal(target), *converted_preds)
def is_numeric_dtype(arr_or_dtype: List[Literal["b", "a"]]):
    """Type stub recorded from observed API usage (usage.seaborn: 2).

    Only the signature is meaningful; the body is intentionally empty.
    """
    ...
def adjust_age_groups(age_labels):
    """Merge consecutive pairs of age-range labels into wider ranges.

    For each pair of labels, keeps the first number of the first element and
    the last number of the second element, e.g. ``["0-4", "5-10"] -> "0-10"``.
    The final merged group is rewritten as an open-ended ``"N+"`` range based
    on its lower bound.

    Fixes two defects of the original implementation: an empty input raised
    IndexError, and an odd number of labels silently dropped the trailing
    label while mis-labelling the final "+" group.

    :param age_labels: list of ``"lo-hi"`` age-range strings.
    :return: list of merged labels; the last one always ends with ``"+"``,
        or ``[]`` for empty input.
    """
    if not age_labels:
        return []
    merged = []
    # Walk the labels two at a time; a dangling odd label forms its own group.
    for start in range(0, len(age_labels), 2):
        pair = age_labels[start:start + 2]
        lower = pair[0].split('-')[0]
        upper = pair[-1].split('-')[-1]
        merged.append(lower + '-' + upper)
    # The last age group is open-ended: keep only its lower bound plus "+".
    merged[-1] = merged[-1].split('-')[0] + "+"
    return merged
def tablize5(soup, hdr):
    """Render an h2 header's h3 children as a two-column HTML table.

    :param soup: [bs4.BeautifulSoup] document context
    :param hdr: [dict] header node to process
    """
    children = hdr['children']
    assert len(children) > 0
    tbl = soup.new_tag('table', border="1", style="max-width:75%")
    # Insert the table just before the first h3 so any content elements
    # sitting between the h2 and its structured children stay untouched.
    children[0]['tag'].insert_before(tbl)
    # NOTE: column widths are hard-wired to this formatting function
    tbl.append(soup.new_tag('col', width="30%"))
    for child in children:
        row = soup.new_tag('tr')
        tbl.append(row)
        detached = child['tag'].extract()
        # NOTE: this tag conversion is hard-wired to this formatting function
        heading = convert_tag(detached, 'strong')
        left_cell = soup.new_tag('td')
        left_cell.append(heading)
        left_cell.extend(child['content'])
        right_cell = soup.new_tag('td')
        row.extend([left_cell, right_cell])
def get_kni_ports():
    """Return the set of KNI port names currently configured on the host.

    A KNI port is a string of the form ``vEth0_%d`` where ``%d`` is the port
    index; names are scraped from ``ifconfig`` output.

    :return: set of port-name strings (possibly empty).
    """
    output = run_local_cmd('ifconfig | grep vEth0_ | cut -d\':\' -f1 ', get_output = True)
    # Set comprehension (instead of set([...])); drop the empty strings
    # produced by trailing newlines in the command output.
    return {port for port in output.split('\n') if port != ''}
def parse_log(file_abspath):
    """Parse warning and error info from TRNSYS generated log file.

    Parses warning and error count when simulation ends with errors.
    If simulation ends successfully, counts number of warnings and return
    successful completion message.

    Args:
        file_abspath: absolute path to result file.

    Returns:
        dict with keys 'Message', 'Warnings', 'Errors'.

    Raises:
        IOError: problem reading out_file
    """
    runsumdict = {}
    pat_stopped = re.compile(r'Simulation stopped with errors')
    pat_warn_total = re.compile(r'Total Warnings\s+:\s+(\d+)')
    pat_err_total = re.compile(r'Total Fatal Errors\s+:\s+(\d+)')
    pat_warn_line = re.compile(r'Warning at time')
    # Plain 'r' (universal newlines by default) replaces the legacy 'rU'
    # mode, which was removed in Python 3.11 and raised ValueError there.
    with open(file_abspath, 'r') as log_f:
        temp = log_f.read()
    match = pat_stopped.search(temp)
    if match:
        runsumdict['Message'] = match.group()
        match = pat_warn_total.search(temp)
        runsumdict['Warnings'] = int(match.group(1))
        match = pat_err_total.search(temp)
        runsumdict['Errors'] = int(match.group(1))
    else:
        runsumdict['Message'] = "Simulation ended successfully"
        # On success the log has no totals line: count warning lines instead.
        match = pat_warn_line.findall(temp)
        runsumdict['Warnings'] = len(match)
        runsumdict['Errors'] = 0
    return runsumdict
def test_close_private_issue(mock_mr):
    """Test issue state and API request that is sent to GitHub."""
    # Assign the stubbed status code; the original used `==`, a no-op
    # comparison that never configured the mock.
    mock_mr.return_value.status_code = 200
    json_event, signature = event_data('private_issue_opened.json')
    payload = json.loads(json_event)
    issue = WebHookIssue.from_dict(payload)
    issue.close_private_issue()
    method, uri, data = mock_mr.call_args[0]
    # make sure our issue state is what we expect
    assert issue.state == 'closed'
    # make sure we sent a patch with the right data to GitHub
    assert method == 'patch'
    assert 'state' in data
def extract_publish_info_from_issue(
    issue: "Issue", publish_type: PublishType
) -> Union[PublishInfo, MyValidationError]:
    """Extract the data required for publishing from an issue.

    Returns the parsed publish info for the given publish type, or the
    validation error object itself when the issue contents fail validation.
    """
    # Dispatch table instead of an if/elif chain; anything that is not a
    # bot or plugin is treated as an adapter, as before.
    info_classes = {
        PublishType.BOT: BotPublishInfo,
        PublishType.PLUGIN: PluginPublishInfo,
    }
    info_cls = info_classes.get(publish_type, AdapterPublishInfo)
    try:
        return info_cls.from_issue(issue)
    except MyValidationError as e:
        return e
def points_inside_poly(points, all_verts):
    """Return a boolean array flagging which points lie inside a polygon.

    :param points: sequence of (x, y) coordinates to test.
    :param all_verts: polygon vertices, in order.
    """
    from matplotlib.path import Path
    polygon = Path(all_verts)
    return polygon.contains_points(points)
def write_links():
    """Add or update a cell in each notebook with a Google Colab link.

    For every notebook yielded by ``iter_links``: if the first cell already
    starts with the link marker comment, its source is replaced with the
    fresh link; otherwise a new markdown cell holding the link is inserted
    at the top. The notebook is then written back in place.
    """
    for nb_name, link in iter_links():
        notebook = nbformat.read(nb_name, as_version=4)
        nb_file = os.path.basename(nb_name)
        first_cell = notebook.cells[0]
        # Inline check replaces the original lambda-assigned predicate (E731).
        if first_cell.source.startswith(LINK_COMMENT):
            print("Amending link for {0}".format(nb_file))
            first_cell.source = link
        else:
            print("Inserting link for {0}".format(nb_file))
            notebook.cells.insert(0, new_markdown_cell(source=link))
        nbformat.write(notebook, nb_name)
async def test_properties_v3(event_loop, v3_server):
    """Test that v3 sensor properties are created properly."""
    async with v3_server:
        async with aiohttp.ClientSession(loop=event_loop) as websession:
            [system] = await get_systems(TEST_EMAIL, TEST_PASSWORD, websession)

            # The entry sensor should be healthy, online, and idle.
            entry = system.sensors['825']
            assert not entry.error
            assert not entry.low_battery
            assert not entry.offline
            assert not entry.settings['instantTrigger']
            assert not entry.triggered

            siren_sensor = system.sensors['236']
            assert not siren_sensor.triggered

            temp_sensor = system.sensors['320']
            assert temp_sensor.temperature == 67

            # Accessing the temperature attribute of a non-temperature
            # sensor must raise:
            with pytest.raises(ValueError):
                assert siren_sensor.temperature == 42
def exploration_function(q_space, x_space, index_, action_space_n, k):
    """Return the optimistic (exploration-bonus) value for a state index.

    Each action's Q-value is boosted by ``k / (1 + visit_count)`` taken from
    ``x_space``; the maximum boosted value over all actions is returned
    (negative infinity when the action set is empty).
    """
    boosted = (
        q_space[index_][action] + k / (1 + x_space[index_][action])
        for action in range(action_space_n)
    )
    return max(boosted, default=float('-inf'))
def remove_docker_image(repo: str, tag: str, dc: DockerClient):
    """Remove a docker image, first waiting out any dependent containers."""
    image_name = f"{repo}:{tag}"
    # Best-effort wait for every container based on this image to disappear;
    # wait errors are swallowed by the suppressing context manager.
    dependants = dc.containers.list(filters=dict(ancestor=image_name))
    for cont in dependants:
        with suppress_docker_wait_error():
            cont.wait(condition="removed", timeout=30)
    # Image deletion can race with the daemon; retry a few times.
    retry_call(
        dc.images.remove,
        fargs=[image_name],
        fkwargs={"force": True},
        tries=5,
    )
def get_vectors(model_dm, model_dbow):
    """Turn the trained doc2vec models into stacked feature vectors.

    Relies on the module-level ``x_train``, ``x_test``, ``size`` and
    ``getVecs`` — TODO confirm they are defined before this is called.

    :param model_dm: trained distributed-memory model
    :param model_dbow: trained distributed-bag-of-words model
    :return: (train_vecs, test_vecs), each with the DM and DBOW document
        vectors concatenated horizontally
    """
    def stacked(dataset):
        # Concatenate the DM and DBOW document vectors side by side.
        dm_vecs = getVecs(model_dm, dataset, size)
        dbow_vecs = getVecs(model_dbow, dataset, size)
        return np.hstack((dm_vecs, dbow_vecs))

    return stacked(x_train), stacked(x_test)
def test_profile_queue(db, test_profile, test_project_data):
    """A queue with test data, associated with the first test profile."""
    # Build the queue via the shared helper; `db` is only needed as a fixture
    # dependency so the database exists before the queue is added.
    queue = add_queue(test_project_data, TEST_QUEUE_LEN, profile=test_profile)
    return queue
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.