content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def erratic_leveling(target_level: int) -> float:
    """
    Non-trivial calculation of experience to next level for an erratic leveling curve.

    Args:
        target_level (int): the level to reach (at most 100).

    Returns:
        The amount of experience to reach this level from the ground up (from
        experience 0), according to an erratic leveling curve. The result is a
        float: the curve divisions do not generally land on whole numbers.

    Raises:
        ValueError: if target_level is above the highest reachable level (100).
    """
    if target_level <= 50:
        return (target_level ** 3) * (100 - target_level) / 50
    elif 51 <= target_level <= 68:
        return (target_level ** 3) * (150 - target_level) / 100
    elif 69 <= target_level <= 98:
        # In this band the multiplier depends on target_level modulo 3.
        if target_level % 3 == 0:
            return round((target_level ** 3) * (1.274 - target_level / 150), 3)
        elif target_level % 3 == 1:
            return round((target_level ** 3) * (1.274 - target_level / 150 - 0.008), 3)
        else:
            return round((target_level ** 3) * (1.274 - target_level / 150 - 0.014), 3)
    elif 99 <= target_level <= 100:
        return (target_level ** 3) * (160 - target_level) / 100
    else:
        # Fixed: the original referenced a module-level `logger` that is not
        # defined in this file, so this branch raised NameError before the
        # intended ValueError could be raised.
        import logging
        logging.getLogger(__name__).error(
            f"An invalid target level was provided: {target_level} which is higher than "
            f"the highest reachable level (100)."
        )
        raise ValueError("Invalid target level: too high.")
import re
def get_puppetfile_tags(puppetfile):
    """
    Obtain VCS URLs and version references (tags) from a Puppetfile.

    :param puppetfile: path of the Puppetfile to parse
    :return: tuple(list, list) of (VCS URLs, refs/tags) when the two counts
        match, otherwise None (the file pins some modules without a ref, or
        vice versa, so the pairing would be ambiguous).
    """
    regex_vcs = re.compile(r"^:(git|svn)\s+=>\s+['\"](.+)['\"],", re.I)
    regex_tag = re.compile(r"^:(ref|tag|commit|branch)\s+=>\s+['\"](.+)['\"],?", re.I)
    vcss = []
    tags = []
    with open(puppetfile) as f:
        for line in f:
            stripped = line.strip()
            match_vcs = regex_vcs.match(stripped)
            if match_vcs:
                vcss.append(match_vcs.group(2))
            match_tag = regex_tag.match(stripped)
            if match_tag:
                tags.append(match_tag.group(2))
    if len(vcss) != len(tags):
        # Fixed: was an implicit fall-through returning None; make the
        # contract explicit instead of silently dropping off the end.
        return None
    return vcss, tags
def inf_is_wide_high_byte_first(*args):
    """
    inf_is_wide_high_byte_first() -> bool
    """
    # Thin wrapper: forward directly to the underlying IDA SDK binding.
    result = _ida_ida.inf_is_wide_high_byte_first(*args)
    return result
def rewrite_and_sanitize_link(link_header):
    """Sanitize a link header first, then rewrite its links."""
    sanitized = sanitize_link(link_header)
    return rewrite_links(sanitized)
def user_info():
    """
    Render the user's personal profile page.

    Anonymous visitors (no user in the request context) are redirected home.
    :return: rendered profile template, or a redirect response
    """
    user = g.user
    if not user:
        return redirect("/")
    return render_template("news/user.html", data={"user": user.to_dict()})
def cartesian2polar(state: CartesianState, state_goal : CartesianState) -> PolarState:
    """Convert a Cartesian robot state into polar coordinates relative to a goal.

    rho   : distance between the robot and the goal position,
            sqrt((x*-x)^2 + (y*-y)^2)
    alpha : heading of the robot relative to the angle to the goal,
            theta - atan2((y*-y), (x*-x))
    beta  : goal orientation relative to the angle to the goal,
            theta* - atan2((y*-y), (x*-x))

    Inverse of polar2cartesian for the same state_goal.
    """
    x, y, theta = state
    x_goal, y_goal, theta_goal = state_goal
    dx = x_goal - x
    dy = y_goal - y
    # Reparameterize the displacement into range and bearing.
    rho = np.hypot(dx, dy)
    phi = np.arctan2(dy, dx)
    alpha = angdiff(theta, phi)
    beta = angdiff(theta_goal, phi)
    return np.array((rho, alpha, beta))
def organize_array_by_rows(unformatted_array, num_cols):
    """Reshape a flat sequence into a list of rows with num_cols items each.

    Trailing elements that do not fill a complete row are dropped.
    """
    num_rows = len(unformatted_array) // num_cols
    return [unformatted_array[row * num_cols:(row + 1) * num_cols]
            for row in range(num_rows)]
from typing import Union
from typing import Literal
from typing import Sequence
def group_abundance(
    adata: AnnData,
    groupby: str,
    target_col: str = "has_ir",
    *,
    fraction: Union[None, str, bool] = None,
    sort: Union[Literal["count", "alphabetical"], Sequence[str]] = "count",
) -> pd.DataFrame:
    """Summarizes the number/fraction of cells of a certain category by a certain group.

    NaN values are ignored.

    Parameters
    ----------
    adata
        AnnData object to work on.
    groupby
        Group by this column from `obs` (e.g. sample, or group).
    target_col
        Categorical variable from `obs` according to which the
        abundances/fractions will be computed. Defaults to "has_ir", which
        simply counts cells with a detected :term:`IR` per group.
    fraction
        If `True`, report fractions relative to the `groupby` column instead
        of absolute counts. Alternatively, pass a column name to normalize by
        that column's values.
    sort
        How to arrange the dataframe columns: by category count ("count",
        the default), "alphabetical", or an explicit list of column names
        (which also subsets the result to those categories).

    Returns
    -------
    Data frame with the number (or fraction) of cells per group.
    """
    obs = adata.obs
    if target_col not in obs.columns:
        raise ValueError("`target_col` not found in obs`")
    return _group_abundance(
        obs, groupby, target_col=target_col, fraction=fraction, sort=sort
    )
def infer(model, text_sequences, input_lengths):
    """
    An inference hook for pretrained synthesizers.

    Arguments
    ---------
    model: Tacotron2
        the tacotron model
    text_sequences: torch.Tensor
        encoded text sequences
    input_lengths: torch.Tensor
        input lengths

    Returns
    -------
    result: tuple
        (mel_outputs_postnet, mel_lengths, alignments) - the exact
        model output
    """
    # Delegate straight to the model's own inference entry point.
    result = model.infer(text_sequences, input_lengths)
    return result
def build_info(image, spack_version):
    """Returns the name of the build image and its tag.

    Args:
        image (str): image to be used at run-time, of the form
            <image_name>:<image_tag>, e.g. "ubuntu:18.04"
        spack_version (str): version of Spack that we want to use to build

    Returns:
        A tuple (image_name, image_tag) for the build image, or (None, None)
        when the image has no associated build image.
    """
    # A wrong image key should already have been caught by the JSON schema,
    # so no error handling here.
    image_config = data()["images"][image]
    build_image = image_config.get('build')
    if build_image:
        # Translate version from git to docker if necessary.
        tag = image_config['build_tags'].get(spack_version, spack_version)
        return build_image, tag
    return None, None
def _get_data_attr(data, attr):
    """Return the given field of a Data object (accepts an id or a hydrated dict)."""
    # In the expression engine a `Data` object's id is hydrated as `__id`.
    data_id = data["__id"] if isinstance(data, dict) else data
    return getattr(Data.objects.get(id=data_id), attr)
def get_user_project(user, dds_project_id):
    """
    Get a single Duke DS Project for a user.

    :param user: User who has DukeDS credentials
    :param dds_project_id: str: duke data service project id
    :return: DDSProject: project details
    :raises WrappedDataServiceException: when the data service reports an error
    """
    try:
        remote_store = get_remote_store(user)
        response = remote_store.data_service.get_project_by_id(dds_project_id)
        return DDSProject(response.json())
    except DataServiceError as dse:
        # Re-wrap so callers only need to handle the project-level exception.
        raise WrappedDataServiceException(dse)
def _event_split(elist):
    """Split an event list into a dict keyed by supported roxar event type.

    Events of unsupported types are dropped; every supported type maps to a
    (possibly empty) list.
    """
    supported = (roxar.EventType.WLIMRATE,
                 roxar.EventType.WLIMPRES,
                 roxar.EventType.WLIMRATIO,
                 roxar.EventType.WHISTRATE,
                 roxar.EventType.WHISTPRES,
                 roxar.EventType.WCONTROL,
                 roxar.EventType.WTYPE,
                 roxar.EventType.WSEGMOD,
                 roxar.EventType.WSEGSEG,
                 roxar.EventType.GCONTROL,
                 roxar.EventType.GMEMBER,
                 roxar.EventType.GLIMRATE,
                 roxar.EventType.GLIMPRES,
                 roxar.EventType.GLIMRATIO,
                 roxar.EventType.PERF,
                 roxar.EventType.CPERF,
                 roxar.EventType.SQUEEZE,
                 roxar.EventType.TUBING)
    eventdict = {etype: [] for etype in supported}
    for event in elist:
        bucket = eventdict.get(event.type)
        if bucket is not None:
            bucket.append(event)
    return eventdict
import sys
def query_yes_no(question, default="yes"):
    """Ask a yes/no question via raw_input() and return their answer.

    "question" is a string that is presented to the user.
    "default" is the presumed answer if the user just hits <Enter>.
    It must be "yes" (the default), "no" or None (meaning
    an answer is required of the user).

    The "answer" return value is True for "yes" or False for "no".
    """
    valid = {"yes": True, "y": True, "ye": True,
             "no": False, "n": False}
    try:
        prompt = {None: " [y/n] ", "yes": " [Y/n] ", "no": " [y/N] "}[default]
    except (KeyError, TypeError):
        raise ValueError("invalid default answer: '%s'" % default)
    while True:
        sys.stdout.write(question + prompt)
        # Python 2 compatibility: raw_input is the Py2 spelling of input.
        if sys.version_info < (3, 0):
            choice = raw_input().lower()
        else:
            choice = input().lower()
        if default is not None and choice == '':
            return valid[default]
        if choice in valid:
            return valid[choice]
        sys.stdout.write("Please respond with 'yes' or 'no' "
                         "(or 'y' or 'n').\n")
import random
import re
def generate_reply(utt, dais):
    """Generate a reply task for the given utterance and DAIs list.

    Builds a DataLine whose `utt` is the deabstracted utterance and whose
    `da` is a comma-separated `slot=value` string describing the system reply:
    either a concrete ride offer (inform/confirm DAIs, with any unspecified
    slots filled in randomly) or the additional information that was requested.

    NOTE(review): Python 2 code (`unicode`, `dict.iteritems`); relies on
    module-level DataLine, deabstract, BUS_LINES, STOPS and random_hour —
    none of which are defined in this file; confirm against the original module.
    """
    ret = DataLine(dat='reply', abstr_utt=utt, abstr_da='&'.join([unicode(dai) for dai in dais]))
    utt, dais = deabstract(utt, dais)
    # offer a ride (meeting the specifications in dais)
    if all([dai.dat in ['inform', 'confirm'] for dai in dais]):
        info = {dai.name: dai.value for dai in dais}
        # Fill in a random vehicle and a matching line if not specified.
        if 'vehicle' not in info:
            info['vehicle'] = random.choice(['subway', 'bus'])
        if info['vehicle'] == 'subway':
            info['line'] = random.choice('1234567ABCDEFGJLMNQRZ')
        else:
            info['line'] = 'M' + str(random.choice(BUS_LINES))
        # Infer am/pm from the requested time where possible, otherwise random.
        if 'ampm' not in info:
            if 'time' in info:
                time_val, _ = info['time'].split(':')
                time_val = int(time_val)
                if time_val < 7 or time_val == 12:
                    info['ampm'] = 'pm'
        if 'ampm' not in info:
            info['ampm'] = random.choice(['am', 'pm'])
        # Pick a departure time consistent with the requested time (or random).
        if 'departure_time' not in info:
            if 'time' in info:
                info['departure_time'] = info['time']
                del info['time']
            elif info['ampm'] == 'am':
                info['departure_time'] = str(random.choice(range(7, 12))) + ':00'
            else:
                info['departure_time'] = str(random.choice(range(1, 13))) + ':00'
        # Pick origin/destination stops; the destination becomes 'direction'.
        if 'from_stop' not in info:
            info['from_stop'] = random.choice(STOPS)
        if 'to_stop' not in info:
            remaining_stops = list(STOPS)
            remaining_stops.remove(info['from_stop'])
            info['to_stop'] = random.choice(remaining_stops)
        info['direction'] = info['to_stop']
        del info['to_stop']
        # Randomize the minutes of an on-the-hour departure, then fold am/pm in.
        info['departure_time'] = re.sub(r'00$', '%02d' % random.choice(range(20)),
                                        info['departure_time'])
        info['departure_time'] += info['ampm']
        del info['ampm']
        # Drop relative/alternative slots that the reply should not repeat.
        for slot_name in ['departure_time_rel', 'time_rel',
                          'alternative', 'arrival_time', 'arrival_time_rel']:
            if slot_name in info:
                del info[slot_name]
        dais_str = [slot + '=' + value for slot, value in info.iteritems()]
        random.shuffle(dais_str)
        dais_str = ', '.join(dais_str)
    # offer additional information
    else:
        dais_str = ''
        if any([dai.name == 'distance' and dai.dat == 'request' for dai in dais]):
            dais_str += ', distance=%3.1f miles' % (random.random() * 12)
        if any([dai.name == 'num_transfers' and dai.dat == 'request' for dai in dais]):
            dais_str += ', num_transfers=%d' % random.choice(range(0, 3))
        if any([dai.name == 'duration' and dai.dat == 'request' for dai in dais]):
            dais_str += ', duration=%d minutes' % random.choice(range(10, 80))
        if any([dai.name == 'departure_time' and dai.dat == 'request' for dai in dais]):
            hr, ampm = random_hour()
            min = random.choice(range(60))
            dais_str += ', departure_time=%d:%02d%s' % (hr, min, ampm)
        if any([dai.name == 'arrival_time' and dai.dat == 'request' for dai in dais]):  # arrival_time_rel does not occur
            hr, ampm = random_hour()
            min = random.choice(range(60))
            dais_str += ', arrival_time=%d:%02d%s' % (hr, min, ampm)
        if dais_str == '':
            raise NotImplementedError('Cannot generate a reply for: ' + unicode(dais))
        # Strip the leading ', ' separator.
        dais_str = dais_str[2:]
    ret.utt = utt
    ret.da = dais_str
    return ret
def create_concept_graphs(example_indices, grakn_session):
    """
    Build an in-memory graph for each example, with an example_id as an
    anchor for each example subgraph.

    Args:
        example_indices: values used to anchor the subgraph queries within
            the entire knowledge graph
        grakn_session: Grakn Session

    Returns:
        In-memory graphs of Grakn subgraphs (one per example id; the id is
        stored as the graph's name)
    """
    graphs = []
    for example_id in example_indices:
        print(f'Creating graph for example {example_id}')
        graph_query_handles = get_query_handles(example_id)
        with grakn_session.transaction().read() as tx:
            # Build a graph from the queries, samplers, and query graphs;
            # inference is always enabled.
            graph = build_graph_from_queries(graph_query_handles, tx, infer=True)
            obfuscate_labels(graph, TYPES_AND_ROLES_TO_OBFUSCATE)
            graph.name = example_id
            graphs.append(graph)
    return graphs
def GetChange(host, change):
    """Queries a Gerrit server for information about a single change."""
    # REST endpoint: /changes/<change-id>
    return _SendGerritJsonRequest(host, 'changes/%s' % change)
import os
import time
def train_net(logger, dims=20, deep=True, conv_channel=32, init="glorot_uniform", fast=False, num_iterations=20,
              visual_name="", lr_start=1e-3, LR_decay=0.95, size=1600, input_name="new_eval", N_Cls=10,
              bn=True, batch_size=32, input=None, use_sample_weights=False, mins=None, maxs=None,
              use_jaccard_loss=False):
    """
    Trains a U-Net simultaneously for all ten classes.

    Data is loaded from ../data/{x,y}_{trn,eval}_<input_name>.npy, min/max
    normalized with `mins`/`maxs` and mean-centered, then trained for
    `num_iterations` single-epoch fit calls with multiplicative LR decay.
    Weights and per-iteration diagnostics are written under ../weights and
    via the plotting helpers.

    Args:
        logger: logger receiving the training diagnostics.
        dims: number of input channels.
        deep, conv_channel, init, bn, use_jaccard_loss, use_sample_weights,
            N_Cls: forwarded to get_unet to build the model.
        fast: if True, train/evaluate on small slices of the eval set only
            (quick smoke-run mode).
        num_iterations: number of 1-epoch training iterations.
        visual_name: tag used in saved weight/plot/means file names.
        lr_start, LR_decay: initial learning rate and per-iteration
            multiplicative decay factor.
        size: input size; only logged, not otherwise used here.
        input_name: suffix selecting which .npy data files to load.
        input: optional weights file name to load before training
            (NOTE: shadows the `input` builtin; kept for interface stability).
        mins, maxs: per-channel minima/maxima for normalization.

    Returns:
        (model, avg_score, trs, avg_scores): the trained model, the last
        iteration's average Jaccard score, its thresholds, and the score
        history over iterations.
    """
    print("start train net")
    model = get_unet(lr=lr_start, deep=deep, dims=dims, conv_channel=conv_channel, bn=bn,
                     use_jaccard_loss=use_jaccard_loss,
                     use_sample_weights=use_sample_weights, init=init, N_Cls=N_Cls)
    if input is not None:
        model.load_weights('weights/{}'.format(input))
        print("Loaded {}".format(input))
        logger.info("Loaded {}".format(input))
    # Log the run configuration.
    logger.info("Channel: {}".format(dims))
    logger.info("Inputsize: {}".format(size))
    logger.info("conv_channel: {}".format(conv_channel))
    if bn:
        logger.info("Batch Normalization: YES")
    else:
        logger.info("Batch Normalization: NO")
    logger.info("Batchsize: {}".format(batch_size))
    logger.info("Start LR: {}".format(lr_start))
    logger.info("Multiplicative LR Decay: {}".format(LR_decay))
    print("Model has {} Parameters".format(model.count_params()))
    logger.info("Model has {} Parameters".format(model.count_params()))
    x_val = np.load('../data/x_eval_{}.npy'.format(input_name))
    y_val = np.load('../data/y_eval_{}.npy'.format(input_name))
    def unison_shuffled_copies(a, b):
        # Shuffle two arrays with the same random permutation.
        assert len(a) == len(b)
        p = np.random.permutation(len(a))
        return a[p], b[p]
    print("-------------------------------------------------------------------------------------")
    logger.info("-------------------------------------------------------------------------------------")
    # Report per-class pixel frequency in the eval set.
    for z in range(N_Cls):
        print("{:.4f}% Class {} in eval set".format(
            100 * y_val[:,z].sum() / (y_val.shape[0] * y_val.shape[2] * y_val.shape[3]), class_list[z]))
        logger.info("{:.4f}% Class {} in eval set".format(
            100 * y_val[:,z].sum() / (y_val.shape[0] * y_val.shape[2] * y_val.shape[3]), class_list[z]))
    if fast:
        # Smoke-run mode: carve a small train/eval split out of the eval data.
        x_val, y_val = unison_shuffled_copies(x_val, y_val)
        x_trn = x_val[:200]
        y_trn = y_val[:200]
        x_val = x_val[200:300]
        y_val = y_val[200:300]
    else:
        x_trn = np.load('../data/x_trn_{}.npy'.format(input_name))
        y_trn = np.load('../data/y_trn_{}.npy'.format(input_name))
        x_trn, y_trn = unison_shuffled_copies(x_trn, y_trn)
    # Report per-class pixel frequency in the training set (160x160 tiles).
    for z in range(N_Cls):
        print("{:.4f}% Class {} in training set".format(100*y_trn[:,z].sum()/(y_trn.shape[0]*160*160),
                                                        class_list[z]))
        logger.info("{:.4f}% Class {} in training set".format(100*y_trn[:,z].sum()/(y_trn.shape[0]*160*160),
                                                              class_list[z]))
    class LossHistory(Callback):
        # Records per-batch loss/accuracy during each fit call.
        def on_train_begin(self, logs={}):
            self.losses = []
            self.accs = []
        def on_batch_end(self, batch, logs={}):
            self.losses.append(logs.get('loss'))
            self.accs.append(logs.get('acc'))
    history = LossHistory()
    avg_scores, trs = [], []
    ind_scores_over_time = {}
    for z in range(N_Cls):
        ind_scores_over_time[z] = []
    loss_train = np.zeros(0)
    acc_train = np.zeros(0)
    loss_eval = np.zeros(0)
    acc_eval = np.zeros(0)
    loss_eval_once = np.zeros(0)
    def min_max_normalize(bands, mins, maxs):
        # Scale each channel into [0, 1] using the provided minima/maxima,
        # clipping values that fall outside the range.
        out = np.zeros_like(bands).astype(np.float32)
        n = bands.shape[1]
        for i in range(n):
            a = 0
            b = 1
            c = mins[i]
            d = maxs[i]
            t = a + (bands[:, i] - c) * (b - a) / (d - c)
            t[t < a] = a
            t[t > b] = b
            out[:, i] = t
        return out.astype(np.float32)
    # Normalization: Scale with Min/Max
    x_trn = min_max_normalize(x_trn, mins, maxs)
    x_val = min_max_normalize(x_val, mins, maxs)
    # Center to zero mean and save means for later use
    means = x_trn.mean(axis=(0,2,3))
    np.save("../data/means_{}".format(visual_name), means)
    for k in range(dims):
        x_trn[:,k] -= means[k]
        x_val[:,k] -= means[k]
        print(x_trn[:,k].mean())
    os.makedirs("../weights", exist_ok=True)
    model_checkpoint = ModelCheckpoint('../weights/unet_10classes.hdf5', monitor='loss', save_best_only=True)
    if use_sample_weights:
        # Flatten the spatial dims so Keras can apply per-pixel sample weights;
        # each class is weighted by sqrt(background/class pixel-count ratio).
        y_trn = np.transpose(y_trn, (0,2,3,1))
        y_val = np.transpose(y_val, (0,2,3,1))
        y_trn = y_trn.reshape((y_trn.shape[0], y_trn.shape[1]*y_trn.shape[2], N_Cls))
        y_val = y_val.reshape((y_val.shape[0], y_val.shape[1]*y_val.shape[2], N_Cls))
        count_classes = []
        sum_unequal_zero = 0
        for j in range(N_Cls):
            count_non = np.count_nonzero(y_trn[:,:,j])
            count_classes.append(count_non)
            sum_unequal_zero += count_non
        count_zeros = y_trn.shape[0]*y_trn.shape[1]*N_Cls - sum_unequal_zero
        count_classes = [count_zeros*1.0/val for val in count_classes]
        sample_weights = np.ones((y_trn.shape[0], y_trn.shape[1]))
        for j in range(N_Cls):
            sample_weights[y_trn[:,:,j] == 1] = count_classes[j]**0.5
            print("{} has weight {}".format(class_list[j], count_classes[j]**0.5))
            logger.info("{} has weight {}".format(class_list[j], count_classes[j]** 0.5))
    for iteration in range(num_iterations):
        print("ITERATION: {}".format(iteration))
        logger.info("ITERATION: {}".format(iteration))
        start = time.time()
        if LR_decay:
            if iteration > 0:
                # multiplicative learning rate decay
                # (Theano-backend optimizer API: lr is a shared variable)
                new_LR = float(model.optimizer.lr.get_value() * LR_decay)
                model.optimizer.lr.set_value(new_LR)
        print("LR: {}".format(model.optimizer.lr.get_value()))
        print("-------------------------------------------------------------------------------------")
        logger.info("LR: {}".format(model.optimizer.lr.get_value()))
        logger.info("-------------------------------------------------------------------------------------")
        if use_sample_weights:
            model.fit(x_trn, y_trn, batch_size=batch_size, nb_epoch=1, verbose=1,
                      shuffle=True, validation_data=(x_val, y_val), callbacks=[history, model_checkpoint],
                      sample_weight=sample_weights)
        else:
            model.fit(x_trn, y_trn, batch_size=batch_size, nb_epoch=1, verbose=1, shuffle=True,
                      validation_data=(x_val, y_val), callbacks=[history, model_checkpoint])
        # Accumulate per-batch training curves and per-epoch eval metrics.
        loss_train = np.concatenate([loss_train, np.stack([j for j in history.losses])])
        acc_train = np.concatenate([acc_train, np.stack([j for j in history.accs])])
        for metric in ["acc", "loss", "val_acc", "val_loss"]:
            logger.info("{}: {}".format(metric, model.history.history[metric]))
        # Repeat the epoch-level val metrics once per batch so the eval curves
        # line up with the per-batch training curves when plotted.
        batches = len(np.stack([j for j in history.losses]))
        for l in range(batches):
            loss_eval = np.append(loss_eval, model.history.history["val_loss"])
            acc_eval = np.append(acc_eval, model.history.history["val_acc"])
        loss_eval_once = np.append(loss_eval_once, model.history.history["val_loss"])
        # Calculate best score and thresholds
        avg_score, trs, ind_scores = calc_jacc(model, logger, dims, visual_name, x_val, y_val,
                                               use_sample_weights, N_Cls=N_Cls)
        avg_scores.append(avg_score)
        for z in range(N_Cls):
            ind_scores_over_time[z].append(ind_scores[z])
        model.save_weights('../weights/unet_{}_{:.4f}'.format(visual_name, avg_score))
        visualize_training(loss_train, loss_eval, name="{}".format(visual_name), acc_train=acc_train,
                           acc_eval=acc_eval)
        visualize_scores(avg_scores, ind_scores_over_time, trs, name="{}".format(visual_name))
        print("Iteration {} took {:.2f}s.".format(iteration, time.time() - start))
    return model, avg_score, trs, avg_scores
def block_shape(f):
    """
    Find the block shape (nxb, nyb, nzb) given the hdf5 file f.

    Returns:
        dimension, nb: the dimensionality and an int32 array holding
        (nxb, nyb, nzb)[:dimension].

    NOTE(review): `empty`/`int32` are assumed to come from a numpy
    star-import at module level — confirm against the original file.
    """
    if 'integer scalars' in f.root:
        # Newer files: block sizes live in the 'integer scalars' table.
        params = f.getNode(f.root, 'integer scalars').read()
        p_dict = dict((name.rstrip(), val) for name, val in params)
        dimension = p_dict['dimensionality']
        nb = empty(dimension, dtype=int32)
        for i, par in enumerate(['nxb', 'nyb', 'nzb'][:dimension]):
            nb[i] = p_dict[par]
    else:
        # Older files: read the sizes out of 'simulation parameters'.
        # Fixed: this was a Python 2 `print` statement, which is a syntax
        # error on Python 3 (the rest of this file uses print() calls).
        print(dir(f.getNode(f.root, 'block size')))
        dimension = 3
        params = f.getNode(f.root, 'simulation parameters')
        nb = empty(dimension, dtype=int32)
        for i in range(dimension):
            nb[i] = params[0][5 + i]
    return dimension, nb
def model_init(rng_key, batch, encoder_sizes=(1000, 500, 250, 30)):
    """Initialize the standard autoencoder.

    Builds a mirrored encoder/decoder stack of dense layers sized
    x -> encoder_sizes -> reversed(encoder_sizes[:-1]) -> x and returns
    (params, None), where params is a list of (weights, bias) pairs with
    Glorot-uniform weights and zero biases.
    """
    x_size = batch.shape[-1]
    decoder_sizes = encoder_sizes[len(encoder_sizes) - 2::-1]
    layer_sizes = (x_size,) + encoder_sizes + decoder_sizes + (x_size,)
    layer_keys = jax.random.split(rng_key, len(layer_sizes) - 1)
    params = [
        # Glorot uniform initialization per layer, zero bias.
        (glorot_uniform((dim_in, dim_out), layer_key), jnp.zeros([dim_out]))
        for layer_key, dim_in, dim_out in zip(layer_keys, layer_sizes, layer_sizes[1:])
    ]
    return params, None
def do2_SVU(calphase, temp, csv):
"""
Description:
Stern-Volmer-Uchida equation for calculating temperature
corrected dissolved oxygen concentration. OOI L1 data product.
Usage:
DO = do2_SVU(calphase, temp, csv)
where
DO = dissolved oxygen [micro-mole/L]
calphase = calibrated phase from an Oxygen sensor [deg]
(see DOCONCS DPS)
temp = Either CTD temperature, or oxygen sensor temperature
[deg C], (see DOCONCS DPS)
csv = Stern-Volmer-Uchida Calibration Coefficients array.
7 element float array, (see DOCONCS DPS)
Example:
csv = np.array([0.002848, 0.000114, 1.51e-6, 70.42301, -0.10302,
-12.9462, 1.265377])
calphase = 27.799
temp = 19.841
DO = do2_SVU(calphase, temp, csv)
print DO
> 363.900534505
Implemented by:
2013-04-26: Stuart Pearce. Initial Code.
2015-04-10: Russell Desiderio. Revised code to work with CI implementation
of calibration coefficients: they are to be implemented as time-
vectorized arguments (tiled in the time dimension to match the
number of data packets).
Fix for "blocker bug #2972".
References:
OOI (2012). Data Product Specification for Oxygen Concentration
from "Stable" Instruments. Document Control Number
1341-00520. https://alfresco.oceanobservatories.org/ (See:
Company Home >> OOI >> Controlled >> 1000 System Level
>> 1341-00520_Data_Product_SPEC_DOCONCS_OOI.pdf)
"""
# this will work for both old and new CI implementations of cal coeffs.
csv = np.atleast_2d(csv)
# Calculate DO using Stern-Volmer:
Ksv = csv[:, 0] + csv[:, 1]*temp + csv[:, 2]*(temp**2)
P0 = csv[:, 3] + csv[:, 4]*temp
Pc = csv[:, 5] + csv[:, 6]*calphase
DO = ((P0/Pc) - 1) / Ksv
return DO | be3d3faee477749a2f7b2429759f4aff38b9a0ac | 31,220 |
def _make_rotation_matrix(vector_1,vector_2):
"""" Generates the rotation matrix from vector_1 to vector_2"""
# Use formula for rotation matrix: R = I + A + A^2 * b
# https://math.stackexchange.com/questions/180418/calculate-rotation-matrix-to-align-vector-a-to-vector-b-in-3d
v = np.cross(vector_1,vector_2)
c = np.dot(vector_1,vector_2)
s = np.linalg.norm(v)
b = (1-c)/s**2
# Generate rotation matrix
A = np.zeros((3,3))
A[0][1] += -v[2]
A[0][2] += v[1]
A[1][2] += -v[0]
A[1][0] += v[2]
A[2][0] += -v[1]
A[2][1] += v[0]
B = np.dot(A,A)
I = np.identity(3)
R = I + A + b * B
return(R) | 17c24c4c4e6c8378b65076686f4d80736d6ccf3e | 31,221 |
def get_models(models='all'):
    """
    Returns model names as a list.

    Parameters
    ----------
    models: str
        OPTIONAL. Default value is 'all' in which case all keys in
        default_models are returned. If 'mixed' is passed, only the
        MixedFluid model names are returned.

    Raises
    ------
    ValueError
        If `models` is neither 'all' nor 'mixed'. (Previously an unknown
        selector silently returned None.)
    """
    if models == 'all':
        return list(default_models.keys())
    if models == 'mixed':
        return ['Shishkina', 'Dixon', 'IaconoMarziano', 'Liu']
    raise ValueError(
        "Unrecognized models selector: %r (expected 'all' or 'mixed')" % (models,))
def other_language_code():
    """Language code used for testing, currently not set by user."""
    # Hard-coded for now; not configurable.
    code = 'de-DE'
    return code
import numpy
def do_novelty_detection(
        baseline_image_matrix, test_image_matrix, image_normalization_dict,
        predictor_names, cnn_model_object, cnn_feature_layer_name,
        ucn_model_object, num_novel_test_images,
        percent_svd_variance_to_keep=97.5):
    """Does novelty detection.

    Specifically, this method follows the procedure in Wagstaff et al. (2018)
    to determine which images in the test set are most novel with respect to the
    baseline set.

    NOTE: Both input and output images are (assumed to be) denormalized.

    B = number of baseline examples (storm objects)
    T = number of test examples (storm objects)
    M = number of rows in each storm-centered grid
    N = number of columns in each storm-centered grid
    C = number of channels (predictor variables)

    :param baseline_image_matrix: B-by-M-by-N-by-C numpy array of baseline
        images.
    :param test_image_matrix: T-by-M-by-N-by-C numpy array of test images.
    :param image_normalization_dict: See doc for `normalize_images`.
    :param predictor_names: length-C list of predictor names.
    :param cnn_model_object: Trained CNN model (instance of
        `keras.models.Model`).  Will be used to turn images into scalar
        features.
    :param cnn_feature_layer_name: The "scalar features" will be the set of
        activations from this layer.
    :param ucn_model_object: Trained UCN model (instance of
        `keras.models.Model`).  Will be used to turn scalar features into
        images.
    :param num_novel_test_images: Number of novel test images to find.
    :param percent_svd_variance_to_keep: See doc for `_fit_svd`.
    :return: novelty_dict: Dictionary with the following keys.  In the
        following discussion, Q = number of novel test images found.
    novelty_dict['novel_image_matrix_actual']: Q-by-M-by-N-by-C numpy array of
        novel test images.
    novelty_dict['novel_image_matrix_upconv']: Same as
        "novel_image_matrix_actual" but reconstructed by the upconvnet.
    novelty_dict['novel_image_matrix_upconv_svd']: Same as
        "novel_image_matrix_actual" but reconstructed by SVD (singular-value
        decomposition) and the upconvnet.
    :raises: TypeError: if `image_normalization_dict is None`.
    """
    if image_normalization_dict is None:
        error_string = (
            'image_normalization_dict cannot be None. Must be specified.')
        raise TypeError(error_string)
    num_test_examples = test_image_matrix.shape[0]
    # Normalize both image sets (`+ 0.` forces a copy so the inputs are
    # not modified in place).
    baseline_image_matrix_norm, _ = normalize_images(
        predictor_matrix=baseline_image_matrix + 0.,
        predictor_names=predictor_names,
        normalization_dict=image_normalization_dict)
    test_image_matrix_norm, _ = normalize_images(
        predictor_matrix=test_image_matrix + 0.,
        predictor_names=predictor_names,
        normalization_dict=image_normalization_dict)
    # Turn images into scalar features via the chosen CNN layer.
    baseline_feature_matrix = _apply_cnn(
        cnn_model_object=cnn_model_object,
        predictor_matrix=baseline_image_matrix_norm, verbose=False,
        output_layer_name=cnn_feature_layer_name)
    test_feature_matrix = _apply_cnn(
        cnn_model_object=cnn_model_object,
        predictor_matrix=test_image_matrix_norm, verbose=False,
        output_layer_name=cnn_feature_layer_name)
    novel_indices = []
    novel_image_matrix_upconv = None
    novel_image_matrix_upconv_svd = None
    # Greedily pick the test image with the largest SVD reconstruction error;
    # each pick is absorbed into the baseline for the next round.
    for k in range(num_novel_test_images):
        print('Finding {0:d}th of {1:d} novel test images...'.format(
            k + 1, num_novel_test_images))
        if len(novel_indices) == 0:
            this_baseline_feature_matrix = baseline_feature_matrix + 0.
            this_test_feature_matrix = test_feature_matrix + 0.
        else:
            # Move already-found novel examples from the test pool into the
            # baseline so they are not selected again.
            novel_indices_numpy = numpy.array(novel_indices, dtype=int)
            this_baseline_feature_matrix = numpy.concatenate(
                (baseline_feature_matrix,
                 test_feature_matrix[novel_indices_numpy, ...]),
                axis=0)
            this_test_feature_matrix = numpy.delete(
                test_feature_matrix, obj=novel_indices_numpy, axis=0)
        svd_dictionary = _fit_svd(
            baseline_feature_matrix=this_baseline_feature_matrix,
            test_feature_matrix=this_test_feature_matrix,
            percent_variance_to_keep=percent_svd_variance_to_keep)
        svd_errors = numpy.full(num_test_examples, numpy.nan)
        test_feature_matrix_svd = numpy.full(
            test_feature_matrix.shape, numpy.nan)
        for i in range(num_test_examples):
            print(i)
            if i in novel_indices:
                continue
            test_feature_matrix_svd[i, ...] = _apply_svd(
                feature_vector=test_feature_matrix[i, ...],
                svd_dictionary=svd_dictionary)
            svd_errors[i] = numpy.linalg.norm(
                test_feature_matrix_svd[i, ...] - test_feature_matrix[i, ...]
            )
        # Largest reconstruction error = most novel (NaNs, i.e. already-chosen
        # indices, are ignored).
        new_novel_index = numpy.nanargmax(svd_errors)
        novel_indices.append(new_novel_index)
        # Reconstruct the chosen image from both the raw and the
        # SVD-projected features via the upconvnet.
        new_image_matrix_upconv = ucn_model_object.predict(
            test_feature_matrix[[new_novel_index], ...], batch_size=1)
        new_image_matrix_upconv_svd = ucn_model_object.predict(
            test_feature_matrix_svd[[new_novel_index], ...], batch_size=1)
        if novel_image_matrix_upconv is None:
            novel_image_matrix_upconv = new_image_matrix_upconv + 0.
            novel_image_matrix_upconv_svd = new_image_matrix_upconv_svd + 0.
        else:
            novel_image_matrix_upconv = numpy.concatenate(
                (novel_image_matrix_upconv, new_image_matrix_upconv), axis=0)
            novel_image_matrix_upconv_svd = numpy.concatenate(
                (novel_image_matrix_upconv_svd, new_image_matrix_upconv_svd),
                axis=0)
    novel_indices = numpy.array(novel_indices, dtype=int)
    # Denormalize the reconstructions so all returned images are in
    # physical units again.
    novel_image_matrix_upconv = denormalize_images(
        predictor_matrix=novel_image_matrix_upconv,
        predictor_names=predictor_names,
        normalization_dict=image_normalization_dict)
    novel_image_matrix_upconv_svd = denormalize_images(
        predictor_matrix=novel_image_matrix_upconv_svd,
        predictor_names=predictor_names,
        normalization_dict=image_normalization_dict)
    return {
        NOVEL_IMAGES_ACTUAL_KEY: test_image_matrix[novel_indices, ...],
        NOVEL_IMAGES_UPCONV_KEY: novel_image_matrix_upconv,
        NOVEL_IMAGES_UPCONV_SVD_KEY: novel_image_matrix_upconv_svd
    }
from datetime import datetime
def parse_last_timestamp(df):
    """
    Parse the last timestamp from the dataframe and return it shifted one
    minute forward, formatted with ISO_DATE_FORMAT.

    The last timestamp is already in the database, so the weather data must
    be fetched from one minute after it to avoid refetching the same value.

    Returns None for an empty dataframe.
    """
    # Bug fix: the module-level import is `from datetime import datetime`,
    # which binds the *class*, so `datetime.timedelta` raised AttributeError.
    from datetime import timedelta
    if df.empty:
        return None
    date_string = df['timestamp'].iloc[-1]
    # We add one minute forward to prevent data duplication at the edge.
    date_obj = to_datetime(date_string) + timedelta(minutes=1)
    return date_obj.strftime(ISO_DATE_FORMAT)
def len_subword_features():
    """Return the number of per-grapheme feature dimensions.

    TODO: There is probably a better way to centralize this.
    """
    # Grapheme embedding (4) + grapheme duration (1)
    return 4 + 1
def NextLexem_OperatorPredicate(op_value):
    """Construct a predicate: lexem_list -> boolean which checks whether the
    next lexem is an operator whose value matches op_value (without
    consuming it)."""
    def predicate(lexem_list):
        # Empty stream: nothing to peek at.
        if not lexem_list:
            return False
        head = lexem_list[0]
        return isinstance(head, OperatorLexem) and head.value == op_value
    return predicate
import torch
def optim_inits(objective, x_opt, inference_samples, partition_samples, edge_mat_samples, n_vertices,
acquisition_func=expected_improvement, reference=None):
"""
:param x_opt: 1D Tensor
:param inference_samples:
:param partition_samples:
:param edge_mat_samples:
:param n_vertices:
:param acquisition_func:
:param reference:
:return:
"""
# for x, y in zip(objective.problem.lower_bounds, objective.problem.upper_bounds):
# print(x, y)
# print("n_vertices", n_vertices)
# print(partition_samples)
#print(edge_mat_samples)
#print(len(edge_mat_samples))
#print(edge_mat_samples[0])
#for i in range(len(edge_mat_samples)):
# print(len(edge_mat_samples[i]))
#rnd_nbd = torch.cat(tuple([torch.randint(low=0, high=int(n_v), size=(N_RANDOM_VERTICES, 1)) for n_v in n_vertices]), dim=1).long()
rnd_nbd = objective.generate_random_points(N_RANDOM_VERTICES)
min_nbd = neighbors(x_opt[:objective.num_discrete], partition_samples, edge_mat_samples, n_vertices, uniquely=False)
# print(min_nbd.size(0))
# print(min_nbd)
# print(x_opt[objective.num_discrete:].unsqueeze(0).repeat(min_nbd.size(0), 1)[:10])
min_nbd = torch.cat((min_nbd, x_opt[objective.num_discrete:].unsqueeze(0).repeat(min_nbd.size(0), 1)), dim=1)
# print(min_nbd[:6])
shuffled_ind = list(range(min_nbd.size(0)))
np.random.shuffle(shuffled_ind)
x_init_candidates = torch.cat(tuple([min_nbd[shuffled_ind[:N_SPRAY]], rnd_nbd]), dim=0)
acquisition_values = acquisition_expectation(x_init_candidates, inference_samples, partition_samples, n_vertices,
acquisition_func, reference)
#print("acquisition_values")
#print(acquisition_values[:30])
nonnan_ind = ~torch.isnan(acquisition_values).squeeze(1)
x_init_candidates = x_init_candidates[nonnan_ind]
acquisition_values = acquisition_values[nonnan_ind]
acquisition_sorted, acquisition_sort_ind = torch.sort(acquisition_values.squeeze(1), descending=True)
x_init_candidates = x_init_candidates[acquisition_sort_ind]
return x_init_candidates[:N_GREEDY_ASCENT_INIT], acquisition_sorted[:N_GREEDY_ASCENT_INIT] | f048fc3290d890bc687f3176f66e5ad86dfa5141 | 31,228 |
from sys import stderr
def suggest_max_coverage(alignment_file, y):
    """Suggest a max-coverage value for use with dysgu.

    Mean coverage and read length are estimated from the alignment index
    file, so the estimate is only meaningful for whole-genome alignment
    files.
    """
    bam = pysam.AlignmentFile(alignment_file)
    mean_cov, mean_read_len = index_stats(bam)
    cov = round(mean_cov, 2)
    read_length = round(mean_read_len, 1)
    max_cov = round(cov * y)
    # Human-readable summary goes to stderr; the bare value to stdout so it
    # can be captured by shell pipelines.
    print(f"Read-length {read_length} bp, mean whole-genome coverage estimate: {cov}, max-cov ~ {max_cov}", file=stderr)
    print(max_cov)
    return max_cov
import torch
def stack(mems):
    """
    Stack a list of equally-shaped tensors along a new leading dimension.

    Equivalent to torch.stack, but implemented with torch.cat + view,
    which benchmarked much faster at the time of writing.
    Submitted an issue for investigation:
    https://github.com/pytorch/pytorch/issues/22462
    FIXME: Remove this function after the issue above is resolved
    """
    target_shape = (-1,) + tuple(mems[0].shape)
    return torch.cat(mems).view(*target_shape)
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2

    Adds one bundle2 'pushkey' part per outdated remote head, asking the
    server to move it from draft to public, and returns a reply handler
    that warns about any head the server ignored or refused to update.
    """
    # Phases may already have been handled by an earlier push step.
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    # The remote must advertise pushkey support over bundle2.
    if not 'pushkey' in b2caps:
        return
    pushop.stepsdone.add('phases')
    # Maps each emitted part id back to the node it tries to publish.
    part2node = []
    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, newremotehead))
    def handlereply(op):
        # Inspect the server reply for every pushkey part we emitted and
        # warn the user about each head that was not made public.
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            # A pushkey part yields at most one reply record.
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply | ff8f5c839919c6593e2d7d5cb98c477f8b2bc735 | 31,231 |
def predict_all(model, all_data):
    """
    Predict odor probabilities for every trial.

    :param model: (keras) decoding model
    :param all_data: (4d numpy array) data of format [trial, window, neuron, time]
    :return: (3d numpy array) prediction of format [trial, window, odor]
    """
    windows = stack_data(all_data, 25, 10)
    n_trials, n_windows = windows.shape[:2]
    predictions = np.zeros((n_trials, n_windows, 5))
    for trial_idx in range(n_trials):
        predictions[trial_idx] = model.predict(windows[trial_idx])
    return predictions
def normalize(X):
    """Normalize the dataset ``X`` to zero mean and unit standard deviation.

    Args:
        X: ndarray of shape (n_samples, n_features).

    Returns:
        (Xbar, mean, std): ``Xbar`` is the normalized dataset; ``mean`` and
        ``std`` are the per-feature mean and standard deviation of ``X``.
        ``std`` is returned unmodified, so zero-variance features report a
        std of 0 even though they were scaled by 1 during normalization.
    """
    mu = np.mean(X, axis=0)
    std = np.std(X, axis=0)
    # Zero-variance features would divide by zero and produce NaN; scale
    # those dimensions by 1 instead so they simply become zero-mean.
    safe_std = np.where(std == 0, 1.0, std)
    Xbar = (X - mu) / safe_std
    return Xbar, mu, std
def get_lat_lon(exif_data):
    """Return the (latitude, longitude) pair stored in ``exif_data``.

    ``exif_data`` is the dict produced by ``get_exif_data``; the GPS
    *destination* tags are read. Both values are ``None`` when the GPS
    information is missing or incomplete.
    """
    if "GPSInfo" not in exif_data:
        return None, None
    gps_info = exif_data["GPSInfo"]
    dest_lat = _get_if_exist(gps_info, "GPSDestLatitude")
    dest_lat_ref = _get_if_exist(gps_info, 'GPSDestLatitudeRef')
    dest_lon = _get_if_exist(gps_info, 'GPSDestLongitude')
    dest_lon_ref = _get_if_exist(gps_info, 'GPSDestLongitudeRef')
    if not (dest_lat and dest_lat_ref and dest_lon and dest_lon_ref):
        return None, None
    lat = _convert_to_degress(dest_lat)
    # Southern latitudes are negative.
    if dest_lat_ref != "N":
        lat = 0 - lat
    lon = _convert_to_degress(dest_lon)
    # Western longitudes are negative.
    if dest_lon_ref != "E":
        lon = 0 - lon
    return lat, lon
import requests
def get_rendered_original_stream(warc_filename, warc_offset, compressedendoffset, payload_only=True):
    """
    Grabs a resource.

    Fetches a single gzip-compressed WARC record from HDFS via WebHDFS,
    using the record's byte offset (and optionally its compressed length)
    inside the WARC file.

    :param warc_filename: HDFS path of the WARC file, or None if unknown.
    :param warc_offset: byte offset of the record inside the WARC file.
    :param compressedendoffset: compressed record length; when > 0 it limits
        the WebHDFS read to just that record.
    :param payload_only: if True, return (payload stream, content type);
        otherwise return (raw WARC record bytes, 'application/warc').
    """
    # If not found, say so:
    if warc_filename is None:
        return None, None
    # Grab the payload from the WARC and return it.
    url = "%s%s?op=OPEN&user.name=%s&offset=%s" % (WEBHDFS_PREFIX, warc_filename, WEBHDFS_USER, warc_offset)
    if compressedendoffset and int(compressedendoffset) > 0:
        url = "%s&length=%s" % (url, compressedendoffset)
    r = requests.get(url, stream=True)
    # We handle decoding etc. ourselves, so keep the raw (compressed) stream.
    r.raw.decode_content = False
    logger.debug("Loading from: %s" % r.url)
    logger.debug("Got status code %s" % r.status_code)
    # Return the payload, or the record:
    if payload_only:
        # Parse the WARC, return the payload:
        rl = ArcWarcRecordLoader()
        record = rl.parse_record_stream(DecompressingBufferedReader(stream=r.raw))
        #return record.raw_stream, record.content_type
        return record.content_stream(), record.content_type
    else:
        # This makes sure we only get the first GZip chunk:
        s = DecompressingBufferedReader(stream=r.raw)
        warc_record = s.read()
        return warc_record, 'application/warc' | e3fce32a061445e6ec3f69bd80e7ef46cd2dedaf | 31,235 |
import argparse
def get_parser() -> argparse.ArgumentParser:
    """Build the argparser for the concord flask/cheroot server.

    Returns:
        argparse.ArgumentParser: parser exposing host/port/debug/database
        options plus the shared logging options from ``add_log_parser``.
    """
    arg_parser = argparse.ArgumentParser(
        description="Start the concord flask/cheroot server",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    arg_parser.add_argument("-d", "--host", default='localhost',
                            help="Hostname to listen on")
    arg_parser.add_argument("-p", "--port", default=8080, type=int,
                            help="Port of the webserver")
    arg_parser.add_argument("--debug", action="store_true",
                            help="Run the server in Flask debug mode")
    arg_parser.add_argument("--database", default=DEFAULT_SQLITE_PATH,
                            help="Path to the SQLITE database to store messages")
    add_log_parser(arg_parser)
    return arg_parser
def reduce_dimensions(df, reduce_cols=None, n_components=2):
    """
    Run several dimensionality-reduction algorithms over a dataframe.

    Dimensionality reduction is the process of reducing the number of random
    variables under consideration by obtaining a set of principal variables.
    PCA is always applied; for large inputs (> 10000 rows) kernel PCA is
    added, otherwise Isomap, spectral embedding and locally linear embedding
    are added instead.

    :param df: pandas dataframe
    :param reduce_cols: columns to perform dimensionality reduction on
    :param n_components: number of components for the reduction algorithms
    :return: dict mapping each algorithm's class name to its reduced dataframe
    """
    assert (isinstance(df, pd.DataFrame)) and (not df.empty), 'df should be a valid pandas DataFrame'
    if reduce_cols:
        assert (set(reduce_cols).issubset(set(df.columns.tolist()))) and (
            len(df[reduce_cols].index) > 0), "reduce_cols must be a subset of df columns"
        features = df[reduce_cols].copy()
    else:
        features = df.copy()
    reducers = {PCA(n_components=n_components, svd_solver='randomized')}
    if len(features.index) > 10000:
        reducers.add(KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10))
    else:
        n_neighbors = 10
        reducers.add(Isomap(n_components=n_components, n_neighbors=n_neighbors))
        reducers.add(SpectralEmbedding(n_components=n_components, n_neighbors=n_neighbors))
        reducers.add(LocallyLinearEmbedding(n_components=n_components, n_neighbors=n_neighbors, method='standard'))
    return {
        type(reducer).__name__: pd.DataFrame(reducer.fit_transform(features))
        for reducer in reducers
    }
import numpy
import math
def _dct_or_dst_type3(
    x, n=None, axis=-1, norm=None, forward=True, dst=False, overwrite_x=False
):
    """Forward DCT/DST-III (or inverse DCT/DST-II) along a single axis.
    Parameters
    ----------
    x : cupy.ndarray
        The data to transform.
    n : int
        The size of the transform. If None, ``x.shape[axis]`` is used.
    axis : int
        Axis along which the transform is applied.
    forward : bool
        Set true to indicate that this is a forward DCT-II as opposed to an
        inverse DCT-III (The difference between the two is only in the
        normalization factor).
    norm : {None, 'ortho', 'forward', 'backward'}
        The normalization convention to use.
    dst : bool
        If True, a discrete sine transform is computed rather than the discrete
        cosine transform.
    overwrite_x : bool
        Indicates that it is okay to overwrite x. In practice, the current
        implementation never performs the transform in-place.
    Returns
    -------
    y: cupy.ndarray
        The transformed array.
    """
    # Validate and canonicalize the axis to a non-negative index.
    if axis < -x.ndim or axis >= x.ndim:
        raise numpy.AxisError('axis out of range')
    if axis < 0:
        axis += x.ndim
    if n is not None and n < 1:
        raise ValueError(
            f'invalid number of data points ({n}) specified'
        )
    # Pad/truncate x along `axis` to length n (real-to-real convention).
    x = _cook_shape(x, (n,), (axis,), 'R2R')
    n = x.shape[axis]
    # determine normalization factor
    if norm == 'ortho':
        sl0_scale = 0.5 * math.sqrt(2)
        inorm = 'sqrt'
    elif norm == 'forward':
        sl0_scale = 0.5
        inorm = 'full' if forward else 'none'
    elif norm == 'backward' or norm is None:
        sl0_scale = 0.5
        inorm = 'none' if forward else 'full'
    else:
        raise ValueError(f'Invalid norm value "{norm}", should be "backward", '
                         '"ortho" or "forward"')
    norm_factor = _get_dct_norm_factor(n, inorm=inorm, dct_type=3)
    dtype = cupy.promote_types(x, cupy.complex64)
    # sl0 selects the first element along the transform axis (the DC term),
    # which gets special scaling below.
    sl0 = [slice(None)] * x.ndim
    sl0[axis] = slice(1)
    if dst:
        if norm == 'ortho':
            float_dtype = cupy.promote_types(x.dtype, cupy.float32)
            if x.dtype != float_dtype:
                x = x.astype(float_dtype)
            elif not overwrite_x:
                x = x.copy()
            x[tuple(sl0)] *= math.sqrt(2)
            sl0_scale = 0.5
        # The DST-III is computed as a DCT-III of the axis-reversed input.
        slrev = [slice(None)] * x.ndim
        slrev[axis] = slice(None, None, -1)
        x = x[tuple(slrev)]
    # scale by exponentials and normalization factor
    tmp = _exp_factor_dct3(x, n, axis, dtype, norm_factor)
    x = x * tmp  # broadcasting
    x[tuple(sl0)] *= sl0_scale
    # inverse fft
    x = _fft.ifft(x, n=n, axis=axis, overwrite_x=True)
    x = cupy.real(x)
    # reorder entries
    return _reshuffle_dct3(x, n, axis, dst) | 7e617e478c38ea47767a259df74581c960bfcaff | 31,238 |
def indi_events(person, tags=None):
    """Collect all events recorded for a given individual.

    Parameters
    ----------
    person : `ged4py.model.Individual`
        GEDCOM INDI record.
    tags : `list` [ `str` ], optional
        Tags to return; defaults to all individual event tags.

    Returns
    -------
    events : `list` [ `Event` ]
        List of events.
    """
    selected_tags = tags if tags else _indi_events_tags
    return _get_events(person, selected_tags)
def encrypt(key, plaintext):
    """Map each character of *plaintext* through the substitution *key*
    and return the resulting ciphertext string."""
    cipher_chars = [key[ch] for ch in plaintext]
    return ''.join(cipher_chars)
import typing as T


def rename_keys(
    mapping: T.Dict[str, T.Any],
    *,
    prefix: T.Optional[str] = None,
    suffix: T.Optional[str] = None
) -> T.Dict[str, T.Any]:
    """Renames every key in `mapping` with a `prefix` and/or `suffix`.

    Args:
        mapping (T.Dict): Mapping.
        prefix (str, optional): String to prepend. Defaults to None.
        suffix (str, optional): String to append. Defaults to None.

    Returns:
        T.Dict: Returns the updated mapping.
    """
    # BUG FIX: the original did ``from re import T`` — that binds the
    # ``re.TEMPLATE`` flag, so every ``T.Dict``/``T.Optional`` annotation
    # raised at import time. ``T`` must be the ``typing`` module.
    return {
        f'{prefix or ""}{key}{suffix or ""}': value
        for key, value in mapping.items()
    }
def any_root_path(path):
    """Rendering the React template.

    Catch-all view: ``path`` is accepted but unused — every route serves
    ``index.html`` (presumably so client-side routing can take over; confirm
    against the route registration).
    """
    return render_template('index.html')
def getChartdata():
    """
    Build the staff/department/company statistics used by the dashboard charts.

    return: JSON apiResponse whose data contains:
        staff: counts of employees split by the ``is_leave`` flag plus total
        department: [{name, value=headcount}, ...]
        company: [{name, value=headcount}, ...]
    """
    data = {'staff': {}}
    # NOTE(review): the naming looks inverted — ``is_leave==True`` (has left?)
    # is stored under 'is_worker'; confirm the intended semantics of is_leave.
    data['staff']['is_worker'] = Staff.query.filter(Staff.is_leave==True).count()
    data['staff']['not_worker'] = Staff.query.filter(Staff.is_leave==False).count()
    data['staff']['total_worker'] = data['staff']['is_worker'] + data['staff']['not_worker']
    # Headcount per department / company for the pie charts.
    data['department'] = [{'name': department.name, 'value': len(department.staff_of_department)} for department in Department.query.all()]
    data['company'] = [{'name': company.name, 'value': len(company.staff_of_company)} for company in Company.query.all()]
    return apiResponse(200, data=data) | 70dcee23ca8e55ab8500e6ca56d44216aea69f95 | 31,243 |
def md_to_html(content):
    """Convert Markdown-formatted ``content`` into an HTML string."""
    return markdown.markdown(content)
import logging
def userdata_loader(s3_training_bucket='', trainer_script_name='trainer-script.sh'):
    """Load the trainer-script contents and return them as a str.

    :param s3_training_bucket: optional S3 bucket to fetch the script from;
        when empty, the copy bundled with this package is used.
    :param trainer_script_name: file name (or S3 key) of the trainer script.
    :return: script contents as a str, or False when loading fails.
    """
    try:
        if s3_training_bucket:
            # A bucket was given: treat trainer_script_name as an S3 key.
            client_s3 = boto3.client('s3')
            s3_response = client_s3.get_object(
                Bucket=s3_training_bucket,
                Key=trainer_script_name
            )
            return s3_response['Body'].read().decode('utf-8')
        # No bucket: fall back to the script shipped with this package.
        userdata_filepath = 'src/{}'.format(trainer_script_name)
        with open(userdata_filepath, 'r') as f:
            return f.read()
    except Exception as e:
        err = 'userdata_loader failure: {}'.format(e)
        logging.error(err)
        return False
def timer(string, i, f):
    """Print and return a formatted elapsed-time message.

    Takes in:
        i = starting time;
        f = finishing time.
    Returns: '<string> time: <M>min <S>s' using whole minutes and seconds.
    """
    elapsed = f - i  # Total time to run.
    minutes, seconds = divmod(elapsed, 60.0)
    message = string + ' time: ' + str(int(minutes)) + 'min ' + str(int(seconds)) + 's'
    print(message)
    return message
def format_server_wrs(world_records, server_id):
    """Render the world records of one server as a marked-safe HTML table.

    world_records format: {server_id: [list of records]}
    where every record is a tuple like {map_name, mode, date, time,
    player_name, steam_id, rank} accessible like a sqlalchemy result.
    An empty string is returned when the server has no records.
    """
    records = world_records[server_id]
    if not records:
        return do_mark_safe('')
    parts = ['<table><tr><th>Player</th><th>Mode</th><th>Time</th><th>Date</th></tr>']
    for record in records:
        # Delegate the formatting of each cell to the dedicated helpers.
        time = format_record_time(record.time)
        date = format_record_date(record.date)
        player_name = format_player_name(record.player_name, record.steam_id)
        mode = format_record_mode(record.mode)
        parts.append(f"<tr><td>{player_name}</td><td>{mode}</td><td>{time}</td><td>{date}</td></tr>")
    parts.append('</table>')
    return do_mark_safe(''.join(parts))
from pathlib import Path
def clean_file(path=Path('data') / 'Fangraphs Leaderboard.csv',
               level='MLB', league='', season='', position=''):
    """Load a Fangraphs leaderboard CSV and make it database-friendly.

    Column headers are rewritten so they contain no characters that are
    invalid in database identifiers, and optional context columns are
    appended.

    Args:
        path: CSV file to load.
        level (str): the minor/major league level selected. Default MLB.
        league (str): optionally add a league column
        season (int): optionally add the year of the data
        position (str): optionally add the position of the data

    Returns:
        a renamed pandas dataframe.
    """
    # Characters/tokens to replace prior to being loaded in a database.
    char_rep = {' ': '_',
                '%': 'pct',
                '(': '',
                ')': '',
                '.': '',
                '-': '_',
                '/': 'per',
                '+': 'plus',
                '1B': 'singles',
                '2B': 'doubles',
                '3B': 'triples'}
    leaderboard = pd.read_csv(path)
    # Attach selection context that is not present in the file itself.
    leaderboard['Level'] = level
    if season != '':
        leaderboard['Season'] = season
    if league != '':
        leaderboard['League'] = league
    if position != '':
        leaderboard['Position'] = position
    # Rewrite each header, applying the substitutions in declaration order.
    renamed = []
    for col in leaderboard.columns:
        for old, new in char_rep.items():
            col = col.replace(old, new)
        renamed.append(col)
    leaderboard.columns = renamed
    return leaderboard
def TextRangeCommandStart(builder):
    """This method is deprecated. Please switch to Start."""
    # Backwards-compatibility shim kept for callers generated against the
    # old flatbuffers API; delegates directly to Start().
    return Start(builder)
import random
def ai_derp(gstate: TicTacToe, *args):
    """AI that picks a uniformly random move among the available next moves."""
    available_moves = list(gstate.next_moves.keys())
    return random.choice(available_moves)
def get_logits_img(features, n_classes, mode, params):
    """Computes logits for provided features.
    Args:
        features: A dictionary of tensors that are the features
        and whose first dimension is batch (as returned by input_fn);
        must contain an "img_64" tensor of shape [batch, 64, 64].
        n_classes: Number of classes from which to predict (i.e. the number
        of different values in the "labels" tensor returned by the
        input_fn).
        mode: A tf.estimator.ModeKeys.
        params: Hyper parameters: "convs" specifying the configuration of the
        convolutions, and "hidden" specifying the configuration of the
        dense layers after the convolutions.
    Returns:
        The logits tensor with shape=[batch, n_classes].
    """
    # The parameter "convs" specifies (kernel, stride, filters)
    # of successive convolution layers.
    convs = params.get('convs', ((10, 4, 32), (5, 4, 64)))
    # The parameter "hidden" specifies the number of neurons of
    # successive fully connected layers (after convolution).
    hidden = params.get('hidden', (256,))
    # The function tf.layers.conv2d expects the tensor to have format
    # [batch, height, width, channels] -- since our "img_64" tensor
    # has format [batch, height, width], we need to expand the tensor
    # to get [batch, height, width, channels=1].
    last_layer = tf.expand_dims(features['img_64'], axis=3)
    # We start with dims=width=height=64 and filters=channels=1 and then
    # successively reduce the number of dimensions while increasing the
    # number of filters in every convolutional/maxpooling layer.
    dim = 64
    filters = 1
    for kernel, stride, filters in convs:
        conv = tf.layers.conv2d(
            inputs=last_layer, filters=filters, kernel_size=[kernel, kernel],
            padding='same', activation=tf.nn.relu)
        last_layer = tf.layers.max_pooling2d(
            inputs=conv, pool_size=[stride, stride], strides=stride)
        dim /= stride
        # NOTE(review): under Python 3 true division makes ``dim`` a float,
        # so the reshape below receives a float size — should likely be
        # ``dim //= stride``. Confirm whether this code targets Python 2.
    # "Flatten" the last layer to get shape [batch, *]
    last_layer = tf.reshape(last_layer, [-1, filters * dim * dim])
    # Add some fully connected layers.
    for units in hidden:
        dense = tf.layers.dense(inputs=last_layer, units=units,
                                activation=tf.nn.relu)
        # Regularize using dropout (only active while training).
        training = mode == tf.estimator.ModeKeys.TRAIN
        last_layer = tf.layers.dropout(inputs=dense, rate=0.4,
                                       training=training)
    # Finally return logits that is activation of neurons in last layer.
    return tf.layers.dense(inputs=last_layer, units=n_classes) | e4170d31949c531c54021b6a17c9cbd6306175eb | 31,251 |
def ccnv(pad=0):
    """Current canvas

    With ``pad == 0`` (default) return the most recently created canvas.
    Otherwise activate pad number ``pad`` and return that pad.
    """
    global _cnvs
    if pad == 0:
        return _cnvs[-1]
    _cnvs[-1].cd(pad)
    # NOTE(review): cd() is called on the newest canvas (_cnvs[-1]) but
    # GetPad() on the oldest (_cnvs[0]) — confirm this asymmetry is intended.
    return _cnvs[0].GetPad(pad) | 121f61661ea2a7d9ae941503c3bc2caa29f86dbd | 31,252 |
import functools
import unittest
def NetworkTest(reason='Skipping network test'):
    """Decorator for unit tests. Skip the test if --network is not specified.

    Works on both individual test functions and TestCase subclasses: for a
    class, only ``setUp`` is wrapped, which is enough to skip every test
    method in the class.
    """
    def Decorator(test_item):
        @functools.wraps(test_item)
        def NetworkWrapper(*args, **kwargs):
            # Re-check the flag at call time, not decoration time.
            if GlobalTestConfig.NETWORK_TESTS_DISABLED:
                raise unittest.SkipTest(reason)
            test_item(*args, **kwargs)
        # We can't check GlobalTestConfig.NETWORK_TESTS_DISABLED here because
        # __main__ hasn't run yet. Wrap each test so that we check the flag before
        # running it.
        if isinstance(test_item, type) and issubclass(test_item, TestCase):
            # Recursively apply the decorator to the class's setUp method.
            test_item.setUp = Decorator(test_item.setUp)
            return test_item
        else:
            return NetworkWrapper
    return Decorator | f694902249d38be4d897ac20d47a23eb9ce10223 | 31,253 |
from typing import Dict
from typing import Any
def azure_firewall_network_rule_collection_update_command(client: AzureFirewallClient,
                                                          args: Dict[str, Any]) -> CommandResults:
    """
    Update network rule collection in firewall or policy.

    Exactly one of the ``firewall_name`` / ``policy`` arguments must be
    supplied in ``args``. The update is asynchronous: while Azure reports a
    provisioning state other than Succeeded/Failed, a polling scheduled
    command is returned instead of the final output.

    Args:
        client (AzureFirewallClient): Azure Firewall API client.
        args (dict): Command arguments from XSOAR.
    Returns:
        CommandResults: outputs, readable outputs and raw response for XSOAR.
    """
    ScheduledCommand.raise_error_if_not_supported()
    should_poll = True
    interval = arg_to_number(args.get('interval', 30))
    timeout = arg_to_number(args.get('timeout', 60))
    firewall_name = args.get('firewall_name')
    policy = args.get('policy')
    collection_name = args.get('collection_name')
    priority = args.get('priority')
    if priority:
        priority = arg_to_number(priority)
    action = args.get('action')
    if firewall_name:
        firewall_data, filtered_rules = get_firewall_rule_collection(client, firewall_name,
                                                                     rule_type="network_rule")
        # Locate the target collection by name within the firewall's rules.
        collection_index = -1
        for index, collection in enumerate(filtered_rules):
            if collection.get("name") == collection_name:
                collection_index = index
                break
        if collection_index == -1:
            raise Exception(f'Collection {collection_name} is not exists in {firewall_name} firewall.')
        # Mutate only the fields the caller provided.
        if action:
            filtered_rules[collection_index]["properties"]["action"]["type"] = action
        if priority:
            filtered_rules[collection_index]["properties"]["priority"] = priority
        response = client.azure_firewall_update_request(firewall_name=firewall_name, firewall_data=firewall_data)
        state = dict_safe_get(response, ["properties", "provisioningState"], '')
        if should_poll and state not in ["Succeeded", "Failed"]:
            # schedule next poll
            scheduled_command = create_scheduled_command(command_name='azure-firewall-get', interval=interval,
                                                        timeout=timeout, firewall_names=firewall_name)
            return CommandResults(scheduled_command=scheduled_command,
                                  readable_output=generate_polling_readable_message(resource_type_name="Firewall",
                                                                                    resource_name=firewall_name))
        else:
            return generate_firewall_command_output(response,
                                                    readable_header=f'Successfully Updated Firewall "{firewall_name}"')
    else:
        if not policy:
            # NOTE(review): the doubled single-quotes below are adjacent empty
            # string literals, so the message renders without quotes around
            # the argument names — probably intended to be escaped quotes.
            raise Exception('One of the arguments: ''firewall_name'' or ''policy'' must be provided.')
        response = update_policy_rule_collection(client=client, policy=policy, collection_name=collection_name,
                                                 priority=priority,
                                                 action=action)
        state = dict_safe_get(response, ["properties", "provisioningState"], '')
        if should_poll and state not in ["Succeeded", "Failed"]:
            # schedule next poll
            scheduled_command = create_scheduled_command(command_name='azure-firewall-policy-get', interval=interval,
                                                        timeout=timeout, policy_names=policy)
            return CommandResults(scheduled_command=scheduled_command,
                                  readable_output=generate_polling_readable_message(resource_type_name="Policy",
                                                                                    resource_name=policy))
        response = client.azure_firewall_policy_get_request(policy)
        return generate_policy_command_output(response, readable_header=f'Successfully Updated Policy "{policy}"') | 4d3d5ac09d345d661b2ef258ba2d6311c0f5b764 | 31,254 |
from typing import Optional
def get_stream(id: Optional[str] = None,
               ledger_name: Optional[str] = None,
               opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetStreamResult:
    """
    Resource schema for AWS::QLDB::Stream.

    Looks up an existing QLDB journal stream by stream ``id`` and
    ``ledger_name`` through the pulumi invoke machinery and returns its
    ARN, id and tags.
    """
    __args__ = dict()
    __args__['id'] = id
    __args__['ledgerName'] = ledger_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('aws-native:qldb:getStream', __args__, opts=opts, typ=GetStreamResult).value
    return AwaitableGetStreamResult(
        arn=__ret__.arn,
        id=__ret__.id,
        tags=__ret__.tags) | 244721f3424c8de4c923b8eb57c96429c028280d | 31,255 |
def _is_course_or_run_deleted(title):
"""
Returns True if '[delete]', 'delete ' (note the ending space character)
exists in a course's title or if the course title equals 'delete' for the
purpose of skipping the course
Args:
title (str): The course.title of the course
Returns:
bool: True if the course or run should be considered deleted
"""
title = title.strip().lower()
if (
"[delete]" in title
or "(delete)" in title
or "delete " in title
or title == "delete"
):
return True
return False | c32c69e15fafbc899048b89ab8199f653d59e7a8 | 31,256 |
def copy_installer_dict(installer_dict, default_installer):
    """Copy installer dict, resolving 'default_installer' names.

    The installer rules themselves are not deep-copied; only the mapping is
    rebuilt, with each installer name passed through
    ``replace_default_installer``.

    :param str default_installer: name of the default installer
    """
    return {
        replace_default_installer(name, default_installer): rule
        for name, rule in installer_dict.items()
    }
import numpy
def pbg_dispersion_1d_imre(
    results,
    wave="p",
    size=(6,4), xlim=(-1, 1), ylim=(0, 1)
):
    """
    Plots the photonic dispersion (Bloch wavevector) of a photonic crystal structure,
    computed for a range of frequencies (wavelengths) and one angle of incidence.
    Takes one polarisation type in complex format, and plots on the left the imaginary
    part and on the right the real part.
    pbg_dispersion_1d_imre(results, wave="p", size=(6,4))
    results: results structure from the simulation
    wave: either p-wave ("p", default) or s-wave ("s")
    size: size of the figure
    xlim, ylim: axis limits of the plot
    Returns the (figure, axes) pair.
    """
    # Normalize frequency and wavevector by the crystal period.
    omega = results.omega*results.crystal_period/2.0/numpy.pi # frequency range normalized
    k = results.crystal_period/numpy.pi
    if wave == "p":
        k *= results.bloch_vector_p
    elif wave == "s":
        k *= results.bloch_vector_s
    else:
        raise ValueError("The wave parameter should be either 'p' or 's'.")
    fig, ax = pyplot.subplots(figsize=size)
    pyplot.xlabel("K*Lambda/pi")
    pyplot.ylabel("omega*Lambda/(2*pi)")
    # Imaginary part mirrored to the left half, real part on the right.
    ax.plot(-numpy.imag(k), omega, linestyle="-", label="Imag")
    ax.plot(numpy.real(k), omega, linestyle="--", label="Real")
    # Vertical separator between the two half-plots.
    ax.plot([0, 0], [0, 1], linestyle="-", c="black", linewidth=0.5)
    # Relabel the negative (imaginary) side with positive tick values.
    _x = numpy.linspace(-1, 1, 11).round(2)
    ax.set_xticks(_x)
    ax.set_xticklabels(numpy.hstack([-_x[:5], _x[5:]]).astype(str))
    pyplot.xlim(xlim)
    pyplot.ylim(ylim)
    pyplot.legend(loc="best")
    return fig, ax | d1c8605e1255669b31f9caf530f3c8669a2e03a6 | 31,258 |
from collections import OrderedDict


def map_constructor(loader, node):
    """
    Constructs a YAML mapping node as an OrderedDict, preserving key order.

    :param loader: YAML loader
    :param node: YAML node
    :return: OrderedDictionary data
    """
    # FIX: import OrderedDict from ``collections`` — the original imported the
    # ``typing`` alias, whose runtime instantiation is deprecated.
    # Resolve YAML merge keys ('<<') before constructing the pairs.
    loader.flatten_mapping(node)
    return OrderedDict(loader.construct_pairs(node))
def index():
    """Home page: redirect to the site's 'hot' view."""
    return redirect(url_for('site.hot'))
def tpr(df, label_column):
    """Measure the true positive rate (recall).

    A row counts as predicted-positive when its ``predictions`` score is
    >= 0.0, and as actually-positive when its label exceeds 0.5.

    :param df: DataFrame with a ``predictions`` column and ``label_column``.
    :param label_column: name of the ground-truth label column.
    :return: true positives / actual positives, as a float.
    """
    # The original named these ``fp``/``ln``, but the quantities are the
    # true-positive count and the count of actual positives.
    true_positives = sum((df['predictions'] >= 0.0) & (df[label_column] > 0.5))
    actual_positives = sum(df[label_column] > 0.5)
    return float(true_positives) / float(actual_positives)
from typing import Optional
def get_pathway_names(
    database: str,
    pathway_df: pd.DataFrame,
    kegg_manager: Optional[bio2bel_kegg.Manager] = None,
    reactome_manager: Optional[bio2bel_reactome.Manager] = None,
    wikipathways_manager: Optional[bio2bel_wikipathways.Manager] = None
):
    """Get pathway names from database specific pathway IDs.

    Adds a 'pathway_name' column to ``pathway_df`` by resolving each entry
    of its 'pathway_id' column through the manager matching ``database``.

    :param database: one of KEGG, REACTOME or WIKIPATHWAYS
    :param pathway_df: dataframe with a 'pathway_id' column
    :param kegg_manager: used when database == KEGG
    :param reactome_manager: used when database == REACTOME
    :param wikipathways_manager: used when database == WIKIPATHWAYS
    :return: the dataframe with the extra column; note that an unrecognized
        ``database`` falls through and returns None implicitly.
    """
    if database == KEGG:
        # KEGG pathway ids need the 'path:' prefix for lookup.
        pathway_df['pathway_name'] = [
            kegg_manager.get_pathway_by_id('path:' + pathway_id)
            for pathway_id in list(pathway_df['pathway_id'])
        ]
        return pathway_df
    elif database == REACTOME:
        pathway_df['pathway_name'] = [
            reactome_manager.get_pathway_by_id(pathway_id)
            for pathway_id in list(pathway_df['pathway_id'])
        ]
        return pathway_df
    elif database == WIKIPATHWAYS:
        pathway_df['pathway_name'] = [
            wikipathways_manager.get_pathway_by_id(pathway_id)
            for pathway_id in list(pathway_df['pathway_id'])
        ]
        return pathway_df | 40397aa26fc90b06f21fe30605ef654b14a98662 | 31,262 |
from pathlib import Path
def gather_rgi_results(rgi_sample_list: [RGIResult], outdir: Path) -> tuple:
    """
    Symlinks RGI result files to a single destination folder -- required for rgi heatmap command
    :param rgi_sample_list: List containing RGIResult object instances
    :param outdir: Destination directory for result files
    :return: Tuple containing paths to (json directory, text directory)
    """
    json_dir = outdir / 'json'
    txt_dir = outdir / 'txt'
    # exist_ok=False: fail loudly rather than mix results into an old run.
    json_dir.mkdir(parents=True, exist_ok=False)
    txt_dir.mkdir(parents=True, exist_ok=False)
    for rgi_sample in rgi_sample_list:
        # Resolve the stored relative paths against MEDIA_ROOT, then link
        # each JSON/text result into the gathered directories by file name.
        src_json_path = Path(MEDIA_ROOT / str(rgi_sample.rgi_main_json_results))
        dst_json_path = Path(json_dir) / Path(str(rgi_sample.rgi_main_json_results)).name
        dst_json_path.symlink_to(src_json_path)
        src_txt_path = Path(MEDIA_ROOT / str(rgi_sample.rgi_main_text_results))
        dst_txt_path = Path(txt_dir) / Path(str(rgi_sample.rgi_main_text_results)).name
        dst_txt_path.symlink_to(src_txt_path)
    return json_dir, txt_dir | 664172d0d6de5619c7f92ba74a5f3673726aedf9 | 31,263 |
from connio.rest.api.v3.account.propertyy import PropertyInstance
def retention(retention):
    """
    Serialize a retention object to retention JSON
    :param retention: PropertyInstance.Retention (the parameter deliberately
        shadows the function name; callers use the module-level reference)
    :return: dict representation of obj, or None for unset/None input
    """
    if retention is values.unset or retention is None:
        return None
    # Only two retention types exist: 'historical' (default) and 'mostrecent'.
    retentionType = 'historical'
    if retention.type == PropertyInstance.Retention.RetentionType.MOSTRECENT:
        retentionType = 'mostrecent'
    return {
        'type': retentionType,
        'context': { 'type': retention.context.type },
        'lifetime': retention.lifetime,
        'capacity': retention.capacity,
        'condition': { 'when': retention.condition.when, 'value': retention.condition.value }
    } | 38762297e80c434ce3e561731850b40137a16fdb | 31,264 |
def get_tool_path(loader, node):
    """yaml tag handler that expands tool names from the module-level
    ``tools`` dict at load time"""
    raw_value = loader.construct_python_str(node)
    return raw_value.format(**tools)
def _get_oath2_access_token(client_key, client_secret):
    """
    Query the vistara API and get an access_token

    Performs an OAuth2 client-credentials grant against the Vistara token
    endpoint. Returns the access token string, or False when credentials
    are missing or the request yields no body.
    (Note: 'oath2' in the name is a long-standing typo for 'oauth2'; kept
    for backwards compatibility.)
    """
    # NOTE(review): this guard only fires when BOTH values are missing
    # (``and``); a single missing credential slips through — confirm intent.
    if not client_key and not client_secret:
        log.error(
            "client_key and client_secret have not been specified "
            "and are required parameters."
        )
        return False
    method = "POST"
    url = "https://api.vistara.io/auth/oauth/token"
    headers = {
        "Content-Type": "application/x-www-form-urlencoded",
        "Accept": "application/json",
    }
    params = {
        "grant_type": "client_credentials",
        "client_id": client_key,
        "client_secret": client_secret,
    }
    resp = salt.utils.http.query(
        url=url, method=method, header_dict=headers, params=params, opts=__opts__
    )
    respbody = resp.get("body", None)
    if not respbody:
        return False
    # Assumes a successful token response; a JSON error body without
    # 'access_token' would raise KeyError here.
    access_token = salt.utils.json.loads(respbody)["access_token"]
    return access_token | 2be67e8305aac64f3cf39517e64efa7659100bf5 | 31,266 |
def sanity_check_dp(A_org, XW, U, L, delta_l, delta_g, check_symmetry=True,
                    activation='linear'):
    """
    Sanity (brute-force) approach for solving min_{A_G^{1+2+3}} F_c(A) + np.sum(A.*L)
    param:
        A_org: original adjacency matrix
        XW: XW
        U: (u_y-u_c)/nG
        L: L
        delta_l: row budgets
        delta_g: global budget
        check_symmetry: If True, optA is symmetric
        activation: 'linear' or 'relu'
    return a dict with keywords:
        opt_A: optimal perturbed matrix
        opt_f: optimal dual objective
        or None when the instance is too large for brute force
        (nG > 6 with delta_g > 2).
    """
    nG = A_org.shape[0]
    if nG > 6 and delta_g > 2:
        # Bug fix: the original fell through to `return sol` with `sol`
        # unbound (NameError); return None explicitly, as documented.
        print("Sanity check only support nG < 7, return None!")
        return None
    if delta_g == 2:
        # Enumerate every symmetric single-edge flip allowed by the row budgets.
        Flip_idx = []
        for row in range(nG):
            for col in range(row+1, nG):
                if delta_l[row] > 0 and delta_l[col] > 0:
                    Flip_idx.append([(row, col), (col, row)])
        minimum = np.inf
        for idx in Flip_idx:
            A = A_org.copy()
            for s in idx:
                A[s] = 1-A[s]
            val = calculate_Fc(A, XW, U, activation) + np.sum(L*A)
            if val < minimum:
                minimum = val
                A_final = A
    else:
        # Exhaustive search over every matrix satisfying the row budgets.
        all_possible_adjacency_matrices = possible_matrix_with_delta_l(A_org, delta_l)
        print('# matrice satisfing delta_l: ', len(all_possible_adjacency_matrices))
        XWU = XW @ U
        minimum = np.inf
        for possible_matrix in all_possible_adjacency_matrices:
            possible_matrix = np.asarray(possible_matrix)
            symmetry = np.allclose(possible_matrix, possible_matrix.T) if check_symmetry else True
            if symmetry and np.sum(np.abs(A_org-possible_matrix)) <= delta_g:
                val = calculate_Fc(possible_matrix, XW, U, activation) + np.sum(L*possible_matrix)
                if val < minimum:
                    minimum = val
                    A_final = possible_matrix
    # NOTE(review): A_final is unbound if no candidate beats np.inf
    # (e.g. empty budgets) -- pre-existing behavior, left unchanged.
    sol = {
        'opt_A': A_final,
        'opt_f': minimum
    }
    return sol
def quadratic_bezier(t, p0, p1, p2):
    """Evaluate a quadratic Bezier curve at parameter t (De Casteljau form).

    Interpolates p0->p1 and p1->p2, then interpolates between those two
    intermediate points. See
    https://en.wikipedia.org/wiki/B%C3%A9zier_curve#Quadratic_B%C3%A9zier_curves
    """
    a = (1 - t) * p0 + t * p1
    b = (1 - t) * p1 + t * p2
    return (1 - t) * a + t * b
def add_musician_genres(musician, genre_list):
    """Add genres to a musician's profile.

    Looks up Genre rows matching the given names, links each to the
    musician, commits once, and returns the new MusicianGenre rows.
    """
    matched = Genre.query.filter(Genre.genre_name.in_(genre_list)).all()
    links = []
    for genre in matched:
        link = MusicianGenre(genre_id=genre.genre_id,
                             musician_id=musician.musician_id)
        links.append(link)
        db.session.add(link)
    db.session.commit()
    return links
def test_eat_exceptions_normal_case():
    """
    If no exceptions, this wrapper should do nothing.
    """
    @utils.eat_exceptions
    def identity(value):
        return value
    assert identity(1) == 1
def delete_system_interface(api_client, interface_id, **kwargs):  # noqa: E501
    """delete_system_interface  # noqa: E501

    Delete System Interface  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> response = await api.delete_system_interface(interface_id, async_req=True)

    :param str interface_id: ID for system interface (required)
    :param async_req bool: execute request asynchronously
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: APIResponse or awaitable if async
    """
    # Capture the call's arguments (including **kwargs such as async_req)
    # before any other locals are bound, so only caller args are recorded.
    local_var_params = locals()

    collection_formats = {}

    # interface_id is substituted into the URL path template below.
    path_params = {"interface_id": interface_id}

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # DELETE request: no request body is sent.
    body_params = None
    # HTTP header `Accept`
    header_params["Accept"] = api_client.select_header_accept(
        ["application/json"]
    )  # noqa: E501

    # Authentication setting
    auth_settings = ["ApiTokenAuth", "basicAuth"]  # noqa: E501

    return api_client.call_api(
        "/interfaces/system/{interface_id}",
        "DELETE",
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type="object",  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get("async_req"),
        _return_http_data_only=local_var_params.get(
            "_return_http_data_only"
        ),  # noqa: E501
        _preload_content=local_var_params.get("_preload_content", True),
        _request_timeout=local_var_params.get("_request_timeout"),
        collection_formats=collection_formats,
    )
def IsPlacementGroupCompatible(machine_type):
    """Returns True if VMs of 'machine_type' can be put in a placement group."""
    family = machine_type.partition('.')[0]
    return family not in NON_PLACEMENT_GROUP_PREFIXES
import inspect
import os
import pathlib
def join_paths(new_Folder, file_Name=False):
    """Return the absolute path of *new_Folder* next to this module.

    If *file_Name* is falsy (the default) the folder path itself is
    returned; otherwise *file_Name* is joined onto the folder path and
    that full path is returned.

    (Docstring translated from Portuguese.)
    """
    this_file = inspect.getfile(inspect.currentframe())
    base_dir = os.path.dirname(os.path.abspath(this_file))
    folder_path = pathlib.PurePath(base_dir).joinpath(new_Folder)
    if file_Name:
        return pathlib.PurePath(folder_path).joinpath(file_Name)
    return folder_path
import binascii
def create_public_key_from_b64(b64Key: bytes) -> X25519PublicKey:
    """Derive an X25519 *public* key from a base64 ASCII string."""
    raw = binascii.a2b_base64(b64Key)
    return X25519PublicKey.from_public_bytes(raw)
from re import T
def im_detect_bbox(model, images, target_scale, target_max_size, device,
                   captions=None,
                   positive_map_label_to_token=None
                   ):
    """
    Performs bbox detection on the original image.

    Args:
        model: detection model, called as model(images, ...).
        images: list of raw input images (pre-transform).
        target_scale: shorter-side target used by the Resize transform.
        target_max_size: longer-side cap used by the Resize transform.
        device: device the model lives on.
        captions: optional grounding captions forwarded to the model.
        positive_map_label_to_token: optional label->token map, forwarded
            to the model as positive_map.

    Returns:
        The model's raw detection output.
    """
    # Bug fix: the original used "cfg.INPUT.FORMAT is not ''" -- identity
    # comparison on a string literal is unreliable (SyntaxWarning on
    # CPython >= 3.8); use != instead. Also give input_format a defined
    # fallback so T.Normalize never sees an unbound name when FORMAT is
    # empty and TO_BGR255 is false.
    input_format = ''
    if cfg.INPUT.FORMAT != '':
        input_format = cfg.INPUT.FORMAT
    elif cfg.INPUT.TO_BGR255:
        input_format = 'bgr255'
    transform = T.Compose([
        T.Resize(target_scale, target_max_size),
        T.ToTensor(),
        T.Normalize(
            mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD, format=input_format
        )
    ])
    images = [transform(image) for image in images]
    images = to_image_list(images, cfg.DATALOADER.SIZE_DIVISIBILITY)
    if captions is None:
        return model(images.to(device))
    return model(images.to(device),
                 captions=captions,
                 positive_map=positive_map_label_to_token
                 )
import warnings
def jmap(g, H, ae0, be0, af0, bf0, max_iter=1000, tol=1e-4, rcond=None, observer=None):
    """Maximum a posteriori estimator for g = H @ f + e

    p(g | f) = normal(H f, ve I)
    p(ve) = inverse_gauss(ae0, be0)
    p(f | vf) = normal(0, vf I)
    p(vf) = inverse_gauss(af0, bf0)
    JMAP: maximizes p(f,ve,vf|g) = p(g | f) p(f | vf) p(ve) p(vf) / p(g)
    with respect to f, ve and vf
    Original Author: Ali Mohammad-Djafari, April 2015
    Args:
        g: observation vector.
        H: forward matrix, shape (n_features, n_samples).
        ae0: prior shape parameter for the noise variance ve.
        be0: prior scale parameter for the noise variance ve.
        af0: prior shape parameter for the signal variance vf.
        bf0: prior scale parameter for the signal variance vf.
        max_iter: maximum number of alternating updates.
        tol: convergence tolerance forwarded to _converged.
        rcond: cutoff ratio forwarded to np.linalg.lstsq.
        observer: optional callback invoked as observer(fh, vf, ve)
            after each iteration.
    Returns:
        Tuple (fh, vf, ve, sigma): the estimate of f, per-sample signal
        variances, per-observation noise variances, and the inverse of
        the final regularized normal matrix (posterior covariance).
    """
    # NOTE(review): eps, _converged and ConvergenceWarning are defined
    # elsewhere in this module.
    n_features, n_samples = H.shape
    HtH = H.T @ H
    Htg = H.T @ g
    # Initial point: ridge solution with lambda = ve0 / vf0 from the priors.
    ve0 = be0 / ae0
    vf0 = bf0 / af0
    lambda_ = ve0 / vf0
    fh, *_ = np.linalg.lstsq(HtH + lambda_ * np.eye(n_samples, n_samples), Htg, rcond=rcond)
    fhs = [fh]
    for _ in range(max_iter):
        dg = g - H @ fh
        # Update of the per-observation noise-variance parameters.
        ae = ae0 + 0.5
        be = be0 + 0.5 * dg ** 2
        ve = be / ae + eps
        iVe = np.diag(1 / ve)
        # Update of the per-sample signal-variance parameters.
        af = af0 + 0.5
        bf = bf0 + 0.5 * fh ** 2
        vf = bf / af + eps
        iVf = np.diag(1.0 / vf)
        # Re-estimate f from the variance-weighted normal equations.
        HR = H.T @ iVe @ H + iVf
        fh, *_ = np.linalg.lstsq(HR, H.T @ iVe @ g, rcond=rcond)
        fhs.append(fh)
        if observer is not None:
            observer(fh, vf, ve)
        if _converged(fhs, tol=tol):
            break
    else:
        # for/else: only warns when the loop exhausted max_iter without break.
        warnings.warn(f"jmap did not converge after {max_iter} iterations.", ConvergenceWarning)
    # sigma = np.diag(np.diag(np.linalg.inv(HR)))
    sigma = np.linalg.inv(HR)
    return fh, vf, ve, sigma
def get_chats(im):
    """Return the chat messages contained in *im*.

    Arguments:
        im (PIL.Image.Image): screenshot to scan.

    Return:
        Image object list (PIL.Image.Image), most recent first —
        index 0 is the bottom-most (latest) message.
    """
    return get_chat_msg(im)
def state_field(value):
    """Fetch the pagination state field from flask.request.args.

    :returns: list of the state(s)
    """
    # assert is deliberate here: callers rely on AssertionError for
    # invalid input, so the exception type must not change.
    valid_states = istate.States.all()
    requested = value.split(',')
    bad = [state for state in requested if state not in valid_states]
    assert not bad, \
        _('State(s) "%s" are not valid') % ', '.join(bad)
    return requested
import torch
def cal_smoothness_orig(var1_orig, var2_orig, var3_orig, io, args):
    """
    Input:
        var1_orig, var2_orig, var3_orig: scalar tensors, original variances
            on the 3 principal orientations
    Return: scalar, original smoothness of this region (linearity /
        planarity / scattering, depending on args.mode)
    """
    with torch.no_grad():
        s_min, s_mid, s_max = sort_var(var1_orig, var2_orig, var3_orig)
        if args.mode == "linearity":
            smoothness = (s_max - s_mid) / s_max
        elif args.mode == "planarity":
            smoothness = (s_mid - s_min) / s_max
        else:
            # args.mode == "scattering"
            smoothness = s_min / s_max
    io.cprint("orig %s: %.8f" % (args.mode, smoothness))
    return smoothness.cpu().item()
def format_size(size):
    """Format a byte count in a human-friendly way.

    :param int size: size to format in bytes
    :return: formatted size with an SI prefix ('k', 'M', 'G', 'T') and
        unit ('B')
    :rtype: str
    """
    if abs(size) < 1000:
        return str(size) + 'B'
    for prefix in ('k', 'M', 'G'):
        size /= 1000
        if abs(size) < 1000:
            return SIZE_FORMAT.format(size, prefix)
    # Anything still >= 1000G is reported in terabytes.
    return SIZE_FORMAT.format(size / 1000, 'T')
def get_acl_permission(acl, complete_acl_list):
    """
    Vectorized lookup of an ACL in the complete ACL table.

    Matches *acl* against column 0 of *complete_acl_list* and returns
    that row's columns 1 and 2 (permission, description), or
    ("Unknown", "Unknown") when no row matches.
    """
    matches = np.flatnonzero(complete_acl_list[:, 0] == acl)
    if matches.size == 0:
        return "Unknown", "Unknown"
    row = complete_acl_list[matches[0]]
    return row[1], row[2]
import torch
def interface_script(mod_interface, nn_module):
    """
    Makes a ScriptModule from an nn.Module, compiling exactly the methods
    declared on the given module interface type.

    Args:
        mod_interface: the interface type that the module must satisfy.
        nn_module: The original Python nn.Module that we are creating a
            ScriptModule for.
    """
    if isinstance(nn_module, torch.jit.ScriptModule):
        return nn_module

    check_module_initialized(nn_module)

    def stubs_from_interface(nn_module):
        # The interface's declared methods are the compilation entry points.
        return [
            make_stub_from_method(nn_module, name)
            for name in mod_interface.getMethodNames()
        ]

    return create_script_module(nn_module, stubs_from_interface)
def create_call_error(message: str) -> str:
    """Create a serialized CallError from a serialized Call.

    Raises ValueError if *message* does not deserialize to a Call.
    CallResult and CallError messages don't require a response.
    """
    call = unpack(message)
    if not isinstance(call, Call):
        raise ValueError("message is not type Call")
    return call.create_call_error(None).to_json()
def filter_boxes(min_score, boxes, scores, classes):
    """Return boxes, scores and classes with a confidence >= `min_score`.

    Idiom fix: the original built an index list with a Python loop;
    a vectorized boolean mask selects the same rows in one pass.
    """
    keep = scores >= min_score
    filtered_boxes = boxes[keep, ...]
    filtered_scores = scores[keep, ...]
    filtered_classes = classes[keep, ...]
    return filtered_boxes, filtered_scores, filtered_classes
def recenter_image(im):
    """Shift a 2-D image so its center of mass sits in the middle (rows only).

    Crops a window of rows around the center of mass and writes it back,
    centered, into *im* (modified in place and returned). Images whose
    center of mass is NaN (all-zero input) are returned unchanged.
    """
    n_height, n_width = im.shape
    com = nd.center_of_mass(im)
    if any(np.isnan(com)):
        return im
    # Bug fix: com values are floats, and float slice indices raise
    # TypeError on modern NumPy; truncate toward zero with int(), which
    # matches the legacy implicit truncation this code relied on.
    top = int(com[0] - n_height / 2)
    bottom = int(com[0] + n_height / 2)
    im_center = im[top:bottom]
    # NOTE(review): im_center is only sliced along axis 0, so offset[1]
    # is always 0 -- pre-existing behavior, left unchanged.
    offset = [(n_height - im_center.shape[0]), (n_width - im_center.shape[1])]
    h_odd = 1 if offset[0] % 2 > 0 else 0
    w_odd = 1 if offset[1] % 2 > 0 else 0
    # Bug fix: offset[0]/2 was float division (float slice index on
    # Python 3); // keeps the original integer-truncation semantics.
    im[offset[0] // 2:n_height - offset[0] // 2 - h_odd,
       offset[1] // 2:n_width - offset[1] // 2 - w_odd] = im_center
    return im
from typing import Counter
def cal_participate_num(course: Course) -> Counter:
    """
    Count how many of this course's ended activities each member of the
    course's organization attended.

    Returns a mapping {NaturalPerson.id: participation count}; the
    frontend can read the dict values directly.

    (Docstring translated from Chinese.)
    """
    org = course.organization
    # Ended course-category activities belonging to this organization.
    activities = Activity.objects.activated().filter(
        organization_id=org,
        status=Activity.Status.END,
        category=Activity.ActivityCategory.COURSE,
    )
    # Only group members (students holding an active position) can earn hours.
    members = Position.objects.activated().filter(
        pos__gte=1,
        person__identity=NaturalPerson.Identity.STUDENT,
        org=org,
    ).values_list("person", flat=True)
    # One row per attended (activity, member) pair; Counter tallies per person.
    all_participants = (
        Participant.objects.activated(no_unattend=True)
        .filter(activity_id__in=activities, person_id_id__in=members)
    ).values_list("person_id", flat=True)
    participate_num = dict(Counter(all_participants))
    # Members who never attended get an explicit count of 0.
    participate_num.update({id: 0 for id in members if id not in participate_num})
    return participate_num
def plot(
    self,
    fig=None,
    ax=None,
    is_lam_only=False,
    sym=1,
    alpha=0,
    delta=0,
    is_edge_only=False,
    edgecolor=None,
    is_add_arrow=False,
    is_display=True,
    is_show_fig=True,
):
    """Plot the Lamination with empty Slots in a matplotlib fig

    Parameters
    ----------
    self : LamSlot
        A LamSlot object
    fig : Matplotlib.figure.Figure
        existing figure to use if None create a new one
    ax : Matplotlib.axes.Axes object
        Axis on which to plot the data
    is_lam_only: bool
        True to plot only the lamination (No effect for LamSlot)
    sym : int
        Symmetry factor (1= full machine, 2= half of the machine...)
    alpha : float
        Angle for rotation [rad]
    delta : complex
        Complex value for translation
    is_edge_only: bool
        To plot transparent Patches
    edgecolor:
        Color of the edges if is_edge_only=True
    is_add_arrow : bool
        Currently unused in this method
    is_display : bool
        False to return the patches
    is_show_fig : bool
        To call show at the end of the method

    Returns
    -------
    patches : list
        List of Patches
    or
    fig : Matplotlib.figure.Figure
        Figure containing the plot
    ax : Matplotlib.axes.Axes object
        Axis containing the plot
    """
    # Pick the fill color according to the lamination's role.
    if self.is_stator:
        lam_color = STATOR_COLOR
    else:
        lam_color = ROTOR_COLOR

    (fig, ax, patch_leg, label_leg) = init_fig(fig=fig, ax=ax, shape="rectangle")

    # Build the surfaces for the requested symmetry / rotation / translation.
    surf_list = self.build_geometry(sym=sym, alpha=alpha, delta=delta)
    patches = list()
    for surf in surf_list:
        if "Lamination" in surf.label:
            patches.extend(
                surf.get_patches(
                    color=lam_color, is_edge_only=is_edge_only, edgecolor=edgecolor
                )
            )
        else:
            patches.extend(
                surf.get_patches(is_edge_only=is_edge_only, edgecolor=edgecolor)
            )
    # Display the result
    if is_display:
        ax.set_xlabel("(m)")
        ax.set_ylabel("(m)")
        for patch in patches:
            ax.add_patch(patch)

        # Axis Setup
        ax.axis("equal")

        # The Lamination is centered in the figure
        Lim = self.Rext * 1.5
        ax.set_xlim(-Lim, Lim)
        ax.set_ylim(-Lim, Lim)

        # Add the legend
        if not is_edge_only:
            if self.is_stator and "Stator" not in label_leg:
                patch_leg.append(Patch(color=STATOR_COLOR))
                label_leg.append("Stator")
                ax.set_title("Stator with empty slot")
            elif not self.is_stator and "Rotor" not in label_leg:
                patch_leg.append(Patch(color=ROTOR_COLOR))
                label_leg.append("Rotor")
                ax.set_title("Rotor with empty slot")

            ax.legend(patch_leg, label_leg)
        if is_show_fig:
            fig.show()
        return fig, ax
    else:
        # Caller asked for raw patches instead of drawing them.
        return patches
def usage_percentage(usage, limit):
    """Return usage as a percentage string like ``(42%)``, or ``""`` when limit is 0."""
    if limit == 0:
        return ""
    return f"({usage / limit:.0%})"
def all(numbered=False):
    """
    Get all included stanzas.

    Takes optional argument numbered.
    Returns a dict of {1..165: stanza} if numbered=True, else returns the list.
    """
    # NOTE: this deliberately shadows the builtin `all` within this module.
    if numbered:
        return dict(zip(range(1, 166), stanzas))
    return stanzas
from tensorflow.python.ops import math_ops
from tensorflow.python.framework import ops
def cosine_decay(learning_rate, global_step, maximum_steps,
                 name=None):
    """Apply a cyclic cosine decay to the learning rate.

    The phase p = (global_step / maximum_steps) mod 1 restarts every
    `maximum_steps` steps, so the schedule is a repeating half-cosine
    from `learning_rate` down to 0.

    Args:
        learning_rate: scalar initial learning rate (tensor or number).
        global_step: current training step; required.
        maximum_steps: period of one cosine cycle, in steps.
        name: optional name scope for the created ops.

    Returns:
        A scalar tensor holding the decayed learning rate.

    Raises:
        ValueError: if global_step is None.
    """
    if global_step is None:
        raise ValueError("global_step is required for cosine_decay.")
    with ops.name_scope(name, "CosineDecay",
                        [learning_rate, global_step, maximum_steps]) as name:
        learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
        dtype = learning_rate.dtype
        global_step = math_ops.cast(global_step, dtype)
        maximum_steps = math_ops.cast(maximum_steps, dtype)
        # NOTE(review): relies on file-level `tf` and `np` imports that are
        # not visible in this chunk -- confirm they exist.
        p = tf.mod(global_step / maximum_steps, 1)
        return learning_rate * (0.5 + 0.5 * math_ops.cos(p * np.pi))
def parse_file_header_64(bytes):
    """Parse the ELF file header.

    Decodes the e_ident identification block plus every fixed-offset field
    of a 64-bit (ELFCLASS64) ELF header from the raw file bytes.

    :param bytes: raw file contents (parameter shadows the builtin name).
    :return: dict with the detected byte order and all e_* header fields,
        each as returned by get_bytes.
    """
    # e_ident: identification bytes at the start of the file.
    e_ident = {}
    e_ident['EI_CLASS'] = get_bytes(bytes, 4)
    e_ident['EI_DATA'] = get_bytes(bytes, 5)
    endian = get_byte_order(e_ident['EI_DATA'])
    e_ident['EI_VERSION'] = get_bytes(bytes, 6)
    e_ident['EI_OSABI'] = get_bytes(bytes, 7)
    if e_ident['EI_OSABI'] == b'\x03':
        # Linux uses the EI_ABIVERSION
        e_ident['EI_ABIVERSION'] = get_bytes(bytes, 8)
        e_ident['EI_PAD'] = get_bytes(bytes, 9, 7)
    else:
        # EI_PAD takes the full 8 bytes
        e_ident['EI_PAD'] = get_bytes(bytes, 8, 8)
    # Fixed offsets/sizes for the 64-bit ELF header layout.
    e_type = get_bytes(bytes, 16, 2)
    e_machine = get_bytes(bytes, 18, 2)
    e_version = get_bytes(bytes, 20, 4)
    e_entry = get_bytes(bytes, 24, 8)
    e_phoff = get_bytes(bytes, 32, 8)
    e_shoff = get_bytes(bytes, 40, 8)
    e_flags = get_bytes(bytes, 48, 4)
    e_ehsize = get_bytes(bytes, 52, 2)
    e_phentsize = get_bytes(bytes, 54, 2)
    e_phnum = get_bytes(bytes, 56, 2)
    e_shentsize = get_bytes(bytes, 58, 2)
    e_shnum = get_bytes(bytes, 60, 2)
    e_shstrndx = get_bytes(bytes, 62, 2)
    return {'endian': endian,
            'e_ident': e_ident,
            'e_type': e_type,
            'e_machine': e_machine,
            'e_version': e_version,
            'e_entry': e_entry,
            'e_phoff': e_phoff,
            'e_shoff': e_shoff,
            'e_flags': e_flags,
            'e_ehsize': e_ehsize,
            'e_phentsize': e_phentsize,
            'e_phnum': e_phnum,
            'e_shentsize': e_shentsize,
            'e_shnum': e_shnum,
            'e_shstrndx': e_shstrndx,
            }
def _or (*args):
"""Helper function to return its parameters or-ed
together and bracketed, ready for a SQL statement.
eg,
_or ("x=1", _and ("a=2", "b=3")) => "(x=1 OR (a=2 AND b=3))"
"""
return " OR ".join (args) | 1162600b49acb57e3348e6281767ce2fb0118984 | 31,292 |
from typing import Dict
def strip_empty_values(values: Dict) -> Dict:
    """Remove any dict items with empty or ``None`` values.

    False, 0 and 0.0 are kept; None, "", and empty containers are dropped.
    """
    kept = {}
    for key, val in values.items():
        if val or val in (False, 0, 0.0):
            kept[key] = val
    return kept
import torch
def dispnet(path=None, batch_norm=True):
    """Construct a DispNet model, optionally loading pretrained weights.

    Args:
        path : checkpoint to load; a fresh model is returned when None.
        batch_norm : whether the network uses batch normalization.
    """
    model = DispNet(batch_norm=batch_norm)
    if path is not None:
        checkpoint = torch.load(path)
        # Checkpoints may wrap the weights under a 'state_dict' key.
        state = checkpoint['state_dict'] if 'state_dict' in checkpoint else checkpoint
        model.load_state_dict(state)
    return model
import os
def add_model_components(m, d, scenario_directory, subproblem, stage):
    """
    The following Pyomo model components are defined in this module:

    +-------------------------------------------------------------------------+
    | Expressions                                                             |
    +=========================================================================+
    | | :code:`Transmit_Power_MW`                                             |
    | | *Defined over*: :code:`TX_OPR_TMPS`                                   |
    |                                                                         |
    | The power in MW sent on a transmission line (before losses).            |
    | A positive number means the power flows in the line's defined direction,|
    | while a negative number means it flows in the opposite direction.       |
    +-------------------------------------------------------------------------+
    | | :code:`Transmit_Power_MW`                                             |
    | | *Defined over*: :code:`TX_OPR_TMPS`                                   |
    |                                                                         |
    | The power in MW received via a transmission line (after losses).        |
    | A positive number means the power flows in the line's defined direction,|
    | while a negative number means it flows in the opposite direction.       |
    +-------------------------------------------------------------------------+
    | | :code:`Tx_Losses_MW`                                                  |
    | | *Defined over*: :code:`TX_OPR_TMPS`                                   |
    |                                                                         |
    | Losses on the transmission line in MW. A positive number means the      |
    | power flows in the line's defined direction when losses incurred,       |
    | while a negative number means it flows in the opposite direction.       |
    +-------------------------------------------------------------------------+

    :param m: the Pyomo abstract model being built
    :param d: dynamic components class (not used in this function)
    :param scenario_directory: scenario root holding the inputs directory
    :param subproblem: subproblem directory name
    :param stage: stage directory name
    """
    # Dynamic Inputs
    ###########################################################################

    # Read which operational type each transmission line uses.
    df = pd.read_csv(
        os.path.join(scenario_directory, str(subproblem), str(stage), "inputs",
                     "transmission_lines.tab"),
        sep="\t",
        usecols=["TRANSMISSION_LINES", "tx_capacity_type",
                 "tx_operational_type"]
    )

    required_tx_operational_modules = df.tx_operational_type.unique()

    # Import needed transmission operational type modules
    imported_tx_operational_modules = load_tx_operational_type_modules(
        required_tx_operational_modules
    )

    # TODO: should we add the module specific components here or in
    #  operational_types/__init__.py? Doing it in __init__.py to be consistent
    #  with projects/operations/power.py

    # Expressions
    ###########################################################################

    # Each rule dispatches to the line's operational-type module.
    def transmit_power_rule(mod, tx, tmp):
        tx_op_type = mod.tx_operational_type[tx]
        return imported_tx_operational_modules[tx_op_type].\
            transmit_power_rule(mod, tx, tmp)

    m.Transmit_Power_MW = Expression(
        m.TX_OPR_TMPS,
        rule=transmit_power_rule
    )

    def transmit_power_losses_lz_from_rule(mod, tx, tmp):
        tx_op_type = mod.tx_operational_type[tx]
        return imported_tx_operational_modules[tx_op_type].\
            transmit_power_losses_lz_from_rule(mod, tx, tmp)

    m.Tx_Losses_LZ_From_MW = Expression(
        m.TX_OPR_TMPS,
        rule=transmit_power_losses_lz_from_rule
    )

    def transmit_power_losses_lz_to_rule(mod, tx, tmp):
        tx_op_type = mod.tx_operational_type[tx]
        return imported_tx_operational_modules[tx_op_type].\
            transmit_power_losses_lz_to_rule(mod, tx, tmp)

    m.Tx_Losses_LZ_To_MW = Expression(
        m.TX_OPR_TMPS,
        rule=transmit_power_losses_lz_to_rule
    )
def apigw_required(view_func):
    """Decorator: validate the API-gateway JWT before running the view."""
    @wraps(view_func, assigned=available_attrs(view_func))
    def _wrapped_view(request, *args, **kwargs):
        request.jwt = JWTClient(request)
        if request.jwt.is_valid:
            return view_func(request, *args, **kwargs)
        return jwt_invalid_view(request)
    return _wrapped_view
def text_analysis(string: str, *, nlp) -> str:
    """Return a text analysed string.

    Per sentence: drop tokens shorter than 3 characters, punctuation and
    stop words; lemmatize the rest; replace numeric lemmas with
    '<NUMERIC>'. Post-analysis sentences are separated by <sent> tags,
    e.g. 'a sentence<sent>a second sentence<sent>a third'.

    see https://spacy.io/usage/rule-based-matching#adding-patterns-attributes
    """
    sents = []
    doc = nlp(string)
    for sent in doc.sents:
        # Idiom fix: single filtering pass instead of three list rebuilds,
        # and `not token.is_punct` instead of `token.is_punct == False`.
        tokens = [
            token for token in sent
            if len(token) >= 3 and not token.is_punct and not token.is_stop
        ]
        lemmas = [token.lemma_ for token in tokens]
        # convert numeric lemmas to '<NUMERIC>'
        lemmas = ['<NUMERIC>' if contains_numeric(lemma) else lemma for lemma in lemmas]
        sents.append(" ".join(lemmas))
    return "<sent>".join(sents)
def wtr_tens(P, T):
    """Calculate gas-water interfacial tension in dynes/cm.

    P : pressure, psia
    T : temperature, deg F

    Linearly interpolates between the 74 F and 280 F correlations for
    intermediate temperatures, and clamps the result to a minimum of 1.
    """
    sigma_74 = 75 - 1.108 * P ** 0.349
    sigma_280 = 53 - 0.1048 * P ** 0.637
    if T <= 74:
        sigma = sigma_74
    elif T >= 280:
        sigma = sigma_280
    else:
        sigma = sigma_74 - (T - 74) * (sigma_74 - sigma_280) / 206
    return max(sigma, 1)
from typing import List
def _decompose_move(event: MoveElements) -> List[MoveElements]:
    """
    Break a move event into the list of MoveElements events representing
    the same action. A move is currently atomic, so the list holds just
    the event itself.

    :param event: event to decompose
    :return: single-element list representing the same action
    """
    return [event]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.