content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def sample_categorical(pmf):
    """Sample from a categorical distribution.

    Args:
        pmf: Probability mass function. Output of a softmax over categories.
            Array of shape [batch_size, number of categories]. Rows sum to 1.
            A 1D array is treated as a single-row batch.
    Returns:
        idxs: Integer array of shape [batch_size, 1] with the sampled
            category index for each row.
    """
    if pmf.ndim == 1:
        pmf = np.expand_dims(pmf, 0)
    batch_size = pmf.shape[0]
    cdf = np.cumsum(pmf, axis=1)
    rand_vals = np.random.rand(batch_size)
    # Inverse-CDF sampling. The array is integer-typed as documented;
    # np.zeros without a dtype previously produced floats silently.
    idxs = np.empty([batch_size, 1], dtype=np.int64)
    for i in range(batch_size):
        idxs[i] = cdf[i].searchsorted(rand_vals[i])
    return idxs
def make_Dex_3D(dL, shape, bloch_x=0.0):
    """ Forward derivative in x

    Builds a sparse forward-difference operator along x for a field
    flattened from an (Nx, Ny, Nz) grid, with Bloch-periodic boundaries.

    Args:
        dL: grid spacing; the operator is scaled by 1/dL.
        shape: (Nx, Ny, Nz) grid dimensions.
        bloch_x: Bloch phase across the x boundary (radians).
    Returns:
        scipy sparse matrix of shape (Nx*Ny*Nz, Nx*Ny*Nz).
    """
    Nx, Ny , Nz= shape
    # Phase factor applied to the wrap-around (periodic) entries.
    phasor_x = np.exp(1j * bloch_x)
    # Offset Nz*Ny steps one cell forward in x (x is the slowest axis of the
    # flattened field); the negative offset wraps the boundary with the phasor.
    Dex = sp.diags([-1, 1, phasor_x], [0, Nz*Ny, -Nx*Ny*Nz+Nz*Ny], shape=(Nx*Ny*Nz, Nx*Ny*Nz))
    # NOTE(review): sp.kron(sp.eye(1), Dex) is a no-op — presumably a remnant
    # of a multi-block layout; confirm before simplifying.
    Dex = 1 / dL * sp.kron(sp.eye(1),Dex)
    return Dex
def feature_decoder(proto_bytes):
    """Deserializes the ``ProtoFeature`` bytes into Python.

    Args:
        proto_bytes (bytes): The ProtoBuf encoded bytes of the ProtoBuf class.

    Returns:
        :class:`~geopyspark.vector_pipe.Feature`
    """
    # Parse the wire bytes and convert the protobuf message in one step.
    return from_pb_feature(ProtoFeature.FromString(proto_bytes))
async def calculate_board_fitness_report(
    board: list, zone_height: int, zone_length: int
) -> tuple:
    """Calculate Board Fitness Report

    This function uses the general solver functions api to calculate and return all the different collisions on a given board array
    representation.

    Args:
        board (list): A full filled board representation.
        zone_height (int): The zones height.
        zone_length (int): The zones length.
    Returns:
        int: Total collisions on the board.
        int: Total collisions on the board columns.
        int: Total collisions on the board rows.
        int: Total collisions on the board zones.
    """
    body = {"zoneHeight": zone_height, "zoneLength": zone_length, "board": board}
    # Endpoint comes from the environment; raises KeyError when unset.
    url = str(environ["FITNESS_REPORT_SCORE_LINK"])
    response_body = dict()
    # NOTE(review): `api_key` is a module-level global — confirm it is
    # initialized before this coroutine is awaited.
    headers = {"Authorization": api_key, "Content-Type": "application/json"}
    async with ClientSession(headers=headers) as session:
        async with session.post(url=url, json=body) as response:
            response_body = await response.json()
    return (
        response_body["totalCollisions"],
        response_body["columnCollisions"],
        response_body["rowCollisions"],
        response_body["zoneCollisions"],
    )
def quote_fqident(s):
    """Quote fully qualified SQL identifier.

    The '.' is taken as namespace separator and
    all parts are quoted separately

    Example:
    >>> quote_fqident('tbl')
    'public.tbl'
    >>> quote_fqident('Baz.Foo.Bar')
    '"Baz"."Foo.Bar"'
    """
    parts = s.split('.', 1)
    if len(parts) < 2:
        # No explicit schema: default to the public namespace.
        return 'public.' + quote_ident(s)
    return quote_ident(parts[0]) + '.' + quote_ident(parts[1])
def row_generator(x, H, W, C):
    """Returns a single entry in the generated dataset.

    Return a bunch of random values as an example."""
    # Random uint8 frame of the requested (H, W, C) dimensions.
    frame = np.random.randint(0, 10, dtype=np.uint8, size=(H, W, C))
    return {'frame_id': x, 'frame_data': frame}
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    Args:
        output: Tensor of shape (batch, num_classes) with class scores.
        target: Tensor of shape (batch,) with ground-truth class indices.
        topk: Iterable of k values to report.

    Returns:
        List of tensors, one per k, holding top-k accuracy in percent.
    """
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        # reshape() instead of view(): slices of the transposed prediction
        # matrix can be non-contiguous, and view() raises a RuntimeError on
        # non-contiguous tensors in modern PyTorch.
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
def define_model(input_shape, output_shape, FLAGS):
    """
    Define the model along with the TensorBoard summaries

    Builds a U-Net style encoder/decoder in TF1 graph mode (Keras layers
    on tf.placeholder tensors) and returns a dict with the placeholders,
    output, losses and metrics. FLAGS.use_upsampling selects UpSampling2D
    over Conv2DTranspose in the decoder.
    """
    data_format = "channels_last"
    concat_axis = -1
    n_cl_out = 1  # Number of output classes
    dropout = 0.2   # Percentage of dropout for network layers
    # NOTE(review): num_datapoints is computed but never used.
    num_datapoints = input_shape[0]
    imgs = tf.placeholder(tf.float32,
                          shape=([None] + list(input_shape[1:])))
    msks = tf.placeholder(tf.float32,
                          shape=([None] + list(output_shape[1:])))
    inputs = K.layers.Input(tensor=imgs, name="Images")
    # Shared conv / transposed-conv hyperparameters.
    params = dict(kernel_size=(3, 3), activation="relu",
                  padding="same", data_format=data_format,
                  kernel_initializer="he_uniform")
    trans_params = dict(kernel_size=(2, 2), strides=(2, 2),
                        data_format=data_format,
                        kernel_initializer="he_uniform",
                        padding="same")
    # ---- Encoder (contracting path) ----
    conv1 = K.layers.Conv2D(name="conv1a", filters=32, **params)(inputs)
    conv1 = K.layers.Conv2D(name="conv1b", filters=32, **params)(conv1)
    pool1 = K.layers.MaxPooling2D(name="pool1", pool_size=(2, 2))(conv1)
    conv2 = K.layers.Conv2D(name="conv2a", filters=64, **params)(pool1)
    conv2 = K.layers.Conv2D(name="conv2b", filters=64, **params)(conv2)
    pool2 = K.layers.MaxPooling2D(name="pool2", pool_size=(2, 2))(conv2)
    conv3 = K.layers.Conv2D(name="conv3a", filters=128, **params)(pool2)
    # Trying dropout layers earlier on, as indicated in the paper
    conv3 = K.layers.Dropout(dropout)(conv3)
    conv3 = K.layers.Conv2D(name="conv3b", filters=128, **params)(conv3)
    pool3 = K.layers.MaxPooling2D(name="pool3", pool_size=(2, 2))(conv3)
    conv4 = K.layers.Conv2D(name="conv4a", filters=256, **params)(pool3)
    # Trying dropout layers earlier on, as indicated in the paper
    conv4 = K.layers.Dropout(dropout)(conv4)
    conv4 = K.layers.Conv2D(name="conv4b", filters=256, **params)(conv4)
    pool4 = K.layers.MaxPooling2D(name="pool4", pool_size=(2, 2))(conv4)
    # ---- Bottleneck ----
    conv5 = K.layers.Conv2D(name="conv5a", filters=512, **params)(pool4)
    conv5 = K.layers.Conv2D(name="conv5b", filters=512, **params)(conv5)
    # ---- Decoder (expanding path) with skip connections ----
    if FLAGS.use_upsampling:
        up = K.layers.UpSampling2D(name="up6", size=(2, 2))(conv5)
    else:
        up = K.layers.Conv2DTranspose(name="transConv6", filters=256,
                                      **trans_params)(conv5)
    up6 = K.layers.concatenate([up, conv4], axis=concat_axis)
    conv6 = K.layers.Conv2D(name="conv6a", filters=256, **params)(up6)
    conv6 = K.layers.Conv2D(name="conv6b", filters=256, **params)(conv6)
    if FLAGS.use_upsampling:
        up = K.layers.UpSampling2D(name="up7", size=(2, 2))(conv6)
    else:
        up = K.layers.Conv2DTranspose(name="transConv7", filters=128,
                                      **trans_params)(conv6)
    up7 = K.layers.concatenate([up, conv3], axis=concat_axis)
    conv7 = K.layers.Conv2D(name="conv7a", filters=128, **params)(up7)
    conv7 = K.layers.Conv2D(name="conv7b", filters=128, **params)(conv7)
    if FLAGS.use_upsampling:
        up = K.layers.UpSampling2D(name="up8", size=(2, 2))(conv7)
    else:
        up = K.layers.Conv2DTranspose(name="transConv8", filters=64,
                                      **trans_params)(conv7)
    up8 = K.layers.concatenate([up, conv2], axis=concat_axis)
    conv8 = K.layers.Conv2D(name="conv8a", filters=64, **params)(up8)
    conv8 = K.layers.Conv2D(name="conv8b", filters=64, **params)(conv8)
    if FLAGS.use_upsampling:
        up = K.layers.UpSampling2D(name="up9", size=(2, 2))(conv8)
    else:
        up = K.layers.Conv2DTranspose(name="transConv9", filters=32,
                                      **trans_params)(conv8)
    up9 = K.layers.concatenate([up, conv1], axis=concat_axis)
    conv9 = K.layers.Conv2D(name="conv9a", filters=32, **params)(up9)
    conv9 = K.layers.Conv2D(name="conv9b", filters=32, **params)(conv9)
    # 1x1 conv + sigmoid produces the per-pixel mask probability.
    predictionMask = K.layers.Conv2D(name="Mask", filters=n_cl_out,
                                     kernel_size=(1, 1),
                                     data_format=data_format,
                                     activation="sigmoid")(conv9)
    """
    Define the variables, losses, and metrics
    We"ll return these as a dictionary called "model"
    """
    model = {}
    model["input"] = imgs
    model["label"] = msks
    model["output"] = predictionMask
    model["loss"] = dice_coef_loss(msks, predictionMask)
    model["metric_dice"] = dice_coef(msks, predictionMask)
    model["metric_sensitivity"] = sensitivity(msks, predictionMask)
    model["metric_specificity"] = specificity(msks, predictionMask)
    # Test-time entries reuse the same tensors as the training metrics.
    model["metric_dice_test"] = dice_coef(msks, predictionMask)
    model["loss_test"] = dice_coef_loss(msks, predictionMask)
    model["metric_sensitivity_test"] = sensitivity(msks, predictionMask)
    model["metric_specificity_test"] = specificity(msks, predictionMask)
    """
    Summaries for TensorBoard
    """
    tf.summary.scalar("loss", model["loss"])
    tf.summary.histogram("loss", model["loss"])
    tf.summary.scalar("dice", model["metric_dice"])
    tf.summary.histogram("dice", model["metric_dice"])
    tf.summary.scalar("sensitivity", model["metric_sensitivity"])
    tf.summary.histogram("sensitivity", model["metric_sensitivity"])
    tf.summary.scalar("specificity", model["metric_specificity"])
    tf.summary.histogram("specificity", model["metric_specificity"])
    tf.summary.image("predictions", predictionMask, max_outputs=3)
    tf.summary.image("ground_truth", msks, max_outputs=3)
    tf.summary.image("images", imgs, max_outputs=3)
    # NOTE(review): summary_op is created but not returned — callers
    # presumably call tf.summary.merge_all() themselves; confirm.
    summary_op = tf.summary.merge_all()
    return model
import re
import collections
def group_files(config_files, group_regex, group_alias="\\1"):
    """group input files by regular expression"""
    pattern = re.compile(group_regex)
    for key, files in list(config_files.items()):
        if not isinstance(files, list):
            # Non-list entries pass through untouched.
            continue
        grouped = collections.defaultdict(list)
        leftover = []
        for filename in sorted(files):
            match = pattern.search(filename)
            if match is None:
                leftover.append(filename)
            else:
                grouped[match.expand(group_alias)].append(filename)
        if len(leftover) == len(files):
            # Nothing matched at all: leave the entry as-is.
            continue
        if leftover:
            # Partial matches are ambiguous — refuse to group.
            raise ValueError(
                "input files not matching regular expression {}: {}"
                .format(group_regex, str(leftover)))
        config_files[key] = [{name: members}
                             for name, members in list(grouped.items())]
    return config_files
def get_current_icmp_seq():
    """See help(scapy.arch.windows.native) for more information.

    Returns the current ICMP seq number."""
    # Windows keeps a running count of outgoing echo requests; read it from
    # the system ICMP statistics.
    return GetIcmpStatistics()['stats']['icmpOutStats']['dwEchos']
def text_mocked_request(data: str, **kwargs) -> web.Request:
    """For testing purposes: build a mocked text/plain request carrying *data*."""
    return mocked_request(data.encode(), content_type="text/plain", **kwargs)
def get_imu_data():
    """Returns a 2d array containing the following

    * ``senses[0] = accel[x, y, z]`` for accelerometer data
    * ``senses[1] = gyro[x, y, z]`` for gyroscope data
    * ``senses[2] = mag[x, y, z]`` for magnetometer data

    .. note:: Not all data may be aggregated depending on the IMU device connected to the robot.
    """
    # Placeholder values, returned unchanged when no recognized IMU is found.
    senses = [
        [100, 50, 25],
        [-100, -50, -25],
        [100, -50, 25]
    ]
    for imu in IMUs:
        if isinstance(imu, LSM9DS1_I2C):
            senses[0] = list(imu.acceleration)
            senses[1] = list(imu.gyro)
            senses[2] = list(imu.magnetic)
        elif isinstance(imu, MPU6050):
            senses[0] = list(imu.acceleration)
            # BUGFIX: was `imu.gryo` (AttributeError); the Adafruit MPU6050
            # driver exposes the gyroscope reading as `gyro`.
            senses[1] = list(imu.gyro)
    return senses
def parse_proc_diskstats(proc_diskstats_contents):
    # type: (six.text_type) -> List[Sample]
    """
    Parse /proc/diskstats contents into a list of samples.

    Devices with no read and no write activity are skipped. Each active
    device yields two samples: "<name> read" and "<name> write", in bytes.
    """
    return_me = []  # type: List[Sample]
    for line in proc_diskstats_contents.splitlines():
        match = PROC_DISKSTATS_RE.match(line)
        if not match:
            continue
        name = match.group(1)
        read_sectors = int(match.group(2))
        write_sectors = int(match.group(3))
        if read_sectors == 0 and write_sectors == 0:
            continue
        # Multiply by 512 to get bytes from sectors:
        # https://stackoverflow.com/a/38136179/473672
        return_me.append(Sample(name + " read", read_sectors * 512))
        return_me.append(Sample(name + " write", write_sectors * 512))
    return return_me
def normalize(x):
    """Normalize a vector or a set of vectors.

    Arguments:
      * x: a 1D array (vector) or a 2D array, where each row is a vector.

    Returns:
      * y: normalized copies of the original vector(s).

    Raises:
      * ValueError: if x is not 1- or 2-dimensional (previously the
        function fell through and returned None silently).
    """
    if x.ndim == 1:
        return x / np.sqrt(np.sum(x ** 2))
    elif x.ndim == 2:
        # Normalize each row by its own Euclidean norm.
        return x / np.sqrt(np.sum(x ** 2, axis=1)).reshape((-1, 1))
    raise ValueError("x must be a 1D or 2D array, got ndim=%d" % x.ndim)
def compute_perrakis_estimate(marginal_sample, lnlikefunc, lnpriorfunc,
                              lnlikeargs=(), lnpriorargs=(),
                              densityestimation='histogram', **kwargs):
    """
    Computes the Perrakis estimate of the bayesian evidence.

    The estimation is based on n marginal posterior samples
    (indexed by s, with s = 0, ..., n-1).

    :param array marginal_sample:
        A sample from the parameter marginal posterior distribution.
        Dimensions are (n x k), where k is the number of parameters.
    :param callable lnlikefunc:
        Function to compute ln(likelihood) on the marginal samples.
    :param callable lnpriorfunc:
        Function to compute ln(prior density) on the marginal samples.
    :param tuple lnlikeargs:
        Extra arguments passed to the likelihood function.
    :param tuple lnpriorargs:
        Extra arguments passed to the lnprior function.
    :param str densityestimation:
        The method used to estimate the marginal posterior density of each
        model parameter ("normal", "kde", or "histogram").

    Other parameters
    ----------------
    :param kwargs:
        Additional arguments passed to estimate_density function.

    :return: the log evidence estimate (float).

    References
    ----------
    Perrakis et al. (2014; arXiv:1311.0674)
    """
    if not isinstance(marginal_sample, np.ndarray):
        marginal_sample = np.array(marginal_sample)
    number_parameters = marginal_sample.shape[1]
    ##
    # Estimate marginal posterior density for each parameter.
    log_marginal_posterior_density = np.zeros(marginal_sample.shape)
    for parameter_index in range(number_parameters):
        # Extract samples for this parameter.
        x = marginal_sample[:, parameter_index]
        # Estimate density with method "densityestimation".
        log_marginal_posterior_density[:, parameter_index] = \
            estimate_logdensity(x, method=densityestimation, **kwargs)
    # Compute produt of marginal posterior densities for all parameters
    # (product in log space is a sum).
    log_marginal_densities = log_marginal_posterior_density.sum(axis=1)
    ##
    # Compute log likelihood in marginal sample.
    log_likelihood = lnlikefunc(marginal_sample, *lnlikeargs)
    # Compute weights (i.e. prior over marginal density)
    w = weight(marginal_sample, lnpriorfunc, lnpriorargs,
               log_marginal_densities)
    # Mask values with zero likelihood (a problem in lnlike)
    cond = log_likelihood != 0
    # Use identity for summation
    # http://en.wikipedia.org/wiki/List_of_logarithmic_identities#Summation.2Fsubtraction
    # ln(sum(x)) = ln(x[0]) + ln(1 + sum( exp( ln(x[1:]) - ln(x[0]) ) ) )
    # log_summands = log_likelihood[cond] + np.log(prior_probability[cond])
    # - np.log(marginal_densities[cond])
    perr = lib.log_sum(w[cond] + log_likelihood[cond]) - log(len(w[cond]))
    return perr
def postNewProfile(profile : Profile):
    """Creates a new profile document if no profile with the same email exists.

    Parameters:
        profile (Profile): the profile to store

    Returns:
        bool: True if a new profile was saved, False if a profile with
        that email already existed."""
    profile_email = profile.email
    # Materialize the cursor to test for existing documents.
    profile_query = collection.find({"email":profile_email})
    profile_query = [item for item in profile_query]
    if not profile_query :
        collection.save(dict(profile))
        return True
    return False
from datetime import datetime
def buy():
    """Buy shares of stock.

    POST: validates the form, checks funds, records the purchase and a
    history row, and debits the user's cash. GET: renders the buy form.
    """
    # if user reached route via POST (as by submitting a form via POST)
    if request.method == "POST":
        symbol = request.form.get("symbol")
        share_field = request.form.get("share")
        # ensure SYMBOL and Share are submitted
        if symbol == "" or share_field == "":
            return apology("Please Enter SYMBOL/SHARE CORRECTLY!")
        # Parse the share count as a whole number.
        # BUGFIX: the old code summed the digits of the input one character
        # at a time, so buying "12" shares actually bought 3 (1 + 2).
        try:
            nShare = int(share_field)
        except (TypeError, ValueError):
            return apology("Please Enter SYMBOL/SHARE CORRECTLY!")
        # ensure the share count is positive
        if nShare <= 0:
            return apology("Cannot Buy Negative Shares Bruu!")
        # ensure the stock exists (single lookup instead of two)
        stock = lookup(symbol)
        if stock is None:
            return apology("SYMBOL DOES NOT EXIST!")
        stock_price = stock["price"]
        # user cash
        user_cash = db.execute("SELECT cash FROM users WHERE id=:id", id=session["user_id"])
        user_cash = float(user_cash[0]["cash"])
        # ensure appropriate cash is available for purchase
        if user_cash - stock_price * nShare < 0:
            return apology("YOU DO NOT HAVE ENOUGH CASH")
        # check if stock already exists in purchase table, if yes then update
        # the no. of stocks (use the canonical symbol from lookup throughout)
        rows = db.execute("SELECT stockname FROM purchase WHERE user_id=:user_id AND stockname=:stockname",
                          user_id=session["user_id"], stockname=stock["symbol"])
        if rows:
            # BUGFIX: the old UPDATE had no user_id filter and modified the
            # holdings of every user owning this stock.
            db.execute("UPDATE purchase SET nstocks = nstocks + :nstocks WHERE user_id=:user_id AND stockname = :stockname",
                       nstocks=nShare, user_id=session["user_id"], stockname=stock["symbol"])
        else:
            db.execute("INSERT INTO purchase (user_id, stockname, nstocks, price) VALUES (:user_id, :stockname, :nstocks, :price)",
                       user_id=session["user_id"], stockname=stock["symbol"], nstocks=nShare, price=stock_price)
        # record the transaction in the history table
        db.execute("INSERT INTO history (user_id, stockname, nstocks, price, time, ty_purchase) VALUES (:user_id, :stockname, :nstocks, :price, :time, :b)",
                   user_id=session["user_id"], stockname=stock["symbol"], nstocks=nShare,
                   price=stock_price, time=str(datetime.utcnow()), b="BUY")
        # update the users cash
        db.execute("UPDATE users SET cash = cash - :tcash WHERE id=:user_id",
                   tcash=stock_price * nShare, user_id=session["user_id"])
        return redirect(url_for("index"))
    # if user reached route via GET (as by submitting a form via GET)
    else:
        return render_template("buy.html")
from typing import List
def get_templates() -> List[dict]:
    """
    Gets a list of Templates that the active client can access

    Raises:
        NotificationClientNotFound: if no notification client is active.
    """
    client = get_active_notification_client()
    if not client:
        raise NotificationClientNotFound()
    # Delegate the actual fetch to the private helper with the resolved client.
    r = _get_templates(client=client)
    return r
def svn_mergeinfo_intersect2(*args):
    """
    svn_mergeinfo_intersect2(svn_mergeinfo_t mergeinfo1, svn_mergeinfo_t mergeinfo2,
        svn_boolean_t consider_inheritance, apr_pool_t result_pool,
        apr_pool_t scratch_pool) -> svn_error_t
    """
    # SWIG-generated thin wrapper around the C binding; do not edit by hand.
    return _core.svn_mergeinfo_intersect2(*args)
import time
import multiprocessing
def _update_images():
    """Update all docker images in this list, running a few in parallel.

    Returns True if any image was actually re-downloaded.
    """
    any_new = False
    def comment(name, new):
        # Record whether any pull produced a new image, and log per-image.
        nonlocal any_new
        if new:
            log.info(f"Downloaded new Docker image for {name} - {docker.image_size(name)}")
        else:
            log.debug(f"Docker image is up to date for {name} - {docker.image_size(name)}")
        any_new |= new
    # BUGFIX: with `import time` at module level, the bare `time()` call
    # raised TypeError ('module' object is not callable); use time.time().
    t0 = time.time()
    log.info("Downloading docker images...")
    # Images explicitly overridden in the config are refreshed first.
    override_images = set(config._image(i) for i in config.image_keys)
    with multiprocessing.Pool(6) as p:
        for name, new in p.imap_unordered(_update_image, override_images):
            comment(name, new)
    images = set(all_images()) | set(config.images) | override_images
    with multiprocessing.Pool(6) as p:
        for name, new in p.imap_unordered(_update_image, images):
            comment(name, new)
    log.info(f"All {len(images)} images are up to date, took {time.time()-t0:.02f}s")
    return any_new
def conv_backward(dZ, A_prev, W, b, padding="same", stride=(1, 1)):
    """
    Performs back propagation over a convolutional layer of a neural network

    dZ is a numpy.ndarray of shape (m, h_new, w_new, c_new) containing the
    partial derivatives with respect to the unactivated output of the
    convolutional layer
        m is the number of examples
        h_new is the height of the output
        w_new is the width of the output
        c_new is the number of channels in the output
    A_prev is a numpy.ndarray of shape (m, h_prev, w_prev, c_prev) containing
    the output of the previous layer
        m is the number of examples
        h_prev is the height of the previous layer
        w_prev is the width of the previous layer
        c_prev is the number of channels in the previous layer
    W is a numpy.ndarray of shape (kh, kw, c_prev, c_new) containing the
    kernels for the convolution
        kh is the filter height
        kw is the filter width
        c_prev is the number of channels in the previous layer
        c_new is the number of channels in the output
    b is a numpy.ndarray of shape (1, 1, 1, c_new) containing the biases
    applied to the convolution
    padding is a string that is either same or valid, indicating the type of
    padding used
    stride is a tuple of (sh, sw) containing the strides for the convolution
        sh is the stride for the height
        sw is the stride for the width
    Returns: the partial derivatives with respect to the previous layer
    (dA_prev), the kernels (dW), and the biases (db), respectively
    """
    sh, sw = stride
    kh, kw, c, c_new = W.shape
    m, h_prev, w_prev, c_prev = A_prev.shape
    d, h_new, w_new, _ = dZ.shape
    if padding == 'same':
        # NOTE(review): the trailing "+ 1" over-pads by one relative to the
        # usual same-padding formula — confirm against the forward pass.
        padw = int((((w_prev - 1) * sw + kw - w_prev) / 2) + 1)
        padh = int((((h_prev - 1) * sh + kh - h_prev) / 2) + 1)
    else:
        padh, padw = (0, 0)
    # Zero-pad the activations so index arithmetic below matches the forward pass.
    A_prev = np.pad(A_prev, ((0,), (padh,), (padw,), (0,)), constant_values=0,
                    mode='constant')
    dW = np.zeros(W.shape)
    dA = np.zeros(A_prev.shape)
    # Bias gradient: sum of dZ over batch and spatial dims.
    db = np.sum(dZ, axis=(0, 1, 2), keepdims=True)
    # Accumulate kernel and input gradients one output position at a time.
    for i in range(m):
        for j in range(h_new):
            for k in range(w_new):
                jsh = j * sh
                ksw = k * sw
                for ll in range(c_new):
                    dW[:, :, :, ll] += A_prev[i, jsh: jsh + kh,
                                              ksw: ksw + kw, :] * \
                        dZ[i, j, k, ll]
                    dA[i, jsh: jsh + kh, ksw: ksw + kw, :] += \
                        dZ[i, j, k, ll] * W[:, :, :, ll]
    if padding == 'same':
        # Strip the padding back off the input gradient.
        dA = dA[:, padh: -padh, padw: -padw, :]
    return dA, dW, db
import os
def clone(repo, user, site, parent=None):
    """
    Clone a repo from the requested site and user.

    :param repo: The name of the repo.
    :param user: The name of the user.
    :param site: The site to download from.
    :param parent: The parent folder where the repo will be cloned. By default,
        this is the current working directory.
    :return: The full path to the root directory of the cloned repo.
    :raises NotADirectoryError: if *parent* does not exist, or the final
        rename did not produce a directory.
    :raises IsADirectoryError: if the target path already exists.
    :raises KeyError: if *site* is not a known template key.
    """
    if parent is None:
        parent = os.getcwd()
    elif not os.path.isdir(parent):
        raise NotADirectoryError(parent)
    path = os.path.join(parent, repo)
    if os.path.isdir(path):
        raise IsADirectoryError(path)
    site = site.lower()
    if site not in URL_TEMPLATES:
        # Allow e.g. "github.com" by retrying with the extension stripped.
        base = os.path.splitext(site)[0]
        if base not in URL_TEMPLATES:
            raise KeyError(site)
        site = base
    url = URL_TEMPLATES[site].format(user=user, repo=repo)
    # TODO: Download to a temporary folder first, then rename.
    downloaded_path = clone_from_url(url, parent)
    os.rename(downloaded_path, path)
    if not os.path.isdir(path):
        raise NotADirectoryError(path)
    return path
import collections
def get_final_text(pred_text, orig_text, do_lower_case):
    """Project the tokenized prediction back to the original text.

    Args:
        pred_text: normalized predicted span (WordPiece-detokenized).
        orig_text: span of the original text covering the prediction.
        do_lower_case: whether the tokenizer lower-cases input.

    Returns:
        The aligned substring of orig_text, or orig_text unchanged when
        the alignment heuristic fails.
    """
    # When we created the data, we kept track of the alignment between original
    # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
    # now `orig_text` contains the span of our original text corresponding to the
    # span that we predicted.
    #
    # However, `orig_text` may contain extra characters that we don't want in
    # our prediction.
    #
    # For example, let's say:
    #   pred_text = steve smith
    #   orig_text = Steve Smith's
    #
    # We don't want to return `orig_text` because it contains the extra "'s".
    #
    # We don't want to return `pred_text` because it's already been normalized
    # (the SQuAD eval script also does punctuation stripping/lower casing but
    # our tokenizer does additional normalization like stripping accent
    # characters).
    #
    # What we really want to return is "Steve Smith".
    #
    # Therefore, we have to apply a semi-complicated alignment heruistic between
    # `pred_text` and `orig_text` to get a character-to-charcter alignment. This
    # can fail in certain cases in which case we just return `orig_text`.
    def _strip_spaces(text):
        # Remove spaces, remembering each kept character's original index.
        ns_chars = []
        ns_to_s_map = collections.OrderedDict()
        for (i, c) in enumerate(text):
            if c == " ":
                continue
            ns_to_s_map[len(ns_chars)] = i
            ns_chars.append(c)
        ns_text = "".join(ns_chars)
        return (ns_text, ns_to_s_map)
    # We first tokenize `orig_text`, strip whitespace from the result
    # and `pred_text`, and check if they are the same length. If they are
    # NOT the same length, the heuristic has failed. If they are the same
    # length, we assume the characters are one-to-one aligned.
    tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
    tok_text = " ".join(tokenizer.tokenize(orig_text))
    start_position = tok_text.find(pred_text)
    if start_position == -1:
        return orig_text
    end_position = start_position + len(pred_text) - 1
    (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
    (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
    if len(orig_ns_text) != len(tok_ns_text):
        return orig_text
    # We then project the characters in `pred_text` back to `orig_text` using
    # the character-to-character alignment.
    tok_s_to_ns_map = {}
    for (i, tok_index) in tok_ns_to_s_map.items():
        tok_s_to_ns_map[tok_index] = i
    orig_start_position = None
    if start_position in tok_s_to_ns_map:
        ns_start_position = tok_s_to_ns_map[start_position]
        if ns_start_position in orig_ns_to_s_map:
            orig_start_position = orig_ns_to_s_map[ns_start_position]
    if orig_start_position is None:
        return orig_text
    orig_end_position = None
    if end_position in tok_s_to_ns_map:
        ns_end_position = tok_s_to_ns_map[end_position]
        if ns_end_position in orig_ns_to_s_map:
            orig_end_position = orig_ns_to_s_map[ns_end_position]
    if orig_end_position is None:
        return orig_text
    output_text = orig_text[orig_start_position:(orig_end_position + 1)]
    return output_text
def createContext(data, id=None, keyTransform=None, removeNull=False):
    """Receives a dict with flattened key values, and converts them into nested dicts

    :type data: ``dict`` or ``list``
    :param data: The data to be added to the context (required)

    :type id: ``str``
    :keyword id: The ID of the context entry

    :type keyTransform: ``function``
    :keyword keyTransform: A formatting function for the markdown table headers

    :type removeNull: ``bool``
    :keyword removeNull: True if empty columns should be removed, false otherwise

    :return: The converted context list
    :rtype: ``list``
    """
    # A single mapping is converted directly; sequences are converted per entry.
    if not isinstance(data, (list, tuple)):
        return createContextSingle(data, id, keyTransform, removeNull)
    return [createContextSingle(entry, id, keyTransform, removeNull)
            for entry in data]
import os
def default_xonshrc(env) -> "tuple[str, ...]":
    """
    ``['$XONSH_SYS_CONFIG_DIR/xonshrc', '$XONSH_CONFIG_DIR/xonsh/rc.xsh', '~/.xonshrc']``

    Returns the default tuple of rc-file paths, system-wide first, then
    user-level, then the legacy home-directory file.
    """
    dxrc = (
        os.path.join(xonsh_sys_config_dir(env), "xonshrc"),
        os.path.join(xonsh_config_dir(env), "rc.xsh"),
        os.path.expanduser("~/.xonshrc"),
    )
    # Check if old config file exists and issue warning
    old_config_filename = xonshconfig(env)
    if os.path.isfile(old_config_filename):
        print(
            "WARNING! old style configuration ("
            + old_config_filename
            + ") is no longer supported. "
            + "Please migrate to xonshrc."
        )
    return dxrc
import math
def haversine(phi1, lambda1, phi2, lambda2):
    """
    calculate angular great circle distance with haversine formula
    see parameters in spherical_law_of_cosines
    """
    half_dphi = (phi2 - phi1) / 2
    half_dlambda = (lambda2 - lambda1) / 2
    # hav(c) = hav(dphi) + cos(phi1) * cos(phi2) * hav(dlambda)
    a = math.sin(half_dphi) ** 2 + \
        math.cos(phi1) * math.cos(phi2) * math.sin(half_dlambda) ** 2
    # atan2 form is numerically stable near antipodal points.
    return 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
import yaml
from datetime import datetime
def get_backup_start_timestamp(bag_name):
    """
    Input: First bag name
    Output: datetime object (bag start time, or parsed from the bag name
    when the bag metadata has no start field)
    """
    # safe_load suffices for bag metadata and avoids yaml.load's
    # arbitrary-object construction (and its deprecation warning).
    info_dict = yaml.safe_load(Bag(bag_name, 'r')._get_yaml_info())
    start_timestamp = info_dict.get("start", None)
    if start_timestamp is None:
        print("No start time info in bag, try to retrieve the start time by parsing bag name.")
        return parse_backup_start_timestamp(bag_name)
    # BUGFIX: this module does `from datetime import datetime`, so the old
    # `datetime.datetime.fromtimestamp(...)` raised AttributeError.
    return datetime.fromtimestamp(start_timestamp)
import os
def ResidualSlopesSequence2ResidualPhase(Gradients,S2M,M2V,name='residual_phase_cube.fits',\
                                         path='.',binning=40):
    """
    Same functions as ResidualSlopes2ResidualPhase but applies it to a sequence
    of slopes (gradients) instead of a single vector.

    Input:
        - Gradients: a sequence of vectors of 2480 elements representing the slopes measured
          by the WFS, shape (nframes, 2480)
        - S2M: the slopes to modes matrix (shape (988, 2480))
        - M2V: the modes to voltage matrix (shape (1377, 988))
        - name: the name of the file to save
        - path: path where the file is saved
        - binning: the binning factor to apply (40 by default). If no binning
          is desired, use binning=1
    Returns:
        The residual phase cube, shape (nframes//binning, 240, 240), also
        written to <path>/<name>.
    """
    if Gradients.ndim != 2 and Gradients.shape[1] != 2480:
        raise IOError('The input vector must be a 2D array of shape (nframes,2480) (currently',Gradients.shape,')')
    if S2M.ndim!=2 or S2M.shape[1]!=2480:
        raise IOError('The input S2M matrix must have a shape (988, 2480) (currently',S2M.shape,')')
    if M2V.ndim!=2 or M2V.shape[0]!=1377:
        raise IOError('The input M2V matrix must have a shape (1377, 988) (currently',M2V.shape,')')
    IMF = fits.getdata(os.path.join(path_sparta,'SAXO_DM_IFM.fits')) #shape (1377, 240, 240)
    # The matrix needs to be normalised to allow conversion into optical wavefront errors:
    # influence matrix normalization = defoc meca in rad @ 632 nm
    rad_632_to_nm_opt = 1. / 2. / np.pi * 632 * 2
    IMF = IMF * rad_632_to_nm_opt
    IMF = IMF.reshape(1377, 240*240).T  # shape (57600, 1377)
    # BUGFIX/perf: the reconstruction was previously computed for the full,
    # unbinned sequence and then thrown away before being recomputed on the
    # binned slopes; the dead (and expensive) first pass is removed.
    nframes = Gradients.shape[0]
    if binning == 1:
        slopes = Gradients
    elif binning > 1:
        # Average each group of `binning` consecutive frames.
        nframes = nframes // binning
        slopes = np.ndarray((nframes, 2480), dtype=float)
        for i in range(nframes):
            slopes[i, :] = np.mean(Gradients[i*binning:(i+1)*binning, :], axis=0)
    else:
        raise IOError('The binning factor must be an integer greater or equal to 1.')
    # slopes -> modes -> voltages -> wavefront maps
    mode = slopes @ S2M.T
    volt = mode @ M2V.T
    res_turbulence = (volt @ IMF.T).reshape((nframes, 240, 240))
    fits.writeto(os.path.join(path, name), res_turbulence, overwrite=True)
    return res_turbulence
def get_mask_areas(masks: np.ndarray) -> np.ndarray:
    """Get mask areas from the compressed mask map.

    Args:
        masks: Integer label map where 0 marks background and each positive
            value labels one annotation.

    Returns:
        Float array of pixel counts, ordered by ascending annotation id.
    """
    # np.unique returns sorted labels with their counts in one pass,
    # replacing the previous O(num_ids * pixels) loop of full-array
    # comparisons. Dropping label 0 explicitly (instead of slicing off the
    # first entry) also keeps the first annotation when no background
    # pixels are present.
    ann_ids, counts = np.unique(masks, return_counts=True)
    return counts[ann_ids != 0].astype(float)
def cursor_from_image(image):
    """
    Take a valid cursor image and create a mouse cursor.
    """
    # Opaque black maps to "X", opaque white to "."; every other pixel
    # becomes transparent (" ").
    colors = {(0,0,0,255) : "X",
              (255,255,255,255) : "."}
    rect = image.get_rect()
    icon_string = []
    for row in range(rect.height):
        chars = [colors.get(tuple(image.get_at((col, row))), " ")
                 for col in range(rect.width)]
        icon_string.append("".join(chars))
    return icon_string
def h_matrix(jac, p, lamb, method='kotre', W=None):
    """
    JAC method of dynamic EIT solver:
    H = (J.T*J + lamb*R)^(-1) * J.T

    Parameters
    ----------
    jac: NDArray
        Jacobian
    p, lamb: float
        regularization parameters
    method: str, optional
        regularization method
    W: NDArray, optional
        weighting matrix applied between J.T and J

    Returns
    -------
    H: NDArray
        pseudo-inverse matrix of JAC
    """
    jac_t = jac.transpose()
    if W is None:
        j_w_j = np.dot(jac_t, jac)
    else:
        j_w_j = multi_dot([jac_t, W, jac])
    if method == 'kotre':
        # see adler-dai-lionheart-2007: p interpolates where noise is
        # assumed to live — boundary (p=0, like 'dgn'), middle (p=0.5),
        # center (p=1, like 'lm').
        r_mat = np.diag(np.diag(j_w_j)) ** p
    elif method == 'lm':
        # Marquardt–Levenberg ('lm'), also known as NOSER / DLS.
        r_mat = np.diag(np.diag(j_w_j))
    else:
        # Damped Gauss Newton ('dgn').
        r_mat = np.eye(jac.shape[1])
    # Regularized pseudo-inverse.
    return np.dot(la.inv(j_w_j + lamb * r_mat), jac_t)
def _get_flavors_metadata_ui_converters_from_configuration():
    """Get flavor metadata ui converters from flavor mapping config dir.

    Returns a dict keyed by adapter name, each value a dict keyed by flavor
    name holding that flavor's CONFIG_MAPPING; parent-adapter mappings are
    merged into their children.
    """
    flavors_metadata_ui_converters = {}
    configs = util.load_configs(setting.FLAVOR_MAPPING_DIR)
    for config in configs:
        adapter_name = config['ADAPTER']
        flavor_name = config['FLAVOR']
        flavors_metadata_ui_converters.setdefault(
            adapter_name, {}
        )[flavor_name] = config.get('CONFIG_MAPPING', {})
    # Build the adapter -> parent map used for inheritance.
    adapters = adapter_api.ADAPTERS
    parents = {}
    for adapter_name, adapter in adapters.items():
        parent = adapter.get('parent', None)
        parents[adapter_name] = parent
    # Merge each adapter's mappings with those inherited from its ancestors.
    for adapter_name, adapter in adapters.items():
        flavors_metadata_ui_converters[adapter_name] = (
            util.recursive_merge_dict(
                adapter_name, flavors_metadata_ui_converters, parents
            )
        )
    return flavors_metadata_ui_converters
def outermost_scope_from_subgraph(graph, subgraph, scope_dict=None):
    """
    Returns the outermost scope of a subgraph.

    If the subgraph is not connected, there might be several scopes that
    are locally outermost; in that case a TypeError is raised.
    """
    if scope_dict is None:
        scope_dict = graph.scope_dict()

    scopes = {scope_dict[element] for element in subgraph}
    # Usual case: the root of the scope tree is in the subgraph, so the
    # toplevel scope (None) is the answer.
    if None in scopes:
        return None

    toplevel_candidates = set()
    for scope in scopes:
        # Walk up the scope tree; the scope whose ancestors are all outside
        # `scopes` is the locally top-level one.
        ancestor = scope_dict[scope]
        while ancestor and ancestor not in scopes:
            ancestor = scope_dict[ancestor]
        if ancestor is None:
            toplevel_candidates.add(scope)

    if len(toplevel_candidates) != 1:
        raise TypeError("There are several locally top-level nodes. "
                        "Please check your subgraph and see to it "
                        "being connected.")
    return toplevel_candidates.pop()
def calc_entropy_ew(molecule, temp):
    """
    Exponential well entropy.

    :param molecule: object providing mass (kg) and exponential-well
        parameters ew_a_inv_ang (1/Angstrom) and ew_k_kcal (kcal/mol)
    :param temp: temperature in K
    :return: entropy (in units of Constants.r)
    """
    # Convert molecule properties to atomic units.
    mass_au = molecule.mass / Constants.amu_to_kg * Constants.amu_to_au
    well_a = molecule.ew_a_inv_ang * Constants.inverse_ang_inverse_au
    well_k = molecule.ew_k_kcal * Constants.kcal_mol_to_au

    part_func = _q_t_ew(molecule, temp)
    beta = 1.0 / (Constants.kb_au * temp)
    cap_lambda = ((2.0 * mass_au * np.pi) / (beta * Constants.h_au ** 2)) ** 1.5

    def _integrand(r, beta_, depth, inv_len):
        # r^2 * exp(-beta*k*(exp(a*r) - 1) + a*r)
        return r ** 2 * np.exp(-beta_ * depth * (np.exp(inv_len * r) - 1.0) + inv_len * r)

    integral = integrate.quad(_integrand, 0.0, 10.0, args=(beta, well_k, well_a))[0]
    term_4 = 4.0 * np.pi * (well_k * beta * cap_lambda / part_func) * integral
    return Constants.r * (1.5 - well_k * beta + np.log(part_func) + term_4)
import json
import uuid
import sys
def registerCreatorDataCallbackURL():
    """
    Register a data callback URL on an existing Agent.Creator entity.

    Expects a JSON payload carrying:
        creatorID       - UUID string of an existing Agent.Creator entity
        dataCallbackURL - URL to store on that entity

    Writes a JSON body {"status": <message>} into the module-level response:
    HTTP 200 on success, HTTP 500 on any failure.
    """
    try:
        global rmlEngine
        rawRequest = request.POST.dict
        # NOTE(review): the JSON payload arrives as a *key* of the POST dict;
        # this loop keeps the last key seen — presumably only one is posted.
        for rawKey in rawRequest.keys():
            keyVal = rawKey
        jsonPayload = json.loads(keyVal)
        #ownerID
        # Validate creatorID: must be present and a well-formed UUID.
        try:
            creatorID = jsonPayload["creatorID"]
            creatorUUID = uuid.UUID(creatorID)
        except KeyError:
            raise Exceptions.MissingPOSTArgumentError("creatorID parameter missing from POST request.")
        except Exception as e:
            # e.g. uuid.UUID() rejecting a malformed id — re-raised as-is.
            raise e
        # The entity must exist and be of meme type Agent.Creator.
        try:
            ownerEntityType = rmlEngine.api.getEntityMemeType(creatorUUID)
        except Exception as e:
            raise Exceptions.NoSuchEntityError("creatorID parameter value %s does not exist." %creatorID)
        if ownerEntityType != "Agent.Creator":
            raise Exceptions.TemplatePathError("creatorID parameter value %s does not refer to a valid data creator" %creatorID)
        #stimulusCallbackURL
        # Validate dataCallbackURL: must be present in the payload.
        try:
            dataCallbackURL = jsonPayload["dataCallbackURL"]
        except KeyError:
            raise Exceptions.MissingPOSTArgumentError("dataCallbackURL parameter missing from POST request.")
        except Exception as e:
            raise e
        # Persist the callback URL onto the creator entity.
        try:
            rmlEngine.api.setEntityPropertyValue(creatorUUID, "dataCallbackURL", dataCallbackURL)
        except Exception as e:
            raise Exceptions.MismatchedPOSTParametersError("Error while assigning stimulusCallbackURL value %s to entity %s " %(dataCallbackURL, creatorID))
        returnStr = "Assigned dataCallbackURL %s to owner %s " %(dataCallbackURL, creatorID)
        response.body = json.dumps({"status": returnStr})
        response.status = 200
        return response
    except Exception as unusedE:
        # Any failure above is reported as a 500 with the exception details.
        fullerror = sys.exc_info()
        errorID = str(fullerror[0])
        errorMsg = str(fullerror[1])
        returnStr = "Failed to assign dataCallbackURL to new Agent.Creator Entity. %s, %s" %(errorID, errorMsg)
        response.body = json.dumps({"status": returnStr})
        response.status = 500
        return response
def csm(A, B):
    """
    Calculate Cosine similarity measure of distance between two vectors `A` and `B`.

    Parameters
    -----------
    A : ndarray
        First vector containing values
    B : ndarray
        Second vector containing values

    Returns
    --------
    float
        distance value between two vectors (1.0 for parallel vectors,
        0.0 for orthogonal ones)

    Examples
    ---------
    >>> distance = csm(A, B)
    """
    numerator = np.sum(A * B)
    # Cosine similarity divides by the product of the vector *norms*,
    # i.e. sqrt(sum(A^2)) * sqrt(sum(B^2)).  The previous implementation
    # used sqrt(sum(A)) * sqrt(sum(B)), which is not the cosine measure
    # and produces NaN for vectors with negative component sums.
    denominator = np.sqrt(np.sum(A * A)) * np.sqrt(np.sum(B * B))
    if denominator == 0:
        # Guard against zero vectors.
        denominator = 1
    return numerator / denominator
def RationalsModP(p):
    """Return a class representing rationals modulo p.  Assume p is a prime."""
    class RationalModP(_Modular):
        """A rational modulo p

        The rational is stored with numerator and denominator relatively prime.
        This is done to prevent growth in numerator or denominator which causes
        overflow and makes math harder.
        """

        def __init__(self, m, n=1):
            """Constructor for rationals.

            Accepts either a 64-byte big-endian encoding (numerator then
            denominator, 32 bytes each) or a numerator/denominator pair.
            If denominator is not specified, set it to 1.
            """
            try:
                # This is awkward constructor overloading
                if isinstance(m, bytes):
                    num = int.from_bytes(m[:32], 'big')
                    den = int.from_bytes(m[32:], 'big')
                else:
                    num = int(m) % RationalModP.p
                    den = int(n) % RationalModP.p
                common = gcd(num, den)
                # Handle case with 0 (gcd(0, 0) == 0: nothing to reduce)
                if common == 0:
                    self.m, self.n = num, den
                else:
                    self.m = num // common
                    self.n = den // common
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt and
                # SystemExit are not swallowed.
                raise TypeError("Can't cast type %s to %s in __init__" %
                                (type(n).__name__, type(self).__name__))
            self.field = RationalModP

        @typecheck
        def __add__(self, other):
            # (a/b) + (c/d) = (a*d + c*b) / (b*d), reduced mod p.
            num = (self.m * other.n + other.m * self.n) % RationalModP.p
            den = (self.n * other.n) % RationalModP.p
            common = gcd(num, den)
            return RationalModP(num // common, den // common)

        @typecheck
        def __sub__(self, other):
            # (a/b) - (c/d) = (a*d - c*b) / (b*d), reduced mod p.
            num = (self.m * other.n - other.m * self.n) % RationalModP.p
            den = (self.n * other.n) % RationalModP.p
            common = gcd(num, den)
            return RationalModP(num // common, den // common)

        @typecheck
        def __mul__(self, other):
            num = (self.m * other.m) % RationalModP.p
            den = (self.n * other.n) % RationalModP.p
            return RationalModP(num, den)

        def __neg__(self):
            return RationalModP(-self.m, self.n)

        @typecheck
        def __eq__(self, other):
            # a/b == c/d  iff  a*d == c*b (mod p)
            return isinstance(other, RationalModP) and (
                (self.m * other.n) % RationalModP.p == (other.m * self.n) % RationalModP.p)

        @typecheck
        def __ne__(self, other):
            # BUG FIX: this previously tested isinstance(other, IntegerModP),
            # an undefined name in this scope, so evaluating __ne__ raised
            # NameError unconditionally.
            return isinstance(other, RationalModP) is False or (
                (self.m * other.n) % RationalModP.p != (other.m * self.n) % RationalModP.p)

        # TODO(rbharath): This should be possible to implement. Think more about it.
        #@typecheck
        #def __divmod__(self, divisor):
        #  q, r = divmod(self.n, divisor.n)
        #  return (IntegerModP(q), IntegerModP(r))

        def inverse(self):
            """Return the multiplicative inverse n/m of this rational m/n."""
            if self.m == 0:
                raise Exception("Cannot invert with numerator 0")
            return RationalModP(self.n, self.m)

        def __str__(self):
            return "%s/%s" % (str(self.m), str(self.n))

        def __repr__(self):
            return '%d/%d (mod %d)' % (self.m, self.n, self.p)

        # TODO(rbharath): Can this method be done better?
        def to_bytes(self):
            """Serialize as 32 big-endian bytes of numerator + 32 of denominator."""
            return self.m.to_bytes(32, 'big') + self.n.to_bytes(32, 'big')

    RationalModP.p = p
    RationalModP.__name__ = 'Q/%d' % (p)
    RationalModP.englishName = 'RationalsMod%d' % (p)
    return RationalModP
import random
import string
def rand_email():
    """Random email.

    Usage Example::

        >>> rand_email()
        Z4Lljcbdw7m@npa.net
    """
    # Local part: a letter followed by 4-14 alphanumeric characters.
    local_part = random.choice(string.ascii_letters) + \
        rand_str(string.ascii_letters + string.digits, random.randint(4, 14))
    # Domain: 2-10 lowercase letters plus a random known TLD.
    domain_name = rand_str(string.ascii_lowercase, random.randint(2, 10))
    tld = random.choice(_all_email_kinds)
    return "%s@%s%s" % (local_part, domain_name, tld)
import os
import re
def __parse_quic_timing_from_scenario(in_dir: str, scenario_name: str, pep: bool = False) -> pd.DataFrame:
    """
    Parse the quic timing results in the given scenario.

    :param in_dir: The directory containing all measurement results
    :param scenario_name: The name of the scenario to parse
    :param pep: Whether to parse QUIC or QUIC (PEP) files
    :return: A dataframe containing the parsed results of the specified scenario.
    """
    logger.debug("Parsing quic%s timing files in %s", " (pep)" if pep else "", scenario_name)

    # Compile the filename pattern once instead of re-matching per file.
    file_name_re = re.compile(r"^quic%s_ttfb_(\d+)_client\.txt$" % ("_pep" if pep else "",))

    # Collect one dict per run and build the frame once at the end:
    # DataFrame.append per row is quadratic and was removed in pandas >= 2.0.
    rows = []
    for file_name in os.listdir(os.path.join(in_dir, scenario_name)):
        file_path = os.path.join(in_dir, scenario_name, file_name)
        if not os.path.isfile(file_path):
            continue
        match = file_name_re.search(file_name)
        if not match:
            continue

        logger.debug("%s: Parsing '%s'", scenario_name, file_name)
        run = int(match.group(1))
        con_est = None
        ttfb = None
        with open(file_path) as file:
            for line in file:
                if line.startswith('connection establishment time:'):
                    if con_est is not None:
                        logger.warning("Found duplicate value for con_est in '%s', ignoring", file_path)
                    else:
                        # Value is of the form "...: 12.3ms" -> strip the unit.
                        con_est = float(line.split(':', 1)[1].strip()[:-2])
                elif line.startswith('time to first byte:'):
                    if ttfb is not None:
                        logger.warning("Found duplicate value for ttfb in '%s', ignoring", file_path)
                    else:
                        ttfb = float(line.split(':', 1)[1].strip()[:-2])
        rows.append({'run': run, 'con_est': con_est, 'ttfb': ttfb})

    df = pd.DataFrame(rows, columns=['run', 'con_est', 'ttfb'])
    with_na = len(df.index)
    df.dropna(subset=['con_est', 'ttfb'], inplace=True)
    without_na = len(df.index)
    if with_na != without_na:
        logger.warning("%s: Dropped %d lines with NaN values", scenario_name, with_na - without_na)

    if df.empty:
        logger.warning("%s: No quic%s timing data found", scenario_name, " (pep)" if pep else "")

    return df
import asyncio
async def value_to_deep_structure(value, hash_pattern):
    """Build a deep structure (checksums in place of objects) from value."""
    try:
        objects = {}
        deep_structure0 = _value_to_objects(value, hash_pattern, objects)
    except (TypeError, ValueError):
        raise DeepStructureError(hash_pattern, value) from None

    obj_id_to_checksum = {}
    new_checksums = set()

    async def _store_object(obj_id):
        # Serialize the object, cache its buffer and record its checksum.
        buf = await serialize(objects[obj_id], "mixed")
        checksum = await calculate_checksum(buf)
        new_checksums.add(checksum.hex())
        buffer_cache.cache_buffer(checksum, buf)
        obj_id_to_checksum[obj_id] = checksum.hex()

    # Serialize all objects concurrently.
    await asyncio.gather(*(_store_object(obj_id) for obj_id in objects))

    deep_structure = _build_deep_structure(
        hash_pattern, deep_structure0, obj_id_to_checksum
    )
    return deep_structure, new_checksums
def get_users_run(jobs, d_from, target, d_to='', use_unit='cpu',
                  serialize_running=''):
    """Takes a DataFrame full of job information and
    returns usage for each "user"
    uniquely based on specified unit.

    This function operates as a stepping stone for plotting usage figures
    and returns various series and frames for several different uses.

    Parameters
    -------
    jobs: DataFrame
        Job DataFrame typically generated by slurm/sacct_jobs
        or the ccmnt package.
    use_unit: str, optional
        Usage unit to examine. One of: {'cpu', 'cpu-eqv', 'gpu', 'gpu-eqv'}.
        Defaults to 'cpu'.
    d_from: date str
        Beginning of the query period, e.g. '2019-04-01T00:00:00'.
    target: int-like
        Typically a cpu allocation or core eqv value for a particular acount.
        Often 50.
    d_to: date str, optional
        End of the query period, e.g. '2020-01-01T00:00:00'.
        Defaults to now if empty.
    serialize_running: str, optional
        Pickle given structure with argument as a name.
        If left empty, pickle procedure is skipped.
        Defaults to empty.

    Returns
    -------
    user_running_cat:
        Frame of running resources for each of the unique
        "users" in the jobs data frame.
    """
    users = jobs.user.unique()
    running_series = []
    for user in users:
        # Exact equality instead of str.match(): match() performs a *regex
        # prefix* match, so user "abc" would also select rows of "abcd" (and
        # regex metacharacters in user names would misbehave).
        user_jobs = jobs[jobs['user'] == user].copy()
        _, _, user_running, _ = job_use(user_jobs, d_from,
                                        target, d_to=d_to,
                                        use_unit=use_unit)
        user_running = user_running[d_from:d_to]
        running_series.append(pd.Series(user_running,
                                        index=user_running.index,
                                        name=user))

    if not running_series:
        # No users at all: return an empty frame instead of raising.
        user_running_cat = pd.DataFrame()
    else:
        # Concatenate once instead of growing the frame per user (quadratic).
        # pd.concat([single_series], axis=1) already yields a DataFrame, so
        # the old one-user to_frame() special case is covered.
        user_running_cat = pd.concat(running_series, axis=1)

    if serialize_running != '':
        user_running_cat.to_pickle(serialize_running)

    return user_running_cat
import collections
def get_classes_constants(paths):
    """
    Extract the vtk class names and constants from the path.

    :param paths: The path(s) to the Python file(s).
    :return: dict mapping file name -> set of VTK classes/constants found.
    """
    res = collections.defaultdict(set)
    for path in paths:
        content = path.read_text().split('\n')
        for line in content:
            # Skip the whole line when any skip pattern matches.
            # BUG FIX: the previous code did `continue` inside the loop over
            # skip patterns, which only advanced to the next pattern — the
            # skip patterns therefore had no effect at all.
            if any(pattern.search(line) for pattern in Patterns.skip_patterns):
                continue
            for pattern in Patterns.vtk_patterns:
                m = pattern.search(line)
                if m:
                    for g in m.groups():
                        res[str(path)].add(g)
    return res
def predict_unfolding_at_temperature(temp, data, PDB_files):
    """
    Predict labels for all trajectories at a given temperature.

    Assumes that at a given temperature, all snapshot files share the same
    naming scheme "<temp>_<traj>.<...>" and that all trajectories have the
    same length and times.

    Returns (times, sim_labels) where sim_labels has shape
    (n_trajectories, traj_len).
    """
    # Normalize temp to the zero-padded string used in the file names
    # (e.g. 0.8 -> "0.800").  NOTE(review): padding runs until len == 5,
    # i.e. "0.800", although the inline comment mentions "0.80" — confirm
    # against the actual file naming convention.
    temp=str(temp)
    if len(temp)==1:
        temp='{}.'.format(temp)
    while len(temp)<5: #add zeros so that the temperature is of the form 0.80
        temp='{}0'.format(temp)
    f, trajectories = utils.get_trajectory(data, PDB_files, '{}_'.format(temp) )
    #need to figure out how long are all the trajectories.
    #to figure this out, iterate through the first files until you see a change
    go=True
    i=0
    traj_nums=[]
    while go:
        file=f[i]
        file=file.split('{}_'.format(temp))
        suffix=file[1]
        traj_num=suffix.split('.')[0]
        traj_nums.append(traj_num)
        # NOTE(review): at i == 0 this compares traj_nums[0] with
        # traj_nums[-1] (the same element), which is False, so the loop
        # always continues past the first file as intended.
        if traj_nums[i]!=traj_nums[i-1]:
            go=False
        else:
            i+=1
    traj_len=i
    n_trajectories=int(len(f)/traj_len)
    # One row of labels per trajectory, one column per snapshot time.
    sim_labels=np.zeros((n_trajectories, traj_len))
    times=utils.get_times(f[0:traj_len])
    for n in range(n_trajectories):
        traj=trajectories[n*traj_len:n*traj_len+traj_len]
        sim_labels[n,:]=traj
    return times, sim_labels
def _frac_scorer(matched_hs_ions_df, all_hyp_ions_df, N_spectra):
"""Fraction ion observed scorer.
Provides a score based off of the fraction of hypothetical ions that were observed
for a given hypothetical structure.
Parameters
----------
matched_hs_ions_df : pd.DataFrame
Dataframe of observed ions that matched a specific hypothetical structure
all_hyp_ions_df : pd.DataFrame
Dataframe of all possible ions for a given hypothetical structure.
N_spectra : int
Number of spectra provided.
Returns
-------
float
Score for a given hypothetical structure.
"""
# Calculate the number of matched ions observed and total possible
N_matched_hs_ions = matched_hs_ions_df.shape[0]
N_tot_hyp_ions = all_hyp_ions_df.shape[0]
score = N_matched_hs_ions / (N_tot_hyp_ions*N_spectra)
return score | a341b02b7ba64eb3b29032b4fe681267c5d36a00 | 22,243 |
def role_in(roles_allowed):
    """
    Build a permission checker that accepts a user when any of the user's
    roles appears in `roles_allowed`.
    """
    def _check_with_authuser(authuser):
        return any(role in roles_allowed for role in authuser.roles)
    return _check_with_authuser
from typing import List
def elements_for_model(model: Model) -> List[str]:
    """Creates a list of elements to expect to register.

    Elements without multiplicity register under their plain name; an
    element with multiplicity [m0, m1, ...] registers once per index
    combination as "name[i0][i1]...", last index varying fastest.

    Args:
        model: The model to create a list for.
    """
    from itertools import product

    result = list()  # type: List[str]
    for element in model.compute_elements:
        name = str(element.name)
        if len(element.multiplicity) == 0:
            result.append(name)
        else:
            # itertools.product over the index ranges yields exactly the
            # row-major order the previous hand-written n-dimensional
            # counter produced — and, unlike it, terminates (yielding
            # nothing) when a multiplicity dimension is 0.
            for index in product(*(range(dim) for dim in element.multiplicity)):
                result.append(name + ''.join('[{}]'.format(i) for i in index))
    return result
def form_IntegerNoneDefault(request):
    """
    An integer field defaulting to None
    """
    structure = schemaish.Structure()
    structure.add('myIntegerField', schemaish.Integer())
    integer_form = formish.Form(structure, 'form')
    integer_form.defaults = {'myIntegerField': None}
    return integer_form
import string
def tokenize(text, stopwords):
    """Tokenize the document, removing punctuation and stopwords.

    The stopword membership test is done on the lower-cased token, so
    capitalised occurrences (e.g. "The") are filtered against a lowercase
    stopword list as well — previously the original-case token was tested,
    letting capitalised stopwords slip through.
    """
    without_punctuations = text.translate(str.maketrans('', '', string.punctuation))
    tokens = word_tokenize(without_punctuations)
    return [w.lower() for w in tokens if w.lower() not in stopwords]
def to_dict(prim: Primitive) -> ObjectData:
    """Convert a primitive to a dictionary for serialization."""
    base: BasePrimitive = prim.value
    result: ObjectData = {
        "name": base.name,
        "size": base.size,
        "signed": base.signed,
        "integer": prim in INTEGER_PRIMITIVES,
    }
    # Only emit bounds when at least one of them is non-zero.
    if base.min != 0 or base.max != 0:
        result["min"] = base.min
        result["max"] = base.max
    return result
import requests
def get_raw_img(url):
    """
    Download input image from url and save it to ./imgs/img.png.

    :param url: image URL to fetch
    :return: True once the image has been written to disk
    """
    pic = False
    # Context manager closes the streamed response even if writing fails
    # (the previous version leaked the connection); iter_content() defaults
    # to 1-byte chunks, so pass an explicit chunk size.
    with requests.get(url, stream=True) as response:
        with open('./imgs/img.png', 'wb') as file:
            for chunk in response.iter_content(chunk_size=8192):
                file.write(chunk)
        pic = True
    return pic
def get_and_validate_study_id(chunked_download=False):
    """
    Checks for a valid study object id or primary key.
    If neither is given, a 400 (bad request) error is raised.
    Study object id malformed (not 24 characters) causes 400 error.
    Study object id otherwise invalid causes 400 error.
    Study does not exist in our database causes 404 error.
    """
    study = _get_study_or_abort_404(request.values.get('study_id', None),
                                    request.values.get('study_pk', None))
    # Chunked data may only be downloaded from test studies.
    if chunked_download and not study.is_test:
        return abort(404)
    return study
import re
def tokenize_char(pinyin: str) -> tuple[str, str, int] | None:
    """
    Given a string containing the pinyin representation of a Chinese character, return a 3-tuple containing its
    initial (``str``), final (``str``), and tone (``int; [0-4]``), or ``None`` if it cannot be properly tokenized.
    """
    initial = final = ''
    tone = 0
    # Replace the first toned vowel with its bare form and record the tone.
    for ch in pinyin:
        if ch in __TONED_VOWELS:
            tone = __TONED_VOWELS[ch][1]
            pinyin = pinyin.replace(ch, __TONED_VOWELS[ch][0])
            break
    # Split off the final; whatever the pattern removes is the initial.
    for pattern in __FINALS:
        found = re.search(pattern, pinyin)
        if found is not None:
            final = found[0]
            initial = re.sub(pattern, '', pinyin)
            break
    return (initial, final, tone) if final else None
def is_in_cell(point:list, corners:list) -> bool:
    """
    Checks if a point is within a cell.

    :param point: Tuple of lat/Y,lon/X-coordinates
    :param corners: List of corner coordinates; corners[0] supplies the
        upper Y / left X bound, corners[2] the lower Y / right X bound
    :returns: Boolean whether point is within cell
    """
    lat_min, lat_max = corners[2][0], corners[0][0]
    lon_min, lon_max = corners[0][1], corners[2][1]
    return (lat_min <= point[0] <= lat_max) and (lon_min <= point[1] <= lon_max)
def json_response(function):
    """
    Decorator that converts :class:`~django.http.Http404` exceptions raised by the
    wrapped view into a 404 :class:`~django.http.JsonResponse` (instead of the
    default :class:`~django.http.HttpResponse` error page).

    :param function: The view function which should always return JSON
    :type function: ~collections.abc.Callable

    :return: The decorated function
    :rtype: ~collections.abc.Callable
    """

    @wraps(function)
    def wrapper(request, *args, **kwargs):
        r"""
        Call the wrapped view and return its result unchanged, except that an
        :class:`~django.http.Http404` is rendered as a JSON 404 response.

        :param request: Django request
        :param \*args: The supplied arguments
        :param \**kwargs: The supplied kwargs
        :return: The response of the given function or a 404 :class:`~django.http.JsonResponse`
        """
        try:
            return function(request, *args, **kwargs)
        except Http404 as e:
            return JsonResponse({"error": str(e) or "Not found."}, status=404)

    return wrapper
import re
def generate_junit_report_from_cfn_guard(report):
    """Generate a JUnit test suite from a cloudformation-guard report.

    :param report: iterable of finding dicts with "message" and "file" keys
    :return: the JUnit XML document as a string
    """
    # Raw string literal: the previous plain "^\[..." contains the invalid
    # escape sequence \[ (SyntaxWarning from Python 3.12).  Compiled once
    # instead of per finding.
    resource_re = re.compile(r"^\[([^]]*)]")
    test_cases = []
    count_id = 0
    for file_findings in report:
        finding = file_findings["message"]
        # extract resource id from finding line
        resource_match = resource_re.search(finding)
        if resource_match:
            resource_id = resource_match.group(1)
            test_case = TestCase(
                "%i - %s" % (count_id, finding),
                classname=resource_id)
            test_case.add_failure_info(output="%s#R:%s" % (file_findings["file"], resource_id))
            test_cases.append(test_case)
            count_id += 1
    test_suite = TestSuite("aws cfn-guard test suite", test_cases)
    return TestSuite.to_xml_string([test_suite], prettyprint=False)
def new_custom_alias():
    """
    Create a new custom alias
    Input:
        alias_prefix, for ex "www_groupon_com"
        alias_suffix, either .random_letters@simplelogin.co or @my-domain.com
        optional "hostname" in args
    Output:
        201 if success
        409 if the alias already exists
        400 on quota exceeded, empty body, or invalid prefix/suffix
    """
    user = g.user
    # Free-plan quota check: reject before doing any parsing.
    if not user.can_create_new_alias():
        LOG.d("user %s cannot create any custom alias", user)
        return (
            jsonify(
                error="You have reached the limitation of a free account with the maximum of "
                f"{MAX_NB_EMAIL_FREE_PLAN} aliases, please upgrade your plan to create more aliases"
            ),
            400,
        )

    user_custom_domains = [cd.domain for cd in user.verified_custom_domains()]
    # Optional hostname comes from the query string, not the JSON body.
    hostname = request.args.get("hostname")

    data = request.get_json()
    if not data:
        return jsonify(error="request body cannot be empty"), 400

    alias_prefix = data.get("alias_prefix", "").strip()
    alias_suffix = data.get("alias_suffix", "").strip()

    # Normalize the prefix to a valid identifier before validation.
    alias_prefix = convert_to_id(alias_prefix)

    if not verify_prefix_suffix(user, alias_prefix, alias_suffix, user_custom_domains):
        return jsonify(error="wrong alias prefix or suffix"), 400

    full_alias = alias_prefix + alias_suffix
    if GenEmail.get_by(email=full_alias):
        LOG.d("full alias already used %s", full_alias)
        return jsonify(error=f"alias {full_alias} already exists"), 409

    gen_email = GenEmail.create(user_id=user.id, email=full_alias)
    db.session.commit()

    # Record where the alias was used, when the client reported a hostname.
    if hostname:
        AliasUsedOn.create(gen_email_id=gen_email.id, hostname=hostname)
        db.session.commit()

    return jsonify(alias=full_alias), 201
def setup_pen_kw(penkw=None, **kw):
    """
    Builds a pyqtgraph pen (object containing color, linestyle, etc. information) from Matplotlib keywords.
    Please dealias first.

    :param penkw: dict, optional
        Dictionary of pre-translated pyqtgraph keywords to pass to pen.
        Defaults to a fresh empty dict.

    :param kw: dict
        Dictionary of Matplotlib style plot keywords in which line plot relevant settings may be specified. The entire
        set of mpl plot keywords may be passed in, although only the keywords related to displaying line plots will be
        used here.

    :return: pyqtgraph pen instance
        A pen which can be input with the pen keyword to many pyqtgraph functions
    """
    # Avoid the mutable-default-argument trap: this dict is mutated below, so
    # a shared `penkw={}` default would leak state between calls.
    if penkw is None:
        penkw = {}

    # Move the easy keywords over directly
    direct_translations_pen = {  # plotkw: pgkw
        'linewidth': 'width',
    }
    for direct in direct_translations_pen:
        penkw[direct_translations_pen[direct]] = kw.pop(direct, None)

    # Handle colors and styles
    penkw['color'] = color_translator(**kw)
    penkw['style'] = style_translator(**kw)

    # Prune values of None
    penkw = {k: v for k, v in penkw.items() if v is not None}
    return pg.mkPen(**penkw) if len(penkw.keys()) else None
def seq(seq_aps):
    """Sequence of parsers `seq_aps`."""
    if seq_aps:
        # Sequence the head parser with the sequencing of the tail.
        first, rest = seq_aps[0], seq_aps[1:]
        return first << cons >> seq(rest)
    # Empty sequence: succeed with an empty result list.
    return succeed(list())
def Growth_factor_Heath(omega_m, z):
    """
    Computes the unnormalised growth factor at redshift z given the present day value of omega_m. Uses the expression
    from Heath1977

    Assumes Flat LCDM cosmology, which is fine given this is also assumed in CambGenerator. Possible improvement
    could be to tabulate this using the CambGenerator so that it would be self consistent for non-LCDM cosmologies.

    :param omega_m: the matter density at the present day
    :param z: the redshift we want the matter density at
    :return: the unnormalised growth factor at redshift z.
    """
    a_final = 1.0 / (1.0 + z)
    scale_factors = np.logspace(-4.0, np.log10(a_final), 10000)
    integrand = 1.0 / (scale_factors * E_z(omega_m, 1.0 / scale_factors - 1.0)) ** 3
    # NOTE(review): scipy.integrate.simps was renamed simpson and the old
    # alias is removed in scipy >= 1.14 — confirm the pinned scipy version.
    integ = integrate.simps(integrand, scale_factors, axis=0)
    return 5.0 / 2.0 * omega_m * E_z(omega_m, z) * integ
def freq2bark(freq_axis):
    """ Frequency conversion from Hertz to Bark

    See E. Zwicker, H. Fastl: Psychoacoustics. Springer, Berlin, Heidelberg, 1990.
    The coefficients are linearly interpolated from the values given in table 6.1.

    Parameter
    ---------
    freq_axis : numpy.array
        Hertz frequencies to be converted

    Output
    ------
    bark_axis : numpy.array
        frequencies converted in Bark
    """
    # Band edges in Hz from table 6.1, one entry per half Bark.
    hz_edges = np.array([    0,    50,   100,   150,   200,   250,   300,   350,   400,
                           450,   510,   570,   630,   700,   770,   840,   920,  1000,
                          1080,  1170,  1270,  1370,  1480,  1600,  1720,  1850,  2000,
                          2150,  2320,  2500,  2700,  2900,  3150,  3400,  3700,  4000,
                          4400,  4800,  5300,  5800,  6400,  7000,  7700,  8500,  9500,
                         10500, 12000, 13500, 15500, 20000])
    bark_edges = np.arange(0, 25, 0.5)
    return np.interp(freq_axis, hz_edges, bark_edges)
import requires_internet
import urllib.request
import sys
import os
import distutils
import getopt
def main(argv):
    """Run tests, return number of failures (integer)."""
    # insert our paths in sys.path:
    # ../build/lib.*
    # ..
    # Q. Why this order?
    # A. To find the C modules (which are in ../build/lib.*/Bio)
    # Q. Then, why ".."?
    # A. Because Martel may not be in ../build/lib.*
    test_path = sys.path[0] or "."
    source_path = os.path.abspath(f"{test_path}/..")
    sys.path.insert(1, source_path)
    # NOTE(review): sys.version[:3] yields "3.1" on Python 3.10+, so this
    # build path is wrong there — consider sysconfig.get_platform()/version.
    build_path = os.path.abspath(
        f"{test_path}/../build/lib.{distutils.util.get_platform()}-{sys.version[:3]}"
    )
    if os.access(build_path, os.F_OK):
        sys.path.insert(1, build_path)
    # Using "export LANG=C" (which should work on Linux and similar) can
    # avoid problems detecting optional command line tools on
    # non-English OS (we may want 'command not found' in English).
    # HOWEVER, we do not want to change the default encoding which is
    # rather important on Python 3 with unicode.
    # lang = os.environ['LANG']
    # get the command line options
    try:
        opts, args = getopt.getopt(
            argv, "gv", ["generate", "verbose", "doctest", "help", "offline"]
        )
    except getopt.error as msg:
        print(msg)
        print(__doc__)
        return 2
    verbosity = VERBOSITY
    # deal with the options
    # NOTE(review): "-g"/"--generate" and "--doctest" are accepted by getopt
    # above but not handled in this loop.
    for opt, _ in opts:
        if opt == "--help":
            print(__doc__)
            return 0
        if opt == "--offline":
            print("Skipping any tests requiring internet access")
            EXCLUDE_DOCTEST_MODULES.extend(ONLINE_DOCTEST_MODULES)
            # This is a bit of a hack...
            requires_internet.check.available = False
            # Monkey patch for urlopen()
            def dummy_urlopen(url):
                raise RuntimeError(
                    "Internal test suite error, attempting to use internet despite --offline setting"
                )
            urllib.request.urlopen = dummy_urlopen
        if opt == "-v" or opt == "--verbose":
            verbosity = 2
    # deal with the arguments, which should be names of tests to run
    for arg_num in range(len(args)):
        # strip off the .py if it was included
        if args[arg_num][-3:] == ".py":
            args[arg_num] = args[arg_num][:-3]
    print(f"Python version: {sys.version}")
    print(f"Operating system: {os.name} {sys.platform}")
    # run the tests
    runner = TestRunner(args, verbosity)
    return runner.run()
def close_connection(conn: Connection):
    """
    Closes current connection.

    :param conn Connection: Connection to close.
    :return: True when a connection was closed, False when `conn` was falsy.
    """
    if not conn:
        return False
    conn.close()
    return True
import warnings
def tile_memory_free(y, shape):
    """
    XXX Will be deprecated
    Tile vector along multiple dimension without allocating new memory.

    Parameters
    ----------
    y : np.array, shape (n,)
        data
    shape : np.array, shape (m),

    Returns
    -------
    Y : np.array, shape (n, *shape)
    """
    warnings.warn('Will be deprecated. Use np.newaxis instead')
    # Append one broadcastable singleton axis per requested dimension.
    for _ in shape:
        y = y[..., np.newaxis]
    return y
def load_ref_system():
    """ Returns d-talose as found in the IQMol fragment library.
        All credit to https://github.com/nutjunkie/IQmol

        The geometry is passed verbatim as an XYZ-style block of
        "element x y z" lines (coordinates presumably in Angstrom —
        confirm against the psr API).
    """
    return psr.make_system("""
      C     -0.6934     -0.4440     -0.1550
      C     -2.0590      0.1297      0.3312
      C     -3.1553     -0.9249      0.1673
      O     -0.9091     -0.8895     -1.4780
      C      0.4226      0.6500     -0.0961
      O     -1.9403      0.6391      1.6411
      O     -3.6308     -1.5177      1.1069
      C      1.7734      0.0930     -0.6280
      O      0.6442      1.1070      1.2190
      C      2.7961      1.2385     -0.8186
      O      2.2979     -0.9417      0.1683
      O      3.8858      0.8597     -1.6117
      H     -0.4009     -1.3143      0.4844
      H     -2.3349      1.0390     -0.2528
      H     -3.4909     -1.1261     -0.8615
      H     -0.0522     -1.1155     -1.8272
      H      0.1195      1.5189     -0.7325
      H     -2.0322     -0.0862      2.2502
      H      1.5977     -0.4374     -1.5988
      H     -0.2204      1.2523      1.6061
      H      3.1423      1.6308      0.1581
      H      2.3529      2.0761     -1.3846
      H      2.4151     -0.5980      1.0463
      H      4.2939      0.1096     -1.1961
        """)
def format_user_id(user_id):
    """
    Wrap a Slack user id in mention syntax so Slack tags it.

    Args:
        user_id (str): A slack user id

    Returns:
        str: A user id in a Slack tag
    """
    return "<@" + user_id + ">"
import socket
import ssl
def test_module(params: dict):
    """
    Returning 'ok' indicates that the integration works like it is supposed to.
    This test works by loading the configured certificate/key pair into a
    server-side TLS context, which performs the same key/cert consistency
    check the listening server would.

    Args:
        params (dict): The integration parameters

    Returns:
        'ok' if test passed, anything else will fail the test.
    """
    certificate_path = private_key_path = None
    try:
        certificate = str(params.get('certificate'))
        private_key = str(params.get('private_key'))

        certificate_file = NamedTemporaryFile(mode='w', delete=False)
        certificate_path = certificate_file.name
        certificate_file.write(certificate)
        certificate_file.close()

        private_key_file = NamedTemporaryFile(mode='w', delete=False)
        private_key_path = private_key_file.name
        private_key_file.write(private_key)
        private_key_file.close()

        # ssl.wrap_socket() was deprecated in 3.7 and removed in 3.12;
        # loading the chain into a server context performs the same
        # certificate/key validation without opening a socket.
        context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        context.load_cert_chain(certfile=certificate_path, keyfile=private_key_path)
        return 'ok'
    except ssl.SSLError as e:
        if e.reason == 'KEY_VALUES_MISMATCH':
            return 'Private and Public keys do not match'
        # Previously other SSL errors fell through and returned None.
        return f'Test failed with the following error: {repr(e)}'
    except Exception as e:
        return f'Test failed with the following error: {repr(e)}'
    finally:
        # The NamedTemporaryFiles are created with delete=False, so remove
        # them explicitly (the previous version leaked them).
        for path in (certificate_path, private_key_path):
            if path:
                try:
                    os.unlink(path)
                except OSError:
                    pass
def class_loss_regr(num_classes, num_cam):
    """Loss function for rpn regression
    Args:
        num_anchors: number of anchors (9 in here)
        num_cam : number of cam (3 in here)
    Returns:
        Smooth L1 loss function
                               0.5*x*x (if x_abs < 1)
                               x_abx - 0.5 (otherwise)

    NOTE(review): `K` (backend), `lambda_cls_regr` and `epsilon` are
    module-level globals defined elsewhere in this file — confirm before
    reuse.
    """
    def class_loss_regr_fixed_num(y_true, y_pred):
        # y_true packs [mask | targets]: the first num_cam*4*num_classes
        # entries are the 0/1 mask, the rest are the regression targets.
        #x = y_true[:, :, 4*num_classes:] - y_pred
        x = y_true[:, :, num_cam*4*num_classes:] - y_pred
        x_abs = K.abs(x)
        # Smooth L1: quadratic below |x| == 1, linear above.
        x_bool = K.cast(K.less_equal(x_abs, 1.0), 'float32')
        #return lambda_cls_regr * K.sum(y_true[:, :, :4*num_classes] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :, :4*num_classes])
        return lambda_cls_regr * K.sum(y_true[:, :, :num_cam*4*num_classes] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :, :num_cam*4*num_classes])
        #return lambda_cls_regr * K.sum(y_true[:, :, :num_cam*4*num_classes] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :, :num_cam*4*num_classes]) * 0
    return class_loss_regr_fixed_num
import smtplib
import ssl
def smtplib_connector(hostname, port, username=None, password=None, use_ssl=False):
    """ A utility class that generates an SMTP connection factory.
    :param str hostname: The SMTP server's hostname
    :param int port: The SMTP server's connection port
    :param str username: The SMTP server username
    :param str password: The SMTP server password
    :param bool use_ssl: Whether to use SSL (implicit TLS via SMTP_SSL)
    """
    def connect():
        """Open, optionally authenticate, and return a new SMTP connection."""
        if use_ssl:
            # SMTP_SSL negotiates TLS during connect. The previous version
            # also called starttls() on the SMTP_SSL connection, which fails
            # because a server on an implicit-TLS port does not advertise
            # STARTTLS on the already-encrypted channel.
            conn = smtplib.SMTP_SSL(hostname, port,
                                    context=ssl.create_default_context())
        else:
            conn = smtplib.SMTP(hostname, port)
        if username or password:
            conn.login(username, password)
        return conn
    return connect
def _to_one_hot_sequence(indexed_sequence_tensors):
    """Replace the integer-indexed SEQUENCE_KEY field with a one-hot encoding.

    Each amino-acid index in the sequence becomes a one-hot vector whose depth
    is the size of the amino-acid vocabulary. For example, a sequence starting
    [1, 4, ...] becomes [[0, 1, 0, ...], [0, 0, 0, 0, 1, ...], ...].

    Args:
      indexed_sequence_tensors: dict containing SEQUENCE_KEY, whose value is a
        tensor of integer amino-acid indices.

    Returns:
      The same dict, with SEQUENCE_KEY replaced by its one-hot encoding; all
      other fields are left untouched.
    """
    vocabulary_size = len(utils.AMINO_ACID_VOCABULARY)
    indexed_sequence_tensors[SEQUENCE_KEY] = tf.one_hot(
        indices=indexed_sequence_tensors[SEQUENCE_KEY], depth=vocabulary_size)
    return indexed_sequence_tensors
def kaiser_smooth(x, beta, window_len=41):
    """Smooth a 1-D signal with a normalized Kaiser window.

    The signal is mirror-extended at both ends so the window can be applied
    at the borders; the padding is trimmed afterwards, so the output has the
    same length as the input.

    Args:
        x: 1-D array-like signal to smooth.
        beta: Kaiser window shape parameter (0 gives a rectangular window).
        window_len: window size; needs to be odd for a proper (symmetric)
            response. Defaults to 41, preserving the original behavior.

    Returns:
        np.ndarray of the same length as ``x``.
    """
    half = window_len // 2
    # Mirror the data at beginning and end to apply the window at the borders.
    s = np.r_[x[window_len - 1:0:-1], x, x[-1:-window_len:-1]]
    w = np.kaiser(window_len, beta)
    y = np.convolve(w / w.sum(), s, mode='valid')
    # Trim the mirrored padding (previously hard-coded as 20 == (41 - 1) // 2).
    return y[half:len(y) - half]
def get_indel_dicts(bamfile, target):
    """Collect insertions in alignments within target regions.

    Returns a dict mapping each insertion locus to a pair of
    (tuple of distinct insertion lengths, total read coverage).
    """
    alignments = pysam.AlignmentFile(bamfile, "rb")
    coverage_by_locus = defaultdict(int)
    lengths_by_locus = defaultdict(list)
    for chrom, region_start, region_end in parse_bed(target):
        # Pad each region by one read length (151 bp) on both sides.
        region_start = int(region_start) - 151
        region_end = int(region_end) + 151
        for aln in alignments.fetch(chrom, int(region_start), int(region_end)):
            if not (good_alignment(aln) and
                    cigar_has_insertion(aln.cigarstring)):
                continue
            ref_start = aln.get_reference_positions(full_length=True)[0]
            if ref_start is None:
                continue
            locus, ins_length = parse_cigartuple(aln.cigar, ref_start,
                                                 aln.reference_name)
            # Only keep insertions whose position lies inside the padded region.
            if pos_in_interval(locus.split(':')[1], region_start, region_end):
                lengths_by_locus[locus].append(ins_length)
                coverage_by_locus[locus] += 1
    alignments.close()
    return {locus: (tuple(set(lengths_by_locus[locus])), int(coverage))
            for locus, coverage in coverage_by_locus.items()}
def _ComputeRelativeAlphaBeta(omega_b, position_b, apparent_wind_b):
  """Computes local alpha and beta angles, in degrees, from kinematics.

  The local velocity at a section is the apparent wind minus the rigid-body
  velocity omega x position (subtracted because the aero reference frame
  moves opposite to the rigid body):
      v_section_b = apparent_wind_b - omega_b X position_b
  Then, by definition:
      alpha_rad = atan2(-v_section_b_z, -v_section_b_x)
      beta_rad = asin(-v_section_b_y / mag(v_section_b))
  where _x, _y, _z are the body-coordinate components.

  Args:
    omega_b: Array of size (n, 3). Body rates of the kite [rad/s].
    position_b: Array of size (1, 3). Position of the surface at which the
      local alpha/beta are computed [m].
    apparent_wind_b: Array of size (n, 3). Apparent wind vector from the
      state estimator [m/s].

  Returns:
    local_alpha_deg, local_beta_deg: The local alpha and beta values.
  """
  assert np.shape(omega_b) == np.shape(apparent_wind_b)
  section_vel = apparent_wind_b - np.cross(
      omega_b, position_b, axisa=1, axisb=1)
  section_speed = np.linalg.norm(section_vel, axis=1)
  local_alpha_deg = np.degrees(
      np.arctan2(-section_vel[:, 2], -section_vel[:, 0]))
  local_beta_deg = np.degrees(np.arcsin(-section_vel[:, 1] / section_speed))
  return local_alpha_deg, local_beta_deg
def make_chord(midi_nums, duration, sig_cons=CosSignal, framerate=11025):
    """Make a chord with the given duration.
    midi_nums: sequence of int MIDI note numbers
    duration: float seconds
    sig_cons: Signal constructor function
    framerate: int frames per second
    returns: Wave
    """
    # Sum one signal per note, converting MIDI numbers to frequencies.
    chord_signal = sum(sig_cons(midi_to_freq(num)) for num in midi_nums)
    wave = chord_signal.make_wave(duration, framerate=framerate)
    wave.apodize()
    return wave
def calculate_percent(partial, total):
    """Return ``partial`` as a percentage of ``total``, e.g. ``'25.0%'``.

    A zero (or otherwise falsy) total yields ``'0%'`` instead of dividing.
    """
    if not total:
        return '0%'
    return f'{round(partial / total * 100, 2)}%'
import requests
def okgets(urls):
    """Multi-threaded requests.get, only returning valid response objects
    :param urls: A container of str URLs
    :returns: A tuple of requests.Response objects
    """
    # nest() composes the three stages into a single callable applied to urls:
    #   ripper(requests.get): presumably fetches all URLs concurrently -- TODO confirm
    #   filt(statusok): keeps only responses that pass the statusok predicate
    #   tuple: materializes the surviving responses
    # NOTE(review): nest/ripper/filt/statusok are defined elsewhere in this
    # file; the stage descriptions above are inferred from their names and
    # the docstring, and should be verified against their definitions.
    return nest(
        ripper(requests.get),
        filt(statusok),
        tuple
    )(urls)
def worker(args):
    """
    This function does the work of returning a URL for the NDSE view
    """
    # Build the NDSE view request. The return URL is where the recipient goes
    # once done with the NDSE; in practice users rarely "finish" there, so
    # assume control will not be passed back to this app.
    console_request = ConsoleViewRequest(return_url=args["ds_return_url"])
    if args["starting_view"] == "envelope" and args["envelope_id"]:
        console_request.envelope_id = args["envelope_id"]
    # Build an authenticated API client and request the console view URL.
    # Exceptions will be caught by the calling function.
    client = ApiClient()
    client.host = args["base_path"]
    client.set_default_header(
        "Authorization", "Bearer " + args["ds_access_token"])
    console_view = EnvelopesApi(client).create_console_view(
        args["account_id"], console_view_request=console_request)
    return {"redirect_url": console_view.url}
import warnings
import math
def plot_horiz_xsection_quiver_map(Grids, ax=None,
                                   background_field='reflectivity',
                                   level=1, cmap='pyart_LangRainbow12',
                                   vmin=None, vmax=None,
                                   u_vel_contours=None,
                                   v_vel_contours=None,
                                   w_vel_contours=None,
                                   wind_vel_contours=None,
                                   u_field='u', v_field='v', w_field='w',
                                   show_lobes=True, title_flag=True,
                                   axes_labels_flag=True,
                                   colorbar_flag=True,
                                   colorbar_contour_flag=False,
                                   bg_grid_no=0, contour_alpha=0.7,
                                   coastlines=True,
                                   quiver_spacing_x_km=10.0,
                                   quiver_spacing_y_km=10.0,
                                   gridlines=True,
                                   quiverkey_len=5.0,
                                   quiverkey_loc='best',
                                   quiver_width=0.01):
    """
    This procedure plots a horizontal cross section of winds from wind fields
    generated by PyDDA using quivers onto a geographical map. The length of
    the quivers varies with wind speed.
    Parameters
    ----------
    Grids: list
        List of Py-ART Grids to visualize
    ax: matplotlib axis handle (with cartopy ccrs)
        The axis handle to place the plot on. Set to None to create a new map.
        Note: the axis needs to be in a PlateCarree() projection. Support for
        other projections is planned in the future.
    background_field: str
        The name of the background field to plot the quivers on.
    level: int
        The number of the vertical level to plot the cross section through.
    cmap: str or matplotlib colormap
        The name of the matplotlib colormap to use for the background field.
    vmin: float
        The minimum bound to use for plotting the background field. None will
        automatically detect the background field minimum.
    vmax: float
        The maximum bound to use for plotting the background field. None will
        automatically detect the background field maximum.
    u_vel_contours: 1-D array
        The contours to use for plotting contours of u. Set to None to not
        display such contours.
    v_vel_contours: 1-D array
        The contours to use for plotting contours of v. Set to None to not
        display such contours.
    w_vel_contours: 1-D array
        The contours to use for plotting contours of w. Set to None to not
        display such contours.
    wind_vel_contours: 1-D array
        The contours to use for plotting contours of horizontal wind speed.
        Set to None to not display such contours.
    u_field: str
        Name of zonal wind (u) field in Grids.
    v_field: str
        Name of meridional wind (v) field in Grids.
    w_field: str
        Name of vertical wind (w) field in Grids.
    show_lobes: bool
        If True, the dual doppler lobes from each pair of radars will be shown.
    title_flag: bool
        If True, PyDDA will generate a title for the plot.
    axes_labels_flag: bool
        If True, PyDDA will generate axes labels for the plot.
    colorbar_flag: bool
        If True, PyDDA will generate a colorbar for the plot background field.
    colorbar_contour_flag: bool
        If True, PyDDA will generate a colorbar for the contours.
    bg_grid_no: int
        Number of grid in Grids to take background field from.
        Set to -1 to use maximum value from all grids.
    contour_alpha: float
        Alpha (transparency) of velocity contours. 0 = transparent, 1 = opaque
    coastlines: bool
        Set to true to display coastlines.
    quiver_spacing_x_km: float
        Spacing in km between quivers in x axis.
    quiver_spacing_y_km: float
        Spacing in km between quivers in y axis.
    gridlines: bool
        Set to true to show grid lines.
    quiverkey_len: float
        Length to use for the quiver key in m/s.
    quiverkey_loc: str
        Location of quiverkey. One of:
        'best'
        'top_left'
        'top'
        'top_right'
        'bottom_left'
        'bottom'
        'bottom_right'
        'left'
        'right'
        'top_left_outside'
        'top_right_outside'
        'bottom_left_outside'
        'bottom_right_outside'
        'best' will put the quiver key in the corner with the fewest amount of
        valid data points while keeping the quiver key inside the plot.
        The rest of the options will put the quiver key in that
        particular part of the plot.
    quiver_width: float
        The width of the lines for the quiver given as a fraction
        relative to the plot width. Use this to specify the thickness
        of the quiver lines.
    Returns
    -------
    ax: matplotlib axis
        Axis handle to output axis
    """
    # Background field: a single grid, or the per-point max over all grids.
    if(bg_grid_no > -1):
        grid_bg = Grids[bg_grid_no].fields[background_field]['data']
    else:
        grid_array = np.ma.stack(
            [x.fields[background_field]['data'] for x in Grids])
        grid_bg = grid_array.max(axis=0)
    if(vmin is None):
        vmin = grid_bg.min()
    if(vmax is None):
        vmax = grid_bg.max()
    # Coordinates: altitude and x/y in km, lat/lon slices at this level.
    grid_h = Grids[0].point_altitude['data']/1e3
    grid_x = Grids[0].point_x['data']/1e3
    grid_y = Grids[0].point_y['data']/1e3
    grid_lat = Grids[0].point_latitude['data'][level]
    grid_lon = Grids[0].point_longitude['data'][level]
    qloc_x, qloc_y = _parse_quiverkey_string(
        quiverkey_loc, grid_h[level], grid_x[level],
        grid_y[level], grid_bg[level])
    dx = np.diff(grid_x, axis=2)[0, 0, 0]
    dy = np.diff(grid_y, axis=1)[0, 0, 0]
    # Fill masked wind components with NaN so quiver/contour skip them.
    if(np.ma.isMaskedArray(Grids[0].fields[u_field]['data'])):
        u = Grids[0].fields[u_field]['data'].filled(fill_value=np.nan)
    else:
        u = Grids[0].fields[u_field]['data']
    if(np.ma.isMaskedArray(Grids[0].fields[v_field]['data'])):
        v = Grids[0].fields[v_field]['data'].filled(fill_value=np.nan)
    else:
        v = Grids[0].fields[v_field]['data']
    # Bug fix: previously this checked u_field's maskedness before filling w,
    # which raised AttributeError when only one of the two was masked.
    if(np.ma.isMaskedArray(Grids[0].fields[w_field]['data'])):
        w = Grids[0].fields[w_field]['data'].filled(fill_value=np.nan)
    else:
        w = Grids[0].fields[w_field]['data']
    transform = ccrs.PlateCarree()
    if(ax is None):
        ax = plt.axes(projection=transform)
    the_mesh = ax.pcolormesh(grid_lon[:, :], grid_lat[:, :],
                             grid_bg[level, :, :],
                             cmap=cmap, transform=transform, zorder=0,
                             vmin=vmin, vmax=vmax)
    # Thin the quivers so they are roughly quiver_spacing_*_km apart.
    quiver_density_x = int((1/dx)*quiver_spacing_x_km)
    quiver_density_y = int((1/dy)*quiver_spacing_y_km)
    q = ax.quiver(grid_lon[::quiver_density_y, ::quiver_density_x],
                  grid_lat[::quiver_density_y, ::quiver_density_x],
                  u[level, ::quiver_density_y, ::quiver_density_x],
                  v[level, ::quiver_density_y, ::quiver_density_x],
                  transform=transform, width=quiver_width,
                  scale=25.*quiverkey_len)
    quiver_font = {'family': 'sans-serif',
                   'style': 'normal',
                   'variant': 'normal',
                   'weight': 'bold',
                   'size': 'medium'}
    ax.quiverkey(q, qloc_x, qloc_y,
                 quiverkey_len, label=(str(quiverkey_len) + ' m/s'),
                 fontproperties=quiver_font)
    if(colorbar_flag is True):
        cp = Grids[bg_grid_no].fields[background_field]['long_name']
        cp.replace(' ', '_')
        cp = cp + ' [' + Grids[bg_grid_no].fields[background_field]['units']
        cp = cp + ']'
        plt.colorbar(the_mesh, ax=ax, label=(cp))
    # Optional filled contours of u, masked below the lowest contour level.
    if(u_vel_contours is not None):
        u_filled = np.ma.masked_where(u[level, :, :] < np.min(u_vel_contours),
                                      u[level, :, :])
        try:
            cs = ax.contourf(grid_lon[:, :], grid_lat[:, :],
                             u_filled, levels=u_vel_contours, linewidths=2,
                             alpha=contour_alpha, zorder=2, extend='both')
            cs.set_clim([np.min(u_vel_contours), np.max(u_vel_contours)])
            cs.cmap.set_under(color='white', alpha=0)
            cs.cmap.set_over(color='white', alpha=0)
            cs.cmap.set_bad(color='white', alpha=0)
            ax.clabel(cs)
            if(colorbar_contour_flag is True):
                ax2 = plt.colorbar(cs, ax=ax, label='U [m/s]', extend='both',
                                   spacing='proportional')
        except ValueError:
            warnings.warn(("Cartopy does not support blank contour plots, " +
                           "contour color map not drawn!"), RuntimeWarning)
    if(v_vel_contours is not None):
        v_filled = np.ma.masked_where(v[level, :, :] < np.min(v_vel_contours),
                                      v[level, :, :])
        try:
            # Bug fix: levels previously used u_vel_contours here, drawing the
            # v contours at the u levels (and crashing when only v was given).
            cs = ax.contourf(grid_lon[:, :], grid_lat[:, :],
                             v_filled, levels=v_vel_contours, linewidths=2,
                             alpha=contour_alpha, zorder=2, extend='both')
            cs.set_clim([np.min(v_vel_contours), np.max(v_vel_contours)])
            cs.cmap.set_under(color='white', alpha=0)
            cs.cmap.set_over(color='white', alpha=0)
            cs.cmap.set_bad(color='white', alpha=0)
            ax.clabel(cs)
            if(colorbar_contour_flag is True):
                ax2 = plt.colorbar(cs, ax=ax, label='V [m/s]', extend='both',
                                   spacing='proportional')
        except ValueError:
            warnings.warn(("Cartopy does not support blank contour plots, " +
                           "contour color map not drawn!"), RuntimeWarning)
    if(w_vel_contours is not None):
        w_filled = np.ma.masked_where(w[level, :, :] < np.min(w_vel_contours),
                                      w[level, :, :])
        try:
            cs = ax.contourf(grid_lon[::, ::], grid_lat[::, ::],
                             w_filled, levels=w_vel_contours, linewidths=2,
                             alpha=contour_alpha, zorder=2, extend='both')
            cs.set_clim([np.min(w_vel_contours), np.max(w_vel_contours)])
            cs.cmap.set_under(color='white', alpha=0)
            cs.cmap.set_over(color='white', alpha=0)
            cs.cmap.set_bad(color='white', alpha=0)
            ax.clabel(cs)
            if(colorbar_contour_flag is True):
                ax2 = plt.colorbar(cs, ax=ax, label='W [m/s]', extend='both',
                                   spacing='proportional',
                                   ticks=w_vel_contours)
        except ValueError:
            warnings.warn(("Cartopy does not support color maps on blank " +
                           "contour plots, contour color map not drawn!"),
                          RuntimeWarning)
    if(wind_vel_contours is not None):
        vel = np.ma.sqrt(u[level, :, :]**2 + v[level, :, :]**2)
        vel = vel.filled(fill_value=np.nan)
        try:
            # Bug fix: previously plotted against grid_x/grid_y (km), which
            # placed the contours off the lat/lon axes used everywhere else.
            # Bug fix: label was '|V\ [m/s]' and ticks used w_vel_contours.
            cs = ax.contourf(grid_lon[:, :], grid_lat[:, :],
                             vel, levels=wind_vel_contours, linewidths=2,
                             alpha=contour_alpha)
            cs.cmap.set_under(color='white', alpha=0)
            cs.cmap.set_bad(color='white', alpha=0)
            ax.clabel(cs)
            if(colorbar_contour_flag is True):
                ax2 = plt.colorbar(cs, ax=ax, label='|V| [m/s]', extend='both',
                                   spacing='proportional',
                                   ticks=wind_vel_contours)
        except ValueError:
            warnings.warn(("Cartopy does not support color maps on blank " +
                           "contour plots, contour color map not drawn!"),
                          RuntimeWarning)
    # Dual-Doppler lobes: beam-crossing-angle contours for each radar pair.
    bca_min = math.radians(Grids[0].fields[u_field]['min_bca'])
    bca_max = math.radians(Grids[0].fields[u_field]['max_bca'])
    if(show_lobes is True):
        for i in range(len(Grids)):
            for j in range(len(Grids)):
                if (i != j):
                    bca = retrieval.get_bca(Grids[j].radar_longitude['data'],
                                            Grids[j].radar_latitude['data'],
                                            Grids[i].radar_longitude['data'],
                                            Grids[i].radar_latitude['data'],
                                            Grids[j].point_x['data'][0],
                                            Grids[j].point_y['data'][0],
                                            Grids[j].get_projparams())
                    ax.contour(
                        grid_lon[:, :], grid_lat[:, :], bca,
                        levels=[bca_min, bca_max], color='k', zorder=1)
    if(axes_labels_flag is True):
        # Bug fix: labels were swapped (x is longitude, y is latitude).
        ax.set_xlabel(r'Longitude [$\degree$]')
        ax.set_ylabel(r'Latitude [$\degree$]')
    if(title_flag is True):
        ax.set_title(
            ('PyDDA retreived winds @' + str(grid_h[level, 0, 0]) + ' km'))
    if(coastlines is True):
        ax.coastlines(resolution='10m')
    if(gridlines is True):
        ax.gridlines()
    ax.set_extent([grid_lon.min(), grid_lon.max(),
                   grid_lat.min(), grid_lat.max()])
    # Tick every tenth of a degree across the displayed extent.
    num_tenths = round((grid_lon.max()-grid_lon.min())*10)+1
    the_ticks_x = np.round(
        np.linspace(grid_lon.min(), grid_lon.max(), num_tenths), 1)
    num_tenths = round((grid_lat.max()-grid_lat.min())*10)+1
    the_ticks_y = np.round(
        np.linspace(grid_lat.min(), grid_lat.max(), num_tenths), 1)
    ax.set_xticks(the_ticks_x)
    ax.set_yticks(the_ticks_y)
    return ax
def register(registered_collection, reg_key):
  """Register decorated function or class to collection.

  Stores the decorated function or class in registered_collection under
  reg_key. A string key may be hierarchical: reg_key="my_model/my_exp/cfg"
  stores the object at registered_collection["my_model"]["my_exp"]["cfg"],
  creating intermediate dictionaries as needed. This decorator is meant to
  be used together with the lookup() function in this file.

  Args:
    registered_collection: a dictionary that receives the registration.
    reg_key: The key for retrieving the registered function or class. If it
      is a string, it can be hierarchical like my_model/my_exp/my_config_0.

  Returns:
    A decorator function

  Raises:
    KeyError: when the function or class to register already exists, or a
      path component is already occupied by a registered function or class.
  """
  def decorator(fn_or_cls):
    """Put fn_or_cls in the dictionary."""
    if isinstance(reg_key, str):
      *parent_keys, leaf_key = reg_key.split("/")
    else:
      parent_keys, leaf_key = [], reg_key
    node = registered_collection
    for depth, part in enumerate(parent_keys):
      node = node.setdefault(part, {})
      if not isinstance(node, dict):
        raise KeyError(
            "Collection path {} at position {} already registered as "
            "a function or class.".format(part, depth))
    if leaf_key in node:
      raise KeyError("Function or class {} registered multiple times.".format(
          leaf_key))
    node[leaf_key] = fn_or_cls
    return fn_or_cls
  return decorator
def form_hhaa_records(df,
                      team_locn='h',
                      records='h',
                      feature='ftGoals'):
    """
    Add per-team lagged copies of a feature to a league table of matches.

    Groups the table by ``team_locn`` and, for each team, appends one shifted
    column per lag n (0 .. group size - 1) named
    ``{team_locn}_{records}_{feature}-{n}``, then recombines the groups in
    the original row order.
    """
    value_col = team_locn + '_' + feature
    pieces = []
    for _, group in df.groupby(by=team_locn):
        lagged = {f'{team_locn}_{records}_{feature}-{lag}':
                  group[value_col].shift(lag)
                  for lag in range(len(group))}
        pieces.append(
            pd.concat([group, pd.DataFrame(lagged)], sort=True, axis=1))
    return pd.concat(pieces, axis=0, sort=True).sort_index()
def process_dataset(material: str, frequency: float, plot=False,
                    pr=False) -> float:
    """
    Take a set of data, fit curve and find thermal diffusivity.
    Parameters
    ----------
    material : str
        Gives material of this dataset. 'Cu' or 'Al'.
    frequency : float
        Frequency used, in mHz.
    plot : bool
        True if a plot of the curves should be shown.
    pr : bool
        True if the ODR output should be printed.
    Returns
    -------
    diffusivity : float
        The estimated thermal diffusivity of this material.
        NOTE(review): the returned value is computed from ``uarray``
        elements and so presumably carries an uncertainty (not a plain
        float, despite the annotation) — confirm against the ``uarray``
        import at the top of the file.
    """
    # Check parameter validity
    if material not in ['Cu', 'Al']:
        raise ValueError('Invalid material name')
    # Get file; data files follow the "<material>_<frequency>mHz.csv" pattern.
    filename = '{}_{}mHz.csv'.format(material, frequency)
    raw = pd.read_csv(filename,
                      names=['Time',
                             'Ref',
                             'Source',
                             'S1',
                             'S2',
                             'S3',
                             'S4',
                             'S5',
                             'S6'])
    # Set sensor position (in m) based on bar material
    if material == 'Cu':
        x = np.array([12, 35, 70, 150, 310, 610]) / 1000
        dx = np.full(6, 0.015)
    elif material == 'Al':
        x = np.array([27.5, 70, 150, 310, 630]) / 1000
        dx = np.array([0.25, 0.25, 0.25, 0.25, 0.5]) / 100
    # Start processing data into a useful format
    data = raw.to_numpy()
    # delete first row of zeroes
    data = np.delete(data, 0, 0)
    # For every temperature measurement, associates it with time and position
    # Also dumps data from the dodgy sensor
    # Calculates error in Temperature based a C class Pt100
    def add_independents(row):
        # row layout: [Time, Ref, Source, S1..S6]; temperatures are taken
        # relative to the reference sensor (row[1]). For 'Al' the first
        # sensor column (S1) is skipped — presumably the "dodgy sensor".
        if material == 'Cu':
            t = np.full(6, row[0])
            relative_temperature = row[3:] - row[1]
            temp_err = (row[3:] + row[1]) * 0.01 + 1.2
        elif material == 'Al':
            t = np.full(5, row[0])
            relative_temperature = row[4:] - row[1]
            temp_err = (row[4:] + row[1]) * 0.01 + 1.2
        return np.column_stack((t, x, dx, relative_temperature, temp_err))
    # This produces an array for each time measurment,
    # where each row is [t, x, T(x,t) ]
    data = np.apply_along_axis(add_independents, 1, data)
    # Extract the rows from each time measurement array into one big array
    data = np.reshape(data, (-1, 5))
    # Split columns into named vars for clarity
    # Note how the array has been transposed
    time, x, dx, Temperature, dT = data.T
    # Estimate time error
    dtime = np.full(len(time), 0.01)
    # NOTE(review): RealData below is given x=[time, x] but sx=[dx, dtime];
    # the error order looks swapped relative to the data order — confirm.
    dindep = [dx, dtime]
    # Set angular frquency, given we know frequency
    w = 2 * np.pi * (frequency / 1000)
    # Equation to fit to: a damped travelling thermal wave
    # T(x, t) = A * exp(-B*x) * sin(w*t - C*x).
    def model(params, independent):
        A, B, C = params
        t, x = independent
        return A * np.exp(- B * x) * np.sin(w * t - (C * x))
    # Fit curve with orthogonal distance regression (odr is presumably
    # scipy.odr, imported elsewhere in this file).
    mod = odr.Model(model)
    realData = odr.RealData([time, x], y=Temperature, sx=dindep, sy=dT)
    myodr = odr.ODR(realData, mod, beta0=[11., 2., 9.])
    output = myodr.run()
    parameters = output.beta
    if plot:
        # Plot experimental data
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.scatter(time, x, Temperature, s=1, color='black')
        # ax.scatter(time, x, Temperature, s=1, c=Temperature, cmap='plasma')
        ax.set_title('{} at {}mHz'.format(material, frequency))
        ax.set_xlabel('Time (s)')
        ax.set_ylabel('Distance (m)')
        ax.set_zlabel('Temperature (C)')
        # Plot the fitted function over 5 periods and the full bar length.
        sampling_time = 5 * 1000 / frequency
        sample_time = np.linspace(0, sampling_time, 750)
        sample_x = np.linspace(0, 0.65, 750)
        Time, X = np.meshgrid(sample_time, sample_x, sparse=True)
        sample_Temperature = model(parameters, [Time, X])
        ax.plot_surface(Time, X, sample_Temperature, cmap='plasma',
                        alpha=0.4)
        # ax.plot_wireframe(Time, X, sample_Temperature, color='black',
        #                   alpha=0.5)
    # Include sd uncertainties with parameters (uarray is presumably from the
    # `uncertainties` package, imported elsewhere in this file).
    pu = uarray(parameters, output.sd_beta)
    if pr:
        output.pprint()
        # print(pu)
    # Calculate diffusivity: D = w / (2 * B * C) from the decay constant (B)
    # and wavenumber (C) of the fitted thermal wave.
    return w / (2 * pu[1] * pu[2])
import os
def load_shuttle(main_data_path, folder='shuttle', df=None):
    """
    ____ _  _ _  _ ___ ___ _    ____
    [__  |__| |  |  |   |  |    |___
    ___] |  | |__|  |   |  |___ |___
    Load the Shuttle Landing Control dataset as a decoded DataFrame.
    From UCI https://archive.ics.uci.edu/ml/datasets/Shuttle+Landing+Control
    Returns the DataFrame and the name of its first column (the target,
    "Recommended Control Mode").
    NOTE(review): the ``df`` parameter is unused — it is overwritten below;
    presumably kept for a uniform loader signature. Confirm with callers.
    """
    # Encoder: the valid 1-based integer codes for each of the 7 columns.
    encoder_shuttle = [
        list(range(1, 3)),
        list(range(1, 3)),
        list(range(1, 5)),
        list(range(1, 3)),
        list(range(1, 3)),
        list(range(1, 5)),
        list(range(1, 3))
    ]
    # Columns names
    shuttle_columns = [
        'Recommended\nControl Mode', 'Positioning', 'Altimeter Error\nMagnitude', 'Altimeter Error\nSign',
        'Wind\nDirection', 'Wind\nStrength', 'Sky Condition'
    ]
    # Decoder: human-readable label for each 1-based code, per column.
    shuttle_decoder = [['Manual', 'Automatic'], ['Stable', 'Unstable'], ['Very Large', 'Large', 'Medium', 'Small'],
                       ['Positive', 'Negative'], ['Head', 'Tail'], ['Light', 'Medium', 'Strong', 'Very Strong'],
                       ['Good Visibility', 'No Visibility']]
    def combinatorial_from_record(record):
        """
        Generate the combinatorial rows for missing ones
        i.e. if * is present in a record it generates all the possible combinations for that column
        (works on dicts, it's easier than pd.DataFrame)
        """
        # Columns holding the '*' wildcard vs. columns with concrete values.
        combi = [k for k, v in record.items() if v == '*']
        non_combi = [k for k, v in record.items() if v != '*']
        if len(combi) > 0:
            # Cartesian product of every valid code for each wildcard column;
            # each combination yields one fully-specified copy of the record.
            combi_mesh_start = [encoder_shuttle[i] for i in combi]
            combi_cols = np.array(np.meshgrid(*combi_mesh_start)).T.reshape(-1, len(combi_mesh_start))
            retds = []
            for cs in combi_cols:
                retds.append({**{k: int(record[k]) for k in non_combi}, **{k: int(c) for k, c in zip(combi, cs)}})
            return retds
        else:
            return [{k: int(v) for k, v in record.items()}]
    raw_df = pd.read_csv(os.path.join(main_data_path, folder, 'shuttle-landing-control.data'), header=None)
    # Expand every wildcard record into all of its concrete combinations.
    df = pd.DataFrame(sum([combinatorial_from_record(record) for record in raw_df.to_dict('records')], []))
    # Decode the 1-based integer codes into their text labels.
    for col in df:
        df[col] = df[col].apply(lambda x: shuttle_decoder[col][x - 1])
    df.columns = [shuttle_columns[col] for col in df]
    # Reorder columns (target first, then a fixed feature order).
    df = df[[shuttle_columns[i] for i in [0, 1, -2, -3, -1, 2, 3]]]
    return df, df.columns.values[0]
from collections import OrderedDict
from matplotlib.gridspec import GridSpec
from matplotlib.ticker import MultipleLocator
def show_drizzle_HDU(hdu):
    """Make a figure from the multiple extensions in the drizzled grism file.
    Layout: one pair of columns per grism (narrow kernel panel + wide 2D
    spectrum panel); one row per position angle (PA), plus a bottom row for
    the PA-combined stack.
    Parameters
    ----------
    hdu : `~astropy.io.fits.HDUList`
        HDU list output by `drizzle_grisms_and_PAs`.
    Returns
    -------
    fig : `~matplotlib.figure.Figure`
        The figure.
    """
    # Figure layout from the primary header: NGRISM grisms across,
    # max(PAs per grism) + 1 rows down (the +1 is the combined row).
    h0 = hdu[0].header
    NX = h0['NGRISM']
    NY = 0
    grisms = OrderedDict()
    for ig in range(NX):
        g = h0['GRISM{0:03d}'.format(ig+1)]
        NY = np.maximum(NY, h0['N'+g])
        grisms[g] = h0['N'+g]
    NY += 1
    fig = plt.figure(figsize=(5*NX, 1*NY))
    # Alternate narrow (kernel) and wide (spectrum) columns per grism.
    widths = []
    for i in range(NX):
        widths.extend([0.2, 1])
    gs = GridSpec(NY, NX*2, height_ratios=[1]*NY, width_ratios=widths)
    for ig, g in enumerate(grisms):
        # PA-combined extensions for this grism (drawn on the bottom row).
        sci_i = hdu['SCI',g]
        wht_i = hdu['WHT',g]
        kern_i = hdu['KERNEL',g]
        h_i = sci_i.header
        # Display scaling: vmax from the bright end of the data, floored at
        # 5x the typical per-pixel RMS estimated from the weight map.
        clip = wht_i.data > 0
        if clip.sum() == 0:
            clip = np.isfinite(wht_i.data)
        avg_rms = 1/np.median(np.sqrt(wht_i.data[clip]))
        vmax = np.maximum(1.1*np.percentile(sci_i.data[clip],98),
                          5*avg_rms)
        vmax_kern = 1.1*np.percentile(kern_i.data,99.5)
        # Kernel
        ax = fig.add_subplot(gs[NY-1, ig*2+0])
        sh = kern_i.data.shape
        extent = [0, sh[1], 0, sh[0]]
        ax.imshow(kern_i.data, origin='lower', interpolation='Nearest',
                  vmin=-0.1*vmax_kern, vmax=vmax_kern, cmap=plt.cm.viridis_r,
                  extent=extent, aspect='auto')
        ax.set_xticklabels([]); ax.set_yticklabels([])
        ax.xaxis.set_tick_params(length=0)
        ax.yaxis.set_tick_params(length=0)
        # Spectrum (x axis in microns, from the extension's WMIN/WMAX).
        sh = sci_i.data.shape
        extent = [h_i['WMIN'], h_i['WMAX'], 0, sh[0]]
        ax = fig.add_subplot(gs[NY-1, ig*2+1])
        ax.imshow(sci_i.data, origin='lower',
                  interpolation='Nearest', vmin=-0.1*vmax, vmax=vmax,
                  extent=extent, cmap = plt.cm.viridis_r,
                  aspect='auto')
        ax.set_yticklabels([])
        ax.set_xlabel(r'$\lambda$ ($\mu$m) - '+g)
        # grism_major: module-level dict of per-grism tick spacing,
        # presumably defined elsewhere in this file — confirm.
        ax.xaxis.set_major_locator(MultipleLocator(grism_major[g]))
        # One row per individual position angle for this grism.
        for ip in range(grisms[g]):
            #print(ip, ig)
            pa = h0['{0}{1:02d}'.format(g, ip+1)]
            sci_i = hdu['SCI','{0},{1}'.format(g, pa)]
            wht_i = hdu['WHT','{0},{1}'.format(g, pa)]
            kern_i = hdu['KERNEL','{0},{1}'.format(g, pa)]
            h_i = sci_i.header
            # Kernel
            ax = fig.add_subplot(gs[ip, ig*2+0])
            sh = kern_i.data.shape
            extent = [0, sh[1], 0, sh[0]]
            ax.imshow(kern_i.data, origin='lower', interpolation='Nearest',
                      vmin=-0.1*vmax_kern, vmax=vmax_kern, extent=extent,
                      cmap=plt.cm.viridis_r, aspect='auto')
            ax.set_xticklabels([]); ax.set_yticklabels([])
            ax.xaxis.set_tick_params(length=0)
            ax.yaxis.set_tick_params(length=0)
            # Spectrum
            sh = sci_i.data.shape
            extent = [h_i['WMIN'], h_i['WMAX'], 0, sh[0]]
            ax = fig.add_subplot(gs[ip, ig*2+1])
            ax.imshow(sci_i.data, origin='lower',
                      interpolation='Nearest', vmin=-0.1*vmax, vmax=vmax,
                      extent=extent, cmap = plt.cm.viridis_r,
                      aspect='auto')
            ax.set_yticklabels([]); ax.set_xticklabels([])
            ax.xaxis.set_major_locator(MultipleLocator(grism_major[g]))
            # Annotate each PA row with its position angle.
            ax.text(0.015, 0.94, '{0:3.0f}'.format(pa), ha='left',
                    va='top',
                    transform=ax.transAxes, fontsize=8,
                    backgroundcolor='w')
            if (ig == (NX-1)) & (ip == 0):
                ax.text(0.98, 0.94, 'ID = {0}'.format(h0['ID']),
                        ha='right', va='top', transform=ax.transAxes,
                        fontsize=8, backgroundcolor='w')
    gs.tight_layout(fig, pad=0.1)
    return fig
def make_ln_func(variable):
    """Build a function computing the natural log of ``variable`` for a queryset."""
    def safe_ln_queryset(qs):
        """Natural log of the queryset's values, with log(0) mapped to 0."""
        values = qs.values_list(variable, flat=True)
        logged = np.log(values)
        # Replace the -inf produced by log(0) instead of propagating it.
        return np.where(logged == -np.inf, 0, logged)
    return safe_ln_queryset
def calc_all_energies(n, k, states, params):
    """Calculate all the energies for the states given. Can be used for Potts.

    Parameters
    ----------
    n : int
        Number of spins.
    k : int
        Number of states per spin: 2 for Ising, 3 for 3-state Potts.
    states : ndarray
        States for which to compute the energy.
    params : ndarray
        (h,J) vector: fields first, couplings after.

    Returns
    -------
    E : ndarray
        Energies of all given states.

    Raises
    ------
    NotImplementedError
        If k is neither 2 nor 3.
    """
    # (Removed an unused scratch array `s_` that was allocated but never read.)
    e = np.zeros(len(states))
    if k==2:
        for i in range(len(states)):
            s = states[i]
            # Energy is the negative of couplings (J) plus fields (h).
            e[i] -= fast_sum(params[n:], s)
            e[i] -= np.sum(s*params[:n])
    elif k==3:
        for i in range(len(states)):
            s = states[i]
            for ix in range(n):
                # fields: one field per (spin, state) pair, laid out state-major
                e[i] -= params[ix+s[ix]*n]
            e[i] -= fast_sum_ternary(params[n*k:], s)
    else: raise NotImplementedError
    return e
from heroku_connect.models import TriggerLog, TriggerLogArchive
def create_heroku_connect_schema(using=DEFAULT_DB_ALIAS):
    """
    Create Heroku Connect schema.
    Note:
        This function is only meant to be used for local development.
        In a production environment the schema will be created by
        Heroku Connect.
    Args:
        using (str): Alias for database connection.
    Returns:
        bool: ``True`` if the schema was created, ``False`` if the
        schema already exists.
    """
    connection = connections[using]
    with connection.cursor() as cursor:
        cursor.execute(_SCHEMA_EXISTS_QUERY, [settings.HEROKU_CONNECT_SCHEMA])
        schema_exists = cursor.fetchone()[0]
        if schema_exists:
            return False
        # Bug fix: this statement previously ran after the `with` block,
        # i.e. on a cursor that the context manager had already closed.
        cursor.execute("CREATE SCHEMA %s;", [AsIs(settings.HEROKU_CONNECT_SCHEMA)])
    with connection.schema_editor() as editor:
        for model in get_heroku_connect_models():
            editor.create_model(model)
        # Needs PostgreSQL and database superuser privileges (which is the case on Heroku):
        editor.execute('CREATE EXTENSION IF NOT EXISTS "hstore";')
        for cls in [TriggerLog, TriggerLogArchive]:
            editor.create_model(cls)
    return True
import re
def extract_sentences(modifier, split_text):
    """
    Extracts the sentences that contain the modifier references.

    The modifier is matched as a whole word, case-insensitively, with any
    regex metacharacters in it escaped.
    """
    pattern = re.compile(r"\b(?=\w)%s\b(?!\w)" % re.escape(modifier),
                         re.IGNORECASE)
    return [sentence for sentence in split_text if pattern.search(sentence)]
def get_1_neighbours(graph, i):
    """
    Return node ``i``'s out-neighbourhood, including ``i`` itself.
    """
    neighbourhood = graph.get_out_neighbours(i)
    return np.concatenate((neighbourhood, np.array([i])))
def window_slice(frame, center, window):
    """
    Index ranges for a window of size ``window`` centered at ``center``,
    clipped to the boundaries of ``frame``.

    Parameters
    ----------
    frame : ArrayLike
        Image frame; only its last two axes are used for bound-checking.
    center : Tuple
        (y, x) coordinate of the window.
    window : float,Tuple
        Window length, or a tuple giving a length per axis.

    Returns
    -------
    (ys, xs)
        Tuple of index ranges covered by the window along each axis.
    """
    half = np.asarray(window) / 2
    height, width = frame.shape[-2:]
    # Clip the rounded window corners to the frame bounds.
    lo = np.maximum(0, np.round(center - half), dtype=int, casting="unsafe")
    hi = np.minimum(
        (height - 1, width - 1), np.round(center + half), dtype=int,
        casting="unsafe"
    )
    ys = range(lo[0], hi[0] + 1)
    xs = range(lo[1], hi[1] + 1)
    return ys, xs
def revnum_to_revref(rev, old_marks):
    """Convert an hg revnum to a git-fast-import rev reference (an SHA1
    or a mark)"""
    mark = old_marks.get(rev)
    if mark:
        return mark
    # No (truthy) mark recorded: fall back to a 1-based mark reference.
    return b':%d' % (rev + 1)
def onset_precision_recall_f1(ref_intervals, est_intervals,
                              onset_tolerance=0.05, strict=False, beta=1.0):
    """Compute Precision, Recall and F-measure of note onsets.

    An estimated onset counts as correct when it lies within
    ``onset_tolerance`` (default +-50 ms) of a reference onset.  Offsets and
    pitches are ignored entirely, so an estimated onset can match a
    reference onset even when the two notes have completely different
    pitches (i.e. notes that would not match with :func:`match_notes`).
    Examples
    --------
    >>> ref_intervals, _ = mir_eval.io.load_valued_intervals(
    ...     'reference.txt')
    >>> est_intervals, _ = mir_eval.io.load_valued_intervals(
    ...     'estimated.txt')
    >>> (onset_precision,
    ...  onset_recall,
    ...  onset_f_measure) = mir_eval.transcription.onset_precision_recall_f1(
    ...     ref_intervals, est_intervals)
    Parameters
    ----------
    ref_intervals : np.ndarray, shape=(n,2)
        Array of reference notes time intervals (onset and offset times)
    est_intervals : np.ndarray, shape=(m,2)
        Array of estimated notes time intervals (onset and offset times)
    onset_tolerance : float > 0
        Tolerance, in seconds, for an estimated onset deviating from the
        reference onset. Default is 0.05 (50 ms).
    strict : bool
        If ``strict=False`` (the default), onset-matching threshold checks
        use ``<=`` (less than or equal); if ``strict=True`` they use ``<``
        (strictly less than).
    beta : float > 0
        Weighting factor for the f-measure (default value = 1.0).
    Returns
    -------
    precision : float
        The computed precision score
    recall : float
        The computed recall score
    f_measure : float
        The computed F-measure score
    """
    validate_intervals(ref_intervals, est_intervals)
    # With no reference or no estimated notes the metrics are undefined;
    # follow the convention of returning all zeros.
    if len(ref_intervals) == 0 or len(est_intervals) == 0:
        return 0., 0., 0.
    matched = match_note_onsets(ref_intervals, est_intervals,
                                onset_tolerance=onset_tolerance,
                                strict=strict)
    precision = float(len(matched)) / len(est_intervals)
    recall = float(len(matched)) / len(ref_intervals)
    f_measure = util.f_measure(precision, recall, beta=beta)
    return precision, recall, f_measure
def parse_acs_metadata(acs_metadata, groups):
    """Returns a map of variable ids to metadata for that variable, filtered
    to the specified groups and to labels starting with "Estimate!!Total".
    acs_metadata: The ACS metadata as json.
    groups: The list of group ids to include."""
    return {
        var_id: meta
        for var_id, meta in acs_metadata["variables"].items()
        if meta.get("group") in groups
        and meta["label"].startswith("Estimate!!Total")
    }
def split_series_using_lytaf(timearray, data, lytaf):
    """
    Proba-2 analysis code for splitting up LYRA timeseries around locations
    where LARs (and other data events) are observed.
    Parameters
    ----------
    timearray : `numpy.ndarray` of times understood by `sunpy.time.parse_time`
        function.
    data : `numpy.array` corresponding to the given time array
    lytaf : `numpy.recarray`
        Events obtained from querying LYTAF database using
        lyra.get_lytaf_events().
    Output
    ------
    output : `list` of dictionaries
        Each dictionary contains a sub-series corresponding to an interval of
        'good data'.
    """
    n = len(timearray)
    # Sample mask: 1 = good data, 0 = sample falls inside an LYTAF event.
    mask = np.ones(n)
    el = len(lytaf)
    # make the input time array a list of datetime objects
    datetime_array = []
    for tim in timearray:
        datetime_array.append(parse_time(tim))
    # scan through each entry retrieved from the LYTAF database
    for j in range(0, el):
        # want to mark all times with events as bad in the mask, i.e. = 0
        start_dt = lytaf['begin_time'][j]
        end_dt = lytaf['end_time'][j]
        # find the start and end indices for each event
        # NOTE(review): searchsorted assumes datetime_array is sorted in
        # ascending order -- confirm the input time array is monotonic.
        start_ind = np.searchsorted(datetime_array, start_dt)
        end_ind = np.searchsorted(datetime_array, end_dt)
        # append the mask to mark event as 'bad'
        mask[start_ind:end_ind] = 0
    # Non-zero entries of diff(mask) mark good<->bad transitions.
    diffmask = np.diff(mask)
    tmp_discontinuity = np.where(diffmask != 0.)
    # disc contains the indices of mask where there are discontinuities
    disc = tmp_discontinuity[0]
    if len(disc) == 0:
        print('No events found within time series interval. '
              'Returning original series.')
        return [{'subtimes': datetime_array, 'subdata': data}]
    # -1 in diffmask means went from good data to bad
    # +1 means went from bad data to good
    # want to get the data between a +1 and the next -1
    # if the first discontinuity is a -1 then the start of the series was good.
    if diffmask[disc[0]] == -1.0:
        # make sure we can always start from disc[0] below
        disc = np.insert(disc, 0, 0)
    split_series = []
    limit = len(disc)
    # now extract the good data regions and ignore the bad ones
    # (disc holds alternating good-start / good-end indices, hence step 2)
    for h in range(0, limit, 2):
        if h == limit-1:
            # can't index h+1 here. Go to end of series
            # NOTE(review): the slice stops at -1, which drops the final
            # sample -- this contradicts the "go to end of series" intent
            # and looks like an off-by-one; confirm before changing.
            subtimes = datetime_array[disc[h]:-1]
            subdata = data[disc[h]:-1]
            subseries = {'subtimes':subtimes, 'subdata':subdata}
            split_series.append(subseries)
        else:
            subtimes = datetime_array[disc[h]:disc[h+1]]
            subdata = data[disc[h]:disc[h+1]]
            subseries = {'subtimes':subtimes, 'subdata':subdata}
            split_series.append(subseries)
    return split_series
def getrqdata(request):
    """Return the request data for any HTTP method.
    Unlike the now defunct `REQUEST
    <https://docs.djangoproject.com/en/1.11/ref/request-response/#django.http.HttpRequest.REQUEST>`_
    attribute, this inspects the request's `method` in order to decide
    what to return.
    """
    method = request.method
    # PUT/DELETE bodies are not parsed by Django, so parse them here.
    # (`body` was named `raw_post_data` before Django 1.4.)
    if method in ('PUT', 'DELETE'):
        return QueryDict(request.body)
    if method == 'HEAD':
        return request.GET
    # For everything else Django exposes an attribute named after the
    # method (request.GET, request.POST, ...).
    return getattr(request, method)
from typing import List
def generate_per_level_fractions(highest_level_ratio: int, num_levels: int = NUM_LEVELS) -> List[float]:
    """
    Generate the per-level population fractions whose sum reaches the
    target (i.e. the highest level ratio).
    Args:
        highest_level_ratio:
            The 1:highest_level_ratio ratio for the highest level; i.e. the
            target sum for the geometric series.
        num_levels:
            The number of levels to calculate the sum over.
    Returns:
        A list of fractions of the population, per-level, with the highest
        level's fraction at the end of the list.
    """
    ratio = calc_geometric_ratio(highest_level_ratio, num_levels)
    # Iterate the exponents in descending order so the highest-level
    # information ends up at the end of the list directly, instead of
    # building ascending and reversing afterwards.
    return [(ratio ** exponent) / highest_level_ratio
            for exponent in range(num_levels - 1, -1, -1)]
def choose_transformations(name):
    """Prompts user with different data transformation options"""
    # First ask whether any transformation should be applied at all.
    confirm_question = {
        'type':'confirm',
        'message':'Would you like to apply some transformations to the file? (Default is no)',
        'name':'confirm_transformations',
        'default':False
    }
    # Only shown when the user confirmed above (see the 'when' callback).
    selection_question = {
        'type':'checkbox',
        'message':f'Ok {name}, let\'s select some transformation before we convert your file:',
        'name':'transformations',
        'choices':[
            {'name':'Change Column Names'},
            {'name':'Change File Name'}
        ],
        'when': lambda answers: answers['confirm_transformations']
    }
    return prompt(questions=[confirm_question, selection_question])
from typing import Dict
from typing import Tuple
def update_list_item_command(client: Client, args: Dict) -> Tuple[str, Dict, Dict]:
    """Updates a single list item and returns outputs in Demisto's format.
    Args:
        client: Client object with request
        args: Usually demisto.args()
    Returns:
        Outputs (human readable, context, raw response)
    """
    list_id = int(args.get('list_id'))  # type: ignore
    item_id = int(args.get('item_id'))  # type: ignore
    raw_response = client.update_list_item(
        list_id=list_id,
        item_id=item_id,
        type=args.get('type'),
        value=args.get('value'),
        risk=args.get('risk'),
        notes=args.get('notes')
    )
    # Bail out early when the API reports nothing was updated.
    if not raw_response:
        return f'{INTEGRATION_NAME} - Could not update list item.', {}, raw_response
    title = f'{INTEGRATION_NAME} - List item {item_id} from list {list_id} was updated successfully'
    entry = create_context_result(raw_response, LIST_ITEM_TRANS)
    context = {
        f'{INTEGRATION_CONTEXT_NAME}List(val.ID && val.ID === {list_id}).Item(val.ID === obj.ID)': entry
    }
    # Return data to Demisto
    return tableToMarkdown(title, entry), context, raw_response
import torch
def fft(input, inverse=False):
    """Interface with torch FFT routines for 3D signals.
    Computes the forward (or inverse) FFT of a 3d signal whose last
    dimension of size 2 holds the real/imaginary parts.
    Example
    -------
    x = torch.randn(128, 32, 32, 32, 2)
    x_fft = fft(x)
    x_ifft = fft(x, inverse=True)
    Parameters
    ----------
    x : tensor
        Complex input for the FFT.
    inverse : bool
        True for computing the inverse FFT.
    Raises
    ------
    TypeError
        In the event that x does not have a final dimension 2 i.e. not
        complex.
    Returns
    -------
    output : tensor
        Result of FFT or IFFT.
    """
    if not _is_complex(input):
        raise TypeError('The input should be complex (e.g. last dimension is 2)')
    # NOTE(review): callable torch.fft/torch.ifft is the legacy (pre-1.8)
    # API -- presumably this codebase pins an older torch; verify.
    transform = torch.ifft if inverse else torch.fft
    return transform(input, 3)
def dbdescs(data, dbname):
    """
    return the entire set of information for a specific server/database
    """
    # Role -> access-mode table; viewer is read-only, the rest read-write.
    roles = (('admin', 'rw'), ('user', 'rw'), ('viewer', 'ro'))
    return {role: onedesc(data, dbname, role, access)
            for role, access in roles}
from datetime import datetime
def format_date(date):
    """Format a ``'%Y-%m-%d %H:%M:%S'`` timestamp string as ``'DD Mon YYYY'``.

    The sentinel value ``'N/A'`` is passed through unchanged, as is any
    string that fails to parse (a warning is logged in that case).
    """
    try:
        if date != 'N/A':
            # ``datetime`` is the class imported above, so call strptime on
            # it directly. (The previous ``datetime.datetime.strptime``
            # raised AttributeError with this import style, and that error
            # was not caught by the ValueError handler below.)
            date = datetime.strptime(date, '%Y-%m-%d %H:%M:%S').strftime('%d %b %Y')
    except ValueError:
        logger.error("Unexpected ValueError while trying to format date -> {}".format(date))
    return date
def favor_attention(query,
                    key,
                    value,
                    kernel_transformation,
                    causal,
                    projection_matrix=None):
    """Computes FAVOR normalized attention.
    Args:
      query: query tensor.
      key: key tensor.
      value: value tensor.
      kernel_transformation: transformation used to get finite kernel features.
      causal: whether attention is causal or not.
      projection_matrix: projection matrix to be used.
    Returns:
      FAVOR normalized attention.
    """
    # Kernel feature maps of queries and keys: [B, L, H, M].
    q_prime = kernel_transformation(query, True, projection_matrix)
    k_prime = kernel_transformation(key, False, projection_matrix)
    # Bring the length axis to the front for the (non)causal helpers:
    # [L, B, H, M] for features, [L, B, H, D] for values.
    q_prime = q_prime.permute(1, 0, 2, 3)
    k_prime = k_prime.permute(1, 0, 2, 3)
    v = value.permute(1, 0, 2, 3)
    if causal:
        numerator = causal_numerator(q_prime, k_prime, v)
        denominator = causal_denominator(q_prime, k_prime)
    else:
        numerator = noncausal_numerator(q_prime, k_prime, v)
        denominator = noncausal_denominator(q_prime, k_prime)
    # Restore batch-first layout and normalize; the normalizer gains a
    # trailing singleton axis so it broadcasts over the feature dimension.
    numerator = numerator.permute(1, 0, 2, 3)
    denominator = denominator.permute(1, 0, 2)
    denominator = denominator.unsqueeze(dim=len(denominator.shape))
    return numerator / denominator
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.