content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def is_context_spec(mapping):
    """Return True IFF `mapping` is a mapping name *or* a date based mapping specification.
    Date-based specifications can be interpreted by the CRDS server with respect to the operational
    context history to determine the default operational context which was in use at that date.
    This function verifies syntax only, not the existence of corresponding context.
    >>> is_context_spec("hst_0042.pmap")
    True
    >>> is_context_spec("hst.pmap")
    True
    >>> is_context_spec("foo.pmap")
    True
    >>> is_context_spec("foo")
    False
    >>> is_context_spec("hst-2040-01-29T12:00:00")
    True
    >>> is_context_spec("hst-acs-2040-01-29T12:00:00")
    False
    """
    # A date-based spec must be a string matching the pipeline-context pattern.
    looks_date_based = (isinstance(mapping, python23.string_types)
                        and bool(PIPELINE_CONTEXT_RE.match(mapping)))
    return is_context(mapping) or looks_date_based
def find_pair(cards):
    """Find the best pair of cards plus the three highest ranked kickers.

    Parameters
    ----------
    cards : sequence of PokerCard
        Hand to search; sorted in place via ``PokerCard.cardsRank`` so the
        scan sees cards in rank order.
        # assumes `card_a - card_b == 0` means "same rank" -- TODO confirm

    Returns
    -------
    list
        [pair card 1, pair card 2, kicker 1, kicker 2, kicker 3] when a pair
        and three cards of a different rank exist, otherwise [].
    """
    # Sort so equal ranks are adjacent and kickers come out in rank order.
    PokerCard.cardsRank(cards)
    PairsList = []
    try:
        prevCard = cards[0]
    except:
        # Empty (or non-indexable) hand: nothing to pair.
        return []
    PairsList.append(prevCard)
    for card in cards[1:]:
        #check if current one is next to prevCard
        if prevCard - card == 0:
            # Same rank as the previous card: best pair found (list is
            # sorted), stop scanning.
            PairsList.append(card)
            break
        else:
            # Rank changed: restart the candidate pair at this card.
            PairsList = []
            PairsList.append(card)
            prevCard = card
    if len(PairsList) == 2:
        # Fill up with the three highest cards whose rank differs from the pair.
        for card in cards:
            if card - PairsList[0] != 0:
                PairsList.append(card)
            if len(PairsList) == 5:
                return PairsList
        return []
    else:
        return []
def mergeSort(nums):
    """Sort a list with merge sort and return a new sorted list.

    Args:
        nums: list of mutually comparable items; not modified (slicing
            copies before recursion).

    Returns:
        A new list with the items of ``nums`` in ascending order.
    """
    if len(nums) <= 1:
        return nums
    mid = len(nums) // 2
    # Recursively sort each half.
    left_nums = mergeSort(nums[:mid])
    right_nums = mergeSort(nums[mid:])
    # Merge the two sorted halves; `<=` keeps the sort stable.
    # (Removed leftover debug print() calls that spammed stdout.)
    left_pointer, right_pointer = 0, 0
    result = []
    while left_pointer < len(left_nums) and right_pointer < len(right_nums):
        if left_nums[left_pointer] <= right_nums[right_pointer]:
            result.append(left_nums[left_pointer])
            left_pointer += 1
        else:
            result.append(right_nums[right_pointer])
            right_pointer += 1
    # Exactly one of the halves may still have a sorted tail left over.
    result += left_nums[left_pointer:]
    result += right_nums[right_pointer:]
    return result
def readProtectedRegistry(protectedRegistryFile):
    """
    Read records from a protected registry and split them into two
    dictionaries mapping record hash -> site id.
    @return: (exactMatchDict, partialMatchDict)
    """
    exact, partial = {}, {}

    def stripped_lines():
        # Stream the registry file one stripped line at a time.
        with open(protectedRegistryFile, 'r') as handle:
            for raw_line in handle:
                yield raw_line.strip()

    # Route each parsed record into the exact or partial dictionary.
    for record in parseProtectedRecords(stripped_lines()):
        target = exact if record.isExact else partial
        target[record.hash] = record.siteId
    return exact, partial
import torch
def get_graph_feature(x, xyz=None, idx=None, k_hat=20):
    """Build edge features by subtracting each point's feature from the
    features of its k_hat nearest neighbors.

    :param x: (B,C,N)
        input features
    :param xyz: (B,3,N) or None
        coordinates used to build the kNN graph when ``idx`` is None
    :param idx: (B,N,k_hat)
        precomputed kNN graph index, or None to compute it from ``xyz``
    :param k_hat: (int)
        the neighbor number
    :return: graph feature (B,C,N,k_hat)
    """
    batch = x.size(0)
    n_points = x.size(2)
    x = x.view(batch, -1, n_points)
    if idx is None:
        idx = knn(xyz, k=k_hat)  # (batch, n_points, k_hat)
    # Offset each batch's indices so they address the flattened point axis.
    offsets = torch.arange(0, batch, device=x.device).view(-1, 1, 1) * n_points
    flat_idx = (idx + offsets).view(-1)
    n_dims = x.size(1)
    points = x.transpose(2, 1).contiguous()                 # (B, N, C)
    neighbors = points.view(batch * n_points, -1)[flat_idx, :]
    neighbors = neighbors.view(batch, n_points, k_hat, n_dims)
    centers = points.view(batch, n_points, 1, n_dims)
    # Neighbor-minus-center differences, back to channel-first layout.
    return (neighbors - centers).permute(0, 3, 1, 2)
def get_poly(time, npoly=3):
    """Return a design matrix of normalized time polynomials.

    Column ``k`` holds ``t**k`` for k = 0..npoly, where ``t`` is ``time``
    centered on its mean and scaled by its range.
    """
    # Center on the mean, then normalize by the range (in place).
    t = time - time.mean()
    t /= t.max() - t.min()
    columns = [t ** power for power in range(npoly + 1)]
    return np.vstack(columns).T
from typing import Match
def match(first_name, last_name, province, date_of_birth, record_id):
    """Find the Type of Match, if there is any, and create a Match object.

    Tries the strongest criteria first: strong (names + date of birth),
    then possible (names + province), then weak (names only). The first
    matcher that returns a notice wins, and a Match row is created linking
    that notice to the record identified by ``record_id``.
    # NOTE(review): this module imports `Match` from `typing`, which has no
    # `.objects` manager -- the model class of the same name must be in
    # scope in the real module; verify the imports.
    """
    def update_match(notice, match_type):
        """Create Match object"""
        try:
            record = Record.objects.get(id=record_id)
            Match.objects.create(notice=notice, record=record, type=match_type)
        except Exception as e:
            # Surface DB/lookup failures to the caller unchanged.
            raise e
    if date_of_birth: # Check for a strong match since date_of_birth is present
        notice = strong_match(first_name, last_name,
                              date_of_birth)
        match_type = STRONG
        if notice:
            return update_match(notice, match_type)
    if province: # Check for a possible match since province is present and its not a strong match
        notice = possible_match(first_name, last_name,
                                province)
        match_type = POSSIBLE
        if notice:
            return update_match(notice, match_type)
    # Check for a weak match since its not a strong or possible match
    notice = weak_match(first_name, last_name)
    match_type = WEAK
    if notice:
        return update_match(notice, match_type)
def rmse(adata):
    """Calculate the root mean squared error.

    Runs ``calculate_rmse`` on ``adata`` (an AnnData-style object with
    ``.obsp``/``.uns``), stores the Kruskal matrix and scores on it, and
    returns the RMSE score as a float.
    """
    kruskel_matrix, kruskel_score, rmse_score = calculate_rmse(adata)
    adata.obsp["kruskel_matrix"] = kruskel_matrix
    adata.uns["kruskel_score"] = kruskel_score
    adata.uns["rmse_score"] = rmse_score
    return float(adata.uns["rmse_score"])
def format_dic(dic):
    """Format ``dic`` in place for JSON serialization, normalizing dates and
    other special values via ``format_value``.

    Args:
        dic: mapping whose values are rewritten in place.

    Returns:
        The same (mutated) mapping.
    """
    # dict.iteritems() was Python 2 only and raises AttributeError on
    # Python 3; items() works on both. Only values are reassigned (no keys
    # added/removed), so iterating the live dict is safe.
    for key, value in dic.items():
        dic[key] = format_value(value)
    return dic
def validate_task(task, tasks=None):
    """
    Validate the jsonschema configuration of a task.

    Args:
        task: task dict with a 'name' key and an optional 'config' dict.
        tasks: unused; kept for backward compatibility with existing callers.

    Returns:
        Formatted error details when validation fails, otherwise None.
    """
    name = task['name']
    config = task.get('config', {})
    # Look the registered task class up once instead of once per attribute.
    task_cls = TaskRegistry.get(name)[0]
    schema = getattr(task_cls, 'SCHEMA', {})
    format_checker = getattr(task_cls, 'FORMAT_CHECKER', None)
    try:
        validate_schema(config, schema, format_checker=format_checker)
    except ValidationError as exc:
        return TemplateError.format_details(exc, task)
import os
def pred_data_2D_per_sample(model, x_dir, y_dir, fnames, pad_shape = (256, 320), batch_size = 2, \
                            mean_patient_shape = (115, 320, 232), ct = False):
    """
    Loads raw data, preprocesses it, predicts 3D volumes slicewise (2D) one sample at a time, and pads to the original shape.
    Assumptions:
        The segmentation task you're working with is binary.
        The multi-output models output only have two outputs: (prediction mask, reconstruction mask)
        The .nii.gz files have the shape: (x, y, z) where z is the number of slices.
    Args:
        model: keras.models.Model instance
        x_dir: path to test images
        y_dir: path to the corresponding test masks
        fnames: files to evaluate in the directories; assuming that the input and labels are the same name
            * if it's None, we assume that it's all of the files in x_dir.
        pad_shape: of size (x,y); doesn't include batch size and channels
        batch_size: prediction batch size
        mean_patient_shape: (z,x,y) representing the average shape. Defaults to (115, 320, 232).
        ct: whether or not the data is a CT scan or not. Defaults to False.
    Returns:
        actual_y: actual labels
        padded_pred: stacked, thresholded predictions (padded to original shape)
        padded_recon: stacked, properly padded reconstruction. Defaults to None if the model only outputs segmentations.
        orig_images: original input images
    """
    # can automatically infer the filenames to use (ensure that there are no junk files in x_dir)
    if fnames is None:
        fnames = os.listdir(x_dir)
    # lists that hold the arrays
    y_list = []
    pred_list = []
    recon_list = []
    orig_list = []
    for id in fnames:
        # loads sample as a 3D numpy arr and then changes the type to float32
        x = nib.load(os.path.join(x_dir, id))
        y = nib.load(os.path.join(y_dir, id))
        orig_images, actual_label = nii_to_np(x), nii_to_np(y) # purpose is to transpose axes to (z,x, y)
        # trailing (1,) is the channel dimension expected downstream
        orig_shape = orig_images.shape + (1,)
        # preprocessing
        preprocessed_x, preprocessed_y, coords = isensee_preprocess(x, y, orig_spacing = None, get_coords = True, ct = \
            ct, mean_patient_shape = mean_patient_shape)
        # pad to model input shape (predicting on a slicewise basis)
        _pad_shape = (preprocessed_x.shape[0],) + pad_shape # unique to each volume because the n_slice varies
        # preparing the shape for the model (reshaping to model input shape and adding a channel dimension)
        reshaped_x = np.expand_dims(reshape(preprocessed_x, preprocessed_x.min(), new_shape = _pad_shape), -1)
        # prediction
        print("Predicting: ", id)
        predicted = model.predict(reshaped_x, batch_size = batch_size)
        # inferring that the model has a reconstruction decoder based on the outputted predictions
        if isinstance(predicted, (list, tuple)):
            predicted, reconstruction = predicted
            # properly converting the reconstruction to the original shape
            padded_recon = undo_reshape_and_nonint_extraction(reconstruction, prior_reshape_shape = preprocessed_x.shape, \
                orig_shape = orig_shape, coords = coords, pad_value = 0)
            recon_list.append(padded_recon)
        # thresholding
        # assumes the segmentation head outputs per-pixel probabilities in [0, 1] -- TODO confirm
        predicted[predicted >= 0.5] = 1
        predicted[predicted < 0.5] = 0
        # properly converting the prediction mask to the original shape
        padded_pred = undo_reshape_and_nonint_extraction(predicted, prior_reshape_shape = preprocessed_x.shape, \
            orig_shape = orig_shape, coords = coords, pad_value = 0)
        y_list.append(actual_label), pred_list.append(padded_pred), orig_list.append(orig_images)
    # stacking the lists
    actual_y, padded_pred, orig_images = np.vstack(y_list), np.vstack(pred_list), np.vstack(orig_list)
    try:
        padded_recon = np.vstack(recon_list)
    except ValueError: # can't stack empty list
        padded_recon = None
    return (actual_y, padded_pred, padded_recon, orig_images)
from scipy.signal._arraytools import odd_ext
import scipy.fftpack
def cogve(COP, freq, mass, height, show=False, ax=None):
    """COGv estimation using COP data based on the inverted pendulum model.

    Estimates the center of gravity vertical projection (COGv) displacement
    from the center of pressure (COP) displacement in the anterior-posterior
    direction during quiet standing, by low-pass filtering the COP signal in
    the frequency domain with the transfer function of a single inverted
    pendulum [1]_.

    Parameters
    ----------
    COP : 1D array_like
        center of pressure data [cm]
    freq : float
        sampling frequency of the COP data
    mass : float
        body mass of the subject [kg]
    height : float
        height of the subject [cm]
    show : bool, optional (default = False)
        True (1) plots data and results in a matplotlib figure
    ax : matplotlib.axes.Axes instance, optional (default = None)

    Returns
    -------
    COGv : 1D array
        center of gravity vertical projection data [cm]

    References
    ----------
    .. [1] http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/IP_Model.ipynb

    Examples
    --------
    >>> from cogve import cogve
    >>> y = np.cumsum(np.random.randn(3000))/50
    >>> cogv = cogve(y, freq=100, mass=70, height=170, show=True)
    """
    cop = np.asarray(COP)
    stature = height / 100  # cm to m
    g = 9.8                 # gravity acceleration in m/s2
    # height of the COG w.r.t. ankle (McGinnis, 2005; Winter, 2005)
    hcog = 0.56 * stature - 0.039 * stature
    # body moment of inertia around the ankle
    # (Breniere, 1996), (0.0572 for the ml direction)
    inertia = mass * 0.0533 * stature ** 2 + mass * hcog ** 2
    # Newton-Euler equation of motion for the inverted pendulum:
    # COGv'' = w02*(COGv - COP), w02 = squared pendulum natural frequency
    w02 = mass * g * hcog / inertia
    # pad the signal and remove its mean to avoid edge artifacts
    cop = odd_ext(cop, n=freq)
    cop_mean = np.mean(cop)
    cop = cop - cop_mean
    # filter the COP in the frequency domain with the pendulum's
    # transfer function to obtain the COGv estimate
    n_samples = cop.size
    spectrum = scipy.fftpack.fft(cop, n=n_samples) / n_samples
    angular = 2 * np.pi * scipy.fftpack.fftfreq(n=n_samples, d=1 / freq)
    transfer = w02 / (w02 + angular ** 2)
    cogv = np.real(scipy.fftpack.ifft(transfer * spectrum) * n_samples)
    cogv = cogv[0: n_samples]
    # restore the mean and strip the padding
    cop, cogv = cop + cop_mean, cogv + cop_mean
    cop, cogv = cop[freq: -freq], cogv[freq: -freq]
    if show:
        _plot(cop, cogv, freq, ax)
    return cogv
def locked_view_with_exception(request):
    """View, locked by the decorator with url exceptions.

    Returns a constant ``HttpResponse``; the lockdown behaviour comes from a
    decorator applied where this view is registered (not visible here).
    """
    return HttpResponse('A locked view.')
import json
def generate_api_queries(input_container_sas_url,file_list_sas_urls,request_name_base,caller):
    """
    Generate .json-formatted API input from input parameters. file_list_sas_urls is
    a list of SAS URLs to individual file lists (all relative to the same container).
    request_name_base is a request name for the set; if the base name is 'blah', individual
    requests will get request names of 'blah_chunk000', 'blah_chunk001', etc.
    Returns both strings and Python dicts
    return request_strings,request_dicts
    """
    assert isinstance(file_list_sas_urls, list)
    multiple_chunks = len(file_list_sas_urls) > 1
    request_dicts = []
    request_strings = []
    for chunk_index, file_list_sas_url in enumerate(file_list_sas_urls):
        # Suffix the request name only when there is more than one chunk.
        if multiple_chunks:
            request_name = request_name_base + '_chunk{0:0>3d}'.format(chunk_index)
        else:
            request_name = request_name_base
        query = {}
        query['input_container_sas'] = input_container_sas_url
        query['images_requested_json_sas'] = file_list_sas_url
        query['request_name'] = request_name
        query['caller'] = caller
        request_dicts.append(query)
        request_strings.append(json.dumps(query, indent=1))
    return request_strings, request_dicts
def estimate_H_unbiased_parallel(X, Y, n_jobs, freq_dict = None):
    """Parallelised estimation of H with the unbiased HSIC-estimator.

    Computes one unbiased HSIC value per column of ``X`` against ``Y``, in
    parallel via joblib.

    Parameters
    ----------
    X : 2D array, shape (n, p)
        Feature matrix; one HSIC value is computed per column.
    Y : 1D array, shape (n,)
        Response with the same number of rows as ``X``.
    n_jobs : int
        Number of joblib workers.
    freq_dict : dict, optional
        When given, ``Y`` is treated as discrete and a ``KDiscrete`` kernel
        built from these frequencies is used; otherwise a Gaussian kernel
        with a median-heuristic bandwidth.

    Returns
    -------
    ndarray, shape (p,)
        Unbiased HSIC estimates.
    """
    assert Y.shape[0] == X.shape[0]
    p = X.shape[1]
    # Median-heuristic bandwidth (squared) for the Gaussian kernel on X.
    x_bw = util.meddistance(X, subsample = 1000)**2
    kx = kernel.KGauss(x_bw)
    if freq_dict is not None:
        ky = KDiscrete(freq_dict, freq_dict)
    else:
        y_bw = util.meddistance(Y[:, np.newaxis], subsample = 1000)**2
        ky = kernel.KGauss(y_bw)
    hsic_H = hsic.HSIC_U(kx, ky)
    def one_calc(i):
        # HSIC between one feature column and the response.
        return hsic_H.compute(X[:,i,np.newaxis], Y[:,np.newaxis])
    par = Parallel(n_jobs = n_jobs)
    res = par(delayed(one_calc)(i) for i in range(p))
    return np.array(res)
import torch
def logsumexp_across_rois(roi_inputs, rois):
    """
    Log-sum-exp pool each ROI's feature map with the resampled overlapping
    regions of every other ROI belonging to the same batch image.

    Args:
        roi_inputs (torch.Tensor): shape (bn, chn, rh, rw)
        rois (torch.Tensor): shape (bn, 5); column 0 is the batch-image id,
            columns 1:5 the box coordinates.
            # assumes boxes are (x1, y1, x2, y2) -- TODO confirm against
            # get_overlap_boxes
    Returns:
        Tensor, shape (bn, chn, rh, rw)
    """
    bn, kn, rh, rw = roi_inputs.size()
    # allocate memory, (bn, chn, rh, rw)
    rois_logsumexp = roi_inputs.clone()
    if bn > 0:
        roi_ids = rois[:, 0].round().int()
        roi_bboxes = rois[:, 1:]
        for roi_id in range(max(roi_ids) + 1):
            ids = (roi_id == roi_ids).nonzero(as_tuple=False).squeeze(-1)
            if len(ids) <= 1:
                # A lone ROI has nothing to pool with; keep its own map.
                continue
            boxes = roi_bboxes[ids]
            pos_mask, overlap_boxes = get_overlap_boxes(boxes, boxes)
            # Every ROI overlaps itself; mask out that trivial case.
            pos_mask.fill_diagonal_(False)
            for id_self, pos_mask_single, overlap_boxes_single \
                    in zip(ids, pos_mask, overlap_boxes):
                if not any(pos_mask_single):
                    continue
                ids_overlap = ids[pos_mask_single]
                num_overlap = ids_overlap.size(0)
                roi_input_self = roi_inputs[id_self] # (chn, rh, rw)
                roi_inputs_overlap = roi_inputs[ids_overlap]
                bbox_self = roi_bboxes[id_self]
                bbox_overlap = roi_bboxes[ids_overlap]
                overlap_boxes_ = overlap_boxes_single[pos_mask_single]
                wh_bbox_self = bbox_self[2:] - bbox_self[:2]
                wh_bbox_overlap = bbox_overlap[:, 2:] - bbox_overlap[:, :2]
                # resample: build affine transforms that map each overlapping
                # ROI's sampling grid into this ROI's normalized [-1, 1] space
                scale = wh_bbox_self / wh_bbox_overlap
                xy_tl_in_bbox_overlap = 2 * (
                    overlap_boxes_[:, :2] - bbox_overlap[:, :2]
                ) / (bbox_overlap[:, 2:] - bbox_overlap[:, :2]) - 1
                xy_tl_in_bbox_self = 2 * (
                    overlap_boxes_[:, :2] - bbox_self[:2]
                ) / (bbox_self[2:] - bbox_self[:2]) - 1
                affine_mat = wh_bbox_self.new_zeros((num_overlap, 2, 3))
                affine_mat[:, 0, 0] = scale[:, 0]
                affine_mat[:, 1, 1] = scale[:, 1]
                affine_mat[:, :, 2] = \
                    xy_tl_in_bbox_overlap - scale * xy_tl_in_bbox_self
                grid = F.affine_grid(affine_mat, (num_overlap, 1, rh, rw), align_corners=False)
                # (#ovlp, chn, rh, rw)
                roi_inputs_resample = F.grid_sample(
                    roi_inputs_overlap, grid, padding_mode='border', align_corners=False)
                # (#ovlp, rh, rw, 2) -> (#ovlp, rh, rw) -> (#ovlp, 1, rh, rw)
                valid_grid = torch.all((grid > -1) & (grid < 1), dim=3).unsqueeze(1)
                # allocate memory to avoid concat
                roi_inputs_resampled_cat_self = roi_inputs_resample.new_empty(
                    (num_overlap + 1, kn, rh, rw))
                # fill out-of-border cells with -inf so they contribute
                # nothing to the logsumexp
                roi_inputs_resampled_cat_self[:-1] = roi_inputs_resample.masked_fill(
                    ~valid_grid, float('-inf'))
                roi_inputs_resampled_cat_self[-1] = roi_input_self
                # (chn, rh, rw)
                rois_logsumexp[id_self] = roi_inputs_resampled_cat_self.logsumexp(dim=0)
    return rois_logsumexp
import re
def add_symbol_and_color(df: pd.DataFrame, colormap: dict):
    """
    Color logic happens here. Use nowcast's precipitation, when it is available and
    otherwise forecast's weather symbol (defined by YR).
    :param df: DataFrame containing weather data
    :param colormap: color definitions to use
    :return: enhanced DataFrame (with `wl_symbol` and `color` columns added)
    """
    symbols = []
    colors = []
    rain_re = re.compile(r"rain|sleet|snow", re.IGNORECASE)
    for i in df.index:
        # Take always nowcast's precipitation, it should be the most accurate
        if pd.notnull(df["prec_now"][i]):
            precipitation = df["prec_now"][i]
            nowcast = True
        else:
            precipitation = df["prec_fore"][i]
            nowcast = False
        prob_of_prec = df["prob_of_prec"][i]
        if nowcast:
            # Bucket the measured precipitation into intensity classes.
            if precipitation >= 3.0:
                colors_key = "VERYHEAVYRAIN"
            elif precipitation >= 1.5:
                colors_key = "HEAVYRAIN"
            elif precipitation >= 0.5:
                colors_key = "RAIN"
            elif precipitation > 0.0:
                colors_key = "LIGHTRAIN"
            elif precipitation == 0.0 and rain_re.findall(df["symbol"][i]):
                # Forecast symbol promises precipitation but nowcast
                # measures none: show it as cloudy instead.
                colors_key = "CLOUDY"
            else:
                colors_key = symbolmap[df["symbol"][i]]
        else:
            # No nowcast: fall back to the forecast's weather symbol.
            colors_key = symbolmap[df["symbol"][i]]
            if colors_key == "LIGHTRAIN" and prob_of_prec <= 50:
                colors_key = "LIGHTRAIN_LT50"
        # Single append site instead of the 8 duplicated append pairs the
        # original carried -- every branch now only selects `colors_key`.
        symbols.append(colors_key)
        colors.append(colormap[colors_key])
    df["wl_symbol"] = symbols
    df["color"] = colors
    return df
def ignore_troublesome_polymer(polymer):
    """
    Find the shortest possible polymer obtainable by removing one unit type
    (both polarities) before simplifying.
    :param polymer: the string representing the polymer
    :return: the shortest simplified polymer
    >>> ignore_troublesome_polymer('dabAcCaCBAcCcaDA')
    'daDA'
    """
    unit_types = set(polymer.lower())
    candidates = (simplify_polymer(polymer, unit) for unit in unit_types)
    return min(candidates, key=len)
def add_version(match):
    """Return a dict mapping 'VERSION' to the normalized version number.

    Group 1 of ``match`` is normalized by stripping spaces and turning
    commas into dots.
    """
    version = match.group(1).replace(" ", "").replace(",", ".")
    return {'VERSION': version}
from typing import Sequence
def replace_cryptomatte_hashes_by_asset_index(
    segmentation_ids: ArrayLike,
    assets: Sequence[core.assets.Asset]):
    """Return a copy of the segmentation with cryptomatte hashes (from
    Blender) replaced by each asset's index + 1.

    Index 0 is reserved for the background, hence the +1 offset.

    Args:
        segmentation_ids: Segmentation array of cryptomatte hashes as returned by Blender.
        assets: List of assets to use for replacement.
    """
    remapped = np.zeros_like(segmentation_ids)
    # Pixels matching an asset's uid hash get that asset's 1-based index;
    # everything else stays 0 (background).
    for asset_index, asset in enumerate(assets, start=1):
        remapped[segmentation_ids == mm3hash(asset.uid)] = asset_index
    return remapped
def getValueBetweenKey1AndKey2(str, key1, key2):
    """Return the text between the first `key1` and the `key2` that follows it.

    Args:
        str: string containing key1 and key2.
        key1: opening keyword/delimiter.
        key2: closing keyword/delimiter, searched for *after* key1.

    Returns:
        The substring between key1 and key2, stripped of surrounding
        whitespace.
    """
    start = str.find(key1) + len(key1)
    # Search for key2 only after key1. The old code took the first key2 in
    # the whole string, which produced an empty/garbled result whenever
    # key2 also occurred before key1.
    end = str.find(key2, start)
    value = str[start : end]
    return value.strip()
def sample_function_parameter(parameter_name, return_variable_name=None):
    """ Returns sample function that extracts a parameter from current state
    Args:
        parameter_name (string): attribute in sampler.parameters
            (e.g. A, C, LRinv, R)
        return_variable_name (string, optional): name under which the value
            is reported; default is `parameter_name`
    Returns:
        A function of sampler that returns dictionary of variable, value
    """
    variable = parameter_name if return_variable_name is None else return_variable_name

    def custom_sample_function(sampler):
        # Copy so later parameter updates can't mutate the returned sample.
        value = np.copy(getattr(sampler.parameters, parameter_name))
        return {'variable': variable, 'value': value}

    return custom_sample_function
def generate_linear_probe(num_elec=16, ypitch=20,
                          contact_shapes='circle', contact_shape_params=None):
    """
    Generate a one-column linear probe.

    Parameters
    ----------
    num_elec : int
        Number of contacts in the column.
    ypitch : float
        Vertical distance between contacts.
    contact_shapes : str
        Shape of each contact (e.g. 'circle').
    contact_shape_params : dict, optional
        Shape parameters; defaults to {'radius': 6}.

    Returns
    -------
    The probe built by ``generate_multi_columns_probe``.
    """
    # None sentinel replaces the mutable default dict, which was shared
    # across calls and could be mutated downstream.
    if contact_shape_params is None:
        contact_shape_params = {'radius': 6}
    probe = generate_multi_columns_probe(num_columns=1, num_contact_per_column=num_elec,
                                         xpitch=0, ypitch=ypitch, contact_shapes=contact_shapes,
                                         contact_shape_params=contact_shape_params)
    return probe
from sys import path
import requests
def subview(request, subid):
    """Present an overview page about a substance in SciFlow.

    Loads the substance with its identifiers/descriptors/sources, builds a
    3D-structure URL from NCI CACTUS for the JSmol viewer, lazily fetches
    and saves descriptors by InChIKey when none are stored, and renders the
    subview template.

    :param request: Django request object
    :param subid: primary key of the Substances row to display
    """
    substance = Substances.objects.get(id=subid)
    # values_list() yields plain (type, value, source) tuples.
    ids = substance.identifiers_set.values_list('type', 'value', 'source')
    descs = substance.descriptors_set.values_list('type', 'value', 'source')
    srcs = substance.sources_set.all()
    inchikey = getinchikey(substance.id)
    baseimage = 'https://cactus.nci.nih.gov/chemical/structure/{}/file?format=sdf&get3d=true'
    image_url = baseimage.format(inchikey)
    testvar = path.exists("/static/Jsmol/JSmol.min.js")
    print(substance)
    print('js path is ' + str(testvar))
    print(inchikey)
    r = requests.get(image_url)
    print(str(r.status_code))
    if r.status_code == 200:
        image_found = ''
    else:
        image_found = 'Error Model not Found'
        image_url = ''
    print(image_found)
    print(image_url)
    if not descs:
        key = ""
        # `ids` holds tuples, so unpack positionally. The old code did
        # `i.type`/`i.value`, which raises AttributeError on tuples.
        for idtype, value, _src in ids:
            if idtype == 'inchikey':
                key = value
                break
        m, i, descs, srcs = getsubdata(key)
        savedescs(subid, descs)
    # Group identifiers as {type: {value: [sources...]}}.
    idlist = {}
    for idtype, value, src in ids:
        if idtype not in idlist.keys():
            idlist.update({idtype: {}})
        if value not in idlist[idtype].keys():
            idlist[idtype].update({value: []})
        idlist[idtype][value].append(src)
    # Group descriptors the same way.
    dlist = {}
    for desc, value, src in descs:
        if desc not in dlist.keys():
            dlist.update({desc: {}})
        if value not in dlist[desc].keys():
            dlist[desc].update({value: []})
        dlist[desc][value].append(src)
    # related data files
    files = substance.jsonlookupsubstances_set.all()
    return render(request, "substances/subview.html",
                  {'substance': substance, "ids": idlist, "descs": dlist, "srcs": srcs, "files": files,
                   "image_url": image_url, "image_found": image_found, "inchikey": inchikey})
from typing import Union
from typing import Dict
from typing import Any
import types
import copy
def convert_to_attributes(
    raw: Union[Dict[str, Any], types.Attributes]
) -> types.Attributes:
    """Deep-copy a mapping into OTEL-legal attributes.

    Values that are not str/bool/int/float -- or tuples/lists whose non-None
    members are all one of those single types -- are swapped for their
    `repr()` strings, wholesale.

    From OTEL API:
        AttributeValue = Union[
            str, bool, int, float,
            Sequence[Optional[str]], Sequence[Optional[bool]],
            Sequence[Optional[int]], Sequence[Optional[float]],
        ]

    Note: sequence types other than tuple and list are deliberately treated
    as "other types" (despite the OTEL API definition) to safeguard against
    tricky sequences like `bytes` and custom instances.
    """
    if not raw:
        return {}
    converted = {}
    for key in list(raw):
        value = raw[key]
        if isinstance(value, LEGAL_ATTR_BASE_TYPES):
            # Simple, single legal type: deep-copy as-is.
            converted[key] = copy.deepcopy(value)
        elif isinstance(value, (tuple, list)):
            # Collect member types, ignoring None (always allowed).
            member_types = {type(member) for member in value if member is not None}
            if len(member_types) == 1 and next(iter(member_types)) in LEGAL_ATTR_BASE_TYPES:
                converted[key] = copy.deepcopy(value)
            else:
                # Mixed/illegal members: keep the list, but as reprs.
                converted[key] = [repr(member) for member in value]
        else:
            # Any other type: fall back to its repr string.
            converted[key] = repr(value)
    return converted
import time
import logging
def shouldpoll(name, curtime):
    """Return True when `name` is due for a new poll, else None."""
    global lastpoll
    # Seed the last-poll timestamp on first sight of this name.
    if name not in lastpoll.data:
        lastpoll.data[name] = time.time()
        lastpoll.sync()
    lp = lastpoll.data[name]
    global sleeptime
    # Default poll interval is 15 minutes.
    if name not in sleeptime.data:
        sleeptime.data[name] = 900
        sleeptime.sync()
    st = sleeptime.data[name]
    logging.debug("pollcheck - %s - %s - remaining %s" % (name, time.ctime(lp), (lp + st) - curtime))
    if curtime - lp > st:
        return True
def submit_task(pipeline_name, accession, rest_api_key, priority="MEDIUM", starting_index=0):
    """
    Submits a Conan task. Sending post request to ``api/submissions`` with data similar to the following JSON
    {
        "priority": `priority`,
        "pipelineName": `pipeline_name`,
        "startingProcessIndex": `starting_index`,
        "inputParameters": {"Accession Number": `accession`},
        "restApiKey": `rest_api_key`,
    }
    :param pipeline_name: Name of pipeline. e.g. load or unload ...etc.
    :type pipeline_name: str
    :param accession: ArrayExpress accession. e.g. E-MTAB-xxxx
    :type accession: str
    :param rest_api_key: User rest API key.
    :type rest_api_key: str
    :param priority: Task priority in the queue. default `MEDIUM`
    :type priority: str
    :param starting_index: The starting task number in the requested pipeline.
    :type starting_index: int
    :return: Submitted task ID.
    """
    url = CONAN_URL + 'api/submissions'
    data = {
        "priority": priority,
        "pipelineName": pipeline_name,
        "startingProcessIndex": str(starting_index),
        "inputParameters": {"Accession Number": accession},
        "restApiKey": str(rest_api_key),
    }
    r = requests.post(url=url, json=data)
    # `print r.text` was Python 2 statement syntax (a SyntaxError on
    # Python 3); use the print() function instead.
    print(r.text)
    return json.loads(r.text)['submittedTaskID']
def puissance(poly, n):
    """Return the polynomial `poly` raised to the power `n`."""
    # Anything to the power 0 is the constant polynomial 1.
    if n == 0:
        return [1]
    base = clear_poly(poly)
    result = base.copy()
    # Multiply by the base n-1 more times to reach the n-th power.
    power = 1
    while power < n:
        result = mult_poly(result, base)
        power += 1
    return result
import math
def getGridSample(lat, lon, n):
    """
    Get a random sampling of n locations around the point (lat, lon).
    param: lat latitude of grid center point
    param: lon longitude of grid center point
    param: n number of locations to sample
    return: list of n (latitude, longitude) tuples
    """
    # NOTE(review): `grid_size` and `earth_radius` are module-level globals
    # defined elsewhere. Offsets are drawn uniformly from
    # [-grid_size, grid_size]; the /1000 suggests grid_size is in metres
    # and earth_radius in km -- TODO confirm units.
    locations = []
    for i in range(n):
        # Get a random point in km's
        min_delta = -1 * grid_size
        max_delta = grid_size
        delta_x_kms = (np.random.rand()*(max_delta - min_delta) + min_delta)/1000 # kms
        delta_y_kms = (np.random.rand()*(max_delta - min_delta) + min_delta)/1000 # kms
        # Convert from km's to latitude/longitude
        delta_lat = (delta_x_kms/earth_radius) * 180/math.pi
        # Radius of the circle of latitude, used to scale longitude deltas.
        r = earth_radius*math.cos(lat*math.pi/180.0)
        delta_lon = (delta_y_kms/r)*180/math.pi
        # Get the new lat/lon point
        new_lat = lat + delta_lat
        new_lon = lon + delta_lon
        locations.append((new_lat, new_lon))
    return locations
def multiplication(integer_one, integer_two):
    """
    Multiply two numbers by repeated addition.

    Args:
        integer_one: The original integer
        integer_two: The (non-negative) number of times to add integer_one

    Returns:
        an integer with the value: integer_one*integer_two
    """
    product = 0
    # Add integer_one to the accumulator integer_two times.
    for _ in range(integer_two):
        product = adding(integer_one, product)
    return product
def group_obs_table(obs_table, offset_range=[0, 2.5], n_off_bin=5,
                    eff_range=[0, 100], n_eff_bin=4, zen_range=[0., 70.],
                    n_zen_bin=7):
    """Helper function to provide an observation grouping in offset,
    muon_efficiency, and zenith.

    Note: mutates ``obs_table`` in place by adding a ``COSZEN`` column.

    Parameters
    ----------
    obs_table : `~gammapy.data.ObservationTable`
        Obs table to group
    offset_range : tuple
        Range of the offset band [deg]
    n_off_bin : int
        Number of offset bins
    eff_range : tuple
        Range of the muon efficiency band [percent]
    n_eff_bin : int
        Number of muon efficiency bins
    zen_range : tuple
        Range of the zenith angle band [deg]
    n_zen_bin : int
        Number of zenith bins
    Returns
    -------
    grouped_table : `~gammapy.data.ObservationTable`
    """
    offmin, offmax = offset_range
    effmin, effmax = eff_range
    zenmin, zenmax = zen_range
    offtab = Angle(np.linspace(offmin, offmax, n_off_bin + 1), 'deg')
    # Efficiency edges are converted from percent to a fraction.
    efftab = Quantity(np.linspace(effmin, effmax, n_eff_bin + 1) / 100., '')
    zentab = Quantity(np.linspace(zenmin, zenmax, n_zen_bin + 1), 'deg')
    # cos() reverses the ordering, so flip to keep bin edges increasing.
    coszentab = np.cos(zentab)[::-1]
    val = list()
    val.append(ObservationGroupAxis('MUONEFF', efftab, 'edges'))
    val.append(ObservationGroupAxis('COSZEN', coszentab, 'edges'))
    val.append(ObservationGroupAxis('OFFSET', offtab, 'edges'))
    obs_groups = ObservationGroups(val)
    # Grouping is done in cos(zenith); derive the column from ZEN_PNT.
    cos_zen = np.cos(obs_table['ZEN_PNT'].quantity)
    obs_table.add_column(Column(cos_zen, 'COSZEN'))
    grouped_table = obs_groups.apply(obs_table)
    return grouped_table
def dist_matrix(n, cx=None, cy=None):
    """
    Create matrix with euclidian distances from a reference point (cx, cy).
    Parameters
    ----------
    n : int
        output image shape is (n, n)
    cx,cy : float
        reference point. Defaults to the center.
    Returns
    -------
    im : ndarray with shape (n, n)
    Notes
    -----
    This is a replacement for ANDROMEDA's DISTC.
    """
    center_x = (n - 1) / 2 if cx is None else cx
    center_y = (n - 1) / 2 if cy is None else cy
    # Open grids broadcast to the full (n, n) distance map.
    rows, cols = np.ogrid[:n, :n]
    return np.sqrt((rows - center_y) ** 2 + (cols - center_x) ** 2)
from typing import Union
def by_srid(
    srid: int,
    authority: Union[Authorities, str] = Authorities.EPSG.name,
    validate: bool = True
) -> Sr:
    """
    Get a spatial reference (`Sr`) by its SRID and, optionally, the authority
    (if it isn't an `EPSG <http://www.epsg.org/>`_ spatial reference).
    :param srid: the SRID
    :param authority: the authority *(The default is `epsg`)*
    :param validate: `True` to validate the :py:class:`Sr`
    :return: the spatial reference
    :raises InvalidProjectionException: if there is no valid projection defined
        for the spatial reference
    """
    spatial_ref = sr(srid=srid, authority=authority)
    if validate:
        # Touching `.proj` forces projection resolution, raising
        # InvalidProjectionException for an invalid reference.
        _ = spatial_ref.proj
    return spatial_ref
def admin_cli(request, rancher_cli) -> RancherCli:
    """
    Login occurs at a global scope, so need to ensure we log back in as the
    user in a finalizer so that future tests have no issues.
    # presumably a pytest fixture: `request` is the FixtureRequest and
    # `rancher_cli` a fixture-provided CLI session -- verify the decorator
    # upstream.
    """
    # Switch the shared CLI session to the admin token for this test.
    rancher_cli.login(CATTLE_TEST_URL, ADMIN_TOKEN)
    def fin():
        # Restore the regular user session once the test finishes.
        rancher_cli.login(CATTLE_TEST_URL, USER_TOKEN)
    request.addfinalizer(fin)
    return rancher_cli
def crop_image(image, crop_box):
    """Crop image.
    # Arguments
        image: Numpy array of shape (rows, cols, channels).
        crop_box: List of four ints (row_min, col_min, row_max, col_max).
    # Returns
        Numpy array (a view of the cropped region).
    """
    row_min, col_min, row_max, col_max = crop_box
    return image[row_min:row_max, col_min:col_max, :]
import warnings
def reorder(names, faname):
    """Format the string of author names and return a string.

    Adapted from one of the `customization` functions in `bibtexparser`.

    INPUT:
    names -- string of names to be formatted. The names from BibTeX are
             formatted in the style "Last, First Middle and Last, First
             Middle and Last, First Middle" and this is the expected
             style here. Names without a comma ("First Last") are also
             handled by splitting on the last space.
    faname -- string of the initialized name of the author to whom
              formatting (a ** highlight) will be applied, e.g.
              "F.M. Last", or None to skip highlighting.

    OUTPUT:
    nameout -- string of formatted names. The current format is
               "F.M. Last, F.M. Last, and F.M. Last".
    """
    # Set the format tag for the website's owner, to highlight where on
    # the author list the website owner is. Default is **
    my_name_format_tag = '**'
    # Convert the input string to a list by splitting the string at the
    # "and " and strip out any remaining whitespace.
    nameslist = [i.strip() for i in names.replace('\n', ' ').split("and ")]
    # Initialize a list to store the names after they've been tidied
    # up.
    tidynames = []
    # Loop through each name in the list.
    for namestring in nameslist:
        # Strip whitespace from the string
        namestring = namestring.strip()
        # If, for some reason, we've gotten a blank name, skip it
        if len(namestring) < 1:
            continue
        # Split the `namestring` at the comma, but only perform the
        # split once.
        namesplit = namestring.rsplit(',', 1)
        if (len(namesplit) == 1):
            # No comma: assume "First Middle Last" and split off the
            # last word as the surname. NOTE(review): a single-word
            # name yields empty initials and a leading space (" Last").
            namesplit = namestring.rsplit(' ', 1)
            last = namesplit[-1].strip().strip('{}')
            firsts = namesplit[:-1]
        else:
            # "Last, First Middle" form: surname before the comma,
            # given names (periods stripped) after it.
            last = namesplit[0].strip().strip('{}')
            firsts = [i.strip().strip('.') for i in namesplit[1].split()]
        # Now that all the first name edge cases are sorted out, we
        # want to initialize all the first names. Set the variable
        # initials to an empty string to we can add to it. Then loop
        # through each of the items in the list of first names. Take
        # the first element of each item and append a period, but no
        # space.
        initials = ''
        for item in firsts:
            initials += item[0] + '.'
        # Stick all of the parts of the name together in `tidynames`
        tidynames.append(initials + ' ' + last)
    # Find the case of the website author and set the format for that
    # name
    if faname is not None:
        try:
            i = tidynames.index(faname)
            tidynames[i] = my_name_format_tag + tidynames[i] + my_name_format_tag
        except ValueError:
            warnings.warn("Couldn't find {} in the names list. Sorry!".format(faname))
    # Handle the various cases of number of authors and how they should
    # be joined. Convert the elements of `tidynames` to a string.
    if len(tidynames) > 2:
        # Oxford-comma style: "A, B, and C"
        tidynames[-1] = 'and ' + tidynames[-1]
        nameout = ', '.join(tidynames)
    elif len(tidynames) == 2:
        # Exactly two authors: "A and B" (no comma)
        tidynames[-1] = 'and ' + tidynames[-1]
        nameout = ' '.join(tidynames)
    else:
        # If `tidynames` only has one name, we only need to convert it
        # to a string. The first way that came to mind was to join the
        # list to an empty string.
        nameout = ''.join(tidynames)
    # Return `nameout`, the string of formatted authors
    return nameout
def smoter(

    ## main arguments / inputs
    data,                     ## training set (pandas dataframe)
    y,                        ## response variable y by name (string)
    k = 5,                    ## num of neighs for over-sampling (pos int)
    pert = 0.02,              ## perturbation / noise percentage (pos real)
    samp_method = "balance",  ## over / under sampling ("balance" or extreme")
    under_samp = True,        ## under sampling (bool)
    drop_na_col = True,       ## auto drop columns with nan's (bool)
    drop_na_row = True,       ## auto drop rows with nan's (bool)
    replace = False,          ## sampling replacement (bool)

    ## phi relevance function arguments / inputs
    rel_thres = 0.5,          ## relevance threshold considered rare (pos real)
    rel_method = "auto",      ## relevance method ("auto" or "manual")
    rel_xtrm_type = "both",   ## distribution focus ("high", "low", "both")
    rel_coef = 1.5,           ## coefficient for box plot (pos real)
    rel_ctrl_pts_rg = None    ## input for "manual" rel method  (2d array)

    ):

    """
    the main function, designed to help solve the problem of imbalanced data
    for regression, much the same as SMOTE for classification; SMOGN applies
    the combintation of under-sampling the majority class (in the case of
    regression, values commonly found near the mean of a normal distribution
    in the response variable y) and over-sampling the minority class (rare
    values in a normal distribution of y, typically found at the tails)

    procedure begins with a series of pre-processing steps, and to ensure no
    missing values (nan's), sorts the values in the response variable y by
    ascending order, and fits a function 'phi' to y, corresponding phi values
    (between 0 and 1) are generated for each value in y, the phi values are
    then used to determine if an observation is either normal or rare by the
    threshold specified in the argument 'rel_thres'

    normal observations are placed into a majority class subset (normal bin)
    and are under-sampled, while rare observations are placed in a seperate
    minority class subset (rare bin) where they're over-sampled

    under-sampling is applied by a random sampling from the normal bin based
    on a calculated percentage control by the argument 'samp_method', if the
    specified input of 'samp_method' is "balance", less under-sampling (and
    over-sampling) is conducted, and if "extreme" is specified more under-
    sampling (and over-sampling is conducted)

    over-sampling is applied one of two ways, either synthetic minority over-
    sampling technique for regression 'smoter' or 'smoter-gn' which applies a
    similar interpolation method to 'smoter', but takes an additional step to
    perturb the interpolated values with gaussian noise

    'smoter' is selected when the distance between a given observation and a
    selected nearest neighbor is within the maximum threshold (half the median
    distance of k nearest neighbors) 'smoter-gn' is selected when a given
    observation and a selected nearest neighbor exceeds that same threshold

    both 'smoter' and 'smoter-gn' are only applied to numeric / continuous
    features, synthetic values found in nominal / categorical features, are
    generated by randomly selecting observed values found within their
    respective feature

    procedure concludes by post-processing and returns a modified pandas data
    frame containing under-sampled and over-sampled (synthetic) observations,
    the distribution of the response variable y should more appropriately
    reflect the minority class areas of interest in y that are under-
    represented in the original training set

    ref:

    Branco, P., Torgo, L., Ribeiro, R. (2017).
    SMOGN: A Pre-Processing Approach for Imbalanced Regression.
    Proceedings of Machine Learning Research, 74:36-50.
    http://proceedings.mlr.press/v74/branco17a/branco17a.pdf.
    """

    ## pre-process missing values
    if bool(drop_na_col) == True:
        data = data.dropna(axis = 1)  ## drop columns with nan's

    if bool(drop_na_row) == True:
        data = data.dropna(axis = 0)  ## drop rows with nan's

    ## quality check for missing values in dataframe
    if data.isnull().values.any():
        raise ValueError("cannot proceed: data cannot contain NaN values")

    ## quality check for y
    ## (fixed: the original `isinstance(y, str) is False` /
    ## `y in data.columns.values is False` forms were chained comparisons
    ## that always evaluated False, so these checks never fired)
    if not isinstance(y, str):
        raise ValueError("cannot proceed: y must be a string")

    if y not in data.columns.values:
        raise ValueError("cannot proceed: y must be an header name (string) \
               found in the dataframe")

    ## quality check for k number specification
    if k > len(data):
        raise ValueError("cannot proceed: k is greater than number of \
               observations / rows contained in the dataframe")

    ## quality check for perturbation
    if pert > 1 or pert <= 0:
        raise ValueError("pert must be a real number number: 0 < R < 1")

    ## quality check for sampling method
    ## (fixed: same chained-comparison bug as above)
    if samp_method not in ("balance", "extreme"):
        raise ValueError("samp_method must be either: 'balance' or 'extreme' ")

    ## quality check for relevance threshold parameter
    if rel_thres is None:
        raise ValueError("cannot proceed: relevance threshold required")

    if rel_thres > 1 or rel_thres <= 0:
        raise ValueError("rel_thres must be a real number number: 0 < R < 1")

    ## store data dimensions
    n = len(data)
    d = len(data.columns)

    ## store original data types
    feat_dtypes_orig = [None] * d

    for j in range(d):
        feat_dtypes_orig[j] = data.iloc[:, j].dtype

    ## determine column position for response variable y
    y_col = data.columns.get_loc(y)

    ## move response variable y to last column
    if y_col < d - 1:
        cols = list(range(d))
        cols[y_col], cols[d - 1] = cols[d - 1], cols[y_col]
        data = data[data.columns[cols]]

    ## store original feature headers and
    ## encode feature headers to index position
    feat_names = list(data.columns)
    data.columns = range(d)

    ## sort response variable y by ascending order
    y = pd.DataFrame(data[d - 1])
    y_sort = y.sort_values(by = d - 1)
    y_sort = y_sort[d - 1]

    ## -------------------------------- phi --------------------------------- ##
    ## calculate parameters for phi relevance function
    ## (see 'phi_ctrl_pts()' function for details)
    phi_params = phi_ctrl_pts(

        y = y_sort,                ## y (ascending)
        method = rel_method,       ## defaults "auto"
        xtrm_type = rel_xtrm_type, ## defaults "both"
        coef = rel_coef,           ## defaults 1.5
        ctrl_pts = rel_ctrl_pts_rg ## user spec
    )

    ## calculate the phi relevance function
    ## (see 'phi()' function for details)
    y_phi = phi(

        y = y_sort,                ## y (ascending)
        ctrl_pts = phi_params      ## from 'phi_ctrl_pts()'
    )

    ## phi relevance quality check
    if all(i == 0 for i in y_phi):
        raise ValueError("redefine phi relevance function: all points are 1")

    if all(i == 1 for i in y_phi):
        raise ValueError("redefine phi relevance function: all points are 0")
    ## ---------------------------------------------------------------------- ##

    ## determine bin (rare or normal) by bump classification
    bumps = [0]

    for i in range(0, len(y_sort) - 1):
        if ((y_phi[i] >= rel_thres and y_phi[i + 1] < rel_thres) or
            (y_phi[i] < rel_thres and y_phi[i + 1] >= rel_thres)):
                bumps.append(i + 1)

    bumps.append(n)

    ## number of bump classes
    n_bumps = len(bumps) - 1

    ## determine indicies for each bump classification
    b_index = {}

    for i in range(n_bumps):
        b_index.update({i: y_sort[bumps[i]:bumps[i + 1]]})

    ## calculate over / under sampling percentage according to
    ## bump class and user specified method ("balance" or "extreme")
    b = round(n / n_bumps)
    s_perc = []
    scale = []
    obj = []

    if samp_method == "balance":
        for i in b_index:
            s_perc.append(b / len(b_index[i]))

    if samp_method == "extreme":
        for i in b_index:
            scale.append(b ** 2 / len(b_index[i]))
        scale = n_bumps * b / sum(scale)

        for i in b_index:
            obj.append(round(b ** 2 / len(b_index[i]) * scale, 2))
            s_perc.append(round(obj[i] / len(b_index[i]), 1))

    ## conduct over / under sampling and store modified training set
    data_new = pd.DataFrame()

    for i in range(n_bumps):

        ## no sampling
        if s_perc[i] == 1:

            ## simply return no sampling
            ## results to modified training set
            data_new = pd.concat([data.iloc[b_index[i].index], data_new])

        ## over-sampling
        if s_perc[i] > 1:

            ## generate synthetic observations in training set
            ## considered 'minority'
            ## (see 'over_sampling()' function for details)
            synth_obs = over_sampling(
                data = data,
                index = list(b_index[i].index),
                perc = s_perc[i],
                pert = pert,
                k = k
            )

            ## concatenate over-sampling
            ## results to modified training set
            data_new = pd.concat([synth_obs, data_new])

        ## under-sampling
        if under_samp is True:
            if s_perc[i] < 1:

                ## drop observations in training set
                ## considered 'normal' (not 'rare')
                omit_index = np.random.choice(
                    a = list(b_index[i].index),
                    size = int(s_perc[i] * len(b_index[i])),
                    replace = replace
                )

                omit_obs = data.drop(
                    index = omit_index,
                    axis = 0
                )

                ## concatenate under-sampling
                ## results to modified training set
                data_new = pd.concat([omit_obs, data_new])

    ## rename feature headers to originals
    data_new.columns = feat_names

    ## restore response variable y to original position
    if y_col < d - 1:
        cols = list(range(d))
        cols[y_col], cols[d - 1] = cols[d - 1], cols[y_col]
        data_new = data_new[data_new.columns[cols]]

    ## restore original data types
    for j in range(d):
        data_new.iloc[:, j] = data_new.iloc[:, j].astype(feat_dtypes_orig[j])

    ## return modified training set
    return data_new
def _compute_descriptive_stats(lst: list):
"""Basic descriptive statistics and a (parametric) seven-number summary.
Calculates descriptive statistics for a list of numerical values, including
count, min, max, mean, and a parametric seven-number-summary. This summary
includes values for the lower quartile, median, upper quartile, and
percentiles 2, 9, 91, and 98. If the data is normally distributed, these
seven percentiles will be equally spaced when plotted.
Parameters
----------
lst : list of int or float values
Returns
-------
dict
a dictionary containing the following descriptive statistics:
count
int: the number of items in `lst`
min
int or float: the smallest number in `lst`
max
int or float: the largest number in `lst`
mean
float: the mean of `lst`
range
int or float: the range of values in `lst`
std
float: the standard deviation of values in `lst`
seven_num_summ_percentiles
list of floats: the parameter percentiles used to calculate this
seven-number summary: [2, 9, 25, 50, 75, 91, 98]
seven_num_summ_values
list of floats: the calculated percentile values of the summary
"""
# NOTE: With .describe(), NaN values in passed lst are excluded by default
if len(lst) == 0:
raise ValueError('No values provided.')
seq_lengths = pd.Series(lst)
seven_num_summ_percentiles = [0.02, 0.09, 0.25, 0.5, 0.75, 0.91, 0.98]
descriptive_stats = seq_lengths.describe(
percentiles=seven_num_summ_percentiles)
return {'count': int(descriptive_stats.loc['count']),
'min': descriptive_stats.loc['min'],
'max': descriptive_stats.loc['max'],
'range': descriptive_stats.loc['max'] -
descriptive_stats.loc['min'],
'mean': descriptive_stats.loc['mean'],
'std': descriptive_stats.loc['std'],
'seven_num_summ_percentiles': seven_num_summ_percentiles,
'seven_num_summ_values': descriptive_stats.loc['2%':'98%'].tolist()
} | a5fb4cc19cec08a584fe9c9f89fe28e8ffc30148 | 3,634,437 |
def prepareNewHTTPDConfig(inputDict, currentHttpdConf):
    """Check if needed start end tags are available.
       If not consistent or was modified, file will append new config between tags.

    Returns (newConfig, wasChanged) from checkConsistency(), or (None, None)
    when the start/end marker comments are missing or malformed.
    NOTE(review): the per-line print() calls look like debug leftovers.
    """
    start, end = -1, -1
    # Get the start and the end. In the automatic preparation it will 3 lines defined:
    # # PROXYRULE|HOST|1.2.3.4|PORT|19999
    # ProxyPass "/nodemonitoring/1.2.3.4/" "http://1.2.3.4:19999/" connectiontimeout=15 timeout=30
    # ProxyPassReverse "/nodemonitoring/1.2.3.4/" "http://1.2.3.4:19999/"
    for lineNum in range(len(currentHttpdConf)):
        print(currentHttpdConf[lineNum])
        if currentHttpdConf[lineNum].strip().startswith("### HERE STARTS PROXYREWRITERULES"):
            print('start')
            start = lineNum
        elif currentHttpdConf[lineNum].strip().startswith("### HERE ENDS PROXYREWRITERULES"):
            print('end')
            end = lineNum
    if start == -1 or end == -1 or start == end:
        print('Do not do any change as there is no start or end.... ERROR!')
        return None, None
    diff = end - start
    if diff != 1:
        # Every proxy rule occupies exactly 3 lines between the markers, so
        # (end - start - 1) must be a multiple of 3.
        diff = float(diff - 1) / 3
        intDiff = int(diff)
        floatDiff = float(diff)
        if intDiff != floatDiff:
            # Make sure there is 3 lines defined. Even it is automatic, we can`t be sure that admins will not mess it up.
            print('There is not an equal number of lines, and agent does not know how to prepare it...')
            print('Will skip all information and will prepare a totally new file')
            # NOTE(review): despite the message above, execution falls through
            # to checkConsistency() below — confirm whether an early return or
            # a "rebuild from scratch" branch was intended here.
    newOutConfig, wasChanged = checkConsistency(inputDict, currentHttpdConf, start, end)
    if not wasChanged:
        print('Configuration looks the same what is available on FE. No point of changing restarting...')
    return newOutConfig, wasChanged
def array_xy_offsets(test_geo, test_xy):
    """Return upper-left array offsets of test_xy within test_geo.

    Args:
        test_geo: GDAL geotransform (6-tuple) used to calculate the offset
        test_xy: x/y coordinates (list or tuple) in the same projection
            as test_geo

    Returns:
        (x_offset, y_offset): integer column and row offsets of the cell
        containing test_xy, measured from the raster's upper-left corner.
    """
    x_coord, y_coord = test_xy[0], test_xy[1]
    col = int((x_coord - test_geo[0]) / test_geo[1])
    row = int((y_coord - test_geo[3]) / test_geo[5])
    return col, row
import torch
def log_density_normal(x, mean=0, var=1, average=False, reduce_dim=None):
    """Element-wise log density of a normal distribution (up to the
    constant -0.5*log(2*pi), which is omitted).

    :param x: tensor of observations
    :param mean: scalar or tensor mean
    :param var: scalar or tensor variance (scalars are promoted to tensors)
    :param average: if True and reduce_dim is given, reduce with mean
        instead of sum
    :param reduce_dim: dimension to reduce over; None returns the
        unreduced element-wise densities
    :return: tensor of log densities (reduced if reduce_dim is set)
    """
    if isinstance(var, Number):
        var = torch.tensor(var).float()
    if x.is_cuda:
        # Keep the variance on the same device as the observations.
        var = var.to(x.get_device())
    densities = -0.5 * (torch.log(var) + (x - mean) ** 2 / var)
    if reduce_dim is None:
        return densities
    reducer = torch.mean if average else torch.sum
    return reducer(densities, reduce_dim)
def get_group_policies(group_names):
    """
    Return the names of the managed policies attached to the given groups.

    Args:
        group_names: space-separated IAM group names.

    Returns:
        A single string containing every attached policy name, each
        followed by a space (same format as before).

    NOTE(review): list_attached_group_policies paginates; truncated
    results are not followed here — confirm whether that matters for
    these groups.
    """
    policy_names = []
    for group in group_names.split(" "):
        response = iam.list_attached_group_policies(GroupName=group)
        for policy in response["AttachedPolicies"]:
            policy_names.append(str(policy["PolicyName"]))
    # Build the result with join instead of repeated string concatenation
    # (the previous implementation was quadratic in the number of policies).
    return "".join(name + " " for name in policy_names)
def _sort2D(signal):
"""Revert the operation of _sort.
Args:
signal an instance of numpy.ndarray of one dimention
Returns:
An instance of numpy.ndarray
"""
to = signal.shape[1]
for i in range(1, to // 2 + 1, 1):
temp = signal[:, i].copy()
signal[:, i:to - 1] = signal[:, i + 1:to]
signal[:, -1] = temp
return signal | 566b2bbfcee7741cdb01451d6b0250c0fe21b4b5 | 3,634,442 |
def get_organizations_by_types(types, allowed_keys=None):
    """Fetch organizations whose type is in `types`, newest first."""
    session = get_session()
    query = session.query(models.Organization)
    query = query.filter(models.Organization.type.in_(types))
    organizations = query.order_by(models.Organization.created_at.desc()).all()
    return _to_dict(organizations, allowed_keys=allowed_keys)
def hiscale(trange=['2003-01-01', '2003-01-02'],
            datatype='lmde_m1',
            suffix='',
            get_support_data=False,
            varformat=None,
            downloadonly=False,
            notplot=False,
            no_update=False,
            time_clip=False):
    """
    Load data from the HI-SCALE experiment on the Ulysses mission.

    Parameters:
        trange : list of str
            time range of interest [starttime, endtime] with the format
            ['YYYY-MM-DD','YYYY-MM-DD'] or, for sub-day ranges,
            ['YYYY-MM-DD/hh:mm:ss','YYYY-MM-DD/hh:mm:ss']
        datatype: str
            Data type; valid options depend on the mission data set.
        suffix: str
            Suffix appended to the tplot variable names (default: none).
        get_support_data: bool
            Also load variables whose "VAR_TYPE" attribute is
            "support_data" (by default only "data" variables are loaded).
        varformat: str
            File variable formats to load; "*" wildcard accepted
            (default: all variables).
        downloadonly: bool
            Download the CDF files without loading them into tplot.
        notplot: bool
            Return the data in hash tables instead of creating tplot
            variables.
        no_update: bool
            Only load data from the local cache.
        time_clip: bool
            Clip the variables exactly to the trange interval.

    Returns:
        List of tplot variables created.
    """
    load_options = dict(
        instrument='hiscale',
        trange=trange,
        datatype=datatype,
        suffix=suffix,
        get_support_data=get_support_data,
        varformat=varformat,
        downloadonly=downloadonly,
        notplot=notplot,
        time_clip=time_clip,
        no_update=no_update,
    )
    return load(**load_options)
def order_stats(X):
    """Compute order statistics on sample `X`.

    Follows convention that order statistic 1 is minimum and statistic n is
    maximum. Therefore, array elements ``0`` and ``n+1`` are ``-inf`` and
    ``+inf``.

    Parameters
    ----------
    X : :class:`numpy:numpy.ndarray` of shape (n,)
        Data for order statistics. Can be vectorized (extra leading axes).
        Must be a sortable data type.

    Returns
    -------
    o_stats : :class:`numpy:numpy.ndarray` of shape (n+2,)
        Order statistics on `X`, padded with -inf / +inf.
    """
    assert np.ndim(X) >= 1
    # NaN is not allowed since it does not have well defined order.
    assert not np.any(np.isnan(X))

    pad_shape = np.shape(X)[:-1] + (1,)
    upper_pad = np.full(pad_shape, np.inf)
    sorted_X = np.sort(X, axis=-1)
    return np.concatenate((-upper_pad, sorted_X, upper_pad), axis=-1)
def get_npr(treedata, idx):
    """
    Return the number of progenitors of the halo with id `idx`.
    """
    matches = treedata['id'] == idx
    return treedata['nprog'][matches][0]
import argparse
def parse_args():
    """Parse known command-line arguments.

    Returns:
        argparse.Namespace with `save_dir` and `trace_dir`.
        Unrecognized arguments are silently ignored (parse_known_args).
    """
    # Pass the text as `description`: the first positional argument of
    # ArgumentParser is `prog` (the program name shown in usage lines),
    # which is not what was intended here.
    parser = argparse.ArgumentParser(description="Generate trace files.")
    parser.add_argument('--save-dir', type=str, required=True,
                        help="directory to save the model.")
    parser.add_argument('--trace-dir', type=str, required=True,
                        help='Path to trace file')
    args, _unknown = parser.parse_known_args()
    return args
def dir_xtrack_to_geo(xtrack_dir, ground_heading):
    """
    Convert image direction relative to antenna to geographical direction.

    Parameters
    ----------
    xtrack_dir: angle in radians, relative to cross-track, anticlockwise
        (the code applies np.rad2deg to it, so the previous "degrees"
        description was wrong — it matched the inverse conversion)
    ground_heading: azimuth at position, in degrees north

    Returns
    -------
    np.float64
        same shape as input. Geographical direction in degrees north.
    """
    return 90 - np.rad2deg(xtrack_dir) + ground_heading
def log(lvl, msg, *args, **kwargs):
    """Log `msg` at integer severity `lvl` via the outer logger."""
    outer_logger = get_outer_logger()
    return outer_logger.log(lvl, msg, *args, **kwargs)
import torch
def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path, scheduler, patience=9):
    """Train `model` with early stopping and return the best checkpoint.

    Args:
        n_epochs: maximum number of epochs to run
        loaders: dict with 'train' and 'valid' DataLoaders
        model: the network to train
        optimizer: optimizer updating the model parameters
        criterion: loss function
        use_cuda: move batches to GPU when True
        save_path: prefix used by EarlyStopping and for the final weights file
        scheduler: LR scheduler stepped with the validation loss
            (assumes a ReduceLROnPlateau-style scheduler — TODO confirm)
        patience: epochs without improvement before early stopping

    Returns:
        The model with the best (early-stopped) weights loaded.
    """
    early_stopping = EarlyStopping(save_path=save_path, patience=patience, )
    # initialize tracker for minimum validation loss
    # NOTE(review): valid_loss_min is never updated or read below;
    # EarlyStopping tracks the best loss instead.
    valid_loss_min = np.Inf
    for epoch in range(1, n_epochs+1):
        # initialize variables to monitor training and validation loss
        train_loss = 0.0
        valid_loss = 0.0
        ###################
        # train the model #
        ###################
        model.train()
        for batch_idx, (data, target) in enumerate(loaders['train']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            ## find the loss and update the model parameters accordingly
            # clear the gradients of all optimized variables
            optimizer.zero_grad()
            # forward pass: compute predicted outputs by passing inputs to the model
            output = model(data)
            # calculate the batch loss
            loss = criterion(output, target)
            # backward pass: compute gradient of the loss with respect to the model
            loss.backward()
            # perform a single optimization step (parameter update)
            optimizer.step()
            # update training loss as a running average over the batches seen
            train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss))
        ######################
        # validate the model #
        ######################
        model.eval()
        for batch_idx, (data, target) in enumerate(loaders['valid']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            ## update the average validation loss
            # forward pass to get net output
            output = model(data)
            # calculate the batch loss
            loss = criterion(output, target)
            # update validation loss (running average over batches)
            valid_loss += ((1 / (batch_idx + 1)) * (loss.data - valid_loss))
        scheduler.step(valid_loss)
        # print training/validation statistics
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch,
            train_loss,
            valid_loss
            ))
        early_stopping(valid_loss, model)
        if early_stopping.early_stop:
            print("Early stopping")
            break
    # Reload the best weights saved during training.
    # NOTE(review): assumes EarlyStopping writes 'checkpoint.pt' in the
    # current working directory — confirm against EarlyStopping's save logic.
    model.load_state_dict(torch.load('checkpoint.pt'))
    best_vloss = -early_stopping.best_score
    torch.save(model.state_dict(), f'{save_path}_vloss{best_vloss:.5f}.pt')
    print('Finished Training')
    # return trained model
    return model
def micro_jy_to_luminosity(mjy, msun, d):
    """Convert an SED in µJy to log solar luminosities.

    Parameters
    ----------
    mjy : ndarray
        Flux in microjanskys.
    msun : float
        Absolute magnitude of the Sun. 4.74 is the bolometric absolute
        magnitude of the Sun.
    d : ndarray
        Distance in parsecs.

    Returns
    -------
    logLsun : ndarray
        Log of luminosity in solar units.
    """
    distance_modulus = 5. * np.log10(d) - 5.
    ab_magnitude = 23.9 - 2.5 * np.log10(mjy)
    return -0.4 * (ab_magnitude - distance_modulus - msun)
import re
def _MakeRE(regex_str):
    """Return a compiled regex, expanding our SHORTHAND placeholders first."""
    expanded = regex_str.format(**SHORTHAND)
    return re.compile(expanded)
import random
def generate_rand_num(n):
    """
    Create a list of n random 3-digit numbers (100-999 inclusive).

    :param n: how many numbers to generate
    :return: list of n random integers
    """
    return [random.randint(100, 999) for _ in range(n)]
def preprocess_for_eval(image_bytes,
                        image_size=IMAGE_SIZE,
                        resize_method=tf.image.ResizeMethod.BILINEAR):
    """Preprocesses the given image for evaluation.

    Decodes and center-crops the image, applies `transform`, and returns
    a float32 tensor of shape [image_size, image_size, 3].

    Args:
        image_bytes: `Tensor` representing an image binary of arbitrary size.
        image_size: output image size.
        resize_method: resize method used for the center crop.

    Returns:
        A preprocessed image `Tensor`.
    """
    cropped = _decode_and_center_crop(image_bytes, image_size, resize_method)
    transformed = transform(cropped, image_size)
    shaped = tf.reshape(transformed, [image_size, image_size, 3])
    return tf.image.convert_image_dtype(shaped, dtype=tf.float32)
def _time_to_seconds_nanos(t):
    """
    Convert a time.time()-style timestamp into a (seconds, nanoseconds)
    tuple, splitting the integral and fractional parts.
    """
    whole_seconds = int(t)
    fractional = t - whole_seconds
    return (whole_seconds, int(fractional * constants.SECONDS_TO_NANOS))
import struct
def _extract_impl(ctx, name = "", image = None, commands = None, docker_run_flags = None, extract_file = "", output_file = "", script_file = ""):
    """Implementation for the container_run_and_extract rule.

    This rule runs a set of commands in a given image, waits for the commands
    to finish, and then extracts a given file from the container to the
    bazel-out directory.

    Args:
        ctx: The bazel rule context
        name: String, overrides ctx.label.name
        image: File, overrides ctx.file.image_tar
        commands: String list, overrides ctx.attr.commands
        docker_run_flags: String list, overrides ctx.attr.docker_run_flags
        extract_file: String path inside the container, overrides
            ctx.attr.extract_file
        output_file: File, overrides ctx.outputs.out
        script_file: File, overrides ctx.outputs.script
    """
    # Explicit keyword arguments take precedence over the rule's attributes.
    name = name or ctx.label.name
    image = image or ctx.file.image
    commands = commands or ctx.attr.commands
    docker_run_flags = docker_run_flags or ctx.attr.docker_run_flags
    extract_file = extract_file or ctx.attr.extract_file
    output_file = output_file or ctx.outputs.out
    script = script_file or ctx.outputs.script

    # Generate a shell script to execute the run statement
    ctx.actions.expand_template(
        template = ctx.file._extract_tpl,
        output = script,
        substitutions = {
            "%{image_tar}": image.path,
            "%{commands}": _process_commands(commands),
            "%{docker_run_flags}": " ".join(docker_run_flags),
            "%{extract_file}": extract_file,
            "%{output}": output_file.path,
            "%{image_id_extractor_path}": ctx.file._image_id_extractor.path,
        },
        is_executable = True,
    )

    # Run the generated script; the image tarball and the id extractor are
    # runtime inputs of the action.
    ctx.actions.run(
        outputs = [output_file],
        tools = [image, ctx.file._image_id_extractor],
        executable = script,
    )

    # NOTE(review): returning an empty struct() is legacy-provider style;
    # modern Starlark rules return a list of providers.
    return struct()
def nlopt_newuoa(
    criterion_and_derivative,
    x,
    lower_bounds,
    upper_bounds,
    *,
    convergence_relative_params_tolerance=CONVERGENCE_RELATIVE_PARAMS_TOLERANCE,
    convergence_absolute_params_tolerance=CONVERGENCE_ABSOLUTE_PARAMS_TOLERANCE,
    convergence_relative_criterion_tolerance=CONVERGENCE_RELATIVE_CRITERION_TOLERANCE,
    convergence_absolute_criterion_tolerance=CONVERGENCE_ABSOLUTE_CRITERION_TOLERANCE,
    stopping_max_criterion_evaluations=STOPPING_MAX_CRITERION_EVALUATIONS,
):
    """Minimize a scalar function using the NEWUOA algorithm.

    The bound-constrained variant (LN_NEWUOA_BOUND) is selected whenever
    any finite bound is present; otherwise the unconstrained LN_NEWUOA
    is used.

    For details see :ref:`list_of_nlopt_algorithms`.
    """
    has_finite_bounds = bool(
        np.any(np.isfinite(lower_bounds)) or np.any(np.isfinite(upper_bounds))
    )
    algorithm = nlopt.LN_NEWUOA_BOUND if has_finite_bounds else nlopt.LN_NEWUOA

    return _minimize_nlopt(
        criterion_and_derivative,
        x,
        lower_bounds,
        upper_bounds,
        algorithm=algorithm,
        algorithm_name="nlopt_newuoa",
        convergence_xtol_rel=convergence_relative_params_tolerance,
        convergence_xtol_abs=convergence_absolute_params_tolerance,
        convergence_ftol_rel=convergence_relative_criterion_tolerance,
        convergence_ftol_abs=convergence_absolute_criterion_tolerance,
        stopping_max_eval=stopping_max_criterion_evaluations,
    )
import os
def ExistsOnPath(cmd):
    """Returns whether the given executable exists on PATH."""
    # os.getenv may return None when PATH is unset; default to "" so split
    # yields an empty search list instead of raising AttributeError.
    paths = os.getenv('PATH', '').split(os.pathsep)
    return any(os.path.exists(os.path.join(d, cmd)) for d in paths)
def mean_velocity_error(predicted, target):
    """
    Mean per-joint velocity error: the mean Euclidean distance between the
    first temporal derivatives (frame-to-frame differences) of the two
    pose sequences.
    """
    assert predicted.shape == target.shape
    predicted_velocity = np.diff(predicted, axis=0)
    target_velocity = np.diff(target, axis=0)
    last_axis = target.ndim - 1
    return np.linalg.norm(predicted_velocity - target_velocity, axis=last_axis).mean()
import zlib
def getObjectFormat(repo, sha):
    """Return the object format (type) of the object identified by `sha`.

    NOTE: `sha` has to be a full sha, since it is split into the two-char
    directory prefix and the remaining file name.
    """
    object_path = repo_file(repo, "objects", sha[0:2], sha[2:])
    with open(object_path, "rb") as handle:
        decompressed = zlib.decompress(handle.read())
    # The header is "<type> <size>\x00..."; the type ends at the first space.
    space_at = decompressed.find(b' ')
    return decompressed[0:space_at].decode("ascii")
def v(a, b, th, nu, dimh, k):
    """Function used in **analytic_solution_slope()**.

    Sums two diffraction terms (vp + vm) evaluated at each angle in `th`.
    NOTE(review): the structure (wedge parameter `nu`, the integer N+/N-
    selectors, and the transition function `fp`) resembles a uniform
    diffraction coefficient formulation — confirm against the source of
    analytic_solution_slope().

    :param a: per-angle distance-like parameter, ndarray of length dimh
    :type a: numpy.ndarray
    :param b: scalar distance-like parameter
    :type b: float
    :param th: angles, ndarray of length dimh
    :type th: numpy.ndarray
    :param nu: wedge parameter (scalar)
    :type nu: float
    :param dimh: number of angles (length of a and th)
    :type dimh: int
    :param k: wavenumber (scalar)
    :type k: float
    :return: complex ndarray of length dimh (vp + vm)
    :rtype: numpy.ndarray
    """
    # real, b
    # real, dimension(dimh) :: a, th
    # real, dimension(dimh) :: vp, vm, v
    # integer, dimension(dimh) :: nplus, nmoins
    vp = np.zeros((dimh), dtype=np.complex128)
    vm = np.zeros((dimh), dtype=np.complex128)
    nmoins = np.zeros((dimh), dtype=np.float64)
    nplus = np.ones((dimh), dtype=np.float64)
    for th_idx, th_val in enumerate(th):
        # Select the integer branch values N+ / N- from the angle relative
        # to the wedge boundaries.
        if (th_val - nu * np.pi + np.pi) <= 0:
            nplus[th_idx] = 0
            nmoins[th_idx] = 1
        if (th_val - nu * np.pi - np.pi) <= 0:
            nmoins[th_idx] = 0
        if (th_val + nu * np.pi - np.pi) <= 0:
            nmoins[th_idx] = -1
        vp[th_idx] = - (np.exp(1j * np.pi / 4)) * (2 * np.pi * k * a[th_idx] * b) ** (-.5) / (2 * nu) * \
                     (np.tan((np.pi + th_val) / (2 * nu))) ** (-1) * \
                     fp(2 * k * a[th_idx] * (np.cos((2 * nplus[th_idx] * nu * np.pi - th_val) / 2)) ** 2)
        vm[th_idx] = - (np.exp(1j * np.pi / 4)) * (2 * np.pi * k * a[th_idx] * b) ** (-.5) / (2 * nu) * \
                     (np.tan((np.pi - th_val) / (2 * nu))) ** (-1) * \
                     fp(2 * k * a[th_idx] * (np.cos((2 * nmoins[th_idx] * nu * np.pi - th_val) / 2)) ** 2)
    return vp + vm
def image_filenames(image_numbers):
    """List of image file names (with directory) for the given 1-based indices.

    image_numbers: list or array of 1-based indices
    """
    return list(map(filename, image_numbers))
import asyncio
def _load_from_mongo(mongo_uri: str):
    """
    Load API Test information from a MongoDB.

    Collection used to store API Test information will be named: **apitest**

    >>> _load_from_mongo("mongodb://127.0.0.1:27017")
    <type 'APITest'>
    >>> _load_from_mongo("mongodb://user:pass@mongo.example.com:27017/database")
    <type 'APITest'>

    :param mongo_uri: MongoDB connection string
    :type mongo_uri: str
    :return: Return a APITest object instance
    :rtype: APITest
    :raise ApitestConnectionError: If some error occurs when try to connect to MongoDB
    """
    try:
        # Make connection
        client = motor.motor_asyncio.AsyncIOMotorClient(mongo_uri)
        # Get database from connection string.  urlparse() returns the
        # path INCLUDING the leading "/" (e.g. "/database"), which is not
        # a valid database name -- strip it off before use.
        db = urlparse(mongo_uri).path.lstrip("/")
        if not db:
            db = "apitest"
        # Get database -> collection
        col = client[db]["apitest"]
        # Do the query.  The motor client is bound to the current event
        # loop, so reuse it rather than creating a fresh one.
        loop = asyncio.get_event_loop()
        ret = loop.run_until_complete(_do_mongodb_query(col, {}))
        if ret:
            return ret[0]
        else:
            return {}
    except Exception as e:
        raise ApitestConnectionError from e
from datetime import datetime
def string_as_datetime(time_str):
    """Parse a timestamp like ``'2017-06-05T22:45:24.423+0000'``.

    The trailing ``+HHMM``/``-HHMM`` part becomes the UTC offset of the
    returned timezone-aware :class:`datetime.datetime`.

    :param time_str: timestamp string in the format above
    :return: aware datetime carrying the parsed UTC offset
    """
    # The %z directive understands basic-format UTC offsets (+HHMM /
    # -HHMM), so the whole string parses in one call instead of
    # splitting the offset off and constructing a tzinfo by hand.
    return datetime.strptime(time_str, '%Y-%m-%dT%H:%M:%S.%f%z')
def __find_regexp_in_pdf(extra_data, patterns, forbidden_patterns=None, accept_even_if_not_found=False):
    """
    Finds all matches for given patterns with surrounding characters in all filetypes.
    Fails only if there are no matches at all or there is a match for a forbidden pattern.

    :param extra_data: mapping holding an 'extracted_text' dict of {filetype: text}
    :param patterns: iterable of string patterns
    :param forbidden_patterns: optional iterable of patterns that must NOT match
    :param accept_even_if_not_found: check will be accepted if no forbidden patterns are
        found, even if normal patterns are not found.
    :return: tuple (check_accepted, details, None)
    """
    check_accepted = True
    details = []
    # .iteritems() was Python 2 only; .items() is the Python 3 equivalent.
    for filetype, data in extra_data['extracted_text'].items():
        # first check for forbidden patterns
        if forbidden_patterns:
            forbidden_matches = __find_regexp(data, forbidden_patterns)
            if forbidden_matches:
                check_accepted = False
                details.append('Found forbidden match in %s: "%s"' % (filetype, '", "'.join(set(forbidden_matches))))
        matches = __find_regexp(data, patterns)
        if not matches:
            if not accept_even_if_not_found:
                check_accepted = False
            details.append('Not found in %s' % filetype)
        else:
            details.append('Found in %s as: "%s"' % (filetype, '", "'.join(set(matches))))
    return check_accepted, details, None
def truncated_normal(mean, std, num_samples, min, max):
    """
    Return samples with normal distribution inside the given region
    """
    # NOTE(review): this draws 2*num_samples vectors and folds them into
    # [min, max) with a modulo, which WRAPS the distribution rather than
    # truncating it (no rejection sampling) -- confirm this is intended.
    # `std` is passed as the covariance matrix of the multivariate normal,
    # and `min`/`max` shadow the builtins (kept for interface stability).
    return np.random.multivariate_normal(mean=mean, cov=std, size=num_samples * 2) % (max - min) + min
def post_required(func):
    """Decorator that returns an error unless request.method == 'POST'.

    The wrapped view keeps its original __name__/__doc__ via
    functools.wraps, so introspection and URL-routing by function name
    keep working.
    """
    import functools

    @functools.wraps(func)
    def post_wrapper(request, *args, **kwds):
        if request.method != 'POST':
            return HttpResponse('This requires a POST request.', status=405)
        return func(request, *args, **kwds)
    return post_wrapper
def get_serial():
    """
    Gets a globally unique serial number for each music change.
    """
    # `serial` and `unique` are module-level state defined elsewhere in
    # this file; each call increments the counter so every returned
    # (unique, serial) pair is distinct for the lifetime of `unique`.
    global serial
    serial += 1
    return (unique, serial)
def model(load, shape, checkpoint=None):
    """Return a model from file or to train on.

    Args:
        load: if truthy (and `checkpoint` is given), load the saved
            Keras model instead of building a new one.
        shape: input image shape, e.g. (rows, cols, channels).
        checkpoint: path to a previously saved Keras model file.

    Returns:
        A compiled Keras ``Sequential`` model (MSE loss, Adam optimizer)
        ending in a single linear output unit (regression target).
    """
    if load and checkpoint: return load_model(checkpoint)
    # Widths of the convolutional stack and sizes of the dense layers.
    conv_layers, dense_layers = [32, 32, 64, 128], [1024, 512]
    model = Sequential()
    model.add(Convolution2D(32, 3, 3, activation='elu', input_shape=shape))
    model.add(MaxPooling2D())
    for cl in conv_layers:
        model.add(Convolution2D(cl, 3, 3, activation='elu'))
        model.add(MaxPooling2D())
    model.add(Flatten())
    for dl in dense_layers:
        model.add(Dense(dl, activation='elu'))
        model.add(Dropout(0.5))
    model.add(Dense(1, activation='linear'))
    model.compile(loss='mse', optimizer="adam")
    return model
def cross_kerr_interaction(kappa, mode1, mode2, in_modes, D, pure=True, batched=False):
    """Apply the cross-Kerr unitary to the two specified modes and return the result."""
    gate_matrix = cross_kerr_interaction_matrix(kappa, D, batched)
    return two_mode_gate(gate_matrix, mode1, mode2, in_modes, pure, batched)
def edgelength(G, node_wise=False, edge_wise=False, summary="mean"):
    """
    Calculate the physical (Euclidean) distance between connected nodes,
    using each node's 'xyz' attribute.

    node_wise: if True, return {node: sum of lengths of its incident edges}
    edge_wise: if True, return {edge: length} for every edge
    summary: "mean" or "median"; collapse all edge lengths to one value
    """
    lengths = {}
    for edge in G.edges():
        displacement = (np.array(G.nodes[edge[0]]['xyz']) -
                        np.array(G.nodes[edge[1]]['xyz']))
        lengths[edge] = np.absolute(np.linalg.norm(displacement))

    if node_wise:
        per_node = {}
        for node in G.nodes():
            per_node[node] = np.sum(
                [length for edge, length in lengths.items() if node in edge])
        return per_node
    elif edge_wise:
        return lengths
    elif summary == "mean":
        return np.mean(list(lengths.values()))
    elif summary == "median":
        return np.median(list(lengths.values()))
def matern52(params, x1, x2, warp_func=None):
    """Matern 5/2 kernel: Eq.(4.17) of GPML book.

    Args:
        params: parameters for the kernel.
        x1: a d-dimensional vector representing a single datapoint.
        x2: a d-dimensional vector representing a single datapoint that can
            be the same as or different from x1.
        warp_func: optional dictionary that specifies the warping function
            for each parameter.

    Returns:
        The kernel function evaluation on x1 and x2.
    """
    lengthscale, signal_variance = retrieve_params(
        params, ['lengthscale', 'signal_variance'], warp_func)
    # Scaled distance sqrt(5) * ||(x1 - x2) / lengthscale||.
    scaled_dist = jnp.sqrt(5) * linalg.safe_l2norm((x1 - x2) / lengthscale)
    polynomial = 1 + scaled_dist + scaled_dist**2 / 3
    return signal_variance * polynomial * jnp.exp(-scaled_dist)
def isint(i):
    """Return True if *i* is a Python or NumPy integer.

    Uses the ``np.integer`` abstract base class, so every NumPy integer
    width -- signed and unsigned -- is recognised, not just a fixed list
    of signed dtypes.
    """
    return isinstance(i, (int, np.integer))
def decode_reponse(response):
    """Return utf-8 string."""
    # NOTE(review): the function name has a typo ("reponse"); kept as-is
    # because external callers may rely on it.
    # Undecodable byte sequences are silently dropped ("ignore").
    return response.data.decode("utf-8", "ignore")
def add_sheet_user(session, *, cls, discord_user, start_row, sheet_src=None):
    """
    Add a fort sheet user to system based on a Member.

    Kwargs:
        cls: The class of the sheet user like FortUser.
        discord_user: The DiscordUser object of the requesting user.
        start_row: Starting row if none inserted.
        sheet_src: Optional sheet source tag stored on the new user.

    Returns:
        The newly created sheet user, already committed to the session.
    """
    # Place the new user on the next free row at or after start_row.
    next_row = next_sheet_row(session, cls=cls, start_row=start_row, sheet_src=sheet_src)
    user = cls(name=discord_user.pref_name, cry=discord_user.pref_cry, row=next_row)
    if sheet_src:
        user.sheet_src = sheet_src
    session.add(user)
    session.commit()
    return user
def clean_uncertain(value, keep=False):
    """
    Handle uncertain values in the data.

    Process any value containing a '[?]' string: such items are either
    cleaned (marker removed, doubled spaces collapsed) when `keep` is
    True, or dropped entirely when `keep` is False.

    :param value: the value or list of values to process
    :param keep: whether to keep the clean value or discard it
    :return: same shape as the input -- a list when a list was given,
        otherwise a single value ('' when everything was discarded)
    """
    was_list = isinstance(value, list)
    values = common.listify(value)
    new_list = []
    for val in values:
        if '[?]' in val:
            if keep:
                # Bug fix: the second replace previously substituted a
                # single space with a single space (a no-op).  Collapsing
                # the double spaces left behind by removing '[?]' is the
                # clear intent.
                new_list.append(
                    val.replace('[?]', '').replace('  ', ' ').strip())
        else:
            new_list.append(val)
    # return in same format as original
    if not was_list:
        if not new_list:
            return ''
        return new_list[0]
    return new_list
def traceback_file_lines(trace_text=None):
    """Return the lines of a traceback that identify a source file.

    usage:
        traceback_file_lines(traceback.format_exc())

    If `trace_text` is None, the traceback of the exception currently
    being handled is used (previously a None default crashed with
    AttributeError on .splitlines()).
    """
    if trace_text is None:
        import traceback
        trace_text = traceback.format_exc()
    # CPython indents file lines with TWO spaces: '  File "x.py", line N, in f'
    # (the original single-space prefix never matched real tracebacks).
    return [i for i in trace_text.splitlines()
            if i.startswith('  File "') and '", line' in i]
def load(year, gp, session):
    """Fetch qualifying or race results from the Ergast data source.

    session can be 'Qualifying' or 'Race'; mainly to port on upper
    level libraries.
    """
    if session == 'Qualifying':
        day, selector = 'qualifying', 'QualifyingResults'
    else:
        day, selector = 'results', 'Results'
    return _parse_ergast(fetch_day(year, gp, day))[0][selector]
def pad_extents(extents: Extents, pad: float = 0.05) -> Extents:
    """Pad an Extents by a factor

    Parameters:
        extents: bounding extents to pad
        pad: padding factor, as a fraction of each dimension's span

    Returns:
        padded bounding extents
    """
    # Bug fix: the padding factor was hard-coded to 0.05, silently
    # ignoring the `pad` argument.
    padx = (extents.maxx - extents.minx) * pad
    pady = (extents.maxy - extents.miny) * pad
    return Extents(extents.minx - padx,
                   extents.miny - pady,
                   extents.maxx + padx,
                   extents.maxy + pady)
from typing import Set
def make_VR_model():
    """ This function constructs and returns a pyomo model for the vehicle routing problem this repo is focuses on solving

    The model is a pyomo ``AbstractModel``: sets and parameters are only
    declared here and are populated later from a data file.  Decision
    variables select arcs (x, z), the fleet size (y, nv) and the bus
    type (w) per route, while T and Q_track track service times and the
    on-board load at every node.  The objective minimises total energy
    consumption via the linearised auxiliary variable t.
    """
    model= AbstractModel()
    # model sets:
    model.P = Set () # set of pick ups
    model.D = Set () # set of drop offs
    model.R = Set () # set of routees ( really set of route routes)
    model.N = Set () # set of network nodes
    model.S = Set () # possible numbers of busees on a route
    model.Ty = Set () # type of buses
    model.P_union_D = model.P | model.D
    # model parameters that depend on sets
    model.Q = Param(model.Ty) # route cap
    model.q = Param(model.N) # demand at a node
    model.E = Param(model.Ty) # Energy consumption per mile
    model.s = Param(model.N) # dwell time at a node
    model.tt = Param(model.N,model.N) # matrix of travel times between nodes
    model.n = Param(model.S) # number of buses encoded by each size option
    # model parameters that DON'T depend on sets:
    model.M = Param() # big M constant
    model.max_ride_time = Param() # max time a passaenger can be on a route
    model.alpha = Param() # loading time constant
    model.r = Param() # number of requests
    model.HW = Param() # target headway
    # variables (continuous):
    model.z = Var(model.N,model.N,model.R, domain=Binary) # precedence indicator (i before j on route)
    model.x = Var(model.N,model.N,model.R, domain=Binary) # arc i->j used on route
    model.y = Var(model.S,model.R, domain=Binary) # size option chosen for route
    model.w = Var(model.Ty,model.R, domain=Binary) # bus type chosen for route
    model.T = Var(model.N,model.R, domain=NonNegativeReals) # service time at node
    model.Q_track = Var(model.N,model.R, domain=NonNegativeReals) # load on board after node
    model.t = Var(model.Ty, model.S,model.R, domain=NonNegativeReals) # auxilary variable
    model.nv = Var(model.R,domain = NonNegativeIntegers) # number of vehicles on route
    model.size = Var(model.R,domain= NonNegativeIntegers) # capacity of chosen bus type
    model.dynamic_q = Var(model.N,model.R) # per-node demand scaled by route cycle time
    # objective function ###########################################################
    def total_cost(model):
        # Sum of the linearised energy terms t[type, size, route].
        obj_1 = 0.0
        # expansion sum
        for route in model.R:
            for i in model.S:
                for type_ in model.Ty:
                    obj_1 += model.t[type_,i,route]
        return obj_1
    model.Total_Cost = Objective(rule=total_cost, sense=minimize)
    # Model Constraints ############################################################
    def t_aux(model,type_,s,route):
        # Big-M linearisation of t >= E[type]*n[s] when both y[s,route]
        # and w[type_,route] are selected.
        obj_1 = 0.0
        obj_2 = 0.0
        obj_3 = 0.0
        obj_4 = 0.0
        # expansion sum
        obj_1 = model.E[type_] * model.n[s]
        return - model.M*(2 - model.y[s,route] - model.w[type_,route]) + obj_1 <= model.t[type_,s,route]
    model.con_t_aux = Constraint(model.Ty,model.S,model.R,rule=t_aux)
    ########################
    def y_sum(model,route):
        # At most one fleet-size option per route.
        obj = 0.0
        for s in model.S:
            obj += model.y[s,route]
        return obj <= 1
    model.con_y_sum = Constraint(model.R, rule = y_sum)
    #######################
    def y_encode(model,route):
        # Decode the chosen size option into the integer vehicle count nv.
        obj = 0.0
        for s in model.S:
            obj += model.y[s,route] * model.n[s]
        return obj == model.nv[route]
    model.con_y_encode= Constraint(model.R, rule = y_encode)
    #######################
    def route_number(model,route):
        # Enough vehicles to meet the headway given the route cycle time
        # T at the return-depot node (index 2r+1).
        k= 2*model.r + 1
        return (model.T[k,route]/model.HW) - .01 <= model.nv[route]
    model.con_route_number = Constraint(model.R, rule = route_number)
    #######################
    def w_sum(model,route):
        # Exactly one bus type per route.
        obj = 0.0
        for type_ in model.Ty:
            obj += model.w[type_,route]
        return obj == 1
    model.con_w_sum = Constraint(model.R, rule = w_sum)
    #######################
    def route_size(model,route):
        # Capacity of the chosen bus type.
        obj = 0.0
        for type_ in model.Ty:
            obj += model.w[type_,route] * model.Q[type_]
        return obj == model.size[route]
    model.con_route_size= Constraint(model.R, rule = route_size)
    ###################################
    def no_loops_z(model,i,route):
        return model.z[i,i,route] == 0
    model.con_no_loops_z = Constraint(model.N,model.R, rule = no_loops_z)
    #######################
    def x_and_z(model,i,j,route):
        # Using arc i->j implies i precedes j.
        return model.x[i,j,route] <= model.z[i,j,route]
    model.con_x_and_z = Constraint(model.N, model.N,model.R, rule = x_and_z )
    #######################
    def sub_tour_1(model,i,j,route):
        # If both i and j are visited on the route, one must precede the other.
        if i != j:
            obj_1 = 0
            obj_2 = 0
            for ii in model.N:
                obj_1 += model.x[j,ii,route]
            for jj in model.N:
                obj_2 += model.x[i,jj,route]
            return model.z[i,j,route] + model.z[j,i,route] >= obj_1 + obj_2 - 1
        else:
            return Constraint.Skip
    model.con_sub_tour_1 = Constraint(model.N, model.N,model.R, rule = sub_tour_1 )
    def sub_tour_1_prime(model,i,j,route):
        # Precedence is antisymmetric.
        if i != j:
            return model.z[i,j,route] + model.z[j,i,route] <= 1
        else:
            return Constraint.Skip
    model.con_sub_tour_1_prime = Constraint(model.N, model.N,model.R, rule = sub_tour_1_prime )
    ########################
    def sub_tour_2(model,i,j,k,route):
        # Precedence is acyclic over any triple of nodes.
        return model.z[i,j,route] + model.z[j,k,route] + model.z[k,i,route] <= 2
    model.con_sub_tour_2 = Constraint(model.N, model.N,model.N,model.R, rule = sub_tour_2 )
    ###########################################################
    def one_pickup_and_only_one(model,i):
        # Every pick-up node is served exactly once across all routes.
        obj = 0
        for route in model.R:
            for j in model.N:
                obj += model.x[i,j,route]
        return obj == 1
    model.con_one_pickup_and_only_one = Constraint(model.P, rule = one_pickup_and_only_one )
    ########################
    def pick_up_drop_off_same_vehicle(model,i,route):
        # Drop-off node i+r is served by the same route as pick-up i.
        obj = 0
        k = i + model.r
        for j in model.N:
            obj += model.x[i,j,route] - model.x[k,j,route]
        return obj == 0
    model.con_pick_up_drop_off_same_vehicle = Constraint(model.P,model.R, rule = pick_up_drop_off_same_vehicle )
    ########################
    def all_leave_depo(model,route):
        # Every route departs the origin depot (node 0) exactly once.
        obj = 0
        for j in model.N:
            obj += model.x[0,j,route]
        return obj == 1
    model.con_all_leave_depo = Constraint(model.R, rule = all_leave_depo )
    ########################
    def if_enter_must_leave(model,i,route):
        # Flow conservation at every pick-up/drop-off node.
        obj_1 = 0
        obj_2 = 0
        for j in model.N:
            obj_1 += model.x[j,i,route]
        for j in model.N:
            obj_2 += model.x[i,j,route]
        return obj_1 - obj_2 == 0
    model.con_if_enter_must_leave = Constraint(model.P_union_D, model.R, rule = if_enter_must_leave )
    ########################
    def all_return_depo(model,route):
        # Every route returns to the end depot (node 2r+1) exactly once.
        obj = 0
        k= 2*model.r + 1
        for i in model.N:
            obj += model.x[i,k,route]
        return obj == 1
    model.con_all_return_depo = Constraint(model.R, rule = all_return_depo )
    #######################
    def service_times(model,i,j,route):
        # Big-M time propagation along used arcs (travel + dwell).
        return model.T[j,route] >= (model.T[i,route] + model.s[i] + model.tt[i,j]) - model.M*(1 - model.z[i,j,route])
    model.con_service_times = Constraint(model.N,model.N,model.R, rule = service_times )
    #######################
    def precedence(model,i,route):
        # A drop-off cannot happen before its pick-up plus travel time.
        return model.T[i + model.r,route] - (model.T[i,route] + model.s[i] + model.tt[i,i + model.r]) >= 0
    model.con_precedence = Constraint(model.P,model.R, rule = precedence )
    #######################
    def dynamic_demand(model,i,route,s):
        # Demand at node i scaled by the route cycle time and fleet size
        # (active only for the chosen size option y[s,route]).
        k= 2*model.r + 1
        return - model.M*(1 - model.y[s,route]) + ((model.T[k,route]*model.q[i])/(model.n[s] * 60.0)) <= model.dynamic_q[i,route]
    model.con_dynamic_demand = Constraint(model.N, model.R, model.S, rule = dynamic_demand)
    #######################
    def dynamic_vehicle_capacity(model,i,j,route):
        # Big-M load propagation along used arcs.
        return model.Q_track[j,route] >= (model.Q_track[i,route]+ model.dynamic_q[j,route]) - model.M*(1 - model.x[i,j,route])
    model.con_dynamic_vehicle_capacity = Constraint(model.N,model.N,model.R, rule = dynamic_vehicle_capacity )
    #######################
    def conservation_of_mass(model,i,route):
        # Whatever boards at a pick-up alights at the matching drop-off.
        return model.dynamic_q[i,route] + model.dynamic_q[model.r + i,route] == 0
    model.conservation_of_mass = Constraint(model.P,model.R, rule = conservation_of_mass)
    #######################
    def capacity_1_lower(model,i,route):
        return 0 <= model.Q_track[i,route]
    model.con_capacity_1_lower = Constraint(model.N,model.R, rule = capacity_1_lower)
    ########################
    def capacity_1_upper(model,i,route):
        return model.Q_track[i,route] <= model.size[route] + model.dynamic_q[i,route] #+ model.S_Q[i]
    model.con_capacity_1_upper = Constraint(model.N,model.R, rule = capacity_1_upper)
    #######################
    def capacity_2_lower(model,i,route):
        return model.dynamic_q[i,route] <= model.Q_track[i,route]
    model.con_capacity_2_lower = Constraint(model.N,model.R, rule = capacity_2_lower)
    ########################
    def capacity_2_upper(model,i,route):
        return model.Q_track[i,route] <= model.size[route] #+ model.S_Q[i]
    model.con_capacity_2_upper = Constraint(model.N,model.R, rule = capacity_2_upper)
    #######################
    def trip_time(model, i, route):
        # Cap the on-board time of each passenger (only binding when the
        # pick-up node is actually visited on this route).
        obj = 0
        for j in model.N:
            obj += model.x[i,j,route]
        return model.T[model.r + i,route] - (model.T[i,route] + model.s[i]) <= model.max_ride_time + (1 - obj)*model.M
    model.con_trip_time = Constraint(model.P,model.R, rule = trip_time)
    ########################
    def cant_leave_depo(model,i,route):
        # No arcs may leave the end depot (node 2r+1).
        k = 2*model.r + 1
        return model.x[k,i,route] == 0
    model.con_cant_leave_depo = Constraint(model.N,model.R, rule = cant_leave_depo)
    ########################
    def cant_return_to_origin(model,i,route):
        return model.x[i,0,route] == 0
    model.con_cant_return_to_origin = Constraint(model.N,model.R, rule = cant_return_to_origin)
    # ########################
    def no_loops(model,i,route):
        return model.x[i,i,route] == 0
    model.con_no_loops = Constraint(model.N,model.R, rule = no_loops)
    # ########################
    # NOTE(review): the next two rules are defined but their Constraint
    # registrations are commented out -- presumably disabled on purpose.
    def no_origin_to_dropoff(model,i,route):
        return model.x[0,i,route] == 0
    #model.con_no_origin_to_dropoff = Constraint(model.D,model.R, rule = no_origin_to_dropoff)
    # ########################
    def no_pickup_to_depo(model,i,route):
        m = max(model.N)
        return model.x[i,m,route] == 0
    #model.con_no_pickup_to_depo = Constraint(model.P,model.R, rule = no_pickup_to_depo)
    def drop_then_pick_up(model,i,route):
        # Even-indexed drop-offs chain directly into the next pick-up.
        obj = 0
        k = 1 + model.r
        if i % 2 == 0:
            for j in model.N:
                obj += model.x[j,i,route]
            return obj == model.x[i,i - k,route]
        else:
            return Constraint.Skip
    model.con_drop_then_pick_up = Constraint(model.D,model.R, rule = drop_then_pick_up)
    #################################
    def upper_bound(model):
        # Optional objective cut-off (registration commented out).
        obj_1 = 0.0
        for route in model.R:
            for i in model.S:
                for type_ in model.Ty:
                    obj_1 += model.t[type_,i,route]
        return obj_1 <= 800
    #model.con_upper_bound = Constraint(rule = upper_bound)
    return model
from typing import Union
def get_image(
    difficulty: Union[gd.DemonDifficulty, gd.LevelDifficulty],
    is_featured: bool = False,
    is_epic: bool = False,
) -> str:
    """Build the image name for a difficulty, honouring the epic/featured flags."""
    parts = difficulty.name.lower().split("_")
    # "epic" takes priority over "featured" when both flags are set.
    suffix = "epic" if is_epic else ("featured" if is_featured else None)
    if suffix is not None:
        parts.append(suffix)
    return "-".join(parts)
import json
def jsonpify(func):
    """
    Like jsonify but wraps result in a JSONP callback if a 'callback'
    query param is supplied.

    functools.wraps preserves the view's __name__, which Flask uses as
    the endpoint name -- without it, decorating several views with this
    decorator collides on the endpoint "inner".
    """
    import functools

    @functools.wraps(func)
    def inner(*args, **kwargs):
        data = func(*args, **kwargs)
        callback = request.args.get('callback')
        if callback:
            # Wrap the JSON payload in the requested callback function.
            response = app.make_response('%s(%s)' % (callback, json.dumps(data)))
            response.mimetype = 'text/javascript'
            return response
        return jsonify(data)
    return inner
def ordered(obj):
""" Sort JSON blob by keys """
if isinstance(obj, dict):
return sorted((k, ordered(v)) for k, v in list(obj.items()))
if isinstance(obj, list):
return sorted(ordered(x) for x in obj)
else:
return obj | dba08ec9ece30cfd01d3fcdde4b32e9e42086079 | 3,634,483 |
import copy
def apply_perturbation(X, y, perturbations_info):
    """Apply a perturbation spec to the features and return (X_p, y_p)."""
    # perturbations_info[3] is the perturbation class; [1] and [2] are its
    # configuration arguments.
    perturber = perturbations_info[3](X, None, perturbations_info[1],
                                      perturbations_info[2])
    perturbed = perturber.apply2features(copy.copy(X)).squeeze(2)
    return perturbed, copy.copy(y)
def iris_sji_color_table(measurement, aialike=False):
    """
    Return the standard color table for IRIS SJI files.

    Parameters
    ----------
    measurement : str
        SJI waveband key: '1330', '1400', '2796', '2832', '1600',
        '5000', 'FUV', 'NUV' or 'SJI_NUV'.
    aialike : bool
        If True, use AIA-like color curves for the 1330/1400/2796/2832
        bands instead of the IRIS defaults.

    Raises
    ------
    ValueError
        If `measurement` is not a recognised waveband key.
    """
    # base vectors for IRIS SJI color tables
    # c0: linear ramp; c1: sqrt ramp; c2: quadratic ramp; c3/c4/c5:
    # blends and piecewise ramps combined below into RGB channels.
    c0 = np.arange(0, 256)
    c1 = (np.sqrt(c0) * np.sqrt(255)).astype(np.uint8)
    c2 = (c0**2 / 255.).astype(np.uint8)
    c3 = ((c1 + c2 / 2.) * 255. / (np.max(c1) + np.max(c2) / 2.)).astype(
        np.uint8)
    c4 = np.zeros(256).astype(np.uint8)
    c4[50:256] = (1 / 165. * np.arange(0, 206)**2).astype(np.uint8)
    c5 = ((1 + c1 + c3.astype(np.uint)) / 2.).astype(np.uint8)

    # Piecewise-linear red/green/blue ramps (rr/gg/bb) and their
    # AIA-like variants (agg/abb).
    rr = np.ones(256, dtype=np.uint8) * 255
    rr[0:176] = np.arange(0, 176) / 175. * 255.
    gg = np.zeros(256, dtype=np.uint8)
    gg[100:256] = np.arange(0, 156) / 155. * 255.
    bb = np.zeros(256, dtype=np.uint8)
    bb[150:256] = np.arange(0, 106) / 105. * 255.
    agg = np.zeros(256, dtype=np.uint8)
    agg[120:256] = np.arange(0, 136) / 135. * 255.
    abb = np.zeros(256, dtype=np.uint8)
    abb[190:256] = np.arange(0, 66) / 65. * 255.

    if aialike:
        color_table = {
            '1330': (c1, c0, c2),
            '1400': (rr, agg, abb),
            '2796': (rr, c0, abb),
            '2832': (c3, c3, c2),
        }
    else:
        color_table = {
            '1330': (rr, gg, bb),
            '1400': (c5, c2, c4),
            '2796': (c1, c3, c2),
            '2832': (c0, c0, c2),
        }

    # Wavebands whose curves do not depend on the aialike flag.
    color_table.update({
        '1600': (c1, c0, c0),
        '5000': (c1, c1, c0),
        'FUV': (rr, gg, bb),
        'NUV': (c1, c3, c2),
        'SJI_NUV': (c0, c0, c0)
    })

    try:
        r, g, b = color_table[measurement]
    except KeyError:
        raise ValueError("Invalid IRIS SJI waveband.  Valid values are \n" +
                         str(list(color_table.keys())))
    return _cmap_from_rgb(r, g, b, f'IRIS SJI {measurement:s}')
def clipToCollection(image, featureCollection, keepFeatureProperties=True):
    """ Clip an image using each feature of a collection and return an
    ImageCollection with one image per feature """
    def _clip_to_feature(feature):
        result = image.clip(feature.geometry())
        if keepFeatureProperties:
            result = result.copyProperties(feature)
        return result

    return ee.ImageCollection(featureCollection.map(_clip_to_feature))
def bbox_filter(image, bboxes, labels):
    """Clip bounding boxes to the image bounds and drop zero-area boxes.

    Returns the (unchanged) image together with the clipped boxes and
    the labels of the boxes that survive the filter.
    """
    height, width, _ = image.shape
    left = np.maximum(bboxes[..., 0], 0.)
    top = np.maximum(bboxes[..., 1], 0.)
    # The epsilon keeps coordinates strictly inside the image.
    right = np.minimum(bboxes[..., 2], width - 1e-8)
    bottom = np.minimum(bboxes[..., 3], height - 1e-8)

    clipped = np.stack([left, top, right, bottom], axis=-1)
    areas = np.maximum(right - left, 0) * np.maximum(bottom - top, 0)
    keep = areas > 0.
    return image, clipped[keep], labels[keep]
import json
def sign_transaction(source_address, keys, redeem_script, unsigned_hex, input_txs):
    """
    Creates a signed transaction

    output => dictionary {"hex": transaction <string>, "complete": <boolean>}

    source_address: <string> input_txs will be filtered for utxos to this source address
    keys: List<string> The private keys you wish to sign with
    redeem_script: <string>
    unsigned_hex: <string> The unsigned transaction, in hex format
    input_txs: List<dict> A list of input transactions to use (bitcoind decoded format)
    """
    # Each UTXO used as input needs its txid, vout index, scriptPubKey,
    # amount, and redeemScript so bitcoind can generate a signature.
    signing_inputs = []
    for tx in input_txs:
        for utxo in get_utxos(tx, source_address):
            signing_inputs.append({
                "txid": tx["txid"],
                "vout": int(utxo["n"]),
                "amount": utxo["value"],
                "scriptPubKey": utxo["scriptPubKey"]["hex"],
                "redeemScript": redeem_script
            })

    return bitcoin_cli_json(
        "signrawtransactionwithkey",
        unsigned_hex, json.dumps(keys), json.dumps(signing_inputs))
import numpy
def CalculateBasakCIC1(mol):
    """
    Obtain the complementary information content with order 1 proposed
    by Basak.
    """
    # Hydrogens are added so the atom count includes implicit hydrogens.
    n_atoms = Chem.AddHs(mol).GetNumAtoms()
    ic1 = CalculateBasakIC1(mol)
    if n_atoms <= 1:
        return 0.0
    return numpy.log2(n_atoms) - ic1
from typing import Any
async def mock_nonpriviledged_user(db: Any, username: str) -> dict:
    """Create a mock user object."""
    # `db` and `username` are accepted to match the expected signature but
    # are unused: the returned dict is a fixed nonprivileged test user.
    # ID is a module-level constant defined elsewhere in this file.
    return {  # noqa: S106
        "id": ID,
        "username": "nonprivileged@example.com",
        "password": "password",
        "role": "nonprivileged",
    }
def idea_create(request):
    """
    Endpoint to create ideas
    ---
    POST:
        serializer: ideas.serializers.IdeaCreationSerializer
        response_serializer: ideas.serializers.IdeaSerializer
    """
    if request.method == 'POST':
        serializer = IdeaCreationSerializer(data=request.data)
        if serializer.is_valid(raise_exception=True):
            author = get_object_or_404(User, pk=serializer.validated_data['author'])
            event = get_object_or_404(Event, pk=serializer.validated_data['event'])
            title = serializer.validated_data['title']
            # 'description' is optional: default to None instead of
            # swallowing arbitrary errors with a bare except.
            description = serializer.validated_data.get('description')
            try:
                new_idea = Idea.objects.create(author=author, event=event, title=title, description=description)
            except Exception as e:
                # Creation failure is treated as a duplicate idea.
                print(e)
                raise NotAcceptable(config.IDEA_EXISTS)
            serializer = IdeaSerializer(new_idea)
            return Response(serializer.data, status=status.HTTP_201_CREATED)
def blocks_to_pem(blobs, marker):
    """Convert binary blobs to a string of concatenated PEM-formatted blocks.

    Args:
        blobs: an iterable of binary blobs
        marker: the marker to use, e.g., CERTIFICATE

    Returns:
        the PEM string.
    """
    # Thin delegation to PemWriter (defined elsewhere in this module).
    return PemWriter.blocks_to_pem_string(blobs, marker)
def scatterList(z):
    """
    scatterList reshapes the solution vector z of the N-vortex ODE for easy 2d plotting.

    Returns a (xs, ys) pair of lists with the even- and odd-indexed
    components of z, respectively.
    """
    half = len(z) // 2
    xs = [z[2 * idx] for idx in range(half)]
    ys = [z[2 * idx + 1] for idx in range(half)]
    return xs, ys
from pathlib import Path
def discover_workflow(path: Path) -> Workflow:
    """
    Find a instance of virtool_workflow.Workflow in the
    python module located at the given path.

    :param path: The :class:`pathlib.Path` to the python file
        containing the module.
    :returns: The first instance of :class:`virtool_workflow.Workflow`
        occurring in `dir(module)`
    :raises ValueError:
        When no instance of virtool_workflow.Workflow can be found.
    """
    module = import_module_from_file(path.name.rstrip(path.suffix), path)
    for value in module.__dict__.values():
        if isinstance(value, Workflow):
            return value
    # No Workflow instance found directly; fall back to collecting one.
    return collect(module)
from typing import List
from typing import Mapping
from typing import Optional
def swagger_endpoint_data_to_df(
    data: List[Mapping],
    headers: Optional[List[str]] = None
) -> pd.DataFrame:
    """Load results from cBioPortal API endpoints to pandas DataFrame.

    Parameters
    ----------
    data : List[Mapping]
        List of dictionaries at an endpoint of the API
    headers : Optional[List[str]]
        Headers of the data contained at the endpoint; if None, headers are read
        from the first endpoint in the list

    returns
    -------
    pd.DataFrame
        Table representation of data at endpoint
    """
    column_names = dir(data[0]) if headers is None else headers
    return list_dict_to_pandas(column_names, data)
def compare():
    """
    This path takes two inputs in multiform/data

    Name of paramaters:
    image1: First image
    image2 : Second image

    Returns a JSON message with the percentage similarity of the two
    faces, or an error message with an appropriate HTTP status code.
    """
    if request.method == 'POST':
        if 'image1' not in request.files or 'image2' not in request.files:
            return make_response(jsonify("Msg: Upload an image"),415)
        image1 = request.files['image1']
        image2 = request.files['image2']
        if image1.filename == '' or image2.filename == '':
            return make_response(jsonify("Msg: Upload an image"),415)
        if image1 and allowed_file(image1.filename):
            if image2 and allowed_file(image2.filename):
                load_image1 = face_recognition.load_image_file(image1)
                load_image2 = face_recognition.load_image_file(image2)
                face1_encoding = face_recognition.face_encodings(load_image1)[0]
                face2_encoding = face_recognition.face_encodings(load_image2)[0]
                face_distances = face_recognition.face_distance([face1_encoding], face2_encoding)
                # Bug fix: face_distance returns an array with one entry
                # per known encoding; extract the scalar so the message
                # doesn't render as "The faces are [0.97] percent alike".
                percent = float((1 - face_distances[0]) * 100)
                msg = "The faces are "+str(percent)+ " percent alike"
                return make_response(jsonify(msg), 200)
            else:
                return make_response(jsonify("Msg:upload an image"),415)
        else:
            return make_response(jsonify("Msg:upload an image"),415)
    else:
        # Bug fix: the 403 status was previously passed to jsonify()
        # instead of make_response(), so the response went out with 200.
        return make_response(jsonify("Invalid request "), 403)
def provides_facts():
    """
    Returns a dictionary keyed on the facts provided by this module. The value
    of each key is the doc string describing the fact.
    """
    switch_style_doc = (
        "A string which indicates the Ethernet "
        "switching syntax style supported by the device. "
        "Possible values are: 'BRIDGE_DOMAIN', 'VLAN', "
        "'VLAN_L2NG', or 'NONE'."
    )
    return {"switch_style": switch_style_doc}
def _continuum_emission(energy_edges_keV, temperature_K, abundances):
    """
    Calculates emission-measure-normalized X-ray continuum spectrum at the source.

    Output must be multiplied by emission measure and divided by 4*pi*observer_distance**2
    to get physical values.

    Which continuum mechanisms are included --- free-free, free-bound, or two-photon --- are
    determined by the file from which the comtinuum parameters are loaded.
    To change the file used, see the setup_continuum_parameters() function.

    Parameters
    ----------
    energy_edges_keV: 1-D array-like
        Boundaries of contiguous spectral bins in units on keV.

    temperature_K: 1-D array-like
        The temperature(s) of the plasma in unit of K.  Must not be a scalar.

    abundances: 1-D `numpy.array` of same length a DEFAULT_ABUNDANCES.
        The abundances for the all the elements.

    Returns
    -------
    2-D array of shape (len(temperature_K), len(energy_edges_keV) - 1)
    carrying CONTINUUM_GRID's intensity unit -- presumably an astropy
    Quantity; confirm against CONTINUUM_GRID["intensity unit"].
    """
    # Handle inputs and derive some useful parameters from them
    log10T_in = np.log10(temperature_K)
    T_in_keV = temperature_K / 11604518  # Convert temperature from K to keV.
    # Get energy bins centers based on geometric mean.
    energy_gmean_keV = stats.gmean(np.vstack((energy_edges_keV[:-1], energy_edges_keV[1:])))

    # Mask Unwanted Abundances
    abundance_mask = np.zeros(len(abundances))
    abundance_mask[CONTINUUM_GRID["abundance index"]] = 1.
    abundances *= abundance_mask

    # Calculate Continuum Intensity Summed Over All Elements
    ##### For Each Temperature as a function of Energy/Wavelength ######
    # Before looping over temperatures, let's perform the calculations that are
    # used over again in the for loop.
    # 1. If many temperatures are input, convolve intensity grid with abundances for all
    # temperatures here.  If only a few temperatures are input, do this step only
    # when looping over input temperatures.  This minimizes computation.
    n_tband = 3
    n_t_grid = len(CONTINUUM_GRID["log10T"])
    n_temperature_K = len(temperature_K)
    n_thresh = n_temperature_K * n_tband
    if n_thresh >= n_t_grid:
        intensity_per_em_at_source_allT = np.zeros(CONTINUUM_GRID["intensity"].shape[1:])
        for i in range(0, n_t_grid):
            intensity_per_em_at_source_allT[i] = np.matmul(
                abundances[CONTINUUM_GRID["sorted abundance index"]],
                CONTINUUM_GRID["intensity"][:, i])
    # 2. Add dummy axes to energy and temperature grid arrays for later vectorized operations.
    repeat_E_grid = CONTINUUM_GRID["E_keV"][np.newaxis, :]
    repeat_T_grid = CONTINUUM_GRID["T_keV"][:, np.newaxis]
    dE_grid_keV = CONTINUUM_GRID["energy bin widths keV"][np.newaxis, :]
    # 3. Identify the indices of the temperature bins containing each input temperature and
    # the bins above and below them.  For each input temperature, these three bins will
    # act as a temperature band over which we'll interpolate the continuum emission.
    selt = np.digitize(log10T_in, CONTINUUM_GRID["log10T"]) - 1
    tband_idx = selt[:, np.newaxis] + np.arange(n_tband)[np.newaxis, :]

    # Finally, loop over input temperatures and calculate continuum emission for each.
    flux = np.zeros((n_temperature_K, len(energy_gmean_keV)))
    for j, logt in enumerate(log10T_in):
        # If not already done above, calculate continuum intensity summed over
        # all elements as a function of energy/wavelength over the temperature band.
        if n_thresh < n_t_grid:
            element_intensities_per_em_at_source = CONTINUUM_GRID["intensity"][:, tband_idx[j]]
            intensity_per_em_at_source = np.zeros(element_intensities_per_em_at_source.shape[1:])
            for i in range(0, n_tband):
                intensity_per_em_at_source[i] = np.matmul(
                    abundances[CONTINUUM_GRID["sorted abundance index"]],
                    element_intensities_per_em_at_source[:, i])
        else:
            intensity_per_em_at_source = intensity_per_em_at_source_allT[tband_idx[j]]

        ##### Calculate Continuum Intensity at Input Temperature ######
        # Do this by interpolating the normalized temperature component
        # of the intensity grid to input temperature(s) and then rescaling.
        # Calculate normalized temperature component of the intensity grid.
        # The clip at 80 keeps the exponential from overflowing float64.
        exponent = (repeat_E_grid / repeat_T_grid[tband_idx[j]])
        exponential = np.exp(np.clip(exponent, None, 80))
        gaunt = intensity_per_em_at_source / dE_grid_keV * exponential
        # Interpolate the normalized temperature component of the intensity grid the the
        # input temperature.
        flux[j] = _interpolate_continuum_intensities(
            gaunt, CONTINUUM_GRID["log10T"][tband_idx[j]], CONTINUUM_GRID["E_keV"], energy_gmean_keV, logt)
    # Rescale the interpolated intensity.
    flux = flux * np.exp(-(energy_gmean_keV[np.newaxis, :] / T_in_keV[:, np.newaxis]))

    # Put intensity into correct units.
    return flux * CONTINUUM_GRID["intensity unit"]
import pdb
import math
def solve(system, total_integration_time, dt, save_frequency=100, debug=False):
    """Simulate the time evolution of all variables within the system.

    Collect all information about the system, create differential
    equations from this information and integrate them (numerically)
    into the future.

    Args:
        system (System): The system that is simulated.
        total_integration_time (pint.Quantity [T]): The time span which
            the system should be solved into the "future". The system will
            be simulated for the time period zero to approximately
            total_integration_time (depending whether
            total_integration_time is a multiple of dt; if not the real
            integration horizon will be bigger than
            [0, total_integration_time]).
        dt (pint.Quantity [T]): Size of the timestep for the simulation.
            The bigger the timestep the faster the simulation will be
            calculated, however, if the timestep is chosen too high
            there can arise numerical instabilites!
        save_frequency (int): Number of timesteps after which the solve
            progress should be saved to a pickle file. NOTE: saving is
            not implemented yet; see the TODO in the time loop below.
        debug (bool): Activates debugging mode (pdb.set_trace()).
            Defaults to False.

    Returns:
        bs_solution.Solution: Solution instance holding the time series
        of fluid mass, box volume, and variable masses for every box.
    """
    # Wall-clock start time; used only for the runtime report at the end.
    func_start_time = time_module.time()

    if debug:
        pdb.set_trace()

    # Number of time steps - round up if there is a remainder, then
    # recalculate the total integration time so that it is an exact
    # multiple of dt (the horizon may therefore grow slightly).
    N_timesteps = math.ceil(total_integration_time / dt)
    total_integration_time = N_timesteps * dt

    print('Start solving the BoxModelSystem...')
    print('- total integration time: {}'.format(total_integration_time))
    print('- dt (time step): {}'.format(dt))
    print('- number of time steps: {}'.format(N_timesteps))

    # Zero with the correct time units (pint Quantity arithmetic).
    time = total_integration_time * 0
    sol = bs_solution.Solution(system, N_timesteps, dt)

    # Save the initial state (t = 0) of every box to the solution dataframe.
    for box in system.box_list:
        sol.df.loc[0, (box.name, 'mass')] = box.fluid.mass.magnitude
        sol.df.loc[0, (box.name, 'volume')] = \
            system.get_box_volume(box).magnitude
        for variable in system.variable_list:
            var_name = variable.name
            sol.df.loc[0, (box.name, var_name)] = \
                box.variables[var_name].mass.magnitude

    timesteps_since_last_save = 0
    progress = 0
    for timestep in range(N_timesteps):
        # Report progress in 10% increments of processed timesteps.
        progress_old = progress
        progress = int(float(timestep) / float(N_timesteps) * 10) * 10.0
        if progress != progress_old:
            print("{}%".format(progress))

        # TODO: the save_frequency parameter promises pickling of
        # intermediate state, but no saving is implemented here yet; the
        # counter is only maintained so the save hook can be added later.
        if timesteps_since_last_save >= save_frequency:
            timesteps_since_last_save = 1
        else:
            timesteps_since_last_save += 1

        time += dt

        ##################################################
        # Calculate Mass fluxes
        ##################################################
        dm, f_flow = _calculate_mass_flows(system, time, dt)

        ##################################################
        # Calculate Variable changes due to PROCESSES,
        # REACTIONS, FUXES and FLOWS
        ##################################################
        dvar = _calculate_changes_of_all_variables(
            system, time, dt, f_flow)

        ##################################################
        # Apply changes to Boxes and save values to
        # Solution instance
        ##################################################
        # NOTE(review): results are written at row index `timestep`, so the
        # first iteration (timestep == 0) overwrites the initial state saved
        # above -- confirm whether this off-by-one is intended or whether
        # `timestep + 1` should be used here.
        for box in system.box_list:
            # Write mass changes to the box object itself.
            box.fluid.mass += dm[box.id]
            # Record mass and volume in the Solution instance.
            sol.df.loc[timestep, (box.name, 'mass')] = \
                box.fluid.mass.magnitude
            sol.df.loc[timestep, (box.name, 'volume')] = \
                system.get_box_volume(box).magnitude
            for variable in system.variable_list:
                var_name = variable.name
                system.boxes[box.name].variables[var_name].mass += \
                    dvar[box.id, variable.id]
                sol.df.loc[timestep, (box.name, var_name)] = \
                    box.variables[var_name].mass.magnitude

    # Report total wall-clock time spent in this function.
    func_end_time = time_module.time()
    print(
        'Function "solve(...)" used {:3.3f}s'.format(
            func_end_time - func_start_time))
    return sol
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.