| content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M) |
|---|---|---|
def parse_variable_char(packed):
""" Map a 6-bit packed char to ASCII """
packed_char = packed
if packed_char == 0:
return ""
if 1 <= packed_char <= 10:
return chr(ord('0') - 1 + packed_char)
elif 11 <= packed_char <= 36:
return chr(ord('A') - 11 + packed_char)
elif 37 <= packed_char <= 62:
return chr(ord('a') - 37 + packed_char)
else:
return "_"
|
e4ff95bca48ae97a22c20dda4fd82e082c32be27
| 3,639,800
|
def get_feature_vector(feature_id, cohort_id_array):
"""
Fetches the data from BigQuery tables for a given feature identifier and
one or more stored cohorts. Returns the intersection of the samples defined
by the feature identifier and the stored cohort.
Each returned data point is represented as a dict containing patient, sample and
aliquot barcodes, and the value as defined by the feature identifier.
Args:
feature_id: Feature identifier
cohort_id_array: Array of cohort identifiers (integers)
Returns:
        A tuple of the provider's value type and the data as an array of dicts.
"""
provider = FeatureProviderFactory.from_feature_id(feature_id)
cohort_settings = settings.GET_BQ_COHORT_SETTINGS()
result = provider.get_data(cohort_id_array, cohort_settings.dataset_id, cohort_settings.table_id)
items = []
for data_point in result:
data_item = {key: data_point[key] for key in ['case_id', 'sample_id', 'aliquot_id']}
value = provider.process_data_point(data_point)
# TODO refactor missing value logic
if value is None:
value = 'NA'
data_item['value'] = value
items.append(data_item)
return provider.get_value_type(), items
|
9aee52ef43f964d8d524390fbd5bdc49e91dcdf2
| 3,639,801
|
def analyse(tx):
"""
Analyses a given set of features. Marks the features with zero
variance as the features to be deleted from the data set. Replaces
    each instance of a null (-999) valued feature point with the median
    of the non-null valued feature points. Also handles the outliers
by clipping the very large and very small features.
Args:
tx: the numpy array representing the given set of features
Returns:
columns_to_remove: indices of the features with zero variance,
which will be removed from the numpy array
"""
num_cols = tx.shape[1]
print('\nNumber of columns in the data matrix: ', num_cols)
columns_to_remove = []
print('Analysis for data:\n')
for col in range(num_cols):
current_col = tx[:, col]
if len(np.unique(current_col)) == 1:
print('The column with index ', col, ' is all the same, it will be deleted.')
columns_to_remove.append(col)
else:
current_col[current_col == -999] = np.median(current_col[current_col != -999])
# Handling the outliers
std_current_col = np.std(current_col)
mean_current_col = np.mean(current_col)
lower_bound = mean_current_col - 2 * std_current_col
upper_bound = mean_current_col + 2 * std_current_col
current_col[current_col < lower_bound] = lower_bound
current_col[current_col > upper_bound] = upper_bound
            print('null values in the ', col, ' indexed column are replaced with the median and outliers are handled.')
return columns_to_remove
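# Illustrative usage (added sketch, assuming numpy is imported as np, which the function
# above already requires): the constant column is flagged for removal, and -999 values
# in the other column are replaced in place.
demo = np.array([[1.0, 5.0],
                 [1.0, -999.0],
                 [1.0, 7.0],
                 [1.0, 6.0]])
assert analyse(demo) == [0]
assert -999.0 not in demo[:, 1]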
|
2bfd46b2cb70da9822a4a86f588e187ab941ea88
| 3,639,802
|
import regex
def validate_word_syntax(word):
"""
This function is designed to validate that the syntax for
a string variable is acceptable.
    A valid format is English words that contain only alphabetic
    characters, hyphens, and whitespace.
:param word: string to validate
:return: boolean true or false
"""
if len(word) == 0:
return False
else:
temp = regex.match(r'^[a-zA-Z-\s]*$', word.strip())
if temp:
return True
else:
return False
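# Illustrative usage (added example): letters, hyphens and internal whitespace pass,
# digits and empty strings do not.
assert validate_word_syntax("well-known") is True
assert validate_word_syntax("ice cream") is True
assert validate_word_syntax("3rd") is False
assert validate_word_syntax("") is False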
|
47b732ffc0c2c91c5a092d7367b49bb8946697be
| 3,639,803
|
def next_page(context):
"""
Get the next page for signup or login.
The query string takes priority over the template variable and the default
is an empty string.
"""
if "next" in context.request.GET:
return context.request.GET["next"]
if "next" in context.request.POST:
return context.request.POST["next"]
if "next" in context:
return context["next"]
return ""
|
6abc1c8ef260366e53f335a27ee42f0356c91b63
| 3,639,804
|
import numpy
def make_train_test_sets(input_matrix, label_matrix, train_per_class):
"""Return ((training_inputs, training_labels), (testing_inputs, testing_labels)).
Args:
input_matrix: attributes matrix. Each row is sample, each column is attribute.
label_matrix: labels matrix. Each row is sample, each column is label.
train_per_class: Number of samples for each class in training set.
"""
training_inputs = []
training_labels = []
testing_inputs = []
testing_labels = []
label_counts = {}
# Add each row to training or testing set depending on count of labels
for input_, label in zip(input_matrix, label_matrix):
key = tuple(label)
try:
count = label_counts[key]
except KeyError:
# First time seeing label, count is 0
count = 0
if count < train_per_class:
# Still need more training samples for this label
training_inputs.append(input_)
training_labels.append(label)
else:
# We have enough training samples for this label,
# add to testing set instead
testing_inputs.append(input_)
testing_labels.append(label)
label_counts[key] = count + 1
if testing_inputs == []:
raise ValueError('train_per_class too high, no testing set')
return ((numpy.array(training_inputs), numpy.array(training_labels)),
(numpy.array(testing_inputs), numpy.array(testing_labels)))
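# Illustrative usage (added example): with train_per_class=1 the first sample of each
# class goes to the training set and the second to the testing set.
inputs = numpy.array([[0.0], [0.1], [1.0], [1.1]])
labels = numpy.array([[0], [0], [1], [1]])
(train_X, train_y), (test_X, test_y) = make_train_test_sets(inputs, labels, 1)
assert train_X.shape == (2, 1) and test_X.shape == (2, 1)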
|
bd71f48ed9405a89dfa42b3cb6cfe45b064a6b4d
| 3,639,805
|
def add_coords_table(document: Document, cif: CifContainer, table_num: int):
"""
Adds the table with the atom coordinates.
:param document: The current word document.
:param cif: the cif object from CifContainer.
    :param table_num: number of the previous table; it is incremented and used for this table.
    :return: The updated table number.
"""
atoms = list(cif.atoms())
table_num += 1
headline = "Table {}. Atomic coordinates and ".format(table_num)
h = document.add_heading(headline, 2)
h.add_run('U').font.italic = True
h.add_run('eq').font.subscript = True
h.add_run('{}[{}'.format(protected_space, angstrom))
h.add_run('2').font.superscript = True
h.add_run('] for {}'.format(cif.block.name))
coords_table = document.add_table(rows=len(atoms) + 1, cols=5, style='Table Grid')
# Atom x y z U(eq)
head_row = coords_table.rows[0]
head_row.cells[0].paragraphs[0].add_run('Atom').bold = True
px = head_row.cells[1].paragraphs[0]
ar = px.add_run('x')
ar.bold = True
ar.italic = True
py = head_row.cells[2].paragraphs[0]
ar = py.add_run('y')
ar.bold = True
ar.italic = True
pz = head_row.cells[3].paragraphs[0]
ar = pz.add_run('z')
ar.bold = True
ar.italic = True
pu = head_row.cells[4].paragraphs[0]
ar = pu.add_run('U')
ar.bold = True
ar.italic = True
ar2 = pu.add_run('eq')
ar2.bold = True
ar2.font.subscript = True
# having a list of column cells before is *much* faster!
col0_cells = coords_table.columns[0].cells
col1_cells = coords_table.columns[1].cells
col2_cells = coords_table.columns[2].cells
col3_cells = coords_table.columns[3].cells
col4_cells = coords_table.columns[4].cells
rowidx = 1
for at in atoms:
c0, c1, c2, c3, c4 = col0_cells[rowidx], col1_cells[rowidx], col2_cells[rowidx], \
col3_cells[rowidx], col4_cells[rowidx]
rowidx += 1
c0.text = at[0] # label
c1.text = (str(at[2])) # x
c2.text = (str(at[3])) # y
c3.text = (str(at[4])) # z
c4.text = (str(at[7])) # ueq
p = document.add_paragraph()
p.style = document.styles['tabunterschr']
p.add_run('U').font.italic = True
p.add_run('eq').font.subscript = True
p.add_run(' is defined as 1/3 of the trace of the orthogonalized ')
p.add_run('U').font.italic = True
ij = p.add_run('ij')
ij.font.subscript = True
ij.font.italic = True
p.add_run(' tensor.')
set_column_width(coords_table.columns[0], Cm(2.3))
set_column_width(coords_table.columns[1], Cm(2.8))
set_column_width(coords_table.columns[2], Cm(2.8))
set_column_width(coords_table.columns[3], Cm(2.8))
set_column_width(coords_table.columns[4], Cm(2.8))
document.add_paragraph()
return table_num
|
6b351138624bd530739d199898fbc4f0cfb56bb1
| 3,639,806
|
import numpy
def bz(xp, yp, zp, spheres):
"""
Calculates the z component of the magnetic induction produced by spheres.
.. note:: Input units are SI. Output is in nT
Parameters:
* xp, yp, zp : arrays
The x, y, and z coordinates where the anomaly will be calculated
* spheres : list of :class:`fatiando.mesher.Sphere`
The spheres. Spheres must have the physical property
``'magnetization'``. Spheres without ``'magnetization'`` will be
ignored. The ``'magnetization'`` must be a vector.
Returns:
* bz: array
The z component of the magnetic induction
Example:
>>> from fatiando import mesher, gridder, utils
>>> # Create a model formed by two spheres
>>> # The magnetization of each sphere is a vector
>>> model = [
... mesher.Sphere(1000, 1000, 600, 500,
... {'magnetization':utils.ang2vec(13, -10, 28)}),
... mesher.Sphere(-1000, -1000, 600, 500,
... {'magnetization':utils.ang2vec(10, 70, -5)})]
>>> # Create a regular grid at 100m height
>>> shape = (4, 4)
>>> area = (-3000, 3000, -3000, 3000)
>>> xp, yp, zp = gridder.regular(area, shape, z=-100)
>>> # Calculate the bz component
>>> for b in bz(xp, yp, zp, model):
... print '%15.8e' % b
-1.13152279e+01
-3.24362266e+01
-1.63235805e+01
-4.48136597e+00
-1.27492012e+01
2.89101261e+03
-1.30263918e+01
-9.64182996e+00
-6.45566985e+00
3.32987598e+01
-7.08905624e+02
-5.55139945e+01
-1.35745203e+00
2.91949888e+00
-2.78345635e+01
-1.69425703e+01
"""
    if not (xp.shape == yp.shape == zp.shape):
        raise ValueError("Input arrays xp, yp, and zp must have same shape!")
    size = len(xp)
    res = numpy.zeros(size, dtype=float)
for sphere in spheres:
if sphere is None or ('magnetization' not in sphere.props):
continue
# Get the magnetization vector components
mx, my, mz = sphere.props['magnetization']
_sphere.bz(xp, yp, zp, sphere.x, sphere.y, sphere.z, sphere.radius,
mx, my, mz, res)
res *= CM * T2NT
return res
|
16d85978e50de16ab6af91c6ee83392b7fa43e3e
| 3,639,807
|
def update_stocks():
"""
method to update the data (used by the spark service)
:return:
"""
global stocks
body = request.get_json(silent=True)
'''
{
"data" : [
{
"symbol" : "string",
"ask_price" : "string",
"last_sale_time" : "string",
"timestamp" : 0
}
],
"companies" : {
"symbol_x" : "string"
}
}
'''
data = body['data']
companies = body['companies']
for stock_data in data:
symbol = stock_data['symbol']
# if stock does not exist
if symbol not in stocks:
obj = {
'companyName': companies[symbol],
'stockPrices': [
{
'value': stock_data['ask_price'],
'timestamp': stock_data['timestamp']
}
]
}
stocks[symbol] = obj
else:
# add to existing stock
stocks[symbol]['stockPrices'].append({'value': stock_data['ask_price'],
'timestamp': stock_data['timestamp']})
return "{}", 200
|
f1db3a9a1ba7cd20aad2e8ddc0bea66997ef11dd
| 3,639,808
|
import itertools
from typing import Iterable, Sequence, TypeVar

T = TypeVar("T")
def chunks_from_iterable(iterable: Iterable[T], size: int) -> Iterable[Sequence[T]]:
"""Generate adjacent chunks of data"""
it = iter(iterable)
return iter(lambda: tuple(itertools.islice(it, size)), ())
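# Illustrative usage (added example): the last chunk may be shorter than `size`.
assert list(chunks_from_iterable(range(5), 2)) == [(0, 1), (2, 3), (4,)]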
|
76bf5a8742f082da860caec477c72338fcc4510e
| 3,639,809
|
import os

import numpy as np
import pydicom as dicom  # assumed source of `dicom.read_file` used below; older code may use `import dicom`
def load_ct_phantom(phantom_dir):
"""
load the CT data from a directory
Parameters
----------
phantom_dir : str
        The directory containing the CT data to load
Returns
-------
ndarray
the CT data array
list
the spacing property for this CT
"""
# dicom parameters
dcm = dicom.read_file(phantom_dir + "image_0")
row_pixel = dcm.Rows
col_pixel = dcm.Columns
row_pixel_spacing = np.round(dcm.PixelSpacing[0], 2)
col_pixel_spacing = np.round(dcm.PixelSpacing[1], 2)
slice_thickness = np.round(dcm.SliceThickness, 2)
num_slices = len(os.listdir(phantom_dir))
phantom = np.zeros((num_slices, row_pixel, col_pixel))
for i in range(num_slices):
dcm = dicom.read_file(phantom_dir + "image_" + str(i))
dcm.image = dcm.pixel_array * dcm.RescaleSlope + dcm.RescaleIntercept
phantom[i, :, :] = dcm.image.copy()
# Volume Parameters:
volume_size = [slice_thickness, row_pixel_spacing, col_pixel_spacing]
return phantom, volume_size
|
6344889d0b363062754191282d1c96c29f2bea2d
| 3,639,810
|
from decimal import Decimal
from typing import Dict

from django.db.models import QuerySet  # assumed origin of the QuerySet annotation
def summary_overall(queryset: QuerySet) -> Dict[str, Decimal]:
"""Summarizes how much money was spent"""
amount_sum = sum([value[0] for value in queryset.values_list('amount')])
return {'overall': amount_sum}
|
6d2d276ca891f99ac171001c99ff0d90b530a5b1
| 3,639,811
|
import torch
from torch import nn, optim
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader
def _get_trained_ann(train_exo: "ndarray", train_meth: "ndarray") -> "QdaisANN2010":
"""Return trained ANN."""
train_data = BiogasData(train_exo, train_meth)
ann = QdaisANN2010(train_data.train_exo.shape[1])
try:
ann.load_state_dict(torch.load("./assets/ann.pt"))
except IOError:
optimizer = optim.Adam(ann.parameters())
scheduler = ReduceLROnPlateau(optimizer)
criterion = nn.MSELoss()
train_loader = DataLoader(train_data, batch_size=64, shuffle=True)
for _ in range(1024): # number of epochs
running_loss = 0
for _, (inputs, labels) in enumerate(train_loader):
optimizer.zero_grad()
loss = criterion(ann(inputs), labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
scheduler.step(running_loss)
print(f"Running loss: {running_loss / len(train_data)}", end="\r", flush=True)
torch.save(ann.state_dict(), "./assets/ann.pt")
return ann
|
08bd38450e217c00643ab7fefe1c1860fc5d1ebf
| 3,639,812
|
def pi_float():
"""native float"""
lasts, t, s, n, na, d, da = 0, 3.0, 3, 1, 0, 0, 24
while s != lasts:
lasts = s
n, na = n+na, na+8
d, da = d+da, da+32
t = (t * n) / d
s += t
return s
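# Illustrative usage (added example): the series converges to pi at double precision.
import math
assert abs(pi_float() - math.pi) < 1e-9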
|
8a6a6a5942ddd61ecdf65b782bb2fc0f0519ddb5
| 3,639,813
|
def iso3_to_country(iso3):
""" Take user input and convert it to the short version of the country name """
if iso3 == 'Global':
return 'Global'
country = coco.convert(names=iso3, to='name_short')
return country
|
2c2904ba8befe802d531b371f046ff4a3ba4db22
| 3,639,814
|
def test_bivariate(N, n_neighbors, rng, noise):
"""Test with bivariate normal variables"""
mu = np.zeros(2)
cov = np.array([[1., 0.8], [0.8, 1.0]])
xy_gauss = rng.multivariate_normal(mu, cov, size=N)
x, y = xy_gauss[:, 0], xy_gauss[:, 1]
z = rng.normal(size=N)
cmi_analytic = -0.5 * np.log(det(cov))
cmi = compute_cmi(x, y, z, n_neighbors, noise)
mi = compute_mi(x, y, n_neighbors, noise)
return [cmi, mi, cmi_analytic]
|
827b968c6333402066d1fa4282ce90c2cdb175f5
| 3,639,815
|
import math
def osm_tile_number_to_latlon(xtile, ytile, zoom):
""" Returns the latitude and longitude of the north west corner of a tile, based on the tile numbers and the zoom level"""
n = 2.0 ** zoom
lon_deg = xtile / n * 360.0 - 180.0
lat_rad = math.atan(math.sinh(math.pi * (1 - 2 * ytile / n)))
lat_deg = math.degrees(lat_rad)
return (lat_deg, lon_deg)
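# Illustrative usage (added example): tile (0, 0) at zoom 0 covers the whole map, so its
# north-west corner is the Web Mercator latitude limit (~85.05 deg) at longitude -180.
lat, lon = osm_tile_number_to_latlon(0, 0, 0)
assert abs(lat - 85.0511) < 0.001 and lon == -180.0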
|
8afbbb218835311bebcfe6a0d611e0e62bfcffc3
| 3,639,816
|
def prodtab_111():
"""
produces angular distance from [1,1,1] plane with other planes up to (321) type
"""
listangles_111 = [anglebetween([1, 1, 1], elemref) for elemref in LISTNEIGHBORS_111]
return np.array(listangles_111)
|
cb49c087e5b5090bca788f9c33862c37c42c2f1c
| 3,639,817
|
def cuda_cos(a):
""" Trigonometric cosine of GPUArray elements.
Parameters:
a (gpu): GPUArray with elements to be operated on.
Returns:
gpu: cos(GPUArray)
Examples:
>>> a = cuda_cos(cuda_give([0, pi / 4]))
array([ 1., 0.70710678])
>>> type(a)
<class 'pycuda.gpuarray.GPUArray'>
"""
return pycuda.cumath.cos(a)
|
e411bde08f133c8f9109ff5815f44a3ff5262282
| 3,639,818
|
from typing import Counter
def top_diffs(spect: list, num_acids: int) -> list:
"""Finds at least num_acids top differences in [57, 200]
Accepts ties
:param spect: a cyclic spectrum to find differences in
:type spect: list (of ints)
:type keep: int
:returns: the trimmed leaderboard
:rtype: list (of lists (of ints))
"""
# must be sorted & start with 0
spect.sort()
if spect[0] != 0:
spect.insert(0, 0)
diffs = [spect[i] - spect[j] for i in range(1, len(spect))
for j in range(i - 1, -1, -1)]
acids = []
last_count = 0
for mass, count in Counter(diffs).most_common():
# leave if over min AND not tying min
if len(acids) >= num_acids and count < last_count:
break
# restricted weight for amino acid masses
if 57 <= mass <= 200:
acids.append(mass)
last_count = count
return acids
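# Illustrative usage (added example): for the toy cyclic spectrum [0, 114, 128, 242]
# the two most frequent differences in [57, 200] are 114 and 128.
assert sorted(top_diffs([0, 114, 128, 242], 2)) == [114, 128]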
|
1ca2b08f6ecbf69b1ab2189b1cbbff9b4e1c2e8d
| 3,639,819
|
import typing
def form_sized_range(range_: Range, substs) -> typing.Tuple[
SizedRange, typing.Optional[Symbol]
]:
"""Form a sized range from the original raw range.
The when a symbol exists in the ranges, it will be returned as the second
result, or the second result will be none.
"""
if not range_.bounded:
raise ValueError(
'Invalid range for optimization', range_,
'expecting explicit bound'
)
lower, upper = [
i.xreplace(substs)
for i in [range_.lower, range_.upper]
]
size_expr = upper - lower
size, symb = form_size(size_expr)
return SizedRange(range_.label, size), symb
|
ac0d4299198f0e5611acb8167107f56f2b49bb0f
| 3,639,820
|
def create_html_app(): # pragma: no cover
"""Returns WSGI app that serves HTML pages."""
app = webapp2.WSGIApplication(
handlers.get_frontend_routes(), debug=utils.is_local_dev_server())
gae_ts_mon.initialize(app, cron_module='backend')
return app
|
c1d9320b9f795466f28a511180f9949ec9c6ecbd
| 3,639,821
|
def line_integrals(vs, uloc, vloc, kind="same"):
"""
calculate line integrals along all islands
Arguments:
kind: "same" calculates only line integral contributions of an island with itself,
while "full" calculates all possible pairings between all islands.
"""
if kind == "same":
s1 = s2 = (slice(None), slice(None), slice(None))
elif kind == "full":
s1 = (slice(None), slice(None), np.newaxis, slice(None))
s2 = (slice(None), slice(None), slice(None), np.newaxis)
else:
raise ValueError("kind must be 'same' or 'full'")
east = vloc[1:-2, 1:-2, :] * vs.dyu[np.newaxis, 1:-2, np.newaxis] \
+ uloc[1:-2, 2:-1, :] \
* vs.dxu[1:-2, np.newaxis, np.newaxis] \
* vs.cost[np.newaxis, 2:-1, np.newaxis]
west = -vloc[2:-1, 1:-2, :] * vs.dyu[np.newaxis, 1:-2, np.newaxis] \
- uloc[1:-2, 1:-2, :] \
* vs.dxu[1:-2, np.newaxis, np.newaxis] \
* vs.cost[np.newaxis, 1:-2, np.newaxis]
north = vloc[1:-2, 1:-2, :] * vs.dyu[np.newaxis, 1:-2, np.newaxis] \
- uloc[1:-2, 1:-2, :] \
* vs.dxu[1:-2, np.newaxis, np.newaxis] \
* vs.cost[np.newaxis, 1:-2, np.newaxis]
south = -vloc[2:-1, 1:-2, :] * vs.dyu[np.newaxis, 1:-2, np.newaxis] \
+ uloc[1:-2, 2:-1, :] \
* vs.dxu[1:-2, np.newaxis, np.newaxis] \
* vs.cost[np.newaxis, 2:-1, np.newaxis]
east = np.sum(east[s1] * (vs.line_dir_east_mask[1:-2, 1:-2] &
vs.boundary_mask[1:-2, 1:-2])[s2], axis=(0, 1))
west = np.sum(west[s1] * (vs.line_dir_west_mask[1:-2, 1:-2] &
vs.boundary_mask[1:-2, 1:-2])[s2], axis=(0, 1))
north = np.sum(north[s1] * (vs.line_dir_north_mask[1:-2, 1:-2]
& vs.boundary_mask[1:-2, 1:-2])[s2], axis=(0, 1))
south = np.sum(south[s1] * (vs.line_dir_south_mask[1:-2, 1:-2]
& vs.boundary_mask[1:-2, 1:-2])[s2], axis=(0, 1))
return east + west + north + south
|
8808c3afd4374465bd64aa0db658d8843a74a4da
| 3,639,822
|
def crop_black_borders(image, threshold=0):
"""Crops any edges below or equal to threshold
Crops blank image to 1x1.
Returns cropped image.
"""
if len(image.shape) == 3:
flatImage = np.max(image, 2)
else:
flatImage = image
assert len(flatImage.shape) == 2
rows = np.where(np.max(flatImage, 0) > threshold)[0]
if rows.size:
cols = np.where(np.max(flatImage, 1) > threshold)[0]
image = image[cols[0]: cols[-1] + 1, rows[0]: rows[-1] + 1]
else:
image = image[:1, :1]
return image
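# Illustrative usage (added sketch, assuming numpy is imported as np, which the function
# above already requires): a bright 2x2 patch inside a black 5x5 image is cropped to
# exactly that patch; an all-black image collapses to 1x1.
demo = np.zeros((5, 5), dtype=np.uint8)
demo[1:3, 2:4] = 255
assert crop_black_borders(demo).shape == (2, 2)
assert crop_black_borders(np.zeros((5, 5))).shape == (1, 1)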
|
77ec884f4f173844f5d5124a29bb94eb641e25ec
| 3,639,823
|
def change_password():
"""Allows user to change password"""
# Forget any user_id
session.clear()
# User reached route via POST (as by submitting a form via POST)
if request.method == "POST":
# If password and confirmation don't match, accuse error
if request.form.get("new_password") != request.form.get("new_confirmation"):
flash("The New Password and the Confirmation don't match. Try again.")
return render_template("change.html")
else:
# Query database for username
rows = db.execute(
"SELECT * FROM users WHERE username = ?", request.form.get("username")
)
# Ensure username exists and password is correct
if len(rows) != 1 or not check_password_hash(
rows[0]["hash"], request.form.get("old_password")
):
flash("Invalid username and/or password.")
return render_template("change.html")
else:
                # Hashes the new password before storing it in the database
pass_hash = generate_password_hash(
request.form.get("new_password"),
method="pbkdf2:sha256",
salt_length=8,
)
                # Store the new password hash for the username in the database
db.execute(
"UPDATE users SET hash = ? WHERE username = ?",
pass_hash,
request.form.get("username"),
)
# Display a flash message that the password was changed
flash("Password changed!")
return render_template("change.html")
# Request method = GET
else:
return render_template("change.html")
|
afff43726d3e30a1ea09af20df1099b00c14b843
| 3,639,824
|
def add_placeholders(components):
"""Add placeholders for missing DATA/INSTANCE components"""
headers = [s[:2] for s in components]
for prefix in ("CD", "CR"):
if prefix not in headers:
components.append(prefix + ("C" * 11))
return components
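# Illustrative usage (added example): a component list that only has a "CD" entry gets a
# "CR" placeholder appended (the prefix padded with eleven 'C' characters).
comps = ["CD" + "X" * 11]
assert add_placeholders(comps) == ["CD" + "X" * 11, "CR" + "C" * 11]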
|
303f1590042acc60aa753e5e317417de01fafafc
| 3,639,825
|
from ..items import HieroClipItem
def clipsToHieroClipItems(clips):
"""
@itemUsage hiero.items.HieroClipItem
"""
clipItems = []
if clips:
for c in clips:
i = HieroClipItem(c)
clipItems.append(i)
return clipItems
|
bf4c6aaff649796cc1a24eb7ba3102fb01178ab3
| 3,639,826
|
import hashlib
def md5(s, raw_output=False):
"""Calculates the md5 hash of a given string"""
res = hashlib.md5(s.encode())
if raw_output:
return res.digest()
return res.hexdigest()
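# Illustrative usage (added example): hex digest by default, 16 raw bytes on request.
assert md5("hello") == "5d41402abc4b2a76b9719d911017c592"
assert len(md5("hello", raw_output=True)) == 16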
|
238c2a6c6b06a046de86e514698c7ef5622f770b
| 3,639,827
|
def new_Q(T, ms, Ps, G):
"""TODO DOC STRING"""
print("==> Tuning Q")
SIGMA = np.zeros_like(Ps[0], dtype=np.complex128)
PHI = np.zeros_like(Ps[0], dtype=np.complex128)
C = np.zeros_like(Ps[0], dtype=np.complex128)
shape = (Ps[0].shape[0], 1)
for k in range(1, T + 1):
m1 = gains_vector(ms[k]).reshape(shape)
m2 = gains_vector(ms[k - 1]).reshape(shape)
SIGMA += Ps[k] + m1 @ m1.conj().T
PHI += Ps[k - 1] + m2 @ m2.conj().T
C += Ps[k] @ G[k - 1].conj().T + m1 @ m2.conj().T
SIGMA *= 1/T
PHI *= 1/T
C *= 1/T
# Diagonal real-values
Q_diag = np.diag(SIGMA - C - C.conj().T + PHI).real
# Return new Q
return np.diag(Q_diag).real
|
a21b96e4be7f1c9f6ba892e54ef75b03e4eb9e42
| 3,639,828
|
def _get_pk_message_increase(cache_dict: dict,
project_list: list) -> str:
"""根据项目列表构建增量模式下PK播报的信息.
### Args:
``cache_dict``: 增量计算的基础.\n
``project_list``: PK的项目列表.\n
### Result:
``message``: PK进展的播报信息.\n
"""
amount_dict = _get_pk_amount(project_list)
increase_dict = dict()
for idol in amount_dict.keys():
increase_dict[idol] = amount_dict[idol] - cache_dict[idol]
increase_dict[idol] = round(increase_dict[idol], 2)
return _build_increase_pk_message(amount_dict, increase_dict)
|
9343626fe8208e5e08c39abda9c6b26252ac6b2b
| 3,639,829
|
def flatten(dic, keep_iter=False, position=None):
"""
Returns a flattened dictionary from a dictionary of nested dictionaries and lists.
`keep_iter` will treat iterables as valid values, while also flattening them.
"""
child = {}
if not dic:
return {}
for k, v in get_iter(dic):
if isstr(k):
k = k.replace('.', '_')
if position:
item_position = '%s.%s' % (position, k)
else:
item_position = '%s' % k
if is_iter(v):
child.update(flatten(dic[k], keep_iter, item_position))
if keep_iter:
child[item_position] = v
else:
child[item_position] = v
return child
|
4597a0330468e955cadda37f89f73bed7db1ecb5
| 3,639,830
|
import json
def game_get_state():
"""The ``/game/state`` endpoint requires authentication and expects no
other arguments.
It can be reached at ``/game/state?secret=<API_SECRET>``.
It is used to retrieve the current state of the game.
The JSON response looks like::
{
"state_id": int,
"game_id": int,
"services": [List of {"service_id": int,
"service_name": string,
"port": int}],
"scripts": [List of {"script_id": int,
"upload_id": int,
"type": ("exploit", "benign", "getflag",
"setflag"),
"script_name": string,
"service_id": int}]
"run_scripts": [{"team_id": int (team_id to run scripts against),
"run_list": [Ordered list of int script_ids]}],
"state_expire": int (approximate remaining seconds in this tick),
}
:return: a JSON dictionary providing information on the current state.
"""
cursor = mysql.cursor()
# Get basic information about the game, like tick info and services
to_return = {}
current_tick, tick_start, seconds_to_next_tick, _ = get_current_tick(cursor)
to_return["state_id"] = current_tick
to_return["state_expire"] = seconds_to_next_tick
cursor.execute("SELECT id FROM game LIMIT 1")
game_cursor = cursor.fetchone()
if game_cursor is None:
to_return["num"] = "621"
to_return["msg"] = "No game is currently running..."
return json.dumps(to_return)
to_return["game_id"] = game_cursor["id"]
cursor.execute("""SELECT services.id AS service_id,
services.name as service_name,
services.port as port,
current_state as state
FROM services""")
to_return["services"] = cursor.fetchall()
# Determine which scripts exists and which should be run
cursor.execute("""SELECT id AS script_id, upload_id, filename AS script_name,
type, service_id,
current_state as state
FROM scripts""")
to_return["scripts"] = cursor.fetchall()
cursor.execute("""SELECT team_id, json_list_of_scripts_to_run AS json_list
FROM team_scripts_run_status
WHERE team_scripts_run_status.tick_id = %s""",
(current_tick,))
run_scripts = []
for team_scripts_to_run in cursor.fetchall():
team_id = team_scripts_to_run["team_id"]
run_list = json.loads(team_scripts_to_run["json_list"])
run_scripts.append({"team_id": team_id,
"run_list": run_list})
to_return["run_scripts"] = run_scripts
return json.dumps(to_return)
|
b5b3bda433764413fb6116b5b8f13d6e9dfef866
| 3,639,831
|
def new(key, mode, iv=None):
"""Return a `Cipher` object that can perform ARIA encryption and
decryption.
ARIA is a block cipher designed in 2003 by a large group of South
Korean researchers. In 2004, the Korean Agency for Technology and
Standards selected it as a standard cryptographic technique.
Parameters:
        key (bytes): The key used to encrypt and decrypt.
mode (int): The mode of operation of the cipher.
iv (bytes or None): The initialization vector (IV). The IV is
required for every mode but ECB and CTR where it is ignored.
If not set, the IV is initialized to all 0, which should not
be used for encryption.
"""
mode = _cipher.Mode(mode)
if mode in {
_cipher.Mode.ECB,
_cipher.Mode.CBC,
# _cipher.Mode.CFB128,
_cipher.Mode.CTR,
_cipher.Mode.GCM,
}:
if len(key) * 8 not in {128, 192, 256}:
raise TLSError(
msg="key size must 16, 24, or 32 bytes, got %i" % len(key)
)
else:
raise TLSError(msg="unsupported mode %r" % mode)
name = ("ARIA-%i-%s" % (len(key) * 8, mode.name)).encode("ascii")
return _cipher.Cipher(name, key, mode, iv)
|
8906d9f5efe36349fc520c8022bf52d888bd37ea
| 3,639,832
|
import logging
def keggapi_info(database, verbose=True, force_download=False, return_format = None, return_url = False):
"""KEGG REST API interface for INFO command
Displays information on a given database
for further info read https://www.kegg.jp/kegg/rest/keggapi.html
Parameters
----------
database : str
database of which you want to obtain infos on
verbose : bool
if set to False displays only the first 4 lines of text (default is True)
force_download : bool
forces overwriting on previous cached files (default is False)
    return_format : str
        optional, specify the return format, "str" or "dict" (default is None)
    return_url : bool
        optional, if True return only the request URL instead of querying KEGG (default is False)
Returns
-------
info_str : str
optional, plain text response of API INFO command
info_dict : dict
optional, parsed response of API INFO as a dictionary
"""
valid_return_formats = (None, "str", "dict")
if return_format not in valid_return_formats:
raise ValueError("invalid {} format for keggapi_info return".format(return_format))
org = get_organism_codes()
if database not in db_categories + org:
raise KEGGKeyError(
database, msg="source database {} is not a valid database".format(database)
)
url = "http://rest.kegg.jp/info/{}".format(database)
if return_url == True:
return url
filename = database + "_info"
infos = download_textfile(url, filename, verbose=False, force_download = force_download)
if verbose == True:
logging.info("Infos on %s from KEGG:\n",database)
if return_format == None:
if verbose == False:
print("\n".join(infos.splitlines()[1:4]))
else:
print(infos)
elif return_format == "str":
return infos
elif return_format == "dict":
processed_dict = process_request_text(infos, mode = "columns")
return processed_dict
|
366e4910d54e725e2f8dc8399e7793604a767449
| 3,639,833
|
def round_to_thirty(str_time):
"""STR_TIME is a time in the format HHMM. This function rounds down to the nearest half hour."""
minutes = int(str_time[2:])
if minutes//30 == 1:
rounded = "30"
else:
rounded = "00"
return str_time[0:2] + rounded
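# Illustrative usage (added example): "HHMM" strings are rounded down to the nearest
# half hour.
assert round_to_thirty("0945") == "0930"
assert round_to_thirty("0915") == "0900"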
|
37e8473dbb6e91fc47a03491c421967db231d4d0
| 3,639,834
|
import os
import shutil
def mkdir(path, reset=False):
"""Checks if directory exists and if not, create one.
Parameters
----------
reset: erase the content of the directory if exists
Returns
-------
the path
"""
if reset and os.path.exists(path):
shutil.rmtree(path)
try:
os.makedirs(path)
except FileExistsError:
pass
return path
|
64a868231cd3bd7199eef2ad19b2b7296e0c32fe
| 3,639,835
|
def remove_uoms(words):
"""
Remove uoms in the form of e.g. 1000m 1543m3
Parameters
----------
words: list of words to process
Returns
-------
A list of words where possible uom have been removed
"""
returnWords=[]
for word in words:
word=word.replace('.', '', 1)
word=word.replace(',', '', 1)
if word[0:len(word)-1].isnumeric()==False and word[0:len(word)-1].isdecimal()==False:
#we do not have a match on e.g. 1543m
if word[0:len(word)-2].isnumeric()==False and word[0:len(word)-2].isdecimal()==False:
#we do not have a match on e.g. 1543m3
#add it
returnWords.append(word)
return returnWords
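# Illustrative usage (added example): tokens that look like a number plus a short unit
# suffix (e.g. "1000m", "1543m3") are dropped, plain words are kept.
assert remove_uoms(["1000m", "depth", "1543m3"]) == ["depth"]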
|
cdb2caf274a58b61c57ebe4fba167ec6275ddf6f
| 3,639,836
|
import logging
import sys
def parse_region(reg: str) -> tuple:
"""
Return a pair of slices (slice1, slice2) corresponding
to the region give as input in numpy slice string format
If the region can't be parsed sys.exit() is called
"""
try:
slices = str_to_slices(reg)
except ValueError as ve:
logging.error("ValueError: %s", ve)
logging.error("Bad region spec: %s", reg)
sys.exit(1)
if len(slices) != 2:
logging.error("Bad region spec: %s", reg)
sys.exit(1)
return slices
|
4d2ab5e546c69f6e5a9ee9560052ae395d189e95
| 3,639,837
|
from unittest.mock import patch
async def create_wall_connector_entry(
hass: HomeAssistant, side_effect=None
) -> MockConfigEntry:
"""Create a wall connector entry in hass."""
entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_HOST: "1.2.3.4"},
options={CONF_SCAN_INTERVAL: 30},
)
entry.add_to_hass(hass)
# We need to return vitals with a contactor_closed attribute
# Since that is used to determine the update scan interval
fake_vitals = tesla_wall_connector.wall_connector.Vitals(
{
"contactor_closed": "false",
}
)
with patch(
"tesla_wall_connector.WallConnector.async_get_version",
return_value=get_default_version_data(),
side_effect=side_effect,
), patch(
"tesla_wall_connector.WallConnector.async_get_vitals",
return_value=fake_vitals,
side_effect=side_effect,
), patch(
"tesla_wall_connector.WallConnector.async_get_lifetime",
return_value=None,
side_effect=side_effect,
):
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
return entry
|
5aa8fdc08b13d7fe26df50312f736e9ec1d4bfdd
| 3,639,838
|
def _normalize(data):
"""
Normalizes the data (z-score)
:param data: Data to be normalized
    :return: Normalized data
"""
mean = np.mean(data, axis=0)
sd = np.std(data, axis=0)
    # Guard against zero standard deviation (avoids division by zero for scalar
    # or per-column sd)
    sd = np.where(sd == 0, 1e-7, sd)
    return (data - mean) / sd
|
1fe31f73d9e7fae03997f6caa1a28ebe929e49bd
| 3,639,839
|
import logging
from typing import Any, Dict

import numpy as np
import pandas as pd
def merge_target_airport_configs(
weather_flight_features: pd.DataFrame,
configs: pd.DataFrame,
parameters: Dict[str, Any],
)-> pd.DataFrame:
"""
This function merges actual airport configuration values to the main data frame. Multiple future configuration values
are added as defined by the prediction_lookahead and prediction_delta (prediction_lookahead/prediction_delta columns).
The current configuration is also added to the input data
"""
configs= configs.rename(columns={'start_time': 'timestamp'})
configs = configs[['timestamp', 'airport_configuration_name']]
configs = configs.assign(timestamp_config=configs['timestamp'])
configs = configs.sort_values('timestamp_config', ascending=True)
# Sample configuration data
start_datetime = configs['timestamp_config'].min().ceil("H")
end_datetime = configs['timestamp_config'].max().floor("H")
time_df = sampling_times(parameters['prediction_delta'], start_datetime, end_datetime)
configs_sampled = pd.merge_asof(time_df, configs, on="timestamp", direction="backward")
# TODO: for CLT 93.6% of the data is kept after removing stale configs. For other airports the current logic
# could lead to removing too many rows. Need to keep an eye on the logged value below to see if additional logic
# is needed
# Remove stale configs data
is_stale = (configs_sampled['timestamp'] - configs_sampled['timestamp_config']) \
/ np.timedelta64(1,'h') > parameters['stale_airport_config_th']
# Log rows with stale configs
log = logging.getLogger(__name__)
log.info('Kept {:.1f}% of rows when removing stale airport configuration'.format(
100 * (1-(sum(is_stale) / configs_sampled.shape[0])) ))
configs_sampled.loc[is_stale, 'airport_configuration_name'] = None
configs_sampled.drop(columns=['timestamp_config'], inplace=True)
# Restructure data, add future values
configs_wide = future_values_reshape(configs_sampled,
parameters['prediction_lookahead'],
parameters['prediction_delta'],
'timestamp')
# Add current configuration
configs_wide = pd.merge(configs_wide, configs_sampled, on='timestamp')
# Remove NAs, only removing NAs in current config and first future config
fields_remove_na = ['airport_configuration_name', 'airport_configuration_name' + '_' + str(parameters['prediction_delta'])]
is_na = configs_wide[fields_remove_na].isna().any(axis=1)
configs_wide = configs_wide[is_na == False]
# All future airport configuration columns are stored in a single columns
configs_wide = lookahead_cols_to_single_col(configs_wide, 'airport_configuration_name_')
configs_wide = configs_wide.rename(columns={'airport_configuration_name': 'airport_configuration_name_current'})
# Merge target configuration data
data = pd.merge(weather_flight_features, configs_wide, on='timestamp', how='inner')
return data
|
29fbf3a0dc166a5b97bd86fbe4ffd2f7d03efad1
| 3,639,840
|
def chunks(chunkable, n):
    """ Return a list of successive n-sized chunks from chunkable.
    """
    chunk_list = []
    for i in range(0, len(chunkable), n):
        chunk_list.append(chunkable[i:i + n])
    return chunk_list
|
d59c6afd85705aa1954d7cc0631e98f2e9e5cdcf
| 3,639,841
|
def pass_generate():
"""
    Entry point of the password generator.
"""
func = generate_pass_block
param = {'min_len': 4, 'max_len': 6}
return f'{func(**param)}-{func(**param)}-{func(**param)}'
|
e0bb5378c308e0c60568bb2b3701ee90d5bf6a8e
| 3,639,842
|
from . import analyzer as anl
def lineSpectrum(pos, image, data, width, scale=1, spacing=3, mode="dual"):
"""
Draw sepectrum bars.
:param pos: (x, y) - position of spectrum bars on image
:param image: PIL.Image - image to draw
:param data: 1D array - sound data
    :param width: int - width of spectrum on image
:param scale: number - scaling of bars length
:param spacing: int - spacing between bars
:param mode: dual | bottom | up - direction of bars
"""
count = int(width // spacing)
spectrum_data = anl.fft(data, count)
return line(pos, image, spectrum_data, scale, spacing, mode)
|
5ee27565bc83ef275d4882b44b11a3b7cd1407b3
| 3,639,843
|
def _unique_arXiv(record, extra_data):
"""Check if the arXiv ID is unique (does not already exist in Scoap3)"""
arxiv_id = get_first_arxiv(record)
# search through ES to find if it exists already
if arxiv_id:
result = current_search_client.search(
'scoap3-records-record',
q='arxiv_eprints.value="{}"'.format(arxiv_id)
)['hits']
if result['total'] == 0:
return True, ('ArXiv ID not found. Unique ID.', ), None
else:
            # return all the control numbers in order to check the error
            record_control_numbers = ', '.join(
                str(hit['_source']['control_number'])
                for hit in result['hits']
            )
            return False, ('ArXiv ID already exists. Please check {}'.format(
                record_control_numbers), ), None
return True, ('No arXiv id: Out of the scope of this check', ), None
|
6b19a10400da024c5ad87090c87f6099a4018dab
| 3,639,844
|
def profile(username):
"""
    User profile page. Allows changing the login and password.
    Later on it may also store the user's favourite links.
"""
if username != current_user.nickname:
return redirect(url_for('index'))
types = Types.manager.get_by('', dictionary=True)
return render_template('auth/profile.html', types=types)
|
84bb1304ef5862efdd54ad2402e09d31a8c151a2
| 3,639,845
|
def had_cell_edge(strmfunc, cell="north", edge="north", frac_thresh=0.1,
cos_factor=False, lat_str=LAT_STR, lev_str=LEV_STR):
"""Latitude of poleward edge of either the NH or SH Hadley cell."""
hc_strengths = had_cells_strength(strmfunc, lat_str=lat_str,
lev_str=lev_str)
if cell == "north":
label = "had_cell_nh"
elif cell == "south":
label = "had_cell_sh"
else:
raise ValueError("`cell` must be either 'north' or 'south'; "
f"got {cell}.")
# Restrict to streamfunction at level of the specified cell's maximum.
cell_max = hc_strengths.sel(cell=label)
lat_max = cell_max[lat_str]
lev_max = cell_max[lev_str]
sf_at_max = strmfunc.sel(**{lev_str: float(lev_max), "method": "nearest"})
# Restrict to the latitudes north or south of the max, as specified.
lat = strmfunc[lat_str]
if edge == "north":
which_zero = 0
lat_compar = lat >= lat_max
elif edge == "south":
which_zero = -1
lat_compar = lat <= lat_max
else:
raise ValueError("`edge` must be either 'north' or 'south'; "
f"got {cell}.")
sf_one_side = sf_at_max.where(lat_compar, drop=True)
# Restrict to the latitudes from the max to the nearest point with
# opposite-signed value.
# Apply cubic interpolation in latitude to a refined mesh. Otherwise, the
# cell edge can (unphysically) vary non-monotonically with `frac_thresh`.
lats_interp = np.arange(sf_one_side[lat_str].min(),
sf_one_side[lat_str].max() - 0.01, 0.05)
sf_one_side_interp = sf_one_side.interp(**{lat_str: lats_interp},
method="cubic")
# Explicitly make the last value equal to the original, as otherwise the
# interp step can overwrite it with nan for some reason.
sf_one_side_interp = xr.concat([sf_one_side_interp, sf_one_side[-1]],
dim=lat_str)
# Find where the streamfunction crosses the specified fractional threshold,
# using the Singh 2019 cosine weighting if specified.
if cos_factor:
sf_norm = ((sf_one_side_interp / cosdeg(sf_one_side_interp[lat_str])) /
(cell_max / cosdeg(lat_max)))
else:
sf_norm = sf_one_side_interp / cell_max
sf_thresh_diff = sf_norm - frac_thresh
sf_edge_bounds = zero_cross_bounds(sf_thresh_diff, lat_str, which_zero)
# Interpolate between the bounding points to the crossing.
return interpolate(sf_edge_bounds, sf_edge_bounds[lat_str], 0,
lat_str)[lat_str]
|
b666333bb9b0a54d569df96324dbb55747b33a4c
| 3,639,846
|
def make_score_fn(data):
"""Returns a groupwise score fn to build `EstimatorSpec`."""
context_feature_columns, example_feature_columns = data.create_feature_columns()
def _score_fn(context_features, group_features, mode, unused_params,
unused_config):
"""Defines the network to score a group of documents."""
with tf.name_scope("input_layer"):
group_input = [
tf.layers.flatten(group_features[name])
for name in sorted(example_feature_columns)
]
print(group_input[0].shape)
print(group_input[0].dtype)
context_input = [
tf.layers.flatten(context_features[name])
for name in sorted(context_feature_columns)
]
print(context_input[0].shape)
print(context_input[0].dtype)
final_input = context_input + group_input
input_layer = tf.concat(final_input, 1)
tf.summary.scalar("input_sparsity", tf.nn.zero_fraction(input_layer))
tf.summary.scalar("input_max", tf.reduce_max(input_layer))
tf.summary.scalar("input_min", tf.reduce_min(input_layer))
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
cur_layer = tf.layers.batch_normalization(input_layer, training=is_training)
for i, layer_width in enumerate(int(d) for d in FLAGS.hidden_layer_dims):
cur_layer = tf.layers.dense(cur_layer, units=layer_width)
cur_layer = tf.layers.batch_normalization(cur_layer, training=is_training)
cur_layer = tf.nn.relu(cur_layer)
tf.summary.scalar("fully_connected_{}_sparsity".format(i),
tf.nn.zero_fraction(cur_layer))
cur_layer = tf.layers.dropout(
cur_layer, rate=FLAGS.dropout_rate, training=is_training)
logits = tf.layers.dense(cur_layer, units=FLAGS.group_size)
return logits
return _score_fn
|
5f95843413ddeb146c080a30b563b74e863cdb07
| 3,639,847
|
def unicode2str(obj):
""" Recursively convert an object and members to str objects
instead of unicode objects, if possible.
This only exists because of the incoming world of unicode_literals.
:param object obj: object to recurse
:return: object with converted values
:rtype: object
"""
if isinstance(obj, dict):
return {unicode2str(k): unicode2str(v) for k, v in
obj.items()}
elif isinstance(obj, list):
return [unicode2str(i) for i in obj]
elif isinstance(obj, unicode_type()):
return obj.encode("utf-8")
else:
return obj
|
62ef4653fd22b58b463dfce62ff83238f47fb9b5
| 3,639,848
|
def test_elim_cast_same_dtype(tag):
""" test_elim_cast_same_dtype """
fns = FnDict()
cast = P.Cast()
@fns
def fp32_cast_fp32(x, y):
return cast(x, y)
@fns
def after(x, y):
return x
return fns[tag]
|
6aa97e39d8bdf4261e212355be85106ee109659f
| 3,639,849
|
def getRepository():
""" Determine the SVN repostiory for the cwd """
p = Ptyopen2('svn info')
output, status = p.readlinesAndWait()
for line in output:
if len(line) > 3 and line[0:3] == 'URL':
return line[5:].rstrip()
raise Exception('Could not determine SVN repository')
|
754d2b6a343a4b0283be4ba2846aba80b4d1a95f
| 3,639,850
|
import re
def replaceInternalLinks(text):
"""
Replaces internal links of the form:
[[title |...|label]]trail
with title concatenated with trail, when present, e.g. 's' for plural.
See https://www.mediawiki.org/wiki/Help:Links#Internal_links
"""
# call this after removal of external links, so we need not worry about
# triple closing ]]].
    tailRE = re.compile(r'\w+')
cur = 0
res = ''
for s, e in findBalanced(text):
m = tailRE.match(text, e)
if m:
trail = m.group(0)
end = m.end()
else:
trail = ''
end = e
inner = text[s + 2:e - 2]
# find first |
pipe = inner.find('|')
if pipe < 0:
title = inner
label = title
else:
title = inner[:pipe].rstrip()
# find last |
curp = pipe + 1
for s1, e1 in findBalanced(inner):
last = inner.rfind('|', curp, s1)
if last >= 0:
pipe = last # advance
curp = e1
label = inner[pipe + 1:].strip()
res += text[cur:s] + makeInternalLink(title, label) + trail
cur = end
return res + text[cur:]
|
23795baffdaf46883fe08a12861313ed6bc0ee54
| 3,639,851
|
from math import ceil, floor
from typing import Any, List, Optional
def make_table(rows: List[List[Any]], labels: Optional[List[Any]] = None, centered: bool = False) -> str:
"""
:param rows: 2D list containing objects that have a single-line representation (via `str`).
All rows must be of the same length.
:param labels: List containing the column labels. If present, the length must equal to that of each row.
:param centered: If the items should be aligned to the center, else they are left aligned.
:return: A table representing the rows passed in.
"""
max_column_length = [] #list for the maximum length of a column
if not labels is None: #add label length to the maximum length
for i in labels:
max_column_length.append(len(str(i)))
else: #add 0 as label length
for i in range(len(rows[0])):
max_column_length.append(0)
for j in range(len(rows[0])): #add the length of a row item if it is longer than the label length or another leangth of a row
for i in rows:
if len(str(i[j])) > max_column_length[j]:
max_column_length[j] = len(str(i[j]))
#top and bot line creation
#top line
top_line = "┌─"
for j in max_column_length:
for i in range(j):
top_line += "─"
top_line += "─┬─"
top_line = top_line[:-2] + "┐\n"
#bot line
bot_line = "└─"
for j in max_column_length:
for i in range(j):
bot_line += "─"
bot_line += "─┴─"
bot_line = bot_line[:-2] + "┘\n"
#table header
table_header = ""
if not labels is None:
table_header += "│ "
for i in labels:
if not centered:
table_header += i + "".join(' ' for l in range(max_column_length[labels.index(i)] - len(str(i)))) + " │ "
elif centered:
table_header += "".join(' ' for l in range(int(floor((max_column_length[labels.index(i)] - len(str(i)))/2)))) + str(i) + "".join(' ' for l in range(int(ceil((max_column_length[labels.index(i)] - len(str(i)))/2)))) + " │ "
table_header = table_header[:-1] + "\n"
table_header += "├─"
for j in max_column_length:
for i in range(j):
table_header += "─"
table_header += "─┼─"
table_header = table_header[:-2] + "┤\n"
#table body
table_body = ""
for j in rows:
for i in range(len(j)):
if not centered:
table_body += "│ " + str(j[i]) + "".join(' ' for l in range(max_column_length[i] - len(str(j[i])) + 1))
elif centered:
table_body += "│ " + "".join(' ' for l in range(int(floor((max_column_length[i] - len(str(j[i])))/2)))) + str(j[i]) + "".join(' ' for l in range(int(ceil((max_column_length[i] - len(str(j[i])))/2)) + 1))
table_body += "│\n"
return top_line + table_header + table_body + bot_line
|
ecdc9972dfbb05795556a5ba36b6cf4cd55399f8
| 3,639,852
|
def format_value(v):
"""
Formats a value to be included in a string.
@param v a string
@return a string
"""
return ("'{0}'".format(v.replace("'", "\\'"))
if isinstance(v, str) else "{0}".format(v))
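# Illustrative usage (added example): strings are quoted with embedded single quotes
# escaped, non-strings are rendered as-is.
assert format_value("it's") == "'it\\'s'"
assert format_value(3.5) == "3.5"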
|
8b8d5452ecf938b4e9e9956577f1a3f1102e49bc
| 3,639,853
|
def worst_solvents(delta_d, delta_p, delta_h, filter_params):
"""Search solvents on the basis of RED (sorted descending) with given Hansen parameters, and with
a formatted string indicating filter parameters. See the function parse_filter_params
for details of filter parameters string."""
results_list = []
filter_dict = parse_filter_params(filter_params)
try:
delta_d = float(delta_d)
delta_p = float(delta_p)
delta_h = float(delta_h)
except ValueError:
return jsonify(results_list)
#Since we need most of the info and the solvents table is not big, we'll just read it
#straight to a DataFrame
solvent_df = pd.read_sql('solvents',db.engine)
for code_key, min_max in filter_dict.items():
solvent_df = solvent_df[(solvent_df[cols_from_codes[code_key]] >= min_max[0]) & \
(solvent_df[cols_from_codes[code_key]] <= min_max[1])]
if len(solvent_df) == 0:
return jsonify(results_list)
solvent_df['RED2'] = 4 * (solvent_df['delta_d'] - delta_d) * (solvent_df['delta_d'] - delta_d) + \
(solvent_df['delta_p'] - delta_p) * (solvent_df['delta_p'] - delta_p) + \
(solvent_df['delta_h'] - delta_h) * (solvent_df['delta_h'] - delta_h)
solvent_df['RED'] = solvent_df['RED2'].apply(np.sqrt)
solvent_df = solvent_df.sort_values(by='RED', ascending=False)
# Limit output to top 5
results_list = solvent_df.head().fillna('').to_dict(orient='records')
return jsonify(results_list)
|
91543a738ef86e77336cedf6edd6175794c4bdcb
| 3,639,854
|
import textwrap
def log(msg, *args, dialog=False, error=False, **kwargs):
"""
Generate a message to the console and optionally as either a message or
error dialog. The message will be formatted and dedented before being
displayed, and will be prefixed with its origin.
"""
msg = textwrap.dedent(msg.format(*args, **kwargs)).strip()
if error:
print("remote_build:")
return sublime.error_message(msg)
for line in msg.splitlines():
print("remote_build: {msg}".format(msg=line))
if dialog:
sublime.message_dialog(msg)
|
b204d4205a4fd90b3f0ca7104e3b6dd336b25b46
| 3,639,855
|
def build_frustum_lineset(K, l, t, r, b):
"""Build a open3d.geometry.LineSet to represent a frustum
Args:
pts_A (np.array or torch.tensor): Point set in form (Nx3)
pts_B (np.array or torch.tensor): Point set in form (Nx3)
idxs (list of int): marks correspondence between A[i] and B[idxs[i]]
Returns:
line_set (open3d.geometry.LineSet)
"""
corners = np.asarray([(l, t), (r - 1, t), (r - 1, b - 1), (l, b - 1)], dtype=np.float32)
rays = unproject(K, corners)
rays /= np.linalg.norm(rays, axis=1)[:, None]
line_idx = [[i * 2 + 0, i * 2 + 1] for i in range(4)]
line_pts = []
for ray in rays:
line_pts.extend([[0, 0, 0], (ray * 100).tolist()])
line_set = o3d.geometry.LineSet()
line_set.points = o3d.utility.Vector3dVector(line_pts)
colors = np.zeros((8, 3), dtype=np.uint8)
colors[:, 1] = 255
line_set.colors = o3d.utility.Vector3dVector(colors)
line_set.lines = o3d.utility.Vector2iVector(line_idx)
return line_set
|
73236b73692b61a7d7c78005f626d0d6b0f2c84e
| 3,639,856
|
import os
def _config_file_exists():
"""
Checks if the configuration file exists.
:return: Returns True if the configuration file exists and False otherwise.
:rtype: bool
"""
if os.path.isfile(DEFAULT_CONFIG_FILE):
return True
return False
|
5cbccfd2eb2e87278820e55c23d859cf4644017e
| 3,639,857
|
from nltk import pos_tag
def postagsget(sent):
"""
sent: Sentence as string
"""
string = ""
ls = pos_tag(list(sent.split()))
for i in ls:
string += i[1] + " "
return string
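# Illustrative usage (added sketch): requires NLTK plus its tagger data
# (e.g. "averaged_perceptron_tagger") to be downloaded; the call returns a
# space-separated string of POS tags, roughly like:
# postagsget("the cat sits")  ->  "DT NN VBZ "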
|
077e7261e0a0e296381bcb09578557576fe4c86c
| 3,639,858
|
def try_convert_to_list_of_numbers(transform_params):
"""
Args:
transform_params: a dict mapping transform parameter names to values
This function tries to convert each parameter value to a list of numbers.
If that fails, then it tries to convert the value to a number.
For example, if transform_params = {'scale':'0.16 1', size='256'}, this will become
{'scale':[0.16, 1], 'size': 256}.
"""
for k, v in transform_params.items():
try:
v = [string_to_num(x) for x in v.split(" ")]
if len(v) == 1:
v = v[0]
except AttributeError:
v = string_to_num(v)
transform_params[k] = v
return transform_params
|
14e33630daab0081d39fd533a606a7b561f5b161
| 3,639,859
|
def from_tensorflow(graph):
""" Load tensorflow graph which is a python tensorflow graph object into nnvm graph.
The companion parameters will be handled automatically.
Parameters
----------
graph : GraphDef object
Tensorflow GraphDef
Returns
-------
sym : nnvm.Symbol
Compatible nnvm symbol
params : dict of str to tvm.ndarray
Dict of converted parameters stored in tvm.ndarray format
"""
g = GraphProto()
sym, params = g.from_tensorflow(graph)
return sym, params
|
9547430c05559eb094dbaf3b1528ec071b1c9b50
| 3,639,860
|
def isequal(q1, q2, tol=100, unitq=False):
"""
Test if quaternions are equal
:param q1: quaternion
:type q1: array_like(4)
:param q2: quaternion
:type q2: array_like(4)
:param unitq: quaternions are unit quaternions
:type unitq: bool
:param tol: tolerance in units of eps
:type tol: float
:return: whether quaternions are equal
:rtype: bool
Tests if two quaternions are equal.
For unit-quaternions ``unitq=True`` the double mapping is taken into account,
that is ``q`` and ``-q`` represent the same orientation and ``isequal(q, -q, unitq=True)`` will
return ``True``.
.. runblock:: pycon
>>> from spatialmath.base import isequal
>>> q1 = [1, 2, 3, 4]
>>> q2 = [-1, -2, -3, -4]
>>> isequal(q1, q2)
>>> isequal(q1, q2, unitq=True)
"""
q1 = base.getvector(q1, 4)
q2 = base.getvector(q2, 4)
if unitq:
return (np.sum(np.abs(q1 - q2)) < tol * _eps) or (np.sum(np.abs(q1 + q2)) < tol * _eps)
else:
return np.sum(np.abs(q1 - q2)) < tol * _eps
|
6c903bbb547c9015e949e916d0bccda124bad04c
| 3,639,861
|
def MMOE(dnn_feature_columns, num_tasks, task_types, task_names, num_experts=4,
expert_dnn_units=[32,32], gate_dnn_units=[16,16], tower_dnn_units_lists=[[16,8],[16,8]],
l2_reg_embedding=1e-5, l2_reg_dnn=0, seed=1024, dnn_dropout=0, dnn_activation='relu', dnn_use_bn=False):
"""Instantiates the Multi-gate Mixture-of-Experts multi-task learning architecture.
:param dnn_feature_columns: An iterable containing all the features used by deep part of the model.
:param num_tasks: integer, number of tasks, equal to number of outputs, must be greater than 1.
:param task_types: list of str, indicating the loss of each tasks, ``"binary"`` for binary logloss, ``"regression"`` for regression loss. e.g. ['binary', 'regression']
:param task_names: list of str, indicating the predict target of each tasks
:param num_experts: integer, number of experts.
:param expert_dnn_units: list, list of positive integer, its length must be greater than 1, the layer number and units in each layer of expert DNN
:param gate_dnn_units: list, list of positive integer, its length must be greater than 1, the layer number and units in each layer of gate DNN
    :param tower_dnn_units_lists: list, list of positive integer list, its length must be equal to num_tasks, the layer number and units in each layer of task-specific DNN
:param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector
:param l2_reg_dnn: float. L2 regularizer strength applied to DNN
:param seed: integer ,to use as random seed.
:param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
:param dnn_activation: Activation function to use in DNN
:param dnn_use_bn: bool. Whether use BatchNormalization before activation or not in DNN
:return: a Keras model instance
"""
if num_tasks <= 1:
raise ValueError("num_tasks must be greater than 1")
if len(task_types) != num_tasks:
raise ValueError("num_tasks must be equal to the length of task_types")
for task_type in task_types:
if task_type not in ['binary', 'regression']:
raise ValueError("task must be binary or regression, {} is illegal".format(task_type))
if num_tasks != len(tower_dnn_units_lists):
raise ValueError("the length of tower_dnn_units_lists must be euqal to num_tasks")
features = build_input_features(dnn_feature_columns)
inputs_list = list(features.values())
sparse_embedding_list, dense_value_list = input_from_feature_columns(features, dnn_feature_columns,
l2_reg_embedding, seed)
dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
#build expert layer
expert_outs = []
for i in range(num_experts):
expert_network = DNN(expert_dnn_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed=seed, name='expert_'+str(i))(dnn_input)
expert_outs.append(expert_network)
expert_concat = tf.keras.layers.concatenate(expert_outs, axis=1, name='expert_concat')
expert_concat = tf.keras.layers.Reshape([num_experts, expert_dnn_units[-1]], name='expert_reshape')(expert_concat) #(num_experts, output dim of expert_network)
mmoe_outs = []
for i in range(num_tasks): #one mmoe layer: nums_tasks = num_gates
#build gate layers
gate_network = DNN(gate_dnn_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed=seed, name='gate_'+task_names[i])(dnn_input)
gate_out = tf.keras.layers.Dense(num_experts, use_bias=False, activation='softmax', name='gate_softmax_'+task_names[i])(gate_network)
gate_out = tf.tile(tf.expand_dims(gate_out, axis=-1), [1, 1, expert_dnn_units[-1]]) #let the shape of gate_out be (num_experts, output dim of expert_network)
#gate multiply the expert
gate_mul_expert = tf.keras.layers.Multiply(name='gate_mul_expert_'+task_names[i])([expert_concat, gate_out])
gate_mul_expert = tf.math.reduce_sum(gate_mul_expert, axis=1) #sum pooling in the expert ndim
mmoe_outs.append(gate_mul_expert)
task_outs = []
for task_type, task_name, tower_dnn, mmoe_out in zip(task_types, task_names, tower_dnn_units_lists, mmoe_outs):
#build tower layer
tower_output = DNN(tower_dnn, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed=seed, name='tower_'+task_name)(mmoe_out)
logit = tf.keras.layers.Dense(1, use_bias=False, activation=None)(tower_output)
output = PredictionLayer(task_type, name=task_name)(logit)
task_outs.append(output)
model = tf.keras.models.Model(inputs=inputs_list, outputs=task_outs)
return model
|
6be8b400fb58c74d4dc397132327f9ffb819a637
| 3,639,862
|
import numpy
def greater(self, other):
""" Equivalent to the > operator.
"""
return _PropOpB(self, other, numpy.greater, numpy.uint8)
|
04d0a6ed3d0023afc3bcea167c67940af9387dc1
| 3,639,863
|
def load_translation_data(dataset, src_lang='en', tgt_lang='vi'):
"""Load translation dataset
Parameters
----------
dataset : str
src_lang : str, default 'en'
tgt_lang : str, default 'vi'
    Returns
    -------
    Tuple of the processed train, val and test datasets, the val and test reference
    target sentences, and the source and target vocabularies.
    """
common_prefix = 'IWSLT2015_{}_{}_{}_{}'.format(src_lang, tgt_lang,
args.src_max_len, args.tgt_max_len)
if dataset == 'IWSLT2015':
data_train = IWSLT2015('train', src_lang=src_lang, tgt_lang=tgt_lang)
data_val = IWSLT2015('val', src_lang=src_lang, tgt_lang=tgt_lang)
data_test = IWSLT2015('test', src_lang=src_lang, tgt_lang=tgt_lang)
else:
raise NotImplementedError
src_vocab, tgt_vocab = data_train.src_vocab, data_train.tgt_vocab
data_train_processed = load_cached_dataset(common_prefix + '_train')
if not data_train_processed:
data_train_processed = process_dataset(data_train, src_vocab, tgt_vocab,
args.src_max_len, args.tgt_max_len)
cache_dataset(data_train_processed, common_prefix + '_train')
data_val_processed = load_cached_dataset(common_prefix + '_val')
if not data_val_processed:
data_val_processed = process_dataset(data_val, src_vocab, tgt_vocab)
cache_dataset(data_val_processed, common_prefix + '_val')
data_test_processed = load_cached_dataset(common_prefix + '_test')
if not data_test_processed:
data_test_processed = process_dataset(data_test, src_vocab, tgt_vocab)
cache_dataset(data_test_processed, common_prefix + '_test')
fetch_tgt_sentence = lambda src, tgt: tgt.split()
val_tgt_sentences = list(data_val.transform(fetch_tgt_sentence))
test_tgt_sentences = list(data_test.transform(fetch_tgt_sentence))
return data_train_processed, data_val_processed, data_test_processed, \
val_tgt_sentences, test_tgt_sentences, src_vocab, tgt_vocab
|
f571f8bda3b46dfba7f15f15ac7e46addac6273e
| 3,639,864
|
import copy
def objective_function(decision_variables, root_model, mode="by_age", country=Region.UNITED_KINGDOM, config=0,
calibrated_params={}):
"""
:param decision_variables: dictionary containing
- mixing multipliers by age as a list if mode == "by_age" OR
- location multipliers as a list if mode == "by_location"
:param root_model: integrated model supposed to model the past epidemic
:param mode: either "by_age" or "by_location"
:param country: the country name
:param config: the id of the configuration being considered
:param calibrated_params: a dictionary containing a set of calibrated parameters
"""
running_model = RegionApp(country)
build_model = running_model.build_model
params = copy.deepcopy(running_model.params)
# reformat decision vars if locations
if mode == "by_location":
new_decision_variables = {
"other_locations": decision_variables[0],
"school": decision_variables[1],
"work": decision_variables[2]
}
decision_variables = new_decision_variables
# Define scenario-1-specific params
sc_1_params_update = build_params_for_phases_2_and_3(decision_variables, config, mode)
# Rebuild the default parameters
params["default"].update(opti_params["default"])
params["default"] = update_params(params['default'], calibrated_params)
params['scenario_start_time'] = PHASE_2_START_TIME - 1
# Create scenario 1
sc_1_params = update_params(params['default'], sc_1_params_update)
params["scenarios"][1] = sc_1_params
scenario_1 = Scenario(build_model, idx=1, params=params)
# Run scenario 1
scenario_1.run(base_model=root_model)
models = [root_model, scenario_1.model]
#____________________________ Perform diagnostics ______________________
# How many deaths and years of life lost during Phase 2 and 3
start_phase2_index = models[1].derived_outputs["times"].index(PHASE_2_START_TIME)
end_phase2_index = models[1].derived_outputs["times"].index(phase_2_end[config])
total_nb_deaths = sum(models[1].derived_outputs["infection_deathsXall"][start_phase2_index:])
years_of_life_lost = sum(models[1].derived_outputs["years_of_life_lost"][start_phase2_index:])
# What proportion immune at end of Phase 2
recovered_indices = [
i
for i in range(len(models[1].compartment_names))
if "recovered" in models[1].compartment_names[i]
]
nb_reco = sum([models[1].outputs[end_phase2_index, i] for i in recovered_indices])
total_pop = sum([models[1].outputs[end_phase2_index, i] for i in range(len(models[1].compartment_names))])
prop_immune = nb_reco / total_pop
# Has herd immunity been reached?
herd_immunity = has_immunity_been_reached(models[1], end_phase2_index)
return herd_immunity, total_nb_deaths, years_of_life_lost, prop_immune, models
|
81d2aeacdabeb20e2910e3cf42ece12e112e055b
| 3,639,865
|
def kick(code, input):
""" kick <user> [reason] - Kicks a user from the current channel, with a reason if supplied. """
text = input.group(2).split()
if len(text) == 1:
target = input.group(2)
reason = False
else:
target = text[0]
reason = ' '.join(text[1::])
if not reason:
reason = kick_reason()
if target != code.nick:
return code.write(['KICK', input.sender, target], reason)
else:
return code.say('...')
|
59e8bc095076d9605aaa6b03363894c78d06b730
| 3,639,866
|
def check_invalid(string,*invalids,defaults=True):
"""Checks if input string matches an invalid value"""
# Checks string against inputted invalid values
for v in invalids:
if string == v:
return True
# Checks string against default invalid values, if defaults=True
    if defaults:
default_invalids = ['INC','inc','incomplete','NaN','nan','N/A','n/a','missing']
for v in default_invalids:
if string == v:
return True
# For valid strings
return False
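# Illustrative usage sketch (not part of the original module): the calls below
# show the default invalid markers, a caller-supplied marker, and disabling the
# defaults entirely.
print(check_invalid('NaN'))                    # True  -- matches a default marker
print(check_invalid('TBD', 'TBD', 'unknown'))  # True  -- matches a custom marker
print(check_invalid('42.7'))                   # False -- a normal value
print(check_invalid('inc', defaults=False))    # False -- defaults switched off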
|
6e9e20beebe8e0b0baed680219fd93453d7f4ce3
| 3,639,867
|
def sse_content(response, handler, **sse_kwargs):
"""
Callback to collect the Server-Sent Events content of a response. Callbacks
passed will receive event data.
:param response:
The response from the SSE request.
:param handler:
The handler for the SSE protocol.
"""
# An SSE response must be 200/OK and have content-type 'text/event-stream'
raise_for_not_ok_status(response)
raise_for_header(response, 'Content-Type', 'text/event-stream')
finished, _ = _sse_content_with_protocol(response, handler, **sse_kwargs)
return finished
|
45e6a1fe058a78e28aeda9e9837a09dce6facd1a
| 3,639,868
|
def test_module(client: Client) -> str:
"""Tests API connectivity and authentication'
Returning 'ok' indicates that the integration works like it is supposed to.
Connection to the service is successful.
Raises exceptions if something goes wrong.
:type client: ``Client``
:param client: client to use
:return: 'ok' if test passed, anything else will fail the test.
:rtype: ``str``
"""
client.check_auth()
return "ok"
|
c672b29017d415bc3793d6561ed5cd40716c0745
| 3,639,869
|
def get_more_spec_pos(tokens):
"""Return frequencies for more specific POS"""
# adverbs and preps, particles
adverbs = [t for t in tokens if t.full_pos == 'ADV']
apprart = [t for t in tokens if t.full_pos == 'APPRART']
postpos = [t for t in tokens if t.full_pos == 'APPO']
circum_pos = [t for t in tokens if t.full_pos == 'APZR']
compare_conj = [t for t in tokens if t.full_pos == 'KOKOM']
# foreign words, interjections
fremds = [t for t in tokens if t.full_pos == 'FM']
interj = [t for t in tokens if t.full_pos == 'ITJ']
# proper names and adjectives
prop_name = [t for t in tokens if t.full_pos == 'NE']
adja = [t for t in tokens if t.full_pos.startswith('ADJA')]
    adjd = [t for t in tokens if t.full_pos.startswith('ADJD')]
# pronouns
dem_pro_s = [t for t in tokens if t.full_pos == 'PDS']
dem_pro_a = [t for t in tokens if t.full_pos == 'PDAT']
ind_pro_s = [t for t in tokens if t.full_pos == 'PIS']
ind_pro_a = [t for t in tokens if t.full_pos in ['PIAT','PIDAT']]
pers_pron = [t for t in tokens if t.full_pos == 'PPER']
poss_s = [t for t in tokens if t.full_pos == 'PPOSS']
poss_a = [t for t in tokens if t.full_pos == 'PPOSAT']
refl_pron = [t for t in tokens if t.full_pos == 'PRF']
    inter_pron = [t for t in tokens if t.full_pos in ('PWS', 'PWAT', 'PWAV')]
    all_prons = dem_pro_s+dem_pro_a+ind_pro_s+ind_pro_a+poss_s+poss_a+refl_pron+inter_pron
    # comparatives, punctuation
comp = [t for t in tokens if t.full_pos == 'TRUNC']
sent_int_interpunct = [t for t in tokens if t.full_pos == '$(']
# pronom adverbs and others
pro_adv = [t for t in tokens if t.full_pos == 'PROAV' and t.function == 'pp']
part_kvz = [t for t in tokens if t.full_pos == 'PTKVZ' and t.function == 'avz']
inf_with_zu = [t for t in tokens if t.full_pos == 'PTKVZ' and t.function == 'VVIZU']
for t in poss_s+poss_a:
t.pos_color.append('Poss pronouns')
for t in refl_pron:
t.pos_color.append('Refl pronouns')
return (len(adverbs), len(apprart), len(postpos), len(circum_pos), len(fremds), len(interj), \
len(prop_name), len(adja), len(adjd),
len(dem_pro_s), len(dem_pro_a), len(dem_pro_s)+len(dem_pro_a), len(ind_pro_s), len(ind_pro_a), \
len(ind_pro_s)+len(ind_pro_a),
len(pers_pron), len(poss_s), len(poss_a), len(poss_s)+len(poss_a), len(refl_pron), \
len(inter_pron), len(comp),
len(sent_int_interpunct), len(pro_adv), len(part_kvz), len(compare_conj), \
len(inf_with_zu), len(all_prons))
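# Illustrative usage sketch (not part of the original module): the token
# objects below are stand-ins built with SimpleNamespace; any object exposing
# .full_pos, .function and a mutable .pos_color list behaves the same way.
from types import SimpleNamespace

_demo_tokens = [
    SimpleNamespace(full_pos='ADV', function='adv', pos_color=[]),
    SimpleNamespace(full_pos='PPOSAT', function='det', pos_color=[]),
    SimpleNamespace(full_pos='PRF', function='obja', pos_color=[]),
]
_counts = get_more_spec_pos(_demo_tokens)
print(_counts[0])                  # 1 adverb counted
print(_demo_tokens[1].pos_color)   # ['Poss pronouns'] -- colouring side effect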
|
5ea2ae19d61c84ca8750999aa14a14dd426fe6f7
| 3,639,870
|
def reconcile_suggest_property(prefix: str = ""):
"""Given a search prefix, return all the type/schema properties which match
the given text. This is used to auto-complete property selection for detail
filters in OpenRefine."""
matches = []
for prop in model.properties:
if not prop.schema.is_a(settings.BASE_SCHEMA):
continue
        if prop.hidden or prop.type == registry.entity:
continue
if match_prefix(prefix, prop.name, prop.label):
matches.append(get_freebase_property(prop))
return {
"code": "/api/status/ok",
"status": "200 OK",
"prefix": prefix,
"result": matches,
}
|
655796e8b00f8b36cae9b373e27f11077ccb49d4
| 3,639,871
|
import squarify as sq
def make_boxes(df_data, category, size_factor, x, y, height, width, pad=[1,1], main_cat=None):
"""Generates the coordinates for the boxes of the category"""
totals = df_data[size_factor].groupby(df_data[category]).sum()
box_list = totals.sort_values(ascending=False).to_frame()
box_list.columns = ['value']
if main_cat:
box_list['cat'] = main_cat
box_list['norm'] = sq.normalize_sizes(box_list.value, width, height)
box_list['rect'] = sq.squarify(box_list.norm, x, y, width, height)
box_list['rect'] = box_list.apply(lambda row: pad_rect(row['rect'], pad), axis=1)
return box_list
|
a4bc28cf13a330863054fa3b5e514c50ba2c9e98
| 3,639,872
|
import types
import joblib
def hash_codeobj(code):
"""Return hashed version of a code object"""
bytecode = code.co_code
consts = code.co_consts
consts = [hash_codeobj(c) if isinstance(c, types.CodeType) else c
for c in consts]
return joblib.hash((bytecode, consts))
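# Illustrative usage sketch (not part of the original module): hashing a
# function's compiled code object gives a stable fingerprint of its bytecode
# and constants, nested code objects included, which is handy for cache
# invalidation when an implementation changes.
def _demo(x):
    def _inner(y):
        return y + 1
    return _inner(x) * 2

print(hash_codeobj(_demo.__code__))  # a hex digest produced by joblib.hash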
|
43d0094ccb5345ca4f8b30b5fb03d167b8e21aa5
| 3,639,873
|
def us_census():
"""Data Source for the US census.
Arguments:
None
Returns:
pandas.DataFrame
"""
df = us_census_connector()
return us_census_formatter(df)
|
8aba9df470ab17a59437897a2e13a80be2b6e9d9
| 3,639,874
|
from pathlib import Path
def get_notebook_path(same_config_path, same_config_file_contents) -> str:
"""Returns absolute value of the pipeline path relative to current file execution"""
return str(Path.joinpath(Path(same_config_path).parent, same_config_file_contents["notebook"]["path"]))
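# Illustrative usage sketch; the config path and contents below are made-up
# placeholders, not values from any real SAME project.
_demo_config_path = "/projects/demo/same.yaml"
_demo_config_contents = {"notebook": {"path": "notebooks/train.ipynb"}}
print(get_notebook_path(_demo_config_path, _demo_config_contents))
# -> /projects/demo/notebooks/train.ipynb (on POSIX systems)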
|
4b9f8952bdb7c2308fdfa290ec108d432b6b6a0b
| 3,639,875
|
import re
import glob
def get_path_from_dependency(
recipe_dependency_value: str,
recipe_base_folder_path: str
) -> str:
""" Searches the base folder for a file, that corresponse to the dependency passed.
:param recipe_dependency_value: Value of the "From:" section from a
recipe file, used by singularity
to find the base image.
:param recipe_base_folder_path: Full path of the base folder,
containing all recipes.
:returns: Full path to the parent recipe or
an empty string '' if it is not a local
dependency.
"""
if not is_own_dependency(recipe_dependency_value):
return ''
_dependency_value_regex = re.compile(
r'^(?:.*?\/)?' # Match possible host address and ignore it
r'(?P<collection>.+?)\/' # Match collection
r'(?P<container>.+?)' # Match container/image name
r'(?::(?P<version>.*?))?$' # Match possible version Tag
)
_filename_components = re.search(_dependency_value_regex, recipe_dependency_value)
_glob_dict = {'basepath': recipe_base_folder_path}
_glob_dict.update(_filename_components.groupdict())
_glob_string = ''
    # The 'latest' tag (or a missing tag) translates to a filename without a
    # version suffix.
    if _glob_dict.get('version') in (None, '', 'latest'):
        _glob_dict.pop('version', None)
    if 'version' in _glob_dict:
        _glob_string = (
            '{basepath}/**/{collection}/{container}.{version}.recipe'.format(
                **_glob_dict)
        )
    else:
        _glob_string = (
            '{basepath}/**/{collection}/{container}.recipe'.format(
                **_glob_dict)
        )
# Find corresponding Files
_glob_results = glob.glob(
_glob_string,
recursive=True
)
if len(_glob_results) > 1:
raise RuntimeError(
(
"The naming schema of recipe {} clashes with. "
"They cannot both exist in one sregistry."
).format(', '.join(_glob_results))
)
if not _glob_results:
raise RuntimeError(
"Unresolved dependency on {}".format(
recipe_dependency_value
)
)
return _glob_results[0]
|
79de99c407998193e4ab6b3d760f894d3a5039ab
| 3,639,876
|
def about_us():
""" The about us page. """
return render_template(
"basic/about_us.html",
)
|
64d94e998855e7c99506ced7b48da36c5cbfa57a
| 3,639,877
|
import numpy as np
import torch
import torch.nn.functional as F
def sfb1d_atrous(lo, hi, g0, g1, mode='periodization', dim=-1, dilation=1,
pad1=None, pad=None):
""" 1D synthesis filter bank of an image tensor with no upsampling. Used for
the stationary wavelet transform.
"""
C = lo.shape[1]
d = dim % 4
# If g0, g1 are not tensors, make them. If they are, then assume that they
# are in the right order
if not isinstance(g0, torch.Tensor):
g0 = torch.tensor(np.copy(np.array(g0).ravel()),
dtype=torch.float, device=lo.device)
if not isinstance(g1, torch.Tensor):
g1 = torch.tensor(np.copy(np.array(g1).ravel()),
dtype=torch.float, device=lo.device)
L = g0.numel()
shape = [1,1,1,1]
shape[d] = L
# If g aren't in the right shape, make them so
if g0.shape != tuple(shape):
g0 = g0.reshape(*shape)
if g1.shape != tuple(shape):
g1 = g1.reshape(*shape)
g0 = torch.cat([g0]*C,dim=0)
g1 = torch.cat([g1]*C,dim=0)
# Calculate the padding size.
# With dilation, zeros are inserted between the filter taps but not after.
# that means a filter that is [a b c d] becomes [a 0 b 0 c 0 d].
centre = L / 2
fsz = (L-1)*dilation + 1
newcentre = fsz / 2
before = newcentre - dilation*centre
# When conv_transpose2d is done, a filter with k taps expands an input with
# N samples to be N + k - 1 samples. The 'padding' is really the opposite of
# that, and is how many samples on the edges you want to cut out.
# In addition to this, we want the input to be extended before convolving.
# This means the final output size without the padding option will be
# N + k - 1 + k - 1
# The final thing to worry about is making sure that the output is centred.
short_offset = dilation - 1
centre_offset = fsz % 2
a = fsz//2
b = fsz//2 + (fsz + 1) % 2
# a = 0
# b = 0
pad = (0, 0, a, b) if d == 2 else (a, b, 0, 0)
lo = mypad(lo, pad=pad, mode=mode)
hi = mypad(hi, pad=pad, mode=mode)
unpad = (fsz - 1, 0) if d == 2 else (0, fsz - 1)
unpad = (0, 0)
y = F.conv_transpose2d(lo, g0, padding=unpad, groups=C, dilation=dilation) + \
F.conv_transpose2d(hi, g1, padding=unpad, groups=C, dilation=dilation)
# pad = (L-1, 0) if d == 2 else (0, L-1)
# y = F.conv_transpose2d(lo, g0, padding=pad, groups=C, dilation=dilation) + \
# F.conv_transpose2d(hi, g1, padding=pad, groups=C, dilation=dilation)
#
#
# Calculate the pad size
# L2 = (L * dilation)//2
# # pad = (0, 0, L2, L2+dilation) if d == 2 else (L2, L2+dilation, 0, 0)
# a = dilation*2
# b = dilation*(L-2)
# if pad1 is None:
# pad1 = (0, 0, a, b) if d == 2 else (a, b, 0, 0)
# print(pad1)
# lo = mypad(lo, pad=pad1, mode=mode)
# hi = mypad(hi, pad=pad1, mode=mode)
# if pad is None:
# p = (a + b + (L - 1)*dilation)//2
# pad = (p, 0) if d == 2 else (0, p)
# print(pad)
return y/(2*dilation)
|
c977115b311dbe67f0d72d2990fb3d9a9a206506
| 3,639,878
|
import numpy as np
import scipy.ndimage as nd
import torch
def preprocess(image, size):
""" pre-process images with Opencv format"""
image = np.array(image)
H, W, _ = image.shape
image = nd.zoom(image.astype('float32'), (size / H, size / W, 1.0), order=1)
image = image - mean_pixel
image = image.transpose([2, 0, 1])
image = np.expand_dims(image, axis=0)
return torch.from_numpy(image)
|
c2a928bbebf55587ff83347b572c7d73079cdbae
| 3,639,879
|
def delete_notebook(notebook_id: str) -> tuple[dict, int]:
"""Delete an existing notebook.
The user can call this operation only for their own notebooks. This
operation requires the following header with a fresh access token:
"Authorization: Bearer fresh_access_token"
Request parameters:
- notebook_id (string): Notebook ID.
Response status codes:
- 200 (Success)
- 401 (Unauthorized)
- 403 (Forbidden)
- 422 (Unprocessable Entity)
Response data (JSON string):
- message (string): Message.
- message_type (string): Message type.
:param notebook_id: Notebook ID.
:return: Tuple containing the response data and the response status code.
"""
# Validate the ID. A "marshmallow.ValidationError" exception is raised if
# the ID is invalid, which produces a 400 response.
notebook_id = id_schema.load({"id": notebook_id})["id"]
# JWT payload data
req_user_id = get_jwt()["user_id"]
# Get notebook
db = get_db()
notebook = db.notebooks.get_by_id(notebook_id)
# Check that the notebook exists and the permissions
if notebook is None or notebook["user_id"] != req_user_id:
d = get_response_data(USER_UNAUTHORIZED, ERROR_UNAUTHORIZED_USER)
return d, 403
# Delete notebook
db.notebooks.delete(notebook_id)
# Delete all notebook's notes
for n in db.notes.get_by_filter(notebook_id):
db.notes.delete(n["id"])
return get_response_data(DELETED, OK), 200
|
cf7a5074d9814024bc6ed8fa56a8157a0cc4290d
| 3,639,880
|
import time
from functools import wraps
def log_time(logger):
"""
Decorator to log the execution time of a function
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
end = time.time()
_log_time(logger, func.__name__, start, end)
return result
return wrapper
return decorator
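# Illustrative usage sketch, assuming the module-level _log_time helper used by
# the wrapper formats and emits the duration message.
import logging

_logger = logging.getLogger(__name__)

@log_time(_logger)
def slow_add(a, b):
    return a + b

# slow_add(1, 2) runs normally; the wrapper additionally logs how long it took.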
|
a809eb70488338991636912b4eba33bf4aa93acc
| 3,639,881
|
import warnings
import numpy as np
def compute_avg_merge_candidate(catavg, v, intersection_idx):
"""
Given intersecting deltas in catavg and v, compute average delta
one could merge into running average. If one cat is an outlier,
picking that really distorts the vector we merge into running
average vector. So, effectively merge using all as the ref
cat in common by merging in average of all possible refcats.
When there is no noise in y, the average merge candidate is
the same as any single candidate. So, with no noise, we get
exact answer; averaging here doesn't cost us anything. It
only helps to spread noise across categories.
"""
merge_candidates = []
for i in intersection_idx:
merge_candidates.append(v - v[i] + catavg[i])
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
# We get "Mean of empty slice" when all entries are Nan but we want that.
v = np.nanmean(merge_candidates, axis=0)
return v
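# Illustrative usage sketch with toy numbers (not part of the original module):
# catavg is the running average vector, v the new delta vector, and
# intersection_idx lists the categories both vectors have estimates for.
_catavg = np.array([1.0, 2.0, np.nan])
_v = np.array([0.5, 1.5, 3.0])
print(compute_avg_merge_candidate(_catavg, _v, intersection_idx=[0, 1]))
# -> [1.  2.  3.5]  (mean of the two possible re-referenced candidates)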
|
25c50ad0ad98e7a6951bd67cae8dc8fe6c78dbbb
| 3,639,882
|
import matplotlib.pyplot as plt
import matplotlib.patches as patches
def plot_annotations(img, bbox, labels, scores, confidence_threshold,
                     save_fig_path='predicted_img.jpeg', show=False, save_fig=True):
    """
    This function plots bounding boxes over an image with text labels and saves the image to a particular location.
    """
# Default colors and mappings
colors_map={'1':'#5E81AC','2':'#A3BE8C','3':'#B48EAD'}
labels_map={'1':'Vehicle','2':'Person','3':'Cyclist'}
# Create figure and axes
fig, ax = plt.subplots(figsize = (200,200))
# Display the image
ax.imshow(img)
i=0
scores_ind = [idx for idx,x in enumerate(scores) if x>confidence_threshold] # Filter for scores greater than certain threshold
for idx, entry in enumerate(bbox):
if idx in scores_ind:
            w = entry[2]-entry[0]   # box width
            h = entry[3]-entry[1]   # box height
            # Create a Rectangle patch
            rect = patches.Rectangle((entry[0],entry[1]), w, h,
linewidth=60,
edgecolor=colors_map[str(labels[idx])],
facecolor='none')
# Add classification category
plt.text(entry[0], entry[1], s=labels_map[str(labels[idx])],
color='white', verticalalignment='top',
bbox={'color': colors_map[str(labels[idx])], 'pad': 0},
font={'size':500})
# Add the patch to the Axes
ax.add_patch(rect)
i+=1
    # Save before showing: with interactive backends plt.show() can leave the
    # current figure empty by the time savefig runs.
    if save_fig:
        plt.savefig(save_fig_path,
                    bbox_inches = 'tight',
                    pad_inches = 0,
                    dpi=5)
    if show:
        plt.show()
    return save_fig_path
|
f32afb61b9f43fe44b9e78896a51d67e52638eab
| 3,639,883
|
def get_registry_image_tag(app_name: str, image_tag: str, registry: dict) -> str:
"""Returns the image name for a given organization, app and tag"""
return f"{registry['organization']}/{app_name}:{image_tag}"
|
16c71f99ff3a3c2514c24cb417b93f3b88f7cf42
| 3,639,884
|
import os
import shutil
def save_wind_generated_waves_to_subdirectory(args):
""" Copy the wave height and wave period to the outputs/ directory.
Inputs:
args['wave_height'][sector]: uri to "sector"'s wave height data
args['wave_period'][sector]: uri to "sector"'s wave period data
args['prefix']: prefix to be appended to the new filename
Outputs:
data_uri: dictionary containing the uri where the data is saved
"""
intermediate_directory = \
os.path.join(args['intermediate_directory'], args['subdirectory'])
wave_height_list = args['wave_heights']
wave_period_list = args['wave_periods']
    # Record where each copy ends up so callers know the saved data's location.
    data_uri = {'wave_heights': [], 'wave_periods': []}
    for wave_height_uri in wave_height_list:
        shutil.copy(wave_height_uri, intermediate_directory)
        data_uri['wave_heights'].append(
            os.path.join(intermediate_directory, os.path.basename(wave_height_uri)))
    for wave_period_uri in wave_period_list:
        shutil.copy(wave_period_uri, intermediate_directory)
        data_uri['wave_periods'].append(
            os.path.join(intermediate_directory, os.path.basename(wave_period_uri)))
    return data_uri
|
5b203f5237ebd9ac3fbddcecc5b9c609677eb5ae
| 3,639,885
|
def process_file(filename):
"""
Handle a single .fits file, returning the count of checksum and compliance
errors.
"""
try:
checksum_errors = verify_checksums(filename)
if OPTIONS.compliance:
compliance_errors = verify_compliance(filename)
else:
compliance_errors = 0
        if (OPTIONS.write_file and checksum_errors == 0) or OPTIONS.force:
update(filename)
return checksum_errors + compliance_errors
except Exception as e:
log.error('EXCEPTION {!r} .. {}'.format(filename, e))
return 1
|
36f1723c67ab32a25cd7cba50b9989f00ea3e452
| 3,639,886
|
import numpy as np
def numeric_summary(tensor):
"""Get a text summary of a numeric tensor.
This summary is only available for numeric (int*, float*, complex*) and
Boolean tensors.
Args:
tensor: (`numpy.ndarray`) the tensor value object to be summarized.
Returns:
The summary text as a `RichTextLines` object. If the type of `tensor` is not
numeric or Boolean, a single-line `RichTextLines` object containing a
warning message will reflect that.
"""
def _counts_summary(counts, skip_zeros=True, total_count=None):
"""Format values as a two-row table."""
if skip_zeros:
counts = [(count_key, count_val) for count_key, count_val in counts
if count_val]
max_common_len = 0
for count_key, count_val in counts:
count_val_str = str(count_val)
common_len = max(len(count_key) + 1, len(count_val_str) + 1)
max_common_len = max(common_len, max_common_len)
key_line = debugger_cli_common.RichLine("|")
val_line = debugger_cli_common.RichLine("|")
for count_key, count_val in counts:
count_val_str = str(count_val)
key_line += _pad_string_to_length(count_key, max_common_len)
val_line += _pad_string_to_length(count_val_str, max_common_len)
key_line += " |"
val_line += " |"
if total_count is not None:
total_key_str = "total"
total_val_str = str(total_count)
max_common_len = max(len(total_key_str) + 1, len(total_val_str))
total_key_str = _pad_string_to_length(total_key_str, max_common_len)
total_val_str = _pad_string_to_length(total_val_str, max_common_len)
key_line += total_key_str + " |"
val_line += total_val_str + " |"
return debugger_cli_common.rich_text_lines_from_rich_line_list(
[key_line, val_line])
if not isinstance(tensor, np.ndarray) or not np.size(tensor):
return debugger_cli_common.RichTextLines([
"No numeric summary available due to empty tensor."])
  elif (np.issubdtype(tensor.dtype, np.floating) or
        np.issubdtype(tensor.dtype, np.complexfloating) or
        np.issubdtype(tensor.dtype, np.integer)):
counts = [
("nan", np.sum(np.isnan(tensor))),
("-inf", np.sum(np.isneginf(tensor))),
("-", np.sum(np.logical_and(
tensor < 0.0, np.logical_not(np.isneginf(tensor))))),
("0", np.sum(tensor == 0.0)),
("+", np.sum(np.logical_and(
tensor > 0.0, np.logical_not(np.isposinf(tensor))))),
("+inf", np.sum(np.isposinf(tensor)))]
output = _counts_summary(counts, total_count=np.size(tensor))
valid_array = tensor[
np.logical_not(np.logical_or(np.isinf(tensor), np.isnan(tensor)))]
if np.size(valid_array):
stats = [
("min", np.min(valid_array)),
("max", np.max(valid_array)),
("mean", np.mean(valid_array)),
("std", np.std(valid_array))]
output.extend(_counts_summary(stats, skip_zeros=False))
return output
  elif tensor.dtype == np.bool_:
counts = [
("False", np.sum(tensor == 0)),
("True", np.sum(tensor > 0)),]
return _counts_summary(counts, total_count=np.size(tensor))
else:
return debugger_cli_common.RichTextLines([
"No numeric summary available due to tensor dtype: %s." % tensor.dtype])
|
8499bb79f1869474752cd59fbfdb9a5bc0a23c6a
| 3,639,887
|
import numpy as np
def solveq(K, f, bcPrescr, bcVal=None):
"""
Solve static FE-equations considering boundary conditions.
Parameters:
K global stiffness matrix, dim(K)= nd x nd
f global load vector, dim(f)= nd x 1
bcPrescr 1-dim integer array containing prescribed dofs.
bcVal 1-dim float array containing prescribed values.
If not given all prescribed dofs are assumed 0.
Returns:
a solution including boundary values
Q reaction force vector
dim(a)=dim(Q)= nd x 1, nd : number of dof's
"""
nDofs = K.shape[0]
nPdofs = bcPrescr.shape[0]
if bcVal is None:
bcVal = np.zeros([nPdofs], 'd')
bc = np.ones(nDofs, 'bool')
bcDofs = np.arange(nDofs)
bc[np.ix_(bcPrescr-1)] = False
bcDofs = bcDofs[bc]
fsys = f[bcDofs]-K[np.ix_((bcDofs), (bcPrescr-1))] * \
np.asmatrix(bcVal).reshape(nPdofs, 1)
asys = np.linalg.solve(K[np.ix_((bcDofs), (bcDofs))], fsys)
a = np.zeros([nDofs, 1])
a[np.ix_(bcPrescr-1)] = np.asmatrix(bcVal).reshape(nPdofs, 1)
a[np.ix_(bcDofs)] = asys
Q = K*np.asmatrix(a)-f
return (np.asmatrix(a), Q)
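# Illustrative usage sketch (not part of the original module): a two-spring
# system with the first dof fixed and a unit load on the free dof. Note that
# bcPrescr uses 1-based dof numbering.
K_demo = np.array([[2.0, -1.0],
                   [-1.0, 1.0]])
f_demo = np.array([[0.0], [1.0]])
a_demo, Q_demo = solveq(K_demo, f_demo, bcPrescr=np.array([1]))
print(a_demo)  # displacements: the fixed dof stays at 0, the free dof moves 1.0
print(Q_demo)  # reaction forces: -1.0 at the fixed dof, ~0 at the free dof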
|
45aedf376f6eb2bdc6ba2a4889628f2584d13db1
| 3,639,888
|
import h5py
def get_output_names(hf):
"""
get_output_names(hf)
Returns a list of the output variables names in the HDF5 file.
Args:
hf: An open HDF5 filehandle or a string containing the HDF5
filename to use.
Returns:
A sorted list of the output variable names in the HDF5 file.
"""
    # Accept either an already-open HDF5 file handle or a filename string.
    if isinstance(hf, str):
        with h5py.File(hf, 'r') as handle:
            return sorted(map(str, handle['/output/data'].keys()))
    return sorted(map(str, hf['/output/data'].keys()))
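# Illustrative usage sketch (not part of the original module): build a tiny
# HDF5 file with the expected /output/data group and list its variable names.
# The file path is a throwaway temporary location.
import os
import tempfile

_demo_path = os.path.join(tempfile.mkdtemp(), "demo.h5")
with h5py.File(_demo_path, "w") as f:
    grp = f.create_group("/output/data")
    grp.create_dataset("pressure", data=[1.0, 2.0])
    grp.create_dataset("temperature", data=[3.0])
print(get_output_names(_demo_path))  # ['pressure', 'temperature']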
|
6607197166c9a63d834398b188e996a811b081ce
| 3,639,889
|
from typing import Union
from typing import Sequence
from typing import Optional
from typing import List
from typing import Tuple
from pathlib import Path
from typing import Mapping
from typing import Any
from types import MappingProxyType

import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from anndata import AnnData
from matplotlib import cm
def gene_trends(
adata: AnnData,
model: _input_model_type,
genes: Union[str, Sequence[str]],
lineages: Optional[Union[str, Sequence[str]]] = None,
backward: bool = False,
data_key: str = "X",
time_key: str = "latent_time",
time_range: Optional[Union[_time_range_type, List[_time_range_type]]] = None,
transpose: bool = False,
callback: _callback_type = None,
conf_int: Union[bool, float] = True,
same_plot: bool = False,
hide_cells: bool = False,
perc: Optional[Union[Tuple[float, float], Sequence[Tuple[float, float]]]] = None,
lineage_cmap: Optional[matplotlib.colors.ListedColormap] = None,
abs_prob_cmap: matplotlib.colors.ListedColormap = cm.viridis,
cell_color: Optional[str] = None,
cell_alpha: float = 0.6,
lineage_alpha: float = 0.2,
size: float = 15,
lw: float = 2,
cbar: bool = True,
margins: float = 0.015,
sharex: Optional[Union[str, bool]] = None,
sharey: Optional[Union[str, bool]] = None,
gene_as_title: Optional[bool] = None,
legend_loc: Optional[str] = "best",
obs_legend_loc: Optional[str] = "best",
ncols: int = 2,
suptitle: Optional[str] = None,
return_models: bool = False,
n_jobs: Optional[int] = 1,
backend: Backend_t = _DEFAULT_BACKEND,
show_progress_bar: bool = True,
figsize: Optional[Tuple[float, float]] = None,
dpi: Optional[int] = None,
save: Optional[Union[str, Path]] = None,
plot_kwargs: Mapping[str, Any] = MappingProxyType({}),
**kwargs: Any,
) -> Optional[_return_model_type]:
"""
Plot gene expression trends along lineages.
    Each lineage is defined via its lineage weights which we compute using :func:`cellrank.tl.lineages`. This
    function accepts any model based on :class:`cellrank.ul.models.BaseModel` to fit gene expression,
where we take the lineage weights into account in the loss function.
Parameters
----------
%(adata)s
%(model)s
%(genes)s
lineages
Names of the lineages to plot. If `None`, plot all lineages.
%(backward)s
data_key
Key in :attr:`anndata.AnnData.layers` or `'X'` for :attr:`anndata.AnnData.X` where the data is stored.
time_key
Key in :attr:`anndata.AnnData.obs` where the pseudotime is stored.
%(time_range)s
This can also be specified on per-lineage basis.
%(gene_symbols)s
transpose
If ``same_plot = True``, group the trends by ``lineages`` instead of ``genes``.
This forces ``hide_cells = True``.
If ``same_plot = False``, show ``lineages`` in rows and ``genes`` in columns.
%(model_callback)s
conf_int
Whether to compute and show confidence interval. If the ``model`` is :class:`cellrank.ul.models.GAMR`,
it can also specify the confidence level, the default is `0.95`.
same_plot
Whether to plot all lineages for each gene in the same plot.
hide_cells
If `True`, hide all cells.
perc
Percentile for colors. Valid values are in interval `[0, 100]`.
This can improve visualization. Can be specified individually for each lineage.
lineage_cmap
Categorical colormap to use when coloring in the lineages. If `None` and ``same_plot``,
use the corresponding colors in :attr:`anndata.AnnData.uns`, otherwise use `'black'`.
abs_prob_cmap
Continuous colormap to use when visualizing the absorption probabilities for each lineage.
Only used when ``same_plot = False``.
cell_color
Key in :attr:`anndata.AnnData.obs` or :attr:`anndata.AnnData.var_names` used for coloring the cells.
cell_alpha
Alpha channel for cells.
lineage_alpha
Alpha channel for lineage confidence intervals.
size
Size of the points.
lw
Line width of the smoothed values.
cbar
Whether to show colorbar. Always shown when percentiles for lineages differ.
Only used when ``same_plot = False``.
margins
Margins around the plot.
sharex
Whether to share x-axis. Valid options are `'row'`, `'col'` or `'none'`.
sharey
Whether to share y-axis. Valid options are `'row'`, `'col'` or `'none'`.
gene_as_title
Whether to show gene names as titles instead on y-axis.
legend_loc
Location of the legend displaying lineages. Only used when `same_plot = True`.
obs_legend_loc
Location of the legend when ``cell_color`` corresponds to a categorical variable.
ncols
Number of columns of the plot when plotting multiple genes. Only used when ``same_plot = True``.
suptitle
Suptitle of the figure.
%(return_models)s
%(parallel)s
%(plotting)s
plot_kwargs
Keyword arguments for :meth:`cellrank.ul.models.BaseModel.plot`.
kwargs
Keyword arguments for :meth:`cellrank.ul.models.BaseModel.prepare`.
Returns
-------
%(plots_or_returns_models)s
"""
if isinstance(genes, str):
genes = [genes]
genes = _unique_order_preserving(genes)
_check_collection(
adata,
genes,
"obs" if data_key == "obs" else "var_names",
use_raw=kwargs.get("use_raw", False),
)
lineage_key = Key.obsm.abs_probs(backward)
if lineage_key not in adata.obsm:
raise KeyError(f"Lineages key `{lineage_key!r}` not found in `adata.obsm`.")
if lineages is None:
lineages = adata.obsm[lineage_key].names
elif isinstance(lineages, str):
lineages = [lineages]
elif all(ln is None for ln in lineages): # no lineage, all the weights are 1
lineages = [None]
cbar = False
logg.debug("All lineages are `None`, setting the weights to `1`")
lineages = _unique_order_preserving(lineages)
if isinstance(time_range, (tuple, float, int, type(None))):
time_range = [time_range] * len(lineages)
elif len(time_range) != len(lineages):
raise ValueError(
f"Expected time ranges to be of length `{len(lineages)}`, found `{len(time_range)}`."
)
kwargs["time_key"] = time_key
kwargs["data_key"] = data_key
kwargs["backward"] = backward
kwargs["conf_int"] = conf_int # prepare doesnt take or need this
models = _create_models(model, genes, lineages)
all_models, models, genes, lineages = _fit_bulk(
models,
_create_callbacks(adata, callback, genes, lineages, **kwargs),
genes,
lineages,
time_range,
return_models=True,
filter_all_failed=False,
parallel_kwargs={
"show_progress_bar": show_progress_bar,
"n_jobs": _get_n_cores(n_jobs, len(genes)),
"backend": _get_backend(models, backend),
},
**kwargs,
)
lineages = sorted(lineages)
tmp = adata.obsm[lineage_key][lineages].colors
if lineage_cmap is None and not transpose:
lineage_cmap = tmp
plot_kwargs = dict(plot_kwargs)
plot_kwargs["obs_legend_loc"] = obs_legend_loc
if transpose:
all_models = pd.DataFrame(all_models).T.to_dict()
models = pd.DataFrame(models).T.to_dict()
genes, lineages = lineages, genes
hide_cells = same_plot or hide_cells
else:
# information overload otherwise
plot_kwargs["lineage_probability"] = False
plot_kwargs["lineage_probability_conf_int"] = False
tmp = pd.DataFrame(models).T.astype(bool)
start_rows = np.argmax(tmp.values, axis=0)
end_rows = tmp.shape[0] - np.argmax(tmp[::-1].values, axis=0) - 1
if same_plot:
gene_as_title = True if gene_as_title is None else gene_as_title
sharex = "all" if sharex is None else sharex
if sharey is None:
sharey = "row" if plot_kwargs.get("lineage_probability", False) else "none"
ncols = len(genes) if ncols >= len(genes) else ncols
nrows = int(np.ceil(len(genes) / ncols))
else:
gene_as_title = False if gene_as_title is None else gene_as_title
sharex = "col" if sharex is None else sharex
if sharey is None:
sharey = (
"row"
if not hide_cells or plot_kwargs.get("lineage_probability", False)
else "none"
)
nrows = len(genes)
ncols = len(lineages)
plot_kwargs = dict(plot_kwargs)
if plot_kwargs.get("xlabel", None) is None:
plot_kwargs["xlabel"] = time_key
fig, axes = plt.subplots(
nrows=nrows,
ncols=ncols,
sharex=sharex,
sharey=sharey,
figsize=(6 * ncols, 4 * nrows) if figsize is None else figsize,
tight_layout=True,
dpi=dpi,
)
axes = np.reshape(axes, (nrows, ncols))
cnt = 0
plot_kwargs["obs_legend_loc"] = None if same_plot else obs_legend_loc
logg.info("Plotting trends")
for row in range(len(axes)):
for col in range(len(axes[row])):
if cnt >= len(genes):
break
gene = genes[cnt]
if (
same_plot
and plot_kwargs.get("lineage_probability", False)
and transpose
):
lpc = adata.obsm[lineage_key][gene].colors[0]
else:
lpc = None
if same_plot:
plot_kwargs["obs_legend_loc"] = (
obs_legend_loc if row == 0 and col == len(axes[0]) - 1 else None
)
_trends_helper(
models,
gene=gene,
lineage_names=lineages,
transpose=transpose,
same_plot=same_plot,
hide_cells=hide_cells,
perc=perc,
lineage_cmap=lineage_cmap,
abs_prob_cmap=abs_prob_cmap,
lineage_probability_color=lpc,
cell_color=cell_color,
alpha=cell_alpha,
lineage_alpha=lineage_alpha,
size=size,
lw=lw,
cbar=cbar,
margins=margins,
sharey=sharey,
gene_as_title=gene_as_title,
legend_loc=legend_loc,
figsize=figsize,
fig=fig,
axes=axes[row, col] if same_plot else axes[cnt],
show_ylabel=col == 0,
show_lineage=same_plot or (cnt == start_rows),
show_xticks_and_label=((row + 1) * ncols + col >= len(genes))
if same_plot
else (cnt == end_rows),
**plot_kwargs,
)
# plot legend on the 1st plot
cnt += 1
if not same_plot:
plot_kwargs["obs_legend_loc"] = None
if same_plot and (col != ncols):
for ax in np.ravel(axes)[cnt:]:
ax.remove()
fig.suptitle(suptitle, y=1.05)
if save is not None:
save_fig(fig, save)
if return_models:
return all_models
|
cda039d6e97d5853b48aaf144cdf4763adac05a8
| 3,639,890
|
def create_hostclass_snapshot_dict(snapshots):
"""
Create a dictionary of hostclass name to a list of snapshots for that hostclass
:param list[Snapshot] snapshots:
:return dict[str, list[Snapshot]]:
"""
snapshot_hostclass_dict = {}
for snap in snapshots:
# build a dict of hostclass+environment to a list of snapshots
# use this dict for the --keep-num option to know how many snapshots are there for each hostclass
if snap.tags and snap.tags.get('hostclass') and snap.tags.get('env'):
key_name = snap.tags.get('hostclass') + '_' + snap.tags.get('env')
hostclass_snapshots = snapshot_hostclass_dict.setdefault(key_name, [])
hostclass_snapshots.append(snap)
return snapshot_hostclass_dict
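# Illustrative usage sketch (not part of the original module): the Snapshot
# objects here are stand-ins built with SimpleNamespace; anything exposing a
# .tags dict behaves the same way.
from types import SimpleNamespace

_demo_snaps = [
    SimpleNamespace(tags={'hostclass': 'mhcweb', 'env': 'prod'}),
    SimpleNamespace(tags={'hostclass': 'mhcweb', 'env': 'prod'}),
    SimpleNamespace(tags={}),  # untagged snapshots are skipped
]
_by_hostclass = create_hostclass_snapshot_dict(_demo_snaps)
print(len(_by_hostclass['mhcweb_prod']))  # 2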
|
dd568eaeb76fee96a876b5a57d963cd2fc8f870e
| 3,639,891
|
from datetime import datetime
def refund_order(id):
"""
    Refund a PayPal order and return its items to inventory.
"""
check_admin()
order = Order.query.filter_by(id=id).first()
payment_id = order.payment_id
try:
payment = Payment.find(payment_id)
except ResourceNotFound:
flash("Payment Not Found", "danger")
return redirect(redirect_url())
except ServerError:
flash("There was a problem with PayPal. Please try again later.", "warning")
return redirect(redirect_url())
sale_id = payment.transactions[0].related_resources[0].sale.id
    # refund the full amount
sale_amount = {
'amount': {
'currency': payment.transactions[0].related_resources[0].sale.amount.currency,
'total': payment.transactions[0].related_resources[0].sale.amount.total }
}
sale = Sale.find(sale_id)
    refund = sale.refund(sale_amount)  # refund the full amount
if refund.success():
flash("Refund [%s] Success" % (refund.id), 'info')
status = OrderStatus.query.filter_by(name='Refunded').first()
order.status_id = status.id
order.cancelled = True
order.updated_at = datetime.now()
order_items = OrderItem.query.filter_by(order_id=order.id).all()
product_items = []
try:
for item in order_items:
product = item.product
product.quantity += item.quantity
db.session.merge(item)
db.session.merge(order)
db.session.flush()
activity = Activity(verb='update', object=order)
db.session.add(activity)
db.session.commit()
        except Exception:
            flash('Items could not be returned to inventory', "warning")
            # XXX: any step in the try block above can fail, which is not good
            # because the refund has already been registered
else:
flash(refund.error['message'], 'warning')
print(refund.error)
return redirect(redirect_url())
|
d0de52c4f69cb933c4344e4f4534934709a9f7cb
| 3,639,892
|
def get_dprime_from_regions(*regions):
"""Get the full normalized linkage disequilibrium (D') matrix for n
regions.
This is a wrapper which determines the correct normalized linkage
function to call based on the number of regions. Only two-dimensional
normalized linkage matrices are currently supported. Where only one
region is given, normalized linkage is calculated for that region against
itself.
:param list regions: List of :ref:`regions <regions>`.
:returns: :ref:`proximity matrix <proximity_matrices>` giving the normalized linkage \
disequilibrium of all possible combinations of windows within the different regions.
"""
regions = prepare_regions(regions)
if len(regions) == 2:
dprime_func = dprime_2d
else:
raise NotImplementedError(
'There is currently no implementation of normalized linkage '
'disequilibrium for more than 2 dimensions')
return dprime_func(*regions)
|
2676c4be712989bf7e1419bf19a83ddf84c0ffec
| 3,639,893
|
def get_nav_class_state(url, request, partial=False):
""" Helper function that just returns the 'active'/'inactive'
link class based on the passed url. """
if partial:
_url = url_for(
controller=request.environ['pylons.routes_dict']['controller'],
action=None,
id=None
)
else:
_url = url_for(
controller=request.environ['pylons.routes_dict']['controller'],
action=request.environ['pylons.routes_dict']['action'],
id=None
)
if url == request.path_info:
return 'active'
elif url.startswith(_url) and partial:
return 'active'
elif url == _url:
return 'active'
else:
return 'inactive'
|
1ec83fc46fa04868449d8ebaf611cee9ff88fcd5
| 3,639,894
|
import htcondor
import os
import unittest
def needs_htcondor(test_item):
"""
Use a decorator before test classes or methods to only run them if the HTCondor Python bindings are installed.
"""
test_item = _mark_test('htcondor', test_item)
try:
htcondor.Collector(os.getenv('TOIL_HTCONDOR_COLLECTOR')).query(constraint='False')
except ImportError:
return unittest.skip("Install the HTCondor Python bindings to include this test.")(test_item)
except IOError:
return unittest.skip("HTCondor must be running to include this test.")(test_item)
except RuntimeError:
return unittest.skip("HTCondor must be installed and configured to include this test.")(test_item)
else:
return test_item
|
de778dfde4362cdc9db402dc3c3fac50a6a59a9c
| 3,639,895
|
from typing import List
from typing import Tuple
import numpy as np
import torch
from PIL import Image
def tile_image(
image: Image.Image, tile_size: Tuple[int, int], overlap: int
) -> Tuple[torch.Tensor, List[Tuple[int, int]]]:
"""Take in an image and tile it into smaller tiles for inference.
Args:
image: The input image to tile.
tile_size: The (width, height) of the tiles.
        overlap: The overlap in pixels between adjacent tiles.
Returns:
        A tensor of the tiles and a list of the (x, y) offsets for the tiles.
        The offsets are needed to keep track of which tiles have targets.
Examples::
>>> tiles, coords = tile_image(Image.new("RGB", (1000, 1000)), (512, 512), 50)
>>> tiles.shape[0]
9
>>> len(coords)
9
>>> tiles.shape[-2:]
torch.Size([512, 512])
"""
tiles, coords = [], []
width, height = image.size
x_step = width if width == tile_size[0] else tile_size[0] - overlap
y_step = height if height == tile_size[1] else tile_size[1] - overlap
for x in range(0, width - overlap, x_step):
# Shift back to extract tiles on the image
if x + tile_size[0] >= width and x != 0:
x = width - tile_size[0]
for y in range(0, height - overlap, y_step):
if y + tile_size[1] >= height and y != 0:
y = height - tile_size[1]
tile = normalize(
np.array(image.crop((x, y, x + tile_size[0], y + tile_size[1])))
)
tiles.append(torch.Tensor(tile))
coords.append((x, y))
# Transpose the images from BHWC -> BCHW
tiles = torch.stack(tiles).permute(0, 3, 1, 2)
return tiles, coords
|
c5c9e09a5a95cdd63f8f2082e4c5a87e339f18ef
| 3,639,896
|
from datetime import datetime
import pandas as pd
def parse(data):
"""
Parses the input of the Santander text file.
The format of the bank statement is as follows:
"From: <date> to <date>"
"Account: <number>"
"Date: <date>"
"Description: <description>"
"Amount: <amount>"
"Balance: <amount>"
<second_transaction_entry>
<nth_transaction_entry>
:param data: A list containing each line of the bank statement.
:return: A pandas DataFrame.
"""
dates = []
descs = []
amounts = []
balances = []
# Skip unnecessary headers
data = data[4:]
# Remove empty lines
data = [d.strip() for d in data]
data = list(filter(None, data))
# Creates sublist for each transaction
data = [data[d:d+4] for d in range(0, len(data), 4)]
# Parsing data into a 2D list
for entry in data:
# Removing field descriptors
for e in entry:
if e.startswith("Date"):
e = e.replace("Date: ", "").strip()
                dates.append(datetime.strptime(e, "%d/%m/%Y"))
if e.startswith("Description"):
descs.append(e.replace("Description: ", "").strip())
if e.startswith("Amount"):
e = e.replace("Amount: ", "").replace(" GBP", "").strip()
amounts.append(float(e))
if e.startswith("Balance"):
e = e.replace("Balance: ", "").replace(" GBP", "").strip()
balances.append(float(e))
# Stores data in a Pandas data container
data = list(zip(dates, balances, amounts, descs))
cols = ["DATES", "BALANCE", "AMOUNT", "DESCRIPTION"]
parsed = pd.DataFrame(data, columns=cols, index=dates)
return parsed
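# Illustrative usage sketch with a made-up two-transaction statement following
# the layout described in the docstring (four header lines, then four lines per
# transaction).
_demo_lines = [
    "From: 01/01/2021 to 31/01/2021\n",
    "Account: 12345678\n",
    "\n",
    "\n",
    "Date: 05/01/2021\n",
    "Description: COFFEE SHOP\n",
    "Amount: -2.50 GBP\n",
    "Balance: 997.50 GBP\n",
    "\n",
    "Date: 10/01/2021\n",
    "Description: SALARY\n",
    "Amount: 1500.00 GBP\n",
    "Balance: 2497.50 GBP\n",
]
print(parse(_demo_lines))  # a DataFrame with DATES, BALANCE, AMOUNT, DESCRIPTION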
|
267e1769553cebcd28f1c3190107712bebafb53a
| 3,639,897
|
import numpy as np
from scipy.integrate import odeint
def solve_duffing(tmax, dt_per_period, t_trans, x0, v0, gamma, delta, omega):
"""Solve the Duffing equation for parameters gamma, delta, omega.
Find the numerical solution to the Duffing equation using a suitable
time grid: tmax is the maximum time (s) to integrate to; t_trans is
the initial time period of transient behaviour until the solution
settles down (if it does) to some kind of periodic motion (these data
points are dropped) and dt_per_period is the number of time samples
(of duration dt) to include per period of the driving motion (frequency
omega).
Returns the time grid, t (after t_trans), position, x, and velocity,
xdot, dt, and step, the number of array points per period of the driving
motion.
"""
# Time point spacings and the time grid
period = 2*np.pi/omega
dt = 2*np.pi/omega / dt_per_period
step = int(period / dt)
t = np.arange(0, tmax, dt)
# Initial conditions: x, xdot
X0 = [x0, v0]
X = odeint(deriv, X0, t, args=(gamma, delta, omega))
idx = int(t_trans / dt)
return t[idx:], X[idx:], dt, step
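# Illustrative usage sketch. The right-hand side below assumes the classic
# double-well Duffing form x'' + delta*x' - x + x**3 = gamma*cos(omega*t); the
# `deriv` actually used alongside this function may differ.
def deriv(X, t, gamma, delta, omega):
    """Assumed Duffing right-hand side: returns (xdot, xdotdot)."""
    x, xdot = X
    xdotdot = -delta * xdot + x - x**3 + gamma * np.cos(omega * t)
    return xdot, xdotdot

t_demo, X_demo, dt_demo, step_demo = solve_duffing(
    tmax=200, dt_per_period=100, t_trans=20,
    x0=0.1, v0=0.0, gamma=0.3, delta=0.2, omega=1.2)
x_demo, xdot_demo = X_demo.T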
|
b1c246d3eb680852de60c7b9f0c55034d617e71a
| 3,639,898
|
def create_prog_assignment_registry():
"""Create the registry for course properties."""
reg = FieldRegistry(
'Prog Assignment Entity', description='Prog Assignment',
extra_schema_dict_values={
'className': 'inputEx-Group new-form-layout'})
# Course level settings.
course_opts = reg.add_sub_registry('prog_assignment', 'Assignment Config')
course_opts.add_property(SchemaField(
'key', 'ID', 'string', editable=False,
extra_schema_dict_values={'className': 'inputEx-Field keyHolder'},
description='Unique Id of the Assignment'))
course_opts.add_property(SchemaField(
'pa_id', 'PA_ID', 'string', editable=False,
extra_schema_dict_values={'className': 'inputEx-Field keyHolder'},
description='Unique id of the test cases in this assignment.'))
course_opts.add_property(SchemaField('parent_unit', 'Parent Unit', 'string', select_data=[]))
course_opts.add_property(
SchemaField('type', 'Type', 'string', editable=False))
course_opts.add_property(
SchemaField('title', 'Title', 'string', optional=False))
course_opts.add_property(
SchemaField('weight', 'Weight', 'number', optional=False))
course_opts.add_property(SchemaField(
content_key('question'), 'Problem Statement', 'html', optional=False,
description=('Problem Statement and description of program, visible'
' to student.'),
extra_schema_dict_values={
'supportCustomTags': tags.CAN_USE_DYNAMIC_TAGS.value,
'className': 'inputEx-Field content'}))
course_opts.add_property(SchemaField(
'html_check_answers', 'Allow "Compile & Run"', 'boolean',
optional=True,
extra_schema_dict_values={
'className': 'inputEx-Field assessment-editor-check-answers'}))
course_opts.add_property(SchemaField(
content_key('evaluator'), 'Program Evaluator', 'string', optional=True,
select_data=[
(eid, eid)
for eid in evaluator.ProgramEvaluatorRegistory.list_ids()]))
course_opts.add_property(SchemaField(
content_key('ignore_presentation_errors'), 'Ignore Presentation Errors',
'boolean', optional=True,
extra_schema_dict_values={
'className': 'inputEx-Field assessment-editor-check-answers'}))
course_opts.add_property(
SchemaField(workflow_key(courses.SUBMISSION_DUE_DATE_KEY),
'Submission Due Date', 'string', optional=True,
description=str(messages.DUE_DATE_FORMAT_DESCRIPTION)))
course_opts.add_property(SchemaField(
content_key('show_sample_solution'),
'Show sample solution after deadline', 'boolean', optional=True,
extra_schema_dict_values={
'className': 'inputEx-Field assessment-editor-check-answers'}))
test_case_opts = FieldRegistry('', '')
test_case_opts.add_property(SchemaField(
'input', 'Input', 'text', optional=True,
extra_schema_dict_values={}))
test_case_opts.add_property(SchemaField(
'output', 'Output', 'text', optional=True,
extra_schema_dict_values={'className': 'inputEx-Field content'}))
test_case_opts.add_property(SchemaField(
'weight', 'Weight', 'number', optional=False,
extra_schema_dict_values={'className': 'inputEx-Field content','value':1}))
public_test_cases = FieldArray(
content_key('public_testcase'), '', item_type=test_case_opts,
extra_schema_dict_values={
'sortable': False,
'listAddLabel': 'Add Public Test Case',
'listRemoveLabel': 'Delete'})
public_tests_reg = course_opts.add_sub_registry(
'public_testcase', title='Public Test Cases')
public_tests_reg.add_property(public_test_cases)
private_test_cases = FieldArray(
content_key('private_testcase'), '', item_type=test_case_opts,
extra_schema_dict_values={
'sortable': False,
'listAddLabel': 'Add Private Test Case',
'listRemoveLabel': 'Delete'})
private_tests_reg = course_opts.add_sub_registry(
'private_testcase', title='Private Test Cases')
private_tests_reg.add_property(private_test_cases)
lang_reg = course_opts.add_sub_registry(
'allowed_languages', title='Allowed Programming Languages')
language_opts = FieldRegistry('', '')
language_opts.add_property(
SchemaField(
'language', 'Programming Language', 'string',
select_data=base.ProgAssignment.PROG_LANG_FILE_MAP.items()))
language_opts.add_property(SchemaField(
'prefixed_code', 'Prefixed Fixed Code', 'text', optional=True,
description=('The uneditable code for the assignment. '
'This will be prepended at the start of user code'),
extra_schema_dict_values={'className': 'inputEx-Field content'}))
language_opts.add_property(SchemaField(
'code_template', 'Template Code', 'text', optional=True,
description=('The default code that is populated on opening ' +
'an assignment.'),
extra_schema_dict_values={'className': 'inputEx-Field content'}))
language_opts.add_property(SchemaField(
'uneditable_code', 'Suffixed Fixed Code', 'text', optional=True,
description=('The uneditable code for the assignment. '
'This will be appended at the end of user code'),
extra_schema_dict_values={'className': 'inputEx-Field content'}))
language_opts.add_property(SchemaField(
'suffixed_invisible_code', 'Invisible Code', 'text', optional=True,
description=('This code will not be visible to the student and will be'
' appended at the very end.'),
extra_schema_dict_values={'className': 'inputEx-Field content'}))
language_opts.add_property(SchemaField(
'sample_solution', 'Sample Solution', 'text',
optional=True,
extra_schema_dict_values={'className': 'inputEx-Field'}))
language_opts.add_property(SchemaField(
'filename', 'Sample Solution Filename', 'string',
optional=True,
extra_schema_dict_values={'className': 'inputEx-Field'}))
allowed_languages = FieldArray(
content_key('allowed_languages'), '',
item_type=language_opts,
extra_schema_dict_values={
'sortable': False,
'listAddLabel': 'Add Language',
'listRemoveLabel': 'Delete',
'minItems': 1})
lang_reg.add_property(allowed_languages)
course_opts.add_property(
SchemaField('is_draft', 'Status', 'boolean',
select_data=[(True, DRAFT_TEXT), (False, PUBLISHED_TEXT)],
extra_schema_dict_values={
'className': 'split-from-main-group'}))
return reg
|
c2f17e812d30abe851ba9cfd18602ca458acec56
| 3,639,899
|