| content (stringlengths 35 to 762k) | sha1 (stringlengths 40) | id (int64 0 to 3.66M) |
|---|---|---|
import pickle
import numpy as np
def load_pickle(indices, image_data):
    """
0: Empty
1: Active
2: Inactive
"""
size = 13
# image_data = "./data/images.pkl"
with open(image_data, "rb") as f:
images = pickle.load(f)
x = []
y = []
n = []
cds = []
for idx in indices:
D_dict = images[idx]
img = D_dict['image']
label = D_dict['label']
row, col = label.shape
length, width = img.shape
img = np.expand_dims(img, axis=-1)
img_r, img_c = 40, 40
for g_r in range(1, row-1):
img_c = 40
for g_c in range(1, col-1):
# Check whether it's empty
if label[g_r][g_c] == 0.0:
pass
else:
l = img_c - size
u = img_r - size
r = img_c + size + 1
d = img_r + size + 1
pt = img[u:d, l:r]
nb = get_neibs_cds(img, l, u)
lb = label[g_r][g_c]
x.append(pt)
y.append(lb)
n.append(nb)
cds.append((img_r, img_c))
img_c += 27
img_r += 27
x = np.array(x)
y = np.array(y)
n = np.array(n)
return x, y, n, cds
|
dff3eeb151c8f32511c8d62d8bc9fa313bc36019
| 3,639,200
|
def summarize_vref_locs(locs:TList[BaseObjLocation]) -> pd.DataFrame:
"""
Return a table with cols (partition, num vrefs)
"""
vrefs_by_partition = group_like(objs=locs, labels=[loc.partition for loc in locs])
partition_sort = sorted(vrefs_by_partition)
return pd.DataFrame({
'Partition': partition_sort,
'Number of vrefs': [len(vrefs_by_partition[k]) for k in partition_sort]
})
|
3894404874004e70ab0cc243af4f645f5cf84582
| 3,639,201
|
def rescale_list_to_range(original, limits):
"""
Linearly rescale values in original list to limits (minimum and maximum).
:example:
>>> rescale_list_to_range([1, 2, 3], (0, 10))
[0.0, 5.0, 10.0]
>>> rescale_list_to_range([1, 2, 3], (-10, 0))
[-10.0, -5.0, 0.0]
>>> rescale_list_to_range([1, 2, 3], (0j, 10j))
[0j, 5j, 10j]
:param original: Original list or list-like to be rescaled.
:type original: list
:param limits: Tuple of two floats, min and max, to constrain the new list
:type limits: tuple
:return: Original list rescaled to fit between min and max
:rtype: list
"""
new_min, new_max = limits[0:2]
old_min, old_max = min(original), max(original)
    # If every value is identical, map each element to the midpoint of the limits.
    return [(new_max + new_min) / 2 for _ in original] if old_min == old_max \
        else [new_max * (v - old_min) / (old_max - old_min) +
              new_min * (old_max - v) / (old_max - old_min) for v in original]
|
bdd38bb24b597648e4ca9045ed133dfe93ad4bd8
| 3,639,202
|
from typing import Optional
from typing import Union
from typing import Mapping
def build_list_request(
filters: Optional[dict[str, str]] = None
) -> Union[IssueListInvalidRequest, IssueListValidRequest]:
"""Create request from filters."""
accepted_filters = ["obj__eq", "state__eq", "title__contains"]
invalid_req = IssueListInvalidRequest()
if filters is not None:
if not isinstance(filters, Mapping):
invalid_req.add_error("filters", "Is not iterable")
return invalid_req
for key, value in filters.items():
if key not in accepted_filters:
invalid_req.add_error("filters", f"Key {key} cannot be used.")
if (key == "obj__eq" and value not in ["pull request", "issue", "all"]) or (
key == "state__eq" and value not in ["all", "open", "closed"]
):
invalid_req.add_error(
"filters", f"Value {value} for key 'obj__eq' cannot be used."
)
if invalid_req.has_errors():
return invalid_req
return IssueListValidRequest(filters=filters)
|
b0fc85921f11ef28071eba8be4ab1a7a4837b56c
| 3,639,203
|
def get_ratings(labeled_df):
"""Returns list of possible ratings."""
return labeled_df.RATING.unique()
|
2b88b1703ad5b5b0a074ed7bc4591f0e88d97f92
| 3,639,204
|
from typing import Dict
def split_edge_cost(
edge_cost: EdgeFunction, to_split: LookupToSplit
) -> Dict[Edge, float]:
"""Assign half the cost of the original edge to each of the split edges.
Args:
edge_cost: Lookup from edges to cost.
to_split: Lookup from original edges to pairs of split edges
(see [lookup_to_split][tspwplib.converter.lookup_to_split]).
Returns:
Lookup from split edges to cost.
Notes:
The cost is cast to a float.
"""
split_cost = {}
for edge, cost in edge_cost.items():
first_split, second_split = to_split[edge]
half_cost = float(cost) / 2.0
split_cost[first_split] = half_cost
split_cost[second_split] = half_cost
return split_cost
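
# Illustrative usage sketch (hypothetical values; plain tuples stand in for the
# Edge type): splitting edge (0, 1) of cost 5 into (0, 2) and (2, 1) gives each
# half a cost of 2.5.
example_cost = {(0, 1): 5}
example_split = {(0, 1): ((0, 2), (2, 1))}
assert split_edge_cost(example_cost, example_split) == {(0, 2): 2.5, (2, 1): 2.5}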
|
8e307f6dfd19d65ec1979fa0eafef05737413b3d
| 3,639,205
|
def get_ants_brain(filepath, metadata, channel=0):
"""Load .nii brain file as ANTs image."""
nib_brain = np.asanyarray(nib.load(filepath).dataobj).astype('uint32')
spacing = [float(metadata.get('micronsPerPixel_XAxis', 0)),
float(metadata.get('micronsPerPixel_YAxis', 0)),
float(metadata.get('micronsPerPixel_ZAxis', 0)),
float(metadata.get('sample_period', 0))]
spacing = [spacing[x] for x in range(4) if metadata['image_dims'][x] > 1]
if len(nib_brain.shape) > 4: # multiple channels
# trim to single channel
return ants.from_numpy(np.squeeze(nib_brain[..., channel]), spacing=spacing)
else:
# return ants.from_numpy(np.squeeze(nib_brain[..., :300]), spacing=spacing) # TESTING
return ants.from_numpy(np.squeeze(nib_brain), spacing=spacing)
|
5011d1f609d818c1769900542bc07b8194a4a10f
| 3,639,206
|
def numpy_max(x):
"""
Returns the maximum of an array.
Deals with text as well.
"""
return numpy_min_max(x, lambda x: x.max(), minmax=True)
|
0b32936cde2e0f6cbebf62016c30e4265aba8b57
| 3,639,207
|
import copy
def get_train_val_test_splits(X, y, max_points, seed, confusion, seed_batch,
split=(2./3, 1./6, 1./6)):
"""Return training, validation, and test splits for X and y.
Args:
X: features
y: targets
max_points: # of points to use when creating splits.
seed: seed for shuffling.
confusion: labeling noise to introduce. 0.1 means randomize 10% of labels.
seed_batch: # of initial datapoints to ensure sufficient class membership.
split: percent splits for train, val, and test.
Returns:
indices: shuffled indices to recreate splits given original input data X.
y_noise: y with noise injected, needed to reproduce results outside of
run_experiments using original data.
"""
np.random.seed(seed)
X_copy = copy.copy(X)
y_copy = copy.copy(y)
# Introduce labeling noise
y_noise = flip_label(y_copy, confusion)
indices = np.arange(len(y))
if max_points is None:
max_points = len(y_noise)
else:
max_points = min(len(y_noise), max_points)
train_split = int(max_points * split[0])
val_split = train_split + int(max_points * split[1])
assert seed_batch <= train_split
# Do this to make sure that the initial batch has examples from all classes
min_shuffle = 3
n_shuffle = 0
y_tmp = y_noise
# Need at least 4 obs of each class for 2 fold CV to work in grid search step
while (any(get_class_counts(y_tmp, y_tmp[0:seed_batch]) < 4)
or n_shuffle < min_shuffle):
np.random.shuffle(indices)
y_tmp = y_noise[indices]
n_shuffle += 1
X_train = X_copy[indices[0:train_split]]
X_val = X_copy[indices[train_split:val_split]]
X_test = X_copy[indices[val_split:max_points]]
y_train = y_noise[indices[0:train_split]]
y_val = y_noise[indices[train_split:val_split]]
y_test = y_noise[indices[val_split:max_points]]
# Make sure that we have enough observations of each class for 2-fold cv
assert all(get_class_counts(y_noise, y_train[0:seed_batch]) >= 4)
# Make sure that returned shuffled indices are correct
assert all(y_noise[indices[0:max_points]] ==
np.concatenate((y_train, y_val, y_test), axis=0))
return (indices[0:max_points], X_train, y_train,
X_val, y_val, X_test, y_test, y_noise)
|
3f76dade9dd012666f29742b3ec3749d9bcfafe2
| 3,639,208
|
def require_apikey(key):
"""
Decorator for view functions and API requests. Requires
that the user pass in the API key for the application.
"""
def _wrapped_func(view_func):
def _decorated_func(*args, **kwargs):
passed_key = request.args.get('key', None)
if passed_key == key:
return view_func(*args, **kwargs)
else:
abort(401)
return _decorated_func
return _wrapped_func
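
# Hypothetical usage sketch: the decorator relies on flask's `request` and
# `abort` being importable in this module; the app, route, and key below are
# illustrative only.
from flask import Flask, request, abort

app = Flask(__name__)

@app.route('/admin/stats')
@require_apikey('my-secret-key')
def admin_stats():
    return 'ok'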
|
9db9be28c18cd84172dce27d27be9bfcc6f7376e
| 3,639,209
|
from math import cos,pi
from numpy import zeros
def gauss_legendre(ordergl,tol=10e-14):
"""
Returns nodal abscissas {x} and weights {A} of
Gauss-Legendre m-point quadrature.
"""
m = ordergl + 1
def legendre(t,m):
p0 = 1.0; p1 = t
for k in range(1,m):
p = ((2.0*k + 1.0)*t*p1 - k*p0)/(1.0 + k )
p0 = p1; p1 = p
dp = m*(p0 - t*p1)/(1.0 - t**2)
return p1,dp
A = zeros(m)
x = zeros(m)
nRoots = (m + 1)// 2 # Number of non-neg. roots
for i in range(nRoots):
t = cos(pi*(i + 0.75)/(m + 0.5)) # Approx. root
for j in range(30):
p,dp = legendre(t,m) # Newton-Raphson
dt = -p/dp; t = t + dt # method
if abs(dt) < tol:
x[i] = t; x[m-i-1] = -t
A[i] = 2.0/(1.0 - t**2)/(dp**2) # Eq.(6.25)
A[m-i-1] = A[i]
break
return x,A
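
# Minimal usage sketch: integrate x**2 over [-1, 1] with a 3-point rule
# (ordergl=2 gives m=3 nodes and weights); the exact value is 2/3.
x_nodes, weights = gauss_legendre(2)
approx = (weights * x_nodes**2).sum()
# approx is ~0.6667, matching the analytic result 2/3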
|
5353373ee59cd559817a737271b4ff89cc031709
| 3,639,210
|
def simple_message(msg, parent=None, title=None):
"""
create a simple message dialog with string msg. Optionally set
the parent widget and dialog title
"""
dialog = gtk.MessageDialog(
parent = None,
type = gtk.MESSAGE_INFO,
buttons = gtk.BUTTONS_OK,
message_format = msg)
if parent is not None:
dialog.set_transient_for(parent)
if title is not None:
dialog.set_title(title)
dialog.show()
dialog.run()
dialog.destroy()
return None
|
c6b021a4345f51f58fdf530441596001843b0506
| 3,639,211
|
def accept(value):
"""Accept header class and method decorator."""
def accept_decorator(t):
set_decor(t, 'header', CaseInsensitiveDict({'Accept': value}))
return t
return accept_decorator
|
f7b392c2b9ab3024e96856cbcda9752a9076ea73
| 3,639,212
|
from pathlib import Path
def screenshot(widget, path=None, dir=None):
"""Save a screenshot of a Qt widget to a PNG file.
By default, the screenshots are saved in `~/.phy/screenshots/`.
Parameters
----------
widget : Qt widget
Any widget to capture (including OpenGL widgets).
path : str or Path
Path to the PNG file.
"""
path = path or screenshot_default_path(widget, dir=dir)
path = Path(path).resolve()
if isinstance(widget, QOpenGLWindow):
# Special call for OpenGL widgets.
widget.grabFramebuffer().save(str(path))
else:
# Generic call for regular Qt widgets.
widget.grab().save(str(path))
logger.info("Saved screenshot to %s.", path)
return path
|
dbb221f25f1b2dbe4b439afda225c452692b24fb
| 3,639,213
|
def xyz_to_rtp(x, y, z):
"""
Convert 1-D Cartesian (x, y, z) coords. to 3-D spherical coords.
(r, theta, phi).
The z-coord. is assumed to be anti-parallel to the r-coord. when
theta = 0.
"""
# First establish 3-D versions of x, y, z
xx, yy, zz = np.meshgrid(x, y, z, indexing='ij')
# Calculate 3-D spherical coordinate vectors.
rr = np.sqrt(xx**2 + yy**2 + zz**2)
tt = np.arccos(zz / rr)
pp = np.arccos(xx / np.sqrt(xx**2 + yy**2))
return rr, tt, pp
|
db8fbcb50cde2c529fe94e546b0caaea79327df6
| 3,639,214
|
import re
def irccat_targets(bot, targets):
"""
Go through our potential targets and place them in an array so we can
easily loop through them when sending messages.
"""
result = []
for s in targets.split(','):
if re.search('^@', s):
result.append(re.sub('^@', '', s))
elif re.search('^#', s) and s in bot.config.core.channels:
result.append(s)
elif re.search('^#\*$', s):
for c in bot.config.core.channels:
result.append(c)
return result
|
b7dce597fc301930aae665c338a9e9ada5f2be7e
| 3,639,215
|
import struct
def _watchos_stub_partial_impl(
*,
ctx,
actions,
binary_artifact,
label_name,
watch_application):
"""Implementation for the watchOS stub processing partial."""
bundle_files = []
providers = []
if binary_artifact:
# Create intermediate file with proper name for the binary.
intermediate_file = intermediates.file(
actions,
label_name,
"WK",
)
actions.symlink(
target_file = binary_artifact,
output = intermediate_file,
)
bundle_files.append(
(processor.location.bundle, "_WatchKitStub", depset([intermediate_file])),
)
providers.append(_AppleWatchosStubInfo(binary = intermediate_file))
if watch_application:
binary_artifact = watch_application[_AppleWatchosStubInfo].binary
bundle_files.append(
(processor.location.archive, "WatchKitSupport2", depset([binary_artifact])),
)
return struct(
bundle_files = bundle_files,
providers = providers,
)
|
dd4342893eb933572262a3b3bd242112c1737b3b
| 3,639,216
|
def contour_area_filter(image, kernel=(9,9), resize=1.0, uint_mode="scale",
min_area=100, min_area_factor=3, factor=3, **kwargs):
"""
Checks that a contour can be returned for two thresholds of the image, a
mean threshold and an otsu threshold.
Parameters
----------
image : np.ndarray
Image to check for contours.
kernel : tuple, optional
Kernel to use when gaussian blurring.
resize : float, optional
How much to resize the image by before doing any calculations.
uint_mode : str, optional
Conversion mode to use when converting to uint8.
min_area : float, optional
Minimum area of the otsu thresholded beam.
factor : float
Factor to pass to the mean threshold.
min_area_factor : float
The amount to scale down the area for comparison with the mean threshold
contour area.
Returns
-------
passes : bool
True if the image passes the check, False if it does not
"""
image_prep = uint_resize_gauss(image, mode=uint_mode, kernel=kernel,
fx=resize, fy=resize)
# Try to get contours of the image
try:
_, area_mean = get_largest_contour(
image_prep, thresh_mode="mean", factor=factor, **kwargs)
_, area_otsu = get_largest_contour(
image_prep, thresh_mode="otsu", **kwargs)
# Do the check for area
if area_otsu < min_area or area_mean < min_area/min_area_factor:
logger.debug("Filter - Contour area, {0} is below the min area, "
"{1}.".format(area_otsu, min_area))
return False
return True
except NoContoursDetected:
logger.debug("Filter - No contours found on image.")
return False
|
8f0a21210b714f85142a6f72e5d778cee8baf7ba
| 3,639,217
|
def catMullRomFit(p, nPoints=100):
"""
    Return a smoothed path from a list of QPointF objects p, interpolating points if needed.
This function takes a set of points and fits a CatMullRom Spline to the data. It then
interpolates the set of points and outputs a smoothed path with the desired number of points
on it.
p : the path to be smoothed
nPoints : the desired number of points in the smoothed path
"""
N = len(p)
#there is no re interpolation required
if N == nPoints:
return p
interp = []
dj = 1.0 / nPoints
for j in range(0, nPoints):
di = j * dj * (N - 1)
i = int(di)
x = di - i
xx = x * x
xxx = x * x * x
c0 = 2.0 * xxx - 3.0 * xx + 1.0
c1 = xxx - 2.0 * xx + x
c2 = -2.0 * xxx + 3.0 * xx
c3 = xxx - xx
p0 = p[i]
p1 = p0
p2 = p0
p3 = p0
if i + 1 < N:
p1 = p[i + 1]
if i - 1 > -1:
p2 = p[i - 1]
if i + 2 < N:
p3 = p[i + 2]
m0 = toVector(p1 - p2) * 0.5
m1 = toVector(p3 - p0) * 0.5
px = (c0 * toVector(p0)) + (c1 * m0) + (c2 * toVector(p1)) + (c3 * m1)
interp.append(toPoint(px))
# pop back the last one
interp.pop()
# make sure the last point in the original polygon is still the last one
interp.append(p[-1])
return interp
|
fb63e67b2bf9fd78e04436cd7f12d214bb6904c7
| 3,639,218
|
def pdf_from_ppf(quantiles, ppfs, edges):
"""
Reconstruct pdf from ppf and evaluate at desired points.
Parameters
----------
quantiles: numpy.ndarray, shape=(L)
L quantiles for which the ppf_values are known
ppfs: numpy.ndarray, shape=(1,...,L)
Corresponding ppf-values for all quantiles
edges: numpy.ndarray, shape=(M+1)
Binning of the desired binned pdf
Returns
-------
pdf_values: numpy.ndarray, shape=(1,...,M)
Recomputed, binned pdf
"""
# recalculate pdf values through numerical differentiation
pdf_interpolant = np.nan_to_num(np.diff(quantiles) / np.diff(ppfs, axis=-1))
    # Unconventional solution to make this usable with np.apply_along_axis for readability.
    # The ppf bin mid-points are used because the pdf values are obtained by
    # differentiating the ppf values.
xyconcat = np.concatenate(
(ppfs[..., :-1] + np.diff(ppfs) / 2, pdf_interpolant), axis=-1
)
def interpolate_ppf(xy):
ppf = xy[:len(xy) // 2]
pdf = xy[len(xy) // 2:]
interpolate = interp1d(ppf, pdf, bounds_error=False, fill_value=(0, 0))
result = np.nan_to_num(interpolate(edges[:-1]))
return np.diff(edges) * result
# Interpolate pdf samples and evaluate at bin edges, weight with the bin_width to estimate
# correct bin height via the midpoint rule formulation of the trapezoidal rule
pdf_values = np.apply_along_axis(interpolate_ppf, -1, xyconcat)
return pdf_values
|
52c3d19ee915d1deeb99f39ce036deca59c536b3
| 3,639,219
|
import types
import re
def get_arg_text(ob):
"""Get a string describing the arguments for the given object"""
arg_text = ""
if ob is not None:
arg_offset = 0
if type(ob) in (types.ClassType, types.TypeType):
# Look for the highest __init__ in the class chain.
fob = _find_constructor(ob)
if fob is None:
fob = lambda: None
else:
arg_offset = 1
elif type(ob)==types.MethodType:
# bit of a hack for methods - turn it into a function
# but we drop the "self" param.
fob = ob.im_func
arg_offset = 1
else:
fob = ob
# Try to build one for Python defined functions
if type(fob) in [types.FunctionType, types.LambdaType]:
argcount = fob.func_code.co_argcount
real_args = fob.func_code.co_varnames[arg_offset:argcount]
defaults = fob.func_defaults or []
defaults = list(map(lambda name: "=%s" % repr(name), defaults))
defaults = [""] * (len(real_args) - len(defaults)) + defaults
items = map(lambda arg, dflt: arg + dflt, real_args, defaults)
if fob.func_code.co_flags & 0x4:
items.append("...")
if fob.func_code.co_flags & 0x8:
items.append("***")
arg_text = ", ".join(items)
arg_text = "(%s)" % re.sub("\.\d+", "<tuple>", arg_text)
# See if we can use the docstring
doc = getattr(ob, "__doc__", "")
if doc:
doc = doc.lstrip()
pos = doc.find("\n")
if pos < 0 or pos > 70:
pos = 70
if arg_text:
arg_text += "\n"
arg_text += doc[:pos]
return arg_text
|
5dc6d262dfe7e10a5ba93fd26c49a0d6bae3bb37
| 3,639,220
|
import random
def create_ses_weights(d, ses_col, covs, p_high_ses, use_propensity_scores):
"""
Used for training preferentially on high or low SES people. If use_propensity_scores is True, uses propensity score matching on covs.
Note: this samples from individual images, not from individual people. I think this is okay as long as we're clear about what's being done. If p_high_ses = 0 or 1, both sampling methods are equivalent. One reason to sample images rather than people is that if you use propensity score weighting, covs may change for people over time.
"""
assert p_high_ses >= 0 and p_high_ses <= 1
high_ses_idxs = (d[ses_col] == True).values
n_high_ses = high_ses_idxs.sum()
n_low_ses = len(d) - n_high_ses
assert pd.isnull(d[ses_col]).sum() == 0
n_to_sample = min(n_high_ses, n_low_ses) # want to make sure train set size doesn't change as we change p_high_ses from 0 to 1 so can't have a train set size larger than either n_high_ses or n_low_ses
n_high_ses_to_sample = int(p_high_ses * n_to_sample)
n_low_ses_to_sample = n_to_sample - n_high_ses_to_sample
all_idxs = np.arange(len(d))
high_ses_samples = np.array(random.sample(list(all_idxs[high_ses_idxs]), n_high_ses_to_sample))
low_ses_samples = np.array(random.sample(list(all_idxs[~high_ses_idxs]), n_low_ses_to_sample))
print("%i high SES samples and %i low SES samples drawn with p_high_ses=%2.3f" %
(len(high_ses_samples), len(low_ses_samples), p_high_ses))
# create weights.
weights = np.zeros(len(d))
if len(high_ses_samples) > 0:
weights[high_ses_samples] = 1.
if len(low_ses_samples) > 0:
weights[low_ses_samples] = 1.
if not use_propensity_scores:
assert covs is None
weights = weights / weights.sum()
return weights
else:
assert covs is not None
# fit probability model
propensity_model = sm.Logit.from_formula('%s ~ %s' % (ses_col, '+'.join(covs)), data=d).fit()
print("Fit propensity model")
print(propensity_model.summary())
# compute inverse propensity weights.
# "A subject's weight is equal to the inverse of the probability of receiving the treatment that the subject actually received"
# The treatment here is whether they are high SES,
# and we are matching them on the other covariates.
high_ses_propensity_scores = propensity_model.predict(d).values
high_ses_weights = 1 / high_ses_propensity_scores
low_ses_weights = 1 / (1 - high_ses_propensity_scores)
propensity_weights = np.zeros(len(d))
propensity_weights[high_ses_idxs] = high_ses_weights[high_ses_idxs]
propensity_weights[~high_ses_idxs] = low_ses_weights[~high_ses_idxs]
assert np.isnan(propensity_weights).sum() == 0
        # multiply indicator vector by propensity weights.
weights = weights * propensity_weights
# normalize weights so that high and low SES sum to the right things.
print(n_high_ses_to_sample, n_low_ses_to_sample)
if n_high_ses_to_sample > 0:
weights[high_ses_idxs] = n_high_ses_to_sample * weights[high_ses_idxs] / weights[high_ses_idxs].sum()
if n_low_ses_to_sample > 0:
weights[~high_ses_idxs] = n_low_ses_to_sample * weights[~high_ses_idxs] / weights[~high_ses_idxs].sum()
assert np.isnan(weights).sum() == 0
# normalize whole vector, just to keep things clean
weights = weights / weights.sum()
return weights
|
de5b401ef1419d61664c565f5572d3dd80c6fdfb
| 3,639,221
|
import os
def vectors_intersect(vector_1_uri, vector_2_uri):
"""Take in two OGR vectors (we're assuming that they're in the same
    projection) and test to see if their geometries intersect. Return True if
so, False if not.
vector_1_uri - a URI to an OGR vector
vector_2_uri - a URI to an OGR vector
Returns True or False"""
utils.assert_files_exist([vector_1_uri, vector_2_uri])
LOGGER.debug('Opening vector %s', vector_1_uri)
basename_1 = os.path.basename(vector_1_uri)
vector_1 = ogr.Open(vector_1_uri)
layer_1 = vector_1.GetLayer()
LOGGER.debug('Opening vector %s', vector_2_uri)
basename_2 = os.path.basename(vector_2_uri)
vector_2 = ogr.Open(vector_2_uri)
layer_2 = vector_2.GetLayer()
for feature_1 in layer_1:
prep_polygon = offsets.build_shapely_polygon(feature_1, prep=True)
for feature_2 in layer_2:
polygon = offsets.build_shapely_polygon(feature_2)
if prep_polygon.intersects(polygon):
fid_1 = feature_1.GetFID()
fid_2 = feature_2.GetFID()
LOGGER.debug('%s (fid %s) and %s (fid %s) intersect',
basename_1, fid_1, basename_2, fid_2)
return True
layer_2.ResetReading()
LOGGER.debug('No Features intersect.')
return False
|
dbbf0bbfd91e8641ddf43b1d9eea4f732e9ade7a
| 3,639,222
|
def decoder_g(zxs):
"""Define decoder."""
with tf.variable_scope('decoder', reuse=tf.AUTO_REUSE):
hidden_layer = zxs
for i, n_hidden_units in enumerate(FLAGS.n_hidden_units_g):
hidden_layer = tf.layers.dense(
hidden_layer,
n_hidden_units,
activation=tf.nn.relu,
name='decoder_{}'.format(i),
reuse=tf.AUTO_REUSE,
kernel_initializer='normal')
i = len(FLAGS.n_hidden_units_g)
y_hat = tf.layers.dense(
hidden_layer,
FLAGS.dim_y,
name='decoder_{}'.format(i),
reuse=tf.AUTO_REUSE,
kernel_initializer='normal')
return y_hat
|
6974624dccecae7bbb5f650f0ebe0c819df4aa67
| 3,639,223
|
def make_evinfo_str(json_str):
"""
    [Method summary]
    Formats the event information into a string for DB registration
"""
evinfo_str = ''
for v in json_str[EventsRequestCommon.KEY_EVENTINFO]:
if evinfo_str:
evinfo_str += ','
if not isinstance(v, list):
evinfo_str += '"%s"' % (v)
else:
temp_val = '['
for i, val in enumerate(v):
if i > 0:
temp_val += ','
temp_val += '"%s"' % (val)
temp_val += ']'
evinfo_str += '%s' % (temp_val)
return evinfo_str
|
6717652f1adf227b03864f8b4b4268524eb7cbc4
| 3,639,224
|
def parse_cisa_data(parse_file: str) -> object:
"""Parse the CISA Known Exploited Vulnerabilities file and create a new dataframe."""
inform("Parsing results")
# Now parse CSV using pandas, GUID is CVE-ID
new_dataframe = pd.read_csv(parse_file, parse_dates=['dueDate', 'dateAdded'])
# extend dataframe
new_dataframe['AssetsVulnerableCount'] = int(0)
    new_dataframe['AssetsVulnerableCount'] = pd.to_numeric(new_dataframe['AssetsVulnerableCount'])
    # force these fields to be dtype objects
    new_dataframe['AssetsVulnerable'] = pd.NaT
    new_dataframe['AssetsVulnerable'] = new_dataframe['AssetsVulnerable'].astype('object')
return new_dataframe
|
7bc95a4d60b869395f20d8619f80b116156de4ad
| 3,639,225
|
def camera():
"""Video streaming home page."""
return render_template('index.html')
|
75c501daa3d9a8b0090a0e9174b29a0b848057be
| 3,639,226
|
import os
import shutil
def new_doc():
"""Creating a new document."""
if request.method == 'GET' or request.form.get('act') != 'create':
return render_template('new.html', title='New document', permalink=url_for('.new_doc'))
else:
slug = request.form['slug'].strip()
src = os.path.join(app.config['DOCPATH'], 'template', 'template.tex')
dstdir = os.path.join(app.config['DOCPATH'], slug)
dst = os.path.join(dstdir, slug + '.tex')
try:
os.mkdir(dstdir)
shutil.copy(src, dst)
except:
flash('Failed to create new document.', 'error')
return redirect(url_for('.new_doc'), 302)
reload(slug)
return redirect(url_for('.doc', slug=slug), 302)
|
f961076dda04d0a0d6c0c9f11dc8d29b373183a5
| 3,639,227
|
from tqdm import tqdm
def fit_alternative(model, dataloader, optimizer, train_data, labelled=True):
"""
fit method using alternative loss, executes one epoch
:param model: VAE model to train
    :param dataloader: input dataloader to fetch batches
:param optimizer: which optimizer to utilize
:param train_data: useful for plotting completion bar
:param labelled: to know if the data is composed of (data, target) or only data
:return: train loss
"""
model.train() # set in train mode
running_loss, running_kld_loss, running_rec_loss = 0.0, 0.0, 0.0 # set up losses to accumulate over
for i, data in tqdm(enumerate(dataloader), total=int(len(train_data) / dataloader.batch_size)):
data = data[0] if labelled else data # get the train batch
data = data.view(data.size(0), -1) # unroll
optimizer.zero_grad() # set gradient to zero
mu_rec, mu_latent, logvar_latent = model(data) # feedforward
loss = elbo_loss_alternative(mu_rec, model.log_var_rec, mu_latent, logvar_latent, data) # get loss value
# update losses
running_kld_loss += loss[0].item()
running_rec_loss += loss[1].item()
running_loss += loss[2].item()
loss[2].backward() # set up gradient with total loss
optimizer.step() # backprop
# set up return variable for all three losses
train_loss = [running_kld_loss / len(dataloader.dataset),
running_rec_loss / len(dataloader.dataset),
running_loss / len(dataloader.dataset)]
return train_loss
|
3889d2d72ce71095d3016427c87795ef65aa9fa4
| 3,639,228
|
def FlagOverrider(**flag_kwargs):
"""A Helpful decorator which can switch the flag values temporarily."""
return flagsaver.flagsaver(**flag_kwargs)
|
39a39b1884c246ae45d8166c2eae9bb68dea2c70
| 3,639,229
|
def cli(ctx, path, max_depth=1):
"""List files available from a remote repository for a local path as a tree
Output:
None
"""
return ctx.gi.file.tree(path, max_depth=max_depth)
|
4be4fdffce7862332aa27a40ee684aae31fd67b5
| 3,639,230
|
def warp_p(binary_img):
"""
Warps binary_image using hard coded source and destination
vertices. Returns warped binary image, warp matrix and
inverse matrix.
"""
src = np.float32([[580, 450],
[180, 720],
[1120, 720],
[700, 450]])
dst = np.float32([[350, 0],
[350, 720],
[900, 720],
[900, 0]])
M = cv2.getPerspectiveTransform(src, dst)
Minv = cv2.getPerspectiveTransform(dst, src)
binary_warped = cv2.warpPerspective(binary_img, M, (binary_img.shape[1], binary_img.shape[0]), flags=cv2.INTER_LINEAR)
return binary_warped, M, Minv
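
# Minimal usage sketch (assumes cv2 and numpy as np are available, as the
# function above already requires): warp a blank 1280x720 binary image with the
# hard-coded source/destination vertices.
dummy_binary = np.zeros((720, 1280), dtype=np.uint8)
warped, M_example, Minv_example = warp_p(dummy_binary)
# warped has the same shape as the input; Minv_example undoes M_example (up to scale)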
|
ea0ca98138ff9fbf52201186270c3d2561f57ec2
| 3,639,231
|
def _get_xml_sps(document):
"""
Download XML file and instantiate a `SPS_Package`
Parameters
----------
document : opac_schema.v1.models.Article
Returns
-------
dsm.data.sps_package.SPS_Package
"""
# download XML file
content = reqs.requests_get_content(document.xml)
xml_sps = SPS_Package(content)
# change assets uri
xml_sps.remote_to_local(xml_sps.package_name)
return xml_sps
|
908ceb96ca2b524899435f269e60ddd9b7db3f0c
| 3,639,232
|
def plot_confusion_matrix(ax, y_true, y_pred, classes,
normalize=False,
title=None,
cmap=plt.cm.Blues):
"""
From scikit-learn example:
https://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if not title:
if normalize:
title = 'Normalized confusion matrix'
else:
title = 'Confusion matrix, without normalization'
# Compute confusion matrix
cm = confusion_matrix(y_true, y_pred)
# Only use the labels that appear in the data
# classes = classes[unique_labels(y_true, y_pred)]
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
# print("Normalized confusion matrix")
# else:
# print('Confusion matrix, without normalization')
# print(cm)
# fig, ax = plt.subplots()
im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
ax.figure.colorbar(im, ax=ax)
# We want to show all ticks...
ax.set(xticks=np.arange(cm.shape[1]),
yticks=np.arange(cm.shape[0]),
# ... and label them with the respective list entries
xticklabels=classes, yticklabels=classes,
title=title,
ylabel='True label',
xlabel='Predicted label')
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
# Loop over data dimensions and create text annotations.
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
ax.text(j, i, format(cm[i, j], fmt),
ha="center", va="center",
color="white" if cm[i, j] > thresh else "black")
# fig.tight_layout()
return ax
|
ba88d9f96f9b9da92987fa3df4d38270162fc903
| 3,639,233
|
def _in_docker():
""" Returns: True if running in a Docker container, else False """
with open('/proc/1/cgroup', 'rt') as ifh:
if 'docker' in ifh.read():
print('in docker, skipping benchmark')
return True
return False
|
4a0fbd26c5d52c5fe282b82bc4fe14986f8aef4f
| 3,639,234
|
def asPosition(flags):
""" Translate a directional flag from an actions into a tuple indicating
the targeted tile. If no directional flag is found in the inputs,
returns (0, 0).
"""
if flags & NORTH:
return 0, 1
elif flags & SOUTH:
return 0, -1
elif flags & EAST:
return 1, 0
elif flags & WEAST:
return -1, 0
return 0, 0
|
9e1b2957b1cd8b71033b644684046e71e85f5105
| 3,639,235
|
from nibabel import load
import numpy as np
def pickvol(filenames, fileidx, which):
"""Retrieve index of named volume
Parameters
----------
filenames: list of 4D file names
fileidx: which 4D file to look at
which: 'first' or 'middle'
Returns
-------
idx: index of first or middle volume
"""
if which.lower() == 'first':
idx = 0
elif which.lower() == 'middle':
idx = int(np.ceil(load(filenames[fileidx]).get_shape()[3] / 2))
else:
raise Exception('unknown value for volume selection : %s' % which)
return idx
|
7090ab35959289c221b6baab0ba1719f0c518ef4
| 3,639,236
|
def merge(d, **kwargs):
"""Recursively merges given kwargs int to a
dict - only if the values are not None.
"""
for key, value in kwargs.items():
if isinstance(value, dict):
d[key] = merge(d.get(key, {}), **value)
elif value is not None:
d[key] = value
return d
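
# Minimal usage sketch: nested dicts are merged recursively and None values are
# skipped rather than overwriting existing entries.
base = {'db': {'host': 'localhost', 'port': 5432}, 'debug': False}
merged = merge(base, db={'port': 5433}, debug=None)
assert merged == {'db': {'host': 'localhost', 'port': 5433}, 'debug': False}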
|
168cc66cce0a04b086a17089ebcadc16fbb4c1d0
| 3,639,237
|
def init_config_flow(hass):
"""Init a configuration flow."""
flow = config_flow.VelbusConfigFlow()
flow.hass = hass
return flow
|
6eccc23ceca6b08268701486ed2e79c47c220e13
| 3,639,238
|
from typing import Dict
import datetime
from typing import FrozenSet
def read_service_ids_by_date(path: str) -> Dict[datetime.date, FrozenSet[str]]:
"""Find all service identifiers by date"""
feed = load_raw_feed(path)
return _service_ids_by_date(feed)
|
60e39ccb517f00243db97835b223e894c9f64540
| 3,639,239
|
def get_all_services(org_id: str) -> tuple:
"""
**public_services_api**
    returns all services governed by organization_id
:param org_id:
:return:
"""
return services_view.return_services(organization_id=org_id)
|
d779e7312d363ad507c994c38ba844912bf49e9c
| 3,639,240
|
def get_initializer(initializer_range=0.02):
"""Creates a `tf.initializers.truncated_normal` with the given range.
Args:
initializer_range: float, initializer range for stddev.
Returns:
TruncatedNormal initializer with stddev = `initializer_range`.
"""
return tf.keras.initializers.TruncatedNormal(stddev=initializer_range)
|
fa6aca01bd96c6cb97af5e68f4221d285e482612
| 3,639,241
|
import math
def findh_s0(h_max, h_min, q):
    """
    Finds the cable tension numerically (using the bisection method);
    upper and lower bounds for the bisection must be provided.
    :param h_max: upper bound for the tension search
    :param h_min: lower bound for the tension search
    :param q: total load on the cable [N/m]
    :return: h - the tension force, i - the number of iterations needed
    """
    i = 1
    h = (h_min + h_max) / 2
    print("Initial H = " + str(h))
    f_m = calculatefm(h, q)
    while (math.fabs(f_m - f_0_m) >= 1 * 10 ** -8):
        if f_m < f_0_m:
            h_max = h
        else:
            h_min = h
        # print("iteration #" + str(i) + " h_max = " + str(h_max) + " h_min = "
        #       + str(h_min) + " new H: " + str(h) + " f_m = " + str(f_m)
        #       + " target: " + str(f_0_m))
        h = (h_min + h_max) / 2
        f_m = calculatefm(h, q)
        i += 1
    return h, i
|
28926742c6d786ffa47a084a318f54fafb3da98c
| 3,639,242
|
def velocity_dependent_covariance(vel):
"""
This function computes the noise in the velocity channel.
The noise generated is gaussian centered around 0, with sd = a + b*v;
where a = 0.01; b = 0.05 (Vul, Frank, Tenenbaum, Alvarez 2009)
:param vel:
:return: covariance
"""
cov = []
    for v in vel:
        # per the docstring, sd = a + b*|v| for each individual velocity
        ans = 0.01 + 0.05 * np.linalg.norm(v)
cov.append(ans)
cov = np.array(cov)
return cov
|
4a1bb6c8f6c5956585bd6f5a09f4d80ee397bbe5
| 3,639,243
|
import os
def get_db_path():
"""Return the path to Dropbox's info.json file with user-settings."""
if os.name == 'posix': # OSX-specific
home_path = os.path.expanduser('~')
dbox_db_path = os.path.join(home_path, '.dropbox', 'info.json')
elif os.name == 'nt': # Windows-specific
home_path = os.getenv('LOCALAPPDATA')
dbox_db_path = os.path.join(home_path, 'Dropbox', 'info.json')
else:
raise NotImplementedError("Unknown Platform: {0}".format(os.name))
return dbox_db_path
|
04ee901faea224dde382a11b433f913557c7cb21
| 3,639,244
|
def msd_Correlation(allX):
"""Autocorrelation part of MSD."""
M = allX.shape[0]
    # With MKL-backed numpy (e.g. the Intel Python distribution), the FFT won't be
    # accelerated unless the transform axis is 0 or -1.
    # Perform the FFT along the n_frames axis:
    # (n_frames, n_particles, n_dim) -> (n_frames_ft, n_particles, n_dim)
allFX = np.fft.rfft(allX, axis=0, n=M*2)
# sum over n_dim axis
corr = np.sum(abs(allFX)**2, axis=(1, -1)) # (n_frames_ft,)
    # IFFT over the n_frames_ft axis (axis=0); the whole operation is equivalent to
    # computing fx = fft(_.T[0]), fy = ... for _ in
    # allX.swapaxes(0, 1) -> (n_particles, n_frames, n_dim)
    # and then summing fx, fy, fz, ..., f_ndim.
    # rfft is used since the inputs are real, which is more efficient.
    return np.fft.irfft(corr, n=2 * M)[:M].real/np.arange(M, 0, -1)
    # (n_frames,); the n_particles dimension has been summed out
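
# Minimal usage sketch (hypothetical data; assumes numpy as np, which the
# function above already requires): a random walk of 10 particles in 3D over
# 1000 frames, laid out as (n_frames, n_particles, n_dim). The result is the
# FFT-based autocorrelation term used in MSD calculations, summed over particles
# and dimensions and normalised by the number of frame pairs at each lag.
example_traj = np.cumsum(np.random.randn(1000, 10, 3), axis=0)
s2 = msd_Correlation(example_traj)  # shape (1000,)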
|
c212e216d32814f70ab861d066c8000cf7e8e238
| 3,639,245
|
import math
def convert_table_value(fuel_usage_value):
"""
The graph is a little skewed, so this prepares the data for that.
0 = 0
1 = 25%
2 = 50%
3 = 100%
4 = 200%
5 = 400%
6 = 800%
7 = 1600% (not shown)
Intermediate values scale between those values. (5.5 is 600%)
"""
if fuel_usage_value < 25:
return 0.04 * fuel_usage_value
else:
return math.log((fuel_usage_value / 12.5), 2)
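
# Worked examples of the mapping described in the docstring:
# 25% -> 1, 100% -> log2(100/12.5) = log2(8) = 3, 400% -> log2(32) = 5.
assert convert_table_value(25) == 1.0
assert abs(convert_table_value(100) - 3.0) < 1e-9
assert abs(convert_table_value(400) - 5.0) < 1e-9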
|
15e4deedb4809eddd830f7d586b63075b71568ef
| 3,639,246
|
import os
import TestWin
def FindMSBuildInstallation(msvs_version = 'auto'):
"""Returns path to MSBuild for msvs_version or latest available.
Looks in the registry to find install location of MSBuild.
MSBuild before v4.0 will not build c++ projects, so only use newer versions.
"""
registry = TestWin.Registry()
msvs_to_msbuild = {
'2013': r'12.0',
'2012': r'4.0', # Really v4.0.30319 which comes with .NET 4.5.
'2010': r'4.0'}
msbuild_basekey = r'HKLM\SOFTWARE\Microsoft\MSBuild\ToolsVersions'
if not registry.KeyExists(msbuild_basekey):
print 'Error: could not find MSBuild base registry entry'
return None
msbuild_version = None
if msvs_version in msvs_to_msbuild:
msbuild_test_version = msvs_to_msbuild[msvs_version]
if registry.KeyExists(msbuild_basekey + '\\' + msbuild_test_version):
msbuild_version = msbuild_test_version
else:
print ('Warning: Environment variable GYP_MSVS_VERSION specifies "%s" '
'but corresponding MSBuild "%s" was not found.' %
             (msvs_version, msbuild_test_version))
if not msbuild_version:
for msvs_version in sorted(msvs_to_msbuild, reverse=True):
msbuild_test_version = msvs_to_msbuild[msvs_version]
if registry.KeyExists(msbuild_basekey + '\\' + msbuild_test_version):
msbuild_version = msbuild_test_version
break
if not msbuild_version:
print 'Error: could not find MSBuild registry entry'
return None
msbuild_path = registry.GetValue(msbuild_basekey + '\\' + msbuild_version,
'MSBuildToolsPath')
if not msbuild_path:
print 'Error: could not get MSBuild registry entry value'
return None
return os.path.join(msbuild_path, 'MSBuild.exe')
|
daf5151c08e52b71110075b3dd59071a3a6f124f
| 3,639,247
|
def create_toc_xhtml(metadata: WorkMetadata, spine: list[Matter]) -> str:
"""
Load the default `toc.xhtml` file, and generate the required terms for the creative work. Return xhtml as a string.
Parameters
----------
metadata: WorkMetadata
All the terms for updating the work, not all compulsory
spine: list of Matter
Spine and guide list of Matter, with `dedication` at 0, if present
Returns
-------
str: xhtml response for `toc.xhtml` as a str.
"""
with open(DATA_PATH / "xhtml" / DEFAULT_TOC_XHTML, "r+", encoding="utf-8") as toc_file:
toc_xml = toc_file.read()
# Table of Contents
toc_xhtml = ""
chapter = 1
for matter in spine:
if matter.content == FrontMatter.dedication:
toc_xhtml += F'\t\t\t\t<li>\n\t\t\t\t\t<a href="text/dedication.xhtml">{matter.title}</a>\n\t\t\t\t</li>\n'
if matter.partition == MatterPartition.body:
toc_xhtml += F'\t\t\t\t<li>\n\t\t\t\t\t<a href="text/chapter-{chapter}.xhtml">{matter.title}</a>\n\t\t\t\t</li>\n'
chapter += 1
toc_xml = toc_xml.replace('\t\t\t\t<li>\n\t\t\t\t\t<a href="text/chapter-1.xhtml"></a>\n\t\t\t\t</li>\n',
toc_xhtml)
# Landmark Title
toc_xml = toc_xml.replace('<a href="text/chapter-1.xhtml" epub:type="bodymatter z3998:fiction">WORK_TITLE</a>',
F'<a href="text/chapter-1.xhtml" epub:type="bodymatter z3998:fiction">{metadata.title}</a>')
return toc_xml
|
9971d408f39056b6d2078e5157f2c39dbce8c202
| 3,639,248
|
def convertSLToNumzero(sl, min_sl=1e-3):
"""
Converts a (neg or pos) significance level to
a count of significant zeroes.
Parameters
----------
sl: float
Returns
-------
float
"""
if np.isnan(sl):
return 0
if sl < 0:
sl = min(sl, -min_sl)
num_zero = np.log10(-sl)
elif sl > 0:
sl = max(sl, min_sl)
num_zero = -np.log10(sl)
else:
raise RuntimeError("Cannot have significance level of 0.")
return num_zero
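
# Worked examples: a significance level of 1e-3 maps to 3 "significant zeroes",
# and the sign of the input carries through to the sign of the count.
assert abs(convertSLToNumzero(0.001) - 3.0) < 1e-9
assert abs(convertSLToNumzero(-0.001) + 3.0) < 1e-9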
|
c8cbea09904a7480e36529ffc7a62e6cdddc7a47
| 3,639,249
|
def calibrate_time_domain(power_spectrum, data_pkt):
"""
Return a list of the calibrated time domain data
:param list power_spectrum: spectral data of the time domain data
:param data_pkt: a RTSA VRT data packet
:type data_pkt: pyrf.vrt.DataPacket
:returns: a list containing the calibrated time domain data
"""
i_data, q_data, stream_id, spec_inv = _decode_data_pkts(data_pkt)
# Time domain data calibration
if stream_id in (VRT_IFDATA_I14, VRT_IFDATA_I24):
td_data = i_data -np.mean(i_data)
complex_coefficient = 1
if stream_id == VRT_IFDATA_I14Q14:
td_data = i_data + 1j * q_data
td_data = td_data - np.mean(td_data)
complex_coefficient = 2
P_FD_Ln = 10**(power_spectrum/10)
P_FD_av = np.mean(P_FD_Ln)
v_volt = td_data * np.sqrt(1e-3) * np.sqrt(P_FD_av/np.var(td_data)) * 50 * np.sqrt(complex_coefficient*len(td_data)/128.0)
return v_volt
|
a4bfa279ac4ada5ffe6d7bd6e8cf64e59ae0bf61
| 3,639,250
|
def func(x):
"""
:param x: [b, 2]
:return:
"""
z = tf.math.sin(x[...,0]) + tf.math.sin(x[...,1])
return z
|
daf4e05c6a8c1f735842a0ef6fa115b14e85ef40
| 3,639,251
|
from typing import Tuple
from typing import Dict
from typing import Any
from typing import List
def parse_handler_input(handler_input: HandlerInput,
) -> Tuple[UserMessage, Dict[str, Any]]:
"""Parses the ASK-SDK HandlerInput into Slowbro UserMessage.
Returns the UserMessage object and serialized SessionAttributes.
"""
request_envelope = handler_input.request_envelope
text: str
asr_hypos: List[AsrHypothesisUtterance] = []
if is_request_type("LaunchRequest")(handler_input):
# This is a launch request.
text = ''
elif is_request_type("IntentRequest")(handler_input):
slots = request_envelope.request.intent.slots
slot_text = slots.get('Text', None)
if slot_text is not None:
text = slot_text.value
else:
text = ''
if hasattr(request_envelope.request, 'speechRecognition'):
hypotheses = request_envelope.request.speechRecognition.get(
'hypotheses', [])
asr_hypos.extend([
AsrHypothesisUtterance([
AsrHypothesisToken(token['value'], token['confidence'],
token['startOffsetInMilliseconds'],
token['endOffsetInMilliseconds'])
for token in hypo['tokens']
], hypo['confidence']) for hypo in hypotheses
])
elif text:
# NOTE: create a fake ASR hypo using the text field.
asr_hypos.extend([
AsrHypothesisUtterance([
AsrHypothesisToken(token, -1, -1, -1)
for token in text.split(' ')
], -1)
])
if not text:
# Try to recover the text using asr_hypos.
# Otherwise, raise an exception.
if asr_hypos:
text = asr_hypos[0].__str__()
else:
raise Exception('Unable to find "text" from handler input:',
handler_input)
else:
raise Exception('Unable to parse handler input:', handler_input)
serializer = DefaultSerializer()
user_message = UserMessage(payload=serializer.serialize(request_envelope),
channel='alexaprize',
request_id=request_envelope.request.request_id,
session_id=request_envelope.session.session_id,
user_id=request_envelope.session.user.user_id,
text=text,
asr_hypos=asr_hypos)
attributes_manager = handler_input.attributes_manager
ser_session_attributes = attributes_manager.session_attributes
return (user_message, ser_session_attributes)
|
5be16af3f460de41af9e33cacc4ce94c447ceb45
| 3,639,252
|
def _validate_show_for_invoking_user_only(show_for_invoking_user_only):
"""
Validates the given `show_for_invoking_user_only` value.
Parameters
----------
show_for_invoking_user_only : `None` or `bool`
The `show_for_invoking_user_only` value to validate.
Returns
-------
show_for_invoking_user_only : `bool`
The validated `show_for_invoking_user_only` value.
Raises
------
TypeError
If `show_for_invoking_user_only` was not given as `None` nor as `bool` instance.
"""
if show_for_invoking_user_only is None:
show_for_invoking_user_only = False
else:
show_for_invoking_user_only = preconvert_bool(
show_for_invoking_user_only, 'show_for_invoking_user_only'
)
return show_for_invoking_user_only
|
a1f9612927dfc1423d027f242d759c982b11a8b8
| 3,639,253
|
def test_db_transaction_n1(monkeypatch):
"""Raise _DB_TRANSACTION_ATTEMPTS OperationalErrors to force a reconnection.
A cursor for each SQL statement should be returned in the order
the statement were submitted.
    0. The first statement execution produces no results _DB_TRANSACTION_ATTEMPTS times (OperationalError)
1. A reconnection will occur
2. The first statement will be re-executed
3. The second statement will be executed
4. The third statement will be executed
Should get 3 cursors with the values _DB_TRANSACTION_ATTEMPTS, _DB_TRANSACTION_ATTEMPTS+1, & _DB_TRANSACTION_ATTEMPTS+2
The next mock_connection_ref should be 2
"""
db_disconnect_all()
mock_connection_ref = sequential_reference()
mock_cursor_ref = sequential_reference()
class mock_cursor():
def __init__(self) -> None: self.value = next(mock_cursor_ref)
def execute(self, sql_str):
if self.value < _DB_TRANSACTION_ATTEMPTS:
raise OperationalError
def fetchone(self): return self.value
class mock_connection():
def __init__(self) -> None: self.value = next(mock_connection_ref)
def cursor(self): return mock_cursor()
def close(self): self.value = None
def mock_connect(*args, **kwargs): return mock_connection()
monkeypatch.setattr(database, 'connect', mock_connect)
dbcur_list = db_transaction(_MOCK_DBNAME, _MOCK_CONFIG, ("SQL0", "SQL1", "SQL2"))
assert len(dbcur_list) == 3
assert dbcur_list[0].fetchone() == _DB_TRANSACTION_ATTEMPTS
assert dbcur_list[1].fetchone() == _DB_TRANSACTION_ATTEMPTS + 1
assert dbcur_list[2].fetchone() == _DB_TRANSACTION_ATTEMPTS + 2
assert next(mock_connection_ref) == 2
|
4dcb32f14d8a938765f4fde5375b6b686a6a5f5c
| 3,639,254
|
import requests
from datetime import datetime
def fetch_status():
    """
    Fetches Clan Battle information from the analysis site <https://redive.estertion.win>
    return
    ----
    ```
    {
        "cb_start": datetime,
        "cb_end": datetime,
        "cb_days": int
    }
    ```
    """
    # Fetch the Clan Battle schedule information
    r = requests.get(
        "https://redive.estertion.win/ver_log_redive/?page=1&filter=clan_battle"
    ).json()
    # Get the Clan Battle start date
    cb_start = r["data"][0]["clan_battle"][0]["start"]
    cb_start = datetime.strptime(cb_start, "%Y/%m/%d %H:%M:%S")
    # Get the Clan Battle end date
    cb_end = r["data"][0]["clan_battle"][0]["end"]
    cb_end = datetime.strptime(cb_end, "%Y/%m/%d %H:%M:%S")
    # Number of days the Clan Battle runs
    cb_days = (cb_end - cb_start).days + 1
    return {"cb_start": cb_start, "cb_end": cb_end, "cb_days": cb_days}
|
683c9fe84bf346a1cce703063da8683d3469ccc2
| 3,639,255
|
def data_context_path_computation_context_path_comp_serviceuuid_routing_constraint_post(uuid, tapi_path_computation_routing_constraint=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_routing_constraint_post
creates tapi.path.computation.RoutingConstraint # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param tapi_path_computation_routing_constraint: tapi.path.computation.RoutingConstraint to be added to list
:type tapi_path_computation_routing_constraint: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_path_computation_routing_constraint = TapiPathComputationRoutingConstraint.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
|
7d56e6a544b2ac720aa311127aa5db9b3153a0c3
| 3,639,256
|
def A004086(i: int) -> int:
"""Digit reversal of i."""
result = 0
while i > 0:
unit = i % 10
result = result * 10 + unit
i = i // 10
return result
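
# Worked examples: trailing zeros are dropped by the reversal, since they would
# become leading zeros.
assert A004086(1234) == 4321
assert A004086(1200) == 21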
|
b0a65b7e203b7a92f7d6a1846888798c369ac869
| 3,639,257
|
def should_raise_sequencingerror(wait, nrep, jump_to, goto, num_elms):
"""
Function to tell us whether a SequencingError should be raised
"""
if wait not in [0, 1]:
return True
if nrep not in range(0, 16384):
return True
if jump_to not in range(-1, num_elms+1):
return True
if goto not in range(0, num_elms+1):
return True
return False
|
fc7c4bdb29cd5b90faec59a4f6705b920304aae0
| 3,639,258
|
from typing import Optional
from typing import Mapping
import functools
def add_task_with_sentinels(
task_name: str,
num_sentinels: Optional[int] = 1):
"""Adds sentinels to the inputs/outputs of a task.
Adds num_sentinels sentinels to the end of 'inputs' and at the beginning
of 'targets'. This is known to help fine-tuning span corruption models,
especially on smaller datasets.
This will also rename the task by adding a "_{num_sentinels}_sentinel" suffix
to the task name, but making sure it comes before the following suffixes:
'_train', '_dev', '_test', '.'.
Example before:
    'inputs': What is the capital of Illinois?
'targets': Springfield.
Example after:
    'inputs': What is the capital of Illinois? <extra_id_0>
'targets': <extra_id_0> Springfield.
Args:
task_name: a str, which is the name of the task you want to have sentinels
added to. Note this will not override the current task, but will create
a new one.
num_sentinels: integer, number of sentinels to end of inputs and the
beginning of targets.
"""
def _append_eos_after_trim_and_preserve(
dataset: tf.data.Dataset,
output_features: Mapping[str, dataset_providers.Feature],
sequence_length: Optional[Mapping[str, int]] = None,
preserve_final_n_tokens_when_trimming: Optional[int] = None
) -> tf.data.Dataset:
"""Version of append_eos_after_trim with option to preserve last n tokens."""
def _maybe_add_eos_and_trim(key: str, value: tf.Tensor) -> tf.Tensor:
if key not in output_features or not output_features[key].add_eos:
return value
eos_id = output_features[key].vocabulary.eos_id
if (sequence_length is not None and
sequence_length.get(key, None) is not None):
max_length = sequence_length[key]
if (preserve_final_n_tokens_when_trimming is not None and
preserve_final_n_tokens_when_trimming > 0):
# Compute the new length of the sequence excluding the EOS token.
trimmed_length = tf.minimum(max_length, tf.shape(value)[0] + 1)
# Can't preserve more tokens than the sequence length.
n_tokens_to_preserve = tf.minimum(
preserve_final_n_tokens_when_trimming, trimmed_length - 1)
# pylint: disable=invalid-unary-operand-type
return tf.concat(
[value[:trimmed_length-(n_tokens_to_preserve + 1)],
value[-n_tokens_to_preserve:],
[eos_id]], axis=0)
# pylint: enable=invalid-unary-operand-type
else:
return tf.concat([value[:max_length-1], [eos_id]], axis=0)
else:
return tf.concat([value, [eos_id]], axis=0)
return dataset.map(
lambda ex: {k: _maybe_add_eos_and_trim(k, v) for k, v in ex.items()},
num_parallel_calls=tf.data.experimental.AUTOTUNE)
def _create_new_task_name(task_name):
"""Creates the new task name with sentinels added."""
sentinel_name = '_{}_sentinel'.format(num_sentinels)
# Avoid messing up evaluation suffixes, so insert the sentinel name right
# before these keywords.
for suffix in ['_train', '_dev', '_test', '_eval', '.']:
idx = task_name.find(suffix)
if idx >= 0:
return task_name[:idx] + sentinel_name + task_name[idx:]
return task_name + sentinel_name
def _sentinel_id(vocabulary, sentinel_num=0):
"""Token ID to use as a sentinel.
Args:
vocabulary: a t5.data.vocabularies.Vocabulary
      sentinel_num: an optional integer, what sentinel should be returned.
By default it returns the first sentinel.
Returns:
an integer
"""
return vocabulary.vocab_size - 1 - sentinel_num
def _add_sentinels(dataset, sequence_length, output_features):
"""Adds sentinels to end of inputs and beginning of targets."""
del sequence_length
input_vocab = output_features['inputs'].vocabulary
target_vocab = output_features['targets'].vocabulary
@utils.map_over_dataset
def _my_fn(x):
sentinels_input = [
_sentinel_id(input_vocab, idx) for idx in range(num_sentinels)]
sentinels_output = [
_sentinel_id(target_vocab, idx) for idx in range(num_sentinels)]
x['inputs'] = tf.concat([x['inputs'], sentinels_input], 0)
x['targets'] = tf.concat([sentinels_output, x['targets']], 0)
return x
return _my_fn(dataset)
def _postprocess_fn_remove_sentinel(string_label, *args, **kwargs):
"""If sentinels are appended to the task, then remove them before eval."""
del args
del kwargs
vocab = task.output_features['targets'].vocabulary
sentinel_str = vocab.decode(
[_sentinel_id(vocab, idx) for idx in range(num_sentinels)])
if string_label.startswith(sentinel_str):
string_label = string_label[len(sentinel_str):].strip()
return string_label
def _wrap_postprocess_fn_remove_sentinel(postprocess_fn):
"""Wrap around another postprocess_fn to remove sentinels first."""
def new_fn(string_label, *args, **kwargs):
string_label = _postprocess_fn_remove_sentinel(
string_label, *args, **kwargs)
return postprocess_fn(string_label, *args, **kwargs)
return new_fn
# Create the new task name.
task = TaskRegistry.get(task_name)
sentinel_task_name = _create_new_task_name(task_name)
# Make the new preprocessors that will insert sentinels and make sure
# sentinels are preserved if the sequences are trimmed.
new_preprocessors = list(task.preprocessors)
if new_preprocessors[-1] is seqio_preprocessors.append_eos_after_trim:
new_eos_funtion = functools.partial(
_append_eos_after_trim_and_preserve,
preserve_final_n_tokens_when_trimming=num_sentinels)
new_preprocessors[-1] = new_eos_funtion
new_preprocessors.insert(-1, _add_sentinels)
else:
new_preprocessors.append(_add_sentinels)
# Remove the inserted sentinels in the postprocessor.
postprocess_fn = task.postprocessor
if postprocess_fn is not None:
new_postprocess_fn = _wrap_postprocess_fn_remove_sentinel(postprocess_fn)
else:
new_postprocess_fn = _postprocess_fn_remove_sentinel
TaskRegistry.add(
sentinel_task_name,
source=task.source,
preprocessors=new_preprocessors,
output_features=task.output_features,
postprocess_fn=new_postprocess_fn,
metric_fns=task.metric_fns,
)
|
2d040f37d4346770e836c5a8b71b90c1acce9d1d
| 3,639,259
|
import sys
def to_routing_header(params):
"""Returns a routing header string for the given request parameters.
Args:
params (Mapping[str, Any]): A dictionary containing the request
parameters used for routing.
Returns:
str: The routing header string.
"""
if sys.version_info[0] < 3:
# Python 2 does not have the "safe" parameter for urlencode.
return urlencode(params).replace('%2F', '/')
return urlencode(
params,
# Per Google API policy (go/api-url-encoding), / is not encoded.
safe='/')
|
654118e165c95c2c541e969a5a1d9cbc87e86bea
| 3,639,260
|
def mk_llfdi(data_id, data): # measurement group 10
"""
transforms a k-llfdi.json form into the triples used by insertMeasurementGroup to
store each measurement that is in the form
:param data_id: unique id from the json form
:param data: data array from the json form
:return: The list of (typeid,valType,value) triples that are used by insertMeasurementGroup to add the measurements
"""
val_list = [(220, 2, data_id),
(55, 7, data['f1']), (56, 7, data['f2']), (57, 7, data['f3']),
(58, 7, data['f4']), (59, 7, data['f5']), (60, 7, data['f6']),
(61, 7, data['f7']), (62, 7, data['f8']), (63, 7, data['f9']),
(64, 7, data['f10']), (65, 7, data['f11']), (66, 7, data['f12']),
(67, 7, data['f13']), (68, 7, data['f14']), (69, 7, data['f15']),
(70, 7, data['f16']), (71, 7, data['f17']), (72, 7, data['f18']),
(73, 7, data['f19']), (74, 7, data['f20']), (75, 7, data['f21']),
(76, 7, data['f22']), (77, 7, data['f23']), (78, 7, data['f24']),
(79, 7, data['f25']), (80, 7, data['f26']), (81, 7, data['f27']),
(82, 7, data['f28']), (83, 7, data['f29']), (84, 7, data['f30']),
(85, 7, data['f31']), (86, 7, data['f32'])]
for sublist in lwh.mk_optional_int(87, 224, data, 'fd7'):
val_list.append(sublist)
for sublist in lwh.mk_optional_int(88, 225, data, 'fd8'):
val_list.append(sublist)
for sublist in lwh.mk_optional_int(89, 226, data, 'fd14'):
val_list.append(sublist)
for sublist in lwh.mk_optional_int(90, 227, data, 'fd15'):
val_list.append(sublist)
for sublist in lwh.mk_optional_int(91, 228, data, 'fd26'):
val_list.append(sublist)
for sublist in lwh.mk_optional_int(92, 229, data, 'fd29'):
val_list.append(sublist)
for sublist in lwh.mk_optional_int(93, 230, data, 'fd30'):
val_list.append(sublist)
for sublist in lwh.mk_optional_int(94, 231, data, 'fd32'):
val_list.append(sublist)
return val_list
|
42717f4d182b3df60e27f213c36278c894597ded
| 3,639,261
|
def valid_distro(x):
"""
    Validates that arg is a Distro type and has non-empty string "arch" and "variant" attributes.
:param x:
:return:
"""
if not isinstance(x, Distro):
return False
result = True
for required in ["arch", "variant"]:
val = getattr(x, required)
if not isinstance(val, str):
result = False
elif val.strip() == "":
result = False
return result
|
8fc68700a4d024b7ba756c186225ef22622db584
| 3,639,262
|
import time
import os
def validate(dataloader,
model,
criterion,
total_batches,
debug_steps=100,
local_logger=None,
master_logger=None,
save='./'):
"""Validation for the whole dataset
Args:
dataloader: paddle.io.DataLoader, dataloader instance
        model: nn.Layer, a ViT model
        criterion: nn.Layer, loss function
        total_batches: int, total num of batches for one epoch
        debug_steps: int, num of iters to log info, default: 100
        local_logger: logger for local process/gpu, default: None
        master_logger: logger for main process, default: None
        save: str, output directory for per-rank prediction files, default: './'
Returns:
val_loss_meter.avg: float, average loss on current process/gpu
val_acc1_meter.avg: float, average top1 accuracy on current processes/gpus
master_loss_meter.avg: float, average loss on all processes/gpus
master_acc1_meter.avg: float, average top1 accuracy on all processes/gpus
val_time: float, validation time
"""
model.eval()
val_loss_meter = AverageMeter()
val_acc1_meter = AverageMeter()
master_loss_meter = AverageMeter()
master_acc1_meter = AverageMeter()
time_st = time.time()
# output path
local_rank = paddle.distributed.get_rank()
ofile = open(os.path.join(save, f'pred_{local_rank}.txt'), 'w')
for batch_id, data in enumerate(dataloader):
# get data
images = data[0]
label = data[1]
image_path = data[2]
batch_size = images.shape[0]
output = model(images)
if label is not None:
loss = criterion(output, label)
loss_value = loss.item()
pred = paddle.nn.functional.softmax(output)
if label is not None:
acc1 = paddle.metric.accuracy(pred, label.unsqueeze(1)).item()
# sync from other gpus for overall loss and acc
master_loss = all_reduce_mean(loss_value)
master_acc1 = all_reduce_mean(acc1)
master_batch_size = all_reduce_mean(batch_size)
master_loss_meter.update(master_loss, master_batch_size)
master_acc1_meter.update(master_acc1, master_batch_size)
val_loss_meter.update(loss_value, batch_size)
val_acc1_meter.update(acc1, batch_size)
if batch_id % debug_steps == 0:
local_message = (f"Step[{batch_id:04d}/{total_batches:04d}], "
f"Avg Loss: {val_loss_meter.avg:.4f}, "
f"Avg Acc@1: {val_acc1_meter.avg:.4f}")
master_message = (f"Step[{batch_id:04d}/{total_batches:04d}], "
f"Avg Loss: {master_loss_meter.avg:.4f}, "
f"Avg Acc@1: {master_acc1_meter.avg:.4f}")
write_log(local_logger, master_logger, local_message, master_message)
else:
if batch_id % debug_steps == 0:
local_message = f"Step[{batch_id:04d}/{total_batches:04d}]"
master_message = f"Step[{batch_id:04d}/{total_batches:04d}]"
write_log(local_logger, master_logger, local_message, master_message)
# write results to pred
for idx, img_p in enumerate(image_path):
pred_prob, pred_label = paddle.topk(pred[idx], 1)
pred_label = pred_label.cpu().numpy()[0]
ofile.write(f'{img_p} {pred_label}\n')
val_time = time.time() - time_st
ofile.close()
return (val_loss_meter.avg,
val_acc1_meter.avg,
master_loss_meter.avg,
master_acc1_meter.avg,
val_time)
|
cf879823f6051a4f758c145cf2c060a296302f03
| 3,639,263
|
def encode(message):
"""
    Encodes a string according to the Morse code table
>>> encode('MAI-PYTHON-2020') # doctest: +SKIP
'-- .- .. -....-
.--. -.-- - .... --- -. -....-
..--- ----- ..--- -----'
>>> encode('SOS')
'...
---
...'
>>> encode('МАИ-ПИТОН-2020') # doctest: +ELLIPSIS
Traceback (most recent call last):
...
KeyError: 'М'
"""
encoded_signs = [
LETTER_TO_MORSE[letter] for letter in message
]
return ' '.join(encoded_signs)
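# Minimal usage sketch: LETTER_TO_MORSE is not defined in this snippet, so a
# tiny illustrative mapping is assumed here.
LETTER_TO_MORSE = {'S': '...', 'O': '---'}
print(encode('SOS'))  # "... --- ..."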
|
efa312c510738f89608af0febff3435b17235eb8
| 3,639,264
|
def get_group_to_elasticsearch_processor():
"""
    This processor adds groups that come in to the Group Index in Elasticsearch if they don't exist in HQ
"""
return ElasticProcessor(
elasticsearch=get_es_new(),
index_info=GROUP_INDEX_INFO,
)
|
12e9371282298c96968263e76d1d02848fc5dcb3
| 3,639,265
|
import torch
from torch import nn
def loss_function(recon_x, x, mu, logvar, flattened_image_size = 1024):
"""
from https://github.com/pytorch/examples/blob/master/vae/main.py
"""
BCE = nn.functional.binary_cross_entropy(recon_x, x.view(-1, flattened_image_size), reduction='sum')
# see Appendix B from VAE paper:
# Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
# https://arxiv.org/abs/1312.6114
# 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
return BCE + KLD
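# Minimal usage sketch with random tensors: a batch of 4 flattened 32x32
# reconstructions in [0, 1] and a hypothetical 16-dimensional latent space.
recon_x = torch.rand(4, 1024)
x = torch.rand(4, 1, 32, 32)
mu = torch.zeros(4, 16)
logvar = torch.zeros(4, 16)
print(loss_function(recon_x, x, mu, logvar))  # scalar tensor: BCE + KLD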
|
73abe5c0944f646b4c9240fdb80e17cabf83a22d
| 3,639,266
|
import numpy as np
def remove_poly(values, poly_fit=0):
"""
Calculates best fit polynomial and removes it from the record
"""
x = np.linspace(0, 1.0, len(values))
cofs = np.polyfit(x, values, poly_fit)
y_cor = 0 * x
for co in range(len(cofs)):
mods = x ** (poly_fit - co)
y_cor += cofs[co] * mods
return values - y_cor
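# Minimal usage sketch: strip a linear trend from a noisy ramp.
import numpy as np
rng = np.random.default_rng(0)
values = np.linspace(0.0, 5.0, 100) + rng.normal(0.0, 0.1, 100)
detrended = remove_poly(values, poly_fit=1)
print(detrended.mean())  # close to zero once the trend is removed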
|
3699dcd3cae6021a5f2a0b4cad08882a4383d09c
| 3,639,267
|
def generate_per_host_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, batch_axis, device, host_id):
"""Generates infeed enqueue ops for per-host input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
hooks = []
with ops.device(device):
user_context = tpu_context.TPUContext(
internal_ctx=ctx,
input_device=device,
invocation_index=host_id)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
if not is_dataset:
raise TypeError(
'For mode PREDICT, `input_fn` must return `Dataset` instead of '
'`features` and `labels`.')
if batch_axis is not None:
raise TypeError('For mode PREDICT, batch_axis is not supported yet.')
inputs = _InputsWithStoppingSignals(
dataset=inputs.dataset, batch_size=ctx.batch_size_for_input_fn,
add_padding=True)
if is_dataset:
hooks.append(inputs.dataset_initializer_hook())
tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)
def enqueue_ops_fn():
"""A Fn returning the TPU infeed enqueue ops.
By providing as a Fn, it can be invoked inside the tf.while_loop such that
the input pipeline for multiple iterations can be executed by one
Session.run call.
Returns:
list of dict of ops.
"""
with ops.device(device):
num_of_replicas_per_host = ctx.num_of_replicas_per_host
# Convert user input to features and labels. If the user returns a
# dataset, it is initialized and the features and labels extracted via
# `dataset.iterator.get_next()`
features, labels = inputs.features_and_labels()
signals = inputs.signals()
inputs_structure_recorder.validate_and_record_structure(
features, labels, signals)
unsharded_tensor_list = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels, signals))
infeed_queue = tpu_feed.InfeedQueue(
tuple_types=[t.dtype for t in unsharded_tensor_list],
tuple_shapes=[t.shape for t in unsharded_tensor_list],
shard_dimensions=batch_axis)
captured_infeed_queue.capture(infeed_queue)
infeed_queue.set_number_of_shards(num_of_replicas_per_host)
per_host_enqueue_ops = (
infeed_queue.split_inputs_and_generate_enqueue_ops(
unsharded_tensor_list,
placement_function=lambda x: device,
tpu_ordinal_function=tpu_ordinal_function_impl))
if signals is None:
return per_host_enqueue_ops
else:
return {
'ops': per_host_enqueue_ops,
'signals': signals,
}
return enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset
|
a632fac96d555d3ce21d75183c00c6e7627ba5ac
| 3,639,268
|
import os
import yaml
def from_path(path, vars=None, *args, **kwargs):
"""Read a scenario configuration and construct a new scenario instance.
Args:
path (basestring): Path to a configuration file. `path` may be a directory
containing a single configuration file.
      vars: Optional variables forwarded to the YAML configuration parser.
      *args: Arguments passed to Scenario __init__.
**kwargs: Arguments passed to Scenario __init__.
Returns:
Scenario: A new scenario instance.
"""
# If path is a directory, find a configuration file inside that directory.
if os.path.isdir(path):
paths = Scenario.find_configuration_files(path)
if not paths:
raise ValueError("No configuration files found at '%s'" % path)
elif len(paths) > 1:
raise ValueError("Multiple configuration files found at '%s': %r" %
(path, paths))
else:
path = paths[0]
# Parse the configuration file and construct a new scenario.
directory, filename = os.path.split(path)
extension = os.path.splitext(filename)[1]
if extension.lower() in [".yml", ".yaml"]:
with open(path) as config_file:
try:
scenario = from_yaml_str(config_file.read(), *args,
vars=vars, source_directory=directory, **kwargs)
except yaml.parser.ParserError as err:
raise CurieTestException("Unable to parse YAML at path %r, check "
"syntax: %r" % (path, str(err)))
else:
raise ValueError("Invalid file type '%s'" % path)
return scenario
|
ab9131427c1c759e72a9a0e73d735a0b6c3a0388
| 3,639,269
|
def SogouNews(*args, **kwargs):
""" Defines SogouNews datasets.
The labels includes:
- 0 : Sports
- 1 : Finance
- 2 : Entertainment
- 3 : Automobile
- 4 : Technology
Create supervised learning dataset: SogouNews
Separately returns the training and test dataset
Args:
root: Directory where the datasets are saved. Default: ".data"
        ngrams: a contiguous sequence of n items from a string text.
Default: 1
vocab: Vocabulary used for dataset. If None, it will generate a new
vocabulary based on the train data set.
include_unk: include unknown token in the data (Default: False)
Examples:
>>> train_dataset, test_dataset = torchtext.datasets.SogouNews(ngrams=3)
"""
return _setup_datasets(*(("SogouNews",) + args), **kwargs)
|
e10eaf10ba6e999d40a40f09f7e79b47eb5aa8a5
| 3,639,270
|
def add_volume (activity_cluster_df,
activity_counts):
"""Scales log of session counts of each activity and merges into activities dataframe
Parameters
----------
activity_cluster_df : dataframe
Pandas dataframe of activities, skipgrams features, and cluster label from DBSCAN
activity_counts: dictionary
Dictionary (from activities.create_corpus func) of activity and session counts
Returns
-------
pandas dataframe of activities, skipgrams features, x-value, y-value, and activity volume percentiles
"""
    assert isinstance(activity_counts, dict), "activity_counts should be a dictionary."
    assert len(activity_counts) >= len(activity_cluster_df), "activity_counts must contain the same number or more activity entries than activity_cluster_df."
    # Map activities to capture unique session ID count in activities dataframe
activity_cluster_df['volume_pctl'] = activity_cluster_df.index.map(activity_counts)
# Replace absolute volume with percentile rank integer
activity_cluster_df['volume_pctl'] = activity_cluster_df['volume_pctl'].rank(pct=True) * 100
return activity_cluster_df
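# Minimal usage sketch with hypothetical activity names: the dataframe index
# holds the activities and must match the keys of the counts dictionary.
import pandas as pd
df = pd.DataFrame({'cluster': [0, 0, 1]}, index=['login', 'search', 'checkout'])
counts = {'login': 120, 'search': 75, 'checkout': 30}
print(add_volume(df, counts)['volume_pctl'])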
|
1ea67909e2c48500ca2f022a3ae5ebcbe28da6c8
| 3,639,271
|
def handle_message(message):
"""
Where `message` is a string that has already been stripped and lower-cased,
tokenize it and find the corresponding Hand in the database. (Also: return some
helpful examples if requested, or an error message if the input cannot be parsed.)
"""
if 'example' in message:
return example()
    message_tokens = [token for token in message.split(" ") if token != '']
    if len(message_tokens) != 4:
# maybe use a better error message here?
return STANDARD_ERRORMSG
# handle the described poker hand.
rank1 = parsers.get_rank(message_tokens[0])
rank2 = parsers.get_rank(message_tokens[1])
suiting = parsers.get_suiting(message_tokens[2])
players = parsers.get_players(message_tokens[3])
check = check_input(rank1, rank2, suiting, players)
    if check is not None:
return check
try:
p_win, p_tie, expected_gain = get_stats(rank1, rank2, suiting, players)
except:
        print("Input valid but bad db lookup." + str([rank1, rank2, suiting, players]))
return "Error! Input valid but DataBase lookup failed? Please report this bug."
return (
"P(win): " + str(p_win * 100) + "%\n"
"P(tie): " + str(p_tie * 100) + "%\n"
"Expected unit gain: " + str(expected_gain)
)
|
910f07a3c612c9d8e58762b99dd508e76ad2f5aa
| 3,639,272
|
import functools
def MemoizedSingleCall(functor):
"""Decorator for simple functor targets, caching the results
The functor must accept no arguments beyond either a class or self (depending
on if this is used in a classmethod/instancemethod context). Results of the
wrapped method will be written to the class/instance namespace in a specially
named cached value. All future invocations will just reuse that value.
Note that this cache is per-process, so sibling and parent processes won't
notice updates to the cache.
"""
# TODO(build): Should we rebase to snakeoil.klass.cached* functionality?
# pylint: disable=protected-access
@functools.wraps(functor)
def wrapper(obj):
key = wrapper._cache_key
val = getattr(obj, key, None)
if val is None:
val = functor(obj)
setattr(obj, key, val)
return val
# Use name mangling to store the cached value in a (hopefully) unique place.
wrapper._cache_key = '_%s_cached' % (functor.__name__.lstrip('_'),)
return wrapper
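# Minimal usage sketch: the decorated method takes only `self`, and its first
# non-None result is cached on the instance.
class Config:
    calls = 0

    @MemoizedSingleCall
    def load(self):
        Config.calls += 1
        return {'mode': 'fast'}

cfg = Config()
cfg.load()
cfg.load()
print(Config.calls)  # 1 -- the second call reuses the cached value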
|
1757583cd416900d59c297a090800114a1bfcb3b
| 3,639,273
|
def polyadd(c1, c2):
"""
Add one polynomial to another.
Returns the sum of two polynomials `c1` + `c2`. The arguments are
sequences of coefficients from lowest order term to highest, i.e.,
[1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of polynomial coefficients ordered from low to high.
Returns
-------
out : ndarray
The coefficient array representing their sum.
See Also
--------
polysub, polymul, polydiv, polypow
Examples
--------
>>> from numpy.polynomial import polynomial as P
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> sum = P.polyadd(c1,c2); sum
array([ 4., 4., 4.])
>>> P.polyval(2, sum) # 4 + 4(2) + 4(2**2)
28.0
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] += c2
ret = c1
else:
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
|
0dc8327abf94126fca5bbcc836bc1c404c92148e
| 3,639,274
|
def weighted_categorical_crossentropy(target, output, n_classes = 3, axis = None, from_logits=False):
"""Categorical crossentropy between an output tensor and a target tensor.
Automatically computes the class weights from the target image and uses
them to weight the cross entropy
# Arguments
target: A tensor of the same shape as `output`.
output: A tensor resulting from a softmax
(unless `from_logits` is True, in which
case `output` is expected to be the logits).
from_logits: Boolean, whether `output` is the
result of a softmax, or is a tensor of logits.
# Returns
Output tensor.
"""
# Note: tf.nn.softmax_cross_entropy_with_logits
# expects logits, Keras expects probabilities.
if axis is None:
axis = len(output.get_shape()) - 1
if not from_logits:
# scale preds so that the class probas of each sample sum to 1
output /= tf.reduce_sum(output,
axis=axis,
keep_dims=True)
# manual computation of crossentropy
_epsilon = _to_tensor(K.epsilon(), output.dtype.base_dtype)
output = tf.clip_by_value(output, _epsilon, 1. - _epsilon)
target_cast = tf.cast(target, K.floatx())
        class_weights = 1.0 / float(n_classes) * tf.divide(tf.reduce_sum(target_cast), tf.reduce_sum(target_cast, axis = [0,1,2]))
        print(class_weights.get_shape())
return - tf.reduce_sum(tf.multiply(target * tf.log(output), class_weights), axis=axis)
else:
raise Exception("weighted_categorical_crossentropy cannot take logits")
|
e7fe2c583b4158afe5c04632c53402af1c64cc20
| 3,639,275
|
from django.conf import settings
def get_config(key, default):
"""
Get the dictionary "IMPROVED_PERMISSIONS_SETTINGS"
from the settings module.
Return "default" if "key" is not present in
the dictionary.
"""
config_dict = getattr(settings, 'IMPROVED_PERMISSIONS_SETTINGS', None)
if config_dict:
if key in config_dict:
return config_dict[key]
return default
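# Minimal usage sketch for a standalone script: a hypothetical
# IMPROVED_PERMISSIONS_SETTINGS dict is configured before calling get_config.
settings.configure(IMPROVED_PERMISSIONS_SETTINGS={'CACHE_MODE': 'redis'})
print(get_config('CACHE_MODE', 'local'))   # 'redis'
print(get_config('MISSING_KEY', 'local'))  # 'local'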
|
8e4d03b71f568e6c3450e6674d16624ae44181a8
| 3,639,276
|
import os
import numpy as np
def fetch_protein_interaction(data_home=None):
"""Fetch the protein-interaction dataset
Constant features were removed
=========================== ===================================
Domain drug-protein interaction network
Features Biological (see [1])
output interaction network
Drug matrix (sample, features) = (1554, 876)
    Network interaction matrix (samples, labels) = (1554, 1862)
=========================== ===================================
Parameters
----------
data_home: optional, default: None
Specify another download and cache folder for the data sets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels and
'feature_names', the original names of the dataset columns.
References
----------
.. [1] Yamanishi, Y., Pauwels, E., Saigo, H., & Stoven, V. (2011).
Extracting sets of chemical substructures and protein domains
governing drug-target interactions. Journal of chemical information
and modeling, 51(5), 1183-1194.
"""
data_home = _fetch_drug_protein(data_home=data_home)
protein_fname = os.path.join(data_home, "target_repmat.txt")
data = np.loadtxt(protein_fname, dtype=float, skiprows=1,
usecols=range(1, 877)) # skip id column
mask_constant = np.var(data, axis=0) != 0.
data = data[:, mask_constant] # remove constant columns
with open(protein_fname, 'r') as fhandle:
feature_names = fhandle.readline().split("\t")
feature_names = np.array(feature_names)[mask_constant].tolist()
interaction_fname = os.path.join(data_home, "inter_admat.txt")
target = np.loadtxt(interaction_fname, dtype=float, skiprows=1)
target = target[:, 1:] # skip id column
target = target.T
return Bunch(data=data, target=target, feature_names=feature_names)
|
14e033e690889fb8c560b0f79caee8ec35c144ca
| 3,639,277
|
def prefetched_iterator(query, chunk_size=2000):
"""
This is a prefetch_related-safe version of what iterator() should do.
It will sort and batch on the default django primary key
Args:
query (QuerySet): the django queryset to iterate
chunk_size (int): the size of each chunk to fetch
"""
# walk the records in ascending id order
base_query = query.order_by("id")
def _next(greater_than_id):
"""Returns the next batch"""
return base_query.filter(id__gt=greater_than_id)[:chunk_size]
batch = _next(0)
while batch:
item = None
# evaluate each batch query here
for item in batch:
yield item
# next batch starts after the last item.id
batch = _next(item.id) if item is not None else None
|
e8a8feeea8073161283018f19de742c9425e2f94
| 3,639,278
|
import os
def get_dir(foldername, path):
""" Get directory relative to current file - if it doesn't exist create it. """
file_dir = os.path.join(path, foldername)
if not os.path.isdir(file_dir):
os.mkdir(os.path.join(path, foldername))
return file_dir
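# Minimal usage sketch: create (or reuse) an "output" folder under a temp dir.
import tempfile
base = tempfile.mkdtemp()
print(get_dir("output", base))  # <base>/output, created if it did not exist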
|
8574dfc0503c8cc6410dc013a23689ac2b77f5d6
| 3,639,279
|
def dicom_strfname( names: tuple) -> str:
"""
    doe john s -> DICOM name (DOE^JOHN^S)
"""
return "^".join(names)
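# Minimal usage sketch: the components are expected to be passed already
# upper-cased, as in the docstring example.
print(dicom_strfname(("DOE", "JOHN", "S")))  # DOE^JOHN^S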
|
864ad0d4c70c9bb4acbc65c92bf83a97415b9d35
| 3,639,280
|
import json
def plot_new_data(logger):
"""
Plots mixing ratio data, creating plot files and queueing the files for upload.
This will plot data, regardless of if there's any new data since it's not run continously.
:param logger: logging logger to record to
:return: bool, True if ran corrected, False if exit on error
"""
logger.info('Running plot_new_data()')
try:
engine, session = connect_to_db(DB_NAME, CORE_DIR)
except Exception as e:
logger.error(f'Error {e.args} prevented connecting to the database in plot_new_data()')
return False
remotedir = BOULDAIR_BASE_PATH + '/MR_plots'
compounds_to_plot = (session.query(Quantification.name)
.join(Standard, Quantification.standard_id == Standard.id)
.filter(Standard.name == 'quantlist').all())
compounds_to_plot[:] = [q.name for q in compounds_to_plot]
date_limits, major_ticks, minor_ticks = create_monthly_ticks(6, days_per_minor=7)
with open(JSON_PUBLIC_DIR / 'zug_plot_info.json', 'r') as file:
compound_limits = json.loads(file.read())
for name in compounds_to_plot:
params = (GcRun.date, Compound.mr)
filters = (
Compound.name == name,
GcRun.date >= date_limits['left'],
*ambient_filters
)
results = abstract_query(params, filters, GcRun.date)
dates = [r.date for r in results]
mrs = [r.mr for r in results]
p = MixingRatioPlot(
{name: (dates, mrs)},
limits={**date_limits, **compound_limits[name]},
major_ticks=major_ticks,
minor_ticks=minor_ticks,
filepath=MR_PLOT_DIR / f'{name}_plot.png'
)
p.plot()
file_to_upload = FileToUpload(p.filepath, remotedir, staged=True)
add_or_ignore_plot(file_to_upload, session)
session.commit()
session.close()
engine.dispose()
return True
|
186b11d496c8b1097087f451e43d235b40d7a2ba
| 3,639,281
|
def plot_graphs(graphs=compute_graphs()):
    """ Display the graphs with the networkx library """
GF, Gf = graphs
pos = {1: (2, 1), 2: (4, 1), 3: (5, 2), 4: (4, 3), 5: (1, 3), 6: (1, 2), 7: (3, 4)}
plt.figure(1)
nx.draw_networkx_nodes(GF, pos, node_size=500)
nx.draw_networkx_labels(GF, pos)
nx.draw_networkx_edges(GF, pos, arrows=True)
plt.title("Graphe fort")
plt.show() # display
plt.figure(2)
nx.draw_networkx_nodes(Gf, pos, node_size=500)
nx.draw_networkx_labels(Gf, pos)
nx.draw_networkx_edges(Gf, pos, arrows=True, style="dashed")
plt.title("Graphe faible")
plt.show() # display
return GF, Gf
|
4db21b3f5a823b5a7a17264a611435d2aa3825a4
| 3,639,282
|
def get_polygon_name(polygon):
"""Returns the name for a given polygon.
    Since not all polygons store their name in the same field, we have to figure
out what type of polygon it is first, then reference the right field.
Args:
polygon: The polygon object to get the name from.
Returns:
The name for that polygon object.
"""
if isinstance(polygon, StatePolygon):
name = polygon.name
elif isinstance(polygon, CountyPolygon):
if polygon.geo_code < 10000000:
name = polygon.name[:-5]
else:
name = polygon.name + ' County'
elif isinstance(polygon, PumaPolygon):
name = polygon.puma_name[:-5]
return name
|
da89efece12fbb27a5ceafef83b73ade392644cb
| 3,639,283
|
def login():
"""Log user in"""
# Forget any user_id
session.clear()
# User reached route via POST (as by submitting a form via POST)
if request.method == "POST":
# Ensure username was submitted
if not request.form.get("username"):
return redirect("/login")
# Ensure password was submitted
elif not request.form.get("password"):
return redirect("/login")
        # Query database for username; use a parameterized query to avoid SQL
        # injection (placeholder syntax may vary with the database driver)
        username = request.form.get("username")
        rows = list(db.execute("SELECT * FROM users WHERE name = ?", (username,)))
# Ensure username exists and password is correct
pass_string = request.form.get("password")
if len(rows) != 1 or not check_password_hash(rows[0][2], pass_string):
return redirect("/login")
# Remember which user has logged in
session["user_id"] = rows[0][0]
# Redirect user to home page
return redirect("/")
# User reached route via GET (as by clicking a link or via redirect)
else:
return render_template("login.html")
|
8699a3f0f162706c2e0a0ab9565b8b595cbb7574
| 3,639,284
|
from . import paval as pv
import configparser
def read_option(file_path, section, option, fallback=None):
"""
Parse config file and read out the value of a certain option.
"""
try:
# For details see the notice in the header
pv.path(file_path, "config", True, True)
pv.string(section, "section string")
pv.string(option, "option string")
except NameError:
pass
c = configparser.RawConfigParser()
c.read(file_path)
value = ""
try:
value = c.get(section, option)
except configparser.NoSectionError:
        if fallback is not None:
return str(fallback)
else:
raise Exception("This section does not exist in the given " \
"config file.")
except configparser.NoOptionError:
        if fallback is not None:
return str(fallback)
else:
raise Exception("This option does not exist in the given " \
"section.")
return str(value)
|
6a9b839e36509630813c3cab5e45402b37377837
| 3,639,285
|
import pathlib
import os
def find_theme_file(theme_filename: pathlib.Path) -> pathlib.Path:
"""Find the real address of a theme file from the given one.
    First check whether the user has the file in their own themes directory.
    :param theme_filename: The name of the file to look for.
:return: A file path that exists with correct theme.
"""
# Tries in the themes file from appdata
data_theme_file_path = os.path.join(THEMES_DIR, theme_filename)
if os.path.isfile(data_theme_file_path):
return data_theme_file_path
else:
# Tries the default path
return os.path.join(DEFAULT_THEMES_DIR, "default.json")
|
73784d715f325fef0e547a7e2f467755ac0ba32b
| 3,639,286
|
import json
def msg_to_json(msg: Msg) -> json.Data:
"""Convert message to json serializable data"""
return {'facility': msg.facility.name,
'severity': msg.severity.name,
'version': msg.version,
'timestamp': msg.timestamp,
'hostname': msg.hostname,
'app_name': msg.app_name,
'procid': msg.procid,
'msgid': msg.msgid,
'data': msg.data,
'msg': msg.msg}
|
ee01821bdbcdcbe88f5c63f0a1f22d050814aa7f
| 3,639,287
|
def get_direct_dependencies(definitions_by_node: Definitions, node: Node) -> Nodes:
"""Get direct dependencies of a node"""
dependencies = set([node])
def traverse_definition(definition: Definition):
"""Traverses a definition and adds them to the dependencies"""
for dependency in definition['nodes']:
if dependency not in dependencies:
dependencies.add(dependency)
for children_definition in definition['children_definitions']:
traverse_definition(children_definition)
traverse_definition(definitions_by_node[node])
dependencies.discard(node)
return dependencies
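# Minimal usage sketch with a hypothetical two-level definition table:
# node 'a' references 'b' directly and 'c' through a child definition.
definitions = {
    'a': {'nodes': ['a', 'b'],
          'children_definitions': [{'nodes': ['c'], 'children_definitions': []}]},
}
print(get_direct_dependencies(definitions, 'a'))  # {'b', 'c'}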
|
6dfbfd9068ecc3759764b3542be62f270c45e4c1
| 3,639,288
|
def get_timeseries_metadata(request, file_type_id, series_id, resource_mode):
"""
Gets metadata html for the aggregation type (logical file type)
:param request:
:param file_type_id: id of the aggregation (logical file) object for which metadata in html
format is needed
    :param series_id: id of the time series for which metadata is to be displayed
:param resource_mode: a value of either edit or view. In resource edit mode metadata html
form elements are returned. In view mode normal html for display of metadata is returned
:return: json data containing html string
"""
if resource_mode != "edit" and resource_mode != 'view':
err_msg = "Invalid metadata type request."
ajax_response_data = {'status': 'error', 'message': err_msg}
return JsonResponse(ajax_response_data, status=status.HTTP_400_BAD_REQUEST)
logical_file, json_response = _get_logical_file("TimeSeriesLogicalFile", file_type_id)
if json_response is not None:
return json_response
series_ids = logical_file.metadata.series_ids_with_labels
if series_id not in series_ids.keys():
# this will happen only in case of CSV file upload when data is written
# first time to the blank sqlite file as the series ids get changed to
# uuids
        series_id = list(series_ids.keys())[0]
try:
if resource_mode == 'view':
metadata = logical_file.metadata.get_html(series_id=series_id)
else:
metadata = logical_file.metadata.get_html_forms(series_id=series_id)
ajax_response_data = {'status': 'success', 'metadata': metadata}
except Exception as ex:
        ajax_response_data = {'status': 'error', 'message': str(ex)}
return JsonResponse(ajax_response_data, status=status.HTTP_200_OK)
|
056707f6bd1947dd227c61dccb99b4f9d46ce9c9
| 3,639,289
|
def standardize(tag):
"""Put an order-numbering ID3 tag into our standard form.
This function does nothing when applied to a non-order-numbering tag.
Args:
tag: A mutagen ID3 tag, which is modified in-place.
Returns:
A 2-tuple with the decoded version of the order string.
    Raises:
BadOrderError: if the tag is obviously bad.
"""
if not _is_order_tag(tag):
return
tag.text[0] = standardize_str(tag.text[0])
return decode(tag.text[0])
|
66edb2f402e2781deaf39ae470b5f3c54411c1c3
| 3,639,290
|
from subprocess import run, PIPE, DEVNULL
def _count_objects(osm_pbf):
"""Count objects of each type in an .osm.pbf file."""
p = run(["osmium", "fileinfo", "-e", osm_pbf], stdout=PIPE, stderr=DEVNULL)
fileinfo = p.stdout.decode()
n_objects = {"nodes": 0, "ways": 0, "relations": 0}
for line in fileinfo.split("\n"):
for obj in n_objects:
if f"Number of {obj}" in line:
n_objects[obj] = int(line.split(":")[-1])
return n_objects
|
f3792b457e3cc922b6df3cef69dfb4c8d00c68d9
| 3,639,291
|
import numpy as np
def combine_multi_uncertainty(unc_lst):
    """Combine uncertainty values from multiple sources in quadrature (root sum of squares)."""
ur = 0
for i in range(len(unc_lst)):
ur += unc_lst[i] ** 2
ur = np.sqrt(float(ur))
return ur
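# Minimal usage sketch: 0.3 and 0.4 combined in quadrature give 0.5.
print(combine_multi_uncertainty([0.3, 0.4]))  # 0.5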
|
6f06afc7bda7d65b8534e7294411dbe5e499b755
| 3,639,292
|
import pandas as pd
def export_performance_df(
dataframe: pd.DataFrame, rule_name: str = None, second_df: pd.DataFrame = None, relationship: str = None
) -> pd.DataFrame:
"""
Function used to calculate portfolio performance for data after calculating a trading signal/rule and relationship.
"""
if rule_name is not None:
if rule_name in algorithm_functions["infertrade"]["allocation"].keys():
used_calculation = calculate_infertrade_allocation
elif rule_name in algorithm_functions["ta"]["signal"].keys():
used_calculation = calculate_ta_signal
elif rule_name in algorithm_functions["ta"]["allocation"].keys():
used_calculation = calculate_ta_allocation
elif rule_name in algorithm_functions["infertrade"]["signal"].keys():
used_calculation = calculate_infertrade_signal
elif rule_name in ta_export_regression_allocations.keys():
used_calculation = calculate_ta_regression_allocation
        else:
            raise ValueError("Algorithm not found")
df_with_performance = used_calculation(dataframe=dataframe, rule_name=rule_name)
else:
df_with_performance = dataframe
if relationship is not None:
if second_df is not None:
if rule_name is not None:
second_df_with_performance = used_calculation(dataframe=second_df, rule_name=rule_name)
else:
second_df_with_performance = second_df
second_df_with_relationship = calculate_infertrade_allocation(
dataframe=second_df_with_performance, rule_name=relationship
)
df_with_relationship = calculate_infertrade_allocation(
dataframe=df_with_performance, rule_name=relationship
)
complete_relationship = df_with_relationship.append(second_df_with_relationship, ignore_index=False)
return complete_relationship
else:
df_with_relationship = calculate_infertrade_allocation(
dataframe=df_with_performance, rule_name=relationship
)
return df_with_relationship
else:
return df_with_performance
|
e0587a658aab2e629bff7c307e5f1aaec63a80fe
| 3,639,293
|
def attention(x, scope, n_head, n_timesteps):
"""
perform multi-head qkv dot-product attention and linear project result
"""
n_state = x.shape[-1].value
with tf.variable_scope(scope):
queries = conv1d(x, 'q', n_state)
keys = conv1d(x, 'k', n_state)
values = conv1d(x, 'v', n_state)
        # note that split/merge of heads is fused into the attention ops (no reshape/transpose needed)
bst = get_blocksparse_attention_ops(n_timesteps, n_head)
attention_energies = bst.query_key_op(queries, keys)
attention_weights = bst.masked_softmax(attention_energies, scale=tf.rsqrt(n_state / n_head))
weighted_values = bst.weight_value_op(attention_weights, values)
result = conv1d(weighted_values, 'proj', n_state)
return result
|
63456ce40c4e72339638f460a8138dcd143e7352
| 3,639,294
|
def std_ver_minor_inst_valid_possible(std_ver_minor_uninst_valid_possible): # pylint: disable=redefined-outer-name
"""Return an instantiated IATI Version Number."""
return iati.Version(std_ver_minor_uninst_valid_possible)
|
9570918df11a63faf194da9db82aa4ea1745c920
| 3,639,295
|
def sequence_loss_by_example(logits, targets, weights,
average_across_timesteps=True,
softmax_loss_function=None, name=None):
"""Weighted cross-entropy loss for a sequence of logits (per example).
Args:
logits: List of 2D Tensors of shape [batch_size x num_decoder_symbols].
targets: List of 1D batch-sized int32 Tensors of the same length as logits.
weights: List of 1D batch-sized float-Tensors of the same length as logits.
average_across_timesteps: If set, divide the returned cost by the total
label weight.
softmax_loss_function: Function (inputs-batch, labels-batch) -> loss-batch
to be used instead of the standard softmax (the default if this is None).
name: Optional name for this operation, default: "sequence_loss_by_example".
Returns:
1D batch-sized float Tensor: The log-perplexity for each sequence.
Raises:
ValueError: If len(logits) is different from len(targets) or len(weights).
"""
if len(targets) != len(logits) or len(weights) != len(logits):
raise ValueError("Lengths of logits, weights, and targets must be the same "
"%d, %d, %d." % (len(logits), len(weights), len(targets)))
with tf.name_scope( name,
"sequence_loss_by_example",logits + targets + weights):
log_perp_list = []
for logit, target, weight in zip(logits, targets, weights):
if softmax_loss_function is None:
target = tf.reshape(target, [-1])
        crossent = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=target, logits=logit)
else:
crossent = softmax_loss_function(logit, target)
log_perp_list.append(crossent * weight)
log_perps = tf.add_n(log_perp_list)
if average_across_timesteps:
total_size = tf.add_n(weights)
total_size += 1e-12 # Just to avoid division by 0 for all-0 weights.
log_perps /= total_size
return log_perps
|
adf8a063c6f41b41e174852466489f535c7e0761
| 3,639,296
|
def skip(line):
"""Returns true if line is all whitespace or shebang."""
stripped = line.lstrip()
return stripped == '' or stripped.startswith('#!')
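# Minimal usage sketch:
print(skip('#!/usr/bin/env python'))  # True (shebang)
print(skip('   '))                    # True (whitespace only)
print(skip('x = 1'))                  # False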
|
4ecfb9c0f2d497d52cc9d9e772e75d042cc0bcce
| 3,639,297
|
def get_dss_client(deployment_stage: str):
"""
Returns appropriate DSSClient for deployment_stage.
"""
dss_env = MATRIX_ENV_TO_DSS_ENV[deployment_stage]
if dss_env == "prod":
swagger_url = "https://dss.data.humancellatlas.org/v1/swagger.json"
else:
swagger_url = f"https://dss.{dss_env}.data.humancellatlas.org/v1/swagger.json"
logger.info(f"ETL: Hitting DSS with Swagger URL: {swagger_url}")
dss_config = hca.HCAConfig()
dss_config['DSSClient'] = {}
dss_config['DSSClient']['swagger_url'] = swagger_url
client = hca.dss.DSSClient(config=dss_config)
return client
|
4e260b37c6f74261362cc10b77b3b28d1464d49d
| 3,639,298
|
def bounce_off(bounce_obj_rect: Rect, bounce_obj_speed,
hit_obj_rect: Rect, hit_obj_speed):
"""
The alternative version of `bounce_off_ip`. The function returns the result
instead of updating the value of `bounce_obj_rect` and `bounce_obj_speed`.
@return A tuple (`new_bounce_obj_rect`, `new_bounce_obj_speed`)
"""
new_bounce_obj_rect = bounce_obj_rect.copy()
new_bounce_obj_speed = bounce_obj_speed.copy()
bounce_off_ip(new_bounce_obj_rect, new_bounce_obj_speed,
hit_obj_rect, hit_obj_speed)
return new_bounce_obj_rect, new_bounce_obj_speed
|
84b038c05f5820065293ba90b73497f0d1e7a7b9
| 3,639,299
|