content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def plot(
    key: Array,
    gp: gpx.Prior,
    params: dict,
    data: gpx.Dataset,
    n_samples: int = 10,
    title: str = None,
    ax=None,
):
    """
    Plot samples from the Gaussian process prior distribution.
    :param key: A JAX PRNGKey object to ensure reproducibility when sampling from the prior distribution.
    :param gp: A generic Gaussian process prior.
    :param params: The Gaussian process prior's corresponding parameter set.
    :param data: The training dataset; its inputs ``data.X`` provide the x-locations that are plotted and set the x-axis limits.
    :param n_samples: The number of samples to be drawn from the prior distribution. The default is 10.
    :param title: What title, if any, should be added to the plot (placed at the left).
    :param ax: Optional matplotlib axes argument; a new figure/axes pair is created when omitted.
    :return: None -- the function draws onto ``ax`` as a side effect.
    """
    samples = gpx.sample(key, gp, params, data, n_samples=n_samples)
    cols = get_colours()
    # Create a fresh axes only when the caller did not supply one.
    if ax is None:
        fig, ax = plt.subplots()
    # One translucent line per sample (samples are stacked row-wise, hence .T).
    ax.plot(data.X, samples.T, alpha=0.3, color=cols["base"])
    ax.set_xlabel("X")
    ax.set_ylabel("y")
    ax.set_xlim(jnp.min(data.X), jnp.max(data.X))
    ax.set_title(title, loc="left")
def deconv4x4_block(in_channels,
                    out_channels,
                    stride=1,
                    padding=3,
                    ext_padding=(2, 1, 2, 1),
                    out_padding=0,
                    dilation=1,
                    groups=1,
                    bias=False,
                    use_bn=True,
                    bn_eps=1e-5,
                    activation=(lambda: nn.ReLU(inplace=True))):
    """
    4x4 version of the standard deconvolution block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 3
        Padding value for deconvolution layer.
    ext_padding : tuple/list of 4 int, default (2, 1, 2, 1)
        Extra padding value for deconvolution layer.
    out_padding : int or tuple/list of 2 int, default 0
        Output padding value for deconvolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default nn.ReLU(inplace=True)
        Activation function or name of activation function.
    """
    # Thin convenience wrapper: fixes kernel_size=4 and forwards all other
    # arguments unchanged to DeconvBlock.
    return DeconvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=4,
        stride=stride,
        padding=padding,
        ext_padding=ext_padding,
        out_padding=out_padding,
        dilation=dilation,
        groups=groups,
        bias=bias,
        use_bn=use_bn,
        bn_eps=bn_eps,
        activation=activation)
def main():
    """Program main, called after args are parsed into FLAGS.

    Builds a TestRunner from the FLAGS paths and runs the comma-separated
    test names listed in ``FLAGS.test_list``.

    Example:
    python runner.py --workspace=/workspace
    --bench-home=/mxnet_repo/incubator-mxnet/example/image-classification
    --train-data-dir=/mxnet_repo/train/data
    """
    test_runner = TestRunner(
        FLAGS.workspace, FLAGS.bench_home, imagenet_dir=FLAGS.train_data_dir)
    test_runner.run_tests(FLAGS.test_list.split(','))
def get_package_requirements():
    """ Read the package requirements from the ``requirements.txt`` file.

    Blank lines and comment lines (those starting with ``#``) are skipped;
    every remaining line is returned stripped of surrounding whitespace.

    :return: list of requirement strings
    :rtype: list
    """
    stripped = (raw.strip() for raw in
                read_file_contents("requirements.txt").splitlines())
    return [entry for entry in stripped
            if entry and not entry.startswith("#")]
def dem_coregistration_custom(masterDEM, slaveDEM, glaciermask=None, landmask=None, outdir='.', pts=False, full_ext=False,magnlimit=1.):
    #This coreg code is from Robert McNaab and Chris Nuth: pybob
    """
    Iteratively co-register elevation data, based on routines described in Nuth and Kaeaeb, 2011.
    Parameters
    ----------
    masterDEM : string or GeoImg
        Path to filename or GeoImg dataset representing "master" DEM.
    slaveDEM : string or GeoImg
        Path to filename or GeoImg dataset representing "slave" DEM.
    glaciermask : string, optional
        Path to shapefile representing points to exclude from co-registration
        consideration (i.e., glaciers).
    landmask : string, optional
        Path to shapefile representing points to include in co-registration
        consideration (i.e., stable ground/land).
    outdir : string, optional
        Location to save co-registration outputs.
    pts : bool, optional
        If True, program assumes that masterDEM represents point data (i.e., ICESat),
        as opposed to raster data. Slope/aspect are then calculated from slaveDEM.
        masterDEM should be a string representing an HDF5 file containing ICESat data.
    full_ext : bool, optional
        If True, program writes full extents of input DEMs. If False, program writes
        input DEMs cropped to their common extent. Default is False.
    magnlimit : float, optional
        Convergence limit on the magnitude of a single iteration's (dx, dy, dz)
        shift; iteration stops once the shift magnitude drops to this value or
        below (and the percent-improvement threshold is met). Default is 1.
    Returns
    -------
    masterDEM, outslave, out_offs, mystd : the (possibly cropped/reprojected)
        master DEM, the shifted slave DEM, the accumulated [dx, dy, dz]
        offsets, and the final RMSE of the elevation differences.
    """
    def preprocess(stable_mask, slope, aspect, master, slave):
        # Build the masked elevation-difference field dH and the (aspect,
        # dH/tan(slope), tan(slope)) samples used by the Nuth & Kaeaeb fit.
        if isinstance(master, GeoImg):
            stan = np.tan(np.radians(slope)).astype(np.float32)
            dH = master.copy(new_raster=(master.img - slave.img))
            dH.img[stable_mask] = np.nan
            master_mask = isinstance(master.img, np.ma.masked_array)
            slave_mask = isinstance(slave.img, np.ma.masked_array)
            # Propagate whichever masked-array masks exist into dH.
            if master_mask and slave_mask:
                dH.mask(np.logical_or(master.img.mask, slave.img.mask))
            elif master_mask:
                dH.mask(master.img.mask)
            elif slave_mask:
                dH.mask(slave.img.mask)
            if dH.isfloat:
                dH.img[stable_mask] = np.nan
            #adding a 5NMAD filtering for robustness at various resolutions
            myfirstkeep = ((np.absolute(dH.img) < 200.0) & np.isfinite(dH.img) & (aspect>0))
            # NOTE(review): NMAD is conventionally 1.4826 * median(|dH - median(dH)|);
            # here the absolute value is applied to dH alone before subtracting the
            # median -- confirm this is intended.
            nmad = 1.4826 * np.median(np.abs(dH.img[myfirstkeep]) - np.median(dH.img[myfirstkeep]))
            dHtan = dH.img / stan
            #here too
            mykeep = ((np.absolute(dH.img) < 200.0) & np.isfinite(dH.img) &
                      (slope > 7.0) & (dH.img != 0.0) & (aspect >= 0) & (np.absolute(dH.img - np.median(dH.img[myfirstkeep])) < 5*nmad))
            dH.img[np.invert(mykeep)] = np.nan
            xdata = aspect[mykeep]
            ydata = dHtan[mykeep]
            sdata = stan[mykeep]
        elif isinstance(master, ICESat):
            # Point-data branch: sample the slave raster (and slope/aspect
            # rasters) at the ICESat point locations.
            slave_pts = slave.raster_points(master.xy)
            dH = master.elev - slave_pts
            slope_pts = slope.raster_points(master.xy)
            stan = np.tan(np.radians(slope_pts))
            aspect_pts = aspect.raster_points(master.xy)
            smask = stable_mask.raster_points(master.xy) > 0
            dH[smask] = np.nan
            dHtan = dH / stan
            mykeep = ((np.absolute(dH) < 200.0) & np.isfinite(dH) &
                      (slope_pts > 3.0) & (dH != 0.0) & (aspect_pts >= 0))
            dH[np.invert(mykeep)] = np.nan
            xdata = aspect_pts[mykeep]
            ydata = dHtan[mykeep]
            sdata = stan[mykeep]
        return dH, xdata, ydata, sdata
    # if the output directory does not exist, create it.
    outdir = os.path.abspath(outdir)
    try:
        os.makedirs(outdir)
    except OSError as exc:  # Python >2.5
        if exc.errno == errno.EEXIST and os.path.isdir(outdir):
            pass
        else:
            raise
    # make a file to save the coregistration parameters to.
    paramf = open(outdir + os.path.sep + 'coreg_params.txt', 'w')
    # create the output pdf
    pp = PdfPages(outdir + os.path.sep + 'CoRegistration_Results.pdf')
    if full_ext:
        print('Writing full extents of output DEMs.')
    else:
        print('Writing DEMs cropped to common extent.')
    # Record the input filenames so the adjusted outputs can be named after them.
    if type(masterDEM) is str:
        mfilename = os.path.basename(masterDEM)
        mfiledir = os.path.dirname(masterDEM)
    else:
        mfilename = masterDEM.filename
        mfiledir = masterDEM.in_dir_path
    if type(slaveDEM) is str:
        sfilename = os.path.basename(slaveDEM)
    else:
        sfilename = slaveDEM.filename
    slaveDEM = get_geoimg(slaveDEM)
    # if we're dealing with ICESat/pt data, change how we load masterDEM data
    if pts:
        masterDEM = ICESat(masterDEM)
        masterDEM.project('epsg:{}'.format(slaveDEM.epsg))
        mybounds = [slaveDEM.xmin, slaveDEM.xmax, slaveDEM.ymin, slaveDEM.ymax]
        masterDEM.clip(mybounds)
        masterDEM.clean()
        # Slope/aspect come from the slave DEM in the point-data case.
        slope_geo = get_slope(slaveDEM)
        aspect_geo = get_aspect(slaveDEM)
        slope_geo.write('tmp_slope.tif', out_folder=outdir)
        aspect_geo.write('tmp_aspect.tif', out_folder=outdir)
        smask = create_stable_mask(slaveDEM, glaciermask, landmask)
        slaveDEM.mask(smask)
        stable_mask = slaveDEM.copy(new_raster=smask)  # make the mask a geoimg
    else:
        orig_masterDEM = get_geoimg(masterDEM)
        masterDEM = orig_masterDEM.reproject(slaveDEM)  # need to resample masterDEM to cell size of slave.
        # masterDEM.img[masterDEM.img<1]=np.nan
        stable_mask = create_stable_mask(masterDEM, glaciermask, landmask)
        slope_geo = get_slope(masterDEM)
        aspect_geo = get_aspect(masterDEM)
        slope_geo.write('tmp_slope.tif', out_folder=outdir)
        aspect_geo.write('tmp_aspect.tif', out_folder=outdir)
        masterDEM.mask(stable_mask)
    slope = slope_geo.img
    aspect = aspect_geo.img
    # Iteration state: percent-improvement threshold, running RMSE, iteration
    # count, and accumulated shifts in x, y, z.
    mythresh = np.float64(200)  # float64 really necessary?
    mystd = np.float64(200)
    mycount = 0
    tot_dx = np.float64(0)
    tot_dy = np.float64(0)
    tot_dz = np.float64(0)
    magnthresh = 200
    # magnlimit = 1
    mytitle = 'DEM difference: pre-coregistration'
    if pts:
        this_slave = slaveDEM
        this_slave.mask(stable_mask.img)
    else:
        this_slave = slaveDEM.reproject(masterDEM)
        this_slave.mask(stable_mask)
    # Iterate until the RMSE improvement falls to 2% or the shift magnitude
    # falls to magnlimit.
    while mythresh > 2 and magnthresh > magnlimit:
        if mycount != 0:
            # slaves.append(slaves[-1].reproject(masterDEM))
            # slaves[-1].mask(stable_mask)
            mytitle = "DEM difference: After Iteration {}".format(mycount)
        mycount += 1
        print("Running iteration #{}".format(mycount))
        print("Running iteration #{}".format(mycount), file=paramf)
        # if we don't have two DEMs, showing the false hillshade doesn't work.
        if not pts:
            dH, xdata, ydata, sdata = preprocess(stable_mask, slope, aspect, masterDEM, this_slave)
            false_hillshade(dH, mytitle, pp)
            dH_img = dH.img
        else:
            dH, xdata, ydata, sdata = preprocess(stable_mask, slope_geo, aspect_geo, masterDEM, this_slave)
            dH_img = dH
        if mycount == 1:
            dH0 = dH_img
        # calculate threshold, standard deviation of dH
        # mythresh = 100 * (mystd-np.nanstd(dH_img))/mystd
        # mystd = np.nanstd(dH_img)
        # USE RMSE instead ( this is to make su that there is improvement in the spread)
        mythresh = 100 * (mystd - RMSE(dH_img)) / mystd
        mystd = RMSE(dH_img)
        mytitle2 = "Co-registration: Iteration {}".format(mycount)
        dx, dy, dz = coreg_fitting(xdata, ydata, sdata, mytitle2, pp)
        tot_dx += dx
        tot_dy += dy
        tot_dz += dz
        magnthresh = np.sqrt(np.square(dx) + np.square(dy) + np.square(dz))
        print(tot_dx, tot_dy, tot_dz)
        print(tot_dx, tot_dy, tot_dz, file=paramf)
        # print np.nanmean(slaves[-1].img)
        # print slaves[-1].xmin, slaves[-1].ymin
        # shift most recent slave DEM
        this_slave.shift(dx, dy)  # shift in x,y
        # print tot_dx, tot_dy
        # no idea why slaves[-1].img += dz doesn't work, but the below seems to.
        zupdate = np.ma.array(this_slave.img.data + dz, mask=this_slave.img.mask)  # shift in z
        this_slave = this_slave.copy(new_raster=zupdate)
        if pts:
            # In the point case, shift the auxiliary rasters with the slave so
            # everything stays aligned.
            this_slave.mask(stable_mask.img)
            slope_geo.shift(dx, dy)
            aspect_geo.shift(dx, dy)
            stable_mask.shift(dx, dy)
        else:
            this_slave = this_slave.reproject(masterDEM)
            this_slave.mask(stable_mask)
        print("Percent-improvement threshold and Magnitute threshold:")
        print(mythresh, magnthresh)
        # slaves[-1].display()
        if mythresh > 2 and magnthresh > magnlimit:
            # Not converged yet: drop per-iteration products before looping.
            dH = None
            dx = None
            dy = None
            dz = None
            xdata = None
            ydata = None
            sdata = None
        else:
            # Converged: compute the final difference products for reporting.
            if not pts:
                dH, xdata, ydata, sdata = preprocess(stable_mask, slope, aspect, masterDEM, this_slave)
                mytitle = "DEM difference: After Iteration {}".format(mycount)
                # adjust final dH
                # myfadj=np.nanmean([np.nanmean(dH.img),np.nanmedian(dH.img)])
                # myfadj=np.nanmedian(dH.img)
                # tot_dz += myfadj
                # dH.img = dH.img-myfadj
                false_hillshade(dH, mytitle, pp)
                dHfinal = dH.img
            else:
                mytitle2 = "Co-registration: FINAL"
                dH, xdata, ydata, sdata = preprocess(stable_mask, slope_geo, aspect_geo, masterDEM, this_slave)
                dx, dy, dz = coreg_fitting(xdata, ydata, sdata, mytitle2, pp)
                dHfinal = dH
    # Create final histograms pre and post coregistration
    # shift = [tot_dx, tot_dy, tot_dz]  # commented because it wasn't actually used.
    final_histogram(dH0, dHfinal, pp)
    # create new raster with dH sample used for co-registration as the band
    # dHSample = dH.copy(new_raster=dHpost_sample)
    # dHSample.write(outdir + os.path.sep + 'dHpost_sample.tif')  # have to fill these in!
    # save full dH output
    # dHfinal.write('dHpost.tif', out_folder=outdir)
    # save adjusted slave dem
    if sfilename is not None:
        slaveoutfile = '.'.join(sfilename.split('.')[0:-1]) + '_adj.tif'
    else:
        slaveoutfile = 'slave_adj.tif'
    if pts:
        outslave = slaveDEM.copy()
    else:
        if full_ext:
            outslave = get_geoimg(slaveDEM)
        else:
            outslave = slaveDEM.reproject(masterDEM)
    # Apply the accumulated shifts to a clean copy of the slave DEM and save it.
    outslave.shift(tot_dx, tot_dy)
    outslave.img = outslave.img + tot_dz
    outslave.write(slaveoutfile, out_folder=outdir)
    outslave.filename = slaveoutfile
    if pts:
        slope_geo.write('tmp_slope.tif', out_folder=outdir)
        aspect_geo.write('tmp_aspect.tif', out_folder=outdir)
    # Final Check --- for debug
    if not pts:
        dH, xdata, ydata, sdata = preprocess(stable_mask, slope, aspect, masterDEM, outslave)
        false_hillshade(dH, 'FINAL CHECK', pp)
    if mfilename is not None:
        mastoutfile = '.'.join(mfilename.split('.')[0:-1]) + '_adj.tif'
    else:
        mastoutfile = 'master_adj.tif'
    if full_ext:
        masterDEM = orig_masterDEM
    masterDEM.write(mastoutfile, out_folder=outdir)
    pp.close()
    print("Fin.")
    print("Fin.", file=paramf)
    paramf.close()
    plt.close('all')
    out_offs = [tot_dx, tot_dy, tot_dz]
    return masterDEM, outslave, out_offs, mystd
def check_types_of_edge_constraint(sub_graph):
    """ Walk every partition in the subgraph and verify that its key
    allocator constraints are mutually compatible.

    :param sub_graph: the subgraph to search through
    :return: None
    :raises PacmanConfigurationException: if a partition carries duplicated
        or conflicting constraint combinations
    """
    constraint_types = (
        KeyAllocatorFixedKeyAndMaskConstraint,
        KeyAllocatorFixedMaskConstraint,
        KeyAllocatorFixedFieldConstraint,
        KeyAllocatorFlexiFieldConstraint,
    )
    for partition in sub_graph.partitions:
        # Collect the matching constraints for each recognised type.
        matches = [
            utility_calls.locate_constraints_of_type(
                partition.constraints, constraint_type)
            for constraint_type in constraint_types]
        if any(len(found) > 1 for found in matches):
            raise exceptions.PacmanConfigurationException(
                "There are more than one of the same constraint type on "
                "the partition {} for edges {}. Please fix and try again."
                .format(partition.identifer, partition.edges))
        fixed_key, fixed_mask, fixed_field, flexi_field = (
            len(found) == 1 for found in matches)
        # check for fixed key and a fixed mask. as these should have been
        # merged before now
        if fixed_key and fixed_mask:
            raise exceptions.PacmanConfigurationException(
                "The partition {} with edges {} has a fixed key and fixed "
                "mask constraint. These can be merged together, but is "
                "deemed an error here"
                .format(partition.identifer, partition.edges))
        # check for a fixed key and fixed field, as these are incompatible
        if fixed_key and fixed_field:
            raise exceptions.PacmanConfigurationException(
                "The partition {} for edges {} has a fixed key and fixed "
                "field constraint. These may be merge-able together, but "
                "is deemed an error here"
                .format(partition.identifer, partition.edges))
        # check that a fixed mask and fixed field have compatible masks
        if fixed_mask and fixed_field:
            _check_masks_are_correct(partition)
        # a flexible field cannot coexist with any fixed constraint
        if flexi_field and (fixed_mask or fixed_key or fixed_field):
            raise exceptions.PacmanConfigurationException(
                "The partition {} for edges {} has a flexible field and "
                "another fixed constraint. These maybe be merge-able, but "
                "is deemed an error here"
                .format(partition.identifer, partition.edges))
def setup_args():
    """Setup training arguments.

    Builds the argparse parser for distributed/training/validation options,
    lets the model and task modules register their own flags, then parses,
    loads the "Model" section of the config file and displays the result.

    :return: parsed args namespace
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--is_distributed", type=str2bool, default=False,
                        help="Whether to run distributed training.")
    parser.add_argument("--save_path", type=str, default="output",
                        help="The path where to save models.")
    parser.add_argument("--train_file", type=str, required=True,
                        help="The training dataset: file / filelist. "
                        "See more details in `docs/usage.md`: `file_format`.")
    parser.add_argument("--valid_file", type=str, required=True,
                        help="The validation datasets: files / filelists. "
                        "The files / filelists are separated by `,`. "
                        "See more details in `docs/usage.md`: `file_format`.")
    parser.add_argument("--start_step", type=int, default=0,
                        help="The start step of training. It will be updated if you load from a checkpoint.")
    parser.add_argument("--num_epochs", type=int, default=20,
                        help="The number of times that the learning algorithm will work through the entire training dataset.")
    parser.add_argument("--log_steps", type=int, default=100,
                        help="Display training / evaluation log information every X steps.")
    parser.add_argument("--validation_steps", type=int, default=1000,
                        help="Run validation every X training steps.")
    # Typo fixes in user-facing help text: "lastest" -> "latest".
    parser.add_argument("--save_steps", type=int, default=0,
                        help="Save the latest model every X training steps. "
                        "If `save_steps = 0`, then it only keeps the latest checkpoint.")
    parser.add_argument("--eval_metric", type=str, default="-loss",
                        help="Keep the checkpoint with best evaluation metric.")
    parser.add_argument("--save_checkpoint", type=str2bool, default=True,
                        help="Save completed checkpoint or parameters only. "
                        "The checkpoint contains all states for continuous training.")
    # Let the model / task registries contribute their own command-line flags.
    models.add_cmdline_args(parser)
    tasks.add_cmdline_args(parser)
    args = parse_args(parser)
    args.load(args.config_path, "Model")
    args.display()
    return args
def graph2tree(mat, root, closedset=None):
    """Convert a graph to a tree data structure.

    *mat* is an adjacency mapping (vertex -> {neighbor: weight}); the tree is
    built by a depth-first walk from *root*, visiting each vertex once.
    """
    if closedset is None:
        closedset = set()
    tree = Tree()

    def build(vertex):
        # Create the tree node for this vertex and mark it visited before
        # descending, so cycles in the graph are not re-entered.
        branch = TreeNode(vertex)
        branch.dist = 0
        closedset.add(vertex)
        for neighbor in mat[vertex]:
            if neighbor in closedset:
                continue
            subtree = build(neighbor)
            subtree.dist = mat[vertex][neighbor]
            tree.add_child(branch, subtree)
        return branch

    tree.root = build(root)
    # Record the largest integer node name currently in use.
    tree.nextname = max(name for name in tree.nodes if isinstance(name, int))
    return tree
def test_simple_roundtrip_tuple(cls_and_vals, dv: bool):
    """
    Simple classes with metadata can be unstructured and restructured.

    ``cls_and_vals`` is a (class, constructor values, extra) triple -- the
    third element is unused here; ``dv`` toggles the converter's detailed
    validation mode.
    """
    converter = Converter(
        unstruct_strat=UnstructureStrategy.AS_TUPLE, detailed_validation=dv
    )
    cl, vals, _ = cls_and_vals
    inst = cl(*vals)
    unstructured = converter.unstructure(inst)
    # Presumably guards against hypothesis strategy objects leaking into the
    # unstructured tuple -- verify against the test suite's fixtures.
    assert "Hyp" not in repr(unstructured)
    # Round trip: structuring the tuple back must yield an equal instance.
    assert inst == converter.structure(unstructured, cl)
def is_hex(hex_str):
    """Helper function to verify a string is a hex value.

    Only non-empty, lowercase hexadecimal strings (digits and ``a``-``f``)
    are accepted.

    :param hex_str: string to check
    :return: True if ``hex_str`` is entirely lowercase hex, else False
    """
    # fullmatch returns a Match object or None; normalize to a real bool so
    # callers get a consistent return type.
    return re.fullmatch('[0-9a-f]+', hex_str) is not None
def choose_conv_method(in1, in2, mode='full', measure=False):
    """
    Find the fastest convolution/correlation method.
    This primarily exists to be called during the ``method='auto'`` option in
    `convolve` and `correlate`, but can also be used when performing many
    convolutions of the same input shapes and dtypes, determining
    which method to use for all of them, either to avoid the overhead of the
    'auto' option or to use accurate real-world measurements.
    Parameters
    ----------
    in1 : array_like
        The first argument passed into the convolution function.
    in2 : array_like
        The second argument passed into the convolution function.
    mode : str {'full', 'valid', 'same'}, optional
        A string indicating the size of the output:
        ``full``
           The output is the full discrete linear convolution
           of the inputs. (Default)
        ``valid``
           The output consists only of those elements that do not
           rely on the zero-padding.
        ``same``
           The output is the same size as `in1`, centered
           with respect to the 'full' output.
    measure : bool, optional
        If True, run and time the convolution of `in1` and `in2` with both
        methods and return the fastest. If False (default), predict the fastest
        method using precomputed values.
    Returns
    -------
    method : str
        A string indicating which convolution method is fastest, either
        'direct' or 'fft'
    times : dict, optional
        A dictionary containing the times (in seconds) needed for each method.
        This value is only returned if ``measure=True``.
    See Also
    --------
    convolve
    correlate
    Notes
    -----
    For large n, ``measure=False`` is accurate and can quickly determine the
    fastest method to perform the convolution. However, this is not as
    accurate for small n (when any dimension in the input or output is small).
    In practice, we found that this function estimates the faster method up to
    a multiplicative factor of 5 (i.e., the estimated method is *at most* 5
    times slower than the fastest method). The estimation values were tuned on
    an early 2015 MacBook Pro with 8GB RAM but we found that the prediction
    held *fairly* accurately across different machines.
    If ``measure=True``, time the convolutions. Because this function uses
    `fftconvolve`, an error will be thrown if it does not support the inputs.
    There are cases when `fftconvolve` supports the inputs but this function
    returns `direct` (e.g., to protect against floating point integer
    precision).
    .. versionadded:: 0.19
    Examples
    --------
    Estimate the fastest method for a given input:
    >>> from scipy import signal
    >>> a = np.random.randn(1000)
    >>> b = np.random.randn(1000000)
    >>> method = signal.choose_conv_method(a, b, mode='same')
    >>> method
    'fft'
    This can then be applied to other arrays of the same dtype and shape:
    >>> c = np.random.randn(1000)
    >>> d = np.random.randn(1000000)
    >>> # `method` works with correlate and convolve
    >>> corr1 = signal.correlate(a, b, mode='same', method=method)
    >>> corr2 = signal.correlate(c, d, mode='same', method=method)
    >>> conv1 = signal.convolve(a, b, mode='same', method=method)
    >>> conv2 = signal.convolve(c, d, mode='same', method=method)
    """
    volume = asarray(in1)
    kernel = asarray(in2)
    # Empirical path: actually time both methods and return the winner plus
    # the measured timings.
    if measure:
        times = {}
        for method in ['fft', 'direct']:
            times[method] = _timeit_fast(lambda: convolve(volume, kernel,
                                         mode=mode, method=method))
        chosen_method = 'fft' if times['fft'] < times['direct'] else 'direct'
        return chosen_method, times
    # fftconvolve doesn't support complex256
    fftconv_unsup = "complex256" if sys.maxsize > 2**32 else "complex192"
    if hasattr(np, fftconv_unsup):
        if volume.dtype == fftconv_unsup or kernel.dtype == fftconv_unsup:
            return 'direct'
    # for integer input,
    # catch when more precision required than float provides (representing an
    # integer as float can lose precision in fftconvolve if larger than 2**52)
    if any([_numeric_arrays([x], kinds='ui') for x in [volume, kernel]]):
        max_value = int(np.abs(volume).max()) * int(np.abs(kernel).max())
        max_value *= int(min(volume.size, kernel.size))
        # 2**nmant - 1 is the largest integer exactly representable as float.
        if max_value > 2**np.finfo('float').nmant - 1:
            return 'direct'
    # Boolean arrays always go direct.
    if _numeric_arrays([volume, kernel], kinds='b'):
        return 'direct'
    if _numeric_arrays([volume, kernel]):
        if _fftconv_faster(volume, kernel, mode):
            return 'fft'
    return 'direct'
def jet(data, range=None, exp=1.0):
    """
    Creates a JET colormap from data
    Parameters
    ----------
    data : np.array [N,1]
        Data to be converted into a colormap; None, tuples and empty arrays
        are returned unchanged.
    range : tuple (min,max)
        Optional range value for the colormap (if None, use min and max from data)
    exp : float
        Exponential value to weight the color differently
    Returns
    -------
    colormap : np.array [N,3]
        Colormap obtained from data
    """
    # Return if data is not available. The tuple check must come BEFORE the
    # ``.size`` access: tuples have no ``size`` attribute, so the original
    # ordering raised AttributeError instead of passing tuples through.
    if data is None or isinstance(data, tuple) or data.size == 0:
        return data
    # If data is a tensor, convert to numpy
    if is_tensor(data):
        data = data.detach().cpu().numpy()
    # If data is [N,1], remove second dimensions
    if len(data.shape) > 1:
        data = data.reshape(-1)
    # Determine range if not available
    if range is None:
        data = data.copy() - np.min(data)
        data = data / (np.max(data) + 1e-6)
    else:
        data = (data - range[0]) / (range[1] - range[0])
        data = np.maximum(np.minimum(data, 1.0), 0.0)
    # Use exponential if requested
    if exp != 1.0:
        data = data ** exp
    # Initialize colormap (renamed from ``jet`` to avoid shadowing this function)
    colormap = np.ones((data.shape[0], 3), dtype=np.float32)
    # First stage
    idx = (data <= 0.33)
    colormap[idx, 1] = data[idx] / 0.33
    colormap[idx, 0] = 0.0
    # Second stage
    idx = (data > 0.33) & (data <= 0.67)
    colormap[idx, 0] = (data[idx] - 0.33) / 0.33
    colormap[idx, 2] = 1.0 - colormap[idx, 0]
    # Third stage
    idx = data > 0.67
    colormap[idx, 1] = 1.0 - (data[idx] - 0.67) / 0.33
    colormap[idx, 2] = 0.0
    # Return colormap
    return colormap
def get_context(context):
    """Populate the application documentation context in place.

    Sets branding, source link, docs base URL and heading fields on the
    given context object (module-level constants supply the values).

    :param context: application documentation context object to mutate
    :return: None -- the context is modified as a side effect
    """
    context.brand_html = "ERPNext OCR"
    context.source_link = source_link
    context.docs_base_url = docs_base_url
    context.headline = headline
    context.sub_heading = sub_heading
def collatz_seq(n, collatz_dict={}):
    """ Takes an integer n and returns the resulting Collatz sequence as a list.

    The mutable default ``collatz_dict`` is an intentional shared memo across
    calls: when the sequence reaches a value whose tail was computed before,
    the cached tail is spliced in instead of being recomputed.
    """
    seq = [n]
    while n > 1:
        n = next_collatz(n)
        if n in collatz_dict:
            # Known tail: reuse it and stop iterating.
            seq.extend(collatz_dict[n])
            break
        seq.append(n)
    # Cache and return only once the FULL sequence is built. (The original
    # returned from inside the loop on the first uncached value, truncating
    # every fresh sequence to two elements.)
    collatz_dict[seq[0]] = seq
    return seq
def create_app():
    """
    Create a Flask application using the app factory pattern.

    Configuration is loaded from ``config.settings`` and the ``contact``
    blueprint is registered.

    :return: Flask app
    """
    app = Flask(__name__)
    app.config.from_object('config.settings')
    app.register_blueprint(contact)
    return app
def config_update():
    """
    Upload the Dockerfile and related configuration files.

    Builds the configuration first, then uploads it.
    :return: None
    """
    config_build()
    config_upload()
def get_prebuilt_piccolo():
    """
    :return: pair of the piccolo feature model filename and its fm.json contents as a string
    """
    # NOTE(review): hard-coded absolute path -- assumes the tool-suite tutorial
    # layout inside the besspinuser home directory.
    DEFAULT_PREBUILT_PICCOLO = f'/home/besspinuser/tool-suite/tutorial/piccolo-simple-pregen.fm.json'
    with open(DEFAULT_PREBUILT_PICCOLO, 'r') as f:
        feature_model = f.read()
    return 'piccolo-simple-pregen.fm.json', feature_model
def conflict_algorithm_session(date, start_time, end_time, venue):
    #converting string to datetime type variable
    """
    conflict_algorithm_session:
        Checks whether the requested slot (date, start_time, end_time, venue)
        can be booked without overlapping any previously allotted session in
        Session_info; returns "success" if the slot is free, "error" otherwise.
    @param:
        date - date of the slot
        start_time - starting time for the slot ("HH:MM" string)
        end_time - ending time for the slot ("HH:MM" string)
        venue - venue for the slot
    @variables:
        booked_Sessions - queryset of all previously allotted sessions for the
            same date and venue
    """
    start_time = datetime.datetime.strptime(start_time, '%H:%M').time()
    end_time = datetime.datetime.strptime(end_time, '%H:%M').time()
    booked_Sessions = Session_info.objects.select_related('club','club__co_ordinator','club__co_ordinator__id','club__co_ordinator__id__user','club__co_ordinator__id__department','club__co_coordinator','club__co_coordinator__id','club__co_coordinator__id__user','club__co_coordinator__id__department','club__faculty_incharge','club__faculty_incharge__id','club__faculty_incharge__id__user','club__faculty_incharge__id__department').filter(date=date, venue=venue)
    #placing start time and end time in tuple fashion inside this list
    slots = [(start_time, end_time)]
    for value in booked_Sessions:
        slots.append((value.start_time, value.end_time))
    slots.sort()
    #if there isn't any slot present for the selected day just book the session
    if (len(slots) == 1):
        return "success"
    else:
        #this whole logic checks if the end time of any slot is less than the start time of next slot
        # (after sorting, adjacent intervals overlap iff the next start is
        # earlier than the previous end; equal boundaries are allowed)
        counter = slots[0][1]
        flag = 0
        i=1
        while i < len(slots):
            if (slots[i][0] < counter):
                flag = 1
                break
            counter = slots[i][1]
            i = i + 1
        if (flag == 0):
            return "success"
        else:
            return "error"
def get_income_share_summary(df_centile, k):
    """
    Summarise income shares per income group (bottom 20%, next 30%, ...,
    top 1%) for each year and variable of a centile table.

    :param df_centile: pd.DataFrame
        preprocessed {region}_{unit}_centile.csv !! (rank is 1~100)
    :param k: str
        key; its prefix (before ``_``) selects which CPI series to load
    :return: wide pd.DataFrame with ('freq'/'group_mean'/'group_mean_real'/
        'share', income_group) column pairs per (var, std_yyyy)
    """
    # Income-group definitions as (exclusive-low, inclusive-high) rank bounds.
    centile_range = {
        '하위 20%': (0, 20),
        '다음 30%': (20, 50),
        '하위 50%': (0, 50),
        '중위 30%': (50, 80),
        '상위 20%': (80, 100),
        '상위 10%': (90, 100),
        '상위 1%': (99, 100),
    }
    results = list()
    groupcols = ['std_yyyy', 'var']
    yearly_count = df_centile.groupby(['var', 'std_yyyy']).max()['year_count'].rename('max_freq')
    cpi = load_cpi(translate(k.split('_')[0]))
    # NOTE(review): freq_adjustments is never used below -- confirm whether it
    # is leftover from a removed code path.
    freq_adjustments = {}
    # how many centiles(?) are just 0?
    # A jump of more than 1 in `rank` marks centiles that were collapsed
    # because their value is zero.
    zero_thresh_mask = df_centile['rank'].diff() > 1.0
    zero_thresh = df_centile.loc[zero_thresh_mask]
    # add 0 ranks
    expanded_centiles = []
    for (year, var), gdf in df_centile.groupby(groupcols):
        zero_fillers = {'std_yyyy': [], 'var': [], 'rank': [], 'freq': [], 'rank_sum': [], 'share': []}
        mask = (zero_thresh['std_yyyy'] == year) & (zero_thresh['var'] == var)
        if mask.sum() == 0:
            # No collapsed zero ranks for this (year, var): keep group as-is.
            expanded_centiles.append(gdf)
            continue
        t = int(zero_thresh[mask].iloc[0]['rank'])
        year_total = yearly_count[(var, year)]
        # Synthesise the missing ranks 2..t-1 with zero income.
        for i in range(2, t):
            zero_fillers['std_yyyy'].append(year)
            zero_fillers['var'].append(var)
            zero_fillers['rank'].append(i)
            zero_fillers['freq'].append(int(np.around(year_total / 100)))
            zero_fillers['rank_sum'].append(0)
            zero_fillers['share'].append(0)
        # Rebalance the first and t-th ranks so frequencies still sum to the
        # yearly total after inserting the fillers.
        gdf.loc[gdf['rank'] == 1, 'freq'] = year_total * ((t-1) / 100) - np.sum(zero_fillers['freq'])
        gdf.loc[gdf['rank'] == t, 'freq'] = year_total - gdf.loc[(gdf['rank'] < t) | (gdf['rank'] > t), 'freq'].sum() - np.sum(zero_fillers['freq'])
        expanded = pd.concat([gdf, pd.DataFrame(zero_fillers)]).sort_values(by=groupcols + ['rank'])
        expanded_centiles.append(expanded)
    expanded_centiles = pd.concat(expanded_centiles)
    for name, r in centile_range.items():
        mask = (expanded_centiles['rank'] > r[0]) & (expanded_centiles['rank'] <= r[1])
        if mask.sum() == 0:
            # Find max_freq: expected number of people in this income group
            # (number of ppl can be very different because during quantile ranking,
            # in case of a tie the individual was assigned the lower rank)
            max_freq = ((r[1] - r[0]) * yearly_count / 100).apply(lambda x: int(np.around(x)))
            _df = yearly_count.reset_index().drop(columns=['year_count'])
            _df = _df.merge(max_freq.rename('freq').reset_index())
            _df['share'] = 0
            _df['group_mean'] = 0
            _df['group_mean_real'] = 0
        else:
            _df = expanded_centiles[mask].copy()
            _df = _df.groupby(groupcols).agg({'rank_sum': 'sum', 'freq': 'sum', 'share': 'sum'}).reset_index()
            _df = _df.merge(cpi, on='std_yyyy', how='left')
            _df['group_mean'] = _df['rank_sum'] / _df['freq']
            # Deflate nominal group means by CPI to get real values.
            _df['group_mean_real'] = _df['group_mean'] / _df.cpi
            _df = _df.drop(columns=['rank_sum'])
        _df.loc[:, 'income_group'] = name
        results.append(_df)
    df = pd.concat(results, axis=0).sort_values(by=['std_yyyy', 'var'])
    # Pivot to one row per (var, year) with a column per (metric, group).
    df = df.pivot(index=['var', 'std_yyyy'], columns=['income_group'], values=['freq', 'group_mean', 'group_mean_real', 'share']).reset_index()
    sorted_groups = ['하위 20%', '다음 30%', '하위 50%', '중위 30%', '상위 20%', '상위 10%', '상위 1%']
    # Fix a deterministic column order for the output.
    df = df[[('var', ''), ('std_yyyy', '')] +
            [('freq', k) for k in sorted_groups] +
            [('group_mean', k) for k in sorted_groups] +
            [('group_mean_real', k) for k in sorted_groups] +
            [('share', k) for k in sorted_groups]]
    return df
def _get_cmdline_descriptors_for_hashtree_descriptor(ht):
    """Generate kernel cmdline descriptors for dm-verity.

    NOTE: this is Python 2 code -- ``str.encode('hex')`` and the integer
    semantics of ``/`` below do not work unchanged on Python 3.

    Arguments:
      ht: A AvbHashtreeDescriptor
    Returns:
      A list with two AvbKernelCmdlineDescriptor with dm-verity kernel cmdline
      instructions. There is one for when hashtree is not disabled and one for
      when it is.
    """
    # Build the dm="..." target line piece by piece; each append is one field
    # of the dm-verity table (see the kernel's dm-verity documentation).
    c = 'dm="1 vroot none ro 1,'
    c += '0'  # start
    c += ' {}'.format((ht.image_size / 512))  # size (# sectors)
    c += ' verity {}'.format(ht.dm_verity_version)  # type and version
    c += ' PARTUUID=$(ANDROID_SYSTEM_PARTUUID)'  # data_dev
    c += ' PARTUUID=$(ANDROID_SYSTEM_PARTUUID)'  # hash_dev
    c += ' {}'.format(ht.data_block_size)  # data_block
    c += ' {}'.format(ht.hash_block_size)  # hash_block
    c += ' {}'.format(ht.image_size / ht.data_block_size)  # #blocks
    c += ' {}'.format(ht.image_size / ht.data_block_size)  # hash_offset
    c += ' {}'.format(ht.hash_algorithm)  # hash_alg
    c += ' {}'.format(str(ht.root_digest).encode('hex'))  # root_digest
    c += ' {}'.format(str(ht.salt).encode('hex'))  # salt
    if ht.fec_num_roots > 0:
        c += ' 10'  # number of optional args
        c += ' $(ANDROID_VERITY_MODE)'
        c += ' ignore_zero_blocks'
        c += ' use_fec_from_device PARTUUID=$(ANDROID_SYSTEM_PARTUUID)'
        c += ' fec_roots {}'.format(ht.fec_num_roots)
        # Note that fec_blocks is the size that FEC covers, *not* the
        # size of the FEC data. Since we use FEC for everything up until
        # the FEC data, it's the same as the offset.
        c += ' fec_blocks {}'.format(ht.fec_offset / ht.data_block_size)
        c += ' fec_start {}'.format(ht.fec_offset / ht.data_block_size)
    else:
        c += ' 2'  # number of optional args
        c += ' $(ANDROID_VERITY_MODE)'
        c += ' ignore_zero_blocks'
    c += '" root=/dev/dm-0'
    # Now that we have the command-line, generate the descriptor.
    desc = AvbKernelCmdlineDescriptor()
    desc.kernel_cmdline = c
    desc.flags = (
        AvbKernelCmdlineDescriptor.FLAGS_USE_ONLY_IF_HASHTREE_NOT_DISABLED)
    # The descriptor for when hashtree verification is disabled is a lot
    # simpler - we just set the root to the partition.
    desc_no_ht = AvbKernelCmdlineDescriptor()
    desc_no_ht.kernel_cmdline = 'root=PARTUUID=$(ANDROID_SYSTEM_PARTUUID)'
    desc_no_ht.flags = (
        AvbKernelCmdlineDescriptor.FLAGS_USE_ONLY_IF_HASHTREE_DISABLED)
    return [desc, desc_no_ht]
def setup_global_errors():
    """
    Register the shared error handler on every werkzeug HTTP error class.

    Walks all subclasses of ``HTTPException`` and points each at
    ``global_error_handler`` so the app returns uniform error responses.
    """
    for exc_class in HTTPException.__subclasses__():
        app.register_error_handler(exc_class, global_error_handler)
def show_hex(byte_array, out=None):
    """
    Prints byte array in hex display format, matching that of xxd
    (try :%!xxd in vi editor)

    :param byte_array: iterable of ints (e.g. ``bytes``) to dump
    :param out: writable text stream; defaults to sys.stdout
    """
    out = out if out else sys.stdout
    line = ['.'] * 16  # ASCII column for the current row
    i = 0
    for by in byte_array:
        if i % 16 == 0:
            if i == 0:
                # First row: emit just the offset. (The old code printed the
                # not-yet-filled ASCII buffer here, producing a bogus leading
                # line of 16 dots.)
                out.write("{0:07x}: ".format(i))
            else:
                # Flush the previous row's ASCII column, then start a new row.
                out.write(" {0}\n{1:07x}: ".format("".join(line), i))
        out.write("{0:02x}".format(by))
        c = chr(by)
        line[i % 16] = c if (c in string.printable and (not c.isspace() or c == ' ')) else '.'
        if i % 2 == 1:
            out.write(" ")
        i += 1
    leftover = i % 16
    if leftover != 0:
        # Partial final row: pad the hex area out to the ASCII column.
        out.write(" " * (41 - leftover*2 - (leftover >> 1)))
        out.write("".join(line[:leftover]))
        out.write("\n")
    elif i > 0:
        # Exact multiple of 16: the loop never flushed the last row's ASCII
        # column (old code silently dropped it), so emit it here.
        out.write(" {0}\n".format("".join(line)))
    out.flush()
def assign_time():
    """Return the current local time as a 'YYYY-MM-DD HH:MM:SS' string."""
    # %H:%M:%S is the portable spelling of %T, which strftime does not
    # support on Windows; also call strftime as a bound method.
    return datetime.now().strftime('%Y-%m-%d %H:%M:%S')
def main(hub, ref, use_tag, override_ref, overrides, interactive, quiet,
         reverse, skip_invalid, dry, orgs, branches):
    """Create/remove tags & branches on GitHub repos for Open edX releases.

    When ``reverse`` is False the ref is created on every participating repo;
    when True it is removed instead. Returns the creation/removal result
    (False when there is nothing to remove), or None when the user declines
    an interactive confirmation.
    """
    repos = openedx_release_repos(hub, orgs, branches)
    if not repos:
        raise ValueError(u"No repos marked for openedx-release in their openedx.yaml files!")
    repos = trim_dependent_repos(repos)
    repos = override_repo_refs(
        repos,
        override_ref=override_ref,
        overrides=dict(overrides or ()),
    )
    # Which repos already carry this ref -- drives both branches below.
    existing_refs = get_ref_for_repos(repos, ref, use_tag=use_tag)
    if reverse:
        # Removal path: the ref must exist somewhere to be removable.
        if not existing_refs:
            msg = (
                u"Ref {ref} is not present in any repos, cannot remove it"
            ).format(
                ref=ref,
            )
            click.echo(msg)
            return False
        if interactive or not quiet:
            click.echo(todo_list(existing_refs))
        if interactive:
            if not click.confirm(u"Remove these refs?"):
                return
        modified = remove_ref_for_repos(repos, ref, use_tag=use_tag, dry=dry)
        if not quiet:
            if modified:
                click.echo(u"Success!")
            else:
                click.echo(u"No refs modified")
        return modified
    else:
        # Creation path: refuse to proceed if the ref already exists anywhere.
        if existing_refs:
            msg = (
                u"The {ref} ref already exists in the following repos: {repos}"
            ).format(
                ref=ref,
                repos=", ".join(existing_refs.keys()),
            )
            raise ValueError(msg)
        ref_info = commit_ref_info(repos, hub, skip_invalid=skip_invalid)
        if interactive or not quiet:
            click.echo(todo_list(ref_info))
        if interactive:
            if not click.confirm(u"Is this correct?"):
                return
        result = create_ref_for_repos(ref_info, ref, use_tag=use_tag, dry=dry)
        if not quiet:
            # NOTE(review): the rollback failure is only raised when not
            # quiet -- with --quiet a failed creation returns silently.
            # Looks unintentional; confirm with the CLI's authors.
            if result:
                click.echo(u"Success!")
            else:
                raise ValueError(u"Failed to create refs, but rolled back successfully")
        return result
def read_pb2(filename, binary=True):
    """Convert a Protobuf Message file into mb.Compound.

    Parameters
    ----------
    filename : str
    binary: bool, default True
        If True, will read a binary protobuf file;
        if False, will read a text-format protobuf file.

    Returns
    -------
    root_compound : mb.Compound
    """
    root_proto = compound_pb2.Compound()
    if binary:
        with open(filename, "rb") as handle:
            root_proto.ParseFromString(handle.read())
    else:
        with open(filename, "r") as handle:
            Merge(handle.read(), root_proto)
    # Map each proto id to its (single) mb.Compound so repeated visits of
    # the same node reuse one object.
    compound_by_id = {root_proto.id: _proto_to_mb(root_proto)}
    root_compound = compound_by_id[root_proto.id]
    # Walk parent/child pairs, materializing compounds on first sight and
    # re-building the containment hierarchy.
    for child_proto, parent_proto in _proto_successors(root_proto):
        if parent_proto.id not in compound_by_id:
            compound_by_id[parent_proto.id] = _proto_to_mb(parent_proto)
        if child_proto.id not in compound_by_id:
            compound_by_id[child_proto.id] = _proto_to_mb(child_proto)
        compound_by_id[parent_proto.id].add(compound_by_id[child_proto.id])
    _add_mb_bonds(root_proto, root_compound, compound_by_id)
    return root_compound
def _OneSubChunk(wav_file):
    """Reads one subchunk from `wav_file` and logs it.

    Returns:
      Returns a Chunk if a chunk is found. None otherwise (end of stream).
    """
    chunk_id = _ReadChunkId(wav_file)
    if not chunk_id:
        return None
    size = _ReadSize(wav_file)
    data = wav_file.read(size)
    # Lazy %-style args: the message is only formatted if INFO is enabled.
    logging.info('Subchunk: %s %s', chunk_id, size)
    return Chunk(chunk_id, data)
def _convert_tensorshape_to_tensor(value, dtype=None):
    """Copied from TF's TensorShape conversion."""
    if not value.is_fully_defined():
        raise ValueError(
            'Cannot convert a partially known TensorShape to a Tensor: {}'.format(
                value))
    dims = value.as_list()
    # First dimension that cannot be represented in 32 bits (0 if none).
    oversized_dim = next((d for d in dims if d >= 2**31), 0)
    if dtype is None:
        # Infer the narrowest integer dtype that can hold every dimension.
        dtype = np.int64 if oversized_dim else np.int32
    else:
        if dtype not in (np.int32, np.int64):
            raise TypeConversionError(value, dtype)
        if dtype == np.int32 and oversized_dim:
            raise ValueError('Cannot convert a TensorShape to dtype int32; '
                             'a dimension is too large ({})'.format(oversized_dim))
    return convert_to_tensor(dims, dtype=dtype)
def delete_account(account_name: str):
    """Remove the row for ``account_name`` from the balances table."""
    conn = create_connection()
    with conn:
        cursor = conn.cursor()
        # Parameterized query: the previous f-string interpolation was an
        # SQL-injection vector. NOTE(review): '?' assumes a sqlite3-style
        # DB-API paramstyle from create_connection() -- confirm.
        cursor.execute("DELETE FROM balances WHERE account= ?", (account_name,))
        conn.commit()
def superglue_convert_examples_to_features(
    examples,
    tokenizer,
    max_length=512,
    task=None,
    label_list=None,
    output_mode=None,
    pad_on_left=False,
    pad_token=0,
    pad_token_segment_id=0,
    mask_padding_with_zero=True,
):
    """
    Loads a data file into a list of ``InputFeatures``
    Args:
        examples: List of ``InputExamples`` or ``tf.data.Dataset`` containing the examples.
        tokenizer: Instance of a tokenizer that will tokenize the examples
        max_length: Maximum example length
        task: SuperGLUE task
        label_list: List of labels. Can be obtained from the processor using the ``processor.get_labels()`` method
        output_mode: String indicating the output mode. Either ``regression`` or ``classification``
        pad_on_left: If set to ``True``, the examples will be padded on the left rather than on the right (default)
        pad_token: Padding token
        pad_token_segment_id: The segment ID for the padding token (It is usually 0, but can vary such as for XLNet where it is 4)
        mask_padding_with_zero: If set to ``True``, the attention mask will be filled by ``1`` for actual values
            and by ``0`` for padded values. If set to ``False``, inverts it (``1`` for padded values, ``0`` for
            actual values)
    Returns:
        If the ``examples`` input is a ``tf.data.Dataset``, will return a ``tf.data.Dataset``
        containing the task-specific features. If the input is a list of ``InputExamples``, will return
        a list of task-specific ``InputFeatures`` which can be fed to the model.
    """
    is_tf_dataset = False
    if is_tf_available() and isinstance(examples, tf.data.Dataset):
        is_tf_dataset = True
    if task is not None:
        # Fill in defaults for labels / output mode from the task processor.
        processor = superglue_processors[task]()
        if label_list is None:
            label_list = processor.get_labels()
            logger.info("Using label list %s for task %s" % (label_list, task))
        if output_mode is None:
            output_mode = superglue_output_modes[task]
            logger.info("Using output mode %s for task %s" % (output_mode, task))
    label_map = {label: i for i, label in enumerate(label_list)}
    features = []
    for (ex_index, example) in enumerate(examples):
        len_examples = 0
        if is_tf_dataset:
            example = processor.get_example_from_tensor_dict(example)
            example = processor.tfds_map(example)
            len_examples = tf.data.experimental.cardinality(examples)
        else:
            len_examples = len(examples)
        if ex_index % 10000 == 0:
            logger.info("Writing example %d/%d" % (ex_index, len_examples))
        if isinstance(example, SpanClassificationExample):
            # Span-classification path: tokenize while tracking where each
            # annotated span lands in the wordpiece sequence.
            inputs_a, span_locs_a = tokenize_tracking_span(tokenizer, example.text_a, example.spans_a)
            if example.spans_b is not None:
                inputs_b, span_locs_b = tokenize_tracking_span(tokenizer, example.text_b, example.spans_b)
                num_non_special_tokens = len(inputs_a["input_ids"]) + len(inputs_b["input_ids"]) - 4
                # TODO(AW): assumption is same number of non-special tokens + sos + eos
                # This handles varying number of intervening tokens (e.g. different models)
                inputs = tokenizer.encode_plus(example.text_a, example.text_b, add_special_tokens=True, max_length=max_length,)
                num_joiner_specials = len(inputs["input_ids"]) - num_non_special_tokens - 2
                # Spans from text_b are shifted past text_a and the joiner
                # special tokens.
                offset = len(inputs_a["input_ids"]) - 1 + num_joiner_specials - 1
                span_locs_b = [(s + offset, e + offset) for s, e in span_locs_b]
                span_locs = span_locs_a + span_locs_b
                input_ids = inputs["input_ids"]
                token_type_ids = inputs["token_type_ids"]
                if num_joiner_specials == 1:
                    tmp = inputs_a["input_ids"] + inputs_b["input_ids"][1:]
                elif num_joiner_specials == 2:
                    tmp = inputs_a["input_ids"] + inputs_b["input_ids"]
                else:
                    assert False, "Something is wrong"
                # check that the length of the input ids is expected (not necessarily the exact ids)
                assert len(input_ids) == len(tmp), "Span tracking tokenization produced inconsistent result!"
            else:
                input_ids, token_type_ids = inputs_a["input_ids"], inputs_a["token_type_ids"]
                span_locs = span_locs_a
        else:
            inputs = tokenizer.encode_plus(example.text_a, example.text_b, add_special_tokens=True, max_length=max_length,)
            input_ids, token_type_ids = inputs["input_ids"], inputs["token_type_ids"]
        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
        # Zero-pad up to the sequence length.
        padding_length = max_length - len(input_ids)
        if pad_on_left:
            # TODO(AW): will fuck up span tracking
            assert False, "Not implemented correctly wrt span tracking!"
            input_ids = ([pad_token] * padding_length) + input_ids
            attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask
            token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids
        else:
            input_ids = input_ids + ([pad_token] * padding_length)
            attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
            token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)
        assert len(input_ids) == max_length, "Error with input length {} vs {}".format(len(input_ids), max_length)
        assert len(attention_mask) == max_length, "Error with input length {} vs {}".format(
            len(attention_mask), max_length
        )
        assert len(token_type_ids) == max_length, "Error with input length {} vs {}".format(
            len(token_type_ids), max_length
        )
        if output_mode in ["classification", "span_classification"]:
            label = label_map[example.label]
        elif output_mode == "regression":
            label = float(example.label)
        else:
            raise KeyError(output_mode)
        if ex_index < 5:
            # Log the first few examples for manual sanity checking.
            logger.info("*** Example ***")
            logger.info("guid: %s" % (example.guid))
            logger.info("input text: %s" % tokenizer.decode(input_ids, clean_up_tokenization_spaces=False))
            logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
            logger.info("attention_mask: %s" % " ".join([str(x) for x in attention_mask]))
            logger.info("token_type_ids: %s" % " ".join([str(x) for x in token_type_ids]))
            logger.info("label: %s (id = %d)" % (example.label, label))
        if isinstance(example, SpanClassificationExample):
            feats = SpanClassificationFeatures(guid=example.guid,
                                               input_ids=input_ids,
                                               span_locs=span_locs,
                                               attention_mask=attention_mask,
                                               token_type_ids=token_type_ids,
                                               label=label)
        else:
            feats = InputFeatures(guid=example.guid,
                                  input_ids=input_ids,
                                  attention_mask=attention_mask,
                                  token_type_ids=token_type_ids,
                                  label=label)
        features.append(feats)
    if is_tf_available() and is_tf_dataset:
        # TODO(AW): include span classification version
        def gen():
            for ex in features:
                yield (
                    {
                        "guid": ex.guid,
                        "input_ids": ex.input_ids,
                        "attention_mask": ex.attention_mask,
                        "token_type_ids": ex.token_type_ids,
                    },
                    ex.label,
                )
        # NOTE(review): `ex.guid` in the output_shapes dict below references
        # `ex`, which is only defined inside gen() -- this looks like it
        # would raise NameError if this branch ever runs; confirm.
        return tf.data.Dataset.from_generator(
            gen,
            ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
            (
                {
                    "guid": ex.guid,
                    "input_ids": tf.TensorShape([None]),
                    "attention_mask": tf.TensorShape([None]),
                    "token_type_ids": tf.TensorShape([None]),
                },
                tf.TensorShape([]),
            ),
        )
    return features
def get_upside_capture(
    nav_data,
    benchmark_nav_data,
    risk_free_rate=None,
    window=250 * 3,
    annualiser=250,
    tail=True,
):
    """
    The up-market capture ratio is the statistical measure of an investment manager's overall
    performance in up-markets. It is used to evaluate how well an investment manager performed
    relative to an index during periods when that index has risen. The ratio is calculated by
    dividing the manager's returns by the returns of the index during the up-market and
    multiplying that factor by 100. (Investopedia)

    :param nav_data: fund NAV data (converted via _transform_df)
    :param benchmark_nav_data: benchmark NAV data (converted via _transform_df)
    :param risk_free_rate: float
    :param window: int -- rolling window length in observations
    :param annualiser: int -- periods per year used for annualising
    :param tail: bool -- when True return only the latest ratio as a float,
        otherwise return the full DataFrame
    :return:
    """
    nav_dataframe = _transform_df(nav_data)
    benchmark_nav_dataframe = _transform_df(benchmark_nav_data)
    df = RatioCalculator(
        nav_dataframe,
        benchmark_nav_dataframe=benchmark_nav_dataframe,
        risk_free_rate=risk_free_rate,
        annualiser=annualiser,
    ).get_upside_capture(window)
    # .iloc[-1] takes the last row positionally; the previous
    # df["upside_capture_ratio"][-1] relied on pandas' deprecated
    # integer-label fallback and fails on non-integer indexes.
    return float(df["upside_capture_ratio"].iloc[-1]) if tail else df
def generateToken(username, password, portalUrl):
    """Retrieves a token to be used with API requests.

    Posts username/password to the portal's generateToken endpoint and
    returns the token string; raises Exception with the portal's error
    details on failure.

    NOTE(review): `urllib.urlencode` is the Python 2 spelling (Python 3
    moved it to urllib.parse) -- this function appears to target Python 2;
    confirm before porting. Also, if the response contains neither 'token'
    nor 'error', the function silently returns None.
    """
    # Allow self-signed portals when NOSSL is set.
    context = ssl._create_unverified_context() if NOSSL else None
    params = urllib.urlencode({'username' : username,
                               'password' : password, 'client' : 'referer',
                               'referer': portalUrl, 'expiration': 60, 'f' : 'json'})
    resp = urlopen(portalUrl + '/sharing/rest/generateToken?', params, context=context)
    jsonResponse = json.load(resp)
    if 'token' in jsonResponse:
        return jsonResponse['token']
    elif 'error' in jsonResponse:
        # Fold every detail line into one message before raising.
        errMsg = jsonResponse['error']['message']
        for detail in jsonResponse['error']['details']:
            errMsg += "\n"+ detail
        raise Exception( errMsg )
def get_logreg(prof, tm, j, prods):
    """
    Train logistic regression (Markov-chain approach).

    prof: task-mode data generated using lhs.py; DataFrame with at least
        the columns "unit", "task", "mode" and one column per product
    tm: task-mode string "task-mode" to model successors of
    j: name of unit
    prods: list of products (feature column names)

    Returns a fitted LogisticRegression, or the single observed successor
    task-mode string when no model can be trained, or "None-None" when no
    data exists for (j, tm).
    """
    # Filter relevant data
    dfj = prof.loc[prof["unit"] == j, ].copy()
    # Vectorized string concatenation replaces the original per-row
    # iterrows() list comprehension (same values, much faster).
    dfj["tm"] = dfj["task"] + "-" + dfj["mode"]
    # "tm-1" holds the successor task-mode (next row after shift(-1)).
    dfj["tm-1"] = dfj["tm"].shift(-1)
    dfj.loc[pd.isna(dfj["tm-1"]), "tm-1"] = "None-None"
    dfj = dfj[dfj["tm"] == tm]
    # Train logistic regression
    if dfj.shape[0] > 0 and len(np.unique(dfj["tm-1"])) > 1:
        X = np.array(dfj[prods])
        Y = np.array(dfj["tm-1"])
        if(len(np.unique(Y)) > 2):
            # Multinomial if more than 2 classes
            logreg = linear_model.LogisticRegression(multi_class="multinomial",
                                                     solver="lbfgs",
                                                     # solver="sag",
                                                     max_iter=10000,
                                                     verbose=2)
        else:
            # Binomial if only two classes
            logreg = linear_model.LogisticRegression(max_iter=10000,
                                                     verbose=2)
        logreg.fit(X, Y)
        return logreg
    elif dfj.shape[0] > 0:
        # Only one successor observed: deterministic transition.
        return np.array(dfj["tm-1"])[0]
    else:
        return "None-None"
def slowness2speed(value):
    """Map a slowness value back to speed (inverse of speed2slowness)."""
    return (31 - value) / 30
def complex_covariance_from_real(Krr, Kii, Kri):
    """Build the complex covariance and pseudo-covariance from real blocks.

    Parameters
    ----------
    Krr : ndarray
        Covariance of the real part.
    Kii : ndarray
        Covariance of the imaginary part.
    Kri : ndarray
        Cross-covariance between real and imaginary parts.

    Returns
    -------
    tuple of ndarray
        ``(K, Kp)``: the covariance ``K`` and pseudo-covariance ``Kp`` of
        the complex variable.
    """
    cross_t = Kri.T
    covariance = (Krr + Kii) + 1j * (cross_t - Kri)
    pseudo_covariance = (Krr - Kii) + 1j * (cross_t + Kri)
    return covariance, pseudo_covariance
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the USPS platform."""
    # Only set up when discovered via the component, never from raw config.
    if discovery_info is None:
        return
    usps_api = hass.data[DATA_USPS]
    add_entities([USPSPackageSensor(usps_api), USPSMailSensor(usps_api)], True)
def is_in_cap(objs, radecrad):
    """Determine which of an array of objects lie inside an RA, Dec, radius cap.

    Parameters
    ----------
    objs : :class:`~numpy.ndarray`
        An array of objects. Must include at least the columns "RA" and "DEC".
    radecrad : :class:`list`
        3-entry list of coordinates [ra, dec, radius] forming a cap or
        "circle" on the sky. ra, dec and radius are all in degrees.

    Returns
    -------
    :class:`~numpy.ndarray`
        ``True`` for objects in the cap, ``False`` for objects outside of the cap.

    Notes
    -----
    - Tests the separation with <=, so include objects on the cap boundary.
    - See also is_in_circle() which handles multiple caps.
    """
    cap_ra, cap_dec, cap_radius = radecrad
    object_coords = SkyCoord(objs["RA"]*u.degree, objs["DEC"]*u.degree)
    cap_center = SkyCoord(cap_ra*u.degree, cap_dec*u.degree)
    # <= rather than < keeps objects exactly on the boundary inside the cap.
    return cap_center.separation(object_coords) <= cap_radius*u.degree
def get_lcmv_vector(atf_vectors, response_vector, noise_psd_matrix):
    """
    Compute LCMV beamforming vectors.

    :param atf_vectors: Acoustic transfer function vectors for
        each source with shape (targets k, bins f, sensors d)
    :param response_vector: Defines, which sources you are interested in.
        Set it to [1, 0, ..., 0], if you are interested in the first speaker.
        It has the shape (targets,)
    :param noise_psd_matrix: Noise PSD matrix
        with shape (bins f, sensors d, sensors D)
    :return: Set of beamforming vectors with shape (bins f, sensors d)
    """
    response_vector = np.asarray(response_vector)
    # TODO: If it is a list, a list of response_vectors is returned.
    # Phi^-1 H for every target and bin: shape (k, f, d).
    phi_inv_h = solve(
        noise_psd_matrix[None, ...],  # 1, f, d, D
        atf_vectors  # k, f, d
    )
    # Gram matrix H^H Phi^-1 H: shape (f, k, K).
    gram = np.einsum(
        'k...d,K...d->...kK',
        atf_vectors.conj(),
        phi_inv_h
    )
    # Solve against the desired response per bin: shape (f, k).
    weights = solve(
        gram,
        response_vector[None, ...],  # 1, K
    )
    # Combine the per-target filters into one beamformer per bin: (f, d).
    return np.einsum(
        'k...d,...k->...d',
        phi_inv_h,
        weights
    )
def _ensure_list(value: Any) -> List[Any]:
"""If value is a scalar, converts it to a list of size 1."""
if isinstance(value, list):
return value
if isinstance(value, str) or isinstance(value, numbers.Number):
return [value]
raise TypeError(
f'Value must be a list, number or a string. Got {type(value)}') | 35,237 |
def _ExtractMetaFeature(  # pylint: disable=invalid-name
    extracts: types.Extracts,
    new_features_fn: Callable[[types.FeaturesPredictionsLabels],
                              types.DictOfFetchedTensorValues]
) -> types.Extracts:
  """Augments FPL dict with new feature(s)."""
  # Work on a copy of the FPL so the incoming extracts stay untouched.
  fpl = get_fpl_copy(extracts)
  # Derive the new feature(s), then fold them back into the copy.
  update_fpl_features(fpl, new_features_fn(fpl))
  output = copy.copy(extracts)
  output[constants.FEATURES_PREDICTIONS_LABELS_KEY] = fpl
  return output
def parse_notebook_index(ntbkpth):
    """
    Parse the top-level notebook index file at `ntbkpth`. Returns a list of
    subdirectories in order of appearance in the index file, and a dict
    mapping subdirectory name to a description.
    """
    # Convert notebook to RST text in string
    rex = RSTExporter()
    rsttxt = rex.from_filename(ntbkpth)[0]
    # Clean up trailing whitespace. NB: re.sub's 4th *positional* argument
    # is `count`, not `flags` -- the old call silently capped substitutions
    # at the numeric value of re.M | re.S. Pass flags by keyword.
    rsttxt = re.sub(r'\n ', r'', rsttxt, flags=re.M | re.S)
    pthidx = {}
    pthlst = []
    for line in rsttxt.split('\n'):
        m = re.match(r'^-\s+`([^<]+)\s+<([^>]+).ipynb>`__', line)
        if m:
            # List of subdirectories in order of appearance in index.rst
            pthlst.append(m.group(2))
            # Dict mapping subdirectory name to description
            pthidx[m.group(2)] = m.group(1)
    return pthlst, pthidx
def import_requirements():
    """Import ``requirements.txt`` file located at the root of the repository."""
    req_file = Path(__file__).parent / 'requirements.txt'
    # Iterate the handle directly; rstrip drops the newline and any
    # trailing whitespace from each requirement line.
    with req_file.open() as handle:
        return [entry.rstrip() for entry in handle]
def _get_timestamp_range_edges(
    first: Timestamp,
    last: Timestamp,
    freq: BaseOffset,
    closed: Literal["right", "left"] = "left",
    origin="start_day",
    offset: Timedelta | None = None,
) -> tuple[Timestamp, Timestamp]:
    """
    Adjust the `first` Timestamp to the preceding Timestamp that resides on
    the provided offset. Adjust the `last` Timestamp to the following
    Timestamp that resides on the provided offset. Input Timestamps that
    already reside on the offset will be adjusted depending on the type of
    offset and the `closed` parameter.
    Parameters
    ----------
    first : pd.Timestamp
        The beginning Timestamp of the range to be adjusted.
    last : pd.Timestamp
        The ending Timestamp of the range to be adjusted.
    freq : pd.DateOffset
        The dateoffset to which the Timestamps will be adjusted.
    closed : {'right', 'left'}, default "left"
        Which side of bin interval is closed.
    origin : {'epoch', 'start', 'start_day'} or Timestamp, default 'start_day'
        The timestamp on which to adjust the grouping. The timezone of origin must
        match the timezone of the index.
        If a timestamp is not used, these values are also supported:
        - 'epoch': `origin` is 1970-01-01
        - 'start': `origin` is the first value of the timeseries
        - 'start_day': `origin` is the first day at midnight of the timeseries
    offset : pd.Timedelta, default is None
        An offset timedelta added to the origin.
    Returns
    -------
    A tuple of length 2, containing the adjusted pd.Timestamp objects.
    """
    # Fixed-frequency (Tick) offsets: anchor the endpoints to the origin.
    if isinstance(freq, Tick):
        index_tz = first.tz
        if isinstance(origin, Timestamp) and (origin.tz is None) != (index_tz is None):
            raise ValueError("The origin must have the same timezone as the index.")
        elif origin == "epoch":
            # set the epoch based on the timezone to have similar bins results when
            # resampling on the same kind of indexes on different timezones
            origin = Timestamp("1970-01-01", tz=index_tz)
        if isinstance(freq, Day):
            # _adjust_dates_anchored assumes 'D' means 24H, but first/last
            # might contain a DST transition (23H, 24H, or 25H).
            # So "pretend" the dates are naive when adjusting the endpoints
            first = first.tz_localize(None)
            last = last.tz_localize(None)
            if isinstance(origin, Timestamp):
                origin = origin.tz_localize(None)
        first, last = _adjust_dates_anchored(
            first, last, freq, closed=closed, origin=origin, offset=offset
        )
        if isinstance(freq, Day):
            # Restore the timezone stripped above for the DST workaround.
            first = first.tz_localize(index_tz)
            last = last.tz_localize(index_tz)
    else:
        # Non-fixed offsets (e.g. month-end): snap to midnight, then roll
        # `first` back onto the offset and extend `last` by one period.
        first = first.normalize()
        last = last.normalize()
        if closed == "left":
            first = Timestamp(freq.rollback(first))
        else:
            first = Timestamp(first - freq)
        last = Timestamp(last + freq)
    return first, last
def create_export(request, username, id_string, export_type):
    """
    Create async export tasks view.

    Validates permissions and export options from the POST body, queues an
    asynchronous export of the given form, then redirects to the export
    list page. Returns 403 for unshared forms / missing templates and 400
    for invalid option values.
    """
    owner = get_object_or_404(User, username__iexact=username)
    xform = get_form({'user': owner, 'id_string__iexact': id_string})
    if not has_permission(xform, owner, request):
        return HttpResponseForbidden(_(u'Not shared.'))
    if export_type == Export.EXTERNAL_EXPORT:
        # check for template before trying to generate a report
        if not MetaData.external_export(xform):
            return HttpResponseForbidden(_(u'No XLS Template set.'))
    credential = None
    if export_type == Export.GOOGLE_SHEETS_EXPORT:
        credential = _get_google_credential(request)
        if isinstance(credential, HttpResponseRedirect):
            # Google OAuth flow required first; bounce the user there.
            return credential
    query = request.POST.get("query")
    force_xlsx = request.POST.get('xls') != 'true'
    # export options
    group_delimiter = request.POST.get("options[group_delimiter]", '/')
    if group_delimiter not in ['.', '/']:
        return HttpResponseBadRequest(
            _("%s is not a valid delimiter" % group_delimiter))
    # default is True, so when dont_.. is yes
    # split_select_multiples becomes False
    split_select_multiples = request.POST.get(
        "options[dont_split_select_multiples]", "no") == "no"
    binary_select_multiples = getattr(settings, 'BINARY_SELECT_MULTIPLES',
                                      False)
    remove_group_name = request.POST.get("options[remove_group_name]", "false")
    value_select_multiples = request.POST.get(
        "options[value_select_multiples]", "false")
    # external export option
    meta = request.POST.get("meta")
    # Collected options forwarded verbatim to the async export task.
    options = {
        'group_delimiter': group_delimiter,
        'split_select_multiples': split_select_multiples,
        'binary_select_multiples': binary_select_multiples,
        'value_select_multiples': str_to_bool(value_select_multiples),
        'remove_group_name': str_to_bool(remove_group_name),
        'meta': meta.replace(",", "") if meta else None,
        'google_credentials': credential
    }
    try:
        create_async_export(xform, export_type, query, force_xlsx, options)
    except ExportTypeError:
        return HttpResponseBadRequest(
            _("%s is not a valid export type" % export_type))
    else:
        # Record the export creation in the audit log before redirecting.
        audit = {"xform": xform.id_string, "export_type": export_type}
        audit_log(Actions.EXPORT_CREATED, request.user, owner,
                  _("Created %(export_type)s export on '%(id_string)s'.") % {
                      'export_type': export_type.upper(),
                      'id_string': xform.id_string,
                  }, audit, request)
        return HttpResponseRedirect(
            reverse(
                export_list,
                kwargs={
                    "username": username,
                    "id_string": id_string,
                    "export_type": export_type
                }))
def main() -> None:
    """Example program for tcod.event

    Opens a window, renders a scrolling log of received events, and echoes
    each event to stdout until the window is closed.
    """
    event_log: List[str] = []
    # Latest mouse-motion event, shown on its own line (too chatty to log).
    motion_desc = ""
    with tcod.context.new(width=WIDTH, height=HEIGHT) as context:
        console = context.new_console()
        while True:
            # Display all event items.
            console.clear()
            console.print(0, console.height - 1, motion_desc)
            # Newest entries first, filling upward from above the motion line.
            for i, item in enumerate(event_log[::-1]):
                y = console.height - 3 - i
                if y < 0:
                    break
                console.print(0, y, item)
            context.present(console, integer_scaling=True)
            # Handle events.
            for event in tcod.event.wait():
                context.convert_event(event)  # Set tile coordinates for event.
                print(repr(event))
                if isinstance(event, tcod.event.Quit):
                    raise SystemExit()
                if isinstance(event, tcod.event.WindowResized) and event.type == "WINDOWRESIZED":
                    # Rebuild the console to match the new window size.
                    console = context.new_console()
                if isinstance(event, tcod.event.MouseMotion):
                    motion_desc = str(event)
                else:
                    event_log.append(str(event))
def draft_bp(app):
    """Callable draft blueprint (we need an application context)."""
    with app.app_context():
        draft_resource = BibliographicDraftResource(service=BibliographicRecordService())
        return draft_resource.as_blueprint("bibliographic_draft_resource")
def _unpack(stream: bytes, path: str) -> str:
"""Unpack archive in bytes string into directory in ``path``."""
with tarfile.open(fileobj=io.BytesIO(stream)) as tar:
tar.extractall(path)
return path | 35,245 |
def create_client(client):
    """Creates a new client.

    Posts the "Add Client" form through the Flask test client, then reads
    the resulting OAuth client id straight from the database.

    :param client: Flask test client.
    :returns: client_id of the first Client row.
    """
    rv = client.post('/v1/oauth/', follow_redirects=True, data={
        'submit': 'Add Client',
    })
    # NOTE(review): the response `rv` is never checked; a failed POST only
    # surfaces below as an empty query result -- confirm this is intended.
    db.app = jobaddservice.app
    oauth_clients = Client.query.all()
    # Assumes the table held no clients beforehand, so the first row is the
    # one just created -- TODO confirm against the test fixtures.
    client_id = oauth_clients[0].client_id
    return client_id
def get_payload(configs):
    """Common Xpaths were detected so try to consolidate them.

    Groups the incoming per-xpath configs into as few GNMI updates as
    possible and returns a list of {xpath: payload} dicts.

    Parameter
    ---------
    configs: list of {xpath: {name: value}} dicts
    """
    # Number of updates are limited so try to consolidate into lists.
    xpaths_cfg = []
    first_key = set()
    # Find first common keys for all xpaths_cfg of collection.
    for config in configs:
        xpath = next(iter(config.keys()))
        # Change configs to tuples (xpath, config) for easier management
        xpaths_cfg.append((xpath, config[xpath]))
        xpath_split = xpath.split("/")
        for seg in xpath_split:
            if "[" in seg:
                first_key.add(seg)
                break
    # Common first key/configs represents one GNMI update
    updates = []
    for key in first_key:
        update = []
        remove_cfg = []
        for config in xpaths_cfg:
            xpath, cfg = config
            if key in xpath:
                update.append(config)
            else:
                for k, v in cfg.items():
                    if '[{0}="{1}"]'.format(k, v) not in key:
                        break
                else:
                    # This cfg sets the first key so we don't need it
                    remove_cfg.append((xpath, cfg))
        if update:
            for upd in update:
                # Remove this config out of main list
                xpaths_cfg.remove(upd)
            for rem_cfg in remove_cfg:
                # Sets a key in update path so remove it
                xpaths_cfg.remove(rem_cfg)
            updates.append(update)
            break
    # Add remaining configs to updates
    if xpaths_cfg:
        updates.append(xpaths_cfg)
    # Combine all xpath configs of each update if possible
    xpaths = []
    compressed_updates = []
    for update in updates:
        xpath_consolidated = {}
        config_compressed = []
        for seg in update:
            xpath, config = seg
            if xpath in xpath_consolidated:
                # Same xpath seen before: merge values into the one entry.
                xpath_consolidated[xpath].update(config)
            else:
                xpath_consolidated[xpath] = config
                config_compressed.append((xpath, xpath_consolidated[xpath]))
                xpaths.append(xpath)
        # Now get the update path for this batch of configs
        common_xpath = os.path.commonprefix(xpaths)
        cfg_compressed = []
        keys = []
        # Need to reverse the configs to build the dict correctly
        config_compressed.reverse()
        compressed_count = 0
        for seg in config_compressed:
            is_key = False
            prepend_path = ""
            xpath, config = seg
            end_path = xpath[len(common_xpath):]
            if not end_path:
                prepend_path = common_xpath
            elif end_path.startswith("["):
                # Don't start payload with a list
                tmp = common_xpath.split("/")
                prepend_path = "/" + tmp.pop()
                common_xpath = "/".join(tmp)
            end_path = prepend_path + end_path
            # Building json, need to identify configs that set keys
            for key in keys:
                if [k for k in config.keys() if k in key]:
                    is_key = True
            keys += re.findall(RE_FIND_KEYS, end_path)
            cfg_compressed.append((end_path, config, is_key))
            compressed_count += 1
        update = (common_xpath, cfg_compressed)
        compressed_updates.append(update)
    updates = []
    # NOTE(review): compressed_count below holds the value from the *last*
    # loop iteration only -- confirm that is the intended condition.
    if compressed_count == 1:
        common_xpath, cfg = compressed_updates[0]
        xpath, payload, is_key = cfg[0]
        updates.append({xpath: payload})
    else:
        for update in compressed_updates:
            common_xpath, cfgs = update
            payload = xpath_to_json(cfgs)
            updates.append({common_xpath: payload})
    return updates
def FPS(name, sort, explicit_name=None):
    """
    Creates a floating-point symbol.

    :param name: The name of the symbol
    :param sort: The sort of the floating point
    :param explicit_name: If False, an identifier is appended to the name to ensure uniqueness.
    :return: An FP AST.
    """
    explicit = False if explicit_name is None else explicit_name
    symbol_name = _make_name(name, sort.length, explicit, prefix='FP_')
    return FP('FPS', (symbol_name, sort), variables={symbol_name}, symbolic=True, length=sort.length)
def showCumulOverlap(mode, modes, *args, **kwargs):
    """Show cumulative overlap using :func:`~matplotlib.pyplot.plot`.

    :arg mode: single mode (or vector) whose overlap with `modes` is shown
    :type mode: :class:`.Mode`, :class:`.Vector`
    :arg modes: multiple modes
    :type modes: :class:`.ModeSet`, :class:`.ANM`, :class:`.GNM`, :class:`.PCA`

    Extra positional/keyword arguments are passed through to pyplot.plot.
    Returns the list of plotted Line2D objects.
    """
    import matplotlib.pyplot as plt
    from matplotlib.ticker import MaxNLocator
    if not isinstance(mode, (Mode, Vector)):
        raise TypeError('mode must be NMA, ModeSet, Mode or Vector, not {0}'
                        .format(type(mode)))
    if not isinstance(modes, (NMA, ModeSet)):
        raise TypeError('modes must be NMA, ModeSet, or Mode, not {0}'
                        .format(type(modes)))
    # Cumulative overlap: sqrt of the running sum of squared overlaps.
    cumov = (calcOverlap(mode, modes) ** 2).cumsum() ** 0.5
    if isinstance(modes, NMA):
        arange = np.arange(0.5, len(modes)+0.5)
    else:
        arange = modes.getIndices() + 0.5
    if SETTINGS['auto_show']:
        plt.figure()
    show = plt.plot(arange, cumov, *args, **kwargs)
    plt.title('Cumulative overlap with {0}'.format(str(mode)))
    plt.xlabel('{0} mode index'.format(modes))
    plt.ylabel('Cumulative overlap')
    ax = plt.gca()
    # Force integer ticks on the mode-index axis.
    loc = MaxNLocator(integer=True)
    ax.xaxis.set_major_locator(loc)
    if SETTINGS['auto_show']:
        showFigure()
    return show
def download(data_dir, yaml_path, overwrite=False):
    """Download the data files specified in YAML file to a directory.

    Return False if any downloaded file or kept local copy (when not
    overwriting) has a checksum that differs from the YAML manifest;
    True otherwise.
    """
    sample_data = _loadYAML(yaml_path)
    logger.info("Downloading data for %s", sample_data.sample)
    def _downloadFile(path, url):
        # Stream `url` to `path` with a tqdm progress bar.
        logger.info("Downloading %s from %s", path, url)
        import requests
        r = requests.get(url, stream=True, timeout=5)
        size = int(r.headers.get('content-length', 0))
        from tqdm import tqdm
        progress_bar = tqdm(total=size, unit='iB', unit_scale=True)
        with open(path, 'wb') as fd:
            for chunk in r.iter_content(chunk_size=1024):
                progress_bar.update(len(chunk))
                fd.write(chunk)
        progress_bar.close()
    allGood = True
    for f in sample_data.files:
        fpath = os.path.join(data_dir, f.path)
        if os.path.exists(fpath):
            if _checkMD5(fpath, f.checksum):
                logger.info("Found local copy %s, skip downloading.", fpath)
                continue
            else:
                logger.warning("Local copy %s has a different checksum!", fpath)
                if overwrite:
                    # Use the module logger here for consistency; the old code
                    # called the root logging.warning for this one message.
                    logger.warning("Removing local copy %s", fpath)
                    os.remove(fpath)
                else:
                    allGood = False
                    continue
        _createDirIfNeeded(fpath)
        _downloadFile(fpath, f.url)
        if not _checkMD5(fpath, f.checksum):
            logger.error("The downloaded file %s has a different checksum!", fpath)
            allGood = False
    return allGood
def look_up_socrata_credentials() -> Tuple[str, str]:
    """Collect Socrata auth credentials from the local environment.

    Checks several common Socrata environment-variable name pairs and
    returns the first pair whose two variables are both set. Raises a
    MissingCredentialsError if no complete pair is found.
    """
    candidate_pairs = (
        ('SOCRATA_KEY_ID', 'SOCRATA_KEY_SECRET'),
        ('SOCRATA_USERNAME', 'SOCRATA_PASSWORD'),
        ('MY_SOCRATA_USERNAME', 'MY_SOCRATA_PASSWORD'),
        ('SODA_USERNAME', 'SODA_PASSWORD'),
    )
    for id_name, secret_name in candidate_pairs:
        id_value = os.environ.get(id_name)
        secret_value = os.environ.get(secret_name)
        if id_value is not None and secret_value is not None:
            return (id_value, secret_value)
    raise MissingCredentialsError('No Socrata credentials found in local environment')
def _calc_cat_outlier(df: dd.DataFrame, col_x: str, threshold: int = 1) -> Intermediate:
    """
    Calculate categorical outliers: values of `col_x` whose group size
    does not exceed `threshold`.

    :param df: the input dataframe
    :param col_x: the column of df (univariate outlier detection)
    :param threshold: maximum group size for a value to count as an outlier
    :return: Intermediate holding dict(index: value) of outliers
    """
    counts_per_value = df.groupby([col_x]).size()
    rare_values = counts_per_value[counts_per_value <= threshold]
    result = {"outlier_index": list(rare_values.index.compute())}
    raw_data = {"df": df, "col_x": col_x, "threshold": threshold}
    return Intermediate(result, raw_data)
def add_precursor_mz(spectrum_in: SpectrumType) -> SpectrumType:
    """Add precursor_mz to correct field and make it a float.

    For missing precursor_mz field: check if there is a "pepmass" entry instead.
    For string parsed as precursor_mz: convert to float.

    :param spectrum_in: Spectrum to normalize; may be None.
    :return: Cloned spectrum with a float "precursor_mz" when derivable,
        or None if the input was None.
    """
    if spectrum_in is None:
        return None
    spectrum = spectrum_in.clone()
    precursor_mz = spectrum.get("precursor_mz", None)
    if isinstance(precursor_mz, str):
        # Value was parsed as text -- convert to float.
        spectrum.set("precursor_mz", float(precursor_mz.strip()))
    elif precursor_mz is None:
        pepmass = spectrum.get("pepmass")
        # Guard against a missing pepmass entry: the original code assumed
        # pepmass was always indexable and raised TypeError on None.
        if pepmass is not None and isinstance(pepmass[0], float):
            spectrum.set("precursor_mz", pepmass[0])
        else:
            print("No precursor_mz found in metadata.")
    return spectrum
def echo_result(function):
    """Decorator that prints subcommand results correctly formatted.

    :param function: Subcommand that returns a result from the API.
    :type function: callable
    :returns: Wrapped function that prints and propagates subcommand results
    :rtype: callable
    """
    @functools.wraps(function)
    def wrapper(*args, **kwargs):
        result = function(*args, **kwargs)
        context = click.get_current_context()
        params = context.params
        output_format = params["output_format"]
        formatter = FORMATTERS[output_format]
        if isinstance(formatter, dict):
            # For the text formatter, there's a separate formatter for each subcommand
            formatter = formatter[context.command.name]
        output = formatter(result, params.get("verbose", False)).strip("\n")
        click.echo(
            output, file=params.get("output_file", click.open_file("-", mode="w"))
        )
        # Propagate the wrapped command's return value so chained commands
        # and result callbacks still receive it (the original wrapper
        # silently returned None).
        return result
    return wrapper
def load_cifar10():
    """ Load the CIFAR-10 dataset.

    Pixels are rescaled to float32 in [0, 1] and the labels are
    flattened to int32 class indices.
    """
    def post(inputs, labels):
        scaled = inputs.astype(numpy.float32) / 255
        classes = labels.flatten().astype(numpy.int32)
        return scaled, classes
    return NumpySet.from_keras(tf.keras.datasets.cifar10.load_data, post=post)
def parse_names(filename):
    """
    Parse an NCBI names.dmp file.

    Returns a dict mapping integer taxid to a
    (name, unique name, name class) tuple, keeping only rows whose
    name class is 'scientific name'.
    """
    names_by_taxid = {}
    with xopen(filename, 'rt') as handle:
        for row in handle:
            fields = row.rstrip('\t|\n').split('\t|\t')
            taxid, name, uniqname, name_class = fields
            taxid = int(taxid)
            if name_class == 'scientific name':
                names_by_taxid[taxid] = (name, uniqname, name_class)
    return names_by_taxid
def nancy(root_path, meta_file):
    """Normalizes the Nancy meta data file to TTS format.

    Each metadata line looks like: ( nancy_001 "Sentence text." )
    Returns a shuffled list of [text, wav_file_path] pairs.
    """
    txt_file = os.path.join(root_path, meta_file)
    items = []
    with open(txt_file, 'r') as ttf:
        for line in ttf:
            utt_id = line.split()[1]
            # Take everything between the first and last double quote.
            # (The previous slice ended at rfind('"') - 1, which dropped
            # the final character of every sentence.)
            text = line[line.find('"') + 1:line.rfind('"')]
            wav_file = root_path + 'wavn/' + utt_id + '.wav'
            items.append([text, wav_file])
    random.shuffle(items)
    return items
def parse(pm, doc):
    """ Parse one document using the given parsing model

    :type pm: ParsingModel
    :param pm: a well-trained parsing model
    :type doc: Doc
    :param doc: the document to be parsed (with segmented EDUs)
    :return: the predicted RST tree produced by shift-reduce parsing
    """
    pred_rst = pm.sr_parse(doc)
    return pred_rst
def two_sum(nums, target):
    """
    Given an array of integers, return indices of the two numbers such that
    they add up to a specific target.

    Assumes exactly one solution exists and the same element is not used
    twice. Single pass: for each value, remember the complement needed to
    reach the target, keyed by that complement.

    :type nums: List[int]
    :type target: int
    :rtype: List[int]
    """
    seen = {}
    for idx, value in enumerate(nums):
        if value in seen:
            return [seen[value], idx]
        seen[target - value] = idx
async def joined(ctx, member: guilded.Member):
    """Says when a member joined.

    Replies in the invoking channel with the member's name and join time.
    (Removed leftover debug `print(dir(member))`.)
    """
    await ctx.send('{0.name} joined in {0.joined_at}'.format(member))
def remove_columns(tx, header, columns_to_remove):
    """
    Removes the given features from the given set of features.

    Args:
        tx: 2D numpy array of features (one row per sample)
        header: header line of the .csv representing the data set; feature
            columns are assumed to start at offset 2 in the header
        columns_to_remove: indices (relative to tx) of the features that
            will be removed

    Returns:
        (tx, header) with the requested columns removed.
    """
    print("\nRemoving columns...")
    num_removed = 0
    # Process indices in ascending order so the `num_removed` shift stays
    # correct: the original loop silently removed the wrong columns when
    # the caller passed the indices unsorted.
    for col in sorted(columns_to_remove):
        tx = np.delete(tx, col - num_removed, 1)
        header = np.delete(header, col - num_removed + 2)
        num_removed += 1
    print("\n... finished.")
    return tx, header
def run_CEH(notebookDir, params_filename, ceh_options,
            verbose=True):
    """ Run the continuous enzymatic hydrolysis operation.

    This function runs the CEH unit operation.
    Through the ``ceh_options`` widgets, the user controls the following
    values:

    * DMR material properties with EH conditions
    * Final Time (float)
    * Show plots (bool)

    Args:
        notebookDir (str):
            The path to the Jupyter Notebook, used to specify the location
            of the input file and reset the working directory after this operation
            is finished.
        params_filename (str):
            The filename for the parameters yaml file including
            extension, e.g., ``'virteng_params.yaml'``
        ceh_options (WidgetCollection):
            A ``WidgetCollection`` object containing all of widgets used
            to solicit user input for CEH properties.
        verbose (bool, optional):
            Option to show print messages from executed file, default True.

    Returns:
        None
    """
    print('\nRunning Continuous Enzymatic Hydrolysis Model')
    # Export the CEH options to a global yaml file
    ceh_dict = ceh_options.export_widgets_to_dict(parent_name='CEH_input')
    dict_to_yaml(ceh_dict, params_filename, merge_with_existing=True)
    # Run the CEH model
    path_to_input_file = os.path.join(notebookDir, params_filename)
    # chdir into the model directory (the solver script appears to expect
    # relative paths -- TODO confirm) and restore the notebook dir after.
    os.chdir('two_phase_batch_model/')
    #run_script("VE_driver_batch_lignocellulose_CEH_FY21Q4.py", path_to_input_file, verbose=verbose)
    #run_script("VE_ceh_steady_multi_design_solve_withPowerConsumption.py", path_to_input_file, verbose=verbose)
    # cstr only CEH 03162022
    run_script("VE_cstr_only_ceh_steady_multi_design_solve_with_membraneLoop.py", path_to_input_file, verbose=verbose)
    os.chdir(notebookDir)
    print('\nFinished Continuous Enzymatic Hydrolysis')
def vol_clear():
    """CLEAR VOLUME DISPENSED

    Sends the pump the commands that reset the withdrawn (CLDWDR) and
    infused (CLDINF) volume counters over the serial port, then reads
    back one response line. Relies on module-level ``pump`` and ``ser``.
    """
    for command in ('CLDWDR', 'CLDINF'):
        ser.write((str(pump) + command + '\x0D').encode())
    ser.readline()
def pangenome(groups_file, fasta_list):
    #creating the len_dict
    """
    The len_dict is used to remember all of the common names and lengths of EVERY gene in each species, which are matched to the arbitrary name.
    The len_dict is in form len_dict = {speciesA:{arb_name1:[com_name, length], arb_name2:[com_name,length]}, speciesB: {arb_name1:[com_name, length], arb_name2:[com_name,length]}}
    """
    len_dict={}
    for the_file in fasta_list:
        each_file=open(str(the_file), 'r')
        the_file = the_file.split('.')
        # Derive a short species tag from the dotted filename segments.
        # NOTE(review): the 'E...' branch encodes special cases inferred
        # from the structure only -- confirm against the actual FASTA
        # file naming convention.
        if the_file[0] == 'E':
            if the_file[1][0] == 'v':
                species = 'verr'
            elif the_file[2] == 'typeA':
                species = 'pistA'
            elif the_file[2] == 'typeB':
                species = 'pistB'
            else:
                species = the_file[1][:5]
        else:
            species = the_file[0]
        len_dict[species] = {}
        for each_line in each_file:
            if each_line[0] == '>':
                # FASTA header line: parse it and reset the length
                # accumulator for the sequence lines that follow.
                information = get_info(each_line)
                length = 0
            else:
                #sequence line
                # 3 nucleotides per amino acid.
                # NOTE(review): every character of the line is counted,
                # including the trailing newline, inflating the length by
                # 3 per sequence line -- confirm whether intended.
                for amino_acid in each_line:
                    length +=3
                arb_name = information[0]
                com_name= information[1]
                len_dict[species][arb_name] = [com_name, length]
        each_file.close()
    # creating the gene_dict
    groups_file = open(str(groups_file), 'r')
    """
    The gene_dict is used to match and remember the genes within each cluster (similar genes), lengths of the cluster as a whole, and which species they correspond to.
    The gene_dict only accounts for the genes that are in MULTIPLE species, NOT THE GENES ONLY IN A SINGLE SPECIES.
    The gene_dict is in the form gene_dict = {speciesA: {cluster1: [[arb_name1, arb_name2], [len1, len2]]}, speciesB: {cluster1:[[arb_name1, arbname2], [len1, len2]]}}
    """
    gene_dict = {}
    arb_dict = {}
    for each_line in groups_file:
        # each_line consists of a cluster of genes, each with a different arbitrary name and species it belongs to
        each_line = each_line.split()
        cluster = each_line[0].rstrip(':')
        n=0
        for each_segment in each_line:
            # each segment looks like 'species|arbitrary_name'
            if n == 0:
                # cluster name
                n = 1
            else:
                n+=1
                each_segment = each_segment.split('|')
                species = each_segment[0]
                arb_name = each_segment[1]
                # The arb_dict is simply a quick reference of ONLY the genes that are in MULTIPLE SPECIES. Genes that only have a single copy in one species will not be included.
                arb_dict[arb_name] = 0
                if species not in gene_dict:
                    gene_dict[species] = {}
                if cluster not in gene_dict[species]:
                    gene_dict[species][cluster] = [[],[]]
                gene_dict[species][cluster][0].append(arb_name)
                length = len_dict[species][arb_name][1]
                gene_dict[species][cluster][1].append(length)
    """'
    The cluster_dict is used to remember the lengths of entire clusters (similar genes; essentially genes with very similar function).
    If speciesA has 3 copies of the gene and speciesB only has 1 copy, speciesA will take up more space. The purpose of this dict is to
    have all of the same genes line up visually. We want the maximum length for all the species. In the example, speciesB will have three slots for the gene and 2 will be empty.
    The cluster dict is in the format cluster_dict= {cluster1: integer, cluster2: integer}
    """
    cluster_dict = {}
    for species in gene_dict:
        for cluster in gene_dict[species]:
            # Keep the maximum total cluster length seen across species.
            if cluster not in cluster_dict or cluster_dict[cluster] < sum(gene_dict[species][cluster][1]):
                cluster_dict[cluster] = sum(gene_dict[species][cluster][1])
    return len_dict, gene_dict, cluster_dict, arb_dict
def argmin(x):
    """Deterministic argmin.

    Unlike torch.argmin, whose result is unspecified when several elements
    tie for the minimum, this always returns the column of the *first*
    element equal to the row minimum.

    Args:
        x (Tensor): only support rank-2 tensor
    Returns:
        rank-1 int64 Tensor with one column index per row.
    """
    assert x.ndim == 2
    row_min, _ = x.min(dim=1, keepdims=True)
    # nonzero enumerates tied minima in row-major order, so within each
    # row the columns come out ascending.
    rows, cols = torch.nonzero(x == row_min, as_tuple=True)
    rows, ties_per_row = torch.unique(rows, return_counts=True)
    ends = torch.cumsum(ties_per_row, 0)
    # Offset of the first tie of each row inside `cols`.
    starts = torch.cat([torch.tensor([0]), ends[:-1]])
    return cols[starts]
def check_geometry_size(footprint):
    """
    Excessive large geometries are problematic of AWS SQS (max size 256kb) and cause
    performance issues becuase they are stored in plain text in the JSON
    blob.

    This func reads the geojson and applies a simple heuristic to reduce the
    footprint size through simplification. With each iteration, the geometry
    is simplified by an additional 0.01 degrees of tolerance.

    Parameters
    ----------
    footprint : obj
        A shapely Polygon or MultiPolygon

    Returns
    -------
    geojson : dict
        A geojson representation of the geometry
    """
    geojson = footprint.__geo_interface__
    as_str = json.dumps(geojson)
    geomsize = len(as_str.encode('utf-8'))
    n_iterations = 0
    while geomsize > 125000:
        n_iterations += 1
        # Escalate the tolerance each pass: Douglas-Peucker simplification
        # is idempotent at a fixed tolerance, so re-running simplify(0.01)
        # on an already-simplified geometry never shrinks it further and
        # the original loop could spin forever.
        footprint = footprint.simplify(0.01 * n_iterations)
        geojson = footprint.__geo_interface__
        as_str = json.dumps(geojson)
        geomsize = len(as_str.encode('utf-8'))
    return geojson
def get_global_comments():
    """Returns all global comments.

    Queries the GlobalComment model through the Flask-SQLAlchemy query
    interface; returns a list of GlobalComment instances (empty when the
    table has no rows).
    """
    return GlobalComment.query.all()
def job_complete_pr_status(job, do_status_update=True):
    """
    Indicates that the job has completed.

    For pull-request events only: optionally updates the CI status on the
    Git server to reflect the job's final state, then tries to add a
    comment. Non-PR events are ignored.
    """
    if job.event.cause != models.Event.PULL_REQUEST:
        return
    git_api = job.event.build_user.api()
    if do_status_update:
        # Terminal job states mapped to (git status, human-readable message);
        # any state not listed is reported as a pass.
        outcomes = {
            models.JobStatus.FAILED_OK: (git_api.SUCCESS, "Failed but allowed"),
            models.JobStatus.CANCELED: (git_api.CANCELED, "Canceled"),
            models.JobStatus.FAILED: (git_api.FAILURE, "Failed"),
            models.JobStatus.INTERMITTENT_FAILURE: (git_api.SUCCESS, "Intermittent failure"),
        }
        status, msg = outcomes.get(job.status, (git_api.SUCCESS, "Passed"))
        git_api.update_pr_status(
            job.event.base,
            job.event.head,
            status,
            job.absolute_url(),
            msg,
            job.unique_name(),
            git_api.STATUS_JOB_COMPLETE,
        )
    add_comment(git_api, job.event.build_user, job)
def is_descending(path):
    """
    Return ``True`` if this profile is a descending
    profile.

    Delegates to the module-level ``_re_search`` helper with the
    precompiled ``_re_descending`` pattern. NOTE(review): the result is
    whatever ``_re_search`` returns (truthy/falsy), not necessarily a
    strict bool -- confirm callers only use it in boolean context.
    """
    return _re_search(path, _re_descending)
def mises_promo_gain_cote(cotes, mise_minimale, rang, output=False):
    """
    Compute the stake distribution for the "winning odds refunded as a
    freebet" promotion. (Translated from French: "Calcule la répartition
    des mises pour la promotion 'gain en freebet de la cote gagnée'".)

    :param cotes: list of odds, one per outcome
    :param mise_minimale: minimum stake placed on the promoted outcome
    :param rang: index of the promoted outcome in `cotes`
    :param output: when True, print the total stake and the target gain
    :return: list of stakes, one per outcome
    """
    gains = cotes[rang] * 0.77 + mise_minimale * cotes[rang]
    mis = [gains / cote for cote in cotes]
    mis[rang] = mise_minimale
    if output:
        print("somme mises=", sum(mis))
        print("gain=", gains)
    return mis
def recalculateResult(request, question_id):
    """Called when poll owner wants to recalculate result manually.

    Looks up the Question (404 if missing), recomputes its winner via
    getPollWinner, then redirects back to the referring page.
    NOTE(review): HTTP_REFERER may be absent, making the redirect target
    None -- confirm upstream guarantees a referer header.
    """
    question = get_object_or_404(Question, pk=question_id)
    getPollWinner(question)
    return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
def covden_win(cov_resampled, lut):
    """
    Method to associate resampled vegetation coverage to PRMS covden_win.

    Reuses covden_sum() and renames the resulting record, so the winter
    cover density shares the summer computation.

    Parameters
    ----------
    cov_resampled : np.ndarray
    lut : dict

    Returns
    -------
    gsflow.prms.ParameterRecord object
    """
    covden = covden_sum(cov_resampled, lut)
    covden.name = "covden_win"
    return covden
def login_webauthn_route():
    """login webauthn route

    Completes the WebAuthn (second-factor) login for the user whose id was
    stored in the session by the preceding login step. On success the
    session is regenerated and the user is logged in; on failure the form
    is re-rendered with an error flash.
    """
    user = User.query.filter(User.id == session.get('webauthn_login_user_id')).one_or_none()
    if not user:
        return login_manager.unauthorized()
    form = WebauthnLoginForm()
    if form.validate_on_submit():
        try:
            # The browser posts a base64-wrapped, CBOR-encoded assertion.
            assertion = cbor.decode(b64decode(form.assertion.data))
            webauthn.authenticate_complete(
                session.pop('webauthn_login_state'),
                webauthn_credentials(user),
                assertion['credentialRawId'],
                ClientData(assertion['clientDataJSON']),
                AuthenticatorData(assertion['authenticatorData']),
                assertion['signature'])
            # Fresh session id after authentication (session-fixation defense).
            regenerate_session()
            login_user(user)
            return redirect_after_login()
        except (KeyError, ValueError) as e:
            current_app.logger.exception(e)
            flash('Login error during Webauthn authentication.', 'error')
    return render_template('auth/login_webauthn.html', form=form)
def generate_feature_matrix(genotypes, phenotypes, reg_type,phewas_cov=''): # diff - done
    """
    Generates the feature matrix that will be used to run the regressions.

    Builds a (3, n_subjects, n_phewas_codes) array:
      plane 0 -- the phenotype feature (presence flag, count, or duration,
                 depending on reg_type);
      plane 1 -- MaxAgeAtICD per code (MaxAgeBeforeDx where missing);
      plane 2 -- optional covariate flag: subject has `phewas_cov`.

    :param genotypes: dataframe with one row per subject ('id',
        'MaxAgeBeforeDx' columns are read here)
    :param phenotypes: dataframe of per-subject phenotype records
    :param reg_type: 0 = binary presence, 1 = count-based, 2 = duration-based
    :param phewas_cov: optional phewas code used as a covariate
    :returns: the populated numpy feature matrix
    :rtype: numpy.ndarray

    NOTE(review): relies on module-level `phewas_codes`; for reg_type 1/2
    the feature is scaled by the subject's first 'lor' value -- confirm
    that column's semantics.
    """
    feature_matrix = np.zeros((3, genotypes.shape[0], phewas_codes.shape[0]), dtype=float)
    count = 0
    for i in genotypes['id']:
        if reg_type == 0:
            # Binary regression: flag codes that appear with count > 0.
            temp = pd.DataFrame(phenotypes[phenotypes['id'] == i][['phewas_code', 'MaxAgeAtICD','count']]).drop_duplicates()
            match = phewas_codes['phewas_code'].isin(list(phenotypes[phenotypes['id'] == i]['phewas_code']))
            cts = pd.merge(phewas_codes, temp, on='phewas_code', how='left')['count']
            cts[np.isnan(cts)] = 0
            match = (match)&(cts>0)
            feature_matrix[0][count, match[match == True].index] = 1
            age = pd.merge(phewas_codes, temp, on='phewas_code', how='left')['MaxAgeAtICD']
            #assert np.all(np.isfinite(age)), "make sure MaxAgeAtVisit is filled"
            # Missing ages fall back to the subject's MaxAgeBeforeDx.
            age[np.isnan(age)] = genotypes[genotypes['id'] == i].iloc[0]['MaxAgeBeforeDx']
            feature_matrix[1][count, :] = age
            if phewas_cov:
                feature_matrix[2][count, :] = int(phewas_cov in list(phenotypes[phenotypes['id'] == i]['phewas_code']))
        else:
            if reg_type == 1:
                # Count regression: per-code counts, normalized by 'lor'.
                temp = pd.DataFrame(
                    phenotypes[phenotypes['id'] == i][['phewas_code', 'MaxAgeAtICD', 'count','lor']]).drop_duplicates()
                cts = pd.merge(phewas_codes, temp, on='phewas_code', how='left')['count']
                cts[np.isnan(cts)] = 0
                if temp.empty!=1:
                    cts=cts/temp['lor'].iloc[0]
                feature_matrix[0][count, :] = cts
                age = pd.merge(phewas_codes, temp, on='phewas_code', how='left')['MaxAgeAtICD']
                #assert np.all(np.isfinite(age)), "make sure MaxAgeAtVisit is filled"
                age[np.isnan(age)] = genotypes[genotypes['id'] == i].iloc[0]['MaxAgeBeforeDx']
                feature_matrix[1][count, :] = age
                if phewas_cov:
                    feature_matrix[2][count, :] = int(
                        phewas_cov in list(phenotypes[phenotypes['id'] == i]['phewas_code']))
            elif reg_type == 2:
                # Duration regression: per-code durations, normalized by 'lor'.
                temp = pd.DataFrame(
                    phenotypes[phenotypes['id'] == i][['phewas_code', 'MaxAgeAtICD', 'duration','lor']]).drop_duplicates()
                dura = pd.merge(phewas_codes, temp, on='phewas_code', how='left')['duration']
                dura[np.isnan(dura)] = 0
                if temp.empty!=1:
                    dura=dura/temp['lor'].iloc[0]
                feature_matrix[0][count, :] = dura
                age = pd.merge(phewas_codes, temp, on='phewas_code', how='left')['MaxAgeAtICD']
                #assert np.all(np.isfinite(age)), "make sure MaxAgeAtVisit is filled"
                age[np.isnan(age)] = genotypes[genotypes['id'] == i].iloc[0]['MaxAgeBeforeDx']
                feature_matrix[1][count, :] = age
                if phewas_cov:
                    feature_matrix[2][count, :] = int(
                        phewas_cov in list(phenotypes[phenotypes['id'] == i]['phewas_code']))
        count += 1
    return feature_matrix
def import_journals(json_file: str, session: Session):
    """Facade covering the steps to process and load JSON-format journals
    into the Kernel base (translated from Portuguese). Journals that
    already exist are logged and skipped; unreadable or invalid input
    files are logged at debug level."""
    try:
        raw_journals = reading.read_json_file(json_file)
        kernel_journals = conversion.conversion_journals_to_kernel(
            journals=raw_journals
        )
        for journal_data in kernel_journals:
            manifest = ManifestDomainAdapter(manifest=journal_data)
            try:
                session.journals.add(data=manifest)
                session.changes.add(
                    {"timestamp": utcnow(), "entity": "Journal", "id": manifest.id()}
                )
            except AlreadyExists as exc:
                # Duplicate journal: record and continue with the rest.
                logger.info(str(exc))
    except (FileNotFoundError, ValueError) as exc:
        logger.debug(str(exc))
def get_entity(db: SQLAlchemy, id: str) -> EntityOut:
    """Get an entity by id.

    Opens a fresh session from the given SQLAlchemy wrapper, looks the
    entity up via the repository layer, and wraps it in an EntityOut DTO.
    Raises exc.NotFound when no matching entity exists.
    """
    db_session = db.session_class()
    entry = repository.get_entity(db_session, id)
    if not entry:
        raise exc.NotFound()
    return EntityOut(entry)
def main(*args):
    """
    Runs the Git trace extractor.

    Args:
        *args: optional command-line arguments; when omitted, argparse
            falls back to sys.argv (unchanged CLI behavior).
    """
    parser = create_parser()
    # Forward explicit arguments to argparse: the original accepted *args
    # but ignored them, so programmatic invocation was impossible.
    options = parser.parse_args(list(args) or None)
    try:
        extractor = gitrace.TraceExtractor(options.outpath, header=options.header)
        extractor.extract(options.repos, options.branch)
    except Exception as e:
        parser.error(str(e))
def remove_low_confidence(confidence, data, target, feature_labels):
    """Remove data points with low confidence, mutating in place.

    Parameters
    ----------
    confidence : float
        Minimum confidence value; points at or below it are removed
    data : list
        The data to be transformed (modified in place)
    target : list
        The labels for the data to be transformed (modified in place)
    feature_labels : list
        The names of the features defining a data point
    """
    confidence_index = feature_labels.index("hand_confidence")
    # Iterate from the end so deletions don't shift the indices of the
    # elements still to be examined: the original forward loop skipped
    # the element immediately after every deletion.
    for i in reversed(range(len(data))):
        if data[i][confidence_index] <= confidence:
            del data[i]
            del target[i]
def cov(sources):
    """
    Given the array of sources for all image patches, calculate the covariance
    array between all modes.

    Parameters
    ----------
    sources : numpy array (floats)
        The {NUM_MODES x NUM_PATCHES} array of sources.

    Returns
    -------
    numpy array (floats)
        The {NUM_MODES x NUM_MODES} covariance array between all modes.
    """
    n_patches = sources.shape[1]
    return sources.dot(sources.T) / n_patches
def update_threat_intel_set(DetectorId=None, ThreatIntelSetId=None, Name=None, Location=None, Activate=None):
    """
    Updates the ThreatIntelSet specified by the ThreatIntelSet ID.
    See also: AWS API Documentation

    Exceptions

    :example: response = client.update_threat_intel_set(
        DetectorId='string',
        ThreatIntelSetId='string',
        Name='string',
        Location='string',
        Activate=True|False
    )

    :type DetectorId: string
    :param DetectorId: [REQUIRED]\nThe detectorID that specifies the GuardDuty service whose ThreatIntelSet you want to update.\n

    :type ThreatIntelSetId: string
    :param ThreatIntelSetId: [REQUIRED]\nThe unique ID that specifies the ThreatIntelSet that you want to update.\n

    :type Name: string
    :param Name: The updated name of the ThreatIntelSet.

    :type Location: string
    :param Location: The updated URI of the file that contains the ThreatIntelSet.

    :type Activate: boolean
    :param Activate: The updated Boolean value that specifies whether the ThreatIntelSet is active or not.

    :rtype: dict
    ReturnsResponse Syntax
    {}

    Response Structure

    (dict) --

    Exceptions

    GuardDuty.Client.exceptions.BadRequestException
    GuardDuty.Client.exceptions.InternalServerErrorException

    :return: {}

    :returns:
    (dict) --

    """
    # Stub generated from the boto3/GuardDuty API documentation; no local
    # implementation -- the real call is dispatched by the boto3 client.
    pass
def VerifierMiddleware(verifier):
    """Common wrapper for the authentication modules.

    * Parses the request before passing it on to the authentication module.
    * Sets 'pyoidc' cookie if authentication succeeds.
    * Redirects the user to complete the authentication.
    * Allows the user to retry authentication if it fails.

    :param verifier: authentication module
    :return: a WSGI application wrapping ``verifier.verify``
    """
    @wraps(verifier.verify)
    def wrapper(environ, start_response):
        data = get_post(environ)
        kwargs = dict(urlparse.parse_qsl(data))
        # 'state' round-trips through the form as a URL-quoted JSON blob.
        kwargs["state"] = json.loads(urlparse.unquote(kwargs["state"]))
        val, completed = verifier.verify(**kwargs)
        # Not completed: the verifier returned another WSGI app (e.g. a
        # retry page) that should handle this request itself.
        if not completed:
            return val(environ, start_response)
        if val:
            set_cookie, cookie_value = verifier.create_cookie(val, "auth")
            cookie_value += "; path=/"
            url = "{base_url}?{query_string}".format(
                base_url="/authorization",
                query_string=kwargs["state"]["query"])
            response = SeeOther(url, headers=[(set_cookie, cookie_value)])
            return response(environ, start_response)
        else: # Unsuccessful authentication
            # Same redirect target as above, but without the auth cookie.
            url = "{base_url}?{query_string}".format(
                base_url="/authorization",
                query_string=kwargs["state"]["query"])
            response = SeeOther(url)
            return response(environ, start_response)
    return wrapper
def hls_stream(hass, hass_client):
    """Create test fixture for creating an HLS client for a stream.

    Returns an async factory: given a stream, it enables low-latency HLS
    on it, opens an aiohttp test client, and wraps both in an HlsClient
    pointed at the stream's HLS endpoint URL.
    """
    async def create_client_for_stream(stream):
        stream.ll_hls = True
        http_client = await hass_client()
        parsed_url = urlparse(stream.endpoint_url(HLS_PROVIDER))
        return HlsClient(http_client, parsed_url)
    return create_client_for_stream
def AsinhNorm(a=0.1):
    """Custom Arcsinh Norm.

    Wraps astropy's ImageNormalize with an AsinhStretch.

    Parameters
    ----------
    a : float, optional
        Softening parameter passed to AsinhStretch (default 0.1).

    Returns
    -------
    ImageNormalize
    """
    return ImageNormalize(stretch=AsinhStretch(a=a))
def sub(x, y):
    """Subtract x from y -- note the argument order: returns y - x."""
    return y-x
def addSplashScreen(splashSDKName, decompileDir):
    """ add splash screen

    channel hasn't Splash if channel["bHasSplash"] = 0
    otherwise channel["bHasSplash"] express orientation and color

    Copies the splash resources, smali code and layout xml matching the
    configured style into the decompiled apk, rewires the manifest so the
    splash activity becomes the launcher, and patches the smali so the
    splash forwards to the original main activity.
    Returns a (status, added) tuple: status 0/1 = ok/error, added tells
    whether a splash was injected.
    NOTE(review): the splashSDKName parameter is currently unused.
    """
    channelHasSplash ="0";
    try:
        #read has splash to funcellconfig.xml
        config = ET.parse(file_operate.getConfigXmlPath())
        root = config.getroot()
        splash = root.find("splash")
        channelHasSplash = splash.get('hasSplash');
    except Exception,e:
        print e
        print "Error: cannot parse file: funcellconfig.xml."
    print 'channelHasSplash = '+channelHasSplash
    if channelHasSplash == "0":
        # No splash configured for this channel.
        return (0, False)
    SplashPath = decompileDir + '/ForSplash/' + channelHasSplash + '/'
    SplashPath = file_operate.getFullPath(SplashPath)
    print "SplashPath : "+SplashPath
    SplashCodePath = 'channel/SplashActivity.smali'
    SplashCodePath = file_operate.getFullPath(SplashCodePath)
    print "SplashCodePath : "+SplashCodePath
    SplashCode2Path = 'channel/SplashActivity$1.smali'
    SplashCode2Path = file_operate.getFullPath(SplashCode2Path)
    print "SplashCode2Path : "+SplashCode2Path
    xmlSplashSrc = 'channel/funcell_plugin_splash.xml'
    xmlSplashSrc = file_operate.getFullPath(xmlSplashSrc)
    print "xmlSplashSrc : "+xmlSplashSrc
    # All four source artifacts must exist before touching the apk.
    if not os.path.exists(SplashPath) or not os.path.exists(SplashCodePath) or not os.path.exists(SplashCode2Path) or not os.path.exists(xmlSplashSrc):
        error_operate.error(111)
        return (1, False)
    codeDir = decompileDir+'/oldApkDir/' + '/smali/com/haowan/funcell/sdk/api/splash'
    newSplashCodePath = codeDir + '/SplashActivity.smali'
    print "newSplashCodePath : "+newSplashCodePath
    file_operate.copyFile(SplashCodePath, newSplashCodePath)
    newSplashCode2Path = codeDir + '/SplashActivity$1.smali'
    file_operate.copyFile(SplashCode2Path, newSplashCode2Path)
    # Swap the launcher: drop the original start activity from the manifest
    # and remember its name so the splash can hand off to it afterwards.
    activityName = removeStartActivity(channelHasSplash, decompileDir+'/oldApkDir/')
    modifyManifestForSplash(channelHasSplash, decompileDir+'/oldApkDir/')
    xmlSplashTarget = decompileDir+'/oldApkDir/' + '/res/layout'
    if not os.path.exists(xmlSplashTarget):
        os.mkdir(xmlSplashTarget)
    xmlSplashTarget = xmlSplashTarget + '/funcell_plugin_splash.xml'
    file_operate.copyFile(xmlSplashSrc, xmlSplashTarget)
    resDir = decompileDir +'/oldApkDir/'+ '/res'
    file_operate.copyFiles(SplashPath, resDir)
    # (kept for reference) an earlier approach recorded the main activity
    # in assets/developerInfo.xml instead of patching the smali:
    # assetsDir = decompileDir + '/assets'
    # developerFile = assetsDir + '/developerInfo.xml'
    # if not os.path.exists(assetsDir):
    #     os.makedirs(assetsDir)
    # targetTree = None
    # targetRoot = None
    # if not os.path.exists(developerFile):
    #     targetTree = ElementTree()
    #     targetRoot = Element('developer')
    #     targetTree._setroot(targetRoot)
    # else:
    #     targetTree = ET.parse(developerFile)
    #     targetRoot = targetTree.getroot()
    # infoNode = targetRoot.find('channel')
    # if infoNode is None:
    #     infoNode = SubElement(targetRoot, 'channel')
    # infoNode.set('GameMainActivity', activityName)
    # targetTree.write(developerFile, 'UTF-8')
    print "add splash activity name : "+activityName
    # Substitute the placeholder in the copied smali with the real
    # main-activity name recorded above.
    file_operate.modifyFileContent(newSplashCodePath, '.smali', '###FuncellSdk_Start_Activity###', activityName)
    return (0, True)
def load_batch(f_path, label_key='labels'):
    """Internal utility for parsing CIFAR data.

    # Arguments
        f_path: path to the pickled batch file to parse.
        label_key: key for label data in the retrieved dictionary.

    # Returns
        A tuple `(data, labels)` where data has shape
        (n_samples, 3, 32, 32).
    """
    with open(f_path, 'rb') as f:
        if sys.version_info < (3,):
            batch = pickle.load(f)
        else:
            raw = pickle.load(f, encoding='bytes')
            # Under Python 3 the pickled keys come back as bytes; decode.
            batch = {key.decode('utf8'): value for key, value in raw.items()}
    images = batch['data']
    labels = batch[label_key]
    images = images.reshape(images.shape[0], 3, 32, 32)
    return images, labels
def get_k8s_helper(namespace=None, silent=False):
    """
    Return the process-wide K8sHelper singleton, creating it on first use.

    :param namespace: optional Kubernetes namespace for the helper
    :param silent: set to true if you're calling this function from a code that might run from remotely (outside of a
     k8s cluster)

    NOTE(review): namespace/silent are ignored once the singleton exists;
    later callers get the helper created by the first call.
    """
    global _k8s
    if not _k8s:
        _k8s = K8sHelper(namespace, silent=silent)
    return _k8s
def authenticate():
    """Authorize.

    Redirects the browser to the Vend OAuth authorization URL produced
    by Vend().authenticate().
    """
    return redirect(Vend().authenticate())
def non_device_name_convention(host):
    """
    Helper filter function to filter hosts based targeting
    host names which do NOT match a specified naming convention

    Examples of names matching the convention:
        - lab-junos-08.tstt.dfjt.local
        - dfjt-arista-22.prd.dfjt.local
        - lab-nxos-001.lab.dfjt.local

    :param host: The host you want to filter on
    :return bool: True if does not match, False if it matches the convention
    """
    # Raw string avoids invalid-escape warnings (a future SyntaxError).
    # The original pattern (\w{3}-\w+-\d{2}.\w{3}.dfjt.local) matched NONE
    # of the documented examples: \w{3} rejected the 4-char prefix "dfjt"
    # and subdomain "tstt", and \d{2} rejected the 3-digit "001". Widened
    # quantifiers and escaped dots make the regex match the convention.
    if re.match(r"\w{3,4}-\w+-\d{2,3}\.\w{3,4}\.dfjt\.local", host.name):
        return False
    else:
        return True
def enable_accessify():
    """
    Enabling the accessify.

    Removes the DISABLE_ACCESSIFY environment variable when present;
    a no-op when the variable is not set.
    """
    os.environ.pop('DISABLE_ACCESSIFY', None)
def recursive_swagger_spec(minimal_swagger_dict, node_spec):
    """
    Return a swagger spec with a #/definitions/Node that is
    recursive.

    Mutates ``minimal_swagger_dict`` by installing ``node_spec`` under
    definitions['Node'] before wrapping the dict in a Spec.
    """
    minimal_swagger_dict['definitions']['Node'] = node_spec
    return Spec(minimal_swagger_dict)
def _get_base_parts(config):
    """
    Builds the base ip array for the first N octets based on
    supplied base or on the /N subnet mask in the cidr

    :param config: spec config containing either a 'base' dotted prefix or
        a 'cidr' with one of the /8, /16, /24 masks
    :return: list of leading octet strings (empty when neither 'base' nor
        'cidr' is supplied)
    :raises datagen.SpecException: for a malformed mask, an unsupported
        mask, an invalid IP, or a cidr without a '/'
    """
    if 'base' in config:
        parts = config.get('base').split('.')
    else:
        parts = []
    if 'cidr' in config:
        cidr = config['cidr']
        if '/' in cidr:
            mask = cidr[cidr.index('/') + 1:]
            if not mask.isdigit():
                raise datagen.SpecException('Invalid Mask in cidr for config: ' + json.dumps(config))
            if int(mask) not in [8, 16, 24]:
                raise datagen.SpecException('Invalid Subnet Mask in cidr for config: ' + json.dumps(config)
                                            + ' only one of /8 /16 or /24 supported')
            ip_parts = cidr[0:cidr.index('/')].split('.')
            if len(ip_parts) < 4 or not all(part.isdigit() for part in ip_parts):
                raise datagen.SpecException('Invalid IP in cidr for config: ' + json.dumps(config))
            # Keep one leading octet per /8 step; the mask was validated
            # above, so exactly one of these assignments applies.
            if mask == '8':
                parts = ip_parts[0:1]
            if mask == '16':
                parts = ip_parts[0:2]
            if mask == '24':
                parts = ip_parts[0:3]
        else:
            # NOTE(review): this branch fires when the cidr has no '/',
            # but the message talks about the subnet mask -- a clearer
            # "missing '/' in cidr" message may be warranted.
            raise datagen.SpecException('Invalid Subnet Mask in cidr for config: ' + json.dumps(config)
                                        + ' only one of /8 /16 or /24 supported')
    return parts
def diff_exp(counts, group1, group2):
    """Computes differential expression between group 1 and group 2
    for each column in the dataframe counts.

    Uses a two-sample z statistic with a pooled standard error and a
    two-sided normal-approximation p-value.
    Returns a dataframe of Z-scores and p-values."""
    grp1, grp2 = counts.loc[group1], counts.loc[group2]
    pooled_sd = np.sqrt(grp1.var() / len(group1) + grp2.var() / len(group2))
    z_scores = ((grp1.mean() - grp2.mean()) / pooled_sd).fillna(0)
    # two-sided tail probability under the standard normal
    p_vals = (1 - stats.norm.cdf(np.abs(z_scores))) * 2
    df = pd.DataFrame({'z': z_scores})
    df['p'] = p_vals
    return df
def run():
    """
    Run doba

    Discovers containers, resolves and preflights the backup handlers
    they require, then backs up each container in turn.

    :return:
    """
    containers = get_containers()
    required_handlers = get_required_handlers(containers)
    preflight_data_handlers(required_handlers)
    for container in containers:
        backup(container)
def load(source):
    """Read an HTS full-context label (for Sinsy).

    (Docstring translated from Japanese.)

    :param source: a file path or an iterable of label lines
    :return: the populated HTSFullLabel object
    """
    song = HTSFullLabel()
    return song.load(source)
def _binary_clf_curve(y_true, y_score, pos_label=None):
    """Calculate true and false positives per binary classification threshold.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True targets of binary classification

    y_score : array, shape = [n_samples]
        Estimated probabilities or decision function

    pos_label : int, optional (default=1)
        The label of the positive class

    Returns
    -------
    fps : array, shape = [n_thresholds]
        A count of false positives, at index i being the number of negative
        samples assigned a score >= thresholds[i]. The total number of
        negative samples is equal to fps[-1] (thus true negatives are given by
        fps[-1] - fps).

    tps : array, shape = [n_thresholds := len(np.unique(y_score))]
        An increasing count of true positives, at index i being the number
        of positive samples assigned a score >= thresholds[i]. The total
        number of positive samples is equal to tps[-1] (thus false negatives
        are given by tps[-1] - tps).

    thresholds : array, shape = [n_thresholds]
        Decreasing score values.
    """
    y_true, y_score = check_arrays(y_true, y_score)
    y_true = column_or_1d(y_true)
    y_score = column_or_1d(y_score)

    # ensure binary classification if pos_label is not specified
    classes = np.unique(y_true)
    if (pos_label is None and
        not (np.all(classes == [0, 1]) or
             np.all(classes == [-1, 1]) or
             np.all(classes == [0]) or
             np.all(classes == [-1]) or
             np.all(classes == [1]))):
        raise ValueError("Data is not binary and pos_label is not specified")
    elif pos_label is None:
        pos_label = 1.

    # make y_true a boolean vector
    y_true = (y_true == pos_label)

    # Sort scores and corresponding truth values
    # (mergesort is stable, so tied scores keep their original order)
    desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
    y_score = y_score[desc_score_indices]
    y_true = y_true[desc_score_indices]

    # y_score typically has many tied values. Here we extract
    # the indices associated with the distinct values. We also
    # concatenate a value for the end of the curve.
    distinct_value_indices = np.where(np.diff(y_score))[0]
    threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]

    # accumulate the true positives with decreasing threshold
    tps = y_true.cumsum()[threshold_idxs]
    # false positives = samples at or above threshold minus true positives
    fps = 1 + threshold_idxs - tps
    return fps, tps, y_score[threshold_idxs]
def apply_config(config: Dict[str, Callable[[], None]]):
    """Apply curried config handlers by invoking each stored callable."""
    for handler in config.values():
        handler()
def term_open_failed(data=None):
    """
    Construct a template for a term "open-failed" event.

    :param data: optional payload attached under the 'data' key
    :return: the populated term template
    """
    event_tpl = term()
    event_tpl.addKey(name='event', data="open-failed")
    if data is not None:
        event_tpl.addKey(name='data', data=data)
    return event_tpl
def create_error_payload(exception, message, endpoint_id):
    """
    Build the error payload to be sent as a response in case of failure.

    :param exception: the exception (or its type/name) that occurred
    :param message: human-readable description of the failure
    :param endpoint_id: endpoint the message was destined for; falsy
        values are replaced with 'NO_ENDPOINT_ID'
    :return: dict with 'status', 'endpointId' and 'message' keys
    """
    detail = f'{exception}: {message}'
    print(detail)
    return {
        'status': 'MESSAGE_NOT_SENT',
        'endpointId': endpoint_id if endpoint_id else 'NO_ENDPOINT_ID',
        'message': detail,
    }
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.