content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def core_rotd(sym_factor, flux_file_name, stoich):
    """Build the MESS-input `Core` section for a variational
    reaction-coordinate TST transition state by filling the
    ``core_rotd.mako`` template.

    :param sym_factor: symmetry factor of transition state
    :type sym_factor: float
    :param flux_file_name: name of the flux file referenced by the section
    :type flux_file_name: str
    :param stoich: combined stoichiometry of dissociation species 1 and 2
    :type stoich: str
    :rtype: str
    """
    # keys consumed by the Mako template
    template_keys = {
        'sym_factor': sym_factor,
        'flux_file_name': flux_file_name,
        'stoich': stoich,
    }
    return build_mako_str(
        template_file_name='core_rotd.mako',
        template_src_path=SPEC_INFO_PATH,
        template_keys=template_keys)
def last_gen(genobj):
    """Exhaust an iterable (typically a generator) and return its last element.

    :param genobj: any iterable
    :return: the final element yielded by ``genobj``
    :raises ValueError: if the iterable yields no elements (the original
        raised an obscure ``UnboundLocalError`` in that case)
    """
    # sentinel distinguishes "no element seen" from a legitimate None element
    _missing = object()
    last_e = _missing
    for item in genobj:
        last_e = item
    if last_e is _missing:
        raise ValueError("last_gen() called on an empty iterable")
    return last_e
def get_section_name_mapping(lattice):
    """Return a per-element list of section labels for a ring lattice.

    Each element gets a two-digit sector number (01-50) plus an
    up-/downstream letter ('U'/'D'), e.g. '01U'. Sectors are delimited by
    the 'B' dipoles; the label switch points within a sector come from the
    last 'B' and last 'QF' of that sector.

    NOTE(review): assumes the lattice contains a 'start' marker plus 'B'
    and 'QF' family names, and that len(B) is a multiple of 50 — confirm
    against the model that produces the lattice.
    """
    lat = lattice[:]
    section_map = ['' for i in range(len(lat))]
    # find where the nomenclature starts counting and shift the lattice:
    start = _pyaccel.lattice.find_indices(lat, 'fam_name', 'start')[0]
    b1 = _pyaccel.lattice.find_indices(lat, 'fam_name', 'B')
    if b1[0] > start:
        ind_shift = (b1[-1] + 1)  # Next element of last b1
    else:
        for i in b1[::-1]:  # except there is a b1 before start
            if i < start:
                ind_shift = i + 1
                break
    lat = _pyaccel.lattice.shift(lat, ind_shift)
    # Find indices important to define the change of the names of
    # the subsections
    b = _pyaccel.lattice.find_indices(lat, 'fam_name', 'B')
    qf = _pyaccel.lattice.find_indices(lat, 'fam_name', 'QF')
    b_nrsegs = len(b)//50
    # divide the ring in 50 sectors defined by the b1 dipoles:
    Sects = []
    ini = 0
    for i in range(len(b)//b_nrsegs):
        fim = b[(i+1)*b_nrsegs-1] + 1
        Sects.append(list(range(ini, fim)))
        ini = fim
    # Names of the subsections:
    sub_secs = ['U', 'D']
    for i, sec in enumerate(Sects, 1):
        # conditions that define change in subsection name:
        # last dipole segment of the sector marks one transition
        sec_b = [x for x in b if sec[0] <= x <= sec[-1]]
        relev_inds = [sec_b[-1]]
        # last QF of the sector marks the other transition
        sec_qf = [x for x in qf if sec[0] <= x <= sec[-1]]
        relev_inds += [sec_qf[-1]]
        relev_inds.sort()
        # fill the section_map variable
        ref = 0
        for j in sec:
            # map back to the original (unshifted) element index
            section_map[(ind_shift+j) % len(lat)] = "{0:02d}".format(i)
            section_map[(ind_shift+j) % len(lat)] += sub_secs[ref]
            if j >= relev_inds[ref]:
                ref += 1
    return section_map
import math
def from_quaternion(quaternions, name=None):
    """Converts quaternions to Euler angles.

    Args:
      quaternions: A tensor of shape `[A1, ..., An, 4]`, where the last dimension
        represents a normalized quaternion.
      name: A name for this op that defaults to "euler_from_quaternion".

    Returns:
      A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents
      the three Euler angles.
    """
    def general_case(r00, r10, r21, r22, r20, eps_addition):
        """Handles the general case."""
        theta_y = -tf.asin(r20)
        sign_cos_theta_y = safe_ops.nonzero_sign(tf.cos(theta_y))
        # nudge r00/r22 away from zero so the atan2 arguments stay well defined
        r00 = safe_ops.nonzero_sign(r00) * eps_addition + r00
        r22 = safe_ops.nonzero_sign(r22) * eps_addition + r22
        theta_z = tf.atan2(r10 * sign_cos_theta_y, r00 * sign_cos_theta_y)
        theta_x = tf.atan2(r21 * sign_cos_theta_y, r22 * sign_cos_theta_y)
        return tf.stack((theta_x, theta_y, theta_z), axis=-1)

    def gimbal_lock(r01, r02, r20, eps_addition):
        """Handles Gimbal locks."""
        sign_r20 = safe_ops.nonzero_sign(r20)
        r02 = safe_ops.nonzero_sign(r02) * eps_addition + r02
        theta_x = tf.atan2(-sign_r20 * r01, -sign_r20 * r02)
        # at gimbal lock the pitch angle is exactly +/- pi/2
        theta_y = -sign_r20 * tf.constant(math.pi / 2.0, dtype=r20.dtype)
        theta_z = tf.zeros_like(theta_x)
        angles = tf.stack((theta_x, theta_y, theta_z), axis=-1)
        return angles

    with tf.compat.v1.name_scope(name, "euler_from_quaternion", [quaternions]):
        quaternions = tf.convert_to_tensor(value=quaternions)
        shape.check_static(
            tensor=quaternions,
            tensor_name="quaternions",
            has_dim_equals=(-1, 4))
        x, y, z, w = tf.unstack(quaternions, axis=-1)
        # 2*component terms of the quaternion-to-rotation-matrix conversion
        tx = safe_ops.safe_shrink(2.0 * x, -2.0, 2.0, True)
        ty = safe_ops.safe_shrink(2.0 * y, -2.0, 2.0, True)
        tz = safe_ops.safe_shrink(2.0 * z, -2.0, 2.0, True)
        twx = tx * w
        twy = ty * w
        twz = tz * w
        txx = tx * x
        txy = ty * x
        txz = tz * x
        tyy = ty * y
        tyz = tz * y
        tzz = tz * z
        # The following is clipped due to numerical instabilities that can take
        # some entries outside the [-1;1] range.
        r00 = safe_ops.safe_shrink(1.0 - (tyy + tzz), -1.0, 1.0, True)
        r10 = safe_ops.safe_shrink(txy + twz, -1.0, 1.0, True)
        r21 = safe_ops.safe_shrink(tyz + twx, -1.0, 1.0, True)
        r22 = safe_ops.safe_shrink(1.0 - (txx + tyy), -1.0, 1.0, True)
        r20 = safe_ops.safe_shrink(txz - twy, -1.0, 1.0, True)
        r01 = safe_ops.safe_shrink(txy - twz, -1.0, 1.0, True)
        r02 = safe_ops.safe_shrink(txz + twy, -1.0, 1.0, True)
        eps_addition = asserts.select_eps_for_addition(quaternions.dtype)
        general_solution = general_case(r00, r10, r21, r22, r20, eps_addition)
        gimbal_solution = gimbal_lock(r01, r02, r20, eps_addition)
        # The general solution is unstable close to the Gimbal lock (|r20|~1),
        # so the dedicated gimbal solution is used in that neighbourhood.
        is_gimbal = tf.less(tf.abs(tf.abs(r20) - 1.0), 1.0e-6)
        gimbal_mask = tf.stack((is_gimbal, is_gimbal, is_gimbal), axis=-1)
        return tf.where(gimbal_mask, gimbal_solution, general_solution)
def is_polygonal(n, num):
    """
    Return True when ``num`` is an n-gonal number.

    Valid for all n >= 3 and num >= 1; anything outside that range is False.
    """
    if n < 3 or num < 1:
        return False
    # Invert the polygonal-number formula to get the candidate index t,
    # then confirm by recomputing the n-gonal number at that index.
    discriminant = 8 * num * (n - 2) + (n - 4) ** 2
    t = int((sqrt(discriminant) + (n - 4)) / (2 * (n - 2)))
    return poly(n, t) == num
def map2pix(geoTransform, x, y):
    """ transform map coordinates to local image coordinates

    Parameters
    ----------
    geoTransform : tuple, size=(6,1)
        georeference transform of an image:
        x = gt[0] + j*gt[1] + i*gt[2]
        y = gt[3] + j*gt[4] + i*gt[5]
    x : np.array, size=(m), ndim={1,2,3}, dtype=float
        horizontal map coordinate.
    y : np.array, size=(m), ndim={1,2,3}, dtype=float
        vertical map coordinate.

    Returns
    -------
    i : np.array, ndim={1,2,3}, dtype=float
        row coordinate(s) in local image space
    j : np.array, ndim={1,2,3}, dtype=float
        column coordinate(s) in local image space

    See Also
    --------
    pix2map

    Notes
    -----
    Two different coordinate system are used here:

    .. code-block:: text

        indexing   |           indexing    ^ y
        system 'ij'|           system 'xy' |
                   |                       |
                   |       i               |       x
           --------+-------->      --------+-------->
                   |                       |
                   |                       |
        image      | j         map         |
        based      v           based       |

    The linear part of the transform must be invertible
    (gt[1]*gt[5] != gt[2]*gt[4]).
    """
    if isinstance(x, np.ndarray):
        assert x.shape == y.shape, ('arrays should be of the same size')
    assert isinstance(geoTransform, tuple), ('geoTransform should be a tuple')
    # NOTE: do not modify x/y in place (offsetting the pixel center here
    # would mutate the caller's arrays) -- work on fresh offsets instead.
    dx = x - geoTransform[0]
    dy = y - geoTransform[3]
    if geoTransform[2] == 0 and geoTransform[4] == 0:
        # axis-aligned (no rotation/shear): simple per-axis scaling
        j = dx / geoTransform[1]
        i = dy / geoTransform[5]
    else:
        # general affine: invert the 2x2 linear part of the geotransform.
        # (The previous code divided the already-scaled offsets, which is
        # not an inverse and broke for rotated/sheared transforms.)
        det = geoTransform[1] * geoTransform[5] - geoTransform[2] * geoTransform[4]
        j = (dx * geoTransform[5] - dy * geoTransform[2]) / det
        i = (dy * geoTransform[1] - dx * geoTransform[4]) / det
    return i, j
def fk_plaintext_dict_from_db_record(session, element, db_record, excluded=None):
    """Return a dictionary of <name>:<value> for any <name>_Id that is a foreign key
    in the <element> table, excluding any foreign key in the list <excluded>"""
    fk_df = dbr.get_foreign_key_df(session, element)
    fk_dict = {}
    for name, row in fk_df.iterrows():
        # TODO normalize: do elts of <excluded> end in '_Id' or not?
        if excluded and (name in excluded or name[:-3] in excluded):
            continue
        # resolve the foreign-key id to its human-readable name
        fk_dict[name] = dbr.name_from_id(
            session, row['foreign_table_name'], db_record[name])
    return fk_dict
def checkmarkers(tubemarker, tubechecker):
    """Check that every required marker for a tube is present.

    The tube-specific markers that are to be merged are described in
    constants.py.

    Args:
        tubemarker: required markers for that tube
        tubechecker: markers in the given tube that needs to be validated

    Returns:
        True if all the required markers are found, False otherwise
    """
    required = [map_elements(m) for m in tubemarker]
    present = [map_elements(m) for m in tubechecker]
    for marker in required:
        if marker in present:
            continue
        # markers flagged as "empty" may be absent without failing the check
        if not any(empty_name in marker for empty_name in EMPTY_MARKER_NAMES):
            return False
    return True
def _in_Kexp(z):
""" Returns true if z is in the exponential cone """
alpha, beta, delta = z
if ((beta > 0) and (delta > 0)
and (np.log(delta) >= np.log(beta) + alpha / beta)) \
or ((alpha <= 0) and (np.abs(beta) < 1e-12) and (delta >= 0)):
return True
else:
return False | 19e57dbb10e420ead5ce02e9801cd1e087f3afad | 29,208 |
def split_blocks(bytestring, block_size):
    """Split ``bytestring`` into consecutive ``block_size``-sized blocks.

    Raises ValueError when len(bytestring) is not a multiple of block_size.
    """
    if block_size == 1:
        # single-byte blocks: yield each byte as a length-1 string
        return map(b_chr, bytearray(bytestring))
    if len(bytestring) % block_size != 0:
        raise ValueError("Input 'bytestring' must be a multiple of "
                         "block_size / segment_size (CFB mode) in length")
    n_blocks = len(bytestring) // block_size
    # lazy generator over the equal-sized slices
    return (
        bytestring[k * block_size:(k + 1) * block_size]
        for k in range(n_blocks))
def put_newest_oldest_files(f, author, path_earliest_latest, n_files, is_newest):
    """Write a report of the files `author` changed, ordered by change date.

    f: file handle to write to
    author: author name shown in the report header
    path_earliest_latest: {path: (earliest, latest)}
    n_files: Max number of files to write
    is_newest: Write newest commits if True, otherwise oldest commits
    """
    def emit(text):
        f.write('%s\n' % text)

    # newest report sorts on the earliest date; oldest report on the latest
    date_index = 0 if is_newest else 1

    def date_key(path):
        return path_earliest_latest[path][date_index]

    ordered = sorted(path_earliest_latest.keys(), key=date_key)
    if is_newest:
        # sort ascending then reverse (keeps the original tie ordering)
        ordered.reverse()
    emit('=' * 80)
    emit('%s: %d files' % (author, len(path_earliest_latest)))
    for rank, path in enumerate(ordered[:n_files]):
        earliest, latest = path_earliest_latest[path]
        emit('%3d: %s %s %s' % (rank, date_str(earliest), date_str(latest), path))
def pearson_correlation(trajectory_data):
    """ Calculates the Pearson Correlation Matrix for node pairs
    Usage: node_correlation, node_variance, node_average = pearson_correlation(trajectory_data)
    Arguments:
    trajectory_data: multidimensional numpy array; first index (rows) correspond to timestep, second index correspond to positions of each node;
    Returns:
    node_correlation: a nNodes x nNodes square matrix (numpy array) filled with the Pearson Correlation Coefficients for all node pairs
    node_variance: one dimensional numpy array containing the variances of the data
    node_average: one dimensional numpy array containing the averages of the data

    Side effects: writes four .dat files and three .png plots to the current
    working directory, and mutates ``trajectory_data`` in place (per-frame
    center-of-geometry removal).
    """
    # ----------------------------------------
    # CALCULATING THE AVERAGE OF TRAJECTORY DATA
    # ----------------------------------------
    nSteps = len(trajectory_data)
    nSteps_range = range(nSteps)
    nNodes = len(trajectory_data[0])
    nNodes_range = range(nNodes)
    for ts in nSteps_range:
        # removing center of geometry translational motion
        center_of_geometry = np.mean(trajectory_data[ts])
        trajectory_data[ts] -= center_of_geometry
    # no rotations to worry about...
    node_average = np.sum(trajectory_data,axis=0)/nSteps
    # ----------------------------------------
    # PREPARE NUMPY ARRAYS
    # ----------------------------------------
    node_variance = np.zeros(nNodes,dtype=np.float64)
    node_covariance = np.zeros((nNodes,nNodes),dtype=np.float64)
    node_correlation = np.zeros((nNodes,nNodes),dtype=np.float64)
    # ----------------------------------------
    # CALCULATING PEARSON CORRELATION COEFFICIENT MATRIX
    # ----------------------------------------
    # accumulate E[x^2] and E[xy] (upper triangle only; matrix is symmetric)
    for ts in nSteps_range:
        for i in nNodes_range:
            node_variance[i] += trajectory_data[ts,i]**2
            for j in nNodes_range[i:]:
                node_covariance[i,j] += trajectory_data[ts,i]*trajectory_data[ts,j]
    # convert raw second moments into variance: Var = E[x^2] - E[x]^2
    node_variance /= nSteps
    node_variance -= node_average**2
    node_covariance /= nSteps
    # NOTE(review): only the upper triangle of node_covariance is
    # mean-corrected below; the lower triangle saved to file remains the
    # raw E[xy] values -- confirm this is intended.
    for i in nNodes_range:
        for j in nNodes_range[i:]:
            node_covariance[i,j] -= node_average[i]*node_average[j]
            node_correlation[i,j] = node_covariance[i,j]/np.sqrt(node_variance[i]*node_variance[j])
            # mirror the correlation into the lower triangle
            node_correlation[j,i] = node_correlation[i,j]
    # ----------------------------------------
    # OUTPUT OF AVERAGE, VARIANCE, COVARIANCE, AND CORRELATION MATRICES
    # ----------------------------------------
    np.savetxt('node_positional_average.dat',node_average)
    np.savetxt('node_positional_variance.dat',node_variance)
    np.savetxt('node_positional_covariance.dat',node_covariance)
    np.savetxt('node_positional_correlation.dat',node_correlation)
    # ----------------------------------------
    # PLOTTING VARIANCE
    # ----------------------------------------
    plt.plot(nNodes_range,node_variance,'k')
    plt.xlabel('Node Index',size=14)
    # NOTE(review): label string is missing the closing '$' for the math
    # expression -- likely renders incorrectly; confirm intended.
    plt.ylabel(r'Node Variance ($\AA^{2})',size=14)
    plt.tight_layout()
    plt.savefig('node_positional_variance.png',dpi=600,transparent=True)
    plt.close()
    # ----------------------------------------
    # PLOTTING COVARIANCE
    # ----------------------------------------
    fig, ax = plt.subplots()
    temp = plt.pcolormesh(nNodes_range,nNodes_range,node_covariance,cmap='Blues')
    cb1 = plt.colorbar()
    # NOTE(review): '$AA^{2}$' is probably meant to be '$\AA^{2}$' (angstrom).
    cb1.set_label(r'Node-Node Covariance ($AA^{2}$)')
    # shift tick positions by half a cell so labels sit at cell centers
    xlabels = [str(int(x)) for x in temp.axes.get_xticks()[:]]
    ylabels = [str(int(y)) for y in temp.axes.get_yticks()[:]]
    temp.axes.set_xticks(temp.axes.get_xticks(minor=True)[:]+0.5,minor=True)
    temp.axes.set_xticks(temp.axes.get_xticks()[:]+0.5)
    temp.axes.set_yticks(temp.axes.get_yticks(minor=True)[:]+0.5,minor=True)
    temp.axes.set_yticks(temp.axes.get_yticks()[:]+0.5)
    temp.axes.set_xticklabels(xlabels)
    temp.axes.set_yticklabels(ylabels)
    plt.xlim((-0.5,nNodes+0.5))
    plt.ylim((-0.5,nNodes+0.5))
    plt.xlabel('Node Index',size=14)
    plt.ylabel('Node Index',size=14)
    ax.set_aspect('equal')
    plt.tight_layout()
    plt.savefig('node_positional_covariance.png',dpi=600,transparent=True)
    plt.close()
    # ----------------------------------------
    # PLOTTING CORRELATION
    # ----------------------------------------
    fig, ax = plt.subplots()
    temp = plt.pcolormesh(nNodes_range,nNodes_range,node_correlation,cmap='bwr',vmin=-1.0,vmax=1.0)
    cb1 = plt.colorbar()
    cb1.set_label('Node-Node Correlation')
    # same half-cell tick shift as the covariance plot
    xlabels = [str(int(x)) for x in temp.axes.get_xticks()[:]]
    ylabels = [str(int(y)) for y in temp.axes.get_yticks()[:]]
    temp.axes.set_xticks(temp.axes.get_xticks(minor=True)[:]+0.5,minor=True)
    temp.axes.set_xticks(temp.axes.get_xticks()[:]+0.5)
    temp.axes.set_yticks(temp.axes.get_yticks(minor=True)[:]+0.5,minor=True)
    temp.axes.set_yticks(temp.axes.get_yticks()[:]+0.5)
    temp.axes.set_xticklabels(xlabels)
    temp.axes.set_yticklabels(ylabels)
    plt.xlim((-0.5,nNodes+0.5))
    plt.ylim((-0.5,nNodes+0.5))
    plt.xlabel('Node Index',size=14)
    plt.ylabel('Node Index',size=14)
    ax.set_aspect('equal')
    plt.tight_layout()
    plt.savefig('node_positional_correlation.png',dpi=600,transparent=True)
    plt.close()
    return node_correlation, node_variance, node_average
import random
def dglstep(q, maxf=10000, replfunc=None):
    """Perform one random rewrite step on the differential equation ``q``.

    A randomly chosen function occurring in ``q`` is replaced either by a
    random prototype from the global ``pdgl`` pool, by the explicit
    ``replfunc`` when given, or by one of the equation's variables.

    :param q: equation object (supports listfunc/listvar/applyfunc/...)
    :param maxf: maximum number of functions allowed in the resulting equation
    :param replfunc: optional explicit replacement used instead of a random draw
    :returns: next dgl step, function that was replaced, with what
    """
    # choose rnd func to replace
    allfunc = q.listfunc()
    f = random.choice(list(allfunc))
    par = q.findparamforfunc(f)
    # choose replacement function
    global pdgl
    r = None
    shallreplace = True
    while r is None:
        r = random.choice(list(pdgl))
        if not replfunc is None:
            # explicit replacement requested: use it once, then fall back
            r = replfunc  # .copy()
            replfunc = None
        if r is None or (type(r) is str and r == "funcvar"):
            # "funcvar" marker: replace by an existing variable instead of a
            # prototype function; retry the draw when there are no variables
            pv = list(q.listvar())
            if len(pv) == 0:
                continue
            r = fmath.variable(random.choice(pv))
            shallreplace = False
        else:
            break
    # dont allow to many functions in the equation
    if len(allfunc) - 1 + len(r.listfunc()) > maxf:
        return dglstep(q, maxf)  # just retry
    # chance all the function names
    if shallreplace:
        # collect every name already in use so generated names are fresh
        inval1 = q.listall()
        inval2 = r.listall()
        inval = inval1 | inval2
        for var in r.listvar():
            nn = genname(inval)
            # does nothing to the parameters of protofunctions; this is needed
            # to keep the variable names consistent in applyfunc, but leads to
            # unused runs of this loop, since listvar still returns them
            r = r.applyvar(var, fmath.variable(nn))
            inval.add(nn)
            # should be able to remove the old name now, but this could lead
            # to weird bugs
            # inval.remove(var)
        for func in r.listfunc():
            nn = genname(inval)
            # print("before replace",r,func,nn)
            # enter the function parameters of the old function
            r = r.applyfunc(func, fmath.func(nn, *par))
            # print("after replace",r)
            inval.add(nn)
            # should be able to remove the old name now, but this could lead
            # to weird bugs
            # inval.remove(var)
    # set the parameters to match all parameters of the dgl (or partially I guess)
    # already done live
    # apply function, hope for good simplify
    ret = q.applyfunc(f, r)
    ret = ret.S()
    # needs some dgl specific simplification
    return ret, fmath.func(f, *par), r.copy()
import time
import re
import sys
def main():
    """ifstat main loop.

    Reads /proc/net/dev once and prints the in/out byte counters of every
    ethN interface as "net.<field>.<intf>.<dir> <ts> <value>" lines.
    """

    def direction(i):
        # /proc/net/dev columns: 0-7 are receive counters, 8-15 transmit
        return "out" if i >= 8 else "in"

    # We just care about ethN interfaces. We specifically
    # want to avoid bond interfaces, because interface
    # stats are still kept on the child interfaces when
    # you bond. By skipping bond we avoid double counting.
    ts = int(time.time())
    # context manager closes the file (the original leaked the handle)
    with open("/proc/net/dev", "r") as f_netdev:
        for line in f_netdev:
            m = re.match(r"\s+(eth\d+):(.*)", line)
            if not m:
                continue
            intf = m.group(1)
            stats = m.group(2).split(None)
            for i in range(16):  # was xrange: a NameError on Python 3
                # only emit the byte counters (columns 0 and 8) when non-zero
                if (stats[i] != "0") and (i == 0 or i == 8):
                    print("net.%s.%s.%s %d %s cf_datatype=counter"
                          % (FIELDS[i], intf, direction(i), ts, stats[i]))
    sys.stdout.flush()
from typing import Optional
def upper_band(df: 'Dataframe', metric_col: str, rolling_window: Optional[int] = 20) -> pd.DataFrame:
    """Calculates the upper Bollinger band of a stock's price movements.

    The band is the rolling simple moving average plus two rolling standard
    deviations.

    Args:
        df: the dataframe to append a column onto
        metric_col: the column to calculate over (usually the 'Close' price)
        rolling_window: the time window to calculate over

    Returns:
        the original dataframe with an 'upper_band' column appended

    **Example**

    .. code-block:: python

        from wsbtrading import maths
        df_mapped = maths.upper_band(df=df, metric_col='Close')
    """
    window_label = str(rolling_window)
    # make sure the rolling mean and stddev columns exist first
    df = sma(df=df, metric_col=metric_col, rolling_window=rolling_window)
    df = rolling_stddev(df=df, metric_col=metric_col, rolling_window=rolling_window)
    df['upper_band'] = (
        df[f'{window_label}sma'] + 2 * df[f'{window_label}stddev']
    )
    return df
import torch
def reparametisation_trick(mu, log_var, device):
    """Sample a latent variable via the reparametrisation trick.

    :param mu: The mean of the latent variable to be formed (nbatch, n_z)
    :param log_var: The log variance of the latent variable to be formed (nbatch, n_z)
    :param device: CPU or GPU
    :return: latent variable (nbatch, n_z)
    """
    target_device = torch.device(device)
    # epsilon ~ N(0, 1) with the same shape as the variance tensor
    eps = torch.normal(mean=0, std=1.0, size=log_var.shape).to(target_device)
    # std = exp(log_var / 2); z = mu + std * eps
    std = torch.exp(log_var / 2.0)
    return mu + std * eps
import re
def process_gaf_input(gaf_file_path, gaf_file_cache, ignore_cache, node_lengths, edge_lengths, num_jobs):
    """
    Process GAF read alignments in parallel and collect graph support data.

    The current implementation only works if the GAF file has an informative MAPQ column

    Returns (nodes_support, edges, issues, complete_stats).
    """
    segment_re = re.compile(SEGMENT_ORIENTATION_SYMBOLS)
    # bind the static arguments so pool workers only receive the record
    process_read_aln = fnt.partial(process_read_alignments, node_lengths, edge_lengths, segment_re)
    complete_stats = col.Counter()
    edges = []
    nodes_support = []
    issues = []
    with mp.Pool(num_jobs) as pool:
        # unordered: results are aggregated, so arrival order does not matter
        resit = pool.imap_unordered(process_read_aln, read_gaf_input(gaf_file_path, gaf_file_cache, ignore_cache))
        for node_support, edge_support, found_issues, stats in resit:
            complete_stats['reads'] += 1
            if complete_stats['reads'] % 10000 == 0:
                logger.info(f'Processed {complete_stats["reads"]} read alignment records')
            complete_stats['edges'] += len(edge_support)
            complete_stats['nodes_support'] += len(node_support)
            complete_stats['issues'] += len(found_issues)
            edges.extend(edge_support)
            nodes_support.extend(node_support)
            issues.extend(found_issues)
            complete_stats.update(stats)
    logger.debug('All GAF records processed')
    logger.debug(f'Total reads {complete_stats["reads"]} '
                 f'/ edges {complete_stats["edges"]} '
                 f'/ sequence support records {complete_stats["nodes_support"]}')
    return nodes_support, edges, issues, complete_stats
async def prefix_wrapper_async_callable(prefix_factory, re_flags, message):
    """
    Execute an asynchronous callable prefix and match it against a message.

    This function is a coroutine.

    Parameters
    ----------
    prefix_factory : `async-callable`
        Async callable returning the prefix.
    re_flags : `int`
        Regex matching flags.
    message : ``Message``
        The received message to parse the prefix from.

    Returns
    -------
    prefix : `None`, `str`
        The prefix used by the user. Returned as `None` of parsing failed.
    end : `int`
        The start of the content after the prefix. Returned as `-1` if parsing failed.
    """
    raw_prefix = await prefix_factory(message)
    # build a regex pattern from a single prefix or an alternation of several
    if isinstance(raw_prefix, str):
        pattern = re_escape(raw_prefix)
    elif isinstance(raw_prefix, tuple) and (len(raw_prefix) > 0):
        pattern = '|'.join(re_escape(part) for part in raw_prefix)
    else:
        return None, -1

    content = message.content
    if content is not None:
        matched = re_match(pattern, content, re_flags)
        if matched is not None:
            return matched.group(0), matched.end()
    return None, -1
def draw_mask(img0, img1, mask, size=14, downscale_ratio=1):
    """
    Args:
        img0: first color image.
        img1: second color image.
        mask: flat mask data holding two size x size masks back-to-back
            (first half for img0, second half for img1), e.g. 14x28 values
            when size=14.
        size: mask size (side length of each square mask).
        downscale_ratio: scale factor applied to both image dimensions.
    Returns:
        display: both resized images concatenated side by side, with
            channel 0 saturated wherever the hole-filled mask is set
            (presumably blue in OpenCV's BGR order -- confirm).
    """
    resize_imgs = []
    resize_imgs.append(cv2.resize(
        img0, (int(img0.shape[1] * downscale_ratio), int(img0.shape[0] * downscale_ratio))))
    resize_imgs.append(cv2.resize(
        img1, (int(img1.shape[1] * downscale_ratio), int(img1.shape[0] * downscale_ratio))))
    masks = []
    # fill enclosed holes so each mask is a solid region
    masks.append(ndimage.binary_fill_holes(np.reshape(mask[:size * size], (size, size))))
    masks.append(ndimage.binary_fill_holes(np.reshape(mask[size * size:], (size, size))))
    for idx, val in enumerate(masks):
        # pixels per mask cell in each direction
        h_interval = np.ceil(float(resize_imgs[idx].shape[0]) / val.shape[0])
        w_interval = np.ceil(float(resize_imgs[idx].shape[1]) / val.shape[1])
        for i in range(resize_imgs[idx].shape[0]):
            for j in range(resize_imgs[idx].shape[1]):
                # map the pixel back to its mask cell
                p = int(np.floor(i / h_interval))
                q = int(np.floor(j / w_interval))
                if val[p, q]:
                    resize_imgs[idx][i, j, 0] = 255
    display = np.concatenate(resize_imgs, axis=1)
    return display
def serialize(
    obj: object,
    worker: AbstractWorker = None,
    simplified: bool = False,
    force_full_simplification: bool = False,
) -> bin:
    """This method can serialize any object PySyft needs to send or store.

    This is the high level function for serializing any object or collection
    of objects which PySyft needs to send over the wire. It includes three
    steps, Simplify, Serialize, and Compress as described inline below.

    Args:
        obj (object): the object to be serialized
        worker (AbstractWorker): the worker performing the serialization;
            defaults to the hook's local worker when not given
        simplified (bool): in some cases we want to pass in data which has
            already been simplified - in which case we must skip double
            simplification - which would be bad.... so bad... so... so bad
        force_full_simplification (bool): Some objects are only partially serialized
            by default. For objects where this is the case, setting this flag to True
            will force the entire object to be serialized. For example, setting this
            flag to True will cause a VirtualWorker to be serialized WITH all of its
            tensors while by default VirtualWorker objects only serialize a small
            amount of metadata.

    Returns:
        binary: the serialized form of the object.
    """
    init_global_vars_msgpack()
    if worker is None:
        # TODO[jvmancuso]: This might be worth a standalone function.
        worker = syft.framework.hook.local_worker
    # two-stage msgpack pipeline: simplify to primitives, then pack to bytes
    simple_objects = _serialize_msgpack_simple(obj, worker, simplified, force_full_simplification)
    return _serialize_msgpack_binary(simple_objects)
def sine_data_generation(no, seq_len, dim):
    """Generate synthetic multivariate sine-wave time-series.

    Args:
      - no: the number of samples
      - seq_len: sequence length of the time-series
      - dim: feature dimensions

    Returns:
      - data: generated data, a list of (seq_len, dim) arrays scaled to [0, 1]
    """
    data = []
    for _ in range(no):
        channels = []
        for _ in range(dim):
            # random frequency and phase per feature
            freq = np.random.uniform(0, 0.1)
            phase = np.random.uniform(0, 0.1)
            channels.append([np.sin(freq * t + phase) for t in range(seq_len)])
        # (dim, seq_len) -> (seq_len, dim)
        series = np.transpose(np.asarray(channels))
        # rescale from [-1, 1] to [0, 1]
        data.append((series + 1) * 0.5)
    return data
def apply_wilcoxon_test(wide_optimal, dep_var, OVRS_NAMES, alpha):
    """Run a Wilcoxon signed-rank test of each oversampler against ``dep_var``.

    Returns a dataframe with one row per oversampler holding its p-value and
    whether the paired difference is significant at level ``alpha``.
    """
    pvalues = []
    for oversampler in OVRS_NAMES:
        # paired test between the oversampler's scores and the reference column
        test_result = wilcoxon(wide_optimal[oversampler], wide_optimal[dep_var])
        pvalues.append(test_result.pvalue)
    return pd.DataFrame(
        {
            "Oversampler": OVRS_NAMES,
            "p-value": pvalues,
            "Significance": np.array(pvalues) < alpha,
        }
    )
import torch
def get_accuracy(targets, outputs, k=1, ignore_index=None):
    """ Compute top-k accuracy between a target vector and predictions.

    Args:
        targets (1 - 2D :class:`torch.Tensor`): Target or true vector against
            which to measure accuracy
        outputs (1 - 3D :class:`torch.Tensor`): Prediction or output vector
        k (int, optional): Consider the top ``k`` predictions per sample.
        ignore_index (int, optional): Specifies a target index that is ignored

    Returns:
        :class:`tuple` of accuracy (:class:`float`), number correct
        (:class:`int`) and total (:class:`int`), e.g.
        ``get_accuracy(torch.LongTensor([1, 2, 3]), torch.LongTensor([1, 2, 2]))``
        gives ``(0.666..., 2, 3)``.
    """
    correct = 0.0
    for target, output in zip(targets, outputs):
        # normalise bare python numbers / scalar tensors into tensors
        if not torch.is_tensor(target) or is_scalar(target):
            target = torch.LongTensor([target])
        if not torch.is_tensor(output) or is_scalar(output):
            output = torch.LongTensor([[output]])
        # top candidates along dim 0 (cap k by the number of predictions)
        candidates = output.topk(k=min(k, len(output)), dim=0)[0]
        for candidate in candidates:
            if torch_equals_ignore_index(
                    target.squeeze(), candidate.squeeze(), ignore_index=ignore_index):
                correct += 1
                break
    return correct / len(targets), int(correct), len(targets)
def cameraPs2Ts(cameraPOs):
    """
    Convert multiple camera projection matrices (POs) to camera centers (Ts).
    ----------
    input:
        cameraPOs: list / numpy
    output:
        cameraTs: list / numpy
    """
    # preserve the container type of the input: list in -> list out
    wants_list = type(cameraPOs) is list
    centers = [__cameraP2T__(po) for po in cameraPOs]
    return centers if wants_list else np.stack(centers)
def dimerization_worker(primer_1, primer_2):
    """ Score base-pair complementarity between two primers.

    Returns the total number of complementary bases and the longest run of
    complementary bases (both weighted by HYBRID_SCORES), the median score of
    all runs, and the per-position array of complementarity scores.
    """
    base_sets_1 = [set(AMB[ch]) for ch in primer_1]
    base_sets_2 = [set(AMB[ch]) for ch in primer_2]
    # per-position score: best hybridisation score among shared bases, 0 if none
    complementary = [
        max([HYBRID_SCORES[base] for base in s1.intersection(s2)], default=0)
        for s1, s2 in zip(base_sets_1, base_sets_2)]
    total = sum(complementary)
    # split the score array into maximal runs of complementary / non-complementary
    runs = [list(group) for _, group in groupby(complementary, key=lambda v: v > 0)]
    max_run = sum(max(runs, key=sum))
    run_scores = [sum(run) for run in runs if sum(run) > 0]
    if run_scores:
        # append a zero so the median reflects a trailing empty run
        run_scores.append(0)
    median = np.median(run_scores) if run_scores else 0
    return (total, max_run, median, complementary)
from skimage import img_as_ubyte
def load_dataset(ds, elsize=[], axlab='',
                 outlayout='', dtype='',
                 dataslices=None, uint8conv=False):
    """Load data from a proxy and select/transpose/convert it.

    ds: dataset proxy to slice from (presumably h5py-like -- confirm)
    elsize: per-axis element sizes, reordered along with the data
        (NOTE(review): mutable default is safe here since it is never
        mutated, but a tuple default would be cleaner)
    axlab: axis labels of the input layout, e.g. 'zyx'
    outlayout: desired axis label order; a transpose happens when it differs
    dtype: optional dtype name to cast the selection to ('' = no cast)
    dataslices: per-axis slice specification passed to get_slice_objects
    uint8conv: when True, normalize the data and convert it to uint8

    Returns (data, elsize, axlab, slices).
    """
    slices = get_slice_objects(dataslices, ds.shape)
    data = slice_dataset(ds, slices)
    if list(axlab) != list(outlayout):
        # permutation that maps input axis order to the requested order
        in2out = [axlab.index(l) for l in outlayout]
        data = np.transpose(data, in2out)
        elsize = np.array(elsize)[in2out]
        axlab = outlayout
        slices = [slices[i] for i in in2out]
    if dtype:
        data = data.astype(dtype, copy=False)
    if uint8conv:
        data = normalize_data(data)[0]
        data = img_as_ubyte(data)
    return data, elsize, axlab, slices
import copy
def _interpret_err_lines(err_specs, ncols, names=None):
"""Give list of column names from the READ SERR and TERR commands
Parameters
----------
err_specs : dict
``{'serr': [n0, n1, ...], 'terr': [n2, n3, ...]}``
Error specifications for symmetric and two-sided errors
ncols : int
Number of data columns
Other parameters
----------------
names : list of str
Name of data columns (defaults to ['col1', 'col2', ...]), _not_
including error columns.
Returns
-------
colnames : list
List containing the column names. Error columns will have the name
of the main column plus ``_err`` for symmetric errors, and ``_perr``
and ``_nerr`` for positive and negative errors respectively
Examples
--------
>>> col_in = ['MJD', 'Rate']
>>> cols = _interpret_err_lines(None, 2, names=col_in)
>>> cols[0]
'MJD'
>>> err_specs = {'terr': [1], 'serr': [2]}
>>> ncols = 5
>>> cols = _interpret_err_lines(err_specs, ncols, names=col_in)
>>> cols[0]
'MJD'
>>> cols[2]
'MJD_nerr'
>>> cols[4]
'Rate_err'
>>> _interpret_err_lines(err_specs, 6, names=col_in)
Traceback (most recent call last):
...
ValueError: Inconsistent number of input colnames
"""
colnames = ["" for i in range(ncols)]
if err_specs is None:
serr_cols = terr_cols = []
else:
# I don't want to empty the original one when using `pop` below
err_specs = copy.deepcopy(err_specs)
serr_cols = err_specs.pop("serr", [])
terr_cols = err_specs.pop("terr", [])
if names is not None:
all_error_cols = len(serr_cols) + len(terr_cols) * 2
if all_error_cols + len(names) != ncols:
raise ValueError("Inconsistent number of input colnames")
shift = 0
for i in range(ncols):
col_num = i + 1 - shift
if colnames[i] != "":
continue
colname_root = f"col{col_num}"
if names is not None:
colname_root = names[col_num - 1]
colnames[i] = f"{colname_root}"
if col_num in serr_cols:
colnames[i + 1] = f"{colname_root}_err"
shift += 1
continue
if col_num in terr_cols:
colnames[i + 1] = f"{colname_root}_perr"
colnames[i + 2] = f"{colname_root}_nerr"
shift += 2
continue
assert not np.any([c == "" for c in colnames])
return colnames | a3ba0960a3711b30c46e8fc75237786d5297f5eb | 29,226 |
def mro_hasattr(cls: type, attr: str) -> bool:
    """Check whether any *ancestor* of a type defines an attribute.

    The type itself is skipped; only classes further along the MRO are
    inspected.

    Args:
        cls (type): The type whose ancestry is searched.
        attr (str): The attribute name.

    Returns:
        bool: True if an ancestor has the attribute.

    Raises:
        TypeError: Not called on a type.
    """
    if not isinstance(cls, type):
        raise TypeError(f"mro_getattr can only be used on types, got {type(cls)}")
    # mro()[0] is always cls itself, so start the scan at index 1
    return any(hasattr(ancestor, attr) for ancestor in cls.mro()[1:])
def get_channel_number_from_frequency(frequency):
    """gets the 802.11 channel for a corresponding frequency
    in units of kilohertz (kHz). does not support FHSS.

    Returns "Unknown" for frequencies not present in the lookup table.
    """
    # dict.get never raises KeyError, so the previous try/except was dead
    # code; the default argument already covers unmapped frequencies.
    return _20MHZ_CHANNEL_LIST.get(frequency, "Unknown")
import sys
def diurnal_cycle(data, var, stat, stat_config):
    """
    Calculate diurnal cycle of ``var`` in ``data``.

    ``stat_config[stat]['dcycle stat']`` selects the flavour:
    'amount' computes a per-hour statistic (mean-like method, percentile,
    or pdf) and 'frequency' counts above-threshold samples per hour.
    Returns an xarray Dataset selected at the configured hours; exits the
    process on an unknown dcycle stat.
    """
    # Type of diurnal cycle; amount or frequency
    dcycle_stat = stat_config[stat]['dcycle stat']
    # Threshold; must be defined for frequency
    in_thr = stat_config[stat]['thr']
    if in_thr is not None:
        if var in in_thr:
            thr = in_thr[var]
            # Mask out sub-threshold values before any aggregation.
            data = data.where(data[var] >= thr)
        else:
            thr = None
    else:
        thr = in_thr
    # Check time stamps in data
    data = _check_hours(data)
    if dcycle_stat == 'amount':
        tstat = stat_config[stat]['stat method']
        if 'percentile' in tstat:
            # Expected form: 'percentile <q>', e.g. 'percentile 95'.
            q = tstat.partition(' ')[2]
            errmsg = ("Make sure percentile(s) in stat method is given "
                      "correctly; i.e. with a white space e.g. "
                      "'percentile 95'")
            if not q:
                raise ValueError(errmsg)
            else:
                q = float(q)
            # Per-hour percentile over the 'time' core dim, dask-parallel.
            dc_pctls = xa.apply_ufunc(
                _percentile_func, data[var].groupby('time.hour'),
                input_core_dims=[['time']],
                dask='parallelized', output_dtypes=[float],
                kwargs={'q': q, 'axis': -1, 'thr': thr})
            dcycle = dc_pctls.to_dataset()
        elif 'pdf' in tstat:
            # Bins
            assert 'bins' in stat_config[stat]['method kwargs'],\
                "\n\tBins are missing in 'method kwargs'!\n"
            bin_r = stat_config[stat]['method kwargs']['bins']
            bins = np.arange(bin_r[0], bin_r[1], bin_r[2])
            lbins = bins.size - 1
            dc_pdf = xa.apply_ufunc(
                _pdf_calc, data[var].groupby('time.hour'),
                input_core_dims=[['time']], output_core_dims=[['bins']],
                dask='parallelized', output_dtypes=[float],
                dask_gufunc_kwargs={'output_sizes': {'bins': lbins+1}},
                kwargs={
                    'keepdims': True, 'bins': bins, 'axis': -1, 'thr': thr})
            dims = list(dc_pdf.dims)
            # NOTE(review): assumes dims[0]/dims[1] are the two remaining
            # (spatial) dimensions of the field -- confirm against
            # _pdf_calc's output layout.
            dcycle = dc_pdf.to_dataset().assign_coords(
                {'bins': bins}).transpose('bins', 'hour', dims[0], dims[1])
        else:
            # NOTE(review): 'stat method' comes from configuration; eval on
            # an untrusted config string would execute arbitrary code.
            dcycle = eval("data.groupby('time.hour').{}('time')".format(tstat))
        statnm = "Amount | stat: {} | thr: {}".format(tstat, thr)
    elif dcycle_stat == 'frequency':
        errmsg = "For frequency analysis, a threshold ('thr') must be set!"
        assert thr is not None, errmsg
        # Count of above-threshold (non-masked) samples per hour of day.
        dcycle = data.groupby('time.hour').count('time')
        totdays = np.array([(data['time.hour'].values == h).sum()
                            for h in np.arange(24)])
        statnm = "Frequency | stat: counts | thr: {}".format(thr)
    else:
        print("Unknown configured diurnal cycle stat: {}".format(dcycle_stat))
        sys.exit()
    # Single chunk along 'hour' so later operations see the full cycle.
    dcycle = dcycle.chunk({'hour': -1})
    _hrs = stat_config[stat]['hours']
    hrs = _hrs if _hrs is not None else dcycle.hour
    st_data = dcycle.sel(hour=hrs)
    if dcycle_stat == 'frequency':
        st_data = st_data.assign({'ndays_per_hour': ('nday', totdays)})
    st_data.attrs['Description'] =\
        "Diurnal cycle | {}".format(statnm)
    return st_data
def fetch_single_minutely_equity(code, start, end):
    """
    Read minute-level trade detail data for a single stock over a period
    from the local database.

    **Note**

        Trading-calendar minutes run 9:31~11:30 and 13:01~15:00.
        In the database, minute-level trade data is stored per trading date.

    Parameters
    ----------
    code : str
        Stock code to fetch data for.
    start_date : datetime-like
        Start date (inclusive).
    end_date : datetime-like
        End date.

    return
    ----------
    DataFrame: DataFrame with OHLCV columns.

    Examples
    --------
    >>> stock_code = '000333'
    >>> start = '2020-06-29'
    >>> end = pd.Timestamp('2020-06-30')
    >>> df = fetch_single_minutely_equity(stock_code, start, end)
    >>> df.tail()
                   close   high    low   open  volume
    2018-04-19 14:56:00  51.55  51.56  51.50  51.55  376400
    2018-04-19 14:57:00  51.55  51.55  51.55  51.55   20000
    2018-04-19 14:58:00  51.55  51.55  51.55  51.55       0
    2018-04-19 14:59:00  51.55  51.55  51.55  51.55       0
    2018-04-19 15:00:00  51.57  51.57  51.57  51.57  353900
    """
    calendar = get_calendar('XSHG')
    fmt = r"%Y-%m-%d"
    # Trading sessions in range, made naive (drop timezone info).
    dates = calendar.sessions_in_range(
        start.strftime(fmt), end.strftime(fmt)).tz_localize(None)
    # NOTE(review): cols is currently unused in this function.
    cols = ['open', 'high', 'low', 'close', 'volume']
    # Index (as opposed to stock) minute-level data: 7-character codes.
    if len(code) == 7:
        return _index_minute_data(code, dates)
    db = get_db('wy_quotes')
    func = partial(_fetch_single_minutely_equity,
                   stock_code=code, db=db, is_index=False)
    # Data is stored per date, so fetch one day per worker in parallel.
    with ThreadPoolExecutor(MAX_WORKER) as executor:
        dfs = executor.map(func, dates)
    return pd.concat(dfs).sort_index()
import os
def aggregate_components_from_owners(root):
    """Walk *root* and parse every OWNERS file for team and component tags.

    Args:
        root (str): the path to the src directory.

    Returns:
        A triple (data, warnings, errors) where data is a dict of the form
        {'component-to-team': {'Component1': 'team1@chr...', ...},
         'dir-to-component': {'/path/to/1': 'Component1', ...}},
        warnings is a list of strings for OWNERS files lacking a COMPONENT
        tag, and errors comes from the one-team-per-component validation.
    """
    warnings = []
    component_to_team = defaultdict(set)
    dir_to_component = {}
    for current_dir, _, filenames in os.walk(root):
        # Proofing against windows casing oddities.
        owners_candidates = [name for name in filenames if name.upper() == 'OWNERS']
        if not owners_candidates:
            continue
        owners_full_path = os.path.join(current_dir, owners_candidates[0])
        owners_rel_path = os.path.relpath(owners_full_path, root)
        team, component = parse(owners_full_path)
        if not component:
            warnings.append('%s has no COMPONENT tag' % owners_rel_path)
            continue
        dir_to_component[os.path.relpath(current_dir, root)] = component
        if team:
            component_to_team[component].add(team)
    mappings = {'component-to-team': component_to_team,
                'dir-to-component': dir_to_component}
    errors = validate_one_team_per_component(mappings)
    return unwrap(mappings), warnings, errors
def method2():
    """Return the constant 7.

    Serves as an example of a docstring kept within line-length limits.
    """
    answer = 7
    return answer
def get_parent_compartment_ocid(teamname):
    """
    Retrieves the OCID for the compartment based on the team name (assuming the model of root -- team -- individual
    structure of compartments.

    Args:
        teamname (str): name of the team level compartment; when None the
            tenancy root OCID from the configuration is used instead.

    Returns:
        str: The OCId or None - None is only returned in the event of an internal error
    """
    global logger
    parent_compartment_ocid = None
    try:
        if teamname is not None:  # identity test is the idiomatic None check
            parent_compartment_ocid = find(teamname, QRY_CONST.COMPARTMENT)
            if parent_compartment_ocid is None:
                raise LookupError("No compartment found")
        else:
            parent_compartment_ocid = config_props[CONFIG_CONST.TENANCY]
    except LookupError as le:
        # Pass the exception via the logging format args; previously it was
        # passed as an extra positional arg with no placeholder, which
        # corrupts the log record instead of printing the cause.
        logger.error("Compartment lookup failed: %s", le)
    return parent_compartment_ocid
import os
def list_files_path(path):
    """
    List files from a path.

    :param path: Folder path
    :type path: str
    :return: A list containing all files in the folder
    :rtype: List
    """
    # Bug fix: returned paths were built with ``path + f``, which yields
    # broken paths when ``path`` lacks a trailing separator, while the
    # isfile filter already used os.path.join. Join consistently for both.
    return sorted_alphanumeric(
        [os.path.join(path, f) for f in os.listdir(path)
         if os.path.isfile(os.path.join(path, f))])
def set_recommended_watch_points(session_id):
    """Set recommended watch points for the given debugger session."""
    body = _read_post_request(request)
    payload = body.get('requestBody')
    if payload is None:
        raise ParamMissError('requestBody')
    session = _session_manager.get_session(session_id)
    return _wrap_reply(session.set_recommended_watch_points,
                       payload.get('set_recommended'))
def epochJulian2JD(Jepoch):
    """Convert a Julian epoch to a Julian date.

    Inverse of JD2epochJulian; e.g. 1983.99863107 converts into 2445700.5.

    :param Jepoch: Julian epoch (nnnn.nn)
    :return: Julian date (see JD2epochJulian for the reference)
    """
    years_from_j2000 = Jepoch - 2000.0
    return years_from_j2000 * 365.25 + 2451545.0
import os
def _isfile(path):
"""Variant of os.path.isfile that is somewhat type-resilient."""
if not path:
return False
return os.path.isfile(path) | 1e5c6e993008b7256c22fe38af174fe87fd01d20 | 29,237 |
def GetHighlightColour():
    """
    Gets the default highlight color.

    On Mac builds with Carbon available, the native focus-highlight theme
    brush/colour is used (API differs before and after wx 2.9); on all
    other platforms the system text highlight colour is returned.

    :rtype: :class:`wx.Colour`
    """
    if wx.Platform == '__WXMAC__':
        if CARBON:
            if wx.VERSION < (2, 9, 0, 0, ''):
                # kThemeBrushButtonPressedLightHighlight
                # Pre-2.9 wx: extract the theme colour via a themed brush.
                brush = wx.Brush(wx.BLACK)
                brush.MacSetTheme(Carbon.Appearance.kThemeBrushFocusHighlight)
                return brush.GetColour()
            else:
                # wx >= 2.9 exposes the theme colour directly.
                color = wx.MacThemeColour(Carbon.Appearance.kThemeBrushFocusHighlight)
                return color
    # Fallback to text highlight color
    return wx.SystemSettings.GetColour(wx.SYS_COLOUR_HIGHLIGHT)
import os
def video_to_frames(width, height, video_path, frames_dir, overwrite=False, every=1):
    """
    Extracts the frames from a video
    :param width: width of input video
    :param height: height of input video
    :param video_path: path to the video
    :param frames_dir: directory to save the frames
    :param overwrite: overwrite frames if they exist?
        NOTE(review): currently accepted but never used below -- confirm
        whether extract_frames is meant to receive it.
    :param every: extract every this many frames
    :return: path to the directory where the frames were saved, or None if fails
        NOTE(review): this implementation never returns None itself.
    """
    video_path = os.path.normpath(video_path)  # make the paths OS (Windows) compatible
    frames_dir = os.path.normpath(frames_dir)  # make the paths OS (Windows) compatible
    video_dir, video_filename = os.path.split(video_path)  # get the video path and filename from the path
    video_filename = video_filename[:-4]  # remove end tag
    # make directory to save frames, its a sub dir in the frames_dir with the video name
    os.makedirs(frames_dir, exist_ok=True)
    print("Extracting frames from {}".format(video_filename))
    # NOTE(review): extraction starts at hard-coded frame 25 -- confirm intent.
    extract_frames(width, height, video_filename, video_path, frames_dir, start=25, every=every)  # let's now extract the frames
    return os.path.join(frames_dir, video_filename)
import configparser
def ignores(path):
    """Pull the flake8 ignores out of the tox file"""
    toxini = path + "/tox.ini"
    LOG.debug("Tox %s\n" % toxini)
    config = configparser.ConfigParser()
    config.read(toxini)
    wanted = ('ignore', 'import-order-style',
              'application-import-names', 'max-line-length')
    # Keep only the flake8 options that are actually present in the file.
    return {option: config.get('flake8', option)
            for option in wanted if config.has_option('flake8', option)}
import inspect
def get_classes(mod):
    """Return the names of all capitalized classes defined in module 'mod'."""
    class_names = []
    for name, _ in inspect.getmembers(mod, inspect.isclass):
        if name[0].isupper():
            class_names.append(name)
    return class_names
import struct
def ustring_to_string(ptr, length=None):
    """Convert a pointer to UTF-16 data into a Python string encoded with utf-8.
    ptr and length are both gdb.Value objects.
    If length is unspecified, will guess at the length (guess_string_length
    may also produce a diagnostic message that is appended to the result)."""
    error_message = ''
    if length is None:
        length, error_message = guess_string_length(ptr)
    else:
        length = int(length)
    # Read `length` 16-bit code units one at a time from the debuggee.
    char_vals = [int((ptr + i).dereference()) for i in range(length)]
    # NOTE(review): Python 2 idiom -- under Python 3, .encode() yields bytes
    # and the `string + error_message` concatenation below would raise.
    string = struct.pack('H' * length, *char_vals).decode('utf-16', 'replace').encode('utf-8')
    return string + error_message
import logging
def handle_outgoing(msg):
    """
    Should return a requeue flag, so if it returns True, the message will be
    requeued and processed again immediately, and if it returns False, it will
    not be queued again.

    Applies optional per-backend rate limiting (redis lock held for the
    backend's send interval) and optional load balancing across the
    backend's phone numbers.
    """
    def onerror():
        logging.exception("Exception while processing SMS %s" % msg._id)
    backend = msg.outbound_backend
    sms_interval = backend.get_sms_interval()
    use_rate_limit = sms_interval is not None
    use_load_balancing = (isinstance(backend, SMSLoadBalancingMixin) and
                          len(backend.phone_numbers) > 1)
    if use_rate_limit or use_load_balancing:
        client = cache_core.get_redis_client()
    lbi = None
    orig_phone_number = None
    if use_load_balancing:
        # Pick the next outbound phone number from the balancer state.
        lbi = backend.get_next_phone_number(client)
        orig_phone_number = lbi.phone_number
    elif (isinstance(backend, SMSLoadBalancingMixin) and
            len(backend.phone_numbers) == 1):
        # If there's only one phone number, we don't need to go through the
        # load balancing algorithm. But we should always pass an
        # orig_phone_number if it's an instance of SMSLoadBalancingMixin.
        orig_phone_number = backend.phone_numbers[0]
    if use_rate_limit:
        if use_load_balancing:
            lock_key = "sms-backend-%s-rate-limit-phone-%s" % (backend._id,
                lbi.phone_number)
        else:
            lock_key = "sms-backend-%s-rate-limit" % backend._id
        # Serialize sends per backend (or per backend+number) via redis lock.
        lock = client.lock(lock_key, timeout=30)
    if not use_rate_limit or (use_rate_limit and lock.acquire(blocking=False)):
        if use_load_balancing:
            lbi.finish(save_stats=True)
        result = send_message_via_backend(msg, backend=backend,
            orig_phone_number=orig_phone_number, onerror=onerror)
        if use_rate_limit:
            # Hold the lock for the backend's minimum interval between sends.
            wait_and_release_lock(lock, sms_interval)
        if result:
            handle_successful_processing_attempt(msg)
        else:
            handle_unsuccessful_processing_attempt(msg)
        return False
    else:
        # We're using rate limiting, but couldn't acquire the lock, so
        # another thread is sending sms with this backend. Rather than wait,
        # we'll just put this message at the back of the queue.
        if use_load_balancing:
            lbi.finish(save_stats=False)
        return True
def add_entry(ynew: float, s: float, s2: float, n: int, calc_var: bool):
    """Fold a new sample into running sum statistics.

    s is the previous sum of y[], s2 the previous sum of y[]*y[], and n
    the previous number of entries. Returns the updated (s, s2, n) plus
    the population variance (sigma**2) when calc_var is True, else None.
    """
    n += 1
    s += ynew
    s2 += ynew * ynew
    var = (s2 - s * s / n) / n if calc_var else None
    return s, s2, n, var
def get_conversion_dict(conversion_name):
    """Retrieve a hard-coded label conversion dictionary by keyword.

    Used when coarsening a task's label set with a predefined scheme, e.g.
    Penn Treebank tags to Universal PoS tags, or coarse word-sense labels.
    Raises ValueError for an unknown conversion name.
    """
    if conversion_name == PTB_UNIVERSAL_CONVERSION_STRING:
        return ptb_to_univ_map
    if conversion_name == WSD_COARSENING_CONVERSION_STRING:
        return coarse_wsd_map
    raise ValueError("Unknown conversion name: {}".format(conversion_name))
import scipy
def discount_cumsum(x, discount):
    """Discounted cumulative sum along the first axis (rllab trick).

    Given [x0, x1, x2] the result is
    [x0 + discount*x1 + discount^2*x2,
     x1 + discount*x2,
     x2].
    """
    # Running an IIR filter over the reversed signal accumulates the
    # discounted tail sums; reversing back restores the original order.
    reversed_x = x[::-1]
    filtered = scipy.signal.lfilter([1], [1, float(-discount)], reversed_x, axis=0)
    return filtered[::-1]
def compute_rgb_scales(alpha_thres=0.9):
    """Computes RGB scales that match predicted albedo to ground truth,
    using just the first validation view.

    Reads the latest validation epoch's first batch, loads the predicted
    and ground-truth albedo images, and fits one least-squares scale per
    colour channel over foreground pixels (alpha > alpha_thres), all in
    linear space. Returns a length-3 float32 TF tensor of scales.
    """
    config_ini = configutil.get_config_ini(FLAGS.ckpt)
    config = ioutil.read_config(config_ini)
    # First validation view
    vali_dir = join(config_ini[:-4], 'vis_vali')
    data_root = config.get('DEFAULT', 'data_root')
    epoch_dirs = xm.os.sortglob(vali_dir, 'epoch?????????')
    epoch_dir = epoch_dirs[-1]
    batch_dirs = xm.os.sortglob(epoch_dir, 'batch?????????')
    batch_dir = batch_dirs[0]
    # Find GT path
    metadata_path = join(batch_dir, 'metadata.json')
    metadata = xm.io.json.load(metadata_path)
    view = metadata['id']
    pred_path = join(batch_dir, 'pred_albedo.png')
    gt_path = join(data_root, view, 'albedo.png')
    # Load prediction and GT
    pred = xm.io.img.read(pred_path)  # gamma corrected
    gt = xm.io.img.read(gt_path)  # linear
    pred = xm.img.normalize_uint(pred)
    gt = xm.img.normalize_uint(gt)
    pred = pred ** 2.2  # undo gamma
    gt = xm.img.resize(gt, new_h=pred.shape[0], method='tf')
    # NOTE(review): assumes GT carries an alpha channel in channel 3.
    alpha = gt[:, :, 3]
    gt = gt[:, :, :3]
    # Compute color correction scales, in the linear space
    is_fg = alpha > alpha_thres
    opt_scale = []
    for i in range(3):
        x_hat = pred[:, :, i][is_fg]
        x = gt[:, :, i][is_fg]
        # Closed-form least squares: argmin_s ||s*x_hat - x||^2.
        scale = x_hat.dot(x) / x_hat.dot(x_hat)
        opt_scale.append(scale)
    opt_scale = tf.convert_to_tensor(opt_scale, dtype=tf.float32)
    return opt_scale
def pick_pareto_front(points, keep_equal=False):
    """Select the Pareto-optimal rows of an (n, d) objective array.

    All objectives are treated as minimisation objectives. When
    keep_equal is False, equal points are pruned so only one of them
    survives. Returns an (m, d) array of the m Pareto-optimal points.
    """
    mask = flag_pareto_front(points, keep_equal)
    return points[mask]
import csv
def get_market_conditions(filename):
    """
    Build the market condition table from the scenario CSV and the global
    risk factor definitions.
    """
    market_conditions = []
    print("Generate conditions")
    # Read the scenario file row by row.
    with open(filename) as csvfile:
        for fields in csv.reader(csvfile, delimiter=','):
            # Rows with 14 or fewer columns carry no stress information.
            if len(fields) <= 14:
                continue
            risk_factor_name = fields[5]
            stress_shift = fields[13]
            # Match the row against every known risk factor definition and
            # keep only non-trivial stress shifts.
            for factor in riskfactors:
                for key in factor:
                    if risk_factor_name == key and stress_shift != '1':
                        market_conditions.append(
                            {"Risk_factor": factor[key],
                             "Stress_shift": stress_shift})
    return market_conditions
def PaperSize(s):
    """Return a (width, height) tuple for a paper format string.

    A '-L' suffix selects landscape (e.g. 'A4-L' gives (842, 595)); a
    '-P' suffix or no suffix selects portrait. Unknown formats yield
    (-1, -1).
    """
    size = s.lower()
    orientation = "p"
    if size.endswith("-l"):
        orientation = "l"
        size = size[:-2]
    if size.endswith("-p"):
        size = size[:-2]
    width, height = paperSizes.get(size, (-1, -1))
    return (height, width) if orientation == "l" else (width, height)
def clean_detections(npts, on_off):
    """Remove spurious seismic detections occurring within a window
    following a kept detection.

    Parameters
    ----------
    npts : int
        Length of the suppression window in data samples.
    on_off : array
        (n, 2) array of on/off indexes for detections.

    Returns
    -------
    array
        Corrected (m, 2) array of on/off indexes.
    """
    onsets = on_off[:, 0]
    offsets = on_off[:, 1]
    # Always keep the first detection, then only those starting more than
    # npts samples after the last kept onset.
    kept_on = [onsets[0]]
    kept_off = [offsets[0]]
    last_kept = onsets[0]
    for onset, offset in zip(onsets, offsets):
        if onset > last_kept + npts:
            kept_on.append(onset)
            kept_off.append(offset)
            last_kept = onset
    return np.asarray((kept_on, kept_off)).T
def register_validator(fn):
    """Decorator that records *fn* in ckanext.scheming's all_validators
    registry under its own function name, returning it unchanged."""
    all_validators.update({fn.__name__: fn})
    return fn
def save_config():
    """
    Persist the host ID start value to config.ini.

    :return: Completed save (always True on success).
    """
    global host_id_start
    # Ensure the section exists before writing; it may already be there.
    try:
        Config.add_section('Host ID')
    except ConfigParser.DuplicateSectionError:
        # File already exists.
        pass
    Config.set('Host ID', 'start', host_id_start)
    # Bug fix: the Python-2-only ``except Err, e:`` syntax above was replaced
    # with modern syntax, and the file handle is now closed even when the
    # write raises (previously it leaked on any exception).
    with open("config.ini", 'w') as cfgfile:
        Config.write(cfgfile)
    return True
def encode_data_image_to_16bit(data_image: np.ndarray, max_data_value: int = 120) -> np.ndarray:
    """
    this method sets all data values above max_data_value to zero, scales it by the max_data_value
    and rescales the depth image to the uint16 range.

    :param data_image: input depth/data image (not modified)
    :param max_data_value: values above this are zeroed before scaling
    :return: image data in 16-bit format
    """
    # Bug fix: work on a copy -- the original implementation mutated the
    # caller's array in place via the masked assignment below.
    data_image = data_image.copy()
    # only consider depth values within max distance and normalize on that
    data_image[data_image > max_data_value] = 0
    data_image = data_image / max_data_value
    # scale depth image to uint16 range
    data_image = data_image * np.iinfo(np.uint16).max
    return data_image.astype(np.uint16)
from typing import Optional
from typing import Mapping
def get_database(name: Optional[str] = None,
                 resource_group_name: Optional[str] = None,
                 server_name: Optional[str] = None,
                 tags: Optional[Mapping[str, str]] = None,
                 opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDatabaseResult:
    """
    Use this data source to access information about an existing SQL Azure Database.
    ## Example Usage
    ```python
    import pulumi
    import pulumi_azure as azure
    example = azure.sql.get_database(name="example_db",
        server_name="example_db_server",
        resource_group_name="example-resources")
    pulumi.export("sqlDatabaseId", example.id)
    ```
    :param str name: The name of the SQL Database.
    :param str resource_group_name: Specifies the name of the Resource Group where the Azure SQL Database exists.
    :param str server_name: The name of the SQL Server.
    :param Mapping[str, str] tags: A mapping of tags assigned to the resource.
    """
    # Marshal the lookup arguments for the provider invoke.
    __args__ = dict()
    __args__['name'] = name
    __args__['resourceGroupName'] = resource_group_name
    __args__['serverName'] = server_name
    __args__['tags'] = tags
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Pin the provider version when the caller did not specify one.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure:sql/getDatabase:getDatabase', __args__, opts=opts, typ=GetDatabaseResult).value
    # Re-wrap the raw invoke result into the awaitable typed result object.
    return AwaitableGetDatabaseResult(
        collation=__ret__.collation,
        default_secondary_location=__ret__.default_secondary_location,
        edition=__ret__.edition,
        elastic_pool_name=__ret__.elastic_pool_name,
        failover_group_id=__ret__.failover_group_id,
        id=__ret__.id,
        location=__ret__.location,
        name=__ret__.name,
        read_scale=__ret__.read_scale,
        resource_group_name=__ret__.resource_group_name,
        server_name=__ret__.server_name,
        tags=__ret__.tags)
import yaml
def read_status_yaml(path: str, year: int = None):
    """Read a yaml file mapping status categories to lists of date strings
    (single dates or '-'-separated ranges) and build Status objects."""
    with open(path, "rt") as f:
        data = yaml.load(f, Loader=yaml.Loader)
    statuses = []
    for category, date_strings in data.items():
        status = Status(category.lower())
        if date_strings is not None:
            for datestr in date_strings:
                if datestr is None:
                    raise ValueError(f"category: {category}: empty date string")
                if "-" in datestr:
                    # A range expands into every date it covers.
                    dates = date_range_from_str(datestr, year)
                    status.add_range(dates)
                    status.date_list.extend(dates)
                else:
                    status.date_list.append(str_to_date(datestr, year))
        statuses.append(status)
    return statuses
import numpy
import math
def S_inv_eulerZYX_body_deriv(euler_coordinates, omega):
    """Compute dE^-1(euler_coordinates)*omega/deuler_coordinates for
    body-frame ZYX Euler angles.

    Only euler_coordinates[1] (y) and euler_coordinates[2] (z) enter
    E^-1, so the first column of the returned 3x3 Jacobian stays zero.
    cfo, 2015/08/13
    """
    y = euler_coordinates[1]
    z = euler_coordinates[2]
    # dE^-1/dtheta (partial derivative w.r.t. the y angle)
    dEinv_dy = numpy.zeros((3, 3))
    dEinv_dy[0, 1] = math.tan(y) / math.cos(y) * math.sin(z)
    dEinv_dy[0, 2] = math.tan(y) / math.cos(y) * math.cos(z)
    dEinv_dy[2, 1] = math.sin(z) / (math.cos(y)) ** 2
    dEinv_dy[2, 2] = math.cos(z) / (math.cos(y)) ** 2
    # dE^-1/dphi (partial derivative w.r.t. the z angle)
    dEinv_dz = numpy.zeros((3, 3))
    dEinv_dz[0, 1] = math.cos(z) / math.cos(y)
    dEinv_dz[0, 2] = -math.sin(z) / math.cos(y)
    dEinv_dz[1, 1] = -math.sin(z)
    dEinv_dz[1, 2] = -math.cos(z)
    dEinv_dz[2, 1] = math.cos(z) * math.tan(y)
    dEinv_dz[2, 2] = -math.sin(z) * math.tan(y)
    # Assemble the Jacobian column by column.
    jacobian = numpy.zeros((3, 3))
    jacobian[:, 1] = numpy.dot(dEinv_dy, omega)
    jacobian[:, 2] = numpy.dot(dEinv_dz, omega)
    return jacobian
def _mini_batch_step(X, sample_weight, x_squared_norms, centers, weight_sums,
                     old_center_buffer, compute_squared_diff,
                     distances, random_reassign=False,
                     random_state=None, reassignment_ratio=.01,
                     verbose=False):
    """Incremental update of the centers for the Minibatch K-Means algorithm.

    Parameters
    ----------
    X : array, shape (n_samples, n_features)
        The original data array.

    sample_weight : array-like, shape (n_samples,)
        The weights for each observation in X.

    x_squared_norms : array, shape (n_samples,)
        Squared euclidean norm of each data point.

    centers : array, shape (k, n_features)
        The cluster centers. This array is MODIFIED IN PLACE

    weight_sums : array, shape (k,)
        The vector in which we keep track of the accumulated sample
        weights per cluster. This array is MODIFIED IN PLACE

    distances : array, dtype float, shape (n_samples), optional
        If not None, should be a pre-allocated array that will be used to store
        the distances of each sample to its closest center.
        May not be None when random_reassign is True.

    random_state : int, RandomState instance or None (default)
        Determines random number generation for centroid initialization and to
        pick new clusters amongst observations with uniform probability. Use
        an int to make the randomness deterministic.
        See :term:`Glossary <random_state>`.

    random_reassign : boolean, optional
        If True, centers with very low counts are randomly reassigned
        to observations.

    reassignment_ratio : float, optional
        Control the fraction of the maximum number of counts for a
        center to be reassigned. A higher value means that low count
        centers are more likely to be reassigned, which means that the
        model will take longer to converge, but should converge in a
        better clustering.

    verbose : bool, optional, default False
        Controls the verbosity.

    compute_squared_diff : bool
        If set to False, the squared diff computation is skipped.

    old_center_buffer : int
        Copy of old centers for monitoring convergence.

    Returns
    -------
    inertia : float
        Sum of squared distances of samples to their closest cluster center.

    squared_diff : numpy array, shape (n_clusters,)
        Squared distances between previous and updated cluster centers.

    """
    # Perform label assignment to nearest centers
    nearest_center, inertia = _labels_inertia(X, sample_weight,
                                              x_squared_norms, centers,
                                              distances=distances)
    if random_reassign and reassignment_ratio > 0:
        random_state = check_random_state(random_state)
        # Reassign clusters that have very low weight
        to_reassign = weight_sums < reassignment_ratio * weight_sums.max()
        # pick at most .5 * batch_size samples as new centers
        if to_reassign.sum() > .5 * X.shape[0]:
            indices_dont_reassign = \
                    np.argsort(weight_sums)[int(.5 * X.shape[0]):]
            to_reassign[indices_dont_reassign] = False
        n_reassigns = to_reassign.sum()
        if n_reassigns:
            # Pick new clusters amongst observations with uniform probability
            new_centers = random_state.choice(X.shape[0], replace=False,
                                              size=n_reassigns)
            if verbose:
                print("[MiniBatchKMeans] Reassigning %i cluster centers."
                      % n_reassigns)
            if sp.issparse(X) and not sp.issparse(centers):
                assign_rows_csr(X, new_centers.astype(np.intp),
                                np.where(to_reassign)[0].astype(np.intp),
                                centers)
            else:
                centers[to_reassign] = X[new_centers]
        # reset counts of reassigned centers, but don't reset them too small
        # to avoid instant reassignment. This is a pretty dirty hack as it
        # also modifies the learning rates.
        weight_sums[to_reassign] = np.min(weight_sums[~to_reassign])
    # implementation for the sparse CSR representation completely written in
    # cython
    if sp.issparse(X):
        return inertia, _k_means._mini_batch_update_csr(
            X, sample_weight, x_squared_norms, centers, weight_sums,
            nearest_center, old_center_buffer, compute_squared_diff)
    # dense variant in mostly numpy (not as memory efficient though)
    k = centers.shape[0]
    squared_diff = 0.0
    for center_idx in range(k):
        # find points from minibatch that are assigned to this center
        center_mask = nearest_center == center_idx
        wsum = sample_weight[center_mask].sum()
        if wsum > 0:
            if compute_squared_diff:
                old_center_buffer[:] = centers[center_idx]
            # inplace remove previous count scaling
            centers[center_idx] *= weight_sums[center_idx]
            # inplace sum with new points members of this cluster
            centers[center_idx] += \
                np.sum(X[center_mask] *
                       sample_weight[center_mask, np.newaxis], axis=0)
            # update the count statistics for this center
            weight_sums[center_idx] += wsum
            # inplace rescale to compute mean of all points (old and new)
            # Note: numpy >= 1.10 does not support '/=' for the following
            # expression for a mixture of int and float (see numpy issue #6464)
            centers[center_idx] = centers[center_idx] / weight_sums[center_idx]
            # update the squared diff if necessary
            if compute_squared_diff:
                diff = centers[center_idx].ravel() - old_center_buffer.ravel()
                squared_diff += np.dot(diff, diff)
    return inertia, squared_diff
def get_user_ida_projects():
    """
    List IDA projects for current user without the prefix.

    Returns:
        list(str) -- List of projects, or None on lookup/parsing errors.
    """
    groups = get_user_ida_groups()
    if groups is None:
        log.error('Could not get user IDA projects.\n')
        return None
    try:
        # Each group is '<prefix>:<project>'; keep only the project part.
        return [group.split(":")[1] for group in groups]
    except IndexError as e:
        log.error('Index error while parsing user IDA projects:\n{0}'.format(e))
        return None
def bf_get_issue_config(major, minor):
    # type: (str, str) -> IssueConfig
    """Fetch and deserialize the issue config for the active network."""
    raw_config = restv2helper.get_issue_config(bf_session, major, minor)
    return IssueConfig.from_dict(raw_config)
import torch
def normalize_rotmat(mat: torch.Tensor) -> torch.Tensor:
"""Normalizes rotation matrix to a valid one"""
u, _, v = torch.svd(mat)
s = torch.zeros_like(mat)
s[..., 0, 0] = 1.0
s[..., 1, 1] = 1.0
s[..., 2, 2] = torch.det(u) * torch.det(v)
return u @ s @ v.transpose(-1, -2) | 1ba33c73a943392d6fe06448e81c346a5e7dc9f6 | 29,261 |
def responder():
    """Responder fixture.

    Returns a fresh MockResponder instance for each test that requests it.
    """
    return MockResponder()
def ortho(U, V, A, X=1, G=None, Z=0):
    """
    U is NxL array of cell factors
    V is JxL array of loadings onto genes
    X is NxKo array of cell specific covariates
    A is JxKo array of coefficients of X
    Z is JxKf array of gene specific covariates
    G is NxKf array of coefficients of Z
    assume the data Y is of dimension JxN
    imputed expression: E[Y] = g^{-1}(R) where R = VU'+AX'+ZG'

    Returns a dict with orthogonalized 'factors' and 'loadings' plus the
    adjusted covariate coefficients 'coefX' and 'coefZ'.

    NOTE(review): A (and G, when present) are modified IN PLACE via '+=';
    callers' arrays are mutated.
    """
    # Default covariates: intercept column for X, zero column for Z.
    if np.all(X == 1): X = cvec1(nrow(U))
    if np.all(Z == 0): Z = np.zeros((nrow(V), 1))
    if np.all(G == 0): G = None
    # we assume A is not null or zero
    # remove correlation between U and A
    # at minimum, this will cause factors to have mean zero
    betax = np.linalg.lstsq(X, U, rcond=None)[0]  # extract coef from linreg
    factors = U - X @ betax  # residuals from linear regression
    # Fold the removed component into the covariate coefficients.
    A += tcrossprod(V, betax)
    # remove correlation between V and G
    if G is None:
        loadings = V
    else:  # G is not empty
        betaz = np.linalg.lstsq(Z, V, rcond=None)[0]  # extract coef from linreg
        loadings = V - Z @ betaz  # residuals from regression
        G += tcrossprod(factors, betaz)
    # rotate factors to make loadings orthornormal
    loadings, d, Qt = np.linalg.svd(loadings, full_matrices=False)
    factors = tcrossprod(factors, Qt) * d  # d vector broadcasts across cols
    # arrange latent dimensions in decreasing L2 norm
    o = (-colNorms(factors)).argsort()
    factors = factors[:, o]
    loadings = loadings[:, o]
    return {"factors": factors, "loadings": loadings, "coefX": A, "coefZ": G}
def get_all_users_projects(is_authenticated):
    """Pull every assigned project for all users from Users_Projects table;
    requires an authenticated admin, otherwise raises auth0.AuthError."""
    if not (is_authenticated and auth0.is_admin()):
        raise auth0.AuthError()
    app.logger.info(
        f"Received request at {const.INTERMEDIATE_API_ALL_USERS_PROJECTS_ENDPOINT}"
    )
    users, _ = auth0.get_users()
    return db.get_all_users_project_permissions(users)
import requests
def get_status_code(ep):
    """Issue a GET request to endpoint *ep* and return its HTTP status code."""
    return requests.get(ep).status_code
def process_repository(repo_dict):
    """Build a Repository from a dict with keys: path, branches, revisions.

    This method should only be called by read_yaml. Declared revisions and
    branches are initialized as unresolved (mapped to None).
    """
    path = repo_dict[REPOSITORY_PATH]
    # dict.fromkeys maps every declared id/name to None; absent keys give {}.
    revisions = dict.fromkeys(repo_dict.get(REPOSITORY_REVISIONS, []))
    branches = dict.fromkeys(repo_dict.get(REPOSITORY_BRANCHES, []))
    return Repository(path, branches, revisions, True)
import re
def hash_algorithm(alg, allow_raw=False):
    """Parse an hash algorithm name into its canonical form.

    The canonical name keeps only uppercase alphanumeric characters.
    Raises ValueError for names not in DIGEST_ASN1_PREFIXES (or 'RAW'
    when allow_raw is set).
    """
    canonical = re.sub(r'[^0-9A-Z]', '', alg.upper())
    if canonical in DIGEST_ASN1_PREFIXES or (allow_raw and canonical == 'RAW'):
        return canonical
    raise ValueError("Unknown hash algorithm {}".format(canonical))
def fista(gradient_op, linear_op, prox_op, cost_op, kspace_generator=None, estimate_call_period=None,
          lambda_init=1.0, max_nb_of_iter=300, x_init=None,
          metric_call_period=5, metrics=None,
          verbose=0, **lambda_update_params):
    """FISTA sparse reconstruction.
    Parameters
    ----------
    gradient_op: instance of class GradBase
        the gradient operator.
    linear_op: instance of LinearBase
        the linear operator: seek the sparsity, ie. a wavelet transform.
    prox_op: instance of ProximityParent
        the proximal operator.
    cost_op: instance of costObj
        the cost function used to check for convergence during the
        optimization.
    kspace_generator: instance of class BaseKspaceGenerator, default None
        If not None, run the algorithm in an online way, where the data is
        updated between iterations.
    estimate_call_period: int, default None
        In an online configuration (kspace_generator is defined),
        retrieve partial results at this interval.
    lambda_init: float, (default 1.0)
        initial value for the FISTA step.
    max_nb_of_iter: int (optional, default 300)
        the maximum number of iterations in the Condat-Vu proximal-dual
        splitting algorithm.
    x_init: np.ndarray (optional, default None)
        Inital guess for the image
    metric_call_period: int (default 5)
        the period on which the metrics are compute.
    metrics: dict (optional, default None)
        the list of desired convergence metrics: {'metric_name':
        [@metric, metric_parameter]}. See modopt for the metrics API.
    verbose: int (optional, default 0)
        the verbosity level.
    lambda_update_params: dict,
        Parameters for the lambda update in FISTA mode
    Returns
    -------
    x_final: ndarray
        the estimated FISTA solution.
    costs: list of float
        the cost function values.
    metrics: dict
        the requested metrics values during the optimization.
    """
    # Fix: ``metrics`` previously defaulted to a shared mutable ``{}``; a
    # ``None`` sentinel matches the docstring and avoids cross-call leakage.
    if metrics is None:
        metrics = {}
    # Define the initial primal and dual solutions
    if x_init is None:
        # Fix: ``np.complex`` was a deprecated alias of the builtin and was
        # removed in NumPy 1.24; ``complex`` is the documented replacement.
        x_init = np.squeeze(np.zeros((gradient_op.linear_op.n_coils,
                                      *gradient_op.fourier_op.shape),
                                     dtype=complex))
    alpha_init = linear_op.op(x_init)
    # Welcome message
    if verbose > 0:
        print(" - mu: ", prox_op.weights)
        print(" - lipschitz constant: ", gradient_op.spec_rad)
        print(" - data: ", gradient_op.fourier_op.shape)
        if hasattr(linear_op, "nb_scale"):
            print(" - wavelet: ", linear_op, "-", linear_op.nb_scale)
        print(" - max iterations: ", max_nb_of_iter)
        print(" - image variable shape: ", gradient_op.fourier_op.shape)
        print(" - alpha variable shape: ", alpha_init.shape)
        print("-" * 40)
    beta_param = gradient_op.inv_spec_rad
    if lambda_update_params.get("restart_strategy") == "greedy":
        lambda_update_params["min_beta"] = gradient_op.inv_spec_rad
        # this value is the recommended one by J. Liang in his article
        # when introducing greedy FISTA.
        # ref: https://arxiv.org/pdf/1807.04005.pdf
        beta_param *= 1.3
    # Define the optimizer
    opt = ForwardBackward(
        x=alpha_init,
        grad=gradient_op,
        prox=prox_op,
        cost=cost_op,
        auto_iterate=False,
        metric_call_period=metric_call_period,
        metrics=metrics,
        linear=linear_op,
        lambda_param=lambda_init,
        beta_param=beta_param,
        **lambda_update_params)
    if kspace_generator is not None:
        return run_online_algorithm(opt, kspace_generator, estimate_call_period, verbose)
    return run_algorithm(opt, max_nb_of_iter, verbose)
def get_entry_from_args():
    """
    Handle finding the file entry using the user supplied arguments
    return the entry or quits script entirely.
    :rtype: object containing the entry of the found file and the entry_id or error & exit
    """
    args = demisto.args()
    # Prefer an explicit entry id; otherwise resolve the entry by file name.
    if 'entryID' in args:
        entry_id = args['entryID']
    elif 'pcapFileName' in args:
        pcap_file = args['pcapFileName']
        entry_id = _find_entry_id_by_name(pcap_file, [".pcap", ".cap", ".pcapng"])
    else:
        return_error('You must set pcapFileName or entryID when executing the PcapHTTPExtract script.')
    res = demisto.executeCommand('getFilePath', {'id': entry_id})
    if len(res) > 0 and res[0]['Type'] == entryTypes['error']:
        return_error(f'Failed to get the file path for entry: {entry_id}')
    return res, entry_id
def GetGerritFetchUrl(host):
  """Given a gerrit host name returns URL of a gerrit instance to fetch from."""
  return f'https://{host}/'
import re
import os
from datetime import datetime
import logging
def _source() -> Source:
    """
    checks HTTP header source is present and points to an available resource
    :return: parsed HTTP headers, transformed into a Source object
    """
    src = request.headers.get("source")
    if not src:
        abort(Response("HTTP header 'source' not found", 400))
    if re.match("^http[s]?://", src):
        abort(Response("HTTP source not supported", 400))
    # Local (non-S3) sources must exist on disk.
    if not src.startswith("s3://") and not os.path.exists(src):
        abort(Response("couldn't find source [{}]".format(src), 400))
    sim_year = request.headers.get("sim_year")
    if not sim_year:
        # Fall back to the SIM_YEAR env var, then the current year.
        sim_year = int(os.getenv("SIM_YEAR", datetime.now().year))
    logging.info("Using pinned simulation year: {}".format(sim_year))
    return Source.of(src, int(sim_year))
def deep_copy(pcollection):
  """Create a deep copy of a PCollection up to materialization boundaries.

  Args:
    pcollection: the ``pvalue.PCollection`` to clone.

  Returns:
    The clone corresponding to ``pcollection``.

  Raises:
    ValueError: if ``pcollection`` is not a PCollection.
  """
  if not isinstance(pcollection, pvalue.PCollection):
    raise ValueError('Input to deep_copy must be a PCollection.')
  # AppliedPTransform.update_input_refcounts() is a vestigial method that
  # uses an incorrect heuristic; it will be removed in a future version of
  # Beam, since its results aren't used anyway. Until then, we work around
  # this (see https://issues.apache.org/jira/browse/BEAM-4593).
  if getattr(beam_pipeline.AppliedPTransform,
             'update_input_refcounts', None) is not None:
    beam_pipeline.AppliedPTransform.update_input_refcounts = lambda _: None
  # Collect what needs cloning, clone it within the same pipeline, then map
  # the original PCollection to its replacement. (Helpers are defined
  # elsewhere in this module; presumably they stop at materialization
  # boundaries per the summary above — confirm against their docstrings.)
  to_clone = _get_items_to_clone(pcollection)
  pcollection_replacements = _clone_items(pcollection.pipeline, to_clone)
  return pcollection_replacements[pcollection]
def compute_lm_accuracy(eval_preds):
    """Compute the accuracy given the predictions and labels, contained in `eval_preds`.
    It assumes the logits have been reduced with argmax(-1) by `preprocess_logits_for_accuracy`.
    """
    predictions, references = eval_preds
    # Language-model shift: token t is predicted from position t-1, so drop
    # the first label and the last prediction before flattening.
    shifted_refs = references[:, 1:].reshape(-1)
    shifted_preds = predictions[:, :-1].reshape(-1)
    return accuracy_metric.compute(predictions=shifted_preds,
                                   references=shifted_refs)
import os
def is_folder(dir_path):
    """Determines if a given local directory should map to a SmugMug Folder or
    Gallery by presence of a .smfolder file.

    :param dir_path: path of the local directory to inspect
    :return: True if ``dir_path`` contains a ``.smfolder`` marker file
    """
    # os.path.join is portable and tolerates a trailing separator on
    # dir_path; isfile already returns a bool, so no if/else is needed.
    return os.path.isfile(os.path.join(dir_path, ".smfolder"))
from pathlib import Path
def fix_path(path):
    """Fix a path and convert to an absolute location.

    Parameters
    ----------
    path : str
        Path to fix.

    Returns
    -------
    str
        Absolute path.
    """
    # THIS_PLATFORM is a module-level global set elsewhere — presumably
    # platform.system(); confirm at module top.
    if THIS_PLATFORM == 'Windows':
        # Fix: PureWindowsPath was used but never imported (the module only
        # imports Path), which raised NameError on Windows.
        from pathlib import PureWindowsPath
        path = PureWindowsPath(path)
    return str(Path(path).absolute())
import numpy
def partition_skymodel_by_flux(sc, model, flux_threshold=-numpy.inf):
    """Partition components into bright components plus an image of weak ones.

    :param sc: list of skycomponents to partition
    :param model: template image that receives the weak components
    :param flux_threshold: components at or above this flux stay as components
    :return: SkyModel holding the bright components and the weak-component image
    """
    bright = filter_skycomponents_by_flux(sc, flux_min=flux_threshold)
    weak = filter_skycomponents_by_flux(sc, flux_max=flux_threshold)
    log.info('Converted %d components into %d bright components and one image containing %d components'
             % (len(sc), len(bright), len(weak)))
    # Weak components are baked into a copy of the model image.
    weak_image = insert_skycomponent(copy_image(model), weak)
    bright_copies = [copy_skycomponent(comp) for comp in bright]
    return SkyModel(components=bright_copies,
                    images=[copy_image(weak_image)],
                    fixed=False)
def getNumberOfDaysBetween(d1, d2, tz_str='UTC'):
    """Return the absolute number of calendar days between two datetimes.

    d1 and d2 are datetime objects in UTC; both are converted into the
    ``tz_str`` timezone before their dates are compared.
    """
    # Truncate to local midnight so only the calendar date matters.
    # Fix: datetime.replace() already returns a new object, so the previous
    # deepcopy() calls were redundant (and ``deepcopy`` was never imported
    # in this snippet, which would have raised NameError).
    d1_date = utcInTz(d1, tz_str).replace(hour=0, minute=0, second=0, microsecond=0)
    d2_date = utcInTz(d2, tz_str).replace(hour=0, minute=0, second=0, microsecond=0)
    return abs((d1_date - d2_date).days)
def get_language_specifics() -> BaseLanguage:
    """Gets the language specific actions."""
    loader = PluginLoader()
    return loader.languageSpecifics
from .model_store import get_model_file
import os
def get_darknet53(model_name=None,
                  pretrained=False,
                  ctx=cpu(),
                  root=os.path.join('~', '.mxnet', 'models'),
                  **kwargs):
    """
    Create DarkNet model with specific parameters.
    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # DarkNet-53 stage layout: five stages with fixed depths and widths.
    stage_depths = (2, 3, 9, 9, 5)
    stage_widths = (64, 128, 256, 512, 1024)
    channels = [[width] * depth
                for depth, width in zip(stage_depths, stage_widths)]
    net = DarkNet53(
        channels=channels,
        init_block_channels=32,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def add_item():
    """Create a new item. This will list all login uuser created.

    Fixes over the original:
    - the login check read ``log_session`` while every other access used
      ``login_session``; unified on ``login_session`` (the check would have
      raised NameError otherwise).
    - ``filter.by`` is not a valid queryset attribute and raised
      AttributeError; corrected to ``filter_by``.
    """
    if 'username' not in login_session:
        flash("Please login in to continue.")
        return redirect(url_for('login'))
    elif request.method == 'POST':
        item = session.query(Item).filter_by(name=request.form['name']).first()
        if item:
            if item.name == request.form['name']:
                flash('The item already exist')
                return redirect(url_for('add_item'))
        new_item = Item(
            name=request.form['name'],
            category_id=request.form['category'],
            description=request.form['description'],
            user_id=login_session['user_id']
        )
        session.add(new_item)
        session.commit()
        flash('New item created')
        return redirect(url_for('home'))
    else:
        items = session.query(Item).\
            filter_by(user_id=login_session['user_id']).all()
        categories = session.query(Category).filter_by(
            user_id=login_session['user_id']).all()
        # NOTE: the template keyword is spelled "catergories"; renaming it
        # would require a matching change in new-item.html, so it is kept.
        return render_template(
            'new-item.html',
            items=items,
            catergories=categories
        )
def SGLD(nburnin, nsample, gradU, p, batch, eta=0.0001, L=100, V=3):
    """
    Draw posterior samples with Stochastic Gradient Langevin Dynamics.

    :param nburnin: number of burn-in iterations to discard
    :param nsample: number of samples to keep after burn-in
    :param gradU: gradient of the potential, passed to SGLD_updater
    :param p: initial parameter value (scalar or 1-D array)
    :param batch: mini-batch of data passed through to the updater
    :param eta: step size
    :param L: number of inner updater steps
    :param V: variance estimate used by the updater
    :return: array of shape (nsample, d) with the post-burn-in samples
    """
    n = nburnin + nsample
    # Fix: the original compared type(p) against the *string* "np.array",
    # which is always unequal, so d was unconditionally 1 and array-valued
    # parameters crashed on assignment. np.size gives 1 for scalars and the
    # length for 1-D arrays.
    d = np.size(p)
    samples = np.zeros((n, d))
    for i in range(n):
        p = SGLD_updater(gradU, p, batch, eta, L, V)
        # np.copy also accepts plain floats, unlike p.copy().
        samples[i] = np.copy(p)
    return samples[nburnin:]
from typing import List
def create_commands(device_ids: List[str], file_path: str) -> List[CommandRunner.Command]:
    """
    Create a list of `Command` of the remove file command to `Cortex XDR` and `CrowdstrikeFalcon`
    :param device_ids: The device id's to run on
    :param file_path: The file_path to delete
    :return: A list of `Command` for the script
    """
    xdr_command = CommandRunner.Command(
        commands='xdr-run-script-delete-file',
        args_lst={'endpoint_ids': ','.join(device_ids),
                  'file_path': file_path})
    # CrowdStrike needs one argument set per operating system, so group the
    # device ids by OS first.
    falcon_args = []
    for os_name, ids in get_crowdstrike_os_to_id(device_ids).items():
        falcon_args.append({'host_ids': ','.join(ids),
                            'file_path': file_path,
                            'os': os_name})
    falcon_command = CommandRunner.Command(commands='cs-falcon-rtr-remove-file',
                                           args_lst=falcon_args)
    return [xdr_command, falcon_command]
from datetime import datetime
def compiler(stats_set):
    """
    Processes the API data into a clean dataframe for analysis and predictions.
    Parameters
    ----------
    stats_set: list
        Incoming data on all previous games from the balldontlie API request.
    Returns
    ----------
    final: pandas dataframe
        The training dataset, cleaned and processed.
    """
    df = pd.DataFrame([])
    for i, stats in enumerate(stats_set):
        if stats['min'] == '' or stats['min'] == '0:00': # not counting games that they did not play!
            if i == 0:
                # NOTE(review): df is already empty here, so this reset is
                # redundant; kept as-is.
                df = pd.DataFrame([])
            continue
        # NOTE(review): home is 0 when the player's team IS the home team —
        # confirm downstream consumers expect this inverted encoding.
        if stats['game']['home_team_id'] == stats['player']['team_id']:
            home = 0
            opponent = stats['game']['visitor_team_id']
        else:
            home = 1
            opponent = stats['game']['home_team_id']
        # One row per played game: date (YYYY-MM-DD), box-score stats,
        # home flag and opponent name.
        g_info = {'date':stats['game']['date'][:10],'pts': stats['pts'], 'reb': stats['reb'], 'ast': stats['ast'], 'mins':stats['min'], 'home':home,
                  'opponent':get_team_name(opponent)}
        if i == 0:
            df = pd.DataFrame([g_info])
        else:
            df2 = pd.DataFrame([g_info])
            df = pd.concat([df,df2])
    # Chronological order, fresh index, then categorical encoding and
    # lag/rolling features via the module helpers.
    df = df.sort_values(by=['date'])
    df = df.reset_index(drop=True)
    df = onehot_encode(df)
    final = add_temporality(df)
    # Remove the current game if it is underway
    date = datetime.today().strftime('%Y-%m-%d')
    for i in final['date']:
        if i == date:
            final = final.iloc[:-1 , :] # https://thispointer.com/drop-last-row-of-pandas-dataframe-in-python-3-ways/
    return final
def generate_cpda_eligible_lists(request):
    """
    Function retrieves the eligible user information and related unreviewed,approved and archived CPDA applications.

    :param request: Django request; ``request.user`` scopes every queryset.
    :return: context dict consumed by the CPDA dashboard template.
    """
    # Applications by this user that are still in flight.
    active_apps = (Cpda_application.objects
                       .select_related('applicant')
                       .filter(applicant=request.user)
                       .exclude(status='rejected')
                       .exclude(status='finished')
                       .order_by('-request_timestamp'))
    # Applications that have reached a terminal/archived state.
    archive_apps = (Cpda_application.objects
                        .select_related('applicant')
                        .filter(applicant=request.user)
                        .exclude(status='requested')
                        .exclude(status='approved')
                        .exclude(status='adjustments_pending')
                        .order_by('-request_timestamp'))
    # Applications currently awaiting this user's review.
    to_review_apps = (Cpda_application.objects
                          .select_related('applicant')
                          .filter(Q(tracking_info__reviewer_id=request.user))
                          .exclude(status='rejected')
                          .exclude(status='finished')
                          .exclude(status='approved')
                          .filter(tracking_info__review_status='under_review')
                          .order_by('-request_timestamp'))
    reviewed_apps= (Cpda_application.objects.select_related('applicant')
                        .filter(Q(tracking_info__reviewer_id=request.user))
                        .order_by('-request_timestamp'))
    reviewer=False
    # NOTE(review): a queryset is never None, so this is always True — and
    # the response below hardcodes 'reviewer': True anyway. Confirm whether
    # ``reviewed_apps.exists()`` was intended.
    if(reviewed_apps is not None):
        reviewer=True
    # Attach a per-application review form for the review queue.
    for app in to_review_apps:
        app.reviewform = Review_Form(initial={'app_id': app.id})
    form = Cpda_Form()
    bill_forms = {}
    apps = Cpda_application.objects.select_related('applicant').filter(applicant=request.user).filter(status='approved')
    for app in apps:
        bill_forms[app.id] = Cpda_Bills_Form(initial={'app_id': app.id})
    # Total advance already granted; only requests that were acted on count.
    advance_took = (Cpda_application.objects
                        .select_related('applicant')
                        .filter(applicant=request.user)
                        .exclude(status='requested')
                        .aggregate(total_advance=Sum('requested_advance')))
    # 300000 is the total advance entitlement per block period.
    if advance_took['total_advance']:
        advance_taken=advance_took['total_advance']
        advance_avail=300000-advance_took['total_advance']
    else:
        advance_taken=0
        advance_avail=300000
    today_date=date.today()
    # Three-year CPDA block period anchored at 2018, e.g. "2018-2021".
    block_period=str(2018+int((np.ceil((today_date.year-2018)/3)-1))*3)+"-"+ str(2018+int(np.ceil((today_date.year-2018)/3))*3)
    #hod=is_hod(request)
    #registrar=is_registrar(request)
    #director=is_director(request)
    response = {
        'reviewer':True,
        'cpda_form': form,
        'cpda_billforms': bill_forms,
        'cpda_active_apps': active_apps,
        'cpda_archive_apps': archive_apps,
        'cpda_to_review_apps': to_review_apps,
        'total_advance_by_user':advance_taken,
        'remaining_advance': advance_avail,
        'block_period': block_period,
        'cpda_reviewed_apps': reviewed_apps
    }
    return response
def get_points_on_sphere(n_points, r=1):
"""
Find n evenly spaced points on a sphere using the "How to generate
equidistributed points on the surface of a sphere" by Markus Deserno, 2004.
Arguments:
n_points (int): number of points to generate
r (float): radius of the sphere
Returns:
(list(np.ndarray))
"""
points = []
a = 4.0 * np.pi * r**2 / n_points
d = np.sqrt(a)
m_theta = int(np.round(np.pi / d))
d_theta = np.pi / m_theta
d_phi = a / d_theta
for m in range(m_theta):
theta = np.pi * (m + 0.5) / m_theta
m_phi = int(np.round(2.0 * np.pi * np.sin(theta)/d_phi))
for n in range(m_phi):
phi = 2.0 * np.pi * n / m_phi
point = np.array([r * np.sin(theta) * np.cos(phi),
r * np.sin(theta) * np.sin(phi),
r * np.cos(theta)])
points.append(point)
return points | 575b7aa98c08942dbe24389bf18bde36cd0f9c0e | 29,285 |
def bottleneck_block(x, filters, kernel_size=(3, 3), padding="same", strides=1):
    """
    Create a bottleneck block: the addition of a convolution branch and a
    batch-normalized/activated 1x1 shortcut branch.
    INPUTS:
        * `filters`: number of filters in the convolutional block
        * `x`: input keras layer
    OPTIONAL INPUTS:
        * `kernel_size`=(3, 3): tuple of kernel size (x, y) - this is the size in pixels of the kernel to be convolved with the image
        * `padding`="same": see tf.keras.layers.Conv2D
        * `strides`=1: see tf.keras.layers.Conv2D
    GLOBAL INPUTS: None
    OUTPUTS:
        * keras layer, output of the addition between convolutional and bottleneck layers
    """
    # Main branch: convolution followed by a conv block.
    main = tf.keras.layers.Conv2D(filters, kernel_size, padding=padding, strides=strides)(x)
    main = conv_block(main, filters, kernel_size=kernel_size, padding=padding, strides=strides)
    # Shortcut branch: 1x1 convolution, then batchnorm + activation.
    shortcut = tf.keras.layers.Conv2D(filters, kernel_size=(1, 1), padding=padding, strides=strides)(x)
    shortcut = batchnorm_act(shortcut)
    return tf.keras.layers.Add()([main, shortcut])
import logging
def ClusterAndFindSplit(values, rand=None):
  """Finds a list of indices where we can detect significant changes.
  This algorithm looks for the point at which clusterings of the "left" and
  "right" datapoints show a significant difference. We understand that this
  algorithm is working on potentially already-aggregated data (means, etc.) and
  it would work better if we had access to all the underlying data points, but
  for now we can do our best with the points we have access to.
  In the E-Divisive paper, this is a two-step process: first estimate potential
  change points, then test whether the clusters partitioned by the proposed
  change point internally has potentially hidden change-points through random
  permutation testing. Because the current implementation only returns a single
  change-point, we do the change point estimation through bisection, and use the
  permutation testing to identify whether we should continue the bisection, not
  to find all potential change points.
  Arguments:
    - values: a sequence of values in time series order.
    - rand: a callable which produces a value used for subsequence permutation
      testing.
  Returns:
    - A list of indices into values where we can detect potential split points.
  Raises:
    - InsufficientData when the algorithm cannot find potential change points
      with statistical significance testing.
  """
  logging.debug('Starting change point detection.')
  length = len(values)
  if length <= 3:
    raise InsufficientData(
        'Sequence is not larger than minimum length (%s <= %s)' % (length, 3))
  # Each accepted candidate is stored as
  # (change_point_index, (range_lower, range_upper)).
  candidate_indices = set()
  # Breadth-first queue of (start, end) half-open sub-ranges to examine.
  exploration_queue = [(0, length)]
  while exploration_queue:
    # Find the most likely change point in the whole range, only excluding the
    # first and last elements. We're doing this because we want to still be able
    # to pick a candidate within the margins (excluding the ends) if we have
    # enough confidence that it is a change point.
    start, end = exploration_queue.pop(0)
    logging.debug('Exploring range seq[%s:%s]', start, end)
    segment = values[start:end]
    partition_point, _, _ = ChangePointEstimator(segment)
    probability = PermutationTest(segment, partition_point, rand)
    logging.debug(
        'Permutation testing change point %d at seq[%s:%s]: %s;'
        ' probability = %.4f', partition_point, start, end,
        probability >= _MIN_SIGNIFICANCE, probability)
    # Insignificant candidates prune this whole sub-range from the search.
    if probability < _MIN_SIGNIFICANCE:
      continue
    lower, upper = ExtendChangePointRange(partition_point, segment)
    if lower != partition_point or upper != partition_point:
      logging.debug('Extending change range from %d to %d-%d.',
                    partition_point, lower, upper)
    candidate_indices.add(
        (start + partition_point, (start + lower, start + upper)))
    # Recurse into both halves around the accepted point.
    exploration_queue.append((start, start + partition_point))
    exploration_queue.append((start + partition_point, end))
  if not candidate_indices:
    raise InsufficientData('Not enough data to suggest a change point.')
  return list(sorted(candidate_indices))
def list_nodelets():
    """
    List all nodelets in all packages.
    """
    found = []
    # Every package that (directly) depends on 'nodelet' may export nodelets.
    for pkg_name in rospkg.RosPack().get_depends_on('nodelet', implicit=False):
        found.extend(Package(pkg_name).list_nodelets())
    return found
import os
def install_openwhisk_action_in_path(packaging_params, action_params_to_env, path):
    """
    This performs the equivalent of the command line:

    wsk action create fetch_aws_keys --docker tleyden5iwx/openwhisk-dockerskeleton --param AwsAccessKeyId "$AWS_ACCESS_KEY_ID" --param AwsSecretAccessKey "$AWS_SECRET_ACCESS_KEY" --param KeyNukerOrg "default"

    Where the param values are pulled out of environment variables. The param vals and their
    corresponding environment variable names are specified in the action_params_to_env dictionary

    The name of the action is discovered from the path basename.

    Returns the installed action name, or None if the path does not name a
    known action.
    """
    # Save the current working directory and restore it in *all* exit paths
    # (the original leaked the chdir if any helper below raised).
    cwd = os.getcwd()
    os.chdir(path)
    try:
        openwhisk_action = os.path.basename(os.getcwd())
        if openwhisk_action not in action_params_to_env:
            return
        params_to_env = action_params_to_env[openwhisk_action]
        # Recreate the action unless this is a dry run.
        if packaging_params.dryRun is False and openwhisk_action_exists(openwhisk_action):
            delete_openwhisk_action(openwhisk_action)
        install_openwhisk_action(
            packaging_params,
            openwhisk_action,
            params_to_env,
        )
        return openwhisk_action
    finally:
        # Restore the original current working directory
        os.chdir(cwd)
import logging
import sys
def checkMysql(checkOptions, options, required=False):
    """ Used in functions that may use the DB interface either that this is required
        or that functionality may be degraded.
        <condition> = mysql is not used or pymysql is available
    arguments:
        options: dictionnary, derived from command line args
        checkOptions: look for this entry ( *connect_string ) in options which
                 is a dictionnary, see if it names mysql
        required: if True and <condition> not met, then signal an error and exit
                 if False : no action
                 if None and <condition> not met: emit a warning
    """
    def usingMysql():
        # True iff the selected connect string designates a MySQL backend.
        if checkOptions in options:
            return options[checkOptions].startswith("mysql:")
        return False
    doExit=False
    # MYSQL_NOT_FOUND is a module-level flag set at import time — presumably
    # by a guarded ``import pymysql``; confirm at module top.
    if MYSQL_NOT_FOUND is not None:
        if MYSQL_NOT_FOUND:
            if usingMysql():
                if required:
                    logging.error(f"Module pymysql not available")
                    doExit = True
                elif required is None:
                    logging.warning(f"Module pymysql not available, functionality may be degraded")
        else:
            logging.debug(f"CheckMysql:_config.MYSQL_NOT_FOUND assigned:{MYSQL_NOT_FOUND}")
    else:
        logging.error(f"CheckMysql:_config.MYSQL_NOT_FOUND not assigned")
        # NOTE(review): here doExit takes the raw ``required`` value, so
        # required=None (warn mode) is falsy and does not exit — confirm
        # this asymmetry with the True branch is intended.
        doExit = required
    if doExit:
        sys.exit(20)
def predict():
    """Predict route"""
    audio_id = request.args.get('id')
    # Fetch the audio for this id into the local data directory, then score it.
    db.download(audio_id)
    prediction = detector.predict("data//audio.wav")
    return jsonify({"status": str(prediction)})
def colorize(text, color, bold=False):
    """Colorize some text using ANSI color codes.
    Note that while ANSI color codes look good in a terminal they look
    like noise in log files unless viewed in an ANSI color capable
    viewer (such as 'less -R').
    Args:
        text: The text to colorize.
        color: One of the color symbols from this module.
        bold: If True, make the color brighter.
    Returns:
        The input text string, appropriately sprinkled with color
        codes. Colors are reset to terminal defaults after the input
        text.
    """
    prefix = _ansi_escape(_BOLD) if bold else ''
    prefix += _ansi_escape(color)
    return '%s%s%s' % (prefix, text, _ansi_escape(_RESET))
def _create_log_entry(class_, message, author, created_at=None, **kwargs):
    """
    This method will create a new LogEntry of the given type with the given
    arguments.
    :param type class_: A subclass of LogEntry which should be created.
    :param unicode message: the log message text
    :param User author: user responsible for the entry
    :param datetime|None created_at: Creation time of the entry. Defaults to
        current database time if None.
    :param kwargs: Additional arguments.
    :return: the newly created LogEntry.
    """
    kwargs['message'] = message
    kwargs['author'] = author
    # Omit created_at entirely when not given so the database default applies.
    if created_at is not None:
        kwargs['created_at'] = created_at
    entry = class_(**kwargs)
    session.session.add(entry)
    return entry
def avg_stimpos(band, eclipse):
    """
    Define the mean detector stim positions.
    :param band: The band to return the average stim positions for,
        either 'FUV' or 'NUV'.
    :type band: str
    :param eclipse: The eclipse number to return the average stim positions for.
    :type eclipse: int
    :returns: dict -- A dict containing the average x and y positions of the
        four stims (for the requested band).
    :raises ValueError: if ``band`` is neither 'FUV' nor 'NUV'.
    """
    if band == 'FUV':
        return {'x1': -2541.88, 'x2': 2632.06, 'x3': -2541.53,
                'x4': 2631.68, 'y1': 2455.28, 'y2': 2455.02,
                'y3': -2550.89, 'y4': -2550.92}
    if band == 'NUV':
        if eclipse >= 38268:
            # The average stim positions after the clock change (post-CSP).
            return {'x1': -2722.53, 'x2': 2470.29, 'x3': -2721.98,
                    'x4': 2471.09, 'y1': 2549.96, 'y2': 2550.10,
                    'y3': -2538.57, 'y4': -2538.62}
        # The average stim positions for pre-CSP data (eclipse 37423).
        return {'x1': -2722.27, 'x2': 2468.84, 'x3': -2721.87,
                'x4': 2469.85, 'y1': 2453.89, 'y2': 2453.78,
                'y3': -2565.81, 'y4': -2567.83}
    # Fix: the original printed an error and then returned the *unbound*
    # local ``avgstim`` (NameError); raise explicitly as its own comment
    # suggested.
    raise ValueError("No valid band specified: {!r}".format(band))
def multiclass_positive_predictive_value(confusion_matrix: np.ndarray,
                                         label_index: int) -> Number:
    """
    Gets the "positive predictive value" for a multi-class confusion matrix.
    The positive predictive value is also known as *precision*.
    See the documentation of
    :func:`fatf.utils.models.metrics.validate_confusion_matrix` for all the
    possible errors and exceptions.
    Parameters
    ----------
    confusion_matrix : numpy.ndarray
        A confusion matrix based on which the metric will be computed.
    label_index : integer
        The index of a label that should be treated as "positive". All the
        other labels will be treated as "negative".
    Returns
    -------
    metric : number
        The "positive predictive value".
    """
    assert validate_confusion_matrix(confusion_matrix, label_index), \
        'The input parameters are invalid.'
    tp = confusion_matrix[label_index, label_index]
    predicted_positive = confusion_matrix[label_index, :].sum()
    # Guard against a zero denominator (no predictions for this label).
    if predicted_positive:
        return tp / predicted_positive
    return 0
def get_matching_cost_swap_bf(election_1: "OrdinalElection", election_2: "OrdinalElection",
                              mapping):
    """Return the voter-to-voter swap-distance cost table.

    Entry ``[v1, v2]`` is the swap (Kendall-tau) distance between voter v1 of
    election_1 and voter v2 of election_2, with candidates of election_1
    translated through ``mapping``.
    """
    # Fix: the second dimension must be election_2's voter count; the
    # original used election_1.num_voters for both axes, which mis-sized the
    # table (IndexError) whenever the electorates differ.
    # (Annotations are string forward references so this module does not
    # require OrdinalElection at definition time.)
    cost_table = np.zeros([election_1.num_voters, election_2.num_voters])
    for v1 in range(election_1.num_voters):
        for v2 in range(election_2.num_voters):
            swap_distance = 0
            # Count candidate pairs ordered oppositely by the two voters.
            for i, j in combinations(election_1.potes[0], 2):
                if (election_1.potes[v1][i] > election_1.potes[v1][j] and
                    election_2.potes[v2][mapping[i]] < election_2.potes[v2][mapping[j]]) or \
                        (election_1.potes[v1][i] < election_1.potes[v1][j] and
                         election_2.potes[v2][mapping[i]] > election_2.potes[v2][mapping[j]]):
                    swap_distance += 1
            cost_table[v1][v2] = swap_distance
    return cost_table
def auditable(event_type, msg_on_success):
    """
    Makes the result of an endpoint audit loggable - passes an
    AuditLogger object to the handler so that it can set extra data to be
    logged.

    :param event_type: identifier recorded on every audit entry.
    :param msg_on_success: message audited once the handler's Deferred
        fires successfully.
    :return: a decorator for (self, request, ...) endpoint handlers.
    """
    def decorator(f):
        @wraps(f)
        def _(self, request, *args, **kwargs):
            # Per-request logger so handlers can attach extra fields
            # without sharing state across requests.
            audit_logger = AuditLogger(self.log)
            audit_logger.add(event_type=event_type,
                             request_ip=request.getClientIP())
            # Handlers receive the logger as a keyword argument.
            kwargs['audit_logger'] = audit_logger
            # maybeDeferred wraps both sync and async handler results.
            d = defer.maybeDeferred(f, self, request, *args, **kwargs)
            def audit_log_it(result):
                audit_logger.audit(msg_on_success)
                return result
            # Only the success path audits; errbacks pass through untouched.
            return d.addCallback(audit_log_it)
        return _
    return decorator
def rec_pow(a, b):
    """Compute a**b recursively via exponentiation by squaring."""
    if b == 0:
        return 1
    if b == 1:
        return a
    half = rec_pow(a, b // 2)
    # Square the half-power; multiply in one extra factor for odd exponents.
    return half * half * (a if b % 2 else 1)
def omega_d(m, bvectors, bweights, idx=None): # Eq. 36
    """
    Compute the diagonal contribution to the spread functional
    Parameters
    ----------
    m: ndarray, shape (nkpts, nntot, nbnds, nbnds)
        the overlap matrix
    bvectors: ndarray, shape (nkpts, nntot, 3)
    bweights: ndarray, shape (nntot,)
    idx: array-like of int, optional
        If given, return the per-function contributions selected by these
        indices instead of the total sum.
    Returns
    -------
    float or ndarray
        Total diagonal spread, or the per-function values at ``idx``.
    """
    (nkpts, nntot, nwann) = m.shape[:-1]
    rv = wannier_centers(m, bvectors, bweights)
    # Diagonal overlap elements M_nn for every (k, b) pair, flattened to
    # shape (nkpts*nntot, nwann).
    mii = np.copy(m.reshape((nkpts*nntot, nwann, nwann)).diagonal(offset=0, axis1=1, axis2=2))
    # Flatten b-vectors to match and repeat the weights for every k-point.
    bvectors = np.reshape(bvectors, (-1, 3))
    bweights = np.tile(bweights, nkpts)
    # b . <r> for every b-vector / center pair, shape (nkpts*nntot, nwann).
    bvrv = np.einsum('bi,ri->br', bvectors, rv)
    if idx is not None:
        # Keep the per-Wannier-function axis, then select the requested ones.
        sprd_d = np.sum(bweights[:, np.newaxis] * (-1 * np.imag(np.log(mii)) - bvrv)**2, axis=0) / nkpts
        sprd_d = sprd_d[idx]
    else:
        sprd_d = np.sum(bweights[:, np.newaxis] * (-1 * np.imag(np.log(mii)) - bvrv)**2) / nkpts
    return sprd_d
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.