content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def pairs_to_annotations(annotation_pairs):
    """
    Flatten annotation pairs into a single list of annotations.

    :param annotation_pairs: list(AnnotationPair) - pairs whose ann1/ann2
        members may each be None
    :return: list(Annotation) - every non-None annotation, pair order preserved
    """
    collected = []
    for pair in annotation_pairs:
        for candidate in (pair.ann1, pair.ann2):
            if candidate is not None:
                collected.append(candidate)
    return collected
def sanitize_param(value, valid_characters=valid_chars, character_map=mapped_chars, invalid_character='X'):
    """Clean incoming parameters (strings or lists)"""
    def _clean(text):
        # Delegate per-string sanitisation with the caller's options.
        return sanitize_text(text, valid_characters=valid_characters,
                             character_map=character_map,
                             invalid_character=invalid_character)

    if isinstance(value, string_types):
        return _clean(value)
    if isinstance(value, list):
        return [_clean(item) for item in value]
    raise Exception('Unknown parameter type (%s)' % (type(value)))
def default_meta(inherit=True):
    """Initialize default meta for particular plugin.

    Default Meta is inherited by all children, in contrast to Meta, which is
    unique per plugin.

    :param inherit: Whether to copy the parent's default meta
    """
    def _apply(plugin_cls):
        # Trigger the plugin's own default-meta initialisation hook.
        plugin_cls._default_meta_init(inherit)
        return plugin_cls
    return _apply
def get_default_database_name():
    """
    Return the default database name from the database component.

    :rtype: str
    """
    database_component = get_component(DatabasePackage.COMPONENT_NAME)
    return database_component.get_default_database_name()
def npm_local_packages():
    """
    Get the local (path-referenced) packages from package.json.

    :return: a tuple of dicts (local dependencies, local devDependencies),
        each mapping package name -> matched local path
    """
    package_json = get_package_json()
    local_dependencies = _collect_local_packages(
        package_json.get("dependencies", {}))
    local_dev_dependencies = _collect_local_packages(
        package_json.get("devDependencies", {}))
    return local_dependencies, local_dev_dependencies


def _collect_local_packages(dependencies):
    """Map package name -> local path for entries matching LOCAL_PACKAGE."""
    local = {}
    for name, version in dependencies.items():
        match = LOCAL_PACKAGE.match(version)
        if match:
            # LOCAL_PACKAGE is expected to have exactly one capture group
            # (the local path); single-element unpack enforces that.
            [local[name]] = match.groups()
    return local
def lagrangian_descriptor(u, v, p_value=0.5):
    """
    Vector field equation for Lagrangian descriptor.

    Parameters
    ----------
    u : ndarray, shape(n,2)
        Phase-space points.
    v : ndarray, shape(n,2)
        Vector field at given point.
    p_value : float, optional
        Exponent in Lagrangian descriptor definition.
        0 is the action-based LD,
        0 < p_value < 1 is the Lp quasinorm,
        1 <= p_value < 2 is the Lp norm LD,
        2 is the arclength LD.
        The default is 0.5.

    Returns
    -------
    LD : ndarray, shape(n,1)
        Vector field for Lagrangian descriptor dimension.
    """
    if p_value == 0:
        # Action-based descriptor: |p * q-dot|.
        return np.abs(u[:, 1] * v[:, 0])
    if p_value > 0:
        # Lp (quasi)norm of the vector-field components.
        return np.sum(np.abs(v) ** p_value, axis=1)
    # Negative exponents are not defined; return a zero field.
    return np.zeros(len(u[:, 0]))
def xymatch(x1, y1, x2, y2, tol=None, nnearest=1):
    """Fast cross-matching of xy coordinates: from https://gist.github.com/eteq/4599814"""
    x1 = np.array(x1, copy=False)
    y1 = np.array(y1, copy=False)
    x2 = np.array(x2, copy=False)
    y2 = np.array(y2, copy=False)
    if x1.shape != y1.shape:
        raise ValueError('x1 and y1 do not match!')
    if x2.shape != y2.shape:
        raise ValueError('x2 and y2 do not match!')
    # Pack coordinates into (n, 2) arrays for the KD-tree.
    coords1 = np.empty((x1.size, 2))
    coords1[:, 0], coords1[:, 1] = x1, y1
    coords2 = np.empty((x2.size, 2))
    coords2[:, 0], coords2[:, 1] = x2, y2
    tree = KDT(coords2)
    if nnearest == 1:
        ds, idxs2 = tree.query(coords1)
    elif nnearest > 1:
        ds, neighbours = tree.query(coords1, nnearest)
        # Keep the index of the nnearest-th neighbour only.
        idxs2 = neighbours[:, -1]
    else:
        raise ValueError('invalid nnearest ' + str(nnearest))
    idxs1 = np.arange(x1.size)
    if tol is not None:
        # Discard matches farther apart than the tolerance.
        within = ds < tol
        idxs1 = idxs1[within]
        idxs2 = idxs2[within]
        ds = ds[within]
    return idxs1, idxs2, ds
def get_infection_probas_mean_field(probas, transmissions):
    """
    - probas[i,s] = P_s^i(t)
    - transmissions = csr sparse matrix of i, j, lambda_ij(t)
    - infection_probas[i] = sum_j lambda_ij P_I^j(t)
    """
    # Column 1 of probas holds P_I^j(t); a sparse mat-vec performs the sum over j.
    return transmissions.dot(probas[:, 1])
import time
import ast
def eval_task(algo, specific_testsets, measures, head_items, crossfold_index, save_path=None, load_path=None, uid_plus_iid_to_row=None):
    """
    Evaluate on specific testsets.
    This function exists to make testset evaluation easier to parallelize.

    :param algo: trained algorithm exposing a ``test(testset)`` method
    :param specific_testsets: dict mapping a key to a testset of (uid, iid, ...) rows
    :param measures: iterable of measure names; each is resolved as a lowercase
        attribute on the ``accuracy`` module
    :param head_items: head-item set forwarded to the 'list_metrics' measure to
        compute the tail variants
    :param crossfold_index: crossfold number used in prediction file names
    :param save_path: prefix to save predictions under (skipped when they were loaded)
    :param load_path: prefix to load cached predictions from instead of running ``algo.test``
    :param uid_plus_iid_to_row: optional pre-built "uid_iid" -> Prediction lookup;
        built from ``load_path`` when absent
    :return: list of [key, test_measures dict, test_time, testset size] per testset
    """
    ret = []
    # Build the prediction lookup from the cached file unless one was passed in.
    if load_path and uid_plus_iid_to_row is None:
        tic = time.time()
        load_from = '{}_seed0_fold{}_all_predictions.txt'.format(load_path, crossfold_index)
        print('load_from', load_from)
        with open(load_from, 'r') as file_handler:
            # Wrap each CSV line in brackets so it parses as a Python list literal.
            content = ['[' + x.strip('\n') + ']' for x in file_handler.readlines()]
            assert(content[0] == '[uid,iid,r_ui,est,details,crossfold_index]')
            # Drop the trailing crossfold_index column when rebuilding Prediction tuples.
            all_predictions = [Prediction(*ast.literal_eval(line)[:-1]) for line in content[1:]]
        uid_plus_iid_to_row = {}
        for prediction in all_predictions:
            # Key on "uid_iid" so testset rows can be matched back to predictions.
            uid_plus_iid = str(prediction[0]) + '_' + str(prediction[1])
            uid_plus_iid_to_row[uid_plus_iid] = prediction
        print('Loading predictions within eval_task took {}'.format(time.time() - tic))
    for key, specific_testset in specific_testsets.items():
        start_specific_testset = time.time()
        if uid_plus_iid_to_row:
            # if this dict is populated we should use it. if it is empty we can't use it, need to run algo.test
            predictions = []
            tic = time.time()
            if isinstance(specific_testset, np.ndarray):
                iterate_on = specific_testset.tolist()
            else:
                iterate_on = specific_testset
            for prediction in iterate_on:
                uid_plus_iid = str(prediction[0]) + '_' + str(prediction[1])
                predictions.append(uid_plus_iid_to_row[uid_plus_iid])
            #print('Took {} seconds to load {} predictions from uid_plus_iid_to_row'.format(time.time() - tic, len(predictions)))
        else:
            predictions = algo.test(specific_testset)
        if save_path and load_path is None and uid_plus_iid_to_row is None:  # if you just loaded the predictions, don't save them again, waste of time...
            with open('{}_seed0_fold{}_{}_predictions.txt'.format(save_path, crossfold_index, key), 'w') as file_handler:
                file_handler.write('uid,iid,r_ui,est,details,crossfold_index\n')
                for prediction in predictions:
                    file_handler.write(','.join([str(x) for x in prediction] + [str(crossfold_index)]) + '\n')
        if not predictions:
            # Empty testset: record zeroed measures/timing and move on.
            ret.append([key, {}, 0, 0])
            continue
        test_measures = {}
        for m in measures:
            tic = time.time()
            eval_func = getattr(accuracy, m.lower())
            result = eval_func(predictions, verbose=0)
            # NMV 10/26: rewriting this whole chunk b/c we refactored accuracy.py.
            #if 'ndcg' in m:
            if m == 'list_metrics':
                # Re-run the same measure restricted to tail (non-head) items.
                tail_result = eval_func(predictions, verbose=0, head_items=head_items)
                for metric_name in result.keys():
                    mean_val, frac_of_users = result[metric_name]
                    tail_mean_val, tail_frac = tail_result[metric_name]
                    test_measures[metric_name] = mean_val
                    test_measures[metric_name + '_frac'] = frac_of_users
                    test_measures['tail' + metric_name] = tail_mean_val
                    test_measures['tail' + metric_name + '_frac'] = tail_frac
                # sub_measures = m.split('_')
                # for i_sm, sub_measure in enumerate(sub_measures):
                #     mean_val, frac_of_users = result[i_sm]
                #     tail_mean_val, _ = tail_result[i_sm]
                #     test_measures[sub_measure] = mean_val
                #     test_measures[sub_measure + '_frac'] = frac_of_users
                #     test_measures['tail' + sub_measure] = tail_mean_val
            else:
                test_measures[m] = result
        test_time = time.time() - start_specific_testset
        ret.append([key, test_measures, test_time, len(specific_testset)])
    return ret
def newton(RJ, x0, verbose = False, rtol = 1.0e-6, atol = 1.0e-10, miter = 50,
        linesearch = 'none', bt_tau = 0.5, bt_c = 1.0e-4):
    """
    Manually-code newton-raphson so that I can output convergence info, if
    requested.

    Parameters:
      RJ          function return the residual + jacobian
      x0          initial guess

    Optional:
      verbose     verbose output
      rtol        relative tolerance
      atol        absolute tolerance
      miter       maximum iterations
      linesearch  available options: "none" and "backtracking"
      bt_tau      tau factor for backtracking line search
      bt_c        c factor for backtracking line search
    """
    # Initial residual and Jacobian; nR0 anchors the relative-tolerance test.
    R, J = RJ(x0)
    nR = la.norm(R)
    nR0 = nR
    x = np.copy(x0)
    i = 0
    if verbose:
        print("Iter.\tnR\t\tnR/nR0\t\tcond\t\tlinesearch")
        print("%i\t%e\t%e\t" % (i, nR, nR / nR0))
    # Iterate until converged either relative to the initial residual or absolutely.
    while (nR > rtol * nR0) and (nR > atol):
        # Newton step: solve J a = R; the update applied below is x <- x - f*a.
        a = la.solve(J, R)
        if linesearch == 'none':
            f = 1.0
        elif linesearch == 'backtracking':
            # Backtracking search along -a for a step factor satisfying Armijo.
            f = backtrack(RJ, R, J, x, -a, tau = bt_tau, c = bt_c, verbose = verbose)
        else:
            raise ValueError("Unknown linesearch type.")
        x -= (a * f)
        R, J = RJ(x)
        nR = la.norm(R)
        i += 1
        if verbose:
            print("%i\t%e\t%e\t%e\t%f" % (i, nR, nR / nR0,la.cond(J), f))
        if i > miter:
            if verbose:
                print("")
            # Give up once the iteration budget is exhausted.
            raise MaximumIterations()
    if verbose:
        print("")
    return x
from typing import List
from pathlib import Path
def gen_oltp_trace(
        tpcc_weight: str, tpcc_rates: List[int], pattern_iter: int) -> bool:
    """
    Generates the trace by running OLTP TPCC benchmark on the built database

    :param tpcc_weight: Weight for the TPCC workload
    :param tpcc_rates: Arrival rates for each phase in a pattern
    :param pattern_iter: Number of patterns
    :return: True when data generation succeeds
    """
    # Remove the old query_trace/query_text.csv
    Path(DEFAULT_QUERY_TRACE_FILE).unlink(missing_ok=True)
    # Server is running when this returns
    oltp_server = TestOLTPBench(DEFAULT_OLTP_SERVER_ARGS)
    db_server = oltp_server.db_instance
    db_server.run_db()
    # Download the OLTP repo and build it
    oltp_server.run_pre_suite()
    # Load the workload pattern - based on the tpcc.json in
    # testing/oltpbench/config
    test_case_config = DEFAULT_OLTP_TEST_CASE
    test_case_config["weights"] = tpcc_weight
    test_case = TestCaseOLTPBench(test_case_config)
    # Prep the test case build the result dir
    test_case.run_pre_test()
    # Repeat the arrival-rate pattern pattern_iter times and write it into the
    # benchmark XML config.
    rates = tpcc_rates * pattern_iter
    config_forecast_data(test_case.xml_config, rates)
    # Turn on query trace metrics tracing
    db_server.execute("SET query_trace_metrics_enable='true'", expect_result=False)
    # Run the actual test
    ret_val, _, stderr = run_command(test_case.test_command,
                                     test_case.test_error_msg,
                                     cwd=test_case.test_command_cwd)
    if ret_val != ErrorCode.SUCCESS:
        LOG.error(stderr)
        return False
    # Clean up, disconnect the DB
    db_server.stop_db()
    db_server.delete_wal()
    # A successful benchmark run must have produced the trace file.
    if not Path(DEFAULT_QUERY_TRACE_FILE).exists():
        LOG.error(
            f"Missing {DEFAULT_QUERY_TRACE_FILE} at CWD after running OLTP TPCC")
        return False
    return True
def plot_hmesh(mesh, box=None, proj='pc', figsize=[9,4.5],
               title=None, do_save=None, do_lsmask='fesom', color_lsmask=[0.6, 0.6, 0.6],
               linecolor='k', linewidth=0.2, linealpha=0.75, pos_extend=None,):
    """
    ---> plot FESOM2 horizontal mesh:
    ___INPUT:___________________________________________________________________
    mesh        : fesom2 mesh object, with all mesh information
    box         : None, list (default: None) regional limitation of plot [lonmin,
                  lonmax, latmin, latmax]
    proj        : str, (default: 'pc') which projection should be used, 'pc'=
                  ccrs.PlateCarree(), 'merc'=ccrs.Mercator(), 'nps'=
                  ccrs.NorthPolarStereo(), 'sps'=ccrs.SouthPolarStereo(),
                  'rob'=ccrs.Robinson()
    figsize     : list (default:[9,4.5] ), list with figure width and figure
                  height [w, h]
    title       : None, str,(default:None) give every plot panel a title string
                  IF: None ... no title is plotted
                  'descript' ... use data 'descript' attribute for title string
                  'string' ... use given string as title
    do_save     : None, str (default:None) if None figure will by not saved, if
                  string figure will be saved, strings must give directory and
                  filename where to save.
    do_lsmask   : None, str (default: 'fesom') plot land-sea mask.
                  If: None ... no land sea mask is used,
                  'fesom' ... overlay fesom shapefile land-sea mask using
                  color color_lsmask
                  'stock' ... use cartopy stock image as land sea mask
                  'bluemarble' ... use bluemarble image as land sea mask
                  'etopo' ... use etopo image as land sea mask
    color_lsmask: list, (default: [0.6, 0.6, 0.6]) RGB facecolor value for fesom
                  shapefile land-sea mask patch
    linecolor   : str, list, (default:'k') either color string or RGB list
    linewidth   : float, (default:0.2) linewidth of mesh
    linealpha   : float, (default:0.75) alpha value of mesh
    pos_extend  : None or list (default: None) axes position extent forwarded
                  to do_reposition_ax_cbar
    ___RETURNS:_________________________________________________________________
    fig         : returns figure handle
    ax          : returns list with axes handle
    ____________________________________________________________________________
    """
    fontsize = 12
    str_rescale = None
    n_rc = [1,1]
    pos_fac = 1.0
    pos_gap = [0.02, 0.02]
    #___________________________________________________________________________
    # make matrix with row colum index to know where to put labels
    rowlist = np.zeros((n_rc[0],n_rc[1]))
    collist = np.zeros((n_rc[0],n_rc[1]))
    for ii in range(0,n_rc[0]): rowlist[ii,:]=ii
    for ii in range(0,n_rc[1]): collist[:,ii]=ii
    rowlist = rowlist.flatten()
    collist = collist.flatten()
    #___________________________________________________________________________
    # create box if not exist
    if box is None: box = [ -180+mesh.focus, 180+mesh.focus, -90, 90 ]
    #___________________________________________________________________________
    # Create projection
    if proj=='pc':
        which_proj=ccrs.PlateCarree()
        which_transf = None
    elif proj=='merc':
        which_proj=ccrs.Mercator()
        which_transf = ccrs.PlateCarree()
    elif proj=='nps':
        which_proj=ccrs.NorthPolarStereo()
        which_transf = ccrs.PlateCarree()
    elif proj=='sps':
        which_proj=ccrs.SouthPolarStereo()
        which_transf = ccrs.PlateCarree()
    elif proj=='rob':
        which_proj=ccrs.Robinson()
        which_transf = ccrs.PlateCarree()
    #___________________________________________________________________________
    # create lon, lat ticks
    xticks,yticks = do_ticksteps(mesh, box)
    #___________________________________________________________________________
    # create figure and axes
    fig, ax = plt.subplots( n_rc[0],n_rc[1],
                            figsize=figsize,
                            subplot_kw =dict(projection=which_proj),
                            gridspec_kw=dict(left=0.06, bottom=0.05, right=0.95, top=0.95, wspace=0.05, hspace=0.05,),
                            constrained_layout=False, )
    #___________________________________________________________________________
    # flatt axes if there are more than 1
    if isinstance(ax, np.ndarray): ax = ax.flatten()
    else: ax = [ax]
    nax = len(ax)
    #___________________________________________________________________________
    # create mesh triangulation
    tri = Triangulation(np.hstack((mesh.n_x,mesh.n_xa)),
                        np.hstack((mesh.n_y,mesh.n_ya)),
                        np.vstack((mesh.e_i[mesh.e_pbnd_0,:],mesh.e_ia)))
    # Limit points to projection box
    # BUGFIX: the condition previously read `... or 'pc'`; the bare string
    # 'pc' is always truthy, so the else branch (projection-based clipping
    # for 'merc'/'rob') was unreachable. Compare against proj explicitly.
    if proj=='nps' or proj=='sps' or proj=='pc':
        e_idxbox = grid_cutbox_e(tri.x, tri.y, tri.triangles, box, which='hard')
    else:
        points = which_transf.transform_points(which_proj,
                                               tri.x[tri.triangles].sum(axis=1)/3,
                                               tri.y[tri.triangles].sum(axis=1)/3)
        xpts, ypts = points[:,0].flatten().tolist(), points[:,1].flatten().tolist()
        crs_pts = list(zip(xpts,ypts))
        fig_pts = ax[0].transData.transform(crs_pts)
        ax_pts = ax[0].transAxes.inverted().transform(fig_pts)
        x, y =  ax_pts[:,0], ax_pts[:,1]
        e_idxbox = (x>=-0.05) & (x<=1.05) & (y>=-0.05) & (y<=1.05)
    tri.triangles = tri.triangles[e_idxbox,:]
    #___________________________________________________________________________
    # loop over axes
    for ii in range(0,nax):
        #_______________________________________________________________________
        # set axes extent
        ax[ii].set_extent(box, crs=ccrs.PlateCarree())
        #_______________________________________________________________________
        # add grid mesh on top
        ax[ii].triplot(tri.x, tri.y, tri.triangles,
                       color=linecolor, linewidth=linewidth,
                       alpha=linealpha)
        #_______________________________________________________________________
        # add mesh land-sea mask
        ax[ii] = do_plotlsmask(ax[ii],mesh, do_lsmask, box, which_proj,
                               color_lsmask=color_lsmask, edgecolor=linecolor,
                               linewidth=0.5)
        #_______________________________________________________________________
        # add gridlines
        ax[ii] = do_add_gridlines(ax[ii], rowlist[ii], collist[ii],
                                  xticks, yticks, proj, which_proj)
        #_______________________________________________________________________
        # set title and axes labels
        if title is not None:
            # is title string:
            if isinstance(title,str) :
                ax[ii].set_title(title, fontsize=fontsize+2)
            # is title list of string
            elif isinstance(title,list):
                ax[ii].set_title(title[ii], fontsize=fontsize+2)
        nax_fin = ii+1
    #___________________________________________________________________________
    # delete axes that are not needed
    for jj in range(nax_fin, nax): fig.delaxes(ax[jj])
    #___________________________________________________________________________
    # repositioning of axes and colorbar
    ax, cbar = do_reposition_ax_cbar(ax, None, rowlist, collist, pos_fac,
                                     pos_gap, title=title, proj=proj, extend=pos_extend)
    #___________________________________________________________________________
    # save figure based on do_save contains either None or pathname
    do_savefigure(do_save, fig)
    #___________________________________________________________________________
    return(fig, ax)
def _get_feature_proportion(features_percentage: int,
indices_number: int) -> int:
"""
Computes a number of features based on the given percentage.
"""
assert (isinstance(features_percentage, int)
and 0 <= features_percentage <= 100
and isinstance(indices_number, int))
feature_proportion = int((features_percentage / 100) * indices_number)
if feature_proportion:
features_number = feature_proportion
else:
logger.warning(
'Since the number of features to be extracted was not given '
'%d%% of features will be used. This percentage translates to '
'0 features, therefore the number of features to be used is '
'overwritten to 1. To prevent this from happening, you should '
'either explicitly set the number of features via the '
'features_number parameter or increase the value of the '
'features_percentage parameter.', features_percentage)
features_number = feature_proportion + 1
return features_number | 78a5d5515b479b20fcfbbf25cdd2339f0bc8b99f | 26,612 |
def orthoProjectionMatrix(left, right, bottom, top, nearClip=0.01, farClip=100.,
                          out=None, dtype=None):
    """Compute an orthographic projection matrix with provided frustum
    parameters.

    Parameters
    ----------
    left : float
        Left clipping plane coordinate.
    right : float
        Right clipping plane coordinate.
    bottom : float
        Bottom clipping plane coordinate.
    top : float
        Top clipping plane coordinate.
    nearClip : float
        Near clipping plane distance from viewer.
    farClip : float
        Far clipping plane distance from viewer.
    out : ndarray, optional
        Optional output array. Must be same `shape` and `dtype` as the expected
        output if `out` was not specified.
    dtype : dtype or str, optional
        Data type for arrays, can either be 'float32' or 'float64'. If `None`
        is specified, the data type is inferred by `out`. If `out` is not
        provided, the default is 'float64'.

    Returns
    -------
    ndarray
        4x4 projection matrix

    See Also
    --------
    perspectiveProjectionMatrix : Compute a perspective projection matrix.

    Notes
    -----
    * The returned matrix is row-major. Values are floats with 32-bits of
      precision stored as a contiguous (C-order) array.
    """
    # Resolve the output buffer and its element type.
    if out is not None:
        dtype = np.dtype(out.dtype).type
        projMat = out
        projMat.fill(0.0)
    else:
        dtype = np.float64 if dtype is None else np.dtype(dtype).type
        projMat = np.zeros((4, 4,), dtype=dtype)
    two = dtype(2.0)
    width = right - left
    height = top - bottom
    depth = farClip - nearClip
    # Diagonal scale terms.
    projMat[0, 0] = two / width
    projMat[1, 1] = two / height
    projMat[2, 2] = -two / depth
    # Translation terms centering the frustum.
    projMat[0, 3] = -((right + left) / width)
    projMat[1, 3] = -((top + bottom) / height)
    projMat[2, 3] = -((farClip + nearClip) / depth)
    projMat[3, 3] = 1.0
    return projMat
def get_nsnames(zone):
    """Get list of nameservers names to query"""
    if Prefs.NO_NSSET:
        # -n suppresses NS discovery, so the -a list must be non-empty.
        if not Prefs.ADDITIONAL:
            print("ERROR: -n requires specifying -a")
            usage()
        return Prefs.ADDITIONAL
    # Discover the zone's NS set and append it, sorted, after the extras.
    answers = dns.resolver.resolve(zone, 'NS', 'IN')
    discovered = sorted([str(rr.target) for rr in answers.rrset])
    return Prefs.ADDITIONAL + discovered
def pbmcs_10x_cite_seq(
    save_path: str = "data/",
    protein_join: str = "inner",
    run_setup_anndata: bool = True,
) -> anndata.AnnData:
    """
    Filtered PBMCs from 10x Genomics profiled with RNA and protein.

    Datasets were filtered for doublets and other outliers as in
    https://github.com/YosefLab/totalVI_reproducibility/blob/master/data/data_filtering_scripts/pbmc_10k/pbmc_10k.py

    Parameters
    ----------
    save_path
        Location to use when saving/loading the data.
    protein_join
        Whether to take an inner join or outer join of proteins
    run_setup_anndata
        If true, runs setup_anndata() on dataset before returning

    Returns
    -------
    AnnData with batch info (``.obs['batch']``),
    and protein expression (``.obsm["protein_expression"]``)
    Missing protein values are zero, when ``protein_join == "outer`` and are identified during ``AnnData`` setup.

    Examples
    --------
    >>> import scvi
    >>> adata = scvi.data.pbmcs_10x_cite_seq()
    """
    loader_options = dict(
        save_path=save_path,
        protein_join=protein_join,
        run_setup_anndata=run_setup_anndata,
    )
    return _load_pbmcs_10x_cite_seq(**loader_options)
def binom(n, k):
    """Binomial coefficients for :math:`n choose k`

    :param n,k: non-negative integers
    :complexity: O(k)
    """
    # Multiplicative formula; division is exact at every step because the
    # running product is always a binomial coefficient.
    result = 1
    for step in range(1, k + 1):
        result = (result * (n - step + 1)) // step
    return result
import os
def resource_map():
    """Dynamically generate a map of resources that will be managed for a
    single hook execution.

    Starts from BASE_RESOURCE_MAP and prunes/augments entries based on the
    detected OpenStack release, apache version, snap vs. deb install, and
    whether keystone runs under apache (WSGI).
    """
    resource_map = deepcopy(BASE_RESOURCE_MAP)
    release = os_release('keystone')
    if CompareOpenStackReleases(release) < 'liberty':
        # policy.json is not managed before liberty.
        resource_map.pop(POLICY_JSON)
    # Apache >= 2.4 uses conf-available; keep only the matching config template.
    if os.path.exists('/etc/apache2/conf-available'):
        resource_map.pop(APACHE_CONF)
    else:
        resource_map.pop(APACHE_24_CONF)
    if snap_install_requested():
        # Snap installs are fronted by nginx, so apache configs do not apply.
        if APACHE_CONF in resource_map:
            resource_map.pop(APACHE_CONF)
        if APACHE_24_CONF in resource_map:
            resource_map.pop(APACHE_24_CONF)
    else:
        # Deb installs do not use the snap's nginx configs.
        if KEYSTONE_NGINX_CONF in resource_map:
            resource_map.pop(KEYSTONE_NGINX_CONF)
        if KEYSTONE_NGINX_SITE_CONF in resource_map:
            resource_map.pop(KEYSTONE_NGINX_SITE_CONF)
    if snap_install_requested():
        # Swap deb service names for their snap counterparts on every entry.
        for cfile in resource_map:
            svcs = resource_map[cfile]['services']
            if 'apache2' in svcs:
                svcs.remove('apache2')
            if 'keystone' in svcs:
                svcs.remove('keystone')
            svcs.append('snap.keystone.nginx')
            svcs.append('snap.keystone.uwsgi')
    if run_in_apache():
        if not snap_install_requested():
            # Keystone runs as a WSGI app under apache2, not as its own service.
            for cfile in resource_map:
                svcs = resource_map[cfile]['services']
                if 'keystone' in svcs:
                    svcs.remove('keystone')
                if 'apache2' not in svcs:
                    svcs.append('apache2')
            # Add the WSGI vhost config rendered for the apache2 service.
            resource_map[WSGI_KEYSTONE_API_CONF] = {
                'contexts': [
                    context.WSGIWorkerConfigContext(
                        name="keystone",
                        admin_script='/usr/bin/keystone-wsgi-admin',
                        public_script='/usr/bin/keystone-wsgi-public'),
                    keystone_context.KeystoneContext(),
                    keystone_context.KerberosContext()],
                'services': ['apache2']
            }
    if enable_memcache(release=release):
        # Token caching requires a managed memcached config and service.
        resource_map[MEMCACHED_CONF] = {
            'contexts': [context.MemcacheContext()],
            'services': ['memcached']}
    return resource_map
def get_not_found_swagger_schema():
    """Build a Swagger schema class describing a 404 response body."""
    class NotFoundResponseModel(Schema):
        """Response model with a single human-readable message field."""
        type = "object"
        properties = {"message": {"type": "string"}}

    return NotFoundResponseModel
def find_project(testrun_url):
    """
    Find a project name from this Polarion testrun URL.

    :param testrun_url: Polarion test run URL
    :returns: project name eg "CEPH" or "ContainerNativeStorage"
    """
    # The first 59 characters are the fixed Polarion host/path prefix;
    # the project name is the next path component.
    tail = testrun_url[59:]
    slash_at = tail.index('/')
    return tail[:slash_at]
def c2ip(c2, uname):
    """Return the complete ip address for c2 with the username substituted."""
    address_template = c2['ip_address']
    return address_template.replace('USER', uname)
def beacon(config):
    """
    Watch the configured directories

    Example Config

    .. code-block:: yaml

        beacons:
          watchdog:
            - directories:
                /path/to/dir:
                  mask:
                    - create
                    - modify
                    - delete
                    - move

    The mask list can contain the following events (the default mask is create,
    modify delete, and move):

    * create  - File or directory is created in watched directory
    * modify  - The watched directory is modified
    * delete  - File or directory is deleted from watched directory
    * move    - File or directory is moved or renamed in the watched directory
    """
    # Merge the list of config fragments into a single dict.
    merged_config = {}
    for fragment in config:
        merged_config.update(fragment)
    queue = _get_queue(merged_config)
    # Drain the pending watchdog events into salt events.
    events = []
    while queue:
        events.append(to_salt_event(queue.popleft()))
    return events
import json
def scenario(request):
    """
    Retrieve the parameters and nodes for a scenario

    Parameters:
    model_uuid (uuid): required
    scenario_id (int): required

    Returns: HttpResponse

    Example:
    GET: /component/scenario/
    """
    model_uuid = request.GET['model_uuid']
    scenario_id = request.GET['scenario_id']
    # Remember the active scenario for subsequent requests in this session.
    request.session['scenario_id'] = scenario_id
    model = Model.by_uuid(model_uuid)
    can_edit = model.handle_view_access(request.user)
    # Scenario Parameters
    colors = model.color_lookup
    parameters = Scenario_Param.objects.filter(
        model_id=model.id, scenario_id=scenario_id,
        run_parameter__user_visibility=True)
    # All Loc Techs
    loc_techs = []
    lts = model.loc_techs
    lts = lts.values('id', 'technology_id', 'technology__pretty_name',
                     'technology__pretty_tag',
                     'technology__abstract_tech__icon',
                     'location_1__pretty_name', 'location_2__pretty_name')
    for lt in lts:
        tech_id = lt["technology_id"]
        # Fall back to black when the technology has no configured color.
        color = colors[tech_id] if tech_id in colors.keys() else "#000"
        loc_techs.append({
            "id": lt['id'],
            "technology_id": lt['technology_id'],
            "tag": lt["technology__pretty_tag"],
            "technology": lt["technology__pretty_name"],
            "location_1": lt["location_1__pretty_name"],
            "location_2": lt["location_2__pretty_name"],
            "color": color,
            "icon": lt["technology__abstract_tech__icon"]})
    # Active Loc Techs
    active_lts = Scenario_Loc_Tech.objects.filter(scenario_id=scenario_id)
    active_lt_ids = list(active_lts.values_list("loc_tech_id", flat=True))
    # Filters Data: distinct technologies, tags and locations for the UI filters.
    unique_techs = [v['technology'] for v in loc_techs]
    unique_tags = [v['tag'] for v in loc_techs]
    locations = [(v['location_1'],
                  v['location_2']) for v in loc_techs]
    unique_locations = [item for sublist in locations for item in sublist]
    context = {
        "model": model,
        "parameters": parameters,
        "can_edit": can_edit}
    # Pre-render the settings panel so its HTML can be embedded in the payload.
    scenario_settings = list(render(request,
                                    'scenario_settings.html',
                                    context))[0]
    context = {
        "model": model,
        "colors": colors,
        "carrier_ins": model.carrier_lookup(True),
        "carrier_outs": model.carrier_lookup(False),
        "active_lt_ids": active_lt_ids,
        "loc_techs": loc_techs,
        "scenario_id": scenario_id,
        "unique_techs": sorted(filter(None, set(unique_techs))),
        "unique_tags": sorted(filter(None, set(unique_tags))),
        "unique_locations": sorted(filter(None, set(unique_locations))),
        "can_edit": can_edit}
    # Pre-render the configuration panel the same way.
    scenario_configuration = list(render(request,
                                         'scenario_configuration.html',
                                         context))[0]
    payload = {
        'model_id': model.id,
        'scenario_id': scenario_id,
        'loc_techs': loc_techs,
        'active_lt_ids': active_lt_ids,
        'scenario_settings': scenario_settings.decode('utf-8'),
        'scenario_configuration': scenario_configuration.decode('utf-8')}
    return HttpResponse(json.dumps(payload, indent=4),
                        content_type="application/json")
async def already_voted(replied: str, user_id: str, db: dataset.Database) -> bool:
    """Search in the database for an existing vote of the user on the replied message

    Args:
        replied: id of the message which the vote is a reply
        user_id: id of the user who's voting

    Returns:
        The return value. True if the user already voted on the message, False otherwise.
    """
    messages = db['messages']
    existing_vote = messages.find_one(replied=replied, user_id=user_id)
    return existing_vote is not None
def create_bi_sequence_embedding(inputs, seq_lengths, repr_dim, vocab_size, emb_name, rnn_scope, reuse_scope=False):
    """
    Bidirectional encoding

    :param inputs: tensor [d1, ... ,dn] of int32 symbols
    :param seq_lengths: [s1, ..., sn] lengths of instances in the batch
    :param repr_dim: dimension of embeddings
    :param vocab_size: number of symbols
    :param emb_name: variable name for the shared embedding matrix
    :param rnn_scope: base variable-scope name; "_FW"/"_BW" suffixes are added
    :param reuse_scope: whether to reuse existing RNN variables
    :return: return outputs_fw, last_state_fw, outputs_bw, last_state_bw,
        embedding_matrix
    """
    # use a shared embedding matrix for now, test if this outperforms separate matrices later
    embedding_matrix = tf.Variable(tf.random_uniform([vocab_size, repr_dim], -0.1, 0.1, dtype=_FLOAT_TYPE),
                                   name=emb_name, trainable=True, dtype=_FLOAT_TYPE)
    # [batch_size, max_seq_length, input_size]
    embedded_inputs = tf.nn.embedding_lookup(embedding_matrix, inputs)
    # dummy test to see if the embedding lookup is working
    # Reduce along dimension 1 (`n_input`) to get a single vector (row) per input example
    # embedding_aggregated = tf.reduce_sum(embedded_inputs, [1])
    ### first FW LSTM ###
    with tf.variable_scope(rnn_scope + "_FW") as scope:
        if reuse_scope == True:
            scope.reuse_variables()
        cell_fw = tf.nn.rnn_cell.LSTMCell(repr_dim, state_is_tuple=True)
        #cell_fw = tf.contrib.rnn.AttentionCellWrapper(cell_fw, 3, state_is_tuple=True)  # not working
        cell_fw = tf.nn.rnn_cell.DropoutWrapper(cell=cell_fw, output_keep_prob=0.9)
        # outputs shape: [batch_size, max_time, cell.output_size]
        # last_states shape: [batch_size, cell.state_size]
        outputs_fw, last_state_fw = tf.nn.dynamic_rnn(
            cell=cell_fw,
            dtype=_FLOAT_TYPE,
            sequence_length=seq_lengths,
            inputs=embedded_inputs)
    # Reverse along the time axis so the second LSTM reads the sequence backwards.
    embedded_inputs_rev = tf.reverse(embedded_inputs, [False, True, False])  # reverse the sequence
    ### first BW LSTM ###
    with tf.variable_scope(rnn_scope + "_BW") as scope:
        if reuse_scope == True:
            scope.reuse_variables()
        cell_bw = tf.nn.rnn_cell.LSTMCell(repr_dim, state_is_tuple=True)
        cell_bw = tf.nn.rnn_cell.DropoutWrapper(cell=cell_bw, output_keep_prob=0.9)
        # outputs shape: [batch_size, max_time, cell.output_size]
        # last_states shape: [batch_size, cell.state_size]
        outputs_bw, last_state_bw = tf.nn.dynamic_rnn(
            cell=cell_bw,
            dtype=_FLOAT_TYPE,
            sequence_length=seq_lengths,
            inputs=embedded_inputs_rev)
    return outputs_fw, last_state_fw, outputs_bw, last_state_bw, embedding_matrix
import mmap
async def input_checker(user_guess: str) -> bool:
    """Check if the user's input is actually a word.

    Method for checking if input is in text file: https://stackoverflow.com/a/4944929
    """
    # Guesses must be exactly five characters long.
    if len(user_guess) != 5:
        return False
    # Memory-map the word list and scan it for the encoded guess.
    with open(wordfile_path, encoding='utf-8', errors='ignore') as f, \
            mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) as s:
        return s.find(str.encode(user_guess)) != -1
def pascal_voc_vgg16_config():
    """Specify the parameters to tune below."""
    mc = base_model_config('PASCAL_VOC')
    mc.DEBUG_MODE = False
    # Data Augmentation
    #mc.LOSS_TYPE = 'YOLO'
    mc.DATA_AUG_TYPE = 'YOLO'
    # Network Architecture
    mc.BN = True
    mc.IMAGE_WIDTH = 416
    mc.IMAGE_HEIGHT = 416
    mc.BATCH_SIZE = 32
    # Optimisation hyper-parameters
    mc.WEIGHT_DECAY = 0.0001
    mc.LEARNING_RATE = 1e-3
    mc.DECAY_STEPS = 2e4
    mc.MAX_GRAD_NORM = 1.0
    mc.MOMENTUM = 0.9
    mc.LR_DECAY_FACTOR = 0.5
    # Loss-term coefficients (bbox regression, confidence pos/neg, class)
    mc.LOSS_COEF_BBOX = 5.0
    mc.LOSS_COEF_CONF_POS = 75.0
    mc.LOSS_COEF_CONF_NEG = 100.0
    mc.LOSS_COEF_CLASS = 1.0
    # Detection thresholds and NMS settings
    mc.PLOT_PROB_THRESH = 0.4
    mc.NMS_THRESH = 0.4
    mc.PROB_THRESH = 0.005
    mc.TOP_N_DETECTION = 64
    # Data augmentation: random image drift in pixels
    mc.DATA_AUGMENTATION = True
    mc.DRIFT_X = 50
    mc.DRIFT_Y = 50
    mc.EXCLUDE_HARD_EXAMPLES = False
    # Anchor boxes derived from the config above
    mc.ANCHOR_BOX = set_anchors(mc)
    mc.ANCHORS = len(mc.ANCHOR_BOX)
    mc.ANCHOR_PER_GRID = 9
    return mc
def computeBasisFunctionsReferenceElement(edge_orientation, face_orientation, Nord, points):
    """Compute the basis function for the reference element.
    :param ndarray edge_orientation: orientation for edges
    :param ndarray face_orientation: orientation for faces
    :param int Nord: polynomial order of nedelec basis functions
    :param ndarray points: spatial points at which basis functions will be computed
    :return: basis functions on reference element
    :rtype: ndarray
    """
    # Get number of points; promote a single 3-vector to a (1, 3) array so the
    # loop below can treat both cases uniformly.
    if points.ndim == 1:
        num_points = 1
        points = points.reshape((1, 3))
    else:
        num_points = points.shape[0]
    # Compute number of dofs for element.
    # FIX: np.int and np.float were deprecated aliases for the builtins and
    # were removed in NumPy 1.24; use int/float directly.
    num_dof_in_element = int(Nord * (Nord + 2) * (Nord + 3) / 2)
    # Allocate: one 3-vector per (dof, point).
    basis = np.zeros((3, num_dof_in_element, num_points), dtype=float)
    for i in np.arange(num_points):
        # Get gauss point coordinates
        X = points[i, :]
        # Polynomial order (6 edges, 4 faces, 1 volume)
        Nord_vector = np.ones(11, dtype=int) * Nord
        # Edge orientation (6 edges)
        NoriE = edge_orientation
        # Face orientation (4 faces)
        NoriF = face_orientation
        # Compute basis for iPoint
        NrdofE, ShapE, CurlE = shape3DETet(X, Nord_vector, NoriE, NoriF)
        # Verify consistency of number of dofs for this point
        if (NrdofE != num_dof_in_element):
            Print.master('     Number of DOFs is not consistent')
            exit(-1)
        # Niref=Ni in reference element
        Niref = ShapE[0:3, 0:NrdofE]
        # Store basis functions for i
        basis[:, :, i] = Niref
    return basis
def load_wxbmp(name="", mask=False, image=None, maskpos=(0, 0), f=None, retry=True, can_loaded_scaledimage=True,
               noscale=False, up_scr=None):
    """Return a wx.Bitmap, optionally masked with the colour found at maskpos.

    The image may come from a file path, an image "code" embedded in the
    path, an already-open file object ``f``, or a pre-loaded ``wx.Image``.
    On any load failure an empty 0x0 bitmap is returned instead of raising.
    NOTE(review): Python 2 code (``<>``, ``u"..."``, ``xrange``); keep as-is.
    """
    if sys.platform <> "win32":
        assert threading.currentThread() <> cw.cwpy
    if not f and (not cw.binary.image.code_to_data(name) and not os.path.isfile(name)) and not image:
        return wx.EmptyBitmap(0, 0)
    if cw.cwpy and cw.cwpy.rsrc:
        name = cw.cwpy.rsrc.get_filepath(name)
    if up_scr is None:
        up_scr = cw.UP_SCR # use the render-scale image even in dialogs so it matches the game screen
    name, up_scr = find_scaledimagepath(name, up_scr, can_loaded_scaledimage, noscale)
    ext = ""
    haspngalpha = False
    bmpdepth = 0
    maskcolour = None
    isjpg = False
    if mask:
        if not image:
            try:
                # Load raw bytes from whichever source was provided.
                if f:
                    data = f.read()
                elif cw.binary.image.path_is_code(name):
                    data = cw.binary.image.code_to_data(name)
                else:
                    if not os.path.isfile(name):
                        return wx.EmptyBitmap(0, 0)
                    with open(name, "rb") as f2:
                        data = f2.read()
                        f2.close()
                if not data:
                    return wx.EmptyBitmap(0, 0)
                ext = get_imageext(data)
                if ext == ".png":
                    haspngalpha = cw.image.has_pngalpha(data)
                bmpdepth = cw.image.get_bmpdepth(data)
                # Apply CardWirthNext bitmap workarounds; either may hand back
                # an already-decoded wx.Image instead of bytes.
                data, ok1 = cw.image.fix_cwnext32bitbitmap(data)
                data, ok2 = cw.image.fix_cwnext16bitbitmap(data)
                if isinstance(data, wx.Image):
                    image = data
                elif name and ok1 and ok2 and not cw.binary.image.path_is_code(name):
                    # BUG: wx.ImageFromStream() through io.BytesIO() is still about
                    # 10x slower than reading the file a second time, so load
                    # straight from the path here.
                    image = wx.Image(name)
                else:
                    with io.BytesIO(data) as f2:
                        image = wx.ImageFromStream(f2, wx.BITMAP_TYPE_ANY, -1)
                        f2.close()
            except:
                print_ex()
                print u"画像が読み込めません(load_wxbmp)", name
                return wx.EmptyBitmap(0, 0)
        def set_mask(image, maskpos):
            # Use the pixel at maskpos as the transparent colour.
            maskpos = convert_maskpos(maskpos, image.Width, image.Height)
            r = image.GetRed(maskpos[0], maskpos[1])
            g = image.GetGreen(maskpos[0], maskpos[1])
            b = image.GetBlue(maskpos[0], maskpos[1])
            image.SetMaskColour(r, g, b)
            return (r, g, b)
        if not image.IsOk():
            return wx.EmptyBitmap(0, 0)
        if not haspngalpha and not image.HasAlpha() and not image.HasMask():
            maskcolour = set_mask(image, maskpos)
        wxbmp = image.ConvertToBitmap()
        # Some 255-colour GIFs and the like carry a mask colour that does not
        # exist in the palette; in that case fall back to masking with the
        # top-left pixel colour as usual.
        # If this workaround ever causes problems, the processing from the
        # following if statement onward should be removed.
        if mask and image.HasMask() and image.CountColours() <= 255:
            palette = wxbmp.GetPalette()
            if not palette is None:
                mask = (image.GetMaskRed(), image.GetMaskGreen(), image.GetMaskBlue())
                maskok = False
                for pixel in xrange(palette.GetColoursCount()):
                    if palette.GetRGB(pixel) == mask:
                        maskok = True
                        break
                if not maskok:
                    maskcolour = set_mask(image, maskpos)
                    wxbmp = image.ConvertToBitmap()
    elif image:
        wxbmp = image.ConvertToBitmap()
    else:
        try:
            wxbmp = wx.Bitmap(name)
        except:
            print u"画像が読み込めません(load_wxbmp)", name
            return wx.EmptyBitmap(0, 0)
    if bmpdepth == 1 and mask:
        wxbmp.bmpdepthis1 = True
    if maskcolour:
        wxbmp.maskcolour = maskcolour
    wxbmp.scr_scale = up_scr
    return wxbmp | d1611ac0740049d42495dd49e31cca869c73022c | 26,628 |
def get_xixj(nodes_disjoint, knn, k):
    """
    Get the features of each edge in the graph.
    Parameters
    ----------
    nodes_disjoint : tf.Tensor
        shape (None, n_features)
    knn : tf.Tensor
        shape (None, k)
        int32, for each point, the indices of its k nearest neighbors.
    k : int
        Number of nearest neighbors (excluding self).
    Returns
    -------
    tuple
        Two Tensors with shape (None, k, n_features).
        --> [?, i, j] describes the edge between points i and j.
        The first tensor is xi, i.e. for each edge the central point.
        The second tensor is xj, i.e. for each edge the other point.
    """
    # Repeat every node k times along a new axis: the "central" end of each edge.
    expanded = tf.expand_dims(nodes_disjoint, axis=-2)
    nodes_central = tf.tile(expanded, [1, k, 1])
    # Gather the neighbor features for the other end of each edge.
    # TODO this produces a 'Converting sparse IndexedSlices to a dense Tensor
    # of unknown shape.' warning, because nodes_disjoint has an unknown shape
    # (None, n_features) and is gathered along its first axis.
    nodes_neighbors = tf.gather(nodes_disjoint, knn)
    return nodes_central, nodes_neighbors
def t_returns(inv, pfl, prices, date):
    """ Computes the total return of a portfolio.
    Parameters:
        - `inv` : :class:`list` investment session `db` row
        - `pfl` : :class:`string` name of the portfolio
        - `prices` : :class:`dict` latest investment's ticker prices
        - `date` : :class:`string` date of the purchase
    Computes the sum of the shares when the investment was made and the sum of
    the shares now. The absolute change and returns are calculated with the
    same formulas as in :py:func:`check.returns`.
    Returns a :class:`dict` containing the total initial price, the new
    price, the absolute change, the returns and the date of the purchase.
    """
    holdings = inv[pfl]
    # Portfolio value at purchase time (inv['prc']) and at current prices.
    t_old = round(sum(qty * inv['prc'][key] for key, qty in holdings.items()), 1)
    t_new = round(sum(qty * prices[key] for key, qty in holdings.items()), 1)
    # FIX: renamed from `abs`, which shadowed the builtin of the same name.
    abs_change = round(t_new - t_old, 1)
    # NOTE(review): raises ZeroDivisionError if the initial value is 0 —
    # presumably impossible for a real purchase; confirm with callers.
    rel = round(((t_new - t_old) / t_old) * 100, 2)
    return {'abs': abs_change, 'rel': rel, 'old': t_old,
            'new': t_new, 'qty': 'NA', 'date': date}
def center_crop_pad(img, buffer=0, min_mean=10):
    """dynamically center crop image, cropping away black space left and right"""
    # Channel-averaged grayscale view of the image.
    gray = np.array(img).mean(-1)
    height, width = gray.shape
    # Columns whose mean intensity falls below min_mean count as "black".
    col_means = gray.mean(0)
    dark_cols = np.where(col_means < min_mean)[0]
    # Innermost dark column on each side of the image centre bounds the crop.
    left = dark_cols[dark_cols < width // 2].max()
    right = dark_cols[dark_cols > width // 2].min()
    return expand2square(img.crop((left - buffer, 0, right + buffer, height)))
import os
def resolve_settings_file():
    """Returns path to buildtest settings file that should be used. If there
    is a user defined buildtest settings ($HOME/.buildtest/config.yml) it will
    be honored, otherwise default settings from buildtest will be used.
    """
    # Prefer the user's override when present; otherwise fall back to defaults.
    user_file_exists = os.path.exists(USER_SETTINGS_FILE)
    return USER_SETTINGS_FILE if user_file_exists else DEFAULT_SETTINGS_FILE
def DiagPart(a):
    """
    Diag op that returns only the diagonal elements.
    """
    # Op outputs are returned as a 1-tuple, matching the op-registry convention.
    diagonal = np.diagonal(a)
    return (diagonal,)
def prometh_hosts():
    """
    Look up the Prometheus host list from Apollo configuration.
    :return: list of host strings
    """
    # Pick the config key based on whether we reach Apollo over the
    # external network.
    external = env_file_conf('EXTERNAL', conf_type='bool')
    conf_name = 'prome_external_host' if external else 'prome_host'
    if external:
        print('Conneting to apollo from external net!')
    apollo_query = ApolloQuery()
    try:
        prome_hosts = apollo_query.apo_config(conf_name).split(',')
    except Exception as e:
        # Any Apollo failure is fatal for this tool.
        print('Getting prometheus addr from apollo failed!{}'.format(e.__str__()))
        exit(1)
    print('Debug prometheus hosts: {}'.format(prome_hosts))
    return prome_hosts
def _ensure_webhook_access(func):
    """Decorate WS function to ensure user owns the webhook ID."""
    @callback
    @wraps(func)
    def with_webhook_access(hass, connection, msg):
        # Look up the config entry registered for this webhook ID and make
        # sure it belongs to the user behind this websocket connection.
        entry = hass.data[DOMAIN][DATA_CONFIG_ENTRIES].get(msg["webhook_id"])
        if entry is None:
            connection.send_error(
                msg["id"], websocket_api.ERR_NOT_FOUND, "Webhook ID not found"
            )
        elif entry.data[CONF_USER_ID] != connection.user.id:
            connection.send_error(
                msg["id"],
                websocket_api.ERR_UNAUTHORIZED,
                "User not linked to this webhook ID",
            )
        else:
            func(hass, connection, msg)
    return with_webhook_access
import copy
def episode_to_examples(episode, histsz):
    """Converts an episode (list of Parleys) into self-feeding compatible examples.
    WARNING: we no longer require a histsz when making a self-feeding file.
    Shortening of the history is typically done in the teacher file or in
    interactive mode.
    """
    examples = []
    history = []
    for parley in episode:
        history.append(parley.context)
        # Build the context string from the running history:
        #   histsz < 0  -> the full history
        #   histsz == 0 -> a null context (the bot sees nothing)
        #   histsz > 0  -> only the last histsz utterances
        # Speaker tokens are added so the bot (p2) can tell turns apart;
        # the last speaker is always the human (p1).
        if histsz < 0:
            context = add_person_tokens(history, last_speaker=1)
        elif histsz == 0:
            context = '__null__'
        else:
            context = add_person_tokens(history[-histsz:], last_speaker=1)
        examples.append(
            Parley(
                context,
                parley.response,
                parley.reward,
                copy.deepcopy(parley.candidates),
            )
        )
        # The bot's response joins the history for subsequent turns.
        history.append(parley.response)
    return examples
import torch
def camera_from_polyhedron(polyhedronFcn, camera_distance=1, to_spherical=False, device='cuda:0'):
    """
    Returns the positions of a camera lying on the vertices of a given polyhedron.
    Parameters
    ----------
    polyhedronFcn : callable
        the polyhedron creation function
    camera_distance : float (optional)
        the camera distance from the origin (default is 1)
    to_spherical : bool (optional)
        if True, converts the coordinates into spherical (default is False)
    device : str or torch.device
        the device the tensors will be stored to (default is 'cuda:0')
    Returns
    -------
    (Tensor, LongTensor)
        the positions and the edge tensor of the camera views
    """
    P, T = polyhedronFcn(device=device)[0:2]
    # Rotate the unit-sphere vertices slightly about the x axis, then push
    # them out to the requested camera distance.
    angle = PI / 100
    rot_x = torch.tensor([[1, 0, 0],
                          [0, cos(angle), -sin(angle)],
                          [0, sin(angle), cos(angle)]], dtype=torch.float, device=device)
    views = normr(P).mm(rot_x.t()) * camera_distance
    if to_spherical:
        views = cart2sph(views)
    return views, poly2edge(T)[0]
def cifar10(args, dataset_paths):
    """ Loads the CIFAR-10 dataset.

    Builds train/valid/test dataloaders: the full training set is split into
    a reduced train set plus a class-balanced validation set (500 samples per
    class), with weighted sampling to balance classes during training.
    Returns: train/valid/test set split dataloaders (dict keyed by
    'train', 'train_valid', 'valid', 'test').
    """
    # Per-split preprocessing; train adds flip/crop augmentation.
    transf = {
        'train': transforms.Compose([
            transforms.RandomHorizontalFlip(0.5),
            transforms.RandomCrop((args.crop_dim, args.crop_dim), padding=args.padding),
            transforms.ToTensor(),
            # Standardize()]),
            transforms.Normalize((0.49139968, 0.48215841, 0.44653091),
                                 (0.24703223, 0.24348513, 0.26158784))]),
        'test': transforms.Compose([
            transforms.ToTensor(),
            # Standardize()])}
            transforms.Normalize((0.49139968, 0.48215841, 0.44653091),
                                 (0.24703223, 0.24348513, 0.26158784))])
    }
    config = {'train': True, 'test': False}
    datasets = {i: CIFAR10(root=dataset_paths[i], transform=transf[i],
                           train=config[i], download=True) for i in config.keys()}
    # weighted sampler weights for full(f) training set
    f_s_weights = sample_weights(datasets['train'].targets)
    # return data, labels dicts for new train set and class-balanced valid set
    data, labels = random_split(data=datasets['train'].data,
                                labels=datasets['train'].targets,
                                n_classes=10,
                                n_samples_per_class=np.repeat(500, 10).reshape(-1))
    # define transforms for train set (without valid data)
    # NOTE: ToPILImage first because CustomDataset serves raw arrays.
    transf['train_'] = transforms.Compose([
        transforms.ToPILImage(),
        transforms.RandomHorizontalFlip(0.5),
        transforms.RandomCrop((args.crop_dim, args.crop_dim), padding=args.padding),
        transforms.ToTensor(),
        # Standardize()])
        transforms.Normalize((0.49139968, 0.48215841, 0.44653091),
                             (0.24703223, 0.24348513, 0.26158784))])
    # define transforms for class-balanced valid set (no augmentation)
    transf['valid'] = transforms.Compose([
        transforms.ToPILImage(),
        transforms.ToTensor(),
        # Standardize()])
        transforms.Normalize((0.49139968, 0.48215841, 0.44653091),
                             (0.24703223, 0.24348513, 0.26158784))])
    # save original full training set
    datasets['train_valid'] = datasets['train']
    # make new training set without validation samples
    datasets['train'] = CustomDataset(data=data['train'],
                                      labels=labels['train'], transform=transf['train_'])
    # make class balanced validation set
    datasets['valid'] = CustomDataset(data=data['valid'],
                                      labels=labels['valid'], transform=transf['valid'])
    # weighted sampler weights for new training set
    s_weights = sample_weights(datasets['train'].labels)
    # Per-split samplers: weighted for training splits, sequential otherwise.
    config = {
        'train': WeightedRandomSampler(s_weights,
                                       num_samples=len(s_weights), replacement=True),
        'train_valid': WeightedRandomSampler(f_s_weights,
                                             num_samples=len(f_s_weights), replacement=True),
        'valid': None, 'test': None
    }
    if args.distributed:
        # Distributed training replaces the weighted samplers entirely.
        config = {'train': DistributedSampler(datasets['train']),
                  'train_valid': DistributedSampler(datasets['train_valid']),
                  'valid': None, 'test': None}
    dataloaders = {i: DataLoader(datasets[i], sampler=config[i],
                                 num_workers=8, pin_memory=True, drop_last=True,
                                 batch_size=args.batch_size) for i in config.keys()}
    return dataloaders | 867d3a6e7ff4ed72c02583c2eafab2885218c0ad | 26,638 |
import sys
import os
import shutil
import random
def cleanthread(thread, settings):
    """
    Reset thread parameters in preparation for the next step of aimless shooting after the previous one has completed.
    Add the next step to the itinerary if appropriate. Also write to history and output files, implement fork if
    necessary, and terminate the thread if any of the termination criteria are met, among other housekeeping tasks.
    This function should be called after every thread step is completed to handle it in the appropriate manner. In
    effect, it serves as a housekeeping function to take care of all the important details that are checked for after
    every "prod" step.
    Parameters
    ----------
    thread : Thread
        The Thread object that just completed a move.
    settings : Namespace
        Global settings Namespace object.
    Returns
    -------
    None
    """
    # global candidateops
    def report_rc_values(coord_file):
        # Simple function for outputting the RC values for a given trajectory traj to the eps_results.out file
        # todo: replace use of traj with simple evaluation of eps_fwd/bwd variable, depending on direction argument? (Unimportant, optimization only)
        # NOTE(review): open(...).write(...) followed by a separate
        # open(...).close() leaks the first file handle; the second call opens
        # and closes a brand-new handle. Consider a with-block here.
        rc_values = []
        if '.rst' in coord_file or '.rst7' in coord_file:
            fileformat = '.rst7'
        elif '.nc' in coord_file:
            fileformat = '.nc'
        else:
            sys.exit('Error: cleanthread.report_rc_values() encountered a file of unknown format: ' + coord_file)
        traj = pytraj.iterload(coord_file, thread.prmtop, format=fileformat)
        for i in range(traj.__len__()):  # iterate through frames of traj
            cv_values = [float(cv) for cv in candidatevalues(coord_file, frame=i, reduce=True, settings=settings).split(' ') if cv]  # CV values as a list
            rc_values.append(get_rc_value(cv_values=cv_values, settings=settings))
        for value in rc_values:
            if thread.rc_min <= value <= thread.rc_max:  # only write to output if the bead is inside the window
                open('eps_results.out', 'a').write(str(thread.rc_min) + ' ' + str(thread.rc_max) + ' ' + str(value) + '\n')
                open('eps_results.out', 'a').close()
        return rc_values
    if settings.eps_settings:  # EPS behavior
        if thread.last_valid == thread.suffix:  # if this move was accepted...
            thread.eps_fwd_la = thread.eps_fwd  # update "last accepted" eps_(b/f)wd attributes for this thread
            thread.eps_bwd_la = thread.eps_bwd
        # Store RC values for each frame in both the fwd and bwd trajectories of the last-accepted move, regardless of
        # whether that's this newest one or an old one.
        fwd_rc_values = []
        bwd_rc_values = []
        init_rc_value = []
        if thread.eps_fwd_la > 0 and int(thread.last_valid) > 0:  # latter requirement because we need at least one accepted trajectory before we can start reporting values
            try:
                fwd_rc_values = report_rc_values(thread.basename + '_' + thread.last_valid + '_fwd.nc')
            except ValueError:
                sys.exit('Debug: Failed on ' + thread.basename + '_' + thread.last_valid + '_fwd.nc'
                         + '\n thread.eps_fwd_la = ' + str(thread.eps_fwd_la)
                         + '\n thread.last_valid = ' + str(thread.last_valid)
                         + '\n thread.suffix = ' + str(thread.suffix))
        if thread.eps_bwd_la > 0 and int(thread.last_valid) > 0:
            try:
                bwd_rc_values = report_rc_values(thread.basename + '_' + thread.last_valid + '_bwd.nc')
            except ValueError:
                # NOTE(review): this debug message names the _fwd.nc file even
                # though the bwd trajectory failed — confirm intended.
                sys.exit('Debug: Failed on ' + thread.basename + '_' + thread.last_valid + '_fwd.nc'
                         + '\n thread.eps_fwd_la = ' + str(thread.eps_fwd_la)
                         + '\n thread.last_valid = ' + str(thread.last_valid)
                         + '\n thread.suffix = ' + str(thread.suffix))
        if int(thread.last_valid) > 0:
            init_rc_value = report_rc_values(thread.basename + '_' + thread.last_valid + '_init_fwd.rst')
        # Finally, handle dynamic seeding:
        if settings.eps_dynamic_seed and (True in [bool(x) for x in settings.empty_windows]) and (thread.last_valid == thread.suffix):  # is this last boolean required? I think maybe yes because I'm using pickframe()?
            # Beads ordered along the string: reversed bwd frames, then the
            # initial coordinates, then the fwd frames.
            rc_values = list(reversed(bwd_rc_values)) + init_rc_value + fwd_rc_values
            start_bead = 0
            suffix = 1
            for rc_value in rc_values:
                start_bead += 1
                for window in range(len(settings.eps_windows) - 1):
                    if (settings.empty_windows[window] > 0) and (settings.eps_windows[window] - settings.overlap <= rc_value <= settings.eps_windows[window + 1] + settings.overlap):
                        # Write a new coordinate file from the appropriate trajectory
                        # todo: this is so ugly because I didn't design pickframe() to help make a new thread with an unknown initial structure. Can I clean this up somehow?
                        if start_bead <= thread.eps_bwd_la:  # use la values since pickframe uses the la trajectory
                            structure = pickframe(thread.name, 'bwd', frame=int(thread.eps_bwd_la - start_bead), forked_from=thread, suffix=suffix, settings=settings)  # "frame" should be zero-indexed
                            suffix += 1
                            debug_dir = 'bwd'
                            debug_frame = int(thread.eps_bwd_la - start_bead)
                        elif start_bead == thread.eps_bwd_la + 1:  # the initial coordinates
                            structure = pickframe(thread.name, 'init', forked_from=thread, suffix=suffix, settings=settings)
                            suffix += 1
                            debug_dir = 'init_fwd'
                            debug_frame = 'N/A'
                        else:  # inside the fwd trajectory
                            structure = pickframe(thread.name, 'fwd', frame=int(start_bead - thread.eps_bwd_la - 2), forked_from=thread, suffix=suffix, settings=settings)  # "frame" should be zero-indexed
                            suffix += 1
                            debug_dir = 'fwd'
                            debug_frame = int(start_bead - thread.eps_bwd_la - 1)
                        settings.dynamic_seed_kludge = window  # forces spawnthread to place this thread in the correct window in the case where it could fit into two due to overlap
                        newthread = spawnthread(structure, suffix='1', settings=settings)  # spawn a new thread with the default settings
                        #newthread.last_valid = '0'   # so that if the first shooting point does not result in a valid transition path, shooting will begin from the TS guess
                        newthread.prmtop = settings.topology  # set prmtop filename for the thread
                        settings.itinerary.append(newthread)  # submit it to the itinerary
                        open(settings.logfile, 'a').write('\nEmpty EPS window with upper and lower boundaries: ' +
                                                          str(settings.eps_windows[window] - settings.overlap) + ' and ' +
                                                          str(settings.eps_windows[window + 1] + settings.overlap) + ' has been'
                                                          ' seeded using bead ' + str(start_bead) + ' from shooting move ' +
                                                          thread.name + '. Debug information:')
                        open(settings.logfile, 'a').write('\n  fwd_rc_values = ' + str(fwd_rc_values))
                        open(settings.logfile, 'a').write('\n  bwd_rc_values = ' + str(bwd_rc_values))
                        open(settings.logfile, 'a').write('\n  rc_values = ' + str(rc_values))
                        open(settings.logfile, 'a').write('\n  start_bead = ' + str(start_bead))
                        open(settings.logfile, 'a').write('\n  pickframe trajectory = ' + thread.basename + '_' + thread.last_valid + '_' + debug_dir + '.nc')
                        open(settings.logfile, 'a').write('\n  frame from trajectory = ' + str(debug_frame))
                        open(settings.logfile, 'a').write('\n  structure = ' + str(structure))
                        open(settings.logfile, 'a').write('\n  new empty_windows = ' + str(settings.empty_windows))
                        open(settings.logfile, 'a').close()
    elif thread.commit1 != 'fail':  # standard aimless shooting behavior
        # Record result of forward trajectory in output file. This is done regardless of whether the shooting point was
        # accepted; accept/reject is for keeping the sampling around the separatrix, but even rejected points are valid
        # for calculating the reaction coordinate so long as they committed to a basin!
        if thread.commit1 == 'fwd':
            basin = 'A'
        elif thread.commit1 == 'bwd':
            basin = 'B'
        else:
            basin = thread.commit1
            sys.exit('Error: thread commit1 flag took on unexpected value: ' + basin + '\nThis is a weird error.'
                     ' Please raise this issue on GitHub along with your ATESA input file!')
        open('as.out', 'a').write(basin + ' <- ' + candidatevalues(thread.name + '_init_fwd.rst', settings=settings) + '\n')
        open('as.out', 'a').close()
    # Write last result to history
    if thread.last_valid == thread.suffix:
        code = 'S'
    elif thread.commit1 == thread.commit2 == 'fwd':
        code = 'F'
    elif thread.commit1 == thread.commit2 == 'bwd':
        code = 'B'
    else:  # failure of one or both halves of shooting move
        code = 'X'
    thread.history.append(thread.name + ' ' + code)
    try:
        with open('history/' + thread.basename, 'w') as file:
            for history_line in thread.history:
                file.write(history_line + '\n')
            file.close()
    except (IOError, OSError):
        # history directory missing or unusable: recreate it and retry once.
        if os.path.exists(settings.working_directory + '/history'):
            shutil.rmtree(settings.working_directory + '/history')  # delete old (apparently broken) history directory
            os.makedirs(settings.working_directory + '/history')  # make a new one
        else:
            os.makedirs(settings.working_directory + '/history')
        with open('history/' + thread.basename, 'w') as file:
            for history_line in thread.history:
                file.write(history_line + '\n')
            file.close()
    thread.total_moves += 1
    open(settings.logfile, 'a').write('\nShooting run ' + thread.name + ' finished with fwd trajectory result: ' + thread.commit1 + ' and bwd trajectory result: ' + thread.commit2)
    if settings.eps_settings:
        open(settings.logfile, 'a').write(', as well as init result: ' + checkcommit(thread, 'init', settings=settings))  # todo: should probably save an init_commit attribute to threads to avoid checking commitment on init for a second time here.
    open(settings.logfile, 'a').write('\n' + thread.basename + ' has a current acceptance ratio of: ' + str(thread.accept_moves) + '/' + str(thread.total_moves) + ', or ' + str(100*thread.accept_moves/thread.total_moves)[0:5] + '%')
    open(settings.logfile, 'a').close()
    # Implementation of fork. Makes (fork - 1) new threads from successful runs and adds them to the itinerary. The new
    # threads do not inherit anything from their parents except starting point and history.
    if settings.fork > 1 and thread.last_valid == thread.suffix:
        for i in range(settings.fork - 1):
            # Random direction avoids biasing the forked threads.
            direction = random.randint(0, 1)
            if direction == 0:
                pick_dir = 'fwd'
            else:
                pick_dir = 'bwd'
            newthread = spawnthread(thread.name + '_' + str(i + 1), suffix='1', settings=settings)
            newthread.prmtop = thread.prmtop
            newthread.start_name = pickframe(newthread, pick_dir, thread, settings=settings)
            #newthread.last_valid = '0'
            newthread.history = thread.history
            settings.itinerary.append(newthread)
    if settings.eps_settings:  # EPS behavior
        # Pick a random bead along the last-accepted string as the next
        # shooting point, and rebalance fwd/bwd lengths around it.
        start_bead = random.randint(1, settings.k_beads)
        # Thread has attributes eps_fwd and eps_bwd telling me how long the fwd and bwd trajectories are...
        if start_bead <= thread.eps_bwd_la:  # use la values since pickframe uses the la trajectory
            thread.start_name = pickframe(thread, 'bwd', frame=int(thread.eps_bwd_la - start_bead), settings=settings)  # "frame" should be zero-indexed
        elif start_bead == thread.eps_bwd_la + 1:  # the initial coordinates
            thread.start_name = thread.name + '_init_fwd.rst'
        else:  # inside the fwd trajectory
            thread.start_name = pickframe(thread, 'fwd', frame=int(start_bead - thread.eps_bwd_la - 2), settings=settings)  # "frame" should be zero-indexed
        thread.eps_fwd = settings.k_beads - start_bead  # set new eps_fwd and _bwd to keep string length the same
        thread.eps_bwd = settings.k_beads - thread.eps_fwd - 1  # extra -1 to account for starting point
        if settings.cleanup:
            if not thread.suffix == thread.last_valid:
                if os.path.exists(thread.basename + '_' + thread.suffix + '_fwd.nc'):
                    os.remove(thread.basename + '_' + thread.suffix + '_fwd.nc')
                if os.path.exists(thread.basename + '_' + thread.suffix + '_bwd.nc'):
                    os.remove(thread.basename + '_' + thread.suffix + '_bwd.nc')
    else:  # normal aimless shooting behavior
        direction = random.randint(0, 1)  # This is necessary to avoid an issue where acceptance ratios fall off as sampling progresses. See Mullen et al. 2015 (Easy TPS) SI.
        if direction == 0:
            pick_dir = 'fwd'
        else:
            pick_dir = 'bwd'
        if thread.last_valid == thread.suffix or settings.always_new:  # pick a new starting point if the last move was a success
            thread.start_name = pickframe(thread, pick_dir, settings=settings)
        if settings.cleanup:  # init trajectory is never used for anything, so delete it if settings.cleanup == True
            if os.path.exists(thread.basename + '_' + thread.suffix + '_init.nc'):
                os.remove(thread.basename + '_' + thread.suffix + '_init.nc')
    # Reset per-move state in preparation for the next shooting move.
    thread.type = 'init'
    thread.suffix = str(int(thread.suffix) + 1)
    thread.name = thread.basename + '_' + thread.suffix
    thread.jobid1 = ''  # this line required if eps_settings is given, redundant otherwise
    thread.jobid2 = ''  # this line required if eps_settings is given, redundant otherwise
    thread.commit1 = ''
    thread.commit2 = ''
    try:
        null = settings.bootstrap_bookkeep  # to throw AttributeError if not set up to do bootstrapping
        settings.bootstrap_flag = handle_bootstrap(settings)  # handles tasks associated with bootstrapping
        if settings.bootstrap_flag == True:
            open(settings.logfile, 'a').write('\nBootstrapped reaction coordinates agree to within given tolerance. No '
                                              'further jobs will be submitted by this instance of ATESA, but currently'
                                              'running jobs will be allowed to finish. To perform more sampling, submit'
                                              ' a new ATESA job in the same working directory with restart = True')
            open(settings.logfile, 'a').close()
    except AttributeError:
        pass
    # Termination criteria: too many consecutive failures, or total/accepted
    # move caps reached (a cap of 0 disables that criterion via the chained
    # comparison `x >= cap > 0`).
    if thread.failcount >= settings.max_fails > 0:
        thread.status = 'max_fails'  # the thread dies because it has failed too many times in a row
    elif thread.total_moves >= settings.max_moves > 0:
        thread.status = 'max_moves'  # the thread dies because it has performed too many total moves
    elif thread.accept_moves >= settings.max_accept > 0:
        thread.status = 'max_accept'  # the thread dies because it has accepted too many moves
    else:
        try:
            if not settings.bootstrap_flag:
                settings.itinerary.append(thread)  # the thread lives and moves to next step
        except AttributeError:
            settings.itinerary.append(thread)  # the thread lives and moves to next step
    # Write a status file to indicate the acceptance ratio and current status of every thread.
    with open('status.txt','w') as file:
        for thread in settings.allthreads:
            try:
                file.write(thread.basename + ' acceptance ratio: ' + str(thread.accept_moves) + '/' + str(thread.total_moves) + ', or ' + str(100*thread.accept_moves/thread.total_moves)[0:5] + '%\n')
            except ZeroDivisionError:  # Since any thread that hasn't completed a move yet has total_moves = 0
                file.write(thread.basename + ' acceptance ratio: ' + str(thread.accept_moves) + '/' + str(thread.total_moves) + ', or 0%\n')
            if thread in settings.itinerary:
                file.write('  Status: move ' + thread.suffix + ' queued\n')
            elif thread in settings.running:
                file.write('  Status: move ' + thread.suffix + ' running\n')
            else:
                if thread.status in ['max_accept','max_moves','max_fails']:
                    file.write('  Status: terminated after move ' + thread.suffix + ' due to termination criterion: ' + thread.status + '\n')
                else:
                    file.write('  Status: crashed during move ' + thread.suffix + '\n')
    file.close() | 9d9b2c8819126fdc284805e94b6d37e75d0abb9a | 26,639 |
def read_words(file="words.txt"):
    """
    Reads a list of words from a file.
    There needs to be one word per line, for this to work properly.
    Args:
        file: the file to read from
    Returns:
        An array of all the words in the file, lowercased
    """
    with open(file, "r") as handle:
        contents = handle.read()
    return [line.lower() for line in contents.splitlines()]
def get_intersphinx_label(is_map, cur_project_dir):
    """
    Guess which intersphinx shortname label corresponds to the current project.
    The top-level keys of the intersphinx map are shortname labels; each
    value's second element (invdata[1]) lists candidate locations of that
    project's objects.inv file. Only local filesystem paths are considered —
    if the current project directory name appears in one, its label is
    returned immediately; otherwise None.
    """
    for label, invdata in is_map.items():
        for candidate in invdata[1]:
            # Skip empty entries and remote URLs; only paths are comparable.
            if not candidate or candidate.startswith("http"):
                continue
            if cur_project_dir in candidate:
                return label
    return None
async def patch_user(user: User):
    """Update a `user` in the list of users.

    Looks the user up by ``user.key`` and overwrites their first name, last
    name and class name. Returns ``{"status": ...}`` with ``success`` on
    commit, or ``fail`` when the key is unknown or the update violates an
    integrity constraint.
    """
    session = Session()
    try:
        selected_user = session.query(
            UserTable
        ).filter(
            UserTable.key == user.key
        ).first()
        if selected_user is None:
            # FIX: previously an unknown key raised AttributeError on None.
            return {"status": PatchUserResponseStatus.fail}
        selected_user.firstname = user.firstname
        selected_user.lastname = user.lastname
        selected_user.classname = user.classname
        session.commit()
    except sqlalchemy.exc.IntegrityError:
        # Roll back the failed transaction before reporting failure.
        session.rollback()
        return {"status": PatchUserResponseStatus.fail}
    finally:
        # FIX: always return the session's connection to the pool.
        session.close()
    return {"status": PatchUserResponseStatus.success}
import torchvision
def get_test_dataloader(mean, std, batch_size=16, num_workers=2, shuffle=True, task="cifar100", train=False):
    """ return a test (or train, if train=True) dataloader for CIFAR
    Args:
        mean: per-channel mean of the dataset, used for normalization
        std: per-channel std of the dataset, used for normalization
        batch_size: dataloader batchsize
        num_workers: dataloader num_works
        shuffle: whether to shuffle
        task: which dataset to load, "cifar100" or "cifar10"
        train: load the training split instead of the test split
    Returns: torch DataLoader object
    Raises:
        ValueError: if `task` is not "cifar100" or "cifar10"
    """
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean, std)
    ])
    if task == "cifar100":
        dataset = torchvision.datasets.CIFAR100(root='./data', train=train, download=True, transform=transform_test)
    elif task == "cifar10":
        dataset = torchvision.datasets.CIFAR10(root='./data', train=train, download=True, transform=transform_test)
    else:
        # FIX: an unknown task previously fell through and raised a confusing
        # NameError on the undefined dataset variable.
        raise ValueError("unsupported task: {}".format(task))
    return DataLoader(
        dataset, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)
from typing import Set
def get(tags: Set[str]):
    """ get options marked by `tags`
    Options tagged by wildcard '*' are always returned
    """
    # The wildcard bucket is always yielded ahead of the requested tags.
    selected = ('*',) + tuple(tags)
    return (option for tag in selected for option in _options[tag])
def jitter_boxes(boxes, noise_scale=0.025):
  """Jitter the box coordinates by some noise distribution.
  Args:
    boxes: a tensor whose last dimension is 4 representing the coordinates
      of boxes in ymin, xmin, ymax, xmax order.
    noise_scale: a python float which specifies the magnitude of noise. The
      rule of thumb is to set this between (0, 0.1]. The default value is found
      to mimic the noisy detections best empirically.
  Returns:
    jittered_boxes: a tensor whose shape is the same as `boxes` representing
      the jittered boxes.
  Raises:
    ValueError: If the last dimension of boxes is not 4.
  """
  if boxes.shape[-1] != 4:
    # Bug fix: the message previously reported boxes.shape[1] although the
    # check (and the documented contract) is on the last dimension.
    raise ValueError(
        'boxes.shape[-1] is {:d}, but must be 4.'.format(boxes.shape[-1]))
  with tf.name_scope('jitter_boxes'):
    # One noise draw per coordinate perturbation: (dx, dy, dw, dh).
    bbox_jitters = tf.random_normal([4], stddev=noise_scale)
    ymin = boxes[..., 0:1]
    xmin = boxes[..., 1:2]
    ymax = boxes[..., 2:3]
    xmax = boxes[..., 3:4]
    width = xmax - xmin
    height = ymax - ymin
    # Shift the center proportionally to the box size ...
    new_center_x = (xmin + xmax) / 2.0 + bbox_jitters[0] * width
    new_center_y = (ymin + ymax) / 2.0 + bbox_jitters[1] * height
    # ... and rescale width/height multiplicatively so they stay positive.
    new_width = width * tf.exp(bbox_jitters[2])
    new_height = height * tf.exp(bbox_jitters[3])
    jittered_boxes = tf.concat([
        new_center_y - new_height * 0.5,
        new_center_x - new_width * 0.5,
        new_center_y + new_height * 0.5,
        new_center_x + new_width * 0.5], axis=-1)
    return jittered_boxes
def soil_props(soil_type, depth):
    """
    Return a dict of soil hydraulic/chemical parameters for a soil class.

    Parameters c, Ks, n, Beta, s_h, s_w, s_bal, s_fc, bulk_d:
    Laio et al., 2001, Plants in water-controlled ecosystems: active role in hydrologic processes and response to water stress: II. Probabilistic soil moisture dynamic
    Parameters p1 through p5:
    Ezlit et al., 2013, Modification of the McNeal Clay Swelling Model Improves Prediction of Saturated Hydraulic Conductivity as a Function of Applied Water Quality

    :param soil_type: one of "class_1" (5.7% clay), "class_2" (16.2% clay)
        or "class_3" (48.5% clay)
    :param depth: soil depth; stored as Zr and used for Msoil = bulk_d * depth
    :raises ValueError: for an unknown soil_type (previously this crashed
        with an UnboundLocalError further down)
    :return: parameter dict augmented with Kg (Gapon constant), Zr and Msoil
    """
    # 5.7% clay
    class_1 = {'c':4.8, 'Ks':1000.0, 'n':0.42, 'Beta':12.7, 's_h':0.08,
               's_w':0.11, 's_bal':0.31, 's_fc':0.52, 'bulk_d':1.5,
               'p1':0.649, 'p2':0.003, 'p3':8.837, 'p4':4.046,
               'p7':0.008, 'p6':6.356, 'p5':30.818, 'CEC': 50}
    # 16.2% clay
    class_2 = {'c':6.5, 'Ks':800.0, 'n':0.43, 'Beta':13.8, 's_h':0.14,
               's_w':0.18, 's_bal':0.46, 's_fc':0.56, 'bulk_d':1.5,
               'p1':1.00, 'p2':0.912, 'p3':1.438, 'p4':7.29,
               'p7':0.204, 'p6':4.105, 'p5':-5.054, 'CEC': 150}
    # 48.5% clay
    class_3 = {'c':9.8, 'Ks':200.0, 'n':0.45, 'Beta':14.8, 's_h':0.19,
               's_w':0.24, 's_bal':0.57, 's_fc':0.65, 'bulk_d':1.2,
               'p1':0.449, 'p2':1.005, 'p3':0.846, 'p4':10.968,
               'p7':0.53, 'p6':4.0799, 'p5':-11.15, 'CEC': 300}
    if soil_type == "class_1":
        soil_dict = {**class_1}
    elif soil_type == "class_2":
        soil_dict = {**class_2}
    elif soil_type == "class_3":
        soil_dict = {**class_3}
    else:
        raise ValueError("unknown soil_type: {!r}".format(soil_type))
    gapon = 0.01475
    mass = soil_dict['bulk_d']*depth
    soil_dict.update(Kg=gapon, Zr=depth, Msoil=mass)
    return soil_dict
from typing import List
def DoMeshesBelongToSameMainMesh(list_mesh_identifiers: List[str]) -> bool:
    """Check whether all meshes identified in the list belong to one main mesh.

    Raises if an identifier does not refer to a mesh, sub-mesh or mesh group.
    """
    main_mesh_ids = set()
    for identifier in list_mesh_identifiers:
        salome_object = salome_utilities.GetSalomeObject(identifier)
        if IsMeshProxy(salome_object):
            # A main mesh is its own main mesh.
            main_mesh_ids.add(identifier)
        elif IsSubMeshProxy(salome_object) or IsMeshGroup(salome_object):
            # Resolve the owning main mesh of the sub-mesh / group.
            main_mesh_ids.add(salome_utilities.GetSalomeID(salome_object.GetMesh()))
        else:
            obj_type = type(salome_object)
            obj_name = salome_utilities.GetObjectName(identifier)
            raise Exception('Object with identifier "{}" is not a mesh! Name: "{}" , Type: "{}"'.format(identifier, obj_name, obj_type))
    return len(main_mesh_ids) <= 1
import random
def test_ps_push_http():
    """Test pushing bucket notification events to an HTTP endpoint.

    Spins up a local StreamingHTTPServer, wires a pubsub topic, a bucket
    notification and an HTTP subscription to it, then verifies that both
    object-creation and object-deletion events arrive at the server.
    """
    if skip_push_tests:
        # NOTE(review): this *returns* SkipTest rather than raising it --
        # confirm this is how the surrounding harness expects skips.
        return SkipTest("PubSub push tests don't run in teuthology")
    zones, ps_zones = init_env()
    bucket_name = gen_bucket_name()
    topic_name = bucket_name+TOPIC_SUFFIX
    # create random port for the http server
    host = get_ip()
    port = random.randint(10000, 20000)
    # start an http server in a separate thread
    http_server = StreamingHTTPServer(host, port)
    # create topic
    topic_conf = PSTopic(ps_zones[0].conn, topic_name)
    _, status = topic_conf.set_config()
    assert_equal(status/100, 2)
    # create bucket on the first of the rados zones
    bucket = zones[0].create_bucket(bucket_name)
    # wait for sync
    zone_meta_checkpoint(ps_zones[0].zone)
    # create notifications
    notification_conf = PSNotification(ps_zones[0].conn, bucket_name,
                                       topic_name)
    _, status = notification_conf.set_config()
    assert_equal(status/100, 2)
    # create subscription pushing to the local http server
    sub_conf = PSSubscription(ps_zones[0].conn, bucket_name+SUB_SUFFIX,
                              topic_name, endpoint='http://'+host+':'+str(port))
    _, status = sub_conf.set_config()
    assert_equal(status/100, 2)
    # create objects in the bucket
    number_of_objects = 10
    for i in range(number_of_objects):
        key = bucket.new_key(str(i))
        key.set_contents_from_string('bar')
    # wait for sync
    zone_bucket_checkpoint(ps_zones[0].zone, zones[0].zone, bucket_name)
    # check http server received the creation events
    keys = list(bucket.list())
    # TODO: use exact match
    http_server.verify_events(keys, exact_match=False)
    # delete objects from the bucket
    for key in bucket.list():
        key.delete()
    # wait for sync
    zone_meta_checkpoint(ps_zones[0].zone)
    zone_bucket_checkpoint(ps_zones[0].zone, zones[0].zone, bucket_name)
    # check http server received the deletion events
    # TODO: use exact match
    http_server.verify_events(keys, deletions=True, exact_match=False)
    # cleanup: tear down subscription, notification, topic, bucket and server
    sub_conf.del_config()
    notification_conf.del_config()
    topic_conf.del_config()
    zones[0].delete_bucket(bucket_name)
    http_server.close()
import os
def returnPaths(jsonObj):
    """
    Build the old and new file paths described by a json dict.

    :param jsonObj: json dict with keys "oldName", "newName", "extension"
        and "path"
    :return: tuple of paths (oldPath, newPath); only newPath carries the
        extension
    """
    assert isinstance(jsonObj, dict), "In object not a dict"
    PathPair = coll.namedtuple("paths", ["oldPath", "newPath"])
    directory = jsonObj.get("path")
    old_path = os.path.join(directory, jsonObj.get("oldName"))
    new_path = os.path.join(directory, jsonObj.get("newName")) + "." + jsonObj.get("extension")
    return PathPair(old_path, new_path)
def active_roles(account, days_back):
    """ Returns query for finding active roles (since days_back value).

    :param account: AWS account id filtered on in the query
    :param days_back: look-back window in days for the eventtime filter
    :return: tuple of (Athena SQL query string, S3 results prefix)
    """
    # NOTE(review): account/days_back are interpolated straight into the SQL
    # via f-string -- safe only if both come from trusted internal sources.
    query_string = f"""SELECT DISTINCT useridentity.sessioncontext.sessionissuer.arn
    FROM behold
    WHERE account = '{account}'
    AND useridentity.type = 'AssumedRole'
    AND from_iso8601_timestamp(eventtime) > date_add('day', -{days_back}, now());"""
    return (query_string, f"athena_results/active_roles/{account}")
def sample_user(email='user@test.com', password='Test123'):
    """
    Create and return a sample user for test cases.

    :param email: Email address of the sample user
    :param password: A password for account creation. A weak password is
        acceptable since this helper is restricted to the testing environment.
    :return: Returns the created user object
    """
    # Resolve the user model via get_user_model() so tests keep working if
    # the project ever swaps in a custom user model.
    user_model = get_user_model()
    return user_model.objects.create_user(email, password)
import asyncio
async def run_command(*args, **kwargs):
    """Run a command asynchronously and return (returncode, stdout, stderr).

    Behaviour flags (consumed here, not forwarded):
      shell: if truthy, run via the shell (args is then a command string).
      check: if truthy, raise on a non-zero exit status.
    All remaining kwargs are forwarded to the asyncio subprocess factory
    (e.g. stdout=asyncio.subprocess.PIPE to capture output).
    """
    factory = asyncio.subprocess.create_subprocess_exec
    if kwargs.pop("shell", False):
        factory = asyncio.subprocess.create_subprocess_shell
    check = kwargs.pop("check", False)
    process = await factory(*args, **kwargs)
    stdout, stderr = await process.communicate()
    if check and process.returncode != 0:
        # Include exit status and stderr so failures are diagnosable;
        # the previous message only echoed the arguments.
        raise Exception(
            "Command failed with code %s: %s\nstderr: %s"
            % (process.returncode, args, stderr))
    return process.returncode, stdout, stderr
import torch
def scaled_dot_product_attention(q, k, v, mask=None):
    """Compute scaled dot-product attention.

    q and k must share the same depth, and seq_len_k must equal seq_len_v.
    The mask, if given, must broadcast against (..., seq_len_q, seq_len_k).

    Args:
        q: queries of shape (..., seq_len_q, depth)
        k: keys of shape (..., seq_len_k, depth)
        v: values of shape (..., seq_len_v, depth_v), seq_len_k == seq_len_v
        mask: optional float tensor; positions with mask == 1 (padding or
            look-ahead) are suppressed. Defaults to None.
    Returns:
        (output, attention_weights): output has shape (..., seq_len_q, depth_v),
        attention_weights has shape (..., seq_len_q, seq_len_k).
    """
    # Raw similarity scores between every query and every key.
    scores = torch.matmul(q, k.transpose(-1, -2))  # (..., seq_len_q, seq_len_k)
    # Scale by sqrt(depth_k) to keep the softmax in a well-behaved range.
    depth_k = torch.tensor(k.shape[-1], dtype=torch.float32)
    scores = scores / torch.sqrt(depth_k)
    if mask is not None:  # mask: e.g. (b, 1, 1, seq_len)
        # Masked positions get -1e9 added so they vanish after softmax.
        scores = scores + mask * -1e9
    # Normalize over the key axis.
    attention_weights = torch.nn.functional.softmax(scores, dim=-1)
    output = torch.matmul(attention_weights, v)  # (..., seq_len_q, depth_v)
    return output, attention_weights
def _get_parent_entity(entities, entity_id):
    """
    Look up the parent entity by id, raising ParentDoesNotExist when absent.
    """
    try:
        parent = entities[entity_id]
    except KeyError:
        raise ParentDoesNotExist(object_type='Entity', key=entity_id)
    return parent
def tstop(f):
    """
    Dust stopping time

    Computes t = (grainSize * grainDensity) / (rho * cs) * sqrt(pi*gamma/8)
    and returns it converted to the snapshot's time unit.

    f : snapshot exposing 'u_dustGrainSize', 'rho' and 'cs' arrays
        (presumably a pynbody-style SimSnap -- TODO confirm)
    """
    units = sutil.get_all_units(f)
    grainSize = f['u_dustGrainSize']
    # Grain density comes from the snapshot parameter file, in density units.
    grainDensity = SimArray(sutil.get_snap_param(f, 'dDustGrainDensity'), units['rho_unit'])
    if sutil.is_isothermal(f):
        gamma = 1.
    else:
        gamma = sutil.get_snap_gamma(f)
    # sqrt(pi*gamma/8) converts the sound speed to a mean thermal speed
    # NOTE(review): verify this factor against the drag-law reference used.
    t = ((grainSize*grainDensity)/(f['rho'] * f['cs'])) * np.sqrt(np.pi*gamma/8.)
    return t.in_units(units['t_unit'])
def solve_2d_discrete_observations_continuous_modelling(
        cond_xy0s_list: tp.List[tp.Tuple[float, float]],
        cond_xytGammas_list: tp.List[tp.Tuple[float, float, float]],
        cond_f0s_list: tp.List[float],
        cond_fGammas_list: tp.List[float],
        a: float,
        b: float,
        c: float,
        d: float,
        T: float,
        f: tp.Callable[[float, float, float], float],
        g: tp.Callable[[float, float, float], float],
) -> tp.Callable[[float, float, float], float]:
    """
    :param cond_xy0s_list: list of space points for initial conditions:
        u(cond_x0_i, cond_y0_i, 0) = cond_f0_i
    :param cond_xytGammas_list: list of space-time for boundary conditions:
        u(cond_xGamma_i, cond_yGamma_i, cond_tGamma_i) = cond_fGamma_i
    :param cond_f0s_list: list of real values for initial conditions:
        cond_f0_i = u(cond_x0_i, cond_y0_i, 0)
    :param cond_fGammas_list: list of real values for boundary conditions:
        cond_fGamma_i = u(cond_xGamma_i, cond_yGamma_i, cond_tGamma_i)
    :param a: lower bound of the x-domains of g and u
    :param b: upper bound of the x-domains of g and u
    :param c: lower bound of the y-domains of g and u
    :param d: upper bound of the y-domains of g and u
    :param T: end time
    :param f: real-valued function of space and time,
        represents external perturbations in the system.
    :param g: Green's function of the linear differential operator L
    :return: real-valued function u of space and time,
        least squares solution to L u(x, y, t) = f(x, y, t)
        under initial conditions u(cond_x0_i, cond_y0_i, 0) = cond_f0_i,
        and boundary conditions u(cond_xGamma_i, cond_yGamma_i, cond_tGamma_i) = cond_fGamma_i.
    """
    # Particular ("free-space") solution: Green's function convolved with f
    # over the full space-time domain [a,b] x [c,d] x [0,T].
    def u_infty(x: float, y: float, t: float) -> float:
        return integrate.tplquad(lambda t_, x_, y_: g(x - x_, y - y_, t - t_) * f(x_, y_, t_), c, d, a, b, 0, T)[0]
    # Residuals at the observation points that the corrective sources must
    # reproduce: observed values minus the particular solution.
    vec_u0 = np.array([[
        cond_f0_i - u_infty(cond_xy0_i[0], cond_xy0_i[1], 0.0)
    ] for cond_f0_i, cond_xy0_i in zip(cond_f0s_list, cond_xy0s_list)])
    vec_uGamma = np.array([[
        cond_fGamma_i - u_infty(cond_xytGamma_i[0], cond_xytGamma_i[1], cond_xytGamma_i[2])
    ] for cond_fGamma_i, cond_xytGamma_i in zip(cond_fGammas_list, cond_xytGammas_list)])
    vec_u = np.vstack((vec_u0, vec_uGamma))
    # Blocks of the observation operator A: Green's-function kernels linking
    # source points (initial surface at t=0, boundary faces) to the initial
    # (A1x) and boundary (A2x) observation points.
    def A11(x: float, y: float) -> np.array:
        return np.array([[g(
            cond_x0_i - x,
            cond_y0_i - y,
            0.0 - 0.0,
        )] for cond_x0_i, cond_y0_i in cond_xy0s_list])
    def A12(x: float, y: float, t: float) -> np.array:
        return np.array([[g(
            cond_x0_i - x,
            cond_y0_i - y,
            0.0 - t,
        )] for cond_x0_i, cond_y0_i in cond_xy0s_list])
    def A21(x: float, y: float) -> np.array:
        return np.array([[g(
            cond_xGamma_i - x,
            cond_yGamma_i - y,
            cond_tGamma_i - 0.0,
        )] for cond_xGamma_i, cond_yGamma_i, cond_tGamma_i in cond_xytGammas_list])
    def A22(x: float, y: float, t: float) -> np.array:
        return np.array([[g(
            cond_xGamma_i - x,
            cond_yGamma_i - y,
            cond_tGamma_i - t,
        )] for cond_xGamma_i, cond_yGamma_i, cond_tGamma_i in cond_xytGammas_list])
    def A(x: float, y: float, t: float) -> np.matrix:
        return np.vstack((
            np.hstack((A11(x, y), A12(x, y, t))),
            np.hstack((A21(x, y), A22(x, y, t))),
        ))
    len0, lenGamma = len(cond_xy0s_list), len(cond_xytGammas_list)
    # Gram matrix P: integrals of products of A-blocks over the source
    # supports -- the initial surface (t=0) plus the four space-time boundary
    # faces x=a, x=b, y=c, y=d.
    P11 = np.matrix([[(
        integrate.dblquad(lambda x, y: A11(x, y)[i] * A11(x, y)[j], c, d, a, b)[0] +
        integrate.dblquad(lambda t, y: A12(a, y, t)[i] * A12(a, y, t)[j], c, d, 0, T)[0] +
        integrate.dblquad(lambda t, y: A12(b, y, t)[i] * A12(b, y, t)[j], c, d, 0, T)[0] +
        integrate.dblquad(lambda t, x: A12(x, c, t)[i] * A12(x, c, t)[j], a, b, 0, T)[0] +
        integrate.dblquad(lambda t, x: A12(x, d, t)[i] * A12(x, d, t)[j], a, b, 0, T)[0]
    ) for j in range(len0)] for i in range(len0)])
    P12 = np.matrix([[(
        integrate.dblquad(lambda x, y: A11(x, y)[i] * A21(x, y)[j], c, d, a, b)[0] +
        integrate.dblquad(lambda t, y: A12(a, y, t)[i] * A22(a, y, t)[j], c, d, 0, T)[0] +
        integrate.dblquad(lambda t, y: A12(b, y, t)[i] * A22(b, y, t)[j], c, d, 0, T)[0] +
        integrate.dblquad(lambda t, x: A12(x, c, t)[i] * A22(x, c, t)[j], a, b, 0, T)[0] +
        integrate.dblquad(lambda t, x: A12(x, d, t)[i] * A22(x, d, t)[j], a, b, 0, T)[0]
    ) for j in range(lenGamma)] for i in range(len0)])
    P21 = np.matrix([[(
        integrate.dblquad(lambda x, y: A21(x, y)[i] * A11(x, y)[j], c, d, a, b)[0] +
        integrate.dblquad(lambda t, y: A22(a, y, t)[i] * A12(a, y, t)[j], c, d, 0, T)[0] +
        integrate.dblquad(lambda t, y: A22(b, y, t)[i] * A12(b, y, t)[j], c, d, 0, T)[0] +
        integrate.dblquad(lambda t, x: A22(x, c, t)[i] * A12(x, c, t)[j], a, b, 0, T)[0] +
        integrate.dblquad(lambda t, x: A22(x, d, t)[i] * A12(x, d, t)[j], a, b, 0, T)[0]
    ) for j in range(len0)] for i in range(lenGamma)])
    P22 = np.matrix([[(
        integrate.dblquad(lambda x, y: A21(x, y)[i] * A21(x, y)[j], c, d, a, b)[0] +
        integrate.dblquad(lambda t, y: A22(a, y, t)[i] * A22(a, y, t)[j], c, d, 0, T)[0] +
        integrate.dblquad(lambda t, y: A22(b, y, t)[i] * A22(b, y, t)[j], c, d, 0, T)[0] +
        integrate.dblquad(lambda t, x: A22(x, c, t)[i] * A22(x, c, t)[j], a, b, 0, T)[0] +
        integrate.dblquad(lambda t, x: A22(x, d, t)[i] * A22(x, d, t)[j], a, b, 0, T)[0]
    ) for j in range(lenGamma)] for i in range(lenGamma)])
    P = np.vstack((
        np.hstack((P11, P12)),
        np.hstack((P21, P22)),
    ))
    # Least-squares source densities: f_src = A^T P^+ u. The pseudo-inverse
    # handles a rank-deficient Gram matrix.
    def vec_f(x: float, y: float, t: float) -> np.array:
        return A(x, y, t).T * np.linalg.pinv(P) * vec_u
    def vec_f0(x: float, y: float, t: float) -> float:
        return vec_f(x, y, t)[0]
    def vec_fGamma(x: float, y: float, t: float) -> float:
        return vec_f(x, y, t)[1]
    # Contribution of the recovered initial-surface source (at t=0).
    def u_0(x: float, y: float, t: float) -> float:
        return integrate.dblquad(lambda x_, y_: g(x - x_, y - y_, t - 0.0) * vec_f0(x_, y_, 0.0), c, d, a, b)[0]
    # Contribution of the recovered boundary sources on the four faces.
    def u_Gamma(x: float, y: float, t: float) -> float:
        return integrate.dblquad(lambda t_, y_: g(x - a, y - y_, t - t_) * vec_fGamma(a, y_, t_), c, d, 0, T)[0] + \
               integrate.dblquad(lambda t_, y_: g(x - b, y - y_, t - t_) * vec_fGamma(b, y_, t_), c, d, 0, T)[0] + \
               integrate.dblquad(lambda t_, x_: g(x - x_, y - c, t - t_) * vec_fGamma(x_, c, t_), a, b, 0, T)[0] + \
               integrate.dblquad(lambda t_, x_: g(x - x_, y - d, t - t_) * vec_fGamma(x_, d, t_), a, b, 0, T)[0]
    # Full solution: particular part plus both corrective contributions.
    def u(x: float, y: float, t: float) -> float:
        return u_infty(x, y, t) + u_0(x, y, t) + u_Gamma(x, y, t)
    return u
import logging
from typing import Callable
from typing import Any
def log_calls_on_exception(
    logger: logging.Logger, log_exception: bool = True
) -> GenericDecorator:
    """
    Log calls to the decorated function, when exceptions are raised.
    Can also decorate classes to log calls to all its methods.

    :param logger: object to log to
    :param log_exception: True, to log stacktrace and exception
    """
    def _log_on_failure(
        target: Callable[..., TargetReturnT],
        *args: Any,
        **kwargs: Any
    ) -> TargetReturnT:
        try:
            return target(*args, **kwargs)
        except BaseException:
            # Either log the full traceback, or just the call arguments.
            if log_exception:
                logger.exception("Exception")
            else:
                logger.info(f"{target.__name__} args: {args!r} {kwargs!r}")
            raise
    return GenericDecorator(_log_on_failure)
from typing import Callable
from typing import Optional
def _minimize_lbfgs(
    fun: Callable,
    x0: Array,
    maxiter: Optional[float] = None,
    norm=jnp.inf,
    maxcor: int = 10,
    ftol: float = 2.220446049250313e-09,
    gtol: float = 1e-05,
    maxfun: Optional[float] = None,
    maxgrad: Optional[float] = None,
    maxls: int = 20,
):
  """
  Minimize a function using L-BFGS
  Implements the L-BFGS algorithm from
    Algorithm 7.5 from Wright and Nocedal, 'Numerical Optimization', 1999, pg. 176-185
  And generalizes to complex variables from
     Sorber, L., Barel, M.V. and Lathauwer, L.D., 2012.
     "Unconstrained optimization of real functions in complex variables"
     SIAM Journal on Optimization, 22(3), pp.879-898.
  Args:
    fun: function of the form f(x) where x is a flat ndarray and returns a real scalar.
      The function should be composed of operations with vjp defined.
    x0: initial guess
    maxiter: maximum number of iterations
    norm: order of norm for convergence check. Default inf.
    maxcor: maximum number of metric corrections ("history size")
    ftol: terminates the minimization when `(f_k - f_{k+1}) < ftol`
    gtol: terminates the minimization when `|g_k|_norm < gtol`
    maxfun: maximum number of function evaluations
    maxgrad: maximum number of gradient evaluations
    maxls: maximum number of line search steps (per iteration)
  Returns:
    Optimization results.
  """
  d = len(x0)
  dtype = jnp.dtype(x0)
  # ensure there is at least one termination condition
  if (maxiter is None) and (maxfun is None) and (maxgrad is None):
    maxiter = d * 200
  # set others to inf, such that >= is supported
  if maxiter is None:
    maxiter = jnp.inf
  if maxfun is None:
    maxfun = jnp.inf
  if maxgrad is None:
    maxgrad = jnp.inf
  # initial evaluation
  f_0, g_0 = jax.value_and_grad(fun)(x0)
  state_initial = LBFGSResults(
      converged=False,
      failed=False,
      k=0,
      nfev=1,
      ngev=1,
      x_k=x0,
      f_k=f_0,
      g_k=g_0,
      s_history=jnp.zeros((maxcor, d), dtype=dtype),
      y_history=jnp.zeros((maxcor, d), dtype=dtype),
      rho_history=jnp.zeros((maxcor,), dtype=dtype),
      gamma=1.,
      status=0,
      ls_status=0,
  )
  # Loop condition: keep iterating while neither converged nor failed.
  def cond_fun(state: LBFGSResults):
    return (~state.converged) & (~state.failed)
  def body_fun(state: LBFGSResults):
    # find search direction via the two-loop recursion over the (s, y) history
    p_k = _two_loop_recursion(state)
    # line search
    ls_results = line_search(
        f=fun,
        xk=state.x_k,
        pk=p_k,
        old_fval=state.f_k,
        gfk=state.g_k,
        maxiter=maxls,
    )
    # evaluate at next iterate
    s_k = ls_results.a_k * p_k
    x_kp1 = state.x_k + s_k
    f_kp1 = ls_results.f_k
    g_kp1 = ls_results.g_k
    y_k = g_kp1 - state.g_k
    # curvature bookkeeping: rho_k = 1/(y_k . s_k); gamma rescales the
    # initial Hessian approximation for the next two-loop recursion
    rho_k_inv = jnp.real(_dot(y_k, s_k))
    rho_k = jnp.reciprocal(rho_k_inv)
    gamma = rho_k_inv / jnp.real(_dot(jnp.conj(y_k), y_k))
    # replacements for next iteration; later status checks take precedence
    status = 0
    status = jnp.where(state.f_k - f_kp1 < ftol, 4, status)
    status = jnp.where(state.ngev >= maxgrad, 3, status)  # type: ignore
    status = jnp.where(state.nfev >= maxfun, 2, status)  # type: ignore
    status = jnp.where(state.k >= maxiter, 1, status)  # type: ignore
    status = jnp.where(ls_results.failed, 5, status)
    converged = jnp.linalg.norm(g_kp1, ord=norm) < gtol
    # TODO(jakevdp): use a fixed-point procedure rather than type-casting?
    state = state._replace(
        converged=converged,
        failed=(status > 0) & (~converged),
        k=state.k + 1,
        nfev=state.nfev + ls_results.nfev,
        ngev=state.ngev + ls_results.ngev,
        x_k=x_kp1.astype(state.x_k.dtype),
        f_k=f_kp1.astype(state.f_k.dtype),
        g_k=g_kp1.astype(state.g_k.dtype),
        s_history=_update_history_vectors(history=state.s_history, new=s_k),
        y_history=_update_history_vectors(history=state.y_history, new=y_k),
        rho_history=_update_history_scalars(history=state.rho_history, new=rho_k),
        gamma=gamma,
        status=jnp.where(converged, 0, status),
        ls_status=ls_results.status,
    )
    return state
  return lax.while_loop(cond_fun, body_fun, state_initial)
def convert_2d_list_to_string(data):
    """Format a 2-D list as rows of '{e1,e2,...}' joined by ',\\n'.

    Example: [[1, 2], [3, 4]] -> '{1,2},\\n{3,4}'.
    Returns '' for empty input. An empty row renders as '{}' (the previous
    string-concatenation implementation emitted a lone '}' for empty rows,
    and built the result in quadratic time).
    """
    return ',\n'.join(
        '{' + ','.join(str(element) for element in row) + '}'
        for row in data
    )
import argparse
def _setup_argparser():
"""Setup the command line arguments"""
# Description
parser = argparse.ArgumentParser(
description="N-CREATE",
usage="ncreate [options] peer port dcmfile-in")
# Parameters
req_opts = parser.add_argument_group('Parameters')
req_opts.add_argument("peer", help="hostname of DICOM peer", type=str)
req_opts.add_argument("port", help="TCP/IP port number of peer", type=int)
req_opts.add_argument("dcmfile_in",
metavar="dcmfile-in",
help="DICOM file or directory to be transmitted",
type=str)
# General Options
gen_opts = parser.add_argument_group('General Options')
gen_opts.add_argument("--version",
help="print version information and exit",
action="store_true")
gen_opts.add_argument("-q", "--quiet",
help="quiet mode, print no warnings and errors",
action="store_true")
gen_opts.add_argument("-v", "--verbose",
help="verbose mode, print processing details",
action="store_true")
gen_opts.add_argument("-d", "--debug",
help="debug mode, print debug information",
action="store_true")
gen_opts.add_argument("-ll", "--log-level", metavar='[l]',
help="use level l for the logger (fatal, error, warn, "
"info, debug, trace)",
type=str,
choices=['fatal', 'error', 'warn',
'info', 'debug', 'trace'])
gen_opts.add_argument("-lc", "--log-config", metavar='[f]',
help="use config file f for the logger",
type=str)
# Network Options
net_opts = parser.add_argument_group('Network Options')
net_opts.add_argument("-aet", "--calling-aet", metavar='[a]etitle',
help="set my calling AE title (default: FINDSCU)",
type=str,
default='FINDSCU')
net_opts.add_argument("-aec", "--called-aet", metavar='[a]etitle',
help="set called AE title of peer (default: ANY-SCP)",
type=str,
default='ANY-SCP')
# Transfer Syntaxes
ts_opts = parser.add_mutually_exclusive_group()
ts_opts.add_argument("-xe", "--request-little",
help="request explicit VR little endian TS only",
action="store_true")
ts_opts.add_argument("-xb", "--request-big",
help="request explicit VR big endian TS only",
action="store_true")
ts_opts.add_argument("-xi", "--request-implicit",
help="request implicit VR little endian TS only",
action="store_true")
return parser.parse_args() | 57ef788da2e13f395c95704a0fec52f1aa76c407 | 26,660 |
def get_info(sheet, row_num, percentage, sheet_name,
             mandatory_tables):
    """
    Function is used to create a dictionary that contains
    the number of flawed records for a particular site.

    :param
    sheet (dataframe): pandas dataframe to traverse. Represents a
        sheet with numbers indicating data quality.

    row_num (int): row (0-index) with all of the information for
        the specified site's data quality. None means the site has no
        row in this sheet yet; all values are reported as NaN.

    percentage (boolean): used to determine whether or not the
        number is a simple record count (e.g. duplicates)
        versus the percentage of records (e.g. the success rate
        for each of the tables)

    sheet_name (str): name for the sheet for use in the error
        message

    mandatory_tables (lst): contains the tables that should be
        documented for every table and at every date.

    :return:
    err_dictionary (dictionary): key:value pairs represent the
        column name:number that represents the quality of the data

    NOTE: This function was modified from the e-mail generator. This
    function, however, logs ALL of the information in the returned
    error dictionary. This includes 0 values if the data is wholly
    complete.

    NOTE(review): the percentage branch also relies on a module-level
    `target_low` flag defined elsewhere in this file -- confirm it is
    set before this function is called.
    """
    if row_num is not None:
        data_info = sheet.iloc[row_num, :]  # series, row labels and values
    else:  # row in future sheets but not current sheet
        data_info = sheet.iloc[1, :]  # just to get the columns
        column_names = data_info.keys()
        null_list = [None] * len(column_names)
        data_info = pd.Series(null_list, column_names)
    err_dictionary = {}
    for col_label, number in data_info.iteritems():
        if col_label in mandatory_tables:
            if number is None or number == 'No Data':  # row does not exist
                err_dictionary[col_label] = float('NaN')
            else:
                try:
                    number = float(number)
                except ValueError:
                    pass
                else:
                    # Sanity checks before recording the value.
                    if number < 0:  # just in case
                        raise ValueError("Negative number detected in sheet "
                                         "{} for column {}".format(
                                          sheet_name, col_label))
                    elif percentage and number > 100:
                        raise ValueError("Percentage value > 100 detected in "
                                         "sheet {} for column {}".format(
                                          sheet_name, col_label))
                    elif percentage and target_low:  # proportion w/ errors
                        err_dictionary[col_label] = round(100 - number, 1)
                    elif percentage and not target_low:  # effective
                        err_dictionary[col_label] = round(number, 1)
                    elif not percentage and number > -1:
                        err_dictionary[col_label] = int(number)
        else:
            pass  # do nothing; do not want to document the column
    # adding all the tables; maintaining consistency for versatility
    for table in mandatory_tables:
        if table not in err_dictionary.keys():
            err_dictionary[table] = float('NaN')
    return err_dictionary
def get_structural_topology_reactions(filename, dset_path="readdy/config/structural_topology_reactions"):
    """
    Map structural topology reaction ids to their names.

    :param filename: the file name
    :param dset_path: path to the dataset
    :return: dictionary of reactions (empty if the dataset is absent)
    """
    reactions = dict()
    with h5py.File(filename, "r") as handle:
        if dset_path in handle:
            for record in handle[dset_path]:
                reactions[record["id"]] = record["name"]
    return reactions
def PyMapping_Keys(space, w_obj):
    """On success, return a list of the keys in object o. On failure, return NULL.
    This is equivalent to the Python expression o.keys()."""
    w_keys = space.call_method(w_obj, "keys")
    return space.call_function(space.w_list, w_keys)
import aiohttp
import json
import asyncio
async def safebooru(ctx, tag, page='1'):
    """Searches safebooru. Usage: safebooru [tags]

    Fetches one post for the given tag/page, posts it as an embed, and lets
    the invoking user page through results via reactions (left/right arrows;
    X deletes the message). Paging re-invokes this command recursively.
    """
    # NOTE(review): relies on module-level `bot` and `fetch`; presumably this
    # coroutine is registered as a bot command elsewhere -- confirm.
    async with aiohttp.ClientSession() as session:
        invoker = ctx.message.author
        post = await fetch(session, "https://safebooru.org/index.php?page=dapi&s=post&q=index&limit=1&tags={}&pid={}&json=1".format(tag, page))
        obj = json.loads(post)
        try:
            directory = obj[0]['directory']
            file_name = obj[0]['image']
            url = 'https://safebooru.org/images/{}/{}'.format(directory, file_name)
            tags_raw = obj[0]['tags']
            rating = obj[0]['rating']
            # Truncate the tag list to 200 chars, appending '..' if cut.
            tags = tags_raw[:200] + (tags_raw[200:] and '..')
        except Exception as e:
            await ctx.send('Tag not found.', delete_after=5)
            return 1
        embed = discord.Embed(
            title="Safebooru", url='https://safebooru.org', color=0x00fff)
        embed.add_field(name='Rating', value=rating, inline=True)
        embed.add_field(name='Tags', value=tags, inline=True)
        embed.set_image(url=url)
        msg = await ctx.send(embed=embed)
        # Pagination reactions: back arrow only past page 1, forward arrow,
        # and a regional-indicator X to dismiss.
        if int(page) > 1:
            await msg.add_reaction('\U00002B05')
        await msg.add_reaction('\U000027A1')
        await msg.add_reaction('\U0001F1FD')
        def check(reaction, user):
            # Only accept one of the three reactions, on this message,
            # from the user who invoked the command.
            return user == invoker and reaction.message.id == msg.id and (str(reaction.emoji) == '\U00002B05' or str(reaction.emoji) == '\U000027A1' or str(reaction.emoji) == '\U0001F1FD')
        try:
            reaction, user = await bot.wait_for('reaction_add', timeout=120.0, check=check)
        except asyncio.TimeoutError:
            return 1
        else:
            if str(reaction.emoji) == '\U00002B05':
                # Previous page: delete and re-invoke with page - 1.
                page = int(page)
                page -= 1
                page = str(page)
                await msg.delete()
                await ctx.invoke(safebooru, tag, page)
            elif str(reaction.emoji) == '\U000027A1':
                # Next page: delete and re-invoke with page + 1.
                page = int(page)
                page += 1
                page = str(page)
                await msg.delete()
                await ctx.invoke(safebooru, tag, page)
            elif str(reaction.emoji) == '\U0001F1FD':
                # Dismiss: just delete the message.
                await msg.delete()
                return
import re
def is_arabicrange(text):
    """ Checks for an Arabic Unicode block characters
    @param text: input text
    @type text: unicode
    @return: True if all charaters are in Arabic block
    @rtype: Boolean
    """
    # A match means at least one character falls outside the Arabic blocks.
    non_arabic = re.search(u"([^\u0600-\u06ff\ufb50-\ufdff\ufe70-\ufeff\u0750-\u077f])", text)
    return non_arabic is None
def del_pool(batch_client, config, pool_id=None):
    # type: (azure.batch.batch_service_client.BatchServiceClient, dict,
    #        str) -> bool
    """Delete a pool, after asking the user for confirmation.
    :param batch_client: The batch client to use.
    :type batch_client: `azure.batch.batch_service_client.BatchServiceClient`
    :param dict config: configuration dict
    :param str pool_id: pool id; defaults to the pool configured in `config`
    :rtype: bool
    :return: if pool was deleted
    """
    if util.is_none_or_empty(pool_id):
        # Fall back to the configured pool id.
        pool_id = settings.pool_id(config)
    action = 'delete {} pool'.format(pool_id)
    if not util.confirm_action(config, action):
        return False
    logger.info('Deleting pool: {}'.format(pool_id))
    batch_client.pool.delete(pool_id)
    return True
def str_repeat(space, s, repeat):
    """Repeat a string `repeat` times and wrap it as an interp-level string."""
    repeated = s * repeat
    return space.newstr(repeated)
def alarm(duration=250):
    """
    Red alarm; flashing bright red to dark red.

    :param int duration: The duration between hi/lo brightness,in milliseconds.
    :returns: An infinite Flow consisting of 2 transitions.
    :rtype: Flow
    """
    alarm_transitions = transitions.alarm(duration)
    # count=0 repeats forever; Action.recover restores the prior light state
    # when the flow is stopped.
    return Flow(count=0, action=Action.recover, transitions=alarm_transitions)
def mkvc(x, numDims=1):
    """Creates a vector with the number of dimension specified

    np.matrix inputs are converted to ndarray; objects exposing a ``tovec``
    method are converted first; ``Zero`` instances pass through unchanged.

    :param x: array-like to flatten (Fortran order)
    :param int numDims: number of dimensions of the result; must be 1, 2 or 3
    :raises ValueError: if numDims is not 1, 2 or 3 (previously such calls
        silently returned None)

    e.g.::

        a = np.array([1, 2, 3])

        mkvc(a, 1).shape
            > (3, )

        mkvc(a, 2).shape
            > (3, 1)

        mkvc(a, 3).shape
            > (3, 1, 1)

    """
    if type(x) == np.matrix:
        x = np.array(x)

    if hasattr(x, 'tovec'):
        x = x.tovec()

    if isinstance(x, Zero):
        return x

    assert isinstance(x, np.ndarray), "Vector must be a numpy array"

    if numDims == 1:
        return x.flatten(order='F')
    elif numDims == 2:
        return x.flatten(order='F')[:, np.newaxis]
    elif numDims == 3:
        return x.flatten(order='F')[:, np.newaxis, np.newaxis]
    raise ValueError("numDims must be 1, 2 or 3, got {}".format(numDims))
def location_distance_meters(a: Location, b: Location) -> float:
    """Calculates the distance between two points.

    Returns:
        A number of meters between two points.
    """
    distance = location_distance_kilometers(a, b)
    return distance.m
import json
def file_to_dict(file: str):
    """Load a JSON file and return its contents as a dict.

    Falls back to an empty dict (with a printed notice) when the file is
    missing or does not contain valid JSON.
    """
    try:
        with open(file) as handle:
            return json.load(handle)
    except json.decoder.JSONDecodeError:
        print(f'File {file} is not a valid json file. Returning empty dict')
    except FileNotFoundError:
        print(f'File {file} does not exist. Returning empty dict')
    return {}
def fieldtype(field):
    """Return the class name of the given field instance."""
    return type(field).__name__
from typing import Dict
def basic_extractor(
    data: Dict,
) -> list:
    """
    Extract the wallet's basic activity statistics.

    Returns a list of the total received tokens, the total sent tokens and
    the number of transactions the wallet participated in.
    """
    keys = ("total_received", "total_sent", "n_tx")
    return [data[key] for key in keys]
def expand_stages_cfg(stage_cfgs):
    """ Expand every stage config in a list of stages """
    assert isinstance(stage_cfgs, list)
    return [expand_stage_cfg(stage) for stage in stage_cfgs]
def find_team():
    """Find teams using filters taken from the request arguments."""
    # partial -> allow skipping of required fields
    filter_schema = TeamSchema(
        partial=True,
        only=(
            "name",
            "event_id",
            "team_identifier",
            "payment_status",
            "single",
            "page",
        ),
    )
    try:
        filters = filter_schema.load(request.args)
    except ValidationError as err:
        raise FieldValidationFailed(error=err.messages)
    # returns pagination obj
    paginated_teams = TeamModel.find(filters)
    response = PaginatedResponse(
        paginated_teams,
        TeamSchema(
            partial=True,
            many=True,
            exclude=("team_identifier", "event_id", "payment.transaction_no"),
        ),
    )
    return response.dump()
def clusterbased_permutation_1d_1samp_1sided(results, level=0, p_threshold=0.05, clusterp_threshold=0.05, n_threshold=2,
                                             iter=1000):
    """
    1-sample & 1-sided cluster based permutation test for 2-D results

    Parameters
    ----------
    results : array
        A result matrix.
        The shape of results should be [n_subs, x]. n_subs represents the number of subjects.
    level : float. Default is 0.
        An expected value in null hypothesis. (Here, results > level)
    p_threshold : float. Default is 0.05.
        The threshold of p-values.
    clusterp_threshold : float. Default is 0.05.
        The threshold of cluster-defining p-values.
    n_threshold : int. Default is 2.
        The threshold of number of values in one cluster (number of values per cluster > n_threshold).
    iter : int. Default is 1000.
        The times for iteration.

    Returns
    -------
    ps : float
        The permutation test results, p-values.
        The shape of ps is [x]. The values in ps should be 0 or 1, which represent not significant point or significant
        point after cluster-based permutation test, respectively.
    """
    nsubs, x = np.shape(results)
    # Step 1: point-wise one-sample, one-sided t-tests against `level`.
    ps = np.zeros([x])
    ts = np.zeros([x])
    for t in range(x):
        ts[t], p = ttest_1samp(results[:, t], level, alternative='greater')
        if p < p_threshold and ts[t] > 0:
            ps[t] = 1
        else:
            ps[t] = 0
    # Step 2: group neighboring significant points into clusters.
    cluster_index, cluster_n = get_cluster_index_1d_1sided(ps)
    if cluster_n != 0:
        # Cluster mass = sum of t-values inside each cluster.
        cluster_ts = np.zeros([cluster_n])
        for i in range(cluster_n):
            for t in range(x):
                if cluster_index[t] == i + 1:
                    cluster_ts[i] = cluster_ts[i] + ts[t]
        # Step 3: build the null distribution of the maximum cluster mass by
        # shuffling observed values against chance-level values `iter` times.
        permu_ts = np.zeros([iter])
        chance = np.full([nsubs], level)
        print("\nPermutation test")
        for i in range(iter):
            permu_cluster_ts = np.zeros([cluster_n])
            for j in range(cluster_n):
                for t in range(x):
                    if cluster_index[t] == j + 1:
                        v = np.hstack((results[:, t], chance))
                        vshuffle = np.random.permutation(v)
                        v1 = vshuffle[:nsubs]
                        v2 = vshuffle[nsubs:]
                        permu_cluster_ts[j] = permu_cluster_ts[j] + ttest_rel(v1, v2, alternative="greater")[0]
            permu_ts[i] = np.max(permu_cluster_ts)
            show_progressbar("Calculating", (i+1)*100/iter)
            if i == (iter - 1):
                print("\nCluster-based permutation test finished!\n")
        # Step 4: keep only clusters whose mass beats the permutation null
        # at the (1 - clusterp_threshold) quantile; zero out the rest.
        for i in range(cluster_n):
            index = 0
            for j in range(iter):
                if cluster_ts[i] > permu_ts[j]:
                    index = index + 1
            if index < iter * (1-clusterp_threshold):
                for t in range(x):
                    if cluster_index[t] == i + 1:
                        ps[t] = 0
    # Step 5: remove surviving runs of significant points shorter than
    # n_threshold (padding with zeros on both ends simplifies edge handling).
    newps = np.zeros([x + 2])
    newps[1:x + 1] = ps
    for i in range(x):
        if newps[i + 1] == 1 and newps[i] != 1:
            index = 0
            while newps[i + 1 + index] == 1:
                index = index + 1
            if index < n_threshold:
                newps[i + 1:i + 1 + index] = 0
    ps = newps[1:x + 1]
    return ps
def getObjectsContainers(mQueryObject=None):
    """
    Return a list of containers that the passed in objects reside in.

    @param [] mQueryObject: list of objects you are wanting to know, in which container they exists.
    @return: key = container name, value = list of member MObjects.
    @rtype: {}
    """
    # Default to an empty list here rather than in the signature: a mutable
    # default argument would be shared across calls.
    if mQueryObject is None:
        mQueryObject = []
    containerDict = {}
    nodeFn = om2.MFnContainerNode()
    selNodeFn = om2.MFnDependencyNode()
    containerObjs = getAllDagContainers()
    for selObj in mQueryObject:
        for obj in containerObjs:
            nodeFn.setObject(obj)
            if selObj in nodeFn.getMembers():
                selNodeFn.setObject(selObj)
                containerName = str(nodeFn.name())
                # dict.has_key() was removed in Python 3; setdefault covers
                # both the first-seen and already-present cases.
                containerDict.setdefault(containerName, []).append(selNodeFn.object())
    return containerDict
from webdnn.backend.webgl.attributes.texture_shape import TextureShape
from webdnn.backend.webgl.attributes.channel_mode import ChannelMode
from typing import Optional
def dump_dot(graph: Graph, name: Optional[str] = None) -> str: # pragma: no cover
    """
    Dumps graph into dot language for visualization.

    Args:
        graph: Target graph
        name: Optional label shown on the rendered graph

    Returns:
        source code of dot language.
    """
    dot_source = ""
    dot_source += "digraph webdnn_ir {\n"
    # graph setting
    dot_source += "graph [\n"
    if name:
        dot_source += f"label=\"{name}\"\n"
    dot_source += "];\n"
    # Tracks which variables already have a node definition emitted.
    added_variables = set()
    def visualize_variable(var: Variable) -> str:
        # Emit a dot node definition for `var` exactly once; later calls
        # for the same variable return an empty string.
        if var in added_variables:
            return ""
        node_attrs = {}
        node_attrs["label"] = f"\"{var.name}\n{var.shape}\nOrder={var.order}"
        if var.has_attribute(TextureShape):
            node_attrs["label"] += f"\nTextureShape={TextureShape.get(var)}"
        if var.has_attribute(ChannelMode):
            node_attrs["label"] += f"\nChannelMode={ChannelMode.get(var).name}"
        node_attrs["label"] += "\""
        # Constants are drawn with a double outline to distinguish them.
        if isinstance(var, ConstantVariable):
            node_attrs["shape"] = "doubleoctagon"
        else:
            node_attrs["shape"] = "octagon"
        # Graph inputs are dashed, graph outputs are bold.
        if var in graph.inputs:
            node_attrs["style"] = "\"dashed\""
        if var in graph.outputs:
            node_attrs["style"] = "\"bold\""
        dot_source_var = ""
        dot_source_var += f"var_{id(var)} [\n"
        dot_source_var += ",".join(f"{attr_key}={attr_value}" for attr_key, attr_value in node_attrs.items())
        dot_source_var += "];\n"
        added_variables.add(var)
        return dot_source_var
    # One box node per operator, plus labeled edges to/from its variables.
    for op in listup_operators(graph):
        op_params = getattr(op, "parameters", {})
        op_params_str = "\n".join(f"{k}={v}" for k, v in op_params.items())
        dot_source += f"op_{op.name} [label=\"{op.name}\n{op.__class__.__name__}\n{op_params_str}\", shape=box];\n"
        for connection_name, var in op.inputs.items():
            dot_source += visualize_variable(var)
            dot_source += f"var_{id(var)} -> op_{op.name} [label=\"{connection_name}\"];\n"
        for connection_name, var in op.outputs.items():
            dot_source += visualize_variable(var)
            dot_source += f"op_{op.name} -> var_{id(var)} [label=\"{connection_name}\"];\n"
    dot_source += "}"
    return dot_source
def RPL_LUSERCLIENT(sender, receipient, message):
    """Format IRC reply code 251 (RPL_LUSERCLIENT) as ``<sender>: message``.

    Note: ``receipient`` is accepted for interface compatibility with the
    other reply formatters but does not appear in the formatted reply.
    """
    return "".join(("<", sender, ">: ", message))
def multiply_inv_gaussians_batch(mus, lambdas):
    """Multiply a series of Gaussians given as mean vectors and precision matrices.

    mus: list of means with shape [..., d]
    lambdas: list of precision matrices with shape [..., d, d]
    Returns the mean vector, covariance matrix, and precision matrix of the
    product distribution.
    """
    assert len(mus) == len(lambdas)
    batch_size = mus[0].shape.as_list()[:-1]
    d_z = lambdas[0].shape.as_list()[-1]
    # Identity broadcast over the batch dimensions acts as a unit prior precision.
    identity_matrix = tf.tile(
        tf.expand_dims(tf.expand_dims(tf.eye(d_z), axis=0), axis=0),
        batch_size + [1, 1])
    # Product precision = sum of precisions (plus identity).
    lambda_new = tf.reduce_sum(lambdas, axis=0) + identity_matrix
    # Precision-weighted means, summed over the factors.
    weighted_mus = [tf.einsum("bcij, bcj -> bci", prec, mu)
                    for prec, mu in zip(lambdas, mus)]
    mus_summed = tf.reduce_sum(weighted_mus, axis=0)
    sigma_new = tf.linalg.inv(lambda_new)
    mu_new = tf.einsum("bcij, bcj -> bci", sigma_new, mus_summed)
    return mu_new, sigma_new, lambda_new
def replace_if_present_else_append(
        objlist,
        obj,
        cmp=lambda a, b: a == b,
        rename=None):
    """
    Add an object to a list of objects, if that obj does not already exist.
    If it does exist (`cmp(A, B) == True`), then replace the matching entry
    in the list.

    Input
    =====
    :objlist, list: list of objects.
    :obj, object: object to add.

    Options
    =======
    :cmp, (bool) cmp (A, B): compares A to B. If True, then the
        objects are the same and B should replace A. If False,
        then B should be appended to `objlist`.
    :param rename: Should A be renamed instead of overwritten? If not None,
        then rename must be a unary function that changes the name of A;
        the renamed A is then re-inserted recursively.
    :type rename: None or unary function

    Output
    ======
    List is modified in place. A reference to the list is returned.
    """
    for i in range(len(objlist)):
        # was a matching object found in the list?
        if cmp(objlist[i], obj):
            # if so, should the old object be renamed instead of dropped?
            if rename is not None:
                newA = rename(objlist[i])
                # the renamed object must be distinct from `obj`, otherwise
                # the recursive re-insertion below would never terminate
                if cmp(newA, obj):
                    msg = '`rename` does not make {} unique.'.format(
                        str(objlist[i])[:32])
                    raise ValueError(msg)
                # replace the original object in the list with `obj`...
                objlist[i] = obj
                # ... and re-insert the renamed original recursively.
                replace_if_present_else_append(
                    objlist, newA, cmp=cmp, rename=rename)
            else:
                # no renaming requested: simply replace.
                objlist[i] = obj
            # match handled; exit the loop and the function.
            return objlist
    # if we get here, then no match was found. Append.
    objlist.append(obj)
    return objlist
def rating_value(value):
    """Check that the given value is an integer between 1 and 5 (inclusive).

    Returns the value as an int, or raises ValueError if it is outside
    the range or cannot be converted.
    """
    rating = int(value)
    if 1 <= rating <= 5:
        return rating
    raise ValueError("Expected rating between 1 and 5, but got %s" % value)
def hsc_to_hs(ctx):
    """Process all hsc files into Haskell source files.

    Args:
      ctx: Rule context.

    Returns:
      list of File: New Haskell source files to use.
    """
    ghc_defs_dump = _make_ghc_defs_dump(ctx)
    # hsc files are preprocessed; everything else passes through untouched.
    return [
        _process_hsc_file(ctx, ghc_defs_dump, f) if f.extension == "hsc" else f
        for f in ctx.files.srcs
    ]
def calculate_iou_box(pts1, pts2):
    """
    Measure the two list of points IoU

    :param pts1: ann.geo coordinates
    :param pts2: ann.geo coordinates
    :return: `float` how Intersection over Union of tho shapes
    """
    # Import lazily so the package can be used without shapely installed;
    # the original bare `try:` block here was syntactically invalid because
    # the import statement had been hoisted out of it.
    try:
        from shapely.geometry import Polygon
    except (ImportError, ModuleNotFoundError) as err:
        raise RuntimeError('dtlpy depends on external package. Please install ') from err

    def _to_polygon(pts):
        # Build a Polygon from either a 2-point axis-aligned box or a
        # 4-point rotated box annotation.
        if len(pts) == 2:
            # regular box annotation (2 pts): derive the four corners
            left_top = [pts[0][0], pts[0][1]]
            right_top = [pts[0][0], pts[1][1]]
            right_bottom = [pts[1][0], pts[1][1]]
            left_bottom = [pts[1][0], pts[0][1]]
        else:
            # rotated box annotation (4 pts)
            left_top = pts[0]
            right_top = pts[3]
            left_bottom = pts[1]
            right_bottom = pts[2]
        return Polygon([left_top,
                        right_top,
                        right_bottom,
                        left_bottom])

    poly_1 = _to_polygon(pts1)
    poly_2 = _to_polygon(pts2)
    iou = poly_1.intersection(poly_2).area / poly_1.union(poly_2).area
    return iou
def nextafter(x, direction, dtype, itemsize):
    """Return the next representable neighbor of x in the appropriate
    direction.

    direction is -1 (previous value), 0 (x itself), or +1 (next value);
    dtype is a numpy dtype whose kind selects the per-type helper.
    """
    assert direction in [-1, 0, +1]
    assert dtype.kind == "S" or type(x) in (bool, float, int)
    if direction == 0:
        return x
    # Dispatch on dtype kind: bytes, bool, integer, then float.
    if dtype.kind == "S":
        return string_next_after(x, direction, itemsize)
    if dtype.kind in ['b']:
        return bool_type_next_after(x, direction, itemsize)
    elif dtype.kind in ['i', 'u']:
        return int_type_next_after(x, direction, itemsize)
    elif dtype.kind == "f":
        if direction < 0:
            return np.nextafter(x, x - 1)
        else:
            return np.nextafter(x, x + 1)
    # elif dtype.name == "float32":
    #     if direction < 0:
    #         return PyNextAfterF(x,x-1)
    #     else:
    #         return PyNextAfterF(x,x + 1)
    # elif dtype.name == "float64":
    #     if direction < 0:
    #         return PyNextAfter(x,x-1)
    #     else:
    #         return PyNextAfter(x,x + 1)
    raise TypeError("data type ``%s`` is not supported" % dtype)
def integrated_circular_gaussian(X=None, Y=None, sigma=0.8):
    """Create a circular Gaussian that is integrated over pixels

    This is typically used for the model PSF,
    working well with the default parameters.

    Parameters
    ----------
    X, Y: `numpy.ndarray`
        The x,y-coordinates to evaluate the integrated Gaussian.
        If `X` and `Y` are `None` then they will both be given the
        default value `numpy.arange(-7, 8)`, resulting in a
        `15x15` centered image.
    sigma: `float`
        The standard deviation of the Gaussian.

    Returns
    -------
    image: `numpy.ndarray`
        A Gaussian function integrated over `X` and `Y`.
    """
    if X is None and Y is None:
        X = np.arange(-7, 8)
        Y = X
    elif X is None or Y is None:
        # Previously only the (X is None, Y given) case was rejected;
        # (X given, Y is None) slipped through and crashed further down.
        raise Exception(
            f"Either X and Y must be specified, or neither must be specified, got X={X} and Y={Y}")
    # Outer product of the two 1-D integrated Gaussians, normalized to sum 1.
    result = integrated_gaussian(X, sigma)[None, :] * integrated_gaussian(Y, sigma)[:, None]
    return result/np.sum(result)
def nPairsToFracPairs(hd_obj, all_pairs_vs_rp, redshift_limit=2):
    """
    Convert cumulative pair counts into a fractional number density per shell.

    @redshift_limit :: the initial redshift limit set on the sample (needed for opening dir)
    """
    # pairs falling inside each shell = difference of cumulative counts
    num_pairs = all_pairs_vs_rp[1:] - all_pairs_vs_rp[:-1]
    # projected radius bins and shell volumes
    r_p, shell_volume = aimm.shellVolume()
    # normalization: number of ordered pairs among all objects
    total_num_pairs = len(hd_obj)
    N = total_num_pairs * (total_num_pairs - 1)
    volumes = shell_volume[:len(num_pairs)]
    # fractional number density, with its Poisson error
    return num_pairs / (N * volumes), error(num_pairs) / (N * volumes)
import os
import argparse
def is_valid_image(arg):
"""
Verifies that a given argument is a valid image files
arg: string representing filepath
Returns
-------
img : A W*H*3 array representing a color image
"""
if not os.path.isfile(arg):
raise argparse.ArgumentTypeError(f"{arg} does not exist!")
else:
try:
img = mpimg.imread(arg)
return arg
except IOError:
raise argparse.ArgumentTypeError(f"{arg} isn't a valid image file!")
except:
raise argparse.ArgumentTypeError(f"Something wrong with input files") | 6fd4551be4961a5734f6c65f7e270c6063a93677 | 26,688 |
def default_decode(events, mode='full'):
    """Decode a XigtCorpus element.

    *events* is an iterator of (event, element) pairs as produced by an
    incremental XML parser; *mode* is forwarded to decode_xigtcorpus.
    """
    event, elem = next(events)
    root = elem  # store root for later instantiation
    # Advance until either the first <igt> starts or the corpus ends.
    while (event, elem.tag) not in [('start', 'igt'), ('end', 'xigt-corpus')]:
        event, elem = next(events)
    igts = None
    if event == 'start' and elem.tag == 'igt':
        # Lazily decode each <igt> element until the corpus closes.
        igts = (
            decode_igt(e)
            for e in iter_elements(
                'igt', events, root, break_on=[('end', 'xigt-corpus')]
            )
        )
    xc = decode_xigtcorpus(root, igts=igts, mode=mode)
    return xc
def attention_guide(dec_lens, enc_lens, N, T, g, dtype=None):
    """Build the guided-attention weight matrix W, shape (B, T_dec, T_enc).

    W[i, n, t] = 1 - exp(-(n/dec_lens[i] - t/enc_lens[i])**2 / (2g**2))

    Positions beyond the true decoder/encoder lengths are masked to zero.

    See also:
    Tachibana, Hideyuki, Katsuya Uenoyama, and Shunsuke Aihara. 2017. “Efficiently Trainable Text-to-Speech System Based on Deep Convolutional Networks with Guided Attention.” ArXiv:1710.08969 [Cs, Eess], October. http://arxiv.org/abs/1710.08969.
    """
    dtype = dtype or paddle.get_default_dtype()
    dec_pos = paddle.arange(0, N).astype(dtype) / dec_lens.unsqueeze(
        -1)  # n/N # shape(B, T_dec)
    enc_pos = paddle.arange(0, T).astype(dtype) / enc_lens.unsqueeze(
        -1)  # t/T # shape(B, T_enc)
    # Penalty grows as the attention position strays from the diagonal.
    W = 1 - paddle.exp(-(dec_pos.unsqueeze(-1) - enc_pos.unsqueeze(1))**2 /
                       (2 * g**2))
    # Zero out weights outside the actual sequence lengths.
    dec_mask = sequence_mask(dec_lens, maxlen=N)
    enc_mask = sequence_mask(enc_lens, maxlen=T)
    mask = dec_mask.unsqueeze(-1) * enc_mask.unsqueeze(1)
    mask = paddle.cast(mask, W.dtype)
    W *= mask
    return W
from typing import Union
import os
from pathlib import Path
import gzip
def flex_load(file_path: Union[str, os.PathLike, PurePath],
              default_serializer=None,
              default_is_gzipped=False) -> Union[dict, list]:
    """
    Load a file, inferring serializer and compression from its extension.

    Supports `.pickle`, `.yaml`, `.yml`, `.msgp`, `.json` and `.mat`.
    An optional additional `.gz` extension indicates the file is compressed
    using gzip.

    :param file_path: Path to the file to load.
    :param default_serializer: Default serializer to use if the extension is unknown.
    :param default_is_gzipped: True if a file with unknown extension is compressed.
    :return: Object hierarchy stored in the file.
    """
    path = Path(file_path)
    serializer, is_gzipped, is_binary = determine_serialization(
        path,
        default_serializer,
        default_is_gzipped
    )
    # gzip streams must always be read in binary mode.
    mode = "rb" if is_binary or is_gzipped else "r"
    with path.open(mode) as f:
        raw_data = f.read()
    if is_gzipped:
        raw_data = gzip.decompress(raw_data)
    # Normalize to the representation the serializer expects.
    if is_binary:
        if isinstance(raw_data, str):
            raw_data = bytes(raw_data, "utf8")
    else:
        if isinstance(raw_data, bytes):
            raw_data = str(raw_data, "utf8")
    return serializer.loads(raw_data)
def two_step_colormap(left_max, left, center='transparent', right=None, right_max=None, name='two-step'):
    """Colormap using lightness to extend range

    Parameters
    ----------
    left_max : matplotlib color
        Left end of the colormap.
    left : matplotlib color
        Left middle of the colormap.
    center : matplotlib color | 'transparent'
        Color for the middle value; 'transparent to make the middle transparent
        (default).
    right : matplotlib color
        Right middle of the colormap (if not specified, the colormap ends at
        the location specified by ``center``).
    right_max : matplotlib color
        Right end of the colormap.
    name : str
        Name for the colormap.

    Examples
    --------
    Standard red/blue::

        >>> cmap = plot.two_step_colormap('black', 'red', 'transparent', 'blue', 'black', name='red-blue')
        >>> plot.ColorBar(cmap, 1)

    Or somewhat more adventurous::

        >>> cmap = plot.two_step_colormap('black', (1, 0, 0.3), 'transparent', (0.3, 0, 1), 'black', name='red-blue-2')
    """
    # A transparent center is represented by alpha instead of an RGB value.
    if center == 'transparent':
        center_ = None
        transparent_middle = True
    else:
        center_ = _to_rgb(center, False)
        transparent_middle = False
    left_max_ = _to_rgb(left_max, transparent_middle)
    left_ = _to_rgb(left, transparent_middle)
    # The map is symmetric (two-sided) only when a right side is given.
    is_symmetric = right is not None
    if is_symmetric:
        right_ = _to_rgb(right, transparent_middle)
        right_max_ = _to_rgb(right_max, transparent_middle)
    else:
        right_ = right_max_ = None
    # Select the anchor list based on (symmetric?, transparent middle?).
    kind = (is_symmetric, transparent_middle)
    if kind == (False, False):
        clist = (
            (0.0, center_),
            (0.5, left_),
            (1.0, left_max_),
        )
    elif kind == (False, True):
        clist = (
            (0.0, (*left_[:3], 0)),
            (0.5, left_),
            (1.0, left_max_),
        )
    elif kind == (True, False):
        clist = (
            (0.0, left_max_),
            (0.25, left_),
            (0.5, center_),
            (0.75, right_),
            (1.0, right_max_),
        )
    elif kind == (True, True):
        # Duplicate 0.5 anchors create a hard switch through full transparency.
        clist = (
            (0.0, left_max_),
            (0.25, left_),
            (0.5, (*left_[:3], 0)),
            (0.5, (*right_[:3], 0)),
            (0.75, right_),
            (1.0, right_max_),
        )
    else:
        raise RuntimeError
    cmap = LocatedLinearSegmentedColormap.from_list(name, clist)
    cmap.set_bad('w', alpha=0.)
    cmap.symmetric = is_symmetric
    return cmap
def _broadcast_arg(U, arg, argtype, name):
"""Broadcasts plotting option `arg` to all factors.
Args:
U : KTensor
arg : argument provided by the user
argtype : expected type for arg
name : name of the variable, used for error handling
Returns:
iterable version of arg of length U.ndim
"""
# if input is not iterable, broadcast it all dimensions of the tensor
if arg is None or isinstance(arg, argtype):
return [arg for _ in range(U.ndim)]
# check if iterable input is valid
elif np.iterable(arg):
if len(arg) != U.ndim:
raise ValueError('Parameter {} was specified as a sequence of '
'incorrect length. The length must match the '
'number of tensor dimensions '
'(U.ndim={})'.format(name, U.ndim))
elif not all([isinstance(a, argtype) for a in arg]):
raise TypeError('Parameter {} specified as a sequence of '
'incorrect type. '
'Expected {}.'.format(name, argtype))
else:
return arg
# input is not iterable and is not the corrent type.
else:
raise TypeError('Parameter {} specified as a {}.'
' Expected {}.'.format(name, type(arg), argtype)) | 3a441b9156f7cf614b2ab2967159349252802bed | 26,693 |
import os
import subprocess
import locale
import sys
def _exec_command(command, use_shell=None, use_tee = None, **env):
    """
    Internal workhorse for exec_command().

    Runs *command* (string or sequence) in a subprocess, returning a
    ``(returncode, output_text)`` tuple. ``**env`` entries, if any,
    replace the inherited environment.
    """
    if use_shell is None:
        use_shell = os.name=='posix'
    if use_tee is None:
        use_tee = os.name=='posix'
    if os.name == 'posix' and use_shell:
        # On POSIX, subprocess always uses /bin/sh, override
        sh = os.environ.get('SHELL', '/bin/sh')
        if is_sequence(command):
            command = [sh, '-c', ' '.join(command)]
        else:
            command = [sh, '-c', command]
        use_shell = False
    elif os.name == 'nt' and is_sequence(command):
        # On Windows, join the string for CreateProcess() ourselves as
        # subprocess does it a bit differently
        command = ' '.join(_quote_arg(arg) for arg in command)
    # Inherit environment by default
    env = env or None
    try:
        # universal_newlines is set to False so that communicate()
        # will return bytes. We need to decode the output ourselves
        # so that Python will not raise a UnicodeDecodeError when
        # it encounters an invalid character; rather, we simply replace it
        proc = subprocess.Popen(command, shell=use_shell, env=env,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT,
                                universal_newlines=False)
    except EnvironmentError:
        # Return 127, as os.spawn*() and /bin/sh do
        return 127, ''
    text, err = proc.communicate()
    text = text.decode(locale.getpreferredencoding(False),
                       errors='replace')
    text = text.replace('\r\n', '\n')
    # Another historical oddity: strip a single trailing newline
    if text[-1:] == '\n':
        text = text[:-1]
    # stdio uses bytes in python 2, so to avoid issues, we simply
    # remove all non-ascii characters
    if sys.version_info < (3, 0):
        text = text.encode('ascii', errors='replace')
    if use_tee and text:
        print(text)
    return proc.returncode, text
import signal
def xkcd_line(x, y, xlim=None, ylim=None, mag=1.0, f1=30, f2=0.05, f3=15):
    """
    Mimic a hand-drawn line from (x, y) data

    Definition
    ----------
    def xkcd_line(x, y, xlim=None, ylim=None, mag=1.0, f1=30, f2=0.05, f3=15):

    Input
    -----
    x, y         array_like; arrays to be modified

    Optional Input
    --------------
    xlim, ylim   data range; the assumed plot range for the modification.
                 If not specified, they will be guessed from the data
    mag          float; the magnitude of the distortion (default: 1.0)
    f1, f2, f3   int, float, int; filtering parameters.
                 f1 gives the size of the window (default: 50)
                 f2 gives the high-frequency cutoff (default: 0.01)
                 f3 gives the size of the filter (default: 15)

    Output
    ------
    x, y         ndarrays; the modified lines

    References
    ----------
    See xkcd below.

    Examples
    --------
    for line in ax.lines:
        x, y = line.get_data()
        x_int, y_int = xkcd_line(x, y, xlim, ylim, mag, f1, f2, f3)
        line.set_data(x_int, y_int)

    License
    -------
    This file is part of the JAMS Python package, distributed under the MIT
    License. The JAMS Python package originates from the former UFZ Python library,
    Department of Computational Hydrosystems, Helmholtz Centre for Environmental
    Research - UFZ, Leipzig, Germany.

    Copyright (c) 2013-2019 Matthias Cuntz

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to deal
    in the Software without restriction, including without limitation the rights
    to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be included in all
    copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    SOFTWARE.

    History
    -------
    Written,  MC, Mar 2013
    """
    # assure array
    x = np.asarray(x)
    y = np.asarray(y)
    # get limits for rescaling
    if xlim is None: xlim = (x.min(), x.max())
    if ylim is None: ylim = (y.min(), y.max())
    # degenerate (zero-width) ranges fall back to the other axis' range
    if xlim[1] == xlim[0]: xlim = ylim
    if ylim[1] == ylim[0]: ylim = xlim
    # scale the data to the unit square
    x_scaled = (x - xlim[0]) * 1. / (xlim[1] - xlim[0])
    y_scaled = (y - ylim[0]) * 1. / (ylim[1] - ylim[0])
    # compute the total distance along the path
    dx = x_scaled[1:] - x_scaled[:-1]
    dy = y_scaled[1:] - y_scaled[:-1]
    dist_tot = np.sum(np.sqrt(dx*dx + dy*dy))
    # number of interpolated points is proportional to the distance
    Nu = int(200 * dist_tot)
    u = np.arange(-1, Nu + 1) * 1. / (Nu - 1)
    # interpolate curve at sampled points
    # (spline order limited by the number of available points)
    # k = min(3, len(x) - 1)
    k = min(3, x.size - 1)
    res = interpolate.splprep([x_scaled, y_scaled], s=0, k=k)
    x_int, y_int = interpolate.splev(u, res[0])
    # we perturb perpendicular to the drawn line
    dx = x_int[2:] - x_int[:-2]
    dy = y_int[2:] - y_int[:-2]
    # horizontal or vertical lines
    # np.sign(np.cumsum(np.random.random(dx.size)-0.5)) emulates something like a Brownian motion
    # i.e. auto-correlated random walks around 0; just the sign interests here.
    eps = np.maximum(np.abs(np.amax(x_scaled)-np.amin(x_scaled)), np.abs(np.amax(y_scaled)-np.amin(y_scaled)))/Nu
    if np.all(np.abs(dx) < eps):
        dx = np.sign(np.cumsum(np.random.random(dx.size)-0.5)) * eps
    if np.all(np.abs(dy) < eps):
        dy = np.sign(np.cumsum(np.random.random(dx.size)-0.5)) * eps
    # equal distances
    if np.all(np.sign(dx) == np.sign(dx[0])):
        dx *= np.sign(np.cumsum(np.random.random(dx.size)-0.5))
    if np.all(np.sign(dy) == np.sign(dy[0])):
        dy *= np.sign(np.cumsum(np.random.random(dx.size)-0.5))
    dist = np.sqrt(dx * dx + dy * dy)
    # create a filtered perturbation
    # NOTE(review): `signal` here is presumably scipy.signal (firwin/lfilter);
    # the stdlib signal module has no such functions — confirm the import.
    # coeffs = mag * np.random.normal(0, 0.01, len(x_int) - 2)
    coeffs = mag * np.random.normal(0, 0.01, x_int.size - 2)
    b = signal.firwin(f1, f2*dist_tot, window=('kaiser', f3))
    response = signal.lfilter(b, 1, coeffs)
    x_int[1:-1] += response * dy / dist
    y_int[1:-1] += response * dx / dist
    # un-scale data back to the original ranges
    x_int = x_int[1:-1] * (xlim[1] - xlim[0]) + xlim[0]
    y_int = y_int[1:-1] * (ylim[1] - ylim[0]) + ylim[0]
    return x_int, y_int
def generate_mutation() -> str:
    """
    Retrieve staged instances and generate the mutation query.

    For every staged node, emits type triples plus one triple per edge
    value (with optional facets for namedtuple-like wrappers), then
    clears the staging area.
    """
    staged = Node._get_staged()
    # localns = {x.__name__: x for x in Node._nodes}
    # localns.update({"List": List, "Union": Union, "Tuple": Tuple})
    # annotations = get_type_hints(Node, globalns=globals(), localns=localns)
    # query = ['{', '\tset {']
    query = list()
    for uid, node in staged.items():
        subject, passed = _parse_subject(uid)
        edges = node.edges
        # Type markers: one boolean triple and one _type triple per node.
        line = f'{subject} <{node.__class__.__name__}> "true" .'
        query.append(line)
        line = f'{subject} <_type> "{node.__class__.__name__}" .'
        query.append(line)
        for pred, obj in edges.items():
            # annotation = annotations.get(pred, "")
            if not isinstance(obj, list):
                obj = [obj]
            for o in obj:
                facets = []
                # Facet-carrying values are namedtuples with an `obj` field;
                # the remaining fields become facet key=value pairs.
                if isinstance(o, tuple) and hasattr(o, "obj"):
                    for facet in o.__class__._fields[1:]:
                        val = _raw_value(getattr(o, facet))
                        facets.append(f"{facet}={val}")
                    o = o.obj
                if not isinstance(o, (list, tuple, set)):
                    out = [o]
                else:
                    out = o
                for output in out:
                    output = _make_obj(node, pred, output)
                    if facets:
                        # NOTE(review): this rebinds `facets` from list to str;
                        # if `out` has several values the second iteration
                        # would join the characters of the string — looks
                        # unintended, confirm against multi-valued edges.
                        facets = ", ".join(facets)
                        line = f"{subject} <{pred}> {output} ({facets}) ."
                    else:
                        line = f"{subject} <{pred}> {output} ."
                    query.append(line)
    query = "\n".join(query)
    Node._clear_staged()
    return query
from datetime import datetime
def start(fund: Fund, start_date: datetime) -> Fund:
    """
    Starts the fund by setting the added USD and the market value of the
    manager as the current market value, so that at the beginning the fund
    consists solely of the manager's positions.

    :param fund: The fund to start
    :param start_date: The date to calculate the market value
    :return: The fund
    """
    market_value = calculations.get_market_value(fund, start_date.date())
    fund.added_usd = market_value
    fund.market_value = market_value
    fund.save()
    manager = fund.get_manager()
    manager.added_usd = market_value
    manager.market_value = market_value
    manager.save()
    __rebalance_investors_ownership(fund)
    return fund
def _make_experiment(exp_id=1, path="./Results/Tmp/test_FiftyChain"):
    """
    Each file specifying an experimental setup should contain a
    make_experiment function which returns an instance of the Experiment
    class with everything set up.

    @param exp_id: number used to seed the random number generators
    @param path: output directory where logs and results are stored
    """
    ## Domain:
    domain = FiftyChain()
    ## Representation
    # discretization only needed for continuous state spaces, discarded otherwise
    representation = Tabular(domain)
    ## Policy
    policy = eGreedy(representation, epsilon=0.2)
    ## Agent
    agent = SARSA(representation=representation, policy=policy,
                  discount_factor=domain.discount_factor,
                  learn_rate=0.1)
    checks_per_policy = 3
    max_steps = 50
    num_policy_checks = 3
    # NOTE: Experiment(**locals()) forwards *every* local above by name
    # (exp_id, path, domain, representation, policy, agent, ...), so the
    # local variable names must match the Experiment constructor's
    # parameter names — do not rename or add locals here.
    experiment = Experiment(**locals())
    return experiment
def find_largest_digit_helper(n, max_n=0):
    """
    :param n: int, the integer to inspect
    :param max_n: int, the largest digit found so far
    :return: int, the largest single digit appearing in n
    """
    # Base cases: no digits left, or the maximum possible digit (9) was found.
    if n == 0 or max_n == 9:
        return max_n
    # Work with the absolute value so negative inputs behave the same.
    if n < 0:
        n = -n
    # Peel off the last digit and recurse with the running maximum.
    last_digit = n % 10
    return find_largest_digit_helper(n // 10, max(max_n, last_digit))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.