| content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M) |
|---|---|---|
import numpy as np
from numpy.linalg import eigvals
def is_stable(A, domain='z'):
"""Determines if a linear state-space model is stable from eigenvalues of `A`
Parameters
----------
A : ndarray(n,n)
state matrix
domain : str, optional {'z', 's'}
'z' for discrete-time, 's' for continuous-time state-space models
    Returns
-------
bool
"""
if domain == 'z': # discrete-time
# Unstable if at least one pole outside unit circle
if any(abs(eigvals(A)) > 1):
return False
elif domain == 's': # continuous-time
# Unstable if at least one pole in right-half plane
if any(np.real(eigvals(A)) > 0):
return False
else:
raise ValueError(f"{domain} wrong. Use 's' or 'z'")
return True
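# Illustrative usage sketch: a discrete-time system whose eigenvalues (0.5, 0.9)
# lie inside the unit circle is stable; one with an eigenvalue at 1.1 is not.
A_stable = np.diag([0.5, 0.9])
A_unstable = np.diag([0.5, 1.1])
print(is_stable(A_stable, domain='z'))    # True
print(is_stable(A_unstable, domain='z'))  # False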
|
8b073fa021b0f50363d4f5f1a7bf3722a62ae71b
| 3,640,700
|
def email_sent_ipn(path: str) -> tuple:
"""
**email_sent_ipn**
    Mailgun event notification (IPN) handler for sent email.
    :param path: Mailgun event type ('delivered', 'clicks', 'opens', 'failure', 'spam' or 'unsubscribe')
    :return: "OK", 200
"""
# NOTE: Delivered ipn will end up here
if path == "delivered":
pass
elif path == "clicks":
pass
elif path == "opens":
pass
elif path == "failure":
pass
elif path == "spam":
pass
elif path == "unsubscribe":
pass
return "OK", 200
|
4bbfed4f86916ddc2b68ade0c8739e25a562bbda
| 3,640,701
|
import sys
def decode(path: str) -> str:
"""
    Utility function: re-encode the path using the stdout encoding, dropping characters that cannot be represented.
"""
return path.encode(sys.stdout.encoding, 'ignore').decode(sys.stdout.encoding)
|
9e75e04928e7df4646feaed85a799a137693fa77
| 3,640,702
|
import json
from django.http import HttpResponse
def load_posts_view(request):
"""Load posts view, handles asynchronous queries to retrieve more posts.
"""
if request.method == 'GET':
results, start = get_more_posts(request.GET)
json_result = json.dumps({'posts': results,
'start': start
})
return HttpResponse(json_result, mimetype='application/json')
else:
return HttpResponse('', mimetype='application/json')
|
832f2a04b23eb78ad25ad7db2d3cabfdaa61b075
| 3,640,703
|
def create_dataset(m, timestep, var='all', chunks=(10, 300, 300)):
"""
Create xarray Dataset from binary model data
for one time step. This also incorporates all model
grid information and dimensions, regardless of the variable selected.
Parameters
----------
m : LLCRegion
Model class generated with LLCRegion()
var : str, optional
Variable to be read. Defaults to 'all', but only one variable,
        e.g. 'v', or a list of variables, e.g. ['t', 'v']
can be selected here instead.
chunks : tuple, optional
Chunk size for dask. Defaults to (10, 300, 300)
Returns
-------
ds : xarray Dataset
Dataset
"""
    if var == 'all':
vars = _model_variables
else:
vars = {k: _model_variables[k] for k in var}
# vars = {var: _model_variables[var]}
# reduce xc/yc, xg/yg to 1d vector
lon, lat = _reduce_2d_coords(m.xc, m.yc)
xc, yc = _reduce_2d_coords(m.xc, m.yc)
xg, yg = _reduce_2d_coords(m.xg, m.yg)
# calculate Zu, Zl, Zp1 (combination of Zu, Zl)
tmp = m.drf
tmp = np.insert(tmp, 0, 0)
Zp1 = np.cumsum(tmp)
Zl = Zp1[0:-1]
Zu = Zp1[1::]
# calculate drc
drc = np.diff(m.z)
drc = np.insert(drc, 0, m.z[0])
drc = np.append(drc, Zp1[-1]-m.z[-1])
# generate xarray dataset with only grid information first
ds = xr.Dataset(coords={'xc': (['xc'], xc, {'axis': 'X'}),
'yc': (['yc'], yc, {'axis': 'Y'}),
'lon': (['xc'], xc, {'axis': 'X'}),
'lat': (['yc'], yc, {'axis': 'Y'}),
'dxc': (['yc', 'xg'], m.dxc),
                            'dyc': (['yg', 'xc'], m.dyc),
'xg': (['xg'], xg, {'axis': 'X', 'c_grid_axis_shift': -0.5}),
'yg': (['yg'], yg, {'axis': 'Y', 'c_grid_axis_shift': -0.5}),
'dxg': (['yg', 'xc'], m.dxg),
'dyg': (['yc', 'xg'], m.dyg),
'dxv': (['yg', 'xg'], m.dxv),
'dyu': (['yg', 'xg'], m.dyu),
                            'z': (['z'], m.z, {'axis': 'Z'}),
'zl': (['zl'], Zl, {'axis': 'Z', 'c_grid_axis_shift': -0.5}),
'zu': (['zu'], Zu, {'axis': 'Z', 'c_grid_axis_shift': +0.5}),
'zp1': (['zp1'], Zp1, {'axis': 'Z', 'c_grid_axis_shift': (-0.5,0.5)}),
'drc': (['zp1'], drc, {'axis': 'Z'}),
'drf': (['z'], m.drf, {'axis': 'Z'}),
'ra': (['yc', 'xc'], m.rac),
'raz': (['yg', 'xg'], m.raz),
'depth': (['yc', 'xc'], m.hb),
'hfacc': (['z', 'yc', 'xc'], m.hfacc),
'hfacw': (['z', 'yc', 'xg'], m.hfacw),
'hfacs': (['z', 'yg', 'xc'], m.hfacs)})
# define dictionary that will hold dask arrays
d = {}
# read all variables into a dict with dask arrays
for k, v in vars.items():
filename = m.data_dir+'{}/{:010d}_{}'.format(v, timestep, v)+\
'_10609.6859.1_936.1062.90'
# account for funky V file names
if v=='V':
exist = _check_file_exists(filename, verbose=False)
            if not exist:
filename = m.data_dir+'{}/{:010d}_{}'.format(v, timestep, v)+\
'_10609.6858.1_936.1062.90_Neg'
exist = _check_file_exists(filename)
d[k] = da.from_delayed(delayed(m.load_3d_data)(filename), (m.Nz, m.Nlat, m.Nlon), m.dtype)
d[k] = d[k].rechunk(chunks)
for k, v in d.items():
ds[k] = (_grid_association[k], v)
del d
# add 2d variables
    if var == 'all':
vars2d = _model_2dvariables
d = {}
for k, v in vars2d.items():
filename = m.data_dir+'{}/{:010d}_{}'.format(v, timestep, v)+\
'_10609.6859.1_936.1062.1'
exist = _check_file_exists(filename)
d[k] = da.from_delayed(delayed(m.load_2d_data)(filename), (m.Nlat, m.Nlon), m.dtype)
d[k] = d[k].rechunk(chunks[1:])
for k, v in d.items():
ds[k] = (_grid_association[k], v)
del d
return ds
|
02dd6a4e7ff520e5ae65c7a3a9e3bd2b92d58629
| 3,640,704
|
def fetchnl2bash(m:Manager, shuffle:bool=True)->DRef:
"""
FIXME: Unhardcode '3rdparty'-based paths
"""
allnl=fetchlocal(m,
path=join('3rdparty','nl2bash_essence','src','data','bash','all.nl'),
sha256='1db0c529c350b463919624550b8f5882a97c42ad5051c7d49fbc496bc4e8b770',
mode='asis',
output=[promise, 'all.nl'] )
allcm=fetchlocal(m,
path=join('3rdparty','nl2bash_essence','src','data','bash','all.cm'),
sha256='3a72eaced7fa14a0938354cefc42b2dcafb2d47297102f1279086e18c3abe57e',
mode='asis',
output=[promise, 'all.cm'] )
if shuffle:
s=lineshuffle(m, src={'allnl':mklens(allnl).output.refpath,
'allcm':mklens(allcm).output.refpath})
allnl_refpath=mklens(s).allnl.refpath
allcm_refpath=mklens(s).allcm.refpath
else:
allnl_refpath=mklens(allnl).output.refpath
allcm_refpath=mklens(allcm).output.refpath
nlfiles=splitfile(m, src=allnl_refpath,
fractions=[('train',f'train_nl.txt',0.9),
('eval', f'eval_nl.txt',0.1)])
cmfiles=splitfile(m, src=allcm_refpath,
fractions=[('train',f'train_cm.txt',0.9),
('eval', f'eval_cm.txt',0.1)])
return mknode(m, name='fetchnl2bash', sources={
'train_input_combined':mklens(nlfiles).train.refpath,
'train_target_combined':mklens(cmfiles).train.refpath,
'eval_input_combined':mklens(nlfiles).eval.refpath,
'eval_target_combined':mklens(cmfiles).eval.refpath
})
|
d99376bbe2dbc2250281eccb9b5d7108cf9f9c84
| 3,640,705
|
def mummer_cmds_four(path_file_four):
"""Example MUMmer commands (four files)."""
return MUMmerExample(
path_file_four,
[
"nucmer --mum -p nucmer_output/file1_vs_file2 file1.fna file2.fna",
"nucmer --mum -p nucmer_output/file1_vs_file3 file1.fna file3.fna",
"nucmer --mum -p nucmer_output/file1_vs_file4 file1.fna file4.fna",
"nucmer --mum -p nucmer_output/file2_vs_file3 file2.fna file3.fna",
"nucmer --mum -p nucmer_output/file2_vs_file4 file2.fna file4.fna",
"nucmer --mum -p nucmer_output/file3_vs_file4 file3.fna file4.fna",
],
[
(
"delta_filter_wrapper.py delta-filter -1 "
"nucmer_output/file1_vs_file2.delta "
"nucmer_output/file1_vs_file2.filter"
),
(
"delta_filter_wrapper.py delta-filter -1 "
"nucmer_output/file1_vs_file3.delta "
"nucmer_output/file1_vs_file3.filter"
),
(
"delta_filter_wrapper.py delta-filter -1 "
"nucmer_output/file1_vs_file4.delta "
"nucmer_output/file1_vs_file4.filter"
),
(
"delta_filter_wrapper.py delta-filter -1 "
"nucmer_output/file2_vs_file3.delta "
"nucmer_output/file2_vs_file3.filter"
),
(
"delta_filter_wrapper.py delta-filter -1 "
"nucmer_output/file2_vs_file4.delta "
"nucmer_output/file2_vs_file4.filter"
),
(
"delta_filter_wrapper.py delta-filter -1 "
"nucmer_output/file3_vs_file4.delta "
"nucmer_output/file3_vs_file4.filter"
),
],
)
|
65262a16f47b952796f79dcb9bba37c5dcbaed0b
| 3,640,706
|
def Exponweibull(a=1, c=1, scale=1, shift=0):
"""
    Exponentiated Weibull distribution.
Args:
a (float, Dist) : First shape parameter
c (float, Dist) : Second shape parameter
scale (float, Dist) : Scaling parameter
shift (float, Dist) : Location parameter
"""
dist = cores.exponweibull(a, c)*scale + shift
dist.addattr(str="Exponweibull(%s,%s,%s,%s)"%(a, c, scale,shift))
return dist
|
64871830101df96f8148ef6ee0b8735813793306
| 3,640,707
|
def authed_request_for_id(gplus_id, request):
"""Adds the proper access credentials for the specified user and then makes an HTTP request."""
# Helper method to make retry easier
def make_request(retry=True):
token = get_access_token_for_id(gplus_id)
request.headers['Authorization'] = 'Bearer %s' % token
prepared_request = request.prepare()
response = session.send(prepared_request, timeout=GOOGLE_API_TIMEOUT)
if response.status_code == 401:
# Our access token is invalid. If this is the first failure,
# try forcing a refresh of the access token.
if retry:
Cache.delete(ACCESS_TOKEN_CACHE_KEY_TEMPLATE % gplus_id)
return make_request(retry=False)
return response
response = make_request()
if response.status_code == 403:
# Typically used to indicate that Google is rate-limiting the API call
        raise UnavailableException('API 403 response: %r' % response.json(), 503)
elif response.status_code == 401:
raise UnavailableException('Invalid access token.', 401)
elif response.status_code != 200:
raise UnavailableException(
'Unknown API error (code=%d): %r' % (response.status_code, response.json()), 502)
return response
|
f727cc818fd3d5b70fba80b00dfb09cf1f182275
| 3,640,708
|
def _bool_method_SERIES(op, name, str_rep):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
def na_op(x, y):
try:
result = op(x, y)
except TypeError:
if isinstance(y, list):
y = lib.list_to_object_array(y)
if isinstance(y, (np.ndarray, pd.Series)):
if (is_bool_dtype(x.dtype) and is_bool_dtype(y.dtype)):
result = op(x, y) # when would this be hit?
else:
x = com._ensure_object(x)
y = com._ensure_object(y)
result = lib.vec_binop(x, y, op)
else:
try:
# let null fall thru
if not isnull(y):
y = bool(y)
result = lib.scalar_binop(x, y, op)
except:
raise TypeError("cannot compare a dtyped [{0}] array with "
"a scalar of type [{1}]".format(
x.dtype, type(y).__name__))
return result
def wrapper(self, other):
is_self_int_dtype = is_integer_dtype(self.dtype)
fill_int = lambda x: x.fillna(0)
fill_bool = lambda x: x.fillna(False).astype(bool)
if isinstance(other, pd.Series):
name = _maybe_match_name(self, other)
other = other.reindex_like(self)
is_other_int_dtype = is_integer_dtype(other.dtype)
other = fill_int(other) if is_other_int_dtype else fill_bool(other)
filler = fill_int if is_self_int_dtype and is_other_int_dtype else fill_bool
return filler(self._constructor(na_op(self.values, other.values),
index=self.index,
name=name))
elif isinstance(other, pd.DataFrame):
return NotImplemented
else:
# scalars, list, tuple, np.array
filler = fill_int if is_self_int_dtype and is_integer_dtype(np.asarray(other)) else fill_bool
return filler(self._constructor(na_op(self.values, other),
index=self.index)).__finalize__(self)
return wrapper
|
d6dec673d9a0f8384c3510bdda449f8e4157c96e
| 3,640,709
|
def get_playlist_by_id(playlist_id):
""" Returns a playlist by playlist id """
return Playlist.query.filter(Playlist.playlist_id == playlist_id).first()
|
28fd295a5d096b1da40391193e6333cc48b14ea2
| 3,640,710
|
import numpy as np
def section_cfield(xs, x_a, c_field, rmax = 60e3):
    """
    Extract a section of a sound-speed transect for use in the transmission calculation.
    """
x_i = np.bitwise_and(x_a >= xs, x_a <= xs + rmax)
return x_a[x_i], c_field[:, x_i]
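# Minimal usage sketch with made-up numbers: keep the part of the transect that
# lies within rmax of the section start xs.
x_a = np.arange(0, 200e3, 10e3)              # along-track positions [m]
c_field = 1500 + np.zeros((50, x_a.size))    # sound speed, depth x range
x_sec, c_sec = section_cfield(20e3, x_a, c_field, rmax=60e3)
print(x_sec.min(), x_sec.max())              # 20000.0 80000.0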
|
c4c213293f7aee7735a9a6209f671aae6d8e3989
| 3,640,711
|
def shared_dropout(shape, use_noise, trng, value):
"""
Shared dropout mask (pervasive dropout)
    :param shape: shape of the dropout mask
    :param use_noise: shared switch variable; nonzero enables the stochastic mask
    :param trng: Theano random stream used to sample the binomial mask
    :param value: retention probability of the binomial mask
    :return: symbolic dropout mask
"""
return tensor.switch(use_noise,
trng.binomial(shape, p=value, n=1,
dtype=floatX),
theano.shared(np.float32(value)))
|
51373285b3c708cedd1ebf2a613237deaa7b6dab
| 3,640,712
|
def setup_flow_assembler(gb, method, data_key=None, coupler=None):
"""Setup a standard assembler for the flow problem for a given grid bucket.
The assembler will be set up with primary variable name 'pressure' on the
GridBucket nodes, and mortar_flux for the mortar variables.
Parameters:
gb: GridBucket.
method (EllipticDiscretization).
data_key (str, optional): Keyword used to identify data dictionary for
node and edge discretization.
        coupler (EllipticInterfaceLaw): Defaults to RobinCoupling.
    Returns:
        Assembler, ready to discretize and assemble the problem.
        block_info (np.ndarray): one row per block with its dimension, a mortar
            flag, the block start index and the indices of the neighboring
            subdomain blocks (-1 for non-mortar blocks).
"""
if data_key is None:
data_key = "flow"
if coupler is None:
coupler = pp.RobinCoupling(data_key, method)
if isinstance(method, pp.MVEM) or isinstance(method, pp.RT0):
mixed_form = True
else:
mixed_form = False
for g, d in gb:
if mixed_form:
d[pp.PRIMARY_VARIABLES] = {"pressure": {"cells": 1, "faces": 1}}
else:
d[pp.PRIMARY_VARIABLES] = {"pressure": {"cells": 1}}
d[pp.DISCRETIZATION] = {"pressure": {"diffusive": method}}
for e, d in gb.edges():
g1, g2 = gb.nodes_of_edge(e)
d[pp.PRIMARY_VARIABLES] = {"mortar_flux": {"cells": 1}}
d[pp.COUPLING_DISCRETIZATION] = {
"lambda": {
g1: ("pressure", "diffusive"),
g2: ("pressure", "diffusive"),
e: ("mortar_flux", coupler),
}
}
d[pp.DISCRETIZATION_MATRICES] = {"flow": {}}
assembler = pp.Assembler(gb)
num_blocks = assembler.full_dof.size
block_info = np.zeros((num_blocks, 5))
block_start = np.hstack((0, np.cumsum(assembler.full_dof)))
# map from grids to block dof index. Will be unique, since there is a single
# dof per subdomain
subdom_block_map = {}
for (g, var), ind in assembler.block_dof.items():
is_mortar = 0
if var == "mortar_flux":
is_mortar = 1
dim = g[0].dim
else:
dim = g.dim
subdom_block_map[g] = ind
block_info[ind, :3] = np.array([dim, is_mortar, block_start[ind]], dtype=np.int)
# Second loop over the blocks. This time, we will fill in the two last
# columns, on neighboring subdomains.
for (g, var), ind in assembler.block_dof.items():
if var == "mortar_flux":
block_info[ind, 3] = subdom_block_map[g[0]]
block_info[ind, 4] = subdom_block_map[g[1]]
else:
block_info[ind, 3:] = np.array([-1, -1])
return assembler, block_info
|
6e1baaf91e06679ef760932f6ae27e0c606e4f21
| 3,640,713
|
def get_article(article_id: str, db: Session = Depends(deps.get_db),
current_user: schemas.UserVerify = Depends(
deps.get_current_user)) -> JSONResponse:
""" Return Single Article"""
data = crud_articles.get_article(article_id=article_id, db=db)
if data is None:
return JSONResponse(status_code=500,
content={"message": "No Records Found"})
json_compatible_item_data = jsonable_encoder(data)
return JSONResponse(status_code=200, content=json_compatible_item_data)
|
e78af6052b112c5da5811a0c92fe462743bb5c7e
| 3,640,714
|
def _get_sp_instance():
"""Create an spotify auth_manager and check whether the current user has
a token (has been authorized already). If the user has a token, then they
are authenticated -- return their spotipy instance. If the user does not have
a token, then they are not authenticated -- raise an exception
"""
auth_manager = _get_auth_manager()
if auth_manager.get_cached_token():
return spotipy.Spotify(auth_manager=auth_manager)
else:
raise SpotifyUserAuthFailure(get_auth_url(show_dialog=True))
|
b2117c709169192626efdf2b699a9a1c2c501ecc
| 3,640,715
|
def get_func_global(op_type, dtype):
"""Generate function for global address space
Used as `generator(op_type, dtype)`.
"""
op = getattr(dppy.atomic, op_type)
def f(a):
op(a, 0, 1)
return f
|
72816c78bc36f7aa630551ae161fa0870acefe36
| 3,640,716
|
def klucb(x, d, div, upperbound, lowerbound=-float("inf"), precision=1e-6):
"""The generic klUCB index computation.
Input args.:
x,
d,
div:
KL divergence to be used.
upperbound,
lowerbound=-float('inf'),
precision=1e-6,
"""
l = max(x, lowerbound)
u = upperbound
while u - l > precision:
m = (l + u) / 2
if div(x, m) > d:
u = m
else:
l = m
return (l + u) / 2
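# Illustrative sketch: KL-UCB index of a Bernoulli arm, using a Bernoulli KL
# divergence helper (this helper is an assumption, not part of the snippet above).
import math

def kl_bernoulli(p, q, eps=1e-15):
    p = min(max(p, eps), 1 - eps)
    q = min(max(q, eps), 1 - eps)
    return p * math.log(p / q) + (1 - p) * math.log((1 - p) / (1 - q))

index = klucb(x=0.3, d=0.2, div=kl_bernoulli, upperbound=1.0, lowerbound=0.0)
print(round(index, 2))  # ~0.61, an upper confidence bound above the empirical mean 0.3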
|
82aa51e248568d201e0d9d5621bf043532df8572
| 3,640,717
|
def convert_pk_to_index(pk_tuples, indices):
"""
For a list of tuples with elements referring to pk's of indices,
convert pks to 0-index values corresponding to order of queryset
:param pk_tuples: list of tuples [(row_pk, col_pk), ... ]
:param indices: list of querysets
:return: list of tuples [(row_idx, col_idx), ... ]
"""
output_tuples = []
maps = [pk_index_map(idx) for idx in indices]
for pk_tuple in pk_tuples:
try:
idxs = tuple(maps[axis][pk] for axis, pk in enumerate(pk_tuple))
output_tuples.append(idxs)
except KeyError:
# pk may not be in index scope which is fine
pass
return output_tuples
|
81837ded50d4cd086b9330ea5c709fb3bd93ca0f
| 3,640,718
|
from typing import Union
def device_path_to_str(path: Union[bytes, str]) -> str:
"""
Converts a device path as returned by the fido2 library to a string.
Typically, the path already is a string. Only on Windows, a bytes object
using an ANSI encoding is used instead. We use the ISO 8859-1 encoding to
decode the string which should work for all systems.
"""
if isinstance(path, bytes):
return path.decode("iso-8859-1", errors="ignore")
else:
return path
|
76d0d3d50e978d998ef68e0c509c3933f94778d9
| 3,640,719
|
import numpy as np
def empirical(X):
    """Compute the empirical covariance as a baseline estimator.
    Returns the covariance matrix and its inverse (the precision matrix).
    """
    print("Empirical")
    n_samples = X.shape[0]
    cov = np.dot(X.T, X) / n_samples
    return cov, np.linalg.inv(cov)
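# Quick usage sketch: empirical covariance and precision of standard-normal samples.
rng = np.random.default_rng(0)
X = rng.standard_normal((1000, 3))
cov, prec = empirical(X)
print(cov.shape, prec.shape)  # (3, 3) (3, 3)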
|
67c8c1f42590ee6c8d56f5f1e53253c5eff74376
| 3,640,720
|
def emitir_extrato(contas, numero_conta, movimentacoes, data_inicial):
"""
Retorna todas as movimentações de <movimentacoes> feitas pela conta
com o <numero_conta> a partir da <data_inicial>
"""
historico_movimentacoes = []
if numero_conta in contas:
minhas_movimentacoes = movimentacoes_da_conta(numero_conta, movimentacoes)
inicial = -1
        # Find the date from which the statement should be issued
for i, movimentacao in enumerate(minhas_movimentacoes):
data_movimentacao = movimentacao[5]
            # Check which date is more recent: the statement start date or the date
            # of the transaction in question
if verificar_data_mais_recente(data_inicial, data_movimentacao):
continue
inicial = i
break
        # If there is any transaction after the requested date, keep all of those
        # transactions to return
if(inicial >= 0):
historico_movimentacoes = minhas_movimentacoes[inicial:]
return historico_movimentacoes
else:
return 0
|
0caa46aaed0ccfa506f8caa9b82625649d116ce1
| 3,640,721
|
def wavelength_to_energy(wavelength):
"""
Converts wavelength (A) to photon energy (keV)
"""
return 12.39842/wavelength
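# Worked example: Cu K-alpha radiation at 1.5406 A corresponds to about 8.05 keV.
print(round(wavelength_to_energy(1.5406), 3))  # 8.048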
|
4e2d11f2de8ed4890df5d885801cd492644817d8
| 3,640,722
|
import hashlib
import os
def calculate_hash_512(filepath, verbose):
    """
    SHA512 Hash Digest
    """
    if verbose:
        print('Calculating hash...')
    sha512_hash = hashlib.sha512()
    with open(filepath, 'rb') as f:
        statinfo = os.stat(filepath)
        block_size = 100 * (2**20)  # Magic number: 100 * 1MB blocks
        nb_blocks = (statinfo.st_size // block_size) + 1
        cnt_blocks = 0
        while True:
            block = f.read(block_size)
            if not block:
                break
            sha512_hash.update(block)
            cnt_blocks = cnt_blocks + 1
            progress = 100 * cnt_blocks // nb_blocks
            if verbose:
                draw_progress_bar(progress)
    return sha512_hash.digest()
|
4bf153275d9791112f39d3629e9cc94f54177dc4
| 3,640,723
|
def _crop_after_rotation(im, angle, xres, yres, surroundings):
"""Crop image to the bounding box of bite's surroundings.
Arguments:
im: PIL.Image, rotated map part
angle: by which the map has been rotated, in degrees (counterclockwise)
xres: width of one tile in pixels
yres: height of one tile in pixels
surroundings: shapely.geometry.polygon.Polygon
"""
#before rotation
x1, y1, x2, y2 = surroundings.bounds
old_bb_upper_left = Point(x1, y1)
old_bb_upper_right = Point(x2, y1)
old_bb_bottom_left = Point(x1, y2)
old_bb_center = ((x1+x2)/2, (y1+y2)/2)
#shapely y-axis goes upwards
shapely_angle = -angle
#after rotation
x1, y1, x2, y2 = affinity.rotate(surroundings, shapely_angle, origin=old_bb_center).bounds
crop_upper_left = Point(x1, y1)
crop_width = x2 - x1
crop_height = y2 - y1
#points where old bounding box of surroundings (i.e. the old image) touches
#its bounding box after rotation
tl = None #touch at the left side of the new bounding box
tt = None #touch at the top side of the new bounding box
if angle > 0:
tl = affinity.rotate(old_bb_upper_left, shapely_angle, origin=old_bb_center)
tt = affinity.rotate(old_bb_upper_right, shapely_angle, origin=old_bb_center)
else:
tl = affinity.rotate(old_bb_bottom_left, shapely_angle, origin=old_bb_center)
tt = affinity.rotate(old_bb_upper_left, shapely_angle, origin=old_bb_center)
    # upper left corner of the new bounding box
new_bb_upper_left = Point(tl.x, tt.y)
#from these we get b: upper left corner of the crop area relative to new_bb_upper_left
b = (crop_upper_left.x - new_bb_upper_left.x, crop_upper_left.y - new_bb_upper_left.y)
#crop rectangle in pixels relative to new_bb_upper_left
crop_box = [int(x) for x in [
b[0] * xres,
b[1] * yres,
(b[0] + crop_width) * xres,
(b[1] + crop_height) * yres
]]
cropped = im.crop(box=crop_box)
cropped.load()
return cropped
|
eeeda2c5c8d868e813a67584c72561560409e1b3
| 3,640,724
|
from copy import copy
def get_custom_scorer(metric, gib=True, needs_proba=False, needs_threshold=False):
"""Get a scorer from a str, func or scorer.
Scorers used by ATOM have a name attribute.
Parameters
----------
metric: str, func or scorer
Name, metric or scorer to get ATOM's scorer from.
gib: bool, optional (default=True)
whether the metric is a score function or a loss function,
i.e. if True, a higher score is better and if False, lower is
better. Is ignored if the metric is a string or a scorer.
needs_proba: bool, optional (default=False)
Whether the metric function requires probability estimates of
a classifier. Is ignored if the metric is a string or a scorer.
needs_threshold: bool, optional (default=False)
Whether the metric function takes a continuous decision
certainty. Is ignored if the metric is a string or a scorer.
Returns
-------
scorer: scorer
Custom sklearn scorer with name attribute.
"""
# Copies are needed to not alter SCORERS
if isinstance(metric, str):
metric = metric.lower()
if metric in SCORERS:
scorer = copy(SCORERS[metric])
scorer.name = metric
elif metric in SCORERS_ACRONYMS:
scorer = copy(SCORERS[SCORERS_ACRONYMS[metric]])
scorer.name = SCORERS_ACRONYMS[metric]
elif metric in CUSTOM_SCORERS:
scorer = make_scorer(copy(CUSTOM_SCORERS[metric]))
scorer.name = scorer._score_func.__name__
else:
raise ValueError(
"Unknown value for the metric parameter, got "
f"{metric}. Choose from: {', '.join(SCORERS)}."
)
elif hasattr(metric, "_score_func"): # Scoring is a scorer
scorer = copy(metric)
# Some scorers use default kwargs
default_kwargs = ("precision", "recall", "f1", "jaccard")
if any(name in scorer._score_func.__name__ for name in default_kwargs):
if not scorer._kwargs:
scorer._kwargs = {"average": "binary"}
for key, value in SCORERS.items():
if scorer.__dict__ == value.__dict__:
scorer.name = key
break
else: # Scoring is a function with signature metric(y, y_pred)
scorer = make_scorer(
score_func=metric,
greater_is_better=gib,
needs_proba=needs_proba,
needs_threshold=needs_threshold,
)
scorer.name = scorer._score_func.__name__
return scorer
|
a698798302ec0ed7ad469b76d6893e85e669905e
| 3,640,725
|
def julian_day(t='now'):
"""
Wrap a UTC -> JD conversion from astropy.
"""
return Time(parse_time(t)).jd
|
fa2f0d707798227e8e7f67b21cf2e4dc42308093
| 3,640,726
|
from collections import Counter
import pandas as pd
def add_stop_words(dataframe: pd.DataFrame,
                   k_words: int) -> list:
    """
    Build a list of stop words: the words that occur most frequently in the document.
    :param dataframe: dataframe with a 'text' column
    :param k_words: number of the most frequently repeated unique words
    :return: list of unique stop words
    """
split_words = dataframe['text'].values
split_words = " ".join(split_words)
split_words = split_words.split()
_counter = Counter(split_words).most_common(k_words)
n_words = [i[0] for i in _counter]
return list(set(n_words))
|
3ca7fbe1221b55e2a072d49f01c553af1786ca8f
| 3,640,727
|
import torch
def get_batch(data_iterator):
"""Build the batch."""
# Items and their type.
keys = ['text', 'types', 'labels', 'is_random', 'loss_mask', 'padding_mask']
datatype = torch.int64
# Broadcast data.
data = next(data_iterator) if data_iterator is not None else None
data_b = mpu.broadcast_data(keys, data, datatype)
# Unpack.
tokens = data_b['text'].long()
types = data_b['types'].long()
sentence_order = data_b['is_random'].long()
loss_mask = data_b['loss_mask'].float()
lm_labels = data_b['labels'].long()
padding_mask = data_b['padding_mask'].long()
return tokens, types, sentence_order, loss_mask, lm_labels, padding_mask
|
fad3b181c685e3e57fa185c1eb790517536527ec
| 3,640,728
|
import os
import pandas as pd
def outcomes_by_resected_lobe(directory='L:\\', filename='All_Epilepsy_Ops_CROSSTAB_Statistics_YAY_2019.xlsx',
lobes=['T Lx', 'T Lesx']):
"""
Creates the list of Gold_standard post-operative ILAE 1 at all follow up years MRNs in patients who had only
specific lobe resections.
    lobes = choose from this list. NOTE THE SPACES:
['CCx', 'F Lesx', 'F Lesx ', 'F Lx', 'F T Lx', 'Hx', 'Hx ', 'MST', 'O Lesx', 'O Lx',
'O P Lx', 'P Lesx', 'P Lx', 'T F Lx', 'T Lesx', 'T Lx', 'T O Lesx', 'T P Lesx', 'T P Lx']
These are MRNs - not exactly patients (some patients have more than one)
"""
excel_file = os.path.join(directory, filename)
df_outcomes = pd.read_excel(excel_file, sheet_name = 'Aa_E_Only_All_E_Ops_CROSSTAB', usecols=[1, 4, 36]) # non-indexed
df_outcomes2 = df_outcomes['Hospital No'].str.split(', ').apply(pd.Series) # makes 4 columns of hosp no's
df_outcomes2.index = df_outcomes.set_index(['boolean', 'OP Type']).index # set index (this weird line so can use deepcopy prior if req'd)
df_outcomes3 = df_outcomes2.stack().reset_index(['boolean', 'OP Type']) # now 1,105 non-null row DataFrame
df_outcomes3.columns = ['Gold_outcome', 'Resected Lobe', 'MRN'] # rename columns
df_outcomes3.set_index('MRN', inplace=True) # now have list of 1,105 MRNs(=index) and boolean Gold_outcome as two columns in pd.DataFrame
# from the above chose the temporal lobe resections:
df_temporal = df_outcomes3.loc[df_outcomes3['Resected Lobe'].isin(lobes)] # returns the rows with T Lx or T Lesx
# now to access all the Gold_outcome True from the temporal lobe resections:
df_gold_temporal_outcomes = df_temporal.loc[df_temporal.Gold_outcome == True] # gives a DataFrame of all MRNs and outcome Trues
temporal_gold_outcomes_MRNs = list(df_gold_temporal_outcomes.index.values) # list of just MRNs for use in temporal_find_MRN_label_outcomes()
# the false dataframe index values gives all temporal lobe resected patients who had surgery without gold outcome
df_temporal_had_surgery = df_temporal.loc[df_temporal.Gold_outcome == False]
temporal_had_surgery_MRNs = list(df_temporal_had_surgery.index.values)
return temporal_gold_outcomes_MRNs, temporal_had_surgery_MRNs
|
30c35d288beadc15be31d19bab8f8ce7e28ee599
| 3,640,729
|
def pool(sparkdf, start_column, end_column, var):
"""
Generate pools and calculate maximum var unpooled.
:param sparkdf: Input Spark dataframe.
:param start_column: Start time column name.
:param end_column: End time column name.
:param var: Variable for which to calculate metric.
:return: A Spark dataframe with pools (sizes and counts).
:return: Maximum active metric for var.
"""
starts_dict, ends_dict, starts_sorted, ends_sorted = sorted_dicts(sparkdf, start_column, end_column, var)
size_groups = {s:{'current': 0, 'max': 0} for s in [r.size for r in sparkdf.select(var).distinct().collect()]}
active = {'current': 0, 'max': 0}
start_index, end_index = 0, 0
while start_index < len(starts_sorted) or end_index < len(ends_sorted):
start, end = None, ends_sorted[end_index]
if start_index < len(starts_sorted):
start = starts_sorted[start_index]
if start is None or start > end:
group = size_groups[ends_dict[end]]
group['current'] -= 1
active['current'] -= ends_dict[end]
end_index += 1
else:
group = size_groups[starts_dict[start]]
group['current'] += 1
if group['current'] > group['max']:
group['max'] = group['current']
active['current'] += starts_dict[start]
if active['current'] > active['max']:
active['max'] = active['current']
start_index += 1
pool_counts = [{var: int(s), 'count': int(size_groups[s]['max'])} for s in size_groups.keys()]
max_unpooled = active['max']
return pool_counts, max_unpooled
|
913094bebc6f91ad023d83186084d858a7332531
| 3,640,730
|
def jwt_response_payload_handler(token, user=None, request=None):
"""
    Custom JWT response payload.
def jwt_response_payload_handler(token, user=None, request=None):
return {
'token': token,
'user': UserSerializer(user, context={'request': request}).data
}
:param token:
:param user:
:param request:
:return:
"""
return {
'token': token,
'user': UserSerializer(user, context={'request': request}).data
}
|
972f4cbd39d9bd049fcd7a99bfc168e6c825572a
| 3,640,731
|
import requests
def query_yelp_lookup(biz_id):
""" Lookup resturant using id """
headers = {'Authorization': ('Bearer '
'w5JFtwCUKq05GlSpm8cKo51dBYDQ6r9tyzo-qRsKt4wDyB5'
'_ro6gW5gnG9hS6bvnNHNxOQLHfw7o_9S1e86nkvgcU7DQI_'
'sM6GVt9rqcq_rRYKtagQrexuH0zsU0WXYx')}
url = 'https://api.yelp.com/v3/businesses/' + biz_id
query = requests.get(url, headers=headers)
return query.json()
|
ab2087d42833f0092229870ab3208a24bd041b95
| 3,640,732
|
def dashboard(request, condition='recent'):
"""Dashboard"""
post_count = settings.DASHBOARD_POST_COUNT
comment_count = settings.DASHBOARD_COMMENT_COUNT
if condition == 'recent':
order = '-id'
elif condition == 'view':
order = '-view_count'
elif condition == 'like':
order = '-like_count'
elif condition == 'comment':
order = '-comment_count'
else:
return error_page(request)
posts = Blog.objects.filter(status='1normal').order_by(order)[:post_count]
comments = Comment.objects.filter(
status='1normal').order_by('-id')[:comment_count]
total_posts = Blog.objects.filter(status='1normal').count()
total_comments = Comment.objects.filter(status='1normal').count()
total_spams = Comment.objects.filter(status='7spam').count()
total_users = User.objects.count()
return render(
request,
"blogs/dashboard.html",
{
'posts': posts,
'comments': comments,
'condition': condition,
'total_posts': total_posts,
'total_comments': total_comments,
'total_spams': total_spams,
'total_users': total_users,
}
)
|
fc5422bf580a4608e921b7d59caf7f0ea58a50fd
| 3,640,733
|
def read_manifest(path):
"""Read dictionary of workflows from the Packal manifest.xml file."""
workflows = {}
tree = ET.parse(path)
root = tree.getroot()
for workflow in root:
data = {"packal": True}
for child in workflow:
if child.tag == "short":
data["description"] = child.text.strip()
else:
data[child.tag] = child.text.strip() if child.text else None
# print(child.tag, ':', child.text)
data["author_url"] = packal_user_url(data["author"])
if "bundle" in data:
workflows[data["bundle"]] = data
return workflows
|
8f91126f4a48c0b1af357487ffe791ba790c7745
| 3,640,734
|
def _load_v1_txt(path):
"""Parses a SIF V1 text file, returning numpy arrays.
Args:
path: string containing the path to the ASCII file.
Returns:
A tuple of 4 elements:
constants: A numpy array of shape (element_count). The constant
associated with each SIF element.
centers: A numpy array of shape (element_count, 3). The centers of the
SIF elements.
radii: A numpy array of shape (element_count, 3). The axis-aligned
radii of the gaussian falloffs.
rotations: A numpy array of shape (element_count, 3). The euler-angle
rotations of the SIF elements.
symmetry_count: An integer. The number of elements which are left-right
symmetric.
features: A numpy array of shape (element_count, implicit_len). The LDIF
neural features, if they are present.
"""
lines = file_util.readlines(path)
if lines[0] != 'SIF':
raise ValueError(f'Could not parse {path} as a sif txt. First line was {lines[0]}')
shape_count, version, implicit_len = [int(x) for x in lines[1].split(' ')]
version += 1
if version != 1:
raise ValueError(f'This function can only parse v1 files. This version: {version}.')
symmetry_count = 0
last_was_symmetric = True
constants = []
centers = []
radii = []
rotations = []
features = []
for row in lines[2:]:
elts = row.split(' ')
if len(elts) != 11 + implicit_len:
raise ValueError('Failed to parse the following row with '
f'implicit_len {implicit_len}: {row}')
explicit_params = np.array([float(x) for x in elts[:10]], dtype=np.float32)
is_symmetric = bool(int(elts[10]))
if is_symmetric:
symmetry_count += 1
if not last_was_symmetric:
raise ValueError(f'File not supported by parser: row {row} is '
'symmetric but follows an asymmetric element.')
constants.append(explicit_params[0])
centers.append(explicit_params[1:4])
radii.append(explicit_params[4:7])
rotations.append(explicit_params[7:10])
if implicit_len > 0:
implicit_params = np.array([float(x) for x in elts[11:]], dtype=np.float32)
features.append(implicit_params)
constants = np.stack(constants)
centers = np.stack(centers)
radii = np.stack(radii)
rotations = np.stack(rotations)
features = np.stack(features) if features else None
# Radii have their sqrt stored for GAPS:
radii = radii * radii
return constants, centers, radii, rotations, symmetry_count, features
|
9f7ea3f0059ef3688cc962e9836a558debebf80f
| 3,640,735
|
def split_model_name(model):
"""
Split model names by _
Takes into account packages with _ and processor types with _
"""
model = model[:-3].replace('.', '_')
# sort by key length so that nertagger is checked before tagger, for example
for processor in sorted(ending_to_processor.keys(), key=lambda x: -len(x)):
if model.endswith(processor):
model = model[:-(len(processor)+1)]
processor = ending_to_processor[processor]
break
else:
raise AssertionError(f"Could not find a processor type in {model}")
lang, package = model.split('_', 1)
return lang, package, processor
|
305a70899eb8eb3c5beca4c7e7403010a008a80d
| 3,640,736
|
from .divine1983 import JupiterD4Field
from .distributions import DG83Distribution
from .integrate import FormalRTIntegrator
from .synchrotron import NeuroSynchrotronCalculator
def dg83_setup(
ghz = 95,
lat_of_cen = 10,
cml = 20,
n_alpha = 10,
n_E = 10,
E0 = 0.1,
E1 = 10.,
nn_dir = None,
no_synch = False,
):
"""Create and return a VanAllenSetup object prepared to use the Divine &
Garrett 1983 model of Jupiter's magnetic field and plasma.
ghz
The observing frequency, in GHz.
lat_of_cen
The body's latitude-of-center, in degrees.
cml
The body's central meridian longitude, in degrees.
n_alpha
Number of pitch angles to sample when deriving p/k distribution parameters.
n_E
Number of energies to sample when deriving p/k distribution parameters.
E0
Low end of energy sampling regime, in MeV.
E1
High end of energy sampling regime, in MeV.
nn_dir
The directory with the neural-network data used to generate synchrotron
radiative transfer coefficients.
no_synch
        If true, ignore `nn_dir` and do not load synchrotron computation info.
Makes things faster if you just want to evaluate the DG83 model and not
actually do any radiative transfer.
"""
lat_of_cen *= astutil.D2R
cml *= astutil.D2R
o2b = ObserverToBodycentric(lat_of_cen, cml)
bfield = JupiterD4Field()
distrib = DG83Distribution()
distrib.n_alpha = n_alpha
distrib.n_E = n_E
distrib.E0 = E0
distrib.E1 = E1
ray_tracer = FormalRayTracer()
ray_tracer.ne0_cutoff = 1e-6
rad_trans = FormalRTIntegrator()
if no_synch:
synch_calc = None
else:
synch_calc = NeuroSynchrotronCalculator(nn_dir=nn_dir)
return VanAllenSetup(o2b, bfield, distrib, ray_tracer, synch_calc,
rad_trans, cgs.rjup, ghz * 1e9)
|
94aea682df900d600e922ff560109255e2b69ac7
| 3,640,737
|
def compute() -> int:
"""
Returns the sum of all numbers whose
sum of the factorials of all digits
add up to the number itself.
>>> compute()
40730
"""
return sum(
num
for num in range(3, 7 * factorial(9) + 1)
if sum_of_digit_factorial(num) == num
)
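# The snippet above relies on two helpers that are not shown; a minimal sketch of
# what they are assumed to look like (Project Euler 34 style):
from math import factorial

def sum_of_digit_factorial(n: int) -> int:
    """Sum of the factorials of the digits of n."""
    return sum(factorial(int(digit)) for digit in str(n))

# Sanity check: 145 = 1! + 4! + 5!
print(sum_of_digit_factorial(145))  # 145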
|
c2460158eb7d32142b4f59801bdc307a0ba1d4ff
| 3,640,738
|
import heapq
import numpy as np
def dijkstra(matrix, start=None, end=None):
"""
Implementation of Dijkstra algorithm to find the (s,t)-shortest path between top-left and bottom-right nodes
on a nxn grid graph (with 8-neighbourhood).
    NOTE: This is a vertex variant of the problem, i.e. nodes carry weights, not edges.
:param matrix (np.ndarray [grid_dim, grid_dim]): Matrix of node-costs.
    :return: if `end` is given, an indicator matrix (np.ndarray [grid_dim, grid_dim]) of nodes on the
        shortest path; otherwise, the dict of predecessor transitions.
"""
if start is None:
start = (0, 0)
def neighbors_func(pos):
pos = np.array(pos)
neighbors = get_neighbor_pattern(dim=2)
for off in neighbors:
new_pos = pos+off
            if np.all(new_pos >= 0) and np.all(new_pos < matrix.shape):
yield new_pos
costs = np.full_like(matrix, 1.0e10)
costs[start] = matrix[start]
priority_queue = [(matrix[0][0], start)]
certain = set()
transitions = dict()
while priority_queue:
_, (cur_x, cur_y) = heapq.heappop(priority_queue)
        if (cur_x, cur_y) in certain:
            continue
        for x, y in neighbors_func((cur_x, cur_y)):
if (x, y) not in certain:
if matrix[x][y] + costs[cur_x][cur_y] < costs[x][y]:
costs[x][y] = matrix[x][y] + costs[cur_x][cur_y]
heapq.heappush(priority_queue, (costs[x][y], (x, y)))
transitions[(x, y)] = (cur_x, cur_y)
certain.add((cur_x, cur_y))
if end is None:
return transitions
# retrieve the path
cur_x, cur_y = end
on_path = np.zeros_like(matrix)
    on_path[cur_x, cur_y] = 1
while (cur_x, cur_y) != start:
cur_x, cur_y = transitions[(cur_x, cur_y)]
on_path[cur_x, cur_y] = 1.0
return on_path
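# Minimal usage sketch (assumes `get_neighbor_pattern(dim=2)` yields the eight
# (dx, dy) offsets of the 8-neighbourhood; that helper is not shown above):
costs = np.array([[1.0, 9.0, 1.0],
                  [1.0, 9.0, 1.0],
                  [1.0, 1.0, 1.0]])
path_mask = dijkstra(costs, start=(0, 0), end=(2, 2))
print(path_mask)  # 1s mark the cheap route that avoids the expensive middle column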
|
96338e6c65e1ff88025971361e2b36c0f1efe2af
| 3,640,739
|
def is_finally_visible_segm(*args):
"""is_finally_visible_segm(segment_t s) -> bool"""
return _idaapi.is_finally_visible_segm(*args)
|
9050bd583208824859e71e84f02169237b3ac9f2
| 3,640,740
|
from vivofoundation import vivo_sparql_query
def make_course_dictionary(debug=False):
"""
Make a course dictionary from VIVO contents. Key is course number
such as ABF2010C. Value is URI.
"""
query = """
SELECT ?x ?label ?coursenum
WHERE {
?x a ufVivo:Course .
?x ufVivo:courseNum ?coursenum .
}"""
result = vivo_sparql_query(query)
try:
count = len(result["results"]["bindings"])
except:
count = 0
    if debug:
        print(query, count, result["results"]["bindings"][0],
              result["results"]["bindings"][1])
course_dictionary = {}
i = 0
while i < count:
b = result["results"]["bindings"][i]
coursenum = b['coursenum']['value']
uri = b['x']['value']
course_dictionary[coursenum] = uri
i = i + 1
return course_dictionary
|
cfe014f41aeac18116f6650cb2cb8b1200469eb9
| 3,640,741
|
def get_undisbursed_principal(loan):
"""Gets undisbursed principal"""
principal = frappe.get_value("Microfinance Loan", loan, "loan_principal")
if not principal:
raise frappe.DoesNotExistError("Loan: {} not found".format(loan))
return principal - get_disbursed(loan)
|
7829b93eb1e6298e8640290c94b2b2aacb0de8bd
| 3,640,742
|
def northing_and_easting(dictionary):
"""
Retrieve and return the northing and easting strings to be used as
dictionary keys
Parameters
----------
dictionary : dict
Returns
-------
northing, easting : tuple
"""
    if not ('x' in dictionary and 'y' in dictionary):
northing = 'latitude'
easting = 'longitude'
else:
northing = 'x'
easting = 'y'
return northing, easting
|
2f41d8b681d27f6ef29265c1945591ea18bba79f
| 3,640,743
|
import sys
def decode_path(name):
""" Attempt to decode path with correct encoding """
return name.decode(sys.getfilesystemencoding())
|
14da12b60c1f734e59ee5daec249c3658f3a23e4
| 3,640,744
|
import os
import pickle
def save_account(account):
"""
Function that serializes the account such
that it can be saved.
"""
root_dir = "./accounts/"+account.name+"/"
if not os.path.exists(root_dir):
os.makedirs(root_dir)
with open(root_dir+account.name, "wb+") as f:
pickle.dump(account, f)
return 0
|
08e0253764695dd71e767190c70dd32189839988
| 3,640,745
|
import math
import arcpy
def affine(p, scale, theta, offset):
""" Scale, rotate and translate point """
return arcpy.Point((p.X * math.cos(theta) - p.Y * math.sin(theta)) * scale.X + offset.X,
(p.X * math.sin(theta) + p.Y * math.cos(theta)) * scale.Y + offset.Y)
|
2d1cd34ed94ee0c4e7ecbb786510c0165b9fca9d
| 3,640,746
|
def GetMarkedPos(slot):
"""
Get marked position
@param slot: slot number: 1..1024 if the specifed value is <= 0
range, IDA will ask the user to select slot.
@return: BADADDR - the slot doesn't contain a marked address
otherwise returns the marked address
"""
curloc = idaapi.curloc()
intp = idaapi.int_pointer()
intp.assign(slot)
return curloc.markedpos(intp)
|
2c6fc7bac4a389c0cafd119fbef537e135b7f745
| 3,640,747
|
def elslib_CylinderParameters(*args):
"""
* parametrization P (U, V) = Location + V * ZDirection + Radius * (Cos(U) * XDirection + Sin (U) * YDirection)
:param Pos:
:type Pos: gp_Ax3
:param Radius:
:type Radius: float
:param P:
:type P: gp_Pnt
:param U:
:type U: float &
:param V:
:type V: float &
:rtype: void
"""
return _ElSLib.elslib_CylinderParameters(*args)
|
5fa697d09866747be2ef98b1b913b7aeb59fcf79
| 3,640,748
|
def totaled_no_review_url(cc, sql_time_specification): # pragma: no cover
"""Counts the number of commits with no review url in a given timeframe
Args:
cc(cursor)
sql_time_specification(str): a sql command to limit the dates of the
returned results
Return:
count(int): a count of all commits with no review_url
results(list): a list of lists with all tbr'ed commits with no lgtm in the
format [rietveld_url, git_timestamp, git_subject, git_hash]
"""
cc.execute("""SELECT git_commit.review_url, git_commit.timestamp,
git_commit.subject, git_commit.hash
FROM git_commit
WHERE git_commit.review_url = ''
AND %s""" % sql_time_specification)
result = cc.fetchall()
count = len(result)
formatted_data = []
for data in result:
subject = data[2]
formatted_data.append([data[0], data[1].strftime("%Y-%m-%d %H:%M:%S"),
subject.replace('-', ' '), data[3]])
results = sorted(formatted_data, key=lambda x: x[1], reverse=True)
return count, results
|
027f49b13316ecb36eed3e7dde880848b261e3b4
| 3,640,749
|
import warnings
def is_sat(formula, solver_name=None, logic=None, portfolio=None):
""" Returns whether a formula is satisfiable.
:param formula: The formula to check satisfiability
:type formula: FNode
:param solver_name: Specify the name of the solver to be used
:type solver_name: string
:param logic: Specify the logic that is going to be used
:param portfolio: A list of solver names to perform portfolio solving.
:type portfolio: An iterable of solver names
:returns: Whether the formula is SAT or UNSAT.
:rtype: bool
"""
env = get_env()
if formula not in env.formula_manager:
warnings.warn("Warning: Contextualizing formula during is_sat")
formula = env.formula_manager.normalize(formula)
return env.factory.is_sat(formula,
solver_name=solver_name,
logic=logic,
portfolio=portfolio)
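# Usage sketch via the pySMT shortcuts module (this function mirrors pySMT's
# is_sat shortcut; the import path below is the standard one for that library):
from pysmt.shortcuts import Symbol, And, Not, is_sat

x = Symbol("x")                # Boolean symbol by default
print(is_sat(And(x, Not(x))))  # False: x AND (NOT x) is unsatisfiable
print(is_sat(x))               # True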
|
9121747de68aa531c7c7e9c9f683cd1f1518e54b
| 3,640,750
|
import math
def bounds(*tile):
"""Returns the bounding box of a tile
Parameters
----------
tile : Tile or tuple
May be be either an instance of Tile or 3 ints (X, Y, Z).
Returns
-------
LngLatBbox
"""
tile = _parse_tile_arg(*tile)
xtile, ytile, zoom = tile
Z2 = math.pow(2, zoom)
ul_lon_deg = xtile / Z2 * 360.0 - 180.0
ul_lat_rad = math.atan(math.sinh(math.pi * (1 - 2 * ytile / Z2)))
ul_lat_deg = math.degrees(ul_lat_rad)
lr_lon_deg = (xtile + 1) / Z2 * 360.0 - 180.0
lr_lat_rad = math.atan(math.sinh(math.pi * (1 - 2 * (ytile + 1) / Z2)))
lr_lat_deg = math.degrees(lr_lat_rad)
return LngLatBbox(ul_lon_deg, lr_lat_deg, lr_lon_deg, ul_lat_deg)
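# Worked example for Web Mercator tiling (assuming LngLatBbox and _parse_tile_arg
# behave as in mercantile): tile (0, 0, 1) is the north-west quadrant.
west, south, east, north = bounds(0, 0, 1)
print(round(west, 2), round(south, 2), round(east, 2), round(north, 2))
# -180.0 0.0 0.0 85.05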
|
ed2eb5865d21033029ddfcdd133663c9d222687d
| 3,640,751
|
from typing import Tuple
def http(func: str, arg: Tuple[str]) -> int:
"""Summary.
Args:
func (str): Path to a function.
arg (Tuple[str]): Description
Returns:
int: Description
"""
return ERGO_CLI.http(func, *list(arg))
|
1e46eefa4101ff63d2d3851bacbfd472e1d3c7ce
| 3,640,752
|
from time import time
def isMWS_bhb(primary=None, objtype=None,
gaia=None, gaiaaen=None, gaiadupsource=None, gaiagmag=None,
gflux=None, rflux=None, zflux=None,
w1flux=None, w1snr=None, maskbits=None,
gnobs=None, rnobs=None, znobs=None,
gfracmasked=None, rfracmasked=None, zfracmasked=None,
parallax=None, parallaxerr=None):
"""Set bits for BHB Milky Way Survey targets
Parameters
----------
see :func:`~desitarget.cuts.set_target_bits` for other parameters.
Returns
-------
mask : array_like.
True if and only if the object is a MWS-BHB target.
Notes
-----
- Criteria supplied by Sergey Koposov
- gflux, rflux, zflux, w1flux have been corrected for extinction
(unlike other MWS selections, which use obs_flux).
- Current version (03/20/21) is version 1 on `the SV3 wiki`_.
"""
if primary is None:
primary = np.ones_like(gaia, dtype='?')
mws = primary.copy()
# ADM do not target any objects for which entries are NaN
# ADM and turn off the NaNs for those entries
nans = np.isnan(gflux) | np.isnan(rflux) | np.isnan(zflux) | np.isnan(w1flux) | np.isnan(parallax) | np.isnan(gaiagmag)
w = np.where(nans)[0]
if len(w) > 0:
# ADM make copies as we are reassigning values
rflux, gflux, zflux, w1flux = rflux.copy(), gflux.copy(), zflux.copy(), w1flux.copy()
parallax = parallax.copy()
        gaiagmag = gaiagmag.copy()
rflux[w], gflux[w], zflux[w], w1flux[w] = 0., 0., 0., 0.
parallax[w] = 0.
gaiagmag[w] = 0.
mws &= ~nans
log.info('{}/{} NaNs in file...t = {:.1f}s'
.format(len(w), len(mws), time()-start))
gmag = 22.5 - 2.5 * np.log10(gflux.clip(1e-7))
rmag = 22.5 - 2.5 * np.log10(rflux.clip(1e-7))
zmag = 22.5 - 2.5 * np.log10(zflux.clip(1e-7))
gmr = gmag-rmag
rmz = rmag-zmag
# ADM don't target MWS-like targets in Legacy Surveys mask regions.
mws &= imaging_mask(maskbits, mwsmask=True)
# APC must be a Legacy Surveys object that matches a Gaia source
mws &= gaia
# APC type must be PSF
mws &= _psflike(objtype)
# APC no sources brighter than Gaia G = 10
mws &= gaiagmag > 10.
# APC exclude nearby sources by parallax
mws &= parallax <= 0.1 + 3*parallaxerr
mws &= (gfracmasked < 0.5) & (gflux > 0) & (gnobs > 0)
mws &= (rfracmasked < 0.5) & (rflux > 0) & (rnobs > 0)
mws &= (zfracmasked < 0.5) & (zflux > 0) & (znobs > 0)
# APC no gaia duplicated sources
mws &= ~gaiadupsource
# APC gaia astrometric excess noise < 3
mws &= gaiaaen < 3.0
# APC BHB extinction-corrected color range -0.35 <= gmr <= -0.02
mws &= (gmr >= -0.35) & (gmr <= -0.02)
# Coefficients from Sergey Koposov
bhb_sel = rmz - (1.07163*gmr**5 - 1.42272*gmr**4 + 0.69476*gmr**3 - 0.12911*gmr**2 + 0.66993*gmr - 0.11368)
mws &= (bhb_sel >= -0.05) & (bhb_sel <= 0.05)
# APC back out the WISE error = 1/sqrt(ivar) from the SNR = flux*sqrt(ivar)
w1fluxerr = w1flux/(w1snr.clip(1e-7))
w1mag_faint = 22.5 - 2.5 * np.log10((w1flux-3*w1fluxerr).clip(1e-7))
# APC WISE cut (Sergey Koposov)
mws &= rmag - 2.3*gmr - w1mag_faint < -1.5
# APC Legacy magnitude limits
mws &= (rmag >= 16.) & (rmag <= 20.)
return mws
|
1a4f4287263ee64497ebdf882cbe3b782840b8f3
| 3,640,753
|
def bernpoly(n, z):
"""
Evaluates the Bernoulli polynomial `B_n(z)`.
The first few Bernoulli polynomials are::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> for n in range(6):
... nprint(chop(taylor(lambda x: bernpoly(n,x), 0, n)))
...
[1.0]
[-0.5, 1.0]
[0.166667, -1.0, 1.0]
[0.0, 0.5, -1.5, 1.0]
[-3.33333e-2, 0.0, 1.0, -2.0, 1.0]
[0.0, -0.166667, 0.0, 1.66667, -2.5, 1.0]
At `z = 0`, the Bernoulli polynomial evaluates to a
Bernoulli number (see :func:`bernoulli`)::
>>> print bernpoly(12, 0), bernoulli(12)
-0.253113553113553 -0.253113553113553
>>> print bernpoly(13, 0), bernoulli(13)
0.0 0.0
"""
n = int(n)
assert n >= 0
# XXX: optimize
return sum(binomial(n,k)*bernoulli(k)*z**(n-k) for k in xrange(0,n+1))
|
60da6461246e48f8b5ff7172f7d244c59b9ad7ed
| 3,640,754
|
def sort(obs, pred):
"""
Return sorted obs and pred time series'
"""
obs = obs.sort_values(ascending=True)
pred = pred.sort_values(ascending=True)
return obs,pred
|
11c44c1fd605611a2722321b3c3d58a822b9c643
| 3,640,755
|
import random
def random_point_of_triangle(vertices):
"""Compute a random point of the triangle with given vertices"""
p, q, r = vertices
pq = q-p
pr = r-p
while True:
x = random.random()
y = random.random()
if x + y <= 1:
return p + pq*x + pr*y
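# Usage sketch (assuming the vertices are numpy arrays): the rejection loop keeps
# (x, y) only when x + y <= 1, so p + pq*x + pr*y is uniform over the triangle.
import numpy as np

vertices = (np.array([0.0, 0.0]), np.array([1.0, 0.0]), np.array([0.0, 1.0]))
point = random_point_of_triangle(vertices)
print(point)  # e.g. [0.37 0.21] -- always inside the unit right triangle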
|
ba3bf9183ddae4a16561a06b6f2455ce0ede6c8f
| 3,640,756
|
import time
def get_minutes(hour:str) -> int:
""" Get total number of minutes from time in %H:%M .
Args:
hour (str): String containing time in 24 hour %H:%M format
Returns:
int: Returns total number of minutes
"""
t = time.strptime(hour, '%H:%M')
minutes = t[3] * 60 + t[4]
return minutes
|
069835bdb6b0919d6206e0379a1933986ad2d5bd
| 3,640,757
|
from typing import Tuple
import logging
import numpy as np
def get_rotation_scale_from_transformation(matrix: np.array) -> Tuple[np.array,np.array] :
"""
This function breaks the given transformation matrix into a Rotation matrix and a Scale matrix
as described in "As-Rigid-As-Possible Shape Interpolation" by Alexa et al
Arguments:
matrix : Any transformation matrix
Returns:
R_gamma: Rotation matrix 3x3
S: Scale matrix 3x3
"""
R_alpha,D,R_beta = np.linalg.svd(matrix,full_matrices=True)
D = np.eye(3)*D
R_gamma = R_alpha @ R_beta
if np.linalg.det(R_gamma) < 0:
R_gamma[0,:] *= -1
S = R_beta.T @ D @ R_beta
assert is_rotation_matrix(R_gamma), logging.error("Computed matrix is not a rotation")
return (R_gamma,S)
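# Sanity-check sketch (assumes the module's `is_rotation_matrix` helper exists):
# for a rotation composed with a symmetric positive scale, the returned factors
# should reconstruct the input, i.e. R_gamma @ S == matrix.
theta = np.deg2rad(30)
rot = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                [np.sin(theta),  np.cos(theta), 0.0],
                [0.0, 0.0, 1.0]])
M = rot @ np.diag([2.0, 1.0, 0.5])
R, S = get_rotation_scale_from_transformation(M)
print(np.allclose(R @ S, M))  # True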
|
c48ed5d1e880c7caf79e009bfeb84c95de8007e3
| 3,640,758
|
def calc_check_digit(number):
"""Calculate the check digit."""
weights = (7, 9, 8, 6, 5, 4, 3, 2)
check = sum(w * int(n) for w, n in zip(weights, number)) % 11
return str((10 - check) % 9 + 1)
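# Worked example on a made-up base number: each digit is multiplied by its weight,
# the sum is reduced modulo 11, and the result is mapped through (10 - check) % 9 + 1.
# For '12345678': 7*1 + 9*2 + 8*3 + 6*4 + 5*5 + 4*6 + 3*7 + 2*8 = 159, 159 % 11 = 5,
# so the check digit is (10 - 5) % 9 + 1 = 6.
print(calc_check_digit('12345678'))  # '6'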
|
eec82a1e6cec8baf513db16e672294df79ce4b9f
| 3,640,759
|
import json
def leave_studygroup(request):
"""
Remove a student from the list of participants of a study group.
"""
body = json.loads(request.body)
group_id = body['id']
token = body['token']
rcs = Student.objects.get(token=token).rcs
group = Studygroup.objects.get(id=group_id)
participants = json.loads(group.participants)
participants.remove(rcs)
group.participants = json.dumps(participants)
group.save()
res = {'res': 'OK'}
return JsonResponse(res, safe=False)
|
705c63640c11485dc68ce69d7757642a84c5798c
| 3,640,760
|
def list_revisions_courses(request_ctx, course_id, url, per_page=None, **request_kwargs):
"""
List the revisions of a page. Callers must have update rights on the page in order to see page history.
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param course_id: (required) ID
:type course_id: string
:param url: (required) ID
:type url: string
:param per_page: (optional) Set how many results canvas should return, defaults to config.LIMIT_PER_PAGE
:type per_page: integer or None
:return: List revisions
:rtype: requests.Response (with array data)
"""
if per_page is None:
per_page = request_ctx.per_page
path = '/v1/courses/{course_id}/pages/{url}/revisions'
payload = {
'per_page' : per_page,
}
url = request_ctx.base_api_url + path.format(course_id=course_id, url=url)
response = client.get(request_ctx, url, payload=payload, **request_kwargs)
return response
|
c6ef2cd08b1a98c5204dd0e6a52b55ef57dbc78a
| 3,640,761
|
def snr2Ivar(flux, snr):
"""
Estimate the inverse variance given flux and S/N.
Parameters
----------
flux : scalar or array of float
        Flux of the object.
snr : scalar or array of float
Signal to noise ratio
"""
return 1.0 / ((flux / snr) ** 2.0)
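# Quick check: flux 10 at S/N 5 implies sigma = 2, so the inverse variance is 0.25.
print(snr2Ivar(10.0, 5.0))  # 0.25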
|
91c76cd942a8f37f57a227ccb35cf4968a16193b
| 3,640,762
|
def revision_to_cashflows(rev, end_date):
"""Converts a revision to a list of cashflows
end_date -- the date from which we want to stop computing
"""
if rev.end_date is not None:
end_date = next_month(rev.end_date)
result = []
for first_of_month in first_of_month_range(rev.start_date, end_date):
start = max(first_of_month, rev.start_date)
end = next_month(first_of_month)
if rev.end_date is not None:
end = min(end, rev.end_date)
delta = end - start
total_days = monthrange(first_of_month.year, first_of_month.month)[1]
rent = fractional_amount(-rev.rent, delta.days, total_days)
result.append(Cashflow(first_of_month, rent, _("rent")))
if rev.provision != 0:
p = fractional_amount(-rev.provision, delta.days, total_days)
result.append(Cashflow(
first_of_month, p, _("provision")))
return result
|
51778e5c389420101d3ef6afab0e28b6aa708689
| 3,640,763
|
import numpy as np
import pandas as pd
def filter_verified_user(path, community_user_dataFrame, verified_user_file, sep=',', header=None):
    """
    Filter out verified users based on a file of already-verified users, keeping
    only the unverified users of the community.
    :param path: directory in which the verified-user file is stored.
    :param community_user_dataFrame: community-user dataframe with two columns (user_id, community_id).
    :param verified_user_file: CSV file of verified users in the format (user_id, is_verified, name), comma-separated.
    :return: pandas dataframe with verified users removed, same format as community_user_dataFrame, columns (user_id, community_id).
    """
    print('filter verified user')
verified_user_dataFrame = pd.read_csv(path + verified_user_file, names=['user_id', 'is_verified', 'name'],dtype={'user_id': np.str},sep = sep,header = header)
verified_user_dataFrame = verified_user_dataFrame[verified_user_dataFrame.is_verified == True]
del verified_user_dataFrame['is_verified']
del verified_user_dataFrame['name']
dataFrame = pd.DataFrame()
user_id_list = set(list(community_user_dataFrame.user_id))
verified_user_id_list = list(verified_user_dataFrame.user_id)
for user_id in user_id_list:
if user_id not in verified_user_id_list:
dataFrame = dataFrame.append(community_user_dataFrame[community_user_dataFrame.user_id == user_id],ignore_index=False)
            print('keep user: ', user_id)
else:
            print('delete user: ', user_id)
return dataFrame
|
a571f72000e0a52e8f5aa0d5ae61cd07e2d7189d
| 3,640,764
|
from datetime import datetime
def calculate(series):
"""
:param series: a list of lists of [[(),()], [(),()]] for every swc tube in the pixel
:return:
"""
# gets every dates tuple in the list
dates = [t for t, v in series]
# define the dates as a set
ds = set(dates[0])
# get the intersection of every other set.
for d in dates[1:]:
ds = ds.intersection(set(d))
def func(di):
""""""
# check for matching dates in the intersected and ordered set with the values from the series.
ns = [get_matching_date(di, zip(*cs)) for cs in series] # ns is the matching values
# if the value is not none...
ns = [ni for ni in ns if ni is not None]
# print "Here is your ns {}".format(ns)
# calculate the error of the mean for that value.
# if ns:
# return datetime.strptime(di, '%m/%d/%Y'), calculate_sem(ns) #, calculate_avg(ns)
if ns:
return datetime.strptime(di, '%m/%d/%Y'), ns
# sets are NOT ordered so you need to find the ones that match up.
# vs = [func(di) for di in sorted(list(ds), reverse=True)]
storages = [func(di) for di in sorted(list(ds), reverse=True)]
# vs = [vi for vi in vs if vi is not None]
storages = [i for i in storages if i is not None]
# return zip(*vs)
# print "length storages {}".format(len(storages))
# print "STORAGES {}".format(storages)
# print "length storages {}".format(len(zip(*storages)))
# print "STORAGES {}".format(zip(*storages))
return zip(*storages)
|
136fa5fd72bad6c1cb2938e25adc44d768caf43b
| 3,640,765
|
from typing import Tuple
def load_dataset(filename: str) -> Tuple[np.ndarray, np.ndarray]:
"""
Load dataset for comparing the Gaussian Naive Bayes and LDA classifiers. File is assumed to be an
ndarray of shape (n_samples, 3) where the first 2 columns represent features and the third column the class
Parameters
----------
filename: str
Path to .npy data file
Returns
-------
X: ndarray of shape (n_samples, 2)
Design matrix to be used
y: ndarray of shape (n_samples,)
Class vector specifying for each sample its class
"""
dataset = np.load(f"..//datasets//{filename}")
X, y = dataset[:, :2], dataset[:, -1]
return X, y
|
15b6c7422fa397e13fc19de2a1e7681b73e3638c
| 3,640,766
|
import csv
def readCSV(name,shape = [None], delimiter = ","):
""" Lectura de archivo csv name
Devuelve matriz con los datos y cabecera
"""
data = []
with open(name, 'r') as f:
reader = csv.reader(f,delimiter = delimiter)
for row in reader:
data.append(row[slice(*shape)])
return data
|
789341daf51b2f1e92086a42698ea0fef1130505
| 3,640,767
|
from gdata.media import Category
def build_category(category):
"""Build a single-item list of a YouTube category.
This refers to the Category of a video entry, such as "Film" or "Comedy",
not the atom/gdata element. This does not check if the category provided
is valid.
Keyword arguments:
category: String representing the category.
Returns:
A single-item list of a YouTube category (type gdata.media.Category).
"""
return [Category(
text=category,
scheme='http://gdata.youtube.com/schemas/2007/categories.cat',
label=category)]
|
6906e30fcf1d72d7842ec5d7381a12842a9ded3e
| 3,640,768
|
def all(event, context):
""" retrieves all experiment results from redis
params:
- namespace (optional)
- scope (optional, comma-separated list of experiments)
"""
r = _redis()
namespace = event.get('namespace', 'alephbet')
scope = event.get('scope')
if scope:
experiments = scope.split(',')
else:
experiments = r.smembers("{0}:experiments".format(namespace))
results = []
results.append({'meta': {'scope': scope}})
for ex in experiments:
goals = experiment({'experiment': ex, 'namespace': namespace}, context)
results.append({'experiment': ex, 'goals': goals})
return results
|
583cc6a0101fbb6ef1ca12d6ddcfe76626bdd8dd
| 3,640,769
|
import os
import time
def wait_for_save(filename, timeout=5):
"""Waits for FILENAME to update, waiting up to TIMEOUT seconds.
Returns True if a save was detected, and False otherwise.
"""
modification_time = os.path.getmtime(filename)
start_time = time.time()
while time.time() < start_time + timeout:
if (os.path.getmtime(filename) > modification_time and
os.path.getsize(filename) > 0):
return True
time.sleep(0.2)
return False
|
fa65a638188d32dba9904bb19e2327f5b0390996
| 3,640,770
|
def check_submodule_update(job, position):
"""
Checks to see if certain submodules have been updated and post a comment to the PR if so.
"""
output = get_output_by_position(job, position)
modules = find_in_output(output, "CIVET_CLIENT_SUBMODULE_UPDATES")
if not modules:
return False
if not job.event.pull_request or not job.event.pull_request.review_comments_url:
return False
for mod in modules.split():
api = job.event.build_user.api()
url = job.event.pull_request.review_comments_url
sha = job.event.head.sha
msg = "**Caution!** This contains a submodule update"
# The 2 position will leave the message on the new submodule hash
api.pr_review_comment(url, sha, mod, 2, msg)
return True
|
ca6772b516fca899d6196cea47aa4185d958ec48
| 3,640,771
|
import numpy as np


def _compute_subseq_errors_direct(series, weights):
    r"""
Subsequence errors (using one pass formulation)
:param Array{Float64,1} series
:param Array{Float64,1} weights
The subsequence errors is:
$$\begin{align}
E[i,j] &= Q[i,j] - \frac{S[i,j]^2}{W[i,j]}
\end{align}$$
Were W, S, Q are upper diagonal matrices:
$$\begin{align}
W[i,j] &\equiv \sum_{k=i}^j w_k \\
S[i,j] &\equiv \sum_{k=i}^j w_k x_k \\
Q[i,j] &\equiv \sum_{k=i}^j w_k {x_k}^2
\end{align}$$
https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
Because $$Q[i,j]$$ and $$\frac{S[i,j]^2}{W[i,j]}$$ can be very similar numbers,
cancellation can lead to the precision of the result to be much less than
the inherent precision of the floating-point arithmetic used to perform the computation.
Thus this algorithm should not be used in practice.
This is particularly bad if the standard deviation is small relative to the mean.
"""
N = np.size(series)
wgts = np.diag(weights)
wsum = np.diag(weights * series)
sqrs = np.diag(weights * series * series)
    dists = np.zeros((N, N), dtype=float)  # np.float was removed from NumPy
means = np.diag(series)
# Fill the upper triangle of dists and means by performing up-right
# diagonal sweeps through the matrices
for delta in range(0, N):
for l in range(0, (N-1-delta)):
# l = left boundary, r = right boundary
r = l + delta + 1
# Incrementally update every partial sum
wgts[l, r] = wgts[l, r-1] + wgts[r, r]
wsum[l, r] = wsum[l, r-1] + wsum[r, r]
sqrs[l, r] = sqrs[l, r-1] + sqrs[r, r]
# Calculate the mean over the range
means[l, r] = 0 if (wgts[l, r] == 0) else wsum[l, r] / wgts[l, r]
dists[l, r] = sqrs[l, r] - means[l, r] * wsum[l, r]
if dists[l, r] < 0:
print("[WARNING] Numerical instability detected, dists[", l, ", ", r, "] is negative: ", dists[l, r])
return dists, means
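
# Minimal usage sketch: with unit weights, dists[i, j] is the sum of squared
# deviations of series[i:j+1] from its (weighted) mean. For [1, 2, 4] the full
# range gives Q - S**2 / W = 21 - 49/3 = 14/3.
_example_series = np.array([1.0, 2.0, 4.0])
_example_dists, _example_means = _compute_subseq_errors_direct(
    _example_series, np.ones_like(_example_series))
assert abs(_example_dists[0, 2] - 14.0 / 3.0) < 1e-9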
|
d8083b2b19102d51ee10baf2907890663f1d2b82
| 3,640,772
|
def preprocess_dataframe(data):
"""Helper method to preprocess the dataframe.
Creates new columns for year,month,recalls and percentage change.
Limits the date range for the experiment (these data are trustworthy)."""
data['recalls'] = data['doc_count'] + 1
data.drop(columns=['product', 'Unnamed: 0', 'key', 'key_as_string', 'doc_count'], inplace=True)
data = data.resample("M").sum()
mask = (data.index > '2007-05-31') & (data.index < '2019-09-30')
data = data.loc[mask]
data['pct'] = data['recalls'].pct_change()
return data
|
f6670cac1319108c88ee9ee409ce0ecdd1eca746
| 3,640,773
|
def is_solution(x:int, y:int) -> bool:
"""Returns try if (x, y) is a solution."""
# x and y are the values in a sequence of 15 terms of the following form:
# xxxxyxxxxxyxxxx
# x must be a positive integer
if x <= 0:
return False
# y must be a negative integer
if y >= 0:
return False
# a run of 6 consecutive terms must be positive
if 5 * x + y <= 0:
return False
# a run of 11 consecutive terms must be negative
if 9 * x + 2 * y >= 0:
return False
    # x must be <= 16 or y must be >= -16
return x <= 16 or y >= -16
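
# Illustrative sketch: brute-force the bounded search space described above.
# (3, -14) is one valid pair: 5*3 - 14 = 1 > 0 and 9*3 + 2*(-14) = -1 < 0.
_solutions = [(x, y) for x in range(1, 17) for y in range(-16, 0) if is_solution(x, y)]
assert (3, -14) in _solutions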
|
5e620fc390f6a79fd25d00c8c8b51d0af788d48c
| 3,640,774
|
def load_crl(file):
# type: (AnyStr) -> CRL
"""
Load CRL from file.
:param file: Name of file containing CRL in PEM format.
:return: M2Crypto.X509.CRL object.
"""
with BIO.openfile(file) as f:
cptr = m2.x509_crl_read_pem(f.bio_ptr())
return CRL(cptr, 1)
|
d711503c78edbb722189d7a06340ab9f719f853f
| 3,640,775
|
def clean_and_lemmatize(text):
"""
Clean and lemmatize the text of a Tweet
Returns:
cleaned_text (string): The cleaned and lemmatized text.
"""
wnl = WordNetLemmatizer() # NLTK lemmatizer
converted_tweet = clean_and_tokenize(
text) # cleans the text and tokenize it
tagged = nltk.pos_tag(converted_tweet) # POS tag the tokenized Tweet
wordnet_tagged = list(
map(lambda x: (x[0], pos_tagger(x[1])), tagged))
lemmatized_sentence = []
# loop through each word in the tagged list
for word, tag in wordnet_tagged:
if tag is None:
# if there is no available tag, append the word as is
lemmatized_sentence.append(word)
else:
# else use the tag to lemmatize the word
lemmatized_sentence.append(wnl.lemmatize(word, tag))
    # join the lemmatized words back into a single string
cleaned_text = " ".join(lemmatized_sentence)
return cleaned_text
|
0635e08aca69191d77ad652dcf752254fbfc2ea6
| 3,640,776
|
from torch import nn


def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding
Arguments:
in_planes {int} -- Number of channels in the input image
out_planes {int} -- Number of channels produced by the convolution
Keyword Arguments:
stride {int or tuple, optional} -- Stride of the convolution. Default: 1 (default: {1})
        groups {int, optional} -- Number of blocked connections from input channels to output channels (default: {1})
dilation {int or tuple, optional} -- Spacing between kernel elements (default: {1})
Returns:
output layer of 3x3 convolution with padding
"""
return nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=dilation,
groups=groups,
bias=False,
dilation=dilation,
)
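
# Illustrative usage: 16 -> 32 channels, spatial size preserved because the
# padding equals the dilation for a 3x3 kernel with stride 1.
import torch

_layer = conv3x3(16, 32)
assert _layer(torch.randn(1, 16, 8, 8)).shape == (1, 32, 8, 8)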
|
1f55153bb35dd56b7cca2b00e13a0f4e5a963248
| 3,640,777
|
def task_finish(request, pk):
"""タスクを完了するAPI
:param request:
:param pk:
:return:
"""
task = get_object_or_404(models.Task, pk=pk)
prev_task = task.get_prev_task()
if prev_task is None or prev_task.can_continue():
task.status = '99'
task.updated_user = request.user
task.save()
serializer = serializers.TaskSerializer(task)
return Response(serializer.data)
else:
return Response({'detail': constants.ERROR_PREV_TASK_UNFINISHED}, status=400)
|
b995edd022e27b6101c9c79874af4fc78a01afe2
| 3,640,778
|
import sys
import argparse
def add_parser(subparsers):
"""Add reduction parser"""
parser = subparsers.add_parser(
'reduce', aliases=['red'], help="""Reduce games""",
description="""Create reduced game files from input game files.""")
parser.add_argument(
'--input', '-i', metavar='<input-file>', default=sys.stdin,
type=argparse.FileType('r'), help="""Input file for script. (default:
stdin)""")
parser.add_argument(
'--output', '-o', metavar='<output-file>', default=sys.stdout,
type=argparse.FileType('w'), help="""Output file for script. (default:
stdout)""")
parser.add_argument(
'--type', '-t', choices=REDUCTIONS, default='dpr', help="""Type of
reduction to perform. `dpr` - deviation preserving. `hr` -
hierarchical. `tr` - twins. `idr` - identity. (default:
%(default)s)""")
parser.add_argument(
'--sorted-roles', '-s', action='store_true', help="""If set, reduction
should be a comma separated list of reduced counts for the role names
in sorted order.""")
parser.add_argument(
'reduction', nargs='?', metavar='<role>:<count>;...',
help="""Number of players in each reduced-game role. This is a string
e.g. "role1:4;role2:2".""")
return parser
|
d1629ba58fd17f89b6f8646b92a251fbcdba0a67
| 3,640,779
|
def fine_tune():
"""recreates top model architecture/weights and fine tunes with image augmentation and optimizations"""
# reconstruct vgg16 model
model = Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=(3, img_width, img_height)))
model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_2'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_2'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
# load vgg16 weights
    f = h5py.File(weights_path, 'r')
for k in range(f.attrs['nb_layers']):
if k >= len(model.layers):
break
g = f['layer_{}'.format(k)]
weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]
model.layers[k].set_weights(weights)
f.close()
# add the classification layers
top_model = Sequential()
top_model.add(Flatten(input_shape=model.output_shape[1:]))
top_model.add(Dense(256, activation='relu'))
top_model.add(Dropout(0.5))
top_model.add(Dense(1, activation='sigmoid'))
top_model.load_weights(top_model_weights_path)
# add the model on top of the convolutional base
model.add(top_model)
# set the first 25 layers (up to the last conv block)
# to non-trainable (weights will not be updated)
for layer in model.layers[:25]:
layer.trainable = False
# compile the model with a SGD/momentum optimizer
# and a very slow learning rate.
model.compile(loss='binary_crossentropy',
optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
metrics=['accuracy'])
# prepare data augmentation configuration
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
train_data_dir,
target_size=(img_height, img_width),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
validation_data_dir,
target_size=(img_height, img_width),
batch_size=32,
class_mode='binary')
# fine-tune the model
model.fit_generator(
train_generator,
samples_per_epoch=nb_train_samples,
nb_epoch=nb_epoch,
validation_data=validation_generator,
nb_val_samples=nb_validation_samples,
callbacks=[early_stopping])
# save the model
json_string = model.to_json()
with open('final_model_architecture.json', 'w') as f:
f.write(json_string)
model.save_weights('final_weights.h5')
# return the model for convenience when making predictions
return model
|
69982c739927fca8f1c8e3779a7358a7fc646a5f
| 3,640,780
|
def normalize(a, seqlength=None, rv=None):
"""
Normalize the VSA vector
:param a: input VSA vector
    :param seqlength: Optional; for BSC vectors this must be set to a valid sequence length.
:param rv: Optional random vector, used for splitting ties on binary and ternary VSA vectors.
:return: new VSA vector
"""
return a.normalize(a, seqlength, rv)
|
ef8ec307add55a56be5991bb13579bc989726d3c
| 3,640,781
|
import pandas as pd


def MACD(df, n_fast, n_slow):
"""Calculate MACD, MACD Signal and MACD difference
:param df: pandas.DataFrame
    :param n_fast: span (number of periods) of the fast EMA
    :param n_slow: span (number of periods) of the slow EMA
:return: pandas.DataFrame
"""
EMAfast = pd.Series(df['Close'].ewm(span=n_fast, min_periods=n_slow).mean())
EMAslow = pd.Series(df['Close'].ewm(span=n_slow, min_periods=n_slow).mean())
MACD = pd.Series(EMAfast - EMAslow, name='MACD_' + str(n_fast) + '_' + str(n_slow))
MACDsign = pd.Series(MACD.ewm(span=9, min_periods=9).mean(), name='MACDsign_' + str(n_fast) + '_' + str(n_slow))
MACDdiff = pd.Series(MACD - MACDsign, name='MACDdiff_' + str(n_fast) + '_' + str(n_slow))
df = df.join(MACD)
df = df.join(MACDsign)
df = df.join(MACDdiff)
return df
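
# Illustrative usage: a synthetic 'Close' series long enough for the 26-period
# slow EMA; this appends MACD_12_26, MACDsign_12_26 and MACDdiff_12_26 columns.
_prices = pd.DataFrame({'Close': [100.0 + 0.5 * i for i in range(60)]})
_prices = MACD(_prices, n_fast=12, n_slow=26)
assert 'MACDdiff_12_26' in _prices.columns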
|
368f80feb27bd67a387b0b9abb652d53205d22ac
| 3,640,782
|
from osgeo import osr


def ecef2map(xyz, spatialRef):
""" transform 3D cartesian Earth Centered Earth fixed coordinates, to
map coordinates (that is 2D) in a projection frame
Parameters
----------
xyz : np.array, size=(m,3), float
np.array with 3D coordinates, in WGS84. In the following form:
[[x, y, z], [x, y, z], ... ]
spatialRef : osgeo.osr.SpatialReference
target projection
Returns
-------
    xy : np.array, size=(m,2), float
np.array with planar coordinates, within a given projection frame
"""
if isinstance(spatialRef, str):
spatialStr = spatialRef
spatialRef = osr.SpatialReference()
spatialRef.ImportFromWkt(spatialStr)
    llh = ecef2llh(xyz)  # convert to geodetic latitude, longitude and height
xy = ll2map(llh[:, :-1], spatialRef)
return xy
|
f7177912c931cfe6c2cee5737ed6bd2afedeba08
| 3,640,783
|
import numpy as np


def dis2speed(t, dis):
"""
Return speed in distance travelled per hour.
Args:
t (datetime64[ms]): 1D array with time.
dis (float ): 1D array with distance travelled.
Returns:
float: 1D array with speed data.
"""
# divide by one hour (=3600 x 1000 milliseconds)
    speed = np.diff(dis) / (np.diff(t).astype(np.float64) / 1000) * 3600  # astype handles multi-element timedelta arrays
speed = np.r_[np.nan, speed]
return speed
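
# Illustrative usage: samples 30 minutes apart, 5 km travelled per step -> 10 km/h.
_t = np.array(['2020-01-01T00:00', '2020-01-01T00:30', '2020-01-01T01:00'],
              dtype='datetime64[ms]')
_speed = dis2speed(_t, np.array([0.0, 5.0, 10.0]))
assert np.isnan(_speed[0]) and abs(_speed[1] - 10.0) < 1e-9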
|
4dae87509ad44604949f1d3f925f3b28947b9952
| 3,640,784
|
def default_context(plugin, context):
"""
Return the default context for plugins rendered with a template, which
simply is a single variable named ``plugin`` containing the plugin
instance.
"""
return {"plugin": plugin}
|
5f7a88c02b6c11a150197e50a5be1847cba422b0
| 3,640,785
|
def get_model_from_key(model_name):
"""
Gets the model from a given key.
param:
model_name: name of model
return:
        the matching Model subclass, or None if no match is found
"""
_known_models = {}
#populate
for klass in Model.__subclasses__():
_known_models[klass.__name__] = klass
for sub in klass.__subclasses__():
_known_models[sub.__name__] = sub
return _known_models.get(model_name, None)
|
488c52635bfdf10c79936b0f33b84d0222d1ae5b
| 3,640,786
|
def default_fields(
coll_id=None, type_id=None, entity_id=None,
width=12, **kwargs):
"""
Returns a function that accepts a field width and returns a dictionary of entity values
for testing. The goal is to isolate default entity value settings from the test cases.
"""
def_label = kwargs.get("default_label",
default_label(coll_id=coll_id, type_id=type_id, entity_id=entity_id)
)
def_comment = kwargs.get("default_comment",
default_comment(coll_id=coll_id, type_id=type_id, entity_id=entity_id)
)
def_label_esc = def_label.replace("'", "'")
def_comment_esc = def_comment.replace("'", "'")
def_entity_url = collection_entity_view_url(coll_id=coll_id, type_id=type_id, entity_id=entity_id)
collection_url = collection_view_url(coll_id).rstrip("/")
def def_fields(width=12):
fields = layout_classes(width=width)
fields.update(
{ 'coll_id': coll_id
, 'type_id': type_id
, 'entity_id': entity_id
, 'default_label': def_label
, 'default_comment': def_comment
, 'default_label_esc': def_label_esc
, 'default_comment_esc': def_comment_esc
, 'default_entity_url': def_entity_url
, 'collection_url': collection_url
})
if kwargs:
fields.update(kwargs)
return fields
return def_fields
|
1670f95a8e95f84ca5f08aab8ee0a8effb1e6f76
| 3,640,787
|
import traceback
def predict_using_broadcasts(feature1, feature2, feature3, feature4):
"""
Scale the feature values and use the model to predict
    :return: 1 if normal, -1 if abnormal, 0 if something went wrong
"""
prediction = 0
x_test = [[feature1, feature2, feature3, feature4]]
try:
x_test = SCL.value.transform(x_test)
prediction = CLF.value.predict(x_test)[0]
except ValueError:
traceback.print_exc()
print('Cannot predict:', x_test)
return int(prediction)
|
4f766002e11fbe34f3479769db752c0df08b2df5
| 3,640,788
|
import torch
def make_positions(tensor, padding_idx, left_pad):
"""Replace non-padding symbols with their position numbers.
Position numbers begin at padding_idx+1.
Padding symbols are ignored, but it is necessary to specify whether padding
is added on the left side (left_pad=True) or right side (left_pad=False).
"""
max_pos = padding_idx + 1 + tensor.size(1)
device = tensor.get_device()
buf_name = f'range_buf_{device}'
if not hasattr(make_positions, buf_name):
setattr(make_positions, buf_name, tensor.new())
setattr(make_positions, buf_name, getattr(make_positions, buf_name).type_as(tensor))
if getattr(make_positions, buf_name).numel() < max_pos:
torch.arange(padding_idx + 1, max_pos, out=getattr(make_positions, buf_name))
mask = tensor.ne(padding_idx)
positions = getattr(make_positions, buf_name)[:tensor.size(1)].expand_as(tensor)
if left_pad:
positions = positions - mask.size(1) + mask.long().sum(dim=1).unsqueeze(1)
new_tensor = tensor.clone()
return new_tensor.masked_scatter_(mask, positions[mask]).long()
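
# Illustrative usage (CPU sketch; assumes a PyTorch build where get_device()
# returns -1 for CPU tensors): with right padding and padding_idx=1, padded
# slots keep the padding index and real tokens are numbered from padding_idx + 1.
_tokens = torch.tensor([[5, 6, 7, 1, 1]])
assert make_positions(_tokens, padding_idx=1, left_pad=False).tolist() == [[2, 3, 4, 1, 1]]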
|
8e65c68daae2e40710c777d6e74f048b8b0ad547
| 3,640,789
|
import numpy as np
import pandas as pd


def teq(state, *column_values):
    """Tag-Equals filter. Expects that the first row contains tags and/or metadata.
Tag row is ignored in comparison, but prepended to the result (in order to maintain the first row in the results).
Accepts one or more column-value pairs. Keep only rows where value in the column equals specified value.
Example: teq-column1-1
"""
df = state.get()
tags = df.iloc[:1, :]
df = df.iloc[1:, :]
assert state.type_identifier == "dataframe"
for i in range(0, len(column_values), 2):
c = column_values[i]
v = column_values[i + 1]
state.log_info(f"Equals: {c} == {v}")
        index = np.array([x == v for x in df[c]], dtype=bool)  # np.bool was removed from NumPy
try:
if int(v) == float(v):
index = index | (df[c] == int(v))
else:
index = index | (df[c] == float(v))
        except (ValueError, TypeError):
            pass
df = df.loc[index, :]
    df = pd.concat([tags, df], ignore_index=True)  # DataFrame.append was removed in pandas 2.0
return state.with_data(df)
|
7fd2786dcbe8705b48081c6bc96dcdc7452e35d3
| 3,640,790
|
def adjust_labels(data_y, dataset, pred_type='actions'):
"""
Transforms original labels into the range [0, nb_labels-1]
:param data_y: numpy integer array
Sensor labels
    :param dataset: string, ['wetlab', 'sbhar', 'rwhar', 'rwhar_3sbjs', 'hhar', 'opportunity', 'opportunity_ordonez']
        Name of the dataset whose labels are adjusted
    :param pred_type: string, ['gestures', 'locomotion', 'actions', 'tasks']
Type of activities to be recognized
:return: numpy integer array
Modified sensor labels
"""
data_y[data_y == "null_class"] = 0
if dataset == 'wetlab':
if pred_type == 'tasks': # Labels for tasks are adjusted
data_y[data_y == "1solvent"] = 1
data_y[data_y == "2catalysator"] = 2
data_y[data_y == "3cutting"] = 3
data_y[data_y == "4mixing"] = 4
data_y[data_y == "5catalysator"] = 5
data_y[data_y == "6waterbath"] = 6
data_y[data_y == "7solvent"] = 7
data_y[data_y == "8catalysator"] = 8
data_y[data_y == "9cutting"] = 9
data_y[data_y == "10mixing"] = 10
data_y[data_y == "11catalysator"] = 11
data_y[data_y == "12waterbath"] = 12
data_y[data_y == "13waterbath"] = 13
data_y[data_y == "14catalysator"] = 14
data_y[data_y == "15pestling"] = 15
data_y[data_y == "16filtrate"] = 16
data_y[data_y == "17catalysator"] = 17
data_y[data_y == "18pouring"] = 18
data_y[data_y == "19detect"] = 19
data_y[data_y == "20waterbath"] = 20
data_y[data_y == "21catalysator"] = 21
data_y[data_y == "22pestling"] = 22
data_y[data_y == "23filtrate"] = 23
data_y[data_y == "24catalysator"] = 24
data_y[data_y == "25pouring"] = 25
data_y[data_y == "26detect"] = 26
data_y[data_y == "27end"] = 27
elif pred_type == 'actions': # Labels for actions are adjusted
data_y[data_y == "cutting"] = 1
data_y[data_y == "inverting"] = 2
data_y[data_y == "peeling"] = 3
data_y[data_y == "pestling"] = 4
data_y[data_y == "pipetting"] = 5
data_y[data_y == "pouring"] = 6
data_y[data_y == "pour catalysator"] = 6
data_y[data_y == "stirring"] = 7
data_y[data_y == "transfer"] = 8
elif dataset == 'sbhar':
data_y[data_y == 'walking'] = 1
data_y[data_y == 'walking_upstairs'] = 2
data_y[data_y == 'walking_downstairs'] = 3
data_y[data_y == 'sitting'] = 4
data_y[data_y == 'standing'] = 5
data_y[data_y == 'lying'] = 6
data_y[data_y == 'stand-to-sit'] = 7
data_y[data_y == 'sit-to-stand'] = 8
data_y[data_y == 'sit-to-lie'] = 9
data_y[data_y == 'lie-to-sit'] = 10
data_y[data_y == 'stand-to-lie'] = 11
data_y[data_y == 'lie-to-stand'] = 12
elif dataset == 'rwhar' or dataset == 'rwhar_3sbjs':
data_y[data_y == 'climbing_down'] = 0
data_y[data_y == 'climbing_up'] = 1
data_y[data_y == 'jumping'] = 2
data_y[data_y == 'lying'] = 3
data_y[data_y == 'running'] = 4
data_y[data_y == 'sitting'] = 5
data_y[data_y == 'standing'] = 6
data_y[data_y == 'walking'] = 7
elif dataset == 'hhar':
data_y[data_y == 'bike'] = 1
data_y[data_y == 'sit'] = 2
data_y[data_y == 'stand'] = 3
data_y[data_y == 'walk'] = 4
data_y[data_y == 'stairsup'] = 5
data_y[data_y == 'stairsdown'] = 6
    elif dataset == 'opportunity' or dataset == 'opportunity_ordonez':
if pred_type == 'locomotion':
data_y[data_y == "stand"] = 1
data_y[data_y == "walk"] = 2
data_y[data_y == "sit"] = 3
data_y[data_y == "lie"] = 4
elif pred_type == 'gestures':
data_y[data_y == 'open_door_1'] = 1
data_y[data_y == 'open_door_2'] = 2
data_y[data_y == 'close_door_1'] = 3
data_y[data_y == 'close_door_2'] = 4
data_y[data_y == 'open_fridge'] = 5
data_y[data_y == 'close_fridge'] = 6
data_y[data_y == 'open_dishwasher'] = 7
data_y[data_y == 'close_dishwasher'] = 8
data_y[data_y == 'open_drawer_1'] = 9
data_y[data_y == 'close_drawer_1'] = 10
data_y[data_y == 'open_drawer_2'] = 11
data_y[data_y == 'close_drawer_2'] = 12
data_y[data_y == 'open_drawer_3'] = 13
data_y[data_y == 'close_drawer_3'] = 14
data_y[data_y == 'clean_table'] = 15
data_y[data_y == 'drink_from_cup'] = 16
data_y[data_y == 'toggle_switch'] = 17
return data_y
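
# Illustrative usage (object-dtype array so integer ids can replace the strings):
import numpy as np

_labels = np.array(['walking', 'null_class', 'jumping'], dtype=object)
assert adjust_labels(_labels, dataset='rwhar').astype(int).tolist() == [7, 0, 2]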
|
1d201a20a8865cd505c0ee6b5385622a0ae28817
| 3,640,791
|
def is_str_or_bytes(x):
""" True if x is str or bytes.
This doesn't use rpartial to avoid infinite recursion.
"""
return isinstance(x, (str, bytes, bytearray))
|
ff4bf19177ffe62f24713e077824e48ec45f8587
| 3,640,792
|
def _type_convert(new_type, obj):
"""
    Convert `obj` to `new_type`.
"""
return new_type(obj)
|
fc47c100508d41caa7ffc786746b58e3d6f684e2
| 3,640,793
|
def create_tokenizer(corpus_file, vocab_size):
"""Create a tokenizer from a corpus file
Args:
corpus_file (Pathlib path): File containng corpus i.e. all unique words for
vocab_size (int): Vocabulary size of the tokenizer
Returns:
hugging_face tokenizer: Byte pair tokenizer used to tokenize text
"""
tokenizer = Tokenizer(BPE())
trainer = BpeTrainer(
special_tokens=["<pad>", "<s>", "</s>", "<unk>"], vocab_size=vocab_size
)
tokenizer.pre_tokenizer = Whitespace()
files = [str(corpus_file)]
tokenizer.train(trainer, files)
tokenizer.post_processor = TemplateProcessing(
single="<s> $A </s>",
special_tokens=[
("<s>", tokenizer.token_to_id("<s>")),
("</s>", tokenizer.token_to_id("</s>")),
],
)
tokenizer.enable_padding(
pad_token="<pad>",
pad_id=tokenizer.token_to_id("<pad>"),
)
return tokenizer
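
# Illustrative usage (hypothetical corpus path; Tokenizer/BPE/BpeTrainer and the
# pre/post-processors come from the `tokenizers` package):
#
#   tok = create_tokenizer(Path("corpus.txt"), vocab_size=8000)
#   ids = tok.encode("hello world").ids   # wrapped as "<s> hello world </s>"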
|
6de30b057d920d650f065af0f9083130fbb6df77
| 3,640,794
|
def get_readings(tag):
"""Get sensor readings and collate them in a dictionary."""
try:
enable_sensors(tag)
readings = {}
# IR sensor
readings["ir_temp"], readings["ir"] = tag.IRtemperature.read()
# humidity sensor
readings["humidity_temp"], readings["humidity"] = tag.humidity.read()
# barometer
readings["baro_temp"], readings["pressure"] = tag.barometer.read()
# luxmeter
readings["light"] = tag.lightmeter.read()
# battery
readings["battery"] = tag.battery.read()
disable_sensors(tag)
# round to 2 decimal places for all readings
readings = {key: round(value, 2) for key, value in readings.items()}
return readings
except BTLEException as error:
print("Unable to take sensor readings. {}".format(error))
return {}
|
481aae840d9ab41995086e3ef98c500abf4ec82e
| 3,640,795
|
import binascii
def digita_gw(request):
"""
Digita GW endpoint implementation
"""
identifier = request.data['DevEUI_uplink']['DevEUI']
apsen = core.models.apartment_sensor_models.ApartmentSensor.objects.get_or_create(identifier=identifier)[0]
payload = binascii.unhexlify(request.data['DevEUI_uplink']['payload_hex'])
decoded_payload = decode_elsys_payload(payload)
mapping = settings.DIGITA_GW_PAYLOAD_TO_ATTRIBUTES # type: dict
new_values = []
for key, value in decoded_payload.items():
uri = mapping.get(key, '')
if uri:
attr = core.models.sensor_models.SensorAttribute.objects.get_or_create(uri=uri, defaults={'description': key})[0]
else:
attr = core.models.sensor_models.SensorAttribute.objects.get_or_create(description=key)[0]
apsen_attr = apsen.attributes.get_or_create(attribute=attr)[0]
new_values.append(apsen_attr.values.create(value=value))
models.Subscription.handle_new_values(new_values)
return Response({"message": "Updated successfully"})
|
e6bf45c92ea61278dd47e52ed945e91aa514d21b
| 3,640,796
|
from PIL import Image


def resize_img(_img, maxdims=(1000, 700)):
"""
Resize a given image. Image can be either a Pillow Image, or a NumPy array. Resizing is done automatically such
that the entire image fits inside the given maxdims box, keeping aspect ratio intact
    :param _img: Pillow Image or NumPy array to resize
    :param maxdims: (max height, max width) bounding box, in pixels
:return:
"""
try:
# If NumPy array, create Pillow Image
img = Image.fromarray(_img)
except TypeError:
# Else image must already be a Pillow Image
img = _img
ratio = max(img.size[1] / maxdims[0], img.size[0] / maxdims[1])
    image = img.resize((int(img.size[0] / ratio), int(img.size[1] / ratio)), Image.LANCZOS)  # ANTIALIAS was removed in Pillow 10
return image
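
# Illustrative usage: a 2100x1400 RGB array is scaled by 1/3 so it fits inside
# the default (1000, 700) = (max height, max width) box.
import numpy as np

_thumb = resize_img(np.zeros((1400, 2100, 3), dtype=np.uint8))
assert _thumb.size == (700, 466)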
|
54716e4ce030a675a0655b06d7121d4c38bd7c43
| 3,640,797
|
def natural_key(s):
"""Converts string ``s`` into a tuple that will sort "naturally"
(i.e., ``name5`` will come before ``name10`` and ``1`` will come
before ``A``). This function is designed to be used as the ``key``
argument to sorting functions.
:param s: the str/unicode string to convert.
:rtype: tuple
"""
# Use _nkre to split the input string into a sequence of
# digit runs and non-digit runs. Then use _nkconv() to convert
# the digit runs into ints and the non-digit runs to lowercase.
return tuple(_nkconv(m) for m in _nkre.findall(s))
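
# Illustrative usage (assumes the module-level helpers _nkre / _nkconv split a
# string into digit and non-digit runs and convert them as described above):
#
#   sorted(['name10', 'name5', 'name1'], key=natural_key)
#   # -> ['name1', 'name5', 'name10']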
|
d0eb51bdd3e7c6caa5b13c38269bc9c07e3834d2
| 3,640,798
|
import os
import re
def query_props(path):
"""
Extracts a QueryProps object from a file name.
:param path: Path to a query file
:return: QueryProps of the file
"""
basename = os.path.basename(path)
match = re.match(r'''
(?P<topic>[^-]+?)
(\s*-\s*)
(?P<contributor>[A-Z]+)
(\s*-\s*)?
(?P<query_string>[^-]+)?
(\.(?P<extension>[a-z]+))
''', basename, re.X)
if not match:
raise ValueError(
'"{}" does not follow the file name convention.'.format(basename)
)
return QueryProps(path, **match.groupdict())
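
# Illustrative usage (assumes QueryProps exposes the captured groups as
# attributes): a name following the "<topic> - <CONTRIBUTOR> - <query>.<ext>"
# convention is parsed into its parts.
#
#   props = query_props('warming - AB - global trend.sparql')
#   # props.topic == 'warming', props.contributor == 'AB',
#   # props.query_string == 'global trend', props.extension == 'sparql'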
|
d91406d724f98db063c5f2fe54ec4d5bd5ed325c
| 3,640,799
|