content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def sort_keywords(scores):
    """
    :param scores: A dictionary of lemmas and their corresponding scores,
        assigned by the pagerank algorithm
    :return: A list of the lemmas, sorted by score in descending order.
        (The previous docstring claimed the dictionary itself was returned,
        but the function has always returned a list of keys.)
    """
    # sorted() already returns a list; wrapping it in a comprehension was redundant.
    return sorted(scores, key=scores.get, reverse=True)
def DecodeControlTuples(ldapControlTuples,knownLDAPControls=None):
    """
    Returns list of readily decoded ResponseControl objects
    ldapControlTuples
        Sequence-type of 3-tuples returned by _ldap.result4() containing
        the encoded ASN.1 control values of response controls.
    knownLDAPControls
        Dictionary mapping extended control's OID to ResponseControl class
        of response controls known by the application. If None
        ldap.controls.KNOWN_RESPONSE_CONTROLS is used here.

    Raises ldap.UNAVAILABLE_CRITICAL_EXTENSION for an unknown control that
    is marked critical; unknown non-critical controls are silently dropped.
    """
    knownLDAPControls = knownLDAPControls or KNOWN_RESPONSE_CONTROLS
    result = []
    for controlType,criticality,encodedControlValue in ldapControlTuples or []:
        try:
            # Instantiate the application-registered class for this OID.
            control = knownLDAPControls[controlType]()
        except KeyError:
            # Unknown control type: only fatal when the server flagged it critical.
            if criticality:
                raise ldap.UNAVAILABLE_CRITICAL_EXTENSION('Received unexpected critical response control with controlType %s' % (repr(controlType)))
        else:
            control.controlType,control.criticality = controlType,criticality
            try:
                control.decodeControlValue(encodedControlValue)
            except PyAsn1Error:
                # Undecodable value: re-raise only for critical controls,
                # otherwise skip the control (it is not appended below).
                if criticality:
                    raise
            else:
                result.append(control)
    return result
def transform_geom(
        src_crs,
        dst_crs,
        geom,
        antimeridian_cutting=True,
        antimeridian_offset=10.0,
        precision=-1):
    """Transform geometry from source coordinate reference system into target.

    Parameters
    ------------
    src_crs: CRS or dict
        Source coordinate reference system, in rasterio dict format.
        Example: CRS({'init': 'EPSG:4326'})
    dst_crs: CRS or dict
        Target coordinate reference system.
    geom: GeoJSON like dict object
    antimeridian_cutting: bool, optional
        If True, cut geometries at the antimeridian, otherwise geometries
        will not be cut (default). If False and GDAL is 2.2.0 or newer
        an exception is raised. Antimeridian cutting is always on as of
        GDAL 2.2.0 but this could produce an unexpected geometry.
    antimeridian_offset: float
        Offset from the antimeridian in degrees (default: 10) within which
        any geometries will be split.
    precision: float
        If >= 0, geometry coordinates will be rounded to this number of
        decimal places after the transform operation, otherwise original
        coordinate values will be preserved (default).

    Returns
    ---------
    out: GeoJSON like dict object
        Transformed geometry in GeoJSON dict format
    """
    # Parse the GDAL version string, keeping only the purely numeric parts
    # (drops suffixes such as 'dev').
    numeric_parts = [
        part for part in rasterio.__gdal_version__.split('.') if part.isdigit()]
    gdal_version = tuple(int(part) for part in numeric_parts)
    # GDAL >= 2.2 always cuts at the antimeridian, so honoring
    # antimeridian_cutting=False is impossible there; fail loudly.
    if not antimeridian_cutting and gdal_version[:2] >= (2, 2):
        raise GDALBehaviorChangeException(
            "Antimeridian cutting is always enabled on GDAL 2.2.0 or "
            "newer, which could produce a different geometry than expected.")
    return _transform_geom(
        src_crs,
        dst_crs,
        geom,
        antimeridian_cutting,
        antimeridian_offset,
        precision)
def part_one(data: str) -> int:
    """The best possible cookie score given the ingredient properties from data.

    :param data: puzzle input, one ingredient description per line
    :return: maximum dough score using exactly ``total_quantity`` teaspoons
    """
    total_quantity = 100
    ingredients = [parse_ingredient(line) for line in data.splitlines()]
    # There are 4 ingredients total in the input, so nothing is lost here.
    # (100 // 4 == 25 exactly; for other ingredient counts the start point
    # might not sum to 100 -- TODO confirm with the neighbor generator.)
    quantities = [total_quantity // len(ingredients) for _ in ingredients]
    while True:
        # The scoring function is convex (or rather, its negative is). Hence
        # we can trace a path to the maximum by following local adjustments.
        next_quantities = max_neighboring_quantities(ingredients, quantities)
        if next_quantities == quantities:
            # Hill climbing converged: no neighboring assignment scores higher.
            return dough_score(ingredients, quantities)
        quantities = next_quantities
import unittest
def skip_if_quick(func):
    """Decorator that skips the wrapped test when the --quick option is set.

    The wrapped callable must be a test method whose first argument exposes
    parsed options at ``_opts``.
    """
    @wraps(func)
    def wrapper(*args):
        # pylint: disable=C0111  (missing docstring)
        # pylint: disable=W0212  (access to protected member _opts)
        test_case = args[0]
        if test_case._opts.quick:
            raise unittest.SkipTest('Remove --quick option to run {t}'.format(
                t=func.__name__))
        return func(*args)
    return wrapper
def _build_edges(wave, sampling_type):
"""
Calculates edges of bins of given wavelength given the center value of the
bin and the type of sampling.
Parameters
----------
wave : numpy.ndarray
Array with the wavelengths.
sampling_type : string
Sampling type of the array. It can be either linear ('linear'),
natural logarithm ('ln') or base 10 logarithm (log).
Returns
-------
edges : numpy.ndarray
Array with the edges of the wavelength bins.
"""
if sampling_type == 'linear':
step = wave[1] - wave[0]
edges = wave[0] - step/2.
edges = np.append(edges, wave + step/2.)
elif sampling_type == 'log':
step = np.log10(wave[1]/wave[0])
edges = wave[0] / 10.**(step/2.)
edges = np.append(edges, wave * 10.**(step/2.))
elif sampling_type == 'ln':
step = np.log(wave[1]/wave[0])
edges = wave[0] / np.e**(step/2.)
edges = np.append(edges, wave * np.e**(step/2.))
return edges | 16c92dec058e895fa0be6cc2738ada23ba198099 | 3,630,605 |
def file_readlines(fn):
    """Open file with name `fn`, return open(fn).readlines().

    Uses a context manager so the handle is closed even when readlines()
    raises (the previous version leaked the descriptor in that case).
    """
    with open(fn, 'r') as fd:
        return fd.readlines()
import re
def filter_table(table, **kwargs):
    """Retrieve the filtered rows

    Parameters
    ----------
    table: astropy.table.Table, pandas.DataFrame
        The table to filter
    param: str
        The parameter to filter by, e.g. 'Teff'
    value: str, float, int, sequence
        The criteria to filter by,
        which can be single valued like 1400
        or a range with operators [<, <=, >, >=],
        e.g. ('>1200', '<=1400')

    Returns
    -------
    astropy.table.Table, pandas.DataFrame
        The filtered table (same flavor as the input)
    """
    # Remember the input flavor so we can convert back at the end.
    pandas = False
    if isinstance(table, pd.DataFrame):
        pandas = True
        table = at.Table.from_pandas(table)
    for param, value in kwargs.items():
        # Check it is a valid column
        if param not in table.colnames:
            raise KeyError("No column named {}".format(param))
        # Wildcard case: treat '*' as a glob and translate it to a regex
        if isinstance(value, str) and '*' in value:
            # Get column data
            data = np.array(table[param])
            # Anchor the pattern unless the glob is open at that end.
            if not value.startswith('*'):
                value = '^' + value
            if not value.endswith('*'):
                value = value + '$'
            # Strip double quotes and convert glob '*' to regex '(.*)'
            value = value.replace("'", '').replace('"', '').replace('*', '(.*)')
            # Regex
            reg = re.compile(value, re.IGNORECASE)
            keep = list(filter(reg.findall, data))
            # Get indexes of matching rows
            idx = np.where([i in keep for i in data])
            # Filter table
            table = table[idx]
        else:
            # Make single value string into conditions
            if isinstance(value, str):
                # Check for operator
                if any([value.startswith(o) for o in ['<', '>', '=']]):
                    value = [value]
                # Assume equality if no operator
                else:
                    value = ['== ' + value]
            # Turn numbers into strings
            if isinstance(value, (int, float)) or (isinstance(value, str) and isnumber(value)):
                value = ["== {}".format(value)]
            # Iterate through multiple conditions
            # NOTE(review): each comparison value is passed through eval();
            # do not feed untrusted criteria into this function.
            for cond in value:
                # Equality ('==' also lands here since '<='/'>=' are matched below)
                if cond.startswith('='):
                    v = cond.replace('=', '')
                    table = table[table[param] == eval(v)]
                # Less than or equal
                elif cond.startswith('<='):
                    v = cond.replace('<=', '')
                    table = table[table[param] <= eval(v)]
                # Less than
                elif cond.startswith('<'):
                    v = cond.replace('<', '')
                    table = table[table[param] < eval(v)]
                # Greater than or equal
                elif cond.startswith('>='):
                    v = cond.replace('>=', '')
                    table = table[table[param] >= eval(v)]
                # Greater than
                elif cond.startswith('>'):
                    v = cond.replace('>', '')
                    table = table[table[param] > eval(v)]
                else:
                    raise ValueError("'{}' operator not understood.".format(cond))
    # Restore the caller's table flavor.
    if pandas:
        table = table.to_pandas()
    return table
def get_cached_skin_path():
    """
    Resolve the value of the #SKINSPATH# variable.

    Rainmeter can be installed in numerous ways, so several sources are
    consulted, most-trusted first:

    1. A path the user configured in the Sublime Rainmeter settings.
    2. The SkinPath entry parsed from Rainmeter.ini.
    3. The skins folder of a portable Rainmeter installation.
    4. The Windows registry entry of a default installation.
    5. A guess starting from the user's documents folder.
    """
    rm_path = get_cached_program_path()
    settings_path = get_cached_setting_path()
    # Lazy resolvers so later (more expensive) lookups only run when
    # every earlier one came back empty.
    resolvers = (
        lambda: get_path_from_st_settings(),
        lambda: get_path_from_rm_settings(rm_path, settings_path),
        lambda: get_path_from_portable_rm(rm_path, settings_path),
        lambda: get_path_from_registry(),
        lambda: guess_path_from_user_documents(),
    )
    skins_path = None
    for resolve in resolvers:
        skins_path = resolve()
        if skins_path:
            break
    return skins_path
from typing import Callable
import functools
import inspect
from typing import Hashable
def func_dispatch(func: Callable = None, *, default: bool, clazz=None):
    """
    Value-based dynamic-dispatch function decorator.
    Transforms a function into a dynamic dispatch function, which has different
    behaviors depending upon the value of its first positional parameter. The
    decorated function acts as the default implementation, if default is specified,
    and additional functions may be registered using the dispatch() attribute of
    the dispatch function.
    :param func: function to add dynamic dispatch to.
    :param default: whether or not to default when given an unregistered value.
    :param clazz: class that func is __new__ for, or None.
    :returns: dispatch function.
    """
    if func is None:
        # Used as @func_dispatch(default=..., clazz=...): return the real decorator.
        return functools.partial(func_dispatch, default=default, clazz=clazz)
    if inspect.ismethod(func):
        raise NotImplementedError('member functions are not supported')
    if clazz is None:
        name = func.__name__
        parameters = inspect.signature(func).parameters
    else:
        # Dispatching a class __new__: take the signature from __init__
        # (its 'self' is skipped by the loop below).
        name = clazz.__name__
        parameters = inspect.signature(clazz.__init__).parameters
    # Maps dispatch value -> (implementation, index of dispatch param or None/-1).
    registry = {}
    # Find the first explicit (non-splat) positional argument. This is the dispatch parameter.
    parameters = iter(parameters.values())
    param = None
    while param is None or param.name == 'self' or param.name == 'return' or \
            param.kind == inspect.Parameter.VAR_POSITIONAL or param.kind == inspect.Parameter.VAR_KEYWORD:
        try:
            param = next(parameters)
        except StopIteration:
            raise TypeError('dispatch function does not have any explicit positional arguments') from None
    key = param.name
    @functools.wraps(func)
    def dispatch(*args, **kwargs):
        # If dispatching a class, the first argument indicates the type of class desired.
        # If that class is not the dispatch class, someone is instantiating a derived class directly.
        # In this special case, we bypass dispatch.
        if clazz is not None:
            if len(args) > 0 and inspect.isclass(args[0]):
                klass = args[0]
                if klass is not clazz and not (klass.__qualname__ == clazz.__qualname__ and clazz in klass.__bases__):
                    if issubclass(klass, clazz):
                        return func(*args, **kwargs)
                    else:
                        raise TypeError(f'cls argument for __new__ must be subclass of {clazz!r}, got {klass!r}')
        # Find dispatch param by position or key.
        value = _lookup(key, func, *args, **kwargs)
        if default:
            # Allow default to dispatch func, which we know has the dispatch param at index 0.
            impl, idx = registry.get(value, (func, 0))
        else:
            try:
                impl, idx = registry[value]
            except KeyError:
                raise ValueError(f'no registered implementations for {value!r} for {name}') from None
        if inspect.isclass(impl):
            # Implementation is a class: drop the cls argument and shift the index.
            args = args[1:]
            if idx is not None:
                idx -= 1
        if idx is None:
            # Dispatch param is not desired, remove it.
            if key in kwargs:
                del kwargs[key]
            else:
                # Not in kwargs, must be the first parameter.
                args = args[1:]
        elif idx > 0 and key not in kwargs:
            # Dispatch param is desired and it's not the first argument, so rearrange.
            args = args[1:idx + 1] + args[0:1] + args[idx + 1:]
        return impl(*args, **kwargs)
    @typechecked(always=True)
    def register(impl: Callable = None, *, arguments: MappingProxyType, on: Hashable):
        """
        Registers a new implementation for the given value of key.
        :param on: dispatch value to register this implementation on.
        :param arguments: parameters to impl.
        :param impl: implementation to associate with value.
        """
        if impl is None:
            # Used as @register(arguments=..., on=...): return the real decorator.
            return functools.partial(register, arguments=arguments, on=on)
        if on in registry:
            raise ValueError(f'duplicate implementation for {on!r} for {name}')
        # Determine index of dispatch parameter in this signature.
        # idx stays None when impl does not take the dispatch parameter at all,
        # and becomes -1 for a keyword-only dispatch parameter.
        idx = None
        for i, parameter in enumerate(arguments.values()):
            if parameter.name == key:
                if parameter.kind == inspect.Parameter.KEYWORD_ONLY:
                    # Parameter is keyword-only, so it has no 'index'.
                    idx = -1
                else:
                    idx = i
        registry[on] = impl, idx
        return impl
    dispatch.dispatch = register
    return dispatch
import functools
def access_controlled_app(app):
    """
    An app where all the routes are access-controlled to the 'buyer' role, and there are some login routes if we need.

    :param app: the Flask application to configure
    :return: the same app with login management and a protected 'main' blueprint
    """
    login_manager = LoginManager()
    login_manager.init_app(app)
    @login_manager.user_loader
    def load_user(user_id):
        # simulate loading a user from the API
        return User.from_json({"users": USERS[user_id]})
    # Test-only login endpoints so sessions can be established in tests.
    app.register_blueprint(login_for_tests)
    app.secret_key = 'secret'
    main = Blueprint('main', 'main')
    @main.route('/')
    def simple_route():
        return "Hello"
    # Every request into 'main' must first pass the buyer-role login check.
    main.before_request(functools.partial(require_login, role='buyer'))
    app.register_blueprint(main)
    return app
def get_local_content_list(filename, encoding):
    """Return the file content with status.

    :param filename: path of the file to read
    :param encoding: text encoding used to decode the file
    :returns: tuple of (list of lines, success flag); ([], False) on any error
    """
    lines = []
    succeeded = False
    try:
        with open(filename, 'r', encoding=encoding) as handle:
            # A trailing newline is appended before splitting, as before.
            lines = (handle.read() + '\n').splitlines()
            succeeded = True
    except Exception:
        log.exception('E: Could not find file: {}'.format(filename,))
    return lines, succeeded
def check_pods_status(run_name: str, namespace: str, status: PodStatus, app_name: NAUTAAppNames = None) -> bool:
    """
    Returns true if all pods related to a given run have given status.
    :param run_name: name of a run - obligatory
    :param namespace: namespace where run is located - obligatory
    :param status: status which will be compared with pods' statuses
    :param app_name: name of an app - if None - pods are not limited to any application
    :return: True if all pods related to <run_name> have <status> status. False otherwise
    """
    api = get_k8s_api()
    # Pods belonging to a run are tagged with the runName label.
    label_selector = f"runName={run_name}"
    if app_name:
        label_selector = label_selector + f",app={app_name}"
    pods_list = api.list_namespaced_pod(namespace=namespace, label_selector=label_selector)
    if not pods_list:
        # No response at all -- treat as "not all pods in the wanted state".
        return False
    for pod in pods_list.items:
        # Kubernetes reports phases like 'Running'; upper-case to match the enum values.
        if PodStatus(pod.status.phase.upper()) != status:
            return False
    return True
def retrieve_account(community, platform_type, platform_identifier, blah, community_platform_id=None):
    """Helper method to get a specific linked account.

    :param community: community the account belongs to
    :param platform_type: platform the account is linked on
    :param platform_identifier: the account's identifier on that platform
    :param blah: unused -- NOTE(review): kept only so existing positional
        call sites do not break; consider removing in a coordinated change.
    :param community_platform_id: optional extra filter on the platform instance
    :raises ValueError: if no matching LinkedAccount exists
    :return: the first matching LinkedAccount
    """
    result = LinkedAccount.objects.filter(community=community, platform_type=platform_type,
                                          platform_identifier=platform_identifier)
    if community_platform_id:
        result = result.filter(community_platform_id=community_platform_id)
    if not result:
        raise ValueError(
            f"No LinkedAccount found in community {community} with platform {platform_type} "
            f"and identifier {platform_identifier} (community_platform_id: {community_platform_id})"
        )
    return result[0]
import os
def _read(obj):
    """
    Try to read from a url, file or string.

    Parameters
    ----------
    obj : str, unicode, or file-like

    Returns
    -------
    raw_text : str or bytes
        Bytes when read from a URL or a local file, otherwise whatever
        ``obj`` (or ``obj.read()``) yields.
    """
    if is_url(obj):
        with urlopen(obj) as url:
            text = url.read()
    elif hasattr(obj, "read"):
        text = obj.read()
    elif isinstance(obj, (str, bytes)):
        text = obj
        # If the string names an existing file, return its raw bytes instead.
        try:
            if os.path.isfile(text):
                with open(text, "rb") as f:
                    return f.read()
        except (TypeError, ValueError):
            # os.path.isfile rejects e.g. strings with NUL bytes; fall
            # through and treat the value as literal content.
            pass
    else:
        raise TypeError(f"Cannot read object of type '{type(obj).__name__}'")
    return text
def __leastsq_fit(tomo_data, weights=None, trace=None, beta=None):
    """
    Reconstruct a state from unconstrained least-squares fitting.
    Args:
        tomo_data (list[dict]): state or process tomography data.
        weights (list or array or None): weights to use for least squares
            fitting. The default is standard deviation from a binomial
            distribution.
        trace (float or None): trace of returned operator. The default is 1.
        beta (float or None): hedge parameter (>=0) for computing frequencies
            from zero-count data. The default value is 0.50922.
    Returns:
        numpy.array: A numpy array of the reconstructed operator.
    """
    if trace is None:
        trace = 1.  # default to unit trace
    data = tomo_data['data']
    keys = data[0]['circuit'].keys()
    # Get counts and shots
    counts = []
    shots = []
    ops = []
    for dat in data:
        for key in keys:
            counts.append(dat['counts'][key])
            shots.append(dat['shots'])
            projectors = dat['circuit'][key]
            # Measurement projector for this outcome.
            op = __projector(projectors['meas'], tomo_data['meas_basis'])
            if 'prep' in projectors:
                # Process tomography: combine preparation and measurement
                # operators into a single operator on the doubled space.
                op_prep = __projector(projectors['prep'],
                                      tomo_data['prep_basis'])
                op = np.kron(op_prep.conj(), op)
            ops.append(op)
    # Convert counts to frequencies
    counts = np.array(counts)
    shots = np.array(shots)
    freqs = counts / shots
    # Use hedged frequencies to calculate least squares fitting weights
    if weights is None:
        if beta is None:
            beta = 0.50922
        K = len(keys)
        # Hedging keeps weights finite when a raw frequency is exactly 0 or 1.
        freqs_hedged = (counts + beta) / (shots + K * beta)
        weights = np.sqrt(shots / (freqs_hedged * (1 - freqs_hedged)))
    return __tomo_linear_inv(freqs, ops, weights, trace=trace)
import http
async def testpoint(mockserver: server.MockserverFixture) -> TestpointFixture:
    """Testpoint fixture returns testpoint session instance that works
    as decorator that registers testpoint handler. Original function is
    wrapped with :ref:`AsyncCallQueue`
    :param name: testpoint name
    :returns: decorator
    .. code-block::
        def test_foo(testpoint):
            @testpoint('foo'):
            def testpoint_handler(data):
                pass
            ...
            # testpoint_handler is AsyncCallQueue instance, e.g.:
            assert testpoint_handler.has_calls
            assert testpoint_handler.next_call == {...}
            assert testpoint_handler.wait_call() == {...}
    """
    session = TestpointFixture()
    # The service under test POSTs {'name': ..., 'data': ...} to /testpoint;
    # route each call to the handler registered for that testpoint name.
    @mockserver.json_handler('/testpoint')
    async def _handler(request: http.Request):
        body = request.json
        handler = session.get_handler(body['name'])
        if handler is not None:
            data = await handler(body['data'])
        else:
            # No handler registered for this testpoint: respond with no data.
            data = None
        return {'data': data}
    return session
def cfg(c=[], all=None):
    """returns parsed config for section <endpoint> - if not exists, builds it
    When interactive auth is required we do not write - i.e. only valid for cur.
    session.
    To avoid frequent password queries, call GL.setup function.

    NOTE: the mutable default ``c`` is used deliberately as a cross-call
    cache holding the single ConfigParser instance.
    """
    try:
        # Fast path: config already parsed and contains our endpoint section.
        return c[0][FLG.endpoint_name]
    except:  # NOTE(review): bare except also hides real bugs; consider (IndexError, KeyError)
        pass
    c.append(ConfigParser())
    fn = fn_cfg()
    if exists(fn):
        app.info('Reading', fn=fn)
        c[0].read(fn)
    else:
        # No config file yet: seed an in-memory default 'global' section.
        c[0]['global'] = {
            'default': FLG.endpoint_name,
            'ssl_verify': 'true',
            'timeout': 1,
        }
    if not FLG.endpoint_name in c[0]:
        # Section still missing: authenticate interactively (not written to disk).
        t = CloudFoundry.authenticate()
        c[0][FLG.endpoint_name] = t
    if all:
        return c[0]
    return c[0][FLG.endpoint_name]
from datetime import datetime
def JIRATOSQLdatetimeformat(datetime_in):
    """
    Convert a Jira ISO-8601 timestamp into the SQL DATETIME format.

    (The previous docstring was copy-pasted from a string-sanitizing helper
    and described a nonexistent ``str_in`` parameter.)

    parameters:
        datetime_in (string): Jira timestamp, e.g. '2021-03-04T05:06:07.890+0000'
    returns:
        string: the same instant formatted as 'YYYY-MM-DD HH:MM:SS'
        (the timezone offset is dropped, not converted)
    """
    parsed = datetime.strptime(datetime_in, "%Y-%m-%dT%H:%M:%S.%f%z")
    return parsed.strftime("%Y-%m-%d %H:%M:%S")
def _split_and_reshape_to_ndarrays(flat_v, sizes, shapes):
    """Split and reshape a single flat vector to make a list of ndarrays.

    :param flat_v: 1-D array (numpy or cupy) holding the concatenated values
    :param sizes: element counts of the original arrays, in order
    :param shapes: target shapes, aligned with ``sizes``
    :return: list of arrays built from the split segments
    """
    # Pick numpy or cupy depending on where flat_v lives (CPU vs GPU).
    xp = chainer.cuda.get_array_module(flat_v)
    sections = np.cumsum(sizes)
    # cumsum yields a trailing split point at the very end of flat_v; the
    # resulting empty final segment is simply discarded by zip() below.
    vs = xp.split(flat_v, sections)
    return [v.reshape(shape) for v, shape in zip(vs, shapes)]
from re import M
import random
def simSpikes(coef, M_k, M_h, dt):
    """
    Simulate spike times from a GLM-style point process.

    The instantaneous rate is exp(M @ coef + dc), where M stacks the
    stimulus (M_k) and spike-history (M_h) design matrices column-wise.
    Spikes are drawn by time rescaling: an Exp(1) threshold is compared
    against the accumulated (integrated) intensity.

    :param coef: filter coefficients, length M_k.shape[1] + M_h.shape[1]
    :param M_k: stimulus design matrix, shape (n_bins, n_k)
    :param M_h: spike-history design matrix, shape (n_bins, n_h)
    :param dt: bin width (spike times are returned in units of dt * bin)
    :return: list of spike times
    """
    # Fixes vs. previous version: M was used before it was assigned
    # (UnboundLocalError) and np.hstack was called with two positional
    # arrays instead of a tuple of arrays.
    M = np.hstack((M_k, M_h))
    length = M.shape[0]
    # Post-spike filter length; previously referenced but never defined.
    hlen = M_h.shape[1]
    dc = -0.7  # constant (DC) offset of the log-rate; TODO: make a parameter
    # The conditional intensity is fixed given M and coef, so compute it once
    # instead of re-evaluating the full matrix product every chunk.
    intensity = np.exp(np.dot(M, coef) + dc)
    tsp = []
    jbin = 0
    tspnext = random.expovariate(1)
    rprev = 0
    nsp = 0
    chunk_size = 20
    while jbin < length:
        indx_chunk = np.arange(jbin, min(jbin + chunk_size - 1, length))
        cum_intensity = np.cumsum(intensity[indx_chunk]) + rprev
        if tspnext > cum_intensity[-1]:
            # No spike in this chunk: carry the accumulated intensity forward.
            jbin = indx_chunk[-1] + 1
            rprev = cum_intensity[-1]
        else:
            # Spike: first bin where the integrated intensity crosses threshold.
            ispk = indx_chunk[np.where(cum_intensity >= tspnext)[0][0]]
            tsp.append(ispk * dt)
            postspike_limit = min(length, ispk + hlen)
            indx_postSpk = np.arange(ispk + 1, postspike_limit)
            # TODO: inject the post-spike (history) current over indx_postSpk
            tspnext = random.expovariate(1)
            rprev = 0
            jbin = ispk + 1
            nsp = nsp + 1
            # Grow the chunk towards ~1.5x the mean inter-spike interval.
            # int(...) keeps np.arange producing integer indices (a float
            # chunk_size previously yielded float indices, which fail).
            chunk_size = max(20, int(round(1.5 * jbin / nsp)))
    return tsp
import json
def apply_lookup(dataframe, group_type):
    """
    converts df[group_type] from comma-separated ids to comma-separated names
    dataframe : df
    group_type : string
        column to convert; also names the lookup file
        ./data/lookups/<group_type>_lookup.json (id -> name mapping)
    returns : df (a deep copy; the input is not modified)
    """
    print("applying look up")
    print("working with {}".format(group_type))
    df = dataframe.copy(deep=True)
    df[group_type] = (
        df[group_type].astype("str", copy=False).dropna(inplace=False)
    )  # some columns were mixed type
    # load the lookup table
    path_to_save = "./data/lookups/{}_lookup.json".format(group_type)
    with open(path_to_save) as f:
        lookup = json.load(f)
    # astype("str") turns missing values into the literal string 'nan'
    lookup["nan"] = "None"
    # explode by group_type: one row per individual id
    df[group_type] = df[group_type].map(lambda x: x.split(","))
    df_exp = df.explode(group_type)
    # apply lookup (a KeyError here means an id missing from the lookup file)
    df_exp[group_type] = df_exp[group_type].map(lambda x: lookup[x])
    # implode: re-join the translated names per bgg_id
    df_imp = (
        df_exp[["bgg_id", group_type]]
        .groupby("bgg_id")
        .agg(lambda x: ",".join(x))
        .reset_index()
    )
    # join back
    # NOTE(review): this assigns by positional index and assumes df and
    # df_imp end up in the same row order -- verify for unsorted inputs.
    df[group_type] = df_imp[group_type]
    print("finished with {}".format(group_type))
    print("finished applying look up")
    return df
def make_url_parser(global_conf, directory, base_python_name,
                    index_names=None, hide_extensions=None,
                    ignore_extensions=None,
                    **constructor_conf):
    """
    Create a URLParser application that looks in ``directory``, which
    should be the directory for the Python package named in
    ``base_python_name``. ``index_names`` are used when viewing the
    directory (like ``'index'`` for ``'index.html'``).
    ``hide_extensions`` are extensions that are not viewable (like
    ``'.pyc'``) and ``ignore_extensions`` are viewable but only if an
    explicit extension is given.
    """
    def _resolve(value, conf_key, fallback):
        # Explicit argument wins; otherwise fall back to global_conf, then
        # to the hard-coded default. Always normalized through aslist().
        if value is None:
            value = global_conf.get(conf_key, fallback)
        return converters.aslist(value)

    index_names = _resolve(index_names, 'index_names',
                           ('index', 'Index', 'main', 'Main'))
    hide_extensions = _resolve(hide_extensions, 'hide_extensions',
                               ('.pyc', 'bak', 'py~'))
    ignore_extensions = _resolve(ignore_extensions, 'ignore_extensions', ())
    # There's no real way to set constructors currently...
    return URLParser({}, directory, base_python_name,
                     index_names=index_names,
                     hide_extensions=hide_extensions,
                     ignore_extensions=ignore_extensions,
                     **constructor_conf)
import os
import yaml
def get_user_labels_from_storage(label_storage_file: str) -> UserLabels:
    """
    get all labels from label storage file
    Returns:
        UserLabels: all labels
    Raises:
        FileNotFoundError: if label storage file not found
        ValueError: if version mismatch
        Error: if parse failed or other error occured
    """
    raw = {}
    if os.path.isfile(label_storage_file):
        with open(label_storage_file, 'r') as storage:
            raw = yaml.safe_load(storage)
    return UserLabels(**raw)
def add_memo():
    """
    Insert a memo into the database.

    Reads 'date' (YYYY/MM/DD) and 'text' from the request query string,
    stores a 'dated_memo' record, and returns JSON with a human-readable
    message, the new record id and a success flag.
    """
    try:
        date = arrow.get(request.args.get('date', 0, type=str),
                         'YYYY/MM/DD').naive
        text = request.args.get('text', 0, type=str)
        record = {"type": "dated_memo",
                  "date": date,
                  "text": text}
        result = collection.insert_one(record)
        record_id = str(result.inserted_id)
        message = 'Memo added.'
        worked = True
    except Exception:
        # Was a bare `except:`, which would also swallow SystemExit /
        # KeyboardInterrupt. `Exception` keeps the best-effort behaviour
        # (bad date, DB failure -> polite JSON error) while letting
        # process-control exceptions propagate.
        message = 'Memo not added.'
        record_id = ''
        worked = False
    return flask.jsonify(message=message, record_id=record_id, result=worked)
def items_JSON():
    """Returns JSON object with all items.

    :return: flask JSON response with an 'Items' array of serialized items
    """
    # Get all items
    items = getItemAll(session)
    # Return JSON object
    return jsonify(Items=[i.serialize for i in items])
def unescape(value, escape = "\\"):
    """
    Unescapes the provided string value using the provided escape
    character as the reference for the unescape operation.
    An escape character followed by any character yields that character;
    a trailing escape character is kept literally.
    This is considered to be a very expensive operation and so it
    should be used carefully.
    :type value: String
    :param value: The string value that is going to be unescape.
    :rtype: String
    :return: The final unescaped value.
    """
    chars = []
    source = iter(value)
    while True:
        try:
            current = next(source)
        except StopIteration:
            break
        if current != escape:
            chars.append(current)
            continue
        # Escape character: emit the following character verbatim, or the
        # escape itself when the string ends here.
        try:
            chars.append(next(source))
        except StopIteration:
            chars.append(escape)
    return "".join(chars)
def __num_elems(shape):
"""Returns the number of elements in the given shape
Args:
shape: TensorShape
Return:
tot_elems: int
"""
tot_elems = 1
for s in shape:
tot_elems *= int(s)
return tot_elems | fd4f72394b22c98e6bedb545d7d11b8bfae11add | 3,630,627 |
def ocrRawCaptcha(image):
    """
    recognize a captcha from http://bkxk.xmu.edu.cn/xsxk/login.html without preprocessing
    :param image: image data of the captcha
    :return: a string with four character
    """
    # Segment the raw image into per-character images, then OCR each one.
    images, _ = processImg.processImg(image)
    result = ocrCaptchas(images)
    return result
from typing import Optional
from typing import Any
import json
def read_JSON(path: OpenFile) -> Optional[Any]:
    """
    Attempt to read a JSON file. Returns ``None`` if the file cannot be
    opened. (The previous docstring claimed ``False`` was returned.)
    Malformed JSON still raises ``json.JSONDecodeError``.
    :param path: the path of the file to read
    """
    try:
        with open(path, "r") as f:
            return json.load(f)
    except IOError:
        return None
def adoptionSearch(cursor, search):
    """Return approved adoption applications matching Mo's search input.

    Matches the search text (case-insensitively) against the adopter's last
    name or the co-applicant's last name.

    :param cursor: open DB-API cursor (a paramstyle of ``%s``, e.g.
        MySQLdb/psycopg2, is assumed -- use ``?`` for sqlite3)
    :param search: user-supplied (untrusted) search string
    :return: rows from cursor.fetchall()
    """
    # Security fix: the search term was previously interpolated into the SQL
    # with an f-string, allowing SQL injection. It is now passed as a bound
    # parameter.
    pattern = "%{}%".format(search.lower())
    query = (
        "SELECT DISTINCT Adopter.email, AdoptionApplication.application_num, AdoptionApplication.date, "
        "AdoptionApplication.co_applicant_first_name, AdoptionApplication.co_applicant_last_name, "
        "Adopter.first_name, Adopter.last_name, Adopter.state, Adopter.city, Adopter.street, Adopter.zip_code, Adopter.cell_phone "
        "FROM AdoptionApplication LEFT JOIN Adopter ON AdoptionApplication.adopter = Adopter.email "
        "WHERE AdoptionApplication.STATUS = 'approved' AND (LOWER( Adopter.last_name ) LIKE %s "
        "OR LOWER( AdoptionApplication.co_applicant_last_name ) LIKE %s);"
    )
    cursor.execute(query, (pattern, pattern))
    query_result = cursor.fetchall()
    return query_result
def onc_datetime(date_time, timezone="Canada/Pacific"):
    """Return a string representation of a date/time in the particular
    ISO-8601 extended format required by the Ocean Networks Canada (ONC)
    data web services API.
    :arg date_time: Date/time to transform into format required by
        ONC data web services API.
    :type date_time: str
        or :py:class:`datetime.datetime`
        or :py:class:`arrow.Arrow`
    :arg str timezone: Timezone of date_time.
    :returns: UTC date/time formatted as :kbd:`YYYY-MM-DDTHH:mm:ss.SSSZ`
    :rtype: str
    """
    # Normalize the input into an Arrow object first.
    d = arrow.get(date_time)
    # Re-interpret the wall-clock time in the supplied timezone, then convert to UTC.
    d_tz = arrow.get(d.datetime, timezone)
    d_utc = d_tz.to("utc")
    return "{}Z".format(d_utc.format("YYYY-MM-DDTHH:mm:ss.SSS"))
def req(retry=3, proxy=False, timeout=30, concurren=1):
    """Decorator factory exposing optional request configuration.

    (Original docstring, translated from Chinese: "Provide the optional
    configuration through a decorator.")
    """
    def decorate(func):
        # Wrap the target function in a ReqParse carrying the settings.
        return ReqParse(func, retry=retry, proxy=proxy, timeout=timeout,
                        concurren=concurren)
    return decorate
import sys
def _read(f):
"""Read a file's contents, autodetecting whether the arg is a file or filename,
and treating '-' as as indication to read from stdin."""
if type(f) is str:
if f == "-":
return sys.stdin.read()
else:
with open(f, "r") as ff:
return ff.read()
else:
return f.read() | 51966b1a28d4c3d9b0bd037a8ffe037e901f58e5 | 3,630,633 |
import sys
import os
def find_utils():
    """Find all the utilities used by this script.

    :return: dict mapping tool name ('xl', 'qemu-img', 'killall', 'unpack')
        to its resolved path; exits the process when a tool is missing.
    """
    utils = dict()
    for bin in ['xl', 'qemu-img', 'killall']:
        res = lookup_bin(bin)
        if not res:
            log.error('Cannot find required program: ' + str(bin))
            sys.exit(1)
        utils[bin] = res
    # unpack is special because its apart of this project so we'll try looking
    # in a few places for it along with PATH.
    if os.path.isfile('../bin/unpack'):
        utils['unpack'] = '../bin/unpack'
    elif os.path.isfile('./bin/unpack'):
        utils['unpack'] = './bin/unpack'
    else:
        utils['unpack'] = lookup_bin('unpack')
    # Only the PATH lookup can come back empty; the file checks above
    # guarantee a truthy path in the other branches.
    if not utils['unpack']:
        log.error('Cannot find unpack. Is it compiled? If so, try adding it to PATH')
        sys.exit(1)
    return utils
def getHandValue(cards):
    """Returns value of cards.

    Each card's rank is its first character: number cards count face value,
    K/Q/J count 10, and every ace first counts 1 and is then upgraded to 11
    while that does not bust the hand (total stays <= 21).
    """
    total = 0
    aces = 0
    for card in cards:
        rank = card[0]
        if rank == 'A':
            aces += 1
            continue
        total += 10 if rank in ('K', 'Q', 'J') else int(rank)
    # Count every ace as 1 first...
    total += aces
    # ...then promote aces to 11 one at a time while it doesn't bust.
    for _ in range(aces):
        if total + 10 <= 21:
            total += 10
    return total
from typing import Dict
def zigzag(n: int = 8) -> Dict:
    """
    ZigZag encoder & decoder
    Args:
        n: size of chunk
    Returns:
        dictionary of encoder and decoder
    """
    # Sort flat positions by (anti-diagonal, alternating row/column), which
    # yields the classic JPEG zig-zag traversal order.
    keyed = []
    for p in range(n * n):
        col, row = p % n, p // n
        primary = col + row
        secondary = (col, row)[(col - row) % 2]
        keyed.append((primary, secondary, p))
    scan_order = [entry[2] for entry in sorted(keyed)]
    encoder = np.array(list(zip(range(n * n), scan_order)))
    # The decoder is the inverse permutation of the scan order.
    decoder = np.array(sorted(encoder, key=lambda pair: pair[-1]))[:, 0]
    return {
        "encoder": encoder,
        "decoder": decoder
    }
def get_lr(optimizer):
    """Get current learning rate

    Parameters
    ----------
    optimizer : obj
        An optimizer object exposing ``param_groups``.

    Returns
    -------
    lr
        Learning rate of the first parameter group, or ``None`` when the
        optimizer has no parameter groups.
    """
    first_group = next(iter(optimizer.param_groups), None)
    return None if first_group is None else first_group["lr"]
import requests
def get_business_by_id(business_ids):
    """
    Gets the business details for all the business_id's that are provided
    :param business_ids: This takes a single business id or a list of business ids
    :type business_ids: list
    :return: business
    :rtype: dict
    :raises ApiError: if the party service responds with an error status
    """
    logger.info("Attempting to fetch businesses", business_ids=business_ids)
    params = {"id": business_ids}
    url = f'{app.config["PARTY_URL"]}/party-api/v1/businesses'
    response = requests.get(url, params=params, auth=app.config["BASIC_AUTH"])
    try:
        response.raise_for_status()
    except requests.exceptions.HTTPError:
        # Wrap HTTP-level failures in the application's ApiError.
        raise ApiError(logger, response)
    return response.json()
import os
import csv
def load_target_class(input_dir):
    """Loads target classes.

    :param input_dir: directory containing data.csv; column 0 is treated as
        the image id and column 2 as the integer target class.
    :return: dict mapping image id (str) to target class (int)
    """
    with tf.gfile.Open(os.path.join(input_dir, 'data.csv')) as f:
        return {row[0]: int(row[2]) for row in csv.reader(f)}
from federatedscope.core.lr import LogisticRegression
from federatedscope.cross_backends import LogisticRegression
from federatedscope.core.mlp import MLP
from federatedscope.tabular.model import QuadraticModel
from federatedscope.cv.model import get_cnn
from federatedscope.nlp.model import get_rnn
from federatedscope.nlp.model import get_transformer
from federatedscope.gfl.model import get_gnn
from federatedscope.mf.model.model_builder import get_mfnet
def get_model(model_config, local_data, backend='torch'):
    """
    Instantiate the model selected by ``model_config.type`` for the given data.

    Arguments:
        model_config: config node; ``type`` picks the architecture, and fields
            such as ``out_channels``, ``hidden``, ``layer``, ``dropout`` and
            ``use_bias`` parameterize it.
        local_data (object): the model to be instantiated is responsible for
            the given data (an (x, y) pair, a dataloader, or a dict of splits).
        backend (str): 'torch' or 'tensorflow'; only honoured by the 'lr'
            branch.
    Returns:
        model (torch.Module): the instantiated model.
    Raises:
        TypeError: when ``local_data`` has an unsupported layout.
        ValueError: for an unknown backend or an unknown ``model_config.type``.
    """
    # User-registered model builders take precedence over the built-in types.
    for func in register.model_dict.values():
        model = func(model_config, local_data)
        if model is not None:
            return model
    if model_config.type.lower() == 'lr':
        if backend == 'torch':
            # TODO: make the instantiation more general
            if isinstance(
                    local_data, dict
            ) and 'test' in local_data and 'x' in local_data['test']:
                model = LogisticRegression(
                    in_channels=local_data['test']['x'].shape[-1],
                    class_num=1,
                    use_bias=model_config.use_bias)
            else:
                # Peel a representative (x, y) pair out of the data so the
                # input dimensionality can be inferred from x.
                if isinstance(local_data, dict):
                    if 'data' in local_data.keys():
                        data = local_data['data']
                    elif 'train' in local_data.keys():
                        # local_data['train'] is Dataloader
                        data = next(iter(local_data['train']))
                    else:
                        raise TypeError('Unsupported data type.')
                else:
                    data = local_data
                x, _ = data
                model = LogisticRegression(in_channels=x.shape[-1],
                                           class_num=model_config.out_channels)
        # BUGFIX: this elif/else chain belongs to `if backend == 'torch'`
        # above; it had been dedented one level, which placed an `elif`
        # after the outer `else` and made the module unparsable.
        elif backend == 'tensorflow':
            model = LogisticRegression(
                in_channels=local_data['test']['x'].shape[-1],
                class_num=1,
                use_bias=model_config.use_bias)
        else:
            raise ValueError
    elif model_config.type.lower() == 'mlp':
        if isinstance(local_data, dict):
            if 'data' in local_data.keys():
                data = local_data['data']
            elif 'train' in local_data.keys():
                # local_data['train'] is Dataloader
                data = next(iter(local_data['train']))
            else:
                raise TypeError('Unsupported data type.')
        else:
            data = local_data
        x, _ = data
        model = MLP(channel_list=[x.shape[-1]] + [model_config.hidden] *
                    (model_config.layer - 1) + [model_config.out_channels],
                    dropout=model_config.dropout)
    elif model_config.type.lower() == 'quadratic':
        if isinstance(local_data, dict):
            data = next(iter(local_data['train']))
        else:
            # TODO: complete the branch
            data = local_data
        x, _ = data
        model = QuadraticModel(x.shape[-1], 1)
    elif model_config.type.lower() in ['convnet2', 'convnet5', 'vgg11', 'lr']:
        model = get_cnn(model_config, local_data)
    elif model_config.type.lower() in ['lstm']:
        model = get_rnn(model_config, local_data)
    elif model_config.type.lower().endswith('transformers'):
        model = get_transformer(model_config, local_data)
    elif model_config.type.lower() in [
            'gcn', 'sage', 'gpr', 'gat', 'gin', 'mpnn'
    ]:
        model = get_gnn(model_config, local_data)
    elif model_config.type.lower() in ['vmfnet', 'hmfnet']:
        model = get_mfnet(model_config, local_data)
    else:
        raise ValueError('Model {} is not provided'.format(model_config.type))
    return model
def twilio_secure(func):
    """Decorator: only serve the wrapped view to validated Twilio requests;
    anything else gets a 403 response."""
    @wraps(func)
    def guarded(*args, **kwargs):
        if not validate_twilio_request():
            return Response("Not a valid Twilio request", status=403)
        return func(*args, **kwargs)
    return guarded
def get_batch_dataset(record_file, parser, config):
    """
    Batch generator over a TFRecordDataset of training data.

    Args:
        record_file: path to the training tf_record file
        parser: parse function mapping a serialized record to tensors
        config: hyper-parameters (num_threads, capacity, batch_size,
            is_bucket, bucket_range)
    """
    num_threads = tf.constant(config.num_threads, dtype=tf.int32)
    # Parse in parallel, shuffle within `capacity` records, repeat forever.
    dataset = tf.data.TFRecordDataset(record_file).map(
        parser, num_parallel_calls=num_threads).shuffle(config.capacity).repeat()
    if config.is_bucket:
        # Bucketing: group sequences of similar length into the same
        # mini-batch so compute is not wasted on padding.
        buckets = [tf.constant(num) for num in range(*config.bucket_range)]
        def key_func(context_idxs, ques_idxs, context_char_idxs, ques_char_idxs, y1, y2, qa_id):
            # Context length = number of non-zero tokens; pick the bucket
            # whose (min, max] interval contains it.
            c_len = tf.reduce_sum(
                tf.cast(tf.cast(context_idxs, tf.bool), tf.int32))
            buckets_min = [np.iinfo(np.int32).min] + buckets
            buckets_max = buckets + [np.iinfo(np.int32).max]
            conditions_c = tf.logical_and(
                tf.less(buckets_min, c_len), tf.less_equal(c_len, buckets_max))
            bucket_id = tf.reduce_min(tf.where(conditions_c))
            return bucket_id
        def reduce_func(key, elements):
            # Each window of same-bucket examples becomes one batch.
            return elements.batch(config.batch_size)
        dataset = dataset.apply(tf.contrib.data.group_by_window(
            key_func, reduce_func, window_size=5 * config.batch_size)).shuffle(len(buckets) * 25)
    else:
        dataset = dataset.batch(config.batch_size)
    return dataset
from typing import Iterable
from typing import Tuple
from typing import List
def find_bundles_rescalings(
    bundles: Iterable[InstanceBundle]
) -> Tuple[Tuple[List[InstanceBundle], Rescaling], ...]:
    """Group rescaling-compatible bundles and average each group's rescaling.

    Args:
        bundles: Iterable of bundles to extract rescaling values from.

    Returns:
        Tuple of subgroups of bundles with an average rescaling value.
    """
    groups: List[List[InstanceBundle]] = []
    for candidate in bundles:
        candidate_parameters = candidate.representative_parameters
        # Join the first group whose representative parameters are
        # compatible for rescaling; otherwise start a new group.
        target = None
        for existing in groups:
            if candidate_parameters.equals_for_rescaling(
                    existing[0].representative_parameters):
                target = existing
                break
        if target is None:
            groups.append([candidate])
        else:
            target.append(candidate)

    def averaged(group: List[InstanceBundle]) -> Rescaling:
        # Average slope and intercept over the group's intrinsic rescalings.
        rescalings = [bundle.intrinsic_rescaling for bundle in group]
        slope = np.average([r.slope for r in rescalings])
        intercept = np.average([r.intercept for r in rescalings])
        return Rescaling(slope, intercept)

    return tuple((group, averaged(group)) for group in groups)
def get_ordinal_suffix(number):
    """Return ``number`` with its English ordinal suffix appended.

    Examples: 1 -> 1st, 2 -> 2nd, 4 -> 4th, 11 -> 11th.

    Rules (https://en.wikipedia.org/wiki/Ordinal_indicator#English):
    - st / nd / rd for numbers ending in 1 / 2 / 3,
    - except the "teens" (ending in 11, 12 or 13), which take th,
    - th for everything else.
    """
    digits = str(number)
    # Teens override the last-digit rule: ...11, ...12, ...13 all take "th".
    if len(digits) > 1 and digits[-2] == "1":
        suffix = "th"
    else:
        suffix = {"1": "st", "2": "nd", "3": "rd"}.get(digits[-1], "th")
    return f"{digits}{suffix}"
def get_greppable(string):
    """Bracket the first character so grepping for the string will not
    match the grep process itself in a process listing."""
    first = string[0]
    return string.replace(first, f"[{first}]", 1)
def is_convergent_pair(p, rules):
    """Return True when both sides of the critical pair reduce to the same
    normal forms under ``rules``."""
    lhs, rhs = p
    return list(normalforms(lhs, rules)) == list(normalforms(rhs, rules))
def is_userti(*args):
    """
    is_userti(ea) -> bool

    Thin SWIG-generated wrapper forwarding to the native
    ``_ida_nalt.is_userti``. Presumably reports whether the type info at
    address ``ea`` was set by the user — TODO confirm against the IDA SDK.
    """
    return _ida_nalt.is_userti(*args)
def players_player_id_get(player_id):  # noqa: E501
    """Retrieve a single player's record

    Returns a player record # noqa: E501

    :param player_id: ID of player to return
    :type player_id: str

    :rtype: Player
    """
    # Generated stub: the real lookup logic has not been implemented yet.
    placeholder = 'do some magic!'
    return placeholder
def zero_array(array):
    """
    Zero a 2-D array of data against the values in its first column.

    Each column has ``array[:, 0]`` subtracted from it, so the first column
    of the result is all zeros.

    NOTE(review): the original docstring said "rows are time points, columns
    are signals", but the code zeroes against the first *column*
    (``array[:, 0]``) — i.e. it behaves as if each row is a signal and each
    column a time point. Confirm the intended orientation.

    :param array: 2-D array of data.
    :return: Zero'd numpy array
    :rtype: np.ndarray
    """
    # Broadcasting is equivalent to the previous
    #   np.apply_along_axis(lambda x: x - array[:, 0], 0, array)
    # but avoids a Python-level lambda call per column.
    return array - array[:, :1]
from typing import Callable
from typing import Sequence
from typing import Hashable
from typing import List
def _stemmatological_costs_factory(
max_del_len: int = 5, frag_start: float = 10.0, frag_end: float = 10.0
) -> Callable:
"""
Define and return a function for computing candidate costs for a "stemmatological" distance matrix.
:param max_del_len: The maximum length of deletion block.
:param frag_start:
:param frag_end:
:return:
"""
def _stemmatological_costs(
seq_x: Sequence[Hashable],
seq_y: Sequence[Hashable],
d: List[List[float]],
i: int,
j: int,
):
"""
Computes candidate costs for an entry of a "stemmatological" distance matrix.
This internal function will compute the candidate costs for an entry
(i, j) in terms of the Levenshtein distance matrix (seq_a, seq_b),
each cost corresponding to one of the available edit operations.
:param seq_x: The first sequence to be compared.
:param seq_y: The second sequence to be compared.
:param d: The "starting matrix" for the cost computation.
:param i: The index of `seq_x` to be considered.
:param j: The index of `seq_y` to be considered.
:return: A tuple with the costs for deletion, insertion, and substitution.
"""
substitution_cost = 0 if seq_x[i - 1] == seq_y[j - 1] else 1
costs = [
d[i][j - 1] + 1,
d[i - 1][j - 1] + substitution_cost,
]
m = len(seq_x)
lower = round(m * frag_start / 100.0)
upper = round(m * (100 - frag_end) / 100.0)
# Delete consecutive block of n
for n in range(1, min(max_del_len, i)):
# Discount bulk deletion near ends
if i <= lower or i >= upper:
costs.append(d[i - n][j] + 0.5)
else:
costs.append(d[i - n][j] + 1)
return costs
return _stemmatological_costs | 2be18ea378fb70b7efc511d3d5572ef8b7638a9c | 3,630,650 |
def results_by_parameter(res, param, sort_by=None, sort_desc=False,
                         crossvalid_use_measurment='validation',
                         crossvalid_reduce=False,
                         crossvalid_reduce_fn=None):
    """
    Takes a list of evaluation results `res` returned by a LDA evaluation function (a list in the form
    `[(parameter_set_1, {'<metric_name>': result_1, ...}), ..., (parameter_set_n, {'<metric_name>': result_n, ...})]`)
    and returns a list with tuple pairs using only the parameter `param` from the parameter sets in the evaluation
    results such that the returned list is
    `[(param_1, {'<metric_name>': result_1, ...}), ..., (param_n, {'<metric_name>': result_n, ...})]`.
    Optionally order either by parameter value (`sort_by=None` - the default) or by result metric
    (`sort_by='<metric name>'`).

    For cross-validation results (each result is a list of
    (training, validation) pairs), `crossvalid_use_measurment` selects which
    half is used, and `crossvalid_reduce`/`crossvalid_reduce_fn` optionally
    collapse each fold list to a single value (mean by default).
    """
    if len(res) == 0:
        return []
    if crossvalid_use_measurment not in ('validation', 'training'):
        raise ValueError('`crossvalid_use_measurment` must be either "validation" or "training" to use the validation '
                         'or training measurements.')
    # Project each full parameter set down to the single parameter of interest.
    tuples = [(p[param], r) for p, r in res]
    # NOTE(review): `argsort` below is not defined in this chunk — presumably
    # a package-local helper returning sort indices; confirm its import.
    if type(tuples[0][1]) in (list, tuple):  # cross validation results
        if len(tuples[0][1]) < 1 or len(tuples[0][1][0]) != 2:
            raise ValueError('invalid evaluation results from cross validation passed')
        mean = lambda x: sum(x) / len(x)
        crossvalid_reduce_fn = crossvalid_reduce_fn or mean
        # Each fold result is a (training, validation) pair; pick one side.
        use_measurements_idx = 0 if crossvalid_use_measurment == 'training' else 1
        measurements = [(p, [pair[use_measurements_idx] for pair in r]) for p, r in tuples]
        measurements_reduced = [(p, crossvalid_reduce_fn(r)) for p, r in measurements]
        # Sort by parameter value (column 0) or by reduced metric (column 1).
        sort_by_idx = 0 if sort_by is None else 1
        sorted_ind = argsort(list(zip(*measurements_reduced))[sort_by_idx])
        if sort_desc:
            sorted_ind = reversed(sorted_ind)
        if crossvalid_reduce:
            measurements = measurements_reduced
    else:  # single validation results
        if len(tuples[0]) != 2:
            raise ValueError('invalid evaluation results passed')
        params, metric_results = list(zip(*tuples))
        if sort_by:
            sorted_ind = argsort([r[sort_by] for r in metric_results])
        else:
            sorted_ind = argsort(params)
        if sort_desc:
            sorted_ind = reversed(sorted_ind)
        measurements = tuples
    return [measurements[i] for i in sorted_ind]
def generate_case_study(
    sampling_method: NormalSamplingMethod, cmap: CommitMap,
    case_study_version: int, project_name: str, **kwargs: tp.Any
) -> CaseStudy:
    """
    Generate a case study for a given project.

    This function will draw `num_samples` revisions from the history of the
    given project and persists the selected set into a case study for
    evaluation.

    Args:
        sampling_method: to use for revision sampling
        cmap: commit map to map revisions to unique IDs
        case_study_version: version to set for the case study
        project_name: name of the project so sample from
        kwargs: additional args that should be passed on to the strategy;
            must contain at least 'revs_per_year' and 'extra_revs'.

    Returns:
        a new case study
    """
    case_study = CaseStudy(project_name, case_study_version)
    # Optionally seed the study with a fixed number of revisions per year.
    if kwargs['revs_per_year'] > 0:
        extend_with_revs_per_year(case_study, cmap, **kwargs)
    # Distribution-based sampling applies only to the two supported samplers.
    if (
        isinstance(
            sampling_method, (HalfNormalSamplingMethod, UniformSamplingMethod)
        )
    ):
        extend_with_distrib_sampling(case_study, cmap, **kwargs)
    # Explicitly requested revisions are always added last.
    if kwargs['extra_revs']:
        extend_with_extra_revs(case_study, cmap, **kwargs)
    return case_study
def is_checkbox(field):
    """
    Template filter for form fields: True when the bound field renders
    with a checkbox widget.
    """
    widget = field.field.widget
    return isinstance(widget, forms.CheckboxInput)
def build_info_str(username: str, name_len: int, remaining_chip: int, action: str,
                   chip: int, is_waiting: bool, countdown: int) -> str:
    """Build a one-line summary of a user's action at the table.

    Args:
        username (str): user name
        name_len (int): characters reserved for the name column
        remaining_chip (int): chips the user still holds
        action (str): one of check, bet, raise, all-in, fold; `bet` is the
            first put-chip action while `raise` answers a prior `bet`
        chip (int): chips involved, only meaningful for `bet`, `raise`
            and `all-in`
        is_waiting (bool): True when this user is in execution position
        countdown (int): seconds left to act, only meaningful when
            `is_waiting` is True

    Return:
        info_str (str): a string describing the user's action
    """
    parts = [f"{username:{name_len}} (${str(remaining_chip) + ')':<5} {action}"]
    if action in ("bet", "raise", "all-in"):
        parts.append(f" ${chip} ")
    # The acting player gets an arrow marker and a countdown suffix;
    # everyone else is indented to keep the columns aligned.
    if is_waiting:
        parts.append(f"{countdown}s")
        prefix = "-> "
    else:
        prefix = "   "
    return prefix + "".join(parts)
from typing import Union
from typing import List
import sys
import pathlib
def main(argv: Union[List[str], None] = None) -> int:
    """Drive the derivation.

    Renders an HTML index table of airport counts per two-character prefix
    and writes it to ``prefix/index.html``. Returns a process exit code:
    0 on success, 2 on a usage error (any argument given).
    """
    argv = sys.argv[1:] if argv is None else argv
    if len(argv) != 0:
        print("Usage: indexer")
        return 2
    alphabet = tuple('0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ')
    # Default every two-character cell to an empty table cell.
    table = {f'{k}{j}': '<td> </td>' for k in alphabet for j in alphabet}
    regions, total_airports = 0, 0
    # NOTE(review): `prefix_store` is a module-level mapping of prefix ->
    # GeoJSON-like dict with a "features" list — defined outside this chunk.
    for ic in prefix_store:
        regions += 1
        count = len(prefix_store[ic]["features"])
        total_airports += count
        table[ic] = f'<td><a href="{ic}/" title="{ic}">{count}</a></td>'
    # One <tr> per first character, one <td> per second character.
    data_rows = []
    for k in alphabet:
        row = [f'<tr><th>{k}</th>']
        for j in alphabet:
            row.append(table[f'{k}{j}'])
        row.append('\n')
        data_rows.append(''.join(row))
    # Placeholder -> replacement map for the page template (ANCHOR, PATH,
    # HOST, HTML_PAGE, ENCODING, *_NAV are module-level constants).
    html_dict = {
        ANCHOR: f'prefix/',
        PATH: PATH_NAV,
        HOST: HOST_NAV,
        'NUMBER_REGIONS': str(regions),
        'TOTAL_AIRPORTS': str(total_airports),
        'DATA_ROWS': ''.join(data_rows),
    }
    html_page = HTML_PAGE
    for key, replacement in html_dict.items():
        html_page = html_page.replace(key, replacement)
    html_path = pathlib.Path('prefix', 'index.html')
    with open(html_path, 'wt', encoding=ENCODING) as html_handle:
        html_handle.write(html_page)
    return 0
def clustering_from_distance(dendrogram, distance):
    """
    Extract the flat clustering obtained by cutting a dendrogram at a
    distance level.

    Parameters
    ----------
    dendrogram: numpy.array
        Each line contains the two merged nodes, the distance between the
        merged nodes and the number of nodes in the new cluster.
    distance: float
        The distance level at which the partition is extracted; all merges
        at a distance <= this level are applied.

    Returns
    -------
    clusters: list of list
        A list of clusters, where each cluster is a list of nodes.
    """
    n_nodes = np.shape(dendrogram)[0] + 1
    if distance < 0:
        raise ValueError
    # Start from singleton clusters and replay merges in dendrogram order
    # until the merge distance exceeds the requested cut level.
    members = {node: [node] for node in range(n_nodes)}
    for step in range(n_nodes - 1):
        if dendrogram[step, 2] > distance:
            break
        left = int(dendrogram[step][0])
        right = int(dendrogram[step][1])
        members[n_nodes + step] = members.pop(left) + members.pop(right)
    return [nodes for nodes in members.values()]
import torch
def _loss_from_outputs_batch(loss_function, outputs_per_chunk, targets_per_chunk):
    """
    Applies the loss function to batch of outputs (unpadded, masked) and batch of targets (padded, unmasked).
    :param loss_function: e.g., NLLLoss.
    :param outputs_per_chunk: a list of tensors, each the relevant (i.e., masked) outputs for a chunk.
    :param targets_per_chunk: the UNMASKED targets (e.g., an y_batch_chunk given by yield_batches).
    :return: a Variable containing the loss.
    """
    # This nonempty-filter is probably no longer necessary in pytorch version 4 (bug in torch.cat).
    nonempty_outputs = [output for output in outputs_per_chunk if len(output) > 0]
    # But the following still is necessary, because the loss function cannot handle an empty tensor...
    if len(nonempty_outputs) == 0:
        # No predictions at all: return a zero loss on the same device as
        # the (empty) outputs so downstream .backward() still works.
        loss = torch.Tensor([0.0])
        if outputs_per_chunk[0].is_cuda: loss = loss.cuda()
        return autograd.Variable(loss, requires_grad=True)
    # For computing the loss, first concatenate all the outputs:
    outputs_concatenated = torch.cat(nonempty_outputs, dim=0)  # Would give an error for empty tensors in PyTorch 3
    # And likewise 'pack' all targets into a long list and wrap inside a Variable:
    # NOTE(review): NON_ENTITY_MENTION and tensor_utils come from elsewhere
    # in this package; targets equal to NON_ENTITY_MENTION are masked out so
    # packed targets line up with the already-masked outputs — confirm.
    mask = (targets_per_chunk != NON_ENTITY_MENTION).squeeze()
    targets_packed, _ = tensor_utils.pack_batch_masked(targets_per_chunk, mask=mask)
    targets_packed = autograd.Variable(targets_packed, requires_grad=False).squeeze()
    # Easy peasy!
    loss = loss_function(outputs_concatenated, targets_packed)
    return loss
import os
def dataInput(fileName):
    """Read lines of data from a file.

    Returns "" when the file is empty (or was just created), the single raw
    line (trailing newline included) when there is exactly one line, and a
    list of lines with their trailing ``os.linesep`` stripped when there are
    several.

    To make configuration easier, a missing file is created (containing one
    blank line) and "" is returned.
    """
    data = []
    if os.path.isfile(fileName):
        # `with` guarantees the handle is closed (the original leaked it).
        with open(fileName, "r") as file:
            data = file.readlines()
    else:
        # Create an (almost) empty file so later runs find it.
        with open(fileName, "w") as file:
            file.write(os.linesep)
    # Turn the raw data into something usable.
    if len(data) < 1:
        result = ""
    elif len(data) == 1:
        result = data[0]
    else:
        result = [line.strip(os.linesep) for line in data]
    return result
def equivPhkv(k, v, n):
    """
    Equivalence check for a (key, value) pair against node ``n``: the values
    must be equivalent AND ``n``/``n.node`` must be the very same objects as
    ``v``/``v.node`` (identity, i.e. same memory location, not just equality).
    """
    if not equivValue(k, v, n):
        return False
    return referenceIdentity(n, v) and referenceIdentity(n.node, v.node)
import numpy
def get_spherical_bounding_box(lons, lats):
    """
    Given a collection of points find and return the bounding box,
    as a pair of longitudes and a pair of latitudes.

    Parameters define longitudes and latitudes of a point collection
    respectively in a form of lists or numpy arrays.

    :return:
        A tuple of four items. These items represent western, eastern,
        northern and southern borders of the bounding box respectively.
        Values are floats in decimal degrees.
    :raises ValueError:
        If points collection has the longitudinal extent of more than
        180 degrees (it is impossible to define a single hemisphere
        bound to poles that would contain the whole collection).
    """
    north, south = numpy.max(lats), numpy.min(lats)
    west, east = numpy.min(lons), numpy.max(lons)
    # Longitudes must already be normalized to [-180, 180].
    assert (-180 <= west <= 180) and (-180 <= east <= 180), (west, east)
    if get_longitudinal_extent(west, east) < 0:
        # points are lying on both sides of the international date line
        # (meridian 180). the actual west longitude is the lowest positive
        # longitude and east one is the highest negative.
        if hasattr(lons, 'flatten'):
            # fixes test_surface_crossing_international_date_line
            lons = lons.flatten()
        west = min(lon for lon in lons if lon > 0)
        east = max(lon for lon in lons if lon < 0)
        # Every point must fall between the corrected west and east bounds;
        # otherwise no single <180-degree box can contain the collection.
        if not all((get_longitudinal_extent(west, lon) >= 0
                    and get_longitudinal_extent(lon, east) >= 0)
                   for lon in lons):
            raise ValueError('points collection has longitudinal extent '
                             'wider than 180 deg')
    return SphericalBB(west, east, north, south)
def get_similarity_score(dict1, dict2, dissimilarity = False):
    """
    The keys of dict1 and dict2 are all lowercase,
    you will NOT need to worry about case sensitivity.

    Args:
        dict1: frequency dictionary of words or n-grams for one text
        dict2: frequency dictionary of words or n-grams for another text
        dissimilarity: Boolean, optional parameter. Default to False.
          If this is True, return the dissimilarity score, 100*(DIFF/ALL), instead.
    Returns:
        int, a percentage between 0 and 100, inclusive
        representing how similar the texts are to each other

        The difference in text frequencies = DIFF sums words
        from these three scenarios:
        * If a word or n-gram occurs in dict1 and dict2 then
          get the difference in frequencies
        * If a word or n-gram occurs only in dict1 then take the
          frequency from dict1
        * If a word or n-gram occurs only in dict2 then take the
          frequency from dict2

        The total frequencies = ALL is calculated by summing
        all frequencies in both dict1 and dict2.

        Return 100*(1-(DIFF/ALL)) rounded to the nearest whole number if dissimilarity
        is False, otherwise returns 100*(DIFF/ALL)
    """
    # All three DIFF scenarios collapse to |freq1 - freq2| with a missing key
    # counted as 0, so a single pass over the union of keys suffices.  (The
    # previous implementation compared every key of dict1 against every key
    # of dict2: O(len(dict1) * len(dict2)) instead of O(len1 + len2).)
    DIFF = sum(abs(dict1.get(key, 0) - dict2.get(key, 0))
               for key in dict1.keys() | dict2.keys())
    ALL = sum(dict1.values()) + sum(dict2.values())
    if dissimilarity:
        return round(100 * (DIFF / ALL))
    return round(100 * (1 - DIFF / ALL))
def predict_fn(input_data, model):
    """Run the model callable on the input and return its prediction."""
    prediction = model(input_data)
    return prediction
import os
import pathlib
import re
import sys
import base64
def solve_challenge(challenge):
    """
    Sign the binary `challenge` with this AS's beacon-server private key.

    The parameter challenge comes in binary already.

    Locates the SCION "gen" directory (from $SC, falling back to
    ~/go/src/github.com/scionproto/scion), derives ISD/AS numbers from the
    module-level `IA` ("<isd>-<as>"), loads the newest *.seed key of beacon
    server bs<I>-<A>-1 and returns sign(challenge, key).  Exits the process
    with status 1 on any lookup or read failure.
    """
    global IA
    # 1) get the location of the certificates and keys
    SC = os.environ["SC"] if "SC" in os.environ else os.path.join(str(pathlib.Path.home()), "go", "src", "github.com", "scionproto", "scion")
    SC = os.path.join(SC, "gen")
    m = re.match("^([0-9]+)-([0-9]+)$", IA)
    if not m:
        print ("ERROR: could not understand the IA from: ", IA)
        sys.exit(1)
    I = m.group(1)
    A = m.group(2)
    filepath = os.path.join(SC, "ISD"+I, "AS"+A, "bs"+I+"-"+A+"-1")
    privkey = os.path.join(filepath, "keys")
    if not os.path.exists(privkey):
        print("ERROR: no such path: ", privkey)
        sys.exit(1)
    # Reverse sort so the lexicographically-newest .seed file comes first.
    privkeys = [k for k in sorted(os.listdir(privkey), reverse=True) if k.endswith(".seed")]
    if len(privkeys) < 1:
        print("ERROR: could not find a private key under ", privkey)
        sys.exit(1)
    privkey = os.path.join(privkey, privkeys[0])
    try:
        with open(privkey) as f:
            privkey = f.read()
    except Exception as ex:
        print("ERROR: could not read file %s: %s" % (privkey, ex))
        sys.exit(1)
    # Seed files store the raw key base64-encoded.
    privkey = base64.standard_b64decode(privkey)
    # 2) instantiate the private key and certificate and sign the challenge
    # NOTE(review): `sign` is defined elsewhere in this module.
    signed = sign(challenge, privkey)
    return signed
def uniform_2_sphere(num: int = None):
    """Sample unit vectors uniformly distributed on the 2-sphere.

    Source: https://gist.github.com/andrewbolster/10274979

    Args:
        num: Number of vectors to sample (or None if single)

    Returns:
        Random Vector (np.ndarray) of size (num, 3) with norm 1.
        If num is None returned value will have size (3,)
    """
    # Draw azimuth uniformly and the cosine of the polar angle uniformly,
    # which yields a uniform distribution over the sphere's surface.
    if num is None:
        azimuth = np.random.uniform(0.0, 2 * np.pi)
        cos_polar = np.random.uniform(-1.0, 1.0)
    else:
        azimuth = np.random.uniform(0.0, 2 * np.pi, num)
        cos_polar = np.random.uniform(-1.0, 1.0, num)
    polar = np.arccos(cos_polar)
    sin_polar = np.sin(polar)
    return np.stack(
        (sin_polar * np.cos(azimuth), sin_polar * np.sin(azimuth), np.cos(polar)),
        axis=-1,
    )
import torch
def nce_past(z_next_trans_dist, z_next_enc):
    """
    InfoNCE-style objective ("I_NCE") contrasting encoded next-states
    against transition distributions.

    z_next_trans_dist: p(.|z, u)
    z_next_enc: samples from p(.|x'), shape (batch, z_dim)

    Returns the negated I_NCE estimate (a scalar to minimize).
    """
    batch_size, z_dim = z_next_enc.size(0), z_next_enc.size(1)
    # Evaluate every encoded sample under every transition distribution.
    # NOTE(review): `repeat_dist` is defined elsewhere in this module.
    z_next_trans_dist_rep = repeat_dist(z_next_trans_dist, batch_size, z_dim)
    z_next_enc_rep = z_next_enc.repeat(1, batch_size).view(-1, z_dim)
    # scores[i, j] = p(z'_i | z_j, u_j)
    scores = z_next_trans_dist_rep.log_prob(z_next_enc_rep).view(batch_size, batch_size)
    # Subtract the row max before exponentiating for numerical stability.
    # NOTE(review): the subtraction and exp happen INSIDE no_grad, so no
    # gradient flows through `scores` from here on — confirm this is intended.
    with torch.no_grad():
        normalize = torch.max(scores, dim=-1)[0].view(-1, 1)
        scores = scores - normalize
        scores = torch.exp(scores)
    # I_NCE
    positive_samples = scores.diag()
    avg_negative_samples = torch.mean(scores, dim=-1)
    return -torch.mean(torch.log(positive_samples / avg_negative_samples + 1e-8))
def plot_performance(barcode_counts,
                     tick_label_size = 8,
                     cbar_label_size = 5,
                     dpi = 300,
                     barcode_threshold = 1,
                     absent_color = "black",
                     present_color = "green",
                     save = False,
                     wdir = None,
                     ytick_freq = None,
                     xtick_freq = None,
                     xtick_rotation = 90,
                     tick_genes = False,
                     gene_name_index = None):
    """
    Plot presence/absence plot for a mip run.

    Cells of `barcode_counts` (samples x probes) with a count >=
    `barcode_threshold` are drawn in `present_color`, the rest in
    `absent_color`; NaN cells stay blank.  When `save` is True the figure is
    written to `wdir`/performance.png and nothing is returned; otherwise the
    (figure, axes) pair is returned.
    """
    # Default to roughly 30 ticks on each axis.
    if xtick_freq is None:
        xtick_freq = barcode_counts.shape[1]//30
        if xtick_freq == 0:
            xtick_freq = 1
    if ytick_freq is None:
        ytick_freq = barcode_counts.shape[0]//30
        if ytick_freq == 0:
            ytick_freq = 1
    fig, ax = plt.subplots()
    # Two-color map: values are binarized to 0 (absent) / 1 (present).
    cmap = colors.ListedColormap([absent_color, present_color])
    boundaries = [-0.5, 0.5, 1.5]
    norm = colors.BoundaryNorm(boundaries, cmap.N)
    heat = ax.pcolormesh(
        barcode_counts.applymap(
            lambda a: np.nan if np.isnan(a)
            else 0 if a < barcode_threshold
            else 1
        ), cmap=cmap, norm=norm)
    # Label every `ytick_freq`-th sample row.
    sample_ids = list(barcode_counts.index)
    sample_locs = np.arange(1, len(sample_ids) + 1, ytick_freq) - 0.5
    ylabs = sample_ids[::ytick_freq]
    plt.yticks(sample_locs, ylabs)
    if tick_genes:
        # Use the gene-name element of the column tuples as x labels.
        bc_cols = barcode_counts.columns.tolist()
        bc_cols = [c[gene_name_index] for c in bc_cols]
        xlabs = bc_cols[::xtick_freq]
        gene_locs = np.arange(1, len(bc_cols) + 1, xtick_freq) - 0.5
        plt.xticks(gene_locs, xlabs,
                   rotation = xtick_rotation,
                   ha = "right")
    for ticklabel in ax.get_xticklabels():
        ticklabel.set_fontsize(tick_label_size)
    for ticklabel in ax.get_yticklabels():
        ticklabel.set_fontsize(tick_label_size)
    ax.set_ylabel("Samples")
    ax.set_xlabel("Probes")
    fig.suptitle("Performance",
                 verticalalignment="bottom")
    fig.tight_layout()
    # Color bar only needs the two categorical entries.
    cbar = fig.colorbar(heat, ticks = [0, 1],
                        shrink = 0.2
                        )
    cbar.ax.tick_params(labelsize=cbar_label_size)
    cbar.ax.set_yticklabels(["Absent", "Present"])
    fig.set_dpi(dpi)
    fig.tight_layout()
    if save:
        fig.savefig(wdir + "performance.png",
                    dpi = dpi,
                    bbox_inches='tight')
        plt.close("all")
    else:
        return fig,ax
    return
def get_simple_countings(df, start_yr=START_YR, end_yr=END_YR):
    """Generates simple counting statistics for the criterias ports, authors,
    platforms, file extensions and types of exploits. It determines the 10
    largest countings of each criteria.

    :param df (DataFrame): A DataFrame object with at least the columns
        'date', 'port', 'author', 'platform', 'file' and 'type'.
    :param start_yr (str): The year when the counting should start; default
        is START_YR from config.
    :param end_yr (str): The year when the counting should end; default is
        END_YR from config.
    :return: A dictionary for the countings of ports, authors, platforms,
        file extensions and types as keys and a 3-tuple as values
        containing the DataFrame object, absolute size of exploit counting
        and the real occurrence in the 10 largest countings.
    """
    # Re-index by date so the [start_yr:end_yr] slice selects by year.
    df.index = pd.to_datetime(df.date.values)
    df_year_range = df[start_yr:end_yr]
    # --- ports: NaNs are excluded from the size, index coerced to int ---
    ports = df_year_range['port']
    ports_size = ports.size - ports.isna().sum()
    ports_counts = ports.value_counts(dropna=True)
    nlargest_ports = ports_counts.nlargest(10)
    nlargest_ports.index = pd.Int64Index(nlargest_ports.index)
    nsmallest_ports_sum = ports_counts.nsmallest(ports_counts.size - 10).sum()
    # Everything outside the top 10 is folded into an 'others' bucket.
    nlargest_ports['others'] = nsmallest_ports_sum
    nlargest_ports = nlargest_ports.to_frame()
    nlargest_ports.rename(columns={'port': 'count'}, inplace=True)
    # --- authors ---
    authors = df_year_range['author']
    authors_size = authors.size
    authors_counts = authors.value_counts(dropna=True)
    nlargest_authors = authors_counts.nlargest(10)
    nsmallest_authors_sum = authors_counts.nsmallest(
        authors_counts.size - 10).sum()
    nlargest_authors['others'] = nsmallest_authors_sum
    nlargest_authors = nlargest_authors.to_frame()
    nlargest_authors.rename(columns={'author': 'count'}, inplace=True)
    # --- platforms ---
    platforms = df_year_range['platform']
    platforms_size = platforms.size
    platforms_counts = platforms.value_counts(dropna=True)
    nlargest_platforms = platforms_counts.nlargest(10)
    nsmallest_platforms_sum = platforms_counts.nsmallest(
        platforms_counts.size - 10).sum()
    nlargest_platforms['others'] = nsmallest_platforms_sum
    nlargest_platforms = nlargest_platforms.to_frame()
    nlargest_platforms.rename(columns={'platform': 'count'}, inplace=True)
    # --- file extensions: counted from the part after the first '.' ---
    files = df_year_range['file']
    files_size = files.size
    files_counts = files.str.split('.').str[1].value_counts()
    nlargest_files = files_counts.nlargest(10)
    nsmallest_files_sum = files_counts.nsmallest(files_counts.size - 10).sum()
    nlargest_files['others'] = nsmallest_files_sum
    nlargest_files = nlargest_files.to_frame()
    nlargest_files.rename(columns={'file': 'count'}, inplace=True)
    # --- exploit types ---
    types = df_year_range['type']
    types_size = types.size
    types_counts = types.value_counts(dropna=True)
    nlargest_types = types_counts.nlargest(10)
    nsmallest_types_sum = types_counts.nsmallest(types_counts.size - 10).sum()
    nlargest_types['others'] = nsmallest_types_sum
    nlargest_types = nlargest_types.to_frame()
    nlargest_types.rename(columns={'type': 'count'}, inplace=True)
    return {
        'ports': (nlargest_ports, ports_size, ports_counts.size),
        'authors': (nlargest_authors, authors_size, authors_counts.size),
        'platforms': (nlargest_platforms, platforms_size,
                      platforms_counts.size),
        'file extensions': (nlargest_files, files_size, files_counts.size),
        'types': (nlargest_types, types_size, types_counts.size)
    }
from zine.application import get_application
def get_engine():
    """Return the active database engine (the database engine of the active
    application). If no application is enabled this has an undefined behavior.
    If you are not sure if the application is bound to the active thread, use
    :func:`~zine.application.get_application` and check it for `None`.

    The database engine is stored on the application object as `database_engine`.
    """
    # Delegates to the thread-bound application object; no caching here.
    return get_application().database_engine
def get_unnormalized_text(words):
    """Concatenate the unnormalized text (with whitespace) of the given words."""
    pieces = []
    for word in words:
        pieces.append(word.unnormalized_with_whitespaces)
    return "".join(pieces)  # | 162854d917ee4d49c3b2b824abc07697ac4f05ba | 3,630,669
def getCurrentUsersHomePath():
    """ Return the path to the users home directory. Usually C:/Users/<user> """
    # CSIDL_PROFILE resolves the current user's profile directory via the
    # Windows shell API (`shell`/`shellcon` are pywin32 modules -- presumably
    # imported at module level; not visible here).
    return (shell.SHGetFolderPath (0, shellcon.CSIDL_PROFILE, None, 0)) | f6f637028bbf6b6dd6eb976e7ed81b460173b4eb | 3,630,670
import re
def alpha_num_order(string: str) -> str:
    """Pad every digit run to 5 digits so plain string sort becomes numeric.
    Ex: alphaNumOrder("a6b12.125") ==> "a00006b00012.00125"
    """
    pieces = re.split(r"(\d+)", string)
    padded = (
        format(int(piece), "05d") if piece.isdigit() else piece
        for piece in pieces
    )
    return "".join(padded)  # | 01d49320c30f232163198ae8e88a11e4dfbc611f | 3,630,671
import logging
import os
import sys
def setup_logging_streams(model, log_to_file=True, log_to_stdout=False):
    """Attach file and/or stdout logging handlers for `model`.

    Returns a zero-argument `clear_handlers` callable that closes and
    detaches every handler this call installed.  Handlers are added to the
    module-level `log` logger (defined elsewhere in this file).
    """
    formatter = logging.Formatter(
        '[%(name)s][%(asctime)s][%(levelname)s]: %(message)s',
        datefmt='%m:%d:%Y:%I:%M:%S'
    )
    handlers = []
    if log_to_file:
        log_path = os.path.join(model.log_dir, 'metrics.log')
        file_handler = logging.FileHandler(log_path)
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(formatter)
        log.addHandler(file_handler)
        print("Logging to %s" % log_path)
        handlers.append(file_handler)
    if log_to_stdout:
        stream_handler = logging.StreamHandler(stream=sys.stdout)
        stream_handler.setLevel(logging.DEBUG)
        stream_handler.setFormatter(formatter)
        log.addHandler(stream_handler)
        print("Logging to STDOUT")
        handlers.append(stream_handler)
    def clear_handlers():
        # Close and detach only the handlers installed above.
        for handler in handlers:
            handler.close()
            log.removeHandler(handler)
        print("Cleared all logging handlers")
    return clear_handlers  # | c656a59d54f903398269214e65170db59bfcc632 | 3,630,672
import tempfile
import logging
import sys
def setup_logger():
    """Set up the 'PESummary' logger.

    Creates a fresh temporary directory under LOG_CACHE, logs DEBUG and
    above to a file there, and INFO (or DEBUG with -v/--verbose on the
    command line) to the console.

    Returns:
        tuple: (logger, LOG_FILE) -- the configured logger and the path of
        its log file.
    """
    # NOTE: both helpers close over FORMATTER, which is only assigned further
    # down; that is fine because they are not called until after it exists.
    def get_console_handler(stream_level="INFO"):
        console_handler = logging.StreamHandler()
        console_handler.setLevel(level=getattr(logging, stream_level))
        console_handler.setFormatter(FORMATTER)
        return console_handler
    def get_file_handler(log_file):
        # mode='w' truncates any pre-existing file of the same name.
        file_handler = logging.FileHandler(log_file, mode='w')
        file_handler.setLevel(level=logging.DEBUG)
        file_handler.setFormatter(FORMATTER)
        return file_handler
    # make_dir/LOG_CACHE/LogFormatter/_logger_format are module-level helpers
    # defined elsewhere in this file.
    make_dir(LOG_CACHE)
    dirpath = tempfile.mkdtemp(dir=LOG_CACHE)
    stream_level = 'INFO'
    # Verbose CLI flags raise console output to DEBUG.
    if "-v" in sys.argv or "--verbose" in sys.argv:
        stream_level = 'DEBUG'
    FORMATTER = LogFormatter(_logger_format(), datefmt='%Y-%m-%d %H:%M:%S')
    LOG_FILE = '%s/pesummary.log' % (dirpath)
    logger = logging.getLogger('PESummary')
    logger.setLevel(level=logging.DEBUG)
    logger.addHandler(get_console_handler(stream_level=stream_level))
    logger.addHandler(get_file_handler(LOG_FILE))
    return logger, LOG_FILE | 48c44a14b45399772c751a90b664b2f722d63b17 | 3,630,673
import requests
def _web_services_request(endpoint, params, method='GET'):
    """
    Perform a request on an NYC.ID Web Services endpoint.
    'userName' and 'signature' are added to the specified params.
    :param endpoint: web services endpoint (e.g. "/account/validateEmail.htm")
    :param params: request parameters excluding 'userName' and 'signature'
    :param method: HTTP method
    :return: request response (None if every attempt raised SSLError)
    """
    current_app.logger.info("NYC.ID Web Services Requests: {} {}".format(method, endpoint))
    params['userName'] = current_app.config['NYC_ID_USERNAME']
    # don't refactor to use dict.update() - signature relies on userName param
    params['signature'] = _generate_signature(
        current_app.config['NYC_ID_PASSWORD'],
        _generate_string_to_sign(method, endpoint, params)
    )
    response = None
    # SSLError with 'bad signature' is sometimes thrown when sending the
    # request which causes an nginx error and 502; retrying up to 5 times
    # resolves the issue.
    for _attempt in range(5):
        try:
            response = requests.request(
                method,
                urljoin(current_app.config['WEB_SERVICES_URL'], endpoint),
                verify=current_app.config['VERIFY_WEB_SERVICES'],
                params=params  # query string parameters always used
            )
        except SSLError:
            sentry.captureException()
            continue
        break
    return response  # | 745a93616cc8fa1bc5a275f7abf3c000c5635af4 | 3,630,674
import os
def write_nk_file(target_dir, target_name, data):
    """
    Write `data` to "<target_dir>/<target_name>.nk".
    Args:
        target_dir(str): must be in an existing directory
        target_name(str): name of the file without the extension
        data(str):
    Returns:
        str: path the file has been written to.
    """
    filename = "{}.nk".format(target_name)
    target_path = os.path.join(target_dir, filename)
    with open(target_path, "w") as nk_file:
        nk_file.write(data)
    logger.info("[write_nk_file] Finished. Wrote {}.".format(target_path))
    return target_path  # | 47b0959fa7fa81316eb10569a73f7728eccd0a12 | 3,630,675
def dihedral_group(n):
    """
    Return the dihedral group S_n (cached per `n`).
    >>> from qitensor import dihedral_group
    >>> S3 = dihedral_group(3)
    >>> S3.order
    6
    >>> S3.elements
    [<S3.r0>, <S3.r1>, <S3.r2>, <S3.s0>, <S3.s1>, <S3.s2>]
    >>> S3.e
    <S3.r0>
    >>> S3.r1 * S3.s0
    <S3.s1>
    >>> import pickle
    >>> id(S3) == id(pickle.loads(pickle.dumps(S3)))
    True
    """
    # EAFP: hit the cache first, build and memoize on a miss.
    try:
        return _dihedral_group_cache[n]
    except KeyError:
        group = DihedralGroup_impl(n, True)
        _dihedral_group_cache[n] = group
        return group  # | 9045d3d417f5aea2b6e118fd2ad7f054a194c308 | 3,630,676
def _half(X):
    """Returns the lower triangular part of
    a matrix with half of diagonal part.
    Args:
        X: tensor of shape (..., m, m).
    Returns:
        tensor of shape (..., m, m), a set of matrices with half
        of diagonal and without upper triangular parts."""
    # Trailing matrix dimension; the mask below is (dim, dim).
    dim = tf.shape(X)[-1]
    dtype = X.dtype
    # All-ones matrix minus 0.5 on the diagonal -> 1 off-diagonal, 0.5 on it.
    half = tf.ones((dim, dim),
                   dtype=dtype) - 0.5 * tf.linalg.diag(tf.ones((dim,), dtype))
    # band_part(-1, 0) keeps the lower triangle (incl. diagonal), zeros the rest.
    half = tf.linalg.band_part(half, -1, 0)
    # Elementwise mask broadcasts over the leading batch dimensions of X.
    return half * X | 3f52e2c4a289fc9d14ba8e9c9c42c3d3a22a7b28 | 3,630,677
def multipart_encode_for_requests(params, boundary=None, cb=None):
    """streams uploads instead of loading entire file into memory"""
    generator, headers = multipart_encode(params, boundary, cb)
    adapter = IterableToFileAdapter(generator)
    return adapter, headers  # | 859c07b9af80b4df3267900276ab1c0506d42355 | 3,630,678
import json
def get_ability_icons(champion, input_path):
    """
    Return a dict mapping the ability keys 'Passive', 'Q', 'W', 'E' and 'R'
    to the corresponding icon png paths for `champion`.

    The champion json is read from f"{input_path}{champion}.json" (Data
    Dragon layout).  The result is also stored in the module-level
    `ability_icon_paths` global, matching the original behavior.
    """
    global ability_icon_paths
    ability_icon_paths = dict()
    # Rek'Sai appears to be the exception in naming conventions
    if champion == 'Reksai':
        champion = 'RekSai'
    # Read champ-specific json
    with open(f"{input_path}{champion}.json") as f:
        data = json.load(f)
    champ = data['data'][champion]
    # Passive has its own key; Q/W/E/R are spells[0..3] in order.
    passive_png = champ['passive']['image']['full']
    ability_icon_paths['Passive'] = f"data/dragontail-11.1.1/11.1.1/img/passive/{passive_png}"
    for key, spell in zip(('Q', 'W', 'E', 'R'), champ['spells']):
        png = spell['image']['full']
        ability_icon_paths[key] = f"data/dragontail-11.1.1/11.1.1/img/spell/{png}"
    return ability_icon_paths  # | e33c01bedcd8bf20959978df2bc2b33b934e2181 | 3,630,679
import json
def build_json_response(data, response_code='200 OK'):
    """
    data (str | dict) : JSON encodable data
    response_code (str) : HTTP response code
    ----
    Return (bytes) HTTP response of JSON-encoded data.
    """
    if type(data) == str:
        data = json.loads(data)
    elif type(data) == dict:
        _recursive_json_load(data)
    body = json.dumps(data)
    response = (
        f'HTTP/1.1 {response_code}\r\n'
        'Content-Type: application/json\r\n'
        f'Content-Length: {len(body)}\r\n'
        '\r\n'
        f'{body}'
    )
    return response.encode('utf-8')  # | ca2e19fbcaea7811c45d984540f2b03d21a1ce87 | 3,630,680
import math
def eval_biot_savart(xcp0, xnode1, xnode2, gamma, l0, delta_visc=0.025):
    """
    Evaluate induced velocities at control points via the Biot-Savart law.

    Vortex line elements are defined by endpoints xnode1/xnode2, strengths
    gamma and lengths l0; delta_visc regularizes the velocity so it goes to
    zero on the vortex line.

    Input shapes:
        xcp0: (ncp, 3)
        xnode1, xnode2: (1, nvor, 3)
        gamma, l0: (nvor,)
    Returns:
        u_gamma: (ncp, nvor, 3)
    """
    cps = xcp0.reshape(-1, 1, 3)                # (ncp, 1, 3)
    # Broadcasting: dim 1 of cps over nvor, dim 0 of xnode1/2 over ncp.
    r1 = cps - xnode1                           # (ncp, nvor, 3)
    r2 = cps - xnode2                           # (ncp, nvor, 3)
    r1_norm = np.sqrt(np.sum(r1 ** 2, axis=2, keepdims=True))  # (ncp, nvor, 1)
    r2_norm = np.sqrt(np.sum(r2 ** 2, axis=2, keepdims=True))  # (ncp, nvor, 1)
    cross_r1r2 = np.cross(r1, r2)
    dot_r1r2 = np.sum(r1 * r2, axis=2, keepdims=True)
    norm_prod = r1_norm * r2_norm
    strengths = gamma.reshape(1, -1, 1)
    lengths = l0.reshape(1, -1, 1)
    numer = strengths * (r1_norm + r2_norm) * cross_r1r2
    denom = 4 * math.pi * (norm_prod * (norm_prod + dot_r1r2)
                           + (delta_visc * lengths) ** 2)
    return numer / denom  # | 01595ad9eb5977f39ebe916f733746f8789e4bd0 | 3,630,681
import functools
import inspect
def validate_params(func):
    """
    Decorator that rejects calls where any plain parameter of ``func`` is
    ``None``.

    Only parameters whose repr equals their name (no default, no annotation,
    no ``*``/``**``) are checked.  When a ``None`` value is found the wrapped
    function is not called and ``format_return(99901)`` is returned instead.
    """
    @functools.wraps(func)  # preserve __name__/__doc__ of the wrapped function
    def _decorator(*args, **kwargs):
        def _get_param_items(func, args, kwargs):
            parameters = inspect.signature(func).parameters
            arg_keys = tuple(parameters.keys())
            # Plain parameters: str(Parameter) == its name exactly when it has
            # no default, no annotation and is positional-or-keyword.
            vparams = [k for k, v in parameters.items() if k == str(v)]
            param_items = []
            # collect positional args
            for i, value in enumerate(args):
                _key = arg_keys[i]
                if _key in vparams:
                    param_items.append([_key, value])
            # collect keyword args
            for arg_name, value in kwargs.items():
                if arg_name in vparams:
                    param_items.append([arg_name, value])
            return param_items
        check_list = _get_param_items(func, args, kwargs)
        # reject any None among the checked parameters
        for item in check_list:
            if item[1] is None:
                return format_return(99901)
        return func(*args, **kwargs)
    return _decorator  # | ae0eb32347a3916f657653b1d8fda4ddd3292e44 | 3,630,682
def techniques_used(technique_list, technique):
    """Register `technique` in `technique_list` (mutated in place),
    distinguishing techniques from sub-techniques.

    :param technique_list: dict keyed by ATT&CK id; each value holds a
        'subtechniques' list and, once seen with a description, 'descr'.
    :param technique: dict with 'object' (STIX object) and 'relationship'.
    :return: True if `technique` is a sub-technique, else False.
    """
    attack_id = util.buildhelpers.get_attack_id(technique['object'])
    has_subtechniques = False
    if attack_id:
        # Check if technique not already in technique_list dict
        if attack_id not in technique_list:
            # Check if attack id is a sub-technique
            if util.buildhelpers.is_sub_tid(attack_id):
                has_subtechniques = True
                parent_id = util.buildhelpers.get_parent_technique_id(attack_id)
                # If parent technique not already in list, add to list and add current sub-technique
                if parent_id not in technique_list:
                    technique_list[parent_id] = {}
                    technique_list[parent_id]['subtechniques'] = []
                # NOTE: only the parent entry is created; the sub-technique id
                # itself is appended to the parent's list, not keyed directly.
                technique_list[parent_id]['subtechniques'].append(attack_id)
            # Attack id is regular technique
            else:
                # Add technique to list
                technique_list[attack_id] = {}
                technique_list[attack_id]['subtechniques'] = []
        # Check if parent ID was added by sub-technique
        # parent ID will not have description
        elif 'descr' not in technique_list[attack_id]:
            # Check if it has external references
            if technique['relationship'].get('description'):
                # Get filtered description
                technique_list[attack_id]['descr'] = True
    return has_subtechniques | b1132f1bdf2abc0084a284ef891fd78b716a602b | 3,630,683
def admin_view_semesters_of_a_curriculum(request, curriculum_id):
    """Render all semesters of a specific curriculum.

    Course-slot lists are padded to equal length so they can be transposed
    into table rows, and per-semester credit totals are computed by summing
    the highest course credit offered in each slot.
    """
    curriculum = Curriculum.objects.get(id=curriculum_id)
    semesters = Curriculum.get_semesters_objects(curriculum)
    semester_slots = [list(Semester.get_courseslots_objects(sem)) for sem in semesters]
    # Pad every semester's slot list to the longest one so zip() below
    # produces complete rows.
    max_length = max((len(slots) for slots in semester_slots), default=0)
    for course_slots in semester_slots:
        course_slots += [""] * (max_length - len(course_slots))
    semester_credits = []
    for semester in semesters:
        credits_sum = 0
        for course_slot in semester.courseslots:
            # A slot counts for the highest-credit course it offers.
            courses = course_slot.courses.all()
            credits_sum += max((course.credit for course in courses), default=0)
        semester_credits.append(credits_sum)
    # (debug print of semester_credits removed)
    transpose_semester_slots = list(zip(*semester_slots))
    return render(request, 'programme_curriculum/acad_admin/admin_view_semesters_of_a_curriculum.html', {'curriculum': curriculum, 'semesters': semesters, 'semester_slots': transpose_semester_slots, 'semester_credits': semester_credits})  # | 60be9bad528d4e144bc53370719b94b0692716c2 | 3,630,684
import requests
import os
def fetch_lightcurve_dr2(gaia_id, output_dir='../data/'):
    """ Fetch Gaia Lightcurve for a Gaia Source ID (of a variable star) from Gaia DR2 Data Link
    Returns path of csv file stored for given source
    Args:
        gaia_id (string): Gaia Source ID of the variable star to fetch the DR2 lightcurve for
        [output_dir] (string): Optional. Folder the lightcurve csv is saved to (default '../data/')
    Returns:
        String. path/to/lightcurve/filename.csv where the lightcurve is stored.
        Returns empty string if no lightcurve is fetched.
    """
    url = ('https://gea.esac.esa.int/data-server/data?ID=Gaia+DR2+' + gaia_id
           + '&RETRIEVAL_TYPE=EPOCH_PHOTOMETRY&FORMAT=CSV')
    save_path = output_dir + gaia_id + '_data_dr2.csv'
    read_data = requests.get(url, allow_redirects=True)
    # An empty body means DataLink has no epoch photometry for this source.
    if len(read_data.content) == 0:
        print('Could not fetch lightcurve from DR2 for Gaia Source ID ' + gaia_id)
        return ''
    # exist_ok avoids the check-then-create race of the previous version.
    os.makedirs(output_dir, exist_ok=True)
    # `with` closes the handle; the old open(...).write(...) leaked it.
    with open(save_path, 'wb') as out_file:
        out_file.write(read_data.content)
    return save_path  # | 18fb18ccb7cfcd2bbb8f826cbdf592ca59356ef9 | 3,630,685
from typing import Awaitable
from re import T
async def _aw_to_coro(aw: Awaitable[T]) -> T:
"""Wrap a given awaitable so it appears as a coroutine."""
return await aw | 5ac8301fa23fb9c231bfb68be43cee70cdeb8b8e | 3,630,686 |
from typing import Union
def get_text_recursive(tag: Union[Tag, NavigableString, None]) -> str:
    """Recursively extract readable text from a bs4 tag via its children.

    Headers get blank lines around them, list items become "- " bullets,
    table rows become "- a; b; ..." lines; other tags are joined with the
    separator chosen from the TAGS_* constants (defined elsewhere in this
    file).
    """
    if tag is None:
        return ""
    # Leaf text node: flatten newlines into spaces.
    if isinstance(tag, NavigableString):
        return str(tag).strip().replace("\n", " ")
    tag_name = tag.name
    # Special tags, like headers, lists, tables, ...
    if tag_name in TAGS_HEADERS:
        # Surround header text with blank lines.
        return (
            "\n" * 2
            + " ".join(
                filter(
                    lambda x: len(x) > 0,
                    [get_text_recursive(x) for x in tag.children if x is not None],
                )
            )
            + "\n"
        )
    if tag_name in {"ul", "ol"}:
        # One "- item" bullet per non-empty child.
        return (
            "\n".join(
                map(
                    lambda x: "- " + x,
                    filter(
                        lambda x: len(x) > 0,
                        [get_text_recursive(x) for x in tag.children if x is not None],
                    ),
                )
            )
            + "\n" * 2
        )
    if tag_name == "tr":
        # Table row: cells joined with "; " behind a single bullet.
        return (
            "- "
            + "; ".join(
                filter(
                    lambda x: len(x) > 0,
                    [get_text_recursive(x) for x in tag.children if x is not None],
                ),
            )
            + "\n" * 2
        )
    # What should be the end, after the text of the element
    if tag_name in TAGS_FORCE_NEWLINE:
        end = "\n"
    elif tag_name in TAGS_SPACE:
        end = " "
    else:
        end = ""
    return (
        " ".join(
            filter(
                lambda x: len(x) > 0, [get_text_recursive(x) for x in tag.children if x is not None]
            )
        )
        + end
    ) | 012019d845825f6cdba4d2d1883cd0b3940cd4d3 | 3,630,687
def _l1_regularization(l1, model):
"""Computes the L1 regularization for the given model
Args:
l1 (float): L1 parameter
model (:obj:`torch.nn.Module`): Model to use
Returns:
float: L1 loss (i.e. l1 * l1_norm(params))
"""
l1_loss = sum(param.norm(1) for param in model.parameters())
return l1 * l1_loss | 32826672a7de00f8a0412e2496e6ebfea213b502 | 3,630,688 |
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding"""
    # `Conv2D` with `bias_attr` suggests the PaddlePaddle API -- presumably
    # imported at module level (not visible here).  padding=1 keeps the
    # spatial size unchanged when stride is 1; no bias term is used.
    return Conv2D(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=1, bias_attr=False) | 55a4e869297ed1faccdabfcddae6a61855a67efb | 3,630,689
import torch
def create_feature_extractor(model, device=None):
    """
    Factory function for creating an evaluator for supervised models
    Args:
        model (`torch.nn.Module`): the model to evaluate
        device (str, optional): device type specification (default: None).
            Applies to both model and batches.
    Returns:
        Engine: an evaluator engine with supervised inference function
    """
    def _inference(engine, batch):
        # (removed dead `global ITER` and commented-out debug print)
        model.eval()
        with torch.no_grad():
            data, camid, date = batch
            # Move the batch to the GPU only when one is available.
            data = data.to(device) if torch.cuda.device_count() >= 1 else data
            feat = model(data)
            return feat, camid, date
    return Engine(_inference)  # | d39bf294c7a667e6319eda27b4436c4ce6688d65 | 3,630,690
def callit(iteratee, *args, **kwargs):
    """Inspect argspec of `iteratee` function and only pass the supported arguments when calling
    it."""
    total = len(args)
    # Only call getargcount() when the caller did not supply `argcount`.
    if "argcount" in kwargs:
        count = kwargs["argcount"]
    else:
        count = getargcount(iteratee, total)
    return iteratee(*args[: min(total, count)])  # | 2a23ad787929e7f50e6a38447c2a6fc458fc528e | 3,630,691
def get_doc_tokens(paragraph_text):
    """Tokenize the given paragraph and return character to word token offset for answer ranges"""
    doc_tokens = []
    char_to_word_offset = []
    prev_is_whitespace = True
    for char in paragraph_text:
        if is_whitespace(char):
            prev_is_whitespace = True
        else:
            if prev_is_whitespace:
                doc_tokens.append(char)     # start a new token
            else:
                doc_tokens[-1] += char      # extend the current token
            prev_is_whitespace = False
        # Every character (incl. whitespace) maps to the latest token index.
        char_to_word_offset.append(len(doc_tokens) - 1)
    return doc_tokens, char_to_word_offset  # | bbe0374877ee19f2d9c31298f84201e8c5261a13 | 3,630,692
def ispython(script_path):
    """
    Check to see if file is a python script file by extension
    :param script_path: path of the file to check
    :return: result of hasextension(script_path, ".py") -- presumably a bool;
        hasextension is a module-level helper not visible here.
    """
    return hasextension(script_path, ".py") | 2d2c13df1eff659fb12c502b64eb5ed81b04466e | 3,630,693
def num_to_name(num):
    """
    (int) -> str
    Get IO pin name from its numeric identifier.

    * 0-27    -> Raspberry Pi GPIO pins ("gpio_<n>")
    * 100-113 -> Arduino digital pins ("D<n-100>")
    * 114+    -> Arduino analog pins ("A<n-114>")

    >>> num_to_name(8)
    'gpio_8'
    >>> num_to_name(107)
    'D7'
    >>> num_to_name(115)
    'A1'
    """
    # Pi pin numeric identifiers are the actual GPIO number
    if 0 <= num <= 27:
        return "gpio_" + str(num)
    # 100 is added to Arduino digital pin numeric identifiers
    if 100 <= num <= 113:
        return "D" + str(num - 100)
    # 114 is added to Arduino analog pin numeric identifiers
    if num >= 114:
        return "A" + str(num - 114)
    # Previously ids 28-99 (and negatives) fell through and raised
    # UnboundLocalError; fail explicitly instead.
    raise ValueError("no pin mapping for numeric identifier %r" % num)  # | 202a7be3831ad9adbca68a3ff46587775b467ca2 | 3,630,694
import os
def load_cifar_datasets(data_dir):
    """Load CIFAR10 and SVHN dataset from np array."""
    filenames = {
        'tr_in': 'cifar10_train.npy',
        'val_in': 'cifar10_val.npy',
        'test_in': 'cifar10_test.npy',
        'test_ood': 'svhn_cropped_test.npy',  # val_ood is val_in_grey
    }
    return {
        split: load_tfdata_from_np(os.path.join(data_dir, fname))
        for split, fname in filenames.items()
    }  # | 6e1af6bc036dcc5cfebe6be54db0c714646948e6 | 3,630,695
def ConnectToDb_Return_Df_table(id,pwd,host,db_name,table_name):
    """
    Connect to the database and return the requested table as a DataFrame.
    Better to make it a singleton to ensure multiple db connections are not spawned
    :param id: database user name
    :param pwd: database password
    :param host: database host
    :param db_name: database to connect to
    :param table_name: table to read in full
    :return: pandas.DataFrame with the table's rows
    """
    # making connection object
    conn = mysql.connector.connect(
        user=id,
        password=pwd,
        host=host,
        database=db_name)
    # (removed unused cursor; pd.read_sql_query manages its own cursor)
    try:
        # NOTE: table_name is interpolated into the SQL -- it must come from a
        # trusted source, since identifiers cannot be bound as parameters.
        query = ("SELECT * FROM "+ table_name)
        # Reading the query result to a dataframe
        df = pd.read_sql_query(query, conn)
    finally:
        # Close the connection even when the query fails (was leaked before).
        conn.close()
    return df  # | af1fca9a0c7cacd22bb9fbb032eee0599b7158bd | 3,630,696
from typing import List
# BUG FIX: was `from sys import path` -- sys.path is a list and has no
# `join`, so `path.join(...)` below always raised AttributeError when
# write_spec_copy=True (the default).
from os import path
def update_model(
    model_artifact,
    parameters: dict = None,
    metrics: dict = None,
    extra_data: dict = None,
    inputs: List[Feature] = None,
    outputs: List[Feature] = None,
    feature_vector: str = None,
    feature_weights: list = None,
    key_prefix: str = "",
    labels: dict = None,
    write_spec_copy=True,
):
    """Update model object attributes
    this method will edit or add attributes to a model object
    example::
        update_model(model_path, metrics={'speed': 100},
                     extra_data={'my_data': b'some text', 'file': 's3://mybucket/..'})
    :param model_artifact: model artifact object or path (store://..) or DataItem
    :param parameters:     parameters dict
    :param metrics:        model metrics e.g. accuracy
    :param extra_data:     extra data items key, value dict
                           (value can be: path string | bytes | artifact)
    :param inputs:         list of input features (feature vector schema)
    :param outputs:        list of output features (output vector schema)
    :param feature_vector: feature store feature vector uri (store://feature-vectors/<project>/<name>[:tag])
    :param feature_weights: list of feature weights, one per input column
    :param key_prefix:     key prefix to add to metrics and extra data items
    :param labels:         metadata labels
    :param write_spec_copy: write a YAML copy of the spec to the target dir
    """
    # Accept a DataItem-like object that exposes its store URL.
    if hasattr(model_artifact, "artifact_url"):
        model_artifact = model_artifact.artifact_url
    if isinstance(model_artifact, ModelArtifact):
        model_spec = model_artifact
    elif is_store_uri(model_artifact):
        model_spec, _ = store_manager.get_store_artifact(model_artifact)
    else:
        raise ValueError("model path must be a model store object/URL/DataItem")
    if not model_spec or model_spec.kind != "model":
        raise ValueError(f"store artifact ({model_artifact}) is not model kind")
    # Merge (not replace) parameter/metric/label dicts.
    if parameters:
        for key, val in parameters.items():
            model_spec.parameters[key] = val
    if metrics:
        for key, val in metrics.items():
            model_spec.metrics[key_prefix + key] = val
    if labels:
        for key, val in labels.items():
            model_spec.labels[key] = val
    if inputs:
        model_spec.inputs = inputs
    if outputs:
        model_spec.outputs = outputs
    if feature_weights:
        model_spec.feature_weights = feature_weights
    if feature_vector:
        model_spec.feature_vector = feature_vector
    if extra_data:
        # Artifact-like values are replaced by their target path before upload.
        for key, item in extra_data.items():
            if hasattr(item, "target_path"):
                extra_data[key] = item.target_path
        upload_extra_data(model_spec, extra_data, prefix=key_prefix, update_spec=True)
    if write_spec_copy:
        spec_path = path.join(model_spec.target_path, model_spec_filename)
        store_manager.object(url=spec_path).put(model_spec.to_yaml())
    model_spec.db_key = model_spec.db_key or model_spec.key
    mlrun.get_run_db().store_artifact(
        model_spec.db_key,
        model_spec.to_dict(),
        model_spec.tree,
        iter=model_spec.iter,
        project=model_spec.project,
    )
    return model_spec  # | 0d3d7fdb152614fc7269356819cb302c3cbef8e0 | 3,630,697
import os
import sys
from io import StringIO
import traceback
def runscript(scriptname, args, in_directory=None,
              fail_ok=False, sandbox=False):
    """Run a Python script using exec().
    Run the given Python script, with the given args, in the given directory,
    using 'exec'. Mimic proper shell functionality with argv, and capture
    stdout and stderr.
    When using :attr:`fail_ok`=False in tests, specify the expected error.

    :return: (status, out, err) -- the exit status plus the captured
        stdout/stderr text.  Asserts (unless fail_ok) when status != 0.
    """
    # Build an argv for the script: script name followed by its args.
    sysargs = [scriptname]
    sysargs.extend(args)
    cwd = os.getcwd()
    try:
        status = -1
        # Swap in our argv and capture streams; originals restored in finally.
        oldargs = sys.argv
        sys.argv = sysargs
        oldout, olderr = sys.stdout, sys.stderr
        sys.stdout = StringIO()
        # Some code inspects stdout.name; give the StringIO a plausible one.
        sys.stdout.name = "StringIO"
        sys.stderr = StringIO()
        if in_directory:
            os.chdir(in_directory)
        else:
            in_directory = cwd
        try:
            # Progress messages go to the *real* stdout, not the capture.
            print('running:', scriptname, 'in:', in_directory, file=oldout)
            print('arguments', sysargs, file=oldout)
            # _runscript is a module-level helper defined elsewhere.
            status = _runscript(scriptname, sandbox=sandbox)
        except SystemExit as err:
            # Treat sys.exit() as a normal exit with its code.
            status = err.code
        except:  # pylint: disable=bare-except
            # Any other failure: record the traceback in captured stderr.
            traceback.print_exc(file=sys.stderr)
            status = -1
    finally:
        # Always restore argv, streams and the working directory.
        sys.argv = oldargs
        out, err = sys.stdout.getvalue(), sys.stderr.getvalue()
        sys.stdout, sys.stderr = oldout, olderr
        os.chdir(cwd)
    if status != 0 and not fail_ok:
        print(out)
        print(err)
        assert False, (status, out, err)
    return status, out, err | 51296e78ceb41c3f30454baef0f74a1e3ee45658 | 3,630,698
def n_prop_vs_rec(sorted_props, gt, n=100):
    """
    Evaluate the top-n (pre-sorted) proposals against ground truth at IoU
    thresholds 0.3 and 0.5.

    :param sorted_props: [[prop_0], [prop_1], ... , [prop_N]] in score order
    :param gt: ground truth
    :param n: num of props to keep for evaluation (default = 100)
    :return: tp(thrs=0.3), tp(thrs=0.5), fn(thrs=0.3), fn(thrs=0.5)
    """
    # Convert each of the top-n boxes from corner form (x1, y1, x2, y2) to
    # (x, y, w, h), each wrapped in its own single-element list.
    top_props = [
        [[box[0], box[1], box[2] - box[0], box[3] - box[1]]]
        for box in sorted_props[:n]
    ]
    # For large n this loop over thresholds could be paralleled.
    tp_03, _, fn_03, _ = dect_is(top_props, gt, 0.3)
    tp_05, _, fn_05, _ = dect_is(top_props, gt, 0.5)
    return tp_03, tp_05, fn_03, fn_05  # | 558c4fcccd08329ed97b1dbbba87b8d8fca883e8 | 3,630,699
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.