content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def gumbel_softmax(log_pi, tau=0.1, axis=1):
    """Gumbel-Softmax sampling function.
    This function draws samples :math:`y_i` from Gumbel-Softmax distribution,
    .. math::
        y_i = {\\exp((g_i + \\log\\pi_i)/\\tau)
        \\over \\sum_{j}\\exp((g_j + \\log\\pi_j)/\\tau)},
    where :math:`\\tau` is a temperature parameter and
    :math:`g_i` s are samples drawn from
    Gumbel distribution :math:`Gumbel(0, 1)`
    See `Categorical Reparameterization with Gumbel-Softmax \
    <https://arxiv.org/abs/1611.01144>`_.
    Args:
        log_pi (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
        :class:`cupy.ndarray`): Input variable representing pre-normalized
            log-probability :math:`\\log\\pi`.
        tau (:class:`~float` or :class:`~chainer.Variable`): \
            Input variable representing temperature :math:`\\tau`.
    Returns:
        ~chainer.Variable: Output variable.
    """
    # Pick numpy or cupy depending on where log_pi lives.
    xp = backend.get_array_module(log_pi)
    # A 0-dim input has a single category, so its softmax is trivially 1.
    if log_pi.ndim < 1:
        return variable.Variable(xp.ones((), log_pi.dtype))
    dtype = log_pi.dtype
    # Standard Gumbel(0, 1) noise with the input's shape and dtype.
    g = xp.random.gumbel(size=log_pi.shape).astype(dtype)
    # Softmax of temperature-scaled, noise-perturbed logits.
    y = chainer.functions.softmax((log_pi + g) / tau, axis=axis)
    return y
def has_active_lease(storage_server, storage_index, now):
    """
    Report whether a storage index currently holds an unexpired lease.

    :param allmydata.storage.server.StorageServer storage_server: A storage
        server to use to look up lease information.
    :param bytes storage_index: A storage index to use to look up lease
        information.
    :param float now: The current time as a POSIX timestamp.
    :return bool: ``True`` if and only if the given storage index has a lease
        with an expiration time after ``now``.
    """
    for lease in storage_server.get_slot_leases(storage_index):
        if lease.get_expiration_time() > now:
            return True
    return False
from typing import List
import os
def _enumerate_files(path: str, parts: List[str], repl_list1: List[str], repl_list2: List[str] = None) -> str:
""" Enumerate all possible file names """
if len(parts) <= 2:
for token in repl_list1:
parts[-1] = token
candidate = os.path.join(path, ''.join(parts))
if os.path.isfile(candidate):
return candidate
else:
assert len(parts) == 4
for p2 in repl_list1:
parts[1] = p2
for p4 in repl_list2:
parts[3] = p4
candidate = os.path.join(path, ''.join(parts))
if os.path.isfile(candidate):
return candidate
return None | a227d9727efe27038f40b814cce6647cb0c75862 | 31,402 |
def MXfunc(A, At, d1, p1):
    """
    Compute P^{-1}X (PCG)
    y = P^{-1}*x

    Builds a linear operator applying a diagonal preconditioner for use in
    preconditioned conjugate gradient solvers.
    """
    # NOTE(review): A, At and d1 are unused here; p1 appears to hold the
    # diagonal entries of P^{-1} -- confirm against the caller.
    def matvec(x):
        # Element-wise product == multiplication by a diagonal matrix.
        return p1 * x
    N = p1.shape[0]
    # ``LinearOperator`` is presumably scipy.sparse.linalg.LinearOperator,
    # imported elsewhere in this module.
    return LinearOperator((N, N), matvec=matvec)
from ._filter import filter_
from typing import Callable
def filter(predicate: Predicate[_T]) -> Callable[[Observable[_T]], Observable[_T]]:
    """Filters the elements of an observable sequence based on a
    predicate.
    .. marble::
        :alt: filter
        ----1---2---3---4---|
        [ filter(i: i>2) ]
        ------------3---4---|
    Example:
        >>> op = filter(lambda value: value < 10)
    Args:
        predicate: A function to test each source element for a
            condition.
    Returns:
        An operator function that takes an observable source and
        returns an observable sequence that contains elements from the
        input sequence that satisfy the condition.
    """
    # Thin public wrapper: the concrete operator lives in ._filter.
    return filter_(predicate)
def scb_to_unit(scb):
    """Convert codes used by Statistics Sweden to units used by the NAD GIS files."""
    # Prefix with the country code and right-pad the SCB code with zeros to
    # 9 characters, matching the index format of the module-level ``g_units``
    # table (presumably a pandas DataFrame -- confirm).
    scbform = 'SE/' + '{:0<9}'.format(scb)
    if scbform in g_units.index:
        return g_units.loc[scbform, 'G_unit']
    else:
        # Unknown code: 0 serves as the "no unit" sentinel.
        return 0
import os
def revision_pattern_from_build_bucket_path(bucket_path):
    """Get the revision pattern from a build bucket path.

    The pattern is the path's final component prefixed with a lazy
    wildcard, so it can match anywhere in a candidate string.
    """
    filename = os.path.basename(bucket_path)
    return '.*?' + filename
def img_to_square(im_pic):
    """Crop an image to a square.

    The longer dimension is trimmed down to the shorter one; the crop
    window is offset along the longer axis by the golden ratio (0.618)
    rather than being centred.

    :param im_pic: a PIL image (anything exposing ``size`` and ``crop``)
    :return: the cropped square region
    """
    width, height = im_pic.size
    side = min(width, height)
    if width >= height:
        offset = (width - height) * 0.618
        crop_box = (offset, 0, offset + side, side)
    else:
        offset = (height - width) * 0.618
        crop_box = (0, offset, side, offset + side)
    return im_pic.crop(crop_box)
from typing import List
def load_sentence(
    filename: str,
    with_symbol: bool=True
) -> List[str]:
    """Load a whitespace-tokenized corpus, one sentence per line.

    Args:
        filename: Path of the corpus file.
        with_symbol: When True, append ``config.END_SYMBOL`` to every
            sentence.

    Returns:
        A list of token lists, one per input line.
    """
    # Use a context manager: the original leaked the file handle (it called
    # open() inside a generator expression and never closed it).
    with open(filename) as f:
        sentences = [line.strip().split() for line in f]
    if with_symbol:
        tokens = [sent + [config.END_SYMBOL] for sent in sentences]
    else:
        tokens = sentences
    print("loaded sentences from {}".format(filename))
    return tokens
def to_str(bytes_or_str):
    """Coerce a ``bytes`` or ``str`` value to ``str``.

    ``bytes`` input is decoded as UTF-8; any other input is returned
    unchanged.
    """
    if isinstance(bytes_or_str, bytes):
        return bytes_or_str.decode('utf-8')
    return bytes_or_str
def configure_ampliseq(request):
    """View for ampliseq.com importing stuff"""
    # Build the template context via the module-level helper.
    ctx = get_ctx_ampliseq(request)
    # Old-style Django render_to_response with RequestContext (pre-1.10 API).
    return render_to_response(
        "rundb/configure/ampliseq.html", ctx, context_instance=RequestContext(request)
    )
import struct
def _copy(s):
    """Creates a new set from another set.
    Args:
        s: A set, as returned by `sets.make()`.
    Returns:
        A new set containing the same elements as `s`.
    """
    # NOTE(review): this looks like Bazel Starlark, not Python -- ``struct``
    # here is the Starlark constructor (keyword form), not the stdlib struct
    # module imported above. Copying the backing dict yields an independent set.
    return struct(_values = dict(s._values))
import random
def particle_movement_x(time):
    """
    Simulate a one-dimensional random walk along the X axis.

    Parameter:
        time (int): Time step
    Return:
        x (int): X position
    """
    position = 0
    steps = [1, -1]
    for _ in range(time):
        position += random.choice(steps)
    return position
def find_match_characters(string, pattern):
    """Locate each character of *pattern* inside *string*, left to right.

    Every pattern character is searched for starting just after the
    previous match, and the first character of both strings must coincide.

    Args:
        string: Text to search in.
        pattern: Characters to locate, in order.

    Returns:
        A list of ``(char, index)`` pairs; ``[]`` when either input is
        empty, the first characters differ, or some character cannot be
        found in order.
    """
    if not string or not pattern:
        return []
    if string[0] != pattern[0]:
        return []
    pairs = []
    search_from = 0
    for ch in pattern:
        pos = string.find(ch, search_from)
        if pos < 0:
            return []
        pairs.append((ch, pos))
        search_from = pos + 1
    return pairs
import itertools
def cross_product_configs(**configs):
    """Generate the cross product of user-supplied config values.

    For example, given ``M=(1, 2), N=(4, 5)`` this produces
    ``(({'M': 1}, {'N': 4}), ({'M': 1}, {'N': 5}),
    ({'M': 2}, {'N': 4}), ({'M': 2}, {'N': 5}))``.

    Returns:
        A list of tuples; each tuple holds one single-key dict per config
        attribute.
    """
    per_attr_choices = [
        [{name: value} for value in values]
        for name, values in configs.items()
    ]
    # TODO(mingzhe0908) remove the conversion to list.
    # itertools.product produces an iterator that produces element on the fly
    # while converting to a list produces everything at the same time.
    return list(itertools.product(*per_attr_choices))
from typing import Union
def columnwise_normalize(X: np.ndarray) -> Union[None, np.ndarray]:
    """Standardize each column to zero mean and unit variance.

    Returns ``None`` unchanged when the input is ``None``.
    """
    if X is None:
        return None
    col_means = np.mean(X, 0)
    col_stds = np.std(X, 0)
    return (X - col_means) / col_stds
import os
def fq_classification(fqclass, verbose=False):
    """
    Read the fastq classification file
    :param fqclass: the classification file that has the file name and then arbitrary classifications separated by tabs
    :param verbose: more output
    :return: a dict mapping file basename to its tab-joined classification.
        Shorter rows are padded with "None" so all entries have the same
        number of fields.
    """
    per_file = {}
    widest = 0
    with open(fqclass, 'r') as fh:
        for line in fh:
            fields = line.strip().split("\t")
            widest = max(widest, len(fields) - 1)
            # Key on the basename, stripping any directory components.
            base = fields[0].split(os.path.sep)[-1]
            per_file[base] = fields[1:]
    # Pad shorter rows so every value has the same number of columns.
    for base in per_file:
        per_file[base].extend(["None"] * (widest - len(per_file[base])))
    return {base: "\t".join(cols) for base, cols in per_file.items()}
def line_intersect(a1, da, b1, db):
    """
    compute intersection of infinitely long lines

    Lines are in point/direction form: ``a1 + t*da`` and ``b1 + s*db``.
    Returns the intersection point, evaluated along line b.
    """
    # Vector from b1 to a1.
    dba = np.array(a1) - np.array(b1)
    # ``perp`` is a module-level helper; presumably returns a vector
    # perpendicular to ``da`` (2-D perp-dot trick) -- confirm.
    da_perpendicular = perp(da)
    num = np.dot(da_perpendicular, dba)
    denom = np.dot(da_perpendicular, db)
    # Parameter of the intersection along line b. Division by zero here
    # means the lines are parallel (not guarded).
    dist_b = (num / denom)
    return dist_b*db + b1
def sequence_(t : r(e(a))) -> e(Unit):
    """
    sequence_ :: (Foldable r, Monad e) => r (e a) -> e ()
    Evaluate each monadic action in the structure from left to right, and
    ignore the results. For a version that doesn't ignore the results see
    sequence.
    As of base 4.8.0.0, sequence_ is just sequenceA_, specialized to Monad.
    """
    # ``r``, ``e``, ``a`` and ``Unit`` are type markers defined elsewhere in
    # this Haskell-emulation module (the annotations are evaluated at def
    # time). The implementation simply delegates to sequenceA_.
    return sequenceA_(t)
import re
def param_validated(param, val):
    """Check *val* against the validation pattern registered for *param*.

    Returns True when the value matches its pattern, or when no patterns
    are configured at all; returns False (after logging) on a failed match
    or when patterns exist but none is registered for *param*.
    """
    if param in validation_dict:
        pattern = validation_dict[param]
        if re.match(rf'{pattern}', val) is None:
            log.error("Validation failed for param='%s', "
                      "val='%s'. Values must match the pattern '%s'."
                      % (param, val, pattern))
            return False
        return True
    if validation_dict:
        log.error("Validation for param='%s' not found in the "
                  "requirements file." % param)
        return False
    return True
def remove_keys_from_array(array, keys):
    """Remove each element of *keys* from *array* in place.

    Each key must be present: ``list.remove`` drops only the first
    occurrence and raises ``ValueError`` for a missing key.

    :param array: list to modify in place
    :param keys: iterable of elements to remove
    :return: the same (mutated) list, for convenience
    """
    for unwanted in keys:
        array.remove(unwanted)
    return array
def create_tree_data(codepage, options, target_node, pos):
    """Build the payload consumed by the Dijit Tree widget.

    Each option contributes one node whose id is ``codepage + code`` and
    whose label combines the code and the option label.
    """
    nodes = []
    for option in options:
        code = option['code']
        full_code = codepage + code
        add_tree_node(nodes, full_code, code + "-" + option['label'], full_code)
    return {
        'node_name': TREE_NODE_NAME,
        'node_obj': nodes,
        'target_node': target_node,
        'pos': pos,
        'type': 'application'
    }
def detect_octavia():
    """
    Determine whether the underlying OpenStack is using Octavia or not.
    Returns True if Octavia is found in the region, and False otherwise.
    """
    try:
        region = _load_creds()['region']
        for catalog in _openstack('catalog', 'list'):
            if catalog['Name'] != 'octavia':
                continue
            endpoints = catalog.get('Endpoints', [])
            if any(ep['region'] == region for ep in endpoints):
                return True
    except Exception:
        # Best effort: log the failure and report "no Octavia".
        log_err('Error while trying to detect Octavia\n{}', format_exc())
    return False
def add_cache_control_header(response):
    """Disable caching for non-static endpoints.

    Sets ``Cache-Control: no-cache, no-store, must-revalidate`` unless the
    response already carries a Cache-Control header, then returns the
    response.
    """
    if "Cache-Control" in response.headers:
        return response
    response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
    return response
from typing import List
def clean_new_import_aliases(
    import_aliases: List[ImportAlias],
) -> List[ImportAlias]:
    """Clean up a list of import aliases.

    Sorts the aliases by their evaluated name and strips a trailing comma
    from the final alias, if any.

    Args:
        import_aliases: LibCST ``ImportAlias`` nodes (may be empty).

    Returns:
        A new, sorted list of aliases.
    """
    # Sort by the dotted name each alias evaluates to.
    cleaned_import_aliases = sorted(import_aliases, key=lambda n: n.evaluated_name)
    # Guard: the original indexed [-1] unconditionally and raised IndexError
    # on an empty input list.
    if not cleaned_import_aliases:
        return cleaned_import_aliases
    # Remove a trailing comma from the last alias.
    last_name = cleaned_import_aliases[-1]
    if last_name.comma != MaybeSentinel.DEFAULT:
        cleaned_import_aliases[-1] = last_name.with_changes(comma=MaybeSentinel.DEFAULT)
    return cleaned_import_aliases
def _format_td(timedelt):
"""Format a timedelta object as hh:mm:ss"""
if timedelt is None:
return ''
s = int(round(timedelt.total_seconds()))
hours = s // 3600
minutes = (s % 3600) // 60
seconds = (s % 60)
return '{:02d}:{:02d}:{:02d}'.format(hours, minutes, seconds) | 071f25c3c8cfc75cacf2fedc7002527897362654 | 31,425 |
def add_quad_reaction_node(graph, rxn):
    """
    Adds a "Quad Reaction Node" (QRN) group of nodes to a graph, and connects
    them to the correct compound nodes.
    The QRN consists of two nodes constituting the intended forward direction
    of the reaction and two nodes constituting the reverse direction. Each pair
    of nodes is connected by an edge in the direction of the reaction. Each node
    represents a group of compounds on one side of the reaction equation.
    """
    # Make sure the reaction is in good shape
    rxn_malformed = False
    try:
        rxn_id = rxn['_id']
    except:
        # Bare except: any failure to read the id marks the reaction malformed.
        rxn_malformed = True
    try:
        # Each Reactants/Products entry is expected to be a (count, id)-style
        # pair; index 1 holds the compound id.
        reactants_f = set([x[1] for x in rxn['Reactants']])
        products_f = set([x[1] for x in rxn['Products']])
        # The reverse direction simply swaps the two sides.
        reactants_r = products_f
        products_r = reactants_f
    except:
        rxn_malformed = True
    if rxn_malformed:
        # ``s_err`` is a module-level warning/stderr helper.
        s_err("Warning: Reaction '%s' is malformed.\n" % str(rxn))
        return graph
    # Find the compound nodes of the reactants and the products
    rf = set([])
    pf = set([])
    rr = set([])
    pr = set([])
    for c_mid in reactants_f:
        try:
            # cmid2node maps compound ids to graph node ids.
            node = graph.graph['cmid2node'][c_mid]
            rf.add(node)
            pr.add(node)
        except KeyError:
            # If a reactant is missing, the reaction should not be added
            s_err("Warning: Compound '" + c_mid + "' in reaction '" + rxn_id + \
            "' is missing. Reaction nodes were not added to the network.\n")
            return graph
    for c_mid in products_f:
        try:
            node = graph.graph['cmid2node'][c_mid]
            pf.add(node)
            rr.add(node)
        except KeyError:
            # If a product is missing, the reaction should not be added
            s_err("Warning: Compound '" + c_mid + "' in reaction '" + rxn_id + \
            "' is missing. Reaction nodes were not added to the network.\n")
            return graph
    # Create the reaction nodes. New node ids continue from the current node
    # count; the four QRN nodes get consecutive ids.
    N = len(graph.nodes()) + 1
    graph.add_node(N, type='rf', mid=rxn_id, c=rf)
    for c_node in rf:
        # ``check_connection`` is a module-level predicate gating edge creation.
        if check_connection(graph, c_node, N):
            graph.add_edge(c_node, N)
    N += 1
    graph.add_node(N, type='pf', mid=rxn_id, c=pf)
    for c_node in pf:
        if check_connection(graph, c_node, N):
            graph.add_edge(N, c_node)
    graph.add_edge(N-1, N) # Forward reaction edge
    N += 1
    graph.add_node(N, type='rr', mid=rxn_id, c=rr)
    for c_node in rr:
        if check_connection(graph, c_node, N):
            graph.add_edge(c_node, N)
    N += 1
    graph.add_node(N, type='pr', mid=rxn_id, c=pr)
    for c_node in pr:
        if check_connection(graph, c_node, N):
            graph.add_edge(N, c_node)
    graph.add_edge(N-1, N) # Reverse reaction edge
    return graph
import os
def api_dataset(dataset):
    """
    Return a list of available years

    Responds with JSON produced by ``find_maps`` for the dataset directory,
    or a plain error string when the directory does not exist.
    """
    # ``geodata_dir`` is a module-level configuration value.
    path = os.path.join(geodata_dir, dataset)
    if os.path.exists(path):
        return jsonify(find_maps(path))
    else:
        # NOTE(review): returned with HTTP 200; consider a 404 instead.
        return 'error: cannot find dataset: %s' % dataset
def matrixFilter(np_image_2D, np_mask):
    """
    Processing filtering with given matrix
    Keyword argument:
    np_image_2D -- two dimensional image(grayscale or single color channel)
    np_mask -- mask matrix as numpy array
    Return:
    np_image_fil -- image as numpy 2D array, after specified filtering
    """
    # Mask is assumed square; dir_size is its half-width (radius).
    size = np_mask.shape[0]
    dir_size = int((size-1)/2)
    mask_sum = np_mask.sum()
    # Start from a copy so border pixels keep their original values.
    np_image_fil = np.copy(np_image_2D)
    x_max, y_max = np_image_2D.shape
    for index, x in np.ndenumerate(np_image_2D):
        # Only pixels whose full window fits inside the image are filtered;
        # the border (and the last in-range row/column, due to the -1) is
        # left untouched.
        if (index[0] >= dir_size and index[0] < x_max-dir_size-1 and index[1] >= dir_size and index[1] < y_max-dir_size-1):
            # ``bs.getWindow`` is a module-level helper extracting the
            # rectangular neighbourhood around ``index``.
            np_window = bs.getWindow(np_image_2D, index, dir_size, struct_elem='rect')
            np_window = np_window * np_mask
            window_sum = np_window.sum()
            # Normalize by the mask sum unless the mask sums to zero
            # (e.g. edge-detection kernels).
            new_value = window_sum / mask_sum if mask_sum !=0.0 else window_sum
            # Clamp into the valid 8-bit range.
            new_value = new_value if new_value <=255 else 255
            new_value = new_value if new_value >=0 else 0
            np_image_fil[index[0], index[1]] = new_value
    return np_image_fil.astype(np.uint8)
def batch_split_axis(batch, n_split):
    """Reshapes batch to have first axes size equal n_split.

    Args:
        batch: Tuple ``(x, y)`` of arrays sharing the same first-axis size.
        n_split: Number of chunks; must divide the batch size evenly.

    Returns:
        Tuple of arrays reshaped to ``[n_split, batch_size / n_split, ...]``.
    """
    x, y = batch
    batch_size = x.shape[0]
    per_split, remainder = divmod(batch_size, n_split)
    assert remainder == 0, (
        "First axis cannot be split: batch dimension was {} when "
        "n_split was {}.".format(x.shape[0], n_split))
    return tuple(arr.reshape([n_split, per_split, *arr.shape[1:]]) for arr in (x, y))
def right_align(value, length):
    """Right-align *value* within a field of *length* characters.

    The value is converted via the module's ``text`` helper, left-padded
    with spaces when short, and truncated from the left when too long.

    :param value: string to right align
    :param length: the number of characters to output (spaces added to left)
    :return: the aligned string ('' when length <= 0)
    """
    if length <= 0:
        return u""
    rendered = text(value)
    if len(rendered) >= length:
        return rendered[-length:]
    return rendered.rjust(length)
import yaml
from pathlib import Path
def test_disable_functions_as_notebooks(backup_spec_with_functions):
    """
    Tests a typical workflow with a pipeline where some tasks are functions
    """
    # Disable notebook rendering of function tasks in the fixture's spec.
    with open('pipeline.yaml') as f:
        spec = yaml.safe_load(f)
    spec['meta']['jupyter_functions_as_notebooks'] = False
    Path('pipeline.yaml').write_text(yaml.dump(spec))
    cm = PloomberContentsManager()
    def get_names(out):
        # Names of the entries in a contents-manager directory listing.
        return {model['name'] for model in out['content']}
    assert get_names(cm.get('')) == {'my_tasks', 'pipeline.yaml'}
    assert get_names(cm.get('my_tasks')) == {'__init__.py', 'clean', 'raw'}
    # With the flag off, only the plain .py sources appear (no generated
    # notebooks for the function tasks).
    assert get_names(cm.get('my_tasks/raw')) == {
        '__init__.py',
        'functions.py',
    }
    assert get_names(cm.get('my_tasks/clean')) == {
        '__init__.py',
        'functions.py',
        'util.py',
    }
import builtins
def proxy_gettext(*args, **kwargs):
    """Proxy the call to the real built-in ``gettext()`` function.

    Not required for normal operation of the application (could be left
    unimported) but useful during development. Assumes ``gettext`` has
    already been installed into ``builtins``; a missing installation is a
    deliberate KeyError rather than being papered over here.
    """
    translate = builtins.__dict__['gettext']
    return translate(*args, **kwargs)
def generate_users_data(users):
    """
    Generate users' rows (assuming each user's password is the default one).
    :param users: iterable of user objects exposing ``username``
    :return: list of rows with the header row first, suitable for export
    """
    # Hebrew column headers: "username", "password".
    headers = ['שם משתמש', 'סיסמה']
    # DEFAULT_TEAM_USER_PASSWORD is a module-level constant.
    rows = [[user.username, DEFAULT_TEAM_USER_PASSWORD] for user in users]
    rows.insert(0, headers)
    return rows
def ListToMatrix(lv):
    """Convert a list of 3 or 4 ``c4d.Vector`` to ``c4d.Matrix``.

    A 4-vector list is interpreted as ``(off, v1, v2, v3)``; a 3-vector
    list as ``(v1, v2, v3)`` with a zero offset.

    :raises TypeError: if ``lv`` is not a list of ``c4d.Vector``
    :raises ValueError: if ``lv`` does not have 3 or 4 elements
    """
    if not isinstance(lv, list):
        raise TypeError("E: expected list of vectors, got %r" % type(lv))
    m = len(lv)
    if not isinstance(lv[0], c4d.Vector):
        raise TypeError("E: expected list elements of type c4d.Vector, got %r" % (type(lv[0])))
    if m == 4:
        return c4d.Matrix(lv[0], lv[1], lv[2], lv[3])
    elif m == 3:
        # BUG FIX: the original read lv[1], lv[2], lv[3], indexing past the
        # end of a 3-element list (always IndexError). A 3-vector list is
        # (v1, v2, v3) with a zero offset vector.
        return c4d.Matrix(c4d.Vector(0), lv[0], lv[1], lv[2])
    else:
        raise ValueError("E: need list of length 3 or 4, got %d" % (m))
def loadEpithelium(name):
    """Returns an epithelium from a CSV file with the given name.

    The first line is assumed to be a header and is skipped; each remaining
    line is parsed with ``loadReceptor``.

    Precondition: name exists and it's in CSV format"""
    assert type(name) == str, "name isn't a string"
    recs = []
    # Context manager guarantees the file is closed even if parsing raises
    # (the original closed it only on the success path) and replaces the
    # float line counter used to skip the header.
    with open(name) as text:
        next(text, None)  # skip the header row
        for line in text:
            recs.append(loadReceptor(line, True))
    return Epithelium(recs)
def focal_attention(query, context, use_sigmoid=False, scope=None):
    """Focal attention layer.
    Args:
        query : [N, dim1]
        context: [N, num_channel, T, dim2]
        use_sigmoid: use sigmoid instead of softmax
        scope: variable scope
    Returns:
        Tensor
    """
    with tf.variable_scope(scope or "attention", reuse=tf.AUTO_REUSE):
        # Tensor dimensions, so pylint: disable=g-bad-name
        _, d = query.get_shape().as_list()
        _, K, _, d2 = context.get_shape().as_list()
        # Query and context feature dims must agree for the dot product below.
        assert d == d2
        T = tf.shape(context)[2]
        # [N,d] -> [N,K,T,d]
        query_aug = tf.tile(tf.expand_dims(
            tf.expand_dims(query, 1), 1), [1, K, T, 1])
        # cosine simi
        query_aug_norm = tf.nn.l2_normalize(query_aug, -1)
        context_norm = tf.nn.l2_normalize(context, -1)
        # [N, K, T]
        a_logits = tf.reduce_sum(tf.multiply(query_aug_norm, context_norm), 3)
        a_logits_maxed = tf.reduce_max(a_logits, 2)  # [N,K]
        # Two-level soft selection: over T within each channel, then over
        # channels. ``softsel`` is a module-level helper.
        attended_context = softsel(softsel(context, a_logits,
                                           use_sigmoid=use_sigmoid), a_logits_maxed,
                                   use_sigmoid=use_sigmoid)
        return attended_context
def format_float(x):
    """Pretty formatting for floats: fixed 10.2f width, ' .' for missing."""
    if pd.isnull(x):
        return " ."
    return "{0:10.2f}".format(x)
def random_points_and_attrs(count, srs_id):
    """
    Generate Random Points and attrs (Use some UTM Zone)

    Returns rows of (point, random int, random string, random string,
    random bool) suitable for bulk insertion.
    """
    # ``generate_utm_points`` is a module-level helper.
    points = generate_utm_points(count, srs_id)
    rows = []
    for p in points:
        # 10-character alphanumeric tag; choice/ascii_uppercase/digits are
        # imported at module level (random / string).
        rand_str = ''.join(choice(ascii_uppercase + digits) for _ in range(10))
        rand_bool = bool(randint(0, 1))
        rand_int = randint(0, 1000)
        # The random string is deliberately reused for two columns.
        rows.append((p, rand_int, rand_str, rand_str, rand_bool))
    return rows
from typing import Union
from typing import List
def trimap(adata, **kwargs) -> Union[Axes, List[Axes], None]:
    """\
    Scatter plot in TriMap basis.
    Parameters
    ----------
    {adata_color_etc}
    {edges_arrows}
    {scatter_bulk}
    {show_save_ax}
    Returns
    -------
    If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.
    """
    # Thin wrapper: delegate to the generic embedding plot, selecting the
    # 'trimap' basis (the {…} docstring placeholders are filled by a
    # doc-templating decorator elsewhere in the package).
    return embedding(adata, 'trimap', **kwargs)
import collections
def quantile(arg, quantile, interpolation='linear'):
    """
    Return value at the given quantile, a la numpy.percentile.
    Parameters
    ----------
    quantile : float/int or array-like
        0 <= quantile <= 1, the quantile(s) to compute
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
        This optional parameter specifies the interpolation method to use,
        when the desired quantile lies between two data points `i` and `j`:
        * linear: `i + (j - i) * fraction`, where `fraction` is the
          fractional part of the index surrounded by `i` and `j`.
        * lower: `i`.
        * higher: `j`.
        * nearest: `i` or `j` whichever is nearest.
        * midpoint: (`i` + `j`) / 2.
    Returns
    -------
    quantile
        if scalar input, scalar type, same as input
        if array input, list of scalar type
    """
    # A sequence of quantiles dispatches to the MultiQuantile op; a scalar
    # to Quantile. Both are expression-node classes from the ops module.
    if isinstance(quantile, collections.abc.Sequence):
        op = ops.MultiQuantile(arg, quantile, interpolation)
    else:
        op = ops.Quantile(arg, quantile, interpolation)
    return op.to_expr()
def random_new(algo=RNG_CMWC):
    """Return a new Random instance. Using ``algo``.
    Args:
        algo (int): The random number algorithm to use.
    Returns:
        Random: A new Random instance using the given algorithm.
    """
    # Thin wrapper over tcod.random.Random; RNG_CMWC is the module default.
    return tcod.random.Random(algo)
def get_kth_value(unsorted, k, axis=-1):
    """Return the k-th smallest value (1-based) along the designated axis.

    Args:
        unsorted: numpy.ndarray of any dimensionality.
        k: int
    Returns:
        kth values along the designated axis.
    """
    # argpartition places the k smallest entries first; slice them out,
    # gather their values, and the maximum of those is the k-th smallest.
    smallest_idx = np.argpartition(unsorted, k, axis=axis)[..., :k]
    smallest_vals = np.take_along_axis(unsorted, smallest_idx, axis=axis)
    return smallest_vals.max(axis=axis)
def rdc_transformer(
    local_data,
    meta_types,
    domains,
    k=None,
    s=1.0 / 6.0,
    non_linearity=np.sin,
    return_matrix=False,
    ohe=True,
    rand_gen=None,
):
    # logger.info('rdc transformer', k, s, non_linearity)
    """
    Given a data_slice,
    return a transformation of the features data in it according to the rdc
    pipeline:
    1 - empirical copula transformation
    2 - random projection into a k-dimensional gaussian space
    3 - pointwise non-linear transform
    """
    # NOTE(review): the ``ohe`` parameter is accepted but never consulted --
    # one-hot encoding is unconditionally forced on discrete features below.
    N, D = local_data.shape
    # Deterministic default RNG keeps the projection reproducible.
    if rand_gen is None:
        rand_gen = np.random.RandomState(17)
    #
    # precomputing transformations to reduce time complexity
    #
    #
    # FORCING ohe on all discrete features
    features = []
    for f in range(D):
        if meta_types[f] == MetaType.DISCRETE:
            # ``ohe_data`` is a module-level helper: one-hot over the domain.
            features.append(ohe_data(local_data[:, f], domains[f]))
        else:
            features.append(local_data[:, f].reshape(-1, 1))
    # else:
    #     features = [data_slice.getFeatureData(f) for f in range(D)]
    #
    # NOTE: here we are setting a global k for ALL features
    # to be able to precompute gaussians
    if k is None:
        feature_shapes = [f.shape[1] if len(f.shape) > 1 else 1 for f in features]
        k = max(feature_shapes) + 1
    #
    # forcing two columness
    features = [make_matrix(f) for f in features]
    #
    # transform through the empirical copula
    features = [empirical_copula_transformation(f) for f in features]
    #
    # substituting nans with zero (the above step should have taken care of that)
    features = [np.nan_to_num(f) for f in features]
    #
    # random projection through a gaussian
    random_gaussians = [rand_gen.normal(size=(f.shape[1], k)) for f in features]
    # NOTE(review): the comprehension variable ``N`` shadows the sample
    # count bound above -- it is the gaussian matrix here, not the int.
    rand_proj_features = [s / f.shape[1] * np.dot(f, N) for f, N in zip(features, random_gaussians)]
    nl_rand_proj_features = [non_linearity(f) for f in rand_proj_features]
    #
    # apply non-linearity
    if return_matrix:
        return np.concatenate(nl_rand_proj_features, axis=1)
    else:
        # Append a constant-1 column per feature (bias term for downstream CCA).
        return [np.concatenate((f, np.ones((f.shape[0], 1))), axis=1) for f in nl_rand_proj_features]
import sys
def trim(docstring):
    """Trim leading whitespace from a docstring, per PEP 257.

    From http://www.python.org/dev/peps/pep-0257/
    """
    if not docstring:
        return ''
    # Convert tabs to spaces (standard Python rules) and split into lines.
    lines = docstring.expandtabs().splitlines()
    # Minimum indentation over the non-blank lines after the first.
    indent = min(
        (len(line) - len(line.lstrip()) for line in lines[1:] if line.lstrip()),
        default=sys.maxsize,
    )
    # The first line is special: strip it outright.
    trimmed = [lines[0].strip()]
    if indent < sys.maxsize:
        trimmed.extend(line[indent:].rstrip() for line in lines[1:])
    # Drop trailing, then leading, blank lines.
    while trimmed and not trimmed[-1]:
        trimmed.pop()
    while trimmed and not trimmed[0]:
        trimmed.pop(0)
    return '\n'.join(trimmed)
async def get_layer_version(compatible_runtime=None,layer_name=None,version=None,opts=None):
    """
    Provides information about a Lambda Layer Version.

    Wraps the pulumi provider invoke ``aws:lambda/getLayerVersion`` and maps
    the raw response dict onto a GetLayerVersionResult.
    """
    __args__ = dict()
    __args__['compatibleRuntime'] = compatible_runtime
    __args__['layerName'] = layer_name
    __args__['version'] = version
    # Async invoke against the AWS provider; returns a plain dict.
    __ret__ = await pulumi.runtime.invoke('aws:lambda/getLayerVersion:getLayerVersion', __args__, opts=opts)
    return GetLayerVersionResult(
        arn=__ret__.get('arn'),
        compatible_runtime=__ret__.get('compatibleRuntime'),
        compatible_runtimes=__ret__.get('compatibleRuntimes'),
        created_date=__ret__.get('createdDate'),
        description=__ret__.get('description'),
        layer_arn=__ret__.get('layerArn'),
        layer_name=__ret__.get('layerName'),
        license_info=__ret__.get('licenseInfo'),
        source_code_hash=__ret__.get('sourceCodeHash'),
        source_code_size=__ret__.get('sourceCodeSize'),
        version=__ret__.get('version'),
        id=__ret__.get('id'))
from typing import Optional
from typing import List
def get_header_names(header_annotation_names: Optional[List[str]],
                     doc: Optional[str] = None,
                     docs: Optional[List[str]] = None):
    """Get a list of header annotations and a dictionary for renamed annotations."""
    # Get source_header_names from headers file if it exists
    source_header_names = []
    if docs:
        for d in docs:
            h = Headers(d)
            if h.exists():
                source_header_names.extend(h.read())
        # De-duplicate across documents (order is not preserved).
        source_header_names = list(set(source_header_names))
    elif Headers(doc).exists():
        source_header_names = Headers(doc).read()
    # Parse header_annotation_names and convert to annotations
    annotation_names = misc.parse_annotation_list(header_annotation_names, source_header_names)
    # Per-document annotations when a single doc is given, otherwise
    # all-documents annotations; a[1] carries the optional export rename.
    header_annotations = [(Annotation(a[0], doc) if doc else AnnotationAllDocs(a[0]), a[1]) for a in
                          annotation_names]
    export_names = _create_export_names(header_annotations, None, False, keep_struct_names=False)
    return [a[0] for a in header_annotations], export_names
def data_index(data, key):
    """Look up *key* (or a list/tuple of keys, applied in order) in *data*.

    Integer keys index sequences positionally; any other key requires a
    dict, matched first directly and then by string equality of keys.

    Raises:
        ValueError: when a key cannot be found.
    """
    def lookup(container, k):
        # Integers index sequences directly.
        if isinstance(k, int):
            return container[k]
        assert isinstance(container, dict)
        if k in container:
            return container[k]
        # Fall back to comparing the string form of the keys.
        for candidate, value in container.items():
            if str(candidate) == str(k):
                return value
        raise ValueError("{} is not found".format(k))
    if isinstance(key, (list, tuple)):
        keys = list(key)
        if len(keys) > 1:
            return data_index(lookup(data, keys[0]), keys[1:])
        return lookup(data, keys[0])
    return lookup(data, key)
def calculate_misfit(da):
    """ For each force orientation, extracts minimum misfit
    """
    # Reduce over origins and force amplitudes; assumes ``da`` is an
    # xarray.DataArray with 'origin_idx' and 'F0' dimensions -- confirm.
    misfit = da.min(dim=('origin_idx', 'F0'))
    # Attach the best-fitting force (module-level helper) as an attribute.
    return misfit.assign_attrs({
        'best_force': _min_force(da)
    })
def f_score_one_hot(labels, predictions, beta=1.0, average=None):
    """compute f score, =(1+beta*beta)precision*recall/(beta*beta*precision+recall)
    the labels must be one_hot.
    the predictions is prediction results.
    Args:
        labels: A np.array whose shape matches `predictions` and must be one_hot.
            Will be cast to `bool`.
        predictions: A floating point np.array of arbitrary shape.
        average : string, [None(default), 'micro', 'macro',]
            This parameter is required for multiclass/multilabel targets.
            If ``None``, the scores for each class are returned. Otherwise, this
            determines the type of averaging performed on the data:
            ``'micro'``:
                Calculate metrics globally by counting the total true positives,
                false negatives and false positives.
            ``'macro'``:
                Calculate metrics for each label, and find their unweighted
                mean. This does not take label imbalance into account.
    Returns:
        values: A score float.
    References
    -----------------------
    [1] https://blog.csdn.net/sinat_28576553/article/details/80258619
    """
    if beta < 0:
        raise ValueError("beta should be >=0 in the F-beta score")
    # Validate up front: the original computed precision/recall several
    # times (once before the branch, with the results discarded) and only
    # rejected a bad ``average`` afterwards.
    if average not in (None, 'micro', 'macro'):
        raise ValueError('Invaild average: %s.' % average)
    beta2 = beta ** 2
    # 'macro' averages per-class F scores, so precision/recall are computed
    # per class (average=None) and the F values averaged at the end.
    pr_average = None if average == 'macro' else average
    p = precision_score_one_hot(labels, predictions, average=pr_average)
    r = recall_score_one_hot(labels, predictions, average=pr_average)
    # The precision/recall helpers add an epsilon, so the denominator
    # cannot be exactly zero.
    f = (1 + beta2) * p * r / (beta2 * p + r)
    if average == 'macro':
        return np.average(f)
    return f
def articles():
    """Show a list of article titles"""
    # NOTE(review): ``articles`` inside the comprehension resolves to this
    # view function itself, not a data collection -- iterating it raises
    # TypeError. Presumably a module-level articles list was shadowed by
    # this function name; confirm and rename one of them.
    the_titles = [[a[0], a[1]] for a in articles]
    return render_template('articles.html', titles = the_titles)
import json
from pathlib import Path
import uuid
def create_montage_for_background(montage_folder_path: str, im_b_path: str, f_path: str, only_face: bool) -> str:
    """
    Creates and saves the montage from a designed background. If a folder is provided for faces, it will create a file
    'faces.json' inside the folder for faster loading if used repeatedly. If new images are
    introduced in the faces' folder, delete the json and the next time 'create_montage_for_background' is called,
    it will be created automatically.
    :param montage_folder_path: folder to save the montage
    :param im_b_path: str with the background image path
    :param f_path: folder face path or json with the face's params. If folder provided, the 'faces.json' file
        will be created inside the faces' folder
    :param only_face: Whether to crop the hair and chin of the face or not
    :return: str with montage path
    """
    # Resolve (or lazily create) the faces parameter json for this path.
    json_f_path = get_or_create_params_json(f_path, is_background=False)
    f_faces = open(json_f_path, "r")
    json_faces: dict = json.load(f_faces)
    f_faces.close()
    im_montage = create_montage(im_b_path, json_faces, only_face)  # creates the montage
    # Random 10-hex-char suffix avoids collisions between montages.
    montage_file_path = Path(f"{montage_folder_path}/montage_{uuid.uuid4().hex[:10]}.png")
    try:
        im_montage.save(montage_file_path)
    except IOError:
        # Log and re-raise: the caller decides how to handle the failure.
        logger.error("Montage created but error while saving montage")
        raise
    logger.info(f"Montage created and saved in '{montage_file_path}'")
    return str(montage_file_path)
def deserialize_question(
    question: QuestionDict
) -> Question:
    """Convert a dict into Question object."""
    choices = [
        Choice(title=choice_title, goto=goto)
        for choice_title, goto in question['choices'].items()
    ]
    return Question(
        title=question['title'],
        content=question.get('content'),
        choices=choices,
    )
from datetime import datetime
def ensure_utc_datetime(value):
    """
    Convert a datetime, date, or Wayback-style timestamp string into an
    equivalent timezone-aware datetime in UTC.

    Parameters
    ----------
    value : str or datetime.datetime or datetime.date

    Returns
    -------
    datetime.datetime
    """
    if isinstance(value, str):
        return parse_timestamp(value)
    if isinstance(value, datetime):
        # Aware datetimes are converted; naive ones are assumed to be UTC.
        if value.tzinfo:
            return value.astimezone(timezone.utc)
        return value.replace(tzinfo=timezone.utc)
    if isinstance(value, date):
        # A plain date becomes midnight UTC of that day.
        return datetime(value.year, value.month, value.day, tzinfo=timezone.utc)
    raise TypeError('`datetime` must be a string, date, or datetime')
def get_pod_status(pod_name: str) -> GetPodEntry:
    """Return the current pod status entry for ``pod_name``.

    Runs ``oc get pods`` and parses the first output line containing
    ``pod_name``. When no line matches, an empty string is parsed (same as
    the empty-output case).

    :param pod_name: name (or substring) of the pod to look up
    :return: parsed ``GetPodEntry`` for the matching line
    """
    oc_get_pods_args = ["get", "pods"]
    oc_get_pods_result = execute_oc_command(oc_get_pods_args, capture_output=True).stdout
    # Bug fix: previously, when no line matched, the loop variable kept the
    # *last* line of the output and that unrelated pod line was silently
    # parsed. Only parse a line that actually contains the pod name.
    matched_line = ""
    for line in oc_get_pods_result.splitlines():
        if pod_name in line:
            matched_line = line
            break
    return GetPodEntry.parse(matched_line)
def helix_evaluate(t, a, b):
    """Evaluate a helix at a parameter.

    Parameters
    ----------
    t: float
        Parameter
    a: float
        Radius constant
    b: float
        Slope constant

    Returns
    -------
    list
        The (x, y, z) coordinates.

    Notes
    -----
    An interpretation of the constants is that the radius of the helix is
    ``a`` and the slope of the helix is ``b / a``.
    (Doc fix: the previous docstring documented a third constant ``c``
    that is not a parameter of this function.)

    References
    ----------
    .. [1] Wolfram MathWorld. *Helix*.
           Available at: http://mathworld.wolfram.com/Helix.html.
    """
    return [a * cos(t), a * sin(t), b * t]
import scipy
def get_pfb_window(num_taps, num_branches, window_fn='hamming'):
    """
    Compute the FIR windowing coefficients multiplied onto time series data
    in a polyphase filterbank (PFB).

    Parameters
    ----------
    num_taps : int
        Number of PFB taps
    num_branches : int
        Number of PFB branches. Note that this results in `num_branches / 2` coarse channels.
    window_fn : str, optional
        Windowing function used for the PFB

    Returns
    -------
    window : array
        Array of PFB windowing coefficients
    """
    filter_length = num_taps * num_branches
    coeffs = scipy.signal.firwin(filter_length,
                                 cutoff=1.0 / num_branches,
                                 window=window_fn,
                                 scale=True)
    # Rescale so the coefficients sum to the filter length rather than 1.
    coeffs *= filter_length
    return xp.array(coeffs)
from typing import Dict
from typing import OrderedDict
def retrieve_bluffs_by_id(panelist_id: int,
                          database_connection: mysql.connector.connect,
                          pre_validated_id: bool = False) -> Dict:
    """Returns an OrderedDict containing Bluff the Listener information
    for the requested panelist ID

    Arguments:
        panelist_id (int)
        database_connection (mysql.connector.connect)
        pre_validated_id (bool): Flag whether or not the panelist ID
        has been validated or not

    Returns an OrderedDict with "chosen" and "correct" counts, or None
    when the panelist ID is invalid or the query yields no row.
    """
    # Unless the caller vouches for the ID, validate it first; unknown
    # panelists yield None rather than an exception.
    if not pre_validated_id:
        if not utility.validate_id(panelist_id, database_connection):
            return None

    try:
        cursor = database_connection.cursor()
        # Single round trip: both counts are computed as scalar subqueries.
        # "chosen"  = shows where listeners picked this panelist's bluff,
        # "correct" = shows where this panelist had the true story.
        # `repeatshowid IS NULL` excludes repeat broadcasts so each show
        # is counted only once.
        query = ("SELECT ( "
                 "SELECT COUNT(blm.chosenbluffpnlid) FROM ww_showbluffmap blm "
                 "JOIN ww_shows s ON s.showid = blm.showid "
                 "WHERE s.repeatshowid IS NULL AND blm.chosenbluffpnlid = %s "
                 ") AS chosen, ( "
                 "SELECT COUNT(blm.correctbluffpnlid) FROM ww_showbluffmap blm "
                 "JOIN ww_shows s ON s.showid = blm.showid "
                 "WHERE s.repeatshowid IS NULL AND blm.correctbluffpnlid = %s "
                 ") AS correct;")
        cursor.execute(query, (panelist_id, panelist_id,))
        result = cursor.fetchone()
        cursor.close()

        if result:
            bluffs = OrderedDict()
            bluffs["chosen"] = result[0]
            bluffs["correct"] = result[1]
            return bluffs

        return None
    except ProgrammingError as err:
        # Translate driver exceptions so callers need not depend on the
        # mysql connector's exception hierarchy details.
        raise ProgrammingError("Unable to query the database") from err
    except DatabaseError as err:
        raise DatabaseError("Unexpected database error") from err
def extract_protein_from_record(record):
    """
    Grab the protein sequence as a string from a SwissProt record

    :param record: A Bio.SwissProt.SeqRecord instance
    :return: the record's sequence coerced to ``str``
    """
    sequence = record.sequence
    return str(sequence)
import cftime
def _diff_coord(coord):
    """Returns the difference as a `xarray.DataArray`.

    Handles three coordinate flavours: cftime dates (non-standard
    calendars), numpy datetime64 values, and plain numerics. For the two
    date flavours the differences are expressed in seconds.

    NOTE(review): np.diff returns a numpy array here, not a DataArray as
    the summary suggests — confirm the intended return type.
    """
    v0 = coord.values[0]
    # cftime date objects carry a `calendar` attribute; other values do not.
    calendar = getattr(v0, "calendar", None)
    if calendar:
        ref_units = "seconds since 1800-01-01 00:00:00"
        # Convert the cftime dates to numeric seconds before differencing.
        decoded_time = cftime.date2num(coord, ref_units, calendar)
        coord = xr.DataArray(decoded_time, dims=coord.dims, coords=coord.coords)
        return np.diff(coord)
    elif pd.api.types.is_datetime64_dtype(v0):
        # np.diff yields timedelta64; cast to whole seconds as float64.
        return np.diff(coord).astype("timedelta64[s]").astype("f8")
    else:
        return np.diff(coord)
import _tkinter
def checkDependencies():
    """
    Return a list of warning messages for missing outside dependencies.

    An empty list means every checked dependency is available.
    """
    missing = []
    # Bug fix: the previous code did `del _tkinter` inside the function,
    # which makes `_tkinter` a *local* name; deleting the unbound local
    # always raised (and a bare `except:` caught it), so the warning was
    # appended unconditionally. Probe availability with a local import
    # and only catch ImportError.
    try:
        import _tkinter  # noqa: F401
    except ImportError:
        missing.append("WARNING: _tkinter is necessary for NetworKit.\n"
            "Please install _tkinter \n"
            "Root privileges are necessary for this. \n"
            "If you have these, the installation command should be: sudo apt-get install python3-tk")
    return missing
from typing import Dict
from typing import Any
def get_user_groups_sta_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """ Function for sta-get-user-groups command. Get all the groups associated with a specific user. """
    user_name = args.get('userName')
    response, output_data = client.user_groups_data(userName=user_name, limit=args.get('limit'))
    # An empty API response yields a human-readable "no result" message.
    if not response:
        return CommandResults(
            readable_output=NO_RESULT_MSG,
        )
    headers = ['id', 'schemaVersionNumber', 'name', 'description', 'isSynchronized']
    table = tableToMarkdown(
        f"Groups associated with user - {user_name} : ", response, headers=headers,
        headerTransform=pascalToSpace, removeNull=True)
    return CommandResults(
        readable_output=table,
        outputs_prefix='STA.USER',
        outputs_key_field=['id'],
        outputs=output_data
    )
def get_capital_flow(order_book_ids, start_date=None, end_date=None, frequency="1d", market="cn"):
    """Fetch capital inflow/outflow data.

    :param order_book_ids: instrument code or list of codes, e.g. '000001.XSHE'
    :param start_date: start date
    :param end_date: end date
    :param frequency: bar frequency — '1d' daily (default), '1m' minute,
        'tick' snapshot
    :param market: market code (Default value = "cn")
    :returns: pandas.DataFrame or None
    """
    ensure_string_in(frequency, ("1d", "1m", "tick"), "frequency")
    # Tick data has its own retrieval path and skips the id/date coercion.
    if frequency == "tick":
        return get_capital_flow_tickbar(order_book_ids, start_date, end_date, TICKBAR_FIELDS, market)
    order_book_ids = ensure_order_book_ids(order_book_ids)
    start_date, end_date = ensure_date_range(start_date, end_date)
    duration = 1
    if frequency == "1d":
        return get_capital_flow_daybar(order_book_ids, start_date, end_date, DAYBAR_FIELDS, duration, market)
    return get_capital_flow_minbar(order_book_ids, start_date, end_date, MINBAR_FIELDS, duration, market)
def split_quoted(s):
    """Split a string with quotes, some possibly escaped, into a list of
    alternating quoted and unquoted segments. Raises a ValueError if there are
    unmatched quotes.

    Both the first and last entry are unquoted, but might be empty, and
    therefore the length of the resulting list must be an odd number.
    """
    segments = []
    for piece in s.split(QUOTE):
        # A trailing backslash means the quote was escaped: glue this piece
        # back onto the previous segment, restoring the quote character.
        if segments and segments[-1].endswith('\\'):
            segments[-1] += QUOTE + piece
        else:
            segments.append(piece)
    if len(segments) % 2 == 0:
        raise ValueError('Unmatched quote.')
    return segments
import torch
def calc_IOU(seg_omg1: torch.BoolTensor, seg_omg2: torch.BoolTensor, eps: float = 1.e-6) -> float:
    """
    Compute the mean intersection-over-union between two boolean
    segmentation masks.

    :param seg_omg1: first segmentation mask
    :param seg_omg2: second segmentation mask
    :param eps: small constant for numerical stability
    :return: IOU averaged over the leading (batch) dimension
    """
    # Reduce over every axis except the leading batch axis.
    reduce_dims = [1, 2, 3] if seg_omg1.dim() == 4 else [1, 2]
    inter = (seg_omg1 & seg_omg2).sum(dim=reduce_dims)
    union = (seg_omg1 | seg_omg2).sum(dim=reduce_dims)
    return (inter.float() / (union.float() + eps)).mean().item()
import time
def _call_list(urls, method, payload=None, headers=None, auth=None,
               proxies=None, timeout=None, stream=None, verify=True,
               payload_to_json=True, allow_redirects=True):
    """Call list of supplied URLs, return on first success."""
    _LOGGER.debug('Call %s on %r', method, urls)
    # Record one (timestamp, url, status, message) tuple per failed attempt.
    failures = []
    for url in urls:
        ok, response, status_code = _call(
            url, method, payload, headers, auth, proxies, timeout=timeout,
            stream=stream, verify=verify,
            payload_to_json=payload_to_json,
            allow_redirects=allow_redirects,
        )
        if ok:
            return ok, response
        failures.append((time.time(), url, status_code, _msg(response)))
    # Every URL failed: return the full attempt log for diagnostics.
    return False, failures
def td_path_join(*argv):
    """Construct TD path from args."""
    assert len(argv) >= 2, "Requires at least 2 tdpath arguments"
    # Stringify each component and join with the TD path separator.
    return "/".join(map(str, argv))
import torch
def calculate_segmentation_statistics(outputs: torch.Tensor, targets: torch.Tensor, class_dim: int = 1, threshold=None):
    """Compute per-class segmentation statistics.

    Args:
        outputs: torch.Tensor.
        targets: torch.Tensor.
        threshold: threshold for binarization of predictions.
        class_dim: indicates class dimension (K).

    Returns:
        True positives, false positives, false negatives for the
        segmentation task, each reduced over all axes except ``class_dim``.
    """
    ndim = outputs.dim()
    assert ndim > 2, "Found only two dimensions, shape should be [bs , C , ...]"  # noqa: S101
    assert outputs.shape == targets.shape, "shape mismatch"  # noqa: S101
    if threshold is not None:
        outputs = (outputs > threshold).float()
    # Sum over every axis except the class axis.
    reduce_dims = [d for d in range(ndim) if d != class_dim]
    tp = torch.sum(outputs * targets, dim=reduce_dims)
    fp = torch.sum(outputs * (1 - targets), dim=reduce_dims)
    fn = torch.sum(targets * (1 - outputs), dim=reduce_dims)
    return tp, fp, fn
def sample_from_cov(mean_list, cov_list, Nsamples):
    """
    Sample from the multivariate Gaussian of Gaia astrometric data.

    Args:
        mean_list (list): A list of arrays of astrometric data.
            [ra, dec, plx, pmra, pmdec]
        cov_list (array): A list of all the uncertainties and covariances:
            [ra_err, dec_err, plx_err, pmra_err, pmdec_err, ra_dec_corr,
            ra_plx_corr, ra_pmra_corr, ra_pmdec_corr, dec_plx_corr,
            dec_pmra_corr, dec_pmdec_corr, plx_pmra_corr, plx_pmdec_corr,
            pmra_pmdec_corr]
        Nsamples: (int): The number of samples.
    """
    ndim = len(mean_list)  # 5 dimensions: ra, dec, plx, pmra, pmdec
    nstars = len(mean_list[0])

    # Stack the per-quantity arrays into a (ndim, nstars) mean matrix and
    # build the matching per-star covariance matrices.
    mean = np.vstack(mean_list)
    cov = construct_cov(cov_list, ndim)

    # Draw Nsamples per star from its own multivariate Gaussian.
    samples = np.zeros((Nsamples, ndim, nstars))
    for star in range(nstars):
        samples[:, :, star] = np.random.multivariate_normal(
            mean[:, star], cov[:, :, star], Nsamples)
    return samples
def get_finger_distal_angle(x,m):
    """Extract the finger angle th3 (third component) from a hybrid state."""
    th3 = x[2]
    return th3
def get_karma(**kwargs):
    """Get your current karma score"""
    user_id = kwargs.get("user_id").strip("<>@")
    session = db_session.create_session()
    # Bug fix: the query previously ran *outside* the try/finally, so a
    # query error leaked the session. Everything after session creation is
    # now covered by the finally clause. (Also renamed the 'kama_user'
    # typo locally.)
    try:
        karma_user = session.query(KarmaUser).get(user_id)
        if not karma_user:
            return "User not found"
        if karma_user.karma_points == 0:
            return "Sorry, you don't have any karma yet"
        return (
            f"Hey {karma_user.username}, your current karma is {karma_user.karma_points}"
        )
    finally:
        session.close()
from pathlib import Path
import os
import logging
def medical_charges_nominal(dataset_dir: Path) -> bool:
    """Download, standardize, split, and persist the OpenML
    medical_charges_nominal dataset as .npy files in ``dataset_dir``.

    medical_charges_nominal x train dataset (130452, 11)
    medical_charges_nominal y train dataset (130452, 1)
    medical_charges_nominal x test dataset  (32613, 11)
    medical_charges_nominal y test dataset  (32613, 1)
    """
    dataset_name = 'medical_charges_nominal'
    os.makedirs(dataset_dir, exist_ok=True)
    # Download (or load from the local cache in dataset_dir) as raw arrays.
    X, y = fetch_openml(name='medical_charges_nominal', return_X_y=True,
                        as_frame=False, data_home=dataset_dir)
    X = pd.DataFrame(X)
    y = pd.DataFrame(y)
    logging.info(f'{dataset_name} is loaded, started parsing...')
    x_train, x_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=42)
    # Standardize features; statistics are fit on the training split only.
    # NOTE(review): StandardScaler.fit ignores its second argument, so
    # passing y_train here is harmless but misleading — confirm intent.
    scaler = StandardScaler().fit(x_train, y_train)
    x_train = scaler.transform(x_train)
    x_test = scaler.transform(x_test)
    # The target is standardized too, with its own (rebound) scaler.
    scaler = StandardScaler().fit(y_train)
    y_train = scaler.transform(y_train)
    y_test = scaler.transform(y_test)
    # Persist all four splits next to the raw download.
    for data, name in zip((x_train, x_test, y_train, y_test),
                          ('x_train', 'x_test', 'y_train', 'y_test')):
        filename = f'{dataset_name}_{name}.npy'
        np.save(os.path.join(dataset_dir, filename), data)
    logging.info(f'dataset {dataset_name} is ready.')
    return True
def plotly_shap_violin_plot(X, shap_values, col_name, color_col=None, points=False, interaction=False):
    """
    Returns a violin plot for categorical values.

    if points=True or color_col is not None, a scatterplot of points is plotted next to the violin plots.
    If color_col is given, scatter is colored by color_col.

    NOTE(review): the figure titles interpolate Python None as the literal
    string "None" when interaction=False — confirm that is intended.
    """
    assert is_string_dtype(X[col_name]), \
        f'{col_name} is not categorical! Can only plot violin plots for categorical features!'

    x = X[col_name]
    # SHAP column aligned with the selected feature.
    shaps = shap_values[:, X.columns.get_loc(col_name)]
    n_cats = X[col_name].nunique()

    # With a scatter panel, every category occupies two subplot columns
    # (wide violin + narrow scatter); otherwise one column per category.
    if points or color_col is not None:
        fig = make_subplots(rows=1, cols=2*n_cats, column_widths=[3, 1]*n_cats, shared_yaxes=True)
        # showscale also doubles as a "first legend/colorbar only" flag below.
        showscale = True
    else:
        fig = make_subplots(rows=1, cols=n_cats, shared_yaxes=True)

    fig.update_yaxes(range=[shaps.min()*1.3, shaps.max()*1.3])

    for i, cat in enumerate(X[col_name].unique()):
        col = 1+i*2 if points or color_col is not None else 1+i
        fig.add_trace(go.Violin(
                            x=x[x == cat],
                            y=shaps[x == cat],
                            name=cat,
                            box_visible=True,
                            meanline_visible=True,
                            showlegend=False,
                               ),
                     row=1, col=col)
        if color_col is not None:
            if is_numeric_dtype(X[color_col]):
                # Numeric color column: one scatter with a continuous
                # colorbar; x jitter is random so points do not overlap.
                fig.add_trace(go.Scatter(
                                x=np.random.randn(len(x[x == cat])),
                                y=shaps[x == cat],
                                name=color_col,
                                mode='markers',
                                showlegend=False,
                                hoverinfo="text",
                                hovertemplate =
                                "<i>shap</i>: %{y:.2f}<BR>" +
                                f"<i>{color_col}" + ": %{marker.color}",
                                text = [f"shap: {shap}<>{color_col}: {col}" for shap, col in zip(shaps[x == cat], X[color_col][x==cat])],
                                marker=dict(size=7,
                                            opacity=0.6,
                                            cmin=X[color_col].min(),
                                            cmax=X[color_col].max(),
                                            color=X[color_col][x==cat],
                                            colorscale='Bluered',
                                            showscale=showscale,
                                            colorbar=dict(title=color_col)),
                                ),
                             row=1, col=col+1)
            else:
                # Categorical color column: one scatter trace per color
                # category, cycling through a fixed palette.
                n_color_cats = X[color_col].nunique()
                colors = ['#636EFA', '#EF553B', '#00CC96', '#AB63FA', '#FFA15A', '#19D3F3', '#FF6692', '#B6E880', '#FF97FF', '#FECB52']
                colors = colors * (1+int(n_color_cats / len(colors)))
                colors = colors[:n_color_cats]
                for color_cat, color in zip(X[color_col].unique(), colors):
                    fig.add_trace(go.Scatter(
                                    x=np.random.randn(len(x[(x == cat) & (X[color_col] == color_cat)])),
                                    y=shaps[(x == cat) & (X[color_col] == color_cat)],
                                    name=color_cat,
                                    mode='markers',
                                    showlegend=showscale,
                                    hoverinfo="text",
                                    hovertemplate =
                                    "<i>shap</i>: %{y:.2f}<BR>" +
                                    f"<i>{color_col}: {color_cat}",
                                    marker=dict(size=7,
                                                opacity=0.8,
                                                color=color)
                                    ),
                                 row=1, col=col+1)
                # Only the first category's traces contribute legend entries.
                showscale = False
        elif points:
            # Plain (uncolored) jittered point cloud beside the violin.
            fig.add_trace(go.Scatter(
                            x=np.random.randn(len(x[x == cat])),
                            y=shaps[x == cat],
                            mode='markers',
                            showlegend=False,
                            hovertemplate =
                            "<i>shap</i>: %{y:.2f}",
                            marker=dict(size=7,
                                        opacity=0.6,
                                        color='blue'),
                        ), row=1, col=col+1)
    if points or color_col is not None:
        # Hide the meaningless jitter axes of every scatter column.
        for i in range(n_cats):
            fig.update_xaxes(showgrid=False, zeroline=False, visible=False, row=1, col=2+i*2)
            fig.update_yaxes(showgrid=False, zeroline=False, row=1, col=2+i*2)
        if color_col is not None:
            fig.update_layout(title=f'Shap {"interaction" if interaction else None} values for {col_name}<br>(colored by {color_col})', hovermode='closest')
        else:
            fig.update_layout(title=f'Shap {"interaction" if interaction else None} values for {col_name}', hovermode='closest')
    else:
        fig.update_layout(title=f'Shap {"interaction" if interaction else None} values for {col_name}')
    return fig
def animation():
    """
    This function gives access to the animation tools factory - allowing you
    to access all the tools available.

    Note: This will not re-instance the factory on each call, the factory
    is instanced only on the first called and cached thereafter.

    :return: factories.Factory
    """
    global _anim_library
    # Lazily instantiate and cache the factory on first use.
    if not _anim_library:
        _anim_library = factories.Factory(
            abstract=AnimTool,
            plugin_identifier='identifier',
            versioning_identifier='version',
            envvar=constants.PLUGIN_ENVIRONMENT_VARIABLE,
            paths=constants.PLUGIN_LOCATIONS,
        )
    return _anim_library
def merge(pinyin_d_list):
    """Merge a sequence of dicts left-to-right; later dicts overwrite
    earlier keys.

    :rtype: dict
    """
    merged = {}
    for overwrite in pinyin_d_list:
        merged = {**merged, **overwrite}
    return merged
import subprocess
def HttpPostRequest(url, post_dict):
    """Proceed an HTTP POST request, and returns an HTTP response body.

    Args:
      url: a URL string of an HTTP server.
      post_dict: a dictionary of a body to be posted.

    Returns:
      a response from the server.
    """
    # Delegate the actual HTTP call to the goma fetch helper binary.
    encoded_body = urlencode(post_dict)
    return subprocess.check_output(
        [GOMA_FETCH, '--noauth', '--post', url, '--data', encoded_body])
from typing import Dict
from typing import Callable
def nn_avg_pool2d(expr: Expr,
                  params: Dict[str, np.ndarray],
                  schedule: Schedule,
                  net: Dict[Expr, Expr],
                  op_idx: Dict[str, int],
                  RELAY_2_XLAYER: Dict[str, Callable],
                  **kwargs) -> XLayer:
    """
    TVM Avg Pool2d to XLayer

    Relay
    -----
    Type: tvm.relay.op.nn.nn.avg_pool2d
    Ref: https://docs.tvm.ai/api/python/relay/nn.html
    Parameters:
        - data (tvm.relay.Expr)
            The input data to the operator.
        - strides (tuple of int, optional)
            The strides of pooling.
        - padding (tuple of int, optional)
            The padding for pooling.
        - layout (str, optional)
            Layout of the input.
        - ceil_mode (bool, optional)
            To enable or disable ceil while pooling.
        - count_include_pad (bool, optional)
            To include padding to compute the average.

    Returns the XLayer for this pooling op; NHWC inputs are wrapped in
    NCHW<->NHWC transpose layers so the pooling itself runs in NCHW.
    """
    if expr in net:
        logger.debug("MEMORY: NN AVG POOL2D")
        # This expressions is already transformed so we reuse that one
        return net[expr]

    # Materialize the Relay attributes as plain Python values.
    pool_size = [int(e) for e in list(expr.attrs.pool_size)]
    strides = [int(e) for e in list(expr.attrs.strides)]
    padding = [int(e) for e in list(expr.attrs.padding)]
    data_layout = str(expr.attrs.layout)
    ceil_mode = bool(expr.attrs.ceil_mode)
    count_include_pad = bool(expr.attrs.count_include_pad)

    # if count_include_pad:
    #    logger.debug("Padding: {}".format(padding))
    #    raise NotImplementedError("Including padding in avg pool2d
    #                              " computation"
    #                              " is not supported")

    # Recursively convert the input expression via the registered handler.
    data_expr, data_expr_class = expr.args[0], expr.args[0].__class__.__name__
    data_layer = RELAY_2_XLAYER[data_expr_class](data_expr, params, schedule,
                                                 net, op_idx, RELAY_2_XLAYER,
                                                 **kwargs)

    logger.debug("nn_avg_pool2d: {}".format(hash(expr)))

    # Update schedule with input data layer
    if data_expr not in net:
        schedule.append(data_expr)
        net[data_expr] = data_layer

    # Create XLayer
    pool_type = 'Avg'

    # Convert NHWC -> NCHW TODO: remove data layout
    if data_layout == 'NHWC':
        t_name = 'nn_avg_pool2d_NHWC>NCHW-' + str(hash(expr))
        data_layer.tops.append(t_name)
        data_layer = xlf.get_xop_factory_func('Transpose', internal=True)(
            t_name, data_layer, [0, 3, 1, 2])
        schedule.append(t_name)
        net[t_name] = data_layer

    # Create name
    op_name = 'nn_avg_pool2d-' + str(hash(expr))

    X = xlf.get_xop_factory_func('Pooling')(
        op_name, data_layer, pool_type, pool_size,
        strides, padding, 'NCHW',
        ceil_mode, count_include_pad,
        relay_id=[hash(expr)])
    logger.debug("-- outshape: {}".format(list(X.shapes)))

    # !Important: set input layer tops
    data_layer.tops.append(X.name)

    # Convert to NCHW -> NHWC TODO: remove data layout
    if data_layout == 'NHWC':
        # Register the pooling layer itself before appending the
        # transpose back to the original layout.
        schedule.append(X.name)
        net[X.name] = X

        t_name = 'nn_avg_pool2d_NCHW>NHWC-' + str(hash(expr))
        X.tops.append(t_name)
        res_X = xlf.get_xop_factory_func('Transpose', internal=True)(
            t_name, X, [0, 2, 3, 1])
    else:
        res_X = X

    return res_X
def dprnn_tasnet(name_url_or_file=None, *args, **kwargs):
    """ Load (pretrained) DPRNNTasNet model

    Args:
        name_url_or_file (str): Model name (we'll find the URL),
            model URL to download model, path to model file.
            If None (default), DPRNNTasNet is instantiated but no pretrained
            weights are loaded.
        *args: Arguments to pass to DPRNNTasNet.
        **kwargs: Keyword arguments to pass to DPRNNTasNet.

    Returns:
        DPRNNTasNet instance (with or without pretrained weights).

    Examples:
        >>> from torch import hub
        >>> # Instantiate without pretrained weights
        >>> model = hub.load('mpariente/asteroid', 'dprnn_tasnet')
        >>> # Use pretrained weights
        >>> URL = "TOCOME"
        >>> model = hub.load('mpariente/asteroid', 'dprnn_tasnet', URL)
    """
    if name_url_or_file is None:
        # No pretrained weights requested: plain instantiation.
        return models.DPRNNTasNet(*args, **kwargs)
    # Otherwise resolve the name/URL/path and load the saved weights.
    return models.DPRNNTasNet.from_pretrained(name_url_or_file)
def utcnow():
    """Better version of utcnow() that returns utcnow with a correct TZ."""
    # Request a timezone-aware value from timeutils.
    tz_aware = True
    return timeutils.utcnow(tz_aware)
def build_profile(base_image, se_size=4, se_size_increment=2, num_openings_closings=4):
    """
    Build the extended morphological profiles (EMP) for a given image cube.

    Parameters:
        base_image: 3d matrix; each 'channel' is considered for applying the
            morphological profile. It is the spectral information part of the EMP.
        se_size: int, initial size of the structuring element (or kernel).
            Structuring Element used: disk
        se_size_increment: int, structuring element increment step
        num_openings_closings: int, number of openings and closings by
            reconstruction to perform.

    Returns:
        emp: 3d matrix with both spectral (from the base_image) and spatial
        information; depth is channels * (2 * num_openings_closings + 1).
    """
    rows, columns, channels = base_image.shape
    # Each channel contributes one morphological profile of this many bands.
    morphological_profile_size = (num_openings_closings * 2) + 1
    emp = np.zeros(
        shape=(rows, columns, morphological_profile_size * channels))
    for i in range(channels):
        # build the MP for this spectral channel
        mp_temp = build_morphological_profiles(
            base_image[:, :, i], se_size, se_size_increment, num_openings_closings)
        # Copy the profile into its slice of the EMP. (Replaces the previous
        # error-prone cont/cont_aux index bookkeeping and removes the dead
        # `se_size = se_size`-style self-assignments.)
        start = i * morphological_profile_size
        emp[:, :, start:start + morphological_profile_size] = mp_temp
    return emp
def mujoco_env(env_id, nenvs=None, seed=None, summarize=True,
               normalize_obs=True, normalize_ret=True):
    """ Creates and wraps MuJoCo env. """
    assert is_mujoco_id(env_id)
    seed = get_seed(nenvs, seed)
    if nenvs is None:
        env = gym.make(env_id)
        set_seed(env, seed)
    else:
        # One sub-environment per seed; each one is created bare (no
        # summaries, no normalization) and the batch is wrapped once below.
        env = ParallelEnvBatch([
            lambda s=s: mujoco_env(env_id, seed=s, summarize=False,
                                   normalize_obs=False, normalize_ret=False)
            for s in seed])
    return mujoco_wrap(env, summarize=summarize, normalize_obs=normalize_obs,
                       normalize_ret=normalize_ret)
import torch
def polar2cart(r, theta):
    """
    Transform polar coordinates to Cartesian.

    Parameters
    ----------
    r, theta : floats or arrays
        Polar coordinates

    Returns
    -------
    [x, y] : floats or arrays
        Cartesian coordinates
    """
    x = r * torch.cos(theta)
    y = r * torch.sin(theta)
    # Stack into a trailing (x, y) axis and drop any singleton dimensions.
    return torch.stack((x, y), dim=-1).squeeze()
def belongs_to(user, group_name):
    """
    Check if the user belongs to the given group.

    :param user: user whose group memberships are inspected
    :param group_name: group name, matched case-insensitively
    :return: True if a matching group exists, else False
    """
    matching_groups = user.groups.filter(name__iexact=group_name)
    return matching_groups.exists()
import torch
def sum_log_loss(logits, mask, reduction='sum'):
    """
    :param logits: reranking logits(B x C) or span loss(B x C x L)
    :param mask: reranking mask(B x C) or span mask(B x C x L)
    :return: sum log p_positive i over all candidates
    """
    positives_per_row = mask.sum(-1)  # B x C
    # Zero out scores at non-positive positions, then sum the rest.
    positive_score_sum = logits.masked_fill(~(mask.bool()), 0).sum(-1)
    log_norm = torch.logsumexp(logits, -1)  # B x C
    # Average positive score minus the log partition function.
    log_probs = positive_score_sum / positives_per_row - log_norm
    loss = -log_probs.sum()
    if reduction == 'mean':
        loss = loss / logits.size(0)
    return loss
from acor import acor
from .autocorrelation import ipce
from .autocorrelation import icce
def _get_iat_method(iatmethod):
    """Control routine for selecting the method used to calculate integrated
    autocorrelation times (iat)

    Parameters
    ----------
    iat_method : string, optional
        Routine to use for calculating said iats.  Accepts 'ipce', 'acor', and 'icce'.

    Returns
    -------
    iatroutine : function
        The function to be called to estimate the integrated autocorrelation time.

    Raises
    ------
    ValueError
        If ``iatmethod`` is not one of the supported names.
    """
    # Bug fix: an unrecognized method previously fell through without
    # assigning iatroutine, raising an opaque UnboundLocalError; fail
    # with an explicit message instead.
    if iatmethod == 'acor':
        return acor
    elif iatmethod == 'ipce':
        return ipce
    elif iatmethod == 'icce':
        return icce
    raise ValueError(
        "Unknown iat method: %r (expected 'acor', 'ipce' or 'icce')" % (iatmethod,))
def run():
    """Main entry point."""
    # Invoke the CLI with a fresh shared context object; presumably a click
    # app where env vars prefixed IMPLANT_ feed option defaults — confirm.
    return cli(obj={}, auto_envvar_prefix='IMPLANT')  # noqa
def qtl_test_interaction_GxG(pheno, snps1, snps2=None, K=None, covs=None, test="lrt"):
    """
    Epistasis test between two sets of SNPs

    Args:
        pheno: [N x 1] np.array of 1 phenotype for N individuals
        snps1: [N x S1] np.array of S1 SNPs for N individuals
        snps2: [N x S2] np.array of S2 SNPs for N individuals
        K: [N x N] np.array of LMM-covariance/kinship koefficients (optional)
           If not provided, then linear regression analysis is performed
        covs: [N x D] np.array of D covariates for N individuals
        test: 'lrt' for likelihood ratio test (default) or 'f' for F-test

    Returns:
        pv: [S2 x S1] np.array of P values for epistasis tests beten all SNPs in
            snps1 and snps2
    """
    # Bug fix: N was read before it was assigned, so calling with K=None
    # raised a NameError. Determine the sample count first.
    N = snps1.shape[0]
    if K is None:
        K = np.eye(N)
    if snps2 is None:
        snps2 = snps1
    return qtl_test_interaction_GxE_1dof(
        snps=snps1, pheno=pheno, env=snps2, covs=covs, K=K, test=test
    )
import copy
import io
def log_parser(log):
    """
    This takes the EA task log file generated by e-prime and converts it into a
    set of numpy-friendly arrays (with mixed numeric and text fields.)

    pic -- 'Picture' lines, which contain the participant's ratings.
    res -- 'Response' lines, which contain their responses (unclear)
    vid -- 'Video' lines, which demark the start and end of trials.

    NOTE(review): no 'res' array is actually built or returned here,
    despite the summary above — confirm whether that is intentional.
    """
    # substitute for GREP -- finds 'eventtype' field.
    # required as this file has a different number of fields per line
    logname = copy.copy(log)
    log = open(log, "r").readlines()
    pic = filter(lambda s: 'Picture' in s, log)
    vid = filter(lambda s: 'Video' in s, log)
    # write out files from stringio blobs into numpy genfromtxt
    pic = np.genfromtxt(io.StringIO(''.join(pic)), delimiter='\t',
        names=['subject', 'trial', 'eventtype', 'code', 'time', 'ttime', 'uncertainty1', 'duration', 'uncertainty2', 'reqtime', 'reqduration', 'stimtype', 'pairindex'],
        dtype=['|S64'   , int    , '|S64'     , '|S64', int   , int    , int           , int       , int           , int      , int          , '|S64'   , int])
    vid = np.genfromtxt(io.StringIO(''.join(vid)), delimiter='\t',
        names=['subject', 'trial', 'eventtype', 'code', 'time', 'ttime', 'uncertainty1'],
        dtype=['|S64'   , int    , '|S64'     , '|S64', int   , int    , int])
    # ensure our inputs contain a 'MRI_start' string.
    # NOTE(review): with dtype '|S64', genfromtxt yields *bytes*; comparing
    # against the str 'MRI_start' never matches on Python 3, so this branch
    # would always raise there — confirm this module targets Python 2, or
    # compare with b'MRI_start' instead.
    if pic[0][3] != 'MRI_start':
        logger.error('log {} does not contain an MRI_start entry!'.format(logname))
        raise ValueError
    else:
        # this is the start of the fMRI run, all times are relative to this.
        # NOTE(review): index 7 is the 'duration' field of the MRI_start
        # row — confirm this is the intended reference timestamp.
        mri_start = pic[0][7]

    return pic, vid, mri_start
from typing import List
def line_assign_z_to_vertexes(line_2d: ogr.Geometry,
                              dem: DEM,
                              allowed_input_types: List[int] = None) -> ogr.Geometry:
    """
    Assign a Z dimension to the vertices of `line_2d` using raster values from
    `dem`, interpolated bilinearly for a smooth surface.

    Parameters
    ----------
    line_2d : ogr.Geometry
        `ogr.Geometry` containing lines; its type is checked against
        `allowed_input_types`.
    dem : DEM
        Raster data source in the `DEM` (`gdalhepers`) format.
    allowed_input_types : list of int, optional
        Allowed geometry types for `line_2d`. The default `None` accepts any
        line type, i.e. `[ogr.wkbLineString, ogr.wkbLineString25D,
        ogr.wkbLineStringM, ogr.wkbLineStringZM]`.

    Returns
    -------
    ogr.Geometry
        Geometry of type `ogr.wkbLineStringZ`.
    """
    if allowed_input_types is None:
        allowed_input_types = [ogr.wkbLineString, ogr.wkbLineString25D, ogr.wkbLineStringM, ogr.wkbLineStringZM]

    geometry_checks.check_variable_expected_geometry(line_2d, "line_2d", allowed_input_types)

    line_3d = ogr.Geometry(ogr.wkbLineString25D)
    for index in range(line_2d.GetPointCount()):
        point = line_2d.GetPoint(index)
        z = dem.get_value_bilinear(point[0], point[1])
        # Vertices that fall on nodata cells are dropped from the output.
        if z != dem.get_nodata_value():
            line_3d.AddPoint(point[0], point[1], z)
    return line_3d
def backoff_linear(n):
    """
    backoff_linear(n) -> float

    Linear backoff implementation. This returns n.

    See ReconnectingWebSocket for details.
    """
    delay = n
    return delay
import pkg_resources
def _doc():
    """
    :rtype: str
    """
    # Load the bundled help text and decode it from UTF-8.
    raw = pkg_resources.resource_string(
        'dcoscli',
        'data/help/config.txt')
    return raw.decode('utf-8')
def sk_algo(U, gates, n):
    """Solovay-Kitaev Algorithm."""
    # Base case: approximate U directly from the gate set.
    if n == 0:
        return find_closest_u(gates, U)
    # Recursive refinement: correct the residual with a group commutator.
    U_prev = sk_algo(U, gates, n - 1)
    V, W = gc_decomp(U @ U_prev.adjoint())
    V_prev = sk_algo(V, gates, n - 1)
    W_prev = sk_algo(W, gates, n - 1)
    return V_prev @ W_prev @ V_prev.adjoint() @ W_prev.adjoint() @ U_prev
def get_movie_list():
    """
    Returns:
        A list of populated media.Movie objects

    NOTE(review): the first two entries pass explicit title/summary text
    while the rest supply only imdb_id, trailer URL, and rating —
    presumably media.Movie fills in the missing fields from the IMDB id;
    confirm against the media module.
    """
    # Side effect: progress is printed to stdout while building the list.
    print("Generating movie list...")
    movie_list = []
    movie_list.append(media.Movie(
        title='Four Brothers',
        summary='Mark Wahlberg takes on a crime syndicate with his brothers.',
        trailer_youtube_url='https://www.youtube.com/watch?v=vZPi0K6UoP8',
        rating=5,
        imdb_id='tt0430105'))
    movie_list.append(media.Movie(
        'American Sniper',
        imdb_id='tt2179136',
        trailer_youtube_url='https://www.youtube.com/watch?v=5bP1f_1o-zo',
        rating=5))
    movie_list.append(media.Movie(
        imdb_id='tt0120657',
        trailer_youtube_url='https://www.youtube.com/watch?v=JYUBKcurY88',
        rating=4))
    movie_list.append(media.Movie(
        imdb_id='tt0416449',
        trailer_youtube_url='https://www.youtube.com/watch?v=UrIbxk7idYA',
        rating=5))
    movie_list.append(media.Movie(
        imdb_id='tt1790885',
        trailer_youtube_url='https://www.youtube.com/watch?v=k7R2uVZYebE',
        rating=5))
    movie_list.append(media.Movie(
        imdb_id='tt0119698',
        trailer_youtube_url='https://www.youtube.com/watch?v=4OiMOHRDs14',
        rating=5))
    print("Done!")
    return movie_list
def check_dna_sequence(sequence):
    """Check if a given sequence contains only the allowed letters A, C, T, G."""
    # An empty sequence is not a valid DNA sequence.
    if not len(sequence):
        return False
    return all(ch.upper() in ('A', 'C', 'T', 'G') for ch in sequence)
def test_inner_scalar_mod_args_length():
    """
    Feature: Check the length of input of inner scalar mod.
    Description: The length of input of inner scalar mod should not less than 2.
    Expectation: The length of input of inner scalar mod should not less than 2.
    """
    class Net(Cell):
        def __init__(self):
            super().__init__()
            self.param_a = Parameter(Tensor(5, ms.int32), name="param_a")
            self.mod = P.Mod()

        def construct(self, x):
            # Deliberately call Mod with a single argument to trigger the
            # argument-length validation under test.
            return x + self.param_a + self.mod(5)

    x = Tensor(2, dtype=ms.int32)
    net = Net()
    # The framework is expected to reject the one-argument Mod call.
    with pytest.raises(Exception, match="For 'S-Prim-Mod', the size of input should be 2"):
        ret = net(x)
        print("ret:", ret)
import requests
import struct


def zip_list_files(url):
    """
    Fetch the raw central directory of a remote zip file without
    downloading the whole archive, using HTTP Range requests.

    cd = central directory
    eocd = end of central directory
    refer to zip rfcs (PKWARE APPNOTE) for further information :sob:
    -Erica

    :param url: URL of a zip archive on a server that honors Range requests.
    :return: bytes of the central directory (sequence of CD file headers).
    :raises Exception: if no EOCD signature is found in the file tail.
    """
    # The EOCD record is 22 fixed bytes plus an optional archive comment of
    # at most 65535 bytes, so it is guaranteed to lie within the last
    # 65557 bytes of the file.  A suffix byte-range asks for exactly that tail.
    eocd_blob_response = requests.get(url, headers={"Range": "bytes=-65557"})
    eocd_blob = eocd_blob_response.content
    """
    End of central directory record (EOCD)
    Offset    Bytes    Description[26]
    0           4      End of central directory signature = 0x06054b50
    4           2      Number of this disk
    6           2      Disk where central directory starts
    8           2      Number of central directory records on this disk
    10          2      Total number of central directory records
    12          4      Size of central directory (bytes)
    16          4      Offset of start of central directory, relative to start of archive
    20          2      Comment length (n)
    22          n      Comment
    """
    # The signature 0x06054b50 is stored little-endian on disk.  Use the
    # LAST occurrence so that a comment containing the same byte pattern
    # earlier in the tail cannot be mistaken for the real EOCD.
    eocd_index = eocd_blob.rfind(b"\x50\x4b\x05\x06")
    if eocd_index == -1:
        raise Exception("No zip central directory signature found.")
    # Unpack CD size (EOCD offset 12) and CD start offset (EOCD offset 16),
    # both little-endian unsigned 32-bit ints.
    cd_size, cd_file_offset = struct.unpack_from("<II", eocd_blob, eocd_index + 12)
    cd_range = "bytes=%d-%d" % (cd_file_offset, cd_file_offset + cd_size - 1)
    cd_block_resp = requests.get(url, headers={"Range": cd_range})
    return cd_block_resp.content
import torch
import numpy
import math


def project_ball(tensor, epsilon=1, ord=2):
    """
    Compute the orthogonal projection of the input tensor (as vector) onto the L_ord epsilon-ball.

    **Assumes the first dimension to be batch dimension, which is preserved.**

    Supported orders are 0, 1, 2 and float('inf'); any other value raises
    NotImplementedError.

    NOTE(review): the ord=0 branch uses a module-level ``topk`` helper and the
    ord=1 branch uses ``is_cuda`` and ``cnumpy`` — all defined elsewhere in
    this file/package; confirm their availability before reuse.

    :param tensor: variable or tensor
    :type tensor: torch.autograd.Variable or torch.Tensor
    :param epsilon: radius of ball.
    :type epsilon: float
    :param ord: order of norm
    :type ord: int
    :return: projected vector
    :rtype: torch.autograd.Variable or torch.Tensor
    """
    assert isinstance(tensor, torch.Tensor) or isinstance(tensor, torch.autograd.Variable), 'given tensor should be torch.Tensor or torch.autograd.Variable'
    if ord == 0:
        assert epsilon >= 0
        # "L0 projection": keep only the ceil(epsilon) largest entries of each
        # batch element (per topk) and zero out the remaining positions.
        size = list(tensor.shape)
        flattened_size = int(numpy.prod(size[1:]))
        tensor = tensor.view(-1, flattened_size)
        k = int(math.ceil(epsilon))
        # Never keep all entries: k is capped at one less than the row length.
        k = min(k, tensor.size(1) - 1)
        assert k > 0
        for b in range(tensor.size(0)):
            _, indices = topk(tensor[b], k=k)
            # Indices NOT selected by topk are zeroed in place.
            complement_indices = numpy.delete(numpy.arange(tensor.size(1)), indices.cpu().numpy())
            tensor[b][complement_indices] = 0
        tensor = tensor.view(size)
    elif ord == 1:
        # ! Does not allow differentiation obviously!
        # L1 projection is delegated to a NumPy implementation (cnumpy), so
        # the tensor is detached and round-tripped through host memory.
        cuda = is_cuda(tensor)
        array = tensor.detach().cpu().numpy()
        array = cnumpy.project_ball(array, epsilon=epsilon, ord=ord)
        tensor = torch.from_numpy(array)
        if cuda:
            tensor = tensor.cuda()
    elif ord == 2:
        # L2 projection: scale each batch row onto the ball iff its norm
        # exceeds epsilon; clamp(max=1) leaves rows already inside untouched.
        size = list(tensor.shape)
        flattened_size = int(numpy.prod(size[1:]))
        tensor = tensor.view(-1, flattened_size)
        clamped = torch.clamp(epsilon/torch.norm(tensor, 2, dim=1), max=1)
        clamped = clamped.view(-1, 1)
        tensor = tensor * clamped
        # Restore the original shape; only 2-D and 4-D inputs are reshaped
        # back explicitly here.
        if len(size) == 4:
            tensor = tensor.view(-1, size[1], size[2], size[3])
        elif len(size) == 2:
            tensor = tensor.view(-1, size[1])
    elif ord == float('inf'):
        # L_inf projection is elementwise clipping to [-epsilon, epsilon].
        tensor = torch.clamp(tensor, min=-epsilon, max=epsilon)
    else:
        raise NotImplementedError()
    return tensor
import os
import base64


def decode_json(filepath="stocks.json"):
    """
    Description: Generates a pathname to the service account json file
    needed to access the google calendar.

    If the file already exists it is reused; otherwise its contents are
    reconstructed from the base64-encoded GOOGLE_SERVICE_CREDS
    environment variable.  Exits the process if no credentials are set.
    """
    # Reuse an existing credentials file when one is already on disk.
    if os.path.exists(filepath):
        return filepath
    encoded_creds = os.environ.get("GOOGLE_SERVICE_CREDS")
    if encoded_creds is None:
        print("CREDENTIALS NOT AVAILABLE")
        exit(1)
    # Decode the base64 payload back into the original JSON text.
    decoded_string = base64.b64decode(encoded_creds).decode("ascii")
    # Persist the decoded JSON so later calls hit the fast path above.
    with open(filepath, "w") as service_file:
        service_file.write(decoded_string)
    return filepath
def load_model(filename):
    """
    Loads the specified Keras model from a file.

    Thin wrapper: delegates path resolution to the module-level
    ``__construct_path`` helper and deserialization to
    ``load_keras_model`` (both defined elsewhere in this file).

    Parameters
    ----------
    filename : string
        The name of the file to read from

    Returns
    -------
    Keras model
        The Keras model loaded from a file
    """
    return load_keras_model(__construct_path(filename))
import os


def get_ffmpeg_executable_path(ffmpeg_folder_path):
    """
    Get's ffmpeg's executable path for current system, given the folder.

    :param ffmpeg_folder_path: Folder path for the ffmpeg and ffprobe executable.
    :return: ffmpeg executable path as absolute path.
    """
    # Pick the platform-specific executable name, then anchor it to the
    # supplied folder.
    executable_name = FFMPEG_EXECUTABLE_OS_DICT[get_current_os()]
    return os.path.join(ffmpeg_folder_path, executable_name)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.