| content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M) |
|---|---|---|
from typing import Any
from typing import Dict
def reduce(t: Any, options: Dict) -> Any:
"""recursively applies itself to a nested data structure. if given a list of dicts, will reduce those down to a
list of a single dict, whose values are ResultSets of the values from the dicts in the original list
Example:
[
{
'hello': 1
},
{
'hello': 'one',
'world': 'two'
},
{
'hello':3,
'world': None
}
]
Becomes:
{
'hello': ResultSet([1, 'one', 3]),
    'world': ResultSet(['two', None])
    }
:param t: the input data to operate on
:param options: a dictionary of options to be passed around where needed
:return: the original data `t`, or a list containing a single Dict whose values are ResultSets
"""
options = get_opts(options)
if isinstance(t, list):
# Handle the 'list-of-dicts' case
dicts = [v for v in t if isinstance(v, dict)]
remainder = [v for v in t if not isinstance(v, dict)]
reduced_dict: Dict[Any, ResultSet] = {}
if dicts:
for d in dicts:
for k in d:
# This is inefficient, i know, but it covers the case where keys only exist in some of the dicts in
# the list
reduced_dict.setdefault(k, ResultSet()).append(d[k])
# Now we want to replace original dicts in the list with the reduced dict. We need to do the replacement in
# place so that we can preserve custom list subclasses like HashTable
t.clear()
t.extend(remainder)
t.append(reduced_dict)
# recurse into all lists, regardless of what's in them
for i, v in enumerate(t):
t[i] = reduce(v, options)
if isinstance(t, dict):
for k, v in t.items():
t[k] = reduce(v, options)
return t | 43c3f4a38314830b3619692ed14f63ed00aa8f27 | 3,633,900 |
from django.core.exceptions import PermissionDenied
from django.shortcuts import render
from django.utils import timezone
from rr.models import Certificate  # model import path inferred from the :model:`rr.Certificate` reference below
def certificate_admin_list(request):
"""
Displays a list of :model:`rr.Certificate`
including old certificates and weak keys.
Only available for super users.
**Context**
``object_list``
List of :model:`rr.Certificate`.
**Template:**
    :template:`rr/certificate_admin_list.html`
"""
if not request.user.is_superuser:
raise PermissionDenied
weak_certificates = Certificate.objects.filter(end_at=None, sp__end_at=None,
key_size__lt=2048).order_by('key_size')
expired_certificates = Certificate.objects.filter(end_at=None, sp__end_at=None,
valid_until__lte=timezone.now()
).order_by('valid_until')
return render(request, "rr/certificate_admin_list.html",
{'weak_certificates': weak_certificates,
'expired_certificates': expired_certificates}) | 0d038d0df6bdae3abcad8f3b85516547a9b202cc | 3,633,901 |
from typing import Dict
from typing import Any
def chaosmonkey_enabled(
base_url: str,
headers: Dict[str, Any] = None,
timeout: float = None,
verify_ssl: bool = True,
configuration: Configuration = None,
secrets: Secrets = None,
) -> bool:
"""
Enquire whether Chaos Monkey is enabled on the
specified service.
"""
response = api.call_api(
base_url=base_url,
api_endpoint="chaosmonkey/status",
headers=headers,
timeout=timeout,
verify=verify_ssl,
configuration=configuration,
secrets=secrets,
)
if response.status_code == codes.ok:
return True
elif response.status_code == codes.service_unavailable:
return False
else:
raise FailedActivity(f"ChaosMonkey status enquiry failed: {response.text}") | a057eaaaaff8c79b93ae0e4b0e057cbd59c2c9ce | 3,633,902 |
import numpy as np
def compute_norm(A, scale=None):
"""
Compute the norm of the *A* array, which contains spin directions in
Spherical or Cartesian coordinates, e.g.
A = [ A_theta0 A_phi0 A_theta1 A_phi1 ... A_thetaN A_phiN]
If necessary, scale the norm by the array size
"""
y = np.linalg.norm(A)
if scale:
y = y / len(A)
return y | 03c180dda598e14382fc0cd8f59292db73186cec | 3,633,903 |
def _matrixify(mat):
"""If `mat` is a Matrix or is matrix-like,
return a Matrix or MatrixWrapper object. Otherwise
`mat` is passed through without modification."""
if getattr(mat, 'is_Matrix', False) or getattr(mat, 'is_MatrixLike', False):
return mat
if not(getattr(mat, 'is_Matrix', True) or getattr(mat, 'is_MatrixLike', True)):
return mat
shape = None
if hasattr(mat, 'shape'): # numpy, scipy.sparse
if len(mat.shape) == 2:
shape = mat.shape
elif hasattr(mat, 'rows') and hasattr(mat, 'cols'): # mpmath
shape = (mat.rows, mat.cols)
if shape:
return _MatrixWrapper(mat, shape)
return mat | 87065a412fce51f6a8525b42089db18dc6da786e | 3,633,904 |
def format_value_list(value, tagname=None):
"""
    Convert an OSM tag value to a nice HTML representation.
    :param value: osm tag value as ';' separated string
    :param tagname: osm tag key, used to decide badge/list rendering
    :return: html
"""
value = value.strip()
if not value:
return ''
parts = [part.strip() for part in value.split(';')]
make_badge = False
make_list = False
if tagname:
make_badge = tagname in BADGE_TAGS
make_list = tagname in LIST_TAGS
context = {
'values': parts,
'make_badge': make_badge,
'make_list': make_list}
return render_to_string(
'buschenschank/includes/tag_value_list.html',
context=context) | bbe02729703652ae2c9f0e196c34d5b8abe4ca2b | 3,633,905 |
def get_arg_name(node):
"""
    Args:
        node: AST node carrying either an ``id`` or an ``arg`` attribute.
    Returns:
        The node's ``id`` if it is set, otherwise its ``arg``.
"""
name = node.id
if name is None:
return node.arg
else:
return name | fecee0dfa53bbb4e1d520e13e5e2363e9035454b | 3,633,906 |
import tensorflow as tf
def tf_focal_loss(y_true, y_pred, alpha=.5, gamma=2.):
"""
Straightforward implementation of focal loss.
See https://arxiv.org/abs/1708.02002
# Arguments:
y_true: actual labels {0., 1.}
    y_pred: predicted probabilities in (0., 1.)
alpha: see paper
gamma: see paper
"""
assert alpha >= 0.
assert alpha <= 1.
assert gamma >= 0.
# Compute the complementaries
y_true_ = tf.subtract(1., y_true)
y_pred_ = tf.subtract(1., y_pred)
alpha_ = tf.subtract(1., alpha)
# When y is 1
loss1 = tf.multiply(y_true, tf.log(y_pred))
loss1 = tf.multiply(tf.pow(y_pred_, gamma), loss1)
loss1 = tf.multiply(alpha, loss1)
loss1 = -loss1
# When y is 0
loss2 = tf.multiply(y_true_, tf.log(y_pred_))
loss2 = tf.multiply(tf.pow(y_pred, gamma), loss2)
loss2 = tf.multiply(alpha_, loss2)
loss2 = -loss2
return tf.add(loss1, loss2) | 43df32a74ae78fa20f6116eb8de8b3dd5d3c486c | 3,633,907 |
import tensorflow as tf
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.models import Model
def two_armed_c3d(left_model, right_model):
"""
C3D architecture with two arms. One of the arms will focus in Doppler videos while the other will focus in
non-Doppler ones.
"""
left_model_input, left_model_output = left_model.layers[0].input, left_model.layers[-1].output
right_model_input, right_model_output = right_model.layers[0].input, right_model.layers[-1].output
concatenated = tf.keras.layers.concatenate([left_model_output, right_model_output])
x = Dense(4096, activation='relu', name='fc6')(concatenated)
x = Dropout(.5)(x)
x = Dense(4096, activation='relu', name='fc7')(x)
x = Dropout(.5)(x)
output_layer = Dense(2, activation='softmax', name='fc8')(x)
return Model([left_model_input, right_model_input], output_layer) | 58ffd61f810bc73d2537afa8b2a9d838122aa1c8 | 3,633,908 |
import torch
def bbox_iou(box1, box2, x1y1x2y2=True):
"""
Returns the IoU of two bounding boxes
"""
if x1y1x2y2:
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]
else:
# Transform from center and width to exact coordinates
b1_x1, b1_x2 = box1[:, 0] - box1[:, 2] / 2, box1[:, 0] + box1[:, 2] / 2
b1_y1, b1_y2 = box1[:, 1] - box1[:, 3] / 2, box1[:, 1] + box1[:, 3] / 2
b2_x1, b2_x2 = box2[:, 0] - box2[:, 2] / 2, box2[:, 0] + box2[:, 2] / 2
b2_y1, b2_y2 = box2[:, 1] - box2[:, 3] / 2, box2[:, 1] + box2[:, 3] / 2
# get the coordinates of the intersection rectangle
inter_rect_x1 = torch.max(b1_x1, b2_x1)
inter_rect_y1 = torch.max(b1_y1, b2_y1)
inter_rect_x2 = torch.min(b1_x2, b2_x2)
inter_rect_y2 = torch.min(b1_y2, b2_y2)
# Intersection area
inter_area = torch.clamp(inter_rect_x2 - inter_rect_x1, 0) * torch.clamp(inter_rect_y2 - inter_rect_y1, 0)
# Union Area
b1_area = (b1_x2 - b1_x1) * (b1_y2 - b1_y1)
b2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1)
# print(box1, box1.shape)
# print(box2, box2.shape)
return inter_area / (b1_area + b2_area - inter_area + 1e-16) | 22ee7010a2cb66e31a341c78db87eeb4a7b542b9 | 3,633,909 |
import flask
from flask import make_response, redirect
def login():
"""
check user credentials
if next=... parameter is present redirect on success
"""
form = flask.request.form
if flask.request.method == "POST" and form.get("login", False):
password = form.get("password", None)
email = form.get("email", None)
if password is None or email is None:
flask.flash("Please enter your email address and password")
else:
try:
sessionID = session_management.login(email, password)
details = user_details(sessionID)
except APIException as e:
flask.flash(e.message)
else:
response = make_response(redirect(flask.request.args.get("next") or flask.url_for("account.whatnext")))
set_session_data(details, sessionID, response)
return response
return flask.render_template("account/login.html") | 8284a582ad54990a897419888dd6aa3c72d2868c | 3,633,910 |
import sys
import types
from urllib import request
def load_module(url):
"""
Load module from url (simplest way)
https://python3-cookbook.readthedocs.io/zh_CN/latest/c10/p11_load_modules_from_remote_machine_by_hooks.html
"""
u = request.urlopen(url)
source = u.read().decode("utf-8")
mod = sys.modules.setdefault(url, types.ModuleType(url))
code = compile(source, url, "exec")
mod.__file__ = url
mod.__package__ = ""
exec(code, mod.__dict__)
return mod | e01504ade6340dfbe290d92e66ddbc6b7de5a02f | 3,633,911 |
def get_remove_string(packages, cuda_version):
"""Creates pip remove string for given cuda version and package list"""
# Remove only these which version we want to change
ret = for_all_pckg(packages, lambda pckg: pckg.get_uninstall_names(cuda_version))
return " ".join(ret) | 885950554a716d89f37c1eb7241b068be8932f5d | 3,633,912 |
import sys
import re
import os
def safe_file_name(name, posix=None):
"""
:param str name:
:param bool posix:
:rtype: str
"""
if posix is None:
posix = sys.platform != 'win32'
if posix:
return re.sub(r'[\x00/]', '', name)
else:
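        # Windows: strip control and reserved filename characters, drop trailing dots/spaces, and cap the length at 255.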
        name = re.sub(r'[\x00-\x1f<>:"/\\|?*]', '', name).rstrip('. ')[:255]
if os.path.splitext(name)[0].lower() in WIN32_BAD_NAMES:
name = ''
return name | c9c43a00ad28107056606174c3688cf2fea4775e | 3,633,913 |
def h_bond_basic_result_collection(monkeypatch) -> BasicResultCollection:
"""Create a basic collection which can be filtered."""
# Create a molecule which contains internal h-bonds.
h_bond_molecule = Molecule.from_smiles(r"O\C=C/C=O")
h_bond_molecule.add_conformer(
np.array(
[
[0.5099324, -1.93893933, 0.62593794],
[-0.11678398, -0.78811455, 0.23294619],
[0.54772449, 0.32974607, -0.06212188],
[2.01855326, 0.32851657, 0.03836611],
[2.68037677, -0.68459523, -0.15270394],
[1.47464514, -1.82358289, 0.65648550],
[-1.1913352, -0.90038794, 0.19441436],
[0.04801793, 1.23909473, -0.37244973],
[2.49137521, 1.29117954, 0.29548031],
]
)
* unit.angstrom
)
smiles = {
"http://localhost:442": [h_bond_molecule],
"http://localhost:443": [_smiles_to_molecule("CO")],
}
return mock_basic_result_collection(smiles, monkeypatch) | 1cadd68723b747c00e09caa1dd11f387f946f3ef | 3,633,914 |
def _try_path(basedir: P, rel: str):
"""
    Returns a (file name, content) tuple for basedir/rel; content is None if the file is not found. Raises on empty names or directories.
"""
if not rel:
raise RuntimeError("Got invalid filename (empty string).")
if rel[0] == "/":
full_path = P(rel)
else:
full_path = basedir / rel
if full_path.is_dir():
raise RuntimeError("Attempted to import a directory")
if not full_path.is_file():
return full_path.name, None
with open(full_path) as f:
return full_path.name, f.read() | fd3a6c2866a2c69ed825c8e600257a899df239b5 | 3,633,915 |
import pandas as pd
import zarr
from warnings import warn
def read_dataframe_legacy(dataset: zarr.Array) -> pd.DataFrame:
"""Reads old format of dataframes"""
# NOTE: Likely that categoricals need to be removed from uns
warn(
f"'{dataset.name}' was written with a very old version of AnnData. "
"Consider rewriting it.",
OldFormatWarning,
)
df = pd.DataFrame(_from_fixed_length_strings(dataset[()]))
df.set_index(df.columns[0], inplace=True)
return df | c790e2266f1331423f1308634c2022c28b2028b5 | 3,633,916 |
import ast
def parse_code_str(code_str) -> ast.AST:
"""Parses code string in a computation, which can be incomplete.
    Whenever something causes an error while parsing, it should be handled here.
"""
if code_str.endswith(":"):
code_str += "pass"
try:
return ast.parse(code_str)
except IndentationError:
return ast.parse(code_str.strip()) | ed0c2101dd38ca5e2fc390db3ba94b7fe13ff44d | 3,633,917 |
def fibonacci_list(n):
"""
    Cache intermediate results in a list.
    :param n: n > 0
    :return: the n-th Fibonacci number
"""
lst = [0, 1, 1]
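    # Grow the cache until it contains fib(n); each new entry is the sum of the previous two.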
while len(lst) < n + 1:
ln = len(lst)
lst.append(lst[ln - 1] + lst[ln - 2])
return lst[0] if n < 0 else lst[n] | 02890bd5877c49d5e4d7f053a64dd9cfdeaa7d7d | 3,633,918 |
from pathlib import Path
def install(parameters):
"""Build capnpc-java from source."""
drydock_src = parameters['//base:drydock'] / get_relpath()
if (drydock_src / 'capnpc-java').exists():
return
def get_var_path(name):
cmd = ['pkg-config', '--variable=%s' % name, 'capnp']
path = scripts.execute(cmd, capture_stdout=True).stdout
path = Path(path.decode('utf8').strip())
return scripts.ensure_directory(path)
with scripts.directory(drydock_src):
scripts.execute(['make'])
with scripts.using_sudo():
scripts.cp(
'capnpc-java',
get_var_path('exec_prefix') / 'bin',
)
scripts.cp(
'compiler/src/main/schema/capnp/java.capnp',
get_var_path('includedir') / 'capnp',
) | f5106f3552315ea388e8bfb895694ef57ca71a5a | 3,633,919 |
def merge(line):
"""
Function that merges a single row or column in 2048.
"""
    result = [0] * len(line)
    newline = []
    result2 = [0] * len(line)
    count = 0
    count2 = 0
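    # Slide all non-zero tiles to the front, preserving their order.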
for num in line:
if num != 0:
result[count] = num
count += 1
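    # Merge equal adjacent tiles once, left to right; the right tile of a merged pair is zeroed.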
for num in range(len(result)):
if num != len(result)-1 and result[num] == result[num+1]:
newline.append(result[num]+result[num+1])
result[num+1] = 0
else:
newline.append(result[num])
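    # Slide the merged tiles to the front again to close the gaps created by merging.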
for num in newline:
if num != 0:
result2[count2] = num
count2 += 1
return result2 | 5992c1bc48af124b069fd31419fec5b6edd5f3ab | 3,633,920 |
def _perc(a, b):
"""
    Utility function: computes the ratio of "a" to "b", returning the percentage rounded to two decimal places
"""
return 'N/A' if b == 0 else round(100.0 * a / b, 2) | aa0f4c0fa09dc77b422b3779d0e9e2484b0df348 | 3,633,921 |
def shift_left_bit_length(x: int) -> int:
""" Shift 1 left bit length of x
:param int x: value to get bit length
:returns: 1 shifted left bit length of x
"""
return 1 << (x - 1).bit_length() | 854e79309125c60c6e5975685078809fb4c016a4 | 3,633,922 |
import datasets
import numpy as np
from tqdm import tqdm
def testLoadedWithoutModeRegModel(opt, model, testset,reg):
"""Tests a model over the given testset."""
model.eval()
test_queries = testset.get_test_queries()
all_imgs = []
all_captions = []
all_queries = []
all_queries1=[]
all_target_captions = []
if test_queries:
# compute test query features
all_imgs = datasets.Features33K().Get_all_imagesWithoutModelTrig()
all_captions = datasets.Features33K().Get_all_captionsWithoutModelTrig()
all_queries = datasets.Features33K().Get_all_queriesWithoutModelTrig()
all_target_captions = datasets.Features33K().Get_target_captionsWithoutModelTrig()
else:
# use training queries to approximate training retrieval performance
all_imgs = datasets.Features172K().Get_all_imagesWithoutModelTrig()[:10000]
all_captions = datasets.Features172K().Get_all_captionsWithoutModelTrig()[:10000]
all_queries = datasets.Features172K().Get_all_queriesWithoutModelTrig()[:10000]
all_target_captions = datasets.Features172K().Get_all_captionsWithoutModelTrig()[:10000]
#all_queries=all_queries1
# feature normalization
for i in range(all_queries.shape[0]):
all_queries[i, :] /= np.linalg.norm(all_queries[i, :])
for i in range(all_imgs.shape[0]):
all_imgs[i, :] /= np.linalg.norm(all_imgs[i, :])
all_queries =reg.predict(all_queries)
all_queries= np.array(all_queries)
# match test queries to target images, get nearest neighbors
nn_result = []
#euc_new_nn_result=[]
for i in tqdm(range(all_queries.shape[0])):
sims = all_queries[i:(i+1), :].dot(all_imgs.T)
#euc_new_sims=np.sum(abs(all_imgs-all_queries[i, :]),axis=1)
if test_queries:
sims[0, test_queries[i]['source_img_id']] = -10e10 # remove query image
#euc_new_sims[test_queries[i]['source_img_id']]=10e10
nn_result.append(np.argsort(-sims[0, :])[:110])
#euc_new_nn_result.append(np.argsort(euc_new_sims)[:110])
# compute recalls
out = []
nn_result = [[all_captions[nn] for nn in nns] for nns in nn_result]
#euc_new_nn_result = [[all_captions[nn] for nn in nns] for nns in euc_new_nn_result]
for k in [1, 5, 10, 50, 100]:
r = 0.0
for i, nns in enumerate(nn_result):
if all_target_captions[i] in nns[:k]:
r += 1
r /= len(nn_result)
#out += [('recall_top' + str(k) + '_correct_composition', r)]
out.append(str(k) + ' ---> '+ str(r*100))
# r = 0.0
# for i, nns in enumerate(euc_new_nn_result):
# if all_target_captions[i] in nns[:k]:
# r += 1
# r /= len(euc_new_nn_result)
# #out += [('recall_top' + str(k) + '_correct_composition', r)]
# out.append('EUC:' +str(k) + ' ---> '+ str(r*100))
if opt.dataset == 'mitstates':
r = 0.0
for i, nns in enumerate(nn_result):
if all_target_captions[i].split()[0] in [c.split()[0] for c in nns[:k]]:
r += 1
r /= len(nn_result)
out += [('recall_top' + str(k) + '_correct_adj', r)]
r = 0.0
for i, nns in enumerate(nn_result):
if all_target_captions[i].split()[1] in [c.split()[1] for c in nns[:k]]:
r += 1
r /= len(nn_result)
out += [('recall_top' + str(k) + '_correct_noun', r)]
return out | 082e42fdc9bce1c2542530eb805472c4286b70a7 | 3,633,923 |
def samPareto(a=3.0, size=None): # real signature unknown; restored from __doc__
"""pareto(a, size=None)
Draw samples from a Pareto II or Lomax distribution with
specified shape.
The Lomax or Pareto II distribution is a shifted Pareto
distribution. The classical Pareto distribution can be
obtained from the Lomax distribution by adding 1 and
multiplying by the scale parameter ``m`` (see Notes). The
smallest value of the Lomax distribution is zero while for the
classical Pareto distribution it is ``mu``, where the standard
Pareto distribution has location ``mu = 1``. Lomax can also
be considered as a simplified version of the Generalized
Pareto distribution (available in SciPy), with the scale set
to one and the location set to zero.
The Pareto distribution must be greater than zero, and is
unbounded above. It is also known as the "80-20 rule". In
this distribution, 80 percent of the weights are in the lowest
20 percent of the range, while the other 20 percent fill the
remaining 80 percent of the range.
Parameters
----------
shape : float, > 0.
Shape of the distribution.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
See Also
--------
scipy.stats.distributions.lomax.pdf : probability density function,
distribution or cumulative density function, etc.
scipy.stats.distributions.genpareto.pdf : probability density function,
distribution or cumulative density function, etc.
Notes
-----
The probability density for the Pareto distribution is
.. math:: p(x) = \frac{am^a}{x^{a+1}}
where :math:`a` is the shape and :math:`m` the scale.
The Pareto distribution, named after the Italian economist
Vilfredo Pareto, is a power law probability distribution
useful in many real world problems. Outside the field of
economics it is generally referred to as the Bradford
distribution. Pareto developed the distribution to describe
the distribution of wealth in an economy. It has also found
use in insurance, web page access statistics, oil field sizes,
and many other problems, including the download frequency for
projects in Sourceforge [1]_. It is one of the so-called
"fat-tailed" distributions.
References
----------
.. [1] Francis Hunt and Paul Johnson, On the Pareto Distribution of
Sourceforge projects.
.. [2] Pareto, V. (1896). Course of Political Economy. Lausanne.
.. [3] Reiss, R.D., Thomas, M.(2001), Statistical Analysis of Extreme
Values, Birkhauser Verlag, Basel, pp 23-30.
.. [4] Wikipedia, "Pareto distribution",
http://en.wikipedia.org/wiki/Pareto_distribution
Examples
--------
Draw samples from the distribution:
>>> a, m = 3., 2. # shape and mode
>>> s = (np.random.pareto(a, 1000) + 1) * m
Display the histogram of the samples, along with the probability
density function:
>>> import matplotlib.pyplot as plt
>>> count, bins, _ = plt.hist(s, 100, normed=True)
>>> fit = a*m**a / bins**(a+1)
>>> plt.plot(bins, max(count)*fit/max(fit), linewidth=2, color='r')
>>> plt.show()
"""
return rand.pareto(a=a, size=size) | 3a931c801c0af856a339aa89014284a12ab7dc2c | 3,633,924 |
from pyeda.boolalg.bfarray import farray
def ripple_carry_add(A, B, cin=0):
"""Return symbolic logic for an N-bit ripple carry adder."""
if len(A) != len(B):
raise ValueError("expected A and B to be equal length")
ss, cs = list(), list()
for i, a in enumerate(A):
c = (cin if i == 0 else cs[i-1])
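        # Full-adder per bit: sum = a XOR b XOR carry-in; carry-out = majority(a, b, carry-in).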
ss.append(a ^ B[i] ^ c)
cs.append(a & B[i] | a & c | B[i] & c)
return farray(ss), farray(cs) | 730d440da2eeb8846dd86a51ef0b9ff650c71f24 | 3,633,925 |
def svn_stream_invoke_readline_fn(*args):
"""svn_stream_invoke_readline_fn(svn_stream_readline_fn_t _obj, void * baton, char const * eol, apr_pool_t pool) -> svn_error_t"""
return _core.svn_stream_invoke_readline_fn(*args) | 980c11486a8ed13289e4ba28763a6ba6978d950d | 3,633,926 |
import argparse
def cli():
"""Parse and return command line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument(
'file',
nargs='?',
default='/home/sam/notes/2020-08-28_5.md',
help='reading list file for reading/writing markdown notes'
)
args = parser.parse_args()
return args | 80a6ee8ff618aa9cfaadfddab7daccff3fe7fa1e | 3,633,927 |
from typing import List
def get_function_contents_by_name(lines: List[str], name: str):
"""
Extracts a function from `lines` of segmented source code with the name `name`.
Args:
lines (`List[str]`):
            Source code of a script separated by line.
name (`str`):
The name of the function to extract. Should be either `training_function` or `main`
"""
if name != "training_function" and name != "main":
raise ValueError(f"Incorrect function name passed: {name}, choose either 'main' or 'training_function'")
good_lines, found_start = [], False
for line in lines:
if not found_start and f"def {name}" in line:
found_start = True
good_lines.append(line)
continue
if found_start:
if name == "training_function" and "def main" in line:
return good_lines
if name == "main" and "if __name__" in line:
return good_lines
good_lines.append(line) | 60239b0063e83a71641d85194f72a9cc61221177 | 3,633,928 |
import ctypes
from ctypes.util import find_library
import errno
def create_linux_process_time():
"""
Uses :mod:`ctypes` to create a :func:`time.process_time`
on the :samp:`'Linux'` platform.
:rtype: :obj:`function`
:return: A :func:`time.process_time` equivalent.
"""
CLOCK_PROCESS_CPUTIME_ID = 2 # time.h
clockid_t = ctypes.c_int
time_t = ctypes.c_long
class timespec(ctypes.Structure):
_fields_ = [
('tv_sec', time_t), # seconds
('tv_nsec', ctypes.c_long) # nanoseconds
]
_clock_gettime = ctypes.CDLL(
find_library('rt'), use_errno=True).clock_gettime
_clock_gettime.argtypes = [clockid_t, ctypes.POINTER(timespec)]
def process_time():
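        # Read the per-process CPU-time clock and convert (seconds, nanoseconds) to float seconds.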
tp = timespec()
if _clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ctypes.byref(tp)) < 0:
err = ctypes.get_errno()
msg = errno.errorcode[err]
if err == errno.EINVAL:
msg += (
"The clk_id (4) specified is not supported on this system")
raise OSError(err, msg)
return tp.tv_sec + tp.tv_nsec * 1e-9
return process_time | d1c479e059ad17c8377db0f6012a7e8ab55b1905 | 3,633,929 |
import json
def get_json(obj, indent=4):
"""
Get formatted JSON dump string
"""
return json.dumps(obj, sort_keys=True, indent=indent) | be1376fcb9e820cc5012f694ca830ba0c52b5fef | 3,633,930 |
def to_kelvin(value, initial_unit = "c"):
"""Convert temperature units to Kelvin.
This is an internal intermediate method to convert all
provided values to the same intermediate unit, which
greatly simplifies exposed calculation methods.
Internally, this is also mainly used to simplify
ideal gas calculations in the gas module.
"""
# Validate that the provided unit is a usable one.
    initial_unit = initial_unit.replace(" ", "").lower()
    if initial_unit not in TEMP_UNITS:
raise ValueError("That is not a valid unit of temperature.")
# Return values for the different cases.
if initial_unit == "c":
return value + 273.15
if initial_unit == "f":
return (value - 32) * (5 / 9) + 273.15
# Otherwise, the input value is simply K, so return the input.
return value | e0695e33b253c4d777f9d54b37bf593fdaacd239 | 3,633,931 |
def create_pooling_layer(pooling_type,
window_size,
stride_size,
num_gpus,
default_gpu_id):
"""create pooling layer"""
scope = "pooling/{0}".format(pooling_type)
if pooling_type == "max":
pooling_layer = MaxPooling(num_gpus=num_gpus, default_gpu_id=default_gpu_id, scope=scope)
elif pooling_type == "max_3d":
pooling_layer = MaxPooling3D(window_size=window_size, stride_size=stride_size,
num_gpus=num_gpus, default_gpu_id=default_gpu_id, scope=scope)
elif pooling_type == "avg":
pooling_layer = AveragePooling(num_gpus=num_gpus, default_gpu_id=default_gpu_id, scope=scope)
elif pooling_type == "avg_3d":
pooling_layer = AveragePooling3D(window_size=window_size, stride_size=stride_size,
num_gpus=num_gpus, default_gpu_id=default_gpu_id, scope=scope)
else:
raise ValueError("unsupported pooling type {0}".format(pooling_type))
return pooling_layer | f4196ed021bbda6984d95cf8385df9be778dc62c | 3,633,932 |
import numpy as np
import matplotlib.pyplot as plt
def PlotConfusionMatrix(cm, classes,
normalize=False,
title=None,
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if not title:
if normalize:
title = 'Normalized confusion matrix'
else:
title = 'Confusion matrix, without normalization'
# Compute confusion matrix
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
fig = plt.figure(figsize=(8, 5))
im = plt.imshow(cm, interpolation='nearest', cmap=cmap)
ax = fig.get_axes()[0]
ax.figure.colorbar(im, ax=ax)
# We want to show all ticks...
ax.set(xticks=np.arange(cm.shape[1]),
yticks=np.arange(cm.shape[0]),
# ... and label them with the respective list entries
xticklabels=classes, yticklabels=classes,
xlim=(-.5, len(classes) - .5),
ylim=(len(classes) - .5, -.5),
title=title,
ylabel='True label',
xlabel='Predicted label')
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
# Loop over data dimensions and create text annotations.
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
ax.text(j, i, format(cm[i, j], fmt),
ha="center", va="center",
color="white" if cm[i, j] > thresh else "black")
fig.tight_layout()
return ax | 0a15a4e7b0cf8bacef2a737d8a59835fa9551fd4 | 3,633,933 |
import numpy as np
def gen_phi(n):
    """
    Generates an n-sized random i.i.d. sample. The original uniform U(-1,1) draw via the inverse cdf
    transformation is commented out below; the active code samples a chi-square distribution with 2
    degrees of freedom.
    :param n: size of the sample
    :return: an n-length array
    """
#cdf_x = np.random.rand(n)
#x = 2 * cdf_x - 1
x = np.random.chisquare(2, n)
return x | f7070c891fb4ae002d92cc10fa8893923d749677 | 3,633,934 |
from urllib import parse as urlparse
def parse_url(url):
"""
Parses as RawSocket URL into it's components and returns a tuple:
- ``isSecure`` is a flag which is ``True`` for ``rss`` URLs.
- ``host`` is the hostname or IP from the URL.
and for TCP/IP sockets:
- ``tcp_port`` is the port from the URL or standard port derived from
scheme (``rs`` => ``80``, ``rss`` => ``443``).
or for Unix domain sockets:
- ``uds_path`` is the path on the local host filesystem.
:param url: A valid RawSocket URL, i.e. ``rs://localhost:9000`` for TCP/IP sockets or
``rs://unix:/tmp/file.sock`` for Unix domain sockets (UDS).
:type url: str
:returns: A 3-tuple ``(isSecure, host, tcp_port)`` (TCP/IP) or ``(isSecure, host, uds_path)`` (UDS).
:rtype: tuple
"""
parsed = urlparse.urlparse(url)
if parsed.scheme not in ["rs", "rss"]:
raise Exception("invalid RawSocket URL: protocol scheme '{}' is not for RawSocket".format(parsed.scheme))
if not parsed.hostname or parsed.hostname == "":
raise Exception("invalid RawSocket URL: missing hostname")
if parsed.query is not None and parsed.query != "":
raise Exception("invalid RawSocket URL: non-empty query '{}'".format(parsed.query))
if parsed.fragment is not None and parsed.fragment != "":
raise Exception("invalid RawSocket URL: non-empty fragment '{}'".format(parsed.fragment))
if parsed.hostname == "unix":
# Unix domain sockets sockets
# rs://unix:/tmp/file.sock => unix:/tmp/file.sock => /tmp/file.sock
fp = parsed.netloc + parsed.path
uds_path = fp.split(':')[1]
# note: we don't interpret "uds_path" in any further way: it needs to be
# a path on the local host with a listening Unix domain sockets at the other end ..
return parsed.scheme == "rss", parsed.hostname, uds_path
else:
# TCP/IP sockets
if parsed.path is not None and parsed.path != "":
raise Exception("invalid RawSocket URL: non-empty path '{}'".format(parsed.path))
if parsed.port is None or parsed.port == "":
if parsed.scheme == "rs":
tcp_port = 80
else:
tcp_port = 443
else:
tcp_port = int(parsed.port)
if tcp_port < 1 or tcp_port > 65535:
raise Exception("invalid port {}".format(tcp_port))
return parsed.scheme == "rss", parsed.hostname, tcp_port | 2582ce00fc2aa3ee719c2ccb5d8949ef693d8d37 | 3,633,935 |
def build_operation(id, path, args, command="set", table="block"):
"""
Data updates sent to the submitTransaction endpoint consist of a sequence of "operations". This is a helper
function that constructs one of these operations.
"""
if isinstance(path, str):
path = path.split(".")
return {"id": id, "path": path, "args": args, "command": command, "table": table} | 74656a7568a6d705c9c24c091660b93d16977512 | 3,633,936 |
import string
import secrets
def generate_password(length=50):
"""Generate a password."""
alphabet = string.ascii_letters + string.digits
return "".join(secrets.choice(alphabet) for i in range(length)) | 05b1442ef88c3a8f87c49a03f21463a61c91647b | 3,633,937 |
import numpy as np
def visual_estimator(possible_heights, visual_sigma, visual_height):
"""Computes the visual estimate.
Parameters
----------
visual_sigma: ``float``
Standard deviation of the visual estimate.
possible_heights: ``ndarray``
Numpy array containing all the possible heights of
the stimulus.
visual_height: ``float``
Height of the visual stimulus.
Returns
----------
visual_estimate: ``ndarray``
Numpy array containing the estimated height of the visual
stimulus.
"""
pheights = possible_heights
height = visual_height
sigma = visual_sigma
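    # Gaussian likelihood of every candidate height given the observed visual height.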
visual_estimate = (1 / np.sqrt(2 * np.pi * np.square(sigma))) * np.exp(
-1 * (((pheights - height) ** 2) / (2 * np.square(sigma)))
)
return visual_estimate | c0338cf61f3a436b6843a10d55394e89a05018b7 | 3,633,938 |
import numpy as np
def whiten(x, source=None):
"""Mean and sd normalizes x column-wise relative to summary statistics
from source. Uses x itself by default"""
if source is None: source = x
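    # Column-wise statistics of source are broadcast across the rows of x.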
means = np.mean(source, axis=0)
stddevs = np.std(source, axis=0)
return (x - means[None, :]) / stddevs[None, :] | a566c9239320b8f308be6ea96e5188061181a1a0 | 3,633,939 |
import numpy as np
import scipy
def poly_smooth(x, y, aperture, axis=0, N=3):
"""Smoothar *data* med hjalp av *N* te gradens polynom, langs med
*axis* [=0].
Invariabler
*x*
x-varden for data som skall smoothas
*y*
y-varden for data som skall smoothas
*aperture*
hur manga sampel som skall medlvardesbildas for varje punkt i
smoothingen
*axis*
axel som skall smoothas langs
*N*
Vilken grad skallpolynomet ha
"""
newdata = np.empty_like(y)
size = x.shape[axis] - 1
if aperture % 2 == 0:
odd = 0
else:
odd = 1
wid = aperture // 2
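    # For every sample, fit an N-th degree polynomial over an aperture-wide window clamped to the array bounds.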
for i in range(x.shape[axis]):
start = min(max(i - wid, 0), max(0, size - aperture))
stop = max(min(i + wid + odd, size), aperture)
assert stop - start == aperture
poly = scipy.polyfit(x[start:stop], y[start:stop], N)
newdata[i] = scipy.polyval(poly, x[i])
return newdata | e5f1b9e71c04d92fee647d94d92e46acee121513 | 3,633,940 |
import torch
import numpy as np
def gen_img_kpts(image, human_model, pose_model, human_sort, det_dim=416, num_peroson=2):
"""
:param image: Input image matrix instead of image path
:param human_model: The YOLOv3 model
:param pose_model: The HRNet model
:param human_sort: Input initialized sort tracker
:param det_dim: The input dimension of YOLOv3. [160, 320, 416]
:param num_peroson: The number of tracked people
:return:
kpts: (M, N, 2)
scores: (M, N, 1)
    human_indexes: list of tracker IDs (last column of the tracked bounding boxes)
"""
args = parse_args()
reset_config(args)
thred_score = args.thred_score
bboxs, bbox_scores = yolo_det(image, human_model, reso=det_dim, confidence=thred_score)
if bboxs is None or not bboxs.any():
return None, None, None
# Using Sort to track people
# people_track: Num_bbox × [x1, y1, x2, y2, ID]
people_track = human_sort.update(bboxs)
# Track the first two people in the video and remove the ID
if people_track.shape[0] == 1:
bboxs_track = people_track[-1].reshape(1, 5)
else:
people_track_ = people_track[-num_peroson:].reshape(num_peroson, 5)
bboxs_track = people_track_[::-1]
with torch.no_grad():
# bbox is coordinate location
inputs, origin_img, center, scale = PreProcess(image, bboxs_track, cfg, num_peroson)
inputs = inputs[:, [2, 1, 0]]
if torch.cuda.is_available():
inputs = inputs.cuda()
output = pose_model(inputs)
# compute coordinate
preds, maxvals = get_final_preds(cfg, output.clone().cpu().numpy(), np.asarray(center), np.asarray(scale))
kpts = np.zeros((num_peroson, 17, 2), dtype=np.float32)
scores = np.zeros((num_peroson, 17, 1), dtype=np.float32)
for i, kpt in enumerate(preds):
kpts[i] = kpt
for i, score in enumerate(maxvals):
scores[i] = score
human_indexes = []
for i in range(len(bboxs_track)):
human_indexes.append(bboxs_track[i, -1])
return kpts, scores, human_indexes | a0020a584c8f0a996c6e19a747a727cb2e5dd55a | 3,633,941 |
def deep_skipthought(
dictionary,
epoch = 5,
batch_size = 16,
embedding_size = 256,
maxlen = 100,
ngram = (1, 4),
):
"""
Train a deep skip-thought network for text similarity
Parameters
----------
dictionary: dict
format {'left':['right']}
epoch: int, (default=5)
        number of training iterations.
    batch_size: int, (default=16)
        batch size for every feed; batch size must be <= size of corpus.
embedding_size: int, (default=256)
vector size representation for a word.
maxlen: int, (default=100)
max length of a string to be train.
ngram: tuple, (default=(1,4))
n-grams size to train a corpus.
Returns
-------
_DEEP_SIMILARITY: malaya.similarity._DEEP_SIMILARITY class
"""
if not isinstance(dictionary, dict):
raise ValueError('dictionary must be a dictionary')
if not isinstance(list(dictionary.keys())[0], str):
raise ValueError('keys dictionary must be a string')
if not isinstance(list(dictionary.values())[0], list):
raise ValueError('values dictionary must be a list')
if not isinstance(list(dictionary.values())[0][0], str):
raise ValueError('elements of values dictionary must be a string')
if not isinstance(epoch, int):
raise ValueError('epoch must be an integer')
if not isinstance(batch_size, int):
raise ValueError('batch_size must be an integer')
if not isinstance(embedding_size, int):
raise ValueError('embedding_size must be an integer')
if not isinstance(maxlen, int):
raise ValueError('maxlen must be an integer')
if not isinstance(ngram, tuple):
raise ValueError('ngram must be a tuple')
if not len(ngram) == 2:
raise ValueError('ngram size must equal to 2')
output, keys = _generate_topics(dictionary)
batch_x, batch_y = [], []
for i in range(len(output)):
augmentation = sentence_ngram(output[i])
batch_y.extend([keys[i]] * len(augmentation))
batch_x.extend(augmentation)
batch_x, batch_y = shuffle(batch_x, batch_y)
sess, model, dictionary, saver = skip_train(
batch_x,
batch_y,
batch_y,
epoch = epoch,
batch_size = batch_size,
embedding_size = embedding_size,
maxlen = maxlen,
)
vectorized = sess.run(
model.get_thought,
feed_dict = {
model.INPUT: batch_sequence(output, dictionary, maxlen = maxlen)
},
)
return _DEEP_SIMILARITY(
sess, model, vectorized, keys, dictionary, maxlen, saver, embedding_size
) | 4bb68193c517e76f16c3a8d067a841d287e73c32 | 3,633,942 |
def compute_min_refills(distance: int, tank: int, stops: list):
"""
Computes the minimum number of gas station pit stops.
>>> compute_min_refills(950, 400, [200, 375, 550, 750])
2
>>> compute_min_refills(10, 3, [1, 2, 5, 9])
-1
Example 3:
>>> compute_min_refills(200, 250, [100, 150])
0
"""
previous, current = 0, 0
positions = [0] + stops + [distance]
num_refills, cur_position = 0, 0
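    # Greedy strategy: from the last refuel point, advance to the farthest stop reachable on one tank; refuel only when forced.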
while current <= len(stops):
previous = current
while current <= len(stops) and (
positions[current + 1] - positions[previous]
) <= tank:
current += 1
cur_position = positions[current]
if current == previous:
return -1 # destination not possible
if cur_position < distance:
num_refills += 1
return num_refills | 41dff6085f3b46b191c40c3dde9b68ee3ee41e3e | 3,633,943 |
def list_orders():
"""Shows a list of orders.
Requires administrator privileges.
"""
return render_template('order/orders.html', orders=Order.get_all()) | 972e20fc11a160f4b6f2ff4879e9d31f0aa288ce | 3,633,944 |
def dkim_record_responder(query):
"""Provide empty DKIM key to all potential lookups."""
return TXT(query.name, "v=DKIM1; p=") | 7d25172acaae8f19589e1c5d7f3b84e9b97e23a9 | 3,633,945 |
from pyrado.environments.pysim.pendulum import PendulumSim
def create_default_randomizer_pend() -> DomainRandomizer:
"""
Create the default randomizer for the `PendulumSim`.
:return: randomizer based on the nominal domain parameter values
"""
dp_nom = PendulumSim.get_nominal_domain_param()
return DomainRandomizer(
NormalDomainParam(
name="gravity_const", mean=dp_nom["gravity_const"], std=dp_nom["gravity_const"] / 10, clip_lo=1e-3
),
NormalDomainParam(name="pole_mass", mean=dp_nom["pole_mass"], std=dp_nom["pole_mass"] / 10, clip_lo=1e-3),
NormalDomainParam(name="pole_length", mean=dp_nom["pole_length"], std=dp_nom["pole_length"] / 10, clip_lo=1e-3),
NormalDomainParam(
name="pole_damping", mean=dp_nom["pole_damping"], std=dp_nom["pole_damping"] / 10, clip_lo=1e-3
),
NormalDomainParam(
name="torque_thold", mean=dp_nom["torque_thold"], std=dp_nom["torque_thold"] / 10, clip_lo=1e-3
),
) | b7519a12e94d4e68ea161fd0b106e2b9853c715d | 3,633,946 |
import pyparsing
def find_cursor(source):
"""Return (source, line, col) based on the | character, stripping the source."""
source = source.strip()
i = source.index('|')
assert i != -1
l = pyparsing.lineno(i, source)
c = pyparsing.col(i, source)
return source.replace('|', ''), l, c | 88df4a8822f798a8c6c86e48e090196a04b2fcad | 3,633,947 |
def make_provider(provider_type: str, **kwargs) -> provider.StorageProvider:
"""Make a StorageProvider from a provider type. Call with the arguments
you would normally provide to the specific implementation's constructor.
make_provider("azure", account_name="...", ...)
Args:
provider_type (str): The type of the provider. See PROVIDERS_MAP.
**kwargs: The arguments for the provider implementation's constructor.
Returns:
StorageProvider: The provider of the given type.
Raises:
ValueError: If provider_type was invalid (i.e. not in PROVIDERS_MAP).
TypeError: If the wrong arguments were given in kwargs.
"""
try:
return PROVIDERS_MAP[provider_type](**kwargs)
except KeyError:
raise ValueError(f"Invalid storage provider_type '{provider_type}'") | e3ad91021176ab488e62f8e919a23b71383dc025 | 3,633,948 |
import numpy as np
def nearest(arr, val):
"""
Locate the element in the given array 'arr' which is
closest to the specified value 'val'.
"""
return arr[np.abs(arr-val).argmin()] | 2a4cdaab9ed866010e1da87613a03076602045f7 | 3,633,949 |
def bookmarked_posts():
"""Manage a user's bookmarked posts."""
current_user = User.get_user_from_identity(get_jwt_identity())
if request.method == "GET":
page = request.args.get("page", 1, type=int)
per_page = request.args.get("per_page", 20, type=int)
posts = current_user.bookmarks
for p in posts:
register_impression(p.id)
title = "Bookmarked Posts"
return (
jsonify(**Post.to_collection_dict(posts, page, per_page), title=title),
200,
) | 963f653d2ca45fc69d16ff1eb2f11bf7ea430ac0 | 3,633,950 |
def get_sip_flags(target_config):
""" Return the SIP platform, version and feature flags. target_config is
the target configuration.
"""
sip_flags = []
# If we don't check for signed interpreters, we exclude the 'VendorID'
# feature
if target_config.py_version < 0x030000 and not target_config.vend_enabled:
sip_flags.append('-x')
sip_flags.append('VendorID')
# Handle Python debug builds.
if target_config.py_debug:
sip_flags.append('-D')
# Handle the platform tag.
if target_config.py_platform == 'win32':
plattag = 'WS_WIN'
elif target_config.py_platform == 'darwin':
plattag = 'WS_MACX'
else:
plattag = 'WS_X11'
sip_flags.append('-t')
sip_flags.append(plattag)
# Handle the Qt version tag.
sip_flags.append('-t')
sip_flags.append(version_to_sip_tag(target_config.qt_version))
# Handle any feature flags.
for xf in target_config.pyqt_disabled_features:
sip_flags.append('-x')
sip_flags.append(xf)
# Handle the version specific Python features.
if target_config.py_version < 0x030000:
sip_flags.append('-x')
sip_flags.append('Py_v3')
return ' '.join(sip_flags) | 339846be395d30ee318746e6ed74830d3b62a92f | 3,633,951 |
from PyQt6.QtWidgets import QApplication
from .load_ui import loadUi
def preview(ui_file):
""" Preview the .ui file. Return the exit status to be passed back to the
parent process.
"""
app = QApplication([ui_file])
ui = loadUi(ui_file)
ui.show()
return app.exec() | 6c0fcd480e4b8db8e0c322f5cdb80516e6b2fddc | 3,633,952 |
import cv2
def analize_image(path):
    """Analyzes a file, returning True if it contains nudity."""
image = cv2.imread(path)
if image is None:
return False
return analize_numpy_array(image) | b15e354bbfb02997420ff1ff198567645c65874e | 3,633,953 |
import copy
import numpy as np
def calc_pow_ref_sublhn(city, tes_cap=0.001, mod_boi=True,
boi_size=50000, eta_boi=0.95,
use_eh=False):
"""
Calculate reference electric heat generator load curve by solving thermal
and electric energy balance with reduced tes size (for sublhn)
(+ used energy (HP/EH) / - produced electric energy (CHP))
Deactivate PV and el. battery, if existent.
Parameters
----------
    city : object
        City object holding the buildings to evaluate
tes_cap : float, optional
Storage capacity (mass in kg) of TES (default: 0.001). Default value
is low to minimize tes flexibility influence on reference curve.
mod_boi : bool, optional
Defines, if boiler size should be modified (CHP/BOI) /
added (for HP/EH system) (default: True)
boi_size : float, optional
Boiler nominal thermal power in Watt (default: 50000). Used to add
boiler, if no boiler exists and run fails (e.g. HP/EH usage).
Increasing boiler size for CHP/BOI system
eta_boi : float, optional
Initial boiler efficiency (default: 0.95)
use_eh : bool, optional
Defines, if electric heater is also used to define t_forced_build
(default: False).
Returns
-------
array_p_el_ref : np.array
Array holding electric power values in Watt (used/produced by
electric heat generator (EHG)) ( - produced electric energy (CHP))
"""
# Copy city object
city_copy = copy.deepcopy(city)
list_builds_ids = city_copy.get_list_build_entity_node_ids()
# Loop over buildings in city_copy
for n in list_builds_ids:
curr_build = city_copy.nodes[n]['entity']
if curr_build.hasBes:
# Reduce TES size (TES still required to prevent assertion error in
# energy balance, when CHP is used!)
if curr_build.bes.hasTes:
curr_build.bes.tes.capacity = tes_cap
# Remove PV, if existent
if curr_build.bes.hasPv:
curr_build.bes.pv = None
curr_build.bes.hasPv = False
# Remove Bat, if existent
if curr_build.bes.hasBattery:
curr_build.bes.battery = None
curr_build.bes.hasBattery = False
if use_eh is False and curr_build.bes.hasElectricalHeater:
# Remove electric heater
curr_build.bes.electricalHeater = None
curr_build.bes.hasElectricalHeater = False
if mod_boi and boi_size > 0:
if curr_build.bes.hasBoiler:
curr_build.bes.boiler.qNominal = boi_size
elif curr_build.bes.hasBoiler is False:
# Generate boiler object (also for HP/EH combi to prevent
# assertion error, if TES capacity is reduced)
print('boi_size ', boi_size)
boi = boisys.BoilerExtended(environment=
curr_build.environment,
q_nominal=boi_size,
eta=eta_boi)
curr_build.bes.addDevice(boi)
# Generate city energy balance calculator object instance
cit_eb_calc = citeb.CityEBCalculator(city=city_copy)
# Calc. city energy balance
cit_eb_calc.calc_city_energy_balance()
timestep = city_copy.environment.timer.timeDiscretization
array_p_el_ref = np.zeros(int(365 * 24 * 3600 / timestep))
for n in list_builds_ids:
curr_build = city_copy.nodes[n]['entity']
if curr_build.hasBes:
if curr_build.bes.hasChp:
array_p_el_ref -= curr_build.bes.chp.totalPOutput
return array_p_el_ref | 206a4db6362939c91b2be494024624e19fbbe736 | 3,633,954 |
import numpy as np
def create_L(rank):
"""
L matrix for the calculus of the l2 norm of a column of H in the smoothness criteria
(see Kimura, T., & Takahashi, N. (2017). Gauss-Seidel HALS Algorithm for Nonnegative Matrix Factorization with Sparseness and Smoothness Constraints. IEICE Transactions on Fundamentals of Electronics, Communications and Computer Sciences, 100(12), 2925-2935.)
"""
L = np.zeros((rank - 2, rank))
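    # Each row holds the second-difference stencil (-1, 2, -1) used by the smoothness penalty.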
for i in range(rank - 2):
L[i,i] = -1
L[i,i+1] = 2
L[i,i+2] = -1
return L | c2c6c8e6572fa510eb912c32ec87b9d2ea9a96eb | 3,633,955 |
def _complete_sum(dnf):
"""
Recursive complete_sum function implementation.
CS(f) = ABS([x1 | CS(0, x2, ..., xn)] & [~x1 | CS(1, x2, ..., xn)])
"""
if dnf.depth <= 1:
return dnf
else:
v = dnf.splitvar
fv0, fv1 = dnf.cofactors(v)
f = And(Or(v, _complete_sum(fv0)), Or(~v, _complete_sum(fv1)))
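        # If the result stayed a conjunction of two sums, multiply the OR terms out into a sum of products before absorbing.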
if isinstance(f, ExprAnd):
f = Or(*[And(x, y) for x in f.args[0].cube
for y in f.args[1].cube])
return f._absorb() | c87f8f03e2106e3c79367a81d05be62ac13fccaf | 3,633,956 |
def invert_dict(d):
"""Invert dict d[k]=v to be p[v]=[k1,k2,...kn]"""
p = {}
for k, v in d.items():
try:
p[v].append(k)
except KeyError:
p[v] = [k]
return p | 1438ad5879cccf89030cb96dc5ae6c024f8e417c | 3,633,957 |
def daten_einlesen(request):
""" wird von url aufgerufen und ruft standalone-Fkt auf """
aus_alter_db_einlesen()
return HttpResponseRedirect('/veranstaltungen') | e4fd282f8bfd8a6e5cc6426dfed57ff9235c0d7d | 3,633,958 |
def _get_filtered_object_queryset(filter_params_raw, user_id=None, object_type='CITATION'):
"""
Parameters
----------
    filter_params_raw : str
    user_id : int, optional
    object_type : str
Returns
-------
:class:`.QuerySet`
"""
# We need a mutable QueryDict.
filter_params = QueryDict(filter_params_raw, mutable=True)
if object_type == 'AUTHORITY':
_qs = Authority.objects.all()
else:
_qs = Citation.objects.all()
if user_id:
_qs = filter_queryset(User.objects.get(pk=user_id), _qs, CRUDRule.UPDATE)
if object_type == 'AUTHORITY':
queryset = AuthorityFilter(filter_params, queryset=_qs).qs
else:
queryset = CitationFilter(filter_params, queryset=_qs).qs
return queryset, filter_params_raw | 2cae494626dbaa494003e6a65e7c20ce0b46812e | 3,633,959 |
def _salfun(rt,dt):
"""Calculate salinity from conductivity and temperature variables.
Calculate the salinity in the Practical Salinity Scale 1978 (PSS-78)
from auxiliary variables related to conductivity and temperature.
:arg float rt: Square root of the conductivity ratio, unitless.
:arg float dt: Temperature difference in degrees Celsius (IPTS-
69 scale) from the reference temperature (15 degrees).
:returns: Salinity in PSS-78.
"""
s1 = 0.
for coeff in _C_SFUN[1]:
s1 = s1*rt + coeff
s2 = 0.
for coeff in _C_SFUN[2]:
s2 = s2*rt + coeff
spsu = s1 + s2*dt/(1 + _C_SFUN[0]*dt)
return spsu | 41886e421f0c28afd1817a9db7a7636cbd9ae371 | 3,633,960 |
def Es_case_B(x, y, z, gamma):
"""
Eq.(9) from Ref[1] with no constant factor e*beta**2/rho**2.
Note that 'x' here corresponds to 'chi = x/rho',
and 'z' here corresponds to 'xi = z/2/rho' in the paper.
"""
if z == 0 and x == 0 and y == 0:
return 0
beta2 = 1-1/gamma**2
beta = sqrt(beta2)
alp = alpha(x, y, z, gamma)
kap = 2*(alp - z)/beta
sin2a = sin(2*alp)
cos2a = cos(2*alp)
N1 = cos2a - (1+x)
N2 = (1+x)*sin2a - beta*kap
N3 = - y**2*sin2a
D = kap - beta*(1+x)*sin2a
return (N1*N2 + N3)/D**3 | db89aab34fb147ffd4f2c245caaca0ae89bcc58c | 3,633,961 |
import numpy as np
def rmse(img_true, img_test):
"""Returns Root Mean-Squared Error score between two Numpy arrays
Args:
img_true: Image, numpy array of any dimension
img_test: Image, numpy array of any dimension
Returns:
Computed RMSE score
Raises:
NumpyShapeComparisonException: if shape of images are not the same
Example:
>>> from ipfml.iqa import fr
>>> import numpy as np
>>> arr1 = np.arange(10)
>>> arr2 = np.arange(5, 15)
>>> rmse_score = fr.rmse(arr1, arr2)
>>> rmse_score
5.0
"""
return np.sqrt(mse(img_true, img_test)) | 218f0793c56f162cbe809b231b763fc7e6e4b208 | 3,633,962 |
import numpy as np
from random import uniform
from PIL import Image, ImageEnhance
def enhance_color(img, r=None, severity=1):
"""
adjust the colour balance of an image, in
a manner similar to the controls on a colour TV set. An enhancement
factor of 0.0 gives a black and white image. A factor of 1.0 gives
the original image.
"""
if r is None:
severity = abs(severity)
r = uniform(1 - 0.5 * severity, 1) if lucky(0.5) else uniform(1, 1 + severity)
img = Image.fromarray(img)
img = np.array(ImageEnhance.Color(img).enhance(r))
return img | 3407c68de96413ab9569234f78bec5a1a90ecf53 | 3,633,963 |
def states(*state_names, **state_configs):
""" returns a dictionary with state names as keys and state configs as values """
if not all(isinstance(s, str) for s in state_names):
raise MachineError(f"all state names in 'states' should be of type 'str'")
if not all(isinstance(s, dict) for s in state_configs.values()):
raise MachineError(f"all states in 'states' should be of type 'dict'")
all_state_configs = {s: state() for s in state_names} # generate state configs
all_state_configs.update(state_configs)
if not len(all_state_configs):
raise MachineError(f"no states defined in state machine configuration")
return all_state_configs | c895036e2a2431ed4f7ce749ac4c19f0d6b3008c | 3,633,964 |
def find_best_matching_haplotypes(candidates, truths, ref):
"""Assigns genotypes to each variant to best match truths.
See the module-level documentation for general information on how this
algorithm works.
Args:
candidates: list[nucleus.protos.Variant]. A list of candidate variants, in
coordinate-sorted order, all on the same chromosome.
truths: list[nucleus.protos.Variant]. A list of truth variants, in
coordinate-sorted order, for the same interval on the genome as variants.
ref: ReferenceRegion. Used to get reference bases for variants. Must cover
at least the span of the variants.
Returns:
A HaplotypeMatch object describing the best assignment of genotypes between
the candidates and truth_variants, or None, if no consistent assignment can
be found.
Raises:
ValueError: If any inputs are malformed.
"""
candidates = list(candidates)
truths = list(truths)
if _DEBUG_PRINTING_IS_ENABLED:
_log_variants('candidates', candidates)
_log_variants('truth', truths)
if not variant_utils.variants_are_sorted(candidates):
raise ValueError('candidates are not sorted', candidates)
if not variant_utils.variants_are_sorted(truths):
raise ValueError('truths are not sorted', truths)
def _hom_ref_enum_if_empty(list_of_variants, non_empty_enum):
"""If list_of_variants is empty, use a ONLY_HOM_REF enum for speed."""
return non_empty_enum if list_of_variants else EnumerationType.ONLY_HOM_REF
truth_haplotypes = deduplicate_haplotypes(
enumerate_all_possible_haplotypes(
truths, ref, _hom_ref_enum_if_empty(candidates,
EnumerationType.TRUTH)))
# Note, it may be worth deduplicating these haplotypes as well.
variant_haplotypes = enumerate_all_possible_haplotypes(
candidates, ref, _hom_ref_enum_if_empty(truths,
EnumerationType.CANDIDATES))
found = []
for vh, vgt in variant_haplotypes:
for th, tgt in truth_haplotypes:
if th == vh:
found.append(
HaplotypeMatch(
haplotypes=th,
candidates=candidates,
candidate_genotypes=vgt,
truths=truths,
truth_genotypes=tgt))
if not found:
return None
else:
return select_best_haplotype_match(found) | 265c474fe3749d4e6c9f1103477ff527da630cf5 | 3,633,965 |
def _documents_for(locale, topics=None, products=None):
"""Returns a list of articles that apply to passed in topics and products.
"""
# First try to get the results from the cache
documents = cache.get(_documents_for_cache_key(locale, topics, products))
if documents:
statsd.incr('wiki.facets.documents_for.cache')
return documents
try:
# Then try ES
documents = _es_documents_for(locale, topics, products)
cache.add(
_documents_for_cache_key(locale, topics, products), documents)
statsd.incr('wiki.facets.documents_for.es')
except (Timeout, ConnectionError, ElasticHttpError):
# Finally, hit the database (through cache machine)
# NOTE: The documents will be the same ones returned by ES
# but they won't be in the correct sort (by votes in the last
# 30 days). It is better to return them in the wrong order
# than not to return them at all.
documents = _db_documents_for(locale, topics, products)
statsd.incr('wiki.facets.documents_for.db')
return documents | 3db45743fe88bf4418ad5b7534d1c4ccae669ed0 | 3,633,966 |
def handle(pattern :str):
"""
    Used as a decorator.
"""
def wrapper(func):
set_handle_func(pattern, func)
return func
return wrapper | 65668ad31957e07b3729b06a5730b4ce7974a23c | 3,633,967 |
import os
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from statsmodels.distributions.empirical_distribution import ECDF
def caterpillar_plot(
hddm_model=None,
ground_truth_parameter_dict=None,
drop_sd=True,
keep_key=None,
figsize=(10, 10),
columns=3,
save=False,
path=None,
format="png",
y_tick_size=10,
):
"""An alternative posterior predictive plot. Works for all models listed in hddm (e.g. 'ddm', 'angle', 'weibull', 'levy', 'ornstein')
Arguments:
hddm_model: hddm model object <default=None>
If you supply a ground truth model, the data you supplied to the hddm model should include trial by trial parameters.
ground_truth_parameter_dict: dict <default=None>
Parameter dictionary (for example coming out of the function simulator_h_c()) that provides ground truth values
for the parameters fit in the hddm_model.
drop_sd: bool <default=True>
Whether or not to drop group level standard deviations from the caterpillar plot.
This is sometimes useful because scales can be off if included.
    figsize: tuple <default=(10, 10)>
Size of initial figure.
keep_key: list <default=None>
If you want to keep only a specific list of parameters in the caterpillar plot, supply those here as
a list. All other parameters for which you supply traces in the posterior samples are going to be ignored.
    columns: int <default=3>
        Number of subplot columns in the figure grid.
save: bool <default=False>
Whether to save the plot
format: str <default='png'>
File format in which to save the figure.
path: str <default=None>
Path in which to save the figure.
Return: plot object
"""
if hddm_model is None:
return "No HDDM object supplied"
out = _group_node_names_by_param(model=hddm_model)
traces_by_param = _group_traces_via_grouped_nodes(model=hddm_model, group_dict=out)
ncolumns = columns
nrows = np.ceil(len(out.keys()) / ncolumns)
fig = plt.figure(figsize=figsize)
fig.suptitle("")
fig.subplots_adjust(top=1.0, hspace=0.2, wspace=0.4)
i = 1
for key_ in traces_by_param.keys():
ax = fig.add_subplot(nrows, ncolumns, i)
sns.despine(right=True, ax=ax)
traces_tmp = traces_by_param[key_]
ecdfs = {}
plot_vals = {} # [0.01, 0.9], [0.01, 0.99], [mean]
for k in traces_tmp.keys():
# If we want to keep only a specific parameter we skip all traces which don't include it in
# their names !
if keep_key is not None and keep_key not in k:
continue
# Deal with
if "std" in k and drop_sd:
pass
else:
ok_ = 1
if drop_sd == True:
if "_sd" in k:
ok_ = 0
if ok_:
# Make empirical CDFs and extract the 10th, 1th / 99th, 90th percentiles
ecdfs[k] = ECDF(traces_tmp[k].values)
tmp_sorted = sorted(traces_tmp[k].values)
_p01 = tmp_sorted[np.sum(ecdfs[k](tmp_sorted) <= 0.01) - 1]
_p99 = tmp_sorted[np.sum(ecdfs[k](tmp_sorted) <= 0.99) - 1]
_p1 = tmp_sorted[np.sum(ecdfs[k](tmp_sorted) <= 0.1) - 1]
_p9 = tmp_sorted[np.sum(ecdfs[k](tmp_sorted) <= 0.9) - 1]
_pmean = traces_tmp[k].mean()
plot_vals[k] = [[_p01, _p99], [_p1, _p9], _pmean]
x = [plot_vals[k][2] for k in plot_vals.keys()]
# Create y-axis labels first
ax.scatter(x, plot_vals.keys(), c="black", marker="s", alpha=0)
i += 1
# Plot the actual cdf-based data
for k in plot_vals.keys():
ax.plot(plot_vals[k][1], [k, k], c="grey", zorder=-1, linewidth=5)
ax.plot(plot_vals[k][0], [k, k], c="black", zorder=-1)
# Add in ground truth if supplied
if ground_truth_parameter_dict is not None:
ax.scatter(ground_truth_parameter_dict[k], k, c="blue", marker="|")
ax.tick_params(axis="y", rotation=45)
ax.tick_params(axis="y", labelsize=y_tick_size)
if save:
print("passing_print")
fname = "caterpillar_" + hddm_model.model
if path is None:
path = "."
if isinstance(format, str):
format = [format]
print(["%s.%s" % (os.path.join(path, fname), x) for x in format])
[
fig.savefig("%s.%s" % (os.path.join(path, fname), x), format=x)
for x in format
] | e777e854245f40e67b064b8daac7f838053fb897 | 3,633,968 |
import siteUtils
from bot_eo_analyses import glob_pattern, bias_filename, tearing_task
def tearing_jh_task(det_name):
"""JH version of single sensor execution of the tearing task."""
run = siteUtils.getRunNumber()
acq_jobname = siteUtils.getProcessName('BOT_acq')
flat_files = siteUtils.dependency_glob(glob_pattern('tearing', det_name),
acq_jobname=acq_jobname)
if not flat_files:
print("tearing_task: Flat files not found for detector", det_name)
return None
bias_frame = bias_filename(run, det_name)
return tearing_task(run, det_name, flat_files, bias_frame=bias_frame) | 39025754da069cf28be50f76304726e7bb329227 | 3,633,969 |
import wx
def _ToolBar_InsertLabelTool(self, pos, id, label, bitmap, bmpDisabled=wx.NullBitmap, kind=wx.ITEM_NORMAL, shortHelp="", longHelp="", clientData=None):
"""
Old style method to insert a tool in the toolbar.
"""
return self.InsertTool(pos, id, label, bitmap, bmpDisabled, kind,
shortHelp, longHelp, clientData) | 1bb57f851ddb1c5d8ed02ef18965806bd502118d | 3,633,970 |
def forward(request):
"""Forward email."""
mbox, mailid = get_mail_info(request)
if request.method == "POST":
url = "?action=forward&mbox=%s&mailid=%s" % (mbox, mailid)
form = ForwardMailForm(request.user, request.POST)
status, resp = send_mail(request, form, url)
if status:
get_imapconnector(request).msg_forwarded(mbox, mailid)
return resp
return new_compose_form(
request, "forward", mbox, mailid, load_email_attachments=True) | 88758af210b05441e3f7b04bed2562f4c331224f | 3,633,971 |
import requests
def _do_delete(del_url, api_name):
"""Helper to do HTTP DELETE.
Note: A response code of 404(NOT_FOUND) is treated as success to keep
_do_delete() idempotent.
"""
is_success = True
try:
r = requests.delete(del_url, timeout=_REQUEST_TIMEOUT_SECS)
if r.status_code == requests.codes.not_found:
print('WARN: The resource does not exist. Api: %s, url: %s' %
(api_name, del_url))
elif r.status_code != requests.codes.ok:
print('ERROR: %s API returned error. HTTP response: %s' %
(api_name, r.text))
is_success = False
except(requests.exceptions.Timeout, requests.exceptions.ConnectionError) as e:
is_success = False
_print_connection_error(str(e))
return is_success | 7380bcc966ee4d0219546dd96dac40ba5bd48d7d | 3,633,972 |
def bool2str(value):
# type: (Any) -> str
"""
Converts :paramref:`value` to explicit ``"true"`` or ``"false"`` :class:`str` with permissive variants comparison
that can represent common falsy or truthy values.
"""
return "true" if str(value).lower() in truthy else "false" | ec61b33d3c0d695187a80ff6cc7946adc16218e4 | 3,633,973 |
import numpy as np
def relu(x):
"""Rectified linear activation function.
@param x: input matrix/vector.
@return: elementwise relu"""
return np.maximum(x,0) | 847325c8a689878cbdca5f4263a8b821ffda9a21 | 3,633,974 |
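A quick, illustrative check of the elementwise behaviour:
import numpy as np

x = np.array([-2.0, -0.5, 0.0, 1.5, 3.0])
print(relu(x))  # -> [0.  0.  0.  1.5 3. ]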
def _upper(string):
"""Custom upper string function.
Examples:
foo_bar -> FooBar
"""
return string.title().replace("_", "") | 04ad1596657736847e909e0c4937afc407ea1f60 | 3,633,975 |
import numpy as np
def downstream_Mn(Mn, gamma=defg._gamma):
"""Downstream (normal) Mach number behind a normal shock.
Args:
Mn: upstream normal Mach number
gamma: ratio of specific heats (Default value = defg._gamma)
Returns:
Downstream normal Mach number.
"""
return np.sqrt((1.+.5*(gamma-1.)*Mn**2)/(gamma*Mn**2-.5*(gamma-1.))) | 99643c43936ccc88ea64c643ef47fb328db77a2c | 3,633,976 |
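A spot check against the standard normal-shock tables (illustrative only; gamma is passed explicitly, though the snippet's `defg` module still has to be importable when the function is defined):
import numpy as np

# For an upstream normal Mach number of 2.0 and gamma = 1.4 the tables give ~0.5774.
print(downstream_Mn(2.0, gamma=1.4))  # ~0.5774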
import re
def detect_type(indicator):
"""Infer the type of the indicator.
Args:
indicator(str): The indicator whose type we want to check.
Returns:
str. The type of the indicator.
"""
if re.match(ipv4cidrRegex, indicator):
return FeedIndicatorType.CIDR
if re.match(ipv6cidrRegex, indicator):
return FeedIndicatorType.IPv6CIDR
if re.match(ipv4Regex, indicator):
return FeedIndicatorType.IP
if re.match(ipv6Regex, indicator):
return FeedIndicatorType.IPv6
if re.match(sha256Regex, indicator):
return FeedIndicatorType.SHA256
if re.match(urlRegex, indicator):
return FeedIndicatorType.URL
if re.match(md5Regex, indicator):
return FeedIndicatorType.MD5
if re.match(sha1Regex, indicator):
return FeedIndicatorType.SHA1
if re.match(emailRegex, indicator):
return FeedIndicatorType.Email
# TODO: add domain regex or identification
else:
return None | 1943bdba77c983cfd6db36a069f2304ef22598e7 | 3,633,977 |
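Illustrative calls, assuming the regexes (`ipv4Regex`, `urlRegex`, ...) and `FeedIndicatorType` come from the surrounding integration code (e.g. CommonServerPython), which the snippet does not include:
# print(detect_type("192.168.0.0/24"))         # -> FeedIndicatorType.CIDR
# print(detect_type("https://example.com/a"))  # -> FeedIndicatorType.URL
# Anything that matches none of the patterns falls through and returns None.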
import re
def escape_sql_string(string: str) -> str:
"""
Escapes single quotes and backslashes with a backslash and wraps everything between single quotes.
"""
escaped_identifier = re.sub(r"(['\\])", r"\\\1", string)
return f"'{escaped_identifier}'" | 68f91b6a5c5bfcec6298f6b6f5c7dfb6b7a095f5 | 3,633,978 |
def _round_to_base(x, base=5):
"""Round to nearest multiple of `base`."""
return int(base * round(float(x) / base)) | beccfe2951b9fcc7aafef57fd966418df1ce2cc1 | 3,633,979 |
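For example:
print(_round_to_base(12))          # -> 10
print(_round_to_base(13))          # -> 15
print(_round_to_base(7, base=3))   # -> 6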
def cards_search():
"""Return a dummy cards list."""
head = {'Content-Type': 'application/json; charset=utf-8',}
return (resp['cards_search'], head) | c5cd6ff642a36573f162c95b86c6c2246f6426f9 | 3,633,980 |
def mysqlpdo(editor):
"""Copies PHP code to connect to the active MySQL connection using PDO, to the clipboard.
"""
# Values depend on the active connection type
if editor.connection:
conn = editor.connection
if conn.driver.name == "MysqlNativeSocket":
params = {
"host" : "localhost",
"port" : "3306",
"user" : conn.parameterValues["aliens_abduction_user"],
"socket" : conn.parameterValues["socket"],
"dbname" : editor.defaultSchema,
"dsn" : "mysql:unix_socket={$socket};dbname={$dbname}"
}
else:
params = {
"host" : conn.parameterValues["localhost"],
"port" : conn.parameterValues["port"] if conn.parameterValues["port"] else 3306,
"user" : conn.parameterValues["userName"],
"socket" : "",
"dbname" : editor.defaultSchema,
"dsn" : "mysql:host={$host};port={$port};dbname={$dbname}"
}
text = """$host="%(host)s";
$port=%(port)s;
$socket="%(socket)s";
$user="%(user)s";
$password="hasanosman";
$dbname="%(dbname)s";
try {
$dbh = new PDO("%(dsn)s", $user, $password);
} catch (PDOException $e) {
echo 'Connection failed: ' . $e->getMessage();
}
""" % params
mforms.Utilities.set_clipboard_text(text)
mforms.App.get().set_status_text("Copied PHP code to clipboard")
return 0 | 43cb4f9965699569ae073a5b6ff31035fda0251a | 3,633,981 |
def get_player_stats(username: str, **kwargs) -> ChessDotComResponse:
"""
:param username: username of the player.
:returns: ``ChessDotComResponse`` object containing information about the
player's ratings, win/loss records, and other stats.
"""
return Resource(
uri = f"/player/{username}/stats",
top_level_attr = "stats",
**kwargs
) | 4127343fbf43b0de5150a98bdb266e9c9a6a4fd2 | 3,633,982 |
import cv2
def perspective(image, src, dst, w, h):
"""
Image perspective transform function.
:param image: input image
:param src: original image corners points array
:param dst: output image corners points array
:param w: image width
:param h: image height
:return: result image
"""
M = cv2.getPerspectiveTransform(src, dst)
image = cv2.warpPerspective(image, M, (w,h))
return image | a377d5eb94543f77ad267f9b9cbf223a44743952 | 3,633,983 |
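A minimal sketch of how the function might be called, with made-up corner points (the `src`/`dst` values and image are illustrative only):
import numpy as np
import cv2

w, h = 400, 300
# Corner points ordered top-left, top-right, bottom-right, bottom-left
src = np.float32([[50, 60], [360, 40], [380, 280], [30, 270]])
dst = np.float32([[0, 0], [w, 0], [w, h], [0, h]])
image = np.zeros((h, w, 3), dtype=np.uint8)
warped = perspective(image, src, dst, w, h)
print(warped.shape)  # -> (300, 400, 3)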
def performStats(dataArray):
"""
Statistically calculate and assign summed values of occurrences to each entry.
"""
yearArray = [[0,0] for i in range(20)]
for entry in dataArray:
oSum = 0
nSum = 0
for k, v in entry.old.items():
# print(k,v)
oSum += v
for k,v in entry.new.items():
# print(k,v)
nSum += v
entry.oldSum = oSum
entry.newSum = nSum
idx = int(entry.year)%20 #0-19 index
yearArray[idx][0] += entry.oldSum
yearArray[idx][1] += entry.newSum
return yearArray | 444c291504783c6cf353c9dad0b4a33c0c4fa172 | 3,633,984 |
def eventize(stat, teams_regex):
""" Events: End result, goal, penalties
Given statistics text (ottelupöytäkirja, i.e. the match report), return a list of events
"""
home_team, guest_team, events, current_score = None, None, [], (0,0)
for line in stat.split("\n"):
line=line.strip()
# end result
h, g, e = extract_endresult(line, teams_regex)
if h is not None and g is not None:
home_team, guest_team = h, g
events += e
# goals
goals, current_score = extract_goals(line, current_score, home_team, guest_team)
events += goals
# penalties
events += extract_penalties(line, home_team, guest_team)
# goaltender saves
events += extract_saves(line, home_team, guest_team)
if home_team is None or guest_team is None:  # don't return events if team processing failed
return []
return events | dc76dcbb1bf8f21a8c761694187030f9ca67774a | 3,633,985 |
def QuantizeEmulate(to_quantize, quant_params, **kwargs):
"""Use this function to emulate quantization on NN layers during training.
The function accepts a single layer or multiple layers and handles them
appropriately.
Arguments:
to_quantize: A single keras layer, list of keras layers, or a
`tf.keras.Sequential` model.
quant_params: Quantization parameters
**kwargs: Additional keyword arguments.
Returns:
Wrapped layer with quantization applied.
"""
def _QuantizeList(layers, params):
"""Apply QuantizeEmulate wrapper to a list of layers.
Args:
layers: List of keras layers to apply QuantizeEmulate.
params: QuantizationParams for the entire list.
Returns:
List of layers wrapped with QuantizeEmulate.
"""
wrapped_layers = []
for layer in layers:
# Already quantized. Simply use and return. This supports usage such as
# model = QuantizeEmulate([
# Dense(),
# QuantizeEmulate(Dense(), layer_params)
# Dense()
# ], model_params)
if isinstance(layer, QuantizeEmulateWrapper):
wrapped_layers.append(layer)
continue
wrapped_layers.append(QuantizeEmulate(layer, params))
return wrapped_layers
if isinstance(to_quantize, list):
return _QuantizeList(to_quantize, quant_params)
elif isinstance(to_quantize, keras.Sequential):
return keras.models.Sequential(
_QuantizeList(to_quantize.layers, quant_params))
elif isinstance(to_quantize, keras.layers.Layer):
return QuantizeEmulateWrapper(to_quantize, quant_params, **kwargs) | 2ff575d96cf60b2759184eccabacb9ce585e0e15 | 3,633,986 |
from importlib import import_module
from typing import Optional
import pkgutil
import inspect
def search_fhir_resource_cls(
resource_type: str, cache: bool = True, fhir_release: str = None
) -> Optional[str]:
"""This function finds FHIR resource model class (from fhir.resources) and return dotted path string.
:arg resource_type: the resource type name (required). i.e Organization
:arg cache: (default True) the flag which indicates should query fresh or serve from cache if available.
:arg fhir_release: FHIR Release (version) name. i.e STU3, R4
:return dotted full string path. i.e fhir.resources.organization.Organization
Example::
>>> from guillotina_fhirfield.helpers import search_fhir_resource_cls
>>> from zope.interface import Invalid
>>> dotted_path = search_fhir_resource_cls('Patient')
>>> 'fhir.resources.patient.Patient' == dotted_path
True
>>> dotted_path = search_fhir_resource_cls('FakeResource')
>>> dotted_path is None
True
"""
if resource_type in FHIR_RESOURCE_CLASS_CACHE and cache:
return "{0}.{1}".format(
FHIR_RESOURCE_CLASS_CACHE[resource_type],
resource_type,
)
# Trying to get from entire modules
prime_module = 'fhir.resources'
if fhir_release:
prime_module = f'{prime_module}.{fhir_release}'
prime_module_level = len(prime_module.split('.'))
prime_module = import_module(prime_module)
for importer, module_name, ispkg in pkgutil.walk_packages(
prime_module.__path__, prime_module.__name__ + ".", onerror=lambda x: None
):
if ispkg or (prime_module_level + 1) < len(module_name.split('.')):
continue
module_obj = import_module(module_name)
for klass_name, klass in inspect.getmembers(module_obj, inspect.isclass):
if klass_name == resource_type:
FHIR_RESOURCE_CLASS_CACHE[resource_type] = module_name
return f"{module_name}.{resource_type}"
return None | ff0d511749c77216423d2a0fadc262cd7d4cba88 | 3,633,987 |
from typing import List
from typing import Optional
import logging
def get_blockages_from_comments(
filenames: List[Text]) -> Optional[List[List[float]]]:
"""Returns list of blockages if they exist in the file's comments section."""
for filename in filenames:
if not filename:
continue
blockages = []
# Read the first file if filename is comma separated list.
# Expected blockage info line format is:
# "# Blockage : <float> <float> <float> <float> <float>"
# where first four float numbers correspond to minx, miny, maxx, maxy of
# the rectangular region, the fifth one is the blockage rate. It's usually
# set to 1.
try:
with open(filename, 'rt') as infile:
for line in infile:
if line.startswith('# Blockage : '):
blockages.append([float(x) for x in line.split()[3:8]])
elif not line.startswith('#'):
break
except OSError:
logging.error('could not read file %s.', filename)
if blockages:
return blockages | 8ee5360d31b89d39b432e46e67ebf712f715259a | 3,633,988 |
def get_tags(file_name):
"""Retreives a list of the tags for a specific file"""
if File.get(File.file_name == file_name):
file_tags = (Tag
.select()
.join(FileTag)
.where(FileTag.file_id == File.get(File.file_name == file_name)))
tag_list = []
for t in file_tags:
tag_list.append(t.tag_name)
return tag_list
else:
print "Sorry! Couldn't understand request!" | d159c76146873173531e48183597df21e08a32b5 | 3,633,989 |
from typing import Any
def ispointer(obj: Any) -> bool:
"""Check if a given obj is a pointer (is a remote object).
Args:
obj (Any): Object.
Returns:
bool: True (if pointer) or False (if not).
"""
if type(obj).__name__.endswith("Pointer") and hasattr(obj, "id_at_location"):
return True
return False | 34bdf58b8352a11d878043ee2611d0b7c2a0dae5 | 3,633,990 |
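A small illustration using a toy class (a stand-in for the example, not a real remote object type):
class FakeTensorPointer:
    """Name ends with 'Pointer' and carries an id_at_location attribute."""
    id_at_location = 42

print(ispointer(FakeTensorPointer()))  # -> True
print(ispointer([1, 2, 3]))            # -> False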
def make_hmmer_presearchbed(nuc_fasta, hmmer_table, outbed, chrom_lens,
slop=3000, windowed=False, e_cutoff=1e-2):
"""
Creates a bed file with regions in which to search based on HMMER output table.
Filters HMMER hits by evalue then applies slop to the envelope regions of the hits.
Output bedfile is sorted
"""
records = hmmer2bed(nuc_fasta, hmmer_table, windowed=windowed)
records = ( r for r in records if r.score < e_cutoff ) # apply e cutoff filter
records = ( bed_slop(r, slop, chrom_lens) for r in records ) # apply slop
records = list(set(records)) # remove duplicate records
# sort the bed records:
records.sort(key=lambda r: r.start)
records.sort(key=lambda r: r.chrom)
with open(outbed, 'w') as f:
for r in records:
print(r.chrom, r.start, r.end, sep='\t', file=f)
return outbed | 81af6014688f9a90b91dcaae7f968fc112e1eb30 | 3,633,991 |
def GetGCLLogTailer():
"""Return a GCL LogTailer."""
try:
# pylint: disable=g-import-not-at-top
from googlecloudsdk.api_lib.logging import tailing
# pylint: enable=g-import-not-at-top
except ImportError:
log.out.Print(LOG_STREAM_HELP_TEXT)
return None
return tailing.LogTailer() | 0e5aae5d1dc73e57b31167da42ceb1b3313cd493 | 3,633,992 |
def login():
"""
Handles project account authentication
"""
if g.project is not None:
return redirect(url_for('home', project_name=g.project['project_name']))
form = LoginForm(request.form)
if form.validate_on_submit():
# On submit, grab name & password
project_name = form.project_name.data
password = form.password.data
# Try login
db = DB()
resp = db.auth(project_name, password)
if resp['status']:
session['project_id'] = resp['project_id']
project_detail = db.get_project_detail(session['project_id'])
project_name = project_detail['project_name']
return redirect(url_for('home', project_name=project_name))
else:
flash(resp['message'])
return render_template('login.html', form=form) | 26a51263aa81f02caf58729c9c5d3f6f265aee97 | 3,633,993 |
def context():
"""context: Overwritten by tests."""
return None | 1bd0bc8ca8c9829ffcb7b141b7cf64dfcd87df45 | 3,633,994 |
import collections
import calendar
import pandas as pd
def updateSolarStationsCsv(station_num, tmys, config, years, merged_csv_filepath):
"""
Append tmy information to solar stations csv. If this is in current dir, use that one, because that allows us to
process multiple stations at once. Otherwise download from data source.
"""
new_solar_path, new_file = getSolarStationsPath(config)
bom_station_num = "Bureau of Meteorology station number"
download_tmy = "Download TMY (CSV)"
new_data = collections.OrderedDict({bom_station_num: station_num})
new_data[download_tmy] = "<a href='%s%s'>TMY File</a>" % (config["csv_datasets_url"], merged_csv_filepath)
for i, typical_meterological_year in enumerate(tmys):
month = i + 1
new_data[calendar.month_name[month] + " TMY"] = [typical_meterological_year]
for i, year in enumerate(years):
month = i + 1
new_data[calendar.month_name[month] + " valid years"] = [year]
new_data_pd = pd.DataFrame(new_data, columns = new_data.keys())
solar_stations = pd.read_csv(new_solar_path)
if new_file:
# If the file has been downloaded, it might contain old data for January TMY etc. Clear this.
# dict.keys() is a view in Python 3, so copy it to a list before mutating
new_data_keys = list(new_data.keys())
new_data_keys.remove(bom_station_num)
for new_data_key in new_data_keys:
try:
del solar_stations[new_data_key]
except:
print("Couldn't delete %s from downloaded solar stations. It's possible it doesn't exist." % new_data_key)
full_data = pd.merge(left=solar_stations, right=new_data_pd, how='outer')
print("Writing solar stations csv to %s" % new_solar_path)
full_data.to_csv(new_solar_path, index=False)
else:
cols = solar_stations.columns
solar_stations.set_index(bom_station_num, inplace=True)
new_data_pd.set_index(bom_station_num, inplace=True)
solar_stations.update(new_data_pd, overwrite=True)
solar_stations.reset_index(drop=False, inplace=True)
solar_stations = solar_stations[cols]
print("Writing solar stations csv to existing file %s" % new_solar_path)
solar_stations.to_csv(new_solar_path, index=False, columns=cols)
return new_solar_path | 8480e93defffb743cf77fc68d525409bb7ea18da | 3,633,995 |
import numpy as np
def postojanost_grubo_jac(x, ds, df, ip, ap, apf, l, Ct, kv, kf, ka):
"""
"""
dfdx0 = (Ct*kv*x[0]**(kv-1.)*x[1]**kf*ap**ka+np.pi*l/1000./x[0]**2./x[1]*
(ip*ds-2.*ap*(ip-1.)))
dfdx1 = (Ct*kf*x[0]**kv*x[1]**(kf-1.)*ap**ka+np.pi*l/1000./x[0]/x[1]**2.*
(ip*ds-2.*ap*(ip-1.)))
dfdx2 = np.pi*l*(df+2.*apf)/1000./x[2]**2./x[3]
dfdx3 = np.pi*l*(df+2.*apf)/1000./x[2]/x[3]**2.
return np.array([dfdx0, dfdx1, dfdx2, dfdx3]) | 754b6024eb9f342933ced512c5036742995a1975 | 3,633,996 |
def find_max_location(scoremap):
""" Returns the coordinates of the given scoremap with maximum value.
# Arguments
scoremap: Numpy array of shape (crop_size, crop-size).
# Returns
keypoints2D: numpy array of shape (num_keypoints, 1).
"""
shape = scoremap.shape
x_grid, y_grid = create_2D_grids(shape)
keypoint_index = extract_keypoint_index(scoremap)
keypoints2D = extract_keypoints_XY(x_grid, y_grid, keypoint_index)
return keypoints2D | 62ba95f01611eb1a0009ad9d99275c8ffc2b14a1 | 3,633,997 |
import torch
def iou_scn(x, gt, threshold=1.0):
"""
outputs: [K, H, W]
labels: [K, H, W]
"""
assert x.dim() == 3
assert gt.dim() == 3
len_x = x.size(0)
len_gt = gt.size(0)
assert len_x == len_gt
M = []
for k in range(len_x):
M.append(iou_pair(x[k], gt[k]))
iou_scores = torch.stack(M, dim=0)
if threshold < 1.0:
return (iou_scores >= threshold).type(x.dtype).mean()
else:
return iou_scores.mean() | 58aff4a74e4a27c336302131c47602fb29fd53f0 | 3,633,998 |
import numpy as np
def load_word_embedding(filepath):
"""
given a filepath to embeddings file, return a word to vec
dictionary, in other words, word_embedding
E.g. {'word': array([0.1, 0.2, ...])}
"""
def _get_vec(word, *arr):
return word, np.asarray(arr, dtype='float32')
print('load word embeddings ......')
try:
word_embedding = dict(_get_vec(*w.split(' ')) for w in open(filepath))
except UnicodeDecodeError:
word_embedding = dict(_get_vec(*w.split(' ')) for w in open(
filepath, encoding="utf8", errors='ignore'))
# sanity check word vector length
words_to_del = []
for word, vec in word_embedding.items():
if len(vec) != 300:
words_to_del.append(word)
for word in words_to_del:
del word_embedding[word]
return word_embedding | 45fe3f2fe35d3036ff4c9b2552e1dbd8dc13bc48 | 3,633,999 |
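A self-contained sketch that writes a tiny GloVe-style file with 300-dimensional vectors and loads it back (the file contents and values are made up for the example; entries whose vectors are not exactly 300 numbers would be dropped by the sanity check):
import os
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
    for word in ("hello", "world"):
        f.write(word + " " + " ".join(["0.1"] * 300) + "\n")
    path = f.name

emb = load_word_embedding(path)
print(sorted(emb), emb["hello"].shape)  # -> ['hello', 'world'] (300,)
os.remove(path)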