| content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M) |
|---|---|---|
def _find_popular_codon(aa):
"""
    Return a representative codon for a 4+-fold degenerate amino acid: the first codon whose
    two-base prefix is most common among the amino acid's codons.
    :param aa: dictionary containing amino acid information (must provide a "codons" list).
    :return: the selected codon string.
"""
codons = [c[:2] for c in aa["codons"]]
counts = []
for i in range(len(codons)):
pc = codons[i]
count = 0
for j in range(len(codons)):
if codons[j] == pc:
count += 1
counts.append(count)
    # find index of the highest entry
highest = 0
for i in range(len(counts)):
if counts[i] > counts[highest]:
highest = i
return aa["codons"][highest]
|
a555a9d42ea4dfa0260d9d4d2040de3c6fca69a0
| 3,641,700
|
import pathlib
def initialize_cluster_details(scale_version, cluster_name, username,
password, scale_profile_path,
scale_replica_config):
""" Initialize cluster details.
:args: scale_version (string), cluster_name (string),
username (string), password (string), scale_profile_path (string),
scale_replica_config (bool)
"""
cluster_details = {}
cluster_details['scale_version'] = scale_version
cluster_details['scale_cluster_clustername'] = cluster_name
cluster_details['scale_service_gui_start'] = "True"
cluster_details['scale_gui_admin_user'] = username
cluster_details['scale_gui_admin_password'] = password
cluster_details['scale_gui_admin_role'] = "Administrator"
cluster_details['scale_sync_replication_config'] = scale_replica_config
cluster_details['scale_cluster_profile_name'] = str(
pathlib.PurePath(scale_profile_path).stem)
cluster_details['scale_cluster_profile_dir_path'] = str(
pathlib.PurePath(scale_profile_path).parent)
return cluster_details
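A hypothetical usage sketch; the profile path is invented for illustration and the output assumes a POSIX-style path:
details = initialize_cluster_details("5.1.3", "demo-cluster", "admin", "secret",
                                      "/opt/scale/profiles/storage.yaml", True)
print(details['scale_cluster_profile_name'])      # -> 'storage' (PurePath stem)
print(details['scale_cluster_profile_dir_path'])  # -> '/opt/scale/profiles'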
|
5508733e0bfbd20fb76ecaaf0df7f41675b0c5c8
| 3,641,701
|
def load(data_home=None):
"""Load RWC-Genre dataset
Args:
data_home (str): Local path where the dataset is stored.
If `None`, looks for the data in the default directory, `~/mir_datasets`
Returns:
(dict): {`track_id`: track data}
"""
if data_home is None:
data_home = utils.get_default_dataset_path(DATASET_DIR)
rwc_popular_data = {}
for key in track_ids():
rwc_popular_data[key] = Track(key, data_home=data_home)
return rwc_popular_data
|
61d09f64ec7f36bc1dac6bfc6bea8e47fe82248b
| 3,641,702
|
import copy
def collate_spectra_by_source(source_list, tolerance, unit=u.arcsec):
"""Given a list of spec1d files from PypeIt, group the spectra within the
files by their source object. The grouping is done by comparing the
    position of each spectrum (using either pixel or RA/DEC coordinates) within a given tolerance.
Args:
source_list (list of :obj:`SourceObject`): A list of source objects, one
SpecObj per object, ready for collation.
tolerance (float):
Maximum distance that two spectra can be from each other to be
considered to be from the same source. Measured in floating
point pixels or as an angular distance (see ``unit`` argument).
unit (:obj:`astropy.units.Unit`):
Units of ``tolerance`` argument if match_type is 'ra/dec'.
Defaults to arcseconds. Ignored if match_type is 'pixel'.
Returns:
        (list of :obj:`SourceObject`): The collated spectra as SourceObjects.
"""
collated_list = []
for source in source_list:
# Search for a collated SourceObject that matches this one.
# If one can't be found, treat this as a new collated SourceObject.
found = False
for collated_source in collated_list:
if collated_source.match(source.spec_obj_list[0],
source.spec1d_header_list[0],
tolerance, unit):
collated_source.combine(source)
found = True
if not found:
collated_list.append(copy.deepcopy(source))
return collated_list
|
a82b470685ee53f3fe2de5e41a3f212e32a4d606
| 3,641,703
|
def tolist(obj):
"""
Convert given `obj` to list.
If `obj` is not a list, return `[obj]`, else return `obj` itself.
"""
if not isinstance(obj, list):
return [obj]
return obj
|
f511f4ebb86977b2db8646e692abc9840c2ae2d1
| 3,641,704
|
def bip44_tree(config: dict, cls=hierarchy.Node) -> hierarchy.Node:
"""
Return the root node of a BIP44-compatible partially ordered hierarchy.
https://github.com/bitcoin/bips/blob/master/bip-0044.mediawiki
The `config` parameter is a dictionary of the following form:
- the keys of the dictionary are crypto-coins;
- the values of the dictionary specify the number of accounts to generate for each coin,
and the number of public/private addresses to generate for each account.
As an example:
{'BTC': (
(1, 2), (4, 5), (0, 1)
)}
The previous dictionary represents a single coin, BTC.
There are three accounts, that respectively have 1, 4, and 0 private addresses and 2, 5, and 1 public addresses.
"""
master_node = cls(0, tag='m')
purpose_node = cls(44 + constants.CryptoConstants.BIP32_HARDENED_INDEX.value, tag="44'")
master_node.edges.append(purpose_node)
for coin, coin_config in config.items():
assert isinstance(coin, str)
assert coin_config
coin_node = cls(constants.CoinType[coin].value, coin)
purpose_node.edges.append(coin_node)
for i, (n_private_addresses, n_public_addresses) in enumerate(coin_config):
assert n_private_addresses > 0 or n_public_addresses > 0
account_node = cls(i)
coin_node.edges.append(account_node)
public_node = cls(0, 'XPUB')
account_node.edges.append(public_node)
private_node = cls(1, 'XPRV')
account_node.edges.append(private_node)
previous_node = private_node
for j in range(n_private_addresses):
private_address_node = cls(j)
previous_node.edges.append(private_address_node)
previous_node = private_address_node
previous_node = public_node
for j in range(n_public_addresses):
public_address_node = cls(j)
previous_node.edges.append(public_address_node)
previous_node = public_address_node
return master_node
|
bd88895932b66963aa7f63f30ad49ac009ea41f1
| 3,641,705
|
from operator import itemgetter
def delete_useless_vrrp_subnets(client, to_delete, project_id):
    """
    :param client: 'Client' instance
    :param to_delete: dict mapping (prefix_length, type, master_region,
        slave_region) keys to the quantity of subnets to delete
    :rtype: list
    """
result = []
vrrp_subnets = client.vrrp.list(project_id=project_id)
for key in to_delete:
vrrp_to_delete = [vrrp for vrrp in vrrp_subnets if (
int(vrrp.cidr.split('/')[1]), "ipv4",
vrrp.master_region, vrrp.slave_region) == key]
vrrp_to_delete.sort(key=itemgetter("status"), reverse=True)
for vrrp in vrrp_to_delete[:to_delete.get(key)]:
client.vrrp.delete(vrrp.id)
result.append(vrrp.id)
return result
|
b16019b026c32d310f9f938a7ca1fada31d02d84
| 3,641,706
|
import torch
import warnings
def barycenter_wbc(P, K, logweights, Kb=None, c=None, debiased=False,
maxiter=1000, tol=1e-4):
"""Compute the Wasserstein divergence barycenter between histograms.
"""
n_hists, width, _ = P.shape
if Kb is None:
b = torch.ones_like(P)[None, :]
Kb = convol_huge_imgs(b, K)
if c is None:
c = torch.ones(1, width, width, device=P.device)
q = c.clone()
logweights.requires_grad = True
err = 1
weights = torch.softmax(logweights, dim=1)[:, :, None, None]
for ii in range(maxiter):
with torch.no_grad():
qold = q.detach().clone()
a = P[None, :] / Kb
Ka = convol_huge_imgs(a, K.t())
q = c * torch.prod((Ka) ** weights, dim=1)
if debiased:
Kc = convol_imgs(c, K.t())
c = (c * q / Kc) ** 0.5
Q = q[:, None, :, :]
b = Q / Ka
Kb = convol_huge_imgs(b, K)
if torch.isnan(q).any():
warnings.warn("Numerical Errors ! Stopped early in debiased = %s" % debiased)
break
with torch.no_grad():
err = abs(q - qold).max()
if err < tol and ii > 5:
break
if ii == maxiter - 1:
warnings.warn("*** Maxiter reached ! err = {} ***".format(err))
return q
|
453c40ed988d4fe86dc202f816c5eb3bb6cbd452
| 3,641,707
|
import logging
def syllable_fingerprint(word):
"""
Use the pronuncation dict to map the potential syllable stress patterns
of a word to a ternary string "fingerprint"
0 is a syllable that must be unstressed
1 is a syllable that must be stressed
x is a syllable that may be stressed or unstressed
e.g. python => 10
pronunciation => 0x010
"""
stresses = get_syllable_stress(word)
if not stresses:
raise ValueError(f'Found no options for word {word}')
if len(stresses) == 1:
return stresses.pop()
syllables = len(list(stresses)[0])
if not all(len(s) == syllables for s in stresses):
        logging.debug('Pronunciations differ in syllable count')
logging.debug('%s, %s', word, stresses)
return stresses.pop() # lol just pick one. TODO
fp = []
for i in range(syllables):
if all(s[i] == '1' for s in stresses):
fp.append('1')
elif all(s[i] == '0' for s in stresses):
fp.append('0')
else:
fp.append('x')
return ''.join(fp)
|
7acce68ee686c5d5dbfc06eea00a95f7ae214ac9
| 3,641,708
|
import numpy as np
def logistic_predict(weights, data):
"""
Compute the probabilities predicted by the logistic classifier.
Note: N is the number of examples and
M is the number of features per example.
Inputs:
weights: (M+1) x 1 vector of weights, where the last element
corresponds to the bias (intercepts).
data: N x M data matrix where each row corresponds
to one data point.
Outputs:
        y: N x 1 vector of probabilities. This is the output of the classifier.
"""
    # Append a column of ones so the last weight acts as the bias/intercept term,
    # as described in the docstring.
    data_with_bias = np.hstack((data, np.ones((len(data), 1))))
    z = np.dot(data_with_bias, weights)
    y = sigmoid(z)
return y
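A minimal usage sketch (not from the original source); `sigmoid` is undefined in the snippet, so a standard logistic function is assumed below:
import numpy as np
def sigmoid(z):
    # assumed stand-in for the helper used by logistic_predict
    return 1.0 / (1.0 + np.exp(-z))
weights = np.array([[0.5], [-0.25], [0.1]])  # M=2 feature weights + 1 bias
data = np.array([[1.0, 2.0],
                 [0.0, 1.0]])                # N=2 examples, M=2 features
print(logistic_predict(weights, data))       # -> (2, 1) array of probabilities in (0, 1)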
|
52c2ff3ed4b854de645b2252b4949b4a7a68bda1
| 3,641,709
|
def score_matrix(motifs, k):
"""returns matrix score formed from motifs"""
nucleotides = {'A': [0]*k, 'T': [0]*k, 'C': [0]*k, 'G': [0]*k}
for motif in motifs:
for index, nucleotide in enumerate(motif):
nucleotides[nucleotide][index] = nucleotides[nucleotide][index] + 1
i = 0
matrix_score = 0
while i < k:
output = []
column_score = 0
for key in nucleotides:
output.append(nucleotides[key][i])
max_consumed = False
max_item = max(output)
for item in output:
if item == max_item:
if not max_consumed:
max_consumed = True
continue
else:
column_score = column_score + item
else:
column_score = column_score+item
matrix_score = matrix_score + column_score
i = i + 1
return matrix_score
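An illustrative example (not from the original source): the per-column scores below are 0, 1 and 0, since only the middle column has one nucleotide differing from its consensus.
print(score_matrix(["ATG", "ATG", "ACG"], 3))  # -> 1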
|
ce9f7b770ce75d4e872da7b3c9b4fa3fbcd1e900
| 3,641,710
|
def log_loss(y_true, dist_pred, sample=True, return_std=False):
""" Log loss
Parameters
----------
y_true: np.array
The true labels
dist_pred: ProbabilisticEstimator.Distribution
The predicted distribution
sample: boolean, default=True
If true, loss will be averaged across the sample
return_std: boolean, default=False
If true, the standard deviation of the
loss sample will be returned
Returns
-------
np.array
Loss (with standard deviation if ``return_std`` is True)
"""
pdf = dist_pred.pdf(y_true)
loss = -np.log(pdf)
if sample:
return sample_loss(loss, return_std)
return loss
|
0f3d19111593441011cfb1e532be50a19d423390
| 3,641,711
|
import scipy
def matrix_pencil_method_old(data, p, noise_level=None, verbose=1, **kwargs):
""" Older impleentation of the matrix pencil method with pencil p on given data to
extract energy levels.
Parameters
----------
data -- lists of Obs, where the nth entry is considered to be the correlation function
at x0=n+offset.
p -- matrix pencil parameter which corresponds to the number of energy levels to extract.
         Higher values of p can help decrease noise.
noise_level -- If this argument is not None an additional prefiltering via singular
value decomposition is performed in which all singular values below 10^(-noise_level)
times the largest singular value are discarded. This increases the computation time.
verbose -- if larger than zero details about the noise filtering are printed to stdout
(default 1)
"""
n_data = len(data)
if n_data <= p:
raise Exception('The pencil p has to be smaller than the number of data samples.')
matrix = scipy.linalg.hankel(data[:n_data - p], data[n_data - p - 1:]) @ np.identity(p + 1)
if noise_level is not None:
u, s, vh = svd(matrix)
s_values = np.vectorize(lambda x: x.value)(s)
if verbose > 0:
print('Singular values: ', s_values)
digit = np.argwhere(s_values / s_values[0] < 10.0**(-noise_level))
if digit.size == 0:
digit = len(s_values)
else:
digit = int(digit[0])
if verbose > 0:
print('Consider only', digit, 'out of', len(s), 'singular values')
new_matrix = u[:, :digit] * s[:digit] @ vh[:digit, :]
y1 = new_matrix[:, :-1]
y2 = new_matrix[:, 1:]
else:
y1 = matrix[:, :-1]
y2 = matrix[:, 1:]
# Moore–Penrose pseudoinverse
pinv_y1 = pinv(y1)
# Note: Automatic differentiation of eig is implemented in the git of autograd
# but not yet released to PyPi (1.3). The code is currently part of pyerrors
e = eig((pinv_y1 @ y2), **kwargs)
energy_levels = -np.log(np.abs(e))
return sorted(energy_levels, key=lambda x: abs(x.value))
|
4bcb435b3b16b153d0d1f1689f542df1fdc74ca8
| 3,641,712
|
def ext_sum(text, ratio=0.8):
"""
Generate extractive summary using BERT model
INPUT:
text - str. Input text
ratio - float. Enter a ratio between 0.1 - 1.0 [default = 0.8]
(ratio = summary length / original text length)
OUTPUT:
summary - str. Generated summary
"""
bert_model = Summarizer()
summary = bert_model(text, ratio=ratio)
return summary
|
99285d08425340f70984ce0645efdbaaa3e9072a
| 3,641,713
|
def khinalug_input_normal(field, text):
"""
Prepare a string from one of the query fields for subsequent
processing: replace common shortcuts with valid Khinalug characters.
"""
if field not in ('wf', 'lex', 'lex2', 'trans_ru', 'trans_ru2'):
return text
text = text.replace('c1_', 'č̄')
text = text.replace('c1\'', 'č̣')
text = text.replace('7', 'ˁ')
text = text.replace('g1', 'ǧ')
text = text.replace('s1', 'š')
text = text.replace('z1', 'ž')
text = text.replace('c1', 'č')
text = text.replace('j1', 'ǯ')
text = text.replace('a1', 'ä')
text = text.replace('u1', 'ü')
text = text.replace('o1', 'ö')
text = text.replace('i1', 'ı')
text = text.replace('k_', 'k̄')
text = text.replace('t_', 't̄')
text = text.replace('q_', 'q̄')
text = text.replace('c_', 'c̄')
text = text.replace('c\'', 'c̣')
text = text.replace('k\'', 'ḳ')
text = text.replace('q\'', 'q̇')
text = text.replace('x\'', 'x̣')
text = text.replace('t\'', 'ṭ')
text = text.replace('h\'', 'ḥ')
return text
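An illustrative example (not from the original source): shortcuts are expanded only for the listed query fields.
print(khinalug_input_normal('wf', "s1u1k_"))       # -> 'šük̄'
print(khinalug_input_normal('comment', "s1u1k_"))  # -> 's1u1k_' (field left untouched)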
|
b9b9413ae461b6a03aa8c0db4396658dbe242c91
| 3,641,714
|
from typing import List
from typing import Dict
from typing import Any
import numpy as np
import pandas as pd
from numpy import ndarray
def _shift_all_classes(classes_list: List[ndarray], params_dict: Dict[str, Any]):
    """Shift the location of all classes.
Args:
classes_list: List of classes as numpy arrays.
params_dict: Dict including the shift values for all classes.
Returns:
List of shifted classes.
"""
classes_df = pd.DataFrame()
shifted_classes = []
# shift all classes
for generated_class, shift in zip(classes_list, params_dict["all_shifts"]):
# shift class data and exclude the label from shifting
label = generated_class[:, 0].reshape(-1, 1)
shifted_class_data = generated_class[:, 1:] + shift
classes_df["mean_" + str(shift)] = shifted_class_data.flatten()
labeled_shifted_class = np.hstack((label, shifted_class_data))
assert labeled_shifted_class[:, 0].all() == label.all()
shifted_classes.append(labeled_shifted_class)
return shifted_classes, classes_df
|
2176e5f4da6aecc25386e978182887fb8568faaa
| 3,641,715
|
import tensorflow as tf  # uses the TensorFlow 1.x API
def fully_connected_layer(tensor,
size=None,
weight_init=None,
bias_init=None,
name=None):
"""Fully connected layer.
Parameters
----------
tensor: tf.Tensor
Input tensor.
size: int
Number of output nodes for this layer.
weight_init: float
Weight initializer.
bias_init: float
Bias initializer.
name: str
Name for this op. Defaults to 'fully_connected'.
Returns
-------
tf.Tensor:
A new tensor representing the output of the fully connected layer.
Raises
------
ValueError
If input tensor is not 2D.
"""
if weight_init is None:
num_features = tensor.get_shape()[-1].value
weight_init = tf.truncated_normal([num_features, size], stddev=0.01)
if bias_init is None:
bias_init = tf.zeros([size])
with tf.name_scope(name, 'fully_connected', [tensor]):
w = tf.Variable(weight_init, name='w', dtype=tf.float32)
b = tf.Variable(bias_init, name='b', dtype=tf.float32)
return tf.nn.xw_plus_b(tensor, w, b)
|
605cc52e8c5262aead6cb758488940e7661286b1
| 3,641,716
|
from pathlib import Path
def fetch_osborne_magnetic(version):
"""
Magnetic airborne survey of the Osborne Mine and surroundings, Australia
This is a section of a survey acquired in 1990 by the Queensland
Government, Australia. The line data have approximately 80 m terrain
clearance and 200 m line spacing. Total field anomalies are in nT. The
flight height was calculated by summing the terrain clearance to
interpolated values of SRTM (referenced to sea level). The section contains
the total field magnetic anomalies associated with the Osborne Mine,
Lightning Creek sill complex, and the Brumby prospect.
There are ~990,000 measurements in total with 5 columns available: flight
line number, longitude, latitude (geodetic), height (orthometric), and the
total field magnetic anomaly.
**Format:** CSV with xz (lzma) compression.
**Load with:** :func:`pandas.read_csv`
**Original source:** `Geophysical Acquisition & Processing Section 2019.
MIM Data from Mt Isa Inlier, QLD (P1029), magnetic line data, AWAGS
levelled. Geoscience Australia, Canberra
<http://pid.geoscience.gov.au/dataset/ga/142419>`__
**Original license:** CC-BY
**Versions:**
* `1
<https://github.com/fatiando-data/osborne-magnetic/releases/tag/v1>`_
(doi:`10.5281/zenodo.5882209 <https://doi.org/10.5281/zenodo.5882209>`__)
Parameters
----------
version : int
The data version to fetch. See the available versions above.
Returns
-------
fname : :class:`pathlib.Path`
Path to the downloaded file on disk.
"""
_check_versions(version, allowed={1}, name="Osborne mine magnetic")
fname = "osborne-magnetic.csv.xz"
return Path(_repository(fname, version).fetch(fname))
|
2a0575557a18ca4442f0cf21ee51ccd94d316ffa
| 3,641,717
|
from sympy.core.symbol import Symbol
from sympy.printing.pycode import MpmathPrinter as Printer
from sympy.printing.pycode import SciPyPrinter as Printer
from sympy.printing.pycode import NumPyPrinter as Printer
from sympy.printing.lambdarepr import NumExprPrinter as Printer
from sympy.printing.tensorflow import TensorflowPrinter as Printer
from sympy.printing.pycode import SymPyPrinter as Printer
from sympy.printing.pycode import PythonCodePrinter as Printer
def lambdify(args, expr, modules=None, printer=None, use_imps=True,
dummify=False):
"""
Returns an anonymous function for fast calculation of numerical values.
If not specified differently by the user, ``modules`` defaults to
``["scipy", "numpy"]`` if SciPy is installed, ``["numpy"]`` if only
NumPy is installed, and ``["math", "mpmath", "sympy"]`` if neither is
installed. That is, SymPy functions are replaced as far as possible by
either ``scipy`` or ``numpy`` functions if available, and Python's
standard library ``math``, or ``mpmath`` functions otherwise. To change
this behavior, the "modules" argument can be used. It accepts:
- the strings "math", "mpmath", "numpy", "numexpr", "scipy", "sympy",
"tensorflow"
- any modules (e.g. math)
- dictionaries that map names of sympy functions to arbitrary functions
- lists that contain a mix of the arguments above, with higher priority
given to entries appearing first.
.. warning::
Note that this function uses ``eval``, and thus shouldn't be used on
unsanitized input.
Arguments in the provided expression that are not valid Python identifiers
    are substituted with dummy symbols. This allows for applied functions
(e.g. f(t)) to be supplied as arguments. Call the function with
dummify=True to replace all arguments with dummy symbols (if `args` is
not a string) - for example, to ensure that the arguments do not
redefine any built-in names.
For functions involving large array calculations, numexpr can provide a
significant speedup over numpy. Please note that the available functions
for numexpr are more limited than numpy but can be expanded with
implemented_function and user defined subclasses of Function. If specified,
numexpr may be the only option in modules. The official list of numexpr
functions can be found at:
https://github.com/pydata/numexpr#supported-functions
In previous releases ``lambdify`` replaced ``Matrix`` with ``numpy.matrix``
by default. As of release 1.0 ``numpy.array`` is the default.
To get the old default behavior you must pass in ``[{'ImmutableDenseMatrix':
numpy.matrix}, 'numpy']`` to the ``modules`` kwarg.
>>> from sympy import lambdify, Matrix
>>> from sympy.abc import x, y
>>> import numpy
>>> array2mat = [{'ImmutableDenseMatrix': numpy.matrix}, 'numpy']
>>> f = lambdify((x, y), Matrix([x, y]), modules=array2mat)
>>> f(1, 2)
[[1]
[2]]
Usage
=====
(1) Use one of the provided modules:
>>> from sympy import sin, tan, gamma
>>> from sympy.abc import x, y
>>> f = lambdify(x, sin(x), "math")
Attention: Functions that are not in the math module will throw a name
error when the function definition is evaluated! So this
would be better:
>>> f = lambdify(x, sin(x)*gamma(x), ("math", "mpmath", "sympy"))
(2) Use some other module:
>>> import numpy
>>> f = lambdify((x,y), tan(x*y), numpy)
Attention: There are naming differences between numpy and sympy. So if
you simply take the numpy module, e.g. sympy.atan will not be
translated to numpy.arctan. Use the modified module instead
by passing the string "numpy":
>>> f = lambdify((x,y), tan(x*y), "numpy")
>>> f(1, 2)
-2.18503986326
>>> from numpy import array
>>> f(array([1, 2, 3]), array([2, 3, 5]))
[-2.18503986 -0.29100619 -0.8559934 ]
In the above examples, the generated functions can accept scalar
values or numpy arrays as arguments. However, in some cases
the generated function relies on the input being a numpy array:
>>> from sympy import Piecewise
>>> from sympy.utilities.pytest import ignore_warnings
>>> f = lambdify(x, Piecewise((x, x <= 1), (1/x, x > 1)), "numpy")
>>> with ignore_warnings(RuntimeWarning):
... f(array([-1, 0, 1, 2]))
[-1. 0. 1. 0.5]
>>> f(0)
Traceback (most recent call last):
...
ZeroDivisionError: division by zero
In such cases, the input should be wrapped in a numpy array:
>>> with ignore_warnings(RuntimeWarning):
... float(f(array([0])))
0.0
Or if numpy functionality is not required another module can be used:
>>> f = lambdify(x, Piecewise((x, x <= 1), (1/x, x > 1)), "math")
>>> f(0)
0
(3) Use a dictionary defining custom functions:
>>> def my_cool_function(x): return 'sin(%s) is cool' % x
>>> myfuncs = {"sin" : my_cool_function}
>>> f = lambdify(x, sin(x), myfuncs); f(1)
'sin(1) is cool'
Examples
========
>>> from sympy.utilities.lambdify import implemented_function
>>> from sympy import sqrt, sin, Matrix
>>> from sympy import Function
>>> from sympy.abc import w, x, y, z
>>> f = lambdify(x, x**2)
>>> f(2)
4
>>> f = lambdify((x, y, z), [z, y, x])
>>> f(1,2,3)
[3, 2, 1]
>>> f = lambdify(x, sqrt(x))
>>> f(4)
2.0
>>> f = lambdify((x, y), sin(x*y)**2)
>>> f(0, 5)
0.0
>>> row = lambdify((x, y), Matrix((x, x + y)).T, modules='sympy')
>>> row(1, 2)
Matrix([[1, 3]])
Tuple arguments are handled and the lambdified function should
be called with the same type of arguments as were used to create
the function.:
>>> f = lambdify((x, (y, z)), x + y)
>>> f(1, (2, 4))
3
A more robust way of handling this is to always work with flattened
arguments:
>>> from sympy.utilities.iterables import flatten
>>> args = w, (x, (y, z))
>>> vals = 1, (2, (3, 4))
>>> f = lambdify(flatten(args), w + x + y + z)
>>> f(*flatten(vals))
10
Functions present in `expr` can also carry their own numerical
implementations, in a callable attached to the ``_imp_``
attribute. Usually you attach this using the
``implemented_function`` factory:
>>> f = implemented_function(Function('f'), lambda x: x+1)
>>> func = lambdify(x, f(x))
>>> func(4)
5
``lambdify`` always prefers ``_imp_`` implementations to implementations
in other namespaces, unless the ``use_imps`` input parameter is False.
Usage with Tensorflow module:
>>> import tensorflow as tf
>>> f = Max(x, sin(x))
>>> func = lambdify(x, f, 'tensorflow')
>>> result = func(tf.constant(1.0))
>>> result # a tf.Tensor representing the result of the calculation
<tf.Tensor 'Maximum:0' shape=() dtype=float32>
>>> sess = tf.Session()
>>> sess.run(result) # compute result
1.0
>>> var = tf.Variable(1.0)
>>> sess.run(tf.global_variables_initializer())
>>> sess.run(func(var)) # also works for tf.Variable and tf.Placeholder
1.0
>>> tensor = tf.constant([[1.0, 2.0], [3.0, 4.0]]) # works with any shape tensor
>>> sess.run(func(tensor))
array([[ 1., 2.],
[ 3., 4.]], dtype=float32)
"""
# If the user hasn't specified any modules, use what is available.
if modules is None:
try:
_import("scipy")
except ImportError:
try:
_import("numpy")
except ImportError:
# Use either numpy (if available) or python.math where possible.
# XXX: This leads to different behaviour on different systems and
# might be the reason for irreproducible errors.
modules = ["math", "mpmath", "sympy"]
else:
modules = ["numpy"]
else:
modules = ["scipy", "numpy"]
# Get the needed namespaces.
namespaces = []
# First find any function implementations
if use_imps:
namespaces.append(_imp_namespace(expr))
# Check for dict before iterating
if isinstance(modules, (dict, str)) or not hasattr(modules, '__iter__'):
namespaces.append(modules)
else:
# consistency check
if _module_present('numexpr', modules) and len(modules) > 1:
raise TypeError("numexpr must be the only item in 'modules'")
namespaces += list(modules)
# fill namespace with first having highest priority
namespace = {}
for m in namespaces[::-1]:
buf = _get_namespace(m)
namespace.update(buf)
if hasattr(expr, "atoms"):
#Try if you can extract symbols from the expression.
#Move on if expr.atoms in not implemented.
syms = expr.atoms(Symbol)
for term in syms:
namespace.update({str(term): term})
if printer is None:
        if _module_present('mpmath', namespaces):
            from sympy.printing.pycode import MpmathPrinter as Printer
        elif _module_present('scipy', namespaces):
            from sympy.printing.pycode import SciPyPrinter as Printer
        elif _module_present('numpy', namespaces):
            from sympy.printing.pycode import NumPyPrinter as Printer
        elif _module_present('numexpr', namespaces):
            from sympy.printing.lambdarepr import NumExprPrinter as Printer
        elif _module_present('tensorflow', namespaces):
            from sympy.printing.tensorflow import TensorflowPrinter as Printer
        elif _module_present('sympy', namespaces):
            from sympy.printing.pycode import SymPyPrinter as Printer
        else:
            from sympy.printing.pycode import PythonCodePrinter as Printer
user_functions = {}
for m in namespaces[::-1]:
if isinstance(m, dict):
for k in m:
user_functions[k] = k
printer = Printer({'fully_qualified_modules': False, 'inline': True,
'allow_unknown_functions': True,
'user_functions': user_functions})
# Get the names of the args, for creating a docstring
if not iterable(args):
args = (args,)
names = []
# Grab the callers frame, for getting the names by inspection (if needed)
callers_local_vars = inspect.currentframe().f_back.f_locals.items()
for n, var in enumerate(args):
if hasattr(var, 'name'):
names.append(var.name)
else:
# It's an iterable. Try to get name by inspection of calling frame.
name_list = [var_name for var_name, var_val in callers_local_vars
if var_val is var]
if len(name_list) == 1:
names.append(name_list[0])
else:
# Cannot infer name with certainty. arg_# will have to do.
names.append('arg_' + str(n))
imp_mod_lines = []
for mod, keys in (getattr(printer, 'module_imports', None) or {}).items():
for k in keys:
if k not in namespace:
imp_mod_lines.append("from %s import %s" % (mod, k))
for ln in imp_mod_lines:
exec_(ln, {}, namespace)
# Provide lambda expression with builtins, and compatible implementation of range
namespace.update({'builtins':builtins, 'range':range})
# Create the function definition code and execute it
funcname = '_lambdifygenerated'
if _module_present('tensorflow', namespaces):
funcprinter = _TensorflowEvaluatorPrinter(printer, dummify)
else:
funcprinter = _EvaluatorPrinter(printer, dummify)
funcstr = funcprinter.doprint(funcname, args, expr)
funclocals = {}
global _lambdify_generated_counter
filename = '<lambdifygenerated-%s>' % _lambdify_generated_counter
_lambdify_generated_counter += 1
c = compile(funcstr, filename, 'exec')
exec_(c, namespace, funclocals)
# mtime has to be None or else linecache.checkcache will remove it
linecache.cache[filename] = (len(funcstr), None, funcstr.splitlines(True), filename)
func = funclocals[funcname]
# Apply the docstring
sig = "func({0})".format(", ".join(str(i) for i in names))
sig = textwrap.fill(sig, subsequent_indent=' '*8)
expr_str = str(expr)
if len(expr_str) > 78:
expr_str = textwrap.wrap(expr_str, 75)[0] + '...'
func.__doc__ = (
"Created with lambdify. Signature:\n\n"
"{sig}\n\n"
"Expression:\n\n"
"{expr}\n\n"
"Source code:\n\n"
"{src}\n\n"
"Imported modules:\n\n"
"{imp_mods}"
).format(sig=sig, expr=expr_str, src=funcstr, imp_mods='\n'.join(imp_mod_lines))
return func
|
cf7b65c503d1a7873f0ddacfb3f6aa841340ee0e
| 3,641,718
|
def _matches(o, pattern):
"""Match a pattern of types in a sequence."""
if not len(o) == len(pattern):
return False
comps = zip(o,pattern)
return all(isinstance(obj,kind) for obj,kind in comps)
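An illustrative example (not from the original source): the sequence matches only if the lengths agree and every element is an instance of the corresponding type.
print(_matches((1, "a", 2.0), (int, str, float)))  # -> True
print(_matches([1, "a"], (int, str, float)))       # -> False (length mismatch)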
|
e494016affa28e9018f337cb7184e96858701208
| 3,641,719
|
import csv
from io import StringIO
def excl_import_route():
"""import exclustions from csv"""
form = ExclImportForm()
if form.validate_on_submit():
imported = []
try:
for row in csv.DictReader(StringIO(form.data.data), EXPORT_FIELDNAMES, quoting=csv.QUOTE_MINIMAL):
imported.append(Excl(family=ExclFamily(row['family']), value=row['value'], comment=row['comment']))
if imported:
if form.replace.data:
db.session.query(Excl).delete()
for tmp in imported:
db.session.add(tmp)
db.session.commit()
return redirect(url_for('scheduler.excl_list_route'))
except (csv.Error, ValueError, SQLAlchemyError, psycopg2.Error) as e:
db.session.rollback()
current_app.logger.exception(e)
flash('Import failed', 'error')
return render_template('scheduler/excl/import.html', form=form)
|
780c5646b2a5771691c538cb71bfde390cd9b847
| 3,641,720
|
def receiver(signal, **kwargs):
"""
A decorator for connecting receivers to signals. Used by passing in the
signal and keyword arguments to connect::
@receiver(signal_object, sender=sender)
def signal_receiver(sender, **kwargs):
...
"""
def _decorator(func):
signal.connect(func, **kwargs)
return func
return _decorator
|
dbbde0855b2a657adaff9fa688aa158053e46579
| 3,641,721
|
from typing import final
def create_new_connected_component(dict_projections, dict_cc, dict_nodes_cc, g_list_, set_no_proj, initial_method,
params, i, file_tags=None):
"""
    If needed, create a new connected component and update the relevant dicts.
    :param dict_projections: Embedding dict
    :param dict_cc: Dict where keys are the numbers of the connected components and values are lists of nodes that are
    in this cc.
    :param dict_nodes_cc: Dict where keys are nodes and values are the numbers representing the cc they are in.
:param g_list_: List of graphs for each time stamp.
:param set_no_proj: Set of nodes that are currently not in the embedding because they create together a new cc.
:param initial_method: State-of-the-art method to embed them with.
:param params: Dict of parameters corresponding to the initial method.
:param i: Index of the time stamp
:param file_tags: If GCN GEA is used, one needs to provide file of tags
:return: Updated dict_cc, dict_nodes_cc, and embedding dictionary.
"""
new_cc = create_new_cc(list(set_no_proj), g_list_[i + 1], to_undirected=True)
dict_cc, dict_nodes_cc = add_new_cc(new_cc, dict_nodes_cc, dict_cc)
if new_cc.number_of_nodes() < params["dimension"] and initial_method == "HOPE":
dim = params["dimension"]
initial_method = "node2vec"
params = {"dimension": dim, "walk_length": 80, "num_walks": 16, "workers": 2}
_, dict_proj_new_cc, _ = final(new_cc, initial_method, params, file_tags=file_tags)
z = {**dict_projections, **dict_proj_new_cc}.copy()
return dict_cc, dict_nodes_cc, z
|
18b8c046e78f17b125bf85250953c9d7a656892a
| 3,641,722
|
def laguerre(x, k, c):
"""Generalized Laguerre polynomials. See `help(_gmw.morsewave)`.
LAGUERRE is used in the computation of the generalized Morse
wavelets and uses the expression given by Olhede and Walden (2002),
"Generalized Morse Wavelets", Section III D.
"""
x = np.atleast_1d(np.asarray(x).squeeze())
assert x.ndim == 1
y = np.zeros(x.shape)
for m in range(k + 1):
        # Log of gamma function much better ... trick from Matlab's ``beta''
fact = np.exp(gammaln_fn(k + c + 1) - gammaln_fn(c + m + 1) -
gammaln_fn(k - m + 1))
y += (-1)**m * fact * x**m / gamma_fn(m + 1)
return y
|
4eac2e1cbd9fd2097763b56129873aa6af4e8419
| 3,641,723
|
def EGshelfIIseas2km_ERAI(daily = False,
gridpath = '/home/idies/workspace/OceanCirculation/exp_ERAI/grid_glued.nc',
kppspath = '/home/idies/workspace/OceanCirculation/exp_ERAI/kpp_state_glued.nc',
fldspath = '/home/idies/workspace/OceanCirculation/exp_ERAI/result_*/output_glued/*.*_glued.nc',
dailypath = '/home/idies/workspace/OceanCirculation/exp_ERAI/result_*/output_glued/daily/*.*_glued.nc'):
"""
High-resolution (~2km) numerical simulation covering the east Greenland shelf (EGshelf), and the Iceland and Irminger Seas (IIseas).
Surface forcing based on the global atmospheric reanalysis ERA-Interim (ERAI).
Model setup: [AHPM17]_.
Parameters
----------
daily: bool
If True, include diagnostics stored with daily resolution (SI, oce).
Return everything with daily time frequency (instead of 6H).
gridpath: str
grid path. Default is SciServer's path.
kppspath: str
kpp_state path. Default is SciServer's path.
fldspath: str
Fields path (use * for multiple files). Default is SciServer's path.
dailypath: str
Daily fields path (use * for multiple files). Default is SciServer's path.
Returns
-------
od: OceanDataset
References
----------
.. [AHPM17] Almansi, M., T.W. Haine, R.S. Pickart, M.G. Magaldi, R. Gelderloos, and D. Mastropole, 2017: High-Frequency Variability in the Circulation and Hydrography of the Denmark Strait Overflow from a High-Resolution Numerical Model. J. Phys. Oceanogr., 47, 2999–3013, https://doi.org/10.1175/JPO-D-17-0129.1
"""
# Check input
if not isinstance(daily, bool): raise TypeError('`daily` must be a bool')
if not isinstance(gridpath, str): raise TypeError('`gridpath` must be a str')
if not isinstance(kppspath, str): raise TypeError('`kppspath` must be a str')
if not isinstance(fldspath, str): raise TypeError('`fldspath` must be a str')
if not isinstance(dailypath, str): raise TypeError('`dailypath` must be a str')
# Message
name = 'EGshelfIIseas2km_ERAI'
description = 'High-resolution (~2km) numerical simulation covering the east Greenland shelf (EGshelf), and the Iceland and Irminger Seas (IIseas). Citation: Almansi et al., 2017 - JPO.'
print('Opening [{}]:\n[{}].'.format(name, description))
# Open, concatenate, and merge
gridset = _xr.open_dataset(gridpath,
drop_variables = ['RC', 'RF', 'RU', 'RL'],
chunks={})
kppset = _xr.open_dataset(kppspath,
chunks={})
fldsset = _xr.open_mfdataset(fldspath,
drop_variables = ['diag_levels','iter'])
ds = _xr.merge([gridset, kppset, fldsset])
# Read daily files and resample
if daily:
# Open, and concatenate daily files
dailyset = _xr.open_mfdataset(dailypath,
drop_variables = ['diag_levels','iter'])
# Resample and merge
ds = _xr.merge([ds.isel(T=slice(0,None,4)), dailyset])
# Squeeze 1D Zs and create Z, Zp1, Zu, and Zl only
ds = ds.rename({'Z': 'Ztmp'})
ds = ds.rename({'Ztmp': 'Z', 'Zmd000216': 'Z'})
ds = ds.squeeze('Zd000001')
for dim in ['Z', 'Zp1', 'Zu', 'Zl']: ds[dim].attrs.update({'positive': 'up'})
# Rename time
ds = ds.rename({'T': 'time'})
# Add attribute (snapshot vs average)
for var in [var for var in ds.variables if ('time' in ds[var].coords and var!='time')]:
ds[var].attrs.update({'original_output': 'snapshot'})
# Add missing names
ds['U'].attrs['long_name'] = 'Zonal Component of Velocity'
ds['V'].attrs['long_name'] = 'Meridional Component of Velocity'
ds['W'].attrs['long_name'] = 'Vertical Component of Velocity'
ds['phiHyd'].attrs['long_name'] = 'Hydrostatic Pressure Pot.(p/rho) Anomaly'
ds['phiHydLow'].attrs['long_name'] = 'Depth integral of (rho -rhoconst) * g * dz / rhoconst'
# Add missing units
for varName in ['drC', 'drF', 'dxC', 'dyC', 'dxF', 'dyF', 'dxG', 'dyG', 'dxV', 'dyU', 'R_low']:
ds[varName].attrs['units'] = 'm'
for varName in ['rA', 'rAw', 'rAs', 'rAz']:
ds[varName].attrs['units'] = 'm^2'
for varName in ['fCori', 'fCoriG']:
ds[varName].attrs['units'] = '1/s'
for varName in ['Ro_surf']:
ds[varName].attrs['units'] = 'kg/m^3'
for varName in ['Depth']:
ds[varName].attrs['units'] = 'm'
for varName in ['HFacC', 'HFacW', 'HFacS']:
ds[varName].attrs['units'] = '-'
for varName in ['S']:
ds[varName].attrs['units'] = 'psu'
for varName in ['phiHyd', 'phiHydLow']:
ds[varName].attrs['units'] = 'm^2/s^2'
    # Consistent chunking
chunks = {**ds.sizes,
'time': ds['Temp'].chunks[ds['Temp'].dims.index('time')]}
ds = ds.chunk(chunks)
# Initialize OceanDataset
od = _OceanDataset(ds).import_MITgcm_rect_nc()
od = od.set_name(name).set_description(description)
od = od.set_parameters({'rSphere' : 6.371E3, # km None: cartesian
'eq_state' : 'jmd95', # jmd95, mdjwf
'rho0' : 1027, # kg/m^3 TODO: None: compute volume weighted average
'g' : 9.81, # m/s^2
'eps_nh' : 0, # 0 is hydrostatic
'omega' : 7.292123516990375E-05, # rad/s
'c_p' : 3.986E3, # specific heat [J/kg/K]
'tempFrz0' : 9.01E-02, # freezing temp. of sea water (intercept)
'dTempFrz_dS': -5.75E-02, # freezing temp. of sea water (slope)
})
od = od.set_projection('Mercator',
central_longitude=float(od.dataset['X'].mean().values),
min_latitude=float(od.dataset['Y'].min().values),
max_latitude=float(od.dataset['Y'].max().values),
globe=None,
latitude_true_scale=float(od.dataset['Y'].mean().values))
return od
|
89b70cebbdecdde6d38310b1d0aa6cfec495ae9b
| 3,641,724
|
from typing import Optional
from typing import Dict
import os
import time
import logging
import pickle
import json
def generate_pkl_features_from_fasta(
fasta_path: str,
name: str,
output_dir: str,
data_pipeline: DataPipeline,
timings: Optional[Dict[str, float]] = None):
"""Predicts structure using Uni-Fold for the given sequence."""
if timings is None:
timings = {}
# Check output dir.
output_dir = os.path.join(output_dir, name)
# TODO: temp change for the feature generation, remember to fix this
if not os.path.exists(output_dir):
os.makedirs(output_dir)
else:
if os.path.exists(os.path.join(output_dir, "timings.json")):
print(f"skip {fasta_path}")
return
msa_output_dir = os.path.join(output_dir, 'msas')
if not os.path.exists(msa_output_dir):
os.makedirs(msa_output_dir)
# Get features.
pt = time.time()
logging.info(f"processing file {fasta_path}...")
features = data_pipeline.process(
input_fasta_path=fasta_path,
msa_output_dir=msa_output_dir)
timings['data_pipeline'] = time.time() - pt
# Write out features as a pickled dictionary.
features_output_path = os.path.join(output_dir, 'features.pkl')
with open(features_output_path, 'wb') as f:
pickle.dump(features, f, protocol=4)
logging.info(f"process file {fasta_path} done.")
# Save timings.
timings_output_path = os.path.join(output_dir, 'timings.json')
with open(timings_output_path, 'w') as fp:
json.dump(timings, fp, indent=4)
return features
|
dfd166f8af954e4bac22cb893c102c0773ece27b
| 3,641,725
|
import math
def find_all_combinations(participants, team_sizes):
""" Finds all possible experience level combinations for specific team
sizes with duplicated experience levels (e.g. (1, 1, 2))
Returns a list of tuples representing all the possible combinations """
num_teams = len(team_sizes)
participant_levels = [LMS_LEVELS.get(participant.current_lms_module) or 1
for participant in participants]
hackathon_level = sum(participant_levels)
team_level = math.floor(hackathon_level / num_teams)
missing = hackathon_level - (num_teams * team_level)
team_sizes = list(set(team_sizes))
combos = []
for team_size in team_sizes:
combos += find_group_combinations(participant_levels, team_size,
team_level, missing)
# to remove differently sorted combinations with the same elements
sorted_combinations = [sorted(combo) for combo in combos]
    combos_without_dupes = list(set(tuple(i) for i in sorted_combinations))
return combos_without_dupes
|
d3f4de9911a1fc427fc2e01433634ccf815f9183
| 3,641,726
|
from typing import Callable
from typing import Any
def debug_callback(callback: Callable[..., Any], effect: DebugEffect, *args,
**kwargs):
"""Calls a stageable Python callback.
`debug_callback` enables you to pass in a Python function that can be called
inside of a staged JAX program. A `debug_callback` follows existing JAX
transformation *pure* operational semantics, which are therefore unaware of
side-effects. This means the effect could be dropped, duplicated, or
potentially reordered in the presence of higher-order primitives and
transformations.
We want this behavior because we'd like `debug_callback` to be "innocuous",
i.e. we want these primitives to change the JAX computation as little as
possible while revealing as much about them as possible, such as which parts
of the computation are duplicated or dropped.
Args:
callback: A Python callable.
effect: A `DebugEffect`.
*args: The positional arguments to the callback.
        **kwargs: The keyword arguments to the callback.
Returns:
The value of `callback(*args, **kwargs)`.
"""
if not isinstance(effect, DebugEffect):
raise ValueError("Can only use `DebugEffect` effects in `debug_callback`")
flat_args, in_tree = tree_util.tree_flatten((args, kwargs))
return debug_callback_p.bind(*flat_args, callback=callback, effect=effect,
in_tree=in_tree)
|
8e95dc55fbf5fe26875e5905b565f6ee0d9b143b
| 3,641,727
|
def normalized_copy(data):
"""
Normalize timeseries data, using the maximum across all regions and timesteps.
Parameters
----------
data : xarray Dataset
Dataset with all non-time dependent variables removed
Returns
-------
ds : xarray Dataset
Copy of `data`, with the absolute taken and normalized to 0-1
"""
ds = data.copy(deep=True) # Work off a copy
for var in ds.data_vars:
# Each DataArray is indexed over a different subset of loc_techs,
# so we find it in the list of dimensions
loc_tech_dim = [i for i in ds[var].dims if 'loc_techs' in i][0]
# For each technology, get the loc_techs which are relevant
loc_tech_subsets = [
get_loc_techs(ds[loc_tech_dim].values, tech)
for tech in set(i.split('::')[1] for i in ds[loc_tech_dim].values)
]
# remove empty lists within the _techs list
loc_tech_subsets = [i for i in loc_tech_subsets if i]
# For each technology, divide all values by the maximum absolute value
for loc_tech in loc_tech_subsets:
ds[var].loc[{loc_tech_dim: loc_tech}] = abs(
ds[var].loc[{loc_tech_dim: loc_tech}] /
abs(ds[var].loc[{loc_tech_dim: loc_tech}]).max()
)
return ds
|
cfcb94458deb6caa1125cfcf2904652900babc87
| 3,641,728
|
def _get_exception(ex: Exception) -> Exception:
"""Get exception cause/context from chained exceptions
:param ex: chained exception
:return: cause of chained exception if any
"""
if ex.__cause__:
return ex.__cause__
elif ex.__context__:
return ex.__context__
else:
return ex
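An illustrative example (not from the original source) with a chained exception; the original cause is recovered from the wrapper.
try:
    try:
        1 / 0
    except ZeroDivisionError as zde:
        raise ValueError("wrapped") from zde
except ValueError as ve:
    print(type(_get_exception(ve)).__name__)  # -> 'ZeroDivisionError'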
|
3f670dc237ebd865e31c7d0fd3719e2ea929de6d
| 3,641,729
|
from typing import Any
from typing import Dict
import numpy as np
def recursive_normalizer(value: Any, **kwargs: Dict[str, Any]) -> Any:
"""
Prepare a structure for hashing by lowercasing all values and round all floats
"""
digits = kwargs.get("digits", 10)
lowercase = kwargs.get("lowercase", True)
if isinstance(value, (int, type(None))):
pass
elif isinstance(value, str):
if lowercase:
value = value.lower()
elif isinstance(value, list):
value = [recursive_normalizer(x, **kwargs) for x in value]
elif isinstance(value, tuple):
value = tuple(recursive_normalizer(x, **kwargs) for x in value)
elif isinstance(value, dict):
ret = {}
for k, v in value.items():
if lowercase:
k = k.lower()
ret[k] = recursive_normalizer(v, **kwargs)
value = ret
elif isinstance(value, np.ndarray):
if digits:
# Round array
value = np.around(value, digits)
# Flip zeros
value[np.abs(value) < 5 ** (-(digits + 1))] = 0
elif isinstance(value, float):
if digits:
value = round(value, digits)
if value == -0.0:
value = 0
if value == 0.0:
value = 0
else:
raise TypeError("Invalid type in KeywordSet ({type(value)}), only simple Python types are allowed.")
return value
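An illustrative example (not from the original source): keys and strings are lowercased, floats rounded, and containers handled recursively.
raw = {"Method": "B3LYP", "Cutoff": 1.00000000000049, "Tags": ("Frozen", "Core")}
print(recursive_normalizer(raw))
# -> {'method': 'b3lyp', 'cutoff': 1.0, 'tags': ('frozen', 'core')}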
|
e274c3976405838054d7251fdca8520dc75c48fd
| 3,641,730
|
from typing import Set
def rip_and_tear(context) -> Set:
"""Edge split geometry using specified angle or unique mesh settings.
Also checks non-manifold geometry and hard edges.
Returns set of colors that are used to color meshes."""
processed = set()
angle_use_fixed = prefs.RenderFixedAngleUse
# Angle fixed in radians
angle_fixed = prefs.RenderFixedAngle
precision = prefs.RenderPrecision
# Colors are saved in format specified by render precision parameter
# Totally white and totally black (and close to them) colors are prohibited
colors = set()
# Apply split_n_paint function to every object and unite resulting colors
# colors.union(tuple(set(tuple([split_n_paint(context, colors, precision, obj,
# angle_use_fixed, angle_fixed, processed) for obj in context.scene.objects
# if obj.type == "MESH"]))))
for obj in context.scene.objects:
if obj.type == "MESH":
if obj.data in processed or len(obj.data.polygons) == 0:
processed.add(obj.data)
            else:
                # set.union() returns a new set without modifying `colors`;
                # update() records the returned colors in place.
                colors.update(
split_n_paint(
context, colors, precision, obj,
angle_use_fixed, angle_fixed,
processed,
)
)
return colors
|
6a67e9a90b4909c1aec8f7f784b2bc41750f5f79
| 3,641,731
|
def generate_primes(d):
"""Generate a set of all primes with d distinct digits."""
primes = set()
for i in range(10**(d-1)+1, 10**d, 2):
string = str(i)
unique_string = "".join(set(string))
if len(string) == len(unique_string): # Check that all digits are unique
if isprime(i): # Check that the number is prime
primes.add(str(i))
return primes
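A usage sketch; `isprime` is not defined in the snippet above, so SymPy's primality test is assumed here:
from sympy import isprime  # assumed provider of `isprime`
two_digit_primes = generate_primes(2)
print('13' in two_digit_primes)  # -> True
print('11' in two_digit_primes)  # -> False (repeated digit)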
|
4edf615165144f2ab6e5d12533adc4357d904506
| 3,641,732
|
def poinv(A, UPLO='L', workers=1, **kwargs):
"""
Compute the (multiplicative) inverse of symmetric/hermitian positive
definite matrices, with broadcasting.
    Given a square symmetric/hermitian positive-definite matrix `a`, return
the matrix `ainv` satisfying ``matrix_multiply(a, ainv) =
matrix_multiply(ainv, a) = Identity matrix``.
Parameters
----------
a : (..., M, M) array
        Symmetric/hermitian positive definite matrices to be inverted.
UPLO : {'U', 'L'}, optional
Specifies whether the calculation is done with the lower
triangular part of the elements in `a` ('L', default) or
the upper triangular part ('U').
workers : int, optional
The number of parallel threads to use along gufunc loop dimension(s).
If set to -1, the maximum number of threads (as returned by
``multiprocessing.cpu_count()``) are used.
Returns
-------
ainv : (..., M, M) array
(Multiplicative) inverse of the `a` matrices.
Notes
-----
Numpy broadcasting rules apply.
The inverse is computed using LAPACK routines _potrf, _potri
For elements where the LAPACK routine fails, the result will be set
to NaNs.
Implemented for types single, double, csingle and cdouble. Numpy conversion
rules apply.
See Also
--------
inv : compute the multiplicative inverse of general matrices.
Examples
--------
>>> a = np.array([[5, 3], [3, 5]])
>>> ainv = poinv(a)
>>> np.allclose(matrix_multiply(a, ainv), np.eye(2))
True
>>> np.allclose(matrix_multiply(ainv, a), np.eye(2))
True
"""
uplo_choices = ['U', 'L']
if UPLO not in uplo_choices:
raise ValueError("Invalid UPLO argument '%s', valid values are: %s" %
(UPLO, uplo_choices))
if 'L' == UPLO:
gufunc = _impl.poinv_lo
else:
gufunc = _impl.poinv_up
workers, orig_workers = _check_workers(workers)
try:
out = gufunc(A, **kwargs)
finally:
# restore original number of workers
if workers != orig_workers:
_impl.set_gufunc_threads(orig_workers)
return out
|
ccba9b0fc518e0482c6ac647d56abe0e86d3409c
| 3,641,733
|
def gen_task3() -> np.ndarray:
"""Task 3: centre of cross or a plus sign."""
canv = blank_canvas()
r, c = np.random.randint(GRID-2, size=2, dtype=np.int8)
# Do we create a cross or a plus sign?
syms = rand_syms(5) # a 3x3 sign has 2 symbols, outer and centre
# syms = np.array([syms[0], syms[0], syms[1], syms[0], syms[0]])
if np.random.rand() < 0.5:
# Let's do a plus
rows, cols = [r, r+1, r+1, r+1, r+2], [c+1, c, c+1, c+2, c+1]
else:
# Let's do a cross
rows, cols = [r, r, r+1, r+2, r+2], [c, c+2, c+1, c, c+2]
canv[rows, cols] = syms
return [3, syms[2]], canv
|
aba9e78cf4d042cacd8787a90275947ba603b37c
| 3,641,734
|
def init_susceptible_00():
"""
Real Name: b'init Susceptible 00'
Original Eqn: b'8e+06'
Units: b'person'
Limits: (None, None)
Type: constant
b''
"""
return 8e+06
|
acc506bdea96b224f3627084bbee9e1a025bcff9
| 3,641,735
|
import numpy as np
def spectrum_1D_scalar(data, dx, k_bin_num=100):
    """Calculates and returns the 1D spectrum for a 1D Gaussian field of scalars, assuming isotropy of the turbulence
    Example:
        d=np.random.randn(101)
        dx=1
        k_bins_weighted,spect1D=spectrum_1D_scalar(d, dx, k_bin_num=100)
        fig,ax=plt.subplots()
        ax.scatter(k_bins_weighted,spect1D)
    Arguments:
        data {(Mx,) array of floats} -- 1D Gaussian field of scalars
        dx {float} -- grid spacing
        k_bin_num {int} -- number of bins in reciprocal space
    Returns:
        k_bins_weighted {array of floats} -- location of bin centres
        spect1D {array of floats} -- spectral power within bin
    """
#fourier transform data, shift to have zero freq at centre, find power
f=np.fft.fftshift(np.fft.fftn(data))
fsqr=np.real(f*np.conj(f))
#calculate k vectors in each dimension
Mx = data.shape[0]
kx = np.fft.fftshift(np.fft.fftfreq(Mx, dx))
#calculate magnitude of k at each grid point
K = np.sqrt(kx**2)
#determine 1D spectrum of k, measured from origin
#sort array in ascending k, and sort power by the same factor
K_flat=K.flatten()
fsqr_flat=fsqr.flatten()
K_sort = K_flat[K_flat.argsort()]
fsqr_sort = fsqr_flat[K_flat.argsort()]
k_bin_width = K_sort.max()/k_bin_num
k_bins = k_bin_width*np.arange(0,k_bin_num+1)
k_bins_weighted = 0.5*(k_bins[:-1]+k_bins[1:])
spect1D=np.zeros_like(k_bins_weighted)
for i in range(1,k_bin_num):
upper=K_sort<i*k_bin_width # find only values below upper bound: BOOL
lower=K_sort>=(i-1)*k_bin_width #find only values above upper bound: BOOL
f_filtered=fsqr_sort[upper*lower] # use super numpy array filtering to select only those which match both!
spect1D[i-1] = f_filtered.mean() #and take their mean.
return k_bins_weighted, spect1D
|
88cdb3917d995fdf5d870ebfef3da90f8a4526fb
| 3,641,736
|
from operator import and_
def get_previous_cat(last_index: int) -> models.Cat:
"""Get previous cat.
Args:
last_index (int): View index of last seen cat.
"""
cat = models.Cat.query.filter(and_(models.Cat.disabled == False, models.Cat.index < last_index)).order_by(
desc(models.Cat.index)).first()
if cat is None:
cat = get_last_cat()
return cat
|
bd4b6511ab7b2f004b8539e46109ce128d7af4dd
| 3,641,737
|
def encode(file, res):
"""Encode an image. file is the path to the image, res is the resolution to use. Smaller res means smaller but lower quality output."""
out = buildHeader(res)
pixels = getPixels(file, res)
    for pixel in pixels:
        out += encodePixel(pixel)
return out
|
07f9622bc222f91cb614165e432b4584374030a3
| 3,641,738
|
import cv2
import numpy as np
def process_image(img):
    """Resize, reduce and expand image.
    # Argument:
        img: original image.
    # Returns
        image: ndarray(1, 416, 416, 3), processed image.
"""
image = cv2.resize(img, (416, 416), interpolation=cv2.INTER_CUBIC)
image = np.array(image, dtype='float32')
image /= 255.
image = np.expand_dims(image, axis=0)
return image
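A hypothetical usage sketch (the file name is made up): the result is a batch of one normalised 416x416 image.
import cv2
img = cv2.imread("sample.jpg")   # hypothetical H x W x 3 BGR image
batch = process_image(img)
print(batch.shape, batch.dtype)  # -> (1, 416, 416, 3) float32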
|
a139d0b82c82273de35d5e95b75cfd5f0e7635e3
| 3,641,739
|
def unnormalise_x_given_lims(x_in, lims):
"""
Scales the input x (assumed to be between [-1, 1] for each dim)
to the lims of the problem
"""
# assert len(x_in) == len(lims)
r = lims[:, 1] - lims[:, 0]
x_orig = r * (x_in + 1) / 2 + lims[:, 0]
return x_orig
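An illustrative example (not from the original source): -1 maps to the lower limit and +1 to the upper limit of each dimension.
import numpy as np
lims = np.array([[0.0, 10.0],
                 [-5.0, 5.0]])
print(unnormalise_x_given_lims(np.array([-1.0, 1.0]), lims))  # -> [0. 5.]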
|
1d4cd35f45ab8594e297eb64e152a481c01905cd
| 3,641,740
|
def scalar_projection(vector, onto):
"""
Compute the scalar projection of `vector` onto the vector `onto`.
`onto` need not be normalized.
"""
if vector.ndim == 1:
check(locals(), "vector", (3,))
check(locals(), "onto", (3,))
else:
k = check(locals(), "vector", (-1, 3))
if onto.ndim == 1:
check(locals(), "onto", (3,))
else:
check(locals(), "onto", (k, 3))
return dot(vector, normalize(onto))
|
d5b27d46e6d498b22adb1b081b9c7143c636307b
| 3,641,741
|
def update_table(page_current, page_size, sort_by, filter, row_count_value):
"""
    This is the callback function to update the datatable
with the required filtered, sorted, extended values
:param page_current: Current page number
:param page_size: Page size
:param sort_by: Column selected for sorting
:param filter: Value entered in the filter
:param row_count_value: Number of rows
:param data: dataframe
    :return: processed data and column values
"""
# If uploaded dataframe is not empty use that, otherwise
# use the default dataframe
if not df_up.empty:
# df_temp = pd.read_json(data, orient='split')
df_tab = df_up
else:
df_tab = df
# Setting the page size as row count value
if row_count_value is not None:
page_size = row_count_value
# Applying sort logic
if len(sort_by):
dff = df_tab.sort_values(
sort_by[0]['column_id'],
ascending=sort_by[0]['direction'] == 'asc',
inplace=False
)
else:
# No sort is applied
dff = df_tab
# Filter logic
if filter is not None:
filtering_expressions = filter.split(' && ')
for filter_part in filtering_expressions:
col_name, operator, filter_value = split_filter_part(filter_part)
if operator in ('eq', 'ne', 'lt', 'le', 'gt', 'ge'):
# these operators match pandas series operator method names
dff = dff.loc[getattr(dff[col_name], operator)(filter_value)]
elif operator == 'contains':
dff = dff.loc[dff[col_name].str.contains(filter_value)]
elif operator == 'datestartswith':
# this is a simplification of the front-end filtering logic,
# only works with complete fields in standard format
dff = dff.loc[dff[col_name].str.startswith(filter_value)]
# if selected_cols is not None:
# if len(selected_cols) != 0:
# return dff[selected_cols].iloc[
# page_current * page_size:(page_current + 1) * page_size
# ].to_dict('records')
# else:
# return dff.iloc[
# page_current * page_size:(page_current + 1) * page_size
# ].to_dict('records')
# else:
# Rounding the float values to 2 decimal places
dff = dff.round(2)
return [dff.iloc[
page_current * page_size:(page_current + 1) * page_size
].to_dict('records'),
[{"name": [i, j], "id": i} for i, j in zip(df_tab.columns, [str(x) for x in df_tab.dtypes.to_list()])]]
|
e2669f3b98546731974e5706b8af9f6d82b47550
| 3,641,742
|
import os
def get_value(environment_variable, default_value=None):
"""Return an environment variable value."""
value_string = os.getenv(environment_variable)
# value_string will be None if the variable is not defined.
if value_string is None:
return default_value
# Exception for ANDROID_SERIAL. Sometimes serial can be just numbers,
    # so we don't want to eval it.
if environment_variable == 'ANDROID_SERIAL':
return value_string
# Evaluate the value of the environment variable with string fallback.
return _eval_value(value_string)
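A usage sketch; `_eval_value` is not included in the snippet, so a hypothetical literal-eval style stand-in is defined here purely for illustration:
import ast
import os
def _eval_value(value_string):
    # hypothetical stand-in for the helper that is not shown above
    try:
        return ast.literal_eval(value_string)
    except (ValueError, SyntaxError):
        return value_string
os.environ['FUZZ_TIMEOUT'] = '25'        # hypothetical variable name
print(get_value('FUZZ_TIMEOUT'))         # -> 25 (string evaluated to an int)
print(get_value('NOT_SET', 'fallback'))  # -> 'fallback'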
|
db3bba6567b53cb103a38b56055cdbf554a59db3
| 3,641,743
|
import numpy as np
import pandas as pd
def load_mooring_csv(csvfilename):
"""Loads data contained in an ONC mooring csv file
:arg csvfilename: path to the csv file
:type csvfilename: string
:returns: data, lat, lon, depth - a pandas data frame object and the
        latitude, longitude and depth of the mooring
"""
data_line, lat, lon, depth = find_metadata(csvfilename)
# Look up headers
headers = pd.read_csv(csvfilename, skiprows=data_line-2, nrows=1,
header=None, skipinitialspace=True, dtype=str)
headers = np.array(headers)[0]
headers[0] = headers[0].replace('#', '')
headers[0] = headers[0].replace('"', '')
# Load data
data = pd.read_csv(csvfilename, header=None, skiprows=data_line,
names=headers, parse_dates=[0], low_memory=False)
data = data.convert_objects(convert_numeric=True)
data.rename(columns={'Time UTC (yyyy-mm-ddThh:mm:ss.fffZ)': 'time'},
inplace=True)
return data, lat, lon, depth
|
a974e8607916e8fbc1b2beb7af8768d048aca8f0
| 3,641,744
|
import pandas as pd
def ez_execute(query, engine):
"""
Function takes a query string and an engine object
and returns a dataframe on the condition that the
sql query returned any rows.
Arguments:
        query {str} -- a SQL query string
engine {sqlalchemy.engine.base.Engine} -- a database engine object
to run the query
Returns:
DataFrame -- A dataframe containing the results of executing the
sql query with the specified engine
"""
data = pd.read_sql_query(query, engine)
assert not data.empty, "Query returned no results"
return data
|
c350d552f89dca550e766337fd7c071e138c43e6
| 3,641,745
|
def compute_lima_image(counts, background, kernel):
"""Compute Li & Ma significance and flux images for known background.
Parameters
----------
counts : `~gammapy.maps.WcsNDMap`
Counts image
background : `~gammapy.maps.WcsNDMap`
Background image
kernel : `astropy.convolution.Kernel2D`
Convolution kernel
Returns
-------
images : dict
Dictionary containing result maps
Keys are: significance, counts, background and excess
See Also
--------
gammapy.stats.significance
"""
# Kernel is modified later make a copy here
kernel = deepcopy(kernel)
kernel.normalize("peak")
counts_conv = counts.convolve(kernel.array).data
background_conv = background.convolve(kernel.array).data
excess_conv = counts_conv - background_conv
significance_conv = significance(counts_conv, background_conv, method="lima")
return {
"significance": counts.copy(data=significance_conv),
"counts": counts.copy(data=counts_conv),
"background": counts.copy(data=background_conv),
"excess": counts.copy(data=excess_conv),
}
|
8049f5a46ecf81459a64811aec917e72ec78a208
| 3,641,746
|
def get_list_from(matrix):
"""
    Flattens the upper triangle (excluding the diagonal) of a capability matrix into a list.
"""
only_valuable = []
counter = 1
for row_number in range(matrix.shape[0]):
only_valuable += matrix[row_number, counter::].tolist()
counter += 1
return only_valuable
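An illustrative example (not from the original source): only the entries above the diagonal are kept.
import numpy as np
m = np.array([[0, 1, 2],
              [1, 0, 3],
              [2, 3, 0]])
print(get_list_from(m))  # -> [1, 2, 3]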
|
bbfa52ff6a960d91d5aece948e9d416c3dcf0667
| 3,641,747
|
from typing import Tuple
def get_user_bubble_text_for_justify_statement(statement: Statement, user: User, is_supportive: bool,
_tn: Translator) -> Tuple[str, str]:
"""
Returns user text for a bubble when the user has to justify a statement and text for the add-position-container
:param statement: The statement that shall be justified
:param user: The user concerned
    :param is_supportive: Indicates whether the justification is to be supportive
:param _tn: The default Translator
:return: The bubble text to be shown as well as the text for the corresponding premise
"""
LOG.debug("%s is supportive? %s", statement, is_supportive)
text = statement.get_text()
if _tn.get_lang() == 'de':
intro = _tn.get(_.itIsTrueThat if is_supportive else _.itIsFalseThat)
add_premise_text = start_with_capital(intro) + ' ' + text
else:
add_premise_text = start_with_capital(text) + ' ' + _tn.get(
_.holds if is_supportive else _.isNotAGoodIdea).strip()
add_premise_text += ', ...'
is_users_opinion = False
if user:
db_marked_statement = DBDiscussionSession.query(MarkedStatement).filter(
MarkedStatement.statement_uid == statement.uid,
MarkedStatement.author_uid == user.uid
).first()
is_users_opinion = db_marked_statement is not None
if is_users_opinion:
intro = _tn.get(_.youHaveTheOpinionThat)
outro = '' if is_supportive else ', ' + _tn.get(_.isNotAGoodIdea)
text = intro.format(text) + outro
return text, add_premise_text
if is_supportive:
intro = _tn.get(_.iAgreeWithX) if _tn.get_lang() == 'de' else '{}'
else:
intro = _tn.get(_.iDisagreeWith)
text = intro.format(text)
return text, add_premise_text
|
b303939794daada5b29d737270f474b380b5a192
| 3,641,748
|
def g1_constraint(x, constants, variables):
""" Constraint that the initial value of tangent modulus > 0 at ep=0.
:param np.ndarray x: Parameters of updated Voce-Chaboche model.
:param dict constants: Defines the constants for the constraint.
:param dict variables: Defines constraint values that depend on x.
:return float: Value of the constraint in standard form.
"""
g2 = g_constraint(x, 0.)
return g2
|
51d55a03b608cef2c3b5d87fe5cb56bf73326ae3
| 3,641,749
|
import sqlite3
def disconnect(connection_handler):
""" Closes a current database connection
:param connection_handler: the Connection object
:return: 0 if success and -1 if an exception arises
"""
try:
if connection_handler is not None:
connection_handler.close()
return 0
except sqlite3.Error as e:
logger.error('Database disconnection error: {0}'.format(e))
return -1
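# A minimal usage sketch; `logger` is assumed to be configured elsewhere in
# the original module, and the database file name is hypothetical.
# >>> conn = sqlite3.connect("example.db")
# >>> disconnect(conn)   # returns 0 on success, -1 on a sqlite3 error
# 0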
|
aaba17e38ef48fe7e0be5ba825e114b6f5148433
| 3,641,750
|
def throw_out_nn_indices(ind, dist, Xind):
"""Throw out near neighbor indices that are used to embed the time series.
This is an attempt to get around the problem of autocorrelation.
Parameters
----------
ind : 2d array
Indices to be filtered.
dist : 2d array
Distances to be filtered.
    Xind : 2d array
        Indices used to embed the time series; these are excluded from ind.
    Returns
    -------
    filt_dist : 2d array
        Filtered distances.
    filt_ind : 2d array
        Filtered indices.
"""
ind_store = []
dist_store = []
#iterate through each row
for i in range(len(Xind)):
xrow = Xind[i]
indrow = ind[i]
distrow = dist[i]
mask = np.ones(len(indrow),dtype=bool)
for val in xrow:
mask[indrow == val] = False
ind_store.append( indrow[mask] )
dist_store.append(distrow[mask])
#keep up to the shortest mask. This is so that we can vstack them
ind_len = min( [len(m) for m in ind_store] )
#make all lists the same size for concatenation
ind_store = [m[:ind_len] for m in ind_store]
dist_store = [m[:ind_len] for m in dist_store]
ind_store = np.vstack(ind_store)
dist_store = np.vstack(dist_store)
return dist_store, ind_store
|
638fb43ac484ffa0e15e3c19a5b643aae5a749d9
| 3,641,751
|
import math
def lead_angle(target_disp,target_speed,target_angle,bullet_speed):
"""
Given the displacement, speed and direction of a moving target, and the speed
of a projectile, returns the angle at which to fire in order to intercept the
target. If no such angle exists (for example if the projectile is slower than
the target), then None is returned.
"""
"""
One can imagine the gun, target and point of
target collision at some time t forming a triangle
--o-.-.-.--- St collision of which one side has length St*t where St is
. /' ' ' ' . . . o the target speed, and another has length Sb*t
. /z . . where Sb is the bullet speed. We can eliminate
. . . t by scaling all sides of the triangle equally
. A. . leaving one side St and another Sb. This
. . . Sb triangle can be split into 2 right-angled
. a__ . triangles which share line A. Angle z can then
. / . be calculated and length A found
. . (A = sin(z)/St), and from this angle a can be
-----o----- found (a = arcsin(A/Sb) leading to the
gun calculation of the firing angle.
"""
# Check for situations with no solution
if target_speed > bullet_speed:
return None
if target_disp[0]==0 and target_disp[1]==0:
return None
# Find angle to target
ang_to_targ = math.atan2(target_disp[1],target_disp[0])
# Calculate angle
return math.asin(target_speed/bullet_speed*math.sin(
ang_to_targ-target_angle-math.pi
)) + ang_to_targ
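# A worked sketch: a target 100 units along +x, moving along +y (angle pi/2)
# at speed 5, with a bullet speed of 10, requires leading the shot by 30 degrees.
# >>> angle = lead_angle((100, 0), 5, math.pi / 2, 10)
# >>> round(math.degrees(angle), 1)
# 30.0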
|
fb5dfddf8b36d4e49df2d740b18f9aa97381d08f
| 3,641,752
|
def fix_attr_encoding(ds):
""" This is a temporary hot-fix to handle the way metadata is encoded
when we read data directly from bpch files. It removes the 'scale_factor'
and 'units' attributes we encode with the data we ingest, converts the
'hydrocarbon' and 'chemical' attribute to a binary integer instead of a
boolean, and removes the 'units' attribute from the "time" dimension since
that too is implicitly encoded.
In future versions of this library, when upstream issues in decoding
data wrapped in dask arrays is fixed, this won't be necessary and will be
removed.
"""
def _maybe_del_attr(da, attr):
""" Possibly delete an attribute on a DataArray if it's present """
if attr in da.attrs:
del da.attrs[attr]
return da
def _maybe_decode_attr(da, attr):
# TODO: Fix this so that bools get written as attributes just fine
""" Possibly coerce an attribute on a DataArray to an easier type
to write to disk. """
# bool -> int
        if (attr in da.attrs) and isinstance(da.attrs[attr], bool):
da.attrs[attr] = int(da.attrs[attr])
return da
for v in ds.data_vars:
da = ds[v]
da = _maybe_del_attr(da, 'scale_factor')
da = _maybe_del_attr(da, 'units')
da = _maybe_decode_attr(da, 'hydrocarbon')
da = _maybe_decode_attr(da, 'chemical')
# Also delete attributes on time.
if hasattr(ds, 'time'):
times = ds.time
times = _maybe_del_attr(times, 'units')
return ds
|
a0d0c8bd8fcf8dfa999bec9a2022d29a1af22514
| 3,641,753
|
import time
def acme_parser(characters):
"""Parse records from acme global
Args:
characters: characters to loop through the url
Returns:
2 item tuple containing all the meds as a list and a count of all meds
"""
link = (
'http://acmeglobal.com/acme/'
'wp-content/themes/acme/trade_check.php'
'?initchar_trade={0!s}&divname_trade=human')
meds = []
for character in characters:
try:
meds += parse_char(link, character)
except:
wait = rand(5, 15)
print('Failed on character {!s}.'.format(character))
print('Trying again in {0:d}s.'.format(wait))
time.sleep(wait)
try:
meds += parse_char(link, character)
except:
print('Failed on character {!s} again.'.format(character))
print('Skipping character.')
return (meds, len(meds))
|
8e9fe3b020e05243075351d7eedbdba7a54d5d81
| 3,641,754
|
from typing import Any
import sys
def toStr(s: Any) -> str:
"""
Convert a given type to a default string
:param s: item to convert to a string
:return: converted string
"""
return s.decode(sys.getdefaultencoding(), 'backslashreplace') if hasattr(s, 'decode') else str(s)
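# A small usage sketch:
# >>> toStr(b'hello')
# 'hello'
# >>> toStr(42)
# '42'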
|
10adab737ab909760215810b94743a15e39b9035
| 3,641,755
|
def standard_atari_env_spec(env):
"""Parameters of environment specification."""
standard_wrappers = [[tf_atari_wrappers.RewardClippingWrapper, {}],
[tf_atari_wrappers.StackWrapper, {"history": 4}]]
env_lambda = None
if isinstance(env, str):
env_lambda = lambda: gym.make(env)
if callable(env):
env_lambda = env
assert env_lambda is not None, "Unknown specification of environment"
return tf.contrib.training.HParams(
env_lambda=env_lambda, wrappers=standard_wrappers, simulated_env=False)
|
e9751e1b376cdee5ec0f9c27d8ab4bf2e303f35b
| 3,641,756
|
def load_bikeshare(path='data', extract=True):
"""
Downloads the 'bikeshare' dataset, saving it to the output
path specified and returns the data.
"""
# name of the dataset
name = 'bikeshare'
data = _load_file_data(name, path, extract)
return data
|
7cce01f22c37460800a44a85b18e6574d9d7f6fb
| 3,641,757
|
def file2bytes(filename: str) -> bytes:
"""
Takes a filename and returns a byte string with the content of the file.
"""
with open(filename, 'rb') as f:
data = f.read()
return data
|
f917a265c17895c917c3c340041586bef0c34dac
| 3,641,758
|
import json
def load_session() -> dict:
"""
Returns available session dict
"""
try:
return json.load(SESSION_PATH.open())
except FileNotFoundError:
return {}
|
342c8e143c878cfc4821454cebfcc3ba47a2cd2a
| 3,641,759
|
def _preprocess_zero_mean_unit_range(inputs, dtype=tf.float32):
"""Map image values from [0, 255] to [-1, 1]."""
preprocessed_inputs = (2.0 / 255.0) * tf.cast(inputs, tf.float32) - 1.0
return tf.cast(preprocessed_inputs, dtype=dtype)
|
08238566a04ed35346b8f4ff0874fff7be48bded
| 3,641,760
|
from typing import cast
def fill_like(input, value, shape=None, dtype=None, name=None):
"""Create a uniformly filled tensor / array."""
input = as_tensor(input)
dtype = dtype or input.dtype
if has_tensor([input, value, shape], 'tf'):
value = cast(value, dtype)
        return tf.fill(input.shape, value, name)
else:
dtype = dtype or input.dtype
dtype = convert_dtype(dtype, 'np')
if shape is None:
return np.full_like(input, value, dtype=dtype)
else:
return np.full(shape, value, dtype=dtype)
|
1879ac8669396dfe3fe351dae97a96cd8d6a8e5e
| 3,641,761
|
from typing import Callable
import operator
def transform_item(key, f: Callable) -> Callable[[dict], dict]:
"""transform a value of `key` in a dict. i.e given a dict `d`, return a new dictionary `e` s.t e[key] = f(d[key]).
>>> my_dict = {"name": "Danny", "age": 20}
>>> transform_item("name", str.upper)(my_dict)
{'name': 'DANNY', 'age': 20}
"""
return functional_generic.itemmap(
functional_generic.when(
functional_generic.compose_left(operator.head, operator.equals(key)),
functional_generic.packstack(operator.identity, f),
),
)
|
a202fe59b29b0a1b432df759b4600388e2d9f72e
| 3,641,762
|
def mock_dataset(mocker, mock_mart, mart_datasets_response):
"""Returns an example dataset, built using a cached response."""
mocker.patch.object(mock_mart, 'get', return_value=mart_datasets_response)
return mock_mart.datasets['mmusculus_gene_ensembl']
|
bb9a8b828f0ac5bfa59b3faee0f9bcc22c7d954e
| 3,641,763
|
import torch
def loss_function(recon_x, x, mu, logvar):
"""Loss function for varational autoencoder VAE"""
BCE = F.binary_cross_entropy(recon_x, x, size_average=False)
# 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
return BCE + KLD
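# A minimal usage sketch; `F` is assumed to be `torch.nn.functional`, as in
# the original module, and the shapes below are illustrative only.
# >>> import torch.nn.functional as F
# >>> x = torch.rand(4, 784)                        # batch of flattened images in [0, 1]
# >>> recon_x = torch.sigmoid(torch.randn(4, 784))
# >>> mu, logvar = torch.zeros(4, 20), torch.zeros(4, 20)
# >>> loss = loss_function(recon_x, x, mu, logvar)  # scalar tensor (BCE + KLD)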
|
38c0d6ab7a8388e007324bdcfb8611f0a3072c35
| 3,641,764
|
import scipy
def resize_img(img, size):
"""
    Given an image as an ndarray, resize it to the target square size.
Args:
img: Input image in ndarray
size: Target image size
    Returns: Resized image in ndarray
"""
img = scipy.misc.imresize(img, (size, size))
if len(img.shape) == 2:
img = img.reshape((size, size, 1))
return img
|
8a3ff8bfab0c864a6c0e4701be07b9296ad23f28
| 3,641,765
|
def cloudtopheight_IR(bt, cloudmask, latitude, month, method="modis"):
"""Cloud Top Height (CTH) from 11 micron channel.
Brightness temperatures (bt) are converted to CTHs using the IR window approach:
(bt_clear - bt_cloudy) / lapse_rate.
See also:
:func:`skimage.measure.block_reduce`
Down-sample image by applying function to local blocks.
:func:`lapserate_moist_adiabate`
Constant value 6.5 [K/km]
:func:`lapserate_modis`
Estimate of the apparent lapse rate in [K/km]
depending on month and latitude acc. to Baum et al., 2012.
Parameters:
        bt (ndarray): brightness temperatures from the 11 micron channel.
        cloudmask (ndarray): binary cloud mask.
        latitude (ndarray): latitudes in [°], positive North, negative South.
        month (int): month of the year.
method (str): approach used to derive CTH: 'modis' see Baum et al., 2012,
'simple' uses the moist adiabatic lapse rate.
Returns:
ndarray: cloud top height.
References:
Baum, B.A., W.P. Menzel, R.A. Frey, D.C. Tobin, R.E. Holz, S.A.
Ackerman, A.K. Heidinger, and P. Yang, 2012: MODIS Cloud-Top Property
Refinements for Collection 6. J. Appl. Meteor. Climatol., 51,
1145–1163, https://doi.org/10.1175/JAMC-D-11-0203.1
"""
# Lapse rate
if method == "simple":
lapserate = lapserate_moist_adiabate()
elif method == "modis":
lapserate = lapserate_modis(month, latitude)
else:
raise ValueError("Method is not supported.")
resolution_ratio = np.shape(cloudmask)[0] // np.shape(bt)[0]
cloudmask_inverted = cloudmask.copy()
cloudmask_inverted[np.isnan(cloudmask_inverted)] = 1
cloudmask_inverted = np.asarray(
np.invert(np.asarray(cloudmask_inverted, dtype=bool)), dtype=int
)
cloudmask[np.isnan(cloudmask)] = 0
cloudmask = np.asarray(cloudmask, dtype=int)
# Match resolutions of cloud mask and brightness temperature (bt) arrays.
if resolution_ratio > 1:
# On bt resolution, flag pixels as cloudy only if all subgrid pixels
# are cloudy in the original cloud mask.
mask_cloudy = block_reduce(
cloudmask, (resolution_ratio, resolution_ratio), func=np.alltrue
)
# Search for only clear pixels to derive a bt clearsky/ocean value.
mask_clear = block_reduce(
cloudmask_inverted, (resolution_ratio, resolution_ratio), func=np.alltrue
)
elif resolution_ratio < 1:
try:
mask_cloudy = np.repeat(
np.repeat(cloudmask, resolution_ratio, axis=0), resolution_ratio, axis=1
)
mask_clear = np.repeat(
np.repeat(cloudmask_inverted, resolution_ratio, axis=0),
resolution_ratio,
axis=1,
)
except ValueError:
raise ValueError(
"Problems matching the shapes of provided cloud mask and bt arrays."
)
else:
mask_cloudy = cloudmask.copy()
mask_clear = cloudmask_inverted.copy()
bt_cloudy = np.ones(np.shape(bt)) * np.nan
bt_cloudy[mask_cloudy] = bt[mask_cloudy]
bt_clear_avg = np.nanmean(bt[mask_clear])
return (bt_clear_avg - bt_cloudy) / lapserate
|
b68dfb37b27d3067c2956fc3653640393491e014
| 3,641,766
|
def info2lists(info, in_place=False):
"""
Return info with:
1) `packages` dict replaced by a 'packages' list with indexes removed
2) `releases` dict replaced by a 'releases' list with indexes removed
info2list(info2dicts(info)) == info
"""
if 'packages' not in info and 'releases' not in info:
return info
if in_place:
info_lists = info
else:
info_lists = info.copy()
packages = info.get('packages')
if packages:
info_lists['packages'] = list(packages.values())
releases = info.get('releases')
if releases:
info_lists['releases'] = list(releases.values())
return info_lists
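# A small usage sketch:
# >>> info = {'packages': {0: {'name': 'foo'}}, 'releases': {0: {'version': '1.0'}}}
# >>> info2lists(info)
# {'packages': [{'name': 'foo'}], 'releases': [{'version': '1.0'}]}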
|
313fda757d386332e16a0a91bb4408fe3cb8c070
| 3,641,767
|
def calc_wave_number(g, h, omega, relax=0.5, eps=1e-15):
"""
Relaxed Picard iterations to find k when omega is known
"""
k0 = omega ** 2 / g
for _ in range(100):
k1 = omega ** 2 / g / tanh(k0 * h)
if abs(k1 - k0) < eps:
break
k0 = k1 * relax + k0 * (1 - relax)
else:
ocellaris_error(
'calc_wave_number did not converge',
'Input g=%r h=%r omega=%r, tolerance=%e' % (g, h, omega, eps),
)
return k1
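# A minimal usage sketch; `tanh` and `ocellaris_error` come from the original
# module. For deep water the result approaches the deep-water limit omega**2 / g.
# >>> k = calc_wave_number(g=9.81, h=50.0, omega=1.0)   # ~0.102 rad/m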
|
7173fc9f38547864943046fed1e74d9b5cc832b5
| 3,641,768
|
def emit_live_notification_for_model(obj, user, history, *, type:str="change", channel:str="events",
sessionid:str="not-existing"):
"""
Sends a model live notification to users.
"""
if obj._importing:
return None
content_type = get_typename_for_model_instance(obj)
if content_type == "userstories.userstory":
if history.type == HistoryType.create:
title = _("User story created")
url = resolve("userstory", obj.project.slug, obj.ref)
elif history.type == HistoryType.change:
title = _("User story changed")
url = resolve("userstory", obj.project.slug, obj.ref)
else:
title = _("User story deleted")
url = None
body = _("US #{} - {}").format(obj.ref, obj.subject)
elif content_type == "tasks.task":
if history.type == HistoryType.create:
title = _("Task created")
url = resolve("task", obj.project.slug, obj.ref)
elif history.type == HistoryType.change:
title = _("Task changed")
url = resolve("task", obj.project.slug, obj.ref)
else:
title = _("Task deleted")
url = None
body = _("Task #{} - {}").format(obj.ref, obj.subject)
elif content_type == "issues.issue":
if history.type == HistoryType.create:
title = _("Issue created")
url = resolve("issue", obj.project.slug, obj.ref)
elif history.type == HistoryType.change:
title = _("Issue changed")
url = resolve("issue", obj.project.slug, obj.ref)
else:
title = _("Issue deleted")
url = None
body = _("Issue: #{} - {}").format(obj.ref, obj.subject)
elif content_type == "wiki.wiki_page":
if history.type == HistoryType.create:
title = _("Wiki Page created")
url = resolve("wiki", obj.project.slug, obj.slug)
elif history.type == HistoryType.change:
title = _("Wiki Page changed")
url = resolve("wiki", obj.project.slug, obj.slug)
else:
title = _("Wiki Page deleted")
url = None
body = _("Wiki Page: {}").format(obj.slug)
elif content_type == "milestones.milestone":
if history.type == HistoryType.create:
title = _("Sprint created")
url = resolve("taskboard", obj.project.slug, obj.slug)
elif history.type == HistoryType.change:
title = _("Sprint changed")
url = resolve("taskboard", obj.project.slug, obj.slug)
else:
title = _("Sprint deleted")
url = None
body = _("Sprint: {}").format(obj.name)
else:
return None
return emit_event(
{
"title": title,
"body": "Project: {}\n{}".format(obj.project.name, body),
"url": url,
"timeout": 10000,
"id": history.id
},
"live_notifications.{}".format(user.id),
sessionid=sessionid
)
|
94e7e91ec73537aad71ab3839fbd203552d4fec2
| 3,641,769
|
def is_chitoi(tiles):
"""
Returns True if the hand satisfies chitoitsu.
"""
unique_tiles = set(tiles)
return (len(unique_tiles) == 7 and
all([tiles.count(tile) == 2 for tile in unique_tiles]))
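# A small usage sketch, with tiles encoded as strings (the encoding is
# illustrative; any hashable tile representation works):
# >>> is_chitoi(['1m', '1m', '2p', '2p', '3s', '3s', '4s', '4s',
# ...            '5z', '5z', '6z', '6z', '7z', '7z'])
# True
# >>> is_chitoi(['1m', '1m', '1m', '1m', '2p', '2p', '3s', '3s',
# ...            '4s', '4s', '5z', '5z', '6z', '6z'])
# False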
|
c04149174bb779cd07616d4f419fc86531ab95dd
| 3,641,770
|
import itertools
def get_hpo_ancestors(hpo_db, hpo_id):
"""
Get HPO terms higher up in the hierarchy.
"""
h=hpo_db.hpo.find_one({'id':hpo_id})
#print(hpo_id,h)
if 'replaced_by' in h:
# not primary id, replace with primary id and try again
h = hpo_db.hpo.find_one({'id':h['replaced_by'][0]})
hpo=[h]
if 'is_a' not in h: return hpo
for hpo_parent_id in h['is_a']:
#p=hpo_db.hpo.find({'id':hpo_parent_id}):
hpo+=list(itertools.chain(get_hpo_ancestors(hpo_db,hpo_parent_id)))
#remove duplicates
hpo={h['id'][0]:h for h in hpo}.values()
return hpo
|
2ef2c968bc3001b97529ccd269884cefad7a899f
| 3,641,771
|
def mcBufAir(params: dict, states: dict) -> float:
"""
Growth respiration
Parameters
----------
params : dict
Parameters saved as model constants
states : dict
State variables of the model
Returns
-------
float
Growth respiration of the plant [mg m-2 s-1]
"""
mcBufAir_ = (mcOrgAir_g(organ="fruit", params=params, states=states) +
mcOrgAir_g(organ="leaf", params=params, states=states) +
mcOrgAir_g(organ="stem", params=params, states=states))
return mcBufAir_
|
d31f201384fdab6c03856def1eed7d96fe28482a
| 3,641,772
|
import os
import re
def gene_calling (workflow, assembly_dir, assembly_extentsion, input_dir, extension, extension_paired,
gene_call_type, prokka_dir, prodigal_dir,
threads,
gene_file, gene_PC_file, protein_file, protein_sort,
gene_info, complete_gene, complete_protein):
"""
    This set of tasks will run the gene-calling workflow.
Args:
workflow (anadama2.workflow): An instance of the workflow class.
        assembly_dir: The directory path of assembly results.
        sample_file: The sample list file.
        prokka_dir: The directory path of prokka results.
        prodigal_dir: The directory path of prodigal results.
gene_file: The fasta file of gene nucleotide sequences.
gene_PC_file: The fasta file of protein coding gene nucleotide sequences.
protein_file: The fasta file of protein sequences.
protein_sort: The sorted fasta file of protein sequences.
        gene_info: The summarized gene calling file.
complete_gene: The fasta file of gene nucleotide sequences for complete ORFs.
complete_protein: The fasta file of protein sequences for complete ORFs.
Requires:
        prokka 1.14-dev: rapid prokaryotic genome annotation (it is recommended to turn off the '-c' parameter in prodigal)
prodigal v2.6: gene prediction
usearch (tested with usearch v9.0.2132_i86linux64)
assembled contig files
Returns:
string: name of gene files
Example:
from anadama2 import Workflow
from MetaWIBELE.characterize import characterization
# create an anadama2 workflow instance
workflow=Workflow()
# add gene calling tasks
mygene, myprotein = preprocessing_tasks.gene_calling (workflow, assembly_dir, args.sample_file,
prokka_dir, prodigal_dir,
gene_file, gene_PC_file, protein_file, protein_sort,
gene_info, complete_gene, complete_protein)
# run the workflow
workflow.go()
"""
config.logger.info("###### Start gene_calling module ######")
time_equation = config.time # xxx hours defined in global config
mem_equation = config.memory # xxx GB defined in global config
# ================================================
# collect sequences
# ================================================
if extension_paired:
extension_paireds = extension_paired.split(",")
sample_files = utilities.find_files(input_dir, extension_paireds[0], None)
samples = utilities.sample_names(sample_files, extension_paireds[0], None)
else:
sample_files = utilities.find_files(input_dir, extension, None)
samples = utilities.sample_names(sample_files, extension, None)
sequence_files = []
for mysample in samples:
myfile = os.path.join(assembly_dir, mysample, mysample + "%s" % assembly_extentsion)
sequence_files.append(myfile)
# foreach sample
filtered_contigs = sequence_files
# ================================================
# Gene calling
# ================================================
fna_file = []
faa_file = []
gff_files = []
fna_file_tmp = []
faa_file_tmp = []
gff_files_tmp = []
## Using Prodigal
if gene_call_type == "prodigal" or gene_call_type == "both":
os.system("mkdir -p " + prodigal_dir)
for contig in filtered_contigs:
contig_base = os.path.basename(contig).split(os.extsep)[0]
annotation_dir = os.path.join(prodigal_dir, contig_base)
os.system("mkdir -p " + annotation_dir)
gff_file = os.path.join(annotation_dir, '%s.gff' % contig_base)
cds_file = os.path.join(annotation_dir, '%s.fna' % contig_base)
cds_aa = os.path.join(annotation_dir, '%s.faa' % contig_base)
score = os.path.join(annotation_dir, '%s.gene_score.txt' % contig_base)
stdout_log = os.path.join(annotation_dir, '%s.stdout.log' % contig_base)
faa_file_tmp.append(cds_aa)
workflow.add_task_gridable('prodigal -m -p meta -i [depends[0]] '
'-f gff -o [targets[0]] -d [targets[1]] -s [targets[3]] '
'-a [targets[2]] '
'>[args[0]] 2>&1',
depends = [contig, TrackedExecutable("prodigal")],
targets = [gff_file, cds_file, cds_aa, score],
args = [stdout_log],
cores = threads,
mem = mem_equation,
time = time_equation,
name = contig_base + "__prodigal")
for myfile in faa_file_tmp:
myname = os.path.basename(myfile)
myfile_new = os.path.join(prodigal_dir, myname)
faa_file.append(myfile_new)
workflow.add_task(
"ln -fs [depends[0]] [targets[0]]",
depends = [myfile],
targets = [myfile_new],
cores = 1,
name = "ln__" + myname)
myfna = re.sub(".faa", ".fna", myfile)
myfna_new = re.sub(".faa", ".fna", myfile_new)
if gene_call_type == "prodigal":
fna_file.append(myfna_new)
prokka_dir = prodigal_dir
workflow.add_task(
"ln -fs [depends[0]] [targets[0]]",
depends = [myfna],
targets = [myfna_new],
cores = 1,
name = "ln__" + myname)
mygff = re.sub(".faa", ".gff", myfile)
mygff_new = re.sub(".faa", ".gff", myfile_new)
workflow.add_task(
"ln -fs [depends[0]] [targets[0]]",
depends = [mygff],
targets = [mygff_new],
cores = 1,
name = "ln__" + myname)
if gene_call_type == "prokka" or gene_call_type == "both":
## Calling genes with Prokka
os.system("mkdir -p " + prokka_dir)
for contig in filtered_contigs:
contig_base = os.path.basename(contig).split(os.extsep)[0]
sample = os.path.basename(contig_base)
annotation_dir = os.path.join(prokka_dir, sample)
os.system("mkdir -p " + annotation_dir)
stdout_log = os.path.join(annotation_dir, '%s.prokka.bacteria.stdout.log' % contig_base)
score = os.path.join(annotation_dir, '%s.gene_score.txt' % contig_base)
gene_nuc = os.path.join(annotation_dir, '%s.ffn' % contig_base)
gene_aa = os.path.join(annotation_dir, '%s.faa' % contig_base)
gff_file = os.path.join(annotation_dir, '%s.gff' % contig_base)
fna_file_tmp.append(gene_nuc)
gff_files_tmp.append(gff_file)
workflow.add_task_gridable('prokka --prefix [args[0]] --addgenes --addmrna --force --metagenome '
'--cpus [args[2]] '
'--outdir [args[1]] [depends[0]] '
'>[args[3]] 2>&1 ',
depends = [contig, TrackedExecutable("prokka")],
targets = [gene_nuc, gene_aa, gff_file],
args = [sample, annotation_dir, threads, stdout_log],
cores = threads,
mem = mem_equation,
time = time_equation,
name = contig_base + "__prokka")
for myfile in gff_files_tmp:
myname = os.path.basename(myfile)
myfile_new = os.path.join(prokka_dir, myname)
gff_files.append(myfile_new)
for myfile in fna_file_tmp:
myname = os.path.basename(myfile)
myfile_new = os.path.join(prokka_dir, myname)
fna_file.append(myfile_new)
workflow.add_task(
"ln -fs [depends[0]] [targets[0]]",
depends = [myfile],
targets = [myfile_new],
cores = 1,
name = "ln__" + myname)
myfaa = re.sub(".ffn", ".faa", myfile)
myfaa_new = re.sub(".ffn", ".faa", myfile_new)
if gene_call_type == "prokka":
faa_file.append(myfaa_new)
prodigal_dir = prokka_dir
workflow.add_task(
"ln -fs [depends[0]] [targets[0]]",
depends = [myfaa],
targets = [myfaa_new],
cores = 1,
name = "ln__" + myname)
mygff = re.sub(".ffn", ".gff", myfile)
mygff_new = re.sub(".ffn", ".gff", myfile_new)
workflow.add_task(
"ln -fs [depends[0]] [targets[0]]",
depends = [mygff],
targets = [mygff_new],
cores = 1,
name = "ln__" + myname)
# ================================================
# Summarize sequences
# ================================================
#mem_equation = "50000"
### combine gene sequences ###
nuc_type = "ffn"
if gene_call_type == "prodigal":
nuc_type = "fna"
mylog = re.sub(".fna", ".log", gene_file)
workflow.add_task('metawibele_combine_gene_sequences -p [args[0]] -e [args[1]] -o [targets[0]] > [args[2]] 2>&1 ',
depends = utilities.add_to_list(fna_file,TrackedExecutable("metawibele_combine_gene_sequences")),
targets = [gene_file],
args = [prokka_dir, nuc_type, mylog],
cores = 1,
name = "combine_gene_sequences")
### combine protein sequences ###
## collect sequences
mylog = re.sub(".faa", ".log", protein_file)
workflow.add_task('metawibele_format_protein_sequences -p [args[0]] -q [args[1]] -e faa -o [targets[0]] '
'-m [targets[1]] >[args[2]] 2>&1 ',
depends = utilities.add_to_list(faa_file, TrackedExecutable("metawibele_format_protein_sequences")) + gff_files,
targets = [protein_file, gene_info],
args = [prokka_dir, prodigal_dir, mylog],
cores = 1,
name = "format_protein_sequences")
## sort by length and filter out short-length sequence
mylog = re.sub(".faa", ".log", protein_sort)
workflow.add_task('usearch -sortbylength [depends[0]] '
'-fastaout [targets[0]] -minseqlength 0 >[args[0]] 2>&1 ',
depends = [protein_file, TrackedExecutable("usearch")],
targets = [protein_sort],
args = [mylog],
cores = 1,
name = "usearch__sorting")
## extract nucleotide sequence for protein coding genes
mylog = re.sub(".fna", ".log", gene_PC_file)
workflow.add_task(
'metawibele_extract_protein_coding_genes -g [depends[0]] -p [depends[1]] -o [targets[0]] > [args[0]] 2>&1 ',
depends = [gene_file, protein_sort, TrackedExecutable("metawibele_extract_protein_coding_genes")],
targets = [gene_PC_file],
args = [mylog],
cores = 1,
name = "extract_protein_coding_genes")
## extract sequences
mylog = re.sub(".fna", ".log", complete_gene)
workflow.add_task(
'metawibele_extract_complete_ORF_seq -t complete -m [depends[0]] -i [depends[1]] -o [targets[0]] >[args[0]] 2>&1',
depends = [gene_info, gene_PC_file, TrackedExecutable("metawibele_extract_complete_ORF_seq")],
targets = [complete_gene],
args = [mylog],
cores = 1,
name = 'extract_complete_ORF_seq')
mylog = re.sub(".faa", ".log", complete_protein)
workflow.add_task(
'metawibele_extract_complete_ORF_seq -t complete -m [depends[0]] -i [depends[1]] -o [targets[0]] >[args[0]] 2>&1',
depends = [gene_info, protein_sort, TrackedExecutable("metawibele_extract_complete_ORF_seq")],
targets = [complete_protein],
args = [mylog],
cores = 1,
name = 'extract_complete_ORF_seq')
return complete_gene, complete_protein
|
5bd17208605c8ad99cf7b72db28506e26fc170c4
| 3,641,773
|
def space_boundaries_re(regex):
"""Wrap regex with space or end of string."""
return rf"(?:^|\s)({regex})(?:\s|$)"
|
68861da6218165318b6a446c173b4906a93ef850
| 3,641,774
|
import requests
import json
import dateutil
def get_jobs():
"""
    This function queries the USAJOBS API and returns all open FEC jobs.
    If the API call fails, a status error message is displayed in the
    jobs.html section of the career page.
    It also queries the code list to update hiring-path info; a hard-coded
    code list is used as a backup if that query fails.
"""
# url = 'https://data.usajobs.gov/api/Search'
# codes_url = 'https://data.usajobs.gov/api/codelist/hiringpaths'
querystring = {}
querystring["Organization"] = settings.USAJOBS_AGENCY_CODE
querystring["WhoMayApply"] = settings.USAJOBS_WHOMAYAPPLY
headers = {
"authorization-key": settings.USAJOBS_API_KEY,
"host": "data.usajobs.gov",
"cache-control": "no-cache",
}
# query usajobs API for all open fec jobs
response = requests.get(JOB_URL, headers=headers, params=querystring)
if response.status_code != 200:
return {"error": USAJOB_SEARCH_ERROR}
responses = response.json()
# query usajobs API for list of all hiring-path codes
codes_response = requests.get(CODES_URL, headers=headers)
if codes_response.status_code != 200:
codes_responses = json.loads(CODE_LIST)
else:
codes_responses = codes_response.json()
jobData = []
search_results = responses.get("SearchResult", {})
# iterate over returned job data
if "SearchResultItems" in search_results:
for result in search_results.get("SearchResultItems", None):
matched_object_descriptor = result.get("MatchedObjectDescriptor", {})
if len(matched_object_descriptor.get("JobGrade", [])) > 0:
job_grade = matched_object_descriptor.get("JobGrade", [])[0].get(
"Code", ""
)
else:
job_grade = ""
jobs_dict = {
"position_title": matched_object_descriptor.get("PositionTitle", ""),
"position_id": matched_object_descriptor.get("PositionID", ""),
"position_uri": matched_object_descriptor.get("PositionURI", ""),
"position_start_date": dateutil.parser.parse(
matched_object_descriptor.get("PositionStartDate", "")
),
"position_end_date": dateutil.parser.parse(
matched_object_descriptor.get("PositionEndDate", "")
),
"job_grade": job_grade,
"low_grade": matched_object_descriptor.get("UserArea", {})
.get("Details", {})
.get("LowGrade", ""),
"high_grade": matched_object_descriptor.get("UserArea", {})
.get("Details", {})
.get("HighGrade", ""),
}
# map hiring-path code(s) for each job to description(s)
if len(codes_responses.get("CodeList", [])) > 0:
hiring_path_codes = codes_responses.get("CodeList", [])[0].get(
"ValidValue", []
)
else:
hiring_path_codes = []
hiring_path = [
item
for item in result.get("MatchedObjectDescriptor", {})
.get("UserArea", {})
.get("Details", {})
.get("HiringPath", [])
]
hp = []
for path in hiring_path:
hpa = [
item for item in hiring_path_codes if item["Code"] == path.upper()
]
if hpa:
hp.append(hpa[0].get("Value", ""))
else:
hp.append(path)
hiring_path_list = ", ".join(str(n) for n in hp)
open_to = {"open_to": hiring_path_list}
jobs_dict.update(open_to)
jobData.append(jobs_dict)
return {"jobData": jobData}
|
46c69348b3f964fc1c4f35391aa5c7a8d049b47e
| 3,641,775
|
def artanh(x) -> ProcessBuilder:
"""
Inverse hyperbolic tangent
:param x: A number.
:return: The computed angle in radians.
"""
return _process('artanh', x=x)
|
d93ec8e7059df02ebf7a60506d2e9896bc146b32
| 3,641,776
|
from thunder.readers import normalize_scheme, get_parallel_reader
from numpy import array, asarray, fromstring
def fromtext(path, ext='txt', dtype='float64', skip=0, shape=None, index=None, labels=None, npartitions=None, engine=None, credentials=None):
"""
Loads series data from text files.
Assumes data are formatted as rows, where each record is a row
of numbers separated by spaces e.g. 'v v v v v'. You can
optionally specify a fixed number of initial items per row to skip / discard.
Parameters
----------
path : string
Directory to load from, can be a URI string with scheme
(e.g. 'file://', 's3n://', or 'gs://'), or a single file,
or a directory, or a directory with a single wildcard character.
ext : str, optional, default = 'txt'
File extension.
dtype : dtype or dtype specifier, default 'float64'
Numerical type to use for data after converting from text.
skip : int, optional, default = 0
Number of items in each record to skip.
shape : tuple or list, optional, default = None
Shape of data if known, will be inferred otherwise.
index : array, optional, default = None
Index for records, if not provided will use (0, 1, ...)
labels : array, optional, default = None
Labels for records. If provided, should have length equal to number of rows.
npartitions : int, default = None
Number of partitions for parallelization (Spark only)
engine : object, default = None
Computational engine (e.g. a SparkContext for Spark)
credentials : dict, default = None
Credentials for remote storage (e.g. S3) in the form {access: ***, secret: ***}
"""
path = normalize_scheme(path, ext)
if spark and isinstance(engine, spark):
def parse(line, skip):
vec = [float(x) for x in line.split(' ')]
return array(vec[skip:], dtype=dtype)
lines = engine.textFile(path, npartitions)
data = lines.map(lambda x: parse(x, skip))
def switch(record):
ary, idx = record
return (idx,), ary
rdd = data.zipWithIndex().map(switch)
return fromrdd(rdd, dtype=str(dtype), shape=shape, index=index, ordered=True)
else:
reader = get_parallel_reader(path)(engine, credentials=credentials)
data = reader.read(path, ext=ext)
values = []
for kv in data:
for line in str(kv[1].decode('utf-8')).split('\n')[:-1]:
values.append(fromstring(line, sep=' '))
values = asarray(values)
if skip > 0:
values = values[:, skip:]
if shape:
values = values.reshape(shape)
return fromarray(values, index=index, labels=labels)
|
9ab049954b23888c2d2a17786edde57dd90507c0
| 3,641,777
|
def flop_gemm(n, k):
"""# of + and * for matmat of nxn matrix with nxk matrix, with accumulation
into the output."""
return 2*n**2*k
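# A small worked example: multiplying a 1000x1000 matrix by a 1000x64 matrix
# with accumulation costs 2 * 1000**2 * 64 = 128,000,000 flops.
# >>> flop_gemm(1000, 64)
# 128000000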
|
b217b725e2ac27a47bc717789458fd20b4aa56c1
| 3,641,778
|
def index() -> str:
"""Rest endpoint to test whether the server is correctly working
Returns:
str: The default message string
"""
return 'DeChainy server greets you :D'
|
ce0caeb9994924f8d6ea10462db2be48bbc126d0
| 3,641,779
|
import os
from subprocess import CalledProcessError, check_output
def get_git_hash() -> str:
"""Get the PyKEEN git hash.
:return:
The git hash, equals 'UNHASHED' if encountered CalledProcessError, signifying that the
code is not installed in development mode.
"""
with open(os.devnull, 'w') as devnull:
try:
ret = check_output( # noqa: S603,S607
['git', 'rev-parse', 'HEAD'],
cwd=os.path.dirname(__file__),
stderr=devnull,
)
except CalledProcessError:
return 'UNHASHED'
else:
return ret.strip().decode('utf-8')[:8]
|
dc00c46c07e2b819718a04fde878ec3669a81e4f
| 3,641,780
|
from typing import AnyStr
from typing import List
import json
def load_json_samples(path: AnyStr) -> List[str]:
"""
Loads samples from a json file
:param path: Path to the target file
:return: List of samples
"""
with open(path, "r", encoding="utf-8") as file:
samples = json.load(file)
if isinstance(samples, list):
return samples
else:
raise RuntimeError(f"File's content must be list-like")
|
b735e7265a31f6bc6d19381bfe9d0cbe26dcf170
| 3,641,781
|
import sys
from serial import Serial
def openPort(path = SERIALPATH):
"""open the serial port for the given path"""
try:
port = Serial(path, baudrate = 115200)
    except Exception:
print("No serial device on the given path :" + path)
sys.exit()
return(port)
|
67f6044a7cc1c726f7226dce8768e8f6cdca6cdb
| 3,641,782
|
import struct
import lzma
def decompress_lzma(data: bytes) -> bytes:
"""decompresses lzma-compressed data
:param data: compressed data
:type data: bytes
:raises _lzma.LZMAError: Compressed data ended before the end-of-stream marker was reached
:return: uncompressed data
:rtype: bytes
"""
props, dict_size = struct.unpack("<BI", data[:5])
lc = props % 9
props = props // 9
pb = props // 5
lp = props % 5
dec = lzma.LZMADecompressor(
format=lzma.FORMAT_RAW,
filters=[
{
"id": lzma.FILTER_LZMA1,
"dict_size": dict_size,
"lc": lc,
"lp": lp,
"pb": pb,
}
],
)
return dec.decompress(data[5:])
|
247c3d59d45f3f140d4f2c36a7500ff8a51e45b0
| 3,641,783
|
def validate(request):
"""
Validate actor name exists in database before searching.
If more than one name fits the criteria, selects the first one
and returns the id.
Won't render.
"""
search_for = request.GET.get('search-for', default='')
start_from = request.GET.get('start-from', default='')
data = {}
search_for_actor = get_actor(search_for)
start_from_actor = get_actor(start_from)
if not search_for_actor:
data['errors'] = {'search-for': 'Not a valid name'}
if not start_from_actor:
data['errors'] = {'start-from': 'Not a valid name'}
if 'errors' in data:
data['status'] = 'false'
return JsonResponse(data, status=404)
else:
data = {
'search-for': search_for_actor.id,
'start-from': start_from_actor.id,
}
return JsonResponse(data)
|
39b9183cd570cce0ddfd81febde0ec125f11c578
| 3,641,784
|
def merge(left, right, on=None, left_on=None, right_on=None):
"""Merge two DataFrames using explicit-comms.
This is an explicit-comms version of Dask's Dataframe.merge() that
only supports "inner" joins.
Requires an activate client.
Notice
------
As a side effect, this operation concatenate all partitions located on
the same worker thus npartitions of the returned dataframe equals number
of workers.
Parameters
----------
left: dask.dataframe.DataFrame
right: dask.dataframe.DataFrame
on : str or list of str
Column or index level names to join on. These must be found in both
DataFrames.
left_on : str or list of str
Column to join on in the left DataFrame.
right_on : str or list of str
Column to join on in the right DataFrame.
Returns
-------
df: dask.dataframe.DataFrame
Merged dataframe
"""
# Making sure that the "on" arguments are list of column names
if on:
on = [on] if isinstance(on, str) else list(on)
if left_on:
left_on = [left_on] if isinstance(left_on, str) else list(left_on)
if right_on:
right_on = [right_on] if isinstance(right_on, str) else list(right_on)
if left_on is None:
left_on = on
if right_on is None:
right_on = on
if not (left_on and right_on):
raise ValueError(
"Some combination of the on, left_on, and right_on arguments must be set"
)
return submit_dataframe_operation(
comms.default_comms(),
local_df_merge,
df_list=(left, right),
extra_args=(left_on, right_on),
)
|
847070e27007c049d0c58059ec9f7c66681f21bc
| 3,641,785
|
def estimate_fs(t):
"""Estimates data sampling rate"""
sampling_rates = [
2000,
1250,
1000,
600,
500,
300,
250,
240,
200,
120,
75,
60,
50,
30,
25,
]
fs_est = np.median(1 / np.diff(t))
fs = min(sampling_rates, key=lambda x: abs(x - fs_est))
return fs
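# A small usage sketch (assumes numpy is imported as `np`, as above):
# >>> t = np.arange(0, 1, 1 / 250)   # timestamps sampled at 250 Hz
# >>> estimate_fs(t)
# 250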
|
82dbd115e3c7b656302d10339cdfe77b60ab0620
| 3,641,786
|
def get_case_number(caselist):
"""Get line number from file caselist."""
num = 0
with open(caselist, 'r') as casefile:
for line in casefile:
if line.strip().startswith('#') is False:
num = num + 1
return num
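# A small usage sketch (the file name is hypothetical):
# >>> with open('cases.txt', 'w') as f:
# ...     _ = f.write('# a comment line\ncase_a\ncase_b\n')
# >>> get_case_number('cases.txt')
# 2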
|
b1366d8e4a0e2c08da5265502d2dd2d72bf95c19
| 3,641,787
|
from typing import Any
def build_param_float_request(*, scenario: str, value: float, **kwargs: Any) -> HttpRequest:
"""Send a post request with header values "scenario": "positive", "value": 0.07 or "scenario":
"negative", "value": -3.0.
See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
into your code flow.
:keyword scenario: Send a post request with header values "scenario": "positive" or "negative".
:paramtype scenario: str
:keyword value: Send a post request with header values 0.07 or -3.0.
:paramtype value: float
:return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
`send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
incorporate this response into your code flow.
:rtype: ~azure.core.rest.HttpRequest
"""
accept = "application/json"
# Construct URL
url = "/header/param/prim/float"
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["scenario"] = _SERIALIZER.header("scenario", scenario, "str")
header_parameters["value"] = _SERIALIZER.header("value", value, "float")
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="POST", url=url, headers=header_parameters, **kwargs)
|
3e310a92ebe5760a00abc82c5c6465e160a5881d
| 3,641,788
|
from typing import List
def async_entries_for_config_entry(
registry: DeviceRegistry, config_entry_id: str
) -> List[DeviceEntry]:
"""Return entries that match a config entry."""
return [
device
for device in registry.devices.values()
if config_entry_id in device.config_entries
]
|
17af1610631e6b0f407883fa8386082a694f9cd2
| 3,641,789
|
from pathlib import Path
import yaml
def load_material(name: str) -> Material:
"""Load a material from the materials library
Args:
name (str): Name of material
Raises:
FileNotFoundError: If material is not found, raises an error
Returns:
Material: Loaded material
"""
try:
with open(
Path(__file__).parent.joinpath(f"resources/{name}.yaml"),
"r") as f:
matdict = yaml.full_load(f)
try:
matdict["properties"]["alpha"] = get_alpha(name)
except FileNotFoundError:
print(f"{name}.csv does not exist!")
return create_material(**matdict["properties"])
except FileNotFoundError:
print (f"{name} is not an available material!")
|
aa53eac4889d6c44e78d8f3f6e3b5c2099a7bb53
| 3,641,790
|
def reportBusniessModelSummaryView(request):
""" รายงาน สรุปจำนวนผู้สมัครตามสถานะธุรกิจ ทุกขั้นตอน"""
queryset = SmeCompetition.objects \
.values('enterpise__business_model', 'enterpise__business_model__name') \
.annotate(step_register=Count('enterpise__business_model')) \
.annotate(step_screen=Count('enterpise__business_model', filter=Q(state__in=[2,4,5, 6, 7, 8, 9, 10, 11]))) \
.annotate(step_interview=Count('enterpise__business_model', filter=Q(state__in=[4, 6, 7, 8, 9, 10, 11]))) \
.annotate(step_sitevisit=Count('enterpise__business_model', filter=Q(state__in=[6, 8, 9, 10]))) \
.order_by('enterpise__business_model__name')
total_sme = SmeCompetition.objects.filter(state__in=[6, 8, 9, 10, 11]).count()
total_register = SmeCompetition.objects.filter(active=True).count()
total_screen = SmeCompetition.objects.filter(active=True, state__in=[2,4,5,6,7,8,9,10,11]).count()
total_interview = SmeCompetition.objects.filter(active=True, state__in=[4, 6, 7, 8, 9, 10, 11]).count()
total_summary = SmeCompetition.objects.filter(active=True, state__in=[6, 8, 9, 10, 11]).count()
context = {'queryset': queryset, 'total_register':total_register, 'total_screen':total_screen \
,'total_interview':total_interview, 'total_summary':total_summary}
return render(request, 'app_sme12/report/businessmodel_summary.html', context)
|
a0bcb8e0b9d51fc1ed9070857ef9db2c4641ddd6
| 3,641,791
|
def _get_announce_url(rjcode: str) -> str:
"""Get DLsite announce URL corresponding to an RJ code."""
return _ANNOUNCE_URL.format(rjcode)
|
997b82270fcb115f8510d0ded1e1b6204e835e92
| 3,641,792
|
import torch
def get_r_adv(x, decoder, it=1, xi=1e-1, eps=10.0):
"""
Virtual Adversarial Training
https://arxiv.org/abs/1704.03976
"""
x_detached = x.detach()
with torch.no_grad():
pred = F.softmax(decoder(x_detached), dim=1)
d = torch.rand(x.shape).sub(0.5).to(x.device)
d = _l2_normalize(d)
for _ in range(it):
d.requires_grad_()
pred_hat = decoder(x_detached + xi * d)
logp_hat = F.log_softmax(pred_hat, dim=1)
adv_distance = F.kl_div(logp_hat, pred, reduction='batchmean')
# print ('a')
# print (adv_distance)
adv_distance.backward()
# print('c')
d = _l2_normalize(d.grad)
# print ('b')
decoder.zero_grad()
r_adv = d * eps
return r_adv
|
7613e59d88117a8ed263aad76139b1e4d808582c
| 3,641,793
|
import os
def fetch_brats(datasetdir):
""" Fetch/prepare the Brats dataset for pynet.
Parameters
----------
datasetdir: str
the dataset destination folder.
Returns
-------
item: namedtuple
a named tuple containing 'input_path', 'output_path', and
'metadata_path'.
"""
logger.info("Loading brats dataset.")
def _crop(arr):
return arr[45: 195, 30: 220, 10: 145]
def _norm(arr):
logical_mask = (arr != 0)
mean = arr[logical_mask].mean()
std = arr[logical_mask].std()
return ((arr - mean) / std).astype(np.single)
traindir = os.path.join(datasetdir, "MICCAI_BraTS_2019_Data_Training")
mapping_path = os.path.join(traindir, "name_mapping.csv")
if not os.path.isfile(mapping_path):
raise ValueError(
"You must first download the Brats data in the '{0}' folder "
"following the 'https://www.med.upenn.edu/sbia/brats2018/"
"registration.html' instructions.".format(datasetdir))
desc_path = os.path.join(datasetdir, "pynet_brats.tsv")
input_path = os.path.join(datasetdir, "pynet_brats_inputs.npy")
output_path = os.path.join(datasetdir, "pynet_brats_outputs.npy")
if not os.path.isfile(desc_path):
df = pd.read_csv(mapping_path, sep=",")
arr = df[["BraTS_2019_subject_ID", "Grade"]].values
input_dataset = []
output_dataset = []
nb_subjects = len(arr)
with progressbar.ProgressBar(max_value=nb_subjects,
redirect_stdout=True) as bar:
for cnt, (sid, grade) in enumerate(arr):
logger.debug("Processing {0}...".format(sid))
datadir = os.path.join(traindir, grade, sid)
data = []
for mod in MODALITIES:
path = os.path.join(
datadir, "{0}_{1}.nii.gz".format(sid, mod))
data.append(_norm(_crop(nib.load(path).get_data())))
data = np.asarray(data)
input_dataset.append(data)
path = os.path.join(datadir, "{0}_seg.nii.gz".format(sid))
_arr = nib.load(path).get_data()
data = []
for value in (0, 1, 2, 4):
data.append(_crop(_arr == value))
data = np.asarray(data)
output_dataset.append(data)
bar.update(cnt)
input_dataset = np.asarray(input_dataset)
np.save(input_path, input_dataset)
output_dataset = np.asarray(output_dataset)
np.save(output_path, output_dataset)
dataset_desc = pd.DataFrame(
arr, columns=["participant_id", "grade"])
dataset_desc.to_csv(desc_path, sep="\t")
return Item(input_path=input_path, output_path=output_path,
metadata_path=desc_path)
|
0adac8fa6c6c83c90c9f2809b8ebda52559e9600
| 3,641,794
|
def get_factors(shoppers, n_components=4, random_state=903, **kwargs):
"""
Find Factors to represent the shopper-level features in compressed space.
These factors will be used to map simplified user input from application
to the full feature space used in modeling.
Args:
shoppers (pd.DataFrame): full set of shoppers in feature data (train + test)
n_components (int): number of factors to mine. Defaults to 4 and should stay that way (application
UI based on these 4 analyzed factors)
        random_state (int): sets random state for factor analysis algorithm. Defaults to 903 (and should stay that way)
kwargs: additional keyword arguments for sklearn.decomposition.FactorAnalysis
Returns:
pd.DataFrame: will have n_components rows and n_features columns. The values of this matrix
can be used to map factors to full feature set (on std normal scale).
"""
# Remove columns which should not be considered in factor analysis
x = shoppers
for col in ['user_id', 'n_orders', 'label']:
if col in x.columns:
x = x.drop(columns=col)
# Need to scale data as columns on incommensurate scales
cols = x.columns
x = preprocessing.scale(x)
fa = FactorAnalysis(n_components, random_state=random_state, **kwargs)
fa.fit(x)
return pd.DataFrame(fa.components_, columns=cols)
|
966ca305b87b836d9caa5c857608bc6b16120e26
| 3,641,795
|
def cols_to_array(*cols, remove_na: bool = True) -> Column:
"""
Create a column of ArrayType() from user-supplied column list.
Args:
cols: columns to convert into array.
remove_na (optional): Remove nulls from array. Defaults to True.
Returns:
Column of ArrayType()
"""
if remove_na:
return F.filter(F.array(*cols), lambda x: x.isNotNull())
else:
return F.array(*cols)
|
a33e9b907d95fc767c2f247e12c22bdac9ad7585
| 3,641,796
|
def _git_repo_status(repo):
"""Get current git repo status.
:param repo: Path to directory containing a git repo
:type repo: :class:`pathlib.Path()`
:return: Repo status
:rtype: dict
"""
repo_status = {
'path': repo
}
options = ['git', '-C', str(repo), 'status', '-s']
changes = _run_cmd(options).stdout.decode()
repo_status['uncommited changes'] = True if len(changes) else False
local, remote = _git_get_branches(repo)
    repo_status['local only branches'] = bool(set(local) - set(remote))
repo_status['ahead of origin'] = _git_get_ahead(repo)
return repo_status
|
c35a20b7350dcf20bedcd9b201fd04a46c83449b
| 3,641,797
|
def _parseList(s):
"""Validation function. Parse a comma-separated list of strings."""
return [item.strip() for item in s.split(",")]
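# A small usage sketch:
# >>> _parseList("alpha, beta , gamma")
# ['alpha', 'beta', 'gamma']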
|
5bf9ac50a44a18cc4798ed616532130890803bac
| 3,641,798
|
def true_segments_1d(segments,
mode=SegmentsMode.CENTERS,
max_gap=0,
min_length=0,
name=None):
"""Labels contiguous True runs in segments.
Args:
segments: 1D boolean tensor.
mode: The SegmentsMode. Returns the start of each segment (STARTS), or the
rounded center of each segment (CENTERS).
max_gap: Fill gaps of length at most `max_gap` between true segments. int.
min_length: Minimum length of a returned segment. int.
name: Optional name for the op.
Returns:
run_centers: int32 tensor. Depending on `mode`, either the start of each
True run, or the (rounded) center of each True run.
run_lengths: int32; the lengths of each True run.
"""
with tf.name_scope(name, "true_segments", [segments]):
segments = tf.convert_to_tensor(segments, tf.bool)
run_starts, run_lengths = _segments_1d(segments, mode=SegmentsMode.STARTS)
# Take only the True runs. After whichever run is True first, the True runs
# are every other run.
first_run = tf.cond(
# First value is False, or all values are False. Handles empty segments
# correctly.
tf.logical_or(tf.reduce_any(segments[0:1]), ~tf.reduce_any(segments)),
lambda: tf.constant(0),
lambda: tf.constant(1))
num_runs = tf.shape(run_starts)[0]
run_nums = tf.range(num_runs)
is_true_run = tf.equal(run_nums % 2, first_run % 2)
# Find gaps between True runs that can be merged.
is_gap = tf.logical_and(
tf.not_equal(run_nums % 2, first_run % 2),
tf.logical_and(
tf.greater(run_nums, first_run), tf.less(run_nums, num_runs - 1)))
fill_gap = tf.logical_and(is_gap, tf.less_equal(run_lengths, max_gap))
# Segment the consecutive runs of True or False values based on whether they
# are True, or are a gap of False values that can be bridged. Then, flatten
# the runs of runs.
runs_to_merge = tf.logical_or(is_true_run, fill_gap)
run_of_run_starts, _ = _segments_1d(runs_to_merge, mode=SegmentsMode.STARTS)
# Get the start of every new run from the original run starts.
merged_run_starts = tf.gather(run_starts, run_of_run_starts)
# Make an array mapping the original runs to their run of runs. Increment
# the number for every run of run start except for the first one, so that
# the array has values from 0 to num_run_of_runs.
merged_run_inds = tf.cumsum(
tf.sparse_to_dense(
sparse_indices=tf.cast(run_of_run_starts[1:, None], tf.int64),
output_shape=tf.cast(num_runs[None], tf.int64),
sparse_values=tf.ones_like(run_of_run_starts[1:])))
# Sum the lengths of the original runs that were merged.
merged_run_lengths = tf.segment_sum(run_lengths, merged_run_inds)
if mode is SegmentsMode.CENTERS:
merged_starts_or_centers = (
merged_run_starts + tf.floordiv(merged_run_lengths - 1, 2))
else:
merged_starts_or_centers = merged_run_starts
# If there are no true values, increment first_run to 1, so we will skip
# the single (false) run.
first_run += tf.to_int32(tf.logical_not(tf.reduce_any(segments)))
merged_starts_or_centers = merged_starts_or_centers[first_run::2]
merged_run_lengths = merged_run_lengths[first_run::2]
# Only take segments at least min_length long.
is_long_enough = tf.greater_equal(merged_run_lengths, min_length)
is_long_enough.set_shape([None])
merged_starts_or_centers = tf.boolean_mask(merged_starts_or_centers,
is_long_enough)
merged_run_lengths = tf.boolean_mask(merged_run_lengths, is_long_enough)
return merged_starts_or_centers, merged_run_lengths
|
801541b7c3343fd59f79a3f3696c3cb17ab41c31
| 3,641,799
|