content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
async def sayd(ctx, *, message: str):
    """Make the bot say *message*; the invoking message is deleted afterwards."""
    await ctx.send(message)
    # DMs and group chats give the bot no permission to delete the
    # invoking user's message, so only delete in guild channels.
    if not isinstance(ctx.message.channel, discord.abc.PrivateChannel):
        await ctx.message.delete()
def barycenter_wbc(P, K, logweights, Kb=None, c=None, debiased=False,
                   maxiter=1000, tol=1e-4):
    """Compute the Wasserstein divergence barycenter between histograms.

    Sinkhorn-style fixed-point iteration over image-shaped histograms.

    Parameters
    ----------
    P : torch.Tensor
        Stack of input histograms, shape (n_hists, width, width)
        (the unpacking below requires exactly three dimensions).
    K : torch.Tensor
        Gibbs kernel applied via ``convol_huge_imgs``.
    logweights : torch.Tensor
        Unnormalized barycenter weights; softmax over dim 1 is taken below.
        Marked ``requires_grad = True`` so the weights can be optimized
        through this computation.
    Kb : torch.Tensor, optional
        Warm-start for the K @ b convolution; computed from ones if None.
    c : torch.Tensor, optional
        Debiasing potential; initialized to ones if None.
    debiased : bool
        If True, update the debiasing potential ``c`` each iteration.
    maxiter : int
        Maximum number of fixed-point iterations.
    tol : float
        Stop when the max absolute change of ``q`` falls below this
        (only after iteration 5, to avoid spurious early stops).

    Returns
    -------
    q : torch.Tensor
        The barycenter histogram.
    """
    n_hists, width, _ = P.shape
    if Kb is None:
        b = torch.ones_like(P)[None, :]
        Kb = convol_huge_imgs(b, K)
    if c is None:
        c = torch.ones(1, width, width, device=P.device)
    q = c.clone()
    # Gradients must flow from q back to the weights.
    logweights.requires_grad = True
    err = 1
    weights = torch.softmax(logweights, dim=1)[:, :, None, None]
    for ii in range(maxiter):
        with torch.no_grad():
            # Snapshot outside the autograd graph for the convergence check.
            qold = q.detach().clone()
        a = P[None, :] / Kb
        Ka = convol_huge_imgs(a, K.t())
        # Weighted geometric mean of the scaled potentials.
        q = c * torch.prod((Ka) ** weights, dim=1)
        if debiased:
            # NOTE(review): this uses convol_imgs while the rest of the loop
            # uses convol_huge_imgs — presumably intentional for the single
            # debiasing potential; confirm against the helpers' definitions.
            Kc = convol_imgs(c, K.t())
            c = (c * q / Kc) ** 0.5
        Q = q[:, None, :, :]
        b = Q / Ka
        Kb = convol_huge_imgs(b, K)
        if torch.isnan(q).any():
            warnings.warn("Numerical Errors ! Stopped early in debiased = %s" % debiased)
            break
        with torch.no_grad():
            err = abs(q - qold).max()
            if err < tol and ii > 5:
                break
    if ii == maxiter - 1:
        warnings.warn("*** Maxiter reached ! err = {} ***".format(err))
    return q
def compute_compensation(line1, line2):
    """Compute the compensation statistic between two lines.

    Explain the stat.

    Parameters
    ----------
    line1 : ndarray
        First surface to use (two-dimensional matrix with x-z coordinates
        of line).
    line2 : ndarray
        Second surface to use (two-dimensional matrix with x-z coordinates
        of line).

    Returns
    -------
    CV : float
        Compensation statistic.
    """
    # TODO: not implemented yet — currently returns None.
    pass
def syllable_fingerprint(word):
    """
    Map a word's possible syllable stress patterns (looked up in the
    pronunciation dict) onto a single ternary "fingerprint" string:
      '0' - the syllable must be unstressed
      '1' - the syllable must be stressed
      'x' - the syllable may be stressed or unstressed
    e.g. python => 10
         pronunciation => 0x010
    """
    stresses = get_syllable_stress(word)
    if not stresses:
        raise ValueError(f'Found no options for word {word}')
    if len(stresses) == 1:
        return stresses.pop()
    n_syllables = len(next(iter(stresses)))
    if any(len(pattern) != n_syllables for pattern in stresses):
        # Pronunciations disagree on syllable count; no common fingerprint.
        logging.debug('Multiple syllables found')
        logging.debug('%s, %s', word, stresses)
        return stresses.pop()  # lol just pick one. TODO
    fingerprint = []
    for pos in range(n_syllables):
        column = {pattern[pos] for pattern in stresses}
        if column == {'1'}:
            fingerprint.append('1')
        elif column == {'0'}:
            fingerprint.append('0')
        else:
            fingerprint.append('x')
    return ''.join(fingerprint)
def logistic_predict(weights, data):
    """
    Compute the probabilities predicted by the logistic classifier.

    Note: N is the number of examples and
          M is the number of features per example.

    Inputs:
        weights: (M+1) x 1 vector of weights, where the last element
                 corresponds to the bias (intercepts).
        data:    N x M data matrix where each row corresponds
                 to one data point.
    Outputs:
        y: N x 1 vector of probabilities. This is the output of the classifier.
    """
    # The last weight is the bias term: z = X @ w[:-1] + b.  (The previous
    # version used weights[:len(data[0])], which silently dropped the bias.)
    z = np.dot(data, weights[:-1]) + weights[-1]
    y = sigmoid(z)
    return y
def score_matrix(motifs, k):
    """Return the score of the motif matrix: for each of the k columns,
    count the nucleotides that differ from the column's consensus
    (most frequent) nucleotide, and sum over all columns."""
    counts = {base: [0] * k for base in ('A', 'T', 'C', 'G')}
    for motif in motifs:
        for position, base in enumerate(motif):
            counts[base][position] += 1
    total_score = 0
    for position in range(k):
        column = [counts[base][position] for base in counts]
        # Everything except one occurrence of the maximum contributes.
        total_score += sum(column) - max(column)
    return total_score
def log_loss(y_true, dist_pred, sample=True, return_std=False):
    """ Log loss

    Parameters
    ----------
    y_true: np.array
        The true labels
    dist_pred: ProbabilisticEstimator.Distribution
        The predicted distribution
    sample: boolean, default=True
        If true, loss will be averaged across the sample
    return_std: boolean, default=False
        If true, the standard deviation of the
        loss sample will be returned

    Returns
    -------
    np.array
        Loss (with standard deviation if ``return_std`` is True)
    """
    # Negative log-likelihood of the true labels under the predicted density.
    loss = -np.log(dist_pred.pdf(y_true))
    if not sample:
        return loss
    return sample_loss(loss, return_std)
def matrix_pencil_method_old(data, p, noise_level=None, verbose=1, **kwargs):
    """ Older implementation of the matrix pencil method with pencil p on given data to
    extract energy levels.

    Parameters
    ----------
    data -- lists of Obs, where the nth entry is considered to be the correlation function
            at x0=n+offset.
    p -- matrix pencil parameter which corresponds to the number of energy levels to extract.
         higher values for p can help decreasing noise.
    noise_level -- If this argument is not None an additional prefiltering via singular
                   value decomposition is performed in which all singular values below 10^(-noise_level)
                   times the largest singular value are discarded. This increases the computation time.
    verbose -- if larger than zero details about the noise filtering are printed to stdout
               (default 1)

    Returns
    -------
    Sorted list of energy levels (same Obs-like type as produced by ``eig``),
    ordered by the absolute value of their central value.
    """
    n_data = len(data)
    if n_data <= p:
        raise Exception('The pencil p has to be smaller than the number of data samples.')
    # Build the (n_data - p) x (p + 1) Hankel matrix of the correlator data.
    matrix = scipy.linalg.hankel(data[:n_data - p], data[n_data - p - 1:]) @ np.identity(p + 1)
    if noise_level is not None:
        u, s, vh = svd(matrix)
        # Extract plain floats from the Obs-valued singular values.
        s_values = np.vectorize(lambda x: x.value)(s)
        if verbose > 0:
            print('Singular values: ', s_values)
        # Index of the first singular value below the relative threshold.
        digit = np.argwhere(s_values / s_values[0] < 10.0**(-noise_level))
        if digit.size == 0:
            # Nothing below threshold: keep all singular values.
            digit = len(s_values)
        else:
            digit = int(digit[0])
        if verbose > 0:
            print('Consider only', digit, 'out of', len(s), 'singular values')
        # Low-rank reconstruction keeping only the dominant singular values.
        new_matrix = u[:, :digit] * s[:digit] @ vh[:digit, :]
        y1 = new_matrix[:, :-1]
        y2 = new_matrix[:, 1:]
    else:
        y1 = matrix[:, :-1]
        y2 = matrix[:, 1:]
    # Moore–Penrose pseudoinverse
    pinv_y1 = pinv(y1)
    # Note: Automatic differentiation of eig is implemented in the git of autograd
    # but not yet released to PyPi (1.3). The code is currently part of pyerrors
    e = eig((pinv_y1 @ y2), **kwargs)
    # Energies are minus the log of the generalized eigenvalue magnitudes.
    energy_levels = -np.log(np.abs(e))
    return sorted(energy_levels, key=lambda x: abs(x.value))
def createDataset(outputPath, imagePathList, labelList, lexiconList=None, checkValid=True):
    """
    Create LMDB dataset for CRNN training.

    ARGS:
        outputPath    : LMDB output path
        imagePathList : list of image path
        labelList     : list of corresponding groundtruth texts
        lexiconList   : (optional) list of lexicon lists
        checkValid    : if true, check the validity of every image
    """
    assert (len(imagePathList) == len(labelList))
    nSamples = len(imagePathList)
    print('number of samples:', nSamples)
    # LMDB requires the maximum map size up front; 1 TB is a safe ceiling.
    env = lmdb.open(outputPath, map_size=1099511627776)
    cache = {}
    cnt = 1
    # NOTE: was `xrange`, which is Python 2 only and raises NameError on
    # Python 3; enumerate(zip(...)) covers both the index (for lexiconList)
    # and the paired path/label.
    for i, (imagePath, label) in enumerate(zip(imagePathList, labelList)):
        if not os.path.exists(imagePath):
            print('%s does not exist' % imagePath)
            continue
        with open(imagePath, 'rb') as f:
            imageBin = f.read()
        if checkValid and not checkImageIsValid(imageBin):
            print('%s is not a valid image' % imagePath)
            continue
        # NOTE(review): lmdb on Python 3 expects bytes keys/values —
        # presumably writeCache handles the encoding; confirm.
        imageKey = 'image-%09d' % cnt
        labelKey = 'label-%09d' % cnt
        cache[imageKey] = imageBin
        cache[labelKey] = label
        if lexiconList:
            lexiconKey = 'lexicon-%09d' % cnt
            cache[lexiconKey] = ' '.join(lexiconList[i])
        # Flush to disk in batches of 1000 to bound memory use.
        if cnt % 1000 == 0:
            writeCache(env, cache)
            cache = {}
            print('Written %d / %d' % (cnt, nSamples))
        cnt += 1
    nSamples = cnt - 1
    cache['num-samples'] = str(nSamples)
    writeCache(env, cache)
    print('Created dataset with %d samples' % nSamples)
def ext_sum(text, ratio=0.8):
    """
    Generate extractive summary using BERT model

    INPUT:
        text - str. Input text
        ratio - float. Enter a ratio between 0.1 - 1.0 [default = 0.8]
                (ratio = summary length / original text length)
    OUTPUT:
        summary - str. Generated summary
    """
    model = Summarizer()
    return model(text, ratio=ratio)
def khinalug_input_normal(field, text):
    """
    Prepare a string from one of the query fields for subsequent
    processing: replace common shortcuts with valid Khinalug characters.
    """
    if field not in ('wf', 'lex', 'lex2', 'trans_ru', 'trans_ru2'):
        return text
    # Order matters: multi-character shortcuts (e.g. 'c1_') must be handled
    # before the shorter shortcuts they contain (e.g. 'c1').
    shortcut_map = (
        ('c1_', 'č̄'),
        ("c1'", 'č̣'),
        ('7', 'ˁ'),
        ('g1', 'ǧ'),
        ('s1', 'š'),
        ('z1', 'ž'),
        ('c1', 'č'),
        ('j1', 'ǯ'),
        ('a1', 'ä'),
        ('u1', 'ü'),
        ('o1', 'ö'),
        ('i1', 'ı'),
        ('k_', 'k̄'),
        ('t_', 't̄'),
        ('q_', 'q̄'),
        ('c_', 'c̄'),
        ("c'", 'c̣'),
        ("k'", 'ḳ'),
        ("q'", 'q̇'),
        ("x'", 'x̣'),
        ("t'", 'ṭ'),
        ("h'", 'ḥ'),
    )
    for shortcut, char in shortcut_map:
        text = text.replace(shortcut, char)
    return text
def run(command, instance_size=None):
    """Runs the given script in foreground."""
    # Submit against the currently-selected project and report the job id.
    job = projects.current_project().run(command, instance_size=instance_size)
    print("Started new job", job["jobid"])
def _shift_all_classes(classes_list: List[ndarray], params_dict: Dict[str, Any]):
"""Shift the locale of all classes.
Args:
classes_list: List of classes as numpy arrays.
params_dict: Dict including the shift values for all classes.
Returns:
List of shifted classes.
"""
classes_df = pd.DataFrame()
shifted_classes = []
# shift all classes
for generated_class, shift in zip(classes_list, params_dict["all_shifts"]):
# shift class data and exclude the label from shifting
label = generated_class[:, 0].reshape(-1, 1)
shifted_class_data = generated_class[:, 1:] + shift
classes_df["mean_" + str(shift)] = shifted_class_data.flatten()
labeled_shifted_class = np.hstack((label, shifted_class_data))
assert labeled_shifted_class[:, 0].all() == label.all()
shifted_classes.append(labeled_shifted_class)
return shifted_classes, classes_df | 5,332,912 |
def fully_connected_layer(tensor,
                          size=None,
                          weight_init=None,
                          bias_init=None,
                          name=None):
    """Fully connected layer.

    NOTE(review): this uses TensorFlow 1.x APIs throughout
    (``get_shape()[-1].value``, ``tf.truncated_normal``, the 3-argument
    ``tf.name_scope``); it will not run unchanged on TF 2.x.

    Parameters
    ----------
    tensor: tf.Tensor
        Input tensor.
    size: int
        Number of output nodes for this layer.
    weight_init: float
        Weight initializer.
    bias_init: float
        Bias initializer.
    name: str
        Name for this op. Defaults to 'fully_connected'.

    Returns
    -------
    tf.Tensor:
        A new tensor representing the output of the fully connected layer.

    Raises
    ------
    ValueError
        If input tensor is not 2D.
        NOTE(review): no explicit 2-D check exists in this body — the error
        presumably comes from ``tf.nn.xw_plus_b``; confirm.
    """
    if weight_init is None:
        # Default: truncated normal sized from the input's last dimension.
        num_features = tensor.get_shape()[-1].value
        weight_init = tf.truncated_normal([num_features, size], stddev=0.01)
    if bias_init is None:
        bias_init = tf.zeros([size])
    with tf.name_scope(name, 'fully_connected', [tensor]):
        w = tf.Variable(weight_init, name='w', dtype=tf.float32)
        b = tf.Variable(bias_init, name='b', dtype=tf.float32)
        # xw_plus_b computes tensor @ w + b.
        return tf.nn.xw_plus_b(tensor, w, b)
def fetch_osborne_magnetic(version):
    """
    Magnetic airborne survey of the Osborne Mine and surroundings, Australia

    This is a section of a survey acquired in 1990 by the Queensland
    Government, Australia. The line data have approximately 80 m terrain
    clearance and 200 m line spacing. Total field anomalies are in nT. The
    flight height was calculated by summing the terrain clearance to
    interpolated values of SRTM (referenced to sea level). The section contains
    the total field magnetic anomalies associated with the Osborne Mine,
    Lightning Creek sill complex, and the Brumby prospect.

    There are ~990,000 measurements in total with 5 columns available: flight
    line number, longitude, latitude (geodetic), height (orthometric), and the
    total field magnetic anomaly.

    **Format:** CSV with xz (lzma) compression.

    **Load with:** :func:`pandas.read_csv`

    **Original source:** `Geophysical Acquisition & Processing Section 2019.
    MIM Data from Mt Isa Inlier, QLD (P1029), magnetic line data, AWAGS
    levelled. Geoscience Australia, Canberra
    <http://pid.geoscience.gov.au/dataset/ga/142419>`__

    **Original license:** CC-BY

    **Versions:**

    * `1
      <https://github.com/fatiando-data/osborne-magnetic/releases/tag/v1>`_
      (doi:`10.5281/zenodo.5882209 <https://doi.org/10.5281/zenodo.5882209>`__)

    Parameters
    ----------
    version : int
        The data version to fetch. See the available versions above.

    Returns
    -------
    fname : :class:`pathlib.Path`
        Path to the downloaded file on disk.
    """
    _check_versions(version, allowed={1}, name="Osborne mine magnetic")
    data_file = "osborne-magnetic.csv.xz"
    # Download (or reuse the cached copy) from the versioned data repository.
    repository = _repository(data_file, version)
    return Path(repository.fetch(data_file))
def setup(bot: Bot) -> None:
    """Set up the Internal Eval extension."""
    # The import is deferred to call time: importing the Cog at module load
    # would trigger side effects such as defining RedisCache instances too
    # early.
    from ._internal_eval import InternalEval

    cog = InternalEval(bot)
    bot.add_cog(cog)
def lambdify(args, expr, modules=None, printer=None, use_imps=True,
             dummify=False):
    """
    Returns an anonymous function for fast calculation of numerical values.

    If not specified differently by the user, ``modules`` defaults to
    ``["scipy", "numpy"]`` if SciPy is installed, ``["numpy"]`` if only
    NumPy is installed, and ``["math", "mpmath", "sympy"]`` if neither is
    installed. That is, SymPy functions are replaced as far as possible by
    either ``scipy`` or ``numpy`` functions if available, and Python's
    standard library ``math``, or ``mpmath`` functions otherwise. To change
    this behavior, the "modules" argument can be used. It accepts:

    - the strings "math", "mpmath", "numpy", "numexpr", "scipy", "sympy",
      "tensorflow"
    - any modules (e.g. math)
    - dictionaries that map names of sympy functions to arbitrary functions
    - lists that contain a mix of the arguments above, with higher priority
      given to entries appearing first.

    .. warning::
        Note that this function uses ``eval``, and thus shouldn't be used on
        unsanitized input.

    Arguments in the provided expression that are not valid Python identifiers
    are substitued with dummy symbols. This allows for applied functions
    (e.g. f(t)) to be supplied as arguments. Call the function with
    dummify=True to replace all arguments with dummy symbols (if `args` is
    not a string) - for example, to ensure that the arguments do not
    redefine any built-in names.

    For functions involving large array calculations, numexpr can provide a
    significant speedup over numpy. Please note that the available functions
    for numexpr are more limited than numpy but can be expanded with
    implemented_function and user defined subclasses of Function. If specified,
    numexpr may be the only option in modules. The official list of numexpr
    functions can be found at:
    https://github.com/pydata/numexpr#supported-functions

    In previous releases ``lambdify`` replaced ``Matrix`` with ``numpy.matrix``
    by default. As of release 1.0 ``numpy.array`` is the default.
    To get the old default behavior you must pass in ``[{'ImmutableDenseMatrix':
    numpy.matrix}, 'numpy']`` to the ``modules`` kwarg.

    >>> from sympy import lambdify, Matrix
    >>> from sympy.abc import x, y
    >>> import numpy
    >>> array2mat = [{'ImmutableDenseMatrix': numpy.matrix}, 'numpy']
    >>> f = lambdify((x, y), Matrix([x, y]), modules=array2mat)
    >>> f(1, 2)
    [[1]
     [2]]

    Usage
    =====

    (1) Use one of the provided modules:

        >>> from sympy import sin, tan, gamma
        >>> from sympy.abc import x, y
        >>> f = lambdify(x, sin(x), "math")

        Attention: Functions that are not in the math module will throw a name
                   error when the function definition is evaluated! So this
                   would be better:

        >>> f = lambdify(x, sin(x)*gamma(x), ("math", "mpmath", "sympy"))

    (2) Use some other module:

        >>> import numpy
        >>> f = lambdify((x,y), tan(x*y), numpy)

        Attention: There are naming differences between numpy and sympy. So if
                   you simply take the numpy module, e.g. sympy.atan will not be
                   translated to numpy.arctan. Use the modified module instead
                   by passing the string "numpy":

        >>> f = lambdify((x,y), tan(x*y), "numpy")
        >>> f(1, 2)
        -2.18503986326
        >>> from numpy import array
        >>> f(array([1, 2, 3]), array([2, 3, 5]))
        [-2.18503986 -0.29100619 -0.8559934 ]

        In the above examples, the generated functions can accept scalar
        values or numpy arrays as arguments. However, in some cases
        the generated function relies on the input being a numpy array:

        >>> from sympy import Piecewise
        >>> from sympy.utilities.pytest import ignore_warnings
        >>> f = lambdify(x, Piecewise((x, x <= 1), (1/x, x > 1)), "numpy")

        >>> with ignore_warnings(RuntimeWarning):
        ...     f(array([-1, 0, 1, 2]))
        [-1.   0.   1.   0.5]

        >>> f(0)
        Traceback (most recent call last):
            ...
        ZeroDivisionError: division by zero

        In such cases, the input should be wrapped in a numpy array:

        >>> with ignore_warnings(RuntimeWarning):
        ...     float(f(array([0])))
        0.0

        Or if numpy functionality is not required another module can be used:

        >>> f = lambdify(x, Piecewise((x, x <= 1), (1/x, x > 1)), "math")
        >>> f(0)
        0

    (3) Use a dictionary defining custom functions:

        >>> def my_cool_function(x): return 'sin(%s) is cool' % x
        >>> myfuncs = {"sin" : my_cool_function}
        >>> f = lambdify(x, sin(x), myfuncs); f(1)
        'sin(1) is cool'

    Examples
    ========

    >>> from sympy.utilities.lambdify import implemented_function
    >>> from sympy import sqrt, sin, Matrix
    >>> from sympy import Function
    >>> from sympy.abc import w, x, y, z

    >>> f = lambdify(x, x**2)
    >>> f(2)
    4
    >>> f = lambdify((x, y, z), [z, y, x])
    >>> f(1,2,3)
    [3, 2, 1]
    >>> f = lambdify(x, sqrt(x))
    >>> f(4)
    2.0
    >>> f = lambdify((x, y), sin(x*y)**2)
    >>> f(0, 5)
    0.0
    >>> row = lambdify((x, y), Matrix((x, x + y)).T, modules='sympy')
    >>> row(1, 2)
    Matrix([[1, 3]])

    Tuple arguments are handled and the lambdified function should
    be called with the same type of arguments as were used to create
    the function.:

    >>> f = lambdify((x, (y, z)), x + y)
    >>> f(1, (2, 4))
    3

    A more robust way of handling this is to always work with flattened
    arguments:

    >>> from sympy.utilities.iterables import flatten
    >>> args = w, (x, (y, z))
    >>> vals = 1, (2, (3, 4))
    >>> f = lambdify(flatten(args), w + x + y + z)
    >>> f(*flatten(vals))
    10

    Functions present in `expr` can also carry their own numerical
    implementations, in a callable attached to the ``_imp_``
    attribute. Usually you attach this using the
    ``implemented_function`` factory:

    >>> f = implemented_function(Function('f'), lambda x: x+1)
    >>> func = lambdify(x, f(x))
    >>> func(4)
    5

    ``lambdify`` always prefers ``_imp_`` implementations to implementations
    in other namespaces, unless the ``use_imps`` input parameter is False.

    Usage with Tensorflow module:

    >>> import tensorflow as tf
    >>> f = Max(x, sin(x))
    >>> func = lambdify(x, f, 'tensorflow')
    >>> result = func(tf.constant(1.0))
    >>> result # a tf.Tensor representing the result of the calculation
    <tf.Tensor 'Maximum:0' shape=() dtype=float32>
    >>> sess = tf.Session()
    >>> sess.run(result) # compute result
    1.0
    >>> var = tf.Variable(1.0)
    >>> sess.run(tf.global_variables_initializer())
    >>> sess.run(func(var)) # also works for tf.Variable and tf.Placeholder
    1.0
    >>> tensor = tf.constant([[1.0, 2.0], [3.0, 4.0]]) # works with any shape tensor
    >>> sess.run(func(tensor))
    array([[ 1.,  2.],
           [ 3.,  4.]], dtype=float32)
    """
    from sympy.core.symbol import Symbol

    # If the user hasn't specified any modules, use what is available:
    # prefer scipy, then numpy, then the pure-Python math/mpmath/sympy stack.
    if modules is None:
        try:
            _import("scipy")
        except ImportError:
            try:
                _import("numpy")
            except ImportError:
                # Use either numpy (if available) or python.math where possible.
                # XXX: This leads to different behaviour on different systems and
                #      might be the reason for irreproducible errors.
                modules = ["math", "mpmath", "sympy"]
            else:
                modules = ["numpy"]
        else:
            modules = ["scipy", "numpy"]

    # Get the needed namespaces.
    namespaces = []
    # First find any function implementations attached via _imp_.
    if use_imps:
        namespaces.append(_imp_namespace(expr))
    # Check for dict before iterating: a dict or string is a single
    # namespace spec, not a list of them.
    if isinstance(modules, (dict, str)) or not hasattr(modules, '__iter__'):
        namespaces.append(modules)
    else:
        # consistency check: numexpr cannot be mixed with other backends.
        if _module_present('numexpr', modules) and len(modules) > 1:
            raise TypeError("numexpr must be the only item in 'modules'")
        namespaces += list(modules)
    # fill namespace with first having highest priority (iterate in reverse
    # so earlier entries overwrite later ones).
    namespace = {}
    for m in namespaces[::-1]:
        buf = _get_namespace(m)
        namespace.update(buf)

    if hasattr(expr, "atoms"):
        # Try if you can extract symbols from the expression.
        # Move on if expr.atoms is not implemented.
        syms = expr.atoms(Symbol)
        for term in syms:
            namespace.update({str(term): term})

    # Pick the code printer matching the highest-priority backend present.
    if printer is None:
        if _module_present('mpmath', namespaces):
            from sympy.printing.pycode import MpmathPrinter as Printer
        elif _module_present('scipy', namespaces):
            from sympy.printing.pycode import SciPyPrinter as Printer
        elif _module_present('numpy', namespaces):
            from sympy.printing.pycode import NumPyPrinter as Printer
        elif _module_present('numexpr', namespaces):
            from sympy.printing.lambdarepr import NumExprPrinter as Printer
        elif _module_present('tensorflow', namespaces):
            from sympy.printing.tensorflow import TensorflowPrinter as Printer
        elif _module_present('sympy', namespaces):
            from sympy.printing.pycode import SymPyPrinter as Printer
        else:
            from sympy.printing.pycode import PythonCodePrinter as Printer
        user_functions = {}
        for m in namespaces[::-1]:
            if isinstance(m, dict):
                for k in m:
                    user_functions[k] = k
        printer = Printer({'fully_qualified_modules': False, 'inline': True,
                           'allow_unknown_functions': True,
                           'user_functions': user_functions})

    # Get the names of the args, for creating a docstring.
    if not iterable(args):
        args = (args,)
    names = []
    # Grab the callers frame, for getting the names by inspection (if needed).
    callers_local_vars = inspect.currentframe().f_back.f_locals.items()
    for n, var in enumerate(args):
        if hasattr(var, 'name'):
            names.append(var.name)
        else:
            # It's an iterable. Try to get name by inspection of calling frame.
            name_list = [var_name for var_name, var_val in callers_local_vars
                         if var_val is var]
            if len(name_list) == 1:
                names.append(name_list[0])
            else:
                # Cannot infer name with certainty. arg_# will have to do.
                names.append('arg_' + str(n))

    # Collect module imports the printer says the generated code needs.
    imp_mod_lines = []
    for mod, keys in (getattr(printer, 'module_imports', None) or {}).items():
        for k in keys:
            if k not in namespace:
                imp_mod_lines.append("from %s import %s" % (mod, k))
    for ln in imp_mod_lines:
        exec_(ln, {}, namespace)

    # Provide lambda expression with builtins, and compatible implementation of range
    namespace.update({'builtins':builtins, 'range':range})

    # Create the function definition code and execute it.
    funcname = '_lambdifygenerated'
    if _module_present('tensorflow', namespaces):
        funcprinter = _TensorflowEvaluatorPrinter(printer, dummify)
    else:
        funcprinter = _EvaluatorPrinter(printer, dummify)
    funcstr = funcprinter.doprint(funcname, args, expr)
    funclocals = {}
    global _lambdify_generated_counter
    # Unique pseudo-filename so tracebacks point at the generated source.
    filename = '<lambdifygenerated-%s>' % _lambdify_generated_counter
    _lambdify_generated_counter += 1
    c = compile(funcstr, filename, 'exec')
    exec_(c, namespace, funclocals)
    # mtime has to be None or else linecache.checkcache will remove it
    linecache.cache[filename] = (len(funcstr), None, funcstr.splitlines(True), filename)

    func = funclocals[funcname]
    # Apply the docstring: signature, expression, generated source, imports.
    sig = "func({0})".format(", ".join(str(i) for i in names))
    sig = textwrap.fill(sig, subsequent_indent=' '*8)
    expr_str = str(expr)
    if len(expr_str) > 78:
        expr_str = textwrap.wrap(expr_str, 75)[0] + '...'
    func.__doc__ = (
        "Created with lambdify. Signature:\n\n"
        "{sig}\n\n"
        "Expression:\n\n"
        "{expr}\n\n"
        "Source code:\n\n"
        "{src}\n\n"
        "Imported modules:\n\n"
        "{imp_mods}"
    ).format(sig=sig, expr=expr_str, src=funcstr, imp_mods='\n'.join(imp_mod_lines))
    return func
def VerifyVersionOfBuiltClangMatchesVERSION():
    """Checks that `clang --version` outputs RELEASE_VERSION. If this
    fails, RELEASE_VERSION in update.py is out-of-date and needs to be updated
    (possibly in an `if args.llvm_force_head_revision:` block in update.py's
    main() first). Exits the process with status 1 on mismatch."""
    clang = os.path.join(LLVM_BUILD_DIR, 'bin', 'clang')
    if sys.platform == 'win32':
        # On Windows the driver is built as clang-cl.exe.
        clang += '-cl.exe'
    version_out = subprocess.check_output([clang, '--version'],
                                          universal_newlines=True)
    # Extract the leading dotted version number from e.g. "clang version 12.0.0".
    version_out = re.match(r'clang version ([0-9.]+)', version_out).group(1)
    if version_out != RELEASE_VERSION:
        print(('unexpected clang version %s (not %s), '
               'update RELEASE_VERSION in update.py')
              % (version_out, RELEASE_VERSION))
        sys.exit(1)
def cfg_from_file(filename):
    """Load a config file and merge it into the default options.

    Args:
        filename: Path to a YAML configuration file.

    Side effects:
        Mutates the module-level ``cfg`` in place via ``_merge_a_into_b``.
    """
    with open(filename, 'r') as f:
        # yaml.load() without an explicit Loader is rejected by PyYAML >= 6
        # and can execute arbitrary Python on untrusted input; safe_load
        # covers plain config YAML.
        yaml_cfg = EasyDict(yaml.safe_load(f))
    _merge_a_into_b(yaml_cfg, cfg)
def _SetEvenLength(options, paths):
"""Use the bounding box of paths to set even_length in options.
We want the option.smoothness parameter to control the length
of segments that we will try to divide Bezier curves into when
using the EVEN method. More smoothness -> shorter length.
But the user should think of this in terms of the overall dimensions
of their diagram, not in absolute terms.
Let's say that smoothness==0 means the length should 1/4 the
size of the longest size of the bounding box, and, for general
smoothness:
longest_side_length
even_length = -------------------
4 * (smoothness+1)
Args:
options: ConvertOptions
paths: list of geom.Path
Side effects:
Sets options.even_length according to above formula
"""
minx = 1e10
maxx = -1e10
miny = 1e10
maxy = -1e10
for p in paths:
for sp in p.subpaths:
for seg in sp.segments:
endi = 3 if seg[0] == 'A' else len(seg)
for (x, y) in seg[1:endi]:
minx = min(minx, x)
maxx = max(maxx, x)
miny = min(miny, y)
maxy = max(maxy, y)
longest_side_length = max(maxx - minx, maxy - miny)
if longest_side_length <= 0:
longest_side_length = 1.0
options.even_length = longest_side_length / \
(4.0 * (options.smoothness + 1)) | 5,332,919 |
def _matches(o, pattern):
"""Match a pattern of types in a sequence."""
if not len(o) == len(pattern):
return False
comps = zip(o,pattern)
return all(isinstance(obj,kind) for obj,kind in comps) | 5,332,920 |
def excl_import_route():
    """Import exclusions from an uploaded CSV; on success redirect to the
    exclusion list, otherwise re-render the import form (flashing an error
    if parsing or the DB transaction failed)."""
    form = ExclImportForm()
    if form.validate_on_submit():
        imported = []
        try:
            # Parse all rows up-front so a malformed file fails before any
            # DB mutation happens.
            for row in csv.DictReader(StringIO(form.data.data), EXPORT_FIELDNAMES, quoting=csv.QUOTE_MINIMAL):
                imported.append(Excl(family=ExclFamily(row['family']), value=row['value'], comment=row['comment']))
            if imported:
                # Optional replace mode: wipe existing exclusions first.
                if form.replace.data:
                    db.session.query(Excl).delete()
                for tmp in imported:
                    db.session.add(tmp)
                db.session.commit()
                return redirect(url_for('scheduler.excl_list_route'))
        except (csv.Error, ValueError, SQLAlchemyError, psycopg2.Error) as e:
            # Any parse/validation/DB error rolls the whole import back.
            db.session.rollback()
            current_app.logger.exception(e)
            flash('Import failed', 'error')
    return render_template('scheduler/excl/import.html', form=form)
def test_join_memoization():
    """Testing python memoization disable
    """
    baseline = random_uuid(0)
    baseline.result()
    # Repeated calls with the same argument should reuse the memoized result.
    for _ in range(2):
        candidate = random_uuid(0)
        assert candidate.result() == baseline.result(), "Memoized results were not used"
def receiver(signal, **kwargs):
    """
    A decorator for connecting receivers to signals. Used by passing in the
    signal and keyword arguments to connect::

        @receiver(signal_object, sender=sender)
        def signal_receiver(sender, **kwargs):
            ...
    """
    def wrapper(func):
        # Register the receiver, then hand the function back unchanged so
        # it stays directly callable.
        signal.connect(func, **kwargs)
        return func
    return wrapper
def create_new_connected_component(dict_projections, dict_cc, dict_nodes_cc, g_list_, set_no_proj, initial_method,
                                   params, i, file_tags=None):
    """
    If needed, create new connected component and update wanted dicts.

    :param dict_projections: Embedding dict
    :param dict_cc: Dict where keys are the number of the connected component and values are list of nodes that are in
                    this cc.
    :param dict_nodes_cc: Dict where keys are nodes and values is the number representing the cc they are in.
    :param g_list_: List of graphs for each time stamp.
    :param set_no_proj: Set of nodes that are currently not in the embedding because they create together a new cc.
    :param initial_method: State-of-the-art method to embed them with.
    :param params: Dict of parameters corresponding to the initial method.
    :param i: Index of the time stamp
    :param file_tags: If GCN GEA is used, one needs to provide file of tags
    :return: Updated dict_cc, dict_nodes_cc, and embedding dictionary.
    """
    new_cc = create_new_cc(list(set_no_proj), g_list_[i + 1], to_undirected=True)
    dict_cc, dict_nodes_cc = add_new_cc(new_cc, dict_nodes_cc, dict_cc)
    # HOPE cannot embed a component smaller than the target dimension; fall
    # back to node2vec with the same dimension in that case.
    if new_cc.number_of_nodes() < params["dimension"] and initial_method == "HOPE":
        dim = params["dimension"]
        initial_method = "node2vec"
        params = {"dimension": dim, "walk_length": 80, "num_walks": 16, "workers": 2}
    _, dict_proj_new_cc, _ = final(new_cc, initial_method, params, file_tags=file_tags)
    # Merge the new component's embeddings into the existing ones. The dict
    # built by unpacking is already a fresh object, so no extra .copy() is
    # needed (removed a redundant copy of the merged dict).
    merged_projections = {**dict_projections, **dict_proj_new_cc}
    return dict_cc, dict_nodes_cc, merged_projections
def _run_voice_detection_angle():
    """ Private: create a thread to poll the Mic Array and set the DOA Global Variable.

    Loops until KeyboardInterrupt or until the module-level stop flag
    ``_is_direction_of_arrival_stop_thread_flag`` is raised. On each voice
    detection it updates the direction-of-arrival globals and periodically
    publishes the angle over MQTT.
    """
    print("EARS | Voice Detection | Voice Detection Loop Starting")
    print("EARS | Voice Detection | VAD: ", vad_threshold)
    # Counter to implement simple trigger for publishing to mqtt
    mqtt_trigger_counter = 0
    # Mic_tuning.set_vad_threshold(vad_threshold)
    # Continue this block until the flag is raised to stop the thread - or the user interrupts
    while True:
        global _is_direction_of_arrival_stop_thread_flag
        global voice_detection_angle_to_360
        global Mic_tuning
        try:
            # Read the voice-detected bool from the Mic tuning script / hardware
            is_voice_detected = Mic_tuning.is_voice()
            if is_voice_detected:
                # Set the 0-360 degree var for direction of arrival
                voice_detection_angle_to_360 = Mic_tuning.direction
                print("EARS | Voice Detection | Direction of Arrival: ",
                      voice_detection_angle_to_360)
                # Convert the angle to the byte size scale (0-360 to 0-255)
                global scaled_voice_detection_angle_to_255
                scaled_voice_detection_angle_to_255 = int(voice_detection_angle_to_360 / 360 * 255)
                # Briefly sleep to prevent unnecessary runaway
                time.sleep(0.3)
                # Simple trigger to publish the DOA every 6th loop (0.3 * 6) ~1.8 seconds
                mqtt_trigger_counter += 1
                if mqtt_trigger_counter == 6:
                    # Publish the angle of arrival to the cloud via mqtt
                    publish(mqtt_topic_mic_angle, {
                        "mic_direction_of_arrival": voice_detection_angle_to_360
                    })
                    # Once published, reset the trigger
                    mqtt_trigger_counter = 0
        except KeyboardInterrupt:
            break
        if _is_direction_of_arrival_stop_thread_flag:
            print("Direction of Arrival Thread told to stop.")
            break
def test_problem_61(answer):
    """
    Check the Project Euler problem 61 solution against the expected
    answer fixture.
    """
    from euler_python.easy import p061

    result = p061.problem061()
    assert result == answer['Problem 061']
def test_do_status(config, mocker):
    """Verify that the Bundler has no additional state to offer."""
    fake_logger = mocker.MagicMock()
    bundler = Bundler(config, fake_logger)
    assert bundler._do_status() == {}
def do_eval(dataset=None, network=None, metric=None, load_checkpoint_path="", eval_type=None, tokenizer_file_path="",
            generate_length=1, top_k=1, top_p=1.0, temperature=1.0):
    """
    Do evaluation on Translation

    Args:
        dataset: the eval dataset.
        network: the network with loss.
        metric: the evaluation method; only "BLEU" is supported.
        load_checkpoint_path: the file path which saved finetune model checkpoint.
        eval_type: "zero-shot" (remap pretrained GPT-2 weight names) or
            "finetuned" (checkpoint already matches the translation network).
        tokenizer_file_path: directory containing 'gpt2-vocab.json' and
            'gpt2-merges.txt'.
        generate_length: currently unused; the generator below is constructed
            with generate_length=1 regardless of this argument.
        top_k: top-k sampling cutoff passed to the generator.
        top_p: nucleus-sampling probability mass passed to the generator.
        temperature: sampling temperature passed to the generator.

    Raises:
        ValueError: if no checkpoint path is given, if eval_type is not one of
            the supported modes, or if metric is not "BLEU".
    """
    if load_checkpoint_path == "":
        raise ValueError("Finetune model missed, evaluation task must load finetune model!")
    if metric.lower() == "bleu":
        print("Prepare to calculate the BLEU score ...")
        # Build the translation model in inference mode.
        gpt2_translation = network(config=gpt2_net_cfg,
                                   is_training=False,
                                   use_one_hot_embeddings=False)
        gpt2_translation.set_train(False)
        param_dict = load_checkpoint(load_checkpoint_path)
        if eval_type == "zero-shot":
            # Pretrained checkpoints name parameters without the enclosing
            # 'gpt2.' scope, so remap every name; 'dense1' (the output
            # projection) reuses the embedding table — weight tying.
            final_param_dict = {}
            for name, _ in param_dict.items():
                final_param_dict['gpt2.' + name] = param_dict[name]
            final_param_dict['dense1.weight'] = param_dict['gpt2_embedding_lookup.embedding_table']
            load_param_into_net(gpt2_translation, final_param_dict)
            print("load pretrained parameter successfully!\n")
        elif eval_type == "finetuned":
            # Finetuned checkpoints already match the network's namespace.
            load_param_into_net(gpt2_translation, param_dict)
            print("load finetuned parameter successfully!\n")
        else:
            raise ValueError("Evaluation type missed, eval_type should be [zero-shot, finetuned]")
        model = Model(gpt2_translation)
        tokenizer = Tokenizer(vocab_file=tokenizer_file_path + 'gpt2-vocab.json',
                              merge_file=tokenizer_file_path + 'gpt2-merges.txt')
        callback = BLEU(tokenizer)
        translation_generator = GenerateForTranslation(decoder=model,
                                                       config=gpt2_net_cfg,
                                                       tokenizer=tokenizer,
                                                       generate_length=1,
                                                       use_hint=True,
                                                       select_first_sentence=True,
                                                       topk_num=top_k,
                                                       topp_prob=float(top_p),
                                                       temperature=float(temperature)
                                                       )
        columns_list = ["input_ids", "input_mask", "label_ids"]
        print("==================== [BLEU] Testing ====================")
        num_data = 1
        for data in dataset.create_dict_iterator():
            input_data = []
            for i in columns_list:
                input_data.append(data[i])
            input_ids, input_mask, label_ids = input_data
            print("| Data count: {}".format(num_data * gpt2_net_cfg.batch_size))
            print("input_ids shape: {}".format(input_ids.shape))
            print("input_mask shape: {}".format(input_mask.shape))
            print("label_ids shape: {}".format(label_ids.shape))
            ts_predict_list, ref_list = translation_generator.generate_for_translation(input_ids)
            print("| Batch Reference translation:\n{}\n".format(ref_list))
            if ref_list == '' or ref_list is None:
                # A batch without a reference cannot contribute to BLEU; skip it.
                print("Sorry ref_list is None, skip it!")
                continue
            else:
                print(" | Batch Predict translation:\n{}\n".format(ts_predict_list))
                callback.update(ref_list, ts_predict_list)
            num_data += 1
            print("\n\n")
        print("**************************************************************")
        eval_result_print(metric, callback)
        print("********************** Testing Finished **********************")
    else:
        raise ValueError("metric method not supported in translation, support: [BLEU]")
def laguerre(x, k, c):
    """Generalized Laguerre polynomial L_k^c evaluated at `x`.
    See `help(_gmw.morsewave)`.

    Used in computing the generalized Morse wavelets, following the
    expression of Olhede and Walden (2002), "Generalized Morse Wavelets",
    Section III D.
    """
    x = np.atleast_1d(np.asarray(x).squeeze())
    assert x.ndim == 1
    out = np.zeros(x.shape)
    for m in range(k + 1):
        # Work in log-gamma space to avoid overflow for large arguments
        # (same trick as Matlab's ``beta``).
        coeff = np.exp(gammaln_fn(k + c + 1) - gammaln_fn(c + m + 1)
                       - gammaln_fn(k - m + 1))
        out = out + (-1)**m * coeff * x**m / gamma_fn(m + 1)
    return out
def EGshelfIIseas2km_ERAI(daily = False,
                          gridpath = '/home/idies/workspace/OceanCirculation/exp_ERAI/grid_glued.nc',
                          kppspath = '/home/idies/workspace/OceanCirculation/exp_ERAI/kpp_state_glued.nc',
                          fldspath = '/home/idies/workspace/OceanCirculation/exp_ERAI/result_*/output_glued/*.*_glued.nc',
                          dailypath = '/home/idies/workspace/OceanCirculation/exp_ERAI/result_*/output_glued/daily/*.*_glued.nc'):
    """
    High-resolution (~2km) numerical simulation covering the east Greenland shelf (EGshelf), and the Iceland and Irminger Seas (IIseas).
    Surface forcing based on the global atmospheric reanalysis ERA-Interim (ERAI).
    Model setup: [AHPM17]_.

    Parameters
    ----------
    daily: bool
        If True, include diagnostics stored with daily resolution (SI, oce).
        Return everything with daily time frequency (instead of 6H).
    gridpath: str
        grid path. Default is SciServer's path.
    kppspath: str
        kpp_state path. Default is SciServer's path.
    fldspath: str
        Fields path (use * for multiple files). Default is SciServer's path.
    dailypath: str
        Daily fields path (use * for multiple files). Default is SciServer's path.

    Returns
    -------
    od: OceanDataset

    References
    ----------
    .. [AHPM17] Almansi, M., T.W. Haine, R.S. Pickart, M.G. Magaldi, R. Gelderloos, and D. Mastropole, 2017: High-Frequency Variability in the Circulation and Hydrography of the Denmark Strait Overflow from a High-Resolution Numerical Model. J. Phys. Oceanogr., 47, 2999–3013, https://doi.org/10.1175/JPO-D-17-0129.1
    """
    # Check input
    if not isinstance(daily, bool): raise TypeError('`daily` must be a bool')
    if not isinstance(gridpath, str): raise TypeError('`gridpath` must be a str')
    if not isinstance(kppspath, str): raise TypeError('`kppspath` must be a str')
    if not isinstance(fldspath, str): raise TypeError('`fldspath` must be a str')
    if not isinstance(dailypath, str): raise TypeError('`dailypath` must be a str')
    # Message
    name = 'EGshelfIIseas2km_ERAI'
    description = 'High-resolution (~2km) numerical simulation covering the east Greenland shelf (EGshelf), and the Iceland and Irminger Seas (IIseas). Citation: Almansi et al., 2017 - JPO.'
    print('Opening [{}]:\n[{}].'.format(name, description))
    # Open, concatenate, and merge
    gridset = _xr.open_dataset(gridpath,
                               drop_variables = ['RC', 'RF', 'RU', 'RL'],
                               chunks={})
    kppset = _xr.open_dataset(kppspath,
                              chunks={})
    fldsset = _xr.open_mfdataset(fldspath,
                                 drop_variables = ['diag_levels','iter'])
    ds = _xr.merge([gridset, kppset, fldsset])
    # Read daily files and resample
    if daily:
        # Open, and concatenate daily files
        dailyset = _xr.open_mfdataset(dailypath,
                                      drop_variables = ['diag_levels','iter'])
        # Subsample the 6-hourly output (every 4th snapshot) to daily
        # frequency, then merge with the daily diagnostics
        ds = _xr.merge([ds.isel(T=slice(0,None,4)), dailyset])
    # Squeeze 1D Zs and create Z, Zp1, Zu, and Zl only
    # NOTE(review): 'Z' is renamed away first so that the MITgcm-style
    # 'Zmd000216' dimension can take its place — confirm against the raw files
    ds = ds.rename({'Z': 'Ztmp'})
    ds = ds.rename({'Ztmp': 'Z', 'Zmd000216': 'Z'})
    ds = ds.squeeze('Zd000001')
    for dim in ['Z', 'Zp1', 'Zu', 'Zl']: ds[dim].attrs.update({'positive': 'up'})
    # Rename time
    ds = ds.rename({'T': 'time'})
    # Add attribute (snapshot vs average)
    for var in [var for var in ds.variables if ('time' in ds[var].coords and var!='time')]:
        ds[var].attrs.update({'original_output': 'snapshot'})
    # Add missing names
    ds['U'].attrs['long_name'] = 'Zonal Component of Velocity'
    ds['V'].attrs['long_name'] = 'Meridional Component of Velocity'
    ds['W'].attrs['long_name'] = 'Vertical Component of Velocity'
    ds['phiHyd'].attrs['long_name'] = 'Hydrostatic Pressure Pot.(p/rho) Anomaly'
    ds['phiHydLow'].attrs['long_name'] = 'Depth integral of (rho -rhoconst) * g * dz / rhoconst'
    # Add missing units
    for varName in ['drC', 'drF', 'dxC', 'dyC', 'dxF', 'dyF', 'dxG', 'dyG', 'dxV', 'dyU', 'R_low']:
        ds[varName].attrs['units'] = 'm'
    for varName in ['rA', 'rAw', 'rAs', 'rAz']:
        ds[varName].attrs['units'] = 'm^2'
    for varName in ['fCori', 'fCoriG']:
        ds[varName].attrs['units'] = '1/s'
    for varName in ['Ro_surf']:
        ds[varName].attrs['units'] = 'kg/m^3'
    for varName in ['Depth']:
        ds[varName].attrs['units'] = 'm'
    for varName in ['HFacC', 'HFacW', 'HFacS']:
        ds[varName].attrs['units'] = '-'
    for varName in ['S']:
        ds[varName].attrs['units'] = 'psu'
    for varName in ['phiHyd', 'phiHydLow']:
        ds[varName].attrs['units'] = 'm^2/s^2'
    # Consistent chunkink: every variable gets full-size chunks except time,
    # which reuses Temp's chunking
    chunks = {**ds.sizes,
              'time': ds['Temp'].chunks[ds['Temp'].dims.index('time')]}
    ds = ds.chunk(chunks)
    # Initialize OceanDataset
    od = _OceanDataset(ds).import_MITgcm_rect_nc()
    od = od.set_name(name).set_description(description)
    od = od.set_parameters({'rSphere'    : 6.371E3,                # km None: cartesian
                            'eq_state'   : 'jmd95',                # jmd95, mdjwf
                            'rho0'       : 1027,                   # kg/m^3  TODO: None: compute volume weighted average
                            'g'          : 9.81,                   # m/s^2
                            'eps_nh'     : 0,                      # 0 is hydrostatic
                            'omega'      : 7.292123516990375E-05,  # rad/s
                            'c_p'        : 3.986E3,                # specific heat [J/kg/K]
                            'tempFrz0'   : 9.01E-02,               # freezing temp. of sea water (intercept)
                            'dTempFrz_dS': -5.75E-02,              # freezing temp. of sea water (slope)
                            })
    od = od.set_projection('Mercator',
                           central_longitude=float(od.dataset['X'].mean().values),
                           min_latitude=float(od.dataset['Y'].min().values),
                           max_latitude=float(od.dataset['Y'].max().values),
                           globe=None,
                           latitude_true_scale=float(od.dataset['Y'].mean().values))
    return od
def generate_pkl_features_from_fasta(
    fasta_path: str,
    name: str,
    output_dir: str,
    data_pipeline: DataPipeline,
    timings: Optional[Dict[str, float]] = None):
    """Run the data pipeline on a FASTA file and pickle the resulting features.

    Writes ``features.pkl`` and ``timings.json`` under ``<output_dir>/<name>``
    (MSAs go in a ``msas`` subdirectory). An input whose output directory
    already contains ``timings.json`` is treated as done and skipped.
    """
    timings = {} if timings is None else timings
    # Per-target output directory.
    target_dir = os.path.join(output_dir, name)
    # TODO: temp change for the feature generation, remember to fix this
    if os.path.exists(target_dir):
        if os.path.exists(os.path.join(target_dir, "timings.json")):
            print(f"skip {fasta_path}")
            return
    else:
        os.makedirs(target_dir)
    msa_dir = os.path.join(target_dir, 'msas')
    if not os.path.exists(msa_dir):
        os.makedirs(msa_dir)
    # Run the (expensive) feature pipeline and record how long it took.
    start = time.time()
    logging.info(f"processing file {fasta_path}...")
    features = data_pipeline.process(
        input_fasta_path=fasta_path,
        msa_output_dir=msa_dir)
    timings['data_pipeline'] = time.time() - start
    # Persist features as a pickled dictionary (protocol 4 handles large objects).
    with open(os.path.join(target_dir, 'features.pkl'), 'wb') as f:
        pickle.dump(features, f, protocol=4)
    logging.info(f"process file {fasta_path} done.")
    # Save timings.
    with open(os.path.join(target_dir, 'timings.json'), 'w') as fp:
        json.dump(timings, fp, indent=4)
    return features
def find_all_combinations(participants, team_sizes):
    """ Finds all possible experience level combinations for specific team
    sizes, allowing duplicated experience levels (e.g. (1, 1, 2)).
    Returns a list of tuples representing all the possible combinations """
    num_teams = len(team_sizes)
    # Participants without a known LMS module default to level 1.
    levels = [LMS_LEVELS.get(p.current_lms_module) or 1 for p in participants]
    total_level = sum(levels)
    target_team_level = math.floor(total_level / num_teams)
    missing = total_level - (num_teams * target_team_level)
    combos = []
    for size in set(team_sizes):
        combos += find_group_combinations(levels, size, target_team_level,
                                          missing)
    # Deduplicate combinations that hold the same elements in another order.
    return list({tuple(sorted(combo)) for combo in combos})
def debug_callback(callback: Callable[..., Any], effect: DebugEffect, *args,
    **kwargs):
  """Calls a stageable Python callback.

  `debug_callback` enables you to pass in a Python function that can be called
  inside of a staged JAX program. A `debug_callback` follows existing JAX
  transformation *pure* operational semantics, which are therefore unaware of
  side-effects. This means the effect could be dropped, duplicated, or
  potentially reordered in the presence of higher-order primitives and
  transformations.

  We want this behavior because we'd like `debug_callback` to be "innocuous",
  i.e. we want these primitives to change the JAX computation as little as
  possible while revealing as much about them as possible, such as which parts
  of the computation are duplicated or dropped.

  Args:
    callback: A Python callable.
    effect: A `DebugEffect`.
    *args: The positional arguments to the callback.
    **kwargs: The keyword arguments to the callback.

  Returns:
    The value of `callback(*args, **kwargs)`.
  """
  if not isinstance(effect, DebugEffect):
    raise ValueError("Can only use `DebugEffect` effects in `debug_callback`")
  # Flatten args/kwargs into a flat list of leaves; the treedef lets the
  # primitive rebuild the original structure before invoking `callback`.
  flat_args, in_tree = tree_util.tree_flatten((args, kwargs))
  return debug_callback_p.bind(*flat_args, callback=callback, effect=effect,
                               in_tree=in_tree)
def normalized_copy(data):
    """
    Normalize timeseries data, using the maximum across all regions and timesteps.

    Normalization is applied per variable and per technology: within each
    technology's loc_tech subset, values are replaced by
    ``abs(value) / max(abs(value))``, so every subset ends up in [0, 1].

    Parameters
    ----------
    data : xarray Dataset
        Dataset with all non-time dependent variables removed

    Returns
    -------
    ds : xarray Dataset
        Copy of `data`, with the absolute taken and normalized to 0-1
    """
    ds = data.copy(deep=True)  # Work off a copy
    for var in ds.data_vars:
        # Each DataArray is indexed over a different subset of loc_techs,
        # so we find it in the list of dimensions
        loc_tech_dim = [i for i in ds[var].dims if 'loc_techs' in i][0]
        # For each technology, get the loc_techs which are relevant.
        # loc_tech entries are of the form 'location::technology', hence the
        # split on '::' to enumerate the distinct technologies.
        loc_tech_subsets = [
            get_loc_techs(ds[loc_tech_dim].values, tech)
            for tech in set(i.split('::')[1] for i in ds[loc_tech_dim].values)
        ]
        # remove empty lists within the _techs list
        loc_tech_subsets = [i for i in loc_tech_subsets if i]
        # For each technology, divide all values by the maximum absolute value
        for loc_tech in loc_tech_subsets:
            ds[var].loc[{loc_tech_dim: loc_tech}] = abs(
                ds[var].loc[{loc_tech_dim: loc_tech}] /
                abs(ds[var].loc[{loc_tech_dim: loc_tech}]).max()
            )
    return ds
def test_constructor_missing_config():
    """Fail with a TypeError if a configuration object isn't provided."""
    # Unpacker's constructor takes the config as a required positional
    # argument, so calling it bare must raise TypeError.
    with pytest.raises(TypeError):
        Unpacker()
def _get_exception(ex: Exception) -> Exception:
"""Get exception cause/context from chained exceptions
:param ex: chained exception
:return: cause of chained exception if any
"""
if ex.__cause__:
return ex.__cause__
elif ex.__context__:
return ex.__context__
else:
return ex | 5,332,936 |
def recursive_normalizer(value: Any, **kwargs: Dict[str, Any]) -> Any:
    """
    Prepare a structure for hashing by lowercasing all strings and rounding all floats.

    Supported element types are str, int, bool, None, float, list, tuple,
    dict and numpy arrays; containers are normalized recursively.

    Keyword Args:
        digits (int): decimal digits to round floats/arrays to (default 10);
            a falsy value disables rounding.
        lowercase (bool): lowercase all strings and dict keys (default True).

    Returns:
        A normalized copy of ``value``.

    Raises:
        TypeError: if ``value`` (or a nested element) is of an unsupported type.
    """
    digits = kwargs.get("digits", 10)
    lowercase = kwargs.get("lowercase", True)

    if isinstance(value, (int, type(None))):
        pass
    elif isinstance(value, str):
        if lowercase:
            value = value.lower()
    elif isinstance(value, list):
        value = [recursive_normalizer(x, **kwargs) for x in value]
    elif isinstance(value, tuple):
        value = tuple(recursive_normalizer(x, **kwargs) for x in value)
    elif isinstance(value, dict):
        ret = {}
        for k, v in value.items():
            if lowercase:
                k = k.lower()
            ret[k] = recursive_normalizer(v, **kwargs)
        value = ret
    elif isinstance(value, np.ndarray):
        if digits:
            # Round array
            value = np.around(value, digits)
            # Flip near-zero values to exactly zero so noise/-0.0 hash stably.
            # NOTE(review): the cutoff is 5**-(digits+1); half an ULP of the
            # rounding step would be 5 * 10**-(digits+1) — confirm intent.
            value[np.abs(value) < 5 ** (-(digits + 1))] = 0
    elif isinstance(value, float):
        if digits:
            value = round(value, digits)
        if value == -0.0:
            value = 0
        if value == 0.0:
            value = 0
    else:
        # BUG FIX: the message lacked its f-prefix, so "{type(value)}" was
        # emitted literally instead of naming the offending type.
        raise TypeError(f"Invalid type in KeywordSet ({type(value)}), only simple Python types are allowed.")

    return value
def send_admin_logfile(subject, log_name):
    """
    Email a log file to the System Administrator, using the file's contents
    as the message body.

    Args:
        subject - The subject line for the email
        log_name - The name of the log file
    """
    #TODO safty check the log size and send only the head, tail and log location if the log is over 4M
    with open(log_name) as email_file:
        body = email_file.read()
    send_admin_message(subject, body)
def SimInterfMeasPuls(Stokes,ofnPrefix,SN,nomPolPur,deltaJAmp, rxnoise=1., skynoise=0.):
    """Simulate an interferometer measurement of the pulsar profile Stokes spectrum, and writes spectrum to text file
    inputs:
        Stokes: template Stokes specturm
        ofnPrefix: output filename prefix, two files are written ofnPrefix+'.cal.dat' and ofnPrefix+'.uncal.dat'
        SN: nominal signal-to-noise ratio
        nomPolPur: polarization purity, which is related to the IXR
        deltaJAmp: calibration error, 1=100%
        noise: the system can have sky and receiver noise. rxnoise+skynoise=1 to maintain S/N.
            rxnoise=1, skynoise=0 is a receiver noise dominated system, such as a high frequency receiver
            rxnoise=0, skynoise=1 is a sky noise dominated system, such as a low frequency receiver
        rxnoise: amount of recevier noise
        skynoise: amount of sky noise
    """
    #Generate the polarimeter matrices for interferometer
    #nomPolPur is the polarization purity or 1/sqrt(IXR) (gmin is 1)
    # nomPolCond=(1.0+nomPolPur)/(1.0-nomPolPur)
    nomPolCond=(1.+nomPolPur)/(1.-nomPolPur)
    # Random Jones matrix with prescribed condition number: take the unitary
    # factors of a random complex matrix's SVD and impose singular values
    # (1, 1/nomPolCond).
    U,s,Vh=np.linalg.svd(np.matrix(np.random.randn(2,2))+1j*np.matrix(np.random.randn(2,2)))
    d=np.matrix([[1.,0],[0,1./nomPolCond]])
    Jtrue=np.dot(U,np.dot(d,Vh))
    Mtrue=Jones2Mueller(Jtrue)
    # Complex Gaussian perturbation modelling the calibration error.
    deltaJ=deltaJAmp*(np.matrix(np.random.randn(2,2))+1j*np.matrix(np.random.randn(2,2)))
    Mest=Jones2Mueller(Jtrue+deltaJ) #full estimated system matrix (BGCD)
    MestGain=Jones2Mueller(np.identity(2)+deltaJ) #gain-only estimated system matrix (BGC)
    #Create receiver noise (independent Gaussian per Stokes parameter/channel)
    spectrumLen=np.shape(Stokes)[1]
    RecNoise=np.matrix(np.zeros((4,spectrumLen)))
    for indJ in range(spectrumLen):
        RecNoise[0,indJ]=np.random.randn()
        RecNoise[1,indJ]=np.random.randn()
        RecNoise[2,indJ]=np.random.randn()
        RecNoise[3,indJ]=np.random.randn()
    RecNoise*=rxnoise
    #Create sky noise
    SkyNoise=np.matrix(np.zeros((4,spectrumLen)))
    for indJ in range(spectrumLen):
        SkyNoise[0,indJ]=np.random.randn()
        SkyNoise[1,indJ]=np.random.randn()
        SkyNoise[2,indJ]=np.random.randn()
        SkyNoise[3,indJ]=np.random.randn()
    SkyNoise*=skynoise
    #Scale data to SNR before adding receiver noise
    maxI=np.max(np.abs(Stokes[0]))
    Stokes=(SN)*Stokes/maxI
    # Sky noise passes through the system (Mtrue); receiver noise is added
    # after the system response.
    StokesRaw=Mtrue*(Stokes)+Mtrue*(SkyNoise)+RecNoise #Compute raw signal
    StokesGainCalEst=np.linalg.pinv(MestGain)*StokesRaw #Compute gain-calibrated only signal
    StokesCalEst=np.linalg.pinv(Mest)*StokesRaw #Compute full calibrated signal
    StokesCalEst=np.real(StokesCalEst)
    StokesToTextFile(StokesCalEst.transpose(),ofnPrefix+'.cal.dat')
    StokesToTextFile(StokesGainCalEst.transpose(),ofnPrefix+'.uncal.dat')
    #StokesToTextFile(StokesRaw.transpose(),ofnPrefix+'.uncal.dat')
def rip_and_tear(context) -> Set:
    """Edge split geometry using specified angle or unique mesh settings.
    Also checks non-manifold geometry and hard edges.

    Returns:
        Set of colors that are used to color meshes, accumulated across all
        processed mesh objects in the scene.
    """
    processed = set()
    angle_use_fixed = prefs.RenderFixedAngleUse
    # Angle fixed in radians
    angle_fixed = prefs.RenderFixedAngle
    precision = prefs.RenderPrecision
    # Colors are saved in format specified by render precision parameter
    # Totally white and totally black (and close to them) colors are prohibited
    colors = set()
    for obj in context.scene.objects:
        if obj.type != "MESH":
            continue
        if obj.data in processed or len(obj.data.polygons) == 0:
            # Shared mesh data already handled, or nothing to split/paint.
            processed.add(obj.data)
        else:
            # BUG FIX: the original called colors.union(...), which returns a
            # NEW set and leaves `colors` untouched, so the accumulated result
            # was silently discarded. update() mutates `colors` in place.
            colors.update(
                split_n_paint(
                    context, colors, precision, obj,
                    angle_use_fixed, angle_fixed,
                    processed,
                )
            )
    return colors
def test_validate_declarative_1():
    """Check that enamldef rejects children that are not types.

    This also exercises try_squash_raise: the internal validate_declarative
    frame must be squashed out of the reported traceback.
    """
    source = dedent("""\
    from enaml.widgets.api import *
    a = 1
    enamldef Main(Window):
        a:
            pass
    """)
    with pytest.raises(TypeError) as excinfo:
        compile_source(source, 'Main')
    formatted = "\n".join(tb.format_tb(excinfo.tb))
    assert " validate_declarative" not in formatted
def clean_files():
    """Delete leftover temporary geometry files from the working directory.

    Missing files are ignored, so the function is safe to call
    unconditionally and is free of the exists()/remove() race of the
    check-then-act form.
    """
    temp_files = (
        'move_to_box.geo',
        'temp.geo',
        'temp.brep',
    )
    for filename in temp_files:
        # EAFP: attempt the removal and ignore an already-missing file.
        try:
            os.remove(filename)
        except FileNotFoundError:
            pass
def process_signature(app, _, name, obj, *other_ignored_args):
    """A callback for each signature in the docs.

    Builds a map from config field names to their Field objects so the
    documentation for those types can be overridden later in
    process_doc_nodes.

    For full documentation on this callback, see
    http://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html#docstring-preprocessing
    """
    # Only configuration Field objects are of interest here.
    if not isinstance(obj, streamer.configuration.Field):
        return
    name_to_type_map[name] = obj
def generate_primes(d):
    """Generate the set of all d-digit primes whose digits are all distinct.

    Args:
        d: the number of digits.

    Returns:
        Set of the matching primes, each as a decimal string.
    """
    primes = set()
    # Only odd candidates: even numbers above 2 cannot be prime (the even
    # lower bound 10**(d-1) itself is skipped by starting one above it).
    for i in range(10**(d - 1) + 1, 10**d, 2):
        digits = str(i)
        # Idiom fix: compare len(set(...)) directly instead of joining the
        # set back into a throwaway string. Cheap digit test before the
        # expensive primality test.
        if len(set(digits)) == len(digits) and isprime(i):
            primes.add(digits)
    return primes
def poinv(A, UPLO='L', workers=1, **kwargs):
    """
    Compute the (multiplicative) inverse of symmetric/hermitian positive
    definite matrices, with broadcasting.

    Given a square symmetic/hermitian positive-definite matrix `a`, return
    the matrix `ainv` satisfying ``matrix_multiply(a, ainv) =
    matrix_multiply(ainv, a) = Identity matrix``.

    Parameters
    ----------
    a : (..., M, M) array
        Symmetric/hermitian postive definite matrices to be inverted.
    UPLO : {'U', 'L'}, optional
        Specifies whether the calculation is done with the lower
        triangular part of the elements in `a` ('L', default) or
        the upper triangular part ('U').
    workers : int, optional
        The number of parallel threads to use along gufunc loop dimension(s).
        If set to -1, the maximum number of threads (as returned by
        ``multiprocessing.cpu_count()``) are used.

    Returns
    -------
    ainv : (..., M, M) array
        (Multiplicative) inverse of the `a` matrices.

    Notes
    -----
    Numpy broadcasting rules apply.

    The inverse is computed using LAPACK routines _potrf, _potri

    For elements where the LAPACK routine fails, the result will be set
    to NaNs.

    Implemented for types single, double, csingle and cdouble. Numpy conversion
    rules apply.

    See Also
    --------
    inv : compute the multiplicative inverse of general matrices.

    Examples
    --------
    >>> a = np.array([[5, 3], [3, 5]])
    >>> ainv = poinv(a)
    >>> np.allclose(matrix_multiply(a, ainv), np.eye(2))
    True
    >>> np.allclose(matrix_multiply(ainv, a), np.eye(2))
    True
    """
    uplo_choices = ['U', 'L']
    if UPLO not in uplo_choices:
        raise ValueError("Invalid UPLO argument '%s', valid values are: %s" %
                         (UPLO, uplo_choices))
    # Pick the triangular variant of the underlying gufunc.
    if 'L' == UPLO:
        gufunc = _impl.poinv_lo
    else:
        gufunc = _impl.poinv_up
    # NOTE(review): _check_workers presumably configures the gufunc thread
    # pool as a side effect and returns (effective, previous) thread counts,
    # since only the restore path below touches set_gufunc_threads — confirm.
    workers, orig_workers = _check_workers(workers)
    try:
        out = gufunc(A, **kwargs)
    finally:
        # restore original number of workers
        if workers != orig_workers:
            _impl.set_gufunc_threads(orig_workers)
    return out
def logs(timestamp, function_name, task):
    """
    Write a log entry formatted as: timestamp --> function_name --> task.

    Every entry is printed to the screen and also appended to a per-day log
    file for later use.

    :param timestamp: current date and time
    :param function_name: name of the function which is presently executing
    :param task: status of the task
    :return: None
    """
    from datetime import date
    day_stamp = date.today().strftime("%d-%b-%Y")
    entry = log_structure.format(timestamp, function_name, task.upper())
    print(entry)
    # One log file per calendar day, e.g. "07-Mar-2024.log".
    log_path = os.path.join(sub_dir['dir_logs'], day_stamp + ".log")
    with open(log_path, 'a+') as fp:
        fp.write(entry + '\n')
def parse_ip_element(ip_element, vulnerability_dictionary):
    """
    Process one IP element: read its address and name attributes, locate the
    INFOS/SERVICES/VULNS child node groups and hand each to parse_nodes.

    :param ip_element: DOM Node object
    :param vulnerability_dictionary: dictionary of vulnerabilities
    """
    ip = str(ip_element.getAttribute('value'))
    name = str(ip_element.getAttribute('name'))
    children = ip_element.childNodes
    # Each container tag maps to the category label used by parse_nodes.
    for tag, category in (('INFOS', 'INFO'),
                          ('SERVICES', 'SERVICE'),
                          ('VULNS', 'VULN')):
        nodes = find_child_nodes_by_name(children, tag)
        parse_nodes(nodes, ip, name, vulnerability_dictionary, category)
def gen_task3() -> tuple:
    """Task 3: centre of cross or a plus sign.

    Returns a ([task_id, centre_symbol], canvas) pair; the sign occupies a
    random 3x3 region and syms[2] lands at its centre in both layouts.
    """
    canv = blank_canvas()
    # Top-left corner of the 3x3 region holding the sign; GRID-2 keeps it in bounds.
    r, c = np.random.randint(GRID-2, size=2, dtype=np.int8)
    # Do we create a cross or a plus sign?
    syms = rand_syms(5)  # a 3x3 sign has 2 symbols, outer and centre
    # syms = np.array([syms[0], syms[0], syms[1], syms[0], syms[0]])
    if np.random.rand() < 0.5:
        # Let's do a plus
        rows, cols = [r, r+1, r+1, r+1, r+2], [c+1, c, c+1, c+2, c+1]
    else:
        # Let's do a cross
        rows, cols = [r, r, r+1, r+2, r+2], [c, c+2, c+1, c, c+2]
    canv[rows, cols] = syms
    return [3, syms[2]], canv
def test_parameter_shape():
    """
    Verify that parameter initialization produces the correct weight and
    bias shapes for a network with hidden layer sizes (2, 2).
    """
    X = np.array([1, 2, 3, 4]).reshape((1, -1))
    y = np.array([1, 2, 3, 4]).reshape((1, -1))
    model = JENN(hidden_layer_sizes=(2, 2))
    model._n_x = X.shape[0]
    model._n_y = y.shape[0]
    model._initialize()
    expected_W_shapes = [(2, 1), (2, 2), (1, 2)]
    expected_b_shapes = [(2, 1), (2, 1), (1, 1)]
    for layer, shape in enumerate(expected_W_shapes):
        assert model._W[layer].shape == shape
    for layer, shape in enumerate(expected_b_shapes):
        assert model._b[layer].shape == shape
def init_susceptible_00():
    """
    Real Name: b'init Susceptible 00'
    Original Eqn: b'8e+06'
    Units: b'person'
    Limits: (None, None)
    Type: constant

    Initial susceptible population: eight million people.
    """
    return 8_000_000.0
def test_store_records_str_and_repr():
    """
    StoreRecords:
        __str__ and __repr__ methods return the same string.
    """
    msg = StoreRecords("logs", records=iter([]), wrapped=False)
    expected = "<StoreRecords:logs:wrapped=False:records_number=0>"
    assert str(msg) == expected
    assert repr(msg) == expected
def write_compile_commands(target, source, env):
    """
    generator function to write the compilation database file (default 'compile_commands.json') for
    the given list of source binaries (executables, libraries)

    Walks each source binary's dependency tree, and for every object file
    built from a recognised C/C++ source emits one JSON entry with the
    working directory, source file, build command and output file.
    """
    # Bind target/source/env once so option lookups below are short calls.
    getString = base.BindCallArguments(base.getString, target, source, env, None)
    getList = base.BindCallArguments(base.getList, target, source, env, False)
    getBool = base.BindCallArguments(base.getBool, target, source, env, lambda x: x)
    # NOTE(review): under Python 3 map() returns a one-shot iterator; if
    # base.is_object_file iterates obj_ixes it will be exhausted after the
    # first call — consider list(map(...)). Confirm against target Python.
    obj_ixes = \
        map(getString, [ 'CCCOM_OBJPREFIX', 'CCCOM_OBJSUFFIX', 'CCCOM_SHOBJPREFIX', 'CCCOM_SHOBJSUFFIX' ])
    cc_suffixes = \
        getList('CCCOM_SUFFIXES')
    source = env.Flatten(source)
    suffix_map = build_suffix_map(target, source, env)
    # Tracks whether a previous entry needs its closing "}," emitted.
    has_previous_unit = False
    keep_variant_dir = getBool('CCCOM_KEEP_VARIANT_DIR')
    # The JSON document is accumulated line by line.
    db_file = [ '[' ]
    for src in source:
        nodeWalker = SCons.Node.Walker(src)
        child = nodeWalker.get_next()
        while child:
            if base.is_object_file(child, obj_ixes):
                for child_src in child.sources:
                    if is_cc_source(child_src, cc_suffixes):
                        build_env = clone_build_env(child.get_build_env())
                        build_targets = [ child ] + child.alter_targets()[0]
                        if keep_variant_dir:
                            build_sources = child.sources
                        else:
                            # Map variant-dir nodes back to their source-tree originals.
                            build_sources = [ obj_src.srcnode() for obj_src in child.sources ]
                        append_flags = getList('CCCOM_APPEND_FLAGS')
                        filter_flags = getList('CCCOM_REMOVE_FLAGS')
                        abs_file_path = getBool('CCCOM_ABSOLUTE_FILE')
                        if not keep_variant_dir or append_flags or filter_flags or 'CCCOM_FILTER_FUNC' in env:
                            # Strip requested flags from the cloned build environment.
                            for filter_set in filter_flags:
                                for var_name in filter_set:
                                    if var_name in build_env:
                                        for val in env.Split(filter_set[var_name]):
                                            # NOTE(review): this membership test is
                                            # duplicated (nested twice); the inner
                                            # check is redundant.
                                            if val in build_env[var_name]:
                                                if val in build_env[var_name]:
                                                    if isinstance(build_env[var_name], str):
                                                        build_env[var_name] = re.sub(r'(^|\s+)' + re.escape(val) + r'(\s+|$)', ' ', build_env[var_name])
                                                    else:
                                                        while val in build_env[var_name]:
                                                            build_env[var_name].remove(val)
                            for flag_set in append_flags:
                                build_env.Append(**flag_set)
                            if 'CCCOM_FILTER_FUNC' in env:
                                # User filter may veto this translation unit entirely.
                                build_env['CCCOM_FILTER_FUNC'] = env['CCCOM_FILTER_FUNC']
                                build_env['CCCOM_ENV'] = env
                                val = base.getBool(build_targets, build_sources, build_env, lambda x: x, 'CCCOM_FILTER_FUNC')
                                if not val:
                                    continue
                        if has_previous_unit:
                            db_file.append('    },')
                        has_previous_unit = True
                        db_file.extend\
                        ([
                            '    {',
                            '        "directory": ' + json_escape_string(build_env.fs.getcwd().get_abspath()) + ','
                        ])
                        if keep_variant_dir:
                            src_file = child_src
                        else:
                            src_file = child_src.srcnode()
                        if abs_file_path:
                            src_file = src_file.get_abspath()
                        else:
                            src_file = src_file.get_path()
                        db_file.extend\
                            ([
                                '        "file": ' + json_escape_string(src_file) + ',',
                                '        "command": '
                                    +
                                    json_escape_string\
                                        (
                                            build_env.subst\
                                                (
                                                    get_build_command(obj_ixes, suffix_map, child, child_src, build_env),
                                                    False,
                                                    build_targets,
                                                    build_sources,
                                                    None
                                                )
                                        ) + ',',
                                '        "output": '
                                    +
                                    json_escape_string(env.subst('$TARGET', False, build_targets, build_sources))
                            ])
            child = nodeWalker.get_next()
    # Close the final entry (if any) and the JSON array.
    if has_previous_unit:
        db_file.append('    }')
    db_file.append(']')
    with open(str(target[0]), 'w') as output_file:
        for line in db_file:
            output_file.write(line + '\n')
def spectrum_1D_scalar(data, dx, k_bin_num=100):
"""Calculates and returns the 2D spectrum for a 2D gaussian field of scalars, assuming isotropy of the turbulence
Example:
d=np.random.randn(101,101)
dx=1
k_bins_weighted,spect3D=spectrum_2D_scalar(d, dx, k_bin_num=100)
fig,ax=plt.subplots()
ax.scatter(k_bins_weighted,spect3D)
Arguments:
data {(Mx,My) array of floats} -- 2D Gaussian field of scalars
dx {float} -- grid spacing, assumed the same for all
k_bin_num {int} -- number of bins in reciprocal space
Returns:
k_bins_weighted {array of floats} -- location of bin centres
spect2D {array of floats} -- spectral power within bin
"""
#fourier transform data, shift to have zero freq at centre, find power
f=np.fft.fftshift(np.fft.fftn(data))
fsqr=np.real(f*np.conj(f))
#calculate k vectors in each dimension
Mx = data.shape[0]
kx = np.fft.fftshift(np.fft.fftfreq(Mx, dx))
#calculate magnitude of k at each grid point
K = np.sqrt(kx**2)
#determine 1D spectrum of k, measured from origin
#sort array in ascending k, and sort power by the same factor
K_flat=K.flatten()
fsqr_flat=fsqr.flatten()
K_sort = K_flat[K_flat.argsort()]
fsqr_sort = fsqr_flat[K_flat.argsort()]
k_bin_width = K_sort.max()/k_bin_num
k_bins = k_bin_width*np.arange(0,k_bin_num+1)
k_bins_weighted = 0.5*(k_bins[:-1]+k_bins[1:])
spect1D=np.zeros_like(k_bins_weighted)
for i in range(1,k_bin_num):
upper=K_sort<i*k_bin_width # find only values below upper bound: BOOL
lower=K_sort>=(i-1)*k_bin_width #find only values above upper bound: BOOL
f_filtered=fsqr_sort[upper*lower] # use super numpy array filtering to select only those which match both!
spect1D[i-1] = f_filtered.mean() #and take their mean.
return k_bins_weighted, spect1D | 5,332,953 |
def get_previous_cat(last_index: int) -> models.Cat:
    """Get previous cat.

    Args:
        last_index (int): View index of last seen cat.

    Returns:
        The newest enabled cat with an index below `last_index`; wraps
        around to the last cat when none precedes it.
    """
    # NOTE: `== False` (rather than `is False` / `not`) is required here:
    # SQLAlchemy overloads `==` on columns to build the SQL expression.
    cat = models.Cat.query.filter(and_(models.Cat.disabled == False, models.Cat.index < last_index)).order_by(
        desc(models.Cat.index)).first()
    if cat is None:
        cat = get_last_cat()
    return cat
def encode(file, res):
    """Encode an image. `file` is the path to the image, `res` is the
    resolution to use; a smaller `res` means a smaller but lower-quality
    output."""
    out = buildHeader(res)
    # Append each encoded pixel in order after the header.
    for pixel in getPixels(file, res):
        out += encodePixel(pixel)
    return out
def process_image(img):
    """Resize, rescale and batch an image.

    # Argument:
        img: original image (H, W, 3) array — assumed 8-bit given the /255
            rescale; TODO confirm channel order against the caller.
    # Returns
        image: ndarray (1, 416, 416, 3), float32 scaled to [0, 1] with a
            leading batch axis. (The previously documented (64, 64, 3) shape
            was stale: the code resizes to 416x416 and adds a batch dim.)
    """
    image = cv2.resize(img, (416, 416), interpolation=cv2.INTER_CUBIC)
    image = np.array(image, dtype='float32')
    image /= 255.
    image = np.expand_dims(image, axis=0)
    return image
def test_get_os_platform_linux(tmp_path):
    """Utilize an /etc/os-release file to determine platform."""
    # explicitly add commented and empty lines, for parser robustness
    filepath = tmp_path / "os-release"
    filepath.write_text(
        dedent(
            """
            # the following is an empty line
            NAME="Ubuntu"
            VERSION="20.04.1 LTS (Focal Fossa)"
            ID=ubuntu
            ID_LIKE=debian
            PRETTY_NAME="Ubuntu 20.04.1 LTS"
            VERSION_ID="20.04"
            HOME_URL="https://www.ubuntu.com/"
            SUPPORT_URL="https://help.ubuntu.com/"
            BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/"
            # more in the middle; the following even would be "out of standard", but
            # we should not crash, just ignore it
            SOMETHING-WEIRD
            PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy"
            VERSION_CODENAME=focal
            UBUNTU_CODENAME=focal
            """
        )
    )
    # Patch out the host's real machine/system so the result is deterministic
    # and driven purely by the fabricated os-release file.
    with patch("platform.machine", return_value="x86_64"):
        with patch("platform.system", return_value="Linux"):
            os_platform = get_os_platform(filepath)
    assert os_platform.system == "ubuntu"
    assert os_platform.release == "20.04"
    assert os_platform.machine == "x86_64"
def unnormalise_x_given_lims(x_in, lims):
    """Scale *x_in* (assumed in [-1, 1] per dimension) onto the problem limits.

    Args:
        x_in: array-like of normalized coordinates in [-1, 1].
        lims: (d, 2) array of [lower, upper] bounds per dimension.

    Returns:
        Coordinates mapped into [lims[:, 0], lims[:, 1]].
    """
    lower = lims[:, 0]
    upper = lims[:, 1]
    # Linear map: -1 -> lower bound, +1 -> upper bound.
    return lower + (upper - lower) * (x_in + 1) / 2
def is_interdisciplinary(foo, environment):
    """ Is interdisciplinary

    Major approach that accepts all kinds of objects and detects whether they can be considered
    in a defined environment.

    Arguments:
        foo (str): might be concept, method, journal, article, sentence, paragraph, person, project.
        environment (dict):

    Returns:
        Result (Boolean) # Or more

    Theory:
        Based on Klein"""
    # Not implemented yet: always returns None regardless of inputs.
    return None
def scalar_projection(vector, onto):
    """
    Compute the scalar projection of `vector` onto the vector `onto`.

    `onto` need not be normalized.

    `vector` may be a single 3-vector or a (k, 3) stack; in the stacked
    case `onto` may be a single 3-vector or a matching (k, 3) stack.
    """
    # NOTE(review): `check` appears to validate shapes by looking the
    # argument name up in locals(), so the local names `vector` and
    # `onto` are significant here — do not rename them. (project helper;
    # behavior inferred, confirm against its definition)
    if vector.ndim == 1:
        check(locals(), "vector", (3,))
        check(locals(), "onto", (3,))
    else:
        # `check` presumably returns the inferred leading dimension k of
        # the (-1, 3) stack, reused to validate `onto`.
        k = check(locals(), "vector", (-1, 3))
        if onto.ndim == 1:
            check(locals(), "onto", (3,))
        else:
            check(locals(), "onto", (k, 3))
    return dot(vector, normalize(onto)) | 5,332,960 |
def thread_task(lock, stock_id):
    """Worker task for a thread: load one stock's spreadsheet and insert it.

    Args:
        lock: threading lock (currently unused; acquire/release are
            commented out below).
        stock_id: identifier used to locate ``tw_<stock_id>.xlsx``.
    """
    print(f"Start process stock:{stock_id}")
    frame = pd.read_excel(f"tw_{stock_id}.xlsx")
    # lock.acquire()
    stock_insert(frame)
    # lock.release()
    print(f"End of process stock:{stock_id}\n\n")
def update_table(page_current, page_size, sort_by, filter, row_count_value):
    """
    Callback to update the datatable with filtered, sorted and paged values.

    :param page_current: Current page number
    :param page_size: Page size
    :param sort_by: Column selected for sorting (Dash sort spec list)
    :param filter: Filter expression string (Dash table filter syntax)
    :param row_count_value: Number of rows per page; overrides page_size
    :return: [records for the current page, column definitions with dtypes]
    """
    # If uploaded dataframe is not empty use that, otherwise
    # use the default dataframe (df_up/df are module-level globals).
    if not df_up.empty:
        # df_temp = pd.read_json(data, orient='split')
        df_tab = df_up
    else:
        df_tab = df
    # Setting the page size as row count value
    if row_count_value is not None:
        page_size = row_count_value
    # Applying sort logic (only the first sort column is honoured)
    if len(sort_by):
        dff = df_tab.sort_values(
            sort_by[0]['column_id'],
            ascending=sort_by[0]['direction'] == 'asc',
            inplace=False
        )
    else:
        # No sort is applied
        dff = df_tab
    # Filter logic: ' && '-joined expressions are ANDed together
    if filter is not None:
        filtering_expressions = filter.split(' && ')
        for filter_part in filtering_expressions:
            col_name, operator, filter_value = split_filter_part(filter_part)
            if operator in ('eq', 'ne', 'lt', 'le', 'gt', 'ge'):
                # these operators match pandas series operator method names
                dff = dff.loc[getattr(dff[col_name], operator)(filter_value)]
            elif operator == 'contains':
                dff = dff.loc[dff[col_name].str.contains(filter_value)]
            elif operator == 'datestartswith':
                # this is a simplification of the front-end filtering logic,
                # only works with complete fields in standard format
                dff = dff.loc[dff[col_name].str.startswith(filter_value)]
    # if selected_cols is not None:
    #     if len(selected_cols) != 0:
    #         return dff[selected_cols].iloc[
    #                page_current * page_size:(page_current + 1) * page_size
    #                ].to_dict('records')
    #     else:
    #         return dff.iloc[
    #                page_current * page_size:(page_current + 1) * page_size
    #                ].to_dict('records')
    # else:
    # Rounding the float values to 2 decimal places
    dff = dff.round(2)
    return [dff.iloc[
            page_current * page_size:(page_current + 1) * page_size
            ].to_dict('records'),
            [{"name": [i, j], "id": i} for i, j in zip(df_tab.columns, [str(x) for x in df_tab.dtypes.to_list()])]] | 5,332,962 |
def serialize(item: Any) -> bytes:
    """
    Serializes the given value into its bytes representation.

    NOTE(review): unimplemented stub — the body is ``pass``, so it
    currently returns None instead of bytes or raising as documented.

    :param item: value to be serialized
    :type item: Any
    :return: the serialized value
    :rtype: bytes
    :raise Exception: raised if the item's type is not serializable.
    """
    pass | 5,332,963 |
def get_value(environment_variable, default_value=None):
    """Return an environment variable value.

    Undefined variables yield *default_value*; ANDROID_SERIAL is returned
    verbatim; anything else is passed through ``_eval_value``.
    """
    raw = os.getenv(environment_variable)
    # raw is None when the variable is not defined.
    if raw is None:
        return default_value
    # Exception for ANDROID_SERIAL. Sometimes serial can be just numbers,
    # so we don't want to it eval it.
    if environment_variable == 'ANDROID_SERIAL':
        return raw
    # Evaluate the value of the environment variable with string fallback.
    return _eval_value(raw)
def autogossip(*args):
    """\
    autogossip on|off -- generate random background conversation
    """
    global should_autogossip
    enabled = bool(args) and args[0].lower() == 'on'
    mode = 'on' if enabled else 'off'
    stdout.say('Turning autogossip %s.' % mode)
    should_autogossip = enabled
def load_mooring_csv(csvfilename):
    """Loads data contained in an ONC mooring csv file.

    :arg csvfilename: path to the csv file
    :type csvfilename: string

    :returns: data, lat, lon, depth - a pandas data frame object and the
              latitude, longitude and depth of the mooring
    """
    data_line, lat, lon, depth = find_metadata(csvfilename)
    # Look up headers (single row just above the data section)
    headers = pd.read_csv(csvfilename, skiprows=data_line-2, nrows=1,
                          header=None, skipinitialspace=True, dtype=str)
    headers = np.array(headers)[0]
    headers[0] = headers[0].replace('#', '')
    headers[0] = headers[0].replace('"', '')
    # Load data
    data = pd.read_csv(csvfilename, header=None, skiprows=data_line,
                       names=headers, parse_dates=[0], low_memory=False)
    # DataFrame.convert_objects was removed from pandas; emulate its
    # convert_numeric=True behaviour by soft-converting each column and
    # leaving non-numeric columns untouched.
    for column in data.columns:
        try:
            data[column] = pd.to_numeric(data[column])
        except (ValueError, TypeError):
            pass
    data.rename(columns={'Time UTC (yyyy-mm-ddThh:mm:ss.fffZ)': 'time'},
                inplace=True)
    return data, lat, lon, depth
def ez_execute(query, engine):
    """Execute *query* with *engine* and return the result as a DataFrame.

    Arguments:
        query {str} -- a Sql query string
        engine {sqlalchemy.engine.base.Engine} -- a database engine object
            (or DBAPI connection) used to run the query

    Returns:
        DataFrame -- the query results; an AssertionError is raised when
        the query returns no rows
    """
    frame = pd.read_sql_query(query, engine)
    assert not frame.empty, "Query returned no results"
    return frame
def compute_lima_image(counts, background, kernel):
    """Compute Li & Ma significance and flux images for known background.

    Parameters
    ----------
    counts : `~gammapy.maps.WcsNDMap`
        Counts image
    background : `~gammapy.maps.WcsNDMap`
        Background image
    kernel : `astropy.convolution.Kernel2D`
        Convolution kernel

    Returns
    -------
    images : dict
        Result maps keyed by: significance, counts, background and excess

    See Also
    --------
    gammapy.stats.significance
    """
    # normalize() mutates the kernel, so work on a private copy.
    kernel = deepcopy(kernel)
    kernel.normalize("peak")
    n_on = counts.convolve(kernel.array).data
    n_bkg = background.convolve(kernel.array).data
    sig = significance(n_on, n_bkg, method="lima")
    result = {}
    result["significance"] = counts.copy(data=sig)
    result["counts"] = counts.copy(data=n_on)
    result["background"] = counts.copy(data=n_bkg)
    result["excess"] = counts.copy(data=n_on - n_bkg)
    return result
def vagrant(name=''):
    """
    Run the following tasks on a vagrant box.

    First, you need to import this task in your ``fabfile.py``::

        from fabric.api import *
        from fabtools.vagrant import vagrant

        @task
        def some_task():
            run('echo hello')

    Then you can easily run tasks on your current Vagrant box::

        $ fab vagrant some_task

    """
    # Pull the box's SSH settings (host, port, key, user) and merge them
    # into Fabric's global env so subsequent tasks target the box.
    config = ssh_config(name)
    extra_args = _settings_dict(config)
    env.update(extra_args) | 5,332,969 |
def cli(ctx, db_url, default_folder):
    """Welcome to frames.

    This project is just started and should be considered
    experimental and unstable.
    """
    # Group-level CLI entry point; subcommands perform the actual work.
    pass | 5,332,970 |
def get_list_from(matrix):
    """
    Flatten the strict upper triangle of a capability matrix into a list.

    Row i contributes the entries to the right of the diagonal
    (columns i+1 onward), concatenated in row order.
    """
    values = []
    for offset, row in enumerate(matrix, start=1):
        values.extend(row[offset:].tolist())
    return values
def get_user_bubble_text_for_justify_statement(statement: Statement, user: User, is_supportive: bool,
                                               _tn: Translator) -> Tuple[str, str]:
    """
    Returns user text for a bubble when the user has to justify a statement and text for the add-position-container

    :param statement: The statement that shall be justified
    :param user: The user concerned
    :param is_supportive: Indicates whether the justification is to be supportive
    :param _tn: The default Translator
    :return: The bubble text to be shown as well as the text for the corresponding premise
    """
    LOG.debug("%s is supportive? %s", statement, is_supportive)
    text = statement.get_text()
    # German phrasing needs an explicit "it is (not) true that ..." lead-in.
    if _tn.get_lang() == 'de':
        intro = _tn.get(_.itIsTrueThat if is_supportive else _.itIsFalseThat)
        add_premise_text = start_with_capital(intro) + ' ' + text
    else:
        add_premise_text = start_with_capital(text) + ' ' + _tn.get(
            _.holds if is_supportive else _.isNotAGoodIdea).strip()
    add_premise_text += ', ...'
    is_users_opinion = False
    # Check whether this user previously marked the statement as their own opinion.
    if user:
        db_marked_statement = DBDiscussionSession.query(MarkedStatement).filter(
            MarkedStatement.statement_uid == statement.uid,
            MarkedStatement.author_uid == user.uid
        ).first()
        is_users_opinion = db_marked_statement is not None
    if is_users_opinion:
        intro = _tn.get(_.youHaveTheOpinionThat)
        outro = '' if is_supportive else ', ' + _tn.get(_.isNotAGoodIdea)
        text = intro.format(text) + outro
        return text, add_premise_text
    # Fall back to a generic agree/disagree phrasing.
    if is_supportive:
        intro = _tn.get(_.iAgreeWithX) if _tn.get_lang() == 'de' else '{}'
    else:
        intro = _tn.get(_.iDisagreeWith)
    text = intro.format(text)
    return text, add_premise_text | 5,332,972 |
def convert_yolo_to_coco_format(
        data_dir,
        out_dir,
        split,
        extensions=['.jpg', '.png']):
    """Convert YOLO format to COCO format.

    Parameters
        data_dir: str
            Path to image directory; labels are sibling .txt files and
            ../classes.txt must exist.
        out_dir: str
            Directory receiving the output label json file(s).
        split: float
            Fraction of samples held out as a test set; values <= 0
            disable splitting.
        extensions: list
            Supported image extensions.
            NOTE(review): mutable default argument — harmless here since
            it is never mutated, but worth tightening.
    """
    assert os.path.isdir(data_dir), f'Not found input {data_dir}'
    # Load image paths and labels
    image_paths = []
    for ext in extensions:
        image_paths += list(glob.glob(os.path.join(data_dir, f'*{ext}')))
    # YOLO labels live next to the images with a .txt extension.
    label_paths = [os.path.splitext(path)[0] + '.txt' for path in image_paths]
    assert len(image_paths) == len(label_paths), 'Number of images and labels must match'
    print(f'Found {len(image_paths)} samples')
    splits = {}
    if split > 0:
        num_train = int(len(image_paths) * (1 - split))
        # Shuffle before splitting so train/test are drawn randomly.
        indices = np.arange(len(image_paths))
        np.random.shuffle(indices)
        image_paths = np.array(image_paths)[indices].tolist()
        label_paths = np.array(label_paths)[indices].tolist()
        train_image_paths = image_paths[:num_train]
        train_label_paths = label_paths[:num_train]
        test_image_paths = image_paths[num_train:]
        test_label_paths = label_paths[num_train:]
        print('Split the origin dataset into training and testing set')
        print('Number of training samples:', len(train_image_paths))
        print('Number of testing samples:', len(test_image_paths))
        splits = {
            'train': [train_image_paths, train_label_paths],
            'test': [test_image_paths, test_label_paths],
        }
    else:
        splits = {'train': [image_paths, label_paths]}
    # Check class file
    class_file = os.path.join(data_dir, '..', 'classes.txt')
    assert os.path.isfile(class_file), f'Not found class file {class_file}'
    # Load class file
    classes = [x.strip() for x in open(class_file)]
    # Write one COCO json per split.
    for split, data in splits.items():
        print(f'Creating dataset {split}...')
        out_label_path = os.path.join(out_dir, f'{split}.json')
        write_coco(data, classes, out_label_path) | 5,332,973 |
def g1_constraint(x, constants, variables):
    """ Constraint that the initial value of tangent modulus > 0 at ep=0.

    :param np.ndarray x: Parameters of updated Voce-Chaboche model.
    :param dict constants: Defines the constants for the constraint (unused).
    :param dict variables: Defines constraint values that depend on x (unused).
    :return float: Value of the constraint in standard form.
    """
    # Evaluate the shared constraint function at zero plastic strain.
    return g_constraint(x, 0.)
def disconnect(connection_handler):
    """ Close a current database connection.

    :param connection_handler: the Connection object (may be None)
    :return: 0 if success and -1 if an exception arises
    """
    try:
        if connection_handler is None:
            return 0
        connection_handler.close()
        return 0
    except sqlite3.Error as e:
        logger.error('Database disconnection error: {0}'.format(e))
        return -1
def init_finder(**kwargs):
    """Create the global VersionImporter and initialize the finder.

    Any keyword arguments are forwarded to ``init_loader`` before the
    finder is installed.
    """
    global FINDER
    if len(kwargs) > 0:
        init_loader(**kwargs)
    # I must insert it at the beginning so it goes before FileFinder
    FINDER = PyLibImportFinder()
    sys.meta_path.insert(0, FINDER) | 5,332,976 |
def ensure_directory_exists(folder: Union[str, Path]):
    """Create *folder* (including parents) if it does not already exist.

    Args:
        folder (str | Path): path of the new folder; an empty string is a
            no-op.
    """
    folder = str(folder)
    if folder == "":
        return
    try:
        os.makedirs(folder)
    except FileExistsError:
        # Path already exists (EEXIST); original semantics silently
        # ignore this case. Python 3 maps EEXIST to FileExistsError, so
        # the manual errno check is unnecessary.
        pass
def throw_out_nn_indices(ind, dist, Xind):
"""Throw out near neighbor indices that are used to embed the time series.
This is an attempt to get around the problem of autocorrelation.
Parameters
----------
ind : 2d array
Indices to be filtered.
dist : 2d array
Distances to be filtered.
Xind : int
Indices to filter.
Returns
-------
filt_ind : 2d array
Filtered indices.
filt_dist : 2d array
Filtered distances.
"""
ind_store = []
dist_store = []
#iterate through each row
for i in range(len(Xind)):
xrow = Xind[i]
indrow = ind[i]
distrow = dist[i]
mask = np.ones(len(indrow),dtype=bool)
for val in xrow:
mask[indrow == val] = False
ind_store.append( indrow[mask] )
dist_store.append(distrow[mask])
#keep up to the shortest mask. This is so that we can vstack them
ind_len = min( [len(m) for m in ind_store] )
#make all lists the same size for concatenation
ind_store = [m[:ind_len] for m in ind_store]
dist_store = [m[:ind_len] for m in dist_store]
ind_store = np.vstack(ind_store)
dist_store = np.vstack(dist_store)
return dist_store, ind_store | 5,332,978 |
def lead_angle(target_disp, target_speed, target_angle, bullet_speed):
    """
    Given the displacement, speed and direction of a moving target, and the speed
    of a projectile, returns the angle at which to fire in order to intercept the
    target. If no such angle exists (for example if the projectile is slower than
    the target), then None is returned.
    """
    # Geometry: the gun, target and intercept point form a triangle whose
    # sides scale with time t (target travels St*t, bullet Sb*t). Dividing
    # out t leaves sides St and Sb; the law of sines on that triangle gives
    # the lead angle directly: asin((St/Sb) * sin(interior angle)).
    if target_speed > bullet_speed:
        return None
    dx, dy = target_disp[0], target_disp[1]
    if dx == 0 and dy == 0:
        return None
    # Bearing from gun to target.
    bearing = math.atan2(dy, dx)
    ratio = target_speed / bullet_speed
    return bearing + math.asin(
        ratio * math.sin(bearing - target_angle - math.pi)
    )
def forward_rate_constants_func(self):
    """Update self.forward_rate_constants for the current state.

    Evaluates modified-Arrhenius rates for all reactions, then overrides
    falloff reactions (type 4, optionally Troe-blended) and
    pressure-dependent-Arrhenius reactions (type 5, log-P interpolation),
    and finally applies the uncertainty-quantification factors (uq_A).
    """
    ln10 = torch.log(torch.Tensor([10.0])).to(self.device)
    # Base Arrhenius rates: A * T^b * exp(-Ea/RT); Ea is given in
    # kcal/mol, hence the 4184 J/kcal conversion. C_M2 carries the
    # third-body / concentration factor.
    self.forward_rate_constants = (self.Arrhenius_A *
                                   torch.exp(self.Arrhenius_b * torch.log(self.T) -
                                             self.Arrhenius_Ea * 4184.0 / self.R / self.T)
                                   * self.C_M2)
    # Falloff reactions (type 4): blend low- and high-pressure limits.
    for i in self.list_reaction_type4:
        reaction = self.reaction[i]
        # high pressure
        self.kinf = (reaction['A'] *
                     torch.exp(reaction['b'] * torch.log(self.T)
                               - reaction['Ea'] * 4184.0 / self.R / self.T))
        # low pressure
        self.k0 = (self.reaction[i]['A_0'] *
                   torch.exp(reaction['b_0'] * torch.log(self.T)
                             - reaction['Ea_0'] * 4184.0 / self.R / self.T))
        # Reduced pressure Pr = k0 [M] / kinf (Lindemann form).
        Pr = self.k0 * self.C_M[:, i: i + 1] / self.kinf
        lPr = torch.log10(Pr)
        self.k = self.kinf * Pr / (1 + Pr)
        if 'Troe' in self.reaction[i]:
            # Troe broadening factor F applied on top of the Lindemann rate.
            A = reaction['Troe']['A']
            T1 = reaction['Troe']['T1']
            T2 = reaction['Troe']['T2']
            T3 = reaction['Troe']['T3']
            F_cent = ((1 - A) * torch.exp(-self.T / T3) +
                      A * torch.exp(-self.T / T1) + torch.exp(-T2 / self.T))
            lF_cent = torch.log10(F_cent)
            C = -0.4 - 0.67 * lF_cent
            N = 0.75 - 1.27 * lF_cent
            f1 = (lPr + C) / (N - 0.14 * (lPr + C))
            F = torch.exp(ln10 * lF_cent / (1 + f1 * f1))
            self.k = self.k * F
        self.forward_rate_constants[:, i: i + 1] = self.k
    # Pressure-dependent-Arrhenius reactions (type 5): interpolate in log P
    # between rate expressions tabulated at discrete pressures.
    for i in self.list_reaction_type5:
        reaction = self.reaction[i]
        self.kk = [[None]] * self.n_rate_constants[i]
        # calculate rate expressions at all given pressures
        for j in range(self.n_rate_constants[i]):
            self.kk[j] = (reaction['p_dep']['A'][j] *
                          torch.exp(reaction['b'][j] * torch.log(self.T)
                                    - reaction['Ea'][j] * 4184.0 / self.R / self.T))
        # jhigh1 corresponds to the first Arrhenius expression given at the minumum pressure
        # higher than actual pressure. Considering multiple rate expressions may be given
        # at the same pressure, we need jhigh2, which corresponds to the last Arrhenius
        # expression given at the minumum pressure higher than actual pressure.
        jhigh1 = self.n_rate_constants[i]
        for j in range(self.n_rate_constants[i]):
            if self.P[0] <= reaction['P'][j]:
                jhigh1 = j
                break
        if jhigh1 != self.n_rate_constants[i]:
            for j in range(self.n_rate_constants[i] - 1, -1, -1):
                if reaction['P'][j] == reaction['P'][jhigh1]:
                    jhigh2 = j
                    break
        # jlow1 corresponds to the last Arrhenius expression given at the maximum pressure
        # lower than actual pressure while jlow2 corresponds to the first.
        jlow1 = -1
        for j in range(self.n_rate_constants[i] - 1, -1, -1):
            if self.P[0] >= reaction['P'][j]:
                jlow1 = j
                break
        if jlow1 != -1:
            for j in range(self.n_rate_constants[i]):
                if reaction['P'][j] == reaction['P'][jlow1]:
                    jlow2 = j
                    break
        # This is the case where the actual pressure is higher than all given pressures.
        if jhigh1 == self.n_rate_constants[i]:
            for j in range(self.n_rate_constants[i]):
                if reaction['P'][j] == reaction['P'][jhigh1 - 1]:
                    jhigh2 = j
                    break
            # Sum all expressions given at the highest tabulated pressure.
            self.k = self.kk[jhigh1 - 1]
            if jhigh2 != jhigh1 - 1:
                for j in range(jhigh2, jhigh1 - 1):
                    self.k = self.k + self.kk[j]
        # This is the case where the actual pressure is lower than all given pressures.
        if jlow1 == -1:
            for j in range(self.n_rate_constants[i] - 1, -1, -1):
                if reaction['P'][j] == reaction['P'][0]:
                    jlow2 = j
                    break
            # Sum all expressions given at the lowest tabulated pressure.
            self.k = self.kk[0]
            if jlow2 != 0:
                for j in range(1, jlow2 + 1):
                    self.k = self.k + self.kk[j]
        # This is the case where the actual pressure is higher than the minimum
        # given pressure and lower than the maximum given pressure.
        if jhigh1 != self.n_rate_constants[i] and jlow1 != -1:
            self.k1 = self.kk[jlow1]
            self.k2 = self.kk[jhigh1]
            if jhigh1 != jhigh2:
                for j in range(jhigh1 + 1, jhigh2 + 1):
                    self.k2 = self.k2 + self.kk[j]
            if jlow1 != jlow2:
                for j in range(jlow2, jlow1):
                    self.k1 = self.k1 + self.kk[j]
            # Log-linear interpolation in pressure between k1 and k2.
            logk = (torch.log(self.k1) + (torch.log(self.k2) - torch.log(self.k1))
                    * (torch.log(self.P[0]) - torch.log(reaction['P'][jlow1])) /
                    (torch.log(reaction['P'][jhigh1]) - torch.log(reaction['P'][jlow1])))
            self.k = torch.exp(logk)
        self.forward_rate_constants[:, i: i + 1] = self.k
    # Apply uncertainty-quantification multipliers on the pre-exponentials.
    self.forward_rate_constants = self.forward_rate_constants * self.uq_A.abs() | 5,332,980 |
def fix_attr_encoding(ds):
    """ This is a temporary hot-fix to handle the way metadata is encoded
    when we read data directly from bpch files. It removes the 'scale_factor'
    and 'units' attributes we encode with the data we ingest, converts the
    'hydrocarbon' and 'chemical' attribute to a binary integer instead of a
    boolean, and removes the 'units' attribute from the "time" dimension since
    that too is implicitly encoded.

    In future versions of this library, when upstream issues in decoding
    data wrapped in dask arrays is fixed, this won't be necessary and will be
    removed.
    """
    def _maybe_del_attr(da, attr):
        """ Possibly delete an attribute on a DataArray if it's present """
        if attr in da.attrs:
            del da.attrs[attr]
        return da

    def _maybe_decode_attr(da, attr):
        # TODO: Fix this so that bools get written as attributes just fine
        """ Possibly coerce an attribute on a DataArray to an easier type
        to write to disk. """
        # bool -> int. The previous condition read
        # `type(da.attrs[attr] == bool)`, which is always truthy (it is the
        # type of a comparison result), so int() was applied to ANY present
        # attribute — crashing on non-numeric values. Only coerce real bools.
        if attr in da.attrs and isinstance(da.attrs[attr], bool):
            da.attrs[attr] = int(da.attrs[attr])
        return da

    for v in ds.data_vars:
        da = ds[v]
        da = _maybe_del_attr(da, 'scale_factor')
        da = _maybe_del_attr(da, 'units')
        da = _maybe_decode_attr(da, 'hydrocarbon')
        da = _maybe_decode_attr(da, 'chemical')
    # Also delete attributes on time.
    if hasattr(ds, 'time'):
        times = ds.time
        times = _maybe_del_attr(times, 'units')
    return ds
def cli(ctx, opt_fp_in_csv, opt_fp_in_img, opt_fp_out_dir):
    """Generate HTML report from deduped images

    Reads a CSV of duplicate pairs (fname_a, fname_b), renders an HTML
    index via Jinja, and copies the CSS and all referenced images into
    the output directory.
    """
    # ------------------------------------------------
    # imports
    import sys
    from os.path import join
    from glob import glob
    import pandas as pd
    from tqdm import tqdm
    import jinja2
    from flask import url_for
    import shutil
    from app.utils import logger_utils, im_utils, file_utils

    log = logger_utils.Logger.getLogger()
    log.info(f'Generating HTML report from: {opt_fp_in_csv}')
    # Load the Jinja template used to render the report page.
    template_loader = jinja2.FileSystemLoader(searchpath="./static/")
    template_env = jinja2.Environment(loader=template_loader)
    TEMPLATE_FILE = "perceptual_hash_report.html"
    template = template_env.get_template(TEMPLATE_FILE)

    # create project output dir
    fp_out_dir_assets = join(opt_fp_out_dir, 'assets')
    fp_out_dir_images = join(opt_fp_out_dir, 'images')
    file_utils.ensure_dir(opt_fp_out_dir)
    file_utils.ensure_dir(fp_out_dir_assets)
    file_utils.ensure_dir(fp_out_dir_images)

    # Group duplicate rows by their reference image (fname_a).
    df_dupes = pd.read_csv(opt_fp_in_csv)
    image_groups = df_dupes.groupby('fname_a')
    log.info(f'Saving HTML report to: {opt_fp_out_dir}')
    # im_objs = df_dupes.to_records('dict')
    fp_out_html = join(opt_fp_out_dir, 'index.html')
    with open(fp_out_html, 'w') as fp:
        html_text = template.render(image_groups=image_groups,
                                    dir_ims=Path(fp_out_dir_images).name, dir_assets=Path(fp_out_dir_assets).name)
        fp.write(html_text)

    # copy css
    fp_src = 'static/assets/css.css'
    fp_dst = join(fp_out_dir_assets, Path(fp_src).name)
    shutil.copy(fp_src, fp_dst)

    # copy images
    for fname_a, image_group in image_groups:
        # get image a
        for df_im in image_group.itertuples():
            # image a
            fp_src = join(opt_fp_in_img, df_im.fname_a)
            fp_dst = join(fp_out_dir_images, df_im.fname_a)
            shutil.copy(fp_src, fp_dst)
            # image b
            fp_src = join(opt_fp_in_img, df_im.fname_b)
            fp_dst = join(fp_out_dir_images, df_im.fname_b)
            shutil.copy(fp_src, fp_dst) | 5,332,982 |
def acme_parser(characters):
    """Parse records from acme global

    Args:
        characters: characters to loop through the url

    Returns:
        2 item tuple containing all the meds as a list and a count of all meds
    """
    link = (
        'http://acmeglobal.com/acme/'
        'wp-content/themes/acme/trade_check.php'
        '?initchar_trade={0!s}&divname_trade=human')
    meds = []
    for character in characters:
        try:
            meds += parse_char(link, character)
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are not swallowed; retry once after a random back-off.
        except Exception:
            wait = rand(5, 15)
            print('Failed on character {!s}.'.format(character))
            print('Trying again in {0:d}s.'.format(wait))
            time.sleep(wait)
            try:
                meds += parse_char(link, character)
            except Exception:
                print('Failed on character {!s} again.'.format(character))
                print('Skipping character.')
    return (meds, len(meds))
def toStr(s: Any) -> str:
    """
    Convert a given value to a default string.

    Byte-like objects are decoded with the default encoding (undecodable
    bytes backslash-escaped); everything else goes through str().

    :param s: item to convert to a string
    :return: converted string
    """
    if hasattr(s, 'decode'):
        return s.decode(sys.getdefaultencoding(), 'backslashreplace')
    return str(s)
def standard_task(self):
    """A standard task component.

    Used for scheduled (timer-driven) jobs.
    """
    pass | 5,332,985 |
def standard_atari_env_spec(env):
    """Parameters of environment specification.

    Args:
        env: gym environment name (str), or a callable returning an env.

    Returns:
        HParams with the env factory and the standard Atari wrappers
        (reward clipping plus 4-frame stacking); simulated_env=False.
    """
    standard_wrappers = [[tf_atari_wrappers.RewardClippingWrapper, {}],
                         [tf_atari_wrappers.StackWrapper, {"history": 4}]]
    env_lambda = None
    if isinstance(env, str):
        env_lambda = lambda: gym.make(env)
    # A callable env takes precedence over the string branch above.
    if callable(env):
        env_lambda = env
    assert env_lambda is not None, "Unknown specification of environment"
    return tf.contrib.training.HParams(
        env_lambda=env_lambda, wrappers=standard_wrappers, simulated_env=False) | 5,332,986 |
def load_bikeshare(path='data', extract=True):
    """
    Downloads the 'bikeshare' dataset, saving it to the output
    path specified and returns the data.
    """
    # Delegate to the shared loader with the fixed dataset name.
    return _load_file_data('bikeshare', path, extract)
def file2bytes(filename: str) -> bytes:
    """
    Return the raw contents of *filename* as a byte string.
    """
    with open(filename, 'rb') as handle:
        return handle.read()
def load_session() -> dict:
    """
    Return the saved session dict, or an empty dict when no session
    file exists at SESSION_PATH.
    """
    try:
        # Context manager closes the handle; the previous version passed
        # SESSION_PATH.open() straight to json.load and leaked it.
        with SESSION_PATH.open() as handle:
            return json.load(handle)
    except FileNotFoundError:
        return {}
def _preprocess_zero_mean_unit_range(inputs, dtype=tf.float32):
    """Map image values from [0, 255] to [-1, 1]."""
    as_float = tf.cast(inputs, tf.float32)
    rescaled = as_float * (2.0 / 255.0) - 1.0
    return tf.cast(rescaled, dtype=dtype)
def fill_like(input, value, shape=None, dtype=None, name=None):
    """Create a uniformly filled tensor / array.

    Args:
        input: reference tensor/array supplying default shape and dtype.
        value: fill value.
        shape: optional shape overriding input's shape (numpy branch).
        dtype: optional dtype overriding input's dtype.
        name: optional name for the TF op.

    Returns:
        A tensor (TF branch) or ndarray (numpy branch) filled with `value`.
    """
    input = as_tensor(input)
    dtype = dtype or input.dtype
    if has_tensor([input, value, shape], 'tf'):
        value = cast(value, dtype)
        # tf.fill's signature is (dims, value, name); the previous call
        # passed (value, input.shape, name), i.e. arguments swapped.
        return tf.fill(input.shape, value, name)
    else:
        dtype = convert_dtype(dtype, 'np')
        if shape is None:
            return np.full_like(input, value, dtype=dtype)
        else:
            return np.full(shape, value, dtype=dtype)
def transform_item(key, f: Callable) -> Callable[[dict], dict]:
    """transform a value of `key` in a dict. i.e given a dict `d`, return a new dictionary `e` s.t e[key] = f(d[key]).

    >>> my_dict = {"name": "Danny", "age": 20}
    >>> transform_item("name", str.upper)(my_dict)
    {'name': 'DANNY', 'age': 20}
    """
    # itemmap visits each (key, value) pair; `when` applies the transform
    # only to the pair whose head equals `key`, and packstack maps identity
    # over the key and `f` over the value. All other pairs pass unchanged.
    return functional_generic.itemmap(
        functional_generic.when(
            functional_generic.compose_left(operator.head, operator.equals(key)),
            functional_generic.packstack(operator.identity, f),
        ),
    ) | 5,332,992 |
def _insert_service_modes(target, connection, **kw):
    """ Inserts service mode IDs and names after creating lookup table.

    Presumably registered as a SQLAlchemy ``after_create`` listener
    (signature matches: target table, connection, extra kwargs) — confirm
    against the listener registration.
    """
    # Seed the lookup table with the fixed set of service modes.
    statement = target.insert().values([
        {"id": 1, "name": "bus"},
        {"id": 2, "name": "coach"},
        {"id": 3, "name": "tram"},
        {"id": 4, "name": "metro"},
        {"id": 5, "name": "underground"}
    ])
    connection.execute(statement) | 5,332,993 |
def vectorisation():
    """Generate a dataframe with words from a document and corresponding
    tf-idf values, then write the dataframe to the s3 bucket.
    """
    # Fetch the source data once; the previous version called
    # get_joined_skills() twice (fit and concat), duplicating the load and
    # risking inconsistent frames.
    skills = get_joined_skills()
    v = TfidfVectorizer(stop_words=stop_words, min_df=min_df, max_df=max_df)
    vectorised_df = pd.DataFrame(
        v.fit_transform(skills["skills_and_occup_descr"]).toarray(),
        columns=v.get_feature_names(),
    )
    tfidf_values = pd.concat([skills, vectorised_df], axis=1)
    write_to_s3(tfidf_values, "occup-groupings-nh", "data/processed/tfidf_values.csv")
def mock_dataset(mocker, mock_mart, mart_datasets_response):
    """Returns an example dataset, built using a cached response.

    Patches ``mock_mart.get`` to return the cached datasets response (so
    no network access happens), then looks up the mouse gene dataset.
    """
    mocker.patch.object(mock_mart, 'get', return_value=mart_datasets_response)
    return mock_mart.datasets['mmusculus_gene_ensembl'] | 5,332,995 |
def loss_function(recon_x, x, mu, logvar):
    """Loss function for a variational autoencoder (VAE).

    Args:
        recon_x: reconstructed batch (probabilities in [0, 1]).
        x: target batch (values in [0, 1]).
        mu: latent means.
        logvar: latent log-variances.

    Returns:
        Scalar tensor: summed binary cross-entropy plus KL divergence.
    """
    # `size_average=False` is deprecated (removed in recent torch);
    # reduction='sum' is its exact modern equivalent.
    BCE = F.binary_cross_entropy(recon_x, x, reduction='sum')
    # KL(q||p) for diagonal Gaussian vs N(0, I):
    # -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
    KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    return BCE + KLD
def resize_img(img, size):
    """
    Resize an image to (size, size), keeping channels.

    ``scipy.misc.imresize`` was removed from SciPy, so this uses
    ``scipy.ndimage.zoom`` (order=1, i.e. bilinear) instead; the output
    dtype matches the input. Grayscale inputs gain a trailing channel
    axis so the result is always 3-D.

    Args:
        img: Input image in ndarray
        size: Target image size

    Returns: Resized image as ndarray of shape (size, size, C)
    """
    from scipy import ndimage
    zoom_factors = [size / img.shape[0], size / img.shape[1]]
    # Leave any channel axes untouched.
    zoom_factors += [1] * (img.ndim - 2)
    img = ndimage.zoom(img, zoom_factors, order=1)
    if len(img.shape) == 2:
        img = img.reshape((size, size, 1))
    return img
def cloudtopheight_IR(bt, cloudmask, latitude, month, method="modis"):
    """Cloud Top Height (CTH) from 11 micron channel.

    Brightness temperatures (bt) are converted to CTHs using the IR window approach:
    (bt_clear - bt_cloudy) / lapse_rate.

    NOTE(review): `cloudmask` is modified in place (NaNs zeroed) — callers
    keeping a reference should pass a copy.

    See also:
        :func:`skimage.measure.block_reduce`
            Down-sample image by applying function to local blocks.
        :func:`lapserate_moist_adiabate`
            Constant value 6.5 [K/km]
        :func:`lapserate_modis`
            Estimate of the apparent lapse rate in [K/km]
            depending on month and latitude acc. to Baum et al., 2012.

    Parameters:
        bt (ndarray): brightness temperatures form 11 micron channel.
        cloudmask (ndarray): binary cloud mask.
        month (int): month of the year.
        latitude (ndarray): latitudes in [°], positive North, negative South.
        method (str): approach used to derive CTH: 'modis' see Baum et al., 2012,
            'simple' uses the moist adiabatic lapse rate.

    Returns:
        ndarray: cloud top height.

    References:
        Baum, B.A., W.P. Menzel, R.A. Frey, D.C. Tobin, R.E. Holz, S.A.
        Ackerman, A.K. Heidinger, and P. Yang, 2012: MODIS Cloud-Top Property
        Refinements for Collection 6. J. Appl. Meteor. Climatol., 51,
        1145–1163, https://doi.org/10.1175/JAMC-D-11-0203.1
    """
    # Lapse rate
    if method == "simple":
        lapserate = lapserate_moist_adiabate()
    elif method == "modis":
        lapserate = lapserate_modis(month, latitude)
    else:
        raise ValueError("Method is not supported.")

    # Ratio of cloud-mask to bt resolution along the first axis.
    resolution_ratio = np.shape(cloudmask)[0] // np.shape(bt)[0]
    # Build an inverted (clear-sky) mask; NaNs count as cloudy so they are
    # excluded from the clear-sky average.
    cloudmask_inverted = cloudmask.copy()
    cloudmask_inverted[np.isnan(cloudmask_inverted)] = 1
    cloudmask_inverted = np.asarray(
        np.invert(np.asarray(cloudmask_inverted, dtype=bool)), dtype=int
    )
    cloudmask[np.isnan(cloudmask)] = 0
    cloudmask = np.asarray(cloudmask, dtype=int)
    # Match resolutions of cloud mask and brightness temperature (bt) arrays.
    if resolution_ratio > 1:
        # On bt resolution, flag pixels as cloudy only if all subgrid pixels
        # are cloudy in the original cloud mask.
        mask_cloudy = block_reduce(
            cloudmask, (resolution_ratio, resolution_ratio), func=np.alltrue
        )
        # Search for only clear pixels to derive a bt clearsky/ocean value.
        mask_clear = block_reduce(
            cloudmask_inverted, (resolution_ratio, resolution_ratio), func=np.alltrue
        )
    elif resolution_ratio < 1:
        # NOTE(review): integer floor-division makes resolution_ratio 0 when
        # the cloud mask is coarser than bt, so np.repeat gets 0 repeats
        # here — confirm this branch is exercised with the intended ratio.
        try:
            mask_cloudy = np.repeat(
                np.repeat(cloudmask, resolution_ratio, axis=0), resolution_ratio, axis=1
            )
            mask_clear = np.repeat(
                np.repeat(cloudmask_inverted, resolution_ratio, axis=0),
                resolution_ratio,
                axis=1,
            )
        except ValueError:
            raise ValueError(
                "Problems matching the shapes of provided cloud mask and bt arrays."
            )
    else:
        mask_cloudy = cloudmask.copy()
        mask_clear = cloudmask_inverted.copy()

    # bt of cloudy pixels only (NaN elsewhere), and the clear-sky average.
    bt_cloudy = np.ones(np.shape(bt)) * np.nan
    bt_cloudy[mask_cloudy] = bt[mask_cloudy]
    bt_clear_avg = np.nanmean(bt[mask_clear])

    return (bt_clear_avg - bt_cloudy) / lapserate | 5,332,998 |
def info2lists(info, in_place=False):
    """
    Return info with:
    1) `packages` dict replaced by a 'packages' list with indexes removed
    2) `releases` dict replaced by a 'releases' list with indexes removed

    info2list(info2dicts(info)) == info
    """
    # Nothing to convert: hand back the original object untouched.
    if 'packages' not in info and 'releases' not in info:
        return info
    result = info if in_place else info.copy()
    for section in ('packages', 'releases'):
        mapping = info.get(section)
        if mapping:
            result[section] = list(mapping.values())
    return result
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.