content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def where_op(condition, x, y):
    """Return a tensor of elements selected from either :attr:`x` or :attr:`y`,
    depending on :attr:`condition`.

    If the element in :attr:`condition` is nonzero, the corresponding `x`
    element is taken, otherwise the `y` element is taken.

    .. note::
        The tensors :attr:`condition`, :attr:`x`, :attr:`y` must be broadcastable.

    Args:
        condition (IntTensor): When 1 (nonzero), yield x, otherwise yield y
        x (Tensor or Scalar): value (if :attr:`x` is a scalar) or values selected
            at indices where :attr:`condition` is True
        y (Tensor or Scalar): value (if :attr:`y` is a scalar) or values selected
            at indices where :attr:`condition` is False

    Returns:
        Tensor: A tensor of shape equal to the broadcasted shape of
        :attr:`condition`, :attr:`x`, :attr:`y`

    For example:

    .. code-block:: python

        >>> import numpy as np
        >>> import oneflow.experimental as flow
        >>> flow.enable_eager_execution()
        >>> x = flow.Tensor(
        ...     np.array([[-0.4620, 0.3139], [0.3898, -0.7197], [0.0478, -0.1657]]),
        ...     dtype=flow.float32,
        ... )
        >>> y = flow.Tensor(np.ones(shape=(3, 2)), dtype=flow.float32)
        >>> condition = flow.Tensor(np.array([[0, 1], [1, 0], [1, 0]]), dtype=flow.int32)
        >>> out = condition.where(x, y).numpy()
        >>> print(out)
        [[1.     0.3139]
         [0.3898 1.    ]
         [0.0478 1.    ]]

    """
    # Delegates to the Where op/module defined elsewhere in this package.
    return Where()(condition, x, y)
def get_out_dir(key: str) -> str:
    """
    Return the output directory for the given output product.

    :param key: output product (a key of the module-level ``OTD`` mapping)
    :return: the directory registered for ``key``
    """
    out_dirs = OTD
    return out_dirs[key]
def jet_fire_api521(Tvessel):
    """
    Jet-fire heat input per API 521 (incident heat flux of 100 kW/m2).

    :param Tvessel: vessel wall temperature
    :return: heat flux from ``stefan_boltzmann`` with fixed jet-fire parameters
    """
    # Fixed API 521 jet-fire parameters.
    absorptivity = 0.75
    flame_emissivity = 0.33
    surface_emissivity = 0.75
    conv_coeff = 40
    t_flame = 900 + 273.15       # flame temperature [K]
    t_radiative = 1100 + 273.15  # radiative temperature [K]
    return stefan_boltzmann(absorptivity, flame_emissivity, surface_emissivity,
                            conv_coeff, t_flame, t_radiative, Tvessel)
def ewma(values, window):
    """
    Numpy-based exponentially-weighted moving average.

    :param values: input sequence
    :param window: averaging window length
    :return: EMA array of length ``len(values) - window + 1``
    """
    # Exponentially decaying kernel, normalised to sum to one.
    kernel = np.exp(np.linspace(-1.0, 0.0, window))
    kernel = kernel / kernel.sum()
    # Full convolution, trimmed to the fully-overlapping region.
    return np.convolve(kernel, values)[window - 1:-window + 1]
import torch
def prepare_loss_weights(
        labels,
        pos_cls_weight=1.0,
        neg_cls_weight=1.0,
        loss_norm_type=LossNormType.NormByNumPositives,
        dtype=torch.float32,
):
    """Build classification and regression loss weights from anchor labels.

    Args:
        labels: [N, num_anchors] tensor; >0 marks a positive anchor, ==0 a
            negative anchor, <0 an ignored ("don't care") anchor.
        pos_cls_weight: classification weight for positive anchors.
        neg_cls_weight: classification weight for negative anchors.
        loss_norm_type: how to normalize the weights (a ``LossNormType``).
        dtype: dtype of the returned weight tensors.

    Returns:
        (cls_weights, reg_weights, cared): per-anchor classification and
        regression weights, plus the boolean "cared" mask (labels >= 0).
    """
    cared = labels >= 0
    # cared: [N, num_anchors]
    positives = labels > 0
    negatives = labels == 0
    negative_cls_weights = negatives.type(dtype) * neg_cls_weight
    cls_weights = negative_cls_weights + pos_cls_weight * positives.type(dtype)
    # Only positive anchors contribute to the regression loss.
    reg_weights = positives.type(dtype)
    if loss_norm_type == LossNormType.NormByNumExamples:
        # Normalize cls by the number of cared anchors per sample,
        # reg by the number of positives per sample.
        num_examples = cared.type(dtype).sum(1, keepdim=True)
        num_examples = torch.clamp(num_examples, min=1.0)
        cls_weights /= num_examples
        bbox_normalizer = positives.sum(1, keepdim=True).type(dtype)
        reg_weights /= torch.clamp(bbox_normalizer, min=1.0)
    elif loss_norm_type == LossNormType.NormByNumPositives:  # for focal loss
        # Normalize both weight sets by the per-sample positive count.
        pos_normalizer = positives.sum(1, keepdim=True).type(dtype)
        reg_weights /= torch.clamp(pos_normalizer, min=1.0)
        cls_weights /= torch.clamp(pos_normalizer, min=1.0)
    elif loss_norm_type == LossNormType.NormByNumPosNeg:
        # Normalize positives and negatives separately.
        pos_neg = torch.stack([positives, negatives], dim=-1).type(dtype)
        normalizer = pos_neg.sum(1, keepdim=True)  # [N, 1, 2]
        cls_normalizer = (pos_neg * normalizer).sum(-1)  # [N, M]
        cls_normalizer = torch.clamp(cls_normalizer, min=1.0)
        # cls_normalizer will be pos_or_neg_weight/num_pos_or_neg
        normalizer = torch.clamp(normalizer, min=1.0)
        reg_weights /= normalizer[:, 0:1, 0]
        cls_weights /= cls_normalizer
    else:
        raise ValueError(
            f"unknown loss norm type. available: {list(LossNormType)}")
    return cls_weights, reg_weights, cared
from typing import Optional
import subprocess
def capture_output(
        command: str, ip: Optional[str] = None, **kwargs
) -> ADBCommandResult:
    """
    Execute an adb command on the given device and return the result.

    :param command: command to execute
    :param ip: device id (passed to adb via ``-s``)
    :param kwargs: if contains the 'args' key, its value is passed as
        arguments to the input command; the 'log' key (default True)
        toggles debug logging of the command line
    :return: an ``ADBCommandResult`` built from (stdout, stderr) and the
        process return code
    """
    ip_arg = f"-s {ip} " if ip else ""
    adb = get_adb_path()
    command_line = f"{ip_arg}{command} {get_extra_arguments(**kwargs)}"
    command_full = f"{adb} {command_line}".strip()
    command_log = f"adb {command_line}".strip()
    try:
        if kwargs.get("log", True):
            log().debug(f"Executing `{command_log}`")
        # NOTE(review): naive str.split() breaks if the adb path or any
        # argument contains spaces — shlex.split would be safer; confirm.
        out = subprocess.Popen(
            command_full.split(),
            stderr=subprocess.PIPE,
            stdout=subprocess.PIPE,
            shell=False,
        )
        result = out.communicate()
        return ADBCommandResult(result, out.returncode)
    except subprocess.CalledProcessError as e:
        # NOTE(review): Popen/communicate never raise CalledProcessError
        # (that comes from check_call/run(check=True)); a missing adb binary
        # raises FileNotFoundError, which is NOT handled here — confirm
        # whether this handler was meant to catch launch failures.
        log().warning(e)
        return ADBCommandResult(code=ADBCommandResult.RESULT_ERROR)
def add_to_leftmost(branch, val):
    """Add ``val`` to the leftmost leaf of ``branch``.

    Returns the (possibly modified) branch together with the remaining
    value: 0 once the value has been absorbed, or ``val`` unchanged when
    nothing could be added (i.e. ``val`` is already 0).
    """
    if val == 0:
        # Nothing (left) to add; return the branch untouched.
        return branch, val
    if type(branch) is int:
        # Leaf reached: absorb the whole value here.
        return branch + val, 0
    # Descend left first; once absorbed, `val` is 0 and the right-hand
    # call returns its subtree unchanged.
    new_left, val = add_to_leftmost(branch[0], val)
    new_right, val = add_to_leftmost(branch[1], val)
    return [new_left, new_right], val
def normalize_list_of_dict_into_dict(alist):
    """
    Merge a list of single-key dict objects into one dict.

    @alist - the list of dicts (later entries win on key collisions).
    @return - normalized dict containing all keys.
    """
    merged = {}
    for entry in alist:
        # dict.update copies every key of the entry, same as the explicit
        # per-key assignment it replaces.
        merged.update(entry)
    return merged
import hashlib
def hashhex(s):
    """Return the hexadecimal SHA1 digest of the input.

    Accepts ``bytes`` or ``str``; a ``str`` is UTF-8 encoded first
    (``hashlib`` only accepts bytes, so the original raised TypeError on
    str input despite its docstring promising string support).

    :param s: data to hash (str or bytes)
    :return: 40-character hex digest
    """
    if isinstance(s, str):
        s = s.encode("utf-8")
    h = hashlib.sha1()
    h.update(s)
    return h.hexdigest()
import os
import errno
from typing import OrderedDict
import sys
def prerank(rnk, gene_sets, outdir='gseapy_out', pheno_pos='Pos', pheno_neg='Neg',
            min_size=15, max_size=500, permutation_n=1000, weighted_score_type=1,
            ascending=False, figsize=[6.5,6], format='pdf', graph_num=20, seed=None):
    """ Run Gene Set Enrichment Analysis with pre-ranked correlation defined by user.

    :param rnk: pre-ranked correlation table, same input as a ``GSEA`` .rnk file.
    :param gene_sets: Gene sets file, e.g. gmt files. Same input as GSEA.
    :param outdir: results output directory.
    :param pheno_pos: positive phenotype label used in the plots. Default: 'Pos'.
    :param pheno_neg: negative phenotype label used in the plots. Default: 'Neg'.
    :param permutation_n: Number of permutations for significance computation. Default: 1000.
    :param int min_size: Minimum allowed number of genes from gene set also in the data set. Default: 15.
    :param int max_size: Maximum allowed number of genes from gene set also in the data set. Default: 500.
    :param weighted_score_type: Refer to :func:`algorithm.enrichment_socre`. Default: 1.
    :param ascending: Sorting order of rankings. Default: False.
    :param figsize: Matplotlib figsize, accepts a tuple or list, e.g. [width,height]. Default: [6.5,6].
    :param format: Matplotlib figure format. Default: 'pdf'.
    :param graph_num: Plot graphs for top sets of each phenotype. Default: 20.
    :param seed: Random seed. Expects an integer. Default: None.
    :return: Return a DataFrame when inside a python console.
        Generate ``GSEA`` plots and store a dictionary into a csv file,
        where the dictionary key is a gene set and values are::

            | {es: enrichment score,
            | nes: normalized enrichment score,
            | p: P-value,
            | fdr: FDR,
            | size: gene set size,
            | matched_size: genes matched to the data,
            | genes: gene names from the data set}

    """
    # Snapshot the call arguments for logging before any locals are created.
    argument = locals()
    assert min_size <= max_size
    # Create the output directory, tolerating a pre-existing one.
    try:
        os.makedirs(outdir)
    except OSError as exc:
        if exc.errno != errno.EEXIST:
            raise exc
        pass
    logger = log_init(outdir, module='prerank')
    if isinstance(rnk, pd.DataFrame) :
        # Avoid dumping the whole ranking table into the log.
        argument['rnk'] = 'DataFrame'
    #write command to log file
    argument = OrderedDict(sorted(argument.items(), key=lambda t:t[0]))
    logger.debug("Command: prerank, "+str(argument))
    #Start Analysis
    logger.info("Parsing data files for GSEA.............................")
    dat2 = gsea_rank_metric(rnk)
    assert len(dat2) > 1
    #drop duplicates in ranking metrics.
    dat2.drop_duplicates(subset='gene_name',inplace=True, keep='first')
    #filtering out gene sets and build gene sets dictionary
    gmt = gsea_gmt_parser(gene_sets, min_size=min_size, max_size=max_size, gene_list=dat2['gene_name'].values)
    logger.info("%s gene_sets used for further statistical testing....."% len(gmt))
    logger.info("Start to run GSEA...Might take a while..................")
    #compute ES, NES, pval, FDR, RES
    results,hit_ind,rank_ES, subsets = gsea_compute(data=dat2, n=permutation_n, gmt=gmt, weighted_score_type=weighted_score_type,
                                                    permutation_type='gene_set', method=None, phenoPos=pheno_pos, phenoNeg=pheno_neg,
                                                    classes=None, ascending=ascending, seed=seed, prerank=True)
    logger.info("Start to generate gseapy reports, and produce figures...")
    # Assemble one record per gene set.
    res = OrderedDict()
    for gs,gseale,ind,RES in zip(subsets, list(results), hit_ind, rank_ES):
        rdict = OrderedDict()
        rdict['es'] = gseale[0]
        rdict['nes'] = gseale[1]
        rdict['pval'] = gseale[2]
        rdict['fdr'] = gseale[3]
        rdict['gene_set_size'] = len(gmt[gs])
        rdict['matched_size'] = len(ind)
        rdict['rank_ES'] = RES
        # NOTE(review): DataFrame.ix was removed in pandas 1.0 — this needs
        # .loc/.iloc on modern pandas; confirm whether `ind` holds labels or
        # positions before porting.
        rdict['genes'] = dat2.ix[ind,'gene_name'].tolist()
        rdict['hit_index'] = ind
        res[gs] = rdict
    res_df = pd.DataFrame.from_dict(res, orient='index')
    res_df.index.name = 'Term'
    res_df.sort_values(by='fdr', inplace=True)
    # Drop the per-row arrays before writing the flat CSV report.
    res_df.drop(['rank_ES','hit_index'], axis=1, inplace=True)
    res_df.to_csv('{a}/{b}.prerank.reports.csv'.format(a=outdir, b='gseapy'), float_format ='%.7f')
    #Plotting
    top_term = res_df.head(graph_num).index
    for gs in top_term:
        fig = gsea_plot(rank_metric=dat2, enrich_term=gs, hit_ind=res.get(gs)['hit_index'],
                        nes=res.get(gs)['nes'], pval=res.get(gs)['pval'], fdr=res.get(gs)['fdr'],
                        RES=res.get(gs)['rank_ES'], phenoPos=pheno_pos, phenoNeg=pheno_neg, figsize=figsize)
        # Sanitise the term name for use in a file name.
        gs = gs.replace('/','_')
        fig.savefig('{a}/{b}.gsea.{c}'.format(a=outdir, b=gs, c=format), dpi=300,)
    logger.info("Congratulations...GSEAPY run successfully...............")
    # return dataframe if run gsea inside python console
    #if isinstance(rnk, pd.DataFrame):
    if hasattr(sys, 'ps1'):
        # Interactive session: release the log file handlers so the log
        # file is not held open.
        handlers = logger.handlers[:]
        for handler in handlers:
            handler.close()
            logger.removeHandler(handler)
    return res_df
import argparse
def get_args():
    """Parse and return the command-line arguments."""
    arg_parser = argparse.ArgumentParser()
    # Positional: the system file to load.
    arg_parser.add_argument('filename', metavar='F', type=str, nargs=1,
                            help='File to load')
    # Algorithm selection flags.
    arg_parser.add_argument('-a', '--annealing', action='store_true',
                            default=False,
                            help='Use Simulated Annealing Algorithm?')
    arg_parser.add_argument('-g', '--genetic', action='store_true',
                            default=False,
                            help='Use Genetic Algorithm?')
    # Tuning options.
    arg_parser.add_argument('-n', '--numdistricts', type=int, default=None,
                            help=('Number of districts to form. Defaults to the '
                                  'width of the system'))
    arg_parser.add_argument('-z', '--animate', action='store_true', default=False,
                            help='Animate algorithms?')
    arg_parser.add_argument('-p', '--precision', type=int, default=1000,
                            help=('Tweak precision, lower is less. '
                                  'In a nutshell, how many loops to run.'))
    # Output generation flags.
    arg_parser.add_argument('-r', '--report', action='store_true', default=False,
                            help='Generate all assets for the report')
    arg_parser.add_argument('-j', '--gif', action='store_true', default=False,
                            help='Generate gif versions of animations?')
    arg_parser.add_argument('-F', '--full', action='store_true', default=False,
                            help='Generate everything. Report assets, SA, and GA.')
    args = arg_parser.parse_args()
    # We only allow 1 file at a time, so unwrap the single-element list.
    args.filename, = args.filename
    return args
import os
def read_results(folder, name):
    """
    Read in cluster results for ``name`` from ``folder``.

    Loads the dendrogram file ``<name>.dg_01`` and its label file
    ``<name>.dg_01.lab`` as numpy arrays.

    :return: (clu, tree) — cluster labels and dendrogram data
    """
    base = os.path.join(folder, name + '.dg_01')
    tree = np.loadtxt(base)
    clu = np.loadtxt(base + '.lab')
    return clu, tree
def planets(id='', name=''):
    """
    Return a planet, e.g. Hoth, Naboo, etc.

    Looks the planet up by ``id`` or ``name`` and renders the response.
    """
    result = Render.show(id, name, 'planets')
    return result
def literal(string):
    """
    If `string` is a valid literal in NTriples syntax, return a
    ``(value, datatype, language)`` triple, using `None` where there is no
    datatype or no language tag.
    If `string` is not a valid literal return `None`.
    """
    # `literal.pattern` is a compiled regex attached to this function
    # elsewhere in the module.
    match = literal.pattern.match(string)
    if not match:
        return None
    label, lang, dtype = match.groups()
    # The regex captures (label, lang, dtype) but the returned order is
    # (label, dtype, lang); the language tag is not unescaped.
    return ntriples_unescape(label), ntriples_unescape(dtype), lang
import logging
def make_error_logger(name, level, filename):
    """
    Create an error logger that writes to a file.

    :param name: logger name
    :param level: logging level
    :param filename: name of the error file
    :return: the configured logger
    """
    file_handler = logging.FileHandler(filename)
    file_handler.setFormatter(
        logging.Formatter("%(asctime)s %(levelname)s - %(message)s",
                          "%d/%m %H:%M:%S"))
    logger = logging.getLogger(name)
    logger.addHandler(file_handler)
    logger.setLevel(level)
    return logger
def correlate(a, b, shift, demean=True, normalize=True, domain='freq'):
    """
    Cross-correlation of signals a and b with specified maximal shift.

    :type a: :class:`~numpy.ndarray`, :class:`~obspy.core.trace.Trace`
    :param a: first signal
    :type b: :class:`~numpy.ndarray`, :class:`~obspy.core.trace.Trace`
    :param b: second signal to correlate with first signal
    :param int shift: Number of samples to shift for cross correlation.
        The cross-correlation will consist of ``2*shift+1`` or ``2*shift``
        samples; the sample with zero shift is in the middle.
    :param bool demean: Demean data beforehand.
    :param bool normalize: Normalize cross-correlation so that a perfect
        correlation corresponds to the value 1.
    :param str domain: ``'freq'`` performs the correlation in the frequency
        domain with :func:`scipy.signal.fftconvolve`; ``'time'`` in the time
        domain with :func:`scipy.signal.correlate`. Frequency domain is
        usually much faster; time domain may only win for small ``shift``
        (``⪅100``).
    :return: cross-correlation function. Use
        :func:`~obspy.signal.cross_correlation.xcorr_max` to find the shift
        and value of its maximum.

    .. note::
        Signals of different length are aligned around their middle, so for
        odd ``len(a)-len(b)`` the result has only ``2*shift`` samples,
        because a shift of 0 falls between two samples.

    .. rubric:: Example

    >>> a = np.random.randn(10000).astype(np.float32)
    >>> cc = correlate(a, a, 1000)
    >>> shift, value = xcorr_max(cc)
    >>> shift
    0
    >>> round(value, 5)
    1.0
    >>> b = np.roll(a, 20)  # shift a by 20 samples
    >>> cc = correlate(a, b, 1000)
    >>> shift, value = xcorr_max(cc)
    >>> shift
    -20
    >>> round(value, 2)
    1.0
    """
    # Unwrap Trace objects to their underlying data arrays.
    if isinstance(a, Trace):
        a = a.data
    if isinstance(b, Trace):
        b = b.data
    a = np.asarray(a)
    b = np.asarray(b)
    if demean:
        a = a - np.mean(a)
        b = b - np.mean(b)
    # Normalisation factor: the product of the two signals' L2 norms.
    if normalize:
        norm_factor = (np.sum(a ** 2)) ** 0.5 * (np.sum(b ** 2)) ** 0.5
        if norm_factor == 0:
            # An all-zero signal yields an all-zero correlation anyway;
            # use 1 to avoid division by zero.
            norm_factor = 1
    else:
        norm_factor = 1
    # Pick the usually faster implementation for the requested domain.
    if domain == 'freq':
        xcorr_func = _xcorr_slice
    elif domain == 'time':
        xcorr_func = _xcorr_padzeros
    else:
        raise ValueError("domain keyword has to be one of ('freq', 'time')")
    return xcorr_func(a, b, shift, domain=domain) / norm_factor
def get_user_idle_time():
    """
    Return the amount of time (in seconds) that the user is said to be idle.
    This is normally obtained from a lack of keyboard and/or mouse input.
    """
    # Only the matching platform helper is referenced, so unsupported
    # platforms never touch the other helper names.
    if system == 'Windows':
        return get_user_idle_time_windows()
    if system == 'Darwin':
        return get_user_idle_time_mac()
    raise NotImplementedError("This function is not yet implemented for %s" % system)
from typing import Dict
from typing import Any
from typing import Iterable
from typing import Optional
from typing import Tuple
def kwargs_from_config(
    config: Dict[str, Any],
    required_keys: Iterable[str],
    optional_keys: Iterable[str],
    renames: Optional[Iterable[Tuple[str, str]]] = None,
) -> Dict[str, Any]:
    """
    Build a kwargs dictionary from ``config`` restricted to the given keys.

    Required keys must be present (KeyError otherwise); optional keys are
    copied only when present. Keys listed in ``renames`` are treated as
    optional and stored under their new name.
    """
    result = {key: config[key] for key in required_keys}
    for key in optional_keys:
        if key in config:
            result[key] = config[key]
    for old_name, new_name in (renames or []):
        if old_name in config:
            result[new_name] = config[old_name]
    return result
def as_array(a, dtype=DEFAULT_FLOAT_DTYPE):
    """
    Convert the given :math:`a` variable to an *ndarray* with the given type.

    Parameters
    ----------
    a : object
        Variable to convert.
    dtype : object
        Type to use for the conversion.

    Returns
    -------
    ndarray
        :math:`a` variable converted to *ndarray*.

    Examples
    --------
    >>> as_array([1, 2, 3])
    array([ 1.,  2.,  3.])
    >>> as_array([1, 2, 3], dtype=DEFAULT_INT_DTYPE)
    array([1, 2, 3])
    """
    return np.asarray(a, dtype=dtype)
def round_filters(filters, global_params):
    """Round number of filters based on the depth (width) multiplier.

    Scales ``filters`` by ``global_params.width_coefficient`` and rounds it
    to the nearest multiple of ``global_params.depth_divisor`` (never below
    ``min_depth``, and never more than 10% below the scaled value).
    """
    width_mult = global_params.width_coefficient
    if not width_mult:
        # No width scaling configured: leave the filter count untouched.
        return filters
    divisor = global_params.depth_divisor
    floor = global_params.min_depth or divisor
    scaled = filters * width_mult
    rounded = max(floor, int(scaled + divisor / 2) // divisor * divisor)
    # Make sure that rounding down does not go down by more than 10%.
    if rounded < 0.9 * scaled:
        rounded += divisor
    return int(rounded)
from typing import List
def equal_opportunity(confusion_matrix_list: List[np.ndarray],
                      tolerance: float = 0.2,
                      label_index: int = 0) -> np.ndarray:
    """
    Check for equal opportunity between all of the sub-populations.

    Two sub-populations satisfy equal opportunity when the difference of
    their **true positive rates** is within the tolerance level. The input
    is a list of per-sub-group confusion matrices; see
    :func:`fatf.utils.metrics.tools.confusion_matrix_per_subgroup` (or its
    ``_indexed`` variant) for how to obtain one, or call
    :func:`fatf.fairness.models.measures.disparate_impact` /
    :func:`fatf.fairness.models.measures.disparate_impact_indexed`, which
    handle the grouping and compute the desired group-fairness criterion.

    Parameters
    ----------
    confusion_matrix_list : List[numpy.ndarray]
        A list of confusion matrices, one for each sub-population.
    tolerance : number, optional (default=0.2)
        A number between 0 and 1: how much any two true positive rates may
        differ and still be considered "equal".
    label_index : integer, optional (default=0)
        The index of the "positive" class in the confusion matrix. (Not
        required for binary problems.)

    Raises
    ------
    TypeError
        The tolerance parameter is not a number.
    ValueError
        The tolerance parameter is out of [0, 1] range.

    Returns
    -------
    disparity : numpy.ndarray
        A square, diagonally symmetric boolean array; an entry is ``True``
        iff the corresponding pair of sub-populations differs in true
        positive rate by more than the tolerance.
    """
    assert _validate_tolerance(tolerance), 'Invalid tolerance parameter.'
    # Per-group true positive rates.
    tpr = np.asarray(
        fums.apply_metric(
            confusion_matrix_list,
            'true positive rate',
            label_index=label_index))
    # Pairwise absolute differences, thresholded by the tolerance.
    pairwise = np.abs(tpr[:, np.newaxis] - tpr[np.newaxis, :])
    disparity = pairwise > tolerance
    assert np.array_equal(disparity, disparity.T), 'Must be symmetric.'
    return disparity
def determine_qc_protocol(project):
    """
    Determine the QC protocol for a project.

    Arguments:
      project (AnalysisProject): project instance

    Returns:
      String: QC protocol name for the project
    """
    # Bulk sequencing defaults.
    protocol = "standardPE" if project.info.paired_end else "standardSE"
    platform = project.info.single_cell_platform
    library = project.info.library_type
    if platform is not None:
        # Generic single-cell fallback.
        protocol = "singlecell"
        if platform.startswith("10xGenomics Chromium 3'"):
            if library == "scRNA-seq":
                # 10xGenomics scRNA-seq
                protocol = "10x_scRNAseq"
            elif library == "snRNA-seq":
                # 10xGenomics snRNA-seq
                protocol = "10x_snRNAseq"
            elif library in ("CellPlex",
                             "CellPlex scRNA-seq",
                             "CellPlex snRNA-seq"):
                # 10xGenomics CellPlex (cell multiplexing)
                protocol = "10x_CellPlex"
        elif library in ("scATAC-seq",
                         "snATAC-seq",):
            if platform == "10xGenomics Single Cell ATAC":
                # 10xGenomics scATAC-seq
                protocol = "10x_scATAC"
            elif platform == "ICELL8":
                # ICELL8 scATAC-seq
                protocol = "ICELL8_scATAC"
    # 10xGenomics Visium spatial transcriptomics
    if platform == "10xGenomics Visium":
        protocol = "10x_Visium"
    # 10xGenomics single cell Multiome ATAC+GEX
    if platform == "10xGenomics Single Cell Multiome":
        if library == "ATAC":
            protocol = "10x_Multiome_ATAC"
        elif library == "GEX":
            protocol = "10x_Multiome_GEX"
    return protocol
def entitydata_list_url_query(viewname, kwargs, query_params, more_params):
    """
    Helper for generating entity-list URLs with query parameters appended.
    """
    base_url = reverse(viewname, kwargs=kwargs)
    return uri_with_params(base_url, query_params, more_params)
def _adaptive_order_weno3_robust(q,
i,
j,
recons,
keep_positive,
eps=1.0e-17,
c1=1.0,
c2=1.0e3,
c3=1.0,
exponent=4,
wenoz=False):
"""
A robust WENO3 reconstruction using 5 points.
The individual polynomials stencils for the reconstruction are written as
.. math::
\\begin{align}
u(\\xi) = u_0 + u_\\xi P_1(\\xi) + u_{\\xi\\xi} P_2(\\xi)
\\end{align}
The left-, central-, and right-biased stencils for the one-dimensional
coefficients are:
.. math::
\\begin{align}
u_{\\xi}^{(L)}&=\\frac{1}{2} u_{-2} - 2u_{-1} + \\frac{3}{2} u_0 \\\\
u_{\\xi\\xi}^{(L)}&=\\frac{u_{-2} - 2 u_{-1} + u_0}{2} \\\\
u_{\\xi}^{(C)}&=\\frac{1}{2}(u_1 - u_{-1}) \\\\
u_{\\xi\\xi}^{(C)}&=\\frac{u_{-1} - 2 u_0 + u_1}{2} \\\\
u_{\\xi}^{(R)}&=-\\frac{3}{2}u_0 + 2 u_1 - \\frac{1}{2} u_2 \\\\
u_{\\xi\\xi}^{(R)}&=\\frac{u_{0} - 2 u_{1} + u_2}{2}.
\\end{align}
The oscillation indicators are given by
.. math::
\\beta_{(i)} = \\left(u_\\xi^{(i)}\\right)^2 +
\\frac{13}{3}\\left(u_{\\xi\\xi}^{(i)}\\right)^2,
where :math:`i\\in\{L,C,R\}`. The nonlinear weights are:
.. math::
\\begin{align}
\\omega_k &= \\frac{\\alpha_k}{\sum_{l=0}^{2}\\alpha_l} \\\\
\\alpha_k &= \\frac{\\lambda_k}{(\\beta_k + \\epsilon_k)^p}
\\end{align}
where :math:`p` is usually chosen to be 4 or 8, and :math:`\\lambda_0=1`,
:math:`\\lambda_1=10^5`, and :math:`\\lambda_2=1`.
To obtain the WENOZ weights use :math:`p=1` and with the new oscillation
indicators
.. math::
\\beta_k^Z=\\frac{\\beta_k}{\\beta_k + \\tau_5 + \\epsilon_k}
where
.. math::
\\tau_5 = |\\beta_3 - \\beta_1|.
:param q: The variable values at the cell centers.
:type q: list of double
:param int i: The index into the reconstructed array
:param int j: The index of the cell whose faces are being
reconstructed in `q`
:param recons: The array of the reconstructed variable.
:type recons: list of double
:param bool keep_positive: If `True` then returns `False` if the
reconstructed solution is not positive.
:param double eps: The `epsilon` parameter to avoid division by zero.
:param double c0: The linear weight :math:`\\lambda_{0}`.
:param double c1: The linear weight :math:`\\lambda_{1}`.
:param double c2: The linear weight :math:`\\lambda_{2}`.
:param double exponent: The exponent :math:`p` in denominator of the
:math:`\\alpha_k`.
:param bool wenoz: If `True` then use the WENOZ weights.
:return: (`bool`) `True` if the reconstruction was successful, otherwise
`False`
"""
s1_ux = -2.0 * q[j - 1] + 0.5 * q[j - 2] + 1.5 * q[j]
s1_ux2 = 0.5 * (q[j - 2] - 2.0 * q[j - 1] + q[j])
s2_ux = 0.5 * (q[j + 1] - q[j - 1])
s2_ux2 = 0.5 * (q[j - 1] - 2.0 * q[j] + q[j + 1])
s3_ux = -1.5 * q[j] + 2.0 * q[j + 1] - 0.5 * (q[j + 2])
s3_ux2 = (q[j] - 2.0 * q[j + 1] + q[j + 2])
beta1 = s1_ux**2 + (13.0 / 3.0) * s1_ux2**2
beta2 = s2_ux**2 + (13.0 / 3.0) * s2_ux2**2
beta3 = s3_ux**2 + (13.0 / 3.0) * s3_ux2**2
if wenoz:
# WENOZ
tau5 = np.abs(beta3 - beta1)
beta1 = beta1 / (beta1 + tau5 + eps)
beta2 = beta2 / (beta2 + tau5 + eps)
beta3 = beta3 / (beta3 + tau5 + eps)
alpha_denom1 = (eps + beta1)**exponent
alpha_denom2 = (eps + beta2)**exponent
alpha_denom3 = (eps + beta3)**exponent
alpha1 = c1 / alpha_denom1
alpha2 = c2 / alpha_denom2
alpha3 = c3 / alpha_denom3
w1 = alpha1 / (alpha1 + alpha2 + alpha3)
w2 = alpha2 / (alpha1 + alpha2 + alpha3)
w3 = alpha3 / (alpha1 + alpha2 + alpha3)
# L0 = 1
# L1(1/2) = 1/2 L1(-1/2) = -1/2
# L2(1/2) = 1/6 L2(-1/2) = 1/6
if keep_positive:
def sgn(x):
return -1.0 if x < 0.0 else (1.0 if x > 0.0 else 0.0)
# a x^2 + b x + c
a = w1 * s1_ux2 + w2 * s2_ux2 + w3 * s3_ux2
b = w1 * s1_ux + w2 * s2_ux + w3 * s3_ux
c = (w1 + w2 + w3) * q[j] - 1.0 / 12.0 * (w1 * s1_ux2 + w2 * s2_ux2 +
w3 * s3_ux2)
q_root = -0.5 * (b + sgn(b) * np.sqrt(b**2 - 4.0 * a * c))
x1 = q_root / a
x2 = c / q_root
# Have a negative root, so return False
root_bound = 2.5
if (x1 < root_bound and x1 > -root_bound) or (x2 < root_bound
and x2 > -root_bound):
return False
recons[
2 * i +
1] = w1 * (q[j] + s1_ux * (-0.5) + s1_ux2 *
(1.0 / 6.0)) + w2 * (q[j] + s2_ux * (-0.5) + s2_ux2 *
(1.0 / 6.0)) + w3 * (q[j] + s3_ux *
(-0.5) + s3_ux2 *
(1.0 / 6.0))
recons[
2 * i +
2] = w1 * (q[j] + s1_ux * (0.5) + s1_ux2 *
(1.0 / 6.0)) + w2 * (q[j] + s2_ux * (0.5) + s2_ux2 *
(1.0 / 6.0)) + w3 * (q[j] + s3_ux *
(0.5) + s3_ux2 *
(1.0 / 6.0))
return True | dc772944d6eb13a02752d995f738b385a01fd7a0 | 3,626,823 |
import torch
def patch_and_fit_physio(time_series, replicates, patch=3, mask=None,
                         mode='gn', verbose=0):
    """Extract patches from an fMRI time + replicate series and fit parameters.

    Parameters
    ----------
    time_series : (replicates, *input_shape) tensor_like
        fMRI time series.
    replicates : (replicates, *input_shape) tensor_like
        fMRI replicate series.
    patch : int, default=3
        Patch size. Should be odd.
    mask : (*input_shape) tensor_like, optional
        Voxel mask; patches with mean coverage <= 0.75 are excluded.
    mode : str, default='gn'
        Optimisation mode forwarded to the fitting routines.
    verbose : int, default=0
        Verbosity level forwarded to the fitting routines.

    Returns
    -------
    sig_p : (*output_shape) tensor
        Fitted physiological noise amplitude.
    lam_p : (*output_shape) tensor
        Fitted physiological noise length-scale.
    sig_0 : (*output_shape) tensor
        Fitted thermal noise amplitude.
    lam_0 : (*output_shape) tensor
        Fitted intrinsic smoothness length-scale.

    Notes
    -----
    The output maps only contain voxels for which full patches could be
    extracted in the input volume. Therefore, the output shape is
    `output_shape = input_shape - patch + 1`.
    """
    # Compute output shape
    time_series = torch.as_tensor(time_series)
    replicates = torch.as_tensor(replicates)
    backend = utils.backend(time_series)
    dim = time_series.dim() - 1
    shape = time_series.shape[1:]
    output_shape = [s - patch + 1 for s in shape]
    has_mask = mask is not None
    if has_mask:
        mask = get_patches(mask[None], patch)
        # BUGFIX: was `mask.reshape(mask, [len(mask), -1])`, which passes the
        # tensor itself as the shape argument and raises a TypeError.
        mask = mask.reshape([len(mask), -1])
        # Keep only patches with at least 75% mask coverage.
        mask = mask.mean(dim=-1) > 0.75
    sqdist = dist_map([patch]*dim, **backend)
    # Estimate thermal/intrinsic from replicate series
    replicates = patch_and_cov(replicates, patch)
    if has_mask:
        replicates = replicates[mask, :, :]
    sig_0, lam_0 = fit_thermal(replicates, sqdist, dim=dim,
                               mode=mode, verbose=verbose)
    se_thermal = build_se_thermal(sqdist, sig_0, lam_0, dim=dim)
    del replicates
    # Estimate physio from time series
    time_series, mean = patch_and_cov(time_series, patch, return_mean=True)
    if has_mask:
        time_series = time_series[mask, :, :]
    # Remove the thermal component, then normalise by the squared mean.
    time_series -= se_thermal
    del se_thermal
    mean = mean.mean(-1)[..., None, None].square_()
    time_series /= mean
    del mean
    sig_p, lam_p = fit_physio(time_series, lam_0, sqdist, dim=dim,
                              mode=mode, verbose=verbose)
    del time_series
    # Reshape as maps
    if has_mask:
        # Scatter the masked-patch fits back into dense maps.
        sig_p0, lam_p0, sig_00, lam_00 = sig_p, lam_p, sig_0, lam_0
        sig_p = sig_p0.new_zeros(len(mask))
        sig_p[mask] = sig_p0
        lam_p = lam_p0.new_zeros(len(mask))
        lam_p[mask] = lam_p0
        sig_0 = sig_00.new_zeros(len(mask))
        sig_0[mask] = sig_00
        lam_0 = lam_00.new_zeros(len(mask))
        lam_0[mask] = lam_00
        del sig_p0, lam_p0, sig_00, lam_00
    sig_p = sig_p.reshape(output_shape)
    lam_p = lam_p.reshape(output_shape)
    sig_0 = sig_0.reshape(output_shape)
    lam_0 = lam_0.reshape(output_shape)
    return sig_p, lam_p, sig_0, lam_0
def verify_file_exists(file_name, file_location):
    """
    Verify whether a file exists.

    :type file_name: String
    :param file_name: The name of the file to check
    :type file_location: String
    :param file_location: The location of the file, derived from the os module
    :rtype: Boolean
    :return: True if the file exists, otherwise False
    """
    full_path = __os.path.join(file_location, file_name)
    return __os.path.isfile(full_path)
import math
def plot_cdfs(x, y, ccdf=False):
    """plot cumulative density functions for each column in x, based on
    the classification specified in y.

    Parameters
    ----------
    x : DataFrame
        the experiments to use in the cdfs
    y : ndaray
        the categorization for the data
    ccdf : bool, optional
        if true, plot a complementary cdf instead of a normal cdf.

    Returns
    -------
    a matplotlib Figure instance

    """
    # Work on a copy so the caller's DataFrame is not mutated.
    x = x.copy()
    # The 'scenario' column (if present) is bookkeeping, not an uncertainty.
    try:
        x = x.drop('scenario', axis=1)
    except KeyError:
        pass
    uncs = x.columns.tolist()
    # NOTE(review): `cp` is unused here — presumably the palette is applied
    # inside plot_individual_cdf; confirm before removing.
    cp = sns.color_palette()
    # Lay the subplots out on a fixed-width grid of 4 columns.
    n_col = 4
    n_row = math.ceil(len(uncs) / n_col)
    size = 3
    aspect = 1
    figsize = n_col * size * aspect, n_row * size
    fig, axes = plt.subplots(n_row, n_col,
                             figsize=figsize,
                             squeeze=False)
    # NOTE(review): assumes at least one column — i_col/i_row leak out of
    # this loop and would raise NameError for an empty DataFrame.
    for i, unc in enumerate(uncs):
        discrete = False
        i_col = i % n_col
        i_row = i // n_col
        ax = axes[i_row, i_col]
        data = x[unc]
        # Categorical columns are drawn as discrete cdfs.
        if data.dtype.name == 'category':
            discrete = True
        plot_individual_cdf(ax, unc, data, y, discrete, ccdf=ccdf)
    # last row might contain empty axis,
    # let's make them disappear
    for j_col in range(i_col + 1, n_col):
        ax = axes[i_row, j_col]
        ax.set_xticklabels([])
        ax.set_xticks([])
        ax.set_yticklabels([])
        ax.set_yticks([])
        sns.despine(ax=ax, top=True, right=True, left=True, bottom=True)
    proxies, labels = build_legend(x, y)
    fig.legend(proxies, labels, "upper center")
    return fig
def make_shell_context():
    """Pre-populate the shell environment when running run.py shell."""
    context = {'app': keeper_app, 'db': db, 'models': models}
    return context
import contextlib
def _MaybeClosing(fileobj):
"""Returns closing context manager, if given fileobj is not None.
If the given fileobj is none, return nullcontext.
"""
return (contextlib.closing if fileobj else NullContext)(fileobj) | 05db3f9168d69c94513c95f0da396500319e079e | 3,626,828 |
def get_project_page(pid, cache_directory=settings.CACHE_DIRECTORY):
    """Get a project page rendered in HTML given a project ID.
    Args:
        pid (int): project ID.
        cache_directory (str): the directory where cached projects are stored.
    Returns:
        A string containing the HTML for the page.
    """
    # Load in the project db, project JSON, studio info, and schema
    project, scratch_data = scrape.get_project(pid, cache_directory)
    if len(project) == 0 or len(scratch_data) == 0:
        message = 'We couldn’t find your project! \
<a href="/project/r/{}">Try again</a>'.format(pid)
        return render_template("project_loader.html", message=message)
    studio = scrape.get_studio(project["studio_id"])
    # Studios attached to a challenge get the full schema-driven page;
    # everything else falls back to empty schema/excerpts/reflection below.
    if "challenge_id" in studio:
        sc = schema.get_schema(studio["challenge_id"])
        # Determine whether there's an error here
        err = False
        if str(studio["challenge_id"]) in project["validation"]:
            project["validation"] = project["validation"][str(studio["challenge_id"])] # yapf: disable
        else:
            err = True
        # Show error page
        if project == {} or scratch_data == {} or studio == {} or sc == {} or err:
            raise NotFound()
        # Prepare helper tools
        scraper = Scraper()  # NOTE(review): scraper/visualizer appear unused here -- confirm
        visualizer = Visualizer()
        # Convert Markdown to HTML with Scratchblocks
        if "text" in sc:
            for key in sc["text"]:
                sc["text"][key] = common.md(sc["text"][key])
        # Get the code excerpt for the projects to be shown
        excerpts = dict()
        examples = get_comparisons(project, sc, 5) + [project]
        for example in examples:
            code, sprite = get_code_excerpt(example, sc)
            excerpts[example["project_id"]] = {
                "author": example["author"],
                "code": code,
                "sprite": sprite
            }
        # Get the saved reflection, if any
        _reflections = scrape.ProjectReflection.objects(
            project_id=pid).order_by("-timestamp")
        try:
            reflection = _reflections.first().to_mongo().to_dict()
            # Only the author (matched via the _gu_uid cookie) may edit.
            reflection["editable"] = True if reflection[
                "gu_uid"] == request.cookies.get("_gu_uid") else False
        # NOTE(review): bare except hides real errors (it covers the
        # "no reflection yet" AttributeError); consider narrowing.
        except:
            reflection = dict()
    else:
        sc = dict()
        excerpts = dict()
        reflection = dict()
    # One prompt variable to take the logic out of the templating language
    prompt = {
        "title":
            sc["title"] if "title" in sc and sc["title"] is not None else
            studio["title"] if "title" in studio else None,
        "description":
            sc["description"] if "description" in sc else
            studio["description"] if "description" in studio else None
    }
    # Choose stats to show
    studio["stats"] = get_studio_stats(sc, studio)
    # Get the feels
    feels = get_feels(randomize=True)
    return render_template("project.html",
                           prompt=prompt,
                           project=project,
                           studio=studio,
                           schema=sc,
                           excerpts=excerpts,
                           feels=feels,
                           reflection=reflection) | bff55a1c6e51742cca264199b4ac669fe4b8b855 | 3,626,829 |
def is_slot_bound(module, device, slot):
    """Checks whether a specific slot in a given device is bound to clevis.

    Return: <boolean> <error> -- (True, None) when a JWE could be read for
    the slot, otherwise (False, err) with the error from get_jwe.
    """
    _unused, err = get_jwe(module, device, slot)
    if err:
        return False, err
    return True, None | c103ae94ef86bad7eb3c3e33818faf20003e18b4 | 3,626,830 |
def to_matplotlib(img):
    """Return a channels-last view of a Bob-format (channels-first) image.

    Works with single images, batches of images, videos, and any higher
    dimensional array whose trailing three axes are ``(c, h, w)``.

    Parameters
    ----------
    img : numpy.ndarray
        N-dimensional array in Bob format ``(..., c, h, w)``.

    Returns
    -------
    numpy.ndarray
        A view of ``img`` shaped ``(..., h, w, c)`` compatible with
        :py:func:`matplotlib.pyplot.imshow`.
    """
    # 1-D / 2-D data carries no channel axis, so there is nothing to move.
    has_channel_axis = img.ndim >= 3
    return np.moveaxis(img, -3, -1) if has_channel_axis else img
# | f769af6d407d16543dc9ec98d8cc35338db47231 | 3,626,831 |
def energy_distance(x, y, **kwargs):
    """
    energy_distance(x, y, *, exponent=1)
    Computes the estimator for the energy distance of the
    random vectors corresponding to :math:`x` and :math:`y`.
    Both random vectors must have the same number of components.
    Parameters
    ----------
    x: array_like
        First random vector. The columns correspond with the individual random
        variables while the rows are individual instances of the random vector.
    y: array_like
        Second random vector. The columns correspond with the individual random
        variables while the rows are individual instances of the random vector.
    exponent: float
        Exponent of the Euclidean distance, in the range :math:`(0, 2)`.
    Returns
    -------
    numpy scalar
        Value of the estimator of the energy distance.
    Examples
    --------
    >>> import numpy as np
    >>> import dcor
    >>> a = np.array([[1, 2, 3, 4],
    ... [5, 6, 7, 8],
    ... [9, 10, 11, 12],
    ... [13, 14, 15, 16]])
    >>> b = np.array([[1, 0, 0, 1],
    ... [0, 1, 1, 1],
    ... [1, 1, 1, 1]])
    >>> dcor.energy_distance(a, a)
    0.0
    >>> dcor.energy_distance(a, b) # doctest: +ELLIPSIS
    20.5780594...
    >>> dcor.energy_distance(b, b)
    0.0
    A different exponent for the Euclidean distance in the range
    :math:`(0, 2)` can be used:
    >>> dcor.energy_distance(a, a, exponent=1.5)
    0.0
    >>> dcor.energy_distance(a, b, exponent=1.5)
    ... # doctest: +ELLIPSIS
    99.7863955...
    >>> dcor.energy_distance(b, b, exponent=1.5)
    0.0
    """
    # Thin public wrapper: every keyword (e.g. exponent) is forwarded to
    # the internal implementation.
    return _energy_distance_imp(x, y, **kwargs) | a36d4277ed5cd9da049f129a2d8fa2b50a062ed3 | 3,626,832 |
import os
import os.path as op
from glob import glob
from warnings import warn


def bids_scan_file_walker(dataset=".", include_types=None, warn_no_files=False):
    """
    Traverse a BIDS dataset and provide a generator interface
    to the imaging files contained within.
    :author: @chrisfilo
    https://github.com/preprocessed-connectomes-project/quality-assessment-prot\
ocol/blob/master/scripts/qap_bids_data_sublist_generator.py
    :param str dataset: path to the BIDS dataset folder.
    :param list(str) include_types: a list of the scan types (i.e.
        subfolder names) to include in the results. Can be any combination
        of "func", "anat", "fmap", "dwi".
    :param bool warn_no_files: issue a warning if no imaging files are found
        for a subject or a session.
    :return: a list containing, for each .nii or .nii.gz file found, the BIDS
        identifying tokens and their values. If a file doesn't have an
        identifying token its key will be None.
    """
    def _no_files_warning(folder):
        # Only warn when the caller opted in via warn_no_files.
        if not warn_no_files:
            return
        warn("No files of requested type(s) found in scan folder: %s"
             % folder, RuntimeWarning, stacklevel=1)

    def _walk_dir_for_prefix(target_dir, prefix):
        # First level of subdirectories whose names start with prefix.
        return [x for x in next(os.walk(target_dir))[1]
                if x.startswith(prefix)]

    def _tokenize_bids_scan_name(scanfile):
        # Split a BIDS filename into its identifying tokens.
        scan_basename = op.splitext(op.split(scanfile)[1])[0]
        # .nii.gz will have .nii leftover
        scan_basename = scan_basename.replace(".nii", "")
        file_bits = scan_basename.split('_')
        # BIDS with non ses-* subfolders given default
        # "single_session" ses.
        file_tokens = {'scanfile': scanfile,
                       'sub': None, 'ses': 'single_session',
                       'acq': None, 'rec': None,
                       'run': None, 'task': None,
                       'modality': file_bits[-1]}
        for bit in file_bits:
            for key in file_tokens.keys():
                if bit.startswith(key):
                    file_tokens[key] = bit
        return file_tokens

    #########
    if include_types is None:
        # include all scan types by default
        include_types = ['func', 'anat', 'fmap', 'dwi']
    subjects = _walk_dir_for_prefix(dataset, 'sub-')
    if len(subjects) == 0:
        # NOTE(review): raising GeneratorExit aborts iteration silently
        # rather than signalling an error -- confirm that is intended.
        raise GeneratorExit("No BIDS subjects found to examine.")
    # for each subject folder, look for scans considering explicitly
    # defined sessions or the implicit "single_session" case.
    for subject in subjects:
        subj_dir = op.join(dataset, subject)
        sessions = _walk_dir_for_prefix(subj_dir, 'ses-')
        for scan_type in include_types:
            # seems easier to consider the case of multi-session vs.
            # single session separately?
            if len(sessions) > 0:
                subject_sessions = [op.join(subject, x)
                                    for x in sessions]
            else:
                subject_sessions = [subject]
            for session in subject_sessions:
                scan_files = glob(op.join(
                    dataset, session, scan_type,
                    '*.nii*'))
                if len(scan_files) == 0:
                    _no_files_warning(session)
                for scan_file in scan_files:
                    yield _tokenize_bids_scan_name(scan_file) | f3a4f3e1c96073e89fd69ff2768570c1f0667f9f | 3,626,833 |
def train_step(net, optim, batch):
    """
    one training step

    Computes loss and gradients with pax, applies the optimizer update,
    then re-applies the pruners so masked weights stay zero after the step.
    Returns the updated (net, optim, loss) triple.
    """
    (loss, net), grads = pax.value_and_grad(loss_fn, has_aux=True)(net, batch)
    net, optim = opax.apply_gradients(net, optim, grads)
    # Re-prune each pruned submodule after the gradient update.
    net = net.replace(rnn=net.gru_pruner(net.rnn))
    net = net.replace(o1=net.o1_pruner(net.o1))
    net = net.replace(o2=net.o2_pruner(net.o2))
    return net, optim, loss | 3f3e9fa1f8487bafd0e0b70a673ef3af989e3dfc | 3,626,834 |
def insert_dim(arg, pos=-1):
    """Insert a length-1 (fake) dimension into *arg* before the pos'th axis.

    ``pos`` follows ``list.insert`` semantics, so the default ``pos=-1``
    places the new axis before the last one, e.g. ``(2, 3) -> (2, 1, 3)``.
    """
    new_shape = list(arg.shape)
    new_shape.insert(pos, 1)
    return arg.reshape(new_shape)
# | 921cd27894df9910dbc12b31db6eb1f73d47f180 | 3,626,835 |
def encode_captions(captions):
    """
    Convert all captions' words into indices.
    Input:
    - captions: dictionary containing image names and list of corresponding captions
    Returns:
    - word_to_idx: dictionary of indices for all words
    - idx_to_word: list containing all words
    - vocab_size: number of words
    """
    # Count occurrences of every word; dict insertion order fixes the
    # vocabulary order (first appearance wins).
    word_counts = {}
    for caption_list in captions.values():
        for caption in caption_list:
            for word in caption.split():
                word_counts[word] = word_counts.get(word, 0) + 1
    # Special tokens always occupy indices 0..2.
    specials = ['<START>', '<END>', '<PAD>']
    idx_to_word = specials + [w for w in word_counts if w not in specials]
    word_to_idx = {word: idx for idx, word in enumerate(idx_to_word)}
    return word_to_idx, idx_to_word, len(idx_to_word)
# | 2ba216c844723b0925b46d0db7bc8afd6ce0f5b4 | 3,626,836 |
import logging


def post_dataset(conn, dataset_name, project_id=None, description=None,
                 across_groups=True):
    """Create a new dataset.
    Parameters
    ----------
    conn : ``omero.gateway.BlitzGateway`` object
        OMERO connection.
    dataset_name : str
        Name of the Dataset being created.
    project_id : int, optional
        Id of Project in which to create the Dataset. If no Project is
        specified, the Dataset will be orphaned.
    description : str, optional
        Description for the new Dataset.
    across_groups : bool, optional
        Defines cross-group behavior of function - set to
        ``False`` to disable it.
    Returns
    -------
    dataset_id : int
        Id of the dataset that has been created.
    Examples
    --------
    # Create a new orphaned Dataset:
    >>> did = post_dataset(conn, "New Dataset")
    >>> did
    234
    # Create a new Dataset in Project:120:
    >>> did = post_dataset(conn, "Child of 120", project_id=120)
    >>> did
    """
    # NOTE(review): across_groups is documented but not referenced in this
    # body -- confirm whether group switching should consult it.
    if type(dataset_name) is not str:
        raise TypeError('Dataset name must be a string')
    if type(description) is not str and description is not None:
        raise TypeError('Dataset description must be a string')
    project = None
    if project_id is not None:
        if type(project_id) is not int:
            raise TypeError('Project ID must be integer')
        project = conn.getObject('Project', project_id)
        if project is not None:
            # Switch into the project's group before creating the dataset.
            ret = set_group(conn, project.getDetails().group.id.val)
            if ret is False:
                return None
        else:
            logging.warning(f'Project {project_id} could not be found '
                            '(check if you have permissions to it)')
            return None
    else:
        default_group = conn.getDefaultGroup(conn.getUser().getId()).getId()
        set_group(conn, default_group)
    dataset = DatasetWrapper(conn, DatasetI())
    dataset.setName(dataset_name)
    if description is not None:
        dataset.setDescription(description)
    dataset.save()
    if project is not None:
        link_datasets_to_project(conn, [dataset.getId()], project_id)
    return dataset.getId() | cd0e57d8184683c403002de085fa5122c1e3458d | 3,626,837 |
def load_stop_words(stop_word_file):
    """
    Utility function to load stop words from a file and return as a list of words
    @param stop_word_file Path and file name of a file containing stop words.
    @return list A list of stop words.
    """
    stop_words = []
    # Use a context manager so the file handle is always closed (the
    # original opened the file without ever closing it).
    with open(stop_word_file) as handle:
        for line in handle:
            # Lines whose first non-blank character is '#' are comments.
            if line.strip()[0:1] != "#":
                for word in line.split():  # in case more than one per line
                    stop_words.append(word)
    return stop_words
# | 8127aeec8db8f7bc87130ea0d1e5faa4998ac86f | 3,626,838 |
def run_gcloud_command(cmd, project_id):
    """Execute a gcloud command and return the output.
    Args:
      cmd (list): a list of strings representing the gcloud command to run
      project_id (string): append `--project {project_id}` to the command. Most
        commands should specify the project ID, for those that don't, explicitly
        set this to None.
    Returns:
      A string, the output from the command execution.
    Raises:
      CalledProcessError: when command execution returns a non-zero return code.
    """
    # FLAGS.gcloud_bin and run_command come from module scope.
    gcloud_cmd = [FLAGS.gcloud_bin] + cmd
    if project_id:
        gcloud_cmd.extend(['--project', project_id])
    # Trailing whitespace/newline from the subprocess output is stripped.
    return run_command(gcloud_cmd, get_output=True).strip() | 324345a3fdf687c3d36711c918060715fedfa79a | 3,626,839 |
def convert_gmx_flow_1_to_2(flow: GmxFlow, width: float) -> GmxFlow:
    """Convert flow data from 'GMX_FLOW_1' to 'GMX_FLOW_2'.
    This changes the field 'M' to represent the mass density instead of
    the total mass in the bin. Thus we also require the width of the system,
    in order to calculate the bin volume.
    If the `version` is already 'GMX_FLOW_2' an unmodified
    copy of the original data is returned.
    """
    converted = flow.copy()
    if converted.version == GmxFlowVersion(1):
        dx, dy = converted.spacing
        bin_volume = dx * dy * width
        try:
            # Mass per bin -> mass density.
            converted.data['M'] /= bin_volume
        except KeyError:
            # No mass field present; nothing to rescale.
            pass
        converted.version = GmxFlowVersion(2)
    return converted | d1e005bf8adc73c27e4454730a744fb6b464100b | 3,626,840 |
def NullFlagHandler(feature):
    """A no-op flag handler: every feature evaluates to False."""
    del feature  # explicitly unused
    return False
# | 7d37ecc8518144b27b43580b7273adf5f68dfdfb | 3,626,841 |
def add_image(axes, path):
    """Add the image given by ``path`` to the plot ``axes``.
    :param axes: represents an individual plot
    :param path: path to the image
    :type axes: matplotlib.pyplot.Axes
    :type path: str
    :return: mpimg.AxesImage
    """
    try:
        img = Image.open(path)
        return axes.imshow(img)
    except ValueError:
        # On failure a message is printed and the function implicitly
        # returns None (callers should not rely on the return value here).
        print("Can't add image to the plot. Check if '{}' is a valid path.".format(path)) | fa574ede75a5f2389380e906090e2d91c92944e9 | 3,626,842 |
from datetime import datetime


def abandonAffaire_reopenParentAffaire_view(request):
    """
    Abandon child_affaire, reopen parent child_affaire and reattribute numbers to parent child_affaire.
    """
    settings = request.registry.settings
    etape_abandon_id = settings['affaire_etape_abandon_id']
    etape_reactivation_id = settings['affaire_etape_reactivation_id']
    child_affaire_id = request.params['affaire_id']
    operateur_id = request.params['operateur_id']
    # get child_affaire
    child_affaire = request.dbsession.query(Affaire).filter(Affaire.id == child_affaire_id).first()
    # get parent_affaire
    # NOTE(review): .first() may return None when no modification links the
    # child -- the attribute access below would then raise AttributeError.
    parent_affaire_id = request.dbsession.query(ModificationAffaire).filter(
        ModificationAffaire.affaire_id_fille == child_affaire_id).first().affaire_id_mere
    parent_affaire = request.dbsession.query(Affaire).filter(Affaire.id == parent_affaire_id).first()
    # reactivate parent-child_affaire if exists and abandon child_affaire
    if parent_affaire is not None:
        parent_affaire.date_cloture = None
        parent_affaire.abandon = False
        child_affaire.date_cloture = datetime.now().date()
        child_affaire.abandon = True
        # get numeros_affaire
        numeros_affaire = request.dbsession.query(AffaireNumero).filter(AffaireNumero.affaire_id == child_affaire_id).all()
        for numaff_child in numeros_affaire:
            # deactivate affaire_numero in child_affaire
            numaff_child.actif = False
            numaff_child.affaire_destination_id = parent_affaire_id
            # reactivate affaire-numero in parent_affaire
            numaff_parent = request.dbsession.query(AffaireNumero).filter(
                AffaireNumero.affaire_id == parent_affaire_id
            ).filter(
                AffaireNumero.numero_id == numaff_child.numero_id
            ).first()
            if numaff_parent is not None:
                numaff_parent.actif = True
                numaff_parent.affaire_destination_id = None
            else:
                # Create it if it doesn't exists
                numaff_parent = AffaireNumero()
                numaff_parent.affaire_id = parent_affaire_id
                numaff_parent.numero_id = numaff_child.numero_id
                numaff_parent.type_id = numaff_child.type_id
                numaff_parent.actif = True
                request.dbsession.add(numaff_parent)
    else:
        raise CustomError(CustomError.RECORD_WITH_ID_NOT_FOUND.format(Affaire.__tablename__, parent_affaire_id))
    # get numeros_relations and update affaire
    numeros_relations = request.dbsession.query(NumeroRelation).filter(NumeroRelation.affaire_id == child_affaire_id).all()
    for numrel in numeros_relations:
        numrel.affaire_id = parent_affaire_id
    # Update etapes
    # One "abandon" step on the child and one "reactivation" step on the parent.
    child_affaire_etape = AffaireEtape()
    child_affaire_etape.affaire_id = child_affaire_id
    child_affaire_etape.operateur_id = operateur_id
    child_affaire_etape.etape_id = etape_abandon_id
    child_affaire_etape.datetime = datetime.now()
    child_affaire_etape.remarque = "Affaire ouverte par erreur. Les numéros sont accessibles dans l'affaire " + str(parent_affaire_id) + "."
    request.dbsession.add(child_affaire_etape)
    parent_affaire_etape = AffaireEtape()
    parent_affaire_etape.affaire_id = parent_affaire_id
    parent_affaire_etape.operateur_id = operateur_id
    parent_affaire_etape.etape_id = etape_reactivation_id
    parent_affaire_etape.datetime = datetime.now()
    parent_affaire_etape.remarque = "Récupération des données de l'affaire " + str(parent_affaire_id) + ", ouverte par erreur."
    request.dbsession.add(parent_affaire_etape)
    return Utils.get_data_save_response(Constant.SUCCESS_SAVE.format(Affaire.__tablename__)) | c2993de78590f708fc4d7e8c0ed08008a21103b6 | 3,626,843 |
from typing import Optional
import logging
import functools


def get_dataset(
        *,
        batch_size,
        eval_batch_size,
        num_shards,
        dtype_str='float32',  # pylint: disable=unused-argument
        shuffle_seed=0,
        rng=None,
        dataset_configs=None,
        dataset_service_address: Optional[str] = None):  # pylint: disable=unused-argument
    """Returns generators for the Wikipedia train and validation sets.
    Args:
      batch_size: int; Determines the train batch size.
      eval_batch_size: int; Determines the evaluation batch size.
      num_shards: int; Number of shards --> batch shape: [num_shards, bs, ...].
      dtype_str: Data type for inputs. Not used.
      shuffle_seed: int; Seed for shuffling the training data.
      rng: JAX rng key, which can be used for augmentation, shuffling, etc.
      dataset_configs: dict; Dataset specific configurations.
      dataset_service_address: If set, will distribute the training dataset using
        the given tf.data service at the given address. Not used.
    Returns:
      A dataset_utils.Dataset() which includes a train_iter, a valid_iter,
      a test_iter, and a dict of meta_data.
    """
    del rng
    assert dataset_configs is not None
    logging.info('Loading train split of the wikibooks dataset.')
    # Mutate the (normally frozen) config under unlocked().
    with dataset_configs.unlocked():
        dataset_configs.train_data_loader.seed = shuffle_seed
        dataset_configs.train_data_loader.is_training = True
        dataset_configs.train_data_loader.cache = False
    train_data_loader = pretrain_dataloader.BertPretrainDataLoader(
        dataset_configs.train_data_loader)
    # One input pipeline per JAX process.
    input_context = tf.distribute.InputContext(
        num_input_pipelines=jax.process_count(),
        input_pipeline_id=jax.process_index(),
        num_replicas_in_sync=jax.process_count())
    train_ds = train_data_loader.load(input_context=input_context).prefetch(
        dataset_configs.get('prefetch_to_host', 2))
    maybe_pad_batches_train = functools.partial(
        dataset_utils.maybe_pad_batch,
        train=True,
        batch_size=batch_size,
        inputs_key='input_word_ids')
    shard_batches = functools.partial(dataset_utils.shard, n_devices=num_shards)
    train_iter = iter(train_ds)
    train_iter = map(dataset_utils.tf_to_numpy, train_iter)
    train_iter = map(maybe_pad_batches_train, train_iter)
    train_iter = map(reduce_next_sentence_label_dimension, train_iter)
    train_iter = map(shard_batches, train_iter)
    if dataset_configs.prefetch_to_device:
        train_iter = jax_utils.prefetch_to_device(
            train_iter, dataset_configs.prefetch_to_device)
    logging.info('Loading validation split of the wikibooks dataset.')
    maybe_pad_batches_eval = functools.partial(
        dataset_utils.maybe_pad_batch,
        train=False,
        batch_size=eval_batch_size,
        inputs_key='input_word_ids')
    with dataset_configs.unlocked():
        dataset_configs.val_data_loader.seed = shuffle_seed
        # Some tricks to make sure that the dataset is repeated but not shuffled.
        dataset_configs.val_data_loader.is_training = True
        dataset_configs.val_data_loader.cache = False
        dataset_configs.val_data_loader.shuffle_buffer_size = 1
    val_data_loader = pretrain_dataloader.BertPretrainDataLoader(
        dataset_configs.val_data_loader)
    val_ds = val_data_loader.load(input_context=input_context).prefetch(
        dataset_configs.get('prefetch_to_host', 2))
    valid_iter = iter(val_ds)
    valid_iter = map(dataset_utils.tf_to_numpy, valid_iter)
    valid_iter = map(maybe_pad_batches_eval, valid_iter)
    valid_iter = map(reduce_next_sentence_label_dimension, valid_iter)
    valid_iter = map(shard_batches, valid_iter)
    if dataset_configs.prefetch_to_device:
        valid_iter = jax_utils.prefetch_to_device(
            valid_iter, dataset_configs.prefetch_to_device)
    input_shape = (-1, dataset_configs.train_data_loader.seq_length)
    input_spec = {
        'input_word_ids': (input_shape, jnp.int32),
        'input_mask': (input_shape, jnp.int32),
        'input_type_ids': (input_shape, jnp.int32),
        'masked_lm_positions': (
            (-1, dataset_configs.train_data_loader.max_predictions_per_seq),
            jnp.int32)
    }
    meta_data = {
        'type_vocab_size': _TYPE_VOCAB_SIZE,
        'vocab_size': _VOCAB_SIZE,
        'input_spec': input_spec,
        # TODO(vlikhosherstov): Put the real value.
        'num_train_examples': _NUM_TRAIN_EXAMPLES,
        'num_eval_examples': _NUM_EVAL_EXAMPLES,
    }
    if dataset_configs.get('extra_meta_data'):
        for k, v in dataset_configs.extra_meta_data.items():
            meta_data[k] = v
    # No test split: the third Dataset slot is None.
    return dataset_utils.Dataset(train_iter, valid_iter, None, meta_data) | 621ac7541489a08511e8e0e2ae4474a1591d3132 | 3,626,844 |
import copy


def find_paths(orbital_graph, starting_node, ending_node, visited_nodes=None):
    """Recursively collect every path from starting_node to ending_node.

    Paths are returned as a list of paths, where each path is a list of
    nodes; an empty list means no valid path exists. Dead ends (nodes
    absent from the graph) and loops (revisiting a node on the current
    trail) are pruned.
    """
    # Copy the visited trail so sibling branches remain independent.
    trail = copy.copy(visited_nodes) if visited_nodes else []
    if starting_node not in orbital_graph:
        return []  # dead end: no outgoing edges recorded
    if starting_node in trail:
        return []  # loop: already walked through this node
    trail.append(starting_node)
    found = []
    for neighbour in orbital_graph[starting_node]:
        if neighbour == ending_node:
            # Target reached -- record the completed trail.
            trail.append(neighbour)
            found.append(trail)
        else:
            found += find_paths(orbital_graph, neighbour, ending_node, trail)
    return found
# | 55a47542c3d70bbc1f5c722c1e87908e10b3d0e5 | 3,626,845 |
def rp_from_filename(filename, split_char=ELT_SPLIT,
                     rp_regex=REGEX_RP):
    """Gets the Rp (proton radius?) label from the file name, returns None if
    not found.
    :param filename: the name of the file to parse
    :param split_char: the character which separates filename elements
    :param rp_regex: the regex that fully matches the rp element
    :return: the Rp (integer) label, if found, otherwise returns None
    """
    # Scan the filename elements from the end, where the Rp tag normally sits.
    return _rp_from_felts(
        reversed(filename_elts_list(filename, split_char)), rp_regex) | e5cae3428e6a7a30cceab779845b127df52c2ad1 | 3,626,846 |
def check_duplication(request):
    """API check_duplication

    Check whether a username ('id') or nickname is already taken.
    Expects POST fields 'check_type' and 'username'; responds 400 when the
    name length is out of the configured range, otherwise returns JSON
    {'idcheck': bool, 'msg': str}.
    """
    check_type = request.POST.get('check_type')
    name = request.POST.get('username')
    # Length limits differ between login ids and display nicknames.
    if check_type == 'id':
        min_limit = settings.ID_MIN_LENGTH
        max_limit = settings.ID_MAX_LENGTH
    else:
        min_limit = settings.NICKNAME_MIN_LENGTH
        max_limit = settings.NICKNAME_MAX_LENGTH
    # Case-insensitive match against either username or first_name.
    q = Q(username__iexact=name) | Q(first_name__iexact=name)
    idcheck = User.objects.filter(q).exists()
    # NOTE(review): len(name) raises TypeError when 'username' is missing
    # from the POST data -- confirm callers always supply it.
    length = len(name)
    if length < min_limit or length > max_limit:
        return JsonResponse({'status': 'false'}, status=400)
    # A user's own current name does not count as a duplicate.
    if request.user.is_authenticated and idcheck:
        if name == request.user.username or name == request.user.first_name:
            idcheck = False
    if idcheck:
        msg = _('Already exist.')
    else:
        msg = _('Available')
    data = {
        'idcheck': idcheck,
        'msg': msg,
    }
    return JsonResponse(data) | 2ef45506ada6b54cc86b1734a0301e6e48bb5ca6 | 3,626,847 |
def _merge_numeric_stats(
        left, right,
        feature_name):
    """Merge two partial numeric statistics and return the merged statistics.

    Sums/counts are added, min/max are combined, and the merged type is
    whichever side has one (they must agree when both are set).
    """
    # Check if the types from the two partial statistics are not compatible.
    # If so, raise an error.
    if (left.type is not None and right.type is not None and
            left.type != right.type):
        raise TypeError('Cannot determine the type of feature %s. '
                        'Found values of types %s and %s.' %
                        (feature_name, left.type, right.type))
    result = _PartialNumericStats()
    result.sum = left.sum + right.sum
    result.sum_of_squares = left.sum_of_squares + right.sum_of_squares
    result.num_zeros = left.num_zeros + right.num_zeros
    result.num_nan = left.num_nan + right.num_nan
    result.min = min(left.min, right.min)
    result.max = max(left.max, right.max)
    result.total_num_values = left.total_num_values + right.total_num_values
    result.type = left.type if left.type is not None else right.type
    return result | 1eb4ea5a425ea70ae4e02da267d2553a8440376b | 3,626,848 |
import logging


def get_pod_names(client, namespace, name):
    """Get pod names from k8s.

    Lists pods in ``namespace`` whose TF_JOB_NAME_LABEL equals ``name``
    and returns the set of their pod names.
    """
    core_api = k8s_client.CoreV1Api(client)
    resp = core_api.list_namespaced_pod(
        namespace, label_selector=to_selector({TF_JOB_NAME_LABEL: name}))
    logging.info("list_namespaced_pod: %s", str(resp))
    pod_names = []
    for pod in resp.items:
        # Guard against pods with incomplete metadata.
        if pod.metadata and pod.metadata.name:
            pod_names.append(pod.metadata.name)
    return set(pod_names) | 6228ed3a596093260c0b17a95201068b2d70c1d6 | 3,626,849 |
def calculate_drawdown(input_series: pd.Series, is_returns: bool = False) -> pd.Series:
    """Calculate the drawdown (MDD) of a historical series.

    Drawdown is measured against the running peak of the cumulative value
    series:
        DD = (current value - rolling maximum) / rolling maximum

    Parameters
    ----------
    input_series: pd.Series
        Input values; prices by default, or period returns when
        ``is_returns`` is True (they are compounded via ``(1 + r).cumprod()``).
    is_returns: bool
        Flag to indicate inputs are returns.

    Returns
    -------
    pd.Series
        Drawdown series (non-positive values).
    """
    values = (1 + input_series).cumprod() if is_returns else input_series
    running_peak = values.cummax()
    return (values - running_peak) / running_peak
# | 95e128f00f3667e5a22bd114074525feb6063e1c | 3,626,850 |
def search_traversal(**kwargs):
    """Search Traversal in Database

    Runs an ArangoDB graph traversal using the options supplied in kwargs
    (start_vertex, direction, depth limits, custom AQL functions, ...),
    translating ArangoDB errors into GraphTraverseException via the
    module-level traverse_err message table.
    """
    db_inst = app.config['ARANGO_CONN']
    db_inst.get_database()
    graph = db_inst.get_graph(kwargs.get('graph_name'))
    try:
        traversal_results = graph.traverse(
            start_vertex=kwargs.get('start_vertex'),
            direction=kwargs.get('direction'),
            item_order=kwargs.get('item_order'),
            strategy=kwargs.get('strategy'),
            order=kwargs.get('order'),
            edge_uniqueness=kwargs.get('edge_uniqueness'),
            vertex_uniqueness=kwargs.get('vertex_uniqueness'),
            max_iter=kwargs.get('max_iter'),
            min_depth=kwargs.get('min_depth'),
            max_depth=kwargs.get('max_depth'),
            init_func=kwargs.get('init_func'),
            sort_func=kwargs.get('sort_func'),
            filter_func=kwargs.get('filter_func'),
            visitor_func=kwargs.get('visitor_func'),
            expander_func=kwargs.get('expander_func')
        )
    except GraphTraverseError as err:
        # Known error codes get their dedicated message; code 1202
        # ("document not found") has a fixed, unformatted message.
        if traverse_err.get(err.error_code):
            if err.error_code == 1202:
                msg = traverse_err.get(1202)
                raise gmap_exceptions.GraphTraverseException(msg)
            raise gmap_exceptions.GraphTraverseException(
                traverse_err.get(err.error_code).format(err.message))
        else:
            raise gmap_exceptions.GraphTraverseException(
                traverse_err.get(0).format(
                    kwargs.get('graph_name'), err.message))
    except Exception as err:
        # Fallback: wrap any unexpected failure in the generic message.
        raise gmap_exceptions.GraphTraverseException(
            traverse_err.get(0).format(kwargs.get('graph_name'), err))
    traversal_results = util.filter_transversal(traversal_results)
    traversal_results.update({'graph': kwargs.get('graph_name')})
    return traversal_results | 971751adb1970a0bead9e632e00181a4bc3914a9 | 3,626,851 |
def find_matching_nodes(search_for, search_in, matches=[]):
    """
    Search Vertex tree 'search_in' for the first isomorphic occurance of the
    Vertex tree search_for
    Return a list of [(x,y)...] for node in search_for (x) matched with
    a pair (y) from search in, such as the two graphs preserve their linking
    vectors. From this, we might be able to determine the wider context,
    and therefore provide a suggestion as to how to compose out search_for,
    in the style of search_in.
    Return None, if a match cannot be made.
    If allow_partial is True, then in the event that a complete set of matches
    cannot be made, return only those nodes for which a twin can be found.
    matches - is s a list of matches to ignore (allowing the callee to 'mine'
    for alternatives).
    """
    # NOTE(review): the mutable-default parameter `matches=[]` is immediately
    # shadowed by the reassignment below, so the documented "matches to
    # ignore" behavior is never honoured -- confirm intent. Likewise the
    # docstring mentions allow_partial / returning None, neither of which
    # this body implements (it returns [] on failure).
    matches = []
    for v1 in search_for:
        # at each root node in search_for, we start to compose a
        # temporary tree, to hold our solution
        temp_tree = None
        found_match = False
        if v1 in [x for (x,y) in matches]:
            # the node v2 has already been matched, so pass
            continue
        for v2 in search_in:
            if v2 in [y for (x,y) in matches]:
                # the node v2 has already been matched, so pass
                continue
            # so we have found a new, unexplored node. Start building a potential solution:
            solution = []
            # fan out the neighbours of both nodes:
            temp_tree = v2.neighbours
            to_match = v1.neighbours
            while len(to_match) > 0:
                vectors_to_match = [v for (v,n) in to_match]
                vectors_temp_tree = [v for (v,n) in temp_tree]
                # we ask; are all the vectors joining the current node of 'search_in'
                # to its neighbours, to be found at the reciprocal point search_in?
                if len(list(filter(lambda x: x in vectors_temp_tree, vectors_to_match))) != 0:
                    # Ok, they match so far. Add each of these neighbours to the expanding solution:
                    for a,b in to_match:
                        for x,y in temp_tree:
                            if a == x:
                                solution.append((b,y))
                            else:
                                # NOTE(review): mutating temp_tree while
                                # iterating it -- verify this pruning is
                                # deliberate.
                                temp_tree.remove((x,y))
                    # now we drill down to the next layer of neighbour nodes on both sides:
                    _temp_tree = []
                    for (v,n) in temp_tree:
                        _temp_tree = _temp_tree + n.neighbours
                    _to_match = []
                    for (v,n) in to_match:
                        _to_match = _to_match + n.neighbours
                    to_match = _to_match
                    temp_tree = _temp_tree
                else:
                    # in this case trees do not match, will not explore this avenue further
                    break
            # at the point that to_match is empty, we have found a complete, matching tree
            if to_match == []:
                found_match = True
                matches = matches + [(v1,v2)] + solution
                break
        if not found_match:
            # Did not find a match anywhere for v1 and its children
            return []
    # 'happy path' outcome, isomorphic match was located
    return matches | 9e6696533f7b5e313075fadade8b42fe6f09f0cf | 3,626,852 |
def getStrategicManagementBodies(project):
    """Returns the strategic management bodies for a given project."""
    # Delegates to the generic lookup with the strategic category constant.
    return getManagementBodies(project, MANAGEMENT_BODY_CATEGORY_STRATEGIC) | 526c60342f348f327436f4e1bdcb5c90c7820cbe | 3,626,853 |
def clip(x, min_value, max_value):
    """Element-wise value clipping.

    Clamps ``x`` into [min_value, max_value]. When max_value < min_value
    the upper bound is raised to min_value; a None max_value means
    unbounded above (np.inf).
    """
    if max_value is not None and max_value < min_value:
        max_value = min_value
    if max_value is None:
        max_value = np.inf
    # Convert the bounds to tensors of x's base dtype before clipping.
    min_value = _to_tensor(min_value, x.dtype.base_dtype)
    max_value = _to_tensor(max_value, x.dtype.base_dtype)
    return tf.clip_by_value(x, min_value, max_value) | 58ba70a6212b2ab3b37f37aa8a4611bab262be81 | 3,626,854 |
import cmd
import subprocess


def dotnet_restore(path=""):
    """Restore the dotnet solution from the root of the project via dotnet restore

    Returns the subprocess return code (0 on success); logs via the
    module-level info/error/success helpers.
    """
    # Build the command as a local list. The original code mutated the
    # imported stdlib 'cmd' module (cmd.append / " ".join(cmd)), which
    # crashes at runtime; this reconstructs the intended dotnet invocation.
    cmd = ["dotnet", "restore"]
    if path:
        cmd.append(path)
    info("Restoring nuget packages (via %s" % " ".join(cmd))
    result = subprocess.run(cmd)
    status = result.returncode
    if status != 0:
        error("Solution failed to restore. See output for details.")
        return status
    success("Dotnet solution restored!")
    return status
# | 7cf78f998c1d9c2bb79a1e28c984fc20f6a8ac28 | 3,626,855 |
def SendMessage(service, user_id, message):
    """Send an email message.
    Args:
      service: Authorized Gmail API service instance.
      user_id: User's email address. The special value "me"
      can be used to indicate the authenticated user.
      message: Message to be sent.
    Returns:
      Sent Message.
    """
    try:
        message = (service.users().messages().send(userId=user_id, body=message).execute())
        print('Message Id: %s' % message['id'])
        return message
    # Python 3 exception syntax: the original used the Python 2-only
    # "except X, e" form, which is a SyntaxError on Python 3.
    except errors.HttpError as error:
        print('An error occurred: %s' % error)
# | 9c8c9985fe80b22a94678c354774ebe0453fe860 | 3,626,856 |
def getPolicy(lunaToken, policyName, network, account_key=''):
    """ Gets a specific policy on a given network in JSON format

    Any network value other than 'staging' is treated as production.
    An optional account switch key is appended as a query parameter.
    """
    session.headers.update({'Luna-Token': lunaToken})
    if network == 'staging':
        get_policy_endpoint = "/imaging/v2/network/staging/policies/" + policyName
    else:
        get_policy_endpoint = "/imaging/v2/network/production/policies/" + policyName
    if account_key != '':
        get_policy_endpoint = get_policy_endpoint + '?accountSwitchKey=' + account_key
    print("Retrieving: " + policyName + " from " + network)
    policyResult = HttpCaller.getResult(get_policy_endpoint)
    return(policyResult) | 52a9c7496813b55e74c19a084a367a5f242a379a | 3,626,857 |
import re


def clean_str(string):
    """
    Tokenization/string cleaning for all datasets except for SST.
    Keeps only CJK characters (U+4E00..U+9FFF): everything else becomes a
    space, runs of whitespace are collapsed, and the result is stripped.
    Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
    """
    # Replace every non-CJK character (punctuation included) with a space...
    only_cjk = re.sub(r"[^\u4e00-\u9fff]", " ", string)
    # ...then squeeze runs of two or more whitespace characters down to one.
    squeezed = re.sub(r"\s{2,}", " ", only_cjk)
    return squeezed.strip()
# | 025a17cfc81217b6115f049694ff205c5a5e93ab | 3,626,858 |
from typing import List
from typing import Tuple
def extract_ops(page: PageObject) -> List[Tuple]:
    """Return all content-stream operators of *page* as a list.

    The page contents are wrapped in a ContentStream when they are not
    one already, then the operations are materialised into a list.
    """
    raw = page.getContents()
    stream = raw if isinstance(raw, ContentStream) else ContentStream(raw, page.pdf)
    return list(stream.operations)
import requests
def get_project_info(project_name, dnac_jwt_token):
    """
    This function will retrieve all templates associated with the project with the name {project_name}
    :param project_name: project name
    :param dnac_jwt_token: DNA C token
    :return: list of all templates, including names and ids
    """
    url = DNAC_URL + '/dna/intent/api/v1/template-programmer/project?name=' + project_name
    headers = {'content-type': 'application/json', 'x-auth-token': dnac_jwt_token}
    # verify=False: DNA Center labs commonly use self-signed certificates.
    response = requests.get(url, headers=headers, verify=False)
    return response.json()[0]['templates']
import google
import os
def main(args):
    """Annotate a PDF document with form-field notes using the Document AI API.

    Runs the input PDF through a Document AI form processor (creating one of
    the requested type if none exists), then writes a copy of the PDF with a
    note annotation placed over every detected form-field name/value pair.

    Args:
        args: parsed CLI namespace; uses ``project_id``,
            ``multi_region_location``, ``form_processor_type``, ``input``
            and ``output`` attributes.

    Returns:
        0 on success, 1 if the input file does not exist.
    """
    if not args.project_id:
        # Fall back to the project of the ambient credentials.
        _, project_id = google.auth.default()
        args.project_id = project_id
    parent = f"projects/{args.project_id}/locations/{args.multi_region_location}"
    client = DocumentProcessorServiceClient()
    processor_id = find_processor_id_of_type(client, parent, args.form_processor_type)
    if processor_id is None:
        print(
            f"no form processor found. "
            f'creating new processor of type "{args.form_processor_type}"',
        )
        processor_id = create_processor(client, parent, args.form_processor_type)
    if not os.path.isfile(os.path.abspath(args.input)):
        print(f"could not find file at {os.path.abspath(args.input)}")
        return 1
    # If an output path is not specified, derive it from the input path.
    if not args.output:
        input_path = os.path.abspath(args.input)
        # Bug fix: str.rstrip(".pdf") strips a character *set* (any trailing
        # '.', 'p', 'd', 'f'), mangling names like "report_pdf.pdf"; remove
        # the suffix explicitly instead.
        if input_path.endswith(".pdf"):
            input_path = input_path[: -len(".pdf")]
        args.output = f"{input_path}_annotated.pdf"
    print("Calling Document AI API...", end="")
    with open(args.input, "rb") as pdf_file:
        document = client.process_document(
            request={
                "name": f"{parent}/processors/{processor_id}",
                "raw_document": {
                    "content": pdf_file.read(),
                    "mime_type": "application/pdf",
                },
            }
        ).document
    original_pdf = pikepdf.Pdf.open(os.path.abspath(args.input))
    annotated_pdf = pikepdf.Pdf.new()
    for page_num, page_info in enumerate(document.pages):
        annotated_pdf.pages.append(original_pdf.pages[page_num])
        print(
            f"Found { len(page_info.form_fields)} form fields on page {page_num + 1}:"
        )
        # Calculate the max "x" and "y" coordinate values for the PDF
        # this uses the PDF's own built in measuring units which need
        # to be used to place annotations
        page_max_x = float(annotated_pdf.pages[page_num].trimbox[2])
        page_max_y = float(annotated_pdf.pages[page_num].trimbox[3])
        page_annotations = []
        for field in page_info.form_fields:
            # Use the normalized vertices of the form fields and the max
            # "x" and "y" coordinates to calculate the position of the
            # annotation using the PDF's built in measuring units.
            # (PDF origin is bottom-left, Document AI's is top-left, hence
            # the page_max_y - ... flips.)
            coord1 = field.field_name.bounding_poly.normalized_vertices[0]
            coord2 = field.field_name.bounding_poly.normalized_vertices[1]
            rect = [
                coord1.x * page_max_x,
                page_max_y - coord1.y * page_max_y,
                coord2.x * page_max_x,
                page_max_y - coord2.y * page_max_y,
            ]
            # Extract the parsed name and values of each field
            # as determined by Document AI's API
            name = layout_to_text(field.field_name, document.text)
            value = layout_to_text(field.field_value, document.text)
            annotation_text = f"{name}: {value}"
            # Create a PDF annotation for this field name value pair
            page_annotations.append(
                pikepdf.Dictionary(
                    Type=pikepdf.Name.Annot,
                    Subtype=pikepdf.Name.Text,
                    Rect=rect,
                    Name=pikepdf.Name.Note,
                    Contents=pikepdf.String(annotation_text),
                    Open=False,
                )
            )
            print(f"adding annotation: {annotation_text}")
        # Add all the annotations for this page
        annotated_pdf.pages[page_num].Annots = annotated_pdf.make_indirect(
            pikepdf.Array(page_annotations)
        )
    print(f"Saving annotated PDF to {args.output}.")
    annotated_pdf.save(
        os.path.join(args.output),
        min_version=original_pdf.pdf_version,
        # Disable annotation modification
        encryption=pikepdf.Encryption(
            owner="", user="", allow=pikepdf.Permissions(modify_annotation=False)
        ),
    )
    print("Done.")
    return 0
import textwrap
import six
def generate(tag_cls):
    """
    generate generates documentation for given wrapper tag class
    :param tag_cls: wrapper_tag class
    :return: rendered documentation string for the class and its writable
        arguments
    """
    doc = textwrap.dedent(tag_cls.__doc__ or '').strip()
    arguments_doc = ""
    for ag, arguments in six.iteritems(ArgumentsGroup.group_arguments(tag_cls)):
        # Bug fix: on Python 3 `filter(...)` returns a lazy iterator which is
        # always truthy, so the empty-group check below never fired (and the
        # iterator could only be consumed once). Materialise it as a list.
        writable = [arg for arg in arguments if not arg.readonly]
        if not writable:
            continue
        rendered_group = ag.render(writable)
        arguments_doc += rendered_group + "\n"
    arguments_doc = arguments_doc.strip()
    # if {arguments} found in documentation it will be replaced with arguments
    # documentation, otherwise it will be appended to documentation
    if '{arguments}' in doc:
        doc = doc.replace('{arguments}', arguments_doc)
    else:
        doc = doc + "\n\n" + arguments_doc
    return doc
def load_data(database_filepath):
    """Load the Messages table from a SQLite database into memory.

    Returns a tuple ``(X, Y, category_names)`` where ``X`` is the message
    text series, ``Y`` the category label columns, and ``category_names``
    the label column index.
    """
    engine = create_engine(f'sqlite:///{database_filepath}')
    frame = pd.read_sql_table("Messages", engine)
    features = frame["message"]
    labels = frame.drop(["message", "id", "original", "genre"], axis=1)
    return features, labels, labels.columns
def column_thresh(C, eps):
    """Soft-threshold the columns of C in place.

    Each column whose 2-norm is below ``eps`` is zeroed out; every other
    column is shrunk toward zero by ``eps`` along its own direction
    (standard column-wise soft thresholding).

    Returns the (mutated) array C.
    """
    for col in range(C.shape[1]):
        col_norm = la.norm(C[:, col], 2)
        if col_norm < eps:
            C[:, col] = 0
        else:
            C[:, col] = C[:, col] - eps * C[:, col] / col_norm
    return C
def distance_between_points(p1, p2):
    """Compute the Euclidean distance between two points.

    Each point is a mapping with numeric 'x' and 'y' entries.

    Returns:
        float: distance value
    """
    dx = p1['x'] - p2['x']
    dy = p1['y'] - p2['y']
    return (dx * dx + dy * dy) ** 0.5
def sigmoid(z):
    """Compute the logistic sigmoid 1 / (1 + e^(-z)).

    Arguments:
        z -- A scalar or numpy array of any size.

    Return:
        s -- sigmoid(z), elementwise, same shape as z.
    """
    return 1 / (1 + np.exp(-z))
from typing import Tuple
from typing import Dict
import copy
def _decompose_expressions(circ: Circuit) -> Tuple[Circuit, bool]:
    """Rewrite a circuit command-wise, decomposing ClassicalExpBox.

    Walks `circ` command by command, replacing each ClassicalExpBox with an
    equivalent sequence of primitive classical gates written onto a fresh
    circuit, reusing temporary bits/registers via heap allocators.

    Returns:
        A (new_circuit, modified) pair; `modified` is True iff at least one
        ClassicalExpBox was decomposed.
    """
    bit_heap = BitHeap()
    reg_heap = RegHeap()
    # add already used heap variables to heaps, so fresh temporaries never
    # collide with temporaries already present in the input circuit
    for b in circ.bits:
        if b.reg_name == _TEMP_BIT_NAME:
            bit_heap._heap_vars.add(b)
        elif b.reg_name.startswith(_TEMP_BIT_REG_BASE):
            reg_heap._heap_vars.add(BitRegister(b.reg_name, _TEMP_REG_SIZE))
    newcirc = Circuit(0, name=circ.name)
    for qb in circ.qubits:
        newcirc.add_qubit(qb)
    for cb in circ.bits:
        # lose all temporary bits, add back as required later
        if not (
            cb.reg_name.startswith(_TEMP_BIT_NAME)
            or cb.reg_name.startswith(_TEMP_BIT_REG_BASE)
        ):
            newcirc.add_bit(cb)
    # recursive functions for converting expressions to gates
    bit_recursive_walk = _gen_walk(Bit, newcirc, bit_heap)
    reg_recursive_walk = _gen_walk(BitRegister, newcirc, reg_heap)
    # targets of predicates that need to be relabelled
    # (maps original expression outputs to the bit/register now holding them)
    replace_targets: Dict[Variable, Variable] = dict()
    modified = False
    for command in circ:
        op = command.op
        optype = op.type
        args = command.args
        kwargs = dict()
        if optype == OpType.ConditionalGate:
            bits = args[: op.width]
            # check if conditional on previously decomposed expression
            if len(bits) == 1 and bits[0] in replace_targets:
                # this op should encode comparison and value
                assert op.value in (0, 1)
                replace_bit = replace_targets[bits[0]]
                # temporary condition bit is available for reuse
                bit_heap.push(replace_bit)
                # write new conditional op
                args = args[op.width :]
                kwargs = {"condition_bits": [replace_bit], "condition_value": op.value}
                op = op.op
        elif optype == OpType.RangePredicate:
            target = args[-1]
            newcirc.add_bit(target, reject_dups=False)
            temp_reg = temp_reg_in_args(args)
            # ensure predicate is reading from correct output register
            if temp_reg in replace_targets:
                new_target = replace_targets[temp_reg]
                for i, a in enumerate(args):
                    if a.reg_name == temp_reg.name:
                        args[i] = Bit(new_target.name, a.index[0])
            # operations conditional on this bit should remain so
            replace_targets[target] = target
        elif optype == OpType.ClassicalExpBox:
            pred_exp = copy.deepcopy(op.get_exp())
            # copied as it will be modified in place
            if op.get_n_o() == 1:
                # single output bit: walk the bit-level logic expression
                assert isinstance(pred_exp, BitLogicExp)
                target = args[-1]
                bit_heap.push(target)
                comp_bit = bit_recursive_walk(pred_exp)
                replace_targets[target] = comp_bit
            else:
                # multi-bit output: walk at the register level
                temp_reg = temp_reg_in_args(args)
                reg_heap.push(temp_reg)
                comp_reg = reg_recursive_walk(pred_exp)
                replace_targets[temp_reg] = comp_reg
            modified = True
            continue
        if optype == OpType.Barrier:
            # add_gate doesn't work for metaops like barrier
            newcirc.add_barrier(args)
        else:
            newcirc.add_gate(op, args, **kwargs)
    return newcirc, modified
def get_trailing_app_metrics(args):
    """
    Returns trailing app_name metrics for a given time period.

    Args:
        args: dict The parsed args from the request
        args.limit: number The max number of apps to return
        args.time_range: one of "week", "month", "all_time"

    Returns:
        [{ name: string, count: number }, ...]
    """
    read_db = db_session.get_db_read_replica()
    with read_db.scoped_session() as session:
        return _get_trailing_app_metrics(session, args)
def get_all_related_objects(opts):
    """
    Django 1.8 changed meta api, see
    https://docs.djangoproject.com/en/1.8/ref/models/meta/#migrating-old-meta-api
    https://code.djangoproject.com/ticket/12663
    https://github.com/django/django/pull/3848
    :param opts: Options instance
    :return: list of relations except many-to-many ones
    """
    if django.VERSION >= (1, 9):
        # New meta API: filter m2m relations out of related_objects.
        return [rel for rel in opts.related_objects if not rel.field.many_to_many]
    # Legacy meta API (pre-1.9).
    return opts.get_all_related_objects()
import fastapi
async def fetch_dialog(
    customer_id: str,
    dialog_id: str,
    db: motor_asyncio.AsyncIOMotorClient = fastapi.Depends(mongodb.get_database),
) -> utils.OrjsonResponse:
    """
    Fetch a dialog.

    - **customer_id**: customer id of the dialog to return
    - **dialog_id**: dialog id of the dialog to return

    Thin route handler: lookup/validation logic lives in dialog_controller;
    the database handle is injected by FastAPI via Depends.
    """
    return await dialog_controller.fetch_dialog(db, customer_id, dialog_id)
def _get(pseudodict, key, single=True):
"""Helper method for getting values from "multi-dict"s"""
matches = [item[1] for item in pseudodict if item[0] == key]
if single:
return matches[0]
else:
return matches | f68156535d897dd719b05d675e66cadc284ce1a3 | 3,626,871 |
from typing import Counter
def guess_domain(tree, blacklist=_DOMAIN_BLACKLIST, get_domain=get_domain):
    """ Return most common domain not in a black list. """
    hrefs = tree.xpath('//*/@href')
    candidates = [d for d in (get_domain(h) for h in hrefs)
                  if d and d not in blacklist]
    if not candidates:
        return ''  # unknown
    counts = Counter(candidates)
    top_count = counts.most_common(1)[0][1]
    # Break ties deterministically by lexicographic order.
    return min(domain for domain, cnt in counts.items() if cnt == top_count)
from xmodule.modulestore.django import modulestore
from openedx.core.djangoapps.content.block_structure.models import BlockStructureModel
from openedx.core.djangoapps.content.block_structure.exceptions import BlockStructureNotFound
def get_course_last_published(course_key):
    """
    Look up when this course was last published via its stored block structure.

    Args:
        course_key: a CourseKey

    Returns: The datetime the course was last published at, converted into
        text, or None, if there's no record of the last time this course
        was published.
    """
    # Import is placed here to avoid model import at project startup.
    store = modulestore()
    course_usage_key = store.make_course_usage_key(course_key)
    try:
        structure = BlockStructureModel.get(course_usage_key)
    except BlockStructureNotFound:
        return None
    return str(structure.modified)
def rgb_to_hex(rgb_triplet):
    """
    Convert a 3-tuple of integers, suitable for use in an ``rgb()``
    color triplet, to a normalized hexadecimal value for that color.

    Examples:
    >>> rgb_to_hex((255, 255, 255))
    '#ffffff'
    >>> rgb_to_hex((0, 0, 128))
    '#000080'
    """
    red, green, blue = normalize_integer_triplet(rgb_triplet)
    return '#{:02x}{:02x}{:02x}'.format(red, green, blue)
def dec2stringTime(decim, precision=5):
    """ Convert a decimal time or coordinate to a formatted string.

    Parameters
    ----------
    decim : int, float
    precision : int

    Returns
    -------
    String formatted HH:MM:SS.SSSSS
    """
    sexagesimal_parts = dec2sex(decim)
    return hms2stringTime(*sexagesimal_parts, precision=precision)
def getCartShape(dimension, communicator=None):
    """
    Returns :samp:`getCartShapeForSize(dimension, communicator.Get_size())`.

    :type dimension: int
    :param dimension: Spatial dimension for returned cartesian layout.
    :type communicator: :obj:`mpi4py.MPI.Comm`
    :param communicator: If :samp:`None`, uses :obj:`world`.
    :rtype: list
    :return: :samp:`getCartShapeForSize(dimension, communicator.Get_size())`
    """
    # Idiom fix: compare against None with "is"/"is not" rather than
    # "=="/"!=", which would invoke arbitrary __eq__ overloads on Comm objects.
    if communicator is None:
        communicator = world
    if communicator is not None:
        commSz = communicator.Get_size()
    else:
        # No communicator and no world communicator: behave as a single process.
        commSz = 1
    return getCartShapeForSize(dimension, commSz)
def gumbel_log_survival(x):
    """Returns log P(g > x) for a standard Gumbel g.

    log P(g > x) = log(1 - P(g < x)) = log(1 - exp(-exp(-x))). For large x the
    exact formula underflows, so a truncated series expansion in exp(-x) is
    used instead; the implementation is more numerically robust than a naive
    evaluation of the formula.

    Args:
        x: The cutoff Gumbel value (scalar or numpy array).
    """
    # Adapted from
    # https://gist.github.com/wouterkool/a3bb2aae8d6a80f985daae95252a8aa8.
    neg_exp = np.exp(-x)
    series = -x - neg_exp / 2 + neg_exp ** 2 / 24 - neg_exp ** 4 / 2880
    exact = np.log(-np.expm1(-np.exp(-x)))
    return np.where(x >= 10, series, exact)
import platform
def platform_is(requested_platform: str) -> bool:
    """
    Compare requested platform with current platform.

    Common platforms:

    - Win / Windows
    - Mac / macOS / Darwin
    - Linux
    - Unix (Mac, SunOS, BSD unix's)
    - *nix / posix (Not Windows)

    :return: True if current platform matches requested platform.
    :return: False otherwise.
    """
    current_platform = platform.system()
    return platform_matches(requested_platform, current_platform)
def prepare_bert(content, max_len, bow_vocab_size=1000, vectorizer=None, ctx=mx.cpu()):
    """
    Utility function to take text content (e.g. list of document strings), a maximum sequence
    length and vocabulary size, returning a data_train object that can be used
    by a SeqBowEstimator object for the call to fit_with_validation. Also returns
    the BOW matrix as a SciPy sparse matrix along with the BOW vocabulary.

    Returns:
        Tuple of (data_train, X, tf_vectorizer, bert_base, bert_vocab).

    NOTE(review): the default ``ctx=mx.cpu()`` is evaluated once at function
    definition time and shared across calls — confirm this is intended.
    """
    # BERT token ids, sequence lengths and segment ids for each document.
    x_ids, x_val_lens, x_segs, bert_base, bert_vocab, _ = _load_dataset_bert(content, 0, max_len, ctx)
    # Reuse a caller-supplied vectorizer (transform only) or fit a fresh one.
    tf_vectorizer = vectorizer or TMNTVectorizer(vocab_size = bow_vocab_size)
    X, _ = tf_vectorizer.transform(content) if vectorizer else tf_vectorizer.fit_transform(content)
    data_train = gluon.data.ArrayDataset(
        mx.nd.array(x_ids, dtype='int32'),
        mx.nd.array(x_val_lens, dtype='int32'),
        mx.nd.array(x_segs, dtype='int32'),
        mx.nd.sparse.csr_matrix(X, dtype='float32').tostype('default'))
    return data_train, X, tf_vectorizer, bert_base, bert_vocab
import Qconfig
import functools
import unittest
import os
def requires_qe_access(func):
    """
    Decorator that signals that the test uses the online API:

    * determines if the test should be skipped by checking environment
      variables.
    * if the test is not skipped, it reads `QE_TOKEN` and `QE_URL` from
      `Qconfig.py` or from environment variables.
    * if the test is not skipped, it appends `QE_TOKEN` and `QE_URL` as
      arguments to the test function.

    Args:
        func (callable): test function to be decorated.

    Returns:
        callable: the decorated function.
    """
    @functools.wraps(func)
    def _(*args, **kwargs):
        # pylint: disable=invalid-name
        if SKIP_ONLINE_TESTS:
            raise unittest.SkipTest('Skipping online tests')
        # Try to read the variables from Qconfig.
        # NOTE(review): Qconfig is imported at module scope here, so this
        # except ImportError can only fire if that import is made optional
        # elsewhere (e.g. a lazy/guarded import) — verify.
        try:
            QE_TOKEN = Qconfig.APItoken
            QE_URL = Qconfig.config["url"]
        except ImportError:
            # Try to read them from environment variables (ie. Travis).
            QE_TOKEN = os.getenv('QE_TOKEN')
            QE_URL = os.getenv('QE_URL')
        if not QE_TOKEN or not QE_URL:
            raise Exception(
                'Could not locate a valid "Qconfig.py" file nor read the QE '
                'values from the environment')
        # Inject the credentials as keyword arguments for the test body.
        kwargs['QE_TOKEN'] = QE_TOKEN
        kwargs['QE_URL'] = QE_URL
        return func(*args, **kwargs)
    return _
def to_homogeneous(t, is_point):
    """Makes a homogeneous space tensor given a tensor with ultimate coordinates.

    Args:
        t: Tensor with shape [..., K], where t is a tensor of points in
            K-dimensional space.
        is_point: Boolean. True for points, false for directions

    Returns:
        Tensor with shape [..., K+1]. t padded to be homogeneous.
    """
    # Points get a trailing 1, directions a trailing 0.
    pad_value = 1 if is_point else 0
    rank = len(t.get_shape().as_list())
    # Pad only the last axis, and only at its end.
    paddings = [[0, 0] for _ in range(rank)]
    paddings[-1][1] = 1
    return tf.pad(
        t, tf.constant(paddings), mode='CONSTANT', constant_values=pad_value)
import zipfile
def name_from_archive(archive_path):
    """Return the "uuid" attribute of the manifest.xml root in the archive.

    Args:
        archive_path: path to a zip archive containing a manifest.xml.

    Returns:
        The uuid string, or None if the manifest root has no uuid attribute.
    """
    # Fix: the ZipFile was previously never closed, leaking the file handle;
    # use a context manager so it is released deterministically.
    with zipfile.ZipFile(archive_path, allowZip64=True) as archive:
        xml_data = archive.read("manifest.xml")
    elem = etree.fromstring(xml_data)
    return elem.get("uuid")
import glob
def datedfile(filename,date):
    """ select file based on observation date and latest version

    Parameters
    ----------
    filename: text file name pattern, including "yyyymmdd_vnn" place holder for date and version
    date: yyyymmdd of observation

    Returns: file name of the latest-version file whose date is <= the
        observation date; falls back to the first (oldest) file if every
        file is dated after the observation, or "" if no file matches.
    """
    # Sorting the glob results orders files by date, then by version.
    filelist = sorted(glob.glob(filename.replace('yyyymmdd_vnn','????????_v??')))
    if len(filelist)==0: return ""
    # Slice the yyyymmdd portion out of each matched file name.
    dateoffs = filename.find('yyyymmdd')
    datelist = [file[dateoffs:dateoffs+8] for file in filelist]
    file = filelist[0]
    for (f,fdate) in enumerate(datelist):
        # Skip files dated after the observation.
        if date < fdate: continue
        # Among files sharing this date, keep the last (highest version).
        for (v,vdate) in enumerate(datelist[f:]):
            if vdate > fdate: continue
            file = filelist[f+v]
    return file
import random
import string
def random_user(n):
    """Generate a random user id consisting of n lowercase ASCII letters."""
    return "".join(random.choice(string.ascii_lowercase) for _ in range(n))
def playerStandings():
    """Returns a list of the players and their win records, sorted by wins.

    The first entry in the list should be the player in first place,
    or a player tied for first place if there is currently a tie.

    Returns:
      A list of tuples, each of which contains (id, name, wins, matches):
        id: the player's unique id (assigned by the database)
        name: the player's full name (as registered)
        wins: the number of matches the player has won
        matches: the number of matches the player has played
    """
    # The sorting-by-wins logic lives in the standings_view database view.
    with get_cursor() as cur:
        cur.execute("SELECT * FROM standings_view;")
        rows = cur.fetchall()
    return rows
import logging
def load_statistics(
    input_path: Text) -> statistics_pb2.DatasetFeatureStatisticsList:
    """Load a data statistics proto from file.

    The file may be a one-record TFRecord file or a plain file containing
    the statistics proto in Proto Text Format; TFRecord is attempted first.

    Args:
        input_path: Data statistics file path.

    Returns:
        A DatasetFeatureStatisticsList proto.

    Raises:
        IOError: If the input path does not exist.
    """
    if not tf.io.gfile.exists(input_path):
        raise IOError('Invalid input path {}.'.format(input_path))
    try:
        stats = load_stats_tfrecord(input_path)
    except Exception:  # pylint: disable=broad-except
        logging.info('File %s did not look like a TFRecord. Try reading as a plain '
                     'file.', input_path)
        stats = load_stats_text(input_path)
    return stats
from typing import Counter
def get_counter(request):
    """
    Get the Counter object associated with a request.

    Raise AjaxError if session is invalid or counter is not found.
    """
    if "counter" not in request.session:
        raise AjaxError(RET_UNAUTHORIZED, _(u"Not logged in."))
    try:
        return Counter.objects.get(pk=request.session["counter"])
    except Counter.DoesNotExist:
        raise AjaxError(
            RET_UNAUTHORIZED,
            _(u"Counter has gone missing."),
        )
def _get_parse_input(parse_args, args_in, dict_in):
"""Return default for parse_input.
This is to decide if context_parser should run or not.
To make it easy on an API consumer, default behavior is ALWAYS to run
parser UNLESS dict_in initializes context and there is no args_in.
If dict_in specified, but no args_in: False
If dict_in specified, AND args_in too: True
If no dict_in specified, but args_in is: True
If no dict_in AND no args_in: True
If parse_args explicitly set, always honor its value.
Args:
parse_args (bool): Whether to run context parser.
args_in (list[str]): String arguments as passed from the cli.
dict_in (dict): Initialize context with this dict.
Returns:
Boolean. True if should parse input.
"""
if parse_args is None:
return not (args_in is None and dict_in is not None)
return parse_args | 64dcfd32a3d9f66749a27d4b26bd5fb3a66edf28 | 3,626,888 |
from typing import Dict
from typing import List
import logging
import re
def get_haiku(
    text: str,
    inflect_p,
    pronounce_dict: Dict,
    syllable_dict: Dict,
    emoticons_list: List,
    guess_syl_method: str,
) -> str:
    """Attempt to turn a string into a haiku.

    Tokens are accumulated into three lines whose cumulative syllable counts
    must hit exactly 5, 12 and 17. Returns the haiku (lines joined with
    newlines) if able, otherwise returns empty string.

    Inspired by https://github.com/tomwardill/python-haiku/blob/master/haiku_checker.py
    """
    # Cumulative syllable targets at the end of lines 1, 2 and 3.
    haiku_form = [5, 12, 17]
    haiku = [[] for _ in range(len(haiku_form))]
    syllable_count = 0
    haiku_line = 0
    haiku_line_prev = 0
    text_split = text.split()
    # Add tokens to create potential haiku
    for i, token in enumerate(text_split):
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug(f"Original token: {token}")
        # Add tokens with no syllables (punctuation, emoji)) to the end of the
        # previous line instead of the start of the current line
        if re.findall(r"[^\w']", token) and (
            count_syllables(
                token,
                inflect_p,
                pronounce_dict,
                syllable_dict,
                emoticons_list,
                guess_syl_method,
            )
            == 0
        ):
            if haiku_line_prev == haiku_line:
                haiku[haiku_line].append(token)
            else:
                haiku[haiku_line - 1].append(token)
            continue
        else:
            # Add token to this line of the potential haiku
            haiku[haiku_line].append(token)
            # note what line was being worked on for this token
            haiku_line_prev = haiku_line
        # Count number of syllables for this token
        syllable_count += count_syllables(
            token,
            inflect_p,
            pronounce_dict,
            syllable_dict,
            emoticons_list,
            guess_syl_method,
        )
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug(f"{syllable_count} syllables counted total")
        if syllable_count == haiku_form[haiku_line]:
            # Reached exactly the number of syllables for this line, go to next line
            haiku_line += 1
        if (
            i < len(text_split) - 1
            and haiku_line >= len(haiku_form)
            and (
                count_syllables(
                    " ".join(text_split[i + 1 :]),
                    inflect_p,
                    pronounce_dict,
                    syllable_dict,
                    emoticons_list,
                    guess_syl_method,
                )
                > 0
            )
        ):
            # There are syllables in the remaining tokens to check,
            # but have reached the number of lines in a haiku.
            # Therefore not a haiku coincidence!
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug(
                    f"Not a haiku because are more lines to check: {' '.join(text_split[i + 1:])}"
                )
            return ""
    if haiku_line == len(haiku_form):
        # Reached the end, and found the right number of lines. Haiku coincidence!
        return ["\n".join([" ".join(line) for line in haiku])][0]
    else:
        # Did not find the right number of lines. Not a haiku coincidence!
        return ""
import argparse
def get_parser():
    """
    Creates and returns the argument parser for jExam

    Returns:
        ``argparse.ArgumentParser``: the argument parser for jExam
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("master", type=str, help="Path to exam master notebook")
    arg_parser.add_argument(
        "result", nargs="?", default="dist", help="Path at which to write output notebooks"
    )
    arg_parser.add_argument(
        "-f", "--format", type=str, default="otter", help="Name of autograder format; 'otter' or 'ok'"
    )
    arg_parser.add_argument(
        "-s", "--seed", type=int, default=None, help="Random seed for NumPy to run before execution"
    )
    arg_parser.add_argument(
        "-q", "--quiet", default=False, action="store_true", help="Run without printing status"
    )
    return arg_parser
def is_average_pooling(layer):
    """Checks if layer is an average-pooling layer (local or global)."""
    average_pooling_types = (
        keras_layers.AveragePooling1D,
        keras_layers.AveragePooling2D,
        keras_layers.AveragePooling3D,
        keras_layers.GlobalAveragePooling1D,
        keras_layers.GlobalAveragePooling2D,
        keras_layers.GlobalAveragePooling3D,
    )
    return isinstance(layer, average_pooling_types)
import os
def find_source_filename(source_name, dir_path):
    """Find the filename matching the source/module name
    in the specified path. For example searching for "queue"
    might return "queue.py" or "queue.pyc".

    Args:
        source_name: module name without extension.
        dir_path: directory to search in.

    Returns:
        The first existing candidate path (".py" preferred over ".pyc"),
        or None if neither exists.
    """
    # Bug fix: the second extension was "pyc" (missing dot), so compiled
    # modules like "queue.pyc" could never be found.
    source_filenames = [
        os.path.join(dir_path, source_name + ext) for
        ext in (".py", ".pyc")]
    for source_filename in source_filenames:
        if os.path.exists(source_filename):
            return source_filename
    return None
def MergeDictsRecursively(original_dict, merging_dict):
    """
    Merges two dictionaries by iterating over both of their keys and returning the merge
    of each dict contained within both dictionaries.
    The outer dict is also merged.

    Keys present in both dicts with non-dict values keep the value from
    merging_dict (merging wins on leaf conflicts).

    ATTENTION: The :param(merging_dict) is modified in the process!

    :param dict original_dict
    :param dict merging_dict
    """
    items = iter(original_dict.items())
    for key, value in items:
        try:
            other_value = merging_dict[key]
            MergeDictsRecursively(value, other_value)
            # Sub-dict fully merged into original; drop it so the final
            # update() below does not overwrite the merged result.
            del merging_dict[key]
        except KeyError:
            # Key only in original_dict: nothing to merge.
            continue
        except TypeError:
            # Value pair is not dict-like; leave the key in merging_dict so
            # update() overwrites the original leaf value below.
            continue
        except AttributeError:
            # Same as TypeError: non-dict values have no .items().
            continue
    try:
        original_dict.update(merging_dict)
    except ValueError:
        # update() raises ValueError when merging_dict is a malformed
        # sequence of pairs rather than a mapping.
        raise TypeError(
            'Wrong types passed. Expecting two dictionaries, got: "%s" and "%s"'
            % (type(original_dict).__name__, type(merging_dict).__name__)
        )
    return original_dict
def eq_assoc(u, v, eq=core.eq, n=None):
    """ Goal for associative equality

    Succeeds when u and v unify directly, or when their operators are
    associative and some re-grouping of arguments makes them unify.

    >>> from logpy import run, var, fact
    >>> from logpy.assoccomm import eq_assoc as eq

    >>> fact(commutative, 'add')    # declare that 'add' is commutative
    >>> fact(associative, 'add')    # declare that 'add' is associative

    >>> x = var()
    >>> run(0, x, eq(('add', 1, 2, 3), ('add', 1, x)))
    (('add', 2, 3),)
    """
    uop, _ = op_args(u)
    vop, _ = op_args(v)
    if uop and vop:
        # Both sides are operator terms: either plain unification, or the
        # operators match, are associative, and re-grouped unification works.
        return conde([(core.eq, u, v)], [(eq, uop, vop), (associative, uop),
                                         lambda s: assocunify(u, v, s, eq, n)])
    if uop or vop:
        # Exactly one side is an operator term; normalise so u carries it.
        if vop:
            uop, vop = vop, uop
            v, u = u, v
        return conde([(core.eq, u, v)], [(associative, uop),
                                         lambda s: assocunify(u, v, s, eq, n)])
    # Neither side is an operator term: plain unification goal.
    return (core.eq, u, v)
import six
def printer(func=None, **options):
    """ Decorator used to print whatever text is returned in the caller
    function.

    When a list or a tuple are returned, then the contents are iterated
    and printed.

    Options:
        dedent - whether or not to dedent the text (default: True)
    """
    if not func:
        # Called with options only: return a decorator awaiting the function.
        def deferred(inner):
            return printer(inner, **options)
        return deferred

    def decorator(*args, **kwargs):
        result = func(*args, **kwargs)
        if not result:
            # Nothing to print
            return
        dedent_text = options.get('dedent', True)
        if isinstance(result, six.text_type):
            # Print single text/var
            tprint(result, dedent_text)
        elif isinstance(result, (tuple, list)):
            # Iterate and print all
            for text in result:
                tprint(text, dedent_text)
    return decorator
import logging
import sys
import time
def stdoutlogger(name, level=logging.INFO):
    """
    Return a standard python logger with a stdout handler attached and using a prefix
    format that will make logging consistent between scripts.

    Args:
        name: logger name (typically the module or script name).
        level: logging level applied to the logger (default: logging.INFO).

    Returns:
        logging.Logger: configured logger writing to sys.stdout.
    """
    logger = logging.getLogger(name)
    logger.setLevel(level)
    # Fix: the original comment claimed messages go to sys.stderr, but the
    # handler is (intentionally) built on sys.stdout.
    streamHandler = logging.StreamHandler(sys.stdout)
    formatter = logging.Formatter("%(asctime)s - %(filename)s - %(lineno)d - %(levelname)s - %(message)s")
    # Timestamps in UTC so logs from different hosts/timezones line up.
    formatter.converter = time.gmtime
    streamHandler.setFormatter(formatter)
    logger.addHandler(streamHandler)
    return logger
import argparse
def parse_args():
    """
    Parse command line arguments for CLI

    :return: namespace containing the arguments passed.
    """
    cli = argparse.ArgumentParser()
    cli.add_argument(
        '--login',
        type=str,
        required=True,
        help="Full path to file containing JSON with DB login credentials",
    )
    # All remaining flags are optional string filters with identical shape.
    optional_filters = (
        ('--project_id', "Project ID substring (first part of dataset ID)"),
        ('--microscope', "Substring of microscope column"),
        ('--start_date', "Find >= dates in date_time column"),
        ('--end_date', "Find <= dates in date_time column"),
        ('--description', "Find substring in description column"),
    )
    for flag, help_text in optional_filters:
        cli.add_argument(flag, type=str, default=None, help=help_text)
    return cli.parse_args()
from StringIO import StringIO
from io import StringIO
def strio():
    """Return a new StringIO instance, working on both Python 2 and 3.

    This was difficult to get right in doctests when porting to Python 3.

    Fix: the original ``try:`` had an empty body (the Python 2 import was
    lost), which is a SyntaxError; restore the import fallback inside it.
    """
    try:
        from StringIO import StringIO  # Python 2
    except ImportError:
        from io import StringIO  # Python 3
    return StringIO()
def getJamLengthMeters(detID):
    """getJamLengthMeters(string) -> double

    Returns the jam length in meters within the last simulation step.
    """
    # Delegates to the generic TraCI getter using the JAM_LENGTH_METERS
    # variable id for the detector with the given ID.
    return _getUniversal(tc.JAM_LENGTH_METERS, detID)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.