content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def export_ruptures_csv(ekey, dstore):
    """Export the ruptures of a calculation as a tab-separated CSV file.

    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    :returns: a list with the path of the exported file
        (empty for scenario calculations, which have nothing to export)
    """
    oq = dstore['oqparam']
    if 'scenario' in oq.calculation_mode:
        # scenario calculations carry no stochastic rupture information
        return []
    dest = dstore.export_path('ruptures.csv')
    header = ('rupid multiplicity mag centroid_lon centroid_lat centroid_depth'
              ' trt strike dip rake boundary').split()
    csm_info = dstore['csm_info']
    ruptures_by_grp = get_ruptures_by_grp(dstore)
    rows = []
    # walk the source groups in a deterministic order
    for grp_id, trt in sorted(csm_info.grp_by("trt").items()):
        rdata = calc.RuptureData(trt, csm_info.get_gsims(grp_id))
        for rec in rdata.to_array(ruptures_by_grp.get(grp_id, [])):
            rows.append((rec['rup_id'], rec['multiplicity'], rec['mag'],
                         rec['lon'], rec['lat'], rec['depth'],
                         trt, rec['strike'], rec['dip'], rec['rake'],
                         rec['boundary']))
    rows.sort()  # by rupture serial
    comment = 'investigation_time=%s, ses_per_logic_tree_path=%s' % (
        oq.investigation_time, oq.ses_per_logic_tree_path)
    writers.write_csv(dest, rows, header=header, sep='\t', comment=comment)
    return [dest]
import argparse
def get_arguments():
    """Parse all the arguments provided from the CLI.

    Returns:
        The parsed argument namespace.
    """
    parser = argparse.ArgumentParser(description="DeepLabLFOV NetworkEv")
    # each entry: (flag, help text); both options are plain string paths
    options = (
        ("--pred-path", "Path to predicted segmentation."),
        ("--gt-path", "Path to the groundtruth dir."),
    )
    for flag, text in options:
        parser.add_argument(flag, type=str, default='', help=text)
    return parser.parse_args()
def get_google_auth(state=None, token=None):
    """Helper function to create an OAuth2Session object.

    The session is built for whichever stage of the OAuth flow we are in:
    with an existing token, with a saved state, or fresh with the full
    scope list.
    """
    if token:
        # already authorized: resume the session with the stored token
        return requests_oauthlib.OAuth2Session(Auth.CLIENT_ID, token=token)
    if state:
        # mid-flow: rebuild the session from the saved state
        return requests_oauthlib.OAuth2Session(
            Auth.CLIENT_ID, state=state, redirect_uri=Auth.REDIRECT_URI)
    # fresh session requesting the configured scopes
    return requests_oauthlib.OAuth2Session(
        Auth.CLIENT_ID, redirect_uri=Auth.REDIRECT_URI, scope=Auth.SCOPES)
def _normalize_dataframe(dataframe, index):
"""Take a pandas DataFrame and count the element present in the
given columns, return a hierarchical index on those columns
"""
#groupby the given keys, extract the same columns and count the element
# then collapse them with a mean
data = dataframe[index].dropna()
grouped = data.groupby(index, sort=False)
counted = grouped[index].count()
averaged = counted.mean(axis=1)
# Fill empty missing with 0, see GH5639
averaged = averaged.fillna(0.0)
return averaged | fdc49912f538694048560f1c1453714791a7c6e4 | 3,634,803 |
import json
def load_network_from_checkpoint(checkpoint, model_json, input_shape=None):
    """Function to read the weights from checkpoint based on json description.

    Args:
        checkpoint: tensorflow checkpoint with trained model to verify
        model_json: path of json file with model description of the network --
            a list of dictionary items, one per layer, each containing 'type',
            'weight_var' and 'bias_var' (and, for conv layers, 'stride' and
            'padding'). 'type' is one of {'ff', 'ff_relu', 'conv'};
            'weight_var' is the name of the tf variable for the weights of
            layer i; 'bias_var' is the name of the tf variable for the bias
            of layer i.
        input_shape: optional input shape, forwarded unchanged to the
            NeuralNetwork constructor.

    Returns:
        A NeuralNetwork built from:
            net_weights: list of numpy matrices of weights of each layer,
                convention: x[i+1] = W[i] x[i]
            net_biases: list of numpy arrays (column vectors) of biases of
                each layer
            net_layer_types: type of each layer, as read from the JSON

    Raises:
        ValueError: If layer_types are invalid or variable names
            not found in checkpoint
    """
    # Load checkpoint
    reader = tf.train.load_checkpoint(checkpoint)
    variable_map = reader.get_variable_to_shape_map()
    checkpoint_variable_names = variable_map.keys()
    # Parse JSON file for names
    with tf.gfile.Open(model_json) as f:
        list_model_var = json.load(f)
    net_layer_types = []
    net_weights = []
    net_biases = []
    cnn_params = []
    # Checking validity of the input and adding to list
    for layer_model_var in list_model_var:
        if layer_model_var["type"] not in {"ff", "ff_relu", "conv"}:
            raise ValueError("Invalid layer type in description")
        if (
            layer_model_var["weight_var"] not in checkpoint_variable_names
            or layer_model_var["bias_var"] not in checkpoint_variable_names
        ):
            raise ValueError("Variable names not found in checkpoint")
        net_layer_types.append(layer_model_var["type"])
        layer_weight = reader.get_tensor(layer_model_var["weight_var"])
        layer_bias = reader.get_tensor(layer_model_var["bias_var"])
        # TODO(aditirag): is there a way to automatically check when to transpose
        # We want weights W such that x^{i+1} = W^i x^i + b^i
        # Can think of a hack involving matching shapes but if shapes are equal
        # it can be ambiguous
        if layer_model_var["type"] in {"ff", "ff_relu"}:
            # Feedforward weights are stored transposed in the checkpoint.
            layer_weight = np.transpose(layer_weight)
            # Feedforward layers carry no convolution parameters.
            cnn_params.append(None)
        if layer_model_var["type"] in {"conv"}:
            if "stride" not in layer_model_var or "padding" not in layer_model_var:
                raise ValueError("Please define stride and padding for conv layers.")
            cnn_params.append(
                {
                    "stride": layer_model_var["stride"],
                    "padding": layer_model_var["padding"],
                }
            )
        net_weights.append(layer_weight)
        # Biases are stored as (size, 1) column vectors.
        net_biases.append(np.reshape(layer_bias, (np.size(layer_bias), 1)))
    return NeuralNetwork(
        net_weights, net_biases, net_layer_types, input_shape, cnn_params
    )
def get_indicator_plugin_manager():
    """Build the plugin manager for indicator hooks.

    Instantiates every Hook class found in the plugins package and
    registers it so the hooks can be called from master sources.
    """
    manager = pluggy.PluginManager("indicator")
    manager.add_hookspecs(IndicatorSpec)
    for class_name in indicatorPluginClasses:  # noqa: F405
        # the plugin classes are star-imported, so look them up by name
        plugin_cls = globals()[class_name]
        manager.register(plugin_cls())
    return manager
def run(command, **kwargs):
    """Run a shell command and return its output as bytes.

    Raise CalledProcessError on error.

    Any kind of shell-executable line is accepted, with one or more
    commands, pipes, etc. Keyword arguments are shell-escaped and then
    substituted into the command using ``format()``, so individual
    commands stay readable at the callsite::

        run('echo {name}', name='Fred')
    """
    escaped = {key: quote(value) for key, value in kwargs.items()}
    return check_output(command.format(**escaped), shell=True)
from datetime import datetime
def to_date(string, format="%d/%m/%Y"):
    """Converts a string to datetime.

    :param string: String containing the date.
    :type string: str
    :param format: The date format. Use %Y for year, %m for months and %d
        for days, defaults to "%d/%m/%Y"
    :type format: str, optional
    :return: The parsed date
    :rtype: `datetime.datetime`
    """
    # Note: ``format`` shadows the builtin, but renaming it would break
    # callers passing it as a keyword argument.
    return datetime.strptime(string, format)
def invert_apply_grouping2(grouped_items, groupxs, dtype=None):
    """use only when ungrouping will be complete"""
    # Size the output from the largest index mentioned in any group.
    nmax = _max(list(map(_max, groupxs)))
    flat = np.zeros((nmax + 1,), dtype=dtype)
    # Scatter each group's values back to their original positions.
    for values, positions in zip(grouped_items, groupxs):
        flat[positions] = values
    return flat
def make_aware_assuming_local(dt):
    """
    Wrapper for Django's ``timezone.make_aware``: takes a naive datetime and
    makes it timezone-aware, assuming the current timezone since none is
    passed from this wrapper function. It will also raise an exception if the
    passed datetime is already timezone-aware.
    """
    # NOTE(review): ``is_dst`` was deprecated in Django 4.0 and removed in
    # Django 5.0 -- confirm the project's Django version before upgrading.
    return timezone.make_aware(dt, is_dst=True)
def pad_word_array(word_array, MAX_SEQUENCE_LENGTH, padding='pre', truncating='pre'):
    """Return a word array of exactly MAX_SEQUENCE_LENGTH tokens, by
    truncating the original array or padding it with '<pad>' tokens.

    Args:
        word_array: list of tokens.
        MAX_SEQUENCE_LENGTH: target length.
        padding: 'pre' appends pads after the words, anything else prepends.
        truncating: 'pre' keeps the first tokens, anything else keeps the last.

    NOTE(review): these 'pre'/'post' semantics look inverted relative to the
    Keras ``pad_sequences`` convention -- confirm callers expect this.

    Returns:
        A new list of length MAX_SEQUENCE_LENGTH.
    """
    length = len(word_array)
    if length > MAX_SEQUENCE_LENGTH:
        if truncating == 'pre':
            return word_array[:MAX_SEQUENCE_LENGTH]
        return word_array[length - MAX_SEQUENCE_LENGTH:]
    pads = ['<pad>'] * (MAX_SEQUENCE_LENGTH - length)
    if padding == 'pre':
        return word_array + pads
    return pads + word_array
def nbconvert(code):
    """Create Jupyter Notebook code.

    Splits *code* on "# <codecell>" marker lines, turning each chunk into
    one code cell of an ipynb (nbformat 4) document.

    Arguments:
        code -- code string separated by \\n

    Returns a dict in ipynb format.
    """
    def make_cell(source):
        # one empty, collapsed code cell per chunk
        return {
            "cell_type": "code",
            "execution_count": None,
            "metadata": {
                "collapsed": True,
            },
            "outputs": [],
            "source": [source],
        }

    return {
        "cells": [make_cell(chunk) for chunk in code.split("\n# <codecell>\n")],
        "nbformat": 4,
        "nbformat_minor": 0,
        "metadata": {
            "kernelspec": {
                "display_name": "Python 2",
                "language": "python",
                "name": "python2",
            },
            "language_info": {
                "codemirror_mode": {
                    "name": "ipython",
                    "version": 2,
                },
                "file_extension": ".py",
                "mimetype": "text/x-python",
                "name": "python",
                "nbconvert_exporter": "python",
                "pygments_lexer": "ipython2",
                "version": "2.7.6",
            },
        },
    }
def add_box(width, height, depth):
    """
    This function takes inputs and returns vertex and face arrays.
    No actual mesh data creation is done here.

    The unit box spans [-1, 1] in x/y and [0, 2] in z; each vertex is then
    scaled by (width, depth, height) on the (x, y, z) axes respectively.
    """
    base_verts = [
        (+1.0, +1.0, 0.0),
        (+1.0, -1.0, 0.0),
        (-1.0, -1.0, 0.0),
        (-1.0, +1.0, 0.0),
        (+1.0, +1.0, +2.0),
        (+1.0, -1.0, +2.0),
        (-1.0, -1.0, +2.0),
        (-1.0, +1.0, +2.0),
    ]
    faces = [
        (0, 1, 2, 3),
        (4, 7, 6, 5),
        (0, 4, 5, 1),
        (1, 5, 6, 2),
        (2, 6, 7, 3),
        (4, 0, 3, 7),
    ]
    # apply size: x scales with width, y with depth, z with height
    verts = [(vx * width, vy * depth, vz * height) for vx, vy, vz in base_verts]
    return verts, faces
import time
def pixmap(randoms, targets, rand_density, nside=256, gaialoc=None):
    """HEALPix map of useful quantities for a Legacy Surveys Data Release

    Parameters
    ----------
    randoms : :class:`~numpy.ndarray` or `str`
        Catalog or file of randoms as made by :func:`select_randoms()` or
        :func:`quantities_at_positions_in_a_brick()`.
    targets : :class:`~numpy.ndarray` or `str`
        Corresponding (i.e. same Data Release) catalog or file of targets
        as made by, e.g., :func:`desitarget.cuts.select_targets()`, or
        the name of a directory containing HEALPix-split targets that
        can be read by :func:`desitarget.io.read_targets_in_box()`.
    rand_density : :class:`int`
        Number of random points per sq. deg. at which the random catalog
        was generated (see also :func:`select_randoms()`).
    nside : :class:`int`, optional, defaults to nside=256
        Resolution (HEALPix nside) at which to build the (NESTED) map.
        The default corresponds to ~0.0525 sq. deg. (or "brick-sized")
    gaialoc : :class:`str`, optional, defaults to ``None``
        Name of a FITS file that already contains a column "STARDENS",
        which is simply read in. If ``None``, the stellar density is
        constructed from files in $GAIA_DIR.

    Returns
    -------
    :class:`~numpy.ndarray`
        An array of useful information that includes
            - HPXPIXEL: HEALPixel integers at the passed `nside`.
            - FRACAREA: Fraction of pixel with at least one observation
              in any band. Made with :func:`pixweight()`.
            - STARDENS: The stellar density in a pixel from Gaia. Made
              with :func:`stellar_density()`.
            - EBV: E(B-V) in pixel from the SFD dust map, from the
              median of EBV values in the passed `randoms`.
            - PSFDEPTH_G, R, Z: PSF depth in the pixel, from the median
              of PSFDEPTH values in `randoms`.
            - GALDEPTH_G, R, Z: Galaxy depth in the pixel, from the
              median of GALDEPTH values in `randoms`.
            - PSFDEPTH_W1, W2: (AB PSF) depth in the pixel, from the
              median of values in the passed `randoms`.
            - PSFSIZE_G, R, Z: Weighted average PSF FWHM, in arcsec, in
              the pixel, from the median of PSFSIZE values in the passed
              random catalog.
            - FRACAREA_X: Fraction of pixel with at least one observation
              in any band with MASKBITS==X (bitwise OR, so, e.g. if X=7.
            - One column for every bit that is returned by
              :func:`desitarget.QA._load_targdens()`. Each column
              contains the target density in the pixel.
    :class:`str`
        Survey to which `targets` corresponds, e.g., 'main', 'sv1', etc.

    Notes
    -----
        - If `gaialoc` is ``None`` then $GAIA_DIR must be set.
    """
    # NOTE(review): `time` is called as a function (`time()-start`) and
    # `start` is not defined in this view -- the original module presumably
    # did `from time import time` and set `start` at import time; confirm.
    # ADM if a file name was passed for the random catalog, read it in
    if isinstance(randoms, str):
        log.info('Reading in random catalog...t = {:.1f}s'.format(time()-start))
        randoms = fitsio.read(randoms)
    # ADM if a file name was passed for the targets catalog, read it in
    if isinstance(targets, str):
        log.info('Reading in target catalog...t = {:.1f}s'.format(time()-start))
        # ADM grab appropriate columns for an SV/cmx/main survey file.
        targcols = target_columns_from_header(targets)
        cols = np.concatenate([["RA", "DEC"], targcols])
        targets = read_targets_in_box(targets, columns=cols)
    log.info('Read targets and randoms...t = {:.1f}s'.format(time()-start))
    # ADM change target column names, and retrieve associated survey information.
    _, Mx, survey, targets = main_cmx_or_sv(targets, rename=True)
    # ADM determine the areal coverage of the randoms at this nside.
    log.info('Determining footprint...t = {:.1f}s'.format(time()-start))
    pw = pixweight(randoms, rand_density, nside=nside)
    npix = len(pw)
    # ADM areal coverage for some combinations of MASKBITS.
    mbcomb = []
    mbstore = []
    for mb in [[10, 12, 13],
               [1, 10, 12, 13],
               [1, 5, 6, 7, 11, 12, 13]]:
        bitint = np.sum(2**np.array(mb))
        mbcomb.append(bitint)
        log.info('Determining footprint for maskbits not in {}...t = {:.1f}s'
                 .format(bitint, time()-start))
        mbstore.append(pixweight(randoms, rand_density,
                                 nside=nside, maskbits=bitint))
    # BUG FIX: a second, identical pixweight()/npix computation used to sit
    # here; nothing between the first call and this point modifies
    # `randoms`, so the recomputation only wasted time and was removed.
    # ADM get the target densities.
    log.info('Calculating target densities...t = {:.1f}s'.format(time()-start))
    targdens = get_targ_dens(targets, Mx, nside=nside)
    # ADM set up the output array.
    datamodel = [('HPXPIXEL', '>i4'), ('FRACAREA', '>f4'), ('STARDENS', '>f4'), ('EBV', '>f4'),
                 ('PSFDEPTH_G', '>f4'), ('PSFDEPTH_R', '>f4'), ('PSFDEPTH_Z', '>f4'),
                 ('GALDEPTH_G', '>f4'), ('GALDEPTH_R', '>f4'), ('GALDEPTH_Z', '>f4'),
                 ('PSFDEPTH_W1', '>f4'), ('PSFDEPTH_W2', '>f4'),
                 ('PSFSIZE_G', '>f4'), ('PSFSIZE_R', '>f4'), ('PSFSIZE_Z', '>f4')]
    # ADM the maskbits-dependent areas.
    datamodel += [("FRACAREA_{}".format(bitint), '>f4') for bitint in mbcomb]
    # ADM the density of each target class.
    datamodel += targdens.dtype.descr
    hpxinfo = np.zeros(npix, dtype=datamodel)
    # ADM set initial values to -1 so that they can easily be clipped.
    hpxinfo[...] = -1
    # ADM add the areal coverage, pixel information and target densities.
    hpxinfo['HPXPIXEL'] = np.arange(npix)
    hpxinfo['FRACAREA'] = pw
    for bitint, fracarea in zip(mbcomb, mbstore):
        hpxinfo['FRACAREA_{}'.format(bitint)] = fracarea
    for col in targdens.dtype.names:
        hpxinfo[col] = targdens[col]
    # ADM build the stellar density, or if gaialoc was passed as a file, just read it in.
    if gaialoc is None:
        log.info('Calculating stellar density using Gaia files in $GAIA_DIR...t = {:.1f}s'
                 .format(time()-start))
        sd = stellar_density(nside=nside)
    else:
        sd = fitsio.read(gaialoc, columns=["STARDENS"])
        if len(sd) != len(hpxinfo):
            # NOTE(review): execution continues after this critical log;
            # the mismatched assignment below will then fail -- consider
            # raising here instead.
            log.critical('Stellar density map in {} was not calculated at NSIDE={}'
                         .format(gaialoc, nside))
    hpxinfo["STARDENS"] = sd
    # ADM add the median values of all of the other systematics.
    log.info('Calculating medians of systematics from random catalog...t = {:.1f}s'
             .format(time()-start))
    ras, decs = randoms["RA"], randoms["DEC"]
    pixnums = hp.ang2pix(nside, np.radians(90.-decs), np.radians(ras), nest=True)
    # ADM some sorting to order the values to extract the medians.
    pixorder = np.argsort(pixnums)
    pixels, pixcnts = np.unique(pixnums, return_counts=True)
    pixcnts = np.insert(pixcnts, 0, 0)
    pixcnts = np.cumsum(pixcnts)
    log.info('Done sorting...t = {:.1f}s'.format(time()-start))
    # ADM work through the ordered pixels to populate the median for
    # ADM each quantity of interest.
    cols = ['EBV', 'PSFDEPTH_W1', 'PSFDEPTH_W2',
            'PSFDEPTH_G', 'GALDEPTH_G', 'PSFSIZE_G',
            'PSFDEPTH_R', 'GALDEPTH_R', 'PSFSIZE_R',
            'PSFDEPTH_Z', 'GALDEPTH_Z', 'PSFSIZE_Z']
    t0 = time()
    # ADM note this reuses (shadows) npix for the count of populated pixels.
    npix = len(pixcnts)
    # BUG FIX: guard against npix < 50, which made stepper 0 and raised
    # ZeroDivisionError in the `i % stepper` progress check below.
    stepper = max(npix//50, 1)
    for i in range(npix-1):
        inds = pixorder[pixcnts[i]:pixcnts[i+1]]
        pix = pixnums[inds][0]
        for col in cols:
            hpxinfo[col][pix] = np.median(randoms[col][inds])
        if i % stepper == 0 and i > 0:
            elapsed = time() - t0
            rate = i / elapsed
            log.info('{}/{} pixels; {:.1f} pix/sec; {:.1f} total mins elapsed'
                     .format(i, npix, rate, elapsed/60.))
    log.info('Done...t = {:.1f}s'.format(time()-start))
    return hpxinfo, survey
from typing import Union
from typing import Callable
from typing import Sequence
from typing import Tuple
def filter_keys(data: dict, keys: Union[Callable, Sequence],
                return_popped=False) -> Union[dict, Tuple[dict, dict]]:
    """
    Filters keys from a given data dict.

    Args:
        data: the dictionary to pop the keys from
        keys: if callable it must return a boolean for each key indicating
            whether it should be retained in the dict.
            if sequence of strings, the strings shall be the keys to be
            retained
        return_popped: whether to also return the popped values
            (default: False)

    Returns:
        dict: the data without the popped values
        dict: the popped values; only if :attr:`return_popped` is True
    """
    # Normalize the predicate form into an explicit list of keys to keep.
    if callable(keys):
        keep = [key for key in data.keys() if keys(key)]
    else:
        keep = keys
    unwanted = [key for key in data.keys() if key not in keep]
    return pop_keys(data=data, keys=unwanted, return_popped=return_popped)
def beta_ion(T_rad, species):
    """Case-B photoionization coefficient.

    Parameters
    ----------
    T_rad : float
        The radiation temperature.
    species : {'HI', 'HeI_21s', 'HeI_23s'}
        The relevant species.

    Returns
    -------
    float
        Case-B photoionization coefficient in s\ :sup:`-1`\ .

    Raises
    ------
    TypeError
        If *species* is not one of the supported values.

    Notes
    -----
    For HeI, returns beta with respect to the 2s state,
    in agreement with convention in RECFAST.
    """
    # Thermal de Broglie wavelength of the electron at T_rad.
    de_broglie_wavelength = (
        c * 2*np.pi*hbar
        / np.sqrt(2 * np.pi * me * T_rad)
    )
    if species == 'HI':
        return (
            (1/de_broglie_wavelength)**3
            * np.exp(-rydberg/4/T_rad) * alpha_recomb(T_rad, 'HI')
        )/4
    elif species == 'HeI_21s':
        # Ionization energy measured from the 2^1s excited state.
        E_21s_inf = He_ion_eng - He_exc_eng['21s']
        return 4*(
            (1/de_broglie_wavelength)**3
            * np.exp(-E_21s_inf/T_rad) * alpha_recomb(T_rad, 'HeI_21s')
        )
    elif species == 'HeI_23s':
        E_23s_inf = He_ion_eng - He_exc_eng['23s']
        return (4/3)*(
            (1/de_broglie_wavelength)**3
            * np.exp(-E_23s_inf/T_rad) * alpha_recomb(T_rad, 'HeI_23s')
        )
    else:
        # BUG FIX: the original *returned* the exception instance instead of
        # raising it, so invalid species went undetected by callers.
        raise TypeError('invalid species.')
from pathlib import Path
def load_template(template_path, template_name):
    """Load a Jinja template from a given path and name.

    Arguments:
        template_path: directory containing the template (Path or str)
        template_name: filename of the template

    Raises:
        IOError: This path does not exist
    """
    # jinja2's FileSystemLoader wants a plain string path
    search_path = str(template_path) if isinstance(template_path, Path) else template_path
    loader = j2.FileSystemLoader(search_path)
    environment = j2.Environment(loader=loader, trim_blocks=True,
                                 lstrip_blocks=True)
    return environment.get_template(template_name)
def html(string):
    """Wrap *string* as an inline raw-HTML pandoc element."""
    element = RawInline('html', string)
    return element
def mock_user_moira_lists(mocker):
    """Patch ``ui.utils.user_moira_lists`` and return the mock.

    The mock is configured to report no moira list memberships (empty set).
    """
    patched = mocker.patch("ui.utils.user_moira_lists")
    patched.return_value = set()
    return patched
def translateAllIndex(text):
    """
    This is the translator API
    Call this api passing a piece of text and get back the Swedish translation
    ---
    tags:
      - Translation API
    parameters:
      - name: text
        in: path
        type: string
        required: true
        description: The text
    responses:
      200:
        description: Whether it's good or bad
        schema:
          id: translate
          properties:
            google_translation:
              type: string
              description: The translation
            google_quality:
              type: number
              description: How good it is
            improved_translation:
              type: string
              description: The improved translation
            improved_quality:
              type: number
              description: How good it is
    """
    # NOTE: the docstring above appears to be a flasgger/Swagger YAML spec
    # parsed at runtime to build the API docs -- treat it as behavior, not
    # documentation, and do not edit it casually.
    # Baseline translation and the improved/post-edited translation.
    gt = translateSimple(text)
    tt = translateAdvanced(text)
    # Score each translation and return everything as a JSON payload.
    return jsonify(
        google_translation=gt,
        google_quality=classification(gt),
        improved_translation=tt,
        improved_quality=classification(tt)
    )
import os
import logging
def get_network_config(net_topology, ignore_env_vars=False,
                       net_topology_file="network.yaml"):
    """Get network info from environment.

    Reads the named topology from *net_topology_file* and, unless
    *ignore_env_vars* is set, overrides the values with any undercloud
    environment variables. Useful when driving network configuration from
    the CLI with settings kept in a YAML file.

    :param net_topology: Network topology name from network.yaml
    :type net_topology: string
    :param ignore_env_vars: Ignore enviroment variables or not
    :type ignore_env_vars: boolean
    :returns: Dictionary of network configuration
    :rtype: dict
    """
    if not os.path.exists(net_topology_file):
        raise Exception("Network topology file: {} not found."
                        .format(net_topology_file))
    net_info = get_yaml_config(net_topology_file)[net_topology]
    if not ignore_env_vars:
        # environment variables win over the YAML file for the undercloud
        logging.info("Consuming network environment variables as overrides "
                     "for the undercloud.")
        net_info.update(get_undercloud_env_vars())
    logging.info("Network info: {}".format(dict_to_yaml(net_info)))
    return net_info
import imp
import os
def get_testbeds_dict():
    """Return a dictionary containing mapping from dut hostname to testbed name."""
    # NOTE(review): ``imp`` is deprecated and removed in Python 3.12 --
    # consider importlib.util when the runtime allows it.
    # Load the testbed helper module straight from the sonic-mgmt tree.
    testbed = imp.load_source('testbed', os.path.join(SONIC_MGMT_DIR, 'tests/common/testbed.py'))
    testbeds_dict = testbed.TestbedInfo(TESTBED_FILE).testbed_topo
    return testbeds_dict
def AIHT(x, A, AT, m, M, thresh, proximalProjection=None):
    """
    Accelerated Iterative Hard thresholding algorithm that keeps exactly M elements
    in each iteration. This algorithm includes an additional double
    overrelaxation step that significantly improves convergence speed without
    destroying any of the theoretical guarantees of the IHT algorithm
    derived in [1], [2] and [3].
    This algorithm is used to solve the problem A*z=x
    Inputs:
        x: observation vector to be decomposed
        A: it can be a (nxm) matrix that gives the effect of the forward matrix A on a vector or an operator that does the same
        AT: it can be a (nxm) matrix that gives the effect of the backward matrix A.T on a vector or an operator that does the same
        m: length of the solution vector s
        M: number of non-zero elements to keep in each iteration
        thresh: stopping criterion
        proximalProjection (optional): function that carries out the hard thresholding projection. The input is the vector to be
            thresholded and the number of elements to be left. The output is the thresholded vector.
    Outputs:
        s: solution vector
        err_mse: vector containing mse of approximation error for each iteration
    """
    # Work with a column vector internally.
    x = np.atleast_2d(x).T
    n1, n2 = x.shape
    if (n2 == 1):
        n = n1
    elif (n1 == 1):
        x = x.T
        n = n2
    else:
        # NOTE(review): exit() terminates the interpreter; raising a
        # ValueError would be friendlier to callers -- confirm before changing.
        exit('x must be a vector')
    sigsize = np.dot(x.T, x) / n
    oldERR = sigsize
    err_mse = []
    iter_time = []
    STOPTOL = 1e-16
    # NOTE(review): STOPTOL, MAXITER, iter_time, t, initial_given and oldErr
    # are assigned but never used below.
    MAXITER = n**2
    verbose = True
    initial_given=0
    s_initial = np.zeros((m,1))
    # MU == 0 selects the adaptive (optimal) step-size branch below.
    MU = 0
    acceleration= 0
    Count = 0
    # Define the appropriate functions whether the forward/backward operator is given as a call to a function or a matrix
    # This makes everything transparent in the following
    if (hasattr(A, '__call__')):
        P = lambda z: A(z)
        PT = lambda z: AT(z)
    else:
        P = lambda z: np.dot(A, z)
        PT = lambda z: np.dot(AT,z)
    s_initial = np.zeros((m,1))
    Residual = x
    s = np.copy(s_initial)
    Ps = np.zeros((n,1))
    oldErr = sigsize
    # Stability check: estimate || P ||_2 with a random unit vector.
    x_test = np.random.randn(m,1)
    x_test = x_test / np.linalg.norm(x_test)
    nP = np.linalg.norm(P(x_test))
    if (np.abs(MU*nP) > 1):
        exit('WARNING! Algorithm likely to become unstable. Use smaller step-size or || P ||_2 < 1.')
    # Main algorithm
    t = 0
    done = False
    iteration = 1
    min_mu = 1e5
    max_mu = 0
    while (not done):
        Count += 1
        if (MU == 0):
            # Calculate optimal step size and do line search
            if ((Count > 1) & (acceleration == 0)):
                s_very_old = s_old
            s_old = s
            IND = s != 0
            d = PT(Residual)
            # If the current vector is zero, we take the largest element in d
            if (np.sum(IND) == 0):
                if (proximalProjection):
                    s = proximalProjection(d, M)
                    IND = s != 0
                else:
                    sortind = np.argsort(np.abs(d), axis=0)[::-1]
                    IND[sortind[0:M]] = 1
            # NOTE(review): `id` shadows the builtin of the same name.
            id = IND * d
            Pd = P(id)
            # Optimal step size restricted to the current support.
            mu = np.dot(id.T, id) / np.dot(Pd.T, Pd)
            max_mu = np.max([mu,max_mu])
            min_mu = np.min([mu,min_mu])
            mu = min_mu
            s = s_old + mu*d
            # Hard threshold: keep only the M largest-magnitude entries.
            if (proximalProjection):
                s = proximalProjection(s, M)
            else:
                sortind = np.argsort(np.abs(s), axis=0)[::-1]
                s[sortind[M:]] = 0
            if ((Count > 1) & (acceleration == 0)):
                very_old_Ps = old_Ps
            old_Ps = Ps
            Ps = P(s)
            Residual = x-Ps
            if ((Count > 2) & (acceleration == 0)):
                # First overrelaxation
                Dif = (Ps-old_Ps)
                a1 = np.dot(Dif.T, Residual) / np.dot(Dif.T, Dif)
                z1 = s + a1 * (s-s_old)
                Pz1 = (1+a1)*Ps - a1*old_Ps
                Residual_z1 = x-Pz1
                # Second overrelaxation
                Dif = Pz1 - very_old_Ps
                a2 = np.dot(Dif.T, Residual_z1) / np.dot(Dif.T, Dif)
                z2 = z1 + a2 * (z1-s_very_old)
                # Threshold z2
                if (proximalProjection):
                    z2 = proximalProjection(z2, M)
                else:
                    sortind = np.argsort(np.abs(z2), axis=0)[::-1]
                    z2[sortind[M:]] = 0
                Pz2 = P(z2)
                Residual_z2 = x - Pz2
                # Decide if z2 is any good
                if (np.dot(Residual_z2.T, Residual_z2) / np.dot(Residual.T, Residual) < 1):
                    s = z2
                    Residual = Residual_z2
                    Ps = Pz2
            #if (acceleration > 0):
                #s, Residual = mySubsetCG(x, s, P, Pt
            # Calculate step-size requirements
            omega = (np.linalg.norm(s-s_old) / np.linalg.norm(Ps-old_Ps))**2
            # As long as the support changes and mu > omega, we decrease mu
            while ((mu > 1.5*omega) & (np.sum(np.logical_xor(IND, s != 0)) != 0) & (np.sum(IND) != 0)):
                print("Decreasing mu")
                # We use a simple line search, halving mu in each step
                mu = mu / 2
                s = s_old + mu*d
                if (proximalProjection):
                    s = proximalProjection(s, M)
                else:
                    sortind = np.argsort(np.abs(s), axis=0)[::-1]
                    s[sortind[M:]] = 0
                Ps = P(s)
                # Calculate optimal step size and do line search
                Residual = x - Ps
                if ((Count > 2) & (acceleration == 0)):
                    # First overrelaxation
                    Dif = (Ps-old_Ps)
                    a1 = np.dot(Dif.T, Residual) / np.dot(Dif.T, Dif)
                    z1 = s + a1 * (s-s_old)
                    Pz1 = (1+a1)*Ps - a1*old_Ps
                    Residual_z1 = x-Pz1
                    # Second overrelaxation
                    Dif = Pz1 - very_old_Ps
                    a2 = np.dot(Dif.T, Residual_z1) / np.dot(Dif.T, Dif)
                    z2 = z1 + a2 * (z1-s_very_old)
                    # Threshold z2
                    if (proximalProjection):
                        z2 = proximalProjection(z2, M)
                    else:
                        sortind = np.argsort(np.abs(z2), axis=0)[::-1]
                        z2[sortind[M:]] = 0
                    Pz2 = P(z2)
                    Residual_z2 = x - Pz2
                    # Decide if z2 is any good
                    if (np.dot(Residual_z2.T, Residual_z2) / np.dot(Residual.T, Residual) < 1):
                        s = z2
                        Residual = Residual_z2
                        Ps = Pz2
                # Calculate step-size requirements
                omega = (np.linalg.norm(s-s_old) / np.linalg.norm(Ps-old_Ps))**2
        ERR = np.dot(Residual.T, Residual) / n
        err_mse.append(ERR)
        # Are we done yet?
        gap = np.linalg.norm(s-s_old)**2 / m
        if (gap < thresh):
            done = True
        if (not done):
            iteration += 1
            oldERR = ERR
        if (verbose):
            print("Iter={0} - gap={1} - target={2}".format(Count,gap,thresh))
    return s, err_mse
def permutate(array: list, permutation: list):
    """Apply a permutation to *array* in place via pairwise swaps.

    Args:
        array: An array of random elements
        permutation: The permutation of the given array

    Returns:
        The same list object, with elements swapped.

    NOTE(review): the skip test compares the 0-indexed position against the
    1-indexed permutation target, which looks inconsistent -- confirm the
    intended permutation convention before relying on this for arbitrary
    permutations.
    """
    swapped_positions = []
    for position, target in enumerate(permutation):
        # Skip apparent fixed points and targets already involved in a swap.
        if position == target or target in swapped_positions:
            continue
        other = target - 1
        # Exchange the element at the current position with the target slot.
        array[other], array[position] = array[position], array[other]
        swapped_positions.append(position)
    return array
def has_collided_with_wall(
    width: int, height: int, segments: list[SnakeSegment]
) -> bool:
    """Return True if the snake has collided with a wall."""
    # Only the head (first segment) can run into a wall.
    head = segments[0]
    outside_x = head.x <= 1 or head.x > width - 3
    outside_y = head.y < 1 or head.y >= height - 2
    return outside_x or outside_y
def plot_heatmap_max_val(env, value):
    """
    Generate heatmap showing maximum value at each state (not for n-armed
    bandit).
    """
    # Bandit environments have no spatial grid to draw.
    if env.name == 'n_armed_bandit':
        print("Heatmap can only be generated for grid worlds.")
        return None
    # 1-D arrays are per-state values; 2-D arrays are per-state-action, so
    # reduce over actions before reshaping onto the (dim_y, dim_x) grid.
    if value.ndim == 1:
        value_max = np.reshape(value, (env.dim_y,env.dim_x))
    else:
        value_max = np.reshape(value.max(axis=1), (env.dim_y,env.dim_x))
    # NOTE(review): rows are flipped for every grid except windy_cliff_grid --
    # presumably its state numbering already starts at the top; confirm.
    if env.name != 'windy_cliff_grid':
        value_max = value_max[::-1,:]
    fig = plt.figure()
    plt.title('Maximum value per state')
    ax = fig.add_subplot(111)
    im = ax.imshow(value_max, interpolation='none', cmap='afmhot')
    # Tick labels show state indices (one per column / one per row start).
    ax.set_xticks(np.linspace(0, env.dim_x-1, num=env.dim_x))
    ax.set_xticklabels(["%d" % x for x in np.arange(env.dim_x)])
    ax.set_yticks(np.linspace(0, env.dim_y-1, num=env.dim_y))
    ax.set_yticklabels(
        ["%d" % y for y in np.arange(0, env.dim_y*env.dim_x, env.dim_x)])
    # The y labels follow the same row flip applied to the data above.
    if env.name != 'windy_cliff_grid':
        ax.set_yticklabels(
            ["%d" % y for y in np.arange(
                0, env.dim_y*env.dim_x, env.dim_x)][::-1])
    fig.colorbar(im)
    return fig
def separate_lines(lines,imshape):
    """
    separate_lines(lines)
    Classifies left and right lines based on slope.

    A segment goes to the left bucket when its slope is negative and its
    second endpoint lies in the left half of the image, and to the right
    bucket when the slope is positive and the endpoint is in the right half.
    Vertical and horizontal segments are skipped.
    ---------------------------------------------------------------------------
    INPUT:
        lines: line points [[x1,y1,x2,y2]]
        imshape: image shape (rows, cols)
    OUTPUT:
        left{}, right{}: dictionaries with the following structure
            ['slope'] = line slopes (y2-y1)/(x2-x1)
            ['intercept'] = line intercepts y2 - slope*x2
            ['lane'] = (N, 4) array of the matching [[x1,y1,x2,y2]] segments
    ===========================================================================
    """
    left = {'slope':[],'intercept':[],'lane':np.empty((0,4),dtype=np.int32)}
    right = {'slope':[],'intercept':[],'lane':np.empty((0,4),dtype=np.int32)}
    mid_x = imshape[1]/2
    for line in lines:
        for x1,y1,x2,y2 in line:
            # Vertical/horizontal segments carry no usable slope information.
            if x1==x2 or y1==y2:
                continue
            slope = (y2-y1)/(x2-x1)
            intercept = y2 - slope*x2
            if slope < 0 and x2 < mid_x:
                bucket = left
            elif slope > 0 and x2 > mid_x:
                bucket = right
            else:
                continue
            bucket['slope'].append(slope)
            bucket['intercept'].append(intercept)
            bucket['lane'] = np.append(bucket['lane'], line, axis=0)
    return left,right
def p_climo_one_season( seasonname, datafilenames, omit_files, varnames, fileout_template,
                        time_units, calendar, dt, force_scalar_avg1,
                        input_global_attributes, filerank=None, filetag=None,
                        outseasons=None, queue1=None, lock1=None, comm1=None ):
    """climo_one_season but run as a separate process. Returns the started
    Process; the caller should join it (p.join()).

    All positional/keyword arguments are forwarded unchanged to
    climo_one_season in the child process. ``filerank`` and ``filetag``
    default to fresh empty dicts.
    """
    # BUG FIX: the defaults were mutable ``{}`` literals shared across calls;
    # use None sentinels and create fresh dicts per call instead.
    if filerank is None:
        filerank = {}
    if filetag is None:
        filetag = {}
    argtuple = ( seasonname, datafilenames, omit_files, varnames, fileout_template,
                 time_units, calendar, dt, force_scalar_avg1,
                 input_global_attributes, filerank, filetag, outseasons, queue1, lock1, comm1 )
    p = Process( target=climo_one_season, args=argtuple )
    p.start()
    return p
def two_view_reconstruction_rotation_only(p1, p2, camera1, camera2, threshold):
    """Find rotation between two views from point correspondences.

    Args:
        p1, p2: lists points in the images
        camera1, camera2: Camera models
        threshold: reprojection error threshold

    Returns:
        rotation (as a Rodrigues vector) and inlier list
    """
    bearings1 = camera1.pixel_bearings(p1)
    bearings2 = camera2.pixel_bearings(p2)
    # RANSAC over rotation-only relative pose; the angular threshold is
    # expressed as 1 - cos(threshold) for opengv.
    R = pyopengv.relative_pose_ransac_rotation_only(
        bearings1, bearings2, 1 - np.cos(threshold), 1000)
    inliers = _two_view_rotation_inliers(bearings1, bearings2, R, threshold)
    rotation_vector = cv2.Rodrigues(R.T)[0].ravel()
    return rotation_vector, inliers
def state_lookup():
    """Look up state from given zipcode.

    Once state is found, redirect to call_senators for forwarding.
    """
    # 'Digits' is presumably the keypad input gathered by the telephony
    # provider's voice prompt (Twilio-style) — confirm against the caller.
    zip_digits = request.values.get('Digits', None)
    # NB: We don't do any error handling for a missing/erroneous zip code
    # in this sample application. You, gentle reader, should to handle that
    # edge case before deploying this code.
    zip_obj = Zipcode.query.filter_by(zipcode=zip_digits).first()
    # Will raise AttributeError if no Zipcode row matched (zip_obj is None).
    return redirect(url_for('call_senators', state_id=zip_obj.state_id)) | 13219025777e50422ab30902f0f3d35f7b73afed | 3,634,829
def vmtkmeshtosurface(mesh, cleanoutput=1):
    """Convert a mesh to a surface by throwing out volume elements and (optionally) the relative points

    Args:
        mesh: Volumetric mesh.
        cleanoutput (bool): Remove unused points.

    Returns:
        vtkPolyData object.
    """
    # Configure and run the vmtk mesh-to-surface filter, then hand back
    # the resulting surface.
    converter = vmtkscripts.vmtkMeshToSurface()
    converter.Mesh = mesh
    converter.CleanOutput = cleanoutput
    converter.Execute()
    return converter.Surface
def time_delta_calc(contiguous_trajectory, order=2):
    """Computes the time derivatives of a contiguous trajectory.

    INPUT
        contiguous_trajectory   An array of space-time coordinates; column 0
                                is time, remaining columns are spatial.
        order                   The order up to which the time derivatives are
                                calculated. If order=1, the velocity is
                                returned. If order=2, the velocity and
                                acceleration are returned. etc.
    OUTPUT
        velocity                An array of time derivatives. If order = 1,
                                this is (vx,vy,vz). If order = 2,
                                (vx,vy,vz,ax,ay,az), etc.
    """
    eps = np.finfo(float).eps
    contiguous_trajectory = np.asarray(contiguous_trajectory, dtype=float)
    # Difference in all coordinates (time and space) between consecutive rows.
    delta_coordinates = np.diff(contiguous_trajectory, 1, 0)
    # BUG FIX: the original fed a Python-3 ``map`` iterator into ``np.pad``,
    # which fails (Python-2-era code).  The row-wise division
    # x[1:] / (x[0] + eps) is done vectorized instead; eps prevents
    # divide-by-zero warnings.
    velocity = delta_coordinates[:, 1:] / (delta_coordinates[:, :1] + eps)
    # Pad with edge values and average adjacent rows (linear interpolation)
    # so the velocity array matches the coordinate array's length.
    velocity = np.pad(velocity, ((1, 1), (0, 0)), 'edge')
    velocity = .5 * (velocity[1:, :] + velocity[:-1, :])
    # Very large velocities are artifacts of eps and are nulled.
    velocity[np.abs(velocity) > 10**10] = np.nan
    if order == 1:
        return velocity
    # Recurse on a (time, velocity) trajectory to get the next derivative.
    return np.concatenate(
        (velocity,
         time_delta_calc(
             np.concatenate(
                 (np.array([contiguous_trajectory[:, 0]]).T, velocity), 1),
             order - 1)), 1)
def tasks_page():
    """ Tasks and completions page
    """
    # Render the Label Studio tasks view.  g.project is presumably populated
    # by a before-request hook elsewhere in the app — confirm.
    return flask.render_template(
        'tasks.html',
        config=g.project.config,
        project=g.project,
        version=label_studio.__version__,
        **find_editor_files()
    ) | 56656a8571e33fb40f3790a207faa2fccc7315a0 | 3,634,832
def outformathtml(pandasdf):
    """
    change a few formating things to prettify and make it match
    """
    html = pandasdf.to_html()
    # Strip the default table border attribute.
    html = html.replace(""" border="1" """, " ")
    # Drop the empty header row pandas emits for the index column.
    html = html.replace("""<tr style="text-align: right;">\n <th></th>\n <th></th>\n </tr>\n""",
                        "")
    # Restyle the header to match the site palette.
    html = html.replace("""<thead>""",
                        """<thead style="text-align: left; color: #094D92; font-size: 30px"> """)
    return html
def list_catalogs(**kwargs):
    """
    Return the available Cone Search catalogs as a list of strings.
    These can be used for the ``catalog_db`` argument to
    :func:`conesearch`.

    Parameters
    ----------
    cache : bool
        Use caching for VO Service database. Access to actual VO
        websites referenced by the database still needs internet
        connection.
    verbose : bool
        Show download progress bars.
    pattern : str or `None`
        If given string is anywhere in a catalog name, it is
        considered a matching catalog. It accepts patterns as
        in :py:mod:`fnmatch` and is case-insensitive.
        By default, all catalogs are returned.
    sort : bool
        Sort output in alphabetical order. If not sorted, the
        order depends on dictionary hashing. Default is `True`.

    Returns
    -------
    arr : list of str
        List of catalog names.
    """
    # Thin wrapper: delegate to the shared VO-service catalog listing using
    # the configured Cone Search database name.
    return vos_catalog.list_catalogs(conf.conesearch_dbname, **kwargs) | b539746edb4b6aa256bcbac3153b9a323bb41882 | 3,634,834
def rot_decode(data: str, n: int = 13) -> str:
    """Decode a ROT-encoded string that was shifted by `n` places."""
    # Only shifts in [1, 26) are meaningful for a 26-letter alphabet.
    if n < 1 or n >= 26:
        raise ValueError('n must be in range [1, 26)')
    # Decoding by n is the same as encoding by the complementary shift.
    return rot_encode(data, 26 - n)
from datetime import datetime
def searchlight(x, y, m=None, groups=None, cv=None,
                write=False, logger=None, permutations=0, random_state=42, **searchlight_args):
    """
    Wrapper to launch searchlight
    :param x: Data
    :param y: labels
    :param m: mask
    :param groups: group labels
    :param cv: cross validator
    :param write: if image for writing is desired or not
    :param logger:
    :param searchlight_args:(default) process_mask_img(None),
    radius(2mm), estimator(svc),
    n_jobs(-1), scoring(none), cv(3fold), verbose(0)
    :return: trained SL object and SL results
    """
    write_to_logger("starting searchlight at " + str(datetime.now()), logger=logger)
    # Fall back to an EPI-derived brain mask when none is supplied.
    if m is None:
        m = masking.compute_epi_mask(x)
    searchlight_args["process_mask_img"] = m
    write_to_logger("searchlight params: " + str(searchlight_args), logger=logger)
    # NOTE(review): fit() is called with permutations/random_state, which
    # nilearn's SearchLight does not accept — presumably a project subclass;
    # confirm.
    sl = SearchLight(mask_img=m, cv=cv, **searchlight_args)
    sl.fit(x, y, groups, permutations=permutations, random_state=random_state)
    write_to_logger("Searchlight ended at " + str(datetime.now()), logger=logger)
    if write:
        # Also convert the per-voxel scores back into an image.
        return sl, data_to_img(sl.scores_, x, logger=logger)
    else:
        return sl | 799f2496c0609050e6914576cfbdaba972320723 | 3,634,836
def codegen_reload_data():
    """Parameters to codegen used to generate the fn_ioc_parser_v2 package"""
    # Return the codegen parameter mapping directly; all values are verbatim
    # identifiers from the package definition.
    return {
        "package": u"fn_ioc_parser_v2",
        "incident_fields": [],
        "action_fields": [],
        "function_params": [u"ioc_parser_v2_artifact_id", u"ioc_parser_v2_artifact_value", u"ioc_parser_v2_attachment_id", u"ioc_parser_v2_incident_id", u"ioc_parser_v2_task_id"],
        "datatables": [],
        "message_destinations": [u"fn_ioc_parser_v2"],
        "functions": [u"func_ioc_parser_v2"],
        "phases": [],
        "automatic_tasks": [],
        "scripts": [],
        "workflows": [u"example_parse_iocs_artifact", u"example_parse_iocs_attachment"],
        "actions": [u"Example: Parse IOCs (Artifact)", u"Example: Parse IOCs (Attachment)"],
        "incident_artifact_types": []
    }
def Pattern2(s):
    """ Compute the correlator for this pattern:
    ↓
    ↓ ↑ ↑
    and symmetry-equivalent patterns
    """
    res = 0.0
    # Pad the two lattice planes so every 3x3 window fits; s is indexed as
    # s[channel, i, j] with two channels — presumably down/up spin occupancy,
    # confirm against the caller.
    s = np.pad(s, ((0, 0), (2, 2), (2, 2)))
    L = s.shape[-1]
    # Accumulate all 16 rotations/reflections/channel-swaps of the 4-site
    # pattern at every lattice offset.
    for i in range(L-2):
        for j in range(L-2):
            res += s[1, i, j] * s[0, i+1, j] * s[1, i+1, j+1] * s[0, i+2, j]
            res += s[1, i+1, j] * s[1, i, j+1] * s[0, i+1, j+1] * s[0, i+1, j+2]
            res += s[0, i, j+1] * s[1, i+1, j] * s[0, i+1, j+1] * s[1, i+2, j+1]
            res += s[0, i, j] * s[0, i, j+1] * s[1, i+1, j+1] * s[1, i, j+2]
            res += s[0, i, j] * s[0, i+1, j] * s[1, i+1, j+1] * s[1, i+2, j]
            res += s[0, i+1, j] * s[1, i, j+2] * s[0, i+1, j+1] * s[1, i+1, j+2]
            res += s[1, i, j+1] * s[1, i+1, j] * s[0, i+1, j+1] * s[0, i+2, j+1]
            res += s[1, i, j] * s[1, i+1, j+1] * s[0, i, j+1] * s[0, i, j+2]
            res += s[0, i, j] * s[1, i+1, j] * s[0, i+1, j+1] * s[1, i+2, j]
            res += s[0, i+1, j] * s[0, i, j+1] * s[1, i+1, j+1] * s[1, i+1, j+2]
            res += s[1, i, j+1] * s[0, i+1, j] * s[1, i+1, j+1] * s[0, i+2, j+1]
            res += s[1, i, j] * s[1, i, j+1] * s[0, i+1, j+1] * s[0, i, j+2]
            res += s[1, i, j] * s[1, i+1, j] * s[0, i+1, j+1] * s[0, i+2, j]
            res += s[1, i+1, j] * s[0, i, j+2] * s[1, i+1, j+1] * s[0, i+1, j+2]
            res += s[0, i, j+1] * s[0, i+1, j] * s[1, i+1, j+1] * s[1, i+2, j+1]
            res += s[0, i, j] * s[0, i+1, j+1] * s[1, i, j+1] * s[1, i, j+2]
    return res | 390cf6b8262f00d396235fb8ef5d4acc72d1df9e | 3,634,838
import math
def gauss(x, x0, sigma):
    """
    This function returns a Gaussian distribution.
    """
    # Normalisation factor and exponent of the normal density N(x0, sigma).
    norm_factor = 1 / (sigma * math.sqrt(2 * math.pi))
    exponent = -(x - x0) ** 2 / (2 * sigma ** 2)
    # np.exp keeps array inputs working elementwise.
    return norm_factor * np.exp(exponent)
import math
def distance():
    """
    Return a function that computes the Euclidean distance between two points.

    The returned callable takes two objects with ``x``, ``y`` and ``z``
    attributes and returns the straight-line distance between them.

    return:
        Callable ``(a, b) -> float``.
    """
    # The original expression contained a stray unary '+' in the y term
    # ((a.y-b.y)*+(a.y-b.y)); mathematically it was a no-op, so rewriting
    # with squares is behaviour-preserving and removes the trap.
    return lambda a, b: math.sqrt(
        (a.x - b.x) ** 2 + (a.y - b.y) ** 2 + (a.z - b.z) ** 2)
import numpy
def rational_sum(numerator, denominator, *argv):
    """Sum of rational numbers.

    The first two arguments are a numerator/denominator pair; ``argv`` may
    carry further numerator/denominator pairs to add in.  Returns the
    fully-reduced ``(numerator, denominator)`` of the sum.
    """
    if len(argv) < 2:
        # Base case: no further pair to add — just reduce the fraction.
        gcd = numpy.gcd(numerator, denominator)
        return numerator // gcd, denominator // gcd
    num_2 = argv[0]
    den_2 = argv[1]
    # Add the next pair over the common denominator.
    num_3 = numerator * den_2 + num_2 * denominator
    den_3 = denominator * den_2
    gcd = numpy.gcd(num_3, den_3)
    # BUG FIX: the remaining pairs must be splatted back into *argv.
    # Previously ``argv[2:]`` was passed as a single positional argument,
    # so any pairs beyond the first extra one were silently dropped.
    return rational_sum(num_3 // gcd, den_3 // gcd, *argv[2:])
import time
def nonce() -> str:
    """Return a nounce counter (monotonic clock).

    References:
        * https://support.kraken.com/hc/en-us/articles/360000906023-What-is-a-nonce-
    """  # pylint: disable=line-too-long
    # Nanosecond monotonic ticks are non-decreasing within a process, which
    # makes each value usable as an increasing nonce.
    ticks = time.monotonic_ns()
    return str(ticks)
def fetch_single_equity(stock_code, start, end):
    """
    Read a stock's daily trade data for the given period from the local database.

    Notes
    -----
    1. Besides OHLCV the frame carries change percentage, turnover amount,
       turnover rate, float/total market cap and float/total share counts.
    2. Back-adjusted prices are added so charts can be drawn without the
       gaps caused by corporate actions.
    3. When writing in bcolz format the change-percentage column must be
       dropped because it contains negative values.

    Parameters
    ----------
    stock_code : str
        Stock code to fetch.
    start_date : datetime-like
        Start date (inclusive).
    end_date : datetime-like
        End date.

    return
    ----------
    DataFrame: DataFrame with OHLCV columns. datetimeindex.tz is None

    Examples
    --------
    >>> # stock code 600710 has been reused
    >>> stock_code = '600710'
    >>> start = '2016-03-29'
    >>> end = pd.Timestamp('2017-07-31')
    >>> df = fetch_single_equity(stock_code, start, end)
    >>> df.iloc[-6:,:8]
        date  symbol  open  high  low  close  prev_close  change_pct
    322 2017-07-24  600710  9.36  9.36 9.36   9.36        9.36         NaN
    323 2017-07-25  600710  9.36  9.36 9.36   9.36        9.36         NaN
    324 2017-07-26  600710  9.36  9.36 9.36   9.36        9.36         NaN
    325 2017-07-27  600710  9.36  9.36 9.36   9.36        9.36         NaN
    326 2017-07-28  600710  9.36  9.36 9.36   9.36        9.36         NaN
    327 2017-07-31  600710  9.25  9.64 7.48   7.55        9.31    -18.9044
    """
    # 7-character codes denote indices; delegate to the index fetcher.
    if len(stock_code) == 7:
        return _fetch_single_index(stock_code, start, end)
    start, end = sanitize_dates(start, end)
    # Fetch the full history first so adjusted prices are consistent since IPO.
    df = _fetch_single_equity(stock_code, None, None)
    if df.empty:
        return df
    # Repair zero prices.
    df = _fill_zero(df)
    # Add back-adjusted prices.
    df = _add_back_prices(df)
    cond = df['date'].between(start, end)
    df = df.loc[cond, :]
    if df.empty:
        return df
    t_start, t_end = df['date'].values[0], df['date'].values[-1]
    # Check whether any trading days are missing in the period.
    dts = [t for t in _tdates() if t >= t_start and t <= t_end]
    dts = pd.to_datetime(dts)
    # Fill rows for suspension (no-trade) days.
    df = _reindex(df, dts)
    assert len(df) == len(dts), f"股票:{stock_code},期间{t_start} ~ {t_end} 数据不足"
    # Derive share counts from market caps and close price.
    df.loc[:, 'shares_outstanding'] = df.market_cap / df.close
    df.loc[:, 'total_shares'] = df.total_cap / df.close
    if not df.empty:
        # Drop any residual zero-price rows.
        cond = df['close'] > 0.0
        df = df[cond]
    return df | 04e63665fe9b05dfcca8387519d2a178d117acb5 | 3,634,843
import os
import re
def generate():
    """Generates a dictionary of all the known CRC formats from:
https://reveng.sourceforge.io/crc-catalogue/all.htm

    See pwnlib/data/crcsum.txt for more information.
    """
    # Locate the bundled catalogue file relative to this module.
    curdir, _ = os.path.split(__file__)
    path = os.path.join(curdir, '..', '..', 'data', 'crcsums.txt')
    with open(path) as fd:
        data = fd.read()
    out = {}
    # Convert a raw field token into bool / str / int depending on syntax.
    # NOTE(review): the validating asserts vanish under ``python -O``.
    def fixup(s):
        if s == 'true':
            return True
        elif s == 'false':
            return False
        elif s.startswith('"'):
            assert re.match('"[^"]+"', s)
            return s[1:-1]
        elif s.startswith('0x'):
            assert re.match('0x[0-9a-fA-F]+', s)
            return int(s[2:], 16)
        else:
            assert re.match('[0-9]+', s)
            return int(s, 10)
    for l in data.strip().split('\n'):
        # Skip blank lines and comments.
        if not l or l[0] == '#':
            continue
        # First token is the catalogue anchor; the rest are key=value fields.
        ref, l = l.split(' ', 1)
        cur = {}
        cur['link'] = 'https://reveng.sourceforge.io/crc-catalogue/all.htm#' + ref
        for key in ['width', 'poly', 'init', 'refin', 'refout', 'xorout', 'check', 'name']:
            cur[key] = fixup(re.findall(r'%s=(\S+)' % key, l)[0])
        # Normalise the CRC name into an identifier-style key.
        cur['name'] = cur['name'].lower().replace('/', '_').replace('-', '_')
        assert cur['name'] not in out
        out[cur['name']] = cur
    return out | bac4d66babe3e01c703fb6f10d78b85021c62f3b | 3,634,844
import string
def base62_encode(number):
    """Encode a number in base62 (all digits + a-z + A-Z).

    Args:
        number: Integer to encode.

    Returns:
        The base62 representation as a string; ``'0'`` for zero and for any
        non-positive input (matching the original behaviour).
    """
    base62chars = string.digits + string.ascii_letters
    encoded = []
    while number > 0:
        number, remainder = divmod(number, 62)
        encoded.append(base62chars[remainder])
    # Digits were produced least-significant first; reversing once at the end
    # replaces the original's O(n) ``insert(0, ...)`` per digit.
    return ''.join(reversed(encoded)) or '0'
import json
def handle_update(config_path):
    """
    handle changes in globalConfig.json

    Args:
        config_path : path to globalConfig.json

    Returns:
        dictionary : schema_content (globalConfig.json)
    """
    with open(config_path) as config_file:
        schema_content = json.load(config_file)
    version = schema_content.get("meta").get("schemaVersion", "0.0.0")
    # Each migration below is applied only when the stored version predates
    # it; the file is rewritten in place after every migration step.
    if version_tuple(version) < version_tuple("0.0.1"):
        schema_content = handle_biased_terms_update(schema_content)
        with open(config_path, "w") as config_file:
            json.dump(schema_content, config_file, ensure_ascii=False, indent=4)
    if version_tuple(version) < version_tuple("0.0.2"):
        # 0.0.2 migration: fold oauth_state_enabled into the oauth entity's
        # options and drop no-longer-supported onChange/onLoad hooks.
        ta_tabs = schema_content.get("pages").get("configuration", {}).get("tabs", {})
        for tab in ta_tabs:
            if tab["name"] == "account":
                conf_entities = tab.get("entity")
                oauth_state_enabled_entity = {}
                for entity in conf_entities:
                    if entity.get("field") == "oauth_state_enabled":
                        logger.warning(
                            "oauth_state_enabled field is no longer a separate "
                            "entity since UCC version 5.0.0. It is now an "
                            "option in the oauth field. Please update the "
                            "globalconfig.json file accordingly."
                        )
                        oauth_state_enabled_entity = entity
                    if entity.get("field") == "oauth" and not entity.get(
                        "options", {}
                    ).get("oauth_state_enabled"):
                        entity["options"]["oauth_state_enabled"] = False
                if oauth_state_enabled_entity:
                    conf_entities.remove(oauth_state_enabled_entity)
            tab_options = tab.get("options", {})
            if tab_options.get("onChange"):
                logger.error(
                    "The onChange option is no longer supported since UCC "
                    "version 5.0.0. You can use custom hooks to implement "
                    "these actions."
                )
                del tab_options["onChange"]
            if tab_options.get("onLoad"):
                logger.error(
                    "The onLoad option is no longer supported since UCC "
                    "version 5.0.0. You can use custom hooks to implement "
                    "these actions."
                )
                del tab_options["onLoad"]
        is_inputs = "inputs" in schema_content.get("pages")
        if is_inputs:
            # Input services carry the same deprecated hook options.
            services = schema_content.get("pages").get("inputs", {}).get("services", {})
            for service in services:
                service_options = service.get("options", {})
                if service_options.get("onChange"):
                    logger.error(
                        "The onChange option is no longer supported since UCC "
                        "version 5.0.0. You can use custom hooks to implement "
                        "these actions."
                    )
                    del service_options["onChange"]
                if service_options.get("onLoad"):
                    logger.error(
                        "The onLoad option is no longer supported since UCC "
                        "version 5.0.0. You can use custom hooks to implement "
                        "these actions."
                    )
                    del service_options["onLoad"]
        schema_content["meta"]["schemaVersion"] = "0.0.2"
        with open(config_path, "w") as config_file:
            json.dump(schema_content, config_file, ensure_ascii=False, indent=4)
    if version_tuple(version) < version_tuple("0.0.3"):
        schema_content = handle_dropping_api_version_update(schema_content)
        with open(config_path, "w") as config_file:
            json.dump(schema_content, config_file, ensure_ascii=False, indent=4)
    return schema_content | f51f52f9353339ba1cf080b10ad8f28691deefe9 | 3,634,846
from typing import Sequence
from typing import Tuple
import urllib
def encode(s: Sequence[Tuple[str, str]], similar_to: str = None) -> str:
    """
    Takes a list of (key, value) tuples and returns a urlencoded string.

    If similar_to is passed, the output is formatted similar to the provided
    urlencoded string: when any parameter in ``similar_to`` appears without
    an '=', empty-valued parameters in the output are stripped of their
    trailing '=' as well.
    """
    remove_trailing_equal = False
    if similar_to:
        remove_trailing_equal = any("=" not in param for param in similar_to.split("&"))

    encoded = urllib.parse.urlencode(s, False, errors="surrogateescape")

    # BUG FIX: guard against an empty result — the original indexed
    # ``encoded[-1]`` unconditionally and raised IndexError when ``s`` was
    # empty while remove_trailing_equal was True.
    if encoded and remove_trailing_equal:
        encoded = encoded.replace("=&", "&")
        if encoded[-1] == '=':
            encoded = encoded[:-1]

    return encoded
def gis_ellipse_polygon(origin, a, b, orientation=0.0, complexity=128):
    """
    Generate a polygon approximating an ellipse for GIS applications.

    :param origin: the center of the ellipse (expected to be UTM, m)
    :param a: the a/semi-major axis length (expected to be UTM, m)
    :param b: the b/semi-minor axis length (expected to be UTM, m)
    :param orientation: the rotation/orientation (in degrees) of the ellipse [0.0 means no rotation applied] [rotated about origin]
    :param complexity: number of data points that make up the polygon [note that result will have length complexity + 1] with
                       the last entry equal to the first entry to ensure compatibility for GIS applications.
    :return: numpy array containing (complexity+1) points representing the selected ellipse
    """
    cx, cy = origin[0], origin[1]
    vertices = np.zeros((complexity + 1, 2), dtype=np.float32)
    # Parameter sweep around the full ellipse, endpoint included so the
    # polygon closes on itself.
    theta = np.linspace(0.0, 2.0 * pi, complexity + 1, endpoint=True)
    vertices[:, 0] = cx + a * np.cos(theta)
    vertices[:, 1] = cy + b * np.sin(theta)
    if orientation != 0.0:  # handle rotation
        vertices[:, 0], vertices[:, 1] = rotate_2d(vertices[:, 0], vertices[:, 1], orientation, cx, cy)
    # Force an exact closure (first point should already equal the last).
    vertices[-1] = vertices[0]
    return vertices
def activate_lesson():
    """
    {
        "id": 51,
        "active": "A"
    }
    """
    # Request body is expected to match the JSON shape in the docstring.
    domain = request.get_json()
    # Delegate to the service layer; its return value is passed through.
    return lesson_service.activate_lesson(domain) | a4e48ca2c8e65e9ff0e2fb95b61ef776573a9649 | 3,634,849
def split(labels, n_per_class=20, seed=0):
    """
    Randomly split the training data.

    Parameters
    ----------
    labels: array-like [n_nodes]
        The class labels
    n_per_class : int
        Number of samples per class
    seed: int
        Seed

    Returns
    -------
    split_train: array-like [n_per_class * nc]
        The indices of the training nodes
    split_val: array-like [n_per_class * nc]
        The indices of the validation nodes
    split_test array-like [n_nodes - 2*n_per_class * nc]
        The indices of the test nodes
    """
    np.random.seed(seed)
    num_classes = labels.max() + 1
    train_parts, val_parts = [], []
    for cls in range(num_classes):
        # Shuffle this class's node indices, then carve off train/val slices.
        shuffled = np.random.permutation((labels == cls).nonzero()[0])
        train_parts.append(shuffled[:n_per_class])
        val_parts.append(shuffled[n_per_class:2 * n_per_class])
    split_train = np.random.permutation(np.concatenate(train_parts))
    split_val = np.random.permutation(np.concatenate(val_parts))
    assert split_train.shape[0] == split_val.shape[0] == n_per_class * num_classes
    # Everything not in train/val becomes the test set.
    taken = np.concatenate((split_train, split_val))
    split_test = np.setdiff1d(np.arange(len(labels)), taken)
    return split_train, split_val, split_test
from setfilter.setfilter import Setfilter
def base():
    """test Setfilter """
    # Build the fixture: a 4-D state with zero center, identity body axes,
    # and scaled identity noise matrices.
    initial_center = np.array([0, 0, 0, 0.])
    initial_body = np.array([[1., 0., 0., 0.],
                             [0., 1., 0., 0.],
                             [0., 0., 1., 0.],
                             [0., 0., 0., 1.]])
    noise = np.eye(len(initial_center))
    return Setfilter(initial_center, initial_body,
                     noise * .2, noise * .1,
                     initNoise=False)
def stack(*cons):
    """
    Combine constraints into a large constraint
    by intersection.

    Parameters
    ----------
    cons : [`selection.affine.constraints`_]
        A sequence of constraints.

    Returns
    -------
    intersection : `selection.quasi_affine.constraints`_

    Notes
    -----
    Resulting constraint will have mean 0 and covariance $I$.
    If each is of type `constraints`, then quietly assumes that all residual projectors
    are the same, so it uses the first residual projector
    in the stack. If they are of type `orthogonal` then quietly
    assumes that all RSS and RSS_df are the same.
    If they are of mixed type, raises an exception.
    """
    ineq, ineq_LHS_off, ineq_RHS_off = [], [], []
    if np.all([isinstance(con, constraints) for con in cons]):
        # Stack the linear parts and offsets of all affine constraints.
        for con in cons:
            ineq.append(con.linear_part)
            ineq_LHS_off.append(con.LHS_offset)
            ineq_RHS_off.append(con.RHS_offset)
        intersection = constraints(np.vstack(ineq),
                                   np.hstack(ineq_LHS_off),
                                   np.hstack(ineq_RHS_off),
                                   cons[0].residual_projector
                                   )
    elif np.all([isinstance(con, orthogonal) for con in cons]):
        # NOTE(review): this branch also constructs a `constraints` object,
        # even though all inputs are `orthogonal` and RSS/RSS_df are passed —
        # possibly `orthogonal(...)` was intended here; confirm before relying
        # on the returned type.
        for con in cons:
            ineq.append(con.linear_part)
            ineq_LHS_off.append(con.LHS_offset)
            ineq_RHS_off.append(con.RHS_offset)
        intersection = constraints(np.vstack(ineq),
                                   np.hstack(ineq_LHS_off),
                                   np.hstack(ineq_RHS_off),
                                   cons[0].RSS,
                                   cons[0].RSS_df
                                   )
    else:
        raise ValueError('all constraints must of same type')
    return intersection | 8ddc52aa41c2ef4ec784067692efa1f3643a130c | 3,634,852
def _create_poi_gdf(
    tags,
    polygon=None,
    north=None,
    south=None,
    east=None,
    west=None,
    timeout=180,
    memory=None,
    custom_settings=None,
):
    """
    Create GeoDataFrame from POIs json returned by Overpass API.

    Parameters
    ----------
    tags : dict
        Dict of tags used for finding POIs from the selected area. Results
        returned are the union, not intersection of each individual tag.
        Each result matches at least one tag given. The dict keys should be
        OSM tags, (e.g., `amenity`, `landuse`, `highway`, etc) and the dict
        values should be either `True` to retrieve all items with the given
        tag, or a string to get a single tag-value combination, or a list of
        strings to get multiple values for the given tag. For example,
        tags = {
            'amenity':True,
            'landuse':['retail','commercial'],
            'highway':'bus_stop'}
        would return all amenities, `landuse=retail`, `landuse=commercial`,
        and `highway=bus_stop`.
    polygon : shapely Polygon or MultiPolygon
        geographic shape to fetch the POIs within
    north : float
        northern latitude of bounding box
    south : float
        southern latitude of bounding box
    east : float
        eastern longitude of bounding box
    west : float
        western longitude of bounding box
    timeout : int
        Timeout for the API request.
    memory : int
        server memory allocation size for the query, in bytes. If none, server
        will use its default allocation size
    custom_settings : string
        custom settings to be used in the overpass query instead of defaults

    Returns
    -------
    geopandas.GeoDataFrame
        POIs and their associated tags
    """
    responses = _osm_poi_download(
        tags,
        polygon=polygon,
        north=north,
        south=south,
        east=east,
        west=west,
        timeout=timeout,
        memory=memory,
        custom_settings=custom_settings,
    )
    # Parse coordinates from all the nodes in the response
    coords = _parse_nodes_coords(responses)
    # POI nodes
    poi_nodes = {}
    # POI ways
    poi_ways = {}
    # A list of POI relations
    relations = []
    # Dispatch every Overpass element by its OSM type: tagged nodes become
    # point POIs, closed ways become polygon POIs, relations are deferred.
    for result in responses["elements"]:
        if result["type"] == "node" and "tags" in result:
            poi = _parse_osm_node(response=result)
            # Add element_type
            poi["element_type"] = "node"
            # Add to 'pois'
            poi_nodes[result["id"]] = poi
        elif result["type"] == "way":
            # Parse POI area Polygon
            poi_area = _parse_polygonal_poi(coords=coords, response=result)
            if poi_area:
                # Add element_type
                poi_area["element_type"] = "way"
                # Add to 'poi_ways'
                poi_ways[result["id"]] = poi_area
        elif result["type"] == "relation":
            # Add relation to a relation list (needs to be parsed after
            # all nodes and ways have been parsed)
            relations.append(result)
    # Create GeoDataFrames
    gdf_nodes = gpd.GeoDataFrame(poi_nodes).T
    gdf_nodes.crs = settings.default_crs
    gdf_ways = gpd.GeoDataFrame(poi_ways).T
    gdf_ways.crs = settings.default_crs
    # Parse relations (MultiPolygons) from 'ways'
    gdf_ways = _parse_osm_relations(relations=relations, osm_way_df=gdf_ways)
    # Combine GeoDataFrames
    # NOTE(review): DataFrame.append is deprecated/removed in newer pandas
    # (pd.concat is the replacement) — fine for the pandas version pinned
    # by this project; confirm.
    gdf = gdf_nodes.append(gdf_ways, sort=False)
    # if caller requested pois within a polygon, only retain those that
    # fall within the polygon
    if polygon and len(gdf) > 0:
        gdf = gdf.loc[gdf["geometry"].centroid.within(polygon)]
    return gdf | 4d88c0bb60ee836131bf5197c0caad17c0358f5b | 3,634,853
def prepare_data(seqs, labels, maxlen=None, x_dim=3, mapping=None, max_mapping=None):
    """Create the matrices from the datasets.

    This pad each sequence to the same length: the length of the
    longest sequence or maxlen.

    if maxlen is set, we will cut all sequence to this maximum
    length.

    This swap the axis!

    Non-numeric input items are one-hot encoded over ``max_mapping`` slots
    (via ``mapping``); numeric items before position 18 additionally get a
    read-only memory row.  Returns (ret_X, mask_X, labels_X, memory_X).
    """
    assert mapping is not None
    assert max_mapping is not None
    # Trim all output seqs to have only maxlen steps
    if maxlen is not None:
        Iseqs = []
        Oseqs = []
        for i_seq, o_seq in zip(seqs, labels):
            if len(o_seq) < maxlen:
                Iseqs.append(i_seq)
                Oseqs.append(o_seq)
        seqs = Iseqs
        labels = Oseqs
    else:
        maxlen = 40
    new_seqs = []
    memory = []
    for seq in seqs:
        new_seq = []
        mem_seq = []
        for item_num, item in enumerate(seq):
            # Create a memory item corresponding to every input item.
            memitem = []
            if not is_number(item):
                # One-hot encode the symbolic item over max_mapping slots.
                # BUG FIX: dict.has_key() was removed in Python 3; use the
                # ``in`` operator instead (same semantics).
                for k in range(max_mapping):
                    if item in mapping and k == mapping[item]:
                        new_seq.append(1)
                        memitem.append(1)
                    else:
                        new_seq.append(0)
                        memitem.append(0)
                # Add to read-only memory matrix.
                mem_seq.append(memitem)
            elif item_num < 18:
                # Numeric item in the prefix (position < 18, hard-coded):
                # keep the raw value but still emit a memory row.
                for k in range(max_mapping):
                    if item in mapping and k == mapping[item]:
                        memitem.append(1)
                    else:
                        memitem.append(0)
                new_seq.append(item)
                # Add to read-only memory matrix.
                mem_seq.append(memitem)
            else:
                new_seq.append(item)
        new_seqs.append(new_seq)
        memory.append(mem_seq)
    # Pad and compute masks
    ret_X = np.zeros((maxlen, len(seqs), x_dim))
    mask_X = np.zeros((maxlen, len(seqs)))
    # Start out with ones: ones are the null characters.
    labels_X = np.ones((maxlen, len(seqs)))
    # NxMxW read-only memory tensor.
    memory_X = np.array(memory)
    for k in range(len(seqs)):
        mask_X[:len(labels[k]), k] = 1
        ret_X[:len(labels[k]), k] = np.asarray(new_seqs[k])
        labels_X[:len(labels[k]), k] = labels[k]
    return ret_X, mask_X, labels_X, memory_X
import types
def _get_prediction_tensor(
    predictions_dict):
  """Returns prediction Tensor for a specific Estimators.

  Returns the prediction Tensor for some regression Estimators.

  Args:
    predictions_dict: Predictions dictionary.

  Returns:
    Predictions tensor, or None if none of the expected keys are found in
    the predictions_dict.
  """
  # NOTE(review): `types` here appears to be a project module exposing
  # `is_tensor` — the stdlib `types` module has no such attribute; confirm.
  if types.is_tensor(predictions_dict):
    return predictions_dict
  # Keys are tried in order of preference; the first present value wins.
  key_precedence = (prediction_keys.PredictionKeys.LOGISTIC,
                    prediction_keys.PredictionKeys.PREDICTIONS,
                    prediction_keys.PredictionKeys.PROBABILITIES,
                    prediction_keys.PredictionKeys.LOGITS)
  for key in key_precedence:
    ref_tensor = predictions_dict.get(key)
    if ref_tensor is not None:
      return ref_tensor
  return None | 0bd509d4021e0f0622b0cf685e5bf8682f47daa0 | 3,634,855
def gen_confirm_code():
    """ Generate a new email confirmation code and return it
    """
    # Delegates to a project helper; 9 is presumably the code length —
    # confirm against generate_uuid_readable's signature.
    return generate_uuid_readable(9) | 992844a42886892cfa7eebc44e8efb61aa547725 | 3,634,856
from typing import List
from typing import Dict
from typing import cast
import json
from datetime import datetime
def make_event_crawl_jobs(entries: List[BugoutSearchResult]) -> List[EventCrawlJob]:
    """
    Create EventCrawlJob objects from bugout entries.

    Entries sharing the same ABI hash are merged into a single job whose
    contract list is the union of their checksummed addresses.
    """
    crawl_job_by_hash: Dict[str, EventCrawlJob] = {}

    for entry in entries:
        abi_hash = _get_tag(entry, "abi_method_hash")
        # Normalize the address to its checksum form before comparing/storing.
        contract_address = Web3().toChecksumAddress(_get_tag(entry, "address"))
        existing_crawl_job = crawl_job_by_hash.get(abi_hash)
        if existing_crawl_job is not None:
            # Same ABI seen again: just extend its contract list (deduped).
            if contract_address not in existing_crawl_job.contracts:
                existing_crawl_job.contracts.append(contract_address)
        else:
            abi = cast(str, entry.content)
            new_crawl_job = EventCrawlJob(
                event_abi_hash=abi_hash,
                event_abi=json.loads(abi),
                contracts=[contract_address],
                created_at=int(datetime.fromisoformat(entry.created_at).timestamp()),
            )
            crawl_job_by_hash[abi_hash] = new_crawl_job

    return [crawl_job for crawl_job in crawl_job_by_hash.values()] | 3aba2871df1cf35bb6337cbe8d8482437c3c91e0 | 3,634,857
def update_profile():
    """
    Update profile page.

    GET: pre-fill the form with the current user's data.
    POST: persist username/about/picture changes, then redirect
    (POST-redirect-GET) back to the profile page.
    """
    form = UpdateProfileForm()  # Update profile form
    if form.validate_on_submit():  # If form is submitted and validated
        if form.picture.data:  # If picture is uploaded
            picture = save_picture(form.picture.data)  # Save picture
            current_user.picture = picture  # Update picture
        current_user.username = form.username.data  # Update username
        current_user.about_user = form.about_user.data  # Update about text
        db.session.commit()  # Commit changes
        flash("Your profile was updated!", "info")
        return redirect(
            url_for("profile", username=current_user.username)
        )  # Redirect to profile, due to post-get-redirect pattern, to avoid "Are you sure you want to resubmit?"
    elif request.method == "GET":  # If form is not submitted, but GET request
        form.username.data = (
            current_user.username
        )  # Set username to current user's username
        form.about_user.data = (
            current_user.about_user
        )  # Set about_user to current user's about_user
    return render_template(
        "update-profile.html", title="Clippr. - Update Profile", form=form
    ) | ad2b427487cc63aa64068b86254c2d5b6b363c2c | 3,634,858
def get_version_info(pe):
    """Return version information extracted from a PE file's resources.

    Args:
        pe: A parsed PE object (e.g. ``pefile.PE``) exposing ``FileInfo``
            and, optionally, ``VS_FIXEDFILEINFO``.

    Returns:
        dict mapping version-resource field names to their values, plus
        selected ``VS_FIXEDFILEINFO`` fields when present.
    """
    res = {}
    for fileinfo in pe.FileInfo:
        if fileinfo.Key == 'StringFileInfo':
            for st in fileinfo.StringTable:
                for entry in st.entries.items():
                    res[entry[0]] = entry[1]
        if fileinfo.Key == 'VarFileInfo':
            for var in fileinfo.Var:
                # BUG FIX: dict.items() is not subscriptable in Python 3
                # (the original did ``var.entry.items()[0]``); take the
                # first key/value pair explicitly instead.
                key, value = next(iter(var.entry.items()))
                res[key] = value
    if hasattr(pe, 'VS_FIXEDFILEINFO'):
        res['flags'] = pe.VS_FIXEDFILEINFO.FileFlags
        res['os'] = pe.VS_FIXEDFILEINFO.FileOS
        res['type'] = pe.VS_FIXEDFILEINFO.FileType
        res['file_version'] = pe.VS_FIXEDFILEINFO.FileVersionLS
        res['product_version'] = pe.VS_FIXEDFILEINFO.ProductVersionLS
        res['signature'] = pe.VS_FIXEDFILEINFO.Signature
        res['struct_version'] = pe.VS_FIXEDFILEINFO.StrucVersion
    return res
import torch
def data_prepare(coord, feat, label, split='train', voxel_size=0.04, voxel_max=None, transform=None, shuffle_index=False, origin='min'):
    """ coord, feat, label - an entire cloud

    Voxel-downsamples, optionally crops to voxel_max points around a center
    point, optionally shuffles, shifts to the chosen origin, and converts to
    torch tensors.  Returns (coord, feat, label, xyz) tensors.
    """
    if transform:
        coord, feat, label = transform(coord, feat, label)
    if voxel_size:
        # voxelize the entire cloud
        coord_min = np.min(coord, 0)
        coord -= coord_min
        uniq_idx = voxelize(coord, voxel_size)
        coord, feat, label = coord[uniq_idx], feat[uniq_idx], label[uniq_idx]
    if 'train' in split and voxel_max and label.shape[0] > voxel_max:
        init_idx = np.random.randint(label.shape[0])
    else:
        # NOTE: not random during test
        init_idx = label.shape[0] // 2
    coord_init = coord[init_idx]
    if voxel_max and label.shape[0] > voxel_max:
        # radius crop with a random center point
        crop_idx = np.argsort(np.sum(np.square(coord - coord_init), 1))[:voxel_max]
        coord, feat, label = coord[crop_idx], feat[crop_idx], label[crop_idx]
    if shuffle_index:
        shuf_idx = np.arange(coord.shape[0])
        np.random.shuffle(shuf_idx)
        coord, feat, label = coord[shuf_idx], feat[shuf_idx], label[shuf_idx]
    # NOTE(review): this alias is overwritten below; since coord is shifted
    # in place, xyz would track the shift anyway — likely dead code, confirm.
    xyz = coord
    if origin == 'min':
        coord_min = np.min(coord, 0)
        coord -= coord_min
    elif origin == 'mean':
        # Center horizontal axes on their mean; drop the last axis to its min.
        coord[..., :-1] -= coord[..., :-1].mean(0)
        coord[..., -1] -= coord[..., -1].min()
    elif origin == 'center':
        coord[..., :-1] -= coord_init[..., :-1]
        coord[..., -1] -= coord[..., -1].min()
    else:
        raise ValueError(f'not support origin={origin}')
    coord = torch.FloatTensor(coord)
    # Features presumably are 0-255 colors, scaled here to [0, 1] — confirm.
    feat = torch.FloatTensor(feat) / 255.
    label = torch.LongTensor(label)
    xyz = torch.FloatTensor(coord)
    return coord, feat, label, xyz | 988a7068d0fc383f86c853b0593189633a87a53d | 3,634,860
def add_wsl_blobs(blobs, im_scales, im_crops, roidb):
    """Add blobs needed for training Fast R-CNN style models.

    Mutates ``blobs`` in place and returns whether the minibatch is valid.
    """
    # Sample training RoIs from each image and append them to the blob lists
    for im_i, entry in enumerate(roidb):
        frcn_blobs = _sample_rois(entry, im_scales[im_i], im_crops[im_i], im_i)
        for k, v in frcn_blobs.items():
            blobs[k].append(v)
    # Concat the training blob lists into tensors
    for k, v in blobs.items():
        if isinstance(v, list) and len(v) > 0:
            blobs[k] = np.concatenate(v)
    # TODO(YH): NOT SUPPORT
    # Add FPN multilevel training RoIs, if configured
    if cfg.FPN.FPN_ON and cfg.FPN.MULTILEVEL_ROIS:
        _add_multilevel_rois(blobs)
    # Perform any final work and validity checks after the collating blobs for
    # all minibatch images
    valid = True
    if cfg.MODEL.KEYPOINTS_ON:
        valid = keypoint_rcnn_roi_data.finalize_keypoint_minibatch(
            blobs, valid)
    return valid | 296ae92af7656be15963508e1562afc5e62fb05b | 3,634,861
def _right_branching(nodes):
    """
    Convert a flat child list into a right-branching binary structure.

    Parameters
    ----------
    nodes: list[T], where T denotes NonTerminal or Terminal

    Returns
    -------
    list[T], where T denotes NonTerminal or Terminal
    """
    # Already binary: nothing to restructure.
    if len(nodes) == 2:
        return nodes
    lhs = nodes[0] # The left-most child node is head
    # Wrap the remaining children into a fresh nonterminal spanning them,
    # inheriting relation/nuclearity from the second child, and recurse.
    index_span = (nodes[1].index_span[0], nodes[-1].index_span[1])
    relation = nodes[1].relation
    nuclearity = nodes[1].nuclearity
    rhs = make_nonterminal(index_span=index_span, relation=relation, nuclearity=nuclearity)
    rhs.children = _right_branching(nodes[1:])
    return [lhs, rhs] | 9bfa47daa95be30b7f9a9ada882e1fdceb1295e5 | 3,634,862
import random
def random_swap(o_a, o_b):
    """Randomly exchange corresponding entries of two observation vectors.

    :param o_a: observation vector a
    :param o_b: observation vector b
    :return: two new lists; at each position the pair (a, b) is either kept
        or swapped with probability 1/2
    """
    swapped_a, swapped_b = [], []
    coin = [True, False]
    for a_val, b_val in zip(o_a, o_b):
        # One coin flip per position decides whether this pair is swapped.
        if random.choice(coin):
            swapped_a.append(b_val)
            swapped_b.append(a_val)
        else:
            swapped_a.append(a_val)
            swapped_b.append(b_val)
    return swapped_a, swapped_b
def parse_multiplicative(index):
    """Parse a multiplicative (*, /, %) expression starting at `index`."""
    op_map = {
        token_kinds.star: expr_nodes.Mult,
        token_kinds.slash: expr_nodes.Div,
        token_kinds.mod: expr_nodes.Mod,
    }
    return parse_series(index, parse_unary, op_map)
def return_factorized_dict(ls):
    """Factorize a list of values and return a {value: code} mapping.

    NaN entries are coded as -1 by pandas; when present, np.nan is inserted
    into the key set at the matching position so it maps to -1.
    """
    codes, uniques = pd.factorize(ls)
    distinct_codes = pd.unique(codes)
    distinct_values = pd.unique(uniques)
    if -1 in distinct_codes:
        # -1 is the pandas sentinel for NaN; align a np.nan key with it.
        nan_pos = np.where(distinct_codes == -1)[0][0]
        distinct_values = np.insert(distinct_values, nan_pos, np.nan)
    return dict(zip(distinct_values, distinct_codes))
def _strip_asserts(source):
    """
    Remove assert method calls from source code.
    Using RedBaron, replace some assert calls with print statements that print the actual
    value given in the asserts. Depending on the calls, the actual value can be the first or second
    argument.
    Parameters
    ----------
    source : str
        String containing source lines.
    Returns
    -------
    str
        Source with asserts removed.
    """
    rb = RedBaron(source)  # convert to RedBaron internal structure
    # findAll is slow, so only check the ones that are present.
    asserts = ['assertAlmostEqual', 'assertLess', 'assertGreater', 'assertEqual',
               'assert_equal_arrays', 'assertTrue', 'assertFalse', 'assert_near_equal',
               'assert_rel_error', 'assert_almost_equal', 'assert_allclose']
    for assert_type in asserts:
        assert_nodes = rb.findAll("NameNode", value=assert_type)
        # Iterate in reverse so removals don't shift positions of nodes
        # still to be processed.
        for i in reversed(range(len(assert_nodes))):
            parent = assert_nodes[i].parent
            # NOTE(review): this removes EVERY element of the parent node's
            # value list, not just the assert call. For a call like
            # self.assertEqual(...) the parent is presumably the whole
            # atomtrailers node, so this wipes the full call expression --
            # confirm that is the intent and that the parent is never a
            # larger container.
            for j in reversed(range(len(parent.value))):
                assert_nodes[i].parent.remove(parent.value[j])
    return rb.dumps() | 4c484635328e9610bd07004cfba63acc4ae039d1 | 3,634,866
def _index_clusters(feat_mat, init_mat):
    """Build a binary hierarchical tree over the supplied top-level clusters.

    Aggregates the features of each initial cluster, L2-normalizes them,
    and runs hierarchical 2-means down to leaves of size 2.
    """
    cluster_feats = init_mat.T.dot(feat_mat).tocsr()
    cluster_feats = skprep.normalize(cluster_feats, "l2", axis=1)
    tree = HierarchicalKMeans.gen(
        feat_mat=cluster_feats,
        kdim=2,
        max_leaf_size=2,
        imbalanced_ratio=0.0,
    )
    return tree.chain
from functools import reduce
def gcd(numbers):
    """Return the greatest common divisor of a sequence of integers.

    Euclid's algorithm is applied pairwise across the sequence.

    Examples
    --------
    >>> gcd([4])
    4
    >>> gcd([3, 6])
    3
    >>> gcd([6, 7])
    1
    """
    def euclid(x, y):
        # Classic Euclid: replace (x, y) with (y, x mod y) until y hits 0.
        while y:
            x, y = y, x % y
        return x
    return reduce(euclid, numbers)
def normal_attention(tensor_base, tensor_to_attend,
                     mask_for_tensor_base,
                     mask_for_tensor_to_attend,
                     similarity_method='inner', hn=100,
                     use_pooling=False, pooling_method='max',
                     reverse=False, scope=None):
    """
    normal_attention for attention strategy 2
    :param tensor_base: rank 3 [bs,sl,vec]
    :param tensor_to_attend: rank 3 [bs,ql,vec]
    :param mask_for_tensor_base: [bs,sl] boolean mask for tensor_base
    :param mask_for_tensor_to_attend: [bs,ql] boolean mask for tensor_to_attend
    :param similarity_method: 'inner' 'tri_linear' 'map_linear'
    :param hn: hidden size; only used by the 'map_linear' similarity method
    :param use_pooling: True or False
    :param pooling_method: 'max' or 'mean' (only used when use_pooling=True)
    :param reverse: if True use reverse_softsel (attention strategy 3)
    :param scope: variable scope name
    :return: if use_pooling is True: a pooled attention over tensor_to_attend
        (presumably [bs,vec]); else a per-step attended tensor [bs,sl,vec].
        NOTE(review): the original docstring said [bs,sl,hn] / [bs,hn], which
        does not match the code -- confirm against softsel's contract.
    """
    with tf.variable_scope(scope or 'normal_attention'):
        # --------parameters--------
        t_main = tensor_base  # [bs,sl,vec]
        t_sec = tensor_to_attend  # [bs,ql,vec]
        mask_main = mask_for_tensor_base  # [bs,sl]
        mask_sec = mask_for_tensor_to_attend  # [bs,ql]
        bs, sl, vec = tf.shape(t_main)[0], tf.shape(t_main)[1], tf.shape(t_main)[2]
        ql = tf.shape(t_sec)[1]
        # -------------------------------
        # --------similarity_mat--------
        # Valid (i, j) pairs are those where both sequences are unmasked.
        mask_main_etd = tf.expand_dims(mask_main, 2)  # bs,sl,1
        mask_sec_etd = tf.expand_dims(mask_sec, 1)  # bs,1,ql
        mask_similarity_mat = tf.logical_and(mask_main_etd, mask_sec_etd)  # bs,sl,ql
        if similarity_method == 'inner':
            # Plain dot-product similarity via broadcasting.
            t_main_etd = tf.expand_dims(t_main, 2)  # bs,sl,1,vec
            t_sec_etd = tf.expand_dims(t_sec, 1)  # bs,1,ql,vec
            similarity_mat = tf.reduce_sum(t_main_etd*t_sec_etd, -1)  # bs,sl,ql
        elif similarity_method == 'tri_linear':
            t_main_tiled = tf.tile(tf.expand_dims(t_main, 2), [1, 1, ql, 1])  # bs,sl,ql,vec
            t_sec_tiled = tf.tile(tf.expand_dims(t_sec, 1), [1, sl, 1, 1])  # bs,sl,ql,vec
            similarity_mat = get_logits([t_main_tiled, t_sec_tiled], None, False,
                                        scope='tri_linear_tri_linear', func='tri_linear')
        elif similarity_method == 'map_linear':
            # Project both sides to hn dims before the dot product.
            t_main_map = tf.nn.relu(linear([t_main], hn, True, scope='linear_map_main'))
            t_sec_map = tf.nn.relu(linear([t_sec], hn, True, scope='linear_map_sec'))
            t_main_map_etd = tf.expand_dims(t_main_map, 2)  # bs,sl,1,hn
            t_sec_map_etd = tf.expand_dims(t_sec_map, 1)  # bs,1,ql,hn
            similarity_mat = tf.reduce_sum(t_main_map_etd * t_sec_map_etd, -1)  # bs,sl,ql
        else:
            raise AttributeError('No similarity matrix calculation method \'%s\'' % similarity_method)
        # -------------------------------
        if use_pooling:
            # pool mat along -2
            if pooling_method == 'max':
                pooling_out = tf.reduce_max(exp_mask(similarity_mat, mask_similarity_mat), -2)  # bs,sl,ql -> bs,ql
            elif pooling_method == 'mean':
                sum_out = tf.reduce_sum(normal_mask(similarity_mat, mask_similarity_mat), -2)  # bs,sl,ql -> bs,ql
                num = tf.reduce_sum(tf.cast(mask_similarity_mat, tf.int32), -2)  # bs,ql
                # Guard against division by zero for fully-masked columns.
                num = tf.where(tf.equal(num, tf.zeros_like(num, tf.int32)),
                               tf.ones_like(num, tf.int32), num)
                pooling_out = sum_out / tf.cast(num, tf.float32)  # bs,ql
            else:
                raise AttributeError('No pooling method \'%s\'' % pooling_method)
            return softsel(t_sec, pooling_out, mask_sec)  # bs,ql,vec -> bs,ql
        else:
            t_sec_tiled = tf.tile(tf.expand_dims(t_sec, 1), [1, sl, 1, 1])  # bs,sl,ql,vec
            # target: q_tiled:[bs,sl,ql,hn]; logits: [bs,sl,ql]
            if not reverse:
                out = normal_softsel(t_sec_tiled, similarity_mat, mask_similarity_mat)
            else:
                out = reverse_softsel(t_sec_tiled, similarity_mat, mask_similarity_mat)
            return out | 93df4d084bb76cba4ab227928de6eb72e7f5af76 | 3,634,869
from bs4 import BeautifulSoup
def getPageNum(html):
    """Parse the first result page and return the total number of review
    pages for this user (read from the second-to-last paginator link)."""
    parsed = BeautifulSoup(html, 'html.parser')
    pager = parsed.find('div', 'paginator')
    links = pager.findAll('a')
    return int(links[-2].text)
async def get_user(api_management_name=None,resource_group_name=None,user_id=None,opts=None):
    """
    Use this data source to access information about an existing API Management User.
    > This content is derived from https://github.com/terraform-providers/terraform-provider-azurerm/blob/master/website/docs/d/api_management_user.html.markdown.

    :param api_management_name: name of the API Management service the user belongs to.
    :param resource_group_name: name of the resource group containing the service.
    :param user_id: identifier of the user to look up.
    :param opts: invoke options forwarded to the provider runtime.
    :return: GetUserResult populated from the provider invoke response.
    """
    __args__ = dict()

    __args__['apiManagementName'] = api_management_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['userId'] = user_id
    # Delegates the lookup to the azurerm provider via the runtime invoke API.
    __ret__ = await kulado.runtime.invoke('azure:apimanagement/getUser:getUser', __args__, opts=opts)

    return GetUserResult(
        api_management_name=__ret__.get('apiManagementName'),
        email=__ret__.get('email'),
        first_name=__ret__.get('firstName'),
        last_name=__ret__.get('lastName'),
        note=__ret__.get('note'),
        resource_group_name=__ret__.get('resourceGroupName'),
        state=__ret__.get('state'),
        user_id=__ret__.get('userId'),
        id=__ret__.get('id')) | c5bdf6aa7d0c69a59b9b8f6a93f63a9cba9a00cd | 3,634,871
def makeCC_allpair(spikes, Begin, End, N_thred):
    """Compute cross-correlograms (CCs) for all neuron pairs between Begin and End.

    Args:
        spikes: list of spike trains, one per neuron.
        Begin, End: int, time window (ms) over which the CCs are computed.
        N_thred: number of parallel workers handed to joblib.

    Returns:
        X: CCs per pair, shape (n_pairs, cc_width).
        index: neuron index pairs, shape (n_pairs, 2); index[i][0] is the
            post-synaptic (target) neuron, index[i][1] the pre-synaptic one.
    """
    #-----------------------------------------------------
    def func(i, j, spiketrain, Begin, End):
        """Parallel worker: auto-CC when i == j, cross-CC when i < j.

        Pairs with i > j return None; they are reconstructed afterwards by
        reversing the corresponding i < j correlogram.
        """
        if i > j:
            pass  # covered later by mirroring the i < j result
        elif i == j:
            histogram = computeAutoCC(spiketrain[i], Begin, End)
            return i, j, histogram
        else:
            return computeHist(i, j, spiketrain, Begin, End)
    #-----------------------------------------------------
    # Compute all CCs in parallel.
    N_neuron = len(spikes)
    print("Compute Cross-Correlogram between {} ms and {} ms".format(Begin, End))
    result = Parallel(n_jobs=N_thred)([delayed(func)(i, j, spikes, Begin, End)
                                       for i in range(N_neuron) for j in range(N_neuron)])
    # Flatten the worker results into (pair index, CC) arrays.
    X = []
    index = []
    for item in result:
        # Entries for i > j were skipped and come back as None.
        if item is None:  # BUGFIX: was `== None`; identity check is the correct idiom
            continue
        post, pre, cc = item
        if post == pre:
            # Auto-correlogram (i == j).
            index.append([post, pre])
            X.append(cc)
        else:
            # Cross-correlogram for i < j ...
            index.append([post, pre])
            X.append(cc)
            # ... plus the mirrored i > j pair, with the CC reversed.
            index.append([pre, post])
            X.append(cc[::-1])
    return np.array(X), np.array(index)
def move_position(facility, cur_position, direction):
    """Advance cur_position one step at a time in `direction`, writing first
    the door cell and then the room cell into `facility`; return the final
    (row, col)."""
    delta = DIRECTIONS[direction]
    door_cell = DIRECTION_TO_DOOR[direction]
    for marker in (door_cell, ROOM):
        cur_position = [coord + step for coord, step in zip(cur_position, delta)]
        row, col = cur_position
        facility[row][col] = marker
    return row, col
from typing import Callable
import itertools
from typing import OrderedDict
import pprint
def search_for_improvements(
        targets        : [Exp],
        wf_solver      : ModelCachingSolver,
        context        : Context,
        examples       : [{str:object}],
        cost_model     : CostModel,
        stop_callback  : Callable[[], bool],
        hints          : [Exp],
        ops            : [Op],
        blacklist      : {(Exp, Context, Pool, Exp) : str}):
    """Search for potential improvements to any of the target expressions.
    This function yields expressions that look like improvements (or are
    ambiguous with respect to some target). The expressions are only
    guaranteed to be correct on the given examples.
    This function may add new items to the given blacklist.
    """
    root_ctx = context
    def check_wf(e, ctx, pool):
        # Well-formedness / usefulness / cost filter applied to every
        # candidate the enumerator produces.
        with task("pruning", size=e.size()):
            is_wf = exp_wf(e, pool=pool, context=ctx, solver=wf_solver)
            if not is_wf:
                return is_wf
            res = possibly_useful(wf_solver, e, ctx, pool, ops=ops)
            if not res:
                return res
            if cost_pruning.value and pool == RUNTIME_POOL and cost_model.compare(e, targets[0], ctx, pool) == Order.GT:
                return No("too expensive")
            return True
    with task("setting up hints"):
        frags = list(unique(itertools.chain(
            *[all_subexpressions_with_context_information(t, root_ctx) for t in targets],
            *[all_subexpressions_with_context_information(h, root_ctx) for h in hints])))
        frags.sort(key=hint_order)
    enum = Enumerator(
        examples=examples,
        cost_model=cost_model,
        check_wf=check_wf,
        hints=frags,
        heuristics=try_optimize,
        stop_callback=stop_callback,
        do_eviction=enable_eviction.value)
    target_fp = Fingerprint.of(targets[0], examples)
    with task("setting up watches"):
        # Group watched subexpressions by context, then by (fingerprint,
        # context, pool) so enumerated candidates can be matched quickly.
        watches_by_context = OrderedDict()
        for target in targets:
            for e, ctx, pool in unique(all_subexpressions_with_context_information(target, context=root_ctx, pool=RUNTIME_POOL)):
                l = watches_by_context.get(ctx)
                if l is None:
                    l = []
                    watches_by_context[ctx] = l
                l.append((target, e, pool))
        watches = OrderedDict()
        for ctx, exprs in watches_by_context.items():
            exs = ctx.instantiate_examples(examples)
            for target, e, pool in exprs:
                fp = Fingerprint.of(e, exs)
                k = (fp, ctx, pool)
                l = watches.get(k)
                if l is None:
                    l = []
                    watches[k] = l
                l.append((target, e))
    watched_ctxs = list(unique((ctx, pool) for _, _, ctx, pool in exploration_order(targets, root_ctx)))
    search_info = SearchInfo(
        context=root_ctx,
        targets=targets,
        target_fingerprint=target_fp,
        examples=examples,
        check_wf=check_wf,
        cost_model=cost_model,
        blacklist=blacklist)
    # Iterative deepening over candidate expression size.
    size = 0
    while True:
        print("starting minor iteration {} with |cache|={}".format(size, enum.cache_size()))
        if stop_callback():
            raise StopException()
        # Phase 1: substitutions whose fingerprint matches a watched
        # subexpression exactly ("obvious" substitutions).
        for ctx, pool in watched_ctxs:
            with task("searching for obvious substitutions", ctx=ctx, pool=pool_name(pool)):
                for info in enum.enumerate_with_info(size=size, context=ctx, pool=pool):
                    with task("searching for obvious substitution", expression=pprint(info.e)):
                        fp = info.fingerprint
                        for ((fpx, cc, pp), reses) in watches.items():
                            if cc != ctx or pp != pool:
                                continue
                            if not fpx.equal_to(fp):
                                continue
                            for target, watched_e in reses:
                                replacement = info.e
                                event("possible substitution: {} ---> {}".format(pprint(watched_e), pprint(replacement)))
                                event("replacement locations: {}".format(pprint(replace(target, root_ctx, RUNTIME_POOL, watched_e, ctx, pool, EVar("___")))))
                                if alpha_equivalent(watched_e, replacement):
                                    event("no change")
                                    continue
                                yield from _consider_replacement(target, watched_e, ctx, pool, replacement, search_info)
        # Phase 2 (optional): blind substitutions -- try candidates whose
        # fingerprint does not match, filtered by should_consider_replacement.
        if check_blind_substitutions.value:
            print("Guessing at substitutions...")
            for target, e, ctx, pool in exploration_order(targets, root_ctx):
                with task("checking substitutions",
                        target=pprint(replace(target, root_ctx, RUNTIME_POOL, e, ctx, pool, EVar("___"))),
                        e=pprint(e)):
                    for info in enum.enumerate_with_info(size=size, context=ctx, pool=pool):
                        with task("checking substitution", expression=pprint(info.e)):
                            if stop_callback():
                                raise StopException()
                            replacement = info.e
                            if replacement.type != e.type:
                                event("wrong type (is {}, need {})".format(pprint(replacement.type), pprint(e.type)))
                                continue
                            if alpha_equivalent(replacement, e):
                                event("no change")
                                continue
                            should_consider = should_consider_replacement(
                                target, root_ctx,
                                e, ctx, pool, Fingerprint.of(e, ctx.instantiate_examples(examples)),
                                info.e, info.fingerprint)
                            if not should_consider:
                                event("skipped; `should_consider_replacement` returned {}".format(should_consider))
                                continue
                            yield from _consider_replacement(target, e, ctx, pool, replacement, search_info)
        if not enum.expressions_may_exist_above_size(context, RUNTIME_POOL, size):
            raise StopException("no more expressions can exist above size={}".format(size))
        size += 1 | 374a5a16f6ec4e69e983613aedbb298df1411cca | 3,634,874
def down_sample(fft_vec, freq_ratio):
    """
    Downsamples the provided data vector.

    Keeps only the central ``freq_ratio`` fraction of the (already
    FFT-shifted) spectrum, rescales it to conserve amplitude, and
    inverse-transforms back to the time domain.

    Parameters
    ----------
    fft_vec : 1D complex numpy array
        Waveform that is already FFT shifted
    freq_ratio : float
        new sampling rate / old sampling rate (must be less than 1)

    Returns
    -------
    1D complex numpy array
        downsampled waveform (time domain)
    """
    if freq_ratio >= 1:
        warn('Error at downSample: New sampling rate > old sampling rate')
        return fft_vec
    vec_len = len(fft_vec)
    # BUGFIX: slice bounds must be ints. The original used np.round(...) and
    # 0.5 * vec_len directly, which are floats and raise TypeError when used
    # as indices on Python 3 / modern numpy.
    half_width = int(np.round(vec_len * (0.5 * float(freq_ratio))))
    center = vec_len // 2
    fft_vec = fft_vec[max(center - half_width, 0):min(center + half_width, vec_len)]
    # Rescale so the downsampled signal keeps the original amplitude.
    fft_vec = fft_vec * freq_ratio * 2
    return np.fft.ifft(np.fft.ifftshift(fft_vec))
import os
import json
def main(args=None):
    """Entry point: load settings, open the camera, and spin the publisher node."""
    # Load publishing parameters from the package's settings file.
    settings_path = os.path.join(
        get_package_share_directory('ros2_camera_publish'), "settings.json")
    with open(settings_path) as fp:
        config = json.load(fp)
    # Open the camera device with a small internal frame buffer.
    capture = cv2.VideoCapture(config["device_index"])
    capture.set(cv2.CAP_PROP_BUFFERSIZE, 2)
    # Initialize ROS and spin the publisher node until shutdown.
    rclpy.init(args=args)
    node = CameraPublisher(capture, config["topic"],
                           config["queue_size"],
                           config["period"])
    rclpy.spin(node)
    # Tear down the node, the ROS context, and the camera handle.
    node.destroy_node()
    rclpy.shutdown()
    capture.release()
    return None
import torch
def set_device(cuda: bool) -> int:
    """Select the device for computation.

    Args:
        cuda (bool): Use the GPU when one is available.

    Returns:
        int: Index of the currently selected CUDA device, or -1 for CPU.
    """
    if cuda and torch.cuda.is_available():
        return int(torch.cuda.current_device())
    return -1
import torch
def hsic_regular(x, y, sigma=None, use_cuda=True, to_numpy=False):
    """Mean of the elementwise product of the kernel matrices of x and y.

    Note: ``use_cuda`` and ``to_numpy`` are accepted for interface
    compatibility but are not used by this implementation.
    """
    kx = kernelmat(x, sigma)
    ky = kernelmat(y, sigma)
    return torch.mean(torch.mul(kx, ky.t()))
def random_correlation(size, n_factors, random_seed=None):
    """
    Generate a random correlation matrix with `size` rows/columns whose
    underlying covariance structure has `n_factors` factors.

    :param size: int. Size of the correlation matrix
    :param n_factors: int. number of factors in the correlation structure
    :param random_seed: int. random seed number
    :return: numpy.array. correlation matrix
    """
    return cov2corr(random_covariance(size, n_factors, random_seed))
import urllib
import requests
def http_get_request(url, params, add_to_headers=None, _async=False):
    """
    from 火币demo (Huobi demo), HTTP GET helper.

    :param url: request URL
    :param params: dict of query parameters (urlencoded before sending)
    :param add_to_headers: optional extra headers merged into the defaults
    :param _async: when True, issue the request on the async session and
        return the pending response object instead of parsed JSON
    :return: parsed JSON dict on HTTP 200, the pending response when
        _async=True, or None on failure
    """
    headers = {
        'Content-type':
        'application/x-www-form-urlencoded',
        'User-Agent':
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36',
    }
    if add_to_headers:
        headers.update(add_to_headers)
    postdata = urllib.parse.urlencode(params)
    if _async:
        response = async_session.get(url, params=postdata, headers=headers, timeout=5)
        return response
    response = None
    try:
        response = requests.get(url, postdata, headers=headers, timeout=5)
        if response.status_code == 200:
            return response.json()
        else:
            logger.debug(
                f'<GET>error_code:{response.status_code} reason:{response.reason} detail:{response.text}')
            return
    except Exception as e:
        # BUGFIX: the original logged `response.text` unconditionally here,
        # raising NameError when requests.get itself failed and `response`
        # was never bound. Guard against the unbound case.
        detail = response.text if response is not None else 'no response'
        logger.exception(f'<GET>httpGet failed, detail is:{detail},{e}')
        return
def eigendecompose(S):
    """Return the eigenvalues and eigenvector matrix of the input matrix S."""
    values, vectors = np.linalg.eig(S)
    return values, vectors
def _CreateSampleDirectoryCoverageData(builder='linux-code-coverage',
                                       modifier_id=0):
    """Returns a sample directory SummaryCoverageData for testing purpose.
    Note: only use this method if the exact values don't matter.
    """
    # Directory summary plus a single contained file, each with its own
    # freshly-created sample metric object.
    data = {
        'dirs': [],
        'path': '//dir/',
        'summaries': _CreateSampleCoverageSummaryMetric(),
        'files': [{
            'path': '//dir/test.cc',
            'name': 'test.cc',
            'summaries': _CreateSampleCoverageSummaryMetric(),
        }],
    }
    return SummaryCoverageData.Create(
        server_host='chromium.googlesource.com',
        project='chromium/src',
        ref='refs/heads/main',
        revision='aaaaa',
        data_type='dirs',
        path='//dir/',
        bucket='coverage',
        builder=builder,
        modifier_id=modifier_id,
        data=data)
def sched_time(update_time) -> int:
    """Return the number of seconds between the current time and
    `update_time` (an HH:MM-style value; negative if already past)."""
    now_seconds = hhmm_to_seconds(current_time_hhmm())
    return hhmm_to_seconds(update_time) - now_seconds
def http_jsonrpc_post(hostname, port, uri, method, params):
    """Perform a plain HTTP JSON-RPC 2.0 POST (for task farming)."""
    url = "http://%s:%s%s" % (hostname, port, uri)
    payload = simplejson.dumps({'jsonrpc': '2.0', 'id': 1,
                                'method': method, 'params': params})
    request = urllib2.Request(url, payload,
                              {'Content-Type': 'application/json'})
    return urllib2.urlopen(request).read()
def get_user_event(current_user, event_id):
    """
    Look up the event with the given id among the user's events.
    :param event_id: Event Id
    :param current_user: User
    :return: the matching event, or None when the user has no such event
    """
    user = User.get_by_id(current_user.id)
    return user.events.filter_by(event_id=event_id).first()
def get_distances(scr_data_dict, distance_keys, data_centroid=None):
    """
    Compute several distance measures over a nested score-data structure.

    @param scr_data_dict: nested dict dataset -> classifier -> group -> user
        of score tables (presumably pandas DataFrames -- .mean(), .apply(axis=1)
        and .iloc are used on them)
    @param distance_keys: iterable of distance-type names accepted by dist_calc
    @param data_centroid: Do not provide if calculating distances for establishing a thresholding
        (it will then be computed as the per-user mean of the scores)
    @return: dict with keys 'data_centroid', 'sample_centroid_dis',
        'p1p2_dis', 'p1p3_dis', 'p2p3_dis'
    """
    dataset_keys = scr_data_dict.keys()
    # Extracting dataset point of interests
    if data_centroid is None:
        # Per-user centroid = mean of that user's score rows.
        data_centroid = {ds: {clf: {gr: {usr: scr_data_dict[ds][clf][gr][usr].mean()
                                         for usr in scr_data_dict[ds][clf][gr].keys()}
                                    for gr in scr_data_dict[ds][clf].keys()}
                              for clf in scr_data_dict[ds].keys()}
                         for ds in dataset_keys}
    else:
        data_centroid = data_centroid
    # Distance of every sample (row) from its user's centroid, per distance type.
    sample_dis_fr_cent = {
        ds: {
            clf: {gr: {
                usr: {dis: scr_data_dict[ds][clf][gr][usr].apply(lambda x: dist_calc(x, data_centroid[ds][clf][gr][usr],
                                                                                     dis_type=dis), axis=1).values
                      for dis in distance_keys}
                for usr in scr_data_dict[ds][clf][gr].keys()}
                for gr in scr_data_dict[ds][clf].keys()}
            for clf in scr_data_dict[ds].keys()}
        for ds in dataset_keys}
    # Distance between consecutive samples: row i+1 vs row i (lag 1).
    p1p2_dist = {
        ds: {clf: {gr: {
            usr: {dis: np.array(
                [dist_calc(scr_data_dict[ds][clf][gr][usr].iloc[i + 1, :], scr_data_dict[ds][clf][gr][usr].iloc[i, :],
                           dis_type=dis) for i in np.arange(len(scr_data_dict[ds][clf][gr][usr]) - 2)])
                for dis in distance_keys}
            for usr in scr_data_dict[ds][clf][gr].keys()}
            for gr in scr_data_dict[ds][clf].keys()}
            for clf in scr_data_dict[ds].keys()}
        for ds in dataset_keys}
    # Distance between row i+2 and row i (lag 2).
    p1p3_dist = {
        ds: {clf: {gr: {
            usr: {dis: np.array(
                [dist_calc(scr_data_dict[ds][clf][gr][usr].iloc[i + 2, :], scr_data_dict[ds][clf][gr][usr].iloc[i, :],
                           dis_type=dis) for i in np.arange(len(scr_data_dict[ds][clf][gr][usr]) - 2)])
                for dis in distance_keys}
            for usr in scr_data_dict[ds][clf][gr].keys()}
            for gr in scr_data_dict[ds][clf].keys()}
            for clf in scr_data_dict[ds].keys()}
        for ds in dataset_keys}
    # Distance between row i+1 and row i+2.
    p2p3_dist = {
        ds: {clf: {gr: {
            usr: {dis: np.array(
                [dist_calc(scr_data_dict[ds][clf][gr][usr].iloc[i + 1, :],
                           scr_data_dict[ds][clf][gr][usr].iloc[i + 2, :],
                           dis_type=dis) for i in np.arange(len(scr_data_dict[ds][clf][gr][usr]) - 2)])
                for dis in distance_keys}
            for usr in scr_data_dict[ds][clf][gr].keys()}
            for gr in scr_data_dict[ds][clf].keys()}
            for clf in scr_data_dict[ds].keys()}
        for ds in dataset_keys}
    return {'data_centroid': data_centroid, 'sample_centroid_dis': sample_dis_fr_cent, 'p1p2_dis': p1p2_dist,
            'p1p3_dis': p1p3_dist, 'p2p3_dis': p2p3_dist} | c0aec2de89bd7c7150d3f5fcbc3161f9f2d441c0 | 3,634,886
def compare_numeric_abundances(values_in_taxa_list_1, values_in_taxa_list_2):
    """Return a Pandas Series with [abundance-in, abundance-out, p-value]."""
    abundance_in = mean_ignore_nans(values_in_taxa_list_1)
    abundance_out = mean_ignore_nans(values_in_taxa_list_2)
    # Draw 10^4 weighted samples from each distribution; the dict values are
    # passed positionally, so they presumably act as sampling weights.
    sample_1 = choices(list(values_in_taxa_list_1.keys()),
                       list(values_in_taxa_list_1.values()), k=10 ** 4)
    sample_2 = choices(list(values_in_taxa_list_2.keys()),
                       list(values_in_taxa_list_2.values()), k=10 ** 4)
    ks_result = stats.ks_2samp(sample_1, sample_2)
    return pd.Series({
        'abundance_in': abundance_in,
        'abundance_out': abundance_out,
        'p-value': ks_result.pvalue,
    })
def parse_folder(folder, start_time=None, grepcmd=None, tmp_folder=None, force_grep=False):
    """
    Parse every log file under `folder` (recursively, in sorted order).

    Args:
        grepcmd, tmp_folder, force_grep's default value should be the same with parse_single_log_with_pregrep
    Yields:
        [[yield], []]: nested yields -- one parser generator per log file
    """
    def get_first_last_line(filepath):
        # Returns the first and last raw line of the file.
        # NOTE(review): the file handle is never closed here, and doubling
        # `offset` can seek before the start of small files -- confirm these
        # are acceptable for this code base (Python 2 era).
        f = open(filepath, 'rb')
        filesize = os.fstat(f.fileno()).st_size
        if filesize == 0:
            return '', ''
        offset = -min(1024, filesize)
        first = next(f)
        while True:
            # Read a growing tail window until it contains at least one line.
            f.seek(offset, os.SEEK_END)
            lines = f.readlines()
            if len(lines) >= 1:
                last = lines[-1]
                break
            offset *= 2
        return first, last
    all_files = get_recursive_files(folder)
    # It is very IMPORTANT to parse each log file in order
    # since the other application may find the last row in database to resume its operation
    all_files.sort()
    for each_file in all_files:
        if start_time:
            # Skip files whose last row predates start_time.
            first, last = get_first_last_line(each_file)
            last_row = list(parse_lines([last], start_time))
            if last_row:
                if grepcmd:
                    yield parse_single_log_with_pregrep(each_file,
                        grepcmd=grepcmd, start_time=start_time, tmp_folder=tmp_folder, force_grep=force_grep)
                else:
                    yield parse_single_log(each_file, start_time=start_time)
            else:
                try:
                    output('%s ignored, lastline: %s, start_time: %s' % (each_file, last, start_time))
                except UnicodeDecodeError as e:
                    print e, each_file, start_time, last
        else:
            if grepcmd:
                yield parse_single_log_with_pregrep(each_file,
                    grepcmd=grepcmd, start_time=start_time, tmp_folder=tmp_folder, force_grep=force_grep)
            else:
                yield parse_single_log(each_file) | b8d233ba8ad579d38e3353b841ea03d52bb83962 | 3,634,888
def search(request):
    """
    search function which reads get data from a requests and uses it to find stuff in elasticsearch
    """
    context = {"table": []}
    if "subject" in request.GET or "predicate" in request.GET or "object" in request.GET:
        es = elasticsearch.Elasticsearch(settings.ELASTICSEARCH)
        # create search pattern
        # Each whitespace-separated token of each field becomes a
        # case-insensitive wildcard clause; all clauses must match.
        wildcards = []
        wildcards.extend([{"wildcard": {"subject": {"value": f"*{item}*", "case_insensitive": True}}}
                          for item in request.GET["subject"].split()])
        wildcards.extend([{"wildcard": {"predicate": {"value": f"*{item}*", "case_insensitive": True}}}
                          for item in request.GET["predicate"].split()])
        wildcards.extend([{"wildcard": {"object": {"value": f"*{item}*", "case_insensitive": True}}}
                          for item in request.GET["object"].split()])
        search = {"size": 1000,
                  "query":
                      {"bool":
                           {"must": wildcards}}}
        res = es.search(body=search, index="jvmg_search",)
        # change scoring and rewrite_URLs
        # Re-rank hits by string similarity to the raw query terms, and
        # rewrite URIs into local links for the template.
        new_score_res = []
        for item in res["hits"]["hits"]:
            score = SequenceMatcher(None, item["_source"]["subject"], request.GET["subject"]).ratio()
            score += SequenceMatcher(None, item["_source"]["predicate"], request.GET["predicate"]).ratio()
            score += SequenceMatcher(None, item["_source"]["object"], request.GET["object"]).ratio()
            object_link = None
            # Only URI objects get a clickable link; literals stay plain text.
            if isinstance(item["_source"]["object"], URIRef):
                object_link = rewrite_URL(item["_source"]["object"])
            new_item = {"subject_link": rewrite_URL(item["_source"]["subject"]),
                        "subject": item["_source"]["subject"],
                        "predicate_link": rewrite_URL(item["_source"]["predicate"]),
                        "predicate": item["_source"]["predicate"],
                        "object_link": object_link,
                        "object": item["_source"]["object"],
                        "score": score}
            new_score_res.append(new_item)
        new_score_res.sort(key=lambda item: item["score"], reverse=True)
        context = {"table": new_score_res,
                   "table_len": len(new_score_res),
                   "subject": request.GET["subject"],
                   "predicate": request.GET["predicate"],
                   "object": request.GET["object"]}
    return render(request, "jvmg/search.html", context) | 85b1daba7d6a815f3e465b23ef369d439834de9c | 3,634,889
import os
def Write_data(data_name,parameters,metrics_name_list,length_input,metrics_mean_list,metrics_std_dev_list):
    """Writes the metrics of a given simulation in a datasheet in .txt format.

    The file is created inside the 'Data' subdirectory of the current
    working directory.

    Parameters
    ----------
    data_name: string
        Desired name of data_sheet archive.
    parameters: string
        Free-form parameter description written as the first line.
    metrics_name_list: list of strings
        Names of used metrics
    length_input: int
        Size of sigma_input_list.
    metrics_mean_list: array
        Array with the mean values of each metric in each input; row j must
        hold len(metrics_name_list) + 1 entries (sigma first).
    metrics_std_dev_list: array
        Array with the standard deviation of each metric in each input.

    Return
    ------
    None
    """
    num_metrics = len(metrics_name_list)
    # Header: sigma columns first, then a mean/std pair per metric.
    variables_name_list = ['Sigma_mean', 'Sigma_std_dev']
    for metrics_name in metrics_name_list:
        variables_name_list.append(metrics_name + '_mean')
        variables_name_list.append(metrics_name + '_std_dev')
    os.chdir('Data')
    try:
        # BUGFIX: use a context manager and try/finally so the file handle is
        # always closed and the working directory restored even when writing
        # fails (the original left both dangling on an exception).
        with open(data_name + '.txt', 'w') as data:
            data.write(parameters + '\n')
            data.write('#' + '\t'.join(variables_name_list) + '\n')
            data_line = ''
            for j in range(length_input):
                for k in range(num_metrics + 1):
                    m = metrics_mean_list[j][k]
                    s = metrics_std_dev_list[j][k]
                    data_line += '{}\t{}'.format(m, s)
                    data_line += '\t'
                data_line += '\n'
            data.write(data_line)
    finally:
        os.chdir('..')
    return None
from typing import Dict
def parse_measurement_lines(xml_root: Element) -> Dict[int, LineString]:
    """Parses the measurement line from the given xml root
    Args:
        xml_root (ET.ElementTree): root of the xml file
    Returns:
        measurement lines to use in the analysis (id, line)
    """
    measurement_lines = {}
    if xml_root.find("measurement_areas") is None:
        raise IniFileParseException(
            "There could no measurement_areas tag be found in your ini-file, but it is mandatory."
        )
    for measurement in xml_root.iter("measurement_areas"):
        # Only metres are supported as the length unit; attribute is optional.
        parse_xml_attrib(
            measurement,
            "unit",
            str,
            (
                lambda x: x.lower() == "m",
                "Only 'm' is supported as unit in the measurement areas tag: ",
            ),
            mandatory=False,
        )
        for measurement_line in measurement.iter("area_L"):
            line_id = parse_xml_attrib(
                measurement_line,
                "id",
                int,
                (
                    lambda x: x not in measurement_lines,
                    "There is a duplicated ID in your measurement lines: ",
                ),
            )
            start_node = measurement_line.find("start")
            # NOTE(review): this branch raises ValueError while the matching
            # end-node check below raises IniFileParseException -- likely an
            # unintended inconsistency; confirm which exception callers expect.
            if start_node is None:
                raise ValueError(
                    "The measurement_areas/area_L tag is incomplete, it should contain a start"
                    " child, e.g., \n"
                    '<area_L id="1">\n'
                    '    <start x="62.0" y="102.600"/>\n'
                    '    <end x="62.0" y="101.400"/>\n'
                    "</area_L>"
                )
            start_x = parse_xml_attrib(
                start_node,
                "x",
                float,
            )
            start_y = parse_xml_attrib(
                start_node,
                "y",
                float,
            )
            end_node = measurement_line.find("end")
            if end_node is None:
                raise IniFileParseException(
                    "The measurement_areas/area_L tag is incomplete, it should contain a end "
                    "child, e.g., \n"
                    '<area_L id="1">\n'
                    '    <start x="62.0" y="102.600"/>\n'
                    '    <end x="62.0" y="101.400"/>\n'
                    "</area_L>"
                )
            end_x = parse_xml_attrib(
                end_node,
                "x",
                float,
            )
            end_y = parse_xml_attrib(
                end_node,
                "y",
                float,
            )
            line = LineString([Point(start_x, start_y), Point(end_x, end_y)])
            # Degenerate (near-zero-length) lines cannot be used as
            # measurement lines.
            if line.length <= 1e-5:
                raise IniFileValueException(
                    f"The measurement line {id} is too narrow. Check your start and end point."
                    f"Distance between start and end is {line.length}."
                    "Please check your ini-file."
                )
            measurement_lines[line_id] = line
    return measurement_lines | 8e7ff2a75189e8f6c1960f6849c1b63bc1ff4821 | 3,634,891
from typing import Any
from typing import Optional
def compute_and_apply_approximate_vocabulary(
    x: common_types.ConsistentTensorType,
    default_value: Any = -1,
    top_k: Optional[int] = None,
    num_oov_buckets: int = 0,
    vocab_filename: Optional[str] = None,
    weights: Optional[tf.Tensor] = None,
    file_format: common_types.VocabularyFileFormatType = analyzers
    .DEFAULT_VOCABULARY_FILE_FORMAT,
    name: Optional[str] = None) -> common_types.ConsistentTensorType:
  """Generates an approximate vocabulary for `x` and maps it to an integer.
  Args:
    x: A `Tensor` or `CompositeTensor` of type tf.string or tf.int[8|16|32|64].
    default_value: The value to use for out-of-vocabulary values, unless
      'num_oov_buckets' is greater than zero.
    top_k: Limit the generated vocabulary to the first `top_k` elements. If set
      to None, the full vocabulary is generated.
    num_oov_buckets: Any lookup of an out-of-vocabulary token will return a
      bucket ID based on its hash if `num_oov_buckets` is greater than zero.
      Otherwise it is assigned the `default_value`.
    vocab_filename: The file name for the vocabulary file. If None, a name based
      on the scope name in the context of this graph will be used as the file
      name. If not None, should be unique within a given preprocessing function.
      NOTE in order to make your pipelines resilient to implementation details
      please set `vocab_filename` when you are using the vocab_filename on a
      downstream component.
    weights: (Optional) Weights `Tensor` for the vocabulary. It must have the
      same shape as x.
    file_format: (Optional) A str. The format of the resulting vocabulary file.
      Accepted formats are: 'tfrecord_gzip', 'text'. 'tfrecord_gzip' requires
      tensorflow>=2.4. The default value is 'text'.
    name: (Optional) A name for this operation.
  Returns:
    A `Tensor` or `CompositeTensor` where each string value is mapped to an
    integer. Each unique string value that appears in the vocabulary
    is mapped to a different integer and integers are consecutive starting from
    zero. String value not in the vocabulary is assigned default_value.
    Alternatively, if num_oov_buckets is specified, out of vocabulary strings
    are hashed to values in [vocab_size, vocab_size + num_oov_buckets) for an
    overall range of [0, vocab_size + num_oov_buckets).
  Raises:
    ValueError: If `top_k` is negative.
      If `file_format` is not in the list of allowed formats.
      If x.dtype is not string or integral.
  """
  with tf.compat.v1.name_scope(name,
                               'compute_and_apply_approximate_vocabulary'):
    # Thin convenience wrapper: build the approximate-vocabulary analyzer,
    # then map `x` through the resulting (deferred) vocabulary file.
    deferred_vocab_and_filename = experimental_analyzers.approximate_vocabulary(
        x=x,
        top_k=top_k,
        vocab_filename=vocab_filename,
        weights=weights,
        file_format=file_format,
        name=name)
    return mappers.apply_vocabulary(
        x,
        deferred_vocab_and_filename,
        default_value,
        num_oov_buckets,
        file_format=file_format) | 9662796984852ff255ca923e1048121427debfe6 | 3,634,892
import io
def album_cover():
    """Serve the current song's album art as a JPEG response."""
    raw = app.config["player"].album_cover()
    # Fall back to a bundled placeholder image when the player has no artwork.
    source = io.BytesIO(raw) if raw else "static/no_cover.jpg"
    return send_file(source, mimetype="image/jpeg")
def verify_bgp_community(
    tgen,
    addr_type,
    router,
    network,
    input_dict=None,
    vrf=None,
    bestpath=False,
    expected=True,
):
    """
    API to verify that a BGP (large) community attribute is attached to a
    route on any given DUT by running the
    "show bgp ipv4/6 {route address} json" command.
    Parameters
    ----------
    * `tgen`: topogen object
    * `addr_type` : ip type, ipv4/ipv6
    * `router`: Device Under Test (name of a router known to `tgen`)
    * `network`: list of prefixes for which the set criteria is verified
    * `input_dict`: mapping of attribute name ("community"/"largeCommunity")
      to the exact attribute string expected in the JSON output
    * `vrf`: VRF name
    * `bestpath`: To check best path cli
    * `expected` : expected results from API, by-default True
    Usage
    -----
    networks = ["200.50.2.0/32"]
    input_dict = {
        "largeCommunity": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5"
    }
    result = verify_bgp_community(tgen, "ipv4", dut, network, input_dict=None)
    Returns
    -------
    errormsg(str) or True
    """
    logger.debug("Entering lib API: verify_bgp_community()")
    # NOTE(review): this path returns False while every other failure path
    # returns an error-message string — callers comparing against True still
    # work, but the inconsistency is worth confirming upstream.
    if router not in tgen.routers():
        return False
    rnode = tgen.routers()[router]
    logger.info(
        "Verifying BGP community attributes on dut %s: for %s " "network %s",
        router,
        addr_type,
        network,
    )
    command = "show bgp"
    # Verify each prefix independently; the first failing prefix aborts the
    # whole check with an error message.
    for net in network:
        if vrf:
            cmd = "{} vrf {} {} {} json".format(command, vrf, addr_type, net)
        elif bestpath:
            cmd = "{} {} {} bestpath json".format(command, addr_type, net)
        else:
            cmd = "{} {} {} json".format(command, addr_type, net)
        show_bgp_json = run_frr_cmd(rnode, cmd, isjson=True)
        # No "paths" key means the prefix is absent from the BGP table.
        if "paths" not in show_bgp_json:
            return "Prefix {} not found in BGP table of router: {}".format(net, router)
        as_paths = show_bgp_json["paths"]
        found = False
        # Inspect every advertised path; `found` records whether at least one
        # path carries a (large) community attribute.
        for i in range(len(as_paths)):
            if (
                "largeCommunity" in show_bgp_json["paths"][i]
                or "community" in show_bgp_json["paths"][i]
            ):
                found = True
                logger.info(
                    "Large Community attribute is found for route:" " %s in router: %s",
                    net,
                    router,
                )
                # When expected values were supplied, compare each requested
                # attribute's rendered string against the JSON output.
                if input_dict is not None:
                    for criteria, comm_val in input_dict.items():
                        show_val = show_bgp_json["paths"][i][criteria]["string"]
                        if comm_val == show_val:
                            logger.info(
                                "Verifying BGP %s for prefix: %s"
                                " in router: %s, found expected"
                                " value: %s",
                                criteria,
                                net,
                                router,
                                comm_val,
                            )
                        else:
                            errormsg = (
                                "Failed: Verifying BGP attribute"
                                " {} for route: {} in router: {}"
                                ", expected value: {} but found"
                                ": {}".format(criteria, net, router, comm_val, show_val)
                            )
                            return errormsg
        if not found:
            errormsg = (
                "Large Community attribute is not found for route: "
                "{} in router: {} ".format(net, router)
            )
            return errormsg
    logger.debug("Exiting lib API: verify_bgp_community()")
    return True
def create_new_filename(original_filename: str,
                        user_response: str) -> str:
    """
    Build the output .xlsx file name based on the user's menu choice.

    If the user chose option "1", a timestamped name is derived from the
    module-level `today` datetime (e.g. "report_2021_05_04_13_45_09.xlsx");
    otherwise the user's response itself becomes the base name.

    :param original_filename: base name used for the timestamped variant
    :param user_response: "1" to auto-generate a timestamped name, or the
        desired base name for the new file
    :return: str | new filename ending in ".xlsx"
    """
    if user_response == "1":
        # Replace ":" and "-" with "_" so the timestamp is a valid file
        # name on all platforms ("HH:MM:SS" -> "HH_MM_SS",
        # "YYYY-MM-DD" -> "YYYY_MM_DD").
        time = str(today.time())[:8].replace(":", "_")
        date = str(today.date()).replace("-", "_")
        new_filename = f"{original_filename}_{date}_{time}.xlsx"
    else:
        new_filename = f"{user_response}.xlsx"
    return new_filename
from typing import Optional
import os
from pathlib import Path
import shutil
def get_maestral_command_path() -> str:
    """
    Locate the maestral executable.

    First searches the installed package's distribution files for a
    ``bin/maestral`` entry; falls back to ``shutil.which``. Returns an
    empty string when no executable can be found.
    """
    dist_files = []
    try:
        dist_files = files("maestral")
    except PackageNotFoundError:
        # Installation may be broken / metadata missing — keep the empty list.
        pass

    path: Optional[os.PathLike] = None
    for entry in dist_files or []:
        if entry.match("**/bin/maestral"):
            path = entry.locate()
            break

    if isinstance(path, Path):
        # Resolve any symlinks and ".." components.
        path = path.resolve()

    if path and osp.isfile(path):
        return str(path)
    return shutil.which("maestral") or ""
from typing import OrderedDict
def genertate_info_tree(traces, trace_events, level="module"):
    """Aggregate profiler events into an ordered mapping.

    Depending on `level`, keys are module paths ("module"), operator
    names ("operator"), or "path.operator" strings ("mixed"); values are
    ModuleInfo / Measure aggregates of the recorded timings.
    """
    assert level in ["module", "operator", "mixed"]
    tree = OrderedDict()
    for path, module in traces:
        # Flatten per-call event lists, since the model may be invoked
        # multiple times.
        events = [ev for call_events in trace_events[path] for ev in call_events]
        if level == "module":
            tree[path] = ModuleInfo(
                repr(module),
                sum(ev.self_cpu_time_total for ev in events),
                sum(ev.cpu_time_total for ev in events),
                sum(ev.cuda_time_total for ev in events),
                len(trace_events[path]),
            )
            continue
        # "operator" or "mixed": aggregate timings per operator name.
        for op_name in {ev.name for ev in events}:
            matching = [ev for ev in events if ev.name == op_name]
            measure = Measure(
                sum(ev.self_cpu_time_total for ev in matching),
                sum(ev.cpu_time_total for ev in matching),
                sum(ev.cuda_time_total for ev in matching),
                len(matching),
            )
            if level == "mixed":
                tree[path + "." + op_name] = measure
            elif op_name not in tree:
                tree[op_name] = measure
            else:
                # Same operator seen under an earlier path: add field-wise.
                tree[op_name] = Measure(
                    *(a + b for a, b in zip(tree[op_name], measure))
                )
    return tree
def interpolate(x, ratio):
    """Upsample along the time axis by repeating each frame `ratio` times.

    This compensates for the temporal resolution reduction caused by
    downsampling inside a CNN.

    Args:
        x: (batch_size, time_steps, classes_num)
        ratio: int, number of times each time step is repeated
    Returns:
        upsampled: (batch_size, time_steps * ratio, classes_num)
    """
    # Each time step is duplicated `ratio` times in place, i.e.
    # [t0, t1, ...] -> [t0, t0, ..., t1, t1, ...].
    return x.repeat_interleave(ratio, dim=1)
from astroquery.mast import MastClass
def _resolve_object(target):
    """Resolve a target name to sky coordinates via the MAST service."""
    # Note: `_resolve_object` was renamed `resolve_object` in astroquery 0.3.10 (2019)
    mast = MastClass()
    return mast.resolve_object(target)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.