content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def load_image(folder, test):
    """Load all numbered ``.tif`` images in *folder* into one float32 stack.

    Parameters
    ----------
    folder : str
        Directory prefix (including trailing separator) scanned for
        ``*[0-9].tif`` files.
    test : bool
        When False, the matching ``*_mask.tif`` for each image is also
        loaded from ``./input/trainChanged/``.

    Returns
    -------
    (np.ndarray, list)
        Image stack of shape ``(n, image_size, image_size)`` (trimmed to the
        images actually read) and the list of masks (empty when ``test``).
    """
    image_files = glob.glob(folder + "*[0-9].tif")
    dataset = np.ndarray(shape=(len(image_files), FLAGS.image_size, FLAGS.image_size),
                         dtype=np.float32)
    mask_train = []
    print(folder)
    num_images = 0
    for image_file in image_files:
        try:
            dataset[num_images, :, :] = get_im_cv2(image_file, FLAGS.image_size, FLAGS.image_size)
            if not test:
                flbase = os.path.basename(image_file)
                mask_path = "./input/trainChanged/" + flbase[:-4] + "_mask.tif"
                mask = get_im_cv2(mask_path, FLAGS.image_size, FLAGS.image_size)
                mask_train.append(mask)
            num_images += 1
        except IOError as e:
            # Unreadable files are skipped; the stack is trimmed below.
            print('Could not read:', image_file, ':', e, '- it\'s ok, skipping.')
    dataset = dataset[0:num_images, :, :]
    print('Full dataset tensor:', dataset.shape)
    print('Mean:', np.mean(dataset))
    print('Standard deviation:', np.std(dataset))
    return dataset, mask_train
def get_log_p_k_given_omega_int_analytic(k_train, k_bnn, interim_pdf_func):
    """Evaluate the log likelihood, log p(k|Omega_int),
    by evaluating the interim-prior PDF on the BNN kappa samples.

    Parameters
    ----------
    k_train : np.array of shape `[n_train]`
        kappa in the training set. Unused.
    k_bnn : np.array of shape `[n_test, n_samples]`
    interim_pdf_func : callable
        function that evaluates the PDF of the interim prior

    Returns
    -------
    np.array of shape `[n_test, n_samples]`
        log p(k|Omega_int)
    """
    density = interim_pdf_func(k_bnn)
    log_density = np.log(density)
    # Guard against zero/negative densities (nan/-inf after the log).
    assert np.isfinite(log_density).all()
    return log_density
import theano.tensor as T
def LeakyReLU(a=0.33):
    """
    Build a leaky rectified linear activation with a configurable slope.
    :param a: scale applied on the negative side
    :return: callable computing max(x, a*x) element-wise
    """
    def activation(x):
        scaled = a * x
        return T.switch(x < scaled, scaled, x)
    return activation
def get_top_element_count(mol, top):
    """
    Returns the element count for the molecule considering only the atom indices in ``top``.
    Args:
        mol (Molecule): The molecule to consider.
        top (iterable): The atom indices to consider.
    Returns:
        dict: The element count, keys are tuples of (element symbol, isotope number), values are counts.
    """
    # A set gives O(1) membership tests; the previous list scan was
    # O(len(top)) per atom.
    top_indices = set(top)
    element_count = {}
    for i, atom in enumerate(mol.atoms):
        if i in top_indices:
            key = (atom.element.symbol, atom.element.isotope)
            element_count[key] = element_count.get(key, 0) + 1
    return element_count
def load_data(filename, start=0.0, end=1.0, include_profiles=False):
    """
    Wrapper function to load data from a file.
    Args:
        filename: The path of the file to load the data from.
        start: Fractional position from which to start reading the data.
        end: Fractional position up to which to read the data.
        include_profiles: Whether profile variables are read as well
            (forwarded to ``BinFile``).
    Returns:
        Dictionary containing each database variables as numpy array.
    """
    input_file = BinFile(filename, include_profiles=include_profiles)
    return input_file.to_xarray_dataset(start, end)
def query_spf(domain: str) -> str:
    """
    Look up the TXT records of *domain* and return the first SPF record
    (lower-cased). Returns an empty string when no SPF record is found.
    """
    answer = query(domain, 'TXT')
    if not answer:
        return ""
    for record in answer.rrset:
        text = record.strings[0].decode('utf-8')
        if text.split()[0] == 'v=spf1':
            return text.lower()
    return ""
def str_wire(obj):
    """Return a human-readable summary of a wire: header plus one line per edge."""
    edges = obj.Edges()
    edge_count = len(edges)
    parts = []
    if edge_count == 1:
        parts.append("Wire (1x Edge)\n")
    else:
        parts.append("Wire (%dx Edges) " % (edge_count))
        parts.append("length:")
        parts.append(_str_value(wire_length(obj), colour="yellow"))
        parts.append("\n")
    parts.extend("    %3d/%3d %s\n" % (idx + 1, edge_count, str_edge(edge))
                 for idx, edge in enumerate(edges))
    return "".join(parts)
from environmental import EFM
from performance import calculate_performance
import os
def run_model(model_input, resmap, ID, full_output=False):
    """ run model
    Parameters
    ----------
    'model_inputs' (dictionary): contains inputdata, interference flag,
    logit flag, candidate model parameters for species response curves,
    threshold flag, number of input variables K
    'resmap' (string): path to results map
    'full_output' (boolean): when True, the merged model output is also
    written to ``<resmap>/<ID>-model_run.csv``
    'ID' (int): unique tag of chromosome
    Returns
    -------
    'performance' (dictionary): values for evaluation measures, number of
    data samples, threshold
    """
    # NOTE(review): `deepcopy` and `np` are used below but not imported in the
    # visible header -- confirm they are imported elsewhere in the module.
    "Extract inputdata and habitat preference curve parameters"
    inputdata = model_input["data"]
    interference = model_input["interference"]
    logit = model_input["logit"]
    parameters = model_input["parameters"]
    threshold = model_input["threshold"]
    K = model_input["K"]
    # "Run dispersal filter model"
    # from dispersal import DFM
    # if np.sum(parameters["type"]=="categorical")>0:
    #     model = DFM(inputdata,parameters[parameters["type"]=="categorical"])
    #     model.run_models()
    #     #model.save_model(os.path.join(resmap,str(neighbour)+"_DF.csv"))
    #     output_DF = model.interference(interference)
    # else:
    #     output_DF = []
    "Run environmental model"
    # The environmental filter only runs when continuous parameters exist;
    # otherwise the output stays an empty list.
    if np.sum(parameters["type"] == "continuous") > 0:
        model = EFM(inputdata, parameters[parameters["type"] == "continuous"], logit=logit)
        model.run_models()
        # model.save_model(os.path.join(resmap,str(neighbour)+"_EF.csv"))
        output_EF = model.interference(interference)
    else:
        output_EF = []
    output = deepcopy(output_EF)
    # if len(output_DF)==0:
    #     output = output_EF
    #     output["RSI"] = 1.
    # if len(output_EF)==0:
    #     output = output_DF
    #     output["HSI"] = 1.
    # if (len(output_EF)!=0) & (len(output_DF)!=0):
    #     output = output_DF.merge(output_EF,on=["X","Y","date","sample","taxon"],how="inner")
    "Link observed abundance to model output"
    # Join is keyed on ["sample", "taxon"] only; X/Y/date/abundance come
    # along as extra columns from the input data.
    output = output.merge(inputdata[["X", "Y", "date", "sample", "taxon", "abundance"]], how="left",
                          on=["sample", "taxon"]).drop_duplicates()
    "Get prediction presence/absence"
    output.loc[:, "prediction"] = output["HSI"]
    "Evaluate model output on presence/absence"
    # Binarise observations: any non-zero abundance counts as presence.
    output.loc[:, "observation"] = 0
    output.loc[output["abundance"] != 0, "observation"] = 1
    "Print"
    if full_output == True:
        output.to_csv(os.path.join(resmap, str(int(ID)) + "-model_run.csv"))
    "Calculate criteria"
    # Threshold modes: "max" -> NaN sentinel (resolved downstream),
    # "prev" -> prevalence-based, "prob" -> probabilistic, else a fixed float.
    if threshold == "max":
        threshold = np.nan
    elif threshold == "prev":
        threshold = 1 - np.sum(output["observation"]) / len(output)
    elif threshold == "prob":
        threshold = "prob"
    else:
        threshold = float(threshold)
    performance = calculate_performance(output, K, evaluate=True, threshold=threshold)
    return performance
from typing import Tuple
def calculate_approximate_ci(
    xs: np.ndarray, ratios: np.ndarray, confidence_ratio: float
) -> Tuple[float, float]:
    """
    Calculate an approximate confidence interval from a likelihood profile.
    Interval bounds are linearly interpolated between profile points.
    Parameters
    ----------
    xs:
        The ordered parameter values along the profile for the coordinate of
        interest.
    ratios:
        The likelihood ratios corresponding to the parameter values.
    confidence_ratio:
        Minimum confidence ratio to base the confidence interval upon, as
        obtained via `pypesto.profile.chi2_quantile_to_ratio`.
    Returns
    -------
    lb, ub:
        Bounds of the approximate confidence interval.
    """
    # Indices whose ratio clears the requested confidence level.
    qualifying, = np.where(ratios >= confidence_ratio)
    first, last = qualifying[0], qualifying[-1]

    # Lower bound: take the profile edge, or interpolate with the neighbour
    # just below the threshold.
    if first == 0:
        lb = xs[first]
    else:
        sel = [first - 1, first]
        lb = np.interp(confidence_ratio, ratios[sel], xs[sel])

    # Upper bound: same idea; indices flipped so np.interp sees increasing xp.
    if last == len(ratios) - 1:
        ub = xs[last]
    else:
        sel = [last + 1, last]
        ub = np.interp(confidence_ratio, ratios[sel], xs[sel])

    return lb, ub
import logging
def get_logger(level=logging.INFO, quite=False, debug=False, to_file=''):
    """
    Initialise and return the 'main' logger.
    :param level: logging level (DEBUG / INFO / WARNING / CRITICAL)
    :param quite: if True, no handler is attached (logging muted)
    :param debug: if True, forces DEBUG level regardless of ``level``
    :param to_file: when non-empty, log to this file instead of the stream
    :return: logger
    """
    assert level in [logging.DEBUG, logging.INFO, logging.WARNING, logging.CRITICAL]
    logger = logging.getLogger('main')
    formatter = logging.Formatter('%(asctime)s - %(funcName)s - %(levelname)s - %(message)s')
    if debug:
        level = logging.DEBUG
    logger.setLevel(level=level)
    if quite:
        return logger
    # File handler when a path was given, stream handler otherwise.
    handler = logging.FileHandler(to_file) if to_file else logging.StreamHandler()
    handler.setLevel(level=level)
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    return logger
import torch
def transform_means(means, size, method='sigmoid'):
    """
    Map raw index-tuple parameters (values in (-inf, inf)) into the bounds of
    the tensor dimensions, using one of three squashing methods.
    In the case of a templated sparse layer, these parameters and the
    corresponding size tuple describe only the learned subtensor.
    :param means: (..., rank) tensor of raw parameter values
    :param size: Tuple describing the tensor dimensions.
    :param method: one of 'modulo', 'clamp' or 'sigmoid' (default)
    :return: (..., rank)
    """
    # Per-dimension upper bounds (inclusive), broadcast to the shape of means.
    upper = torch.tensor(list(size), dtype=torch.float, device=d(means)) - 1
    upper = util.unsqueezen(upper, len(means.size()) - 1).expand_as(means)

    if method == 'modulo':
        return means.remainder(upper)
    if method == 'clamp':
        bounded = torch.max(means, torch.zeros(means.size(), device=d(means)))
        return torch.min(bounded, upper)
    # Default: squash into [0, 1] and scale up to the bounds.
    return torch.sigmoid(means) * upper
def pull_if_not_exist(region: str=None, registry_prefix: str=None, repo: str=None, tag: str=None):
    """
    Pull the image from the registry if it doesn't exist locally, then re-tag
    it under its unqualified name, ':latest' and ':tag'.
    :param region: unused, kept for interface compatibility
    :param registry_prefix: registry host/namespace prefix
    :param repo: repository name
    :param tag: image tag
    :return: True
    """
    listing = get_stdout(f'{DOCKER} images {repo}:{tag}')
    if repo not in listing:
        target = f'{registry_prefix}/{repo}:{tag}'
        # Pull from the registry ...
        run(f'{DOCKER} pull {target}')
        # ... and tag back to the unqualified name,
        run(f'{DOCKER} tag {target} {repo}')
        # to 'latest',
        run(f'{DOCKER} tag {target} {repo}:latest')
        # and to the requested tag.
        run(f'{DOCKER} tag {target} {repo}:{tag}')
    return True
import re
def _remove_junk(plaintext, language):
"""
"""
if language in {"zh", "ja", "fa", "iw", "ar"}:
return plaintext
lines = plaintext.splitlines()
lines = [l for l in lines if re.findall(MEANINGFUL, l) or not l.strip() or l.startswith("<meta")]
out = "\n".join(lines).strip() + "\n"
out = re.sub("\n{2,}", "\n\n", out)
return out | 0d88d55021aa4b81eeb39f1ca21540b3112c6a08 | 3,628,712 |
def model_parallel_is_initialized():
    """Check if model and data parallel groups are initialized."""
    groups = (_TENSOR_MODEL_PARALLEL_GROUP,
              _PIPELINE_MODEL_PARALLEL_GROUP,
              _DATA_PARALLEL_GROUP)
    return all(group is not None for group in groups)
def CV_IS_IMAGE(*args):
    """CV_IS_IMAGE(CvArr img) -> int

    Thin SWIG wrapper: forwards directly to the native ``_cv.CV_IS_IMAGE``.
    """
    return _cv.CV_IS_IMAGE(*args)
def fingerprint(samp):
    """A memory-efficient fingerprint computation, one column at a time, for
    when wid is large (e.g. wid = 100).

    Returns an array whose column k is the histogram of run lengths of equal
    sorted values in column k of samp, zero-padded to the longest run seen.
    """
    wid = samp.shape[1]
    sentinel = np.full((1, wid), True, dtype=bool)
    # Boundaries between runs of equal values in each sorted column.
    boundaries = np.r_[
        sentinel,
        np.diff(np.sort(samp, axis=0), 1, 0) != 0,
        sentinel
    ]
    histograms = []
    longest = 0
    for k in range(wid):
        runs = np.diff(np.flatnonzero(boundaries[:, k]))
        run_max = runs.max()
        hist, _ = np.histogram(runs, bins=run_max, range=(1, run_max + 1))
        histograms.append(hist)
        if run_max > longest:
            longest = run_max
    # Pad every column histogram to the global maximum run length.
    return np.array([np.r_[col, [0] * (longest - len(col))] for col in histograms]).T
def ldns_rr_list_compare(*args):
    """LDNS buffer.

    Thin SWIG wrapper: forwards to the native ``_ldns.ldns_rr_list_compare``.
    """
    return _ldns.ldns_rr_list_compare(*args)
def at_least_one_shift_each(cur_individual):
    """Return True if the 2-bit encoded individual contains at least one of
    each shift type: '01' (entrega), '10' (recogida) and '11' (dual)."""
    counts = {'01': 0, '10': 0, '11': 0}
    # Walk the chromosome two characters at a time; early-exit once every
    # shift type has been seen at least once.
    for pos in range(0, len(cur_individual), 2):
        shift = cur_individual[pos:pos + 2]
        if shift in counts:
            counts[shift] += 1
        if all(counts.values()):
            return True
    return False
def generate_validate_yaml_for_python(
    artifact_name,
    top_level_imports,
    allowed=None,
    exclude_files=None,
    verbose=0,
):
    """Generate a validation YAML file from an artifact that is a python package.
    This function works in two stages.
    1. It uses a default set of globs for python installations.
    2. It calls `generate_validate_yaml_from_artifact` to get any other possible files
    to include.
    Parameters
    ----------
    artifact_name : str
        The name of the artifact (e.g., `numpy`).
    top_level_imports : list of str
        A list of top-level imports (e.g., `[numpy,]` for numpy).
    allowed : list of str
        If not None, this set of artifacts will also be allowed to output files
        to the paths from `artifact_name` in addition to `artifact_name`. If not set,
        then only `artifact_name` will be allowed.
    exclude_files : list of str
        Any files matching these glob patterns will not be added to the list of
        files in the outputs.
    verbose : int, optional
        Integer between 0 and 100. Passed to joblib. Zero produces no output (default)
        and 100 produces the maximum amount of output.
    Returns
    -------
    validate_yaml : dict
        A dictionary with the contents of the validate YAML file.
    """
    # Copy caller-supplied lists before extending them: the previous
    # `exclude_files += DEFAULT_PYTHON_EXCLUDES` mutated the caller's list
    # in place.
    allowed = list(allowed) if allowed else []
    exclude_files = list(exclude_files) if exclude_files else []
    # add in the defaults that any python package is allowed to write
    exclude_files += DEFAULT_PYTHON_EXCLUDES
    # first make the default globs
    default_globs = [
        glob_template.format(import_name=import_name)
        for import_name in top_level_imports
        for glob_template in DEFAULT_PYTHON_GLOBS
    ]
    validate_yaml = generate_validate_yaml_from_libcfgraph(
        artifact_name,
        exclude_globs=default_globs + exclude_files,
        verbose=verbose,
    )
    validate_yaml["files"].extend(default_globs)
    validate_yaml["allowed"].extend(allowed + [artifact_name])
    # De-duplicate and keep deterministic ordering.
    for key in ("files", "allowed"):
        validate_yaml[key] = sorted(set(validate_yaml[key]))
    return validate_yaml
def near_points(px, qx, r, pxtree=None, qxtree=None):
    """
    Finds points in qx that are within a distance r of any point in px
    Parameters:
        px, required, float(np, dim), coordinates of px
        qx, required, float(nq, dim), coordinates of py
        r, required, float, radius of closeness
        pxtree, optional, cKDTree, tree for px, makes computation more efficient
        qxtree, optional, cKDTree, tree for qx, makes computation more efficient
    Returns:
        Tuple of:
            bool, of length (nq), with True denoting a close point
            pxtree,
            qxtree,
    """
    if pxtree is None:
        pxtree = sp.spatial.cKDTree(px, balanced_tree=False)
    if qxtree is None:
        qxtree = sp.spatial.cKDTree(qx, balanced_tree=False)
    groups = pxtree.query_ball_tree(qxtree, r)
    out = np.zeros(qx.shape[0], dtype=bool)
    for group in groups:
        # BUG FIX: an empty group made np.array(group) a float array, which
        # raised IndexError when used as an index; skip it instead.
        if group:
            out[np.array(group, dtype=int)] = True
    return out, pxtree, qxtree
def batch_dtype(observation_space, action_space):
    """Build a structured batch dtype for the given observation and action spaces.

    Args:
        observation_space -- the observation space
        action_space -- the action space

    Returns an aligned structured numpy dtype with fields
    'states', 'actions', 'rewards' (f4) and 'terminals' (i1).
    """
    fields = [
        ('states', np.dtype((observation_space.dtype, observation_space.shape))),
        ('actions', np.dtype((action_space.dtype, action_space.shape))),
        ('rewards', 'f4'),
        ('terminals', 'i1'),
    ]
    return np.dtype(fields, align=True)
def runQuery(tStart, tStop):
    """
    Get all the rows from all the events within the time period of _tStart_ and _tStop_.
    :param tStart: start of the time period
    :param tStop: stop of the time period
    :return: an array of all the events between _tStart_ and _tStop_
    """
    # HACK: credentials are hardcoded; move them to configuration/secrets.
    mariadb_connection = mariadb.connect(user='root', password='password', database='CMT', host='db')
    try:
        cursor = mariadb_connection.cursor(buffered=True)
        # Parameterized query: the previous string concatenation of tStart /
        # tStop into the SQL text was vulnerable to SQL injection.
        query = ("select * from CMT.CMT where DoRE >= ? and DoRE <= ? "
                 "order by DoRE, ToRE ASC")
        cursor.execute(query, (tStart, tStop))
        result = cursor.fetchall()
        cursor.close()
    finally:
        # The connection was previously leaked on every call.
        mariadb_connection.close()
    return result
from datetime import datetime
import pytz
def get_aware_utc_now():
    """Create a timezone aware UTC datetime object from the system time.
    :returns: an aware UTC datetime object
    :rtype: datetime
    """
    # datetime.utcnow() is deprecated since Python 3.12; datetime.now(tz)
    # produces the same aware UTC instant directly.
    return datetime.now(pytz.utc)
from typing import Optional
from typing import Callable
from typing import Iterator
from typing import Any
import inspect
def iterpoints(resp: dict, parser: Optional[Callable] = None) -> Iterator[Any]:
    """Iterates a response JSON yielding data point by point.
    Can be used with both regular and chunked responses.
    By default, returns just a plain list of values representing each point,
    without column names, or other metadata.
    In case a specific format is needed, an optional ``parser`` argument can be passed.
    ``parser`` is a function/callable that takes data point values
    and, optionally, a ``meta`` parameter containing which takes a
    dictionary containing all or a subset of the following:
    ``{'columns', 'name', 'tags', 'statement_id'}``.
    Sample parser functions:
    .. code:: python
        # Function optional meta argument
        def parser(*x, meta):
            return dict(zip(meta['columns'], x))
        # Namedtuple (callable)
        from collections import namedtuple
        parser = namedtuple('MyPoint', ['col1', 'col2', 'col3'])
    :param resp: Dictionary containing parsed JSON (output from InfluxDBClient.query)
    :param parser: Optional parser function/callable
    :return: Generator object

    NOTE(review): despite the name, this function *returns* a generator over
    the FIRST series found and silently ignores any further series or
    statements in the response -- confirm this is intended for multi-series
    queries.
    """
    for statement in resp['results']:
        if 'series' not in statement:
            continue
        for series in statement['series']:
            # The first series short-circuits the whole function (see NOTE).
            if parser is None:
                return (x for x in series['values'])
            elif 'meta' in inspect.signature(parser).parameters:
                # Everything except the raw values is passed as metadata.
                meta = {k: series[k] for k in series if k != 'values'}
                meta['statement_id'] = statement['statement_id']
                return (parser(*x, meta=meta) for x in series['values'])
            else:
                return (parser(*x) for x in series['values'])
    # No series anywhere: empty iterator keeps the return type consistent.
    return iter([])
def evaluation(board: chess.Board, deciding_agent):
    """
    Basic heuristic for a chess position.
    :param board: The current state of the game
    :param deciding_agent: The identity of the "good" agent.
    :return: Heuristic value for the state
    """
    # Sign convention: +1 when the side to move differs from the deciding
    # agent, -1 otherwise (matches the original evaluation orientation).
    sign = 1 if board.turn != deciding_agent else -1
    # Terminal positions first.
    if board.is_checkmate():
        return 10000000000 * sign
    if board.is_stalemate() or board.is_insufficient_material():
        return 0
    # Material balance plus a small bonus when a check is on the board.
    score = eval_board(board) * sign
    if board.is_check():
        score += 80 * sign
    return score
import time
def calculate_temperature(T0, setpoint, K, omega, Tvar):
    """
    Calculate temperature according to the following formula:
    :math:`T_{output} = T_{var} exp^{-(t - t_0)/K} sin(ω t) + T_{setpoint}`
    """
    t = time.monotonic()
    decay = np.exp(-(t - T0) / K)
    oscillation = np.sin(omega * t)
    return Tvar * decay * oscillation + setpoint
def nodes_visited_for_seq_ref(distance, max_array, min_array, list_parent_node):
    """Count nodes whose (min, max] interval contains ``distance``, first at
    the node level and then, for the non-matching nodes, at their parents'
    level; the root is excluded from the total.

    Relies on module-level aliases (``np_logical_and`` etc.) for the NumPy
    functions.
    # assumes list_parent_node maps each node index to its parent's index
    # into max_array/min_array -- TODO confirm against the caller.
    """
    # Nodes whose interval (min, max] contains the distance.
    boolean_grp = np_logical_and(np_less_equal(distance, max_array),
                                 np_greater(distance, min_array))
    count_visited_nodes = np_sum(boolean_grp)
    # For nodes that did not match, test the interval of their parent.
    not_boolean_grp = np_logical_not(boolean_grp)
    not_node_parent = list_parent_node[not_boolean_grp]
    boolean_grp = np_logical_and(np_less_equal(distance, max_array[not_node_parent]),
                                 np_greater(distance, min_array[not_node_parent]))
    count_visited_nodes += np_sum(boolean_grp)
    # root is not counted
    count_visited_nodes -= 1
    return count_visited_nodes
from typing import Type
import subprocess
from typing import Any
import importlib
def get_config_class() -> Type[BaseSettings]:
    """
    Dynamically imports and returns the Config class from the current service.
    This makes the script service repo agnostic.

    Raises:
        RuntimeError: if the package-name helper script fails.
    """
    # get the name of the microservice package
    with subprocess.Popen(
        args=[GET_PACKAGE_NAME_SCRIPT],
        stdout=subprocess.PIPE,
        stderr=subprocess.DEVNULL,
    ) as process:
        # An `assert` here would be stripped under `python -O`; raise instead.
        if process.wait() != 0 or process.stdout is None:
            raise RuntimeError("Failed to get package name.")
        package_name = process.stdout.read().decode("utf-8").strip("\n")
    # import the Config class from the microservice package:
    config_module: Any = importlib.import_module(f"{package_name}.config")
    config_class = config_module.Config
    return config_class
def bdc_plot_datasets(datasets, zoom = 4, layout=Layout(width='600px', height='600px')):
    """Plot Dataset tiles

    Renders an ipyleaflet map centred on the datasets' joint bounding box,
    overlays the BDC grid WMS layer, and draws one polygon per distinct
    dataset extent (reprojected to EPSG:4674).
    NOTE(review): ``datasets[0]`` is accessed before the ``len(datasets)``
    guard below, so an empty list raises IndexError here.
    """
    bbox = get_bounds(datasets, datasets[0].crs)
    bbox_pol = shapely.wkt.loads(bbox.wkt)
    # Reproject the bounding box from the dataset CRS to EPSG:4674 (lat/lon).
    project = partial(
        pyproj.transform,
        pyproj.Proj(datasets[0].crs.crs_str),
        pyproj.Proj(init='epsg:4674'))
    bbox_pol_wgs84 = transform(project, bbox_pol)
    bbox = bbox_pol_wgs84.bounds
    # Map centre = midpoint of the reprojected bounding box (lat, lon order).
    center = ((bbox[1] + bbox[3])/2,
              (bbox[0] + bbox[2])/2)
    m = Map(basemap=basemaps.Esri.WorldImagery, center=center, zoom=zoom, layout=layout)
    grid = WMSLayer(url='http://brazildatacube.dpi.inpe.br/bdc/geoserver/grids/ows',
                    layers='BDC_GRID',
                    styles='tiles',
                    format='image/png',
                    transparent=True,
                    tile_size=512)
    m.add_layer(grid)
    if len(datasets):
        project = partial(
            pyproj.transform,
            pyproj.Proj(datasets[0].crs.crs_str),
            pyproj.Proj(init='epsg:4674'))
        # De-duplicate dataset extents by their lon/lat corner signature.
        plotted = []
        for ds in datasets:
            idt = "{},{};{},{}".format(ds.metadata.lon[0],ds.metadata.lat[0],ds.metadata.lon[1],ds.metadata.lat[1])
            if idt not in plotted:
                plotted.append(idt)
                # apply projection
                ds_pol = transform(project, shapely.wkt.loads(ds.extent.wkt))
                x,y = ds_pol.exterior.xy
                # ipyleaflet expects (lat, lon) pairs, hence the swap.
                points = [(y1,x1) for x1,y1 in zip(x,y)]
                polygon = Polygon(
                    locations=points,
                    color="#0033CC",
                    fill_color="#388b8b",
                    weight=2,
                    fill_opacity=.6
                )
                m.add_layer(polygon);
    return m
from typing import Tuple
from typing import List
import csv
def read_xnli(dir_path, lang, split, spm_path=None) -> Tuple[List[List[str]], List[str]]:
    """
    Reads XNLI data.
    :param dir_path: the path to the xnli folder
    :param lang: the language
    :param split: the split of the data that should be read (train, test, val)
    :param spm_path: path to sentencepiece model; when None, whitespace
        tokenization is used
    :return: a tuple consisting of a list of lists of tokens (premise + SEP +
        hypothesis, each ending with EOS) and a list of labels
    """
    file_path = XNLI_PATHS[split]
    if split == TRN:
        # Training files are per-language.
        file_path = file_path % lang
    elif lang == EN:
        # English dev/test live in the machine-translated distribution.
        file_name = 'xnli.dev.en.tsv' if split == VAL else 'xnli.test.en.tsv'
        file_path = f'XNLI-MT-1.0/xnli/{file_name}'
    file_path = dir_path / file_path
    if spm_path is not None:
        tokenizer = SentencePieceTokenizer(spm_path,
                                           use_moses=False,
                                           lang=lang)
        tok = tokenizer.tok_fun_with_sp(lang)
        tokenize = lambda x: tokenizer.process_text(x, tok)
        print("WARNING: Sentence Piece is not tested on XNLI yet")
    else:
        tokenize = lambda x: x.split(' ')
    toks, lbls = [], []
    print(f'Reading {file_path}...')
    with open(file_path, encoding='utf-8') as f:
        reader = csv.reader(f, delimiter='\t', quoting=csv.QUOTE_NONE)
        for i, row in enumerate(reader):
            if i == 0:  # skip the header
                continue
            # the examples are already tokenized with Moses
            if split == TRN:
                premise, hypo, label = row
            else:
                # Dev/test files mix languages; keep only rows for `lang`.
                ex_lang = row[0]
                if ex_lang != lang:
                    continue
                premise, hypo, label = row[-3], row[-2], row[1]
            # TODO add BOS
            premise_toks = tokenize(premise) + [EOS]
            hypo_toks = tokenize(hypo) + [EOS]
            toks.append(premise_toks + [SEP] + hypo_toks)
            lbls.append(label)
    return toks, lbls
def indexation(obj, key):
    """:yaql:operator indexer
    Returns value of attribute/property key of the object.
    :signature: obj[key]
    :arg obj: yaqlized object
    :argType obj: yaqlized object, initialized with
        yaqlize_indexer equal to True
    :arg key: index name
    :argType key: keyword
    :returnType: any
    """
    settings = yaqlization.get_yaqlization_settings(obj)
    # Reject keys that the yaqlization policy does not allow.
    _validate_name(key, settings, KeyError)
    value = obj[key]
    _auto_yaqlize(value, settings)
    return value
def convert_output_key(name):
    """ Convert output name into IE-like name
    :param name: output name to convert (string, or (name, port) tuple)
    :return: IE-like output name
    """
    # Plain strings pass through unchanged.
    if not isinstance(name, tuple):
        return name
    if len(name) != 2:
        raise Exception('stats name should be a string name or 2 elements tuple '
                        'with string as the first item and port number the second')
    layer, port = name
    return f'{layer}.{port}'
def collection(identifier: str):
    """Get a collection.
    ---
    tags:
    - collection
    parameters:
    - name: prefix
      in: path
      description: The identifier of the collection
      required: true
      type: string
      example: 0000001
    - name: format
      description: The file type
      in: query
      required: false
      default: json
      schema:
        type: string
        enum: [json, yaml, context, turtle, jsonld]
    """  # noqa:DAR101,DAR201
    # The YAML docstring above is served as the OpenAPI spec (flasgger-style);
    # keep it in sync with the actual route.
    # NOTE(review): the spec names the path parameter "prefix" while the code
    # uses "identifier" -- confirm which name the route decorator declares.
    data = bioregistry.get_collection(identifier)
    if not data:
        abort(404, f"Invalid collection: {identifier}")
    # Content negotiation: default JSON/YAML plus the RDF-flavoured formats.
    return serialize(
        data,
        serializers=[
            ("context", "application/ld+json", collection_to_context_jsonlds),
            ("turtle", "text/plain", partial(collection_to_rdf_str, fmt="turtle")),
            ("jsonld", "application/ld+json", partial(collection_to_rdf_str, fmt="json-ld")),
        ],
    )
def tensor_shape_proto(output_size):
    """Build a ``TensorShapeProto`` with one ``Dim`` per entry of *output_size*."""
    dims = [TensorShapeProto.Dim(size=extent) for extent in output_size]
    return TensorShapeProto(dim=dims)
def check_raises(physical_line, filename):
    """Check raises usage
    N354
    """
    # The hacking framework's own files are exempt from this check.
    ignored_files = ["./tests/unit/test_hacking.py",
                     "./tests/hacking/checks.py"]
    if filename not in ignored_files and re_raises.search(physical_line):
        return (0, "N354 ':Please use ':raises Exception: conditions' "
                   "in docstrings.")
def eval_expr(src, env=None, **kwargs):
    """
    Similar to eval_ast, but receives expression as a string and variables as
    keyword arguments.
    """
    # Copy the environment so the caller's dict is not mutated by the
    # kwargs overlay (the previous `env.update(kwargs)` changed it in place).
    scope = {} if env is None else dict(env)
    scope.update(kwargs)
    return eval_ast(parser(src), scope)
def tasksInView():  # pragma: no cover
    """Iterate over all the tasks in view.

    Filters the items returned by ``itemsInView()`` down to tasks.
    """
    return filter(isTask, itemsInView())
def in_polygon(point, polygon):
    """Simple wrapper on the within method of shapely points

    Params: point (POINT) a shapely point object
            polygon (POLYGON) a shapely polygon object (the target overlap area)
    Returns: (bool) whether or not the point is within polygon expressed as a boolean
    """
    return point.within(polygon)
def sort_points(points: list) -> list:
    """
    Returns a list of point tuples equivalent to points, but sorted in order
    by ascending x coordinate (the first element of each tuple).
    >>> sort_points([(5, 4), (2, 3)])
    [(2, 3), (5, 4)]
    >>> sort_points([(1, 1), (3, 2), (2, 3)])
    [(1, 1), (2, 3), (3, 2)]
    >>> sort_points([(99, 120), (0, 10), (200, 0), (20, 37)])
    [(0, 10), (20, 37), (99, 120), (200, 0)]
    """
    # Doctest outputs above are now valid reprs (the originals lacked the
    # spaces Python prints, so they always failed under doctest).
    return sorted(points, key=lambda point: point[0])
from os import path
def validate_overwrite_different_input_output(opts):
    """
    Make sure that if overwrite is set to False, the input and output folders
    are not set to the same location.
    :param opts: a namespace containing the attributes 'overwrite', 'input',
        and 'output'
    :raises ValidationException: if 'input' and 'output' point to the same
        directory and 'overwrite' is set to False
    :return: True if 'overwrite' is set to True, or 'input'/'output' are
        separate directories
    """
    # BUG FIX: `path` must come from `os`, not `sys` -- `sys.path` is a list
    # and has no `abspath`, so this comparison previously crashed.
    if opts.overwrite or path.abspath(opts.input) != path.abspath(opts.output):
        return True
    else:
        raise ValidationException("Input and output directories are the same, "
                                  "but --overwrite / -X flag is not provided.\n"
                                  "Do you want to overwrite your input files? "
                                  "If so, use the following command:\n"
                                  "\tanchorhub -X " + opts.input)
def NASNetMobile(input_shape=None,
                 include_top=True,
                 weights='imagenet',
                 input_tensor=None,
                 pooling=None,
                 classes=1000):
    """Instantiate the Mobile NASNet architecture in ImageNet mode.

    Optionally loads weights pre-trained on ImageNet. The data format
    convention follows the Keras config at `~/.keras/keras.json`.

    Arguments:
        input_shape: Optional shape tuple, only to be specified if
            `include_top` is False (otherwise the input shape has to be
            `(224, 224, 3)` for NASNetMobile). It must have exactly 3 input
            channels, and width and height no smaller than 32, e.g.
            `(224, 224, 3)`.
        include_top: Whether to include the fully-connected layer at the top
            of the network.
        weights: `None` (random initialization) or `imagenet`
            (ImageNet weights).
        input_tensor: Optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.
        pooling: Optional pooling mode for feature extraction when
            `include_top` is False: `None` (4D feature-map output of the last
            convolutional layer), `avg` (global average pooling, 2D output)
            or `max` (global max pooling, 2D output).
        classes: Optional number of classes to classify images into, only
            used when `include_top` is True and no `weights` argument is
            specified.

    Returns:
        A Keras model instance.

    Raises:
        ValueError: In case of invalid argument for `weights`, or invalid
            input shape.
        RuntimeError: If attempting to run this model with a backend that
            does not support separable convolutions.
    """
    # All NASNet-Mobile hyper-parameters are fixed here; only the I/O-related
    # options are forwarded from the caller.
    return NASNet(
        input_shape,
        penultimate_filters=1056,
        num_blocks=4,
        stem_block_filters=32,
        skip_reduction=False,
        filter_multiplier=2,
        include_top=include_top,
        weights=weights,
        input_tensor=input_tensor,
        pooling=pooling,
        classes=classes,
        default_size=224)
def dcn_resnet152(pretrained=False, **kwargs):
    """Construct a ResNet-152 model (DCN variant).

    Args:
        pretrained (bool): If True, initialise the parameters whose names
            match the ImageNet-pretrained checkpoint.
    """
    model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
    if pretrained:
        checkpoint = model_zoo.load_url(model_urls['resnet152'])
        state = model.state_dict()
        # Partial load: only copy tensors present in both state dicts.
        for name in state.keys():
            if name in checkpoint.keys():
                state[name] = checkpoint[name]
        model.load_state_dict(state)
    return model
import string
def preprocessing(text):
    """Clean tweets, lower-case, and strip punctuation and digits from text."""
    text = process_tweets(text)
    text = text.lower()
    # One C-level translate pass instead of ~42 chained str.replace calls.
    exclude = string.punctuation + string.digits
    return text.translate(str.maketrans('', '', exclude))
import typing
def parse_patter(pats: typing.List[str]) -> typing.Tuple[typing.List[str], typing.List[str]]:
    """Expand gitignore-style patterns into explicit glob forms.

    A leading ``/`` anchors the pattern to the root, a trailing ``/``
    marks a directory, and a leading ``!`` negates the pattern.
    Expansion rules (original pattern is always kept first)::

        /xx/ => /xx/ + xx + xx/**
        xx/  => xx/ + xx + xx/** + **/xx + **/xx/**
        /xx  => /xx + xx
        xx   => xx + **/xx
        !xx  => expanded the same way, but into the exclusion list

    :param pats: raw pattern strings
    :return: (included patterns, excluded patterns)
    """
    included: typing.List[str] = []
    excluded: typing.List[str] = []
    for raw in pats:
        if raw.startswith('!'):
            raw = raw[1:]
            bucket = excluded
        else:
            bucket = included
        bucket.append(raw)
        anchored = raw.startswith('/')
        if raw.endswith('/'):
            # directory pattern
            stem = raw[:-1]
            if anchored:
                # root-anchored dir: /xx/ => xx + xx/**
                bucket.append(stem[1:])
                bucket.append(raw[1:] + '**')
            else:
                # dir anywhere: xx/ => xx + xx/** + **/xx + **/xx/**
                bucket.append(stem)
                bucket.append(raw + '**')
                bucket.append('**/' + stem)
                bucket.append('**/' + raw + '**')
        else:
            if anchored:
                # root-anchored file: /xx => xx
                bucket.append(raw[1:])
            else:
                # file anywhere: xx => **/xx
                bucket.append('**/' + raw)
    return included, excluded
def index():
    """Render the job-status index page."""
    template_name = 'fts_index.html'
    return render_template(template_name)
def createNiftiTestFiles(shouldValidate: bool = True):
    """
    Create the NIfTI test fixtures: a base 3D NIfTI1 image converted from a
    reference DICOM, plus 3D NIfTI2, 4D NIfTI1 and 4D NIfTI2 variants derived
    from it.  When shouldValidate is True, the derived files are cross-checked
    (headers and voxel data) via assertions.

    First step: create the base 3D NIfTI1 file all others are created from.
    """
    convertDicomFileToNifti(test_dicomPath, test_3DNifti1Path)
    nifti1_3D = nib.load(test_3DNifti1Path)
    # Extract the TR time, then eliminate pixel dimension data past 3rd
    # dimension, as a 3D image really should only have 3D data, and having more
    # can complicate later comparisons.
    # these values are correct for the DICOM we have, but is not robust
    TR_TIME = nifti1_3D.header['pixdim'][4]
    nifti1_3D.header['pixdim'][4:] = 1
    # TODO(spolcyn): Take datatype, units, etc from the DICOM directly
    nifti1_3D.header['datatype'] = 512 # unsigned short -
    nifti1_3D.header['xyzt_units'] = 2 # just millimeters
    nib.save(nifti1_3D, test_3DNifti1Path)
    """
    Create NIfTI2 version of 3D base
    """
    nifti2_3D = nib.Nifti2Image(nifti1_3D.dataobj,
                                nifti1_3D.affine,
                                nifti1_3D.header)
    nib.save(nifti2_3D, test_3DNifti2Path)
    """
    Create 4D Nifti1 from base 3D Nifti1
    """
    # This method copied *exactly* from nibabel/funcs.py, except for adding the
    # dtype specifier to the out_data = np.empty(...) line
    # (without it, concat_images would up-cast the output array instead of
    # preserving the source image's on-disk dtype).
    def concat_images_patched(images, check_affines=True, axis=None):
        r""" Concatenate images in list to single image, along specified dimension
        Parameters
        ----------
        images : sequence
           sequence of ``SpatialImage`` or filenames of the same dimensionality\s
        check_affines : {True, False}, optional
           If True, then check that all the affines for `images` are nearly
           the same, raising a ``ValueError`` otherwise.  Default is True
        axis : None or int, optional
            If None, concatenates on a new dimension.  This requires all images to
            be the same shape.  If not None, concatenates on the specified
            dimension.  This requires all images to be the same shape, except on
            the specified dimension.
        Returns
        -------
        concat_img : ``SpatialImage``
           New image resulting from concatenating `images` across last
           dimension
        """
        images = [nib.load(img) if not hasattr(img, 'get_data')
                  else img for img in images]
        n_imgs = len(images)
        if n_imgs == 0:
            raise ValueError("Cannot concatenate an empty list of images.")
        img0 = images[0]
        affine = img0.affine
        header = img0.header
        klass = img0.__class__
        shape0 = img0.shape
        n_dim = len(shape0)
        if axis is None:
            # collect images in output array for efficiency
            out_shape = (n_imgs, ) + shape0
            out_data = np.empty(out_shape, dtype=img0.header.get_data_dtype())
        else:
            # collect images in list for use with np.concatenate
            out_data = [None] * n_imgs
        # Get part of shape we need to check inside loop
        idx_mask = np.ones((n_dim,), dtype=bool)
        if axis is not None:
            idx_mask[axis] = False
        masked_shape = np.array(shape0)[idx_mask]
        for i, img in enumerate(images):
            if len(img.shape) != n_dim:
                raise ValueError(f'Image {i} has {len(img.shape)} dimensions, image 0 has {n_dim}')
            if not np.all(np.array(img.shape)[idx_mask] == masked_shape):
                raise ValueError(f'shape {img.shape} for image {i} not compatible with '
                                 f'first image shape {shape0} with axis == {axis}')
            if check_affines and not np.all(img.affine == affine):
                raise ValueError(f'Affine for image {i} does not match affine for first image')
            # Do not fill cache in image if it is empty
            out_data[i] = np.asanyarray(img.dataobj)
        if axis is None:
            out_data = np.rollaxis(out_data, 0, out_data.ndim)
        else:
            out_data = np.concatenate(out_data, axis=axis)
        return klass(out_data, affine, header)
    """
    Create 4D Nifti1 from base 3D Nifti1
    """
    nifti1_4D = concat_images_patched([nifti1_3D, nifti1_3D])
    # TODO(spolcyn): Set this progamatically according to DICOM datatype
    nifti1_4D.header["datatype"] = 512  # unsigned short
    nifti1_4D.header['bitpix'] = 16  # 16 bits for unsigned short
    correct3DHeaderTo4D(nifti1_4D, repetitionTime=TR_TIME)
    nib.save(nifti1_4D, test_4DNifti1Path)
    """
    Create 4D Nifti2 from 3D Nifti2
    """
    nifti2_4D = nib.concat_images([nifti2_3D, nifti2_3D])
    correct3DHeaderTo4D(nifti2_4D, repetitionTime=TR_TIME)
    nib.save(nifti2_4D, test_4DNifti2Path)
    if not shouldValidate:
        return
    """
    Validate created Nifti files by comparing headers and data that should match
    """
    """ Helpers for validation """
    # https://brainder.org/2015/04/03/the-nifti-2-file-format/
    NIFTI2_REMOVED_FIELDS = ['data_type', 'db_name', 'extents', 'session_error',
                             'regular', 'glmin', 'glmax']
    NIFTI2_CHANGED_FIELDS = ['sizeof_hdr', 'magic']
    def headersMatch(niftiA, niftiB,
                     ignoredKeys: list = [],
                     specialHandlers: dict = {}) -> bool:
        """
        Verify that two NIfTI headers match. A list of keys to ignore can be
        provided (e.g., if comparing NIfTI1 and NIfTI2) or special handlers for
        particular keys (e.g., a 3D NIfTI and 4D NIfTI that should match in the
        first 3 dimensions of their shape).
        Args:
            niftiA, niftiB: NiBabel NIfTI images with headers
            ignoredKeys: Keys to skip comparing
            specialHandlers: Map of field name to function taking two arguments,
                returning 'true' if the values should be considered equal, false
                otherwise.
        Returns:
            True if the headers are equal, false otherwise.
        """
        header1 = niftiA.header
        header2 = niftiB.header
        for key in header1:
            if key in ignoredKeys:
                continue
            v1 = header1.get(key, None)
            v2 = header2.get(key, None)
            if np.array_equal(v1, v2):
                continue
            # Check for nan equality
            else:
                if np.issubdtype(v1.dtype, np.inexact) and \
                        np.allclose(v1, v2, atol=0.0, equal_nan=True):
                    continue
                # If key is special and handler returns true, continue
                elif key in specialHandlers and \
                        specialHandlers[key](v1, v2):
                    continue
                else:
                    logger.warning("--------------------\n"
                                   "Difference found!"
                                   f"Key: {key}\nHeader 1: {v1}\nHeader 2: {v2}")
                    return False
        return True
    def dataMatch(niftiA, niftiB) -> bool:
        # Exact voxel-for-voxel equality of the two images' data arrays.
        return np.array_equal(niftiA.dataobj, niftiB.dataobj)
    # Used when dimensions will increased by one in the 3D to 4D conversion
    def dim3Dto4DHandler(v1: np.ndarray, v2: np.ndarray) -> bool:
        return v1[0] + 1 == v2[0] and v1[4] + 1 == v2[4]
    # Used when pixdim is different in 4th dimension for a 4D image vs. 3D
    def pixdim3Dto4DHandler(v1: np.ndarray, v2: np.ndarray) -> bool:
        return np.array_equal(v1[1:3], v2[1:3])
    # Used when xyzt units is different in 4th (time) dimension for a 4D image
    # vs. 3D which doesn't have one
    def xyztunits3Dto4DHandler(v1: np.ndarray, v2: np.ndarray) -> bool:
        # all spatial units (m, mm, um) have codes < 8, % 8 removes any temporal
        # units
        return np.array_equal(v1 % 8, v2 % 8)
    handlerMap3Dto4D = {'dim': dim3Dto4DHandler, 'pixdim': pixdim3Dto4DHandler,
                        'xyzt_units': xyztunits3Dto4DHandler}
    """ Actual validation """
    """ 3D Nifti2 """
    ignoredKeys = NIFTI2_REMOVED_FIELDS + NIFTI2_CHANGED_FIELDS
    errorString = "{} for Nifti1 3D and Nifti2 3D did not match"
    assert type(nib.load(test_3DNifti2Path)) is nib.Nifti2Image
    assert headersMatch(nifti1_3D, nifti2_3D, ignoredKeys=ignoredKeys), \
        errorString.format("Headers")
    assert dataMatch(nifti1_3D, nifti2_3D), errorString.format("Image data")
    """ 4D Nifti1 """
    errorString = "{} for Nifti1 3D and Nifti1 4D did not match"
    # First compare to the 3D Nifti1 it's derived from
    assert headersMatch(nifti1_3D, nifti1_4D,
                        specialHandlers=handlerMap3Dto4D), \
        errorString.format("Headers")
    assert np.array_equal(nifti1_3D.dataobj, nifti1_4D.dataobj[..., 0])
    assert np.array_equal(nifti1_3D.dataobj, nifti1_4D.dataobj[..., 1])
    nifti1_4D_fromdisk = nib.load(test_4DNifti1Path)
    # Then ensure the in-memory and on-disk representation are the same
    assert headersMatch(nifti1_4D, nifti1_4D_fromdisk), \
        errorString.format("Headers")
    assert dataMatch(nifti1_4D, nifti1_4D_fromdisk), \
        errorString.format("Image data")
    """ 4D Nifti2 """
    errorString = "{} for Nifti2 3D and Nifti2 4D did not match"
    assert headersMatch(nifti2_3D, nifti2_4D,
                        specialHandlers=handlerMap3Dto4D), \
        errorString.format("Headers")
    assert np.array_equal(nifti2_3D.dataobj, nifti2_4D.dataobj[..., 0])
    assert np.array_equal(nifti2_3D.dataobj, nifti2_4D.dataobj[..., 1])
    """ 4D Nifti1 and 4D Nifti2 data """
    errorString = "{} for Nifti1 4D and Nifti2 4D did not match"
    ignoredKeys = NIFTI2_REMOVED_FIELDS + NIFTI2_CHANGED_FIELDS
    assert headersMatch(nifti1_4D, nifti2_4D, ignoredKeys=ignoredKeys), \
        errorString.format("Image data")
    assert dataMatch(nifti1_4D, nifti2_4D), errorString.format("Headers")
import os
def is_downloaded(folder, path_):
    """Check whether the .html page for a book was already downloaded,
    creating *folder* on demand.

    Args:
        folder: directory in which the book is stored (created if missing)
        path_: path of the .html page to look for

    Returns:
        True if the file at ``path_`` exists, False otherwise.
    """
    # exist_ok=True removes the check-then-create race of the original
    # `if not exists: makedirs` sequence.
    os.makedirs(folder, exist_ok=True)
    # os.path.exists already returns a bool; no bool() wrapper needed.
    return os.path.exists(path_)
def calc_brownian_displacement(dt, viscosity, particle_diameter, temperature):
    """
    Characteristic Brownian displacement of a particle over time step dt,
    from the Stokes-Einstein diffusivity: dx = sqrt(2*D*dt).
    """
    BOLTZMANN = 1.3806e-23  # (J/K) Boltzmann constant
    thermal_term = 2.0 * BOLTZMANN * temperature * dt
    drag_term = 3.0 * np.pi * viscosity * particle_diameter
    return np.sqrt(thermal_term / drag_term)
def manhattan_distance(v1, v2, norm=False):
    """Return the L1 (Manhattan) distance ||v1 - v2||_1 between two vectors."""
    v1, v2 = check_pairwise_vector(v1, v2, norm)
    return np.abs(v1 - v2).sum()
def read_data_submit(ftdi, buf, size):
    """
    read_data_submit(context ftdi, unsigned char * buf, int size) -> transfer_control

    Thin wrapper around libftdi's ``ftdi_read_data_submit``.  Submits an
    asynchronous read from the chip: it does not wait for completion of the
    transfer, nor does it make sure that the transfer was successful.
    Uses the libusb 1.0 asynchronous API.

    Parameters:
    -----------
    ftdi: pointer to ftdi_context
    buf: Buffer that will receive the data
    size: Size of the buffer

    Returns:
    --------
    NULL: Some error happened when submitting the transfer
    !NULL: Pointer to a ftdi_transfer_control (pass to the corresponding
        *_data_done call to complete the transfer)
    """
    return ftdi_read_data_submit(ftdi, buf, size)
def set_spines(ax, plot_params):
    """
    Hide user-selected spines of the shift graph.

    Parameters
    ----------
    ax: Matplotlib ax
        Current ax of the shift graph
    plot_params: dict
        Plotting options; only the ``invisible_spines`` entry is used.
    """
    requested = plot_params["invisible_spines"]
    if not requested:
        return ax
    valid_sides = {"left", "right", "top", "bottom"}
    for side in requested:
        if side in valid_sides:
            ax.spines[side].set_visible(False)
        else:
            print("invalid spine argument")
    return ax
def update_user(id):
    """Handle a request to update the user identified by *id*."""
    try:
        payload = request.get_json() or {}
        auth = Authentication()
        return auth.update_user(id, payload)
    except KeyError:
        return {"Error": "An error occured"}
import types
def _normalize_resources(resources):
    """ Exclude any resources that have `/facebook` in the path as these
    are internal and require internal FB infra. Only applies to resources
    specified in the `dict` format.

    Will also go ahead and invert the dictionary using `_invert_dict`.

    Non-dict inputs (e.g. a plain list) are returned unchanged.
    """
    # NOTE(review): `types.is_dict` is not part of the Python stdlib `types`
    # module — this looks like Buck/Starlark's `types` helper; confirm the
    # runtime before treating this as plain Python.
    if resources and types.is_dict(resources):
        # Filter the dict down to keys that survive dependency normalization
        # (which drops internal `/facebook` paths), preserving their values.
        _normalized_dict_keys = _normalize_deps(resources.keys())
        _normalized_resources = {key: resources[key] for key in _normalized_dict_keys}
        return _invert_dict(_normalized_resources)
    else:
        return resources
import sys
def process_file(fn, coverage, percentage, multiple, debug):
    """
    Determine whether there was adequate coverage of bases in this file.

    Parses a Picard-style metrics file: everything up to the line starting
    with '## METRICS CLASS' is metadata, the next line is the header and
    the line after it the data row.

    NOTE: Returns True if the run was OK.
    :param fn: path to the metrics file
    :param coverage: coverage multiple (the N in PCT_TARGET_BASES_NX)
    :param percentage: minimum acceptable fraction at that coverage
    :param multiple: if True, emit terse tab-separated output on failure
    :param debug: if True, print the value that was found
    :return: True if the coverage threshold was met, False otherwise
    """
    metadata_ended = False
    pattern = 'PCT_TARGET_BASES_{0}X'.format(coverage)
    header = []
    data = []
    # Use a context manager so the file handle is closed deterministically
    # (the original `for line in open(fn)` leaked the handle).
    with open(fn) as fh:
        for line in fh:
            line = line.strip()
            if not metadata_ended:
                if line.startswith('## METRICS CLASS'):
                    metadata_ended = True
                continue
            if not header:
                # process the header line
                header = line.split()
            else:
                # process the data line; only the first data row matters
                data = line.split()
                break
    try:
        idx = header.index(pattern)
    except ValueError:
        print("Could not find coverage column {0} in: {1}\n"
              "header line is\n{2}".format(pattern, fn, header),
              file=sys.stderr)
        return False
    if debug:
        print("Percentage at {0} is {1}".format(header[idx], data[idx]))
    this_percentage = float(data[idx])
    if this_percentage < percentage:
        if multiple:
            print("{0}X\t{1}\t{2}".format(coverage, this_percentage, fn))
        else:
            print("Too low coverage percentage at {0}X: {1} {2}".format(
                coverage, this_percentage, fn))
        # Failure is always echoed to stderr, regardless of output mode.
        print("Too low coverage percentage at {0}X: {1} {2}".format(
            coverage, this_percentage, fn), file=sys.stderr)
        return False
    return True
def parse_targets(target):
    """
    Parse a target specification into individual IP addresses.

    Accepts a netaddr-compatible spec: a full range "a.b.c.d-e.f.g.h",
    a shorthand range where only the last octet follows the dash
    ("a.b.c.d-x"), CIDR/network notation, or a plain hostname/IP string.

    :param target: Targets
    :return: List of netaddr IPAddress objects, or a single-element list
        with the stripped input string when it cannot be parsed as an
        address (e.g. a hostname)
    """
    if '-' in target:
        ip_range = target.split('-')
        try:
            # Full "start-end" range, e.g. 10.0.0.1-10.0.0.9
            t = IPRange(ip_range[0], ip_range[1])
        except AddrFormatError:
            try:
                # Shorthand range: the part after '-' replaces only the last
                # octet of the start address, e.g. 10.0.0.1-9
                start_ip = IPAddress(ip_range[0])
                start_ip_words = list(start_ip.words)
                start_ip_words[-1] = ip_range[1]
                start_ip_words = [str(v) for v in start_ip_words]
                end_ip = IPAddress('.'.join(start_ip_words))
                t = IPRange(start_ip, end_ip)
            except AddrFormatError:
                # Not an address at all — keep as-is (likely a hostname
                # that happens to contain '-').
                t = target
    else:
        try:
            t = IPNetwork(target)
        except AddrFormatError:
            t = target
    if type(t) == IPNetwork or type(t) == IPRange:
        return list(t)
    else:
        return [t.strip()]
def lgam(x):
    """Natural log of the gamma function: see Cephes docs for details.

    Port of the Cephes `lgam` routine.  Three regimes:
      * x < -34: reflection formula (via recursive call on -x)
      * x < 13:  recurrence to shift x into [2, 3), then a rational
                 polynomial approximation
      * otherwise: Stirling's asymptotic series

    Raises OverflowError at the poles (non-positive integers) and for
    x > MAXLGM, where the result would overflow.
    """
    if x < -34:
        # Reflection formula: lgam(-x) relates lgam(x) via sin(pi*x).
        q = -x
        w = lgam(q)
        p = floor(q)
        if p == q:
            # Pole of gamma at non-positive integers.
            raise OverflowError("lgam returned infinity.")
        z = q - p
        if z > 0.5:
            p += 1
            z = p - q
        z = q * sin(PI * z)
        if z == 0:
            raise OverflowError("lgam returned infinity.")
        z = LOGPI - log(z) - w
        return z
    if x < 13:
        # Use gamma(x+1) = x*gamma(x) to move x into [2, 3), accumulating
        # the product in z, then evaluate the rational approximation GB/GC.
        z = 1
        p = 0
        u = x
        while u >= 3:
            p -= 1
            u = x + p
            z *= u
        while u < 2:
            if u == 0:
                raise OverflowError("lgam returned infinity.")
            z /= u
            p += 1
            u = x + p
        if z < 0:
            z = -z
        if u == 2:
            return log(z)
        p -= 2
        x = x + p
        p = x * polevl(x, GB)/polevl(x,GC)
        return log(z) + p
    if x > MAXLGM:
        raise OverflowError("Too large a value of x in lgam.")
    # Stirling's formula: (x - 1/2)*log(x) - x + log(sqrt(2*pi)) + series.
    q = (x - 0.5) * log(x) - x + LS2PI
    if x > 1.0e8:
        return q
    p = 1/(x*x)
    if x >= 1000:
        # Truncated asymptotic series is sufficient for large x.
        q += ((   7.9365079365079365079365e-4 * p
                - 2.7777777777777777777778e-3) *p
                + 0.0833333333333333333333) / x
    else:
        q += polevl(p, GA)/x
    return q
def matches(G, queue):
    """
    Walk the transition table *G* from node 0 following the symbols in
    *queue*.  Return the final node id if the whole sequence exists in G,
    otherwise False.  An empty queue never matches.
    """
    if not queue:
        return False
    node = 0
    try:
        for symbol in queue:
            node = G[node][symbol]
    except KeyError:
        return False
    return node
def get_mu_sigma(prices, returns_model='mean_historical_return', risk_model='ledoit_wolf',
                 frequency=252, span=500):
    """Get mu (returns) and sigma (asset risk) given an expected returns model and risk model

    prices (pd.DataFrame) – adjusted closing prices of the asset,
        each row is a date and each column is a ticker/id.
    returns_model (string, optional) - Model for estimating expected returns of assets,
        either 'mean_historical_return' or 'ema_historical_return' (default: mean_historical_return)
    risk_model (string, optional) - Risk model to quantify risk: sample_cov, ledoit_wolf,
        defaults to ledoit_wolf, as recommended by Quantopian in their lecture series on quantitative finance.
    frequency (int, optional) – number of time periods in a year, defaults to 252 (the number of trading days in a year)
    span (int, optional) – Applicable only for 'ema_historical_return' expected returns.
        The time-span for the EMA, defaults to 500-day EMA)

    Raises Exception if either model name is unsupported.
    """
    # Dispatch lazily: the original built dicts that eagerly computed every
    # model (both return estimators AND both covariance estimators) on each
    # call, quadrupling the work.
    returns_model = returns_model.lower()
    risk_model = risk_model.lower()
    if returns_model == 'mean_historical_return':
        mu = expected_returns.mean_historical_return(prices, frequency)
    elif returns_model == 'ema_historical_return':
        mu = expected_returns.ema_historical_return(prices, frequency, span)
    else:
        # Bug fix: the original interpolated `risk_model` into this message.
        raise Exception('Expected returns model %s is not supported. Only mean_historical_return and ema_historical_return are supported currently.' % returns_model)
    if risk_model == 'sample_cov':
        S = risk_models.sample_cov(prices)
    elif risk_model == 'ledoit_wolf':
        S = risk_models.CovarianceShrinkage(prices).ledoit_wolf()
    else:
        raise Exception('Risk model %s is not supported. Only sample_cov and ledoit_wolf are supported currently.' % risk_model)
    return mu, S
def getPortList(chute):
    """
    Get a list of ports to expose in the format expected by create_container.

    Uses the port binding dictionary from the chute host_config section.
    Keys may be integers or strings of the form "port" or "port/protocol".

    Example:
        port_bindings = {
            "1111/udp": 1111,
            "2222": 2222
        }
        getPortList returns [(1111, 'udp'), (2222, 'tcp')]

    If the chute hosts a web service, its web port is appended when not
    already present.
    """
    if not hasattr(chute, 'host_config') or chute.host_config is None:
        cfg = {}
    else:
        cfg = chute.host_config
    exposed = []
    for binding in cfg.get('port_bindings', {}):
        if isinstance(binding, int):
            exposed.append((binding, 'tcp'))
            continue
        pieces = binding.split('/')
        protocol = pieces[1] if len(pieces) > 1 else 'tcp'
        exposed.append((int(pieces[0]), protocol))
    web_port = chute.getWebPort()
    if web_port is not None and all(entry[0] != web_port for entry in exposed):
        exposed.append((web_port, 'tcp'))
    return exposed
from typing import Union
from typing import Tuple
from typing import Any
from typing import Sequence
def _apply_kraus_single_qubit(
    kraus: Union[Tuple[Any], Sequence[Any]], args: 'ApplyChannelArgs'
) -> np.ndarray:
    """Use slicing to apply single qubit channel. Only for two-level qubits.

    Computes sum_k K_k rho K_k^dagger on the density-matrix tensor held in
    `args`, accumulating each term into `args.out_buffer`.

    Args:
        kraus: Kraus operators (2x2 matrices) of the channel.
        args: buffers and axis indices for the target qubit; assumes
            `args.auxiliary_buffer0` holds the input state (convention of
            the caller — confirm against the apply_channel protocol).

    Returns:
        `args.out_buffer` containing the summed channel output.
    """
    zero_left = linalg.slice_for_qubits_equal_to(args.left_axes, 0)
    one_left = linalg.slice_for_qubits_equal_to(args.left_axes, 1)
    zero_right = linalg.slice_for_qubits_equal_to(args.right_axes, 0)
    one_right = linalg.slice_for_qubits_equal_to(args.right_axes, 1)
    for kraus_op in kraus:
        # Restore the original input state before applying the next term.
        np.copyto(dst=args.target_tensor, src=args.auxiliary_buffer0)
        # Left-multiply by K: acts on the "left" (row) axes of rho.
        linalg.apply_matrix_to_slices(
            args.target_tensor, kraus_op, [zero_left, one_left], out=args.auxiliary_buffer1
        )
        # No need to transpose as we are acting on the tensor
        # representation of matrix, so transpose is done for us.
        # Right-multiply by K^dagger via conj(K) on the "right" (column) axes.
        linalg.apply_matrix_to_slices(
            args.auxiliary_buffer1,
            np.conjugate(kraus_op),
            [zero_right, one_right],
            out=args.target_tensor,
        )
        args.out_buffer += args.target_tensor
    return args.out_buffer
def get_itk_array(path_or_image):
    """ Get an image array given a path or itk image.

    Parameters
    ----------
    path_or_image : str or itk image or ndarray
        Path pointing to an image file (TIFF, JPEG, PNG, BMP, DICOM, GIPL,
        Bio-Rad, LSM, Nifti, Analyze, SDT/SPR, Nrrd, VTK, ...), an itk
        image, or an ndarray (returned unchanged).

    Returns
    -------
    arr : ndarray
        Image ndarray contained in the given path or the itk image.
    """
    if isinstance(path_or_image, np.ndarray):
        # Already an array — pass through untouched.
        return path_or_image
    if isinstance(path_or_image, str):
        image = get_itk_image(path_or_image)
    elif isinstance(path_or_image, itk.Image):
        image = path_or_image
    else:
        err = 'Image type not recognized: ' + str(type(path_or_image))
        raise RuntimeError(err)
    return itk.GetArrayFromImage(image)
def get_datasets_dbpedia(subset, limit):
    """
    Loads dbpedia data from files, splits each line into label/header/text
    and generates 0-based integer labels.

    Args:
        subset: basename of the CSV under data/dbpedia/ ('train', 'test', ...)
        limit: maximum number of consecutive rows kept per label before
            skipping to the next label

    Returns:
        dict with keys 'data' (stripped texts), 'target' (0-based int
        labels) and 'target_names' (label strings in first-seen order).
    """
    datasets = dict()
    data = []
    target = []
    target_names = []
    filename = 'data/dbpedia/' + subset + '.csv'
    last_label = ''
    i = 0
    # Load data from files
    with open(filename, 'r') as f:
        for line in f:
            # maxsplit=2: the text field may itself contain commas; the
            # original 3-way split raised ValueError on such lines.
            label, header, text = line.split(',', 2)
            if i >= limit:
                if label == last_label:
                    continue
                else:
                    i = 0  # new label encountered: reset per-label counter
            print('Entry : {}, label:{}, header: {}'.format(i, label, header))
            # remove blank spaces from text and insert into list 'data'
            data.append(text.strip())
            target.append(int(label) - 1)
            if label not in target_names:
                target_names.append(label)
            last_label = label
            i += 1
    datasets['data'] = data
    datasets['target'] = target
    datasets['target_names'] = target_names
    return datasets
def cv_read(path):
    """Read an image from *path* with OpenCV and return it as an RGB array.

    Args:
        path: path to image
    Returns:
        numpy array containing the RGB image
    """
    bgr = cv2.imread(path)
    return bgr_to_rgb(bgr)
def get_git_log_raw_output_for_two_commits(commit1, commit2):
    """
    Get the git log raw output for a commit range.

    :param commit1: the earlier commit; when empty, log from the beginning
    :param commit2: the later commit; when empty, defaults to HEAD
    :return: the git command output
    """
    end_ref = commit2 if commit2 else 'HEAD'
    ref_spec = end_ref if not commit1 else commit1 + '..' + end_ref
    cmd = 'git log --raw --no-abbrev --oneline ' + ref_spec + ' 2>/dev/null || true'
    verbose(cmd, LEVEL_2)
    return get_shell_cmd_output(cmd)
def get_info_from_service(service, zconf):
    """Resolve the ServiceInfo for *service* via the given Zeroconf
    instance; returns None on lookup failure or I/O error."""
    info = None
    try:
        info = zconf.get_service_info("_googlecast._tcp.local.", service)
        if info:
            _LOGGER.debug(
                "get_info_from_service resolved service %s to service_info %s",
                service,
                info,
            )
    except IOError:
        pass
    return info
import glob
import sys
import os
def recursive_glob(path):
    """Version-agnostic recursive glob.

    Implements the Python 3.5+ glob module's recursive glob for Python 2.7+,
    emulating bash's globstar (`shopt -s globstar`).

    Args:
        path: A path that may contain `**` and `*` ("magic").

    Returns:
        The expanded list of paths with magic removed.
    """
    if "*" not in path:
        # No magic at all — nothing to expand.
        return [path]
    if "**" not in path:
        # Plain (non-recursive) glob suffices.
        return glob.glob(path)
    if sys.version_info >= (3, 5):
        # Native recursive glob is available.
        return glob.glob(path, recursive=True)  # pylint: disable=unexpected-keyword-arg
    # Fallback: emulate recursive glob with os.walk.
    prefix, suffix = path.split("**", 1)
    prefix = prefix or "." + os.sep
    suffix = suffix.lstrip(os.sep)
    expanded = []
    for dirpath, _, _ in os.walk(prefix):
        # Don't recurse into hidden directories. The current directory ends
        # with '/', so its basename is '' and it is never skipped.
        if not os.path.basename(dirpath).startswith("."):
            expanded += recursive_glob(os.path.join(dirpath, suffix))
    return expanded
def _ragged_tile_axis(rt_input, axis, repeats):
  """Tile a dimension of a RaggedTensor to match a ragged shape.

  Recurses inward until `axis` is the first ragged dimension, then repeats
  the row ranges at that level `repeats` times, rebuilding the nested row
  lengths below it.

  Args:
    rt_input: tensor to tile; dense inputs are first converted to a
      RaggedTensor with ragged_rank=1.
    axis: dimension to tile; must be > 0.
    repeats: per-row repeat counts (presumably an int tensor aligned with
      the rows at `axis` — confirm against callers).

  Returns:
    A RaggedTensor with the given axis tiled.
  """
  assert axis > 0  # Outermost dimension may not be ragged.
  if not ragged_tensor.is_ragged(rt_input):
    rt_input = ragged_conversion_ops.from_tensor(rt_input, ragged_rank=1)
  if axis > 1:
    # Not yet at the target axis: tile the values and keep this level's
    # row partitioning unchanged.
    return rt_input.with_values(
        _ragged_tile_axis(rt_input.values, axis - 1, repeats))
  else:
    src_row_splits = rt_input.nested_row_splits
    src_row_lengths = ragged_array_ops.nested_row_lengths(rt_input)
    splits = src_row_splits[0]
    dst_row_lengths = [repeats]
    # Repeat each deeper level's row lengths in lockstep with the rows
    # being tiled, walking the splits down through the nesting.
    for i in range(1, len(src_row_lengths)):
      dst_row_lengths.append(
          ragged_util.repeat_ranges(src_row_lengths[i], splits, repeats))
      splits = array_ops.gather(src_row_splits[i], splits)
    dst_values = ragged_util.repeat_ranges(rt_input.inner_values, splits,
                                           repeats)
    return ragged_factory_ops.from_nested_row_lengths(dst_values,
                                                      dst_row_lengths)
import re
def camelcase_to_snakecase(value: str) -> str:
    """
    Convert a camelCase string to snake_case.

    Hyphens, dots and whitespace are first normalized to underscores;
    each ASCII uppercase letter (except a leading one) is then prefixed
    with an underscore and lowercased.

    >>> camelcase_to_snakecase('')
    ''
    >>> camelcase_to_snakecase('foo')
    'foo'
    >>> camelcase_to_snakecase('fooBarBaz')
    'foo_bar_baz'
    >>> camelcase_to_snakecase('foo_bar_baz')
    'foo_bar_baz'
    >>> camelcase_to_snakecase('_fooBarBaz')
    '_foo_bar_baz'
    >>> camelcase_to_snakecase('__fooBarBaz_')
    '__foo_bar_baz_'
    """
    normalized = re.sub(r"[\-\.\s]", "_", value)
    if not normalized:
        return normalized
    pieces = [normalized[0].lower()]
    for ch in normalized[1:]:
        if "A" <= ch <= "Z":  # ASCII uppercase only, mirroring [A-Z]
            pieces.append("_" + ch.lower())
        else:
            pieces.append(ch)
    return "".join(pieces)
from deephaven.TableTools import emptyTable
def colorTable():
    """
    Returns a table which visualizes all of the named colors.

    :return: table which visualizes all of the named colors.
    """
    table = emptyTable(1).updateView("Colors = colorNames()").ungroup()
    table = table.updateView("Paint = io.deephaven.gui.color.Color.color(Colors).javaColor()")
    table = table.formatColumns("Colors = io.deephaven.db.util.DBColorUtil.bgfga(Paint.getRed(), Paint.getGreen(), Paint.getBlue())")
    return table.dropColumns("Paint")
def not_found(error):
    """Custom error handler returning a JSON 404 response."""
    body = {'error': 'Not Found, resource not found'}
    return jsonify(body), 404
def string_distance(str1, str2):
    """
    Compute the Levenshtein (edit) distance between two strings via
    dynamic programming.

    @author: 仰起脸笑的像满月
    @date: 2019/05/15
    :param str1: first string
    :param str2: second string
    :return: minimum number of single-character insertions, deletions and
        substitutions turning str1 into str2 (numpy scalar)
    """
    m = len(str1)  # idiomatic len() instead of __len__()
    n = len(str2)
    distance = np.zeros((m + 1, n + 1))
    # Base cases: transforming to/from the empty string (vectorized).
    distance[:, 0] = np.arange(m + 1)
    distance[0, :] = np.arange(n + 1)
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            cost = 0 if str1[i - 1] == str2[j - 1] else 1
            distance[i, j] = min(distance[i - 1, j] + 1,        # deletion
                                 distance[i, j - 1] + 1,        # insertion
                                 distance[i - 1, j - 1] + cost) # substitution
    return distance[m, n]
def best_archiver(random, population, archive, args):
    """Archive only the best individual(s).

    Each population member is compared against the current archive:
    archive members it dominates are evicted, and it is added unless it is
    a duplicate (same candidate) or is itself dominated.  If the comparison
    operators define Pareto preference (as in the ``Pareto`` class), this
    forms a Pareto archive.

    .. Arguments:
       random -- the random number generator object
       population -- the population of individuals
       archive -- the current archive of individuals
       args -- a dictionary of keyword arguments
    """
    updated = archive
    for newcomer in population:
        if not updated:
            updated.append(newcomer)
            continue
        evictions = []
        keep_newcomer = True
        for incumbent in updated:
            if newcomer.candidate == incumbent.candidate:
                # Exact duplicate: stop scanning, but evictions collected
                # so far are still applied below (matches original flow).
                keep_newcomer = False
                break
            elif newcomer < incumbent:
                keep_newcomer = False
            elif newcomer > incumbent:
                evictions.append(incumbent)
        for loser in evictions:
            updated.remove(loser)
        if keep_newcomer:
            updated.append(newcomer)
    return updated
import dotenv
import os
import requests
def elections():
    """Download raw election data from the Election Guide API.

    Refer to the Election Guide API specifications to understand the data
    structure and the meaning of specific fields.  Pagination is followed
    via each response's "next" URL (pages have 100 results each).

    .. note:: Requires a ``.env`` file in the project root with the API
        URL and key. Contact DataKind data ambassadors for access.

    Returns:
        list: Election data json, one entry per page.
    """
    # Load API URL and key from the environment.
    dotenv.load_dotenv()
    next_url = os.getenv("URL")
    api_key = os.getenv("KEY")
    headers = {"Authorization": f"Token {api_key}"}
    pages = []
    while next_url:
        payload = requests.get(next_url, headers=headers).json()
        pages.append(payload)
        next_url = payload["next"]
    return pages
import time
import logging
def google_subdomains(name):
    """
    Repeatedly run google-dork lookups for subdomains of *name* until an
    iteration discovers nothing new, feeding the highest-count subdomains
    back into each query.

    Returns a dictionary mapping subdomain strings to GoogleDomainResult
    objects.
    """
    results = {}
    previous_count = -1
    while len(results) > previous_count:
        # Randomized delay to avoid hammering Google with back-to-back queries.
        time.sleep(randint(0, 4) + randint(0, 1000) * 0.001)
        previous_count = len(results)
        # Only the subdomains with the most hits (minus the apex) seed the
        # next dork query.
        ranked = sorted(results, key=lambda sub: results[sub].count, reverse=True)
        lookup = _google_subdomain_lookup(
            name, [sub for sub in ranked if sub != name], 100, 0
        )
        results = _update_google_results(lookup, results)
        logging.debug(
            "New subdomain(s) found: " + str(len(results) - len(ranked))
        )
        for sub in sorted(results, key=lambda sub: results[sub].count, reverse=True):
            logging.debug(sub + " - " + str(results[sub].count))
    logging.debug(
        "Finished google lookups with " + str(len(results)) + " subdomains discovered."
    )
    return results
def contacts_per_person_normal_self_30():
    """
    Vensim name: 'contacts per person normal self 30'
    Original equation: 30
    Units: contact/Day
    Limits: (None, None)
    Type: constant
    """
    CONTACTS_PER_DAY = 30
    return CONTACTS_PER_DAY
import re
def process_text2phrases(text, clinical_ner_model):
    """
    Extract clinical text segments (candidate phrases) from free text.

    :param text: free-form input text
    :param clinical_ner_model: pretrained NER model provided by Stanza
    :return: List[PhraseItem]
    """
    tokenizer = SpanTokenizer()
    spliters = getSpliters()
    stopwords = getStopWords()
    # Normalize the text into plain lower case.
    text = strip_accents(text.lower())
    text = re.sub("[-_\"\'\\\\\t‘’]", " ", text)
    # Replace line breaks (after a word character) with periods so they
    # act as sentence splitters later on.
    text = re.sub("(?<=[\w])[\r\n]", ".", text)
    clinical_docs = clinical_ner_model(text)
    sub_sentences = []
    for sent_c in clinical_docs.sentences:
        clinical_tokens = sent_c.tokens
        # Pass 1: collect maximal runs of Stanza "PROBLEM" entity tokens.
        flag = False
        curSentence = []
        tmp = set()
        for i in range(len(clinical_tokens)):
            wi = WordItem(clinical_tokens[i].text, clinical_tokens[i].start_char, clinical_tokens[i].end_char)
            if "PROBLEM" in clinical_tokens[i].ner and wi.text not in {',', '.', ':', ';', '(', ')', '[', ']'}:
                curSentence.append(wi)
            else:
                if len(curSentence) > 0:
                    phrase_item = PhraseItem(curSentence)
                    sub_sentences.append(phrase_item)
                    tmp.update(phrase_item.locs_set)
                    flag = True
                    curSentence = []
        if len(curSentence) > 0:
            phrase_item = PhraseItem(curSentence)
            sub_sentences.append(phrase_item)
            tmp.update(phrase_item.locs_set)
            flag = True
        # phrase segmentation
        # Only sentences in which Stanza tagged at least one PROBLEM
        # entity get supplemented with extra splitter-based phrases.
        if not flag:
            continue
        curSentence = []
        for i in range(len(clinical_tokens)):
            wi = WordItem(clinical_tokens[i].text, clinical_tokens[i].start_char, clinical_tokens[i].end_char)
            # Cache the lemma of each token for later comparisons.
            text_lemma = wnl.lemmatize(clinical_tokens[i].text)
            if clinical_tokens[i].text not in WordItem.lemma_dict:
                WordItem.lemma_dict[clinical_tokens[i].text] = text_lemma
            if clinical_tokens[i].text in spliters:
                if len(curSentence) > 0:
                    phrase_item = PhraseItem(curSentence)
                    # Only add phrases that do not overlap the
                    # Stanza-tagged spans collected above.
                    if len(phrase_item.locs_set & tmp) == 0:
                        sub_sentences.append(phrase_item)
                    curSentence = []
            else:
                curSentence.append(wi)
        if len(curSentence) > 0:
            phrase_item = PhraseItem(curSentence)
            if len(phrase_item.locs_set & tmp) == 0:
                sub_sentences.append(phrase_item)
    # Negation detection: mark phrases containing a negation cue word.
    for phrase_item in sub_sentences:
        flag = False
        for token in phrase_item.word_items:
            if token.text.lower() in {"no", "not", "none", "negative", "non", "never", "few", "lower", "fewer", "less",
                                      "normal"}:
                flag = True
                break
        if flag:
            phrase_item.set_no_flag()
    # Ellipsis recovery for coordinated phrases ("A and B" etc.).
    sub_sentences_ = []
    for idx, pi in enumerate(sub_sentences):
        # Re-tokenize clauses so that "and", "or" and "/" can split them.
        sub_locs = [[i + pi.start_loc, j + pi.start_loc] for i, j in
                    tokenizer.tokenize(text[pi.start_loc:pi.end_loc])]
        sub_phrases = []
        curr_phrase = []
        # Extract the phrases delimited by "and", "or" or "/".
        for loc in sub_locs:
            wi = WordItem(text[loc[0]:loc[1]], loc[0], loc[1])
            if wi.text in {"and", "or", "/"}:
                if len(curr_phrase) > 0:
                    sub_phrases.append(PhraseItem(curr_phrase))
                    sub_phrases[-1].no_flag = pi.no_flag
                    curr_phrase = []
            else:
                curr_phrase.append(wi)
        if len(curr_phrase) > 0:
            sub_phrases.append(PhraseItem(curr_phrase))
            sub_phrases[-1].no_flag = pi.no_flag
        # First keep every phrase from the original split.
        for item in sub_phrases:
            sub_sentences_.append(item)
        # Only the two-part "A + B" form of ellipsis is recovered.
        if len(sub_phrases) == 2:
            if len(sub_phrases[0]) >= 1 and len(sub_phrases[1]) == 1:
                tmp = sub_phrases[0].word_items[:-1][:]
                tmp.extend(sub_phrases[1].word_items)
                sub_sentences_.append(PhraseItem(tmp))
                sub_sentences_[-1].no_flag = pi.no_flag
            elif len(sub_phrases[0]) == 1 and len(sub_phrases[1]) >= 1:
                tmp = sub_phrases[0].word_items[:]
                tmp.extend(sub_phrases[1].word_items[1:])
                sub_sentences_.append(PhraseItem(tmp))
                sub_sentences_[-1].no_flag = pi.no_flag
    sub_sentences = sub_sentences_
    # print([i.toSimpleString() for i in sub_sentences])
    # Enumerate all sub-phrases (up to 10 tokens); drop pure numbers,
    # stop words and single characters.
    phrases_list = []
    for pi in sub_sentences:
        tmp = pi.toSimpleString()
        if isNum(tmp) or len(tmp) <= 1:
            continue
        for i in range(len(pi.simple_items)):
            for j in range(10):
                if i + j == len(pi.simple_items):
                    break
                if len(pi.simple_items[i:i + j + 1]) == 1:
                    tmp_str = pi.simple_items[i:i + j + 1][0].text
                    if tmp_str in stopwords or isNum(tmp_str):
                        continue
                phrases_list.append(PhraseItem(pi.simple_items[i:i + j + 1]))
                phrases_list[-1].no_flag = pi.no_flag
    # print(len(phrases_list))
    # print([i.toString() for i in phrases_list])
    return phrases_list
def get_all(isamAppliance, check_mode=False, force=False):
    """
    Fetch the full list of services via a GET request.

    ``check_mode`` and ``force`` are accepted for interface parity with
    the other CRUD helpers but have no effect on a read-only call.
    """
    description = "Receiving all Services"
    return isamAppliance.invoke_get(description, module_uri)
from scipy.stats import norm
def numeric_outlier(feature, keep_rate=0.9545, mode='right', feature_scale=None):
    """Clip feature outliers based on normal-distribution quantiles.

    Args:
        feature: pd.Series, sample feature.
        keep_rate: float, default 0.9545, fraction of the distribution
            assumed to be kept (non-outlier mass).
        mode: str, default 'right', one of ['left', 'right', 'both'];
            which statistical distribution boundary (tail) to clip.
        feature_scale: list or tuple, [feature.mean(), feature.std()];
            when given it is returned as-is instead of being recomputed.
    Returns:
        Clipped feature and feature_scale.
    """
    assert mode in ['left', 'right', 'both'], "`mode` should be one of ['left', 'right', 'both']."
    scale = feature_scale if feature_scale is not None else (feature.mean(), feature.std())
    # (lower, upper) clipping bounds derived from the normal quantile (ppf).
    if mode == 'both':
        # Split the discarded mass (1 - keep_rate) evenly over both tails.
        bounds = (feature.mean() + norm.ppf((1 - keep_rate) / 2) * feature.std(),
                  feature.mean() + norm.ppf(keep_rate + (1 - keep_rate) / 2) * feature.std())
    elif mode == 'right':
        bounds = (feature.min(), feature.mean() + norm.ppf(keep_rate) * feature.std())
    else:
        bounds = (feature.mean() + norm.ppf(1 - keep_rate) * feature.std(), feature.max())
    t = feature.clip(bounds[0], bounds[1])
    return t, scale
def elk_index(hashDict):
    """Build the bulk-indexing action header for an ELK stack install."""
    return {
        "index": {
            "_index": "hash-data",
            "_id": hashDict["hashvalue"],
        }
    }
def build_2d_gauss_data(mu_1, mu_2, sig_1, sig_2, samples, changes={},
                        w=50, alpha=0.1, lags=0):
    """Build a bivariate dataset following a Gaussian distribution.

    Draws from numpy's global RNG; seed it externally for reproducibility.

    Parameters
    ----------
    mu_1 : float
        Mean of x_1.
    mu_2 : float
        Mean of x_2.
    sig_1 : float
        Standard deviation of x_1.
    sig_2 : float
        Standard deviation of x_2.
    samples : int
        Number of samples to generate.
    changes : dict, optional
        Sudden or incipient changes to be added. The default is
        an empty dict (note: shared mutable default; it is never mutated
        here, only iterated).
        Can be:
            - incip: Incipient.
            - sudden: Sudden.
        Example:
            {
                'incip': [{'add': 50, 'where': (start, stop)}],
                'sudden': [{'add': 50, 'where': (start, stop)}]
            }
        In `add` give the magnitude of the change and in `where`, where
        the change will be added. start >= 0 and stop < samples.
    w : int, optional
        Size of the window used to estimate the local mean before an
        incipient change. The default is 50.
    alpha : float, optional
        Weight for linear dependence (correlation) between x_1 and x_2.
        x_2 = ... alpha * x_1 ...
    lags : int, optional
        If greater than 0, time dependence is added. Default is 0.
    Notes
    -----
    - x_1 is defined as following:
        x_1 = N(mu_1, sig_1 ** 2),
      where N is the normal distribution with mean `mu_1` and variance
      `sig_1 ** 2`.
    - x_2 is defined as following:
        x_2 = N(mu_2, sig_2 ** 2) + alpha * x_1 + N(0, 1),
      where `alpha` is a weight and `N(0, 1)` is white noise.
    Returns
    -------
    x_1 : numpy.ndarray, shape(samples,)
        1th random variable.
    x_2 : numpy.ndarray, shape(samples,)
        2th random variable.
    """
    white_noise = norm.rvs(loc=0, scale=1, size=samples)
    x_1 = norm.rvs(loc=mu_1, scale=sig_1, size=samples)
    for change_name, changes_to_apply in changes.items():
        change_name = change_name.lower()
        for change in changes_to_apply:
            to_sum = change['add']
            start, stop = change['where']
            num_samples = stop - start
            # Level just before the change window, estimated over the
            # last `w` samples (assumes start >= w -- TODO confirm).
            mean_est = np.mean(x_1[start - w: start])
            if change_name == 'incip':
                # Ramp from 0 up to `to_sum` across the window.
                add = np.linspace(start=0, stop=to_sum, num=num_samples)
                x_1[start: stop] = norm.rvs(loc=mean_est,
                                            scale=sig_1,
                                            size=num_samples) + add
            elif change_name == 'sudden':
                # Step change: shift the window mean by `to_sum`.
                x_1[start: stop] += norm.rvs(loc=to_sum,
                                             scale=sig_1,
                                             size=num_samples)
    x_2 = norm.rvs(loc=mu_2, scale=sig_2, size=samples) + \
        alpha * x_1 + white_noise
    # Time dependence.
    if lags > 0:
        # NOTE(review): x_2 is built from the pre-lag x_1; the lagged terms
        # are added to both series afterwards -- confirm this is intended.
        lagged_mat = build_lagged_matrix(np.c_[x_1, x_2], lags)
        end_1th = 2 + lags
        end_2th = end_1th + lags
        x_1 += np.sum(alpha * lagged_mat[:, 2: end_1th], axis=1)
        x_2 += np.sum(alpha * lagged_mat[:, end_1th: end_2th], axis=1)
    return x_1, x_2
def parse_between_expression(var_dict, variable, expression_dict):
    """
    Find the entry of *expression_dict* whose "X;Y"-style key bounds
    *variable* and evaluate its value.

    Keys are semicolon-separated integer bounds: "X;Y" matches
    X <= variable <= Y, "X;" matches variable >= X, and ";Y" matches
    variable <= Y. The first matching key (in dict order) wins. The
    matched value may be a number (returned as-is), an arithmetic
    expression string (evaluated via parse_arithmetic), or a dict whose
    string values are each evaluated via parse_arithmetic.

    :type var_dict: dict
    :type expression_dict: dict
    :type variable: int
    :param var_dict: Variable dictionary for parse_arithmetic.
    :param variable: Value to match against the bound keys.
    :param expression_dict: Mapping of bound keys to expressions.
    :return: result of the expression.
    :raises ValueError: if no key's bounds contain *variable*.
    :raises TypeError: if the matched value has an unsupported type.
    """
    def _key_matches(bounds_key):
        # Parse "X;Y", "X;" or ";Y" into integer bounds and test `variable`.
        parts = bounds_key.split(";")
        if "" in parts:
            parts.remove("")
        parts = [int(p) for p in parts]
        if bounds_key[-1] == ";":
            return variable >= parts[0]
        if bounds_key[0] == ";":
            return variable <= parts[0]
        return parts[0] <= variable <= parts[1]

    matched_key = next((k for k in expression_dict if _key_matches(k)), None)
    if matched_key is None:
        raise ValueError("No matching expression found!")
    expression = expression_dict[matched_key]
    if type(expression) is int or type(expression) is float:
        return expression
    if type(expression) is str:
        return parse_arithmetic(expression, var_dict)
    if type(expression) is dict:
        # Dict values (e.g. attribute mods): evaluate each string entry.
        for inner_key in expression.keys():
            if type(expression[inner_key]) is str:
                expression[inner_key] = parse_arithmetic(expression[inner_key], var_dict)
        return expression
    raise TypeError(f"Invalid type for expression {expression}")
def get_tag(el):
    """ :returns: `geographic_msgs/KeyValue`_ message for `<tag>` *el* if any, None otherwise. """
    # PEP 8: test for None with `is`/`is not`, not `!=`.
    key = el.get('k')
    if key is None:
        return None
    pair = KeyValue()
    pair.key = key
    # 'v' is mandatory on a <tag> element; get_required_attribute enforces it.
    pair.value = get_required_attribute(el, 'v')
    return pair
from typing import Dict
from typing import Any
from typing import Tuple
def json_to_transactions(json_data: Dict[Any, Any]) -> Tuple[Transaction, ...]:
    """
    Convert json data to a tuple of transaction objects.

    Parameters
    ----------
    json_data :
        The transactions data where the values are hexadecimals.

    Returns
    -------
    transactions : `Tuple[Transaction, ...]`
        The transaction objects obtained from the json data.
    """
    def _decode(raw: Dict[Any, Any]) -> Transaction:
        # "to" is the empty string for contract-creation transactions.
        recipient = None if raw["to"] == "" else hex_to_address(raw["to"])
        return Transaction(
            nonce=hex_to_u256(raw["nonce"]),
            gas_price=hex_to_u256(raw["gasPrice"]),
            gas=hex_to_u256(raw["gas"]),
            to=recipient,
            value=hex_to_u256(raw["value"]),
            data=hex_to_bytes(raw["input"]),
            v=hex_to_u256(raw["v"]),
            r=hex_to_u256(raw["r"]),
            s=hex_to_u256(raw["s"]),
        )

    return tuple(_decode(tx) for tx in json_data["transactions"])
def textureLightingCost(texParam, img, vertexCoord, sh, model, renderObj, w = (1, 1), option = 'tl', constCoef = None):
    """
    Energy formulation for fitting texture and spherical harmonic lighting coefficients.

    Depending on `option`, `texParam` holds:
      'tl' -- texture coefficients followed by the 27 flattened SH coefficients,
      't'  -- texture coefficients only (SH taken from `constCoef`),
      'l'  -- SH coefficients only (texture taken from `constCoef`).

    Returns the weighted cost: color term plus (except for option 'l')
    a statistical regularization of the texture coefficients.
    """
    # Compare strings with `==`, not `is`: `is` tests object identity and
    # only happened to work because CPython interns short literals.
    if option == 'tl':
        texCoef = texParam[:model.numTex]
        shCoef = texParam[model.numTex:].reshape(9, 3)
    elif option == 't':
        texCoef = texParam
        shCoef = constCoef.reshape(9, 3)
    elif option == 'l':
        texCoef = constCoef
        shCoef = texParam.reshape(9, 3)
    texture = generateTexture(vertexCoord, np.r_[texCoef, shCoef.flatten()], model)
    renderObj.updateVertexBuffer(np.r_[vertexCoord.T, texture.T])
    renderObj.resetFramebufferObject()
    renderObj.render()
    rendering, pixelCoord = renderObj.grabRendering(return_info = True)[:2]
    # Compare only the pixels actually covered by the rendering.
    rendering = rendering[pixelCoord[:, 0], pixelCoord[:, 1]]
    img = img[pixelCoord[:, 0], pixelCoord[:, 1]]
    # Color matching cost (mean squared residual over rendered pixels)
    r = (rendering - img).flatten()
    Ecol = np.dot(r, r) / pixelCoord.shape[0]
    # Statistical regularization
    Ereg = np.sum(texCoef ** 2 / model.texEval)
    if option == 'l':
        return w[0] * Ecol
    else:
        return w[0] * Ecol + w[1] * Ereg
def find_reaction_by_index(rxn: list, num: int) -> object:
    """
    Find a reaction in an RMG reaction list by its Chemkin index.

    Displays the matching reaction and prints its library (when present)
    and kinetics as a side effect.

    :param rxn: (list) rmg reaction list
    :param num: (int) chemkin reaction index
    :return: rmg reaction, or None when no reaction carries that index
    """
    for reaction in rxn:
        if reaction.index != num:
            continue
        display(reaction)
        if hasattr(reaction, 'library'):
            print(reaction.library)
        else:
            print("reaction object has no library attribute")
        print(reaction.kinetics,"\n")
        return reaction
def map_contributor(contributor_dict, role_idx=0):
    """Map the DMP's contributor(s) to the record's contributor(s)."""
    cid = contributor_dict["contributor_id"]
    if is_identifier_type_allowed(cid["type"], contributor_dict):
        identifiers = {cid["type"]: cid["identifier"]}
    else:
        identifiers = {}
    # note: currently (sept 2020), the role is a SanitizedUnicode in the
    # rdm-records marshmallow schema
    contributor = {
        "name": contributor_dict["name"],
        "type": "Personal",  # TODO ?
        "given_name": None,
        "family_name": None,
        "identifiers": identifiers,
        "affiliations": [],
        "role": contributor_dict["role"][role_idx],
    }
    # Overlay any extra person details, but only for fields the record
    # schema knows about and only when a real value is present.
    for field, value in translate_person_details(contributor_dict).items():
        if field in contributor and value is not None:
            contributor[field] = value
    return {field: value for field, value in contributor.items() if value is not None}
def timeIntegration(params):
    """
    TIMEINTEGRATION : Simulate a network of aLN modules.

    Unpacks the parameter dictionary, precomputes delay matrices, initial
    states and noise buffers, and delegates the actual Euler integration
    to the compiled kernel ``timeIntegration_njit_elementwise``.

    Return:
      rates_exc: N*L array : the exc. neuron rate time series (kHz) of the N nodes
      rates_inh: N*L array : the inh. neuron rate time series (kHz) of the N nodes
      t:         L array   : the time values at which the time series are evaluated
      mufe:      N vector  : final value of mufe for each node
      mufi:      N vector  : final value of mufi for each node
      IA:        N vector  : final value of IA for each node
      seem :     N vector  : final value of seem for each node
      seim :     N vector  : final value of seim for each node
      siem :     N vector  : final value of siem for each node
      siim :     N vector  : final value of siim for each node
      seev :     N vector  : final value of seev for each node
      seiv :     N vector  : final value of seiv for each node
      siev :     N vector  : final value of siev for each node
      siiv :     N vector  : final value of siiv for each node
    """
    dt = params['dt']  # Time step for the Euler integration (ms)
    duration = params['duration']  # Simulation duration (ms)
    RNGseed = params['seed']  # seed for RNG
    warn = params['warn']  # Display a warning if out of precalc limits in interpolation,
    # set to 0 for faster computation

    # ------------------------------------------------------------------------
    # global coupling parameters

    # Connectivity matrix
    Cmat = params['Cmat']  # Interareal relative coupling strengths (values between 0 and 1), Cmat(i,j) connection from jth to ith
    N = len(Cmat)  # Number of areas

    # Interareal connection delay
    lengthMat = params['lengthMat']
    signalV = params['signalV']

    if N == 1:
        Dmat = np.ones((N,N))*params['de']
    else:
        Dmat = dp.computeDelayMatrix(lengthMat,signalV)  # Interareal connection delays, Dmat(i,j) connection from jth node to ith (ms)
        Dmat[np.eye(len(Dmat))==1] = np.ones(len(Dmat))*params['de']
    Dmat_ndt = np.around(Dmat/dt).astype(int)  # delay matrix in multiples of dt
    params['Dmat_ndt'] = Dmat_ndt
    #print(Dmat_ndt)
    #print(np.max(Dmat_ndt), np.min(Dmat_ndt))
    c_gl = params['c_gl']  # global coupling strength between areas (unitless)
    Ke_gl = params['Ke_gl']  # number of incoming E connections (to E population) from each area

    # ------------------------------------------------------------------------
    # local network (area) parameters [identical for all areas for now]

    ### model parameters
    dosc_version = params['dosc_version']  # (Use dynamic oscillations?) if 0: exponential version (fewer dimensions, faster)
    filter_sigma = params['filter_sigma']
    distr_delay = params['distr_delay']
    fast_interp = params['fast_interp']
    global_delay = params['global_delay']

    # external input parameters:
    tau_ou = params['tau_ou']  # Time constant of the Ornstein-Uhlenbeck process for the external input (ms)
    sigma_ou = params['sigma_ou']  # Noise amplitude of the Ornstein-Uhlenbeck (OU) process for the external input ( mV/ms/sqrt(ms) )
    mue_ext_mean = params['mue_ext_mean']  # Mean external excitatory input (OU process) (mV/ms)
    mui_ext_mean = params['mui_ext_mean']  # Mean external inhibitory input (OU process) (mV/ms)
    sigmae_ext = params['sigmae_ext']  # External exc input standard deviation ( mV/sqrt(ms) )
    sigmai_ext = params['sigmai_ext']  # External inh input standard deviation ( mV/sqrt(ms) )

    # recurrent coupling parameters
    Ke = params['Ke']  # Recurrent Exc coupling. "EE = IE" assumed for act_dep_coupling in current implementation
    Ki = params['Ki']  # Recurrent Inh coupling. "EI = II" assumed for act_dep_coupling in current implementation

    # Recurrent connection delays
    de = params['de']  # Local constant delay "EE = IE" (ms)
    di = params['di']  # Local constant delay "EI = II" (ms)

    tau_se = params['tau_se']  # Synaptic decay time constant for exc. connections "EE = IE" (ms)
    tau_si = params['tau_si']  # Synaptic decay time constant for inh. connections "EI = II" (ms)
    tau_de = params['tau_de']
    tau_di = params['tau_di']

    cee = params['cee']  # strength of exc. connection
    # -> determines ePSP magnitude in state-dependent way (in the original model)
    cie = params['cie']  # strength of inh. connection
    # -> determines iPSP magnitude in state-dependent way (in the original model)
    cei = params['cei']
    cii = params['cii']

    # Recurrent connections coupling strength
    Jee_max = params['Jee_max']  # ( mV/ms )
    Jei_max = params['Jei_max']  # ( mV/ms )
    Jie_max = params['Jie_max']  # ( mV/ms )
    Jii_max = params['Jii_max']  # ( mV/ms )

    # rescales c's here: multiplication with tau_se makes
    # the increase of s subject to a single input spike invariant to tau_se
    # division by J ensures that mu = J*s will result in a PSP of exactly c
    # for a single spike!
    cee = cee*tau_se/Jee_max  #ms
    cie = cie*tau_se/Jie_max  #ms
    cei = cei*tau_si/abs(Jei_max)  #ms
    cii = cii*tau_si/abs(Jii_max)  #ms
    c_gl = c_gl*tau_se/Jee_max  #ms

    # neuron model parameters
    a = params['a']  # Adaptation coupling term ( nS )
    b = params['b']  # Spike triggered adaptation ( pA )
    EA = params['EA']  # Adaptation reversal potential ( mV )
    tauA = params['tauA']  # Adaptation time constant ( ms )
    # if params below are changed, preprocessing required
    C = params['C']  # membrane capacitance ( pF )
    gL = params['gL']  # Membrane conductance ( nS )
    EL = params['EL']  # Leak reversal potential ( mV )
    DeltaT = params['DeltaT']  # Slope factor ( EIF neuron ) ( mV )
    VT = params['VT']  # Effective threshold (in exp term of the aEIF model) ( mV )
    Vr = params['Vr']  # Membrane potential reset value ( mV )
    Vs = params['Vs']  # Cutoff or spike voltage value, determines the time of spike ( mV )
    Tref = params['Tref']  # Refractory time (ms)
    taum = C/gL  # membrane time constant

    # ------------------------------------------------------------------------
    # ------------------------------------------------------------------------
    # initial state values (copied so the caller's params are not mutated)
    mufe = params['mufe_init'].copy()  # Filtered mean input (mu) for exc. population
    mufi = params['mufi_init'].copy()  # Filtered mean input (mu) for inh. population
    mufe_dosc = params['mufe_init'].copy() + np.zeros(N)*(1j)  # (if dosc_version: complex variable) (mV/ms)
    mufi_dosc = params['mufi_init'].copy() + np.zeros(N)*(1j)  # (if dosc_version: complex variable) (mV/ms)
    IA = params['IA_init'].copy()  # Adaptation current (pA)
    seem = params['seem_init'].copy()  # Mean exc synaptic input
    seim = params['seim_init'].copy()
    seev = params['seev_init'].copy()  # Exc synaptic input variance
    seiv = params['seiv_init'].copy()
    siim = params['siim_init'].copy()  # Mean inh synaptic input
    siem = params['siem_init'].copy()
    siiv = params['siiv_init'].copy()  # Inh synaptic input variance
    siev = params['siev_init'].copy()

    # Lookup tables for the transfer functions
    precalc_r, precalc_V, precalc_tau_mu, precalc_tau_sigma = params['precalc_r'], params['precalc_V'], params['precalc_tau_mu'], params['precalc_tau_sigma']

    # parameters for the lookup tables
    dI = params['dI']
    ds = params['ds']
    sigmarange = params['sigmarange']
    Irange = params['Irange']

    # Initialization
    t = np.arange(0,duration,dt)  # Time variable (ms)
    sqrt_dt = np.sqrt(dt)

    ndt_de = np.around(de/dt).astype(int)
    ndt_di = np.around(di/dt).astype(int)

    if not global_delay:  # if no network delays, fill delay matrix with zeroes
        Dmat_ndt = np.zeros((N,N))  # ATTENTION: this will also get rid of the INTRA-network delays (which is modelled as an E-E delay)

    max_global_delay = np.max(Dmat_ndt)
    # startind: number of history steps needed before the integration starts
    startind = int(np.max([max_global_delay, ndt_de, ndt_di])+1)
    #print("Startind: {}, len(t): {}, simulation steps: {}".format(startind, len(t), len(range(startind-1,len(t)))))

    mue_ext = mue_ext_mean*np.ones((N,))  # Mean external exc input (mV/ms)
    mui_ext = mui_ext_mean*np.ones((N,))  # Mean external inh input (mV/ms)

    # Set the initial firing rates.
    if np.shape(params['rates_exc_init'])[1] == 1:
        # If the initial firing rate is a 1D array, we use a fixed firing rate for a time "max_delay" before the simulation
        rates_exc = np.dot(params['rates_exc_init'], np.ones((1,len(t))))  #kHz
        rates_inh = np.dot(params['rates_inh_init'], np.ones((1,len(t))))  #kHz
    else:
        # Reuse the firing rates computed in a preceding simulation
        rates_exc = np.zeros((N,len(t)))
        rates_inh = np.zeros((N,len(t)))
        rates_exc[:,0:startind-1] = params['rates_exc_init'][:,-startind:]
        rates_inh[:,0:startind-1] = params['rates_inh_init'][:,-startind:]

    if distr_delay:
        rd_exc = 0.01*np.ones((N,N))
        rd_inh = 0.01*np.ones(N)
        for l in range(N):
            Dmat_ndt[l,l] = 0.0  # if distributed, this will be calculated in the r_d ODE (Eq. 4.43)
    else:
        rd_exc = np.zeros((N,N))  #kHz  rd_exc(i,j): Connection from jth node to ith
        rd_inh = np.zeros(N)
        for l in range(N):
            Dmat_ndt[l,l] = ndt_de  # if not distributed, this is a fixed value (E-E coupling)

    # Save the noise in the rates array to save memory
    if RNGseed:
        np.random.seed(RNGseed)
    rates_exc[:,startind:] = np.random.standard_normal( ( N, len ( range(startind,len(t) ) ) ) )
    rates_inh[:,startind:] = np.random.standard_normal( ( N, len ( range(startind,len(t) ) ) ) )

    noise_exc = np.zeros((N,))
    noise_inh = np.zeros((N,))
    zeros4 = np.zeros((4,))

    # ------------------------------------------------------------------------
    # PREPARE OBJECTS FOR RETURNING VALUES
    r_ext_chunk = np.zeros(rates_exc.shape)
    mue_ext_chunk = np.zeros(rates_exc.shape)
    mui_ext_chunk = np.zeros(rates_exc.shape)
    sigmae_chunk = np.zeros(rates_exc.shape)
    sigmai_chunk = np.zeros(rates_exc.shape)
    seem_chunk = np.zeros(rates_exc.shape)
    siim_chunk = np.zeros(rates_exc.shape)
    seim_chunk = np.zeros(rates_exc.shape)
    siem_chunk = np.zeros(rates_exc.shape)
    seev_chunk = np.zeros(rates_exc.shape)
    siiv_chunk = np.zeros(rates_exc.shape)
    seiv_chunk = np.zeros(rates_exc.shape)
    siev_chunk = np.zeros(rates_exc.shape)
    mufe_chunk = np.zeros(rates_exc.shape)
    mufi_chunk = np.zeros(rates_exc.shape)
    IA_chunk = np.zeros(rates_exc.shape)
    tau_exc_chunk = np.zeros(rates_exc.shape)
    tau_inh_chunk = np.zeros(rates_exc.shape)

    # tile external inputs to appropriate shape
    ext_exc_current = adjust_shape(params['ext_exc_current'], rates_exc)
    ext_inh_current = adjust_shape(params['ext_inh_current'], rates_exc)
    ext_exc_rate = adjust_shape(params['ext_exc_rate'], rates_exc)
    ext_inh_rate = adjust_shape(params['ext_inh_rate'], rates_exc)

    # ------------------------------------------------------------------------
    # hand everything over to the compiled elementwise integrator
    return timeIntegration_njit_elementwise(dt, duration,
                    warn, dosc_version,
                    distr_delay, filter_sigma, fast_interp,
                    Cmat, Dmat,
                    c_gl, Ke_gl,
                    tau_ou, sigma_ou,
                    mue_ext_mean, mui_ext_mean,
                    sigmae_ext, sigmai_ext,
                    Ke, Ki,
                    de, di,
                    tau_se, tau_si,
                    tau_de, tau_di,
                    cee, cie, cii, cei,
                    Jee_max, Jei_max, Jie_max, Jii_max,
                    a, b,
                    EA, tauA,
                    C, gL, EL, DeltaT, VT, Vr, Vs, Tref, taum,
                    mufe, mufi, mufe_dosc, mufi_dosc,
                    IA,
                    seem, seim, seev, seiv, siim, siem, siiv, siev,
                    precalc_r, precalc_V, precalc_tau_mu, precalc_tau_sigma,
                    dI, ds,
                    sigmarange, Irange,
                    N, Dmat_ndt,
                    t, rates_exc, rates_inh,
                    rd_exc, rd_inh,
                    sqrt_dt, startind,
                    ndt_de, ndt_di,
                    max_global_delay,
                    mue_ext, mui_ext,
                    r_ext_chunk,
                    mue_ext_chunk, mui_ext_chunk,
                    sigmae_chunk, sigmai_chunk,
                    mufe_chunk, mufi_chunk,
                    seem_chunk, siim_chunk,
                    seim_chunk, siem_chunk,
                    seev_chunk, siiv_chunk,
                    seiv_chunk, siev_chunk,
                    IA_chunk,
                    tau_exc_chunk, tau_inh_chunk,
                    ext_exc_rate, ext_inh_rate,
                    ext_exc_current, ext_inh_current,
                    noise_exc, noise_inh, zeros4)
def string_similarity(s1, s2):
    """
    Return a similarity ratio between two strings in [0, 1].

    Args:
        s1: string
        s2: string

    Returns: float
    """
    matcher = SequenceMatcher(None, s1, s2)
    return matcher.ratio()
# NOTE(review): the original `from re import T` was a mistaken import --
# `re.T` is a regex flag, so annotating with `T.FloatTensor` raised
# AttributeError as soon as the function was defined. Kept only so any
# other use of `T` elsewhere in the file keeps resolving.
from re import T
import torch
def pinv(mat: torch.Tensor) -> torch.Tensor:
    """
    Compute the matrix pseudoinverse via SVD.

    Args:
        mat: A square matrix.

    Returns:
        tensor: The matrix pseudoinverse ``V @ diag(1/s) @ U.T``.

    Note:
        Singular values are inverted without thresholding, so an exactly
        singular input yields ``inf`` entries.
    """
    U, s, V = torch.svd(mat)
    # Row vector of reciprocal singular values; broadcasting V * S scales
    # column j of V by 1/s[j], i.e. computes V @ diag(1/s).
    S = s.reciprocal().unsqueeze(0)
    return (V * S).mm(U.t())
from typing import Mapping
def get_remappings_full() -> Mapping[str, str]:
    """Get the remappings for xrefs based on the entire xref database."""
    registry = _get_curated_registry()
    return registry["remappings"]["full"]
def attention(img, att_map):
    """
    Overlay an attention map on an image as a red heat map.

    Inputs:
        img -- original image (assumes H x W x 3 -- TODO confirm; a
               single-channel image would not broadcast in the blend below)
        att_map -- attention map, in this case shape (7,7) or (7,7,1,1) else.
    To visualize just imshow new_img
    """
    att_map = np.reshape(att_map, [7,7])
    # Upsample 7x7 -> 224x224 by repeating each cell as a 32x32 tile.
    att_map = att_map.repeat(32, axis=0).repeat(32, axis=1)
    # Expand to 3 channels, then keep the attention only in channel 0 (red).
    att_map = np.tile(np.expand_dims(att_map, 2),[1,1,3])
    att_map[:,:,1:] = 0
    # apply gaussian
    att_map = gaussian_filter(att_map, sigma=7)
    # NOTE(review): divides the min-shifted values by the *unshifted* max,
    # so the result is not strictly normalized to [0, 1] -- confirm intended.
    att_map = (att_map-att_map.min()) / att_map.max()
    att_map = cv2.resize(att_map, (img.shape[1], img.shape[0]))
    # Blend: 80% heat map (scaled to 0-255) + 20% original image.
    new_img = att_map*255*0.8 + img*0.2
    new_img = new_img.astype(np.uint8)
    return new_img
import scipy
def _winsorize_wrapper(x, limits):
"""
Wraps scipy winsorize function to drop na's
"""
if hasattr(x, 'dropna'):
if len(x.dropna()) == 0:
return x
x[~np.isnan(x)] = scipy.stats.mstats.winsorize(x[~np.isnan(x)],
limits=limits)
return x
else:
return scipy.stats.mstats.winsorize(x, limits=limits) | 49cade19f486d596241ef5eaf18c2a8c3c7a078f | 3,628,791 |
def haversine(lon1, lat1, lon2, lat2, unit = 'km'):
    """Calculate the great circle distance between two lat/lons.

    Adapted from https://stackoverflow.com/questions/4913349

    Parameters
    ----------
    lon1 : :obj:`float` or vector of :obj:`float`
        Longitude of 1st point
    lat1 : :obj:`float` or vector of :obj:`float`
        Latitude of 1st point
    lon2 : :obj:`float` or vector of :obj:`float`
        Longitude of 2nd point
    lat2 : :obj:`float` or vector of :obj:`float`
        Latitude of 2nd point
    unit : :obj:`str`, optional, default = ``'km'``
        * ``'km'`` : Kilometers
        * ``'mi'`` : Miles

    Returns
    -------
    :obj:`float`
        Distance in specified unit

    Raises
    ------
    ValueError
        If ``unit`` is neither ``'km'`` nor ``'mi'``.
    """
    if unit == 'km':
        r = 6371  # Radius of the earth in km
    elif unit == 'mi':
        r = 3956  # Radius of the earth in mi
    else:
        # Previously an unknown unit crashed later with UnboundLocalError
        # on `r`; fail early with a clear message instead.
        raise ValueError("unit must be 'km' or 'mi', got {!r}".format(unit))
    # convert decimal degrees to radians
    lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2])
    # haversine formula
    dlon = lon2 - lon1
    dlat = lat2 - lat1
    a = np.sin(dlat/2)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2)**2
    c = 2 * np.arcsin(np.sqrt(a))
    return c * r
def get_datatype() -> str:
    """Prompt the user to pick a datatype and return the matching key.

    Returns
    -------
    str
        Key from DATATYPE_KEYS corresponding to the datatype name the
        user chose from DATATYPE_NAMES.
    """
    prompt_fn, prompt_kwargs = get_inquirer("choices")
    prompt_kwargs["choices"] = DATATYPE_NAMES
    chosen_name = prompt_fn(**prompt_kwargs)
    return DATATYPE_KEYS[DATATYPE_NAMES.index(chosen_name)]
def stereonet2xyz(lon, lat):
    """
    Convert lower-hemisphere stereonet longitudes/latitudes into _world_
    x, y, z coordinates.

    Parameters
    ----------
    lon, lat : array-likes
        Sequences of longitudes and latitudes (in radians) from a
        lower-hemisphere stereonet

    Returns
    -------
    x, y, z : arrays
        The world x,y,z components of the vectors represented by the lon,
        lat coordinates on the stereonet.
    """
    lon, lat = np.atleast_1d(lon, lat)
    sx, sy, sz = sph2cart(lon, lat)
    # Rotate stereonet axes into world axes: world (x, y, z) = (y, z, -x).
    return sy, sz, -sx
def sky_to_cartesian(rdd, degree=True, dtype=None):
    """
    Transform right ascension, declination and distance into cartesian
    coordinates.

    Parameters
    ----------
    rdd : array of shape (3, N), list of 3 arrays
        Right ascension, declination and distance.
    degree : default=True
        Whether RA, Dec are in degrees (``True``) or radians (``False``).

    Returns
    -------
    positions : list of 3 arrays
        Positions x, y, z in cartesian coordinates.
    """
    ra, dec, dist = rdd
    to_rad = np.pi / 180. if degree else 1.
    theta = ra * to_rad
    phi = dec * to_rad
    x = dist * np.cos(phi) * np.cos(theta)
    y = dist * np.cos(phi) * np.sin(theta)
    z = dist * np.sin(phi)
    return [x, y, z]
def delete(id):
    """Delete the post with the given primary key and return to the index."""
    post = Post.query.get(id)
    db.session.delete(post)
    db.session.commit()
    flash("Post successfully deleted")
    return redirect(url_for(".index2"))
import six
def repeat_n_times(n, fn, *args, **kwargs):
    """ Repeat apply fn n times.

    Args:
        n: n times.
        fn: a function or a list of n functions.
        *args: additional args. Each arg should either be not a list, or a list
            of length n.
        **kwargs: additional keyword args. Each arg should either be not a
            list, or a list of length n.

    Returns:
        either a single list of length n (if fn does not return a tuple), or a
        tuple of lists of length n (if fn returns a tuple).
    """
    # Broadcast (or split) every positional arg into n per-call arg lists.
    if args:
        my_args = _transpose_list_of_lists(
            [_maybe_repeat(arg, n) for arg in args])
    else:
        my_args = [[] for _ in range(n)]
    my_kwargs = [{} for _ in range(n)]
    # `dict.items()` is iterable on both Python 2 and 3; the previous
    # `six.iteritems` dependency was unnecessary.
    for k, v in kwargs.items():
        vals = _maybe_repeat(v, n)
        for i in range(n):
            my_kwargs[i][k] = vals[i]
    # construct lists of functions
    fns = _maybe_repeat(fn, n)
    outputs = [fns[i](*my_args[i], **my_kwargs[i]) for i in range(n)]
    if isinstance(outputs[0], tuple):
        # fn returned tuples: transpose into a tuple of per-position lists.
        outputs = list(zip(*outputs))
        outputs = tuple(list(o) for o in outputs)
    return outputs
def get_submissions(request, domain_id=0):
    """ Takes a POST containing a tar of all MD5's
    and returns a tar of all missing submissions

    Heh, this is explicitly against good REST methodology
    We leave this inside the django-rest 'Resource' so we can
    use their authentication tools
    """
    try:
        return _get_submissions(request, domain_id)
    except Exception as e:
        # `except Exception, e` and `e.message` are Python-2-only forms;
        # use the portable `as e` / str(e) equivalents.
        _exc_type, _exc_value, tb = sys.exc_info()
        logging.error( "EXCEPTION raised: %s" % (str(e)) )
        logging.error( "TRACEBACK:\n%s" % ('\n'.join(traceback.format_tb(tb))) )
        return HttpResponseBadRequest( "Exception raised %s." % (str(e)) )
import uuid
def generate_unique_str(allow_dashes=True):
    """
    Generate a unique identifier string using the uuid package.

    Args:
        allow_dashes (bool, optional): When True return the canonical
            dashed UUID4 form; otherwise return the 32-character hex
            form (no dashes). Defaults to True.
    """
    fresh = uuid.uuid4()
    return str(fresh) if allow_dashes else fresh.hex
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.