content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
import win32gui
def ScreenToMouse(pt):
"""Convert a value in screen coordinates to mouse coordinates.
Mouse coordinates are specified as a percentage of screen dimensions,
normalized to 16 bits. 0 represents the far left/top of the screen,
65535 represents the far right/bottom. This function assumes that
  the size of the screen is fixed at module load time and does not change.
Args:
pt: the point of the coords to convert
Returns:
the converted point
"""
# Initialize the screen dimensions on first execution. Note that this
# function assumes that the screen dimensions do not change during run.
  if not getattr(ScreenToMouse, '_SCREEN_DIMENSIONS', None):
desktop = win32gui.GetClientRect(win32gui.GetDesktopWindow())
ScreenToMouse._SCREEN_DIMENSIONS = (desktop[2], desktop[3])
  return ((65535 * pt[0]) // ScreenToMouse._SCREEN_DIMENSIONS[0],
          (65535 * pt[1]) // ScreenToMouse._SCREEN_DIMENSIONS[1]) | 6b25230f87d581b9cb91bca029b68f004f413fbf | 3,635,800 |
def triadic_closure_algorithm():
"""
How to do triadic closure.
"""
ans = """
I would suggest the following strategy:
1. Pick a node
1. For every pair of neighbors:
1. If neighbors are not connected,
then this is a potential triangle to close.
This strategy gives you potential triadic closures
given a "center" node `n`.
The other way is to trace out a path two degrees out
and ask whether the terminal node is a neighbor
of the starting node.
If not, then we have another triadic closure to make.
"""
return render_html(ans) | 2ee1ca511975f6f7d4ef0a2f55c95f70bc3a56da | 3,635,801 |
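A minimal sketch of the neighbor-pair strategy described above, using networkx (an assumption; the original answer only returns the explanation as HTML):

from itertools import combinations
import networkx as nx

def potential_triadic_closures(G: nx.Graph, n):
    """Return node pairs that would close a triangle centered on node `n`."""
    closures = []
    for u, v in combinations(G.neighbors(n), 2):
        # u and v are both neighbors of n; if they are not connected to each
        # other, adding the edge (u, v) is a potential triadic closure.
        if not G.has_edge(u, v):
            closures.append((u, v))
    return closures

G = nx.Graph([("a", "b"), ("a", "c"), ("b", "c"), ("a", "d")])
print(potential_triadic_closures(G, "a"))  # [('b', 'd'), ('c', 'd')]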
from typing import Optional
import tarfile
import os
import stat
def _stat_to_tarinfo(
base_path: str, arch_path: str, *, umask: Optional[int] = None, follow_link=True
) -> tarfile.TarInfo:
"""
Convert a stat_result into a TarInfo structure.
"""
tarinfo = tarfile.TarInfo()
if follow_link:
statres = os.stat(os.path.join(base_path, arch_path))
else:
statres = os.lstat(os.path.join(base_path, arch_path))
linkname = ""
stmd = statres.st_mode
if stat.S_ISREG(stmd):
typ = tarfile.REGTYPE
elif stat.S_ISDIR(stmd):
typ = tarfile.DIRTYPE
elif stat.S_ISFIFO(stmd):
typ = tarfile.FIFOTYPE
elif stat.S_ISLNK(stmd):
typ = tarfile.SYMTYPE
linkname = os.readlink(os.path.join(base_path, arch_path))
elif stat.S_ISCHR(stmd):
typ = tarfile.CHRTYPE
elif stat.S_ISBLK(stmd):
typ = tarfile.BLKTYPE
else:
raise TplBuildException("Unsupported file mode in context")
if arch_path == ".":
tarinfo.name = "/"
elif arch_path.startswith("./"):
tarinfo.name = arch_path[1:]
else:
tarinfo.name = "/" + arch_path
tarinfo.mode = _apply_umask(stmd, umask)
tarinfo.uid = 0
tarinfo.gid = 0
tarinfo.uname = "root"
tarinfo.gname = "root"
if typ == tarfile.REGTYPE:
tarinfo.size = statres.st_size
else:
tarinfo.size = 0
tarinfo.mtime = 0
tarinfo.type = typ
tarinfo.linkname = linkname
if typ in (tarfile.CHRTYPE, tarfile.BLKTYPE):
if hasattr(os, "major") and hasattr(os, "minor"):
tarinfo.devmajor = os.major(statres.st_rdev)
tarinfo.devminor = os.minor(statres.st_rdev)
return tarinfo | 3a0f42c983b3d7cee75059c044f4d24794ef016b | 3,635,802 |
def parsed_args_gen():
"""Returns a function which creates an emulated parsed_args from kwargs
"""
def generator(**kwargs):
        return AttrDict(dict(kwargs))
return generator | 27cd56860ed4e1eca736b91b9a476b554df68512 | 3,635,803 |
def floordiv(a, b):
"""Compute the floordiv of two expressions.
Parameters
----------
a : PrimExpr
The left hand operand
b : PrimExpr
The right hand operand
Returns
-------
res : PrimExpr
The result expression.
"""
return _ffi_api._OpFloorDiv(a, b) | 9796e7c169e500c3ed7388d5f2756f3bc1fb478e | 3,635,804 |
import typing
import inspect
def optional(converter: typing.Callable) -> typing.Any:
"""
A modified version of attrs optional decorator that supports both `None` and `MISSING`
Type annotations will be inferred from the wrapped converter's, if it
has any.
    Args:
        converter: The converter used for values that are not None or MISSING.
"""
def optional_converter(val) -> typing.Any:
if val is None or val is MISSING:
return val
return converter(val)
sig = None
try:
sig = inspect.signature(converter)
except (ValueError, TypeError): # inspect failed
pass
if sig:
params = list(sig.parameters.values())
if params and params[0].annotation is not inspect.Parameter.empty:
optional_converter.__annotations__["val"] = typing.Optional[params[0].annotation]
if sig.return_annotation is not inspect.Signature.empty:
optional_converter.__annotations__["return"] = typing.Optional[sig.return_annotation]
return optional_converter | e44d0baa06859271d9ab1e37a7f14a5bfcc452ef | 3,635,805 |
def convert_symbol(mpl_symbol):
"""Convert mpl marker symbol to plotly symbol and return symbol."""
if isinstance(mpl_symbol, list):
symbol = list()
for s in mpl_symbol:
symbol += [convert_symbol(s)]
return symbol
elif mpl_symbol in SYMBOL_MAP:
return SYMBOL_MAP[mpl_symbol]
else:
return 'dot' | 931316aa19d1292bd9905292edf3bf9117209874 | 3,635,806 |
from typing import Dict
def remove_none_dict(input_dict: Dict) -> Dict:
"""
    Removes all None values from a dict.
    :param input_dict: any dictionary
    :return: a copy of the dictionary without None-valued entries
"""
return {key: value for key, value in input_dict.items() if value is not None} | 3f91d653a680f0f9d842ab44cbbb9ea4142c12ab | 3,635,807 |
def get_host(request, host_id, segment_id):
"""return single host """
return openstack_connection(request).get_host(host_id, segment_id) | 0c214a73c302ea5acb7de98723e9520dfd56acba | 3,635,808 |
def _GetAttachedDevices(blacklist_file, test_device):
"""Get all attached devices.
  Args:
    blacklist_file: Path to the device blacklist file.
    test_device: Name of a specific device to use.
Returns:
A list of attached devices.
"""
blacklist = (device_blacklist.Blacklist(blacklist_file)
if blacklist_file
else None)
attached_devices = device_utils.DeviceUtils.HealthyDevices(blacklist)
if test_device:
test_device = [d for d in attached_devices if d == test_device]
if not test_device:
raise device_errors.DeviceUnreachableError(
        'Did not find device %s among attached devices. Attached devices: %s'
% (test_device, ', '.join(attached_devices)))
return test_device
else:
if not attached_devices:
raise device_errors.NoDevicesError()
return sorted(attached_devices) | bcbba1cf2297dffc9648145ee27c9dc75d266448 | 3,635,809 |
import numpy
def li_dong_2016_load_uy_profiles():
""" Load and return the y-velocity profiles (digitized from Fig. 4b).
Returns
-------
    numpy.ndarray
        y-velocity (plus x position) as a 1D array of floats.
    numpy.ndarray
        y positions as a 1D array of floats.
"""
filepath = DATADIR / 'li_dong_2016_fig4b.csv'
with open(filepath, 'r') as infile:
uy, y = numpy.loadtxt(infile, delimiter=',', unpack=True)
return uy, y | 5847483141589c2b0cd8135ac688eb560c82cd71 | 3,635,810 |
def msg_queue_mode(params):
"""
Generate outgoing messages for `queue_mode_...` commands. The supported option is ``set``.
Parameters
----------
params : list
List of parameters of the command. The first two elements of the list are expected to
be ``mode`` and ``set`` keywords.
Returns
-------
str
Name of the method from RE Manager API
dict
Dictionary of the method parameters
"""
# Check if the function was called for the appropriate command
command = "queue"
expected_p0 = "mode"
if params[0] != expected_p0:
raise ValueError(f"Incorrect parameter value '{params[0]}'. Expected value: '{expected_p0}'")
# Make sure that there is a sufficient number of parameters to start processing
if len(params) < 2:
raise CommandParameterError(f"Item type and options are not specified '{command} {params[0]}'")
p_item_type = params[1]
if p_item_type != "set":
raise_request_not_supported([command, params[0], params[1]])
try:
if p_item_type == "set":
params_mode = params[2:]
if len(params_mode) % 2:
raise CommandParameterError(
f"The list of queue mode parameters must have even number of elements: {params_mode}"
)
queue_mode = {params_mode[i]: params_mode[i + 1] for i in range(0, len(params_mode), 2)}
for k in queue_mode.keys():
# Attempt to evaluate key parameters (e.g. "True" should become boolean True)
# If a parameter fails to evaluate, it should remain a string.
try:
queue_mode[k] = eval(queue_mode[k], {}, {})
except Exception:
pass
cmd_prms = {"mode": queue_mode}
else:
# This indicates a bug in the program.
raise ValueError(f"Unknown item type: {p_item_type}")
except IndexError:
raise CommandParameterError(f"The command '{params}' contain insufficient number of parameters")
method = f"{command}_{params[0]}_{params[1]}"
prms = cmd_prms
return method, prms | b52368fdab3667e33fef010e5119c9a12a0eb63e | 3,635,811 |
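A brief usage sketch of the happy path (the error helpers above are only reached on invalid input); the token list is hypothetical:

method, prms = msg_queue_mode(["mode", "set", "loop", "True", "ignore_failures", "False"])
print(method)  # queue_mode_set
print(prms)    # {'mode': {'loop': True, 'ignore_failures': False}}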
import warnings
import numpy as np
from tifffile import TiffFile
def read_tiff(path, pages=None):
"""
Reads in a tiff stack
:param path: Full path to file
:param pages: list or numpy array of pages to load
:return: height x width x num_pages array of image files
"""
# Get number of requested pages
if pages is None:
num_pages = 1
elif type(pages) is int:
num_pages = 1
pages = [pages]
elif type(pages) is list or type(pages) is np.ndarray:
num_pages = len(pages)
else:
raise TypeError('Pages is type {}, but must be a list, int, or array'.format(type(pages).__name__))
with TiffFile(path) as f:
# get number of pages in actual tiff
num_tiff_pages = len(f.pages)
if num_pages > num_tiff_pages:
raise IndexError("Too many pages requested. Requested {} pages but only {} pages in tiff"
.format(num_pages, num_tiff_pages))
        if pages is None:
            if num_tiff_pages > 1:
                warnings.warn("No specific pages requested, so returning all pages ({})"
                              .format(num_tiff_pages))
            pages = range(num_tiff_pages)
            num_pages = num_tiff_pages
# initialize tiff array
tiff_shape = f.pages[0].shape
tiff_array = np.empty(shape=(tiff_shape[0], tiff_shape[1], num_pages))
# load each page and store
for ind, page in enumerate(pages):
curr_page = f.pages[page]
tiff_array[:, :, ind] = curr_page.asarray()
# Compress if only 2d
if tiff_array.shape[2] == 1:
tiff_array = tiff_array.squeeze(axis=2)
return tiff_array | 6bc181b445cade4bd1d18f2fabb4e655af97d73b | 3,635,812 |
import pandas
import base64
import csv
import filetype
def parse_file_buffer_to_seldon_request(file):
"""
Reads file buffer and parse to seldon request.
Parameters
----------
file : dict
Spooled temporary file.
Returns
-------
dict
Seldon API request
Raises
------
BadRequest
When `file` has no header.
"""
try:
df = pandas.read_csv(file._file, sep=None, engine='python')
df = df.to_dict('split')
return {
"data": {
"names": df['columns'],
"ndarray": df['data'],
}
}
except UnicodeDecodeError:
file.seek(0)
content = file.read()
bin_data = base64.b64encode(content).decode("utf-8")
kind = filetype.guess(content)
content_type = "application/octet-stream"
if kind is not None:
content_type = kind.mime
return {
"binData": bin_data,
"meta": {
"content-type": content_type,
},
}
except csv.Error:
file.seek(0)
return {
"strData": file.read().decode("utf-8")
} | 4df8b477d0d3b4be0159550ebcc7a2162398e512 | 3,635,813 |
import os
def is_pro():
"""Check if working in PRO"""
return os.environ.get("VTASKS_ENV", "False") == "True" | e193f5d6e4c24d57a2903fcb5714d5f7a8473fcb | 3,635,814 |
def load(data_dir, config, use_feature_transform=False, numeric=False, categorical=False):
"""
Load specific dataset.
Args:
data_dir (str): path to the dataset directory.
config (dict): general dict with settings.
use_feature_transform (bool): apply dense feature transform or not
Returns (dict): tensorflow Dataset objects and features.
"""
if config['data.dataset'] == "heart":
ret = load_heart(data_dir, config,
use_feature_transform=use_feature_transform,
numeric=numeric, categorical=categorical)
else:
raise ValueError(f"Unknow dataset: {config['data.dataset']}")
return ret | 27aa0cea9bd393f860790f7fbfe7207386678f9e | 3,635,815 |
from datahandlers.mnist_data import MNISTData as CVData
from datahandlers.mnist_auto_data import MNISTAutoData as CVData
from datahandlers.fashionmnist_data import FashionMNISTData as CVData
from datahandlers.cifar10_data import CIFAR10Data as CVData
import torchvision
import datasets
import torch
from torch.utils.data import DataLoader
def generate_computer_vis_task(config, logger, device, data_dir):
"""Generate a computer vision datahandler.
Args:
config: Command-line arguments.
logger: The logger.
device: The cuda device.
data_dir (str): The data directory.
Returns:
(....): See docstring of function `generate_task`.
"""
transform = None
if config.dataset in ['mnist', 'mnist_autoencoder']:
# Downloading MNIST from the page of Yann Lecun can give errors. This
# problem is solved in torchvision version 0.9.1 but for earlier versions
# the following fix can be used.
if torchvision.__version__ != '0.9.1':
datasets.MNIST.resources = [
('https://ossci-datasets.s3.amazonaws.com/mnist/train' +
'-images-idx3-ubyte.gz', 'f68b3c2dcbeaaa9fbdd348bbdeb94873'),
('https://ossci-datasets.s3.amazonaws.com/mnist/train' +
'-labels-idx1-ubyte.gz', 'd53e105ee54ea40749a09fcbcd1e9432'),
('https://ossci-datasets.s3.amazonaws.com/mnist/t10k' +
'-images-idx3-ubyte.gz', '9fb629c4189551a2d022fa330f9573f3'),
('https://ossci-datasets.s3.amazonaws.com/mnist/t10k' +
'-labels-idx1-ubyte.gz', 'ec29112dd5afa0611ce80d1b7f02629c')
]
    if config.dataset == 'mnist':
        logger.info('Loading MNIST dataset.')
        train_val_split = [55000, 5000]
elif config.dataset == 'mnist_autoencoder':
logger.info('Loading MNIST autoencoder dataset.')
train_val_split = [55000, 5000]
elif config.dataset == 'fashion_mnist':
logger.info('Loading Fashion MNIST dataset.')
train_val_split = [55000, 5000]
elif config.dataset == 'cifar10':
logger.info('Loading CIFAR-10 dataset.')
train_val_split = [45000, 5000]
### Load the testing data.
testset = CVData(data_dir, device, train=False, download=True,
double_precision=config.double_precision,
target_class_value=config.target_class_value)
test_loader = DataLoader(testset, batch_size=config.batch_size)
### Load the training data and split with validation if necessary.
trainset = CVData(data_dir, device, train=True, download=True,
double_precision=config.double_precision,
target_class_value=config.target_class_value)
val_loader = None
if not config.no_val_set:
trainset, valset = torch.utils.data.random_split(trainset,
train_val_split)
val_loader = DataLoader(valset, batch_size=config.batch_size,
shuffle=False)
train_loader = DataLoader(trainset, batch_size=config.batch_size,
shuffle=True)
### Create the dataset.
ds = DatasetWrapper(train_loader, test_loader, valset=val_loader,
name=config.dataset, in_size=testset._in_size,
out_size=testset._out_size)
return ds | c274245a71002d89b9b171cbd431ddbd627644bd | 3,635,816 |
import yaml
from kubernetes import watch
from kubernetes.client.rest import ApiException
def create(client, spec: str, namespace: str = "default", timeout=100):
"""Create a CronJob.
    :client: The Batch V1 API client object.
:spec: A valid CronJob YAML manifest.
:namespace: The namespace of the CronJob.
:timeout: Timeout in seconds to wait for object creation/modification
:returns: True on creation, False if it already exists.
"""
body = yaml.safe_load(spec)
try:
response = client.create_namespaced_cron_job(namespace, body)
except ApiException as e:
# If the object already exists, return False.
if e.reason == "Conflict":
return False
raise e
name = body["metadata"]["name"]
if get(client, name, namespace) is None:
w = watch.Watch()
for event in w.stream(
client.list_cron_job_for_all_namespaces, timeout_seconds=timeout
):
if (
(event["type"] == "ADDED" or event["type"] == "MODIFIED")
and event["object"].kind == "CronJob"
and event["object"].metadata.name == response.metadata.name
and event["object"].metadata.namespace == response.metadata.namespace
):
break
return response | da305d5a6af566c8dd465d191f5c842019bedff8 | 3,635,817 |
import os
def pick_projects(directory):
"""
Finds all subdirectories in directory containing a .json file
:param directory: string containing directory of subdirectories to search
    :return: list of projects found under the given directory
"""
ext = '.json'
subs = [x[0] for x in os.walk(directory)]
projects = []
for sub in subs:
files = []
for f in os.listdir(sub):
if f.endswith(ext):
files.append(f)
if len(files) > 0:
sizes = [os.stat(os.path.join(sub, pick)).st_size for pick in files]
max_size = max(sizes)
index = sizes.index(max_size)
projects.append(os.path.join(sub, files[index]))
return projects | 577668e5f7729bb3fcfa39398b7fade9475fefbe | 3,635,818 |
def get_expanded_types(types, type_hierarchy):
"""Expands a set of types with both more specific and more generic types
(i.e., all super-types and sub-types)."""
expanded_types = set()
for type in types:
# Adding all supertypes.
expanded_types.update(get_type_path(type, type_hierarchy))
# Adding all subtypes (NOTE: this bit could be done more efficiently).
for type2 in type_hierarchy:
if type_hierarchy[type2]['depth'] <= type_hierarchy[type]['depth']:
continue
type2_path = get_type_path(type2, type_hierarchy)
if type in type2_path:
expanded_types.update(type2_path)
return expanded_types | a0f08ea1f96e960fedf1fe2a88139608a681cac1 | 3,635,819 |
import os
import jinja2
from pkg_resources import resource_filename
def render(template, **kwargs):
"""Render a Jinja2 template.
Parameters
----------
template : str
Name of the template file (without '.template' suffix). It must be
located in the directory 'pywrap/template_data'.
kwargs : dict
Template arguments.
Returns
-------
text : str
Rendered template.
"""
template_file = resource_filename(
"pywrap", os.path.join("template_data", template + ".template"))
if not os.path.exists(template_file):
raise IOError("No template for '%s' found." % template)
template = jinja2.Template(open(template_file, "r").read())
return template.render(**kwargs) | 93ea2c9bc95e4586b8f25a12b66a8c5b4d32f673 | 3,635,820 |
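A self-contained illustration of the jinja2 call that the helper wraps (no template file on disk needed):

import jinja2

template = jinja2.Template("Hello {{ name }}, you have {{ count }} new messages.")
print(template.render(name="World", count=3))
# Hello World, you have 3 new messages.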
def inject_where(builder):
"""
helper function to append to the query the generated where clause
:param builder: the current builder
    :return: the builder
"""
query = builder.v.query
if callable(query):
return builder
lower = query.lower()
where = lower.find(' where ')
before = -1
for before_q in [' group by', ' order by', ' limit', ' offset']:
before = lower.find(before_q)
if before >= 0:
break
if where >= 0:
if before < 0:
builder.append(' {and_where}')
else:
builder.query('{} {{and_where}} {}'.format(query[:before], query[before:]))
builder.replace('and_where')
query = builder.v.query
builder.query('{} {{joins}} {}'.format(query[:where], query[where:]))
builder.replace('joins')
else:
if before < 0:
builder.append('{joins} {where}')
else:
builder.query('{} {{joins}} {{where}} {}'.format(query[:before], query[before:]))
builder.replace('where')
builder.replace('joins')
return builder | 78682f5c3712ffcb9e96a8c7c624d6f0177884ec | 3,635,821 |
import colorsys
import numpy as np
from matplotlib.colors import LinearSegmentedColormap
def loadCPT(path):
"""A function that loads a .cpt file and converts it into a colormap for the colorbar.
This code was adapted from the GEONETClass Tutorial written by Diego Souza, retrieved 18 July 2019.
https://geonetcast.wordpress.com/2017/06/02/geonetclass-manipulating-goes-16-data-with-python-part-v/
Parameters
----------
path :
Path to the .cpt file
Returns
-------
cpt :
A colormap that can be used for the cmap argument in matplotlib type plot.
"""
try:
f = open(path)
    except IOError:
        print("File", path, "not found")
return None
lines = f.readlines()
f.close()
x = np.array([])
r = np.array([])
g = np.array([])
b = np.array([])
colorModel = 'RGB'
for l in lines:
ls = l.split()
if l[0] == '#':
if ls[-1] == 'HSV':
colorModel = 'HSV'
continue
else:
continue
if ls[0] == 'B' or ls[0] == 'F' or ls[0] == 'N':
pass
else:
x=np.append(x,float(ls[0]))
r=np.append(r,float(ls[1]))
g=np.append(g,float(ls[2]))
b=np.append(b,float(ls[3]))
xtemp = float(ls[4])
rtemp = float(ls[5])
gtemp = float(ls[6])
btemp = float(ls[7])
x=np.append(x,xtemp)
r=np.append(r,rtemp)
g=np.append(g,gtemp)
b=np.append(b,btemp)
if colorModel == 'HSV':
for i in range(r.shape[0]):
rr, gg, bb = colorsys.hsv_to_rgb(r[i]/360.,g[i],b[i])
r[i] = rr ; g[i] = gg ; b[i] = bb
if colorModel == 'RGB':
r = r/255.0
g = g/255.0
b = b/255.0
xNorm = (x - x[0])/(x[-1] - x[0])
red = []
blue = []
green = []
for i in range(len(x)):
red.append([xNorm[i],r[i],r[i]])
green.append([xNorm[i],g[i],g[i]])
blue.append([xNorm[i],b[i],b[i]])
colorDict = {'red': red, 'green': green, 'blue': blue}
# Makes a linear interpolation
cpt = LinearSegmentedColormap('cpt', colorDict)
return cpt | 3af8c564899c89afb3d6a4cc87ac90009526b2a4 | 3,635,822 |
def main():
"""
The main function to execute upon call.
Returns
-------
int
returns integer 0 for safe executions.
"""
print("Program to find the character from an input ASCII value.")
ascii_val = int(input("Enter ASCII value to find character: "))
print("\nASCII {asci} in character is \"{char}\""
.format(asci=ascii_val, char=chr(ascii_val)))
return 0 | 45bec0eb658cc17005b97e6fa812c806f5b77440 | 3,635,823 |
import random
def get_random_greeting():
"""
Return random greeting message.
"""
return random.choice(GREETINGS) | 89d4a93105ffe1a241730088388eaf4ffeff71da | 3,635,824 |
from fractions import Fraction
def dyad_completion(w):
""" Return the dyadic completion of ``w``.
Return ``w`` if ``w`` is already dyadic.
We assume the input is a tuple of nonnegative Fractions or integers which sum to 1.
Examples
--------
>>> w = (Fraction(1,3), Fraction(1,3), Fraction(1, 3))
>>> dyad_completion(w)
(Fraction(1, 4), Fraction(1, 4), Fraction(1, 4), Fraction(1, 4))
>>> w = (Fraction(1,3), Fraction(1,5), Fraction(7, 15))
>>> dyad_completion(w)
(Fraction(5, 16), Fraction(3, 16), Fraction(7, 16), Fraction(1, 16))
>>> w = (1, 0, 0.0, Fraction(0,1))
>>> dyad_completion(w)
(Fraction(1, 1), Fraction(0, 1), Fraction(0, 1), Fraction(0, 1))
"""
w = tuple(Fraction(v) for v in w)
d = max(v.denominator for v in w)
# if extra_index:
p = next_pow2(d)
if p == d:
# the tuple of fractions is already dyadic
return w
else:
# need to add the dummy variable to represent as dyadic
return tuple(Fraction(v*d, p) for v in w) + (Fraction(p-d, p),) | 3631e4db62607e18a22e652009747f25a8a585c7 | 3,635,825 |
from typing import Tuple
import numpy as np
from shapely.geometry import Polygon, MultiPoint
def create_pv_string_points(x_coord: float,
                            y_coord: float,
                            string_width: float,
                            string_height: float
                            ) -> Tuple[Polygon, MultiPoint]:
"""
    :param x_coord: x coordinate of the string's lower-left corner
    :param y_coord: y coordinate of the string's lower-left corner
    :param string_width: width of the string in the x direction
    :param string_height: height of the string in the y direction
    :return: the string outline polygon and the module center points within it
"""
pts = ((x_coord, y_coord),
(x_coord + string_width, y_coord),
(x_coord + string_width, y_coord + string_height),
(x_coord, y_coord + string_height))
module = Polygon(pts)
xs_string = np.arange(module_width / 2, module_width, module_width)
ys_string = np.arange(module_height / 2 + y_coord, y_coord + string_height, module_height)
xxs, yys = np.meshgrid(xs_string, ys_string, sparse=True)
string_points = MultiPoint(np.transpose([np.tile(xs_string, len(ys_string)),
np.repeat(yys, len(xs_string))]))
return module, string_points | 212e666efa51d60fcf29da4afbbff498d6d94197 | 3,635,826 |
import numpy as np
def phasor(H):
"""
Caculate phasor values from given histogram
    g = sum(H * cos(f)) / sum(H)
    s = sum(H * sin(f)) / sum(H)
===========================================================================
Input Meaning
---------- ---------------------------------------------------------------
H List or np.array with histogram values
===========================================================================
Output Meaning
---------- ---------------------------------------------------------------
z complex number describing the phasor
===========================================================================
"""
Np = len(H)
F = 2 * np.pi * np.linspace(0, Np-1, Np) / (Np - 1)
g = np.sum(H * np.cos(F)) / np.sum(H)
s = np.sum(H * np.sin(F)) / np.sum(H)
z = complex(g, s)
return(z) | 4034ec4e9a35c2792bd52a860ba61c729b80dcba | 3,635,827 |
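A small usage sketch with a synthetic mono-exponential decay histogram (values are illustrative only):

import numpy as np

Np = 256
t = np.arange(Np)
H = np.exp(-t / 50.0)      # synthetic decay histogram
z = phasor(H)
g, s = z.real, z.imag      # coordinates in the phasor plot
print(g, s)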
def if_stmt(cond, body, orelse):
"""Functional form of an if statement.
Args:
cond: Boolean.
body: Callable with no arguments, and outputs of the positive (if) branch
as return type.
orelse: Callable with no arguments, and outputs of the negative (else)
branch as return type.
Returns:
Tuple containing the statement outputs.
"""
if tensor_util.is_tensor(cond):
return tf_if_stmt(cond, body, orelse)
else:
return _py_if_stmt(cond, body, orelse) | 88db6bacfca094e94c8cec165e871127f8498175 | 3,635,828 |
import os
def download_file(save_dir, filename, url, md5=None):
"""
Download the file from the url to specified directory.
    Check the md5 value when the file already exists; if the md5 value matches the existing
    file, reuse it, otherwise download the file from the url again.
Args:
save_dir(string): The specified directory saving the file.
filename(string): The specified filename saving the file.
        url(string): The url for downloading the file.
md5(string, optional): The md5 value that checking the version downloaded.
"""
fullname = os.path.join(save_dir, filename)
if os.path.exists(fullname):
if md5 and (not md5file(fullname) == md5):
logger.info("Updating {} from {}".format(filename, url))
logger.disable()
get_path_from_url(url, save_dir, md5)
else:
logger.info("Downloading {} from {}".format(filename, url))
logger.disable()
get_path_from_url(url, save_dir, md5)
logger.enable()
return fullname | 9b0d0e4a19a8347bff7f2288eda402dccbe91e88 | 3,635,829 |
def load_db(DB_Filename, ValueType, ValueColumnIdx, KeyColumnIdx):
"""Loads a database contained in file 'DB_Filename'. Creates a python dictionary
that maps from a string (contained in column KeyColumnIdx) to a number set
or a single number (contained in column ValueColumnIdx).
       NOTE: The 'key' may be, for example, the property value. The
             'value' is the number set it points to. This may be
             confusing.
"""
table = parse_table(DB_Filename)
if ValueType == "NumberSet": convert_column_to_interval(table, ValueColumnIdx)
elif ValueType == "number": convert_column_to_number(table, ValueColumnIdx)
db = convert_table_to_associative_map(table, ValueColumnIdx, ValueType, KeyColumnIdx)
return db | 3291b3230a6b18945f26b16675d6d6fc27baba53 | 3,635,830 |
def createFoodObject(dataset, row):
"""
Create food URI and triples related to food properties
"""
food_onto_term = str(row['Food Ontology Term'])
food_label = row['Food']
food_type = row['NEW Food Type']
food_amount = row['NEW Food Matrix']
food_source = food_amount.split('\n')[0].replace('Matrix:', '')
dose_unit = row['Unit']
dose_value = row['Value']
dose_freq = row['Frequency']
fooduri_list = []
# if there is no ontology term define for this food, create one
if food_onto_term == 'nan':
fooduri = FOODHKG_INST[get_hash(food_label)]
createFoodProp(dataset, fooduri, food_label, food_type, food_source,
dose_unit, dose_value, dose_freq)
fooduri_list.append(fooduri)
else:
for fooduri in food_onto_term.split(';'):
fooduri = fooduri.strip()
if fooduri == '':
continue
fooduri = URIRef(fooduri)
createFoodProp(dataset, fooduri, food_label, food_type, food_source,
dose_unit, dose_value, dose_freq)
fooduri_list.append(fooduri)
return fooduri_list | 686ab766c7341e538741e20b0f70a32215db6daa | 3,635,831 |
def business():
""" RESTful CRUD controller """
def rheader_table(r):
if r.record:
return TABLE( TR( TH("%s: %s" % (T("Name"),
r.record.business_name)),
TH("%s: %s %s" % (T("Address"),
r.record.street1,
r.record.street2))))
return None
list_fields = ["business_name", "owner_name",
"address1", "address2", "city", "county"]
return generic(resourcename, rheader_table, list_fields) | c7b237196f4a614327b54fd4b53950aa16d18331 | 3,635,832 |
from svgpathtools import svg2paths, Path
def points_from_svg(svg_file_path):
""" Takes a SVG file as an input and returns a list of points in the complex plane from its path. """
# Read SVG into a list of curves.
paths, attributes = svg2paths(svg_file_path)
curves = paths[0]
# Get a list of the coordinates from each curve.
# Coordinates are given as points in the complex plane.
num_samples = 10
points_list = []
for curve in curves:
for i in range(num_samples):
points_list.append(Path(curve).point(i/(float(num_samples)-1)))
return points_list | a3fe434cda819b6c15f5ef3e854ab99455b879b6 | 3,635,833 |
from typing import List
from typing import Callable
from typing import Optional
import functools
import operator
import datamol as dm
from rdkit import Chem, DataStructs
from rdkit.ML.Cluster import Butina
def cluster_mols(
mols: List[Chem.rdchem.Mol],
cutoff: float = 0.2,
feature_fn: Callable = None,
n_jobs: Optional[int] = 1,
):
"""Cluster a set of molecules using the butina clustering algorithm and a given threshold.
Args:
mols: a list of molecules.
cutoff: Cuttoff for the clustering. Default to 0.2.
feature_fn: A feature function that takes a Chem.rdchem.Mol object
and return molecular features. By default, the `dm.to_fp()` is used.
Default to None.
n_jobs: Number of jobs for parallelization. Let to 1 for no
parallelization. Set to None to use all available cores.
"""
if feature_fn is None:
feature_fn = functools.partial(dm.to_fp, as_array=False)
features = dm.parallelized(feature_fn, mols, n_jobs=n_jobs)
dists = []
n_mols = len(mols)
for i in range(1, n_mols):
dist = DataStructs.BulkTanimotoSimilarity(features[i], features[:i], returnDistance=True)
dists.extend([x for x in dist])
# now cluster the data
cluster_indices = Butina.ClusterData(dists, n_mols, cutoff, isDistData=True)
cluster_mols = [operator.itemgetter(*cluster)(mols) for cluster in cluster_indices]
# Make single mol cluster a list
cluster_mols = [[c] if isinstance(c, Chem.rdchem.Mol) else c for c in cluster_mols]
return cluster_indices, cluster_mols | f38425183f42a994ba158a37a8b86463b7bf784d | 3,635,834 |
import os
import shutil
import re
import fileinput
import sys
def match_depends(module):
""" Check for matching dependencies.
This inspects spell's dependencies with the desired states and returns
'False' if a recast is needed to match them. It also adds required lines
to the system-wide depends file for proper recast procedure.
"""
params = module.params
spells = params['name']
depends = {}
depends_ok = True
if len(spells) > 1 or not params['depends']:
return depends_ok
spell = spells[0]
if module.check_mode:
sorcery_depends_orig = os.path.join(SORCERY_STATE_DIR, "depends")
sorcery_depends = os.path.join(SORCERY_STATE_DIR, "depends.check")
try:
shutil.copy2(sorcery_depends_orig, sorcery_depends)
except IOError:
module.fail_json(msg="failed to copy depends.check file")
else:
sorcery_depends = os.path.join(SORCERY_STATE_DIR, "depends")
rex = re.compile(r"^(?P<status>\+?|\-){1}(?P<depend>[a-z0-9]+[a-z0-9_\-\+\.]*(\([A-Z0-9_\-\+\.]+\))*)$")
for d in params['depends'].split(','):
match = rex.match(d)
if not match:
module.fail_json(msg="wrong depends line for spell '%s'" % spell)
# normalize status
if not match.group('status') or match.group('status') == '+':
status = 'on'
else:
status = 'off'
depends[match.group('depend')] = status
# drop providers spec
depends_list = [s.split('(')[0] for s in depends]
cmd_gaze = "%s -q version %s" % (SORCERY['gaze'], ' '.join(depends_list))
rc, stdout, stderr = module.run_command(cmd_gaze)
if rc != 0:
module.fail_json(msg="wrong dependencies for spell '%s'" % spell)
fi = fileinput.input(sorcery_depends, inplace=True)
try:
try:
for line in fi:
if line.startswith(spell + ':'):
match = None
for d in depends:
# when local status is 'off' and dependency is provider,
# use only provider value
d_offset = d.find('(')
if d_offset == -1:
d_p = ''
else:
d_p = re.escape(d[d_offset:])
# .escape() is needed mostly for the spells like 'libsigc++'
rex = re.compile("%s:(?:%s|%s):(?P<lstatus>on|off):optional:" %
(re.escape(spell), re.escape(d), d_p))
match = rex.match(line)
# we matched the line "spell:dependency:on|off:optional:"
if match:
# if we also matched the local status, mark dependency
# as empty and put it back into depends file
if match.group('lstatus') == depends[d]:
depends[d] = None
sys.stdout.write(line)
# status is not that we need, so keep this dependency
# in the list for further reverse switching;
# stop and process the next line in both cases
break
if not match:
sys.stdout.write(line)
else:
sys.stdout.write(line)
except IOError:
module.fail_json(msg="I/O error on the depends file")
finally:
fi.close()
depends_new = [v for v in depends if depends[v]]
if depends_new:
        try:
            fl = open(sorcery_depends, 'a')
            try:
                for k in depends_new:
                    fl.write("%s:%s:%s:optional::\n" % (spell, k, depends[k]))
            finally:
                fl.close()
        except IOError:
            module.fail_json(msg="I/O error on the depends file")
depends_ok = False
if module.check_mode:
try:
os.remove(sorcery_depends)
except IOError:
module.fail_json(msg="failed to clean up depends.backup file")
return depends_ok | f77123097bfdd23fac0f1f7604b5a7d3ac6fee1b | 3,635,835 |
from typing import Optional
from typing import Tuple
import numpy as np
from sklearn.utils import shuffle as shuffle_arrays
def reshape_shuffle_ctg(fhr: np.array, uc: np.array, y: np.array, time: Optional[np.array] = None, shuffle: bool = True) -> Tuple[np.array, ...]:
    """
    Reshape and optionally shuffle inputs and targets for the keras/tf input in model
    Args:
        fhr, uc, y : (np.arrays) combined batch of fhr/uc signals and targets
        time : (np.array, optional) timestamps aligned with the signals
        shuffle: (bool) if True the data are shuffled together
    Output: np.arrays of reshaped and (optionally) shuffled data
    """
    N = fhr.shape[1]  # number of samples after concat
    length = fhr.shape[0]
    fhr = np.reshape(fhr[:, :N], (N, length, 1))
    uc = np.reshape(uc[:, :N], (N, length, 1))
    y = np.reshape(y, (N, 1))
    if time is not None:
        time = np.reshape(time[:, :N], (N, length, 1))
        if shuffle:
            # sklearn.utils.shuffle shuffles all arrays in unison along the first axis
            fhr, uc, time, y = shuffle_arrays(fhr, uc, time, y)
        return fhr, uc, time, y
    if shuffle:
        fhr, uc, y = shuffle_arrays(fhr, uc, y)  # shuffles all in unison along the first axis
    return fhr, uc, y | 5986d80fb31dbe87e7d4f65f750a2ff6d77cf27a | 3,635,836 |
from typing import Optional
from typing import Tuple
import requests
def do_project(project: str) -> Optional[Tuple[str, str, str]]:
"""
Query Anitya and zypper for current version.
"""
max_version = None
prog_id = anitya_find_project_id(proj_name=project)
if prog_id:
res = requests.get('https://release-monitoring.org/project/%d/' % prog_id)
res.raise_for_status()
lines = res.text.splitlines()
for line in lines:
if "doap:revision" in line:
version = Version(line[line.find('>') + 1:line.rfind('<')])
if version and (max_version is None or version > max_version):
max_version = version
opensuse_version = zypper.package_version(project)
return (project,
str(max_version) if max_version else '',
str(opensuse_version) if opensuse_version else '') | 0011b617453a3358ce30c2d13a9c4cb0492d090e | 3,635,837 |
def save_channel_videoid(channel_id: str, video_id: str):
"""儲存單個影片ID與頻道ID
Args:
channel_id (str): [channel_id]
video_id (str): [video_id]
Returns:
        [bool]: success / fail
"""
schemas = {
"video_id": video_id,
"channel_id": channel_id
}
play_list_model = ChannelPlaylistItem(**schemas)
try:
with app.app_context():
db.session.add(play_list_model)
db.session.commit()
return True
except SQLAlchemyError as e:
print(type(e))
return False | f3c1c59cb5ff8f540335480b075fa4b6889e7733 | 3,635,838 |
def mark_point(mark_point=None, **kwargs):
"""
    :param mark_point:
        The mark point type; 'min', 'max', and 'average' are available options.
:param kwargs:
:return:
"""
return _mark(mark_point, **kwargs) | 21287c07e77f69ce672ef1371225fad7f357277d | 3,635,839 |
def get_keys(opts):
"""Gets keys from keystore and known-hosts store"""
hosts = KnownHostsStore()
serverkey = hosts.serverkey(opts.vip_address)
key_store = KeyStore()
publickey = key_store.public
secretkey = key_store.secret
return {"publickey": publickey, "secretkey": secretkey,
"serverkey": serverkey} | 668447b134201e2b68e982d9cdf6219cb578dfff | 3,635,840 |
import mujoco
def _get_model_ptr_from_binary(binary_path=None, byte_string=None):
"""Returns a pointer to an mjModel from the contents of a MuJoCo model binary.
Args:
binary_path: Path to an MJB file (as produced by MjModel.save_binary).
byte_string: String of bytes (as returned by MjModel.to_bytes).
One of `binary_path` or `byte_string` must be specified.
Returns:
    A new `mujoco.MjModel` instance.
Raises:
TypeError: If both or neither of `byte_string` and `binary_path`
are specified.
"""
if binary_path is None and byte_string is None:
raise TypeError(
"At least one of `byte_string` or `binary_path` must be specified.")
elif binary_path is not None and byte_string is not None:
raise TypeError(
"Only one of `byte_string` or `binary_path` may be specified.")
if byte_string is not None:
assets = {_FAKE_BINARY_FILENAME: byte_string}
return mujoco.MjModel.from_binary_path(_FAKE_BINARY_FILENAME, assets)
return mujoco.MjModel.from_binary_path(binary_path, {}) | a2aede03e3e137596bd8de689aad69272749d884 | 3,635,841 |
from typing import List
def emulate_decoding_routine(vw, function_index, function: int, context, max_instruction_count: int) -> List[Delta]:
"""
Emulate a function with a given context and extract the CPU and
memory contexts at interesting points during emulation.
These "interesting points" include calls to other functions and
the final state.
Emulation terminates if the CPU executes an unexpected region of
memory, or the function returns.
Implementation note: currently limits emulation to 20,000 instructions.
This prevents unexpected infinite loops.
This number is taken from emulating the decoding of "Hello world" using RC4.
:param vw: The vivisect workspace in which the function is defined.
:type function_index: viv_utils.FunctionIndex
:param function: The address of the function to emulate.
    :type context: function_argument_getter.FunctionContext
:param context: The initial state of the CPU and memory
prior to the function being called.
:param max_instruction_count: The maximum number of instructions to emulate per function.
:rtype: Sequence[decoding_manager.Delta]
"""
emu = floss.utils.make_emulator(vw)
emu.setEmuSnap(context.emu_snap)
logger.trace(
"Emulating function at 0x%08X called at 0x%08X, return address: 0x%08X",
function,
context.decoded_at_va,
context.return_address,
)
deltas = floss.decoding_manager.emulate_function(
emu, function_index, function, context.return_address, max_instruction_count
)
return deltas | e663e3ce225fe5603a7debabb22ef9614f0a74ac | 3,635,842 |
import numpy as np
def probs_to_costs(costs, beta=.5):
    """ Transform probabilities to costs (returns a new array, not in-place)
    """
p_min = 0.001
p_max = 1. - p_min
costs = (p_max - p_min) * costs + p_min
# probabilities to costs, second term is boundary bias
costs = np.log((1. - costs) / costs) + np.log((1. - beta) / beta)
return costs | 77307ef656a8146286028d957d0bed64cda01a17 | 3,635,843 |
def requires_ids_or_filenames(method):
"""
A decorator for spectrum library methods that require either a list of Ids or a list of filenames.
:param method:
A method belonging to a sub-class of SpectrumLibrary.
"""
def wrapper(model, *args, **kwargs):
have_ids = ("ids" in kwargs) and (kwargs["ids"] is not None)
have_filenames = ("filenames" in kwargs) and (kwargs["filenames"] is not None)
assert have_ids or have_filenames, "Must supply a list of Ids or a list of filenames"
assert not (have_ids and have_filenames), "Must supply either a list of Ids or a list of filenames, not both."
# If a single Id is supplied, rather than a list of Ids, turn it into a one-entry tuple
if have_ids and not isinstance(kwargs["ids"], (list, tuple)):
kwargs["ids"] = (kwargs["ids"],)
# If a single filename is supplied, turn it into a one-entry tuple
if have_filenames and not isinstance(kwargs["filenames"], (list, tuple)):
kwargs["filenames"] = (kwargs["filenames"],)
return method(model, *args, **kwargs)
return wrapper | df4cb705f11567e8e5a23da730aead8e7c90f378 | 3,635,844 |
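A usage sketch with a hypothetical library class (DemoLibrary is invented for illustration):

class DemoLibrary:
    @requires_ids_or_filenames
    def open_spectra(self, ids=None, filenames=None):
        return ids if ids is not None else filenames

lib = DemoLibrary()
print(lib.open_spectra(ids=42))                          # (42,) -- a single Id is wrapped in a tuple
print(lib.open_spectra(filenames=["a.fits", "b.fits"]))  # ['a.fits', 'b.fits']
# lib.open_spectra()  # would raise AssertionError: must supply Ids or filenames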
from typing import List
from typing import Tuple
import requests
from fnmatch import fnmatch
from json import JSONDecodeError
def _list_nsrr(
db_slug: str,
subfolder: str = '',
pattern: str = '*',
shallow: bool = False,
) -> List[Tuple[str, str]]:
"""
Recursively list filenames and checksums for a dataset.
Specify a subfolder and/or a filename-pattern to filter results.
Implemented according to the NSRR API documentation:
https://github.com/nsrr/sleepdata.org/wiki/api-v1-datasets#list-files-in-folder
Parameters
----------
db_slug : str
Short identifier of a database, e.g. `'mesa'`.
subfolder : str, optional
The folder at which to start the search, by default `''` (i.e. the
root folder).
pattern : str, optional
Glob-like pattern to select files (only applied to the basename,
not the dirname), by default `'*'`.
shallow : bool, optional
If `True`, only search in the given subfolder (i.e. no recursion),
by default `False`.
Returns
-------
list[tuple[str, str]]
A list of tuples `(<filename>, <checksum>)`; `<filename>` is the
full filename (i.e. dirname and basename) and `<checksum>` the
MD5 checksum.
"""
api_url = f'https://sleepdata.org/api/v1/datasets/{db_slug}/files.json'
response = requests.get(api_url, params={'path': subfolder})
try:
response_json = response.json()
except JSONDecodeError:
raise RuntimeError(f'No API response for dataset {db_slug}.') from None
files = []
for item in response_json:
if not item['is_file'] and not shallow:
files.extend(_list_nsrr(db_slug, item['full_path'], pattern))
elif fnmatch(item['file_name'], pattern):
files.append((item['full_path'], item['file_checksum_md5']))
return files | 7c0dbf352046245b189266435a4408ed739d99dd | 3,635,845 |
def red():
"""
Returns the red RGB tensor
Returns
-------
Tensor
the (1,3,) red tensor
"""
return color2float(Uint8Tensor([237, 28, 36])) | 7c627c88ca34f8b54f54711dfdf8a9b0301daa8b | 3,635,846 |
from io import StringIO
from django.core.files.uploadedfile import InMemoryUploadedFile
def get_temporary_text_file(contents, filename):
"""
Creates a temporary text file
:param contents: contents of the file
:param filename: name of the file
:type contents: str
:type filename: str
"""
f = StringIO()
flength = f.write(contents)
text_file = InMemoryUploadedFile(f, None, filename, 'text', flength, None)
# Setting the file to its start
text_file.seek(0)
return text_file | 7b29cb3b7bf09e78f24574555acfb24784dd9ccb | 3,635,847 |
import sys
def print_wf_integrity_stats(stats, workflow_id, dax_label, fmt):
"""
Prints the integrity statistics of workflow
stats : workflow statistics object reference
workflow_id : UUID of workflow
dax_label : Name of workflow
format : Format of report ('text' or 'csv')
"""
if fmt not in ["text", "csv"]:
print("Output format %s not recognized!" % fmt)
sys.exit(1)
report = ["\n"]
if fmt == "text":
# In text file, we need a line with the workflow id first
report.append("# {} ({})".format(workflow_id, dax_label or "All"))
col_names = integrity_stats_col_name_text
if fmt == "csv":
col_names = integrity_stats_col_name_csv
integrity_statistics = stats.get_integrity_metrics()
if fmt == "text":
max_length = [max(0, len(col_names[i])) for i in range(4)]
columns = ["" for i in range(4)]
# figure out max lengths?
        for stat in integrity_statistics:
            max_length[0] = max(max_length[0], len(stat.type))
            max_length[1] = max(max_length[1], len(stat.file_type))
            max_length[2] = max(max_length[2], len(str(stat.count)))
            max_length[3] = max(max_length[3], len(str(stat.duration)))
max_length = [i + 1 for i in max_length]
header_printed = False
for i in integrity_statistics:
content = [i.type, i.file_type, str(i.count), str(i.duration)]
if fmt == "text":
            for j in range(0, 4):
                columns[j] = col_names[j].ljust(max_length[j])
                content[j] = content[j].ljust(max_length[j])
if fmt == "csv":
columns = integrity_stats_col_name_csv
content = [workflow_id, dax_label] + content
if not header_printed:
header_printed = True
report.append(print_row(columns, integrity_stats_col_size, fmt))
report.append(print_row(content, integrity_stats_col_size, fmt))
return NEW_LINE_STR.join(report) + NEW_LINE_STR | f244d4c47e86ae7ad8d0448fa688fdaaf893b4d4 | 3,635,848 |
def build_dynamic_focal_key_loss(task_cfgs):
"""According to "Dynamic Task Prioritization for Multitask Learning"
by Michelle Guo et al."""
losses = {}
for task_cfg in task_cfgs:
name = task_cfg['name']
losses[name] = build_dynamic_focal_key_task(task_cfg)
return WeightModule(losses) | 0b687461fea2cc73bf8671e1a1e97a32a422103c | 3,635,849 |
from typing import Tuple
from typing import Dict
from typing import Union
from typing import Optional
import glob
def settings_from_task_id(
task_id: int,
inj_data_path: str = "./data_raw_injections/task_files/",
) -> Tuple[str, Dict[str, Union[str, Optional[Dict[str, str]], bool, int]], int]:
"""Returns injection file (with path), waveform parameters in a dictionary, and number of injections for the given task id.
Args:
task_id: Slurm task ID from 1 to 2048.
inj_data_path: Path to injection files.
Raises:
ValueError: If there are no matching or more than one matching injections files.
Also, if the science case is not recognised to set the waveform parameters.
"""
# TODO: rewrite injection_file_name in generate_injections to use it here?
matches = glob.glob(inj_data_path + f"*_TASK_{task_id}.npy")
if len(matches) != 1:
raise ValueError(
f"Number of matches in data_raw_injections/ path is not one: {len(matches)}"
)
# includes absolute path
file = matches[0]
science_case, num_injs_per_redshift_bin_str = (
file.replace("_INJS-PER-ZBIN_", "_SCI-CASE_")
.replace("_TASK_", "_SCI-CASE_")
.replace(".npy", "_SCI-CASE_")
.split("_SCI-CASE_")[1:3]
)
num_injs_per_redshift_bin = int(num_injs_per_redshift_bin_str)
if science_case == "BNS":
wf_dict = dict(
wf_model_name="tf2_tidal",
wf_other_var_dic=None,
numerical_over_symbolic_derivs=False,
coeff_fisco=4,
)
# TODO: change to more accurate numerical waveform once gwbench 0.7 released
# wf_dict = dict(science_case=science_case, wf_model_name='lal_bns', wf_other_var_dic=dict(approximant='IMRPhenomD_NRTidalv2'), numerical_over_symbolic_derivs=True, coeff_fisco = 4)
elif science_case == "BBH":
wf_dict = dict(
wf_model_name="lal_bbh",
wf_other_var_dic=dict(approximant="IMRPhenomHM"),
numerical_over_symbolic_derivs=True,
coeff_fisco=8,
)
else:
raise ValueError("Science case not recognised.")
wf_dict["science_case"] = science_case
return file, wf_dict, num_injs_per_redshift_bin | ccf3a49934b63c561ee76e3cde837fb9f8ae1fcf | 3,635,850 |
import io
import tokenize
def remove_comments_and_docstrings(source):
"""
Returns *source* minus comments and docstrings.
.. note:: Uses Python's built-in tokenize module to great effect.
Example::
def noop(): # This is a comment
'''
Does nothing.
'''
pass # Don't do anything
Will become::
def noop():
pass
"""
io_obj = io.StringIO(source)
out = ""
prev_toktype = tokenize.INDENT
last_lineno = -1
last_col = 0
for tok in tokenize.generate_tokens(io_obj.readline):
token_type = tok[0]
token_string = tok[1]
start_line, start_col = tok[2]
end_line, end_col = tok[3]
ltext = tok[4]
if start_line > last_lineno:
last_col = 0
if start_col > last_col:
out += (" " * (start_col - last_col))
# Remove comments:
if token_type == tokenize.COMMENT:
pass
# This series of conditionals removes docstrings:
elif token_type == tokenize.STRING:
if prev_toktype != tokenize.INDENT:
# This is likely a docstring; double-check we're not inside an operator:
if prev_toktype != tokenize.NEWLINE:
# Note regarding NEWLINE vs NL: The tokenize module
# differentiates between newlines that start a new statement
                    # and newlines inside of operators such as parens, brackets,
# and curly braces. Newlines inside of operators are
# NEWLINE and newlines that start new code are NL.
# Catch whole-module docstrings:
if start_col > 0:
# Unlabelled indentation means we're inside an operator
out += token_string
# Note regarding the INDENT token: The tokenize module does
# not label indentation inside of an operator (parens,
# brackets, and curly braces) as actual indentation.
# For example:
# def foo():
# "The spaces before this docstring are tokenize.INDENT"
# test = [
# "The spaces before this string do not get a token"
# ]
else:
out += token_string
prev_toktype = token_type
last_col = end_col
last_lineno = end_line
return out | ffd185fc2517342e9eb0e596c431838009befde5 | 3,635,851 |
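A quick usage sketch mirroring the docstring's example:

src = (
    "def noop():  # This is a comment\n"
    "    '''Does nothing.'''\n"
    "    pass  # Don't do anything\n"
)
print(remove_comments_and_docstrings(src))
# prints the function with the comment and docstring stripped
# (a blank line may remain where the docstring was)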
def get_arg_loc(callingconvention: str, bytecounter: int, size: int) -> str:
"""Return a string that denotes the location of a given function argument."""
index = bytecounter // 4
if index < 0:
raise Exception(
"Argument index cannot be smaller than zero: " + str(index))
if callingconvention == "arm":
return get_arm_arg_loc(bytecounter, size)
elif callingconvention == "mips":
return get_mips_arg_loc(bytecounter, size)
else:
return "?" | fffd44a5d47e28fd571ee05a8332a570c24ddf95 | 3,635,852 |
import pathlib
from tqdm import tqdm
import textgrid
def alignments_pass(alignments: pathlib.Path) -> Alignments:
"""Peform a single pass on all alignments to calculate meta information."""
meta = Alignments()
for speaker in tqdm(list(alignments.glob("*")), desc="Alignment Pass"):
# To ignore hidden files etc.
if str(speaker.stem).isnumeric():
for grid in speaker.glob("*.TextGrid"):
tg = textgrid.TextGrid.fromFile(grid)
for interval in tg[0]:
text = interval.mark
if text:
if text not in meta.word_counts:
meta.word_counts[text] = 0
meta.word_counts[text] += 1
return meta | bf5068e9fae3b3ae21188c7a76fb21b91045e465 | 3,635,853 |
def BRepBlend_HCurve2dTool_Intervals(*args):
"""
:param C:
:type C: Handle_Adaptor2d_HCurve2d &
:param T:
:type T: TColStd_Array1OfReal &
:param S:
:type S: GeomAbs_Shape
:rtype: void
"""
return _BRepBlend.BRepBlend_HCurve2dTool_Intervals(*args) | 7443a54617ebec6602521ed2e7cb765845972e46 | 3,635,854 |
import itertools
def get_block_objects(disasm, nodes, func_addr):
"""
Get a list of objects to be displayed in a block in disassembly view. Objects may include instructions, stack
variables, and labels.
:param angr.analyses.Disassembly disasm: The angr Disassembly Analysis instance.
:param iterable nodes: A collection of CFG nodes.
:param int func_addr: The function address of the current block.
:return: a list of Instruction objects and label names (strings).
:rtype: list
"""
block_addrs = [node.addr for node in nodes]
block_addr = block_addrs[0]
insn_addrs = list(itertools.chain.from_iterable(disasm.block_to_insn_addrs[addr] for addr in block_addrs))
lst = [ ]
variable_manager = disasm.kb.variables[func_addr]
# function beginning
if block_addr == func_addr:
# function header
func = disasm.kb.functions.get_by_addr(func_addr)
if func is not None:
func_header = FunctionHeader(func.name, func.prototype,
func.calling_convention.args if func.calling_convention is not None else None)
lst.append(func_header)
# stack variables
# filter out all stack variables
variables = variable_manager.get_variables(sort='stack', collapse_same_ident=False)
variables = sorted(variables, key=lambda v: v.offset)
lst.append(Variables(variables))
# phi variables
phi_variables = variable_manager.get_phi_variables(block_addr)
if phi_variables:
for phi, variables in phi_variables.items():
lst.append(PhiVariable(phi, variables))
# instructions and labels
for insn_addr in insn_addrs:
if insn_addr != func_addr and insn_addr in disasm.kb.labels:
lst.append(Label(insn_addr, get_label_text(insn_addr, disasm.kb)))
lst.append(disasm.raw_result_map['instructions'][insn_addr])
# initial label, if there is any
# FIXME: all labels should be generated during CFG recovery, and this step should not be necessary.
if lst and not isinstance(lst[0], FunctionHeader):
# the first element should be a label
lst.insert(0, Label(block_addrs[0], get_label_text(block_addrs[0], disasm.kb)))
return lst | 400e1ae5a42bfcb24bd66a6b301504efefc04619 | 3,635,855 |
def new_lunar_system_in_time(time_JD=2457099.5|units.day):
"""
Initial conditions of Solar system --
particle set with the sun + eight moons,
at the center-of-mass reference frame.
Defined attributes:
name, mass, radius, x, y, z, vx, vy, vz
"""
time_0 = 2457099.5 | units.day
delta_JD = time_JD-time_0
solar_system = solar_system_in_time(time_JD)
solar_system[0].type = "star"
solar_system[0].name = "sun"
solar_system[1:].type = "planet"
for pi in solar_system:
moons = get_moons_for_planet(pi, delta_JD=delta_JD)
solar_system.add_particles(moons)
solar_system.move_to_center()
### to compare with JPL, relative positions and velocities need to be corrected for the
# Sun's vectors with respect to the barycenter
#r_s = (3.123390770608490E-03, -4.370830943817017E-04, -1.443425433116342E-04) | units.AU
#v_s = (3.421633816761503E-06, 5.767414405893875E-06, -8.878039607570240E-08) | (units.AU / units.day)
#print sun
#print moons.position.in_(units.AU) + r_s
#print moons.velocity.in_(units.AU/units.day) + v_s
return solar_system | 7380e3b3f56c065865fbe64c1841a59851298113 | 3,635,856 |
import os
import os.path as op
import numpy as np
def read_annot(fname):
"""Read a Freesurfer annotation from a .annot file.
Note : Copied from nibabel
Parameters
----------
fname : str
Path to annotation file
Returns
-------
annot : numpy array, shape=(n_verts)
Annotation id at each vertex
ctab : numpy array, shape=(n_entries, 5)
RGBA + label id colortable array
names : list of str
List of region names as stored in the annot file
"""
if not op.isfile(fname):
dir_name = op.split(fname)[0]
if not op.isdir(dir_name):
raise IOError('Directory for annotation does not exist: %s',
fname)
cands = os.listdir(dir_name)
cands = [c for c in cands if '.annot' in c]
if len(cands) == 0:
raise IOError('No such file %s, no candidate parcellations '
'found in directory' % fname)
else:
raise IOError('No such file %s, candidate parcellations in '
'that directory: %s' % (fname, ', '.join(cands)))
with open(fname, "rb") as fid:
n_verts = np.fromfile(fid, '>i4', 1)[0]
data = np.fromfile(fid, '>i4', n_verts * 2).reshape(n_verts, 2)
annot = data[data[:, 0], 1]
ctab_exists = np.fromfile(fid, '>i4', 1)[0]
if not ctab_exists:
raise Exception('Color table not found in annotation file')
n_entries = np.fromfile(fid, '>i4', 1)[0]
if n_entries > 0:
length = np.fromfile(fid, '>i4', 1)[0]
orig_tab = np.fromfile(fid, '>c', length)
orig_tab = orig_tab[:-1]
names = list()
            ctab = np.zeros((n_entries, 5), int)
for i in range(n_entries):
name_length = np.fromfile(fid, '>i4', 1)[0]
name = np.fromfile(fid, "|S%d" % name_length, 1)[0]
names.append(name)
ctab[i, :4] = np.fromfile(fid, '>i4', 4)
ctab[i, 4] = (ctab[i, 0] + ctab[i, 1] * (2 ** 8) +
ctab[i, 2] * (2 ** 16) +
ctab[i, 3] * (2 ** 24))
else:
ctab_version = -n_entries
if ctab_version != 2:
raise Exception('Color table version not supported')
n_entries = np.fromfile(fid, '>i4', 1)[0]
            ctab = np.zeros((n_entries, 5), int)
length = np.fromfile(fid, '>i4', 1)[0]
np.fromfile(fid, "|S%d" % length, 1) # Orig table path
entries_to_read = np.fromfile(fid, '>i4', 1)[0]
names = list()
for i in range(entries_to_read):
np.fromfile(fid, '>i4', 1) # Structure
name_length = np.fromfile(fid, '>i4', 1)[0]
name = np.fromfile(fid, "|S%d" % name_length, 1)[0]
names.append(name)
ctab[i, :4] = np.fromfile(fid, '>i4', 4)
ctab[i, 4] = (ctab[i, 0] + ctab[i, 1] * (2 ** 8) +
ctab[i, 2] * (2 ** 16))
# convert to more common alpha value
ctab[:, 3] = 255 - ctab[:, 3]
return annot, ctab, names | af9a0c535b9ed001b0fe9493e5d582d7db080e8a | 3,635,857 |
import copy
import numpy as np
def overlap3(data):
"""
"""
#
dataC = copy.copy(data)
temp = [[] for i in range(0, 12)]
index = int(dataC[0][0][5 : 7])
for x in dataC:
temp[(index % 12) - 1].append(x[1:])
index += 1
final = []
for x in temp:
final.append(np.array(x))
return final | 7baeb8bf5741b75262ca329b91e3a8625a5ad61c | 3,635,858 |
def gen_qsub_script(exp, run_type):
"""Populate qsub script with settings"""
reps = {}
qsub_path = ''
if config.is_cp_job(run_type):
reps = gen_cp_qsub_constants(exp, run_type)
else:
reps = gen_mask_qsub_constants(exp, run_type)
qsub_script = ''
qsub_template_path = config.get_template_qsub(run_type)
# read qsub template
with open(qsub_template_path, 'r') as f:
qsub_template = f.read()
qsub_script = helpers.multi_str_replace(qsub_template, reps)
# write qsub script
qsub_path = config.get_sge_input_qsub(exp, run_type)
with open(qsub_path, 'w') as f:
f.write(qsub_script)
return qsub_path | 66c7cf39512a0dfe8c1f7d62ed9542560eb0f927 | 3,635,859 |
import torch
def quat_diff_rad(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
"""
Get the difference in radians between two quaternions.
Args:
a: first quaternion, shape (N, 4)
b: second quaternion, shape (N, 4)
Returns:
Difference in radians, shape (N,)
"""
b_conj = quat_conjugate(b)
mul = quat_mul(a, b_conj)
# 2 * torch.acos(torch.abs(mul[:, -1]))
return 2.0 * torch.asin(
torch.clamp(
torch.norm(
mul[:, 0:3],
p=2, dim=-1), max=1.0)
) | fe6d63dbe1b0bfc4d834af18e2260032ea405e25 | 3,635,860 |
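A usage sketch; quat_mul and quat_conjugate are assumed to be the usual (x, y, z, w) quaternion helpers from the same module:

a = torch.tensor([[0.0, 0.0, 0.0, 1.0]])              # identity rotation
b = torch.tensor([[0.0, 0.0, 0.3826834, 0.9238795]])  # 45 degrees about z
print(quat_diff_rad(a, a))  # tensor([0.])
print(quat_diff_rad(a, b))  # tensor([0.7854]), i.e. pi/4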
def PairsMerging(xIn: list):
"""
    Merges the sorted runs ("pairs") formed at the first step; implemented iteratively with a while loop.
Parameters
----------
xIn : list
xIn - input list containing pairs of subarrays for sorting.
Returns
-------
Merged pair.
"""
# print(xIn,"input array for Pairs Merging") # Debugging
# while loop below - instead of recursive call of this function -
while(len(xIn) > 1):
xOut = [None]*((len(xIn)//2) + (len(xIn) % 2)) # a temporary array for forming output
sortingStep = 1 # Stepping and while() loop below going on pairs of subarrays for composing them in a sorted subarr
# while loop below going on the pairs of subarrays for making composed, sorted subarray for an output
while (sortingStep <= (len(xIn)//2)):
xTemp = [i*0 for i in range(len(xIn[2*(sortingStep-1)])+len(xIn[2*(sortingStep-1)+1]))] # For saving values
# from pairs of lists to sort
l1 = 0; l2 = 0 # Indexes for comparing values from both subarrays - donors for composing result sorted array
iFill = 0 # for filling resulting sorted subarray - result of this recursive function
# Picking values from two subarrays for making composed subarray as a result
while (l1 < len(xIn[2*(sortingStep-1)])) and (l2 < len(xIn[2*(sortingStep-1)+1])):
if (xIn[2*(sortingStep-1)])[l1] < (xIn[2*(sortingStep-1)+1])[l2]:
xTemp[iFill] = (xIn[2*(sortingStep-1)])[l1]; l1 += 1
else:
xTemp[iFill] = (xIn[2*(sortingStep-1)+1])[l2]; l2 += 1
iFill += 1
# Adding below the remaining, last biggest value from two subarrays to a composed subarray (output of recursion)
if (l1 < len(xIn[2*(sortingStep-1)])):
while ((l1 < len(xIn[2*(sortingStep-1)]))): # Adding remaining values from subarrays to a composed one
xTemp[iFill] = (xIn[2*(sortingStep-1)])[l1]; l1 += 1; iFill += 1
elif (l2 < len(xIn[2*(sortingStep-1)+1])):
while ((l2 < len(xIn[2*(sortingStep-1)+1]))): # Adding remaining values from subarrays to a composed one
xTemp[iFill] = (xIn[2*(sortingStep-1)+1])[l2]; l2 += 1; iFill += 1
# print(xTemp,"resulting of subarray")
xOut[sortingStep-1] = xTemp
sortingStep += 1
# Adding odd value (a single value subarray) to a resulting subarray - an output one
if (len(xIn) % 2) > 0:
xOut[sortingStep-1] = xIn[len(xIn)-1]
xIn = xOut.copy()
# Final function result
return xIn[0] | 83e2c36c2eb77b9acdfa6390bb79467dc5b788c7 | 3,635,861 |
def generate_dataset_db(
connection_string: str, file_name: str, include_null: bool
) -> str:
"""
Given a database connection string, extract all tables/fields from it
and write out a boilerplate dataset manifest, excluding optional null attributes.
"""
db_engine = get_db_engine(connection_string)
db_schemas = get_db_schemas(engine=db_engine)
db_datasets = create_db_datasets(db_schemas=db_schemas)
write_dataset_manifest(
file_name=file_name, include_null=include_null, datasets=db_datasets
)
return file_name | e80c43f98c608e63c61cda50d952bd13fded4bce | 3,635,862 |
def salt(secret: str) -> str:
"""A PBKDF salt."""
return sha256(secret.encode("utf-8")).hexdigest() | edbbf13dd4ce72c8bdaf272267de13704ce9930e | 3,635,863 |
async def async_get_service(
hass: HomeAssistant,
config: ConfigType,
discovery_info: DiscoveryInfoType | None = None,
) -> KNXNotificationService | None:
"""Get the KNX notification service."""
if not discovery_info or not discovery_info["platform_config"]:
return None
platform_config = discovery_info["platform_config"]
xknx: XKNX = hass.data[DOMAIN].xknx
notification_devices = []
for device_config in platform_config:
notification_devices.append(
XknxNotification(
xknx,
name=device_config[CONF_NAME],
group_address=device_config[KNX_ADDRESS],
)
)
return (
KNXNotificationService(notification_devices) if notification_devices else None
) | 9ffb1c0f2736dfde2aca18a73648c99f43c8d0f6 | 3,635,864 |
import re
import warnings
from sys import base_prefix
def docx_to_df(file_path):
"""
Convert docx file to dataframe
Parameters
----------
file_path : str
A file path of the document
Returns
-------
dataframe
speech | transcript_filepath | id | transcriber_id | wave_filepath
------------------------------------------------------------------
00:00 | Users/Soyeon/~~~. |119-2| 113. | Users/~~~~
"""
# Convert docx file to dataframe
text = docx2txt.process(file_path)
text_list = text.split('\n')
df = pd.DataFrame(text_list, columns = ["speech"])
# Add [transcript_filepath] column
df["transcript_filepath"] = file_path
# Add [id], [transcriber_id] columns
extract = re.search('(\d{3})-(\d{1})-(\d{3})', file_path)
if extract is not None:
df["id"] = extract.group(1) + "-" + extract.group(2)
df["transcriber_id"] = extract.group(3)
else:
df["id"] = None
df["transcriber_id"] = None
warnings.warn('File {0} seems to have the wrong title format for extracting id and transcriber_id'.format(file_path));
# Add [wave_filepath] column
audio_path = base_prefix + "Audio Files & Transcripts/Audio Files/"
df["wave_filepath"] = audio_path + df["id"] + ".wav"
return df | 7a1a73ad12d5ec4a9e4f3ef367a8a282d6999819 | 3,635,865 |
def _is_referenced_by_a_stack_frame_name(referrers, obj, name):
"""
Is there a reference among the given referrers, that is a stack frame,
which contains a local variable of the given name, which points to
the object of interest?
:param referrers: The references to scan.
:param obj: The object of interest.
:param name: The name the reference must have.
:return: Boolean.
"""
frame_referrers = [ref for ref in referrers if _is_a_(ref, 'frame')]
for frame in frame_referrers:
if name in frame.f_locals:
object_referred_to = frame.f_locals[name]
if object_referred_to == obj:
return True
return False | c8ba5fcffc407d52a672b0ab9d95217c2886747c | 3,635,866 |
from .py4cytoscape_utils import node_name_to_node_suid
from .py4cytoscape_utils import edge_name_to_edge_suid
def get_table_value(table, row_name, column, namespace='default', network=None, base_url=DEFAULT_BASE_URL):
"""Retrieve the value from a specific row and column from node, edge or network tables.
Args:
table (str): Name of table, e.g., node (default), edge, network
row_name (str): Node, edge or network name, i.e., the value in the "name" column
column (str): Name of column to retrieve values from
namespace (str): Namespace of table. Default is "default".
network (SUID or str or None): Name or SUID of a network. Default is the
"current" network active in Cytoscape.
base_url (str): Ignore unless you need to specify a custom domain,
port or version to connect to the CyREST API. Default is http://localhost:1234
and the latest version of the CyREST API supported by this version of py4cytoscape.
Returns:
obj: the value of the table cell, cast to float, int, bool or str depending on column type
Raises:
HTTPError: if table or namespace doesn't exist in network or if cell contains a numeric type but no number
CyError: if network name or SUID doesn't exist
requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error
Examples:
>>> get_table_value('node', 'YDL194W', 'COMMON')
'SNF3'
>>> get_table_value('edge', 'YLR197W (pp) YOR310C', 'EdgeBetweenness', network='My Network')
2.0
>>> get_table_value('node', 'YDL194W', 'IsSingleNode')
False
"""
suid = networks.get_network_suid(network, base_url=base_url)
# column type
table_col_info = get_table_column_types(table, namespace, network, base_url=base_url)
table_col_type = table_col_info[column]
# which row
row_key = None
if table == 'node':
row_key = node_name_to_node_suid(row_name, network, base_url=base_url)[0]
elif table == 'edge':
row_key = edge_name_to_edge_suid(row_name, network, base_url=base_url)[0]
elif table == 'network':
row_key = networks.get_network_suid(row_name,
base_url=base_url) # TODO: R implementation looks wrong because of == and use of row_name
else:
row_key = None
# get row/column value
res = commands.cyrest_get(f'networks/{suid}/tables/{namespace}{table}/rows/{row_key}/{column}', base_url=base_url,
require_json=False)
if not res: return None
# TODO: This "not res" can't happen for numbers because CyREST returns HTTPError if a value doesn't exist ... is this what we want?
# TODO: For strings, a '' is returned ... do we want to return None for this?
if table_col_type == 'Double':
return float(res)
elif table_col_type == 'Long':
return int(res)
elif table_col_type == 'Integer':
return int(res)
elif table_col_type == 'Boolean':
return bool(res)
else:
return str(res) | 6ad91abd6c7d5bb2db735b2d1f30e8d3c2dc152f | 3,635,867 |
def prettify_name_tuple(tup):
""" Processes the intersect tuples from the steam API. """
res = []
for name in tup:
res.append(name.split("_")[0])
return ", ".join(res) | 68d9e7170f02cf4a5de434806e7abcd99e5a77e7 | 3,635,868 |
def init_repository(path, bare=False,
flags=C.GIT_REPOSITORY_INIT_MKPATH,
mode=0,
workdir_path=None,
description=None,
template_path=None,
initial_head=None,
origin_url=None):
"""
Creates a new Git repository in the given *path*.
If *bare* is True the repository will be bare, i.e. it will not have a
working copy.
The *flags* may be a combination of:
- GIT_REPOSITORY_INIT_BARE (overridden by the *bare* parameter)
- GIT_REPOSITORY_INIT_NO_REINIT
- GIT_REPOSITORY_INIT_NO_DOTGIT_DIR
- GIT_REPOSITORY_INIT_MKDIR
- GIT_REPOSITORY_INIT_MKPATH (set by default)
- GIT_REPOSITORY_INIT_EXTERNAL_TEMPLATE
The *mode* parameter may be any of GIT_REPOSITORY_SHARED_UMASK (default),
GIT_REPOSITORY_SHARED_GROUP or GIT_REPOSITORY_INIT_SHARED_ALL, or a custom
value.
The *workdir_path*, *description*, *template_path*, *initial_head* and
*origin_url* are all strings.
See libgit2's documentation on git_repository_init_ext for further details.
"""
# Pre-process input parameters
if bare:
flags |= C.GIT_REPOSITORY_INIT_BARE
# Options
options = ffi.new('git_repository_init_options *')
C.git_repository_init_init_options(options, C.GIT_REPOSITORY_INIT_OPTIONS_VERSION)
options.flags = flags
options.mode = mode
options.workdir_path = to_bytes(workdir_path)
options.description = to_bytes(description)
options.template_path = to_bytes(template_path)
options.initial_head = to_bytes(initial_head)
options.origin_url = to_bytes(origin_url)
# Call
crepository = ffi.new('git_repository **')
err = C.git_repository_init_ext(crepository, to_bytes(path), options)
check_error(err)
# Ok
return Repository(path) | 1660cf767ddc393506d461d5c029f2f408c4b6de | 3,635,869 |
def carrington_rotation_number_relative(time, lon):
"""
A function that returns the decimal carrington rotation number for a spacecraft position
that may not be at the same place at earth. In this case you know the carrington longitude
of the spacecraft, and want to convert that to a decimal carrington number that is within
+0.5 and -0.5 of the decimal rotation for the earth-based longitude.
:param time: an astropy Time object indicating the time the position is known.
:param lon: the carrington longitude of the spacecraft position.
:return: the decimal_carrington number.
"""
# get the decimal carrington number for Earth at this time
cr_earth = sunpy.coordinates.sun.carrington_rotation_number(time)
# convert that to the earth longitude (this should match sunpy.coordinates.sun.L0(time))
cr0 = np.floor(cr_earth)
lon_earth = np.mod((1 - (cr_earth - cr0)*360), 360)
# compute the angular difference and the modulus
diff = lon_earth - lon
mod = np.mod(diff, 360.)
# compute the fractional rotation offset, which depends on where the periodic boundary is.
offset = 0.0
if lon_earth < 180 and mod < 180 and diff < 0:
offset = +1.0
if lon_earth >= 180 and mod >= 180 and diff >= 0:
offset = -1.0
cr_now = cr0 + np.mod(1.0 - lon/360., 360.) + offset
debug = False
if debug:
print('{: 7.3f} {: 7.3f} {: 7.3f} {: 7.3f} {: 7.3f} {: 7.3f}'.format(lon, diff, mod, cr_now, cr_earth,
cr_now - cr_earth))
print(cr_earth, cr0, lon_earth, sunpy.coordinates.sun.L0(time).value, lon, cr_now)
return cr_now | b771ba70edca7b546605cfede35053dabb3717bf | 3,635,870 |
import os
import sys
def wrap_elasticluster(args):
"""Wrap elasticluster commands to avoid need to call separately.
- Uses .bcbio/elasticluster as default configuration location.
- Sets NFS client parameters for elasticluster Ansible playbook. Uses async
clients which provide better throughput on reads/writes:
http://nfs.sourceforge.net/nfs-howto/ar01s05.html (section 5.9 for tradeoffs)
"""
if "-s" not in args and "--storage" not in args:
# clean up old storage directory if starting a new cluster
# old pickle files will cause consistent errors when restarting
storage_dir = os.path.join(os.path.dirname(DEFAULT_EC_CONFIG), "storage")
std_args = [x for x in args if not x.startswith("-")]
if len(std_args) >= 3 and std_args[1] == "start":
cluster = std_args[2]
pickle_file = os.path.join(storage_dir, "%s.pickle" % cluster)
if os.path.exists(pickle_file):
os.remove(pickle_file)
args = [args[0], "--storage", storage_dir] + args[1:]
if "-c" not in args and "--config" not in args:
args = [args[0]] + ["--config", DEFAULT_EC_CONFIG] + args[1:]
os.environ["nfsoptions"] = "rw,async,nfsvers=3" # NFS tuning
sys.argv = args
try:
return elasticluster.main.main()
except SystemExit as exc:
return exc.args[0] | 1288510358305caea3f69656e0576d06c8c2f837 | 3,635,871 |
def deproject(center,depth,K,pose=None):
"""
center.shape = [1,2]
depth.shape = [1,1]
K.shape = [3,3]
"""
out_gt = center * depth
out_gt = np.concatenate((out_gt, depth), 1)
# out_gt = [1,3]
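# Back-project the depth-scaled pixel coordinates to 3-D camera coordinates via the inverse intrinsics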
inv_K = np.linalg.inv(K.T)
xyz = np.dot(out_gt, inv_K)
return xyz | 62a996fc00453e9541a64c05c84b88fe316fa08e | 3,635,872 |
import requests
def get_branches(repo_id: str):
"""
Gets the branches from desired repository.
:param repo_id: repo id
:return: list(dict)
"""
branches = requests.get(url=BRANCHES_API_URL.format(repo_id), headers=HEADER).json()
return [parse_branch(branch) for branch in branches] | 80a8f84c6652ab63bdf11a5daa0447d4c60d7443 | 3,635,873 |
def lfs_cart_portlet(context, title=None):
"""Tag to render the cart portlet.
"""
if title is None:
title = _(u"Cart")
portlet = CartPortlet()
portlet.title = title
return {
"html": portlet.render(context)
} | 2791272ccc3ed3a0e38deb0f153e82c6528bbbb7 | 3,635,874 |
def test_declarative_barb_gfs_knots():
"""Test making a contour plot."""
data = xr.open_dataset(get_test_data('GFS_test.nc', as_file_obj=False))
barb = BarbPlot()
barb.data = data
barb.level = 300 * units.hPa
barb.field = ['u-component_of_wind_isobaric', 'v-component_of_wind_isobaric']
barb.skip = (3, 3)
barb.earth_relative = False
barb.plot_units = 'knot'
panel = MapPanel()
panel.area = 'us'
panel.projection = 'data'
panel.layers = ['coastline', 'borders', 'usstates']
panel.plots = [barb]
pc = PanelContainer()
pc.size = (8, 8)
pc.panels = [panel]
pc.draw()
return pc.figure | d4bb384802460354a93514c8be70fb699e16f481 | 3,635,875 |
from typing import Iterable
def new(entities: Iterable[DXFEntity] = None, query: str = "*") -> EntityQuery:
"""Start a new query based on sequence `entities`. The `entities` argument
has to be an iterable of :class:`~ezdxf.entities.DXFEntity` or inherited
objects and returns an :class:`EntityQuery` object.
"""
return EntityQuery(entities, query) | 37b65767ec61c319da09518d438b7bc791f659c9 | 3,635,876 |
def create_pydot_graph(op_nodes, data_nodes, param_nodes, edges, rankdir='TB', styles=None):
"""Low-level API to create a PyDot graph (dot formatted).
"""
pydot_graph = pydot.Dot('Net', graph_type='digraph', rankdir=rankdir)
op_node_style = {'shape': 'record',
'fillcolor': '#6495ED',
'style': 'rounded, filled'}
for op_node in op_nodes:
style = op_node_style
# Check if we should override the style of this node.
if styles is not None and op_node[0] in styles:
style = styles[op_node[0]]
pydot_graph.add_node(pydot.Node(op_node[0], **style, label="\n".join(op_node)))
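# Add the data nodes; everything after the node id becomes the label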
for data_node in data_nodes:
pydot_graph.add_node(pydot.Node(data_node[0], label="\n".join(data_node[1:])))
node_style = {'shape': 'oval',
'fillcolor': 'gray',
'style': 'rounded, filled'}
if param_nodes is not None:
for param_node in param_nodes:
pydot_graph.add_node(pydot.Node(param_node[0], **node_style, label="\n".join(param_node[1:])))
for edge in edges:
pydot_graph.add_edge(pydot.Edge(edge[0], edge[1]))
return pydot_graph | 2b15ef833ef968d752ecbc19e705facac2038255 | 3,635,877 |
import requests
def info_session(request, session_type):
"""Information session request form."""
if not SESSION_TYPES.get(session_type):
raise Http404
if request.method == 'POST':
form = InfoSessionForm(session_type, request.POST)
if form.is_valid():
cd = form.cleaned_data
session_type = SESSION_TYPES[session_type]
cd['session_type'] = session_type
# fetch event
earl = '{0}/{1}/{2}@JSON'.format(
settings.LIVEWHALE_API_URL,
settings.LIVEWHALE_API_EVENTS_ID,
cd['event'],
)
response = requests.get(earl)
jason = response.json()
cd['event'] = jason
# to
recipients = settings.CONTINUING_EDUCATION_INFOSESSION_RECIPIENTS
subject = 'Master of Business Information Session Request: {0}'.format(session_type)
subject += '{0} on {1} ({2})'.format(
session_type, jason['date'], jason['date_time'],
)
send_mail(
request,
recipients,
subject,
cd['email'],
'admissions/infosession_email.html',
cd,
)
return HttpResponseRedirect(reverse_lazy('info_session_success'))
else:
form = InfoSessionForm(session_type)
return render(request, 'admissions/infosession.html', {'form': form}) | 028d2832d728b4569473cd5b010c8da25d3717bf | 3,635,878 |
def transform_frame(frame, transformation_matrix):
"""
transform the selected region to bird's eye view
:param frame: the original image
:param transformation_matrix: the transformation matrix
:return: the image after transform, width scale, and height scale
"""
rows, cols, _ = frame.shape
new_frame = cv2.warpPerspective(frame, transformation_matrix, (cols, rows))
scale_w = int(new_frame.shape[0] / frame.shape[0])
scale_h = int(new_frame.shape[1] / frame.shape[1])
return new_frame, scale_w, scale_h | 4652c855b29c17a208e4d7d054a7090fa82a6181 | 3,635,879 |
def _annotation_dict_all_filter(data, query):
"""Match edges with the given dictionary as a sub-dictionary.
:param dict data: A PyBEL edge data dictionary
:param dict query: The annotation query dict to match
:rtype: bool
"""
annotations = data.get(ANNOTATIONS)
if annotations is None:
return False
for key, values in query.items():
ak = annotations.get(key)
if ak is None:
return False
for value in values:
if value not in ak:
return False
return True | bd71eaa995242afbad3c158874cf86bb1708d7c3 | 3,635,880 |
def split_storm_info(storm_list):
"""split_storm_info takes a list of strings and creates a pandas dataframe
for the data set taken off the NHC archive. This function is called in the main to
find all storms."""
name, cycloneNum, year, stormType, basin, filename = [], [], [], [], [], []
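# The first line is skipped; each remaining record is split on commas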
for line in storm_list[1:]:
fields = line.split(',')
name.append(fields[0].strip())
basin.append(fields[1].strip())
cycloneNum.append(fields[7].strip())
year.append(fields[8].strip())
stormType.append(fields[9].strip())
filename.append(fields[-1].strip().lower())
storms = DataFrame({'Name': name, 'Basin': basin, 'CycloneNum': np.array(cycloneNum),
'Year': np.array(year), 'StormType': stormType,
'Filename': filename})
return(storms) | fe41e6cf6dfa3d4be1c5549bd29284d0a29a5d90 | 3,635,881 |
import sys
def open_file(file_name):
""" Opens a comma separated CSV file
Parameters
----------
file_name: string
The path to the CSV file.
Returns:
--------
Output: the opened file
"""
# Checks for file not found and permission errors
try:
f = open(file_name, 'r')
except FileNotFoundError:
print("Couldn't find file " + file_name)
sys.exit(3)
except PermissionError:
print("Couldn't access file " + file_name)
sys.exit(4)
# opens the file
f = open(file_name, 'r', encoding="ISO-8859-1")
return(f) | 21e3abe90fbfb169568ef051fa3f130cc7f1315a | 3,635,882 |
def linear_inshape(module_masks, mask):
"""
Coarse grained input mask does not change the shape of weights and output tensor
Parameters
----------
module_masks : ModuleMasks
The ModuleMasks instance of the linear
mask : CoarseMask
The mask of its input tensor
Returns
-------
CoarseMask
The mask of its output tensor, ```None``` means shape of output tensor is not changed
"""
assert isinstance(mask, CoarseMask)
assert mask.mask_index[0] is None
if module_masks.input_mask is not None:
assert module_masks.input_mask <= mask
module_masks.set_input_mask(mask)
return None | 51661e74575fe2b924ce6fa4a67c4b47ee53ea99 | 3,635,883 |
def test_enable_8021q_3(monkeypatch):
"""Verify that enable_802q_1 function return exception if 8021q can not be loaded.
"""
cmd_list = []
def mockreturn(command):
cmd_list.append(command)
so = "8021q"
if command == "lsmod | grep ^8021q":
so = ""
return CmdStatus(so, "", 0)
lh = GenericLinuxHost(LH_CFG, OPTS)
monkeypatch.setattr(lh.ssh, 'exec_command', mockreturn)
with pytest.raises(Exception) as excepinfo:
lh.enable_8021q()
result = "Fail to load 8021q:\n8021q"
assert str(excepinfo.value) == result | f1cf3e6679d1d3c1bb8a25ff2873ae787164cb7d | 3,635,884 |
def prep_data_for_feature_gen(data):
"""Restructure OANDA data to use it for TA-Lib feature generation"""
inputs = {
'open': np.array([x['openMid'] for x in data]),
'high': np.array([x['highMid'] for x in data]),
'low': np.array([x['lowMid'] for x in data]),
'close': np.array([x['closeMid'] for x in data]),
'volume': np.array([float(x['volume']) for x in data])}
return inputs | a9666a24486e19196c2c13ebd198675207fc8d32 | 3,635,885 |
def matriz_krylov(A, x, n_iters=None):
"""Genera una matriz de krylov dada una matriz A y un vector x. Cada columna de la matriz es la iteración i de A^i*x.
Args:
A (matriz): Matriz de aplicación
x (vector): Vector base
n_iters (int, optional): Número de iteraciones. Por defecto es el número de filas de A + 1 (garantiza que la matriz de krylov tiene una combinación lineal).
Returns:
m_krylov: Matriz con las aplicaciones de krylov por columna.
"""
if n_iters is None:
n_iters = A.shape[0] + 1
m_krylov = zeros(A.shape[0], n_iters)
m_krylov[:, 0] = x
for i in range(1, n_iters):
m_krylov[:, i] = A * m_krylov[:, i - 1]
return simplify(m_krylov) | 5463ca2db8d1d638f5ef7ab0ee258416dceccd62 | 3,635,886 |
def test_case_result_score(answers, user_test_id):
"""
Calculate result score for test.
Check every user's answer (check_answer), calculate number of correct answers.
@param answers: dict of pairs question_id and list of answers.
@param user_test_id: UserTestCase object id --> int
@return: result score from 0 to 100.
"""
total_number_of_questions = UserTestCase.objects.get(id=user_test_id).test_case.question.count()
correct_answers = 0
for question_id, answer_list in answers.items():
correct_answers += check_answer(question_id, answer_list)
result_score = int(correct_answers/total_number_of_questions*100)
return result_score | 1d4efae6f50a5d9cc4ed655e47056d066c53abac | 3,635,887 |
def AtensDeltaV(df):
"""Delta V calculation for Atens asteroids, where a < 1."""
df['ut2'] = 2 - 2*np.cos(df.i/2)*np.sqrt(2*df.Q - df.Q**2)
df['uc2'] = 3/df.Q - 1 - (2/df.Q)*np.sqrt(2 - df.Q)
df['ur2'] = 3/df.Q - 1/df.a - (
(2/df.Q)*np.cos(df.i/2)*np.sqrt(df.a*(1-df.e**2)/df.Q))
return df | 6996a921020b8474dc119c0384062288d1d138b7 | 3,635,888 |
from pypy.interpreter import gateway
def init__builtin__(space):
"""NOT_RPYTHON"""
##SECTION##
## filename '<codegen /Users/steve/Documents/MIT TPP/2009-2010/6.893/project/pypy-dist/pypy/interpreter/gateway.py:824>'
## function 'pypy_init'
## firstlineno 2
##SECTION##
# global declarations
# global object g3dict
# global object gs___name__
# global object gs___builtin__
# global object gs_pypy_init
# global object gfunc_pypy_init
# global object gs___import__
# global object gs_site
# global object g0dict
def pypy_init(space, w_import_site):
goto = 1 # startblock
while True:
if goto == 1:
v0 = space.is_true(w_import_site)
if v0 == True:
goto = 2
else:
w_0 = space.w_None
goto = 3
if goto == 2:
w_1 = space.call_function((space.builtin.get(space.str_w(gs___import__))), gs_site, g0dict, space.w_None, space.w_None)
w_0 = space.w_None
goto = 3
if goto == 3:
return w_0
fastf_pypy_init = pypy_init
fastf_pypy_init.__name__ = 'fastf_pypy_init'
##SECTION##
g3dict = space.newdict()
gs___name__ = space.new_interned_str('__name__')
gs___builtin__ = space.new_interned_str('__builtin__')
space.setitem(g3dict, gs___name__, gs___builtin__)
gs_pypy_init = space.new_interned_str('pypy_init')
gfunc_pypy_init = space.wrap(gateway.interp2app(fastf_pypy_init, unwrap_spec=[gateway.ObjSpace, gateway.W_Root]))
space.setitem(g3dict, gs_pypy_init, gfunc_pypy_init)
gs___import__ = space.new_interned_str('__import__')
gs_site = space.new_interned_str('site')
g0dict = space.newdict()
return g3dict | c583fd15c33aeefddf67116aff63b30d26edb366 | 3,635,889 |
import scipy
import numpy
def CalculateXuIndex(mol):
"""
#################################################################
Calculation of Xu index
---->Xu
Usage:
result=CalculateXuIndex(mol)
Input: mol is a molecule object
Output: result is a numeric value
#################################################################
"""
nAT=mol.GetNumAtoms()
deltas=[x.GetDegree() for x in mol.GetAtoms()]
Distance= Chem.GetDistanceMatrix(mol)
sigma=scipy.sum(Distance,axis=1)
temp1=0.0
temp2=0.0
for i in range(nAT):
temp1=temp1+deltas[i]*((sigma[i])**2)
temp2=temp2+deltas[i]*(sigma[i])
Xu=numpy.sqrt(nAT)*numpy.log(temp1/temp2)
return Xu | 0123f3ea82bb89ef7923e7f1638aafd8fbfe9fb0 | 3,635,890 |
def register_dataclass(registry: ServiceRegistry, target, for_, context=None):
""" Generic injectory factory for dataclasses """
# Note: This function could be a decorator which already knows
# the registry, has all the targets, and can do them in one
# container that it makes. For example:
# from wired.decorators import factory
# @factory(for_=Greeter, context=FrenchCustomer)
# @datclass
# class FrenchGreeter(Greeter):
# pass
if getattr(target, 'factory', None):
# This class wants to control its factory, use that one
dataclass_factory = target.factory
else:
# Use a generic dataclass factory
def dataclass_factory(c: ServiceContainer):
instance = injector_construction(c, target)
return instance
registry.register_factory(dataclass_factory, for_, context=context) | 50453755c6c132cf4cf38fd727935c306dc7082d | 3,635,891 |
async def async_setup_entry(hass, config_entry):
"""Set up the UniFi component."""
if DOMAIN not in hass.data:
hass.data[DOMAIN] = {}
controller = UniFiController(hass, config_entry)
controller_id = get_controller_id_from_config_entry(config_entry)
hass.data[DOMAIN][controller_id] = controller
if not await controller.async_setup():
return False
if controller.mac is None:
return True
device_registry = await hass.helpers.device_registry.async_get_registry()
device_registry.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(CONNECTION_NETWORK_MAC, controller.mac)},
manufacturer=ATTR_MANUFACTURER,
model="UniFi Controller",
name="UniFi Controller",
# sw_version=config.raw['swversion'],
)
return True | 52c4409532c10899a9b9b621b762bf92b3a58b59 | 3,635,892 |
import os
def file_to_dataframe(file_id, compression='infer', client=None,
**read_kwargs):
"""Load a :class:`~pandas.DataFrame` from a CSV stored in a Civis File
The :class:`~pandas.DataFrame` will be read directly from Civis
without copying the CSV to a local file on disk.
Parameters
----------
file_id : int
ID of a Civis File which contains a CSV
compression : str, optional
If "infer", set the ``compression`` argument of ``pandas.read_csv``
based on the file extension of the name of the Civis File.
Otherwise pass this argument to ``pandas.read_csv``.
client : :class:`civis.APIClient`, optional
If not provided, an :class:`civis.APIClient` object will be
created from the :envvar:`CIVIS_API_KEY`.
**read_kwargs
Additional arguments will be passed directly to
:func:`~pandas.read_csv`.
Returns
-------
:class:`~pandas.DataFrame` containing the contents of the CSV
Raises
------
ImportError
If ``pandas`` is not available
See Also
--------
pandas.read_csv
"""
if not HAS_PANDAS:
raise ImportError('file_to_dataframe requires pandas to be installed.')
client = APIClient() if client is None else client
file_info = client.files.get(file_id)
file_url = file_info.file_url
if not file_url:
raise EmptyResultError('Unable to locate file {}. If it previously '
'existed, it may have '
'expired.'.format(file_id))
file_name = file_info.name
if compression == 'infer':
comp_exts = {'.gz': 'gzip', '.xz': 'xz', '.bz2': 'bz2', '.zip': 'zip'}
ext = os.path.splitext(file_name)[-1]
if ext in comp_exts:
compression = comp_exts[ext]
return pd.read_csv(file_url, compression=compression, **read_kwargs) | c675602890c94dcf549d4235be50a5662151ed72 | 3,635,893 |
from typing import Union
from typing import Optional
import warnings
def dataset_to_xy(
dataset: Dataset,
target_columns: Union[str, list],
qid_column: Optional[str],
):
"""Convert Merlin Dataset to XGBoost DMatrix"""
df = dataset.to_ddf()
qid = None
if qid_column:
df = df.sort_values(qid_column)
qid = df[qid_column]
all_target_columns = dataset.schema.select_by_tag(Tags.TARGET).column_names
# Ignore list-like columns from schema
list_column_names = [
col_name
for col_name, col_schema in dataset.schema.column_schemas.items()
if col_schema.is_list
]
if list_column_names:
warnings.warn(f"Ignoring list columns as inputs to XGBoost model: {list_column_names}.")
X = df.drop(all_target_columns + list_column_names, axis=1)
y = df[target_columns]
# Ensure columns are in a consistent order
X = X[sorted(X.columns)]
return X, y, qid | 201e5bf5513f35bd683bfad7bcf4ecb8b255cd93 | 3,635,894 |
def clean_data(df):
"""Clean data included in the DataFrame and transform categories part
INPUT
df -- type pandas DataFrame
OUTPUT
df -- cleaned pandas DataFrame
"""
categories = df['categories'].str.split(pat=';', expand=True)
row = categories.loc[0]
colnames = []
for entry in row:
colnames.append(entry[:-2])
category_colnames = colnames
print('Column names:', category_colnames)
categories.columns = category_colnames
for column in categories:
categories[column] = categories[column].str[-1:]
categories[column] = categories[column].astype(int)
df.drop('categories', axis=1, inplace=True)
df = pd.concat([df, categories], axis=1)
df.drop_duplicates(inplace=True)
# Removing entry that is non-binary
df = df[df['related'] != 2]
print('Duplicates remaining:', df.duplicated().sum())
return df | 40319f0f739e532bd559f14c70d988b7257c6fa3 | 3,635,895 |
def compute_timeline(agents, ts_tuple, dep_ivs):
"""
Compute the timeline of events that can occur in the field.
Given the departure intervals of the agents, this function
computes a common timeline of events that captures all possible
occurances of events in the field.
Parameters
----------
agents: A list of integers.
ts_tuple: Tuple of transition system objects.
ts_tuple[i] corresponds to the transition system of agent i.
dep_ivs: 2-D array of interval objects.
dep_ivs[i][j] gives the earliest lates departure times of agent
i from position j of the run.
Returns
-------
timeline: A dictionary of sets of tuples keyed by interval objects.
An event is a tuple of the form (agent_no, run_pos).
timeline is a dictionary of sets of events keyed by intervals
such that timeline[Interval(0,1)] gives the set of events that
can occur in Interval(0,1), if such an interval is defined.
"""
# Construct a dictionary of sets of events keyed by intervals.
timeline = dict()
# An event is a tuple of the form (agent_no, run_pos).
# timeline[Interval(...)] gives the set of events that can occur
# in Interval(...), if such an interval is defined.
# Consider all agents
for agent_no in agents:
# Consider all positions
for run_pos in range(0, len(dep_ivs[agent_no])):
# The queue of intervals to be projected
# We use a queue as we may break down old intervals
# if they intersect with the new ones partially.
projection_queue = [dep_ivs[agent_no][run_pos]]
for new_iv in projection_queue:
# Flag to remember if we intersect with anything
intersected = False
new_iv_events = {Event(agent=agent_no, pos=run_pos)}
# Consider all previously discovered intervals
for old_iv in list(timeline.keys()):
# See if new_iv intersects with any old_iv
old_iv_events = timeline[old_iv]
int_iv = old_iv & new_iv
if int_iv:
# We have a valid intersection
intersected = True
# Create a new iv for intersection (or update)
timeline[int_iv] = old_iv_events | new_iv_events
# Find non-intersecting parts of old_iv and new_iv
old_diff = old_iv.difference(int_iv)
new_diff = new_iv.difference(int_iv)
if old_diff:
# Break old_iv as needed
for old_iv_frag in old_diff:
timeline[old_iv_frag] = set([ii for ii in old_iv_events])
# Remove previous old_iv entry
del timeline[old_iv]
# Break new_iv as needed
for new_iv_frag in new_diff:
# add to queue, as this fragment may intersect with others
projection_queue.append(new_iv_frag)
# Finished processing this part of new_iv.
# As intervals in the timeline are disjoint no need
# to consider further entries.
break
if not intersected:
# new_iv did not intersect w/ any of the old_ivs
timeline[new_iv] = new_iv_events
return timeline | 12d99be139a4327552231dc2802d165d114139fe | 3,635,896 |
import unittest
def test_suite():
"""Test suite including all test suites"""
testSuite = unittest.TestSuite()
testSuite.addTest(test_randomdata.test_suite())
return testSuite | 8ac9cfcfebf9255f2cab01db9d4a1a53a3d24871 | 3,635,897 |
def eHealthClass_setupPulsioximeterForNextReading():
"""eHealthClass_setupPulsioximeterForNextReading()"""
return _ehealth.eHealthClass_setupPulsioximeterForNextReading() | d48cfd85752a75ca27e33794e18058a89f03a291 | 3,635,898 |
import scipy
def hist_argmaxima2(hist, maxima_thresh=.8):
"""
must take positive only values
Setup:
>>> # ENABLE_DOCTEST
>>> from vtool_ibeis.histogram import * # NOQA
GridSearch:
>>> hist1 = np.array([1, .9, .8, .99, .99, 1.1, .9, 1.0, 1.0])
>>> hist2 = np.array([1, .9, .8, .99, .99, 1.1, 1.0, 1.0])
>>> hist2 = np.array([1, .9, .8, .99, .99, 1.1, 1.0])
>>> hist2 = np.array([1, .9, .8, .99, .99, 1.1, 1.2])
>>> hist2 = np.array([1, 1.2])
>>> hist2 = np.array([1, 1, 1.2])
>>> hist2 = np.array([1])
>>> hist2 = np.array([])
Example:
>>> # ENABLE_DOCTEST
>>> maxima_thresh = .8
>>> hist = np.array([1, .9, .8, .99, .99, 1.1, .9, 1.0, 1.0])
>>> argmaxima = hist_argmaxima2(hist)
>>> print(argmaxima)
"""
# FIXME: Not handling general cases
# [0] index because argrelmaxima returns a tuple
if len(hist) == 0:
return np.empty(dtype=np.int)
comperetor = np.greater
argmaxima_ = scipy.signal.argrelextrema(hist, comperetor)[0]
if len(argmaxima_) == 0:
argmaxima_ = np.array([hist.argmax()]) # Hack for no maxima
maxval = hist[argmaxima_].max()
size = len(hist)
end = size - 1
# Test if 0 is a maximum point
if 0 not in argmaxima_ and size > 0:
start_is_extreme = hist[0] > hist[1]
if start_is_extreme and hist[0] >= maxval * maxima_thresh:
argmaxima_ = np.hstack([[0], argmaxima_])
# Test if end is maximum point
if end not in argmaxima_ and end > 0:
#end_is_extreme = np.all(hist[argmaxima_[-1] + 1:(end - 1)] < hist[end] )
end_is_extreme = hist[end] > hist[end - 1]
if not end_is_extreme:
# FIXME: might be a case when end is level
pass
#end_is_extreme = np.all(hist[argmaxima_[-1] + 1:(end - 1)] == hist[end] )
if end_is_extreme and hist[end] >= maxval * maxima_thresh:
argmaxima_ = np.hstack([argmaxima_, [end]])
# threshold maxima to be within a factor of the maximum
maxima_y = hist[argmaxima_]
isvalid = maxima_y >= maxval * maxima_thresh
argmaxima = argmaxima_[isvalid]
return argmaxima | 4d7e3cf343f9604389d39c82c69c0f0eeb59a383 | 3,635,899 |