content | sha1 | id |
|---|---|---|
def add_metadata(amr_df, metadata_df):
"""
Adds the extra information in the metadata tsv into the amr df
"""
merged_df = amr_df.merge(metadata_df, on=['genome_id','genome_name','taxon_id'], how='left')
return merged_df | 92c4431eaff937d9a6b49ae5806fe97838f56bf6 | 33,723 |
from typing import Optional
def _parse_str(arg: 'Optional[str]') -> str:
"""Parse argument as :obj:`str`.
Args:
arg: Original request argument.
Returns:
Parsed query argument.
"""
if arg is None:
return ''
return arg | 1886e0399cb7a6d9de4656ab4b39e0eb16d3c395 | 33,725 |
import torch
def to_onehot(c):
""" Creates a onehot torch tensor """
onehot = torch.zeros((1, 1000))
onehot[:, c] = 1.0
return onehot | c89356180d41243158dcee838eab7181f00f870a | 33,726 |
import bs4
def is_declaration(obj): # pragma: no cover
"""Is declaration."""
return isinstance(obj, bs4.Declaration) | 431f704a141c34f57c72218256f4d50fe2e8a99d | 33,727 |
import math
def normal_cdf(x, mu=0.0, sigma=1.0):
""" 평균이 mu이고, 표준편차가 sigma인
정규 분포의 누적 분포 함수(cumulative distribution function).
math.erf() 함수(error function)를 이용해서 구현"""
return (1 + math.erf((x - mu) / (math.sqrt(2) * sigma))) / 2 | 5287d3740d6232e034609e0e0fce8a4b87e8ae45 | 33,728 |
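
For illustration, a quick usage sketch checking a few standard normal-CDF facts:

    assert normal_cdf(0.0) == 0.5                       # CDF at the mean is exactly 0.5
    assert abs(normal_cdf(1.96) - 0.975) < 1e-3         # ~97.5% of mass lies below +1.96 sigma
    assert abs(normal_cdf(110, mu=100, sigma=10) - normal_cdf(1.0)) < 1e-12  # standardizing (x - mu) / sigma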
import typing
def _read_exact(stream: typing.BinaryIO, byte_count: int) -> bytes:
"""Read ``byte_count`` bytes from ``stream`` and raise an exception if too few bytes are read
(i. e. if EOF was hit prematurely).
"""
data = stream.read(byte_count)
if len(data) != byte_count:
raise ValueError(f"Attempted to read {byte_count} bytes of data, but only got {len(data)} bytes")
return data | 701180cb98985eca620c7e4d200050d92cc889ea | 33,730 |
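
A quick usage sketch with an in-memory stream (io.BytesIO stands in for a real file):

    import io
    stream = io.BytesIO(b"abcdef")
    assert _read_exact(stream, 4) == b"abcd"
    try:
        _read_exact(stream, 4)  # only b"ef" remains, so this raises
    except ValueError as exc:
        print(exc)  # Attempted to read 4 bytes of data, but only got 2 bytes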
def compute_min_distance_mendelian_ci(proband_CI, parent_CIs):
"""Commute the smallest distance between the given proband confidence interval, and the confidence intervals of
parental genotypes.
Args:
proband_CI (Interval): ExpansionHunter genotype confidence interval.
parent_CIs (list of Intervals): list of ExpansionHunter genotype confidence intervals.
Return:
int: the smallest distance (in base-pairs) between one of the Interval in the parent_CIs Interval list, and
the given proband_CI.
"""
return min([abs(proband_CI.distance_to(parent_CI)) for parent_CI in parent_CIs]) | 9b8f71c612972410054b18b05c5e86bda3c96321 | 33,731 |
import json
def filter_items(df):
"""
Filter items of interest from the stays.csv files
    :param df: dataframe whose columns are filtered against the item IDs in items.JSON
    :return: the filtered dataframe
"""
with open('simplimicapp/items.JSON') as infobj:
items_dict = json.load(infobj)
all_relevant = [_ for d in items_dict.values() for v in d.values() for _ in v]
df.drop([c for c in df.columns if c not in all_relevant and str(c).isdigit()], axis=1, inplace=True)
return df | cfd8df9f6f69a486cf9183f3d98b81270b4a3ab2 | 33,732 |
def is_valid_status_code(status_code):
"""
Returns whether the input is a valid status code. A status code is valid if it's an integer value and in the
range [100, 599] :param status_code: :return:
"""
return type(status_code) == int and 100 <= status_code < 600 | 903878d7fabd6e3abd25d83bb0bbd37e0c8d3ce5 | 33,733 |
import os
import json
def load_settings(session_path):
"""
Load PyBpod Settings files (.json).
:param session_path: Absolute path of session folder
:type session_path: str
:return: Settings dictionary
:rtype: dict
"""
path = os.path.join(session_path, "raw_behavior_data",
"_ibl_pycwBasic.settings.json")
with open(path, 'r') as f:
settings = json.loads(f.readline())
return settings | 7663d6c18526e830b55c4e50544c118cfadf8ebb | 33,734 |
import subprocess
import time
def subprocess_execute(command, time_out=60):
"""executing the command with a watchdog"""
# launching the command
c = subprocess.Popen(command)
# now waiting for the command to complete
t = 0
while t < time_out and c.poll() is None:
        time.sleep(1)  # poll once per second
t += 1
# there are two possibilities for the while to have stopped:
if c.poll() is None:
# in the case the process did not complete, we kill it
c.terminate()
# and fill the return code with some error value
        returncode = -1  # arbitrary negative value signalling a timeout
else:
# in the case the process completed normally
returncode = c.poll()
return returncode | 9f8755498942e2376cd3097ed03677d8cc9d0484 | 33,735 |
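
A usage sketch, assuming POSIX commands are available:

    assert subprocess_execute(["true"]) == 0                     # completes within the timeout
    assert subprocess_execute(["sleep", "5"], time_out=2) == -1  # killed by the watchdog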
def get_change_segment_details(change_segment):
"""
Function returns details for single change segment as a dictionary.
"""
change_segment_dict = {}
change_segment_dict['change_time'] = change_segment\
.find_element_by_xpath(".//strong[@class='qa-segment-change-time']").text
return change_segment_dict | 15a9408e3f7d817deacac9d71e48f4a653b667b6 | 33,736 |
from typing import Tuple
def coordinates_to_chunk(x: int, y: int, chunk_size: int) -> Tuple[int, int]:
"""
    Convert x, y coordinates to the center coordinates of the chunk that contains them
"""
normal_x = x // chunk_size
normal_y = y // chunk_size
middle_x = normal_x*chunk_size+(chunk_size//2)
middle_y = normal_y*chunk_size+(chunk_size//2)
return middle_x, middle_y | 40ab03f2a6391f2d11e72cc80d510a3f82af536e | 33,737 |
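
For example, with 16x16 chunks every point inside a chunk maps to that chunk's center:

    assert coordinates_to_chunk(5, 30, 16) == (8, 24)
    assert coordinates_to_chunk(0, 0, 16) == coordinates_to_chunk(15, 15, 16) == (8, 8)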
def prepare_plot_data(n_labels, legends, markers):
"""
:param int n_labels: Number of labels
:param list_or_None legends: List of legends
If None, it will be transformed to list of None
:param list_or_None markers: List of markers
If None, it will be transformed to list of 'circle'
:returns: legends, markers
- legends : transformed list of legends
- markers : transformed list of markers
Usage
>>> n_labels = 3
>>> legends, markers = None, None
>>> legends, markers = prepare_plot_data(n_labels, legends, markers)
>>> print(legends)
[None, None, None]
>>> print(markers) # default markers
['circle', 'circle', 'circle']
"""
if legends is None:
legends = [None] * n_labels
if markers is None:
markers = ['circle'] * n_labels
    def length_checker(seq):
        if len(seq) < n_labels:
            raise ValueError(
                f'Number of labels is {n_labels}, however the list is too short: {seq}')
    for seq in (legends, markers):
        length_checker(seq)
return legends, markers | 84ce953fd03e9dd548f3e2e7a3e5fcce43b4364a | 33,738 |
def premium(q,par):
""" Returns the (minimum) premium that an insurance company would take
Args:
q (float): coverage
par (namespace): parameter values
Returns:
(float): premium
"""
return par.p*q | 2166bc6e16577a26d3adc17afe5929bf8fca073b | 33,739 |
def _thermocycle_error_text():
"""
Returns formatted error text for thermocycle value errors
"""
return """Thermocycle input types must take a list of dictionaries in the form of:
[{"cycles": integer,
"steps": [{
"duration": duration,
"temperature": temperature
"read": boolean (optional)
}]
}]
--or--
[{"cycles": integer,
"steps": [{
"duration": duration,
"gradient": {
"top": temperature,
"bottom": temperature
        },
"read": boolean (optional)
}]
}]
(You can intermix gradient and non-gradient steps)""" | cee2fb445da91e50f32fe6799dfd328e8c3d5edc | 33,741 |
def parse_cookie(string):
"""
parse_cookie(string) -> dict
Parse the given Cookie: header value and return a mapping from cookie
names to values.
"""
ret = {}
for el in string.split(';'):
n, _, v = el.partition('=')
ret[n.strip()] = v.strip()
return ret | 5da1fbfa7ee9cde5dfb742355cf96c2931633d80 | 33,744 |
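
A quick usage sketch:

    cookies = parse_cookie("sessionid=abc123; theme=dark; lang=en")
    assert cookies == {"sessionid": "abc123", "theme": "dark", "lang": "en"}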
def comm_self_to_comm_world(s, w):
"""
Convert a function defined on a mesh with a MPI_COMM_SELF to
a function defined on the same geometry, but with MPI_COMM_WORLD.
    The mesh and function spaces must be identical except for the
MPI communicator.
- s is the MPI_COMM_SELF function
- w is the MPI_COMM_WORLD function
The return value is in the same function space as w
"""
Vs = s.function_space()
Vw = w.function_space()
gdim = Vs.mesh().geometry().dim()
coords_s = Vs.tabulate_dof_coordinates().reshape((-1, gdim))
coords_w = Vw.tabulate_dof_coordinates().reshape((-1, gdim))
def locate_in_s(coord):
for i, c in enumerate(coords_s):
if (c == coord).all():
return i
# Construct w2 and initialize to zero
w2 = w.copy()
w2.vector().zero()
w2.vector().apply('insert')
# Construct a parallel version of s by finding dofs with the
# exact same geometrical coordinates
arr_s = s.vector().get_local()
arr_w2 = w2.vector().get_local()
for dof_w, coord in enumerate(coords_w):
dof_s = locate_in_s(coord)
arr_w2[dof_w] = arr_s[dof_s]
w2.vector().set_local(arr_w2)
w2.vector().apply('insert')
return w2 | 0c29f4b1273f3f4ca4d40dacbe615c0fdad061ec | 33,745 |
import os
def list_files_in_subdir(filepath: str) -> list:
"""
    Get a list of the file paths of all files in the directory and its subdirectories.
"""
res = []
for path, _, files in os.walk(filepath):
for name in files:
res.append(os.path.join(path, name))
return res | c3a56bb5bf77860b400ea706879db261ad8bc237 | 33,746 |
def strip_comments(line):
"""strip comments ('! blubb', '# blabb') off a line"""
line = line.replace('#', '!')
return line.split('!')[0] | 9bdc62c5d843da540244ba40c8d2107f751ede98 | 33,747 |
import numpy
def linbleeched_F0(data):
"""
    Calculate a linear fit (:math:`y(t) = m t + y_0`) for each pixel, which is assumed to correct for bleaching effects.
    :param data: the video data of shape (M,N,T).
    :return: tuple (m, y0) with two images, each with shape (M,N).
"""
    # generate time coordinates
x = numpy.arange(data.shape[-1])
# reshape the data to two d array, first dimension is pixel index, second dimension is time
d = numpy.reshape(data, (data.shape[0] * data.shape[1], data.shape[-1]))
# find fit parameters
m, y0 = numpy.polyfit(x, d.T, 1)
# reshape fit parameters back to image shape
return m.reshape(data.shape[0:2]), y0.reshape(data.shape[0:2]) | 278772624246186d0495e3b61ca2635533e3aa3e | 33,748 |
def fetch_vendor_id(vendor_name, conn):
"""
Retrieve our vendor id from our PostgreSQL DB, table data_vendor.
args:
vendor_name: name of our vendor, type string.
conn: a Postgres DB connection object
return:
vendor id as integer
"""
cur = conn.cursor()
cur.execute("SELECT id FROM data_vendor WHERE name = %s", (vendor_name,))
# will return a list of tuples
vendor_id = cur.fetchall()
# index to our first tuple and our first value
vendor_id = vendor_id[0][0]
return vendor_id | 504cb7e71791ef7385edf5e143ffa0f4c3d09313 | 33,750 |
def enter(e):
"""Allows user to specify if they want pdfcp() to automatically append a
carriage return to the end of copied pdf text in addition to removing
line breaks while pdfcp() is running.
Parameters
----------
e : str ("y" or "")
Carriage return append user input specifying which message to print.
Returns
-------
"A carriage return WILL be appended" : str
Message returned if e set to "y" by user.
"A carriage return WON'T be appended" : str
Message returned if e set to anything other than "y" by user.
"""
# Resets global "f" variable value to True each time options is run
# so pdfcp() function can restart and continue running post-termination
global f
f = True
# Notifies user if carriage return will or won't be appended to copied text
if e is "y":
print("A carriage return WILL be appended")
return "A carriage return WILL be appended"
else:
print("A carriage return WON'T be appended")
return "A carriage return WON'T be appended" | d4aa4fbd55b483ac7e7cfa6384a01ebc253f44af | 33,751 |
import random
import math
def ferma(number: int, k: int = 100) -> bool:
"""Тест простоты Ферма
Wiki:
https://en.wikipedia.org/wiki/Fermat_primality_test
:param number: проверяемое число
:type number: int
:param k: количество тестов
:type k: int, default 100
:return: True если число псевдопростое, False если составное
:rtype: bool
"""
if number == 2:
return True
for _ in range(1, k + 1):
random_number = (random.randint(1, number) % (number - 2)) + 2
        # check that random_number and number are coprime
if math.gcd(random_number, number) != 1:
return False
        # check Fermat's condition using modular exponentiation
if pow(random_number, number - 1, number) != 1:
return False
return True | a36d193ab34db4c75b1738ce63998452b7517693 | 33,752 |
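
A usage sketch; the test is probabilistic, so the expected outputs below hold with overwhelming probability:

    print(ferma(13))   # True  (prime)
    print(ferma(15))   # False (composite)
    print(ferma(561))  # False (Carmichael number; the gcd check is what rejects it)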
def read_file_first_line(filename):
""" Read first line of given filename """
result = None
with open(filename, 'r') as f:
result = f.readline()
result = result.rstrip("\n")
return result | 4cc6502ab76e2bdbf2ccdb2367e69bf02bccf572 | 33,753 |
import torch
def reparameterize(mu, logvar):
"""
Reparameterize for the backpropagation of z instead of q.
This makes it so that we can backpropagate through the sampling of z from
our encoder when feeding the sampled variable to the decoder.
(See "The reparameterization trick" section of https://arxiv.org/abs/1312.6114)
Args:
mu (torch.Tensor): batch of means from the encoder distribution
logvar (torch.Tensor): batch of log variances from the encoder distribution
Returns:
z (torch.Tensor): batch of sampled latents from the encoder distribution that
support backpropagation
"""
# logvar = \log(\sigma^2) = 2 * \log(\sigma)
# \sigma = \exp(0.5 * logvar)
# clamped for numerical stability
logstd = (0.5 * logvar).clamp(-4, 15)
std = torch.exp(logstd)
# Sample \epsilon from normal distribution
# use std to create a new tensor, so we don't have to care
# about running on GPU or not
eps = std.new(std.size()).normal_()
# Then multiply with the standard deviation and add the mean
z = eps.mul(std).add_(mu)
return z | 339204d7471f319eef4179ca5f89fff79f68f70f | 33,754 |
def number_of_bases_above_threshold(high_quality_base_count, base_count_cutoff=2, base_fraction_cutoff=None):
"""
Finds if a site has at least two bases of high quality, enough that it can be considered
fairly safe to say that base is actually there.
:param high_quality_base_count: Dictionary of count of HQ bases at a position where key is base and values is the
count of that base.
:param base_count_cutoff: Number of bases needed to support multiple allele presence.
:param base_fraction_cutoff: Fraction of bases needed to support multiple allele presence.
:return: True if site has at least base_count_cutoff/base_fraction_cutoff bases, False otherwise
(changeable by user)
"""
# make a dict by dictionary comprehension where values are True or False for each base depending on whether the
# count meets the threshold.
# Method differs depending on whether absolute or fraction cutoff is specified
if base_fraction_cutoff:
total_hq_base_count = sum(high_quality_base_count.values())
bases_above_threshold = {base: float(count)/total_hq_base_count >= base_fraction_cutoff and
count >= base_count_cutoff for (base, count) in high_quality_base_count.items()}
else:
bases_above_threshold = {base: count >= base_count_cutoff for (base, count) in high_quality_base_count.items()}
# True is equal to 1 so sum of the number of Trues in the bases_above_threshold dict is the number of bases
# passing threshold
return sum(bases_above_threshold.values()) | c703020b0a3b0b1b3c39f38f4a4bae1be9946269 | 33,756 |
def placeholder(x):
"""
Placeholder description.
:param int x: integer number taken as input
:return int or float: returns x
"""
return x | 5ec8cff976e9b9e94e83d7a6ab8c825e4f2f0982 | 33,758 |
def get_last_valid_idx(output_k, forecast_cycle, ar_iterations):
"""Provide the last available index for training, once accounted for all forecasted timesteps."""
future_idxs = output_k[output_k >= 0]
if future_idxs.size == 0: # empty
future_idxs = 0
else:
future_idxs = abs(max(future_idxs))
return ar_iterations*forecast_cycle + future_idxs | 7f67ecee0c22402973ba9b03ad23cce424bf2ad9 | 33,760 |
from typing import Any
def key(request: Any) -> Any:
"""A key to test indexing into DictConfig."""
return request.param | d61ba3341f34ff36bbc0b4d95ba1978dbd8d1125 | 33,764 |
import torch
def batchify(data):
"""返回带有负采样的跳元模型的小批量样本."""
max_len = max(len(c) + len(n) for _, c, n in data)
centers, contexts_negatives, masks, labels = [], [], [], []
for center, context, negative in data:
cur_len = len(context) + len(negative)
centers += [center]
contexts_negatives += \
[context + negative + [0] * (max_len - cur_len)]
masks += [[1] * cur_len + [0] * (max_len - cur_len)]
labels += [[1] * len(context) + [0] * (max_len - len(context))]
return (torch.tensor(centers).reshape(
(-1, 1)), torch.tensor(contexts_negatives), torch.tensor(masks),
torch.tensor(labels)) | 731c85bef3639e565621a1f7f05dd3df40beceff | 33,767 |
def get_reference(node, identifiers):
"""Recurses through yaml node to find the target key."""
if len(identifiers) == 1:
return {identifiers[0]: node[identifiers[0]]}
if not identifiers[0]: # skip over any empties
return get_reference(node, identifiers[1:])
return get_reference(node[identifiers[0]], identifiers[1:]) | 5297e7cd7d97b7c7e494a814de8077c3a37f9b90 | 33,769 |
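
A usage sketch with a nested dict, where the identifiers are the segments of a dotted path:

    node = {"db": {"host": "localhost", "port": 5432}}
    assert get_reference(node, ["db", "host"]) == {"host": "localhost"}
    assert get_reference(node, ["", "db", "port"]) == {"port": 5432}  # empty segments are skipped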
import sys
def find_first_app_frame_and_name(ignores=None):
"""
Remove ignorable calls and return the relevant app frame. Borrowed from
structlog, but fixes an issue when the stack includes an 'exec' statement
or similar (f.f_globals doesn't have a '__name__' key in that case).
Parameters
----------
ignores: list, optional
Additional names with which the first frame must not start.
Returns
-------
tuple of (frame, name)
"""
ignores = ignores or []
f = sys._getframe()
name = f.f_globals.get('__name__')
while f is not None and f.f_back is not None and \
(name is None or any(name.startswith(i) for i in ignores)):
f = f.f_back
name = f.f_globals.get('__name__')
return f, name | 953148f64336a37ffdf40b175fefefc472db7f66 | 33,770 |
def orange_settings(c):
"""Return a dictionary of settings for the leo.core.leoAst.Orange class."""
allow_joined_strings = c.config.getBool(
'beautify-allow-joined-strings', default=False)
n_max_join = c.config.getInt('beautify-max-join-line-length')
max_join_line_length = 88 if n_max_join is None else n_max_join
n_max_split = c.config.getInt('beautify-max-split-line-length')
max_split_line_length = 88 if n_max_split is None else n_max_split
# Join <= Split.
# pylint: disable=consider-using-min-builtin
if max_join_line_length > max_split_line_length:
max_join_line_length = max_split_line_length
return {
'allow_joined_strings': allow_joined_strings,
'max_join_line_length': max_join_line_length,
'max_split_line_length': max_split_line_length,
'tab_width': abs(c.tab_width),
} | ffefc29515a79ebe50be53194bb59f49c2d8e1e2 | 33,771 |
import tarfile
def get_file_from_tar(tar_path, file_name):
"""
Using a partial or full file name, get the full path of the file within the tar.
@param tar_path: path to a tar file
@param file_name: full file name or end of the filename to find
@return the path for the file
"""
with tarfile.open(tar_path, 'r') as tar_fh:
for i in tar_fh.getmembers():
if i.name.endswith(file_name):
return i.name | 3f9e093a653aa7f6e9fc4f68138250b8038fbaa2 | 33,774 |
import pytz
def _dates_to_naive_utc(date_objects):
"""Converts dates to UTC time zone and strips time zone info.
Date objects can be time zone aware. If the input date objects are time
zone aware, they are converted to UTC time and then the time zone info is
removed from the resulting object.
If inputs dates are not time zone aware, no conversion occurs. Therefore,
care should be taken NOT to provide time zone naive dates that are not
already in UTC time.
Args:
date_objects: List of date objects.
Returns:
List of time zone naive date objects converted to UTC time.
"""
if len(date_objects) == 0:
return []
naive_dates = []
for date in date_objects:
if date.tzinfo is not None and date.tzinfo.utcoffset(date) is not None:
date = date.astimezone(pytz.utc)
naive_dates.append(date.replace(tzinfo=None))
return naive_dates | 0ba479f94f07bba8ea7d54fb7100e6f0732803b8 | 33,775 |
def scale_factor(redshift):
"""
Calculates the scale factor, a, at a given redshift.
a = (1 + z)**-1
Parameters
----------
redshift: array-like
The redshift values.
Returns
-------
a: array-like
The scale factor at the given redshift.
Examples
--------
>>> scale_factor(1)
0.5
>>> scale_factor(np.array([0, 1, 2, 3]))
    array([1.        , 0.5       , 0.33333333, 0.25      ])
"""
a = (1 + redshift)**-1.0
return a | d682cb510f2c7fe53d96b76e88cd5f2bfc964cb5 | 33,777 |
def _get_annotations(generator, sample_array):
"""
Get the ground truth annotations from the generator.
The result is a list of lists such that the size is:
all_annotations[num_images][num_classes] = annotations[num_class_annotations, 5]
Args:
generator: The generator used to retrieve ground truth annotations.
sample_array: An array of indexes of images to sample from the generator.
Returns:
A list of lists containing the annotations for each image in the generator.
"""
n_samples = sample_array.shape[0]
all_annotations = [[None for i in range(generator.num_classes())] for j in range(n_samples)]
for idx in range(n_samples):
i = sample_array[idx]
# load the annotations
annotations = generator.load_annotations(i)
# copy detections to all_annotations
for label in range(generator.num_classes()):
if not generator.has_label(label):
continue
all_annotations[idx][label] = annotations['bboxes'][annotations['labels'] == label, :].copy()
return all_annotations | 08f8f1c263ce01b29fce88d0b416304dc3b1b5fc | 33,778 |
import re
def asset_name_from_label(label):
"""Builds a filename from the asset label string.
Args:
- label (str): User-friendly label
Returns:
- the asset file name
"""
assetDir = re.sub(r'[^\w]', '', re.sub(' ', '_', label)) + '.rma'
return assetDir | 880e10c3f9b717f7551b910770820b502d6cd7af | 33,779 |
def _time_string(time_value):
"""hrs:mins:secs"""
minutes, seconds = divmod(time_value, 60)
hours, minutes = divmod(minutes, 60)
return '%d:%02d:%02d' % (hours, minutes, seconds) | d690dcd48aba7712b9a4a1772bb7c37e4d7b7cbd | 33,780 |
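
A quick usage sketch:

    assert _time_string(3661) == '1:01:01'
    assert _time_string(59) == '0:00:59'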
import pathlib
def path_to_uri(path):
"""
Convert OS specific path to file:// URI.
Accepts either unicode strings or bytestrings. The encoding of any
bytestring will be maintained so that :func:`uri_to_path` can return the
same bytestring.
    Returns a file:// URI as a unicode string.
"""
return pathlib.Path(path).as_uri() | 4d05bbb7bb07f9bb9f6d86ab4980368f89c0db60 | 33,781 |
def extend_train_set(sentences1, sentences2, is_similar, test_sentences1, test_sentences2, test_labels):
"""
    Increase the size of the training set by adding the second half of the testing
set to the training set
Args:
sentences1 (list): first list of training sentences
sentences2 (list): second list of training sentences
is_similar (list): list of training labels
test_sentences1 (list): first list of testing sentences
test_sentences2 (list): second list of testing sentences
test_labels (list): list of testing labels
Returns:
sentences1 (list): extended list of training sentences
sentences2 (list): extended list of training sentences
is_similar (list): extended list of training labels
test_sentences1 (list): shortened list of testing sentences
test_sentences2 (list): shortened list of testing sentences
test_labels (list): shortened list of testing labels
"""
    sentences1 += test_sentences1[len(test_sentences1)//2:]
    sentences2 += test_sentences2[len(test_sentences2)//2:]
    is_similar += test_labels[len(test_labels)//2:]
    test_sentences1 = test_sentences1[:len(test_sentences1)//2]
    test_sentences2 = test_sentences2[:len(test_sentences2)//2]
    test_labels = test_labels[:len(test_labels)//2]
return sentences1, sentences2, is_similar, test_sentences1, test_sentences2, test_labels | cd98ee71f95dc88f2487daaaffaeba3a8066825e | 33,782 |
def abs2(x):
"""Square modulus of x. Fastest way possible for a numpy array."""
return x.real**2 + x.imag**2 | 5b733ed33cda6f2a5aebf938a8291631fc65af08 | 33,783 |
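
A usage sketch; this is equivalent to numpy.abs(x)**2 but skips the square root:

    import numpy as np
    x = np.array([3 + 4j, 1 - 1j])
    assert np.allclose(abs2(x), [25.0, 2.0])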
def mock_get_benchmark_config(benchmark):
"""Mocked version of common.benchmark_config.get_config."""
if benchmark == 'benchmark1':
return {
'unsupported_fuzzers': ['fuzzer2'],
}
if benchmark == 'benchmark2':
return {
'unsupported_fuzzers': ['fuzzer2', 'fuzzer3'],
}
return {} | 2496495fae2293e6ea4b5411f3f44cba1a8dee5d | 33,784 |
def es_subcadena(adn1, adn2):
"""
(str, str) -> boolean
    Function that determines whether one DNA sequence is a substring of another
>>> es_subcadena('atcgta', 'gta')
True
>>> es_subcadena('atcg', 'tta')
False
    :param adn1: str with sequence 1
    :param adn2: str with sequence 2
    :return: whether the sequence in adn2 is a substring of the sequence in adn1
"""
    if int == type(adn2):
        raise TypeError(str(adn2) + ' integers are not allowed')
    if int == type(adn1):
        raise TypeError(str(adn1) + ' integers are not allowed')
    if float == type(adn1):
        raise TypeError(str(adn1) + ' floats are not allowed')
    return adn2 in adn1 | 91e981b41194fdd3a11d6d59a91f4c88d7ffa118 | 33,785 |
def extractRadiantSampleParameters(sample_list):
""" Given a list of samples, extract the parameters into a list. """
jd_list = [s.jd for s in sample_list]
la_sun_list = [s.la_sun for s in sample_list]
rag_list = [s.ra_g for s in sample_list]
decg_list = [s.dec_g for s in sample_list]
vg_list = [s.vg for s in sample_list]
lam_list = [s.lam for s in sample_list]
bet_list = [s.bet for s in sample_list]
return jd_list, la_sun_list, rag_list, decg_list, vg_list, lam_list, bet_list | 5f39f27dde793609a3754aa7ee0a8f7c12e12d7e | 33,786 |
def get_batch_len(batch):
"""Return the number of data items in a batch."""
if isinstance(batch, (tuple,list)): # non-vectorized modalities
return len(batch[0])
return len(batch) | 9794e5c050edf951dcc96166364e429a3d61df27 | 33,787 |
from typing import Tuple
def tuple_to_string(tup: Tuple) -> str:
"""Standardized assembly of tuple strings for DTS files
:param tup: typing.Tuple
:return: str
"""
string = ' '.join(tup)
return string | d1abafff084bfd05a4fbfc5116f8004c390a97da | 33,788 |
def extract_pillar_shape(grid):
"""Returns string indicating whether whether pillars are curved, straight, or vertical as stored in xml.
returns:
string: either 'curved', 'straight' or 'vertical'
note:
resqml datasets often have 'curved', even when the pillars are actually 'vertical' or 'straight';
use actual_pillar_shape() method to determine the shape from the actual xyz points data
"""
if grid.pillar_shape is not None:
return grid.pillar_shape
ps_node = grid.resolve_geometry_child('PillarShape')
if ps_node is None:
return None
grid.pillar_shape = ps_node.text
return grid.pillar_shape | 12c754543b89cde31f9c3ef0ebc04d4322ce71f0 | 33,789 |
def generate_shape(shape_id):
    """
    Utility function called by generate_target
    """
    # map each shape id to its list of cell offsets
    shapes = {
        1: [[0, 0], [0, 1], [1, 0], [1, 1]],
        2: [[0, 0], [1, 0], [2, 0], [3, 0]],
        3: [[0, 0], [0, 1], [0, 2], [0, 3]],
        4: [[0, 0], [1, 0], [2, 0], [2, 1]],
        5: [[0, 0], [1, -2], [1, -1], [1, 0]],
        6: [[0, 0], [0, 1], [1, 1], [2, 1]],
        7: [[0, 0], [0, 1], [0, 2], [1, 0]],
        8: [[0, 0], [1, 0], [2, -1], [2, 0]],
        9: [[0, 0], [0, 1], [0, 2], [1, 2]],
        10: [[0, 0], [0, 1], [1, 0], [2, 0]],
        11: [[0, 0], [1, 0], [1, 1], [1, 2]],
        12: [[0, 0], [1, 0], [1, 1], [2, 0]],
        13: [[0, 0], [1, -1], [1, 0], [1, 1]],
        14: [[0, 0], [1, -1], [1, 0], [2, 0]],
        15: [[0, 0], [0, 1], [0, 2], [1, 1]],
        16: [[0, 0], [0, 1], [1, -1], [1, 0]],
        17: [[0, 0], [1, 0], [1, 1], [2, 1]],
        18: [[0, 0], [0, 1], [1, 1], [1, 2]],
        19: [[0, 0], [1, -1], [1, 0], [2, -1]],
    }
    return shapes.get(shape_id) | d07cda9f4eafea0a5f0db416340a81fd9776cdd6 | 33,791 |
def parse_archive_csv(path):
"""
Parses archives names file. Returns list of filename lists: [[archive1, archive2, archive3], ...]
:param path:
:return:
"""
with open(path) as f:
lines = f.readlines()
_archives = []
for line in lines:
        _archives.append(line.split())
return _archives | ecac6efe412b9f38d0335c2bbe82dc77e6d19547 | 33,794 |
import os
def get_pg_url() -> str:
""" create postgresql connection string """
return (
f"postgres+psycopg2://{os.getenv('POSTGRESQL_USER')}"
f":{os.getenv('POSTGRESQL_PASSWORD')}@{os.getenv('POSTGRES_SERVICE_HOST', 'postgres')}"
f":{os.getenv('POSTGRESQL_PORT', '5432')}/{os.getenv('POSTGRESQL_DATABASE')}"
) | 066c4de4308aa50664c47a1f22aa8ce15598b5c1 | 33,795 |
def ccw_to_cw(ccw_points):
"""
Converts counter clockwise points to clockwise points
"""
cw_points = ccw_points.copy()
cw_points[:, 4:7] = ccw_points[:, 13:16]
cw_points[:, 7:10] = ccw_points[:, 10:13]
cw_points[:, 10:13] = ccw_points[:, 7:10]
cw_points[:, 13:16] = ccw_points[:, 4:7]
return cw_points | a5486b4063cad75257bfeff88f94984a08787deb | 33,796 |
import argparse
def parse_arguments():
"""
Parse input arguments for train script
i.e.: python train.py --arch=vgg16 --learning_rate=0.01 --hidden_units="4096, 2048"
:return: Parsed arguments
"""
parser = argparse.ArgumentParser()
parser.add_argument("data_dir", type=str, help="Path of flower images")
parser.add_argument("--save_dir", type=str, default="checkpoints", help="Path for save checkpoints")
parser.add_argument("--arch", type=str, default="vgg16", help="Pre-trained Model")
parser.add_argument("--learning_rate", type=float, default="0.01", help="Learning rate")
parser.add_argument("--hidden_units", type=str, default="4096, 2048", help="Number of nodes per hidden layer")
parser.add_argument("--epochs", type=int, default="10", help="Number of epochs")
parser.add_argument("--gpu", help="Use GPU")
return parser.parse_args() | e6e4280034a3c07bef761a3b58cfc9ba99cc8f8c | 33,797 |
import os
def get_package_name() -> str:
"""Get the name of the package."""
cwd = os.getcwd()
return cwd.split(os.path.sep)[-1] | b5e24edec2202a7c370b4cd7b8cd4f5a5184c429 | 33,798 |
def _get_tcp_slave_address(config):
"""
Get the TCP slave address to be used for the tests.
"""
return config.getoption("--tcp-address") | adec5d4a52f36737766fbc3e7d9b35a8bc5f6eb8 | 33,800 |
def mean_group_min_count(df, group_col, mean_col, min_count=10):
"""Calculate mean of column col in df, grouped by group_col, but normalising the
sum by either the actual number of rows in the group or min_count, whichever is
larger
"""
counts = df.groupby(group_col)[mean_col].count()
counts[counts < min_count] = min_count
sums = df.groupby(group_col)[mean_col].sum()
return sums / counts | ced80f28c0231c84f394b5cd398af94012413a5a | 33,801 |
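
A usage sketch with hypothetical column names: a group with only 2 rows gets its sum divided by min_count=10, shrinking its mean:

    import pandas as pd
    df = pd.DataFrame({"g": ["a"] * 2 + ["b"] * 12, "v": [5.0] * 2 + [3.0] * 12})
    result = mean_group_min_count(df, "g", "v", min_count=10)
    assert result["a"] == 1.0  # sum 10 / max(2, 10)  = 1.0
    assert result["b"] == 3.0  # sum 36 / max(12, 10) = 3.0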
def parse_filter_parameters (parameters, parse):
"""
Parse command parameters to get information filtering flags for the command.
:param parameters: The list of parameters provided to the command.
:param parse: The list of filtering parameter names to parse.
:return A list with the values for the filtering flags. The order in which they are returned
matches the order they were defined in the parsing list.
"""
    parsed = []
    for flag in parse:
        val = parameters.get(flag, "false")
        parsed.append(val == "true")
    if not any(parsed):
        parsed = [True] * len(parse)
    return parsed | 479ce6b11be9137523c842e64629159b3424b090 | 33,802 |
def subsample_fourier(x, k):
"""Subsampling in the Fourier domain.
Subsampling in the temporal domain amounts to periodization in the Fourier
domain, so the input is periodized according to the subsampling factor.
Parameters
----------
x : numpy array
Input numpy array with at least 3 dimensions.
k : int
The subsampling factor.
Returns
-------
res : numpy array
The input numpy array periodized along the last axis to yield a
        numpy array of size x.shape[-1] // k along that dimension.
"""
N = x.shape[-1]
res = x.reshape(x.shape[:-2] + (k, N // k)).mean(axis=-2, keepdims=True)
return res | eba3a9d4a0fea71c0d42c3b7da7b762e56766974 | 33,803 |
def Column_hasCode(cont_Code_center):
"""
:param cont_Code_center: The sequence of Code objects contained in this Column
:type cont_Code_center: Array
"""
return len(cont_Code_center) > 0 | d54c3fe293d032106b5588dc09a4f7609cea754d | 33,805 |
def _get_bone_matrix(bone):
"""bone should be a Bone
B_b
"""
if bone.parent:
b_mat = bone.matrix_local.inverted() * bone.parent.matrix_local
else:
b_mat = bone.matrix_local.inverted()
return b_mat | ee4ed9fc131172bb100eea2558f3573796031b0e | 33,806 |
def parse_multiple(s, f, values=None):
"""Parse one or more comma-separated elements, each of which is parsed
using function f."""
if values is None: values = []
values.append(f(s))
if s.pos < len(s) and s.cur == ',':
s.pos += 1
return parse_multiple(s, f, values)
else:
return values | 3ee6dbf85769541bdadcdf4f2de910f54d8dbef5 | 33,808 |
from numpy.distutils.misc_util import Configuration
def configuration(parent_package='', top_path=None):
"""Configure the package."""
config = Configuration('camcan', parent_package, top_path)
config.add_subpackage('datasets')
config.add_subpackage('datasets/tests')
config.add_subpackage('datasets/tests/data')
config.add_subpackage('datasets/tests/data/sub-0')
config.add_subpackage('datasets/tests/data/sub-1')
config.add_subpackage('preprocessing')
config.add_subpackage('preprocessing/tests')
config.add_subpackage('preprocessing/tests/data')
config.add_subpackage('utils')
config.add_subpackage('utils/tests')
return config | 04c0c97688e335eba96b1c16e0f86738fb6c41e7 | 33,810 |
import os
import fnmatch
def glob_r(directory, pattern):
"""Recursively scan a directory looking for files matching the given pattern.
The pattern matches the full path relative to the given directory. Matches are
checked using the fnmatch module.
Args:
directory: The directory to start searching in.
pattern: A fnmatch pattern.
Returns:
A list of absolute paths that match.
"""
full_paths = []
for root, _, filenames in os.walk(directory):
for filename in filenames:
full_paths.append(os.path.join(root, filename))
rv = []
for full_path in full_paths:
if fnmatch.fnmatch(full_path[len(directory) + 1:], pattern):
rv.append(full_path)
return rv | 5222d84ba77fcf46f732fbb63909586929f26aad | 33,812 |
import torch
def evaluateMasks(predSegmentations, gtSegmentations, device, pred_non_plane_idx, gt_non_plane_idx=20, printInfo=False):
"""
    :param predSegmentations: predicted segmentation map (numpy array)
    :param gtSegmentations: ground-truth segmentation map (numpy array)
    :param device: torch device on which to run the computation
    :param pred_non_plane_idx: segment index marking non-plane pixels in the prediction
    :param gt_non_plane_idx: segment index marking non-plane pixels in the ground truth
    :param printInfo: whether to print the computed statistics
    :return: list [RI, VOI, SC] of segmentation statistics
"""
predSegmentations = torch.from_numpy(predSegmentations).to(device)
gtSegmentations = torch.from_numpy(gtSegmentations).to(device)
pred_masks = []
if pred_non_plane_idx > 0:
for i in range(pred_non_plane_idx):
mask_i = predSegmentations == i
mask_i = mask_i.float()
if mask_i.sum() > 0:
pred_masks.append(mask_i)
else:
assert pred_non_plane_idx == -1 or pred_non_plane_idx == 0
for i in range(gt_non_plane_idx + 1, 100):
mask_i = predSegmentations == i
mask_i = mask_i.float()
if mask_i.sum() > 0:
pred_masks.append(mask_i)
predMasks = torch.stack(pred_masks, dim=0)
gt_masks = []
if gt_non_plane_idx > 0:
for i in range(gt_non_plane_idx):
mask_i = gtSegmentations == i
mask_i = mask_i.float()
if mask_i.sum() > 0:
gt_masks.append(mask_i)
else:
assert pred_non_plane_idx == -1 or pred_non_plane_idx == 0
for i in range(gt_non_plane_idx+1, 100):
mask_i = gtSegmentations == i
mask_i = mask_i.float()
if mask_i.sum() > 0:
gt_masks.append(mask_i)
gtMasks = torch.stack(gt_masks, dim=0)
valid_mask = (gtMasks.max(0)[0]).unsqueeze(0)
gtMasks = torch.cat([gtMasks, torch.clamp(1 - gtMasks.sum(0, keepdim=True), min=0)], dim=0) # M+1, H, W
predMasks = torch.cat([predMasks, torch.clamp(1 - predMasks.sum(0, keepdim=True), min=0)], dim=0) # N+1, H, W
intersection = (gtMasks.unsqueeze(1) * predMasks * valid_mask).sum(-1).sum(-1).float()
union = (torch.max(gtMasks.unsqueeze(1), predMasks) * valid_mask).sum(-1).sum(-1).float()
N = intersection.sum()
RI = 1 - ((intersection.sum(0).pow(2).sum() + intersection.sum(1).pow(2).sum()) / 2 - intersection.pow(2).sum()) / (
N * (N - 1) / 2)
joint = intersection / N
marginal_2 = joint.sum(0)
marginal_1 = joint.sum(1)
H_1 = (-marginal_1 * torch.log2(marginal_1 + (marginal_1 == 0).float())).sum()
H_2 = (-marginal_2 * torch.log2(marginal_2 + (marginal_2 == 0).float())).sum()
B = (marginal_1.unsqueeze(-1) * marginal_2)
log2_quotient = torch.log2(torch.clamp(joint, 1e-8) / torch.clamp(B, 1e-8)) * (torch.min(joint, B) > 1e-8).float()
MI = (joint * log2_quotient).sum()
voi = H_1 + H_2 - 2 * MI
IOU = intersection / torch.clamp(union, min=1)
SC = ((IOU.max(-1)[0] * torch.clamp((gtMasks * valid_mask).sum(-1).sum(-1), min=1e-4)).sum() / N + (
IOU.max(0)[0] * torch.clamp((predMasks * valid_mask).sum(-1).sum(-1), min=1e-4)).sum() / N) / 2
info = [RI.item(), voi.item(), SC.item()]
if printInfo:
print('mask statistics', info)
pass
return info | 701102fe285134a7cff3767e6a1744de0529e63a | 33,813 |
from typing import List
def _get_columns(words: List, headers: List[str]) -> List:
"""Finds columns based on headers.
Finds column members by finding all words in `words` that are below `head`
and have (roughly) the same x0 value as `header[i]` for all headers in
    `headers`. Pads lists with {"text": ""}, which is the pdfplumber equivalent
of an empty string.
Args:
words: List of word objects
headers: List of column headers
Returns:
List: Cells of the column
"""
cols = []
for header in headers:
head = next((w for w in words if w["text"] == header))
rows = []
for word in words:
left_aligned = abs(word["x0"] - head["x0"]) < 0.5
right_aligned = abs(word["x1"] - head["x1"]) < 0.5
below_header = word["bottom"] >= head["bottom"]
if (left_aligned or right_aligned) and below_header:
rows.append(word)
cols.append(rows)
num_rows = max(len(x) for x in cols)
for col in cols:
col += [{"text": ""}] * (num_rows - len(col))
return cols | e791ca35c3c8aa191a1842edbb6126f88f4f4a4c | 33,814 |
from datetime import datetime
def epochs_to_timestamp(epochs: int, date_format: str = "%B %d, %Y %H:%M:%S %p") -> str:
"""
Converts epochs time representation to a new string date format.
Args:
epochs (int): time in epochs (seconds)
date_format (str): the desired format that the timestamp will be.
Returns:
str: timestamp in the new format, empty string in case of a failure
"""
try:
return datetime.utcfromtimestamp(epochs).strftime(date_format)
except TypeError:
return "" | 1535be9f82292a9387abf1123e934af02f743c92 | 33,816 |
from typing import Dict
from typing import Any
def dict2argstr(d: Dict[str, Any]) -> str:
"""Convert a dict to a text of kwd=arg pairs"""
return ",".join("{!s}={!r}".format(key, val) for (key, val) in d.items()) | 88955a2effb166e11f6fec87bb959f536f97d197 | 33,818 |
def load_chromosomes(fpath):
"""
    :param fpath: path to a whitespace-delimited file with chromosome name and length columns
    :return: dict mapping chromosome name to its length (int)
"""
chroms = dict()
with open(fpath, 'r') as infile:
for line in infile:
if not line.strip():
continue
cols = line.strip().split()
chroms[cols[0]] = int(cols[1])
return chroms | 3aaf3e6c85fb9ca64094b3ae06652b380371231f | 33,819 |
import six
def safebinary(obj):
"""
Make sure obj is binary type
"""
if isinstance(obj, six.binary_type):
return obj
try:
return six.binary_type(obj)
except UnicodeEncodeError:
return obj.encode('utf-8') | 6859c802456bccf54db095ccf254d2607e63ef8f | 33,820 |
def _node_types_match(node, node_template):
"""
Verifies whether input and output types of two nodes match. The first has fixed arities of
the input type whereas the second is a template with variable arities.
:param node: Node with fixed input types arities.
:param node_template: A node template with variable arities.
:return bool: True if the types of the nodes match.
"""
# out types match?
if node.out_type != node_template.out_type:
return False
# in types (and arities) match?
in_types = node.in_type
template_types = node_template.type_arity_template
if len(in_types) != len(template_types):
return False
for in_type_1, in_type_2 in zip(in_types, template_types):
# check type compatibility
if in_type_1.name != in_type_2.prim_type:
return False
# check arity compatibility
if not in_type_2.is_valid_arity(in_type_1.arity):
return False
return True | 46cb2c6c23aa6965536988c5918f4efa87dc9065 | 33,822 |
import re
def mapping_account(account_map, keyword):
"""Finding which key of account_map contains the keyword, return the corresponding value.
Args:
account_map: A dict of account keywords string (each keyword separated by "|") to account name.
keyword: A keyword string.
Return:
An account name string.
Raises:
KeyError: If "DEFAULT" keyword is not in account_map.
"""
if "DEFAULT" not in account_map:
raise KeyError("DEFAULT is not in " + account_map.__str__)
account_name = account_map["DEFAULT"]
for account_keywords in account_map.keys():
if account_keywords == "DEFAULT":
continue
if re.search(account_keywords, keyword):
account_name = account_map[account_keywords]
break
return account_name | 7fa67f75c1b621ca00d87359072c7516b3f097b7 | 33,823 |
def mesh(osi, tag, args):
"""
    Create a mesh object. Available mesh types include: line mesh, tri mesh,
    quad mesh, tet mesh, part mesh, and bg mesh.
Parameters
----------
osi: o3seespy.OpenSeesInstance
tag: None
args: None
"""
_parameters = [tag, *args]
return osi.to_process("mesh", _parameters) | d5e6f4c29381e6ec1eb7307f853eaea740f133f2 | 33,825 |
import re
def expand_contractions(tweet):
"""
Expands language contractions found in the English vocabulary
in the tweet.
INPUT:
tweet: original tweet as a string
OUTPUT:
tweet with its contractions expanded
"""
tweet = re.sub("can't", 'cannot', tweet, flags=re.I)
#tweet = re.sub("cant", 'can\'t', tweet, flags=re.I)
tweet = re.sub("won't", 'will not', tweet, flags=re.I)
#tweet = re.sub("wont", 'won\'t', tweet, flags=re.I)
tweet = re.sub("n't", ' not', tweet, flags=re.I)
tweet = re.sub("i'm", 'i am', tweet, flags=re.I)
#tweet = re.sub("im", 'i am', tweet, flags=re.I)
tweet = re.sub("'re", ' are', tweet, flags=re.I)
tweet = re.sub("it's", 'it is', tweet, flags=re.I)
tweet = re.sub("that's", 'that is', tweet, flags=re.I)
tweet = re.sub("'ll", ' will', tweet, flags=re.I)
tweet = re.sub("'l", ' will', tweet, flags=re.I)
tweet = re.sub("'ve", ' have', tweet, flags=re.I)
tweet = re.sub("'d", ' would', tweet, flags=re.I)
tweet = re.sub("he's", 'he is', tweet, flags=re.I)
tweet = re.sub("she's", 'she is', tweet, flags=re.I)
tweet = re.sub("what's", 'what is', tweet, flags=re.I)
tweet = re.sub("who's", 'who is', tweet, flags=re.I)
tweet = re.sub("'s", '', tweet, flags=re.I)
tweet = re.sub("\'em", ' \'em', tweet, flags=re.I)
return tweet | 5ba8fd9b59488935e3a4f49372b6c5d1959537d8 | 33,826 |
import math
import torch
def rand_angles():
"""
random rotation angles
"""
alpha, gamma = 2 * math.pi * torch.rand(2)
beta = torch.rand(()).mul(2).sub(1).acos()
return alpha, beta, gamma | ff1b930989dfc78d4dfec00d569c7c410a3ddcc0 | 33,827 |
def is_correct(dictionary, word):
""" (Open File for reading, str) -> bool
Return True iff word is a correctly-spelled word in dictionary.
Note: due to a PCRS-specific constraint, the example calls below use Open with a capital 'O'.
The actual function has a lowercase 'o'.
>>> dict1 = Open('dict.txt', 'r')
>>> is_correct(dict1, "Zyrtec")
True
>>> dict1.close()
>>> dict1 = Open('dict.txt', 'r')
>>> is_correct(dict1, "lolz")
False
>>> dict1.close()
"""
for line in dictionary:
if line.strip() == word:
return True
return False | 3fc9d0d112ce5ec4f736fa38318f0b14dcd215d5 | 33,828 |
def _closest_ref_length(references, trans_length):
"""Find the reference that has the closest length to the translation.
Parameters
----------
references: list(list(str))
A list of references.
trans_length: int
Length of the translation.
Returns
-------
closest_ref_len: int
Length of the reference that is closest to the translation.
"""
ref_lengths = (len(reference) for reference in references)
closest_ref_len = min(ref_lengths,
key=lambda ref_length: (abs(ref_length - trans_length), ref_length))
return closest_ref_len | aaa5b2f021fc1996d30258af8dbe2ada69bc85aa | 33,829 |
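
A usage sketch; note that ties in distance are broken toward the shorter reference:

    refs = [["a"] * 3, ["a"] * 5, ["a"] * 7]
    assert _closest_ref_length(refs, 4) == 3  # |3-4| == |5-4|, so the shorter length wins
    assert _closest_ref_length(refs, 6) == 5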
def check_for_comment_block(line, file_type):
"""
    :param line: a single source line
    :param file_type: the file extension, e.g. "py" or "java"
    :return: True if the line opens (but does not close) a comment block
"""
line = line.strip()
if file_type == "py":
return line.startswith('"""') and ('"""' not in line and len(line) > 3)
if file_type in ["java", "js", "jsx", "c", "cpp", "cxx", "h"]:
return line.startswith("/*") and "*/" not in line
if file_type in ["jsp"]:
return line.startswith("<%--") and not line.endswith("%-->") | d1ef849cdacb8eef441fb0c6c69f26d28e5cca2a | 33,830 |
def cli_parse(parser):
"""Add method specific options to CLI parser.
Parameters
----------
parser : argparse object
Returns
----------
Updated argparse object
"""
parser.add_argument('--max-order', type=int, required=False, default=2,
choices=[1, 2],
help='Maximum order of sensitivity indices \
to calculate')
parser.add_argument('--skip-values', type=int, required=False, default=1024,
help='Number of sample points to skip (default: 1024)')
# hacky way to remove an argument (seed option is not relevant for Saltelli)
remove_opts = [x for x in parser._actions if x.dest == 'seed']
    for x in remove_opts:
        parser._handle_conflict_resolve(None, [('--seed', x), ('-s', x)])
return parser | c267bb818c8fa317162304c5aa37d02bad63daee | 33,832 |
import socket
def get_ip_addresses(ip_address_or_hostname):
"""
Provide an ip or hostname and return all valid ip4 or ipv6 addresses.
:return: ip addresses
"""
addresses = []
for res in socket.getaddrinfo(ip_address_or_hostname, 0):
addresses.append(res[4][0])
return list(set(addresses)) | b64cef29932d3166c63faceb196b70a5436806f6 | 33,833 |
from datetime import datetime
def print_stamp():
"""
Pre: User needs nothing to pass in.
Post: Returns a string catalogging the date and time in the format of [month day year, 24hour:minute:seconds]
"""
day = datetime.today()
    string = day.strftime("[%b %d, %Y, %H:%M:%S]")
return string + " " | d2fc9a83100fa7d39f98e036a594d8dcc0be6f81 | 33,834 |
import pathlib
import json
def save(data: dict, path: pathlib.Path) -> bool:
"""Save a provided dictionary to file in json.
Parameters
----------
data : dict
The dictionary to be saved to file.
path : pathlib.Path
        The path to the file to be saved. This path should include the
        file name and any file extension you wish, but ideally use the
        .json extension for transparency.
Returns
-------
bool,
        Returns true if the save operation terminated without error.
"""
with open(path, 'w') as fp:
json.dump(data, fp, indent=4, sort_keys=True)
return True | e272abbc4e0f4a1193d2bb4ed7b11ed452519daa | 33,835 |
def filter_none(data):
"""Helper function which drop dict items with a None value."""
assert isinstance(data, dict), "Dict only"
out = {key: value for key, value in data.items() if value is not None}
return out | 25c09c878e1e5c25b93d4947937d38f4bf2b965a | 33,836 |
from tqdm import tqdm
import numpy as np
import torch
def mixture_proposal_elbo_samples(num_samples, hier_elbo, checkpoints):
"""Use a mixture of q(nu) distributions as proposals, at many temperatures.
q(nu) = \sum_k \pi_k q_k(nu).
To sample, draw k ~ Categorical(1/K), then draw nu ~ q_k(nu).
"""
raise ValueError('Incorrect: does not include q_k(nu) for all k in proposal densities!')
component_samples = []
num_components = len(checkpoints)
for checkpoint in tqdm(checkpoints):
checkpoint = torch.load(checkpoint)
hier_elbo.q_nu.load_state_dict(checkpoint['q_nu'])
res = hier_elbo.compute_objective(num_samples)
component_samples.append(res['hier_elbo'])
component_samples = np.array(component_samples)
mixture_components = np.random.choice(num_components, size=num_samples, replace=True)
return component_samples[mixture_components, np.arange(num_samples)] | 5f412720b64b25c245eaed6b6d0d4e18e2238b69 | 33,837 |
def pfunc_doctor_banding(args):
"""Intermediate function, because we can't pass a member function to the
concurrent.futures system (it seems), and can only pass a single parameter,
so we pass the object and the function argument to this (as a single
list)."""
rota = args[0]
doc = args[1]
return doc.banding_info(rota) | f2f33b020852819ab9e827e45600db1533e67216 | 33,838 |
import argparse
def get_parser() -> argparse.ArgumentParser:
"""Returns CLI arguments parser"""
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"ticket_name", type=str, help="Jira Ticket Number (e.g., BASE-12345)"
)
parser.add_argument(
"--open", action="store_true", help="Run `open` command instead"
)
return parser | 61266fc5841d0ad09ae03f45686b90bde729925c | 33,840 |
import numpy
def CalculateDelay(start, stop, step, points):
"""Quantizes the time coordinates for the delay.
Quantizes points by rounding the timestamps downwards to the nearest
point in the time sequence start, start+step, start+2*step... Takes
the average of the delays of points rounded to the same. Returns
masked array, in which time points with no value are masked.
"""
grouped_delays = [[] for _ in numpy.arange(start, stop + step, step)]
rounded_value_index = lambda x: int((x - start) / step)
for point in points:
grouped_delays[rounded_value_index(point.real_send_time_ms)].append(
point.absdelay)
regularized_delays = [
numpy.average(arr) if arr else -1 for arr in grouped_delays
]
return numpy.ma.masked_values(regularized_delays, -1) | d8d5e81a0f46aa2d3ec37777f1a746365666fe3d | 33,841 |
def readData(infile):
"""read data from infile."""
dd = {}
for line in infile:
if line[0] == "#":
continue
d = line[:-1].split("\t")
contig, start, end, score = d[0], int(d[3]), int(d[4]), float(d[5])
# if contig != "I": continue
if contig not in dd:
dd[contig] = []
dd[contig].append((start, end, score))
return dd | 3ccdfed40b610f452ae1d0e9b4f670e1a71b8b72 | 33,842 |
def _parse_positions(S, M):
"""
    returns the set of all indices in S where the marker M occurs
"""
all_locations = set()
for index in range(0, len(S)):
if S[index] is M:
all_locations.add(index)
return all_locations | 127c92b73298eb050ba7ff3b101714321b085d80 | 33,844 |
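
A usage sketch; note the function compares with `is`, which behaves as expected here because CPython interns short string literals:

    assert _parse_positions(["a", "b", "a", "c"], "a") == {0, 2}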
def clean_bulk_add(contents):
"""
A helper function for adding many articles at a time (by uploading a
JSON file of article information). Clean the data, ensure that only
complete entries are included, and add all of the entries to our database.
"""
clean_articles = []
for article in contents:
try:
if "timestamp" not in article:
article["timestamp"] = None
article["authors"] = ",".join(article["authors"])
if "doi" not in article:
article["doi"] = None
if "experiments" in article:
article["experiments"] = str(article["experiments"])
else:
article["experiments"] = str([])
if "meshHeadings" in article:
article["metadata"] = str(
{"meshHeadings": article["meshHeadings"]})
del article["meshHeadings"]
else:
article["metadata"] = str({"meshHeadings": []})
if "journal" in article and "year" in article:
article["reference"] = article["authors"] + \
"(" + str(article["year"]) + ") " + article["journal"]
del article["journal"]
del article["year"]
else:
article["reference"] = None
# once the article data is clean, add it to a separate list that
# we'll pass to PeeWee
article = {
"timestamp": article["timestamp"],
"abstract": article["abstract"],
"authors": article["authors"],
"doi": article["doi"],
"experiments": article["experiments"],
"metadata": article["metadata"],
"neurosynthid": None,
"pmid": article["pmid"],
"reference": article["reference"],
"title": article["title"]
}
clean_articles.append(article)
except BaseException:
pass
return clean_articles | cca48e35099fa061d6a9198e90aeaab18fdfb704 | 33,845 |
from pathlib import Path
def get_pocket_name(s):
""" For xuefeng generated data files """
s = Path(s).stem
s = str(s)
idx = s.find("_round1_dock_ena+db_fingerprints")
s = s[:idx]
return s | 47d6fe6d07afb18068e74b65f27ec767d3c866ed | 33,846 |
import random
def retrieve(team, year):
"""
Part of the Retriever interface
returns random popularity for the last 10 years for 30 teams.
"""
return random.randint(100,200) | 3b80777c2a4077074b1745e4e8456fafc8db9fa8 | 33,847 |
def _get_synid_dd(syn, cohort, synid_table_prissmm):
"""Get Synapse ID of the most current PRISSMM non-PHI data dictionary for the BPC cohort."""
query = f"SELECT id FROM {synid_table_prissmm} WHERE cohort = '{cohort}' ORDER BY name DESC LIMIT 1"
query_results = syn.tableQuery(query)
synid_folder_prissmm = query_results.asDataFrame()["id"][0]
synid_prissmm_children = syn.getChildren(synid_folder_prissmm)
for child in synid_prissmm_children:
if child["name"] == "Data Dictionary non-PHI":
return child["id"]
return None | cdd59827e9f8a955d3be4ba9b511b0d37fb145e1 | 33,849 |
from operator import add
def _solve_method_2(N, queries):
"""
This section right here uses some Pythonic optimizations, but they still
don't cut on speed. This at least is much faster I think, assuming each of
these operations is atomic.
"""
arr = [0] * N
largest = -1
for query in queries:
difference = abs(query[0] - (query[1]+1))
temp_arr = [query[2]] * difference
temp_arr_2 = arr[query[0]-1:query[1]]
arr[query[0]-1:query[1]] = list(map(add, temp_arr_2, temp_arr))
return max(arr) | 6daaf01512c18cf17bafe42a9da9de1425ba9b17 | 33,850 |
def max_integer(my_list=[]):
"""
finds the largest integer of a list
"""
if len(my_list) == 0:
return (None)
my_list.sort()
return (my_list[-1]) | d6aa168b1d5207d04761542285bb388c1d235c2b | 33,851 |
def unwrap_twist(twist):
"""
Unwraps geometry_msgs/Twist into two tuples of linear and angular velocities
"""
l_x = twist.linear.x
l_y = twist.linear.y
l_z = twist.linear.z
a_x = twist.angular.x
a_y = twist.angular.y
a_z = twist.angular.z
return (l_x, l_y, l_z), (a_x, a_y, a_z) | 66c4eceeea7791790252ea01c7e8aabe28ea9be0 | 33,852 |
import math
def refract(uv, n, etai_over_etat):
"""
refraction
"""
cos_theta = min(uv.negative().dot(n), 1.0)
r_out_perp = (uv + n.multiply(cos_theta)).multiply(etai_over_etat)
r_out_parallel = n.multiply(-math.sqrt(abs(1.0 - r_out_perp.length_square())))
return r_out_parallel + r_out_perp | 6af8bb5c57518acc432ba040bf1091567664856b | 33,855 |
def _get_axis_pivots_SW(robot_geometry, units='cm'):
"""
Converts robot geometry into -x, -y, -z coordinates of axis pivots
Converts from robot frame to maya frame and converts from mm to cm
Maya Robot
x ----- y
y ----- z
z ----- x
:return axis_pivots: x-y-z loaction of each axis' pivot in Maya space
"""
a1_x = 0
a1_y = robot_geometry['z1']/2. # This can be anywhere
a1_z = 0
a2_x = 0
a2_y = robot_geometry['z1']
a2_z = robot_geometry['x1']
a3_x = robot_geometry['y1']
a3_y = a2_y + robot_geometry['z2']
a3_z = a2_z
a4_x = a3_x
a4_y = a3_y + robot_geometry['z3']
a4_z = a3_z + robot_geometry['x2']/2
a5_x = a4_x
a5_y = a4_y
a5_z = a3_z + robot_geometry['x2']
a6_x = a5_x
a6_y = a5_y
a6_z = a5_z + robot_geometry['x3']
axis_pivots = [[a1_x, a1_y, a1_z],
[a2_x, a2_y, a2_z],
[a3_x, a3_y, a3_z],
[a4_x, a4_y, a4_z],
[a5_x, a5_y, a5_z],
[a6_x, a6_y, a6_z]]
if units == 'cm':
# Convert to centimeter (Maya's default unit)
for i, axis in enumerate(axis_pivots):
axis_pivots[i] = [val/10 for val in axis]
return axis_pivots | 1aece2e330600f81bb2f8c5b8f001c6cd0c8f86e | 33,856 |
def connection_groups(**kwargs):
"""
Called to return the list of groups to automatically add/remove
this connection to/from.
"""
return ["dashboard"] | 32b766fcc7807f6ed68e5d21ed0c8af4552f879d | 33,857 |
def is_x_a_square(x: int) -> bool:
"""Is x a square number?"""
if x == 0:
return False
left = 1
right = x
while left <= right:
mid = left + (right - left) // 2
if mid ** 2 == x:
return True
elif mid ** 2 < x:
left = mid + 1
else:
right = mid - 1
return False | d6587a6e52c5c189e42da06976fb8f2027c9de82 | 33,858 |
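
A usage sketch; note this implementation deliberately treats 0 as not a square:

    assert is_x_a_square(49)
    assert not is_x_a_square(50)
    assert not is_x_a_square(0)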