content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def checkbytes_lt128(file):
    """
    Check if all bytes of a file are less than decimal 128.
    Returns :
    True for an ASCII encoded text file else False.
    """
    with open(file, 'rb') as handle:
        # Every ASCII code point fits in 0..127.
        return all(byte < 128 for byte in handle.read())
def load_metadata(itk_img, key):
    """
    Return the metadata value stored under ``key`` in the input itk image,
    or None when the key is absent.
    """
    if itk_img.HasMetaDataKey(key):
        return itk_img.GetMetaData(key)
    return None
from datetime import datetime
def ts_to_date(ts, date_format="%Y-%m-%d %H:%M:%S"):
    """
    Return a formatted date/time string from a given time stamp.

    Parameters
    ----------
    ts : int, float
        time stamp to convert (interpreted in local time)
    date_format : string
        format to convert time stamp to

    Returns
    -------
    str: formatted date/time string
    """
    moment = datetime.fromtimestamp(ts)
    return moment.strftime(date_format)
def get_repository_by_id( app, id ):
    """Get a repository from the database via id.

    :param app: application object exposing ``model`` and ``name``
    :param id: primary key of the repository row
    """
    # The SQLAlchemy session is shared through the app's model context.
    sa_session = app.model.context.current
    # Galaxy and the Tool Shed keep repositories in different model classes.
    if app.name == 'galaxy':
        return sa_session.query( app.model.ToolShedRepository ).get( id )
    else:
        return sa_session.query( app.model.Repository ).get( id ) | 48a461b8f7acb98f590f53c7b8f028a6517e2bf6 | 125,042 |
def build_transaction_description(match, type):
    """Builds a transaction's description from a regex match."""
    sub_type, location, description = match.group(1, 2, 3)
    return {
        "type": type,
        "sub_type": sub_type,
        "location": location,
        "description": description,
    }
def returns(data):
    """
    Dummy identity function: hands back its own param unchanged.
    :return: the argument ``data``
    """
    result = data
    return result
def asfloat(x):
    """Try to convert ``x`` to float; on failure return ``x`` unchanged."""
    try:
        converted = float(x)
    except (ValueError, TypeError):
        # Not numeric-looking -- fail gracefully with the original value.
        return x
    return converted
def select_data(nwis_df):
    """Create a boolean array of columns that contain data.

    Args:
        nwis_df:
            A pandas dataframe created by ``extract_nwis_df``.

    Returns:
        an array of Boolean values corresponding to the columns in the
        original dataframe: True for data columns (names ending in a digit),
        False for qualifier columns.

    Example:
        >>> my_dataframe[:, select_data(my_dataframe)]
        returns a dataframe with only the data columns; the qualifier
        columns do not show.
    """
    # Data columns are exactly those whose names end in a digit.
    return nwis_df.columns.str.contains(r"[0-9]$")
def accuracy(targ, pred, empty_score=0.0):
    """
    Computes accuracy between target and prediction array.

    Parameters
    ----------
    targ : array-like, bool or numeric
    pred : array-like, bool or numeric
    empty_score : float
        Value returned when the target is empty (sums to zero).

    Returns
    -------
    accuracy : float
        Best accuracy = 1.0
        Worst accuracy = 0.0
        Target is empty (sum eq to zero) = empty_score

    Raises
    ------
    ValueError
        If ``targ`` and ``pred`` do not have the same shape.
    """
    if targ.shape != pred.shape:
        # Fixed: the old message referenced "im1 and im2", which are not
        # this function's parameter names.
        raise ValueError("Shape mismatch: targ and pred must have the same shape.")
    # Honour the documented contract: an all-zero target yields empty_score
    # (the parameter previously existed but was never applied).
    if targ.sum() == 0:
        return empty_score
    return (targ == pred).mean()
def get_max_pgs_index(df_ghat):
    """
    Purpose: to identify the column that holds the max polygenic score for each row.

    Arguments:
        df_ghat: dataframe of polygenic scores, where each row is a new parent
            pair and columns are labelled like ``ghat_<n>``.
    Returns:
        series of indices (the integer ``<n>`` suffix) of the highest
        polygenic score per parent pair.
    """
    prefix = 'ghat_'
    # str.lstrip('ghat_') strips a *character set* from the left, not the
    # prefix string, and can mangle labels; slice the known prefix instead.
    return df_ghat.idxmax(axis=1).map(
        lambda label: int(label[len(prefix):] if label.startswith(prefix) else label)
    )
import re
def get_uuid(line):
    """
    Returns the uuid from a line where the uuid is between parentheses
    """
    match = re.search(r".*\((0x[a-z0-9]+)\)", line)
    # The caller guarantees a parenthesised hex uuid is present.
    assert match is not None
    return match.group(1)
import torch
def get_data_memory(data, units='M', exclude_demonstration_data=False):
    """
    Computes the memory consumption of torch_geometric.data.Data object in MBytes
    Counts for all torch.Tensor elements in data: x, y, edge_index, edge_attr, stats, etc.
    :param data: Transition object
    :param units: memory units, 'M' = MBytes etc.  Supported: 'B', 'K', 'M', 'G';
        any other value returns None (dict.get default).
    :param exclude_demonstration_data: if True, exclude data related to learning from demonstrations.
    :return: memory in the requested units, or None for an unknown unit
    """
    membytes = 0
    # NOTE(review): `data.keys` is used without calling -- assumes a
    # torch_geometric Data API where `keys` is a property; newer releases
    # made it a method, so confirm against the pinned version.
    for k in data.keys:
        if exclude_demonstration_data and 'demonstration' in k:
            continue
        v = data[k]
        # Only tensors contribute; scalar/str fields are ignored.
        if type(v) is torch.Tensor:
            membytes += v.element_size() * v.nelement()
    mem = {'B': membytes,
           'K': membytes / 2**10,
           'M': membytes / 2**20,
           'G': membytes / 2**30,
           }.get(units)
    return mem | 18dc95174de5532eadaa9e448d7a48b570bcb3df | 125,066 |
def insert(old_string, new_string, index):
    """Insert ``new_string`` into ``old_string`` at 1-based position ``index``."""
    # In the log, index starts at 1; convert to a 0-based slice point.
    pos = index - 1
    return "".join((old_string[:pos], new_string, old_string[pos:]))
def calculate_pool_chunksize(num_checkers, num_jobs):
    """Determine the chunksize for the multiprocessing Pool.
    - For chunksize, see: https://docs.python.org/3/library/multiprocessing.html#multiprocessing.pool.Pool.imap # noqa
    - This formula, while not perfect, aims to give each worker two batches of
      work.
    - See: https://gitlab.com/pycqa/flake8/merge_requests/156#note_18878876
    - See: https://gitlab.com/pycqa/flake8/issues/265
    """
    chunks_per_worker = num_checkers // (num_jobs * 2)
    # Never hand the pool a chunksize below 1.
    return chunks_per_worker if chunks_per_worker > 1 else 1
def match_end_faces(test_end_face, end_faces):
    """ Test if test_end_face matches with any end face in end_faces.
    Each face takes a value in (0, 90, 180, 270); two faces match when they
    are 180 degrees apart.
    :param test_end_face: one face
    :param end_faces: end_faces_to_match_against
    :return: True if there is a match
    """
    opposite = (test_end_face + 180) % 360
    return any(face == opposite for face in end_faces)
def get_primary_key(model):
    """
    Return primary key name from a model. If the primary key consists of multiple columns,
    return the corresponding tuple

    :param model:
        Model class (SQLAlchemy declarative class exposing ``_sa_class_manager``)
    :return:
        Attribute name (str) for a single-column key, a tuple of names for a
        composite key, or None when the mapper has no primary-key columns.
    """
    # The class manager's mapper knows the table's primary-key columns.
    mapper = model._sa_class_manager.mapper
    # Translate each primary-key Column into its mapped attribute name.
    pks = [mapper.get_property_by_column(c).key for c in mapper.primary_key]
    if len(pks) == 1:
        return pks[0]
    elif len(pks) > 1:
        return tuple(pks)
    else:
        return None | 54daa5cde947d27b9b223d2b57062d44aea8d444 | 125,075 |
def java_ts_to_secs(java_ts):
    """
    Convert java timestamp into unix timestamp.
    :param java_ts: java timestamp in milliseconds.
    :type java_ts: int
    :return: Timestamp in seconds.
    :rtype: float
    """
    # True division, so sub-second precision is preserved (hence rtype float,
    # not int as previously documented).
    return java_ts / 1000 | 9139b4ebd65524b9484ca9e1f3232dd3f846851c | 125,077 |
def red_outline(ann, plots, plot_id):
    """Return annotated image with plot outlined in red.

    :param ann: annotated image supporting ``mask_region``
    :param plots: collection supporting ``region_by_identifier``
    :param plot_id: identifier of the plot to outline
    :return: ``ann`` with the plot's border masked in red (mutated in place)
    """
    region = plots.region_by_identifier(plot_id)
    # Grow the region, take its border, then thicken the border so the
    # outline is clearly visible.
    region = region.dilate(14)
    region = region.border
    region = region.dilate(7)
    ann.mask_region(region, color=(255, 0, 0))
    return ann | f947d736faf5333d7e670141b5c9d2b632a3044b | 125,078 |
from typing import Sequence
import math
def split_train_test_validate(
    filenames: Sequence, train: int = 80, test: int = 10, validate: int = 10
) -> tuple[Sequence, Sequence, Sequence]:
    """
    Split the audio filenames into train, test, and validate segments.

    :param filenames: filenames corresponding to audio training data files.
    :param train: percentage of files to use for the train dataset.
    :param test: percentage of files to use for the test dataset.
    :param validate: percentage of files to use for the validation dataset.
    :raises ValueError: when the three percentages do not add up to 100.
    """
    if train + test + validate != 100:
        raise ValueError("train + test + validate percentages != 100%")
    total = len(filenames)
    n_train = math.floor((train / 100.0) * total)
    n_validate = math.floor((validate / 100.0) * total)
    # The test segment absorbs whatever the floor operations leave over.
    train_part = filenames[:n_train]
    test_part = filenames[n_train + n_validate:]
    validate_part = filenames[n_train:n_train + n_validate]
    return (train_part, test_part, validate_part)
def paired(coords):
    """Return a list of pairs from a flat list of coordinates."""
    assert len(coords) % 2 == 0, 'Coordinates are not paired.'
    # Zipping one iterator against itself consumes elements two at a time.
    stream = iter(coords)
    return list(zip(stream, stream))
def _read_coreference(corpus_soup):
    """ Obtain all mentions and coreference clusters in current document.

    :param corpus_soup: parsed document (BeautifulSoup-style object exposing
        ``findAll``) containing ``tc:entity``/``tc:reference`` elements.

    Returns
    -------
    tuple:
        (dict[str, list[str]], list[list[str]]):
            (1.) mentions: mention id -> list of its token ids
            (2.) mentions organized by coreference cluster (lists of mention ids)
    """
    mentions = {}
    clusters = []
    # Each <tc:entity> element represents one coreference cluster.
    for cluster_obj in corpus_soup.findAll("tc:entity"):
        curr_cluster = []
        # Each <tc:reference> inside it is a single mention.
        for mention_obj in cluster_obj.findAll("tc:reference"):
            mention_id = mention_obj["id"]
            mention_tokens = mention_obj["tokenids"].split(" ")
            mentions[mention_id] = mention_tokens
            curr_cluster.append(mention_id)
        clusters.append(curr_cluster)
    return mentions, clusters | 31f1e5f2dc5ea4fa19d7de7e3f8526595e261394 | 125,083 |
def whitelist(dct, fields):
    """
    Leave only those fields which keys are present in `fields`.

    :param dct: Source dictionary
    :type dct: dict
    :param fields: List of fields to keep
    :type fields: list
    :return: Resulting dictionary containing whitelisted fields only
    :rtype: dict
    """
    wanted = set(fields)
    result = {}
    for key, value in dct.items():
        if key in wanted:
            result[key] = value
    return result
def _unit2coef(strUnit):
"""
Function returns a unit coefficient based on a unit symbol.
Available unit names, symbols and coefficients:
(femto): 'f' = 1e-15
(pico): 'p' = 1e-12
(nano): 'n' = 1e-9
(micro): 'u' = 1e-6
(mili): 'm' = 1e-3
(none): ' ' = 1
(kilo): 'k' = 1e3
(Mega): 'M' = 1e6
(Giga): 'G' = 1e9
(Tera): 'T' = 1e12
(hour): 'h' = 3600
Args:
strUnit (string): key of the unit
Returns:
iCoef (int): unit coefficient
"""
# The name of the function (for error purposes)
strFunc = 'rxcs.console._unit2coef'
# ----------------------------------------------------------------
# femto
if strUnit == 'f':
iCoef = 1e-15
# pico
elif strUnit == 'p':
iCoef = 1e-12
# nano
elif strUnit == 'n':
iCoef = 1e-9
# micro
elif strUnit == 'u':
iCoef = 1e-6
# mili
elif strUnit == 'm':
iCoef = 1e-3
# none
elif strUnit == ' ':
iCoef = 1
# kilo
elif strUnit == 'k':
iCoef = 1e3
# Mega
elif strUnit == 'M':
iCoef = 1e6
# Giga
elif strUnit == 'G':
iCoef = 1e9
# Tera
elif strUnit == 'T':
iCoef = 1e12
# hour
elif strUnit == 'h':
iCoef = 3600
# ----------------------------------------------------------------
# Unknown unit
else:
strErr = strFunc + ' : '
strErr = strErr + ('> %s < is an unknown unit symbol') % (strUnit)
raise Exception(strErr)
# ----------------------------------------------------------------
return iCoef | a445d29b649b850e32f8ff98c7ccd82e686c0b5f | 125,087 |
def get_driver_list(pd_file_names):
    """
    :Arguments:
        1. pd_file_names (list[str]) = All the files names obtained from the
           ProductDrivers directory of the repository that is temporarily cloned
           into the user's machine
    :Returns:
        2. driver_list (list[str]) = This list contains serial numbers and the
           driver names alternately
           eg: ['1', driver_name_1, '2', driver_name_2, '3', driver_name_3]
    """
    # Keep only python driver files, preserving their original order.
    py_files = [name for name in pd_file_names if name.endswith('.py')]
    driver_list = []
    # enumerate(start=1) replaces the old zip(range(len(...))) construction.
    for serial, name in enumerate(py_files, start=1):
        driver_list.append(str(serial))
        driver_list.append(name)
    return driver_list
def replace_string_contents(raw_string, renaming_dictionary):
    """
    Takes a string and replaces it with any changes provided in a renaming dictionary

    :param raw_string: a raw string with which to pass through the renaming dictionary
    :param renaming_dictionary: a dictionary containing keys to be replaced with their associated values
    :return: string identical to the raw string provided with all changes made based on the renaming dictionary
    """
    renamed = raw_string
    # Apply each replacement in dictionary order; str.replace is already a
    # no-op when the key is absent, so no membership check is needed.
    for old_text, new_text in renaming_dictionary.items():
        renamed = renamed.replace(old_text, new_text)
    return renamed
import math
import time
def get_timestamp() -> int:
    """Gets the current UNIX timestamp as a full integer (rounded up)."""
    # math.ceil on the float epoch time; marginally faster than round().
    now = time.time()
    return math.ceil(now)
import re
def filter_out_links(text):
    """ Function for filtering out links from given text in a tweet """
    link_pattern = re.compile(r'http\S+')
    return link_pattern.sub('', text)
def reduce_em_matrix(em_mat, haplogroups, contrib_props):
    """
    Takes the matrix used by the EM algorithm, the column haplogroup labels,
    and the table of identified contributors and returns a new matrix made
    up of only the haplogroups that have passed the contributor filtering
    steps.

    Args:
        em_mat: a numpy matrix
        haplogroups: a list of strings for every column in em_mat
        contrib_props: a list of lists for each identified contributor
                       containing the hap#, haplogroup and initial proportion
                       estimate for each contributor.
    Returns:
        A new matrix made up of only the columns listed in contrib_props and
        a new list of labels for the simplified matrix.
    """
    # Entry layout is [hap#, haplogroup, proportion]; keep the haplogroups.
    keep = {entry[1] for entry in contrib_props}
    kept_indexes = []
    kept_haps = []
    for idx, hap in enumerate(haplogroups):
        if hap in keep:
            kept_indexes.append(idx)
            kept_haps.append(hap)
    return em_mat[:, kept_indexes], kept_haps
import math
def bearingCoord (lat0, lon0, lat1, lon1):
    """
    Bearing from one point to another in degrees (0-360).
    """
    rlat0 = math.radians(lat0)
    rlat1 = math.radians(lat1)
    rdlon = math.radians(lon1 - lon0)
    # Standard great-circle initial-bearing formula.
    y = math.sin(rdlon) * math.cos(rlat1)
    x = (math.cos(rlat0) * math.sin(rlat1)
         - math.sin(rlat0) * math.cos(rlat1) * math.cos(rdlon))
    bearing = math.degrees(math.atan2(y, x))
    # Normalise atan2's (-180, 180] range into [0, 360).
    if bearing < 0.0:
        bearing += 360.0
    return bearing
def _data_helper(num_files, data_cols=None, label_cols=None, id_cols=None):
"""Initializes data, labels, ids, data_cols, label_cols, id_cols as list
Arguments:
num_files {int} -- number of files
Keyword Arguments:
data_cols {int or list of int} -- location of data (default: {None})
label_cols {int of list of ints} -- location of labels (default: {None})
id_cols {int or list of ints} -- location of ids (default: {None})
Returns:
data {list} -- empty data list
labels {list} -- empty labels list
ids {list} -- empty ids list
data_cols {list} -- location of data
label_cols {list} -- location of labels
id_cols {list} -- location of ids
"""
data = None
labels = None
ids = None
if data_cols is not None:
#duplicating int. i.e assuming that every file has the same data col
if type(data_cols) == int:
data_cols = [[data_cols]]*num_files
else:
data_cols = [data_cols] * num_files
data = []
else:
data_cols = [None]*num_files
if label_cols is not None:
if type(label_cols) == int:
label_cols = [[label_cols]]*num_files
else:
label_cols = [label_cols] * num_files
labels = []
else:
label_cols = [None]*num_files
if id_cols is not None:
if type(id_cols) == int:
id_cols = [id_cols]*num_files
ids = []
else:
id_cols = [None]*num_files
return data, labels, ids, data_cols, label_cols, id_cols | 2b08f1528736dfd23228e5afd717a1daab8eaeb5 | 125,098 |
import re
def get_imports(contents: str) -> list[str]:
    """Get all Python imports defined in the text

    Args:
        contents (str): Text to process
    Returns:
        list[str]: List of import names/aliases discovered
    """
    # \b stops the pattern from firing inside words such as "importlib",
    # and \s+ requires real whitespace after the keyword (the old pattern
    # used \s* / \S*, which could match mid-word with an empty name).
    import_pattern = r"\bimport\s+(\S+)(?:\s+as\s+(\S+))?"
    matches = re.findall(import_pattern, contents)
    output = []
    # Record the alias when present, otherwise the module name.
    for name, alias in matches:
        output.append(alias if alias else name)
    return output
def count_files_dir(path):
    """Count the number of files recursively under ``path`` (a pathlib.Path)."""
    # glob('**/*') walks every entry; only regular files are counted.
    return sum(1 for entry in path.glob("**/*") if entry.is_file())
def get_regions(market):
    """
    Get a comma-separated, alphabetically sorted list of the regions the
    Market serves (duplicates removed).
    """
    seen = []
    for country in market.countries_served.all():
        region_name = str(country.region)
        # Preserve uniqueness while collecting.
        if region_name not in seen:
            seen.append(region_name)
    return ", ".join(sorted(seen))
def does_not_mutate(func):
    """Decorator preventing methods from mutating the receiver.

    The wrapped method is invoked on a copy of ``self`` (via ``self.copy()``),
    so the original object is left untouched.
    """
    import functools

    @functools.wraps(func)  # copies __name__, __doc__, __module__, __qualname__, ...
    def wrapper(self, *args, **kwargs):
        # Work on a copy so the receiver itself is never mutated.
        new = self.copy()
        return func(new, *args, **kwargs)
    return wrapper
def grab_random_text_value(browser):
    """
    Grabs the random text value by ID

    :param browser: Headless browser (Selenium WebDriver-style object)
    :return: String
    """
    # NOTE(review): find_element_by_id was removed in Selenium 4 -- confirm
    # the pinned selenium version still provides it.
    text = browser.find_element_by_id("random-text").text
    return text | 6b7c61a9945a3a5c4fd36c65a4c48715a2beae5c | 125,110 |
def belongs(name, groups):
    """ checks if name belongs to any of the groups
    (substring containment, not exact match)
    """
    return any(group in name for group in groups)
import re
def get_ccs_version(ccs_root):
    """Returns the version number of the ccs installation

    Version number is as found in ccs.properties file

    Args:
        ccs_root (str): full path to root of ccs installation

    Returns:
        str: full version/build id as found in ccs.properties file, or None
            if no ccs_buildid line is present

    Raises:
        OSError: raised if ccs.properties file cannot be found
    """
    version = None
    with open(ccs_root + '/eclipse/ccs.properties') as f:
        for line in f:
            # Dots are now escaped: the previous pattern's bare '.' matched
            # any character, accepting malformed build ids.
            match = re.match(r"^ccs_buildid=([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)", line, flags=re.IGNORECASE)
            if match:
                version = match.group(1)
                break
    return version
import codecs
def read_utf8_file(filename):
    """
    Read a utf-8 encoded file and return its content as a string.

    :param filename: path of the file to read
    :return: decoded file contents
    """
    # A context manager guarantees the handle is closed even if read() raises
    # (the previous version leaked the handle on error).
    with codecs.open(filename, encoding='utf-8') as source_file:
        return source_file.read()
import torch
def iou(proposals, gt_bboxes):
    """
    Compute Intersection over Union between sets of bounding boxes.
    @Params:
    -------
    proposals (tensor):
        Proposals of shape [B, A, H', W', 4], where 4 indicates (x_tl, y_tl, x_br, y_br).
    gt_bboxes (tensor):
        Ground truth boxes, from the DataLoader, of shape [B, N, 5], where 5 indicates
        (x_tl, y_tl, x_br, y_br, cls_id). N is the max number of bboxes within this batch,
        for images[i] which has fewer bboxes then N, then gt_bboxes[i] will be padded
        with extra rows of -1.
    @Returns:
    -------
    iou_mat (tensor):
        IoU matrix of shape [B, A*H'*W', N] where iou_mat[b, i, n] gives the IoU between
        one element of proposals[b] with gt_bboxes[b, n]
    """
    B, A, h_amap, w_amap = proposals.shape[:4]
    # Reshape so the two box sets broadcast against each other:
    # [B, A*H'*W', 1, 4] vs [B, 1, N, 4] -> pairwise [B, A*H'*W', N, ...].
    proposals = proposals.view(B, -1, 1, 4) # [B, A*H'*W', 1, 4]
    gt_bboxes = gt_bboxes[..., :4].view(B, 1, -1, 4) # [B, 1, N, 4]
    # Area of proposals, shape [B, A*H'*W', 1]
    proposals_wh = proposals[..., 2:] - proposals[..., :2]
    proposals_area = proposals_wh[..., 0] * proposals_wh[..., 1]
    # Area of gt_bboxes, shape [B, 1, N]
    gt_bboxes_wh = gt_bboxes[..., 2:] - gt_bboxes[..., :2]
    gt_bboxes_area = gt_bboxes_wh[..., 0] * gt_bboxes_wh[..., 1]
    # Area of Intersection, shape [B, A*H'*W', N]
    intersect_xy_tl = torch.maximum(proposals[..., :2], gt_bboxes[..., :2])
    insersect_xy_br = torch.minimum(proposals[..., 2:], gt_bboxes[..., 2:])
    # clamp(min=0) zeroes out pairs with no overlap.
    intersect_wh = (insersect_xy_br - intersect_xy_tl).clamp(min=0.)
    intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]  # [B, A*H'*W', N]
    # Area of Union
    union = proposals_area + gt_bboxes_area - intersect_area
    # IoU
    # NOTE(review): padded gt rows of -1 still produce finite (meaningless)
    # IoU values here -- callers presumably mask them out; confirm.
    iou_mat = intersect_area / union
    return iou_mat | d484fa26808f420a99158131618ebe7e0e806499 | 125,118 |
import re
def IsVPCNameValid(vpc):
    """Return true if a VPC name is valid.

    https://cloud.google.com/compute/docs/reference/rest/v1/networks
    "The first character must be a lowercase letter, and all following characters
    (except for the last character) must be a dash, lowercase letter, or digit.
    The last character must be a lowercase letter or digit."

    Args:
        vpc: A string.
    Returns:
        bool: True if a VPC name matches the pattern and length requirements.
    """
    # Length must be 1..63 characters before the pattern is even considered.
    if not 1 <= len(vpc) <= 63:
        return False
    return bool(re.match('^[a-z]$|^[a-z][a-z0-9-]*[a-z0-9]$', vpc))
from pathlib import Path
import hashlib
def sha256_checksum(file_path: Path, block_size: int = 65536) -> str:
    """
    Compute sha256 checksum of file.

    Note: the file's *name* is folded into the digest after its contents, so
    identical bytes stored under different names hash differently.

    Args:
        file_path: path to the file
        block_size: amount of bytes read per cycle

    Returns:
        sha256 hash of the file
    """
    digest = hashlib.sha256()
    with open(file_path, "rb") as handle:
        while True:
            chunk = handle.read(block_size)
            if not chunk:
                break
            digest.update(chunk)
    digest.update(file_path.name.encode('utf-8'))
    return digest.hexdigest()
def split_line(xml_line_text):
    """
    Returns a list of values, given a row of delimited RETS response data

    :param xml_line_text: a string of delimited RETS data
    :type xml_line_text: str
    :rtype: list
    :return: a list of values from xml_line_text
    """
    def handle_delimiter(text, delimiter):
        # Fully delimited values have the delimiter at both ends; drop the
        # empty leading/trailing fields the split produces.
        if text[0] == delimiter and text[-1] == delimiter:
            return text.split(delimiter)[1:-1]
        else:
            # No leading/trailing delimiter
            return text.strip().split(delimiter)

    # NOTE: '\x09' and '\t' are the same character, so the original second
    # branch ("Metadata Transactions") was unreachable dead code; one tab
    # check covers both Search and Metadata transactions.
    if '\t' in xml_line_text:
        return handle_delimiter(xml_line_text, '\t')
    elif '\n' in xml_line_text.strip():
        # Login/Logout Transactions
        return handle_delimiter(xml_line_text, '\n')
    return [xml_line_text.strip()]
def parse_message(data):
    """Return a tuple containing the command, the key, and (optionally) the
    value cast to the appropriate type."""
    command, key, value, value_type = data.strip().split(';')
    # An empty type marker means "no value at all".
    if not value_type:
        return command, key, None
    if value_type == 'LIST':
        parsed = value.split(',')
    elif value_type == 'INT':
        parsed = int(value)
    else:
        parsed = str(value)
    return command, key, parsed
def to_rdn(rei: int) -> float:
    """ Convert REI value to RDN (1 RDN = 1e18 REI). """
    rei_per_rdn = 10 ** 18
    return rei / rei_per_rdn
def erd(active, rest):
    """The event-related de/sync formula. Output is in percents.
    If result is < 0, than what we have is ERD. Positive numbers denote ERS.
    """
    relative_change = (active - rest) / rest
    return relative_change * 100
def model(model_dense, model_sparse, sparse):  # pylint: disable=redefined-outer-name
    """Fixture to get a tight-binding model, both sparse and dense."""
    return model_sparse if sparse else model_dense
def read_plink_ped(plink_ped_fp):
    """Read in plink .ped file.

    Records whose first field is "-9" go into the parents dict, all others
    into the ped dict; both are keyed by the record's second field.
    """
    parents_dict = {}
    ped_dict = {}
    with open(plink_ped_fp, "rt") as handle:
        for record in handle:
            fields = record.split()
            target = parents_dict if record.startswith("-9") else ped_dict
            target[fields[1]] = fields
    return (parents_dict, ped_dict)
def capture_signature_from_property_groups_element(element):
    """
    Capture signature (Device ID) data from a property-group element

    :param element: element with tag='property-groups'
    :return: bytearray with 3 bytes of Device ID data
    """
    signature = bytearray(3)
    # Map each property name onto its byte slot in the signature.
    slots = {'SIGNATURE0': 0, 'SIGNATURE1': 1, 'SIGNATURE2': 2}
    for prop in element.findall('property-group/property'):
        slot = slots.get(prop.attrib['name'])
        if slot is not None:
            # Values are hex strings such as "0x1e".
            signature[slot] = int(prop.attrib['value'], 16)
    return signature
def is_divisible_by_any(number, divisors):
    """Return True if a number is divisible by any divisor in a list, False otherwise"""
    return any(number % divisor == 0 for divisor in divisors)
from typing import List
from typing import Dict
def get_label_map_dict(label_list: List[str]) -> Dict[str, int]:
    """Returns a dict mapping labels and the corresponding id.

    Args:
        label_list: List of label names.
    Returns:
        A dict mapping labels to the corresponding id (their list position).
    """
    return dict(zip(label_list, range(len(label_list))))
import re
def getTimeElementsFromString(dtStr):
    """Return tuple of (year,month,day,hour,minute,second) from date time string."""
    # Full date-time form, with optional fractional seconds and trailing Z.
    match = re.match(r'^(\d{4})[/-](\d{2})[/-](\d{2})[\s*T](\d{2}):(\d{2}):(\d{2})(?:\.\d+)?Z?$', dtStr)
    if match:
        return tuple(int(part) for part in match.groups())
    # Date-only form: time components default to midnight.
    match = re.match(r'^(\d{4})[/-](\d{2})[/-](\d{2})$', dtStr)
    if match:
        year, month, day = (int(part) for part in match.groups())
        return (year, month, day, 0, 0, 0)
    raise RuntimeError("Failed to recognize date format: %s" % dtStr)
def get_previous_node(node):
    """
    Return the node before this node: its previous sibling if any, otherwise
    the parent's previous node (recursively). Returns None at the start.
    """
    prev = node.prev_sibling
    if prev:
        return prev
    parent = node.parent
    if parent:
        return get_previous_node(parent)
    return None
def badge_upload_path(instance, filename):
    """
    Construct the upload path for a project's badge image.
    """
    return f"direct-sharing/badges/{instance.id}/{filename}"
def rename(df, values=None, columns=None, locale=None):
    """
    Replaces data values and column names according to locale

    Args:
        df (pd.DataFrame): DataFrame to transform
        values (dict):
            - key (str): term to be replaced
            - value (dict):
                - key: locale
                - value: term's translation
        columns (dict):
            - key (str): columns name to be replaced
            - value (dict):
                - key: locale
                - value: column name's translation
        locale (str): locale

    Returns:
        pd.DataFrame: the transformed DataFrame (the input is rebound, not
        modified in place)
    """
    if values:
        # Build parallel lists of source terms and their translations for the
        # chosen locale, then replace cell values in one pass.
        to_replace = list(values.keys())
        value = [values[term][locale] for term in values]
        df = df.replace(to_replace=to_replace, value=value)
    if columns:
        # Map each original column name onto its translation for the locale.
        _keys = list(columns.keys())
        _values = [column[locale] for column in columns.values()]
        columns = dict(list(zip(_keys, _values)))
        df = df.rename(columns=columns)
    return df | b590f7eb5f40d31aeb02dbcdcb4280cabd0990d4 | 125,169 |
from datetime import datetime
import pytz
def get_utctimestamp(thedate=None, fmt='%Y-%b-%d %H:%M:%S'):
    """
    Get a UTC timestamp string from a ``datetime.datetime`` object

    :param thedate: ``datetime.datetime`` object; None (the default) means
        "now".  The old default of ``datetime.utcnow()`` was evaluated once
        at import time (every defaulted call reused the module-load time) and
        produced a naive datetime that ``astimezone`` re-interpreted as local
        time -- both fixed by deferring to call time with an aware datetime.
    :type thedate: datetime.datetime
    :param fmt: Timestamp format string, default is "%Y-%b-%d %H:%M:%S"
    :type fmt: str
    :return: UTC timestamp string
    :rtype: str
    """
    from datetime import timezone
    if thedate is None:
        thedate = datetime.now(timezone.utc)
    return thedate.astimezone(timezone.utc).strftime(fmt)
import fnmatch
def path_in_patterns(patterns, path):
    """Check a path against many fnmatch patterns"""
    return any(fnmatch.fnmatch(path, str(pattern)) for pattern in patterns)
def get_agent_value_estimate(agent, state, action):
    """
    Obtains the agent's value estimate for a particular state and action.

    Args:
        agent: agent object exposing ``reset``, ``eval``, ``device`` and
            ``q_value_estimator``
        state (torch.Tensor): state of size [batch_size, n_state_dims]
        action (torch.Tensor): action of size [batch_size, n_action_dims]

    Returns a dictionary of action-value estimates (numpy arrays):
        direct: the estimate using the Q-network, size [batch_size]
        estimate: the full estimate (using the model), size [batch_size]
    """
    # Put the agent in a clean evaluation state before estimating.
    agent.reset(); agent.eval()
    # Move the inputs onto the agent's device.
    state = state.to(agent.device); action = action.to(agent.device)
    # direct=True queries the Q-network alone; the second call uses the model.
    direct_estimate = agent.q_value_estimator(agent, state, action, direct=True).detach().view(-1).cpu().numpy()
    estimate = agent.q_value_estimator(agent, state, action).detach().view(-1).cpu().numpy()
    return {'direct': direct_estimate, 'estimate': estimate} | c676beed0a7f779a212a0cda03a838770bdc846b | 125,181 |
import torch
def batchwise_cdist(samples1, samples2, eps=1e-6):
    """Compute L2 distance between each pair of the two multi-head embeddings in batch-wise.
    We may assume that samples have shape N x K x D, N: batch_size, K: number of embeddings, D: dimension of embeddings.
    The size of samples1 and samples2 (`N`) should be either
    - same (each sample-wise distance will be computed separately)
    - len(samples1) = 1 (samples1 will be broadcasted into samples2)
    - len(samples2) = 1 (samples2 will be broadcasted into samples1)
    The following broadcasting operation will be computed:
    (N x 1 x K x D) - (N x K x 1 x D) = (N x K x K x D)
    Parameters
    ----------
    samples1: torch.Tensor (shape: N x K x D)
    samples2: torch.Tensor (shape: N x K x D)
    eps: small constant added under the sqrt for numerical stability
    Returns
    -------
    batchwise distance: N x K ** 2
    """
    if len(samples1.size()) != 3 or len(samples2.size()) != 3:
        raise RuntimeError('expected: 3-dim tensors, got: {}, {}'.format(samples1.size(), samples2.size()))
    # Resolve the broadcast batch size; exactly-one-sided broadcasting only.
    if samples1.size(0) == samples2.size(0):
        batch_size = samples1.size(0)
    elif samples1.size(0) == 1:
        batch_size = samples2.size(0)
    elif samples2.size(0) == 1:
        batch_size = samples1.size(0)
    else:
        raise RuntimeError(f'samples1 ({samples1.size()}) and samples2 ({samples2.size()}) dimensionalities '
                           'are non-broadcastable.')
    # Insert singleton dims so the subtraction produces all K x K pairs.
    samples1 = samples1.unsqueeze(1)
    samples2 = samples2.unsqueeze(2)
    # eps keeps sqrt away from exactly zero (stabilises the gradient).
    return torch.sqrt(((samples1 - samples2) ** 2).sum(-1) + eps).view(batch_size, -1) | bc9cfa34642c0deaca53e9908f9c778ef6501113 | 125,183 |
def lmap(func, iterable):
    """
    returns list after applying map
    :: list(map(func, iterable))
    """
    return [func(item) for item in iterable]
def first_missing_positive(nums):
    """
    Find the first missing positive integer in a list of integers.
    This algorithm sorts the array by making swaps
    and ignoring elements that are greater than the length
    of the array or negative.
    If the element is equal to the current index (start) then
    it is already in place.
    If the element is < 0 or > len() or a duplicate, then pull
    in the last element.
    Otherwise swap the current element into place with the
    element that is occupying it's appropriate place. Do this
    until the current value is the correct one for its place or
    the start and end have swapped.
    Args:
        nums: list of integers (modified in place)
    Returns:
        The first integer greater than 0 that is not
        present in the input list.
    """
    start = 0
    end = len(nums) - 1
    # O(n): every iteration either fixes one element into its slot,
    # advances start, or shrinks end.
    while start <= end:
        i = nums[start] - 1
        # if this element is in position
        if i == start:
            start += 1
        # if the element is negative or out of bounds
        # or a duplicate that is already sorted swap the
        # current element into the oob and dec the end
        elif i < 0 or i > end or nums[start] == nums[i]:
            nums[start] = nums[end]
            end -= 1
        # swap the element to where it should be
        else:
            nums[start], nums[i] = nums[i], nums[start]
    # After the loop, slots [0, start) hold 1..start, so start+1 is missing.
    return start + 1 | b6eebfc3c114c9f35ea94c2569e0c845461f9d73 | 125,186 |
def is_git(cmd):
    """Test if a git command."""
    git_suffix = "git-bln"
    return cmd.endswith(git_suffix)
import math
def round_int(x):
    """Rounds `x` (half away from zero for positives) and converts to an int."""
    shifted = x + 0.5
    return int(math.floor(shifted))
def convert_bool(bool_str: str) -> bool:
    """Convert HTML input string to boolean"""
    truthy = {"true", "yes", "on", "1", "enable"}
    return bool_str.strip().lower() in truthy
from math import sqrt
def asymmetry(pi, pK, e, g_pK, g_e):
    """
    calculates background-subtracted asymmetry given
        pi = (content,error)
        pK = (content,error)
        e = (content,error)
        g_pK = (pK contamination in pi window) / purity of pK sideband
        g_e = (e contamination in pi window) / purity of e sideband
    returns (content,error)
    """
    # Subtract the scaled sideband contributions, then renormalise.
    content = (pi[0] - g_pK*pK[0] - g_e*e[0]) / (1 - g_pK - g_e)
    # Errors are added in quadrature before the same renormalisation.
    error = sqrt(pi[1]**2 + (g_pK*pK[1])**2 + (g_e*e[1])**2) / (1 - g_pK - g_e)
    return (content, error)
import hashlib
def get_seed(mnemonic_bytes, passphrase=None):
    """
    This function creates a mnemonic seed from bytes encoded mnemonic.
    Passphrase is optional
    """
    # BIP-39 style salt: the literal "mnemonic" plus the optional passphrase.
    passphrase_part = passphrase if passphrase is not None else ""
    salt = ("mnemonic" + passphrase_part).encode("utf8")
    return hashlib.pbkdf2_hmac(
        'sha512',
        mnemonic_bytes,
        salt,
        2048,
    )
def should_remove_line(line, blacklist):
    """Return True when *line* mentions any blacklisted image.

    :param line: line from a datalist file
    :param blacklist: iterable of image names to be removed
    :return: True if any blacklisted image occurs in the line
    """
    # Generator feeds any() lazily -- no intermediate list, short-circuits.
    return any(image in line for image in blacklist)
def PreprocessBWT(bwt):
    """Preprocess the Burrows-Wheeler Transform ``bwt`` of some text.

    Returns a pair ``(starts, occ_count_before)``:
      * ``starts`` -- for each character C in bwt, ``starts[C]`` is the first
        position of C in the sorted array of all characters of the text.
      * ``occ_count_before`` -- ``occ_count_before[P][C]`` is the number of
        occurrences of C in ``bwt[0:P]``, i.e. strictly *before* position P
        (the original docstring said "inclusive", which did not match the
        computation).  Only characters already seen appear as keys.
    """
    counts = dict()
    occ_count_before = [dict() for _ in range(len(bwt) + 1)]
    for i, c in enumerate(bwt):
        # Snapshot the counters before counting bwt[i].
        occ_count_before[i].update(counts)
        if c not in counts:
            counts[c] = 0
        counts[c] += 1
    # Final row covers the whole string.
    occ_count_before[len(bwt)].update(counts)
    # Starting index of each character block in the sorted text.
    starts = dict()
    total = 0
    for ch in sorted(counts):
        starts[ch] = total
        total += counts[ch]
    return starts, occ_count_before
def where_am_i(position):
    """Print the given position and return it unchanged."""
    print('I am at:', position)
    return position
from typing import Any
def is_nan(value: Any) -> bool:
    """
    Returns True if 'value' is some form of NaN, whether float('nan')
    or a numpy or pandas NaN.
    """
    # NaN is the only value that does not compare equal to itself;
    # bool() keeps the return type strict, as the original if-test did.
    return bool(value != value)
def _arraysum(array_a, array_b):
"""Calculate the sum of two arrays."""
return array_a + array_b | 37c6d3eb1e9c740a533b54346b7eb0f96e05cfac | 125,219 |
import torch
def _manhattan_fast(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
"""
Helper function to calculate manhattan distance between ``torch.Tensors`` ``x`` and ``y``: :math:`sum(|x-y|)`
Uses dimension expansion. Returns a 2D tensor of size :math:`m x n`.
Parameters
----------
x : torch.Tensor
2D tensor of size :math:`m x f`
y : torch.Tensor
2D tensor of size :math:`n x f`
"""
d = torch.sum(torch.abs(x.unsqueeze(1) - y.unsqueeze(0)), dim=2)
return d | efeeea1161dea8cab92178d827ead639680ecce2 | 125,220 |
import re
def get_date(text):
    """Extract the date part (text before the first ':') from the first
    bracketed span in *text*, e.g. ``[01/Jan/2020:10:00:00]`` -> ``01/Jan/2020``."""
    bracketed = re.search(r"\[(.+?)\]", text)
    matched = bracketed.group() if bracketed else ""
    # Keep only what precedes the first colon, then drop the leading bracket.
    date_part = matched.split(":", 1)[0]
    return date_part.strip(r"\[")
from typing import List
from typing import Dict
def count_matrix(motifs: List[str]) -> Dict:
    """Return the count matrix from a list of dna strings of equal length

    Arguments:
        motifs {List[str]} -- List of DNA sequences of equal length

    Raises:
        ValueError -- if the motifs differ in length

    Returns:
        Dict -- count matrix mapping each nucleotide to its per-column counts

    Example:
        >>> motifs = ["TCGGGGGTTTTT",
        ...           "CCGGTGACTTAC",
        ...           "ACGGGGATTTTC",
        ...           "TTGGGGACTTTT",
        ...           "AAGGGGACTTCC",
        ...           "TTGGGGACTTCC",
        ...           "TCGGGGATTCAT",
        ...           "TCGGGGATTCCT",
        ...           "TAGGGGAACTAC",
        ...           "TCGGGTATAACC"]
        >>> count_mat = count_matrix(motifs)
        >>> for k, v in count_mat.items():
        ...     print(k, v)
        A [2, 2, 0, 0, 0, 0, 9, 1, 1, 1, 3, 0]
        C [1, 6, 0, 0, 0, 0, 0, 4, 1, 2, 4, 6]
        G [0, 0, 10, 10, 9, 9, 1, 0, 0, 0, 0, 0]
        T [7, 2, 0, 0, 1, 1, 0, 5, 8, 7, 3, 4]
    """
    width = len(motifs[0])
    # Generator short-circuits on the first mismatched length.
    if not all(len(m) == width for m in motifs):
        raise ValueError("All motifs must be same length.")
    count_mat = {nt: [0] * width for nt in "ACGT"}
    for seq in motifs:
        for i, nt in enumerate(seq):
            count_mat[nt][i] += 1
    return count_mat
import pathlib
from typing import Dict
from typing import Any
import json
def read_stocks(json_file: pathlib.Path) -> Dict[str, Any]:
    """Load the stocks entries from the given json file.

    Opens read-only ("r", not the previous "r+", which needlessly required
    write permission) and decodes as UTF-8, the JSON standard encoding.
    """
    with open(json_file, "r", encoding="utf-8") as file:
        return json.load(file)
from typing import Tuple
def intersects(p1: Tuple[float, float], p2: Tuple[float, float],
               p3: Tuple[float, float], p4: Tuple[float, float]) -> bool:
    """Checks if line segment p1p2 and p3p4 intersect.

    Uses relative orientations: each segment must strictly straddle the
    line containing the other, so collinear touching returns False.
    """
    def orient(a: Tuple[float, float], b: Tuple[float, float],
               c: Tuple[float, float]) -> float:
        # Cross product (b - a) x (c - a): sign gives the turn direction.
        return (b[0] - a[0]) * (c[1] - a[1]) - (c[0] - a[0]) * (b[1] - a[1])

    d1 = orient(p3, p4, p1)
    d2 = orient(p3, p4, p2)
    d3 = orient(p1, p2, p3)
    d4 = orient(p1, p2, p4)
    straddle_a = (d1 > 0 > d2) or (d2 > 0 > d1)
    straddle_b = (d3 > 0 > d4) or (d4 > 0 > d3)
    return straddle_a and straddle_b
def create_genomic_meta_file(
    study_identifier: str,
    alteration_type: str,
    datatype: str,
    stable_id: str,
    profile_name: str,
    profile_description: str,
    filename: str,
) -> dict:
    """Create a cBioPortal genomic metadata mapping.

    Args:
        study_identifier (str): Unique identifier of the cancer study
        alteration_type (str): MUTATIONS_EXTENDED, FUSION, or COPY_NUMBER_ALTERATION
        datatype (str): DISCRETE, FUSION, or MAF
        stable_id (str): mutations, fusion, or cna
        profile_name (str): A name for the data
        profile_description (str): A description of the data
        filename (str): cbioportal data file name

    Returns:
        dict: cBioPortal meta file contents
    """
    return {
        "cancer_study_identifier": study_identifier,
        "genetic_alteration_type": alteration_type,
        "datatype": datatype,
        "stable_id": stable_id,
        "show_profile_in_analysis_tab": "true",
        "profile_name": profile_name,
        "profile_description": profile_description,
        "data_filename": filename,
    }
def summarize_segments(segments):
    """Take a list of segments and create a simple percentage summary.

    Each segment is a dict with "duration", "location" ('on floor' /
    'off floor') and "condition" ('working' / 'not working') keys.
    Returns percentages rounded to 2 decimals; a zero total (including an
    empty list, which previously was the only guarded case) yields 0
    instead of raising ZeroDivisionError.
    """
    def _pct(part, whole):
        # Guard against a zero denominator (e.g. no recognized locations).
        return round(part / whole * 100, 2) if whole else 0

    on_floor = sum(s["duration"] for s in segments if s["location"] == 'on floor')
    off_floor = sum(s["duration"] for s in segments if s["location"] == 'off floor')
    working = sum(s["duration"] for s in segments if s["condition"] == 'working')
    not_working = sum(s["duration"] for s in segments if s["condition"] == 'not working')
    return {"on_floor": _pct(on_floor, on_floor + off_floor),
            "off_floor": _pct(off_floor, on_floor + off_floor),
            "working": _pct(working, working + not_working),
            "not_working": _pct(not_working, working + not_working)}
def print_verilog_literal(size, value):
    """Return a Verilog decimal literal with an explicit bit width."""
    if value >= 0:
        return f"{size}'d{value}"
    return f"-{size}'d{abs(value)}"
import hashlib
def file_get_md5_checksum(path, block_size=2 ** 20):
    """Return the MD5 hex digest of the file at *path*, or None on error.

    Reads in *block_size* chunks so large files are never fully loaded.
    The file handle is now closed via ``with`` even on errors (the
    original leaked it), and the bare ``except:`` is narrowed so
    KeyboardInterrupt/SystemExit are no longer swallowed.
    """
    # Function source originally from : https://gist.github.com/juusimaa/5846242.
    md5 = hashlib.md5()
    try:
        with open(path, 'rb') as file:
            # iter() with a sentinel yields chunks until read() returns b''.
            for data in iter(lambda: file.read(block_size), b''):
                md5.update(data)
    except IOError:
        print('File \'' + path + '\' not found!')
        return None
    except Exception:
        return None
    return md5.hexdigest()
import re
def make_regex(pattern, guard_symbol=None):
    """
    Expand format-style shorthands in *pattern* ({s} -> horizontal
    whitespace class, {guard} -> *guard_symbol*) and compile the result
    with re.MULTILINE.
    """
    shorthands = {
        's': r"[ \t]",  # space characters
        'guard': guard_symbol,
    }
    expanded = pattern.format(**shorthands)
    return re.compile(expanded, re.MULTILINE)
from datetime import datetime
def artifact_dir_format(test_name: str) -> str:
    """
    Build the common target test directory name:
    ``<test_name>_<YYYY-MM-DDTHH:MM:SS>`` (seconds precision).
    """
    timestamp = datetime.now().isoformat(timespec='seconds')
    return f"{test_name}_{timestamp}"
def raw_counts_to_adu(raw_counts, fixed_offset, meanblck, nreadout):
    """Convert Kepler raw pixel counts to photometer ADU.

    Follows the Kepler Archive Manual (p23): the spacecraft subtracts a
    per-channel mean black value (so one compression table fits all
    channels) and then adds a large fixed offset ('LCFXDOFF'/'SCFXDOFF')
    to keep values positive.  This reverses both steps.
    """
    mean_black_total = meanblck * nreadout  # mean black removed over all reads
    return raw_counts + mean_black_total - fixed_offset
def create_successive_pairs_from_list(alist):
    """
    Given a list like ["berlin", "leipzig", "dresden", "cottbus", "munich"]
    return successive from/to pairs:
    [
        {"from": "berlin", "to": "leipzig"},
        {"from": "leipzig", "to": "dresden"},
        {"from": "dresden", "to": "cottbus"},
        {"from": "cottbus", "to": "munich"},
    ]
    Empty or single-element lists yield [].
    """
    # zip pairs each element with its successor -- no index arithmetic.
    return [{"from": src, "to": dst} for src, dst in zip(alist, alist[1:])]
def mod_inverse(a, n):
    """Return the inverse of a mod n.

    n must be prime.

    Implemented with the iterative extended Euclidean algorithm, tracking
    only the coefficient of ``a``.  The original had a dead guard that
    returned a *tuple* ``(1, 0, a)`` for n == 0, inconsistent with the
    documented int return type; that branch is removed (n must be prime,
    hence >= 2).

    >>> mod_inverse(42, 2017)
    1969
    """
    t, new_t = 0, 1          # Bezout coefficients of a
    r, new_r = n, a % n      # remainder sequence
    while new_r:
        q = r // new_r
        t, new_t = new_t, t - q * new_t
        r, new_r = new_r, r - q * new_r
    # For prime n (and a not a multiple of n), r == 1 here and t is the inverse.
    return t % n
import struct
def float_to_bin(f):
    """Convert a float into a binary string (big-endian IEEE-754 double,
    last bit dropped, leading zeros stripped, original sign bit appended)."""
    packed = struct.pack('>d', f)
    bits = ''.join(format(byte, '08b') for byte in packed)
    return bits[:-1].lstrip('0') + bits[0]
def dependency_mapping(dep):
    """Map a dependency tag to an EGrid tag.

    +-----------+-----------------------------------+
    | EGrid Tag | Dependency Tag                    |
    +===========+===================================+
    | S         | nsubj, csubj, csubjpass, dsubjpass|
    +-----------+-----------------------------------+
    | O         | iobj, obj, pobj, dobj             |
    +-----------+-----------------------------------+
    | X         | For any other dependency tag      |
    +-----------+-----------------------------------+
    """
    # Set membership is O(1) and clearer than list.count(dep) == 1.
    subjects = {'nsubj', 'csubj', 'csubjpass', 'dsubjpass'}
    objects = {'iobj', 'obj', 'pobj', 'dobj'}
    if dep in subjects:
        return 'S'
    if dep in objects:
        return 'O'
    return 'X'
from datetime import datetime
def output_src_addr(pnr, settings):
    """
    Build the PNR header line: '.' + source address + ' ' + current
    day/hour/minute.  Remote PNRs use their system id with 'RM' spliced
    after the first three characters; local ones use 'MOWRM1H'.
    """
    if pnr['remote_pnr']:
        system = pnr['remote_system']
        address = system[:3] + 'RM' + system[3:]
    else:
        address = 'MOWRM1H'
    stamp = datetime.now().strftime('%d%H%M')
    return '.' + address + ' ' + stamp
import uuid
def generate_name() -> str:
    """Generate a random identifier: an underscore plus a UUID4 hex string."""
    return "_{}".format(uuid.uuid4().hex)
def interp(val, array_value, array_ref):
    """
    Linearly interpolate ``array_value`` at ``val`` against ``array_ref``,
    clamping outside the range.  ``array_ref`` must be strictly increasing.
    """
    if val <= array_ref[0]:
        return array_value[0]
    if val > array_ref[-1]:
        return array_value[-1]
    # Find the first reference point at or above val.
    i = 1
    while val > array_ref[i]:
        i += 1
    span = array_ref[i] - array_ref[i - 1]
    weighted = (array_ref[i] - val) * array_value[i - 1] \
        + (val - array_ref[i - 1]) * array_value[i]
    return weighted / span
def nearest_ind(item_list, val):
    """Return the index of the element of *item_list* nearest to *val*.

    Ties go to the first (lowest-index) nearest element, as before.
    Single pass via min(); no intermediate error list.
    """
    return min(range(len(item_list)), key=lambda i: abs(item_list[i] - val))
from typing import Any
def proxy_property(attribute: str, proxy: str) -> Any:
    """Create a property that is a proxy to an attribute of an attribute.

    Example:

        class Foo:
            proxy = Something()
            bar = proxy_property("baz", "proxy")

        foo = Foo()

    then foo.bar would be proxied to foo.proxy.baz

    If attribute is "a" and proxy is "b", it proxies to ".a.b".
    While the proxy target is None, values are held in a per-property
    temporary slot instead.
    """
    # One-element list acts as the temporary storage closed over by both
    # accessors (shared by every instance, as in the nonlocal original).
    fallback = [None]

    def _get(obj: Any) -> Any:
        target = getattr(obj, proxy)
        if target is None:
            return fallback[0]
        return getattr(target, attribute)

    def _set(obj: Any, value: Any) -> None:
        target = getattr(obj, proxy)
        if target is None:
            fallback[0] = value
        else:
            setattr(target, attribute, value)

    return property(fget=_get, fset=_set)
def egcd(a, b):
    """
    Computes greatest common divisor (gcd) and coefficients of Bezout's
    identity for integers `a` and `b` via the extended Euclidean algorithm.

    Args:
        a (long): First integer number.
        b (long): Second integer number.

    Returns:
        (long, long, long): (gcd, s, t) such that a*s + b*t == gcd.
    """
    old_r, r = a, b
    old_s, s = 1, 0
    old_t, t = 0, 1
    while r > 0:
        q = old_r // r
        old_r, r = r, old_r - q * r
        old_s, s = s, old_s - q * s
        old_t, t = t, old_t - q * t
    return (old_r, old_s, old_t)
import re
def _preprocess_twitter_handles(input_text):
"""
Split twitter handles and hashtags into tokens when possible
尽可能将twitter句柄和主题标签拆分为标记
:param input_text:
:return:
>>> _preprocess_twitter_handles('The Buccaneers just gave a $19 million contract to a punter via @89YahooSports wow')
'The Buccaneers just gave a $19 million contract to a punter via @89 Yahoo Sports wow'
>>> _preprocess_twitter_handles('Congrats to my first ever Broski of the Week @CMPunk!')
'Congrats to my first ever Broski of the Week @CM Punk!'
"""
input_text = re.sub(r'([\w]+?)([A-Z])(?=[a-z0-9])', r'\1 \2', input_text)
return input_text | 599ccebf6b5e5b3d3e8ba23cac51c0a20c23da21 | 125,302 |
def get_list_keys(dct: dict):
    """Return all keys of *dct* as a list (insertion order preserved)."""
    # list(dct) iterates the keys directly -- no manual append loop needed.
    return list(dct)
def flatten(x):
    """Recursively flatten the nested list or tuple *x* and return it
    (result type follows the outermost container)."""
    if not x:
        return x
    head, tail = x[:1], x[1:]
    if isinstance(x[0], (list, tuple)):
        return flatten(x[0]) + flatten(tail)
    return head + flatten(tail)
from datetime import datetime
def part_of_day() -> str:
    """Return 'Morning', 'Afternoon', 'Evening' or 'Night' for the current
    local hour (5-11, 12-15, 16-19, otherwise)."""
    hour = datetime.now().hour
    if 5 <= hour <= 11:
        return 'Morning'
    if 12 <= hour <= 15:
        return 'Afternoon'
    if 16 <= hour <= 19:
        return 'Evening'
    return 'Night'
def is_in_boundary(x, start, end):
    """Return True if x lies in the closed interval [start, end]."""
    # Chained comparison is the idiomatic (and single-evaluation) form.
    return start <= x <= end
def get_parameters_from_style(style_name):
    """
    Split a font style name on '-' and return the individual parameters.
    """
    return style_name.split('-')
def failing_write_handler(pv, value, timestamp, context):
    """Write handler that rejects every write by returning False."""
    return False
def get_feature_type(f_feature_names):
    """Read a tab-separated "name<TAB>type" file and return a name->type dict.

    Uses ``with`` so the file is closed even if a line fails to parse
    (the original leaked the handle on error).
    """
    feature_types = {}
    with open(f_feature_names, mode='r', encoding='utf-8') as fi:
        for line in fi:
            f_name, f_type = line.rstrip().split("\t")
            feature_types[f_name] = f_type
    return feature_types
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.