content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import typing
def parse_xlsx(worksheet) -> typing.List[typing.List]:
    """Extract all non-empty rows from an openpyxl worksheet.

    Args:
        worksheet: An openpyxl worksheet ready to parse.

    Returns:
        List of per-row value lists, with rows containing only
        empty/falsy cells dropped.
    """
    rows = []
    for row in worksheet.rows:
        values = [cell.value for cell in row]
        # Keep the row only if at least one cell holds a truthy value.
        if any(values):
            rows.append(values)
    return rows
import itertools
def partition_iter(pred, iterable):
    """Split *iterable* into two lazy streams by a predicate.

    :returns: a tuple (satisfiers, unsatisfiers) of lazy iterators over
        elements that do / do not satisfy *pred*.
    """
    matches, rejects = itertools.tee(iterable, 2)
    satisfiers = (item for item in matches if pred(item))
    unsatisfiers = (item for item in rejects if not pred(item))
    return satisfiers, unsatisfiers
def MovingAvg(Ls, w=3):
    """Moving average of input data over a window of +/- w points.

    Args:
        Ls: Sequence of numbers.
        w: Half-width of the averaging window (default 3).

    Returns:
        List of averages, one per input point: the mean of
        Ls[i-w:i+w] clipped to the sequence bounds. Fix: edge windows
        are divided by their true length instead of a fixed 2*w, so
        boundary averages are no longer biased low.
    """
    result = []
    for i in range(len(Ls)):
        window = Ls[max(0, i - w):min(len(Ls), i + w)]
        result.append(sum(window) / len(window))
    return result
def to_uri(bucket: str, key: str) -> str:
    """Build an S3 URI of the form ``s3://bucket/key``.

    Args:
        bucket: The S3 bucket name.
        key: The S3 object key.

    Returns:
        The assembled S3 URI string.
    """
    return 's3://' + bucket + '/' + key
from typing import List
def pow2(x: int) -> List[int]:
    """
    Decompose a number into powers of 2.

    :param x: number to decompose
    :type x: int
    :return: ascending list of powers of 2 that sum to x (empty for x <= 0)
    :rtype: List[int]
    """
    # Each set bit n of x contributes the power 2**n; clamping to 0
    # keeps non-positive inputs yielding an empty list.
    return [1 << n for n in range(max(x, 0).bit_length()) if (x >> n) & 1]
def generate_glob_by_extension(extension):
    """
    Generates a glob that matches the given extension, case-insensitively.

    Example
    -------
    For '.po' files, the generated glob is '*.[pP][oO]'
    """
    stem = extension.lstrip(".")
    # One [xX] character class per extension character.
    pattern = "".join("[{0}{1}]".format(c, c.upper()) for c in stem)
    return "*.{0}".format(pattern)
def generate_previous_list(next_list):
"""
Generate the expected list of previous values given a list of next values.
Lags the next list, holds the last valid number, and adds None to the
front. e.g.
[0,1,2,None,None,None] -> [None, 0,1,2,2,2]
"""
# Add none and lag the list
previous_list = [None] + next_list[:-1]
# Hold the last valid number by replacing None with last valid number
idx_last_valid = 1
for i in range(1, len(previous_list)):
if previous_list[i] is None:
previous_list[i] = previous_list[idx_last_valid]
else:
idx_last_valid = i
assert len(next_list) == len(previous_list)
return previous_list | 2982e25cc0459a56c87d3f7460b0973667d90283 | 97,935 |
from typing import List
def tokenize_char(text: str) -> List[str]:
    """
    Split a string into one token per character.

    Args:
        text: String to be tokenized.
    Returns:
        List of the string's individual characters.
    """
    return [char for char in text]
def print_write(string: str, file):
    """
    Write a line of text to a file while also echoing it to the console.

    :param string: string to write
    :param file: writable text file object
    :return: True
    """
    # file=None makes print fall back to stdout for the console echo.
    for destination in (file, None):
        print(string, file=destination)
    return True
import requests
import warnings
def request_fulltextXML(ext_id):
    """
    Requests a fulltext XML document from the ePMC REST API. Raises a warning if this is not
    possible.

    Fix: the original had an unreachable ``pass`` after two identical
    returns; collapsed to a single return with a warning on failure.

    Parameters
    ----------
    ext_id : String
        ePMC identifier used to retrieve the relevant entry. Format is prefix of 'PMC'
        followed by an integer.

    Returns
    -------
    r : `Requests.Response <http://docs.python-requests.org/en/master/api/#requests.Response>`_
        The response to the query served up by the requests package
        (returned even on failure so the caller can inspect it).
    """
    request_url = "https://www.ebi.ac.uk/europepmc/webservices/rest/" + ext_id + "/fullTextXML"
    r = requests.get(request_url)
    if r.status_code != 200:
        warnings.warn("request to " + str(ext_id) + " has failed to return 200, and has returned " + str(r.status_code))
    return r
def format_correct_answer(category, correct_answer):
    """
    Format the correct answer for read-back based on the category:
    spelling answers are spelled out letter by letter, 'repeat' answers
    get ellipses between words so they are spoken more slowly.
    """
    formatted = correct_answer
    if category == 'spelling_backwords':
        # Insert "... " between every character so she spells it out.
        formatted = correct_answer.replace("", "... ")[1: -1]
    elif category == 'repeat':
        # Ellipses between words slow down the read-back.
        formatted = correct_answer.replace(" ", "... ")
    return formatted
import random
def positive_intensifier(dirty):
"""Extend a statement of positive feelings."""
r = random.random()
if r <= (0.2):
verb = random.choice(["say", "do"])
return ", and there's nothing anyone can %s to stop you" % verb
elif r <= (0.4):
return ", and you don't care who knows it"
elif r <= (0.6):
return ", and you should tell everyone about it"
elif r <= (0.8):
return ", and you must post about it on Twitter"
else:
return ", and you must record it in your journal" | 838f59117a2ee50a63ab846bd7edad4e75298db1 | 97,944 |
def minhash_containment(s, t):
    """
    Calculate the MinHash estimate of the Jaccard Containment of s in t.

    Fixes: the docstring documented a nonexistent ``n`` parameter, and
    an empty ``s`` raised ZeroDivisionError; it now returns 0.0.

    Parameters
    ----------
    s, t : set of int
        Set signatures, as returned by `signature`.

    Returns
    -------
    float
        |s & t| / |s|, or 0.0 when `s` is empty.
    """
    if not s:
        return 0.0
    return len(s & t) / len(s)
def rotatebits(bits: int) -> int:
    """Reverse the order of the 8 bits in a byte.

    Args:
        bits: int holding the 8 bits to reverse

    Returns:
        int value with bit k moved to bit 7-k.

    Bug fix: the original iterated ``range(7, 0, -1)``, which skips
    bit 0 entirely, so e.g. 0x01 mapped to 0 instead of 0x80.
    """
    return sum(
        ((bits & (1 << bit)) >> bit) << (7 - bit) for bit in range(8)
    )
def avg_and_total(iterable):
    """Return (average, total) of a numeric iterable.

    Raises ZeroDivisionError when the iterable is empty.
    """
    count = 0
    total = 0.0
    for value in iterable:
        total += value
        count += 1
    return total / count, total
def get_site_table(hpo_id, table):
    """
    Return the hpo-specific cdm table name for a site.

    :param hpo_id: identifies the hpo site as str
    :param table: identifies the cdm table as str
    :return: cdm table name for the site as str
    """
    return '{}_{}'.format(hpo_id, table)
def get_containing_box(shape: tuple):
    """Returns box that contains complete array of shape ``shape``.

    The box is a (lower_corner, upper_corner) pair: an all-zero list
    of the same rank, and the shape itself.
    """
    origin = [0 for _ in shape]
    return origin, shape
import configparser
def get_config_params(config_filedir):
    """
    Loads a config file and returns it as a dictionary.

    :param config_filedir: Directory and filename of config file to load
    :return: Dictionary keyed by entry name; values are converted to the
        data type named by the section they appear in ("FLOAT", "INT",
        "STRING", "BOOL"). Entries in unknown sections are skipped.
    """
    parser = configparser.ConfigParser()
    parser.read(config_filedir)
    # Map each known section name to the converter for its entries.
    converters = {
        "FLOAT": float,
        "INT": int,
        "STRING": lambda v: v,
        "BOOL": lambda v: v == "True",
    }
    info = {}
    for section in parser.sections():
        convert = converters.get(section)
        if convert is None:
            continue
        for key, val in parser.items(section):
            info[key] = convert(val)
    return info
import re
def parse_no_blacks(parts):
    """
    Parse whether or not an ad indicates that there are "no black clients" allowed.

    Fix: the ``found != None`` comparison is replaced with the
    idiomatic ``is not None`` identity check.

    Returns a tuple containing:
        [0]: Binary result (0/1) of whether or not ad specifies "no blacks allowed"
        [1]: The input strings, minus the sections indicating "no blacks allowed"
             (parts reduced to <= 2 characters are dropped entirely)
    """
    match_patterns = [r'no ?black', r'no ?african', r'no ?aa', r'white ?(guys|men|clients) ?only',
                      r'only ?white ?(guys|men|clients)']
    remove_patterns = [r'no ?black ?or ?african', r'no ?african ?or ?black', r'men', r'guys']
    output_parts = []
    output_val = 0
    for part in parts:
        o_part = part
        for m in match_patterns:
            if re.search(m, part) is not None:
                # Found a 'no blacks allowed' phrase.
                output_val = 1
                # Strip the longer compound phrases first, then the match.
                for p in remove_patterns:
                    o_part = re.sub(p, '', o_part)
                o_part = re.sub(m, '', o_part)
        # Append part to output (if it's not near-empty).
        if len(o_part) > 2:
            output_parts.append(o_part)
    return (output_val, output_parts)
def _LowerBound(values, value, pred):
"""Implementation of C++ std::lower_bound() algorithm."""
first, last = 0, len(values)
count = last - first
while count > 0:
i = first
step = count // 2
i += step
if pred(values[i], value):
i += 1
first = i
count -= step + 1
else:
count = step
return first | 3aa66e16bee418c20d95d455f408bcf6241dbbf5 | 97,984 |
import torch
def global_average(x, batch_lengths):
    """
    Block performing a global average over batch pooling.

    :param x: [N, D] input features
    :param batch_lengths: [B] list of batch lengths
    :return: [B, D] averaged features
    """
    pooled = []
    start = 0
    for length in batch_lengths:
        # Mean over this cloud's rows, then advance to the next slice.
        pooled.append(torch.mean(x[start:start + length], dim=0))
        start += length
    return torch.stack(pooled)
import math
def pts_to_secs(time_in_seconds: float, time_base: float, start_pts: float) -> float:
    """
    Converts a presentation timestamp with the given time base and
    start_pts offset to seconds.

    Returns:
        time_in_seconds (float): The corresponding time in seconds
        (infinity passes through unchanged).
    """
    if time_in_seconds != math.inf:
        return (time_in_seconds - start_pts) * float(time_base)
    return math.inf
def medel(data):
    """Returns the arithmetic mean of *data* (must be non-empty)."""
    total = sum(data)
    return total / len(data)
def toint(a):
    """ Convert string to int, scaling values that end in "k", "m" or "b".

    Generalized: the suffix check is now case-insensitive, so "5K"
    equals "5k" (the original raised ValueError on uppercase suffixes).

    :param a: numeric string, optionally suffixed with k/m/b
    :return: the scaled integer value
    """
    weights = {'k': 1000, 'm': 1000000, 'b': 1000000000}
    if len(a) > 1:
        suffix = a[-1].lower()
        if suffix in weights:
            return weights[suffix] * int(a[:-1])
    return int(a)
def filter_dict(d, keys):
    """Returns a subset of dictionary `d` restricted to `keys`.

    Keys missing from `d` are included with value None (dict.get
    semantics).
    """
    return {key: d.get(key) for key in keys}
def get_binary_url(version: str, arch: str) -> str:
    """
    Build the atx-agent release tarball URL for a version/architecture.
    """
    base = "https://github.com/openatx/atx-agent/releases/download"
    return "{0}/{1}/atx-agent_{1}_linux_{2}.tar.gz".format(base, version, arch)
import importlib
def get_dataset(name, base_dir=None):
    """Imports the named dataset module and returns an instance of it."""
    # Relative import of <package>.datasets.<name>.dataset.
    module_path = "..datasets.{}.dataset".format(name)
    lib = importlib.import_module(module_path, __name__)
    return lib.get_dataset(base_dir)
import torch
def FalseTensor(*size, device='cuda:0'):
    """
    Returns a Tensor of dtype torch.uint8 containing only zero (False) values.

    Parameters
    ----------
    *size : int
        the shape of the tensor
    device : str
        the device to store the tensor on

    Returns
    -------
    Tensor
        a uint8 tensor of zeros with the requested shape
    """
    falses = torch.zeros(*size, dtype=torch.uint8, device=device)
    return falses
def format_notes(section):
    """Format the single-entry "Notes" section as an admonition block."""
    assert len(section) == 1
    note_body = section[0].strip()
    return '!!! note "Notes"\n    {0}'.format(note_body)
def factorial(input_number: int) -> int:
    """
    Calculate the factorial of specified number
    >>> factorial(1)
    1
    >>> factorial(6)
    720
    >>> factorial(0)
    1
    >>> factorial(-1)
    Traceback (most recent call last):
        ...
    ValueError: factorial() not defined for negative values
    >>> factorial(0.1)
    Traceback (most recent call last):
        ...
    ValueError: factorial() only accepts integral values
    """
    # Validation order matters for the doctests: negativity is checked
    # before integrality.
    if input_number < 0:
        raise ValueError("factorial() not defined for negative values")
    if not isinstance(input_number, int):
        raise ValueError("factorial() only accepts integral values")
    result = 1
    for factor in range(2, input_number + 1):
        result *= factor
    return result
def get_hand_index_ranges(lines):
    """Return a list of (start, end) line-index pairs for each hand in `lines`.

    Start/end markers are matched by substring; pairs are zipped in
    order of appearance.
    """
    starts, ends = [], []
    # Single pass collects both marker kinds.
    for i, line in enumerate(lines):
        if "-- starting hand #" in line:
            starts.append(i)
        if "-- ending hand #" in line:
            ends.append(i)
    return list(zip(starts, ends))
import warnings
def deprecated(func):
    """
    A decorator for marking functions as deprecated. Using the wrapped
    function emits a DeprecationWarning.
    """
    def decorated(*args, **kwargs):
        message = 'Call to deprecated function %s.' % func.__name__
        warnings.warn(message, category=DeprecationWarning, stacklevel=2)
        return func(*args, **kwargs)
    # Manually propagate metadata (mirrors what functools.wraps does).
    decorated.__name__ = func.__name__
    decorated.__doc__ = func.__doc__
    decorated.__dict__.update(func.__dict__)
    return decorated
def _dfs_in(atom):
"""Traverse an Atom's incoming neighborhood iteratively with a depth-first search."""
atoms = [atom]
stack = list(atom.incoming)
while stack:
atom = stack.pop(0)
atoms.append(atom)
stack[0:0] = atom.incoming
return atoms | 8e3557def098bdedea2507f298d781a3b51f7d3b | 98,031 |
import copy
def change_live_state(seed, new_state):
    """
    Return a deep copy of `seed` in which every cell in live state 1 has
    been replaced by `new_state`. State 0 cells and the input seed are
    left untouched.
    """
    # Replacing state 1 with itself would be a pointless no-op.
    assert new_state != 1
    new_seed = copy.deepcopy(seed)
    for col in range(new_seed.xspan):
        for row in range(new_seed.yspan):
            if new_seed.cells[col][row] == 1:
                new_seed.cells[col][row] = new_state
    return new_seed
def get_unique_pairs(es):
    """ Query to directly get matching pairs of PS nodes.
    Args:
        es (Elasticsearch object): Current elasticsearch connection object.
    Returns:
        dict: A double aggregation of sources and corresponding
        destinations, or None if the search raised (error is printed).
    """
    # Only consider documents with a real multi-hop path (avg > 1) and
    # enough samples (value_count > 1000).
    hop_filters = [
        {"range": {"n_hops.avg": {"gt": 1}}},
        {"range": {"n_hops.value_count": {"gt": 1000}}},
    ]
    # Bucket by source, then by destination within each source bucket.
    aggregations = {
        "sources": {
            "terms": {"field": "src", "size": 60},
            "aggs": {
                "destinations": {
                    "terms": {"field": "dest", "size": 59}
                }
            }
        }
    }
    query = {
        "query": {"bool": {"filter": hop_filters}},
        "aggs": aggregations,
    }
    try:
        return es.search(index="trace_derived_v2", body=query)
    except Exception as e:
        print(e)
def search(bs, arr):
    """
    Binary search for `bs` in the sorted array `arr`.

    :param bs: int
        Value to search for.
    :param arr: array
        Sorted list of ints to search in.
    :return: string
        "<value> is on position <index>" (index in the original array),
        or "cannot found" when the value is absent.

    Bug fixes: the recursive original reported the matched *value*
    instead of its index, and recursed on ``arr[mid:]`` without
    excluding ``mid``, which never terminated for absent values.
    """
    low, high = 0, len(arr) - 1
    while low <= high:
        mid = (low + high) // 2
        if arr[mid] == bs:
            return str(bs) + ' is on position ' + str(mid)
        if arr[mid] < bs:
            low = mid + 1
        else:
            high = mid - 1
    return "cannot found"
def bool_string_to_int(value):
    """
    Convert a boolean string to the corresponding integer value.

    :param value: e.g. 'TRUE'
    :return: 1 if string is 'true' (case-insensitive), 0 otherwise
    :throws: AttributeError if value is not a string
    """
    is_true = value.lower() == 'true'
    return int(is_true)
def get_venv_command(venv_path: str, cmd: str) -> str:
    """Return the shell command string that runs `cmd` inside the
    virtual env located at `venv_path`."""
    activate = f"source {venv_path}/bin/activate"
    return f"{activate} && {cmd}"
def has_tag(lst, tag):
    """ Checks whether `lst` (a sprite or list of sprites) contains a
    sprite with the given tag.

    Bug fix: the original had ``else: return False`` inside the loop,
    so it returned False as soon as the *first* element did not match
    and never examined later elements. Every element is now checked.
    Returns False (not None) for an empty list.
    """
    if not isinstance(lst, list):
        lst = [lst]
    return any(sprite.tag == tag for sprite in lst)
def findall(string, sub):
    """Starting indices of all non-overlapping occurrences of a substring
    within a string.
    E.g. findall("Allowed Hello Hollow","ll") -> [1,10,16]"""
    hits = []
    if sub:
        start = 0
        while True:
            found = string.find(sub, start)
            if found == -1:
                break
            hits.append(found)
            # Resume after the full match (occurrences do not overlap).
            start = found + len(sub)
    return hits
def menu(texte, data):
    """
    Prompt with `texte` until the answer is an int that indexes into
    `data`; return that answer (still as a string).

    Fixes: the bare ``except`` (which also swallowed KeyboardInterrupt
    and SystemExit) is narrowed to the lookup/parse errors that signal
    invalid input, and the ``j = 1 / while j`` pseudo-flag is replaced
    by ``while True``.
    """
    while True:
        choice = input(texte)
        try:
            data[int(choice)]
            return choice
        except (ValueError, IndexError, KeyError, TypeError):
            # Non-numeric input or out-of-range index: re-prompt.
            print("\nMerci de taper un chiffre parmis les choix proposés\n\n")
import torch
def val_loop(model, loss_fn, samples, labels, batch_size, seq_len, device='cpu', pre_trained=False):
    """
    Standard pytorch validation loop skeleton, using the helper loss function above.

    :param model: model to test
    :param loss_fn: loss function to evaluate with
    :param samples: data in
    :param labels: labels out
    :param batch_size: batch size
    :param seq_len: sequence length
    :param device: device to put tensors on
    :param pre_trained: are we using pre-trained embeddings or indices?
    :return: loss and accuracy for evaluation

    NOTE(review): the loop body is an unimplemented stub (``pass``), so
    total_samples stays 0 and the final division raises
    ZeroDivisionError until the evaluation loop is filled in.
    """
    loss_total = 0
    acc_total = 0
    total_samples = 0
    with torch.no_grad():
        # Again, step through data taking batch_size*sequence_length sized steps
        # For each step, use your helper loss function to get a loss value and accuracy total
        # DO NOT STEP THE OPTIMIZER OR BACKPROP THE LOSS
        pass
    # Return loss and accuracy
    return loss_total, acc_total/total_samples
def prepare_collections_list(in_platform):
    """
    Map a satellite platform name to the list of DEA AWS collection
    names for that platform. Used only for the ArcGIS Pro UI/Toolbox.

    Parameters
    -------------
    in_platform : str
        Name of satellite platform, Landsat or Sentinel.

    Returns
    ----------
    List of DEA AWS collection names associated with the input satellite.

    Raises
    ----------
    ValueError if the platform is not Landsat or Sentinel.
    """
    collections = {
        'Landsat': ['ga_ls5t_ard_3', 'ga_ls7e_ard_3', 'ga_ls8c_ard_3'],
        'Sentinel': ['s2a_ard_granule', 's2b_ard_granule'],
    }
    if in_platform not in collections:
        raise ValueError('Platform must be Landsat or Sentinel.')
    return collections[in_platform]
import time
def now_epoch_usecs() -> int:
    """Integer micro-seconds since the Unix epoch for the current time."""
    seconds = time.time()
    return int(seconds * 10 ** 6)
def _escape_json_for_js(json_dumps_string):
"""
Escape output of JSON dumps that is safe to be embedded in a <SCRIPT> tag.
This implementation is based on escaping performed in
simplejson.JSONEncoderForHTML.
Arguments:
json_dumps_string (string): A JSON string to be escaped.
This must be the output of json.dumps to ensure:
1. The string contains valid JSON, and
2. That non-ascii characters are properly escaped
Returns:
(string) Escaped JSON that is safe to be embedded in HTML.
"""
json_dumps_string = json_dumps_string.replace("&", "\\u0026")
json_dumps_string = json_dumps_string.replace(">", "\\u003e")
json_dumps_string = json_dumps_string.replace("<", "\\u003c")
return json_dumps_string | d693946b5b2db3a66e913b80a9c29866caa872df | 98,071 |
def readlinear(fname):
    """Read a linear table file: each data line contributes two
    consecutive elements. Comments begin with '#'.

    Fix: the file is opened with a ``with`` statement so it is closed
    even when parsing raises (the original leaked the handle on error).

    :param fname: path of the file to read
    :return: flat list of ints; ``base=0`` auto-detects 0x.., 0o.. etc.
    """
    table = []
    with open(fname, 'r') as f:
        for line in f:
            line = line.strip()
            # Skip blank lines and comments.
            if not line or line.startswith('#'):
                continue
            columns = line.split()
            table.append(int(columns[0], base=0))
            table.append(int(columns[1], base=0))
    return table
def get_depth(tree):
    """
    Return the maximum `depth` attribute found in `tree` and all of
    its descendants (recursive).
    """
    child_depths = [get_depth(child) for child in tree.children]
    return max([tree.depth] + child_depths)
def is_rpc(command):
    """
    Decorate a function to signal it is a WCraaS RPC function and provide its command.
    >>> @is_rpc("awesome_command")
    ... def func(): pass
    ...
    >>> func.is_rpc
    True
    >>> func.rpc_command
    'awesome_command'
    """
    def mark(fn):
        # Tag the function in place; no wrapper function is introduced.
        fn.is_rpc = True
        fn.rpc_command = command
        return fn
    return mark
def skyserver_link(sdss_objid):
    """
    Build the SDSS DR14 SkyServer explorer link for a galaxy.

    Args:
        sdss_objid -- the sdss_objid value to use in link
    Returns:
        Viewer link based on sdss_objid value
    """
    base = 'http://skyserver.sdss.org/dr14/en/tools/explore/summary.aspx?id=%d'
    return base % sdss_objid
def create_name_behavior_id(name: str, team_id: int) -> str:
    """
    Reconstructs fully qualified behavior name from name and team_id
    :param name: brain name
    :param team_id: team ID
    :return: name_behavior_id
    """
    suffix = "?team=" + str(team_id)
    return name + suffix
def _use_vg_evaluator(dataset):
"""Check if the dataset uses the Cityscapes dataset evaluator."""
return dataset.name.startswith('vg') | 4a91b989678da1227f90983b3d91d15d5ccfb008 | 98,082 |
def prob2label(prod):
    """Threshold a prediction probability at 0.5.

    Args:
        prod: Probability of prediction (confidence); scalars and
            array-likes supporting `>` both work (elementwise).
    Returns:
        Truthy when prod > 0.5, falsy otherwise (bool for scalars).
    """
    label = prod > 0.5
    return label
def get_input_addrs(tx):
    """ Takes a tx object and returns its input addresses.

    Inputs without a 'prev_out'/'addr' entry (this happens for coinbase
    transactions) are skipped.

    Fix: removed the commented-out duplicate implementation and debug
    print left at the end of the original.
    """
    addrs = []
    for tx_input in tx['inputs']:
        try:
            addrs.append(tx_input['prev_out']['addr'])
        except KeyError:
            # Coinbase inputs carry no previous output address.
            continue
    return addrs
from functools import reduce
from operator import mul
def _prod(iterable):
"""Product of all items in an iterable."""
return reduce(mul, iterable, 1) | d45b056adfb412026d07782851a1e6144107d4fd | 98,098 |
import logging
def print_func(fn):
    """Decorator that logs the wrapped function's name before the call
    and a completion message after it."""
    name = fn.__name__
    def wrapper(*args, **kwargs):
        logging.info(name)
        result = fn(*args, **kwargs)
        logging.info("%s: done", name)
        return result
    return wrapper
def max_decimal_value_of_binary(num_of_bits):
    """
    Get the max decimal value representable in a fixed number of bits.

    Fix: computed arithmetically as 2**n - 1 instead of parsing a '1'
    string, which also makes num_of_bits == 0 return 0 instead of
    raising ValueError on int('', 2).

    :param num_of_bits: # of bits
    :type num_of_bits: int
    :return: max decimal value
    :rtype: int
    """
    return (1 << num_of_bits) - 1
def apply_to_dict_recursively(d, f):
    """Recursively apply a function to a document.

    This modifies the dict in place and returns it.

    Parameters
    ----------
    d: dict
        e.g. event_model Document
    f: function
        any func to be performed on d recursively
    """
    for key, val in d.items():
        if hasattr(val, 'items'):
            d[key] = apply_to_dict_recursively(d=val, f=f)
        # NOTE(review): this assignment runs even after the recursive
        # branch above, so `f` is applied to mapping values as well as
        # leaves (f(val) overwrites the recursion's result, which is the
        # same mutated object). Confirm the double handling of nested
        # dicts is intended.
        d[key] = f(val)
    return d
import six
def make_bytes(dictionary):
    """Encodes all text-string values in the dictionary to UTF-8 bytes
    and converts all other non-bytes values to their string form, UTF-8
    encoded. Values that are already bytes are kept as-is.

    Returns a copy of the dictionary; the original is not touched.

    Fix: replaced the Python-2 compatibility shims (six.binary_type,
    six.text_type, six.iteritems) with their exact Python 3 builtins
    (bytes, str, dict.items), removing the `six` dependency.
    """
    result = {}
    for key, value in dictionary.items():
        # Keep binary data as-is.
        if isinstance(value, bytes):
            result[key] = value
            continue
        # If it's not a string, convert it to one, then encode.
        if not isinstance(value, str):
            value = str(value)
        result[key] = value.encode('utf-8')
    return result
import torch
def relaxed_allclose(x, y, atol=0.01, pct_wrong_allowable=0.1):
    """Compare two torch tensors while tolerating a fraction of
    mismatched elements.

    `torch.allclose` fails if any single element differs; here up to
    `pct_wrong_allowable` of the elements may fall outside `atol`.
    """
    close = torch.isclose(x, y, atol=atol)
    num_wrong = close.numel() - close.sum()
    allowed = pct_wrong_allowable * close.numel()
    return num_wrong <= allowed
import math
def get_audio_pool_size(
    audio_frequency,
    audio_time,
):
    """
    Compute the pooling size used in AudioClassifyHead.
    args:
        audio_frequency (int): frequency dimension of the audio clip.
        audio_time (int): time dimension of the audio clip.
    returns:
        pool_size (list): [frequency kernel, time kernel] sizes of an
        avg pooling, in that order.
    """
    # Both dimensions are reduced by a factor of 16, rounded up.
    return [math.ceil(dim / 16) for dim in (audio_frequency, audio_time)]
def get_number_aliens_x(game_settings, alien_height):
    """Compute how many aliens fit in one column.

    (Despite the ``_x`` in the name, the computation uses the vertical
    screen dimension.)
    """
    usable_height = game_settings.screen_height - 2 * alien_height
    return int(usable_height / (2 * alien_height))
def centre_point(aabb):
    """Returns the centre point of the AABB (midpoint of its two
    corner points).
    """
    lower, upper = aabb[0], aabb[1]
    return (lower + upper) * 0.5
def num_examples_per_epoch(mode):
    """Returns the number of examples in the data set.

    Args:
        mode: 'train' or 'validation'.

    Raises:
        ValueError: for any other mode. (The original silently
        returned None, which hid typos from callers.)
    """
    sizes = {'train': 1281167, 'validation': 50000}
    try:
        return sizes[mode]
    except KeyError:
        raise ValueError("unknown mode: %r" % (mode,))
def replace_dict_keys(dicts, replace_list_dict):
    """ Rename the keys of `dicts` according to `replace_list_dict`.

    Parameters
    ----------
    dicts : dict
        Dictionary whose keys may be renamed.
    replace_list_dict : dict
        Mapping old-key -> new-key; keys absent from it are kept as-is.

    Returns
    -------
    replaced_dicts : dict
        Dictionary with renamed keys.

    Examples
    --------
    >>> replace_dict_keys({"a":1,"b":2,"c":3}, {"a":"x","b":"y"})
    {'x': 1, 'y': 2, 'c': 3}
    >>> replace_dict_keys({"a":1,"b":2,"c":3}, {"a":"x","e":"y"})
    {'x': 1, 'b': 2, 'c': 3}
    """
    return {replace_list_dict.get(key, key): value for key, value in dicts.items()}
def find_all_indexes(text=str, pattern=str) -> list:
    """Return a list of starting indexes of all occurrences of pattern
    in text, or an empty list if not found. An empty pattern matches at
    every index; overlapping occurrences are all reported.

    Complexity: O(n) best case, O(n*m) worst case, where n = len(text)
    and m = len(pattern).

    Fix: removed the commented-out slicing-based duplicate
    implementation that trailed the original.
    """
    # NOTE(review): asserts are stripped under -O; consider raising
    # TypeError instead if validation must survive optimized runs.
    assert isinstance(text, str), 'text is not a string: {}'.format(text)
    assert isinstance(pattern, str), 'pattern is not a string: {}'.format(text)
    if not pattern:
        return [i for i in range(len(text))]
    index_pos = []
    # Character-by-character comparison avoids slicing a new string
    # per candidate position.
    for i in range(len(text) - len(pattern) + 1):
        for j in range(len(pattern)):
            if text[i + j] != pattern[j]:
                break
        else:
            index_pos.append(i)
    return index_pos
import shelve
def load_update_stamps(field, sheet_name):
    """Load last issues/PRs update timestamps from the "last_updates" shelf.

    Args:
        field (str): Whether PRs or issues updates should be loaded.
        sheet_name (str): Name of the sheet.

    Returns:
        dict:
            Last update timestamps for every repo on this sheet
            (empty dict when nothing was stored yet).
    """
    with shelve.open("last_updates", "c") as lasts_file:
        per_sheet = lasts_file.setdefault(field, {})
        return per_sheet.get(sheet_name, {})
def get_gc_region(seqs, headers, positions):
    """
    Slice out the exact region of the fasta the primer amplified.

    Args:
        seqs (list): the list of sequences from flank_file
        headers (list): the list of headers from flank_file
        positions (list): tuples of (header, start, end) coordinates;
            end is inclusive.
    Returns:
        sliced_seq (list): (header, region length, region sequence)
        tuples for every position matching a header.
    """
    sliced_seq = []
    for header, seq in zip(headers, seqs):
        for name, start, end in positions:
            if name == header:
                region = seq[start:end + 1]
                sliced_seq.append((header, len(region), region))
    return sliced_seq
def insert_interval(intervals, new_interval):
    """Return a mutually exclusive list of intervals after inserting new_interval.

    Params:
        intervals: List(Interval), sorted and non-overlapping
        new_interval: Interval (mutated in place to the merged span)
    Return:
        List(Interval)

    Bug fix: the merge loop previously tested
    ``new_interval.start < intervals[i].end``, which kept absorbing
    intervals that start *after* new_interval ends (e.g. inserting
    [2,5] into [[1,3],[6,9]] wrongly produced [[1,9]]). The correct
    condition is ``intervals[i].start <= new_interval.end``.
    """
    length = len(intervals)
    if length < 1:
        return [new_interval]
    i, merged = 0, []
    # Intervals ending entirely before the new one starts.
    while i < length and intervals[i].end < new_interval.start:
        merged.append(intervals[i])
        i += 1
    # Absorb every interval overlapping the (growing) new interval.
    while i < length and intervals[i].start <= new_interval.end:
        new_interval.start = min(new_interval.start, intervals[i].start)
        new_interval.end = max(new_interval.end, intervals[i].end)
        i += 1
    merged.append(new_interval)
    # Remaining intervals after the new one.
    merged.extend(intervals[i:])
    return merged
def max_len(items):
    """ Return length of the longest item in a non-empty collection."""
    return max(map(len, items))
def IDM_eql(p, v):
    """Equilibrium headway for IDM at velocity v with parameters p.

    Computes s = (p[2] + p[1]*v) / sqrt(1 - (v/p[0])**4), i.e. the
    headway corresponding to the equilibrium solution.
    """
    spacing_term = p[2] + p[1] * v
    return (spacing_term ** 2 / (1 - (v / p[0]) ** 4)) ** .5
def second(iterable):
    """
    Gets the second element from an indexable sequence. Returns None
    when the input is None or has fewer than two items.

    :param iterable: Indexable sequence (or None)
    :return: Second element from the sequence, or None

    Bug fix: the guard used ``len(iterable) < 1``, so a one-element
    sequence raised IndexError instead of returning None.
    """
    if iterable is None or len(iterable) < 2:
        return None
    return iterable[1]
from typing import Dict
import random
def sample_from_distr(distribution: Dict[int, float]) -> int:
    """Sample one key from a distribution, weighted by its float values.

    Parameters
    ----------
    distribution : Dict[int, float]

    Returns
    -------
    int
        Sampled int key weighted by float values
    """
    population = list(distribution.keys())
    weights = list(distribution.values())
    (sampled,) = random.choices(population=population, weights=weights, k=1)
    return sampled
def HandleLocaleQuery(locales_manager, locale):
    """Verifies passed arguments and issues a lookup of locale data.

    Args:
        locales_manager (LocalesManager object): Locale manager.
        locale (string): Name of the locale to be queried.
    Raises:
        LookupError: If an error occurred during lookup, e.g. the
            requested locale is unknown.
    Returns:
        (dict) Data about the requested locale.
    """
    try:
        found = locales_manager.Locale(locale)
    except KeyError as e:
        # Normalize manager-level KeyErrors to the documented LookupError.
        raise LookupError(e)
    return {
        'locale': {
            'name': found.name,
            'long_name': found.long_name,
            'latitude': found.latitude,
            'longitude': found.longitude,
        },
        'parent': found.parent,
        'children': found.children,
    }
def remove_prefixes(text: str, prefixes: list) -> str:
    """Strip the first matching prefix (case-insensitive) from a text string.

    Arguments:
        text: Text string where a prefix should be removed
        prefixes: Strings to try removing from the beginning of the text
    Returns:
        text with the first matching prefix removed and the remainder
        whitespace-stripped; the text unchanged if no prefix matches.
    """
    lowered = text.lower()
    for prefix in prefixes:
        if lowered.startswith(prefix.lower()):
            return text[len(prefix):].strip()
    return text
def count_samples(input_data):
    """
    Count number of samples in the input data

    Parameters
    ----------
    input_data : array-like or list/tuple of array-like objects
        Input data to the network

    Returns
    -------
    int
        Number of samples in the input data.
    """
    # A list/tuple here means multiple network inputs that share one
    # sample axis, so the first element's length is the sample count.
    if isinstance(input_data, (list, tuple)):
        input_data = input_data[0]
    return len(input_data)
def get_long_words(tg, threshold=127):
    """
    Returns all words that are longer than a given threshold.

    :param tg: (TermGenerator) Object with parsed input.
    :param threshold: (int) Minimum length (in characters) of the words.
    :return: (List) All words longer than the threshold.
    """
    return list(filter(lambda term: len(term) > threshold, tg.terms))
def get_neighboors(board, x, y):
    """Return the values of every 8-connected neighbour of (x, y)
    that is not "0".

    Args:
        board (np 2D array): the state of the game
        x (int): x coordinate to check
        y (int): y coordinate to check
    """
    height = board.shape[0]
    width = board.shape[1]
    values = []
    for dx in (-1, 0, 1):
        for dy in (-1, 0, 1):
            # Skip the centre cell itself.
            if dx == 0 and dy == 0:
                continue
            col = x + dx
            row = y + dy
            # Skip neighbours that fall outside the board.
            if not (0 <= col < width and 0 <= row < height):
                continue
            if board[row, col] != 0:
                values.append(board[row, col])
    return values
def create_previous_run(path, suffix):
    """Create a mock previous run of the tool."""
    run_dir = path / f'recipe_test_{suffix}'
    recipe_file = run_dir / 'run' / 'recipe_test.yml'
    # Build the directory tree first, then drop the dummy recipe in it.
    recipe_file.parent.mkdir(parents=True)
    recipe_file.write_text('test')
    return run_dir
def _get_finished_states(entity):
"""
Member name is either:
operationState' (NS, NSI)
'_admin.'operationalState' (VIM, WIM, SDN)
For NS and NSI, 'operationState' may be one of:
PROCESSING, COMPLETED,PARTIALLY_COMPLETED, FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
For VIM, WIM, SDN: '_admin.operationalState' may be one of:
ENABLED, DISABLED, ERROR, PROCESSING
:param entity: can be NS, NSI, or other
:return: two tuples with status completed strings, status failed string
"""
if entity == 'NS' or entity == 'NSI':
return ('COMPLETED', 'PARTIALLY_COMPLETED'), ('FAILED_TEMP', 'FAILED')
else:
return ('ENABLED', ), ('ERROR', ) | 76f9ec909a8161b0c7d5a3f4fcd8ce3c4e20e0dd | 98,186 |
import re
def to_camel_case(text):
    """
    https://www.codewars.com/kata/517abf86da9663f1d2000003/train/python
    Convert dash/underscore delimited words into camel casing. The first
    word keeps its original capitalisation (Upper Camel Case / Pascal case
    is preserved). Examples
    "the-stealth-warrior" gets converted to "theStealthWarrior"
    "The_Stealth_Warrior" gets converted to "TheStealthWarrior"
    :param text: the text to convert to camel case
    :return: the text as camel case
    """
    camel = ''
    if text:
        for piece in re.split("[-_]", text):
            # The first non-empty piece is kept verbatim; later pieces
            # get their first character upper-cased.
            if camel:
                camel += piece[:1].upper() + piece[1:]
            else:
                camel += piece
    return camel
def digits(n, string=False):
    """Get the digits of a number as a list of numbers
    `string` if True, return a list of strings
    """
    chars = list(str(n))
    if string:
        return chars
    return [int(ch) for ch in chars]
def mean_free_path(T, P, lamb_0=67.3, T_0=296.15, P_0=101325, S=110.4):
    """ calculates mean free path of the aerosol particles

    Parameters
    ----------
    T : float
        measurement temperature
    P : float
        measurement pressure
    lamb_0 : float
        reference mean free path
    T_0 : float
        reference temperature
    P_0 : float
        reference pressure
    S : float
        Sutherland constant of air

    Returns
    -------
    mean_free_path : float
        mean free path at T and P
    """
    sutherland_factor = (T_0 + S) / (T + S)
    pressure_factor = P_0 / P
    temperature_factor = (T / T_0) ** 2
    return lamb_0 * sutherland_factor * pressure_factor * temperature_factor
def tflip(tup):
    """
    Flips tuple elements.
    This is useful for list to screen coordinates translation.
    In list of lists: x = rows = vertical
    whereas on screen: x = horizontal
    """
    first, second = tup[0], tup[1]
    return (second, first)
def pro(A, B):
    """
    Compute the trace of the matrix product between A and B.
    Arguments:
    A,B -- square matrices
    Return:
    prod -- Scalar trace of the product
    """
    product = A.dot(B)
    return product.trace()
import ssl
import aiohttp
def get_os_session(*, os_cacert, insecure, log):
    """
    Returns a secure - or insecure - HTTP session depending
    on configuration settings.
    Works for both HTTP and HTTPS endpoints.

    :param os_cacert: path to a CA certificate bundle; when set, TLS
        verification trusts only this bundle.
    :param insecure: when true (and no cacert is given), certificate
        verification is disabled entirely.
    :param log: logger used to report insecure/misconfigured cases.
    :return: an aiohttp.ClientSession, or None (implicitly) when neither
        ``os_cacert`` nor ``insecure`` is provided.
    """
    if os_cacert:
        # Verified TLS: build an SSL context trusting the provided CA bundle.
        ssl_context = ssl.create_default_context(cafile=os_cacert)
        return aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl_context=ssl_context))
    if insecure:
        log.warning('Insecure connection to OpenStack')
        # NOTE(review): verify_ssl is deprecated in newer aiohttp in favour
        # of ssl=False -- confirm the pinned aiohttp version still accepts it.
        return aiohttp.ClientSession(connector=aiohttp.TCPConnector(verify_ssl=False))
    # Misconfiguration: no trust source and insecure mode not allowed.
    # Logs an error and falls through, implicitly returning None.
    log.error('No cacert provided and insecure parameter not specified')
def add(x, y):
    """Return the sum of the two given values."""
    total = x + y
    return total
def divisors(n):
    """Return the integers that evenly divide n.

    >>> divisors(1)
    [1]
    >>> divisors(4)
    [1, 2]
    >>> divisors(12)
    [1, 2, 3, 4, 6]
    >>> [n for n in range(1, 1000) if sum(divisors(n)) == n]
    [1, 6, 28, 496]
    """
    # 1 divides everything; n itself is deliberately excluded
    # (proper divisors), which makes the perfect-number doctest work.
    result = [1]
    for candidate in range(2, n):
        if n % candidate == 0:
            result.append(candidate)
    return result
def quad(x):
    """ Quadratic function (parabola): x**2 / 2 + 3 """
    return x * x / 2 + 3
def cb_len(begin, end, size):
    """Circular buffer length primitive."""
    # Modulo wraps the distance when end has lapped around the buffer.
    distance = end - begin
    return distance % size
def find_field(data_to_filter, field_name, field_value):
    """Utility function to filter blackduck objects for specific fields

    Args:
        data_to_filter (dict): typically the blackduck object or subselection of this
        field_name (string): name of field to use in comparisons
        field_value (string): value of field we seek

    Returns:
        object: object if found or None.
    """
    for entry in data_to_filter:
        if entry.get(field_name) == field_value:
            return entry
    return None
def transpose(x, y, _):
    """Transpose rows and columns."""
    swapped = (y, x)
    return swapped
from typing import List
def remove_punc(sentences: List[List[str]]) -> List[List[str]]:
    """
    Remove punctuation from sentences.

    Parameters
    ----------
    sentences:
        List of tokenized sentences.
    """
    cleaned = []
    for sentence in sentences:
        cleaned_sentence = []
        for word in sentence:
            # Empty tokens are dropped entirely; non-empty tokens keep
            # only their alphabetic characters (possibly becoming "").
            if not word:
                continue
            cleaned_sentence.append("".join(filter(str.isalpha, word)))
        cleaned.append(cleaned_sentence)
    return cleaned
def eh_estrategia(strat):
    """
    Check whether a given string names a valid strategy.

    Parameters:
        strat (string): candidate strategy name
    Returns:
        (bool): True when the strategy is valid, False otherwise
    """
    return strat in ("basico", "normal", "perfeito")
def find_falling_hydrometeors(obs, is_liquid, is_insects):
    """Finds falling hydrometeors.

    Falling hydrometeors are radar signals that are
    a) not insects b) not clutter. Furthermore, falling hydrometeors
    are strong lidar pixels excluding liquid layers (thus these pixels
    are ice or rain).

    Args:
        obs (_ClassData): Container for observations.
        is_liquid (ndarray): 2-D boolean array of liquid droplets.
        is_insects (ndarray): 2-D boolean array of insects.

    Returns:
        ndarray: 2-D boolean array containing falling hydrometeors.
    """
    # Radar-based detection: valid (unmasked) Z pixel that is neither
    # clutter nor an insect.
    radar_falling = ~obs.z.mask & ~obs.is_clutter & ~is_insects
    # Lidar-based detection: strong, unmasked backscatter outside liquid
    # layers (ice or rain).
    strong_lidar = ~obs.beta.mask & (obs.beta.data > 1e-6)
    lidar_falling = strong_lidar & ~is_liquid
    return radar_falling | lidar_falling
def read_from_file(filename):
    """Read from file passed as argument."""
    with open(filename, "r") as handle:
        contents = handle.read()
    return contents
import pickle
def load_training_data(filename):
    """Load Pickle file and return training data."""
    with open(filename, 'rb') as handle:
        return pickle.load(handle)
def flatten2list(object) -> list:
    """Flatten to list nested objects of type list, tuple, sets."""
    flat = []
    for element in object:
        # Recurse into nested containers; everything else is a leaf.
        if isinstance(element, (list, tuple, set)):
            flat += flatten2list(element)
        else:
            flat.append(element)
    return flat
def parse_api_response(response):
    """Parse the response from the API. """
    faces, persons = [], []
    for obj in response.json()['objects']:
        kind = obj['type']
        if kind == 'face':
            faces.append(obj)
        elif kind == 'person':
            persons.append(obj)
    return faces, persons
def time_converter(seconds):
    """Converts time (in seconds) in more understandable format.

    Example: time_conversion(131623.456) --> Script execution
    finished after 1 day, 12 hours and 33 minutes.

    :param seconds: elapsed time in seconds (int or float)
    :return: human-readable string whose level of detail depends on the
        magnitude of the input (microseconds up to days)
    """
    total_seconds = seconds
    days = seconds // (24 * 3600)
    seconds = seconds % (24 * 3600)
    hours = seconds // 3600
    seconds %= 3600
    minutes = seconds // 60
    seconds %= 60
    # Bug fix: one second contains 1,000,000 microseconds; the original
    # multiplied the fractional part by 1,000 (milliseconds) while naming
    # and reporting the value as "microsecond(s)".
    microseconds = (seconds % 1) * 1000000
    days, hours, minutes, seconds, microseconds = (
        int(days),
        int(hours),
        int(minutes),
        int(seconds),
        int(microseconds),
    )
    if total_seconds < 1:
        return f"{microseconds} microsecond(s)"
    elif 1 <= total_seconds < 60:
        return f"{seconds} second(s) and {microseconds} microsecond(s)"
    elif 60 <= total_seconds < 3600:
        return f"{minutes} minute(s), {seconds} second(s) and {microseconds} microsecond(s)"
    elif 3600 <= total_seconds < 86400:
        return f"{hours} hour(s), {minutes} minute(s) and {seconds} second(s)"
    else:
        # total_seconds >= 86400: at least one full day. Using `else`
        # guarantees every numeric input gets a string back.
        return f"{days} day(s), {hours} hour(s) and {minutes} minute(s)"
import random
def shuffle(deck):
    """Return an iterator over the cards of *deck* in random order."""
    cards = list(deck)
    random.shuffle(cards)
    return iter(tuple(cards))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.