content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
|---|---|---|
def to_string(recipe, num_servings):
    """Build a printable description of ``recipe`` scaled to ``num_servings``.

    Ingredient amounts are rescaled by num_servings / recipe['num_servings'].
    Returns the description as a list of lines (no trailing newlines).
    """
    lines = [
        "Recipe for {}, {} servings:".format(recipe['name'], num_servings),
        "",
        "Ingredients:",
        "",
    ]
    for item in recipe['ingredients']:
        # Scale each amount from the recipe's base serving count.
        scaled_amount = item['amount'] * num_servings / recipe['num_servings']
        lines.append("    {} - {} {}".format(item['ingredient'],
                                             scaled_amount,
                                             item['units']))
    lines.extend(["", "Instructions:", ""])
    for step_number, step in enumerate(recipe['instructions'], start=1):
        lines.append("{}. {}".format(step_number, step))
    return lines
|
714cc696a1032786d582de64e0654d0f5eeb0a2e
| 70,432
|
def int2bin(num, len_):
    """Return the ``len_`` least-significant bits of ``num`` as a 0/1 list.

    E.g. ``int2bin(5, 4)`` -> ``[0, 1, 0, 1]``; values wider than ``len_``
    are truncated to the low bits, e.g. ``int2bin(12, 3)`` -> ``[1, 0, 0]``.

    Input
    -----
    num: int
        Non-negative integer to convert to binary representation.
    len_: int
        Non-negative length of the desired output.
    Output
    -----
    : list of int
        Binary representation of ``num`` of length ``len_``.
    """
    assert num >= 0, "Input integer should be non-negative"
    assert len_ >= 0, "width should be non-negative"
    # Extract bits from most to least significant via right shifts.
    return [(num >> shift) & 1 for shift in range(len_ - 1, -1, -1)]
|
9c2ce701a54178510a01b3217893f44ac9389fd2
| 564,924
|
def _get_list_value(index, array):
"""
helper operation to loop a list of values regardless of the index value
Example:
>>> a = [111,222,333]
>>> list_loop(15, a)
111
"""
if len(array) == 0:
return None
elif index >= 0 and index < len(array):
return array[index]
return array[index % len(array)]
|
641df4a2c3f795a9a3fa18d413afd88ddee8a1f3
| 226,071
|
def _encode_Bool_Flag(bBoolFlag : bool) -> int:
"""
Description
-----------
Method encoding a bool value into an integer flag.
Parameters
----------
`bBoolFlag` : bool
Boolean flag which should be converted into an integer value
Return
------
`iEncodedFlag` : int
Boolean flag encoded as integer (0 : True | 1 : False)
"""
if (bBoolFlag == True):
return 1
elif (bBoolFlag == False):
return 0
|
1df72f50b6b3e8206a09634386cd14f308f931f5
| 694,865
|
import string
import random
def generate_random_file_name(length = 5) -> str:
    """Produce a random file name of ``length`` characters.

    Characters are drawn without replacement from ASCII letters and digits,
    preferring a cryptographically strong RNG when the platform provides one.

    :param length: file name length
    :return: the file name
    """
    alphabet = list(string.ascii_letters + string.digits)
    try:
        # SystemRandom raises NotImplementedError when os.urandom is absent.
        rng = random.SystemRandom()
    except NotImplementedError:
        rng = random
    return ''.join(rng.sample(alphabet, length))
|
f017b4d3e922969fd89f5d168806a0aaf9637e71
| 191,696
|
def first_line(filepath):
    """Return the first line of the file at ``filepath``, stripped of
    surrounding whitespace.  No exception handling for I/O errors.

    Unlike the previous readlines()-based version, this reads only one
    line instead of loading the entire file, and returns '' (instead of
    raising IndexError) for an empty file.
    """
    with open(filepath) as f:
        return f.readline().strip()
|
3ec61cd56ef544c37c2652ed9c0a344335cd7be0
| 370,947
|
def _parse_str_to_dict(x):
"""Convert "k1:v1,k2:v2" string to dict
Args:
x (str): Input string
Returns:
dict: Dictionary {"k1": "v1", "k2": "v2"}
"""
d = {}
for p in x.split(","):
if ":" in p:
k, v = p.split(":")
d[k] = v
return d
|
5659b55900adddfd856a4c97d03406747a8e2603
| 677,030
|
def fcc_coord_test(x, y, z): # dist2 = 2, 4, 6, 12
    """Return True when (x, y, z) lies on the FCC lattice (even coordinate sum)."""
    coordinate_sum = x + y + z
    return coordinate_sum % 2 == 0
|
0c0c893e4eb5da155da7dca97e7ca06bd30ad48c
| 440,539
|
def ilen(iterable):
    """Count the items produced by ``iterable`` (consumes it)."""
    count = 0
    for _ in iterable:
        count += 1
    return count
|
2bfc1093becead69ee0915f3b7c30a3c248dcd39
| 476,296
|
def wrap_result(query):
    """
    Wraps a given MongoAlchemy result query in a simple Array so
    it can be transformed in a JSON object
    :param query:
    :return:
    """
    return [row.to_json() for row in query]
|
c21bb19fc66c9310facc6b252c27e4a5c641d5f3
| 219,308
|
def _stack_input_if_needed(a, b, dim, weights):
"""
Stack input arrays a, b if needed in correlation metrics.
Adapt dim and weights accordingly.
Parameters
----------
a : xarray.Dataset or xarray.DataArray
Labeled array(s) over which to apply the function.
b : xarray.Dataset or xarray.DataArray
Labeled array(s) over which to apply the function.
dim : list
The dimension(s) to apply the correlation along.
weights : xarray.Dataset or xarray.DataArray or None
Weights matching dimensions of ``dim`` to apply during the function.
Returns
-------
a : xarray.Dataset or xarray.DataArray stacked with new_dim
Labeled array(s) over which to apply the function.
b : xarray.Dataset or xarray.DataArray stacked with new_dim
Labeled array(s) over which to apply the function.
new_dim : str
The dimension(s) to apply the correlation along.
weights : xarray.Dataset or xarray.DataArray stacked with new_dim or None
Weights matching dimensions of ``dim`` to apply during the function.
"""
if len(dim) > 1:
new_dim = "_".join(dim)
a = a.stack(**{new_dim: dim})
b = b.stack(**{new_dim: dim})
if weights is not None:
weights = weights.stack(**{new_dim: dim})
else:
new_dim = dim[0]
return a, b, new_dim, weights
|
2cdde2e609bc57d9ae234a1894d2fb96bc902238
| 460,362
|
import toml
def load_features(path):
    """
    Loads a Cargo.toml file and extracts all features from it.
    :param path: The path to the Cargo.toml
    :return: A list of features this package has
    """
    with open(path, 'r') as manifest_file:
        manifest = toml.load(manifest_file)
    features = manifest.get('features')
    # Missing or empty [features] table means no features.
    return list(features) if features else []
|
58ff9723e5281da344c27e344e559b75245b9017
| 621,818
|
from datetime import datetime
def structured_data_datetime(dt):
    """
    Formats datetime object to structured data compatible datetime string.

    Returns "YYYY-MM-DDTHH:MM" when ``dt`` carries a time-of-day component,
    otherwise just "YYYY-MM-DD".
    """
    # Since Python 3.5 a midnight time object is truthy, so the old
    # ``if dt.time():`` always took the date+time branch; compare against
    # midnight explicitly instead.
    if dt.time() != datetime.min.time():
        return datetime.strftime(dt, "%Y-%m-%dT%H:%M")
    return datetime.strftime(dt, "%Y-%m-%d")
|
acab0ce0fd32280ce722c00da4b73ce2f18c3716
| 572,439
|
def cleanup_user(user):
    """Project a raw user dictionary onto the JSON-safe output shape.

    Only the ``_id`` field is exposed, stringified under the key ``id``.
    """
    raw_id = user["_id"]
    return {"id": str(raw_id)}
|
4ebd6abefbac839c26ebfd9c6a0503e51b3d48a5
| 46,694
|
def multiply_by_exponent(val, exponent=3, base=10):
    """
    Simple template tag: return ``val * base ** exponent`` when ``val``
    is a plain int, otherwise 0.
    Return
        int
    Params:
        val: int
            The integer to be multiplied
        exponent: int
            The exponent
        base: int
            The base
    """
    # Non-int input (including bool, whose type is not exactly int)
    # collapses to 0, matching the template-tag contract.
    multiplier = val if type(val) is int else 0
    return multiplier * (base ** exponent)
|
6423e4cd85d4f3ef0faa24de4eeebf2d7d1a017d
| 148,412
|
def get_three_frame_orfs(sequence, starts=None, stops=None):
    """Find ORF's in frames 1, 2 and 3 for the given sequence.
    Positions returned are 1-based (not 0)
    Return format [{'start': start_position, 'stop': stop_position, 'sequence': sequence}, ]
    Only complete ORFs (start codon through an in-frame stop codon) are
    reported; a start without a downstream stop yields nothing.
    Keyword arguments:
    sequence -- sequence for the transcript
    starts -- List of codons to be considered as start (Default: ['ATG'])
    stops -- List of codons to be considered as stop (Default: ['TAG', 'TGA', 'TAA'])
    """
    # None defaults avoid mutable default arguments.
    if not starts:
        starts = ['ATG']
    if not stops:
        stops = ['TAG', 'TGA', 'TAA']
    # Find ORFs in 3 frames (offsets 0, 1, 2 into the sequence).
    orfs = []
    for frame in range(3):
        start_codon = None  # 0-based position of the current ORF's start
        orf = ''            # accumulated sequence of the current ORF
        for position in range(frame, len(sequence), 3):
            # NOTE: the final slice may be shorter than 3 near the sequence
            # end; such a fragment can never match a start/stop codon.
            codon = sequence[position:position + 3]
            if codon in starts:
                # We have found a start already, so add codon to orf and
                # continue. This is an internal MET
                if start_codon is not None:
                    orf += codon
                    continue
                # New orf start
                start_codon = position
                orf = codon
            else:
                # if sequence starts with ATG, start_codon will be 0
                if start_codon is None:
                    # We haven't found a start codon yet
                    continue
                orf += codon
                if codon in stops:
                    # Record the completed ORF with 1-based coordinates
                    # (stop is the position of the stop codon's last base).
                    orfs.append({'start': start_codon + 1, 'stop': position + 3, 'sequence': orf})
                    # Reset scanner state for the next ORF in this frame.
                    start_codon = None
                    orf = ''
    return orfs
|
87b19974fc67e4577243737d0067ca5cf13d41e4
| 430,660
|
def get_dict_list_len(d, keys=None, negate=False):
    """Gets the sum of the lengths of each list in dict `d`
    Parameters
    ----------
    d : dict of str : list
        * dict to get sums of
    keys : list of str, optional
        * list of keys to get sums of; if None or empty, sums all keys
    negate : bool
        * only used if keys supplied
        * False : sum the entries of `d` whose keys match `keys`
        * True : sum the entries of `d` whose keys do not match `keys`
    Returns
    -------
    list_len : int
        * sum of lengths of lists in `d` that match keys
    """
    # ``None`` default replaces the old mutable default ``keys=[]``;
    # passing an explicit empty list still behaves identically.
    if not keys:
        return sum(len(v) for v in d.values())
    if negate:
        return sum(len(d[k]) for k in d if k not in keys)
    return sum(len(d[k]) for k in d if k in keys)
|
d51709cb8cbef30ba57638ef2c95ecb4340d7b9a
| 519,010
|
def int_to_bytearray(value: int, num_byte: int) -> bytearray:
    """
    Convert a long integer to a 'bytearray' object.
    Args:
        value: The long integer value to convert.
        num_byte: Number of bytes in the 'bytearray' object.
    Returns:
        The 'bytearray' object (big-endian); bits above ``num_byte * 8``
        are silently dropped.
    """
    out = bytearray(num_byte)
    for i in range(num_byte):
        # Fill from the least-significant byte backwards (big-endian layout).
        out[num_byte - 1 - i] = (value >> (i * 8)) & 0xff
    return out
|
2a144d43801e13c40f14e32892ae4c3932ac4646
| 604,417
|
def reducelabels(x, steps=10):
    """
    Replace most x labels with empty strings to de-clutter a plot axis.
    :param x: list of x data
    :param steps: integer with the number of x values to keep visible
    >>> x = [str(i) for i in range(0,10)]
    >>> reducelabels(x, steps=3)
    ['0', '', '', '3', '', '', '6', '', '', '9']
    """
    # Guard against a zero stride: when steps > 2 * len(x) the old
    # round(len(x) / steps) evaluated to 0 and range() raised ValueError.
    stride = max(1, round(len(x) / steps))
    reduced = ["" for _ in x]
    for i in range(0, len(x), stride):
        reduced[i] = x[i]
    return reduced
|
b4c715a0ae5a170ce54c766881f05e5b838a8cca
| 254,585
|
def adjust_by_hours(num_hours, time):
    """Takes in military time as a string, and adjusts it by the specified
    number of hours.

    The result wraps around midnight and is zero-padded back to the
    four-digit "HHMM" form.  (The previous version simply added
    num_hours * 100 to the int value, producing invalid outputs such as
    "2430" past midnight and "130" when leading zeros were lost.)
    """
    as_int = int(time)
    hours, minutes = divmod(as_int, 100)
    adjusted_hours = (hours + num_hours) % 24
    return "{:02d}{:02d}".format(adjusted_hours, minutes)
|
fbde77be6c7c8a7419fdac37901360643208fe8c
| 480,927
|
import math
def round_repeats(repeats: int, depth_coefficient: float) -> int:
    """Scale ``repeats`` by ``depth_coefficient`` and round up to an int."""
    scaled = depth_coefficient * repeats
    return int(math.ceil(scaled))
|
54a8b9a12f5c845d5cc26a3ed868e0872620968e
| 594,980
|
from pathlib import Path
def build_index(directory):
    """Build a basic HTML index listing all of the files in a directory."""
    # Everything after the first '_site' becomes the site-relative URL.
    directory_url = str(directory).split('_site', 1)[1]
    file_names = []
    for path in Path(directory).glob('*'):
        relative = str(path).split('_site', 1)[1]
        file_names.append(relative.split('/')[-1])
    entries = [f'<li><a href="{name}">{name}</a></li>' for name in file_names]
    parts = [
        '<!doctype html>',
        '<meta http-equiv="Content-Type" content="text/html; charset=utf-8">'
        f'<title>Directory listing for {directory_url}</title>'
        f'<h1>Directory listing for {directory_url}</h1>'
        '<ul>',
    ]
    parts.extend(entries)
    parts.append('</ul>')
    return '\n'.join(parts)
|
0cfb0208a9ac3bdec0a68b8dbd9629cf814ea5d5
| 252,359
|
import re
def regex_negative_match(pattern, value):
    """
    Return False if the regex is "negative" and is a complete match for the
    supplied value, True otherwise.

    A negative pattern is marked with a leading '~'.
    """
    if pattern[0] != '~':
        # this is a positive regex, ignore
        return True
    # fullmatch anchors at both ends, replacing the old "pattern + '\\Z'"
    # concatenation (which used an invalid escape in a non-raw literal and
    # emits a SyntaxWarning on modern Python).
    return not re.fullmatch(pattern[1:], value)
|
6282f21818d9792cfc2aceff00f71e861ad49695
| 77,526
|
def check_intervals(I_s, I_b):
    """
    check if the interval I_s is within I_b
    """
    lower, upper = I_s
    return I_b[0] <= lower and upper <= I_b[1]
|
41e2b148d5bcff3d7a084eae9143c072f01470df
| 257,929
|
def hindu_lunar_leap_month(date):
    """Return 'leap_month' element of a lunar Hindu date, date."""
    # The leap-month flag is stored at position 2 of the date sequence.
    LEAP_MONTH_INDEX = 2
    return date[LEAP_MONTH_INDEX]
|
842e58b9ae990eec67540f1deae3702bcc95217c
| 541,633
|
def engine_options_from_config(config):
    """Return engine options derived from config object.

    Only config keys with non-None values are propagated.
    """
    option_to_config_key = {
        'echo': 'SQLALCHEMY_ECHO',
        'pool_size': 'SQLALCHEMY_POOL_SIZE',
        'pool_timeout': 'SQLALCHEMY_POOL_TIMEOUT',
        'pool_recycle': 'SQLALCHEMY_POOL_RECYCLE',
        'max_overflow': 'SQLALCHEMY_MAX_OVERFLOW',
    }
    return {
        option: config[key]
        for option, key in option_to_config_key.items()
        if config.get(key) is not None
    }
|
9ece915beba58b080ad330f75197c466246fdfe2
| 30,387
|
import importlib
def import_module_from_string(module_str: str) -> type:
    """Return the attribute named by a dotted string path.
    for example:
        'volley.data_models.GenericMessage'
    is equivalent to:
        from volley.data_models import GenericMessage
    """
    # Split off the final component; everything before it is the module path.
    module_path, _, attribute_name = module_str.rpartition(".")
    loaded_module = importlib.import_module(module_path)
    target: type = getattr(loaded_module, attribute_name)
    return target
|
74b14b191339a94ede8b9bb53d3143e3da907451
| 479,791
|
from typing import Tuple
def get_dim(
    width: float = 398.3386,
    fraction_of_line_width: float = 1,
    ratio: float = (5 ** 0.5 - 1) / 2,
) -> Tuple[float, float]:
    """Return figure (width, height) in inches to avoid scaling in latex.

    Default width is `src.constants.REPORT_WIDTH`; default ratio is the
    golden ratio with the figure occupying the full page width.

    Args:
        width (float, optional): Textwidth of the report to make fontsizes
            match.  Defaults to `src.constants.REPORT_WIDTH`.
        fraction_of_line_width (float, optional): Fraction of the document
            width which you wish the figure to occupy. Defaults to 1.
        ratio (float, optional): Fraction of figure width that the figure
            height should be. Defaults to (5 ** 0.5 - 1)/2.
    Returns:
        fig_dim (tuple): Dimensions of figure in inches.
    Example:
        >>> dim_tuple = get_dim(fraction_of_line_width=1, ratio=(5 ** 0.5 - 1) / 2)
    """
    # TeX measures in points: 72.27 pt per inch.
    inches_per_pt = 1 / 72.27
    fig_width_pt = width * fraction_of_line_width
    fig_width_in = fig_width_pt * inches_per_pt
    fig_height_in = fig_width_in * ratio
    return (fig_width_in, fig_height_in)
|
e2021d09f82c439f282c527806d78a6119bcc47f
| 53,231
|
def FIELD(name: str) -> str:
    """
    Creates a reference to a field
    Args:
        name: field name
    Usage:
        >>> FIELD("First Name")
        '{First Name}'
    """
    # Wrap the field name in literal braces.
    return "{{{}}}".format(name)
|
0fe885aa407a6d3da54f47f2528b7ed464186ab5
| 326,040
|
def coord_2_id(x, y):
    """Give the pinID from the x,y coordinate."""
    # Rows are numbered top-down on a 34-wide grid.
    row = 34 - y
    return row * 34 + x
|
e02c03f0761ea7adfb6d50332ded3b0a82c2b016
| 629,372
|
def preprocess_urls(urls):
    """Hook for canonicalizing URLs before further processing.

    Args:
        urls: Set of URLs.
    Returns:
        Iterable of URLs; currently the input is passed through unchanged.
    """
    return urls
|
a41bc8a953758085d49a6a4af4c48fbda8ce484f
| 388,165
|
def parse_csv_data(filename_csv: str = 'nation_2021 -10-28.csv') -> list:
    """Read ``filename_csv`` and return its rows as a list of raw line strings
    (newlines preserved)."""
    with open(filename_csv, "r") as handle:
        return handle.readlines()
|
fedb8af215e528ecaa9b4340077cff6491f13e9f
| 79,154
|
def is_in(constant):
    """Create validator to check whether value is in a constant set of values."""
    def _validator(value):
        return value in constant
    return _validator
|
e386886ac9e814303186644dba15fb29b3d7074e
| 561,317
|
def state_to_id(state):
    """Convert a state to its ID as used by the IBGE databases.
    Raises KeyError if the state is invalid. """
    ibge_ids = {
        'AC': 12, 'AL': 27, 'AP': 16, 'AM': 13, 'BA': 29, 'CE': 23,
        'DF': 53, 'ES': 32, 'GO': 52, 'MA': 21, 'MT': 51, 'MS': 50,
        'MG': 31, 'PA': 15, 'PB': 25, 'PR': 41, 'PE': 26, 'PI': 22,
        'RJ': 33, 'RN': 24, 'RS': 43, 'RO': 11, 'RR': 14, 'SC': 42,
        'SP': 35, 'SE': 28, 'TO': 17,
    }
    # Lookup is case-insensitive on the two-letter abbreviation.
    return ibge_ids[state.upper()]
|
5d57e6578881caa5ad2ee2e58ea6f290771c3c58
| 76,762
|
def _add_system(query, data):
"""Add data from successful system MIB query to original data provided.
Args:
query: MIB query object
data: Three keyed dict of data
Returns:
data: Aggregated data
"""
# Process query
result = query.system()
# Add tag
for primary in result.keys():
for secondary in result[primary].keys():
for tertiary, value in result[primary][secondary].items():
data[primary][secondary][tertiary] = value
# Return
return data
|
cb37d05ac484f9a8f57b7a61afd9a40a971a23d0
| 50,770
|
def files_exist(files):
    """Check whether every path in ``files`` can be opened for reading.

    Raises the underlying OSError on the first unreadable file; returns
    True when all files (or an empty list) are readable.
    """
    for path in files:
        # Opening the file is the actual readability check.
        with open(path, "r"):
            pass
    return True
|
23941ee752c965526567571cce894399c1e7a42f
| 642,854
|
def generate_buy_signal(df, spy_large_move):
    """ Method generating a buy signal column in a given dataframe"""
    # Buy when SPY makes a large negative move while the equity is still up.
    spy_dropped = df['SPY Change'] < spy_large_move
    equity_up = df['Change'] > 0
    df['Buy Signal'] = spy_dropped & equity_up
    return df
|
8fbcb59a8766340be130ff4f219101a524db7b11
| 61,738
|
def test_version(version1, version2):
""" Compares a version with a version string and checks if the first
is in the range defined by the second. The second version can be
prefixed by one of =, <, >, >=, <= or != to compare with a range of
versions.
"""
version1 = int(version1)
version2 = str(version2)
ver1 = version1
ver2 = int(version2.lstrip('<>=!'))
op = version2.rstrip('0123456789')
if len(op) == 0 or op == '=':
return ver1 == ver2
if op == "=" or op == "==":
return ver1 == ver2
if op == "<=":
return ver1 <= ver2
if op == "<":
return ver1 < ver2
if op == ">=":
return ver1 >= ver2
if op == ">":
return ver1 > ver2
if op == "!=" or op == "<>":
return ver1 != ver2
return False
|
1b4207d3a3cba9557e63ef5d80c9c6bc95bac475
| 291,756
|
def set_corpus(dictionary, texts):
    """ Build the corpus.
    Args:
        dictionary: dictionary built from the tokenized questions
        texts: the tokenized questions
    Returns:
        corpus: the corpus -- one bag-of-words vector per question,
            produced via the dictionary's doc2bow
    """
    corpus = [dictionary.doc2bow(text) for text in texts]
    return corpus
|
c3f2991b697f1eb2b42e904604ba6e1a4167ff94
| 325,813
|
import math
def otRound(value):
    """Round float value to nearest integer towards ``+Infinity``.
    The OpenType spec (in the section on `"normalization" of OpenType Font Variations <https://docs.microsoft.com/en-us/typography/opentype/spec/otvaroverview#coordinate-scales-and-normalization>`_)
    defines the required method for converting floating point values to
    fixed-point. In particular it specifies the following rounding strategy:
    for fractional values of 0.5 and higher, take the next higher integer;
    for other fractional values, truncate.
    This function rounds the floating-point value according to this strategy
    in preparation for conversion to fixed-point.
    Args:
        value (float): The input floating-point value.
    Returns
        int: The rounded value.
    """
    # See this thread for how we ended up with this implementation:
    # https://github.com/fonttools/fonttools/issues/1248#issuecomment-383198166
    # floor(value + 0.5) rounds halves towards +Infinity, unlike round().
    return int(math.floor(value + 0.5))
|
f6bc13594cbecc75497bfed0bbc8369659e4030a
| 420,383
|
from datetime import datetime
from typing import Tuple
import math
def quantize_time_range(
    start_time: datetime,
    stop_time: datetime
) -> Tuple[datetime, datetime]:
    """Quantize the start and end times to 5 min boundaries.

    Both endpoints are floored to the nearest 5-minute mark; when both
    fall in the same 5-minute slot the original values are returned
    unchanged.

    :param start_time: Start time
    :param stop_time: Stop time
    :return: Quantized start and end times
    :raises ValueError: if ``start_time`` or ``stop_time`` is ``None``
    :Examples:
    >>> start = datetime(2020, 5, 1, 12, 0, 1)
    >>> stop = datetime(2020, 5, 1, 16, 0, 6)
    >>> qstart, qstop = quantize_time_range(start, stop)
    >>> qstart
    datetime.datetime(2020, 5, 1, 12, 0)
    >>> qstop
    datetime.datetime(2020, 5, 1, 16, 0)
    """
    if start_time is None or stop_time is None:
        raise ValueError("both start and stop times must be provided")

    def _floor_to_5min(ts):
        # minute - minute % 5 floors the minute to a 5-minute boundary.
        return ts.replace(minute=ts.minute - ts.minute % 5,
                          second=0, microsecond=0)

    quantized_start = _floor_to_5min(start_time)
    quantized_stop = _floor_to_5min(stop_time)
    # Degenerate range: keep the callers' originals rather than collapsing.
    if quantized_start == quantized_stop:
        return start_time, stop_time
    return quantized_start, quantized_stop
|
5ed1dc4091f9ab896efda375f90466ec143310bc
| 364,966
|
def len_in_bits(n):
    """
    Return number of bits in binary representation of @n.
    Returns 0 for n == 0.
    """
    # int.bit_length() exists on every supported Python (2.7+ / 3.x), so
    # the old bin()-based AttributeError fallback was dead code.
    return n.bit_length()
|
f7e23beabbb84b86fae514d43938f5aebff6ec07
| 311,327
|
def sum_of_powers(ind):
    r"""Sum Of Powers function defined as:
    $$ f(x) = \sum_{i=1}^{n} \norm{x_i}^{(i+1)} $$
    with a search domain of $-1 < x_i < 1, 1 \leq i \leq n$.
    The global minimum is at $f(x_1, ..., x_n) = f(0, ..., 0) = 0.

    Returns a one-element tuple (fitness-tuple convention).
    """
    # Raw docstring keeps the LaTeX backslashes intact: previously
    # ``\norm`` embedded a literal newline escape and ``\s``/``\l`` were
    # invalid escape sequences.
    return sum((abs(ind[i]) ** (i+1.) for i in range(len(ind)))),
|
67088d36e2bbdc1440338aaed40b78c76da5a096
| 439,818
|
def set_verbosity(level=1):
    """Set logging verbosity level, 0 is lowest."""
    # Store in the module-level ``verbosity`` and echo the new value back.
    global verbosity
    verbosity = level
    return level
|
4588991bbfb9f52041e46fb0c302ded7f1a83667
| 26,273
|
def strConcat(strings : list) -> str:
    """Concatenate a list of string objects
    Parameters
    ----------
    strings
        list of input values; each is coerced with str() before joining
    Returns
    -------
    concatenated string
    """
    pieces = (str(item) for item in strings)
    return "".join(pieces)
|
8f91b5c584d45489e3b54aaeef41dee4aeec6888
| 519,759
|
import pickle
def load_pickle(file):
    """Read a pickle file and return its deserialized content."""
    with open(file, "rb") as fhandle:
        return pickle.load(fhandle)
|
1c24b46039dfd0fb3b0643691a717442f3118312
| 250,340
|
def expand_extent(extent, margin):
    """Make the rectangular region bigger by a given amount to all directions.
    Args:
        extent: The rectangular given as a list of [lon1, lon2, lat1, lat2].
            Longitude in [0,360) °E and latitude in [-90,+90] °N.
        margin: The amount to extend into all four directions. (one number)
    Returns:
        Expanded extent as a list of [lon1, lon2, lat1, lat2].
    Most simple case:
    >>> expand_extent([20, 200, -10, 10], 10)
    [10, 210, -20, 20]
    Don’t go beyond 90° North or South:
    >>> expand_extent([20, 200, -85, 85], 10)
    [10, 210, -90, 90]
    Cover the whole globe:
    >>> expand_extent([0, 360, -90, 90], 10)
    [0, 360, -90, 90]
    Circle around 0° longitude:
    >>> expand_extent([0, 30, -10, 10], 10)
    [350, 40, -20, 20]
    >>> expand_extent([20, 355, -10, 10], 10)
    [10, 5, -20, 20]
    Special Case 1: lon1 and lon2 are so close that by expanding them, they
    close around the whole globe.
    >>> expand_extent([20, 15, -10, 10], 10)
    [0, 360, -20, 20]
    Special Case 2.1: lon1 and lon2 both expand around the 0°/360° boundary so
    that they close around the whole globe.
    >>> expand_extent([5, 359, -10, 10], 10)
    [0, 360, -20, 20]
    Special Case 2.2: Only lon2 expands beyond 360°, but then it overlaps with
    the expanded lon1, and they close around the whole globe.
    >>> expand_extent([11, 359, -10, 10], 10)
    [0, 360, -20, 20]
    Special Case 3: Only lon1 expands beyond 0°, but then it overlaps with the
    expanded lon2, and they close again around the whole globe.
    >>> expand_extent([1, 349, -10, 10], 10)
    [0, 360, -20, 20]
    """
    # Work on a copy so the caller's extent list is never mutated.
    result = list()
    result[:] = extent
    lon1, lon2 = extent[0:2]
    lat1, lat2 = extent[2:4]
    # LONGITUDE -- wraps at the 0°/360° boundary, hence the special cases
    # spelled out in the docstring examples above.
    if lon1 > lon2 and (lon1 - margin) < (lon2 + margin):
        # Special Case 1: the (wrapped) interval's ends meet after expansion,
        # so the extent now covers the whole globe.
        result[0] = 0
        result[1] = 360
    elif (lon1 < lon2 and lon2 + margin >= 360 and
          lon1 - margin < (lon2 + margin) % 360):
        # Special Case 2: lon2 expands past 360° and overlaps the expanded
        # lon1 -- again the whole globe.
        result[0] = 0
        result[1] = 360
    elif (lon1 < lon2 and lon1 - margin <= 0 and
          lon2 + margin > (lon1 - margin) % 360):
        # Special Case 3: lon1 expands below 0° and overlaps the expanded
        # lon2 -- whole globe once more.
        result[0] = 0
        result[1] = 360
    else:
        # The normal case: expand both ends, wrapping into [0, 360).
        result[0] = (lon1 - margin) % 360
        result[1] = (lon2 + margin) % 360
    # LATITUDE -- no wraparound; simply clamp to the poles.
    result[2] = max(-90, lat1 - margin)
    result[3] = min(+90, lat2 + margin)
    return result
|
64ccb131410148e403f4a83bfbc2176b265d6a83
| 582,276
|
from typing import List
def flat_sentences(sentences: List[List[str]]) -> List[str]:
    """
    Flatten a list of token lists into a single flat list.
    Args:
        sentences: List[List[str]] source sentences
    Returns:
        flatted_sentence: all tokens in their original order
    """
    return [token for sentence in sentences for token in sentence]
|
c56155c9123cd7fa184310e1aded4abf8655339d
| 496,141
|
def clean(dirty_json, dataset, text_threshold=50):
    """Standardise a json data's schema. This function will identify
    four types of entity: the title (must be called 'title' in the input
    json), the id (must be called 'id' in the input json), text bodies
    (will be searchable in Clio, required to have at least 'text_threshold'
    characters), and 'other' fields which is everything else.
    Args:
        dirty_json (json): Input raw data json.
        dataset (str): Name of the dataset.
        text_threshold (int): Minimum number of characters required
            to define a body of text as a textBody field.
    Returns:
        uid, cleaned_json, fields: list of ids, cleaned json, set of fields
    """
    uid = []
    cleaned_json = []
    fields = set()
    for record in dirty_json:
        cleaned_record = {}
        for key, value in record.items():
            if value is None:
                continue  # drop null values entirely
            # Rename each key according to the entity type it represents.
            if key == "title":
                key = f"title_of_{dataset}"
            elif key == "id":
                key = f"id_of_{dataset}"
                uid.append(value)
            elif type(value) is str and len(value) > text_threshold:
                key = f"textBody_{key}_{dataset}"
            else:
                key = f"other_{key}_{dataset}"
            cleaned_record[key] = value
            fields.add(key)
        cleaned_json.append(cleaned_record)
    return uid, cleaned_json, fields
|
a743b77f2edf6f96b0b9aaa80c06a0ee47e7156e
| 455,732
|
from bs4 import BeautifulSoup
def _clean_soup(soup):
    """Clean up html to get rid of tricky comments."""
    # Strip comment delimiters so commented-out markup becomes parseable.
    html = str(soup).replace('<!--', '').replace('-->', '')
    return BeautifulSoup(html, "lxml")
|
1fd6c43e0ef86ab72b39443b2d8dcd8d1c666184
| 145,943
|
import typing
def selection_sort(numbers: typing.List[int]) -> typing.List[int]:
    """The Selection Sort algorithm.

    Sorts ``numbers`` in place (and returns the same list) from smallest
    to largest.

    Args:
        numbers (typing.List[int]): A list of integers.
    Returns:
        typing.List[int]: The same list, ordered ascending.
    """
    size = len(numbers)
    for target in range(size):
        # Find the index of the smallest remaining value.
        smallest = target
        for candidate in range(target + 1, size):
            if numbers[candidate] < numbers[smallest]:
                smallest = candidate
        # Move it into its final position.
        numbers[target], numbers[smallest] = numbers[smallest], numbers[target]
    return numbers
|
7eaa58e9fcebd4e3bb439d719a1f7d5c39ec767a
| 260,712
|
def get_loop_segments(loop):
    """returns a list of segments (consecutive point pairs) in a loop"""
    segments = []
    previous = None
    for current in loop:
        # Pair each point with its predecessor; the first point (or a
        # point following a None) only primes the previous-point slot.
        if previous is not None:
            segments.append([previous, current])
        previous = current
    return segments
|
13a4b5b49439177b11aba5f00259880d701bddcb
| 300,631
|
def get_fully_qualified_classname(cls=None, obj=None):
    """
    Returns `fully-qualified-name` of the class represented by **cls** or **obj**

    Builtin classes (module ``builtins``) are reported by bare name.
    Returns None when neither argument is given.
    :param cls: a class object
    :param obj: an instance (takes precedence over cls)
    :return: dotted "module.ClassName" string, bare name for builtins, or None
    """
    def _qualify(klass):
        # str.__class__.__module__ is 'builtins'; builtins get no prefix.
        module = klass.__module__
        if module is None or module == str.__class__.__module__:
            return klass.__name__
        return module + '.' + klass.__name__

    # Compare against None explicitly: falsy-but-valid instances such as
    # 0, "" or [] were silently skipped by the old truthiness test.
    if obj is not None:
        return _qualify(obj.__class__)
    if cls is not None:
        return _qualify(cls)
    return None
|
6801fe4458d088599aef8f3b02b0819abe3f3de3
| 448,200
|
def commonLeader(data, i, j):
    """Get the number of common leading elements beginning at data[i] and data[j].

    The run length is capped at 255 + 9 -- presumably a limit of the
    surrounding compression/encoding scheme; confirm against the caller.
    NOTE(review): negative i/j would wrap around via Python's negative
    indexing; callers are assumed to pass non-negative offsets.
    """
    l = 0
    try:
        while l < 255+9 and data[i+l] == data[j+l]:
            l += 1
    except IndexError:
        pass  # running off the end of ``data`` terminates the scan
    return l
|
7b724aaff3c62be2fdd37e42677ef51a20c21f5d
| 630,846
|
def get_mgmt_interface(device, mgmt_ip=None):
    """ Get the name of the management interface.

    If ``mgmt_ip`` is provided it is used for the lookup; otherwise the
    device's get_mgmt_ip API is called to obtain it.  Returns None when
    no interface carries the management IP.

    Args:
        mgmt_ip: (str) IP address of the management interface (optional)
    Returns:
        String with interface name
    """
    if not mgmt_ip:
        mgmt_ip = device.api.get_mgmt_ip()
    parsed = device.parse('show ip interface brief')
    matches = parsed.q.contains_key_value(
        key='ip_address', value=mgmt_ip).get_values('interface')
    if matches:
        return matches[0]
|
1a848764411beb73f5a128a45e2919c092706d60
| 267,980
|
def setup_registry(registry):
    """set up the given registry for use with pint_xarray
    Namely, it enables ``force_ndarray_like`` to make sure results are always
    duck arrays.
    Parameters
    ----------
    registry : pint.UnitRegistry
        The registry to modify (returned for chaining)
    """
    already_forced = registry.force_ndarray or registry.force_ndarray_like
    if not already_forced:
        registry.force_ndarray_like = True
    return registry
|
1888eaece4a388307854251012ea75dc213ddd43
| 604,450
|
def unet_params(model_name):
    """Get U-Net params based on model name.

    Returns a (width_coefficient, depth_coefficient, resolution,
    dropout_rate) tuple; raises KeyError for an unknown model name.
    """
    # 'w' variants scale width, 'r' variants scale input resolution.
    params_dict = {
        'unet-w0': (1.0, 1.0, 300, 0.2),
        'unet-w1': (1.3, 1.0, 300, 0.2),
        'unet-w2': (1.6, 1.0, 300, 0.2),
        'unet-w3': (1.9, 1.0, 300, 0.2),
        'unet-w4': (2.1, 1.0, 300, 0.2),
        'unet-r0': (1.0, 1.0, 300, 0.2),
        'unet-r1': (1.0, 1.0, 320, 0.2),
        'unet-r2': (1.0, 1.0, 360, 0.2),
        'unet-r3': (1.0, 1.0, 420, 0.2),
        'unet-r4': (1.0, 1.0, 500, 0.2),
        'unet-v1': (1.3, 1.0, 320, 0.2),
    }
    return params_dict[model_name]
|
d4ad2090bb01fd3d5c5a623e2e893e8d6f2a89cf
| 314,343
|
def set_intersect(variable1, variable2, d):
    """
    Expand both variables, interpret them as lists of strings, and return the
    intersection as a flattened string.
    For example:
    s1 = "a b c"
    s2 = "b c d"
    s3 = set_intersect(s1, s2)
    => s3 = "b c"
    (element order in the result is unspecified, as with any set)
    """
    first = set(d.getVar(variable1).split())
    second = set(d.getVar(variable2).split())
    common = first.intersection(second)
    return " ".join(common)
|
db68885a18f52bdc439ee0cdbd3f756b9e3dc1cb
| 75,824
|
def to_applescript(num):
    """
    Convert a Python number to a format that can be passed to Applescript.
    A number doesn't need coerced to print to stdout, but it's best to be
    thorough and explicit.
    """
    # Default formatting of a number is identical to str().
    return "{}".format(num)
|
7d4fe31e276267668078cedc0b5fa6c5f97dc035
| 22,504
|
import re
def normalize_dates(list_dates):
    """
    Converts a list of timeframes (type: string) into a normalized list of
    timeframes (type: string, all in the format YYYY only).
    Does not affect YYYYQ1-3 or YYYYM01-M11 since these will be filtered out
    Parameters
    ----------
    list_dates: list of strings (each string is a timeframe which can be in
        the format YYYYQ#, YYYYM##, YYYY-YYYY, or YYYY-YY)
    Returns
    -------
    normed_dates: list of normalized timeframes (type: string, all in the
        format YYYY only) based on the input list
    Examples
    --------
    >>> normalize_dates(['2011', '2011Q4','2011Q1', '2013M12','2013M09','2011-2012', '2013-14'])
    ['2011', '2011', '2011Q1', '2013', '2013M09', '2011', '2013']
    """
    # Raw-string regexes fix the invalid '\d' escapes of the old version;
    # the unused not_normed bookkeeping (never returned) is removed.
    normed_dates = []
    for timeframe in list_dates:
        text = str(timeframe)
        # Ranged timeframes (YYYY-YYYY or YYYY-YY) map to the earlier year.
        range_match = (re.match(r'(\d{4})-(\d{4})', text)
                       or re.match(r'(\d{4})-(\d{2})', text))
        if range_match:
            normed_dates.append(range_match.group(1))
            continue
        # Q4 / M12 collapse to the bare year; other quarters/months pass
        # through untouched (they are filtered downstream).
        boundary_match = re.match(r'(\d{4})(?:Q4|M12)', text)
        normed_dates.append(boundary_match.group(1) if boundary_match
                            else timeframe)
    return normed_dates
|
ff9e6e197bdeb363c086ee95dd3a9133e2b76881
| 529,319
|
def get_name(device):
    """ Get the display name of a device """
    # Device names end with "-<type>"; drop the type plus its separator.
    suffix_length = len(device.type) + 1
    return device.name[:-suffix_length]
|
f89839b0c78d4e550b93b460c454d424b3881466
| 541,252
|
def convert_angle(start, end):
    """Normalize a polygon's start/end angles to non-negative values.

    E.g. start -60, end 60 becomes start 300, end 420.
    Args:
        start (int): polygon start angle
        end (int): polygon end angle
    Returns:
        int, int: start and end converted to values >= 0
    """
    if start < 0:
        start = 360 + start
    # Keep the arc oriented forward: if shifting pushed start past end,
    # advance end by a full turn.
    if start > end:
        end = end + 360
    return start, end
|
5d7f56174e7739068bdf6e1e958dfcb806cd7ede
| 39,395
|
import struct
def float_to_bin(f):
    """Convert a float into a binary string (big-endian IEEE-754 double bits).

    NOTE(review): the final expression drops the last bit, strips leading
    zeros, then re-appends the sign bit -- preserved verbatim from the
    original; confirm this framing is what callers expect.
    """
    packed = bytearray(struct.pack('>d', f))
    bits = ''.join(format(byte, '08b') for byte in packed)
    return bits[:-1].lstrip('0') + bits[0]
|
670db28fc3462bc1413710c5a3708c95024a5009
| 125,280
|
def deep_get(obj, keys):
    """
    Walk a nested structure of dicts/lists along the given key path and
    return the leaf value.

    Keys are applied left to right; when the current node is a list the
    next key is interpreted as an integer index (model composition via
    allOf/oneOf can introduce lists into the path).

    :type obj: list or dict
    :type keys: list of strings
    """
    current = obj
    for key in keys:
        if isinstance(current, list):
            current = current[int(key)]
        else:
            current = current[key]
    return current
|
fd5cc0216eb3e0b40d9a7347ae8506dbddaca5aa
| 115,034
|
def has_permissions(user, component):
    """Return whether *user* holds every permission the component declares.

    Components without a ``permissions`` attribute require nothing, so the
    check is made against an empty set.
    """
    required = getattr(component, 'permissions', set())
    return user.has_perms(required)
|
eea94580b179d1c1ef2a7cd50926656aabe73d00
| 502,610
|
def rl_to_vswr(rl):
    """Calculate VSWR from return loss (RL, in dB)."""
    reflection = pow(10, -(rl / 20))  # reflection coefficient magnitude
    return (1 + reflection) / (1 - reflection)
|
ab2f760c58da1c755867494a022f26812a1137c3
| 88,209
|
def list_strip_all_newline(list_item: list) -> list:
    """
    Strip leading/trailing newline characters from every string in a list.
    :param list_item: list of strings to clean
    :return: a new list with newline characters stripped from each element
    """
    return [item.strip('\n') for item in list_item]
|
c002af6daffac42b2696a7048b53b2e5e751effd
| 456,206
|
from typing import Tuple
from typing import Callable
def parallel_helper(piece_of_work: Tuple, worker_func: Callable):
    """
    Unpack a tuple of arguments and apply the worker function to them.

    Parameters
    ----------
    piece_of_work: Tuple
        positional arguments for ``worker_func``, packed as a tuple
    worker_func: Callable
        the function that performs the actual work

    Returns
    -------
    whatever ``worker_func`` returns for the unpacked arguments
    """
    return worker_func(*piece_of_work)
|
329133a627625e0f5160e798067ef3c29927c4fe
| 495,065
|
def print_metrics_resource_type(classif_report, phase, resource_type):
    """
    Print precision/recall/F1 for one resource type and return the F1 score.
    Resource type can be one of: 'B-MET', 'I-MET', 'B-DAT', 'O'
    """
    stats = classif_report[resource_type]
    precision = round(stats['precision'], 3)
    recall = round(stats['recall'], 3)
    denominator = recall + precision
    # Guard the harmonic mean against a zero denominator.
    f1 = round(2 * recall * precision / denominator, 3) if denominator != 0 else 0
    # NOTE(review): the 'O' branch pads the labels with an extra space --
    # kept as-is to preserve the exact console output.
    if resource_type == 'O':
        print(phase + ':', resource_type, 'Precision: ', precision, resource_type, 'Recall: ', recall, 'F1: ', f1)
    else:
        print(phase + ':', resource_type, 'Precision:', precision, resource_type, 'Recall:', recall, 'F1:', f1)
    return f1
|
779f89adab5b48bef70fc11ab0080a89d4a97298
| 289,459
|
import random
def make_rgb() -> str:
    """Generate a random CSS-style color string of the form rgb(R,G,B)."""
    # Three independent draws, evaluated left to right.
    r, g, b = (random.randint(0, 255) for _ in range(3))
    return f'rgb({r},{g},{b})'
|
04a56953c4568329e9181b9f1a3c150107332ca1
| 295,230
|
def __trimTrailingSlash__(args):
    """
    Remove a single trailing '/' from each filepath in the list (in place).

    :param args: list of string file paths; the list is mutated in place
    :return: the same list, with trailing /'s removed

    Note: uses str.endswith so empty strings are left untouched instead of
    raising IndexError as the previous ``args[i][-1]`` check did.
    """
    for i in range(len(args)):
        if args[i].endswith("/"):
            args[i] = args[i][:-1]
    return args
|
a2a9ac1c3aa7e6882a58fd28ed099d13c844d775
| 162,071
|
def create_task_descriptor( name, arguments ):
    """
    Build an opaque task descriptor.

    Decouples the structure of a task descriptor from code outside this
    module -- callers must not rely on the returned object's type or layout.

    @param name Task identifier
    @param arguments Arguments to pass to the task
    """
    # for now, a plain dict is sufficient
    descriptor = dict(name=name, arguments=arguments)
    return descriptor
|
0422c9f62c2d19890bbe8465c28ceb47f842a59c
| 634,561
|
import re
def escape_html_syntax_characters(string):
    """
    Escape the three HTML syntax characters &, <, >.
    '&' is replaced first so the ampersands introduced by the other two
    entities are not double-escaped.
    """
    string = string.replace('&', '&amp;')
    string = string.replace('<', '&lt;')
    string = string.replace('>', '&gt;')
    return string
|
2e07a45c8aa30ca3a7ef0e4a5296d9205de634c6
| 75,674
|
import inspect
def get_module_funcs(module):
    """Return the functions defined in *module* itself.

    Functions merely imported into the module (whose ``__module__``
    attribute differs from the module's name) are filtered out.
    """
    target_name = module.__name__
    members = inspect.getmembers(module, inspect.isfunction)
    return [func for _, func in members if func.__module__ == target_name]
|
e70db25bcc2b59a7b2370696c6498ca174787c77
| 551,140
|
def __myfloat(x):
    """
    Private helper: return float(x), or 0.0 when the value cannot be
    converted.

    Also tolerates non-string inputs such as None, which raise TypeError
    rather than ValueError -- the previous version crashed on those.
    """
    try:
        return float(x)
    except (ValueError, TypeError):
        return 0.0
|
da067a91f3932fab715c69c47fe0a1e90a75e8ba
| 91,472
|
def get_subset(container, subset_bounds):
    """Concatenate the slices of *container* described by (start, stop) bound pairs."""
    return [item
            for bound in subset_bounds
            for item in container[bound[0]:bound[1]]]
|
4932ecba987c4936f9f467f270c6c07fd8681840
| 7,511
|
def calc_prefix(_str, n):
    """
    Return the string itself if it fits in n characters, otherwise an
    n-character preview of the form 'prefix...'.
    """
    if len(_str) > n:
        return _str[: n - 3] + '...'
    return _str
|
e7c666667dc24941ad496158f321bbc7706a5d06
| 77,693
|
def compute_pad(input_shape, region_shape, region_stride):
    """
    Compute the padded input size and the per-side padding amounts.

    :param input_shape: (height, width) of the input
    :param region_shape: (height, width) of the sliding region
    :param region_stride: (stride_y, stride_x) of the sliding region
    :rtype: tuple
    :returns: tuple (padded_input_shape, paddings)
        * padded_input_shape (tuple): the new (height, width) input shape
        * paddings (tuple): pixels to add to the (left, right, top, bottom)
    """
    in_h, in_w = input_shape
    reg_h, reg_w = region_shape
    stride_y, stride_x = region_stride
    padded_w = (in_w - 1) * stride_x + reg_w
    padded_h = (in_h - 1) * stride_y + reg_h
    extra_w = padded_w - in_w
    extra_h = padded_h - in_h
    # The odd pixel (if any) goes to the left / top side.
    pad_right = extra_w // 2
    pad_left = extra_w - pad_right
    pad_bottom = extra_h // 2
    pad_top = extra_h - pad_bottom
    return (padded_h, padded_w), (pad_left, pad_right, pad_top, pad_bottom)
|
23583f1830f583a767df7d653f5248abd013eb5c
| 237,516
|
def preprocess_image(img):
    """Rescale image values to the unit interval [0, 1].

    A constant image (max == min) is mapped to all zeros instead of
    dividing by zero, which the previous version did.
    """
    lo, hi = img.min(), img.max()
    if hi == lo:
        # degenerate case: constant image -> all zeros
        return img - lo
    return (img - lo) / (hi - lo)
|
6ef23d00d99bdd0b48837a24c5b6b557f39848f5
| 90,896
|
def PATCH_getControlSurfaceCount(tixi, comp_seg_uid):
    """
    Function to get the number of Control Surfaces
    Function 'getControlSurfaceCount' returns the number of control surface
    (only TED) on a given component segment.
    Args:
        :tixi (TIXI Handle): TIXI Handle of the CPACS file
        :comp_seg_uid (str): UID of the component segment
    Returns:
        :contrl_surf_count (integer): Number of control surfaces on the component segment
    Raises:
        ValueError: if the TIXI handle is invalid or the UID does not exist
    Note:
        * Written by Aidan Jungo
    """
    if not tixi.checkDocumentHandle():
        # Bug fix: the original built this ValueError without raising it,
        # so an invalid handle was silently ignored.
        raise ValueError('Problem with TIXI handle')
    if not tixi.uIDCheckExists(comp_seg_uid):
        raise ValueError(f"No UID named '{comp_seg_uid}' has been found!")
    comp_sec_xpath = tixi.uIDGetXPath(comp_seg_uid)
    contrl_surf_xpath = comp_sec_xpath + '/controlSurfaces/trailingEdgeDevices/trailingEdgeDevice'
    # tixi.xPathEvaluateNodeNumber() raises an error if the path does not
    # exist, so guard with checkElement and treat a missing node as zero.
    if tixi.checkElement(contrl_surf_xpath):
        contrl_surf_count = tixi.xPathEvaluateNodeNumber(contrl_surf_xpath)
    else:
        contrl_surf_count = 0
    return contrl_surf_count
|
1eda2df358fb7a26b39bdc3ab97428251210d263
| 619,103
|
def callback_to_list(callback):
    """Normalize *callback* into a list of callables.

    Parameters
    ----------
    callback : callable or list of callables or None
        Callable object(s); None yields an empty list.

    Returns
    -------
    list
        List of callable objects.

    Raises
    ------
    TypeError
        If the input is neither None, a callable, nor a list, or if any
        list element is not callable.
    """
    if callback is None:
        return []
    if isinstance(callback, list):
        # validate every element of an explicit list
        for item in callback:
            if callable(item) is False:
                raise TypeError("'callback' is not callable.")
        return callback
    if callable(callback):
        return [callback]
    raise TypeError("'callback' must be callables or list of "
                    "callables.")
|
53ce8aa556015fdac4e0b5729d54617c904b6c7c
| 79,153
|
import asyncio
def asyncio_run(tasks):
    """
    Helper method which abstracts differences from
    Python 3.7 and before about coroutines
    """
    # Prefer asyncio.run (added in 3.7); otherwise drive a fresh event
    # loop manually and make sure it is closed afterwards.
    if hasattr(asyncio, "run"):
        done , _ = asyncio.run(asyncio.wait(tasks))
    else:
        loop = asyncio.new_event_loop()
        try:
            done, _ = loop.run_until_complete(asyncio.wait(tasks))
        finally:
            loop.close()
    # NOTE(review): only one completed task is inspected; if *tasks*
    # contains more than one item the other results are discarded --
    # confirm callers only ever pass a single task.
    task = done.pop()
    # Re-raise the task's exception, if it raised one, otherwise return
    # its result.
    retval_exception = task.exception()
    if retval_exception is not None:
        raise retval_exception
    return task.result()
|
5dee64dd0716cadae57dbe5d962cd3f732d14a95
| 120,614
|
def escape_windows_cmd_string(s):
    """Returns a string that is usable by the Windows cmd.exe.
    The escaping is based on details here and emperical testing:
    http://www.robvanderwoude.com/escapechars.php

    Note: the iteration order matters -- characters handled before '^'
    pick up a second caret when '^' itself is escaped.
    """
    escaped = s
    for special in '()%!^<>&|"':
        escaped = escaped.replace(special, '^' + special)
    escaped = escaped.replace('/?', '/.')
    return escaped
|
7d2b1ed30e01810602b583e76ac0efc9b08509df
| 287,394
|
import hashlib
def _name_hash(name: str) -> int:
""" Turn a string name into a unique hashcode using MD5 """
return hashlib.md5(name.encode('utf8')).digest()[0]
|
35154cc096e02f765ea2211cfcf31aeff5c113f0
| 557,229
|
def _get_match_groups(ping_output, regex):
    """
    Search *ping_output* with the compiled *regex* and return the captured
    groups; raise a generic Exception when the output does not match.
    """
    match = regex.search(ping_output)
    if match is None:
        raise Exception('Invalid PING output:\n' + ping_output)
    return match.groups()
|
7780b40cef696167907ce4dd145aa4b178ec81d4
| 671,055
|
def to_float(value: str | None) -> float | None:
"""Safely convert string value to rounded float."""
if value is None:
return None
try:
return round(
float(value),
1,
)
except (ValueError, TypeError):
return None
|
1fdf86ce8fe2f5218b974d01a499a73afca962f5
| 433,645
|
from typing import Tuple
import math
def get_xy_components(lon: float, lat: float) -> Tuple[int, int]:
    """For a given longitude and latitude, returns the relevant
    components of the name of the zip file containing the SRTM data (for
    example "35" and "02" in "srtm_35_02.zip").
    """
    # 5-degree tiles; longitude is shifted by 180 to start numbering at 1.
    tile_x = (int(math.floor(lon)) + 180) // 5 + 1
    tile_y = (64 - int(math.floor(lat))) // 5
    return tile_x, tile_y
|
076a4339278766f59c91338fb95953455e4ae0eb
| 634,337
|
def is_object(swagger_spec, object_spec, no_default_type=False):
    """
    True when the schema describes an object: its (dereferenced) type is
    'object', or it has no type but uses model composition ('allOf').

    :param swagger_spec: :class:`bravado_core.spec.Spec`
    :param object_spec: specification for a swagger object
    :type object_spec: dict
    :param no_default_type: ignore bravado-core 'default_type_to_object' configuration
    :type no_default_type: bool
    :return: True if the spec describes an object, False otherwise.
    """
    deref = swagger_spec.deref
    if no_default_type or not swagger_spec.config['default_type_to_object']:
        default_type = None
    else:
        default_type = 'object'
    object_type = deref(deref(object_spec).get('type', default_type))
    if object_type == 'object':
        return True
    return object_type is None and 'allOf' in object_spec
|
13d604a30e829a1c475e770ef75266d22e5ee227
| 398,695
|
import collections
def flip(containers):
"""
Invert the input so that instead of representing the containers, we
represent the bags that can be contained in each container.
"""
contents = collections.defaultdict(set)
for container in containers:
for content in containers[container]:
contents[content].add(container)
return contents
|
2e10eae2a8e449379fc81daa23370bb7dd20576d
| 486,789
|
def set_emulated_vision_deficiency(type: str) -> dict:
    """Emulates the given vision deficiency.
    Parameters
    ----------
    type: str
        Vision deficiency to emulate.
    **Experimental**
    """
    params = {"type": type}
    return {
        "method": "Emulation.setEmulatedVisionDeficiency",
        "params": params,
    }
|
906fc7b62a3d669b1a7843bf47df06c0b1862095
| 680,618
|
import random
def spread(topic):
    """
    Return a fictional CDS spread in bps: a tight triangular distribution
    in most cases, but wider, higher, and longer-tailed for Fabrikam.
    """
    is_fabrikam = topic.startswith("Fabrikam")
    if " 1Y CDS Spread" in topic:
        if is_fabrikam:
            return random.triangular(140, 280, 180)
        return random.triangular(140, 150)
    if " 3Y CDS Spread" in topic:
        if is_fabrikam:
            return random.triangular(200, 400, 300)
        return random.triangular(150, 160)
    # unknown topic shape -- deliberate hard failure, as in the original
    assert False
|
7c1f559c516396564ac618767f79630f6ce515b8
| 25,637
|
def isAnonVariable(s):
    """True when *s* looks like an anonymized variable name: the part
    before the first underscore is one of the letters g/p/v/r/n/z
    followed by one or more digits, e.g. 'g12_total'."""
    prefix = s.split("_")[0]  # the part before the function name
    if len(prefix) < 2:
        return False
    return prefix[0] in "gpvrnz" and prefix[1:].isdigit()
|
ae24653e6213e5c264cbebb4a50da770750c4d35
| 252,754
|
def nop(token):
    """Identity pass-through used as a dummy function in tests."""
    return token
|
a4f7e14d37ba14b1e0a316fe61abad9b1894cb2b
| 90,624
|
from typing import Collection
import requests
def efetch_sequences_request(headers: Collection) -> requests.Response:
    """Sends POST request for sequence retrieval from NCBI Entrez.

    Args:
        headers: sequence identifiers to fetch; joined with commas and
            sent as the ``id`` field of the multipart body.

    Returns:
        The raw ``requests.Response``; presumably FASTA-formatted records,
        given ``rettype=fasta``.

    Raises:
        requests.HTTPError: when NCBI returns a non-200 status (the
            message suggests invalid query IDs as a likely cause).
    """
    response = requests.post(
        "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?",
        params={"db": "nuccore", "rettype": "fasta"},
        # 'files' sends the IDs in the request body rather than the URL,
        # avoiding URL-length limits for large ID lists.
        files={"id": ",".join(headers)},
    )
    if response.status_code != 200:
        raise requests.HTTPError(
            f"Error fetching sequences from NCBI [code {response.status_code}]."
            " Bad query IDs?"
        )
    return response
|
948eca0497ca2569ca86b369c8361c1650c67e17
| 90,874
|
def check_loaded_dict(dkt) -> bool:
    """
    Recursively verify that dict *dkt* (and every nested dict value) is
    free of the '__error__' marker key.
    :param dkt: value to check; non-dict values are trivially valid
    """
    if not isinstance(dkt, dict):
        return True
    if "__error__" in dkt:
        return False
    return all(check_loaded_dict(val) for val in dkt.values())
|
23eb7e52a9daf81f9e51c273d371667bdf307208
| 352,866
|
import re
def strip_escapes(s):
    """
    Strips ANSI SGR escape sequences (e.g. ESC[1;31m) from the string.

    :param string s: The string.
    :rtype: string
    """
    # Raw string avoids the invalid "\[" escape the original pattern used,
    # which emits SyntaxWarning/DeprecationWarning on modern Python. The
    # regex itself is unchanged: \033 is the octal escape for ESC.
    return re.sub(r"\033\[(?:(?:[0-9]*;)*)(?:[0-9]*m)", "", s)
|
fc518635a3cf1c4451de1db0e46513f537a44734
| 472,220
|
def _mogrify(cursor, xs):
    """Mogrify *xs* as a single SQL tuple parameter (shortcut for lists)."""
    params = (tuple(xs),)
    return cursor.mogrify('%s', params)
|
9c660571e5e7604cb79b125dc3d6d94e510ab812
| 451,374
|
def sort_x_by_y(x, y):
    """Return the elements of *x* reordered by the sort order of the
    corresponding elements of *y* (comparing only the y values)."""
    paired = sorted(zip(y, x), key=lambda pair: pair[0])
    return [element for _, element in paired]
|
6cf234bc1cdf44a9b9cf6018c2892397178922e8
| 231,557
|
import torch
def batch_sparse_dense_matmul(S, D):
    """
    Batch sparse-dense matrix multiplication
    :param torch.SparseTensor S: a sparse tensor of size (batch_size, p, q)
    :param torch.Tensor D: a dense tensor of size (batch_size, q, r)
    :return: a dense tensor of size (batch_size, p, r)
    :rtype: torch.Tensor
    """
    num_b = D.shape[0]
    S_shape = S.shape
    # indices()/values() require a coalesced sparse tensor
    if not S.is_coalesced():
        S = S.coalesce()
    # Split the flat COO indices/values into per-batch groups.
    # NOTE(review): the view assumes every batch holds the same number of
    # non-zeros -- confirm this invariant at the call sites.
    indices = S.indices().view(3, num_b, -1)
    values = S.values().view(num_b, -1)
    # Multiply each batch's (p, q) sparse slice (indices[1:, i] drops the
    # batch dimension) with its (q, r) dense slice, then stack the results
    # back into a (batch_size, p, r) dense tensor.
    ret = torch.stack([
        torch.sparse.mm(
            torch.sparse_coo_tensor(indices[1:, i], values[i], S_shape[1:], device=D.device),
            D[i]
        )
        for i in range(num_b)
    ])
    return ret
|
d591a462d5152edeb155ae41cd4c6d9592c135a0
| 375,603
|
def get_player_name() -> str:
    """
    Utility fill-in function to get player's name.
    :return: string of player's name (currently hard-coded)
    """
    player = 'Sahil'
    return player
|
0b31d8b9073c49b49ff73ba0fabe570f3d445344
| 173,855
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.