content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def rotate_point_by_90(x, y, k, w = 1.0, h = 1.0):
    """Rotate the point (x, y) on an image by k * 90 degrees.

    Params:
        x, y: the point. If the coordinates are not normalized to
            [0, 1], pass the real image width and height via w and h.
        k: number of quarter turns to apply (any integer; taken mod 4)
        w, h: width and height of the image (defaults suit normalized
            coordinates)
    """
    quarter_turns = k % 4
    if quarter_turns == 1:
        return y, w - x
    if quarter_turns == 2:
        return w - x, h - y
    if quarter_turns == 3:
        return h - y, x
    # Zero turns: the point is unchanged.
    return x, y
|
df8e0a31d25d13dd01faa7db8ed7b91d8ce87e49
| 79,734
|
def list_square_to_input(list_signs, list_index_signs):
    """Build the 9-element input layer for the neural network.

    Args:
        list_signs: the signs placed on the board; a list-valued entry
            encodes a cross, any other value encodes a circle.
        list_index_signs: board index (0-8) of each corresponding sign.

    Returns:
        list of 9 ints: -1 for a cross, 0 for an empty square,
        1 for a circle.
    """
    input_layer = [0] * 9
    # zip pairs each sign with its board position; isinstance replaces
    # the fragile `type(x) == list` comparison.
    for sign, index in zip(list_signs, list_index_signs):
        input_layer[index] = -1 if isinstance(sign, list) else 1
    return input_layer
|
532918046029493ca8e2fb43369808e46dcd8501
| 79,743
|
import copy
def sh_curve_summer_off(sh_array, resc=0.2):
    """
    Modifies space heating load array to be zero during non heating period
    from April to September
    Parameters
    ----------
    sh_array : np.array (of floats)
        Numpy array with space heating power in Watt (for each timestep)
    resc : float, optional
        Defines rescaling factor, related to "cut off" space heating energy
        (default: 0.2). E.g. 0.2 means, that 20 % of "cut off" space heating
        energy are used to rescale remaining demand
    Returns
    -------
    sh_array_mod : np.array (of floats)
        Numpy array holding modified space heating power in Watt (per timestep)
    """
    result = copy.copy(sh_array)
    # Length of one timestep in seconds, assuming the array spans a year.
    timestep = int(365 * 24 * 3600 / len(result))
    # Non-heating window: day 114 (late April) through day 297 (late Oct).
    summer_start = int(114 * 24 * 3600 / timestep)
    summer_stop = int(297 * 24 * 3600 / timestep)
    # Zero the summer powers, accumulating the removed energy in kWh.
    cut_off_energy = 0
    for idx in range(summer_start, summer_stop):
        if result[idx] > 0:
            cut_off_energy += result[idx] * timestep / (1000 * 3600)
            result[idx] = 0
    remaining_energy = sum(result) * timestep / (1000 * 3600)
    if remaining_energy == 0:
        factor = 1
    else:
        # Redistribute resc of the removed energy over the remaining demand.
        factor = (resc * cut_off_energy + remaining_energy) / remaining_energy
    result *= factor
    return result
|
915a1417075119106d29ed20bd7274f798887951
| 79,747
|
def _hash(number, alphabet):
"""Hashes `number` using the given `alphabet` sequence."""
hashed = ''
len_alphabet = len(alphabet)
while True:
hashed = alphabet[number % len_alphabet] + hashed
number //= len_alphabet
if not number:
return hashed
|
98bb4078c7b573651cc06dbbb280a3b53e6a7467
| 79,748
|
def get_width(n: int) -> int:
    """Returns width of rangoli."""
    if n == 1:  # base case: a single middle character
        return 1
    width = 0
    for ring in range(n):
        if ring == 0:
            width += 3  # middle character
        elif ring == n - 1:
            width += 2  # bookend characters
        else:
            width += 4  # inter-rim characters
    return width
|
c4a1f3eadc62cc547dc210fc2745406788c3154b
| 79,750
|
def has_alpha(img):
    """Checks if the image has an alpha channel.
    Args:
        img: an image
    Returns:
        True/False
    """
    # NOTE(review): this reports "has alpha" when the array is
    # 4-dimensional. For a single HxWxC image an alpha channel would
    # usually mean img.ndim == 3 with img.shape[-1] == 4 -- presumably
    # callers pass data where a 4th dimension implies alpha; confirm
    # against the call sites.
    return img.ndim == 4
|
15e673deb3024e3a321571b1a1a27d5c310089a1
| 79,752
|
import json
def load_dict(dict_path, array_key=False):
    """
    Args:
        dict_path: The path to a json dictionary
        array_key: If true the parser will consider the first element in a sublist at the key {_:[K,...,V],...}.
        Otherwise it will parse as {K:V,...} or {K:[...,V],...}
    Returns:
        A dictionary corresponding to the info from the file, or None when
        dict_path is None. If the file was formatted with arrays as the
        values for a key, the last element of the array is used as the
        value for the key in the parsed dictionary.
    """
    if dict_path is None:
        return None
    with open(dict_path) as f:
        parsed = json.load(f)
    # list() so we can re-key entries while iterating.
    for key in list(parsed.keys()):
        entry = parsed[key]
        # isinstance replaces the fragile `type(entry) == list` check.
        if isinstance(entry, list):
            if array_key:
                # Re-key: first element becomes the key, last the value.
                val = parsed.pop(key)
                parsed[val[0]] = val[-1]
            else:
                # Collapse the list to its last element.
                parsed[key] = entry[-1]
    return parsed
|
b3a0e50e2cf5cd73123ee7461dcaa46e6d04950a
| 79,760
|
def prepend_reserved_windows_names(parts: list):
    """
    Prefix the first name part with an underscore when it is a reserved
    Windows device name. The prepend is applied on every operating system.

    Parameters
    ==========
    parts : list
        ordered parts making up the name; part[0] is the first part

    Returns
    =======
    list:
        the same list, with part[0] modified if required
    """
    reserved_names = {'con', 'prn', 'aux', 'nul', 'com1', 'com2', 'com3', 'com4', 'com5', 'com6', 'com7', 'com8',
                      'com9', 'lpt1', 'lpt2', 'lpt3', 'lpt4', 'lpt5', 'lpt6', 'lpt7', 'lpt8', 'lpt9'}
    first = parts[0]
    # Reserved-name matching is case-insensitive on Windows.
    if first.lower() in reserved_names:
        parts[0] = f'_{first}'
    return parts
|
ba61680f8b67d1913c7d0680c52b081536c823f2
| 79,768
|
def base_convert_to_ten(base: int, digits: list[int]) -> int:
    """
    Interpret *digits* (least-significant digit first) in the given base.

    Examples:
        >>> digits = [0, 1, 1]
        >>> base_convert_to_ten(2, digits)
        6
    """
    assert abs(base) >= 2
    total = 0
    for position, digit in enumerate(digits):
        total += digit * base ** position
    return total
|
007f887a2dc5e8cc19dc9c9b9d1d248456d5eda3
| 79,771
|
def dot3(vec1, vec2):
    """Calculate dot product for two 3d vectors."""
    # Only the first three components participate.
    return sum(a * b for a, b in zip(vec1[:3], vec2[:3]))
|
61b2e8b0ee5d5f53be5604ba345022c39c36e1ae
| 79,772
|
import re
def check_id_type(row_dict):
    """Determine which type of id this record holds.
    Varying on specific field values and lack thereof.
    Results: 'specimen', 'extraction', 'library', 'pool', 'Invalid'
    """
    parent = row_dict['parent_jaxid']
    # A field equal to exactly 'Z' means "not applicable" for that level.
    is_z = re.compile('^Z$').match
    sample_z = bool(is_z(row_dict['sample_type']))
    nucleic_z = bool(is_z(row_dict['nucleic_acid_type']))
    seq_z = bool(is_z(row_dict['sequencing_type']))
    if re.match('pool', parent, flags=re.IGNORECASE):
        return 'pool'
    if sample_z and nucleic_z and seq_z:
        return 'Invalid!'
    if nucleic_z and seq_z:
        return 'specimen'
    if seq_z:
        return 'extraction'
    # No 'Z' fields left: the record is a library.
    return 'library'
|
5fc383dfea4053f7cac9a568c9118b7098dc5ff9
| 79,774
|
def read_labels_file(inputfile):
    """ Read a text file containing true and predicted labels """
    labels, preds = [], []
    with open(inputfile) as handle:
        for raw_line in handle:
            stripped = raw_line.strip()
            # Skip blank lines and '#' comment lines.
            if not stripped or stripped.startswith('#'):
                continue
            true_label, predicted = stripped.split()
            labels.append(int(true_label))
            preds.append(int(predicted))
    return labels, preds
|
8e3597c857bd396face1816211d8c7a3604204e0
| 79,777
|
def compute_in_degrees(digraph):
    """
    Compute the in-degrees for the nodes in digraph
    and store them in a dictionary.

    Args:
        digraph: dict mapping each node to an iterable of its out-neighbors.

    Returns:
        dict mapping each node to the number of nodes with an edge to it.
    """
    # Single pass over the edges instead of the original O(V^2)
    # membership scan over every node pair.
    in_degrees = dict.fromkeys(digraph, 0)
    for node in digraph:
        # set() so a duplicated entry in an adjacency list still counts
        # once, matching the membership-test semantics of the original.
        for neighbor in set(digraph[node]):
            if neighbor in in_degrees:
                in_degrees[neighbor] += 1
    return in_degrees
|
891880f9db83522364d78502673abf304daf3b1d
| 79,778
|
def get_columns_of_type(df, type_):
    """
    List the column names of *df* whose dtype matches *type_*.
    """
    matching = df.select_dtypes(include=type_)
    return matching.columns.tolist()
|
0a32e18e1b90ad54b7a477a7a1b669a9e99eb570
| 79,779
|
import logging
def get_logger(name="urlchecker", level=logging.INFO):
    """
    Get a default logger for the urlchecker library, meaning
    that we use name "urlchecker" and use the default logging
    level INFO
    Parameters:
      - name: the name for the logger (defaults to urlchecker)
      - level: the logging.<level> to set with setLevel()
    Returns: logging logger
    """
    logger = logging.getLogger(name)
    logger.setLevel(level)
    # getLogger returns the same object for a given name, so guard
    # against stacking a duplicate stream handler on every call
    # (the original added one unconditionally).
    if not logger.handlers:
        ch = logging.StreamHandler()
        # Console output is intentionally limited to ERROR and above.
        ch.setLevel(logging.ERROR)
        # formatting
        formatter = logging.Formatter(
            "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
        )
        ch.setFormatter(formatter)
        logger.addHandler(ch)
    return logger
|
dfcb0c10206c4e375e4934a09f631f4bd7cc0fd9
| 79,780
|
def validate_positive_int(ctx, param, value):
    """Check that a command line argument is a positive integer.

    Args:
        ctx: CLI context (unused).
        param: parameter name, used in error messages.
        value: the value to validate.

    Returns:
        The value unchanged, if valid.

    Raises:
        ValueError: if value is < 1 or is not integral.
    """
    # The original passed logging-style ("%s...", param) argument pairs
    # to ValueError, which never interpolates; format explicitly.
    if value < 1:
        raise ValueError(f"{param} parameter cannot be < 1")
    if int(value) != value:
        raise ValueError(f"{param} parameter must be an int, not a float")
    return value
|
bcfb381c02aa93237d720c314750df1c9255a9d4
| 79,782
|
def stats(d_raw_materials, f_secret_points, f_total_money_collected):
    """
    Show machine statistics
    Params:
        d_raw_materials: dict
        f_secret_points: int
        f_total_money_collected: float
    Returns:
        str
    """
    # (ingredient, measuring unit) pairs, in display order.
    ingredients = [
        ('sugar', 'tablespoons'),
        ('butter', 'teaspoons'),
        ('dark chocolate', 'tablespoons'),
        ('caramel', 'tablespoons'),
        ('light corn syrup', 'teaspoons'),
        ('sweetened condensed milk', 'teaspoons'),
        ('vanilla extract', 'teaspoons'),
        ('sprinkles', 'tablespoons'),
        ('bing cherries', 'tablespoons'),
        ('candied bacon', 'tablespoons'),
        ('bacon infused bourbon', 'tablespoons'),
        ('sea salt', 'tablespoons'),
    ]
    lines = [
        '{0} {1} {2} remaining'.format(name, d_raw_materials[name], unit)
        for name, unit in ingredients
    ]
    lines.append('Total Money Collected: ${0:.2f}'.format(f_total_money_collected))
    lines.append('Total secret points earned: {0}'.format(f_secret_points))
    return '\n'.join(lines)
|
76e5d0938ae1c2895a2ad3f4777ceefbfaecfe1d
| 79,785
|
def process_asa_data(rsa_data):
    """Process the .asa output file: atomic level SASA data."""
    atom_sasa = {}
    for line in rsa_data:
        # Fixed-column PDB-style record layout.
        atom_id = line[12:16].strip()
        chainid = line[21]
        res_id = (" ", int(line[22:26]), line[26])  # (hetflag, resseq, icode)
        # Solvent accessibility in Angstrom^2, kept as the raw text field.
        atom_sasa[(chainid, res_id, atom_id)] = line[54:62]
    return atom_sasa
|
87065fef29c363a23de0967b9cfe356332d6cf53
| 79,788
|
def add(x, y):
    """Return the sum of x and y."""
    result = x + y
    return result
|
9cd2540c29e801c49ad03765c27cb31d3f37d0d5
| 79,793
|
def collect(msg):
    """Collect the cookies from an HttpMessage."""
    request_header = msg.getRequestHeader()
    return request_header.getHttpCookies()
|
e62e637c6de39fb0462479275559c73752af3c0c
| 79,795
|
def call_function_from_varnames(fn_name, input_names, output_names):
    """
    Build C++ code that calls *fn_name*, passing the output parameters
    first and the input parameters second. Either list may be None or
    empty.
    """
    arguments = []
    if output_names:
        arguments.extend(output_names)
    if input_names:
        arguments.extend(input_names)
    return fn_name + '(' + ', '.join(arguments) + ');\n'
|
5eca59e29317afb1d9cf89ec2f53257d99c6a8aa
| 79,799
|
def mul_by_const(constant, rh_coeffs):
    """Multiplies a constant by a list of coefficients.
    Parameters
    ----------
    constant : numeric type
        The constant to multiply by.
    rh_coeffs : list
        (id, coefficient) pairs of the right hand side.
    Returns
    -------
    list
        A list of (id, coefficient) tuples with each coefficient scaled.
    """
    return [(term_id, constant * coeff) for term_id, coeff in rh_coeffs]
|
5dace9baea486a45189e50acb19a2b6a7cba8ee9
| 79,801
|
def make_valid_latex_string(s: str) -> str:
    """ Wrap a string in "$" delimiters so it is a valid latex math string.

    Delimiters already present are not duplicated, and the empty string
    is returned unchanged.

    Args:
        s: The input string.
    Returns:
        The properly formatted string.
    """
    if not s:
        return s
    prefix = "" if s.startswith("$") else "$"
    suffix = "" if s.endswith("$") else "$"
    return prefix + s + suffix
|
9f2355cb7a4f59efa47f40a746149804ac5f0229
| 79,804
|
import math
def erfc(x):
    """The complementary error function.

    Uses the Abramowitz & Stegun 7.1.26 rational approximation, whose
    polynomial fit is only valid for x >= 0; negative arguments are
    handled via the reflection identity erfc(-x) = 2 - erfc(x), which
    the original omitted. Absolute error is about 1.5e-7.
    """
    if x < 0:
        # The polynomial fit diverges for negative x; reflect instead.
        return 2.0 - erfc(-x)
    p = 0.3275911
    a1 = 0.254829592
    a2 = -0.284496736
    a3 = 1.421413741
    a4 = -1.453152027
    a5 = 1.061405429
    t = 1.0 / (1.0 + p * x)
    poly = (a1 + (a2 + (a3 + (a4 + a5 * t) * t) * t) * t) * t
    return poly * math.exp(-x * x)
|
6de6d815b8256be240192f7680b78f07fb578283
| 79,810
|
def get_char_num(x):
    """
    Convert signed integer to number in range 0 - 256
    """
    # Python's % already yields a non-negative result for negative x,
    # so a single modulo suffices.
    return int(x % 256)
|
f4fd6f9e38effe3011b80e738439e70fab910c0d
| 79,811
|
def _get_instance(resp):
"""
Get the one and only match for the instance list in the response. If
there are more or less and one match, log errors and return None.
"""
instances = [i for r in resp['Reservations'] for i in r['Instances']]
# return the one and only match
if len(instances) == 1:
return instances[0]
|
00644a4a9292f067a938fefced4441d92363b918
| 79,816
|
def divisor(field, data):
    """
    Construct a divisor from the data.
    INPUT:
    - ``field`` -- function field
    - ``data`` -- dictionary of place and multiplicity pairs
    EXAMPLES::
        sage: K.<x> = FunctionField(GF(2)); R.<t> = K[]
        sage: F.<y> = K.extension(t^3 - x^2*(x^2 + x + 1)^2)
        sage: from sage.rings.function_field.divisor import divisor
        sage: p, q, r = F.places()
        sage: divisor(F, {p: 1, q: 2, r: 3})
        Place (1/x, 1/x^2*y + 1)
         + 2*Place (x, (1/(x^3 + x^2 + x))*y^2)
         + 3*Place (x + 1, y + 1)
    """
    group = field.divisor_group()
    return group.element_class(group, data)
|
f87e3bdb40e88f448f5d286b39decf77044c56ec
| 79,818
|
from pathlib import Path
def get_config_path(base_path, vin):
    """Return path to settings file."""
    # Search order: <vin>.ini in cwd, <vin>.ini under base_path,
    # default.ini in cwd, default.ini under base_path.
    for name in (f'{vin}.ini', 'default.ini'):
        for candidate in (Path(name), Path(base_path) / name):
            if candidate.is_file():
                return candidate
    raise ValueError(f"no default.ini or {vin}.ini available")
|
68cb87e2e91dd5723c958d6f35df658ba0e88c87
| 79,826
|
from typing import List
import functools
import operator
def prod(factors: List[int]) -> int:
    """ Compute the product of a list of integers. """
    result = 1  # product of an empty list is 1
    for factor in factors:
        result *= factor
    return result
|
b4e0aac9d3dd66d36eea04d98ee7676fa6f7841d
| 79,828
|
import math
def previous_power_of_2(n):
    """
    Return the biggest power of 2 that does not exceed n.

    Integer bit arithmetic is used for n >= 1 because math.log2 rounds
    for large integers (e.g. log2(2**53 - 1) == 53.0 exactly), which
    made the original float-based version return a power of 2 *larger*
    than n. Values 0 < n < 1 keep the float formula and yield
    fractional powers of 2; n <= 0 raises ValueError as before.
    """
    if n >= 1:
        # Exact for arbitrarily large ints; also correct for floats,
        # since floor(log2(n)) == floor(log2(floor(n))) for n >= 1.
        return 1 << (int(n).bit_length() - 1)
    # 0 < n < 1 (math.log2 raises a domain error for n <= 0).
    return 2 ** math.floor(math.log2(n))
|
eb621b47d33409b84530d57e308a4debde7788e1
| 79,829
|
import shutil
def copy_sample(sample, destination):
    """
    Copy a sample to a destination.
    :param sample: Sample name (str)
    :param destination: Destination directory (str)
    :rtype: Output path (str)
    """
    # Samples live under the relative "samples/" directory.
    source = "samples/{0}".format(sample)
    output_path = "{0}/{1}".format(destination, sample)
    shutil.copytree(source, output_path)
    return output_path
|
551a88e6243034b44cc2c863d8196ce2461c8e8b
| 79,831
|
def repeater(fun, num_repeats):
    """
    Return a function f that applies `fun` to its argument num_repeats
    times: f(i) == fun(fun(...fun(i)...)) nested num_repeats deep, and
    f(i) == i when num_repeats is 0. Non-positive counts return the
    input unchanged (the original fell through and returned None for
    negative counts).

    :param fun: function
    :param num_repeats: int
    :return: function
    """
    def retfun(x):
        # range() is empty for num_repeats <= 0, so no special cases.
        for _ in range(num_repeats):
            x = fun(x)
        return x
    return retfun
|
8b1d1d60d17722a91832f9dfbadffcb9c96eecc9
| 79,833
|
import pathlib
def extract_file_extension(file_path_or_url):
    """
    Extract the file extension if it exists.
    Args:
        file_path_or_url: The path to the file. Type can be anything that pathlib.Path understands.
    Returns:
        File extension without the period, i.e. ("txt" not ".txt"). None if the path does not have an extension.
    """
    suffix = pathlib.Path(file_path_or_url).suffix
    # suffix includes the leading '.'; drop it, or report no extension.
    return suffix[1:] if suffix else None
|
85c8f12e1c501b80534fa91720dd6fff94808c9f
| 79,837
|
def _get_et(sel_cim_data):
"""Return evapotranspiration."""
return sel_cim_data.Evap_tavg
|
6c1ccabf075e897650dab0fdceef6b04046d8316
| 79,838
|
def update_query(context, **kwargs):
    """Construct an URL contains only the querystring (excluding "?") by updating the current URL's querystring with the provided arguments."""
    query = context['request'].GET.copy()
    for key, value in kwargs.items():
        query[key] = value
    return query.urlencode()
|
aa97f32ee4273c9f795ea513ae679d666ec81699
| 79,839
|
def paths_concordance_fna(path_fixtures_base):
    """Paths to FASTA inputs for concordance analysis."""
    concordance_dir = path_fixtures_base / "concordance"
    # Regular files with a .fna suffix only; directory order is kept.
    return [
        entry
        for entry in concordance_dir.iterdir()
        if entry.is_file() and entry.suffix == ".fna"
    ]
|
fc4d4c7a988db3602a245b57df1727c27ebbf867
| 79,851
|
def mapify_iterable(iter_of_dict, field_name):
    """Convert an iterable of dicts into one big dict keyed by the chosen
    field. Later items win when two share the same key value.
    """
    return {item[field_name]: item for item in iter_of_dict}
|
e2b21fcd9bb311f467f81888becc2e8f1ad84444
| 79,852
|
def get_required_static_files(analysis_settings):
    """Based on config options, return a list of static data files that
    will be required.
    """
    # These three are always required.
    static_files = ['footprint', 'vulnerability', 'damage_bin_dict']
    # The random-number file is only needed when explicitly enabled.
    model_settings = analysis_settings.get('model_settings', {})
    if model_settings.get('use_random_number_file'):
        static_files.append('random')
    return static_files
|
1bae2bf2e16caef0c568cbc2237a36ebf5d7780b
| 79,859
|
def nest_level(lst):
    """Calculate the nesting level of a list."""
    if not isinstance(lst, list):
        return 0  # non-list leaf
    deepest = 0
    for item in lst:
        deepest = max(deepest, nest_level(item))
    # An empty list still counts as one level.
    return deepest + 1
|
a7ca3f768b638c4588bab50fa5e3b5b148fade7d
| 79,860
|
def in_shape_from(output_shape):
    """
    Input size required for an output_shape x output_shape SPD tensor.
    Args:
        output_shape (int): The dimension of square tensor to produce.
    Returns:
        (int): The input shape associated with a
        `[output_shape, output_shape]` SPD tensor.
    Notes:
        This is the output_shape-th triangular number (the sum of the
        first n natural numbers)
        https://cseweb.ucsd.edu/groups/tatami/handdemos/sum/
    """
    return output_shape * (output_shape + 1) // 2
|
84ada6010d53e292dc8366d3eccbf5d59da12f5e
| 79,861
|
def extract_sample_name_from_sam_reader(sam_reader):
    """Returns the sample name as derived from the BAM file of reads.
    Args:
        sam_reader: Already opened sam_reader to use to extract the sample
            names from. This sam_reader will not be closed after this
            function returns.
    Returns:
        The sample ID annotated in the read group.
    Raises:
        ValueError: There is not exactly one unique sample name in the SAM/BAM.
    """
    samples = set()
    for read_group in sam_reader.header.read_groups:
        # Skip read groups with an empty sample id.
        if read_group.sample_id:
            samples.add(read_group.sample_id)
    if not samples:
        raise ValueError(
            'No non-empty sample name found in the input reads. Please provide the '
            'name of the sample with the --sample_name argument.')
    if len(samples) > 1:
        raise ValueError(
            'Multiple samples ({}) were found in the input reads. DeepVariant can '
            'only call variants from a BAM file containing a single sample.'.format(
                ', '.join(sorted(samples))))
    return next(iter(samples))
|
f0be6306f8a107e874baec36b94b74ae9a5e0a1c
| 79,863
|
def vmul(vect, s):
    """
    Multiply each entry of a vector by a scalar. Works also for flat
    matrices.
    :param vect: the vector to be multiplied with the scalar value
    :param s: the scalar value
    :type vect: double or integer iterable
    :type s: double or integer
    :return: The resulting vector s * vect
    :rtype: double iterable
    """
    return [component * s for component in vect]
|
e6c1da5fbdaa7f4e13025c6d97cf87f543be4341
| 79,867
|
import re
def remove_punctuation(question):
    """
    Removes punctuation (? . ! : , ;) from a question.

    The original pattern "[?|\\.|!|:|,|;]" treated "|" as a literal
    inside the character class, so it also stripped pipe characters
    unintentionally; the class is now just the punctuation itself.
    """
    return re.sub(r"[?.!:,;]", '', question)
|
93e10c6001a60fedcb5c72801fb23f95bd77e4db
| 79,869
|
def test(_method):
"""@test annotation for test methods."""
_method.IS_TEST_METHOD = True
return _method
|
ba51acaef451c726900ded7222cbf5f044c45734
| 79,871
|
def hhmm(secs):
    """
    Format a number of seconds as zero-padded hours and minutes (HH:MM).
    """
    hours = int(secs // 3600)
    minutes = int(secs % 3600 // 60)
    return f"{hours:02d}:{minutes:02d}"
|
2d834aa2b3396f83e992c447bd0d3625dcc11b1c
| 79,872
|
import random
import string
def get_random_str(length):
    """Return random string with desired length."""
    letters = [random.choice(string.ascii_lowercase) for _ in range(length)]
    return ''.join(letters)
|
e6ef28b941f5d54bb79a6a4dc08bfbde5a8bc7e5
| 79,874
|
from typing import List
def tag(parts: List[str]) -> str:
    """
    Get only interesting values from *parts*.
    - values without `=`
    - values starting with `text=`
    Source: https://sv.wiktionary.org/wiki/Mall:tagg
    >>> tag(["historia"])
    'historia'
    >>> tag(["biologi", "allmänt"])
    'biologi, allmänt'
    >>> tag(["politik", "formellt", "språk=tyska"])
    'politik, formellt'
    >>> tag(["kat=nedsättande", "text=något nedsättande"])
    'något nedsättande'
    """
    selected = []
    for part in parts:
        if part.startswith("text="):
            # Keep only the value after the first '='.
            selected.append(part.split("=")[1])
        elif "=" not in part:
            selected.append(part)
    return ", ".join(selected)
|
e4f513633419613276053463e9e686ea030cc57d
| 79,875
|
from typing import List
from typing import Set
def unanimous_answers(group: List[str]) -> Set[str]:
    """Finds the answers that every member of the group gave (set
    intersection over all members)."""
    member_answer_sets = (set(member) for member in group)
    return set.intersection(*member_answer_sets)
|
78c2aff0bab86cd0394009eea2d55096fe65fe7c
| 79,877
|
def get_classes(y) -> int:
    """
    Get the total number of classes.
    Args:
        y : The labels array of the data; classes are along axis 1.
    Returns:
        int: Number of total classes
    """
    n_classes = y.shape[1]
    return int(n_classes)
|
f1f518cca3e5f7e43f2f93513be17283bcb0f443
| 79,881
|
import logging
def update_classy_state(task, state_dict):
    """
    Updates the task with the provided state dictionary.
    Args:
        task: ClassyTask instance to update
        state_dict: State dict, should be the output of a call to
            ClassyTask.get_classy_state().
    Returns:
        True when the state was applied, False when loading failed.
    """
    logging.info("Loading classy state from checkpoint")
    try:
        task.set_classy_state(state_dict)
    except Exception:
        # Best-effort: report the failure instead of propagating it.
        logging.exception("Could not load the checkpoint")
        return False
    logging.info("Checkpoint load successful")
    return True
|
cb05441c1def7c0f1508d9271472fd854db155f1
| 79,882
|
def find_contributing_basins(chainids, HB_gdf):
    """
    Given an input GeoDataFrame of HydroBasins shapefiles and a list of chainids
    denoting which basins are part of the chain, this function walks upstream
    from the upstream-most basin by following the "NEXT_DOWN" attribute until
    all possible basins are included. This process is repeated, but stops when
    the previous basin is encountered. The result is a list of sets, where each
    set contains the INCREMENTAL basin indices for each subbasin. i.e. the
    most-downstream subbasin would be found by unioning all the sets.
    IMPORTANT: chainids must be arranged in US->DS direction.
    Parameters
    ----------
    chainids : list
        Denotes which basins are part of the chain
    HB_gdf : GeoDataFrame
        HydroBasins shapefiles
    Returns
    -------
    list of sets
        each set contains the incremental basin indices for each subbasin
    """
    subbasin_idcs = []
    visited_subbasins = set()
    for idx in chainids:
        # The incremental set for this chain link starts with the link itself.
        sb_idcs = set([idx])
        # Frontier: rows whose NEXT_DOWN points at the current basin,
        # i.e. its immediate upstream neighbours.
        sb_check = set(HB_gdf[HB_gdf.NEXT_DOWN == HB_gdf.HYBAS_ID[idx]].index)
        while sb_check:
            idx_check = sb_check.pop()
            # Basins claimed by an earlier (more upstream) chain link are
            # skipped, which is what makes each set incremental.
            if idx_check in visited_subbasins:
                continue
            sb_idcs.add(idx_check)
            basin_id_check = HB_gdf.HYBAS_ID[idx_check]
            # NOTE(review): '-' binds tighter than '|', so visited basins are
            # removed only from the newly found upstream set before the union
            # with the remaining frontier -- presumably intended, since
            # already-queued frontier entries are re-filtered by the check
            # above anyway; confirm.
            sb_check = (
                sb_check | set(HB_gdf[HB_gdf.NEXT_DOWN == basin_id_check].index) - visited_subbasins
            )
        # Store the incremental indices
        subbasin_idcs.append(sb_idcs)
        # Update the visited subbasins (so we don't redo them)
        visited_subbasins = visited_subbasins | sb_idcs
    return subbasin_idcs
|
54a7d1fc97de37e284dfcce5217f3fded1d04d77
| 79,885
|
def transform_labels_two(row):
    """Takes original score (label) and converts to binary classes"""
    # Scores of 2 or below map to class 0, anything above to class 1.
    return int(int(row['label']) > 2)
|
97ea43e8a58f0b176dac62f3ed172fbf94110d06
| 79,886
|
def FindPhase(name, phases):
    """Find the specified phase, or return None"""
    target = name.lower()
    # Case-insensitive lookup by phase name.
    return next((phase for phase in phases if phase.name.lower() == target), None)
|
0497a4dc29aa170bd45af87401d9a518a16979fb
| 79,889
|
def rsrpad(data: bytes, block_size_bytes: int) -> bytes:
    """Return data padded to a multiple of block_size_bytes.

    Arguments:
        data {bytes} -- Data string to pad.
        block_size_bytes {int} -- Block size for padding.
    Returns:
        bytes -- Padded data.

    Per the rocksmith convention the first padding byte is null and the
    remaining padding bytes carry the total pad count (RS itself appears
    to use random chars there). Data already on a block boundary is
    returned unchanged.
    """
    pad_len = -len(data) % block_size_bytes
    if pad_len == 0:
        return data
    filler = chr(pad_len).encode() * (pad_len - 1)
    return data + b"\x00" + filler
|
a449d42200a626cb97a90e256f46b3ea8de2d1e6
| 79,892
|
def _split_by_length(msg, size):
"""
Splits a string into a list of strings up to the given size.
::
>>> _split_by_length('hello', 2)
['he', 'll', 'o']
:param str msg: string to split
:param int size: number of characters to chunk into
:returns: **list** with chunked string components
"""
return [msg[i:i + size] for i in range(0, len(msg), size)]
|
1e2314c8c65d824501f2f5dd679bc56c650ab98a
| 79,894
|
def remove_chars(str_, char_list):
    """
    Remove every occurrence of each entry of char_list from str_.
    Args:
        str_ (str):
        char_list (list):
    Returns:
        str: outstr
    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_str import *  # NOQA
        >>> str_ = '1, 2, 3, 4'
        >>> char_list = [',']
        >>> result = remove_chars(str_, char_list)
        >>> print(result)
        1 2 3 4
    """
    # Entries may be multi-character substrings, so replace() is used
    # rather than a translation table.
    result = str_
    for chars in char_list:
        result = result.replace(chars, '')
    return result
|
9571d88be0d5d33d45da9d28887f8010a7302306
| 79,895
|
import random
import string
def random_alpha(n=8):
    """Return random set of ASCII letters with length `n`."""
    # SystemRandom draws from os.urandom, i.e. is cryptographically strong.
    rng = random.SystemRandom()
    return "".join(rng.choice(string.ascii_letters) for _ in range(n))
|
ef297dfea568ef498d5ff7a60b7879327915236e
| 79,902
|
from datetime import datetime
import pytz
def set_last_update(details, *args, **kwargs):  # pylint: disable=unused-argument
    """
    Pipeline function to add extra information about when the social auth
    profile has been updated.
    Args:
        details (dict): dictionary of information about the user
    Returns:
        dict: updated details dictionary (the same object, mutated)
    """
    # Local import keeps this drop-in; datetime.timezone.utc replaces the
    # third-party pytz.UTC dependency and yields identical POSIX timestamps.
    from datetime import timezone
    details["updated_at"] = datetime.now(tz=timezone.utc).timestamp()
    return details
|
fb1856f368aa5e87daace63d906cca975ee2c1f1
| 79,909
|
def get_shrink_type(plugins):
    """Gets the type of shrinking process requested by the user, taking into account backward compatibility
    values
    :param dict plugins: the part of the manifest related to plugins
    :return: None (if none selected), "vmware-vdiskmanager" or "qemu-img" (tool to be used)"""
    requested = plugins['minimize_size'].get('shrink')
    # Identity checks on purpose: legacy manifests used booleans, and
    # `1`/`0` must not be treated as True/False here.
    if requested is True:
        return 'vmware-vdiskmanager'
    if requested is False:
        return None
    if requested == 'qemu-img-no-compression':
        return 'qemu-img'
    return requested
|
aef9d08345af85d77ceb5b48ef5a2b642ff71bfa
| 79,910
|
def getKfactorsFrom(output):
    """
    Read NLLfast output and return the k-factors.

    Returns the list of k-factor values parsed from the data line, or
    False when the output is empty or cannot be parsed.
    """
    if not output:
        return False
    else:
        lines = output.split('\n')
        il = 0
        line = lines[il]
        process = False
        # Scan forward to the "K_NLO" header, remembering the process name
        # announced on a "process:" line along the way.
        while not "K_NLO" in line and il < len(lines) - 2:
            if "process" in line:
                process = line[line.find("process:") + 8:].replace(" ", "")
            il += 1
            line = lines[il]
        if not process:
            return False
        # Line with header
        line = lines[il]
        # Count number of mass entries
        nmass = line.count('GeV')
        # Line with values
        line = lines[il + 2]
        # NOTE(review): eval() on each token of external tool output is a
        # code-execution risk if the output is not trusted; float(x) would
        # be safer but changes int-valued tokens to floats -- confirm
        # callers before changing.
        data = [eval(x) for x in line.split()]
        # Expected layout: nmass mass columns, 9 cross-section columns,
        # then the k-factors (presumably -- based on the slicing below).
        if len(data) != nmass + 11:
            return False
        else:
            kFacs = data[9 + nmass:]
            return kFacs
|
12d1c1f8e221ce264389ee2329f348e19d0a8f67
| 79,911
|
def indexAll(inputList=None, value=None):
    """
    Find every index at which a given value occurs in a list.
    :param inputList: input as a list
    :param value: the value to look for
    :return: list of indices where value occurs
    """
    if not isinstance(inputList, list):
        raise TypeError('Input list must be a list object.')
    matches = []
    for position, element in enumerate(inputList):
        if element == value:
            matches.append(position)
    return matches
|
ce6c7d9bc389aedcb89e881f8920ae2e27b3391f
| 79,917
|
def trim(x):
    """Strip whitespace from both ends of the string."""
    stripped = x.strip()
    return stripped
|
13d40de5372fc08de529baf10709026263418cf9
| 79,918
|
def correct_ordering_of_component_masses(mass_1, mass_2):
    """Ensure mass 1 always greater than mass 2."""
    # Swap when needed so the heavier mass comes first.
    if mass_2 >= mass_1:
        return mass_2, mass_1
    return mass_1, mass_2
|
e822bb6a9799148d6fea286f18c149d9e7042718
| 79,919
|
def getMid(elem):
    """Get mid xyz coordinates given a pyNastran Element
    """
    positions = elem.get_node_positions()
    # Mean over the nodes gives the element centroid.
    return positions.mean(axis=0)
|
8599c45d5b2a4c3db490abead7b3d19cfd437462
| 79,924
|
from typing import Union
from pathlib import Path
def resource(*nodes: Union[Path, str]) -> Path:
    """Gets a path of a test resource file under resources/."""
    base = Path(__file__).parent / "resources"
    return base.joinpath(*nodes)
|
76d1fb793c3ae6f3f444f6744ffc8b5a59a95d97
| 79,926
|
def sq_vidx(self, i, ltrip_of_idx):
    """Index of the optimal distribution (q_vars) in the optimal solution
    of the Exponential Cone Programming.
    Args:
        i: int
        ltrip_of_idx: int - length of triplet t,u,v
    Returns:
        int
    """
    # q_vars are stored after the 3 * ltrip_of_idx triplet variables.
    offset = 3 * ltrip_of_idx
    return offset + i
|
aea5e26695fd26f6b79d8925675b50040e04eef5
| 79,935
|
def _format_values(val, n, e_type):
"""Returns a feature formated value
Args:
val: `str`, "num1,num2,num3,...", value to format
n: `int`, number of elememts within tuples
e_type: `str`, the type of each elements within tuples
Returns
a list of tuples of e_type values
"""
lst = [e_type(float(x)) for x in val.split(',')]
return [lst[i:i+n] for i in range(0, len(lst), n)]
|
cb9af4fc8d52089ba8832456d946cebcef67f096
| 79,938
|
def land(value):
    """Returns a new trampolined value that lands off the trampoline."""
    tagged = ('land', value)
    return tagged
|
ace17a82150849a61aa141e8dd977ccf00c90b1f
| 79,940
|
import math
def hsvToRGB(h, s, v):
    """
    Convert HSV (hue, saturation, value) color space to RGB (red, green,
    blue) color space.
    **Parameters**
        **h** : float
            Hue, a number in [0, 360].
        **s** : float
            Saturation, a number in [0, 1].
        **v** : float
            Value, a number in [0, 1].
    **Returns**
        **r**, **g**, **b** : float
            Red, green and blue components, each in [0, 1].
    """
    sector = math.floor(h / 60.0) % 6
    fraction = (h / 60.0) - math.floor(h / 60.0)
    p = v * (1.0 - s)
    q = v * (1.0 - fraction * s)
    t = v * (1.0 - (1.0 - fraction) * s)
    # One branch per 60-degree hue sector.
    if sector == 0:
        return v, t, p
    if sector == 1:
        return q, v, p
    if sector == 2:
        return p, v, t
    if sector == 3:
        return p, q, v
    if sector == 4:
        return t, p, v
    return v, p, q
|
f602d02fc7f3d7d9dada5c02fea94a8d10bb38b0
| 79,941
|
def square(a):
    """
    Square a number
    Parameters:
        a (float): Number to square
    Returns:
        float: square of a
    """
    squared = a * a
    return squared
|
917ce0008f29a32304eb42021867ae9fade094f4
| 79,942
|
def _unpack_player(sid, p, **_): # pylint: disable=invalid-name
"""Unpack a player"""
return sid, p
|
c0c0e37bafbd14488c6cf7ca58ecc8ac5f330a45
| 79,943
|
def figure_layout(annotations=None, title_text=None, x_label=None, y_label=None, show_legend=False):
    """Customize plotly figures
    Parameters
    ----------
    annotations: a list of dictionaries with the values of parameters
        used to format annotations.
    title_text: str. Figure title.
    x_label: str. Title for the xaxis.
    y_label: str. Title for the yaxis.
    show_legend: boolean. If 'True' show legend.
    """
    def axis_style():
        # Fresh dict per axis, so mutating one axis later cannot leak
        # into the other.
        return dict(showline=True,
                    showgrid=False,
                    showticklabels=True,
                    linecolor='rgb(204, 204, 204)',
                    linewidth=2,
                    ticks='outside',
                    tickfont=dict(family='Arial',
                                  size=12,
                                  color='rgb(82, 82, 82)'))
    return dict(xaxis=axis_style(),
                yaxis=axis_style(),
                title=dict(text=title_text,
                           font=dict(family='Arial',
                                     size=20,
                                     color='rgb(37,37,37)')),
                showlegend=show_legend,
                plot_bgcolor='white',
                autosize=True,
                yaxis_title=y_label,
                xaxis_title=x_label,
                annotations=annotations)
|
abcd332b62b9e32b0b682014979ae627456204fe
| 79,949
|
def SetCounts(setList):
    """Take a list of sets and return a dict that maps elements in the sets
    to the number of sets they appear in as ints.

    Uses collections.Counter over a flattened view of the sets instead of
    the previous two-pass build (collect all keys, then count).
    """
    from collections import Counter
    from itertools import chain
    # Each element occurs at most once per set, so counting occurrences in
    # the flattened stream equals counting the number of sets containing it.
    return dict(Counter(chain.from_iterable(setList)))
|
bc37cfb65682e5bcbfc6400cc9c63fa69a3ae4e7
| 79,950
|
def get_root_tables(metadata):
    """Get all root tables (tables who are never a child table in a foreign key relation)
    of the dataset.

    Args:
        metadata: a ``MetaData`` instance exposing ``get_tables()`` and
            ``get_foreign_keys()``
    Returns:
        set: the root table names; if every table is a child of some foreign
        key (e.g. cyclic schemas), all table names are returned instead
    """
    table_names = {table['name'] for table in metadata.get_tables()}
    root_tables = set(table_names)
    for foreign_key in metadata.get_foreign_keys():
        # discard() is a no-op for names already removed or unknown
        root_tables.discard(foreign_key['table'])
    if root_tables:
        return root_tables
    # No table is a root — fall back to the full table set.
    return table_names
|
47c795724973f74fa02f5602514589250535c2bb
| 79,954
|
from typing import Any
def table_input(name: str, label: str, subparameters: list[Any], **kwargs: Any) -> dict[str, Any]:
    """Allows to have a table input for SLimsGate.

    Args:
        name (String): the name of the input
        label (String): the label of the input
        subparameters (list): the list of parameters that need to be in the table
        **kwargs -- every additional and optional parameter
            it needs to be of the form defaultValue="it is a default value"
    Returns: (dict) a dictionary containing all these elements
    """
    table = {
        'name': name,
        'label': label,
        'type': "TABLE",
        'subParameters': subparameters,
    }
    for key, extra_value in kwargs.items():
        table[key] = extra_value
    return table
|
ce936a9011b36d6715054edfe320da390e7ba3ff
| 79,958
|
def global_maxima(mylist):
    """
    Return (index, value) of the global maximum of the list.

    Fix: the previous version initialised the running maximum to -1, so a
    list whose values are all <= -1 (e.g. [-5, -3]) wrongly returned
    (-1, -1). The first element is now always taken as the initial maximum.
    An empty list still returns (-1, -1).
    """
    index = -1
    maxima = -1
    for i, item in enumerate(mylist):
        value = float(item)
        # index == -1 means no element seen yet: always accept the first one.
        if index == -1 or value > maxima:
            maxima = value
            index = i
    return index, maxima
|
75008235fdf89b8579e594372c05973df4f09aca
| 79,959
|
import hashlib
def hash_barcode(barcode_data):
    """
    Calculate an MD5 hash of barcode data.

    The input is stringified and UTF-8 encoded first, so any object with a
    sensible str() works. Fixes shadowing of the built-in ``hash`` and drops
    the redundant str() around hexdigest() (which already returns a str).

    NOTE: MD5 is fine for fingerprinting/dedup, but must not be used for
    anything security-sensitive.
    """
    digest = hashlib.md5(str(barcode_data).encode())
    return digest.hexdigest()
|
49887c88d3fa64f1a9e0b2840ddc1756f0ed6154
| 79,960
|
from typing import List
from typing import Any
from typing import Tuple
import itertools
def common_subsequences(
    dataset: List[Any],
    min_match_len = 3,
    max_match_len = 10) -> List[Tuple[int, List[Any]]]:
    """Catalog common sequential patterns
    (ie, occuring twice or more) that are of length
    min_match_len to max_match_len that occur within
    list dataset.
    Return a list of (count, subsequence), most
    frequent first.
    """
    # Accumulates one copy of each matched subsequence per occurrence;
    # the groupby pass below turns the duplicates into counts.
    results = []
    def _count_occurrences(sublist, parent):
        # Number of times sublist appears (at any offset) inside parent.
        return sum(parent[i:i+len(sublist)]==\
            sublist for i in range(len(parent)))
    # NOTE(review): this guard only fires when len(dataset) > 1; a dataset of
    # length 0 or 1 silently skips the check — confirm whether intended.
    if max_match_len > len(dataset) > 1:
        raise Exception("max_match_len cannot be > len(dataset)")
    for i1 in range(len(dataset) - max_match_len):
        # NOTE(review): `previous` is assigned but never read — dead variable.
        previous = None
        for i2 in range(min_match_len, max_match_len):
            # head: candidate subsequence starting at i1; tail: the remainder
            # of the dataset searched for further occurrences of head.
            head = dataset[i1:i1 + i2]
            tail = dataset[i1 + i2:]
            count = _count_occurrences(head, tail)
            # Append head once per occurrence (count in tail + the head itself),
            # but only the first time this exact subsequence is seen.
            if head not in results:
                results = results + [head for i in range(count + 1)]
    # Collapse duplicates: sort so equal subsequences are adjacent, group them,
    # then map each group to (occurrence_count, subsequence).
    results = [list(j) for i, j in itertools.groupby(sorted(results))]
    results = [(len(seq), seq[0]) for seq in results]
    # Keep only patterns occurring twice or more, most frequent first.
    results = filter(lambda r: r[0] > 1, results)
    results = sorted(results,
        key=lambda m: m[0], reverse=True)
    return results
|
84f74bf34d08c94f9a06d938935ef12e74333924
| 79,961
|
def read_query(name: str):
    """ Read in a query from a separate SQL file.

    Retrieves a query from a separate SQL file and stores it as a string for
    later execution. Query files are stored in the utils/sql_queries directory.

    Args:
        name: Name of the SQL file to retrieve
    Returns:
        (str): String representation of the full SQL query.
    """
    query_path = f"src/phoebe_shelves_clt/sql_backend/sql_queries/{name}.sql"
    with open(query_path) as query_file:
        contents = query_file.read()
    return contents
|
5b1dc1f9d1aee856cd8b47651aeadf6cd9b5408d
| 79,962
|
def decimal_to_binary(n, i):
    """
    Converts i to a binary list that contains n bits (most significant first).

    :param n: total number of bits (integer)
    :param i: number to convert (integer)
    :return: (list) of 0/1 ints
    """
    bit_string = format(i, "0{0}b".format(n))
    return [int(bit) for bit in bit_string]
|
cdf5d77c99ea802986e8647564ab8a8cc201a85c
| 79,967
|
def aic(log_likelihood, df):
    """Calculate the Akaike Information Criterion from the log-likelihood
    and the degrees of freedom (number of estimated parameters)."""
    penalty = 2 * df
    return penalty - 2 * log_likelihood
|
1a19aab95b264ee7b9729e865144b2a6e9e28647
| 79,969
|
def toggle_switch(context):
    """
    Dependencies for toggle_switch gizmo.
    """
    css = 'tethys_gizmos/vendor/bootstrap_switch/dist/css/bootstrap3/bootstrap-switch.min.css'
    js = 'tethys_gizmos/vendor/bootstrap_switch/dist/js/bootstrap-switch.min.js'
    return (css, js)
|
4c64627214c2ddf058369c04fda3c8b8c7b22811
| 79,970
|
def string_to_number(value: str, strict: bool=False):
    """
    Convert a numeric string to its true numeric representation.

    Fixes: the previous version gated conversion on str.isnumeric(), which
    is False for floats ("3.5") and negatives ("-7") — so those were never
    converted despite the docstring — and True for unicode numerics like
    "²", which then crashed with an uncaught ValueError. The bare except
    is also replaced with a targeted ValueError handler.

    Parameters
    ----------
    value (str): value to convert
    strict (bool): if True, raise when the value is not an int or float

    Returns
    -------
    Any: int if the string parses as an integer, float if it parses as a
    float, otherwise the original string (unless strict).
    """
    if not isinstance(value, str):
        raise ValueError(f"Value to convert to number should be a string. Got {value}")
    try:
        return int(value)
    except ValueError:
        pass
    try:
        return float(value)
    except ValueError:
        if strict:
            raise ValueError(f"Value should be an integer or a float. Got '{value}'.") from None
    return value
|
ac3266a3c2ee3d029bffb260ef6bf4af689a57cc
| 79,973
|
def get_bar_type_space(ax, plot_params):
    """
    Gets the amount of space to place in between the ends of bars and labels.

    The space is a fixed fraction (plot_params["bar_type_space_scaling"]) of
    twice the largest absolute x-axis limit.
    """
    xlim_lo, xlim_hi = ax.get_xlim()
    largest_limit = max(abs(xlim_lo), abs(xlim_hi))
    x_width = 2 * largest_limit
    return plot_params["bar_type_space_scaling"] * x_width
|
6e5ee30966a7239a9a3b0502ac4023c14ec622b0
| 79,977
|
def moeda(n=0, moeda='R$'):
    """
    Format a monetary value with a currency prefix and a comma as the
    decimal separator (Brazilian convention).

    :param n: monetary value
    :param moeda: currency prefix, default 'R$'
    :return: formatted string, e.g. 'R$10,00'
    """
    amount = f'{n:.2f}'.replace('.', ',')
    return f'{moeda}{amount}'
|
9c8d3e2f4b488ae8ba866caaa7c1ec391479384c
| 79,978
|
def isB(ner_tag: str):
    """
    We store NER tags as strings, but they contain two pieces: a coarse tag
    type (BIO) and a label (PER), e.g. B-PER. Return True when the tag is a
    B (beginning) tag.

    :param ner_tag: full NER tag string, e.g. "B-PER"
    :return: bool
    """
    return ner_tag[:1] == "B"
|
b24752ccb3a83936157fa7a7e73b5591bc64a625
| 79,980
|
def make_choices(choices):
    """
    Pair each entry with itself for use as field choices,
    e.g. ["a", "b"] -> [("a", "a"), ("b", "b")].
    """
    return [(choice, choice) for choice in choices]
|
907d1212f1d6981d1b9fdb5a9b8e57ca7a596cb0
| 79,981
|
import pickle
def read_usernames(username_path: str, current_username_set: set = None) -> set:
    """Read usernames from a saved pickle and merge in any locally added ones.

    Args:
        username_path (str): path to the pickle file
        current_username_set: optional set of already-known usernames to merge
            in. Defaults to None (treated as empty); this replaces the
            original mutable-default ``set()`` anti-pattern.
    Returns:
        set: a set of lichess.org usernames
    """
    if current_username_set is None:
        current_username_set = set()
    # SECURITY NOTE: pickle.load can execute arbitrary code — only load
    # pickle files from trusted sources.
    with open(username_path, "rb") as f:
        username_set = pickle.load(f)
    username_set.update(current_username_set)
    return username_set
|
2c094012394921b85e877a1009e19a844e3e30f9
| 79,983
|
import copy
def _format_options_dict(options):
"""
Format the options dictionnary to print 'False' or 'True' instead of '0' or '1'.
:param options: options module dict.
:return: Formatted options for Beautifultable printing
:rtype: Dict.
"""
# Make a copy of the original options dict (To avoid modification of the parent)
f_options = copy.deepcopy(options)
for opt_n, opt_p in f_options.items():
opt_p["Required"] = "True" if opt_p["Required"] else "False"
return f_options
|
878c21d30b7247e2dc79ccbd168275f9bf6a45bb
| 79,987
|
def add_qartod_ident(qartod_id, qartod_test_name):
    """
    Decorator factory: tags a QARTOD test function with attributes
    corresponding to database fields.
    """
    def decorator(func):
        func.qartod_id = qartod_id
        func.qartod_test_name = qartod_test_name
        return func
    return decorator
|
1aeaea32c652af6801bdf30c977e08211c69bde0
| 79,989
|
def get_more_than_x(numbers, value):
    """Return the elements of ``numbers`` that are >= ``value``.

    Note: despite the name, the comparison is inclusive (>=); callers may
    rely on that, so the threshold behavior is preserved. The manual
    append loop is replaced by a list comprehension.
    """
    return [number for number in numbers if number >= value]
|
7f63fb87ec8f32b3ee4ddf3b6459b77f6fc417a4
| 79,992
|
def symbols_from_xyz(xyz_list):
    """
    Extract the elemental symbols from parsed xyz-file lines.

    Parameters
    ----------
    xyz_list: List of lists
        Outer list is the whole xyz file; each inner list is one line of the
        file containing the symbol followed by the x, y, z coordinates.

    Returns
    ----------
    symbols: List of strings
        List of atomic symbols.

    Replaces the index-based range(len(...)) loop with a direct
    comprehension over the rows.
    """
    return [str(line[0]) for line in xyz_list]
|
293dcd0d52282ab7eab0831fe93de9420fdffb1b
| 79,996
|
def extract_text_before_first_compute_cell(text):
    """
    OUTPUT: Everything in text up to the first {{{.
    If the marker is absent, the whole text is returned.
    """
    # str.partition returns (text, '', '') when the separator is missing,
    # so the head is always exactly what we want.
    head, _, _ = text.partition('{{{')
    return head
|
d0312c74e542cbe4f6460d7fdef192a376d3cbe2
| 79,997
|
def check_reaction(reaction_row, keep_list):
    """Checks a row parsed from a reaction file and checks it only contains acceptable things.

    Mutates ``reaction_row`` in place when accepted: empty temperature limits
    (indices 10 and 11) are filled with defaults 0.0 and 10000.0.

    Args:
        reaction_row (list): List parsed from a reaction file and formatted to be able to called Reaction(reaction_row)
        keep_list (list): list of elements that are acceptable in the reactant or product bits of row

    Returns:
        bool: True if the reaction only references acceptable species,
        False otherwise.

    Raises:
        ValueError: if a rejected DESORB/FREEZE reaction references a species
        not in the species list.
    """
    # Indices 0-6 hold the reactant/product species names; accept the row
    # only if every one (case-insensitively) is in keep_list.
    if all(x.upper() in keep_list for x in reaction_row[0:7]):
        # NOTE(review): index 10/11 presumably are the min/max temperature
        # limits; an empty min implies both defaults are missing — confirm
        # against the Reaction() constructor.
        if reaction_row[10] == "":
            reaction_row[10] = 0.0
            reaction_row[11] = 10000.0
        return True
    else:
        # Index 1 is the reaction type; DESORB/FREEZE rows must not reference
        # unknown species, so reject loudly rather than silently dropping.
        if reaction_row[1] in ["DESORB", "FREEZE"]:
            reac_error = (
                "Desorb or freeze reaction in custom input contains species not in species list"
            )
            reac_error += f"\nReaction was {reaction_row}"
            raise ValueError(reac_error)
        return False
|
f7062dc16e7652749fe659b488f6cd3bc13a9773
| 80,000
|
def inverse_of_relation(r):
    """
    Function to determine the inverse of a relation.

    :param r: a relation (iterable of (x, y) pairs)
    :return: list of the pairs reversed, i.e. [(y, x), ...]
    """
    inverted = []
    for pair in r:
        first, second = pair
        inverted.append((second, first))
    return inverted
|
e886877b87e09ac72dfc07bfce7ae19e64d38806
| 80,008
|
def start_end_edges_subpath_list(subpath_list):
    """
    First/last node pairs (edges) from a list of node pairs.

    :param subpath_list: list of node pairs
    :return: tuple of (first pair, last pair)
    """
    first_edge = subpath_list[0]
    last_edge = subpath_list[-1]
    return first_edge, last_edge
|
a76fef442b790505eb04d528ee45c312b4c77d11
| 80,011
|
def is_macro(var):
    """Return True if the given var is a macro (wrapped in @@{ ... }@@)."""
    opens_like_macro = var.startswith("@@{")
    closes_like_macro = var.endswith("}@@")
    return opens_like_macro and closes_like_macro
|
ebc2aee1abfe98202a0434787d1d829f4dcf16ab
| 80,014
|
def value_set(d):
    """A set of all the values of a dict.

    Fix: ``dict.itervalues()`` is Python 2 only and raises AttributeError
    on Python 3; ``dict.values()`` works on both.
    """
    return set(d.values())
|
8b68720bade866826bcd9cbd52e326c4a7d618e0
| 80,015
|
from pathlib import Path
from typing import Optional
def get_current_version(datadir: Path) -> Optional[str]:
    """Retrieves current version from the app data directory, if it exists.

    Args:
        datadir (Path): Path of the data directory
    Returns:
        Optional[str]: The current version, otherwise None
    """
    version_file = datadir / "VERSION"
    if not version_file.exists():
        return None
    return version_file.read_text().strip()
|
622d4417e49d7cc837db6c754b3356bd1811d07e
| 80,017
|
import itertools
def get_list_count(*args):
    """Count the total number of elements across all the iterables passed."""
    flattened = itertools.chain.from_iterable(args)
    return sum(1 for _ in flattened)
|
4e9df524da3ce069685bcdd6db5c81a43f46b640
| 80,018
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.