content stringlengths 42 6.51k |
|---|
def guess(key, values):
    """
    Return starting parameter guesses for fitting with this function class.

    :param key: unused here; kept for interface compatibility
    :param values: sequence of observed values; its minimum seeds the fit
    :return: list of initial parameter values
    """
    baseline = min(values)
    return [baseline, 0.2, 3, 0.2, 3, 115]
def _is_number(s):
"""
Checks if input is a number
Parameters
----------
s : anything
"""
try:
float(s)
return True
except ValueError:
return False |
def link_local(mac: str) -> str:
    """Convert a colon-separated MAC address to an IPv6 link-local address.

    Uses the modified EUI-64 scheme: insert ff:fe between the two halves and
    flip the universal/local bit of the first octet.

    :param mac: MAC address, e.g. '00:11:22:33:44:55'
    :return: IPv6 link-local address, e.g. 'fe80::211:22ff:fe33:4455'
    """
    octets = mac.split(":")
    # Build the EUI-64 interface identifier: ff:fe goes into the middle.
    octets[3:3] = ["ff", "fe"]
    # Flip the universal/local bit (bit 1) of the first octet.
    octets[0] = "%x" % (int(octets[0], 16) ^ 2)
    # Group the eight octets into four 16-bit hextets.
    hextets = ["".join(octets[pos:pos + 2]) for pos in range(0, len(octets), 2)]
    return "fe80::%s" % ":".join(hextets)
def repeat_range(pattern, nmin, nmax):
    """Build a regex snippet matching `nmin` to `nmax` repeats of `pattern`.

    :param pattern: an `re` pattern
    :type pattern: str
    :param nmin: minimum number of repeats
    :type nmin: int
    :param nmax: maximum number of repeats
    :type nmax: int
    :rtype: str
    """
    # Doubled braces produce the literal {m,n} quantifier.
    return rf'(?:{pattern:s}){{{nmin:d},{nmax:d}}}'
def wpa_validation_check(words=None):
    """
    Optimise a wordlist for WPA cracking.

    Removes duplicates and drops candidates whose length is outside the
    valid WPA passphrase range (8..63 characters).

    :param words: iterable of candidate passwords (default: empty)
    :return: de-duplicated, length-filtered list (order not preserved)
    """
    # Avoid the mutable-default-argument pitfall: a shared [] default
    # object would be reused across calls.
    if words is None:
        words = []
    return [word for word in set(words) if 8 <= len(word) <= 63]
def levenshtein(s1, s2, allow_substring=False):
    """Compute the Levenshtein (edit) distance between two strings.

    The distance is the number of character substitutions, insertions or
    deletions needed to turn s1 into s2.  With ``allow_substring=True``, s1
    may match anywhere inside s2 for free, so e.g. "hello" vs "hello there"
    gives zero.

    :param string s1: the first string
    :param string s2: the second string
    :param bool allow_substring: whether s1 may match as a substring of s2
    :returns: the edit distance
    :rtype: int
    """
    n, m = len(s1), len(s2)
    # dp[i][j] = distance between s1[:i] and s2[:j]
    dp = [[0] * (m + 1) for _ in range(n + 1)]
    for i in range(n + 1):
        dp[i][0] = i
    # With substring matching the alignment may start anywhere in s2 for free.
    for j in range(m + 1):
        dp[0][j] = 0 if allow_substring else j
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            substitution = 0 if s1[i - 1] == s2[j - 1] else 1
            dp[i][j] = min(
                dp[i - 1][j] + 1,                # deletion
                dp[i][j - 1] + 1,                # insertion
                dp[i - 1][j - 1] + substitution, # substitution / match
            )
    return min(dp[n]) if allow_substring else dp[n][m]
def sort_apps(app_list):
    """Sort the apps in the admin site: 'auth' first, then 'web', then the rest
    in their original order."""
    label_to_pos = {app['app_label']: pos for pos, app in enumerate(app_list)}
    remaining = [app['app_label'] for app in app_list]
    ordered = []
    # Pull the special apps to the front, in this fixed priority.
    for special in ('auth', 'web'):
        if special in label_to_pos:
            remaining.remove(special)
            ordered.append(app_list[label_to_pos[special]])
    for label in remaining:
        ordered.append(app_list[label_to_pos[label]])
    return ordered
def _shared(permissions, groupid):
"""
Return True if the given permissions object represents shared permissions.
Return False otherwise.
Reduces the client's complex permissions dict to a simple shared boolean.
:param permissions: the permissions dict sent by the client in an
annotation create or update request
:type permissions: dict
:param groupid: the groupid of the annotation that the permissions dict
applies to
:type groupid: unicode
"""
return permissions["read"] == ["group:{id}".format(id=groupid)] |
def mock_getenv_old(name):
    """
    Mock for getenv returning the OLD environment variables with pre-defined
    values.

    :param name: environment variable name
    :return: canned 'OLDENV_*' value, or None for unknown names
    """
    canned = {
        "DB_TYPE": "OLDENV_DB_TYPE",
        "DB_HOST": "OLDENV_DB_HOST",
        "DB_NAME": "OLDENV_DB_NAME",
        "DB_USER": "OLDENV_DB_USER",
        "DB_PASS": "OLDENV_DB_PASSWORD",
        "FAKE_LOCALE": "OLDENV_FAKE_LOCALE",
    }
    # .get mirrors the original's implicit None for unmatched names.
    return canned.get(name)
def apply_offsets(coordinates, offset_scales):
    """Pad a bounding box outward by per-axis offset fractions.

    #Arguments
        coordinates: (x_min, y_min, x_max, y_max) box in point form.
        offset_scales: (x_offset_scale, y_offset_scale) fractions of the box
            width/height to pad on each side.
    #Returns
        (x_min, y_min, x_max, y_max) as ints, padded outward.
    """
    x_min, y_min, x_max, y_max = coordinates
    x_offset_scale, y_offset_scale = offset_scales
    x_offset = (x_max - x_min) * x_offset_scale
    y_offset = (y_max - y_min) * y_offset_scale
    # Bug fix: the original added x_offset to y_max and y_offset to x_max,
    # mixing up the axes; each axis must use its own offset.
    x_min = int(x_min - x_offset)
    x_max = int(x_max + x_offset)
    y_min = int(y_min - y_offset)
    y_max = int(y_max + y_offset)
    return (x_min, y_min, x_max, y_max)
def which_path(execname):
    """
    Locate an executable on the PATH using the `which` utility.

    :param execname: name of the executable to look up
    :return: the path to `execname` as a str, or None if it can't be found
    """
    import subprocess
    from warnings import warn
    try:
        p = subprocess.Popen(['which', execname], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        sout, serr = p.communicate()
        if p.returncode != 0:
            # Bug fix: the original concatenated execname onto a format string
            # without ever calling .format(), so the message contained a
            # literal '{0}' followed by the name.
            warn('"which" failed to find the executable {0}, is it '
                 'installed?'.format(execname))
        else:
            # communicate() returns bytes on Python 3; decode so callers
            # get a usable str path (the original returned bytes).
            return sout.decode().strip()
    except OSError as e:
        warn('Could not find "which" to find executable location. '
             'Continuing, but you will need to set `execpath` manually.'
             ' Error:\n' + str(e))
    return None
def is_hex(s: str) -> bool:
    """
    Hack way to show if a string is a hexadecimal -- works for our purposes
    here (accepts optional sign and '0x' prefix, as int(s, 16) does).
    """
    try:
        int(s, 16)
    except ValueError:
        return False
    return True
def insertion_sort_recursive(integers):
    """Sort a list of integers with recursive insertion sort.

    The input is not modified; a sorted copy is returned.

    :param integers: iterable of comparable values
    :return: new sorted list
    """
    integers_clone = list(integers)

    def helper(arr, n):
        # Sort arr[:n+1] in place, assuming arr[:n] is sorted by recursion.
        if n > 0:
            helper(arr, n - 1)
            # Bug fix: check `n > 0` BEFORE indexing arr[n-1].  The original
            # tested `arr[n] < arr[n-1]` first, so at n == 0 it compared
            # arr[0] against arr[-1] (the LAST element) before the guard
            # short-circuited the loop.
            while n > 0 and arr[n] < arr[n - 1]:
                arr[n], arr[n - 1] = arr[n - 1], arr[n]
                n -= 1

    helper(integers_clone, len(integers_clone) - 1)
    return integers_clone
def pp_num(num):
    """
    Pretty-print a number with commas as thousands separators.

    :param num: numeric value; floats are truncated toward zero (matching
        the original '%d' formatting)
    :return: str, e.g. 1234567 -> '1,234,567'
    """
    # The format mini-language's ',' option replaces the hand-rolled
    # three-digit grouping loop and handles negatives correctly.
    return format(int(num), ',d')
def str_decrypt(key,enc):
    """Malware string decryption algorithm.

    Runs a 32-bit xorshift-style generator seeded with `key` and adds each
    keystream byte to the corresponding encrypted byte.

    :param key: integer seed for the keystream generator
    :param enc: iterable of ints (the encrypted bytes)
    :return: decrypted string
    """
    dec =""
    for i in enc:
        # Keystream update.  Operator precedence matters here: `&` binds
        # tighter than `^`, so e.g. `(tmp1 >> 17) ^ tmp1 & 0xffffffff` is
        # `(tmp1 >> 17) ^ (tmp1 & 0xffffffff)` -- keep the expressions
        # exactly as written to stay faithful to the malware's math.
        tmp1 = ((key << 13) ^ key) & 0xffffffff
        tmp2 = (tmp1 >> 17) ^ tmp1 & 0xffffffff
        key = (tmp2 << 5) ^ tmp2 & 0xffffffff
        # `i + key & 0xff` is `(i + key) & 0xff` (`+` binds tighter than `&`).
        dec += chr(i + key & 0xff)
    return dec
def validate_tweet(tweet: str) -> bool:
    """It validates a tweet.

    Args:
        tweet (str): The text to tweet.
    Raises:
        ValueError: Raises if tweet length is more than 280 unicode characters.
    Returns:
        bool: True if validation holds.
    """
    # Bug fix: the original measured length via
    # ((tweet).join(tweet)).count(tweet) + 1, which undercounts strings with
    # repeated substrings (e.g. "aa" -> 3) and miscounts the empty string.
    # len() counts unicode code points directly.
    if len(tweet) > 280:
        raise ValueError(f"tweet is more than 280 unicode characters\n {tweet}")
    return True
def switch_subgraph_ids(adj_nodes_list, idx_mapping):
    """Map every node id in a list of adjacency lists through `idx_mapping`.

    Args:
        adj_nodes_list (list): list of lists containing node ids
        idx_mapping (dict): node id -> mapped node id
    Returns:
        list: list of lists containing the new mapped node ids
    """
    # KeyError propagates for unmapped ids, exactly as the explicit loop did.
    return [[idx_mapping[node] for node in adj_nodes] for adj_nodes in adj_nodes_list]
def hello(name: str) -> str:
    """Return a greeting for *name*, or a generic greeting when it is empty.

    Examples::
        >>> hello('John')
        'Hello, John!'
        >>> hello('')
        'Hello!'

    Arguments:
        name -- Name of the person to greet.
    Returns:
        A greeting string.
    """
    return f"Hello, {name}!" if name else "Hello!"
def is_iterable(obj):
    """Return True if the object supports iteration."""
    try:
        iter(obj)
        return True
    except Exception:
        return False
def fix_nested_filter(query, parent_key):
    """
    Fix the invalid 'filter' inside 'nested' clauses of an Elasticsearch query.

    Args:
        query (dict): an Elasticsearch query (or any sub-piece during recursion)
        parent_key (any): the key under which `query` was found
    Returns:
        dict: the query with every nested 'filter' renamed to 'query'
    Raises:
        Exception: if a nested clause already carries a 'query' key
    """
    if isinstance(query, dict):
        if parent_key == 'nested' and 'filter' in query:
            fixed = dict(query)
            if 'query' in fixed:
                raise Exception("Unexpected 'query' found")
            fixed['query'] = fixed.pop('filter')
            return fixed
        return {key: fix_nested_filter(value, key) for key, value in query.items()}
    if isinstance(query, list):
        # List elements recurse with their index as the parent key.
        return [fix_nested_filter(piece, idx) for idx, piece in enumerate(query)]
    return query
def _is_json_mimetype(mimetype):
"""Returns 'True' if a given mimetype implies JSON data."""
return any(
[
mimetype == "application/json",
mimetype.startswith("application/") and mimetype.endswith("+json"),
]
) |
def _build_rule(protocol: str, port: int, address: str) -> str:
"""
builds a rule string to use when updating a firewall
Args:
protocol (str): protocol to use
port (int): port to use
address (str): ip address to use
Returns:
str: formatted string to use when sending the api request
"""
return f"protocol:{protocol},ports:{port},address:{address}" |
def _opSeqToStr(seq, line_labels):
    """ Used for creating default string representations.

    Renders an operation sequence as a concatenated string, appending
    "@(<labels>)" unless the line labels are absent or the wildcard ('*',).
    """
    if len(seq) == 0:  # special case of empty operation sequence (for speed)
        if line_labels is None or line_labels == ('*',): return "{}"
        else: return "{}@(" + ','.join(map(str, line_labels)) + ")"
    # Sub-lists render bracketed as '[abc]', except single-element lists,
    # which render as the bare element.
    def process_lists(el): return el if not isinstance(el, list) else \
        ('[%s]' % ''.join(map(str, el)) if (len(el) != 1) else str(el[0]))
    if line_labels is None or line_labels == ('*',):
        return ''.join(map(str, map(process_lists, seq)))
    else:
        return ''.join(map(str, map(process_lists, seq))) \
            + "@(" + ','.join(map(str, line_labels)) + ")"
def make_capital(string):
    """Capitalize the first letter of a string, leaving the rest as is.

    Parameters
    ----------
    string : str
        The string to capitalize (e.g. 'foRUm').

    Returns
    -------
    str
        The capitalized string (e.g. 'FoRUm').  Non-str or empty inputs are
        returned unchanged.
    """
    if not (isinstance(string, str) and string):
        return string
    return string[0].upper() + string[1:]
def create_register_form_data(username, password, first_name, last_name, phone_number):
    """Build a register_form_data dict from the given registration fields.

    Args:
        username: stored under the 'username' key
        password: stored under the 'password' key
        first_name: stored under the 'first_name' key
        last_name: stored under the 'last_name' key
        phone_number: stored under the 'phone_number' key
    Returns:
        dict: register_form_data
    """
    return {
        'username': username,
        'password': password,
        'first_name': first_name,
        'last_name': last_name,
        'phone_number': phone_number,
    }
def _func_star(args):
"""
A function and argument expanding helper function.
The first item in args is callable, and the remainder of the items are
used as expanded arguments. This is to make the function header for reduce
the same for the normal and parallel versions. Otherwise, the functions
would have to do their own unpacking of arguments.
"""
f = args[0]
args = args[1:]
return f(*args) |
def treversed(*args, **kwargs):
    """Like reversed, but returns a tuple."""
    rev = reversed(*args, **kwargs)
    return tuple(rev)
def delete_nth(order, max_e):
    """
    Keep each value of `order` at most `max_e` times, preserving order.

    Given a list lst and a number N, create a new list that contains each
    number of lst at most N times without reordering.  For example if N = 2
    and the input is [1,2,3,1,2,1,2,3], the result is [1,2,3,1,2,3].

    :param order: list or tuple of ints/floats
    :param max_e: maximum number of occurrences to keep per value
    :return: filtered list
    :raises TypeError: for non-list/tuple input or non-numeric elements
    """
    if type(order) is not list and type(order) is not tuple:
        # (typo 'Arument' in the original message fixed)
        raise TypeError('Argument must be list or tuple of integers.')
    # Validate every element up front, as the original did, so no partial
    # result is ever observable.  The strict type() checks are deliberate:
    # bools are rejected (type(True) is bool, not int).
    for value in order:
        if type(value) is not int and type(value) is not float:
            raise TypeError('Non integer or float.')
    emitted = {}
    out = []
    for value in order:
        count = emitted.get(value, 0)
        if count < max_e:
            out.append(value)
            emitted[value] = count + 1
    return out
def mergeDicts(dict1, dict2):
    """Merge two dictionaries; keys in dict2 win on collision."""
    merged = dict(dict1)
    merged.update(dict2)
    return merged
def num_classes(name):
    """Return the number of classes in the named dataset (0 if unknown).

    Used when creating models.

    :param name: dataset identifier, e.g. 'cifar10' or 'cifar100'
    """
    if name == 'cifar10':
        return 10
    elif name == 'cifar100':
        # Bug fix: CIFAR-100 has 100 classes; the original returned 10.
        return 100
    return 0
def _is_set(i, k):
"""
:param i: the index of the bit
:param k:
:return:
"""
# not sure this is actually the most efficient implementation
return (1 << i) & k != 0 |
def point_distance_squared(ax: int, ay: int, bx: int, by: int) -> int:
    """Return the squared Euclidean distance between (ax, ay) and (bx, by)."""
    dx = ax - bx
    dy = ay - by
    return dx * dx + dy * dy
def get_smaller_and_greater(a, b):
    """Compare two elements and return them as (smaller, greater).

    On ties the pair is returned as (b, a), matching the strict `<` test.
    """
    if b <= a:
        return b, a
    return a, b
def configuration_str( configuration, repetition = 1, prefix = '', suffix = '' ):
    """ Return a string repr (with a prefix and/or suffix) of the configuration or '' if it's None """
    s = ''
    if configuration is not None:
        s += '[' + ' '.join( configuration ) + ']'
        # NOTE(review): this renders repetition + 1, so the default
        # repetition=1 displays as '[2]'; presumably the stored value is
        # zero-based -- confirm with callers before changing.  A falsy
        # repetition (0/None) suppresses the bracket entirely.
        if repetition:
            s += '[' + str(repetition+1) + ']'
    if s:
        s = prefix + s + suffix
    return s
def flatten(l):
    """Recursively flatten nested lists down to a flat list of scalars.

    Args:
        l (list): input list (a non-list scalar is treated as [scalar])
    Returns:
        list: flattened list
    """
    if not isinstance(l, list):
        return [l]
    flat = []
    for element in l:
        flat.extend(flatten(element))
    return flat
def do_almost_nothing(value):
    """Do almost nothing: return the input incremented by one."""
    return value + 1
def get_file_from_path(path):
    """
    Return the final '/'-separated component of a path string.

    :param path: path string using '/' separators
    :return: the file name component
    """
    # rsplit with maxsplit=1 touches only the final separator.
    return path.rsplit('/', 1)[-1]
def judge(expected_out: str, output: str) -> bool:
    """
    Compare the two outputs ignoring whitespace differences.

    :param expected_out: expected output value of the task
    :param output: output value of the task
    :return: True when both are equal after whitespace normalization
    """
    def normalize(text: str) -> str:
        return ' '.join(text.split())
    return normalize(expected_out) == normalize(output)
def minmax(value, min_value, max_value):
    """Clamp `value` into the [min_value, max_value] bounds."""
    clamped_low = max(value, min_value)
    return min(clamped_low, max_value)
def parse_runtime_duration(runtime):
    """Parse a TransXChange runtime code (e.g. 'PT1H30M15S') into seconds.

    :param runtime: ISO-8601-style duration string containing 'PT'
    :return: total duration in seconds (int)
    """
    # Converters
    HOUR_IN_SECONDS = 60 * 60
    MINUTE_IN_SECONDS = 60
    time = 0
    runtime = runtime.split("PT")[1]
    if 'H' in runtime:
        split = runtime.split("H")
        time = time + int(split[0]) * HOUR_IN_SECONDS
        runtime = split[1]
    if 'M' in runtime:
        split = runtime.split("M")
        time = time + int(split[0]) * MINUTE_IN_SECONDS
        runtime = split[1]
    if 'S' in runtime:
        split = runtime.split("S")
        # Bug fix: the original multiplied seconds by MINUTE_IN_SECONDS,
        # inflating the 'S' component 60x.
        time = time + int(split[0])
    return time
def get_keys_by_value(dict_of_elements, value_to_find):
    """
    Return every key of `dict_of_elements` whose value equals `value_to_find`.

    :param dict_of_elements: dict to scan
    :param value_to_find: value to match (by ==)
    :return: list of matching keys
    """
    return [key for key, value in dict_of_elements.items() if value == value_to_find]
def get_pipeline(args):
    """Extract (plugin, argument-string) pairs from "@@"-delimited arguments.

    The joined argument string must START with "@@"; otherwise an empty
    pipeline is returned.
    """
    pipeline = []
    groups = " ".join(args).split("@@")
    if groups[0] != "":
        return pipeline
    for group in groups[1:]:
        tokens = group.rstrip().split(" ")
        pipeline.append((tokens[0], " ".join(tokens[1:])))
    return pipeline
def formatSignificantDigits(q,n):
    """
    Format a float with a precision that depends on its magnitude.

    Only works sensibly for numbers in [-100; 100].  Note: `n` is accepted
    for interface compatibility but the precision is hard-coded (2 decimals
    below 10 in magnitude, 1 decimal otherwise).

    Arguments:
        q : a float
        n : nominal number of significant figures (unused; hard-coded)
    Returns:
        String with the formatted value (space-padded sign position).
    """
    fmt = '{: 3.2f}' if abs(q) < 10 else '{: 3.1f}'
    return fmt.format(q)
def database(result):
    """
    Normalize the database name ('PDBE' becomes 'PDB'; others pass through).
    """
    name = result["database"]
    return "PDB" if name == "PDBE" else name
def get_time_duration_string(seconds):
    """Return the duration formatted with reasonable units (s/ms/us/ns).

    Exactly-zero input yields "0 s"; negative input falls through to the
    nanosecond branch, matching the original cascade.
    """
    if seconds >= 1:
        return "{:.3f} s".format(seconds)
    if seconds >= 0.001:
        return "{:.3f} ms".format(seconds * 1000)
    if seconds >= 0.000001:
        return "{:.3f} us".format(seconds * 1000000)
    if seconds == 0:
        return "0 s"
    return "{:.3f} ns".format(seconds * 1000000000)
def _get_fs(fs, nyq):
"""
Utility for replacing the argument 'nyq' (with default 1) with 'fs'.
"""
if nyq is None and fs is None:
fs = 2
elif nyq is not None:
if fs is not None:
raise ValueError("Values cannot be given for both 'nyq' and 'fs'.")
fs = 2 * nyq
return fs |
def norm_to_pump(dataDict):
    """Divide every curve in dataDict by its 'Pump Power' value.

    NOTE: no copy is made -- entries of dataDict are normalized IN PLACE and
    the same (mutated) dict object is returned.
    """
    normalized = dataDict
    for label in normalized:
        entry = normalized[label]
        entry['data'][1] = entry['data'][1] / entry['Pump Power']
    return normalized
def has_loop(page_list):
    """
    Check whether a list of page hits contains an adjacent page loop,
    i.e. (A >> A >> B) is True.

    :param page_list: list of page hits derived from BQ user journey
    :return: True if any page immediately repeats
    """
    for previous, current in zip(page_list, page_list[1:]):
        if previous == current:
            return True
    return False
def _dictify(value):
"""
Converts non-dictionary value to a dictionary with a single
empty-string key mapping to the given value. Or returns the value
itself if it's already a dictionary. This is useful to map values to
row's columns.
"""
return value if isinstance(value, dict) else {'': value} |
def temporal_iou(span_A, span_B):
    """
    Intersection-over-union of two temporal "bounding boxes".

    span_A: (start, end)
    span_B: (start, end)
    Returns 0 when the spans do not overlap.
    """
    inter_start = max(span_A[0], span_B[0])
    inter_end = min(span_A[1], span_B[1])
    if inter_start >= inter_end:
        return 0
    union_start = min(span_A[0], span_B[0])
    union_end = max(span_A[1], span_B[1])
    return float(inter_end - inter_start) / float(union_end - union_start)
def invert_defs_dict(defs_dict):
    """
    Invert the definitions dictionary so that each instrument program number
    maps to its entry's 'defs' value.

    Args:
        defs_dict (dict): values are dicts carrying 'program_numbers' (list)
            and 'defs'
    Returns:
        dict: program number -> defs
    """
    # Later entries overwrite earlier ones on duplicate program numbers,
    # matching the original loop's assignment order.
    return {
        number: entry['defs']
        for entry in defs_dict.values()
        for number in entry['program_numbers']
    }
def union(cluster_a, value_a, cluster_b, value_b):
    """
    Return the union of compatible clusters.

    Compatibility means the values agree on common variables.  The resulting
    variable-index set is the set union of both clusters' variables; for a
    shared variable the value from cluster_a is used (as before).
    """
    # Build one variable -> value mapping, letting cluster_a's entries win.
    assignment = dict(zip(cluster_b, value_b))
    assignment.update(zip(cluster_a, value_a))
    merged_vars = sorted(set(list(cluster_a) + list(cluster_b)))
    merged_values = tuple(assignment[var] for var in merged_vars)
    return tuple(merged_vars), merged_values
def Dic_Apply_Func(pyfunc,indic):
    """
    Purpose: apply a function to the value of every (key, value) pair in a
    dictionary while keeping the keys.

    Arguments:
        pyfunc --> a python function object.
        indic --> the input dictionary.
    """
    return {key: pyfunc(value) for key, value in indic.items()}
def euler56(lim=100):
    """Solution for problem 56: maximum digital sum of a**b for 2 <= a, b <= lim."""
    best = 1
    for base in range(2, lim + 1):
        power = base
        for _ in range(2, lim + 1):
            power *= base  # first product considered is base**2
            digit_sum = sum(int(ch) for ch in str(power))
            if digit_sum > best:
                best = digit_sum
    return best
def dict_to_list(inp_dictionary,replace_spaces=False):
    """Method to convert dictionary to a modified list of strings for input to argparse. Adds a '--' in front of keys
    in the dictionary.

    NOTE: renames alias keys inside inp_dictionary itself (mutates the
    caller's dict via pop/insert).

    Args:
        inp_dictionary (dict): Flat dictionary of parameters
        replace_spaces (bool): A flag for replacing spaces with replace_spaces_str to handle spaces on the command line.
    Returns:
        (list): a list of default parameters + user specified parameters
        None if inp_dictionary is None
    """
    #if replace_spaces is true, replaces spaces with replace_spaces_str for os command line calls
    replace_spaces_str = "@"
    if not isinstance(inp_dictionary,dict):
        raise ValueError("input to dict_to_list should be a dictionary!")
    # Handles optional names for the dictionary: keys on the left are accepted
    # aliases that get renamed to the canonical argparse destination on the right.
    optional_names_dict = \
        {'dataset_bucket':'bucket','feat_type':'featurizer','y':'response_cols','optimizer':'optimizer_type'}
    orig_keys = list(inp_dictionary.keys())
    for key, vals in optional_names_dict.items():
        if key in orig_keys:
            inp_dictionary[vals] = inp_dictionary.pop(key)
    temp_list_to_command_line = []
    # Special case handling for arguments that are False or True by default.
    # These are argparse store_true/store_false flags: only the bare '--flag'
    # token is emitted (no value), and only when the value differs from the
    # default.  Common misspellings ('ture', 'flase') are accepted on purpose.
    default_false = ['previously_split','use_shortlist','datastore', 'save_results','verbose', 'hyperparam', 'split_only', 'is_ki']
    default_true = ['transformers','previously_featurized','uncertainty', 'rerun']
    for key, value in inp_dictionary.items():
        if key in default_false:
            true_options = ['True','true','ture','TRUE','Ture']
            if str(value) in true_options:
                temp_list_to_command_line.append('--' + str(key))
        elif key in default_true:
            false_options = ['False','false','flase','FALSE','Flase']
            if str(value) in false_options:
                temp_list_to_command_line.append('--' + str(key))
        else:
            temp_list_to_command_line.append('--' + str(key))
            # Special case handling for null values
            null_options = ['null','Null','none','None','N/A','n/a','NaN','nan','NAN','NONE','NULL']
            if str(value) in null_options:
                temp_list_to_command_line.append('None')
            elif isinstance(value, list):
                # Lists serialize as a single comma-joined token.
                sep = ","
                newval = sep.join([str(item) for item in value])
                if replace_spaces == True:
                    temp_list_to_command_line.append(newval.replace(" ",replace_spaces_str))
                else:
                    temp_list_to_command_line.append(newval)
            else:
                newval = str(value)
                if replace_spaces == True:
                    temp_list_to_command_line.append(newval.replace(" ",replace_spaces_str))
                else:
                    temp_list_to_command_line.append(newval)
    return temp_list_to_command_line
def _merge_batch_class(existing, other):
"""
Sets or unions two sets, for building batch class restrictions.
If existing is None, the other set is returned directly
Parameters:
existing: iterable object of the current batch classes
other: iterable object of batch classes to add
Returns:
(set): union of existing and other
"""
if existing is None:
return set(other)
else:
return set(existing).union(set(other)) |
def defineParameter(dictionary, key, fallback = None):
    """
    Return dictionary[key] if present, otherwise the fallback.

    Fairly self explanatory, this gets used a *lot*.

    :param dictionary: mapping to look the key up in
    :param key: key to fetch
    :param fallback: value returned when the key is missing (default None)
    """
    # dict.get performs the membership test and the lookup in one step.
    return dictionary.get(key, fallback)
def _for(x):
""" _for """
ret = x * x
for i in (2, 3):
ret = ret * i
return ret |
def bet_size_sigmoid(w_param, price_div):
    """
    Part of SNIPPET 10.4: bet size from price divergence and a regulating
    coefficient, using a sigmoid-shaped sizing function.

    :param w_param: (float) Coefficient regulating the width of the bet size function.
    :param price_div: (float) Price divergence, forecast price - market price.
    :return: (float) The bet size.
    """
    scale = (w_param + price_div ** 2) ** (-0.5)
    return price_div * scale
def getMetricSummaries(metrics):
    """Compute per-factor precision, recall, and accuracy lists.

    Each element of `metrics` is a 2x2 confusion matrix [[TN, FP], [FN, TP]]
    (usually one per cross-validation factor).  Denominators are clamped to
    at least 1 so an empty class yields 0 instead of a ZeroDivisionError.

    Returns (precision_list, recall_list, accuracy_list) in that order.
    """
    precision = []
    recall = []
    acc = []
    for cm in metrics:
        true_pos = cm[1][1]
        precision.append(true_pos / max(cm[0][1] + true_pos, 1))
        recall.append(true_pos / max(sum(cm[1]), 1))
        acc.append((cm[0][0] + true_pos) / max(1, sum(cm[0]) + sum(cm[1])))
    return precision, recall, acc
def bgm_variant_sample(bgm_project, institution, variant):
    """
    Build a BGM variant-sample item dict.

    This item is not pre-posted to the database so gene list association
    with variant samples can be tested (due to the longer process of
    associating variant samples with gene lists when the latter is posted
    after the former).
    """
    return {
        "project": bgm_project["@id"],
        "institution": institution["@id"],
        "variant": variant["@id"],
        "CALL_INFO": "some_bgm_sample",
        "file": "some_bgm_vcf_file",
    }
def dbsimpson(f, limits: list, d: list):
    """Composite Simpson's 1/3 rule for double integration:

    int_{ay}^{by} int_{ax}^{bx} f(x,y) dxdy

    Args:
        f (func): two variable function, must return float or ndarray
        limits (list): limits of integration [ax, bx, ay, by]
        d (list): integral resolution [dx, dy]; (bx-ax)/dx and (by-ay)/dy
            should be even for the rule to be valid
    Returns:
        float: double integral of f(x,y) between the limits
    """
    ax, bx, ay, by = limits
    dx, dy = d
    nx = int((bx - ax) / dx)
    ny = int((by - ay) / dy)
    s = 0
    for i in range(ny + 1):  # loop of outer integral
        # Simpson weights: 1 at the endpoints, 4 at odd nodes, 2 at even
        # interior nodes.  Bug fix: the original wrote `i == 0 | i == ny`,
        # which parses as the chained comparison `i == (0 | i) == ny` and so
        # only matched the LAST node; `or` is the correct operator.
        if i == 0 or i == ny:
            p = 1
        elif i % 2 != 0:
            p = 4
        else:
            p = 2
        for j in range(nx + 1):  # loop of inner integral
            if j == 0 or j == nx:
                q = 1
            elif j % 2 != 0:
                q = 4
            else:
                q = 2
            x = ax + j * dx
            y = ay + i * dy
            s += p * q * f(x, y)
    return dx * dy / 9 * s
def euclidean_distance(list1, list2):
    """Calculate the Euclidean distance between two lists.

    Only plain lists are permitted (asserted); extra elements of the longer
    list are ignored, since zip stops at the shorter one.
    """
    assert isinstance(list1, list)
    assert isinstance(list2, list)
    squared = sum((a - b) ** 2 for a, b in zip(list1, list2))
    return squared ** 0.5
def manhattan(p,q):
    """
    Calculate the manhattan distance between sequences p and q.
    """
    # NOTE(review): `same` counts how many VALUES of p also occur anywhere
    # in q, and then only the first `same` coordinate positions are summed.
    # For the usual Manhattan distance this should presumably be
    # range(len(p)) -- looks like a bug; confirm intent with callers before
    # changing.
    same = 0
    for i in p:
        if i in q:
            same += 1
    n = same
    vals = range(n)
    # Sum of absolute coordinate differences over the first n positions only.
    distance = sum(abs(p[i] - q[i]) for i in vals)
    return distance
def safe_int(n):
    """
    Safely convert n to int, passing None through unchanged.
    """
    return int(n) if n is not None else n
def function1(arg1, arg2):
    """Return the sum of the two arguments."""
    return arg1 + arg2
def mult(vector, float):
    """
    Multiply a vector by a scalar and return the result as a new vector.

    :param vector: list with the elements of the vector
    :param float: scalar factor (parameter name shadows the builtin, kept
        for interface compatibility)
    :return: new list with the scaled elements
    """
    return [component * float for component in vector]
def sum_up_diagonals(matrix):
    """Given a matrix [square list of lists], return sum of diagonals.
    Sum of TL-to-BR diagonal along with BL-to-TR diagonal:
    >>> m1 = [
    ...     [1, 2],
    ...     [30, 40],
    ...     ]
    >>> sum_up_diagonals(m1)
    73
    >>> m2 = [
    ...     [1, 2, 3],
    ...     [4, 5, 6],
    ...     [7, 8, 9],
    ...     ]
    >>> sum_up_diagonals(m2)
    30
    """
    # Generalized to any square size: the original only handled 2x2 and 3x3
    # explicitly and returned 0 for every other size.  For odd sizes the
    # center element is counted in BOTH diagonals, matching the original
    # 3x3 behavior (5 appears twice in the doctest sum of 30).
    total = 0
    for i in range(len(matrix)):
        total += matrix[i][i]       # TL-to-BR diagonal
        total += matrix[i][-1 - i]  # TR-to-BL diagonal
    return total
def get_parallel(a, n):
    """Build the GNU parallel command line for n chunks of filename list a.

    The list is split into n near-equal chunks; the per-chunk offsets and
    sizes are passed as linked ::: argument groups to bbw_cli.py.
    """
    base, extra = divmod(len(a), n)
    # Chunk i starts at i*base plus one extra slot for each earlier
    # oversized chunk; the first `extra` chunks are one element longer.
    starts = [i * base + min(i, extra) for i in range(n)]
    sizes = [base + min(i + 1, extra) - min(i, extra) for i in range(n)]
    offset = ' '.join(str(v) for v in starts)
    amount = ' '.join(str(v) for v in sizes)
    parallel = "parallel --delay 1 --linebuffer --link python3 bbw_cli.py "
    return parallel + "--amount ::: " + amount + " ::: --offset ::: " + offset
def set_data_format(array_values):
    """
    Check and set the corresponding format for each value.

    Integer-like cells are normalized to their bare digits; everything else
    is single-quoted (empty cells become "''", double quotes are converted
    to single quotes, already-single-quoted text passes through).

    :param list[list[str]] array_values: list of rows of values
    :return: list[list[str]]: the formatted rows
    """
    formatted = []
    for row in array_values:
        new_row = []
        for cell in row:
            try:
                # Integer-like text normalizes to its canonical digits.
                cell = str(int(cell))
            except ValueError:
                if not cell:
                    cell = "''"
                elif type(cell) is str:
                    if cell.startswith("\""):  # If starts with " replace it with '
                        cell = "'" + cell[1:-1] + "'"
                    elif not cell.startswith("'"):
                        cell = "'" + cell + "'"
            new_row.append(cell)
        formatted.append(new_row)
    return formatted
def query_generator(search="", loc="", org="", created="", sort=""):
    """Generate a quoted repository-search query from the given parameters.

    Args:
        search: the string that the query is looking for.
        loc: where the query should look for the search string.
        org: the organization that contains the repository.
        created: the date range for repository creation that is considered.
        sort: the order in which results are sorted.
    Returns:
        Query string for the "get_repos_after" function.
    """
    # Empty parameters are dropped before joining, as filter(None, ...) did.
    parts = [param for param in (search, loc, org, created, sort) if param]
    return '"' + " ".join(parts) + '"'
def var_is_in_cosmic(variant_data):
    """Check if variant is in the COSMIC database.

    :param variant_data: A GeminiRow for a single variant.
    :type variant_data: GeminiRow.
    :returns: bool -- True when 'cosmic_ids' is present (not None).
    """
    return variant_data['cosmic_ids'] is not None
def upperize(val):
    """
    Uppercase `val` (via str()) when it is not None; otherwise return None.

    :param val: value to be converted
    :return: converted string, or None
    """
    if val is None:
        return None
    return str(val).upper()
def moveTo(obj, device):
    """
    Recursively move an object (or its contents) to a compute device.

    obj: the python object to move, or a container whose contents are moved
    device: the compute device to move objects to

    Anything exposing a `.to` method is moved directly; lists, tuples, sets
    and dicts are rebuilt with moved contents; other objects pass through.
    """
    if hasattr(obj, "to"):
        return obj.to(device)
    if isinstance(obj, list):
        return [moveTo(item, device) for item in obj]
    if isinstance(obj, tuple):
        return tuple(moveTo(item, device) for item in obj)
    if isinstance(obj, set):
        return {moveTo(item, device) for item in obj}
    if isinstance(obj, dict):
        return {moveTo(key, device): moveTo(value, device) for key, value in obj.items()}
    return obj
def topics_from_keys(keys):
    """
    Extract the set of parent topics from the given keys.

    :param keys: list of desired keys (a leading '/' is added if missing)
    :return: list of topics (order unspecified; duplicates removed)
    """
    topics = set()
    for key in keys:
        normalized = key if key.startswith("/") else "/" + key
        chunks = normalized.split("/")
        # Every proper prefix of the key (below the full key) is a topic.
        topics.update("/".join(chunks[:i]) for i in range(2, len(chunks)))
    return list(topics)
def map_to_range(x, in_min, in_max, out_min, out_max, clip=True):
    """
    Map a value from one range to another.

    :param x: Input value
    :param in_min: Input range minimum
    :param in_max: Input range maximum
    :param out_min: Output range minimum
    :param out_max: Output range maximum
    :param clip: If True, clip the input value x to the input range limits
    :return: Mapped value in the output range
    :raises ValueError: if either range has its minimum above its maximum
    """
    # Bug fix (ordering): validate the ranges BEFORE clipping.  The original
    # clipped first, silently using an inverted range as clip bounds on the
    # way to the raise.
    if in_min > in_max or out_min > out_max:
        raise ValueError("Minimum has to be smaller or equal to the maximum")
    if clip:
        x = min(in_max, max(in_min, x))
    return out_min + ((x - in_min) * (out_max - out_min)) / (in_max - in_min)
def check_dependents(full_name, import_list):
    """ Check if we are parent of a loaded / recursed-to module file.

    Notes:
        Accept full_name if full_name.something is a recursed-to module.
    Args:
        full_name: The full module name
        import_list: List of recursed-to modules
    Returns:
        Bool
    """
    prefix = full_name + "."
    return any(item.startswith(prefix) for item in import_list)
def _GetVersionIndex(version_str):
"""Returns the version index from ycsb version string.
Args:
version_str: ycsb version string with format '0.<version index>.0'.
Returns:
(int) version index.
"""
return int(version_str.split('.')[1]) |
def generate_repo_name(team_name: str, master_repo_name: str) -> str:
    """Construct a '<team>-<template>' repo name for a team.

    Args:
        team_name: Name of the associated team.
        master_repo_name: Name of the template repository.
    """
    return f"{team_name}-{master_repo_name}"
def fake_update_dict(fake_message_dict):
    """Return a fake Telegram update (dict) wrapping the given message dict."""
    update = {'update_id': 12345}
    update['message'] = fake_message_dict
    return update
def _build_option_macro_name(config_key: str) -> str:
"""Build macro name for configuration key.
All configuration variables require a macro name, so that they can be referenced in a header file.
Some values in config define "macro_name", some don't. This helps generate consistent macro names
for the latter.
"""
sanitised_config_key = config_key.replace(".", "_").replace("-", "_").upper()
return f"MBED_CONF_{sanitised_config_key}" |
def none_wrapper(value, default=""):
    """
    Return *value* unless it is None, in which case return *default*.
    Pure laziness! Sometimes this ends up being nice syntactic sugar for code readability.
    """
    if value is None:
        return default
    return value
def degrees_to_decimal(degrees_string):
    """
    Convert degrees:minutes:seconds to degrees.decimal
    # E.g. '-37:47:10.6' returns -37.78627777777778  Originally: -37.7862648
    #      '175:19:55.2' returns 175.332             Originally:'175.3319996'
    """
    parts = degrees_string.split(":")
    degrees = float(parts[0])
    minutes = float(parts[1]) / 60 if len(parts) > 1 else 0
    seconds = float(parts[2]) / 3600 if len(parts) > 2 else 0
    fraction = minutes + seconds
    # The minute/second fraction moves the value away from zero, so its
    # sign must follow the sign of the degree component.
    if degrees >= 0:
        return degrees + fraction
    return degrees - fraction
def getUserRecCalories(weight, height, age, gender):
    """
    Estimate recommended daily calories via the Harris-Benedict equation.
    Parameter:
    weight  A integer value of user's weight
    height  A integer value of user's height
    age     A integer value of user's age
    gender  A string value contains user's genders
    """
    if gender == 'Male':
        return int(66.5 + 13.8 * weight + 5.0 * height - 6.8 * age)
    if gender == 'Female':
        return int(655.1 + 9.6 * weight + 1.9 * height - 4.7 * age)
    # Unrecognised gender string: no estimate available.
    return 0
def isCommentCSVLine(Entries):
    """
    Treat CSV lines starting with a '#' as a comment.
    """
    # An empty row is never a comment; guard before touching Entries[0].
    return bool(Entries) and Entries[0].startswith("#")
def relative_path(root,path):
    """both [root] and [path] are in split tuple form"""
    # Count leading '..' components in path.
    ups = 0
    for part in path:
        if part != '..':
            break
        ups += 1
    if ups == 0:
        # Nothing to climb: path is simply appended below root.
        return root + path
    if ups > len(root):
        # Relative goes back further than root, return path relative to root
        return ('..',) * (ups - len(root)) + path[ups:]
    # Drop the climbed components from root, keep the remainder of path.
    return root[:-ups] + path[ups:]
def to_bool(value):
    """ Convert an int (0 or 1) to a boolean.
    """
    # Only the exact value 1 maps to True; everything else is False.
    return bool(value == 1)
def adjust_relations(relations):
    """Adjust relations to Siren protocol.
    :param relations: iterable with relations.
    :returns: tuple with string relations.
    :raises: :class:ValueError.
    """
    try:
        adjusted = []
        for relation in relations:
            adjusted.append(str(relation))
        return tuple(adjusted)
    except TypeError as error:
        # Non-iterable input surfaces as TypeError; re-raise as the documented ValueError.
        raise ValueError("Relations must be iterable with string values") from error
def binarize_value(df_element: str, patterns: "list[str]") -> int:
    """ Binarizes searching value.
    Parameters:
    ----------
    df_element:
        searching source; its string form is split on commas into tokens.
    patterns:
        iterable of searching elements (each is stripped before comparison).
        NOTE: previously annotated as ``str``, but the code iterates it as a
        collection of strings.
    Returns:
    ----------
    Binarized value: 1 if any stripped pattern equals one of the comma-separated
    tokens of df_element, 0 otherwise.
    """
    # Split once instead of once per pattern (the tokens never change).
    tokens = str(df_element).split(",")
    return int(any(pattern.strip() in tokens for pattern in patterns))
def _reverse(x: int) -> int:
    """
    Args:
        x: 32-bit signed integer
    Returns: x with digits reversed if it would fit in a 32-bit signed integer,
        0 otherwise
    Examples:
    >>> _reverse(123)
    321
    >>> _reverse(-123)
    -321
    >>> _reverse(120)
    21
    >>> _reverse(0)
    0
    >>> _reverse(1534236469)
    0
    >>> _reverse(7463847412)
    2147483647
    >>> _reverse(-8463847412)
    -2147483648
    >>> _reverse(8463847412)
    0
    """
    """ALGORITHM"""
    ## INITIALIZE VARS ##
    # 214748364 == 2**31 // 10: the shared 9-digit prefix of INT32_MAX
    # (2147483647) and |INT32_MIN| (2147483648); used to pre-check overflow
    # before the final digit is appended.
    number_base, max_and_min_val_prefix = 10, 214748364
    ## MIGRATE DIGITS from x_pos to reversed_x_pos
    # Work on the absolute value; the sign is re-applied at the end.
    x_pos, reversed_x_pos = abs(x), 0
    while x_pos > 0:
        digit = x_pos % number_base
        x_pos //= number_base  # Shift digits right, truncating off decimal
        # CHECK if reversal would cause 32-bit overflow
        if reversed_x_pos > max_and_min_val_prefix:  # GUARANTEED overflow
            return 0
        elif reversed_x_pos == max_and_min_val_prefix:  # MAY overflow
            # Negative range extends one further (…48) than positive (…47),
            # hence the asymmetric last-digit limits below.
            will_be_negative_overflow = x < 0 and digit > 8  # < -214748364_8
            will_be_positive_overflow = x >= 0 and digit > 7  # > 214748364_7
            if will_be_negative_overflow or will_be_positive_overflow:
                return 0
        # Shift digits left and add new digit
        reversed_x_pos = reversed_x_pos * number_base + digit
    # Restore the original sign on the reversed magnitude.
    sign = -1 if x < 0 else 1
    return sign * reversed_x_pos
def unquote(tag):
    # type: (str) -> str
    """Remove namespace from prefixed tag.
    See: [Python issue 18304](https://bugs.python.org/issue18304)
    Arguments:
        tag {str} -- (possibly-)namespaced tag
    Returns:
        str -- tag name without namespace
    """
    # For '{ns}name' the last split segment is 'name'; an unprefixed tag
    # splits into a single segment and is returned unchanged.
    return tag.split('}')[-1]
def _arguments_str_from_dictionary(options):
"""
Convert method options passed as a dictionary to a str object
having the form of the method arguments
"""
option_string = ""
for k in options:
if isinstance(options[k], str):
option_string += k+"='"+str(options[k])+"',"
else:
option_string += k+"="+str(options[k])+","
option_string = option_string.strip(',')
return option_string |
def get_ad_sublist(adlist, names):
    """
    Select a sublist of AstroData instances from the input list using a list
    of filename strings. Any filenames that don't exist in the AstroData list
    are just ignored. Input order of adlist is preserved.
    """
    # Set membership makes each lookup O(1) instead of scanning the list.
    wanted = set(names)
    return [ad for ad in adlist if ad.filename in wanted]
def euclideanDistance(loc1, loc2):
    """
    Return the Euclidean distance between two locations, where the locations
    are pairs of numbers (e.g., (3, 5)).
    """
    # BEGIN_YOUR_CODE (our solution is 1 line of code, but don't worry if you deviate from this)
    dx = loc1[0] - loc2[0]
    dy = loc1[1] - loc2[1]
    return (dx ** 2 + dy ** 2) ** 0.5
    # END_YOUR_CODE
def get_tail(raw_url_str):
    """
    Get final end chunk of https url- such as, for given "https:1/2/3/4.pdf,
    returns "4.pdf"
    :param raw_url_str: str
    :return: str
    """
    segments = raw_url_str.split('/')
    tail = segments[-1]
    # A trailing '/' yields an empty last segment; fall back to the one before it.
    return tail if tail else segments[-2]
def for_loop(function, argument_list):
    """Apply a multivariate function to a list of arguments in a serial fashion.
    Uses Python's built-in for statement.
    Args:
        function: A callable object that accepts more than one argument
        argument_list: An iterable object of input argument collections
    Returns:
        List of output results
    Example:
        >>> def add(x, y, z):
        ...     return x+y+z
        ...
        >>> for_loop(add, [(1, 2, 3), (10, 20, 30)])
        [6, 60]
    References:
        - https://docs.python.org/3/reference/compound_stmts.html#the-for-statement
        - https://docs.python.org/3/tutorial/controlflow.html#for-statements
    """
    # Deliberately uses an explicit for statement (the point of this helper)
    # rather than a comprehension; each argument tuple is unpacked positionally.
    results = []
    for arguments in argument_list:
        results.append(function(*arguments))
    return results
def fill_buckets(buckets, all_jobs):
    """Split jobs in buckets according their memory consumption.

    Args:
        buckets: dict keyed by memory limit; a key of 0 is a sentinel meaning
            "do not split" (all jobs go in that one bucket).
        all_jobs: dict mapping job -> required memory.
    Returns:
        dict of bucket -> list of jobs; empty buckets are removed
        (except in the sentinel case, where buckets is returned as-is).
    """
    if 0 in buckets:  # sentinel: no splitting, everything in one bucket
        buckets[0] = list(all_jobs)
        return buckets
    # buckets were set
    prev_lim = 0
    for memlim in sorted(buckets):
        # buckets and memory limits are pretty much the same
        # if buckets are 5 and 10 then:
        # memlim[5] -> jobs that require <= 5Gb
        # memlim[10] -> jobs that require > 5Gb AND <= 10Gb
        buckets[memlim] = [
            job for job, job_mem in all_jobs.items() if prev_lim < job_mem <= memlim
        ]
        prev_lim = memlim
    # NOTE(review): jobs needing more than the largest limit are silently
    # dropped here — confirm that is intended by the callers.
    # remove empty buckets
    return {k: v for k, v in buckets.items() if v}
def digital_root(number: int) -> int:
    """Repeatedly sum the digits of a number until a single digit remains.

    Args:
        number (int): given non-negative number
    Returns:
        int: the digital root (a single digit 0-9).
    Examples:
        >>> assert digital_root(166) == 4
        >>> assert digital_root(19) == 1
    """
    # BUGFIX: use >= 10, not > 10 — 10 itself has two digits and must be
    # reduced (the old `> 10` left e.g. digital_root(19) stuck at 10).
    while number >= 10:
        number = sum(map(int, str(number)))
    return number
def rivers_with_station(stations):
    """
    Returns a set of all rivers monitored by 'stations'.
    'stations' is a list of 'MonitoringStation' objects. Documentation for object 'station' can be found
    by importing 'station' from 'floodsystem' and typing 'help(station.MonitoringStation)'
    """
    # Set comprehension replaces the build-list-then-dedupe pattern.
    return {station.river for station in stations}
def replace_module_suffix(state_dict, suffix, replace_with=""):
    """
    Replace suffixes in a state_dict
    Needed when loading DDP or classy vision models

    Returns a new dict; the input mapping is not mutated.
    """
    renamed = {}
    for key, val in state_dict.items():
        if key.startswith(suffix):
            # Replace only the first occurrence, i.e. the leading prefix.
            key = key.replace(suffix, replace_with, 1)
        renamed[key] = val
    return renamed
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.