content stringlengths 42 6.51k |
|---|
def _FormatTime(t):
"""Formats a duration as a human-readable string.
Args:
t: A duration in seconds.
Returns:
A formatted duration string.
"""
if t < 1:
return '%dms' % round(t * 1000)
else:
return '%.2fs' % t |
def merge_lines(line_sets, merge_fun=set.union, sort=True):
    """
    Merge the contents of a list of sets of strings.
    :param line_sets: A list of sets of strings.
    :param merge_fun: A callable that merges sets
    :param sort: Sort the resulting merged entries.
    :return: tuple of the merged entries, sorted when requested.
    """
    combined = merge_fun(*line_sets)
    if sort:
        combined = sorted(combined)
    return tuple(combined)
def levenshtein_distance(s1, s2, normalize=False):
    """
    A function computes the levenshtein distance between two string.
    Parameters:
        s1: String
        s2: String
        normalize: bool
            divide edit distance by maximum length if true
    Returns:
        The levenshtein distance
    """
    # Ensure s1 is the longer string so the DP rows track the shorter s2.
    if len(s1) < len(s2):
        return levenshtein_distance(s2, s1, normalize)
    # s2 empty: the distance is just the length of s1.
    # NOTE(review): `normalize` is ignored on this path (returns len(s1),
    # not 1.0) — confirm whether that is intended.
    if not s2:
        return len(s1)
    current_row = None
    # previous_row[j] = distance between empty prefix of s1 and s2[:j].
    previous_row = range(len(s2) + 1)
    for i, c1 in enumerate(s1):
        current_row = [i + 1]
        for j, c2 in enumerate(s2):
            insertions = previous_row[j + 1] + 1
            deletions = current_row[j] + 1
            substitutions = previous_row[j] + (c1 != c2)
            current_row.append(min(insertions, deletions, substitutions))
        previous_row = current_row
    # current_row can only stay None when s1 is empty, which implies s2 is
    # empty too (len(s1) >= len(s2)) and was handled above — so this runs.
    if current_row:
        if normalize:
            # len(s1) is the max of the two lengths after the swap above.
            return current_row[-1] / len(s1)
        return current_row[-1]
    return -1
def tuple_to_str(data_tuple, sep=','):
    """Join the items of a tuple of strings using `sep`."""
    return sep.join(data_tuple)
def _FieldRef(column):
"""Returns a field reference name.
Args:
column: The field column number counting from 0.
Returns:
A field reference name.
"""
return 'f{column}'.format(column=column) |
def checkpoint_epoch_filename(epoch):
    """Build the checkpoint filename for `epoch`, zero-padded to 4 digits."""
    assert 0 <= epoch <= 9999
    return 'checkpoint_{:04d}.ckpt'.format(epoch)
def mul_by_two_or_min_one(n):
    """Simple timeout calculation: double the max response time, never
    returning less than 1 second."""
    doubled = n * 2
    return max(doubled, 1)
def intersect_regions(region_a, region_b):
    """
    Given a pair of coordinate intervals, return the coordinates of the
    region where the two overlap (start may exceed end when disjoint).
    """
    start_a, end_a = region_a[0], region_a[1]
    start_b, end_b = region_b[0], region_b[1]
    return (max(start_a, start_b), min(end_a, end_b))
def _limit_from_settings(x):
"""
limit=0 (or any falsy value) in database means "no limit". Convert that to
limit=None as limit=0 in code means "reject all".
"""
return int(x or 0) or None |
def minFlagellumLength(lst=None):
    """
    Return the minimum flagellum length from the list of flagella lst.

    Returns 0 for a missing (None) or empty list; the original raised
    ValueError (min of an empty sequence) on [] while returning 0 for None.
    """
    if not lst:
        return 0
    return min(len(flagellum) for flagellum in lst)
def PEER_cmd(PEER_exec_path, phenotype_file, covariates_file, num_peer, output_prefix, output_dir):
    """
    Command to execute PEER covariate correction. Be sure to use r-4.0.3
    """
    parts = (
        "time Rscript",
        f"{PEER_exec_path} {phenotype_file} {output_prefix} {num_peer}",
        f"-o {output_dir} --covariates {covariates_file}",
    )
    return " ".join(parts)
def sorted_plugins_dicom_first(plugins):
    """Reorder plugins in place so a DICOM plugin comes first.

    The first 'nifti' entry is moved to the front, then the first 'dicom'
    entry is moved in front of it, so the final order is DICOM, NIfTI,
    then everything else in original order.
    """
    for wanted_type in ('nifti', 'dicom'):
        for entry in plugins:
            if entry[1] == wanted_type:
                plugins.remove(entry)
                plugins.insert(0, entry)
                break
    return plugins
def verifyPassword(password):
    """
    Check the length of the password.
    Return True if it passes (at least 7 characters), False otherwise.
    """
    return len(password) >= 7
def is_sum(op):
    """Return True when `op` is the sum operator token '+'."""
    return op == '+'
def create_authorization_header(token: str):
    """
    Generate authorization header for LogSnag's API
    :param token: API Token
    :return: Authorization Header
    """
    bearer = f"Bearer {token}"
    return {"Authorization": bearer}
def containschars(str_, charset) -> bool:
    """Return True if `str_` contains any character present in `charset`."""
    return any(ch in charset for ch in str_)
def signed(n: int, size: int) -> int:
    """
    Convert an unsigned integer to its signed (two's-complement) value.
    :param n: value to convert
    :param size: byte width of n
    :return: signed conversion
    """
    bit_width = size * 8
    sign_mask = 1 << (bit_width - 1)
    # If the sign bit is set, subtract the full range to get the
    # negative two's-complement value.
    if n & sign_mask:
        return n - (1 << bit_width)
    return n
def end(cargo):
    """Expects (infile, drawing) as cargo; called when EOF has been reached.

    Returns the drawing; the input file handle is deliberately not closed
    here (matching the original behavior).
    """
    infile = cargo[0]  # accessed for parity with the original; not closed
    drawing = cargo[1]
    return drawing
def try_import(module_name):
    """Attempt to import `module_name`.

    Returns (True, module) on success, or (False, None) on ImportError.
    """
    try:
        return True, __import__(module_name)
    except ImportError:
        return False, None
def is_format_medgen(concept=2881):
    """
    UID style notation: a MedGen-style UID is assumed to be at most 6 digits.
    :param concept: candidate identifier (int or numeric string)
    :return: boolean (False when `concept` is not numeric)
    """
    try:
        concept = int(concept)
    except ValueError:
        return False
    # len(...) instead of the original direct dunder call str.__len__(),
    # and without the redundant parentheses around 6.
    return len(str(concept)) <= 6
def coerce(P, x):
    """
    Coerce ``x`` to type ``P`` if possible.
    EXAMPLES::
        sage: type(5)
        <type 'sage.rings.integer.Integer'>
        sage: type(coerce(QQ,5))
        <type 'sage.rings.rational.Rational'>
    """
    # Prefer the parent's coercion map (Sage-specific `_coerce_` protocol);
    # fall back to calling the constructor when P does not define one.
    try:
        return P._coerce_(x)
    except AttributeError:
        return P(x)
def _check_eofs_frequency(eofs_frequency):
"""Check given EOFs frequency is valid."""
if eofs_frequency is None:
eofs_frequency = 'monthly'
if eofs_frequency not in ('daily', 'monthly', 'seasonal'):
raise ValueError("Unsupported EOF frequency '%r'" % eofs_frequency)
return eofs_frequency |
def get_jugada(p_jugada, p_total):
    """Return the play only when it differs from the running total.

    :param p_jugada: play to check
    :param p_total: current running total
    :return: p_jugada when different from p_total, otherwise 0
    """
    if p_jugada == p_total:
        return 0
    return p_jugada
def get_config_item(config, item, default=None, replace_char='_'):
    """Fetch `item` from `config`, normalizing string values.

    Args:
        config: mapping to read from (anything with a ``.get`` method).
        item: key to look up.
        default: value returned when `item` is absent.
        replace_char: character substituted for spaces in string values.
    Returns:
        The configuration value; strings are lower-cased with spaces
        replaced by `replace_char`.
    """
    value = config.get(item, default)
    # isinstance instead of `type(value) == str` (idiomatic, and also
    # handles str subclasses).
    if isinstance(value, str):
        value = value.replace(" ", replace_char).lower()
    return value
def to_null(string):
    """
    Usage::
        {{ string|to_null}}

    Maps None to the literal string 'null'; everything else passes through.
    """
    if string is None:
        return 'null'
    return string
def count_char_in_grid(char, grid):
    """Count occurrences of `char` across all rows of `grid`."""
    total = 0
    for row in grid:
        total += row.count(char)
    return total
def get_iou(p1, p2):
    """
    get Jaccard overlap(IoU) value
    p1 = [x, y, w, h] :: x, y in [0, 1]
    p2 = [x, y, w, h] :: x, y in [0, 1]
    return : IoU
    """
    # BUG FIX: work on local copies — the original scaled the callers'
    # lists in place, corrupting them for any later use.
    p1 = list(p1)
    p2 = list(p2)
    # Convert normalized center coordinates to pixels (image size 128x128).
    p1[0] *= 128.
    p1[1] *= 128.
    p2[0] *= 128.
    p2[1] *= 128.
    # [left-bottom coord, right-top coord]; // 2 keeps the original
    # floor-division rounding of the half-extents.
    box1 = [p1[0] - (p1[2] // 2), p1[1] - (p1[3] // 2),
            p1[0] + (p1[2] // 2), p1[1] + (p1[3] // 2)]
    box2 = [p2[0] - (p2[2] // 2), p2[1] - (p2[3] // 2),
            p2[0] + (p2[2] // 2), p2[1] + (p2[3] // 2)]
    # (x, y)-coordinates of the intersection rectangle.
    xA = max(box1[0], box2[0])
    yA = max(box1[1], box2[1])
    xB = min(box1[2], box2[2])
    yB = min(box1[3], box2[3])
    # The +1 convention treats coordinates as inclusive pixel indices.
    interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)
    # Areas of both boxes (using the raw w*h of each input).
    boxAArea = p1[2] * p1[3]
    boxBArea = p2[2] * p2[3]
    if interArea == 0:
        return 0.
    return interArea / float(boxAArea + boxBArea - interArea)
def IoU(box1, box2):
    """
    Compute the IoU of two bounding boxes.
    :param box1: [x, y, w, h]
    :param box2: [x, y, w, h]
    :return: intersection area divided by union area
    """
    x1, y1, w1, h1 = box1
    x2, y2, w2, h2 = box2
    # Overlap extents are clamped at zero for disjoint boxes.
    inter_w = max(min(x1 + w1, x2 + w2) - max(x1, x2), 0)
    inter_h = max(min(y1 + h1, y2 + h2) - max(y1, y2), 0)
    inter = inter_w * inter_h
    union = w1 * h1 + w2 * h2 - inter
    return inter / union
def is_unique_msg(msg, previous_msg_ids, previous_run_time):
    """
    Determines if message is unique given previous message ids, and that it's greater than previous run time
    :param msg: raw Message object
    :param previous_msg_ids: set of previously fetched message ids
    :param previous_run_time: previous run time string
    :return: True if message is unique
    """
    inner = msg.get("message", {})
    if not inner:
        return False
    is_new_id = inner.get("messageId") not in previous_msg_ids
    is_recent = inner.get("publishTime", "") > previous_run_time
    return is_new_id and is_recent
def arithmetic_series(a: int, n: int, d: int = 1) -> int:
    """Return the sum of the arithmetic sequence with parameters a, n, d.

    a: The first term in the sequence
    n: The total number of terms in the sequence
    d: The difference between any two consecutive terms
    """
    last = a + (n - 1) * d
    return n * (a + last) // 2
def prob_class_1_arrival(state, lambda_1, mu, num_of_servers):
    """Probability that the next event is a class-1 arrival.

    Competes the class-1 arrival rate against the total service rate of
    the currently busy servers (capped at num_of_servers).
    """
    busy_servers = min(state[1], num_of_servers)
    return lambda_1 / (lambda_1 + mu * busy_servers)
def match_id(dis_id):
    """Extract a numeric Discord ID from a raw ID or mention string.

    :param dis_id: the raw string — a bare 18-digit ID, a simple mention
        (<@id>), or a nickname/role mention (<@!id> / <@&id>).
    :return: the ID as int, or False when the string does not match.

    BUG FIX: the original fell through and implicitly returned None for
    wrong-length mentions and non-matching strings; every failure path
    now returns False consistently.
    """
    # Nickname or role mention: <@!id> or <@&id>
    if any(marker in dis_id for marker in ("<@!", "<@&")):
        if len(dis_id) == 22:
            return int(dis_id[3:-1])
    # Simple mention: <@id>
    elif "<@" in dis_id:
        if len(dis_id) == 21:
            return int(dis_id[2:-1])
    # Bare numeric ID
    elif dis_id.isdigit():
        if len(dis_id) == 18:
            return int(dis_id)
    return False
def bayes(prior, sensitivity, specitivity):
    """Compute Bayes rule over the prior, sensitivity, and specitivity."""
    true_positive = prior * sensitivity
    false_positive = (1.0 - prior) * (1.0 - specitivity)
    # Posterior = P(positive & condition) / P(positive).
    return true_positive / (true_positive + false_positive)
def buildSignatureKey(signature):
    """
    Build static file filename suffix used by mkstemp().
    `signature` is (name, (width, height), number).
    """
    name, dims, num = signature[0], signature[1], signature[2]
    return name + "_" + str(dims[0]) + "x" + str(dims[1]) + "_" + str(num) + "_staticMask.fits"
def unique(seq, preserve_order=True):
    """
    Take a sequence and make it unique. Not preserving order is faster, but
    that won't matter so much for most uses.
    copied from: http://www.peterbe.com/plog/uniqifiers-benchmark/uniqifiers_benchmark.py
    """
    if preserve_order:
        # dict.fromkeys keeps first-seen order (guaranteed since py3.7).
        return list(dict.fromkeys(seq))
    # Fast path without order guarantee (same dict trick in practice).
    return list({}.fromkeys(seq).keys())
def _rec_filter_to_info(line):
    """Move a DKFZBias filter to the INFO field, for a record.

    `line` is a tab-separated VCF record: column index 6 is FILTER and
    index 7 is INFO.
    """
    parts = line.rstrip().split("\t")
    # DKFZBias FILTER names and the DKFZBias INFO tags they map to.
    move_filters = {"bSeq": "strand", "bPcr": "damage"}
    new_filters = []
    bias_info = []
    for f in parts[6].split(";"):
        if f in move_filters:
            bias_info.append(move_filters[f])
        elif f not in ["."]:
            # Keep every other filter; drop the '.' placeholder.
            new_filters.append(f)
    if bias_info:
        parts[7] += ";DKFZBias=%s" % ",".join(bias_info)
    # An emptied FILTER column becomes PASS.
    parts[6] = ";".join(new_filters or ["PASS"])
    return "\t".join(parts) + "\n"
def _set_private_function_thing(value, another):
"""Do something here."""
# Do something with these values
# and more comment text, here.
#
if value:
return 2
# Another comment
return 1 |
def encrypt2(text, n):
    """Apply the rail-swap step n times: odd-indexed characters move to
    the front, even-indexed characters to the back, per iteration."""
    result = text
    for _ in range(n):
        result = result[1::2] + result[::2]
    return result
def get_venue(d):
    """
    Prettify the name of the venue that the paper appeared in.
    :param d: bibtex entry dict with an 'ENTRYTYPE' key
    :return: a display string for the venue
    """
    # Lazy lambdas so only the chosen entry's fields are accessed.
    formatters = {
        'inproceedings': lambda: 'In ' + d['booktitle'],
        'article': lambda: d['journal'],
        'misc': lambda: d['howpublished'],
        'techreport': lambda: 'Technical Report. ' + d['institution'],
    }
    et = d['ENTRYTYPE'].lower()
    if et not in formatters:
        raise Exception("Do not know how to parse entry type {}".format(et))
    return formatters[et]()
def map_serial_number(facilities) -> str:
    """Map the serial number of the first facility to a string.

    Note: a missing serial number yields the literal string 'None'
    (original behavior preserved).
    """
    body = facilities.get("body", dict())
    first_facility = body.get("facilitiesList", list())[0]
    return str(first_facility.get("serialNumber", None))
def swap_byte_order(buf):
    """
    Swap the byte order of a bytes object.
    The rippled implementation of RFC-1751 uses the reversed byte order as the
    examples included in the RFC-1751 spec (which doesn't mention byte order).
    """
    # Round-trip through an int with opposite endianness to reverse bytes;
    # which endianness is "really" first doesn't matter, only the flip.
    as_int = int.from_bytes(buf, byteorder="big", signed=False)
    return as_int.to_bytes(len(buf), byteorder="little", signed=False)
def knot_vector_uniform(num_points, degree, periodic=False):
    """Computes a uniform knot vector.

    Parameters
    ----------
    num_points : int
        Number of points to compute parameters for.
    degree : int
        The degree of the curve.

    Returns
    -------
    list of float
        The knot vector in the domain of [0, 1].

    Notes
    -----
    Is the same to which Rhino refers to as CurveKnotStyle.Uniform
    """
    inner_count = num_points - degree
    step = 1. / inner_count
    head = [0.0] * (degree + 1)
    tail = [1.0] * (degree + 1)
    interior = [step * i for i in range(1, inner_count)]
    return head + interior + tail
def _make_divisible(v, divisor=4, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v |
def ProcessValueList(incoming_list_str):
    """Parse a '{a, b, c}'-style string into a list of stripped strings.

    Raises Exception when the input is not wrapped in braces.
    """
    # Only '{...}'-wrapped strings are valid value lists.
    if not (incoming_list_str.startswith('{') and incoming_list_str.endswith('}')):
        raise Exception('Incorrectly formatted Value List: %s' % incoming_list_str)
    inner = incoming_list_str.split('{', 1)[1].split('}', 1)[0]
    # Strip any spaces around the values.
    return [piece.strip() for piece in inner.split(',')]
def is_number(string):
    """Check whether the given string parses as a number.

    Arguments:
        string - string that is supposed to contain a number
    """
    # complex() accepts ints, floats and complex literals alike.
    try:
        complex(string)
    except ValueError:
        return False
    else:
        return True
def is_search_bot(line: str) -> bool:
    """Determine if 'line' has a user agent which looks like a search bot."""
    # Deduplicated ("AhrefsBot" was listed twice) and collapsed the
    # manual loop into any().
    search_bots = (
        "AhrefsBot",
        "CCBot",
        "Googlebot",
        "SemrushBot",
        "YandexBot",
        "bingbot",
    )
    return any(bot in line for bot in search_bots)
def calculate_temperature_rise_power_loss_weight(
    power_operating: float,
    weight: float,
) -> float:
    """Calculate the temperature rise based on the power loss and xfmr weight.

    :param power_operating: the power loss in W.
    :param weight: the weight of the device in lbf.
    :return: the calculated temperature rise in C.
    :rtype: float
    :raise: ZeroDivisionError if passed a weight=0.0.
    """
    scaled_weight = weight**0.6766
    return 11.5 * (power_operating / scaled_weight)
def dicty(s):
    """
    Test helper. Helps test dict equivalence. Given different versions of Python,
    stringified dicts may have different item order. This gets us back to real
    dicts, which have more logical equality operations.
    """
    # WARNING: eval() on the supplied string — acceptable only because this
    # is a test helper fed trusted literals; never call with untrusted input.
    d = eval('dict({0})'.format(s))
    # Drop the bookkeeping key so it never affects equality checks.
    if '_magic' in d:
        del d['_magic']
    return d
def rearange_books_by_category(books_json):
    """rearange_books_by_category.
    Returns JSON with following structure:
    {
        "Beginner":[
            {
                "title":
                "author":
                "url"
            }
        ],
        "Intermediate":[
            {
                "title":
                "author":
                "url"
            }
        ],
    }
    """
    rearanged_books = {}
    #####################################
    #            YOUR CODE IS HERE      #
    #####################################
    # NOTE(review): unimplemented exercise placeholder — currently returns
    # an empty dict regardless of `books_json`.
    return rearanged_books
def printer(*args, **kwargs):
    """Dummy printer: a thin pass-through to the builtin print."""
    result = print(*args, **kwargs)
    return result
def primality(n):
    """A basic trial-division primality test (6k±1 optimization).
    http://en.wikipedia.org/wiki/Primality_test
    """
    if n < 2:
        return False
    if n in (2, 3):
        return True
    if n % 2 == 0 or n % 3 == 0:
        return False
    # Remaining candidates have the form 6k ± 1 up to sqrt(n).
    for candidate in range(5, int(n ** 0.5) + 1, 6):
        if n % candidate == 0 or n % (candidate + 2) == 0:
            return False
    return True
def confirm_genres(args, artist, genres):
    """Allow the user to confirm a new genre label.

    Skips the prompt and returns True when either force flag is set.
    """
    if args['force_save'] or args['force']:
        return True
    answer = input('Label {} as {}?\n: '.format(artist, ';'.join(genres)))
    return 'y' in answer.lower()
def get_device_from_entities(entities):
    """Return the sourceText of the first 'device' entity, or None."""
    return next(
        (item['sourceText'] for item in entities if item['entity'] == 'device'),
        None,
    )
def dict_value_to_key(d, value):
    """
    Reverse lookup: return the first key in `d` whose value equals `value`.
    :param d: dictionary to search
    :param value: value to look for
    :return: matching key, or None when no value matches
    """
    return next((k for k, v in d.items() if v == value), None)
def dict_to_lines(d, level=0, use_repr=False):
    """
    Dump a dictionary to a set of string lines to be written to a
    file.
    Args:
        d (:obj:`dict`):
            The dictionary to convert
        level (:obj:`int`, optional):
            An indentation level. Each indentation level is 4 spaces.
        use_repr (:obj:`bool`, optional):
            Instead of using string type casting (i.e.,
            ``str(...)``), use the objects ``__repr__`` attribute.
    Returns:
        :obj:`list`: A list of strings that represent the lines in a
        file.
    """
    lines = []
    if len(d) == 0:
        return lines
    # Right-justify keys to the longest key, plus 4 spaces per nesting level.
    w = max(len(key) for key in d.keys()) + level*4
    for key in d.keys():
        if isinstance(d[key], dict):
            # Nested dicts recurse with one extra indentation level.
            lines += [key.rjust(w) + ':'] + dict_to_lines(d[key], level=level+1, use_repr=use_repr)
            continue
        # NOTE(review): hasattr(x, '__repr__') is True for every object,
        # so the rendering choice effectively depends on `use_repr` alone.
        lines += [key.rjust(w) + ': ' +
                  (d[key].__repr__() if use_repr and hasattr(d[key], '__repr__') else str(d[key]))]
    return lines
def custom_score(word, opt):
    """Compute a word score from a per-letter score table.

    Args:
        word: the word to get the score of
        opt: dict mapping each letter to its score.
    Returns:
        The summed score of all letters in `word`.
    Raises:
        Exception: if opt is not a dict (type and message kept for
            compatibility with existing callers).
        KeyError: if a letter of `word` is missing from `opt`.
    """
    # `not isinstance(...)` instead of `isinstance(...) == False`.
    if not isinstance(opt, dict):
        raise Exception("options are not a dict.")
    # Strings are directly iterable; the list(word) copy was redundant.
    return sum(opt[letter] for letter in word)
def madau_14(z, **kwargs):
    """Star formation history as a function of redshift, from
    Madau & Dickinson 2014 (http://arxiv.org/abs/1403.0007v3)
    """
    z1 = 1 + z
    return z1**2.7 / (1 + (z1 / 2.9)**5.6)
def addVectors(lst1, lst2):
    """Add two lists together element-wise, like a vector.

    BUG FIX: the original used int.__add__, which raises TypeError for
    floats; plain `+` supports any numeric type.  As with map over two
    iterables, the result is truncated to the shorter input.
    """
    return [a + b for a, b in zip(lst1, lst2)]
def name_col(existing_col_names, proposed_name):
    """ Give a column a unique name.
    If the name already exists, create a unique name
    by appending '.<n>' with the smallest free n.
    """
    unique_name = proposed_name
    suffix = 1
    while unique_name in existing_col_names:
        unique_name = '{}.{}'.format(proposed_name, suffix)
        suffix += 1
    return unique_name
def GCD(num1, num2):
    """assumes num1 and num2 are ints
    returns an int, the greatest common divisor of num1 and num2

    Uses Euclid's algorithm: O(log n) instead of the original O(min)
    trial loop, and returns a correct value (not None) when an argument
    is zero or negative.
    """
    num1, num2 = abs(num1), abs(num2)
    while num1:
        num1, num2 = num2 % num1, num1
    return num2
def process_form_data(dict_form_data, *args):
    """
    After casting form data to dict, the values
    become lists. Transform the lists to non-iterables.

    Extra positional `args` name mandatory keys: each must be present and,
    when a string, must be non-blank.

    Raises:
        AttributeError: when the input is not dict-like (.keys missing).
        ValueError: when a mandatory key is absent or holds a blank string.
    """
    new_dict = {}
    try:
        # Each value is a list; keep only its first element.
        for key in dict_form_data.keys():
            new_dict[key] = dict_form_data[key][0]
    except AttributeError:
        raise AttributeError('The input should be a dictionary')
    # check for mandatory fields as directed by args
    for arg in args:
        try:
            value = new_dict[arg]
            if isinstance(value, str):
                # Whitespace-only strings count as empty.
                if len(value.strip()) == 0:
                    raise ValueError('%s should not be an empty string' % str(arg))
        except KeyError:
            # Missing mandatory key is reported as ValueError, not KeyError.
            raise ValueError('%s is an expected key' % str(arg))
    return new_dict
def get_latest_analysis_inputs(files):
    """Get input files for the latest analysis version number of each chip_well_barcode."""
    inputs = []
    for versions_by_barcode in files.values():
        for versions in versions_by_barcode.values():
            # Version keys are numeric strings; pick the numerically largest.
            newest = sorted(versions.keys(), key=lambda v: int(v))[-1]
            inputs.extend(versions[newest])
    return inputs
def side_effect(L):
    """list[int] -> int

    Demonstrates the absence of a side effect: the input list is copied
    before mutation, so the caller's list is untouched; always returns 0.
    """
    scratch = L + []  # shallow copy of the input
    scratch.append(2)
    return 0
def is_sorted(data):
    """
    Check whether the sequence is in non-decreasing order.
    """
    return all(earlier <= later for earlier, later in zip(data, data[1:]))
def escape_html(text):
    """Convert some problematic characters to entities"""
    replacements = {
        "&": "&amp;",
        '"': "&quot;",
        "'": "&apos;",
        ">": "&gt;",
        "<": "&lt;",
    }
    escaped = [replacements.get(ch, ch) for ch in text]
    return "".join(escaped)
def i_or_args(result, results):
    """Return an iterable for functions with signature f(arg, *args).

    f can be called with f([a, b, c, ...]) or f(a, b, c, ...).
    In both cases, calling i_or_args(arg, args) returns an iterable
    over a, b, c, ...
    """
    if results:
        return (result, ) + results
    return iter(result)
def del_kw(kwarg_name, kwargs, warning=False):
    """
    Delete a key from a dictionary.
    Parameters
    ----------
    kwarg_name : str
        Name of the argument.
    kwargs : dict
        Dictionary of all keyword
        arguments.
    warning : bool
        When True, log a warning if the key was absent.
    Returns
    -------
    kwargs : dict
        The same dict, with `kwarg_name` removed when it was present.
    Notes
    -----
    Name of the function is as short as possible
    because it's frequently typed.
    We can't not include kwargs as an argument.
    """
    try:
        del kwargs[kwarg_name]
    except KeyError:
        if warning:
            # NOTE(review): `_log` is not defined in this block; it must be
            # attached externally (del_kw._log = logger) or this line
            # raises AttributeError — confirm where it is set.
            del_kw._log.warning('Value of ' + kwarg_name + ' was not specified.')
    return kwargs
def check_execution(executor, nb, raise_fast):
    """Check that all code cells with source have been executed without error.

    Returns the last error message found (None when clean); with
    `raise_fast` set, raises RuntimeError at the first problem instead.
    """
    error = None
    for cell in nb.get("cells", []):
        # Only check code cells
        if cell["cell_type"] != "code":
            continue
        # A non-empty source with no execution_count means it never ran.
        if cell["source"] and cell["execution_count"] is None:
            error = "Notebook has unexecuted code cell(s)."
            if raise_fast:
                raise RuntimeError(error)
            break
        else:
            for output in cell["outputs"]:
                if output["output_type"] == "error":
                    # Errors explicitly allowed by the executor are skipped.
                    if output["ename"] in executor.allow_error_names:
                        continue
                    error = "\n".join(output["traceback"])
                    if raise_fast:
                        raise RuntimeError("\n" + error)
                    # Stops scanning this cell's outputs only; later cells
                    # may overwrite `error` with a newer traceback.
                    break
    return error
def C_calc(seq_dict):
    """
    Calculate C Matrix used in winning probability process.
    :param seq_dict: players sequences
    :type seq_dict: dict
    :return: C Matrix as a 2D list.
    """
    C = []
    # Players are processed in sorted-name order so rows/columns align.
    names = sorted(list(seq_dict.keys()))
    # Probability of one specific fair-coin sequence of this length.
    p_seq = lambda seq: 1 / 2 ** len(seq)
    for name1 in names:
        A_i = seq_dict[str(name1)]
        C_row = []
        for name2 in names:
            A_j = seq_dict[str(name2)]
            w_i_j = 0
            # Sum over every overlap length k where a prefix of A_i matches
            # a suffix of A_j (Conway leading-numbers style computation —
            # NOTE(review): exact game semantics assumed; confirm vs. the
            # caller's winning-probability derivation).
            for k in range(1, min(len(A_i), len(A_j)) + 1):
                if A_i[:k] == A_j[len(A_j) - k:]:
                    w_i_j += p_seq(A_i[k:])
            C_row.append(w_i_j)
        C.append(C_row)
    return C
def utility_of(state):
    """
    returns +1 if winner is X (MAX player), -1 if winner is O (MIN player), or 0 otherwise
    :param state: State of the checkerboard. Ex: [0; 1; 2; 3; X; 5; 6; 7; 8]
    :return: utility value of the terminal state
    """
    # Winning lines listed in the same order the original cascade checked
    # them, so ambiguous boards resolve identically.
    lines = ((0, 1, 2), (0, 4, 8), (0, 3, 6), (3, 4, 5),
             (6, 7, 8), (2, 4, 6), (1, 4, 7), (2, 5, 8))
    for a, b, c in lines:
        if state[a] == state[b] == state[c]:
            return 1 if state[a] == 'X' else -1
    return 0
def mergesort(content_list, size):
    """Sorts the features and their positions in respect to the latter.
    A merge sort algorithm sorting the "content_list" from "smallest" position
    value to the "highest".  Items are compared on index 1 (the position).
    Args:
        content_list: list with features of a wortverbund and their positions of
        occurrence.
        size: number of items of content_list to sort.
    Returns the sorted content_list."""
    if size <= 1:
        return content_list
    else:
        half = int(size/2)
        left = []
        right = []
        # Split into first `half` items and the remaining `size - half`.
        for i in range(size):
            if i < half:
                left.append(content_list[i])
            else:
                right.append(content_list[i])
        # Recursively sort each side (single-element lists are already sorted).
        if len(left) != 1:
            left = mergesort(left, half)
        if len(right) != 1:
            right = mergesort(right, size-half)
        # Merge back into content_list in place; j counts items consumed
        # from `left`, so i - j is the next index into `right`.
        j = 0
        for i in range(size):
            if j >= half:
                # Left side exhausted: copy the rest from the right.
                content_list[i] = right[i-j]
                continue
            if j <= i-(size-half):
                # Right side exhausted (i - j >= len(right)): copy from left.
                content_list[i] = left[j]
                j += 1
                continue
            # Both sides available: take the smaller position (stable —
            # ties prefer the left side).
            if left[j][1] <= right[i-j][1]:
                content_list[i] = left[j]
                j += 1
            else:
                content_list[i] = right[i-j]
        return content_list
def check_time(curtm, newtm, prevtm):
    """
    If 'newtm' is >= 'curtm', then the minimum of 'newtm' and 'prevtm' is
    returned ('prevtm' may be None, meaning no previous time was seen).
    Otherwise 'prevtm' is returned unchanged.
    """
    if newtm >= curtm:
        # `is None` instead of `== None`: identity is the idiomatic None
        # test and avoids surprises with custom __eq__ implementations.
        if prevtm is None:
            return newtm
        return min(prevtm, newtm)
    return prevtm
def sum_infos(info_list: list):
    """Concatenate all strings in info_list, separated by '|'.

    Args:
        info_list (list): list of information strings to concatenate
    Returns:
        str: the '|'-joined string (empty string for an empty list)
    """
    return '|'.join(info_list)
def has_file_extension(file_path: str, file_extension: str) -> bool:
    """Check (case-insensitively) whether a path ends with '.<extension>'."""
    suffix = '.' + file_extension
    return file_path.lower().endswith(suffix)
def isPoint(item):
    """ Determine if the given item has the structure of a point. This is:
    it is a list or tuple with two int or float items.

    BUG FIX: the original only type-checked the first element, so e.g.
    [1, 'a'] passed; both coordinates are now validated as the docstring
    promises.
    """
    if not isinstance(item, (list, tuple)):
        return False
    if len(item) != 2:
        return False
    return all(isinstance(coord, (int, float)) for coord in item)
def handle_ret_json(ctx):
    """ return JSON """
    payload = {'this': 'will', "return": "JSON"}
    return payload
def nice_size(size, si=False, decimal_precision=1):
    """
    Returns a string representation of a file size in SI (KiB, MiB, etc.)
    or binary units (KB, MB, etc.)
    :param size: a size in single bytes
    :param si: whether to use SI units (or binary units, the default)
    :param decimal_precision: the number of decimals to show in rounded
           representations
    :return: a string representation of size
    """
    # SI units step by 1000; binary (IEC) units step by 1024.
    threshold = 1000 if si else 1024
    if abs(size) < threshold:
        return '{} B'.format(size)
    units = ['kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'] if si \
        else ['KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB']
    u = -1
    # r scales the rounding check to the requested decimal precision.
    r = 10 ** decimal_precision
    while True:
        size /= threshold
        u += 1
        # Stop once the rounded value fits under the threshold, or we have
        # run out of unit names.
        if round(abs(size) * r) / r < threshold or u == len(units) - 1:
            break
    return ('%.{}f '.format(decimal_precision) % size) + units[u]
def get_item(where, key, defaultvalue=None):
    """
    Finds the key in the dict. The key can be made up of multiple pieces seperated with a period.
    :param where: dict to search
    :param key: multiple keywords combined with a period
    :param defaultvalue: if the key is not found return this value
    :return: either the defaultvalue or the value found
    """
    node = where
    for part in key.split("."):
        if part not in node:
            return defaultvalue
        node = node[part]
    return node
def replace_periodic_sequence_positions(sequence, step_size, offset, substitution_char):
    """Replace every `step_size`-th nucleotide, starting at `offset`, and
    upper-case the result.

    eg: replace_periodic_sequence_positions("ATGCATGC", 3, 1, "F") = "AFGCFTGF"
    :param sequence: nucleotide sequence
    :param step_size: period between substituted positions
    :param offset: index of the first substituted position
    :param substitution_char: replacement character
    """
    assert offset < step_size, "Offset has to be less than step size"
    chars = list(sequence)
    for position in range(offset, len(chars), step_size):
        chars[position] = substitution_char
    return ''.join(chars).upper()
def intStr(i, total=3):
    """ Return the integer as a string left-padded with '0' to `total` chars. """
    # rjust reproduces the original behavior exactly, including placing the
    # pad before a minus sign (e.g. '0-5') and never truncating.
    return str(i).rjust(total, '0')
def _to_num(src_string):
"""Convert string to int or float if possible.
Original value is returned if conversion failed.
"""
if not isinstance(src_string, str):
return src_string
src_string = src_string.strip()
try:
return int(src_string)
except ValueError:
try:
return float(src_string)
except ValueError:
return src_string |
def _one_forward_open(x, y, c, l):
    """convert coordinates to zero-based, both strand, open/closed coordinates.
    Parameters are from, to, is_positive_strand, length of contig.

    NOTE(review): both endpoints are shifted to zero-based, then
    negative-strand intervals are mirrored onto the forward strand with
    (l - y, l - x); the exact open/closed convention of the result follows
    the caller's contract — confirm against usage.
    """
    x -= 1
    y -= 1
    if not c:
        # Mirror the interval onto the forward strand.
        x, y = l - y, l - x
    return x, y
def remove_parallel_wrapper(model):
    """
    Return the model or extract the model out of the parallel wrapper
    :param model: The training model
    :return: The model without parallel wrapper
    """
    # Distributed/data-parallel wrappers expose the real model as .module.
    return getattr(model, "module", model)
def clean_title(text):
    """
    Return the string passed in stripped of its numbers and parentheses
    Except for the DRC. Of course.
    """
    text = str(text)
    if text == "Congo (the Democratic Republic of the)":
        return text
    # Drop everything up to and including the first ')', then any
    # following whitespace (no-op when there is no ')').
    return text[text.find(')') + 1:].lstrip()
def check_shape_by_index(index, input_shape, min_size) -> bool:
    """
    Check one entry of a shape tuple.
    :param index: index of the tuple entry to test
    :param input_shape: shape tuple to test
    :param min_size: minimum required size for that entry
    :return: True when the entry is defined and smaller than min_size
    """
    dim = input_shape[index]
    return dim is not None and dim < min_size
def unpack_distrib(d):
    """
    Return the original data from a given distribution, e.g.: ::
        >>> unpack_distrib([0, 3, 2, 0, 1])
        [1, 1, 1, 2, 2, 4]
    """
    expanded = []
    for value, count in enumerate(d):
        expanded.extend([value] * count)
    return expanded
def get_keys(dic, key_path=""):
    """Collect slash-delimited key paths of a nested dictionary.

    >>> a = {"a": 1, "b": {"c": 2}}
    >>> get_keys(a)
    ['/a', '/b/c']
    """
    collected = []

    def _walk(node, prefix):
        # Dicts recurse deeper; leaves append their accumulated path.
        if isinstance(node, dict):
            for name in node:
                _walk(node[name], prefix + "/" + str(name))
        else:
            collected.append(prefix)

    _walk(dic, key_path)
    return collected
def remove_empty_rows(thingies):
    """
    For some reason, maybe a clj-kondo bug, a Var usage might have a None row.
    This function is suitable for any thingy data - not only Var usages.
    """
    # `is not None` (identity) is the idiomatic None test; the original
    # used `!= None`, which invokes __eq__.
    return [thingy for thingy in thingies if thingy["row"] is not None]
def insertion_sort(array: list) -> list:
    """Sort `array` in place using insertion sort and return it.

    >>> insertion_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]

    BUG FIX: the original inner loop executed `break` immediately and
    never moved an element, so the input came back unchanged (and its
    own doctest failed).
    """
    for i in range(1, len(array)):
        key = array[i]
        j = i - 1
        # Shift larger elements one slot right to open a gap for `key`.
        while j >= 0 and array[j] > key:
            array[j + 1] = array[j]
            j -= 1
        array[j + 1] = key
    return array
def report_config_defaults(report_config, test_harness=None):
    """Creates copy of report_config and sets defaults for missing entries."""
    config = report_config.copy()
    # Reporting destination defaults.
    for key, fallback in (
            ('report_project', 'google.com:tensorflow-performance'),
            ('report_dataset', 'benchmark_results_dev'),
            ('report_table', 'result')):
        config[key] = report_config.get(key, fallback)
    # Details about where the test was run; an explicit test_harness
    # argument overrides whatever the config says.
    if test_harness:
        config['test_harness'] = test_harness
    else:
        config['test_harness'] = report_config.get('test_harness', 'unknown')
    for key in ('test_environment', 'platform', 'platform_type',
                'accel_type', 'framework_describe'):
        config[key] = report_config.get(key, 'unknown')
    return config
def fileReadable(filepath):
    """ Check if a file path can be analysed or not and return a boolean """
    import os
    return os.path.isfile(filepath)
def align_up(value, align):
    """Round `value` up to the nearest multiple of `align`.

    BUG FIX: uses pure integer arithmetic; the original routed through
    float true-division, which silently loses precision for values above
    2**53.
    """
    return (value + align - 1) // align * align
def GCD(a, b):
    """ The Euclidean Algorithm """
    a = abs(a)
    b = abs(b)
    # Repeatedly replace (a, b) with (b mod a, a) until a reaches 0.
    while a:
        a, b = b % a, a
    return b
def get_user_type(code):
    """Get a human-readable user type label from a numeric code."""
    labels = {1: "User", 2: "Admin", 3: "Super admin"}
    label = labels.get(code)
    if label is None:
        return "Unknown ({})".format(str(code))
    return label + " (" + str(code) + ")"
def newton(f, f_derivative, a, b, eps, M, m):
    """Newton's method for root finding.

    :param f: function whose root is sought
    :param f_derivative: derivative of f
    :param a: initial point (stored as x[0])
    :param b: first iterate (stored as x[1])
    :param eps: convergence tolerance
    :param M: NOTE(review): presumably an upper bound on |f''| — confirm
    :param m: NOTE(review): presumably a lower bound on |f'| — confirm
    :return: the list of computed iterates x[2:], excluding the two seeds
    """
    x = []
    x.append(a) # x[0]
    x.append(b) # x[i]
    i = 1
    while True:
        # Newton step: x[i+1] = x[i] - f(x[i]) / f'(x[i])
        x.append( x[i] - f(x[i])/f_derivative(x[i]) )
        i += 1
        # Stop when the error estimate |x_i - x_{i-1}|^2 * M / (2m) < eps.
        if abs(((x[i] - x[i-1]) ** 2) * M/(2*m)) < eps:
            break
    return x[2:]
def get_field_list(fields: list, *args):
    """Get the query fields: explicit names override the defaults.
    __________
    Parameters
    fields : `list[str]`
        A predefined collection of default fields.
    args : `tuple`
        Field names passed into the request.
    _______
    Returns
    fields : the fields to be retrieved for the query (`args` when any
        were given, otherwise the defaults).
    """
    return args if args else fields
def msb(val: int, bits: int = 8) -> bool:
    """Return whether the Most Significant Bit (msb) of `val` is 1.

    :param val: numeric value to check
    :param bits: bit length of the value
    :return: True if the msb is set, False otherwise
    """
    sign_bit = 1 << (bits - 1)
    return bool(val & sign_bit)
def createnozzlelistsp(nozzles, nozzlelist, firstnozzle=0):
    """
    Create a nozzle flag list: takes a list of active nozzle indices and
    converts it to ones and zeros at the specified places (shifted by
    `firstnozzle`).
    """
    # Renamed the result from `list` — shadowing the builtin is fragile.
    flags = [0] * nozzles
    for nozzle in nozzlelist:
        flags[nozzle + firstnozzle] = 1
    return flags
def is_cap_used(use_caps, i):
    """Return ``True`` when bit `i` of the cap bit mask is set.

    Parameters
    ----------
    use_caps : :class:`int`
        Bit mask indicating which cap is used.
    i : :class:`int`
        Number indicating which cap we are interested in.

    Returns
    -------
    :class:`bool`
        ``True`` if a cap is used.
    """
    mask = 1 << i
    return (use_caps & mask) != 0
def get_vendor_extension_fields(mapping):
    """
    Identify vendor extension fields and extract them into a new dictionary.
    Examples:
        >>> get_vendor_extension_fields({'test': 1})
        {}
        >>> get_vendor_extension_fields({'test': 1, 'x-test': 2})
        {'x-test': 2}
    """
    extensions = {}
    for key, value in mapping.items():
        if key.startswith('x-'):
            extensions[key] = value
    return extensions
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.