content stringlengths 42 6.51k |
|---|
def is_image(filename: str) -> bool:
    """Return True if the filename carries a recognised image extension."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[-1].lower()
    return extension in ('png', 'jpg', 'jpeg', 'gif', 'bmp')
def verified_flag_name(record_key):
    """
    Return the name for verified flag for the corresponding record key.
    """
    return f"{record_key}_verified"
def odd_even(x):
    """Classify a number as odd or even.
    >>> odd_even(4)
    'even'
    >>> odd_even(3)
    'odd'
    """
    return 'even' if x % 2 == 0 else 'odd'
def convert_to_int(val):
    """Convert *val* to int, returning None when conversion is impossible.

    The original bare ``except:`` swallowed every exception (including
    KeyboardInterrupt); only conversion failures are caught now.
    """
    try:
        return int(val)
    except (TypeError, ValueError):
        return None
def string_match(string, substring):
    """Return True if a string matches a substring. This substring may include
    wildcards.

    The substring is first treated as a regular expression; if it is not a
    valid regex (e.g. it uses shell-style wildcards such as ``[``), it is
    translated with fnmatch and retried.

    Parameters
    ----------
    string: str
        string you wish to match
    substring: str
        string you wish to match against
    """
    import re
    try:
        # re.match accepts a pattern string directly; the extra re.compile
        # was redundant. re.error is the public name for the private
        # sre_constants.error used before.
        return bool(re.match(substring, string))
    except re.error:
        import fnmatch
        return string_match(string, fnmatch.translate(substring))
def str_to_int(num_string, decimal_separator='.', thousand_separator=','):
    """
    Convert a string denoting a genomic location to int (base pairs).

    Accepts plain integers ("4000"), thousand separators ("4,000"), and
    unit suffixes ("4kb", "2.5mb", "1g", ...), case-insensitively.

    :param num_string: input :class:`~str`
    :param decimal_separator: Decimal separator used for float conversion
    :param thousand_separator: Thousand separator (to be ignored)
    :return: int (base pairs)
    """
    try:
        num_string = num_string.replace(thousand_separator, '').lower()
    except AttributeError:
        # Not a string (e.g. already a number); let int() below handle it.
        pass
    try:
        return int(num_string)
    except ValueError:
        # Scan the leading numeric part (digits plus decimal separator)...
        i = 0
        while i < len(num_string) and (num_string[i].isdigit() or num_string[i] == decimal_separator):
            i += 1
        try:
            # NOTE(review): float() always expects '.', so a custom
            # decimal_separator other than '.' will still fail here — confirm.
            number = float(num_string[:i])
            # ...and treat the remainder as a unit suffix.
            suffix = num_string[i:]
            multipliers = {
                'gb': 1000000000,
                'mb': 1000000,
                'kb': 1000,
                'bp': 1,
                'g': 1000000000,
                'm': 1000000,
                'k': 1000,
                'b': 1,
            }
            return int(number * multipliers[suffix])
        except (KeyError, ValueError):
            raise ValueError("Cannot convert '{}' to integer!".format(num_string))
def apply(fn, *x):
    """Applies fn to the argument list formed by prepending intervening arguments to args.
    apply(fn, x) --> fn(*x)"""
    if x and isinstance(x[-1], (tuple, list)):
        # Splice the trailing sequence into the positional arguments.
        flattened = x[:-1] + tuple(x[-1])
        return apply(fn, *flattened)
    return fn(*x)
def format_currency(value, decimals=2):
    """
    Return a number suitably formatted for display as currency: thousands
    separated by commas, decimals shown only when they are non-zero.

    >>> format_currency(1000)
    '1,000'
    >>> format_currency(999.95)
    '999.95'
    >>> format_currency(1000.41)
    '1,000.41'
    >>> format_currency(23.21, decimals=3)
    '23.210'
    >>> format_currency(123456789.123456789)
    '123,456,789.12'
    """
    formatted = (u'%%.%df' % decimals) % value
    number, decimal = formatted.split(u'.')
    # Peel off three-digit groups from the right.
    groups = []
    while len(number) > 3:
        groups.insert(0, number[-3:])
        number = number[:-3]
    groups.insert(0, number)
    joined = u','.join(groups)
    if int(decimal) == 0:
        return joined
    return joined + u'.' + decimal
def correct_neg(line):
    """Add space before negative coefficient numbers.

    Helper only called by parse_line_regex(), which is currently not used
    by default.

    Args:
        line (str): raw read line
    Returns:
        line (str): line with space(s) added before negative coefficients
    """
    import re
    return re.sub(r'(-[1-9]\.)', r' \1', line)
def split_fold_input_row(row: str) -> tuple:
    """Split fold information in the input."""
    parts = row.split("=")
    prefix, raw_value = parts[0], parts[1]
    for axis in ("x", "y"):
        if axis in prefix:
            return axis, int(raw_value)
    raise ValueError("axis not found")
def to_number(string):
    """ takes a string and will try to convert it to an integer first,
        then a float if that fails. If neither of these work, return
        the string
    """
    for converter in (int, float):
        try:
            return converter(string)
        except ValueError:
            continue
    return string
def check_close_real(x, y, eps=1e-9):
    """Return True iff floats x and y "are close"."""
    # Ensure y holds the larger magnitude.
    if abs(x) > abs(y):
        x, y = y, x
    if y == 0:
        # Comparing against zero: absolute tolerance.
        return abs(x) < eps
    if x == 0:
        return abs(y) < eps
    # Relative difference against the larger value.
    return abs((x - y) / y) < eps
def queue_requeue(queue, index=-1):
    """Returns the element at index after moving it to the beginning of the queue.
    >>> x = [1, 2, 3, 4]
    >>> queue_requeue(x)
    4
    >>> x
    [4, 1, 2, 3]
    """
    element = queue.pop(index)
    queue[0:0] = [element]
    return element
def snake_to_title_case(snake_case: str):
    """Convert snake_case to TitleCase.

    Empty segments from leading/trailing/double underscores are tolerated
    (the original raised IndexError on inputs like "a__b").
    """
    # w[:1] instead of w[0] so an empty segment yields '' instead of raising.
    return "".join(w[:1].upper() + w[1:] for w in snake_case.split("_"))
def process_max_frames_arg(max_frames_arg):
    """Handle maxFrames arg in vidstab.__main__

    Convert non-positive values to inf.

    :param max_frames_arg: maxFrames arg in vidstab.__main__
    :return: max_frames as is or inf

    >>> process_max_frames_arg(-1)
    inf
    >>> process_max_frames_arg(1)
    1
    """
    return max_frames_arg if max_frames_arg > 0 else float('inf')
def cycle(value, subjectnum):
    """
    >>> value = 1
    >>> for _ in range(11):
    ...     value = cycle(value, 5764801)
    >>> value
    14897079
    >>> value = 1
    >>> for _ in range(8):
    ...     value = cycle(value, 17807724)
    >>> value
    14897079
    """
    # One multiply-and-reduce step of the modular handshake loop.
    return (value * subjectnum) % 20201227
def is_integer(s):
    """
    Test whether the parameter can be converted to an integer.

    :param s: value to test
    :return: boolean
    """
    try:
        int(s)
    except (TypeError, ValueError):
        # TypeError covers non-convertible types such as None or lists,
        # which the original ValueError-only handler let propagate.
        return False
    return True
def breakLine(line):
    """Breaks a file line into it's command/instruction components.

    Most lines in tuflow files are in the form::
        Read Command == ..\\some\\path.ext ! comment
    This separates the command from the rest.

    Args:
        line(str): the line as read from the file.
    Returns:
        Tuple - (command, instruction).
    """
    command, instruction = (part.strip() for part in line.strip().split('==', 1))
    return command, instruction
def _recur_flatten(key, x, out, sep='.'):
"""Helper function to flatten_dict
Recursively flatten all nested values within a dict
Args:
key (str): parent key
x (object): object to flatten or add to out dict
out (dict): 1D output dict
sep (str): flattened key separator string
Returns:
dict: flattened 1D dict
"""
if x is None or isinstance(x, (str, int, float, bool)):
out[key] = x
return out
if isinstance(x, list):
for i, v in enumerate(x):
new_key = '{}{}{}'.format(key, sep, i)
out = _recur_flatten(new_key, v, out, sep)
if isinstance(x, dict):
for k, v in x.items():
new_key = '{}{}{}'.format(key, sep, k)
out = _recur_flatten(new_key, v, out, sep)
return out |
def reverse_bisect_left(a, x, lo=None, hi=None):
    """
    Locate the insertion point for x in a to maintain its reverse sorted order
    """
    lo = 0 if lo is None else lo
    hi = len(a) if hi is None else hi
    while lo < hi:
        mid = (lo + hi) >> 1
        # In a descending list, larger values sit to the left.
        if x > a[mid]:
            hi = mid
        else:
            lo = mid + 1
    return lo
def tp_fp_fn(CORRECT_SET, GUESS_SET):
    """
    INPUT: dictionary with the elements in the cluster from the ground truth
    (CORRECT_SET) and dictionary with the elements from the estimated cluster
    (ESTIMATED_SET).
    OUTPUT: number of True Positives (elements in both clusters), False
    Positives (elements only in the ESTIMATED_SET), False Negatives (elements
    only in the CORRECT_SET).
    """
    # True positives: guessed elements that are in the ground truth.
    tp = sum(1 for elem in GUESS_SET if elem in CORRECT_SET)
    # False positives: guessed elements absent from the ground truth.
    fp = sum(1 for elem in GUESS_SET if elem not in CORRECT_SET)
    # False negatives: ground-truth elements missing from the guess.
    fn = sum(1 for elem in CORRECT_SET if elem not in GUESS_SET)
    return tp, fp, fn
def _x_replacer(args, kwargs, dispatchables):
"""
uarray argument replacer to replace the transform input array (``x``)
"""
if len(args) > 0:
return (dispatchables[0],) + args[1:], kwargs
kw = kwargs.copy()
kw['x'] = dispatchables[0]
return args, kw |
def transform_ratio(value):
    """ Transformation that takes ratios and applies a function that preserves equal distances to origin (1)
    for similar relationships, eg. a ratio of 2 (twice the size) is at the same distance of 1 (same size) as
    0.5 (half the size).
    Read: 'how many times larger or smaller than reference disparity'."""
    if value < 1:
        return 1 - 1 / value
    return value - 1
def specialConditions(condition, match):
    """Implements special condition logic to decide whether a map file matches a log or not.

    Args:
        condition: special condition type
        match: match status
    Returns:
        match status (True/False)
    """
    # Any condition other than the wildcard forces a non-match.
    return match if condition == "*" else False
def filesize(bytes):
    """Calculate human readable size from given bytes.

    :param bytes: byte count (any numeric type; coerced to float)
    :return: formatted string such as '2.0KB'
    """
    size = float(bytes)
    for unit in ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB']:
        if size < 1024.0:
            return "{0:3.1f}{1}".format(size, unit)
        size /= 1024.0
    # The original fell off the loop and returned None for >= 1024 EB.
    return "{0:3.1f}{1}".format(size, 'ZB')
def no_shouting(str):
    """Return the given string with every capital letter lowered."""
    # NOTE: parameter name shadows the builtin `str` but is part of the
    # public keyword interface, so it is kept as-is.
    return str.lower()
def get_unsigned_value(obj):
    """
    Returns unsigned integer from LLDB value.

    :param lldb.SBValue obj: LLDB value object.
    :return: Unsigned integer from LLDB value, or None when obj is None.
    :rtype: int | None
    """
    if obj is None:
        return None
    return obj.GetValueAsUnsigned()
def _get_trailing_metadata_from_interceptor_exception(exception):
"""Retrieves trailing metadata from an exception object.
Args:
exception: an instance of grpc.Call.
Returns:
A tuple of trailing metadata key value pairs.
"""
try:
# GoogleAdsFailure exceptions will contain trailing metadata on the
# error attribute.
return exception.error.trailing_metadata()
except AttributeError:
try:
# Transport failures, i.e. issues at the gRPC layer, will contain
# trailing metadata on the exception iself.
return exception.trailing_metadata()
except AttributeError:
# if trailing metadata is not found in either location then
# return an empty tuple
return tuple() |
def has_index_together_changed(old_model_sig, new_model_sig):
    """Return whether index_together has changed between signatures."""
    old_index_together = old_model_sig['meta'].get('index_together', [])
    # Use .get() for the new signature too: the original indexed it
    # directly and raised KeyError when the key was absent, while the old
    # signature was tolerated — a missing key means "no indexes" for both.
    new_index_together = new_model_sig['meta'].get('index_together', [])
    return list(old_index_together) != list(new_index_together)
def get_proxy(my_proxy):
    """
    Function to set proxy
    :param my_proxy: Proxy IP Address
    :returns: dictionary
    """
    credentials = "edcguest:edcguest"
    # Same host/port/credentials for both schemes.
    return {
        scheme: "{0}://{1}@{2}:3128".format(scheme, credentials, my_proxy)
        for scheme in ("http", "https")
    }
def _cnvListCard(val, itemCnv=int):
"""Stupid utility to cons up a single string card from a list."""
return " ".join([str(itemCnv(v)) for v in val]) |
def plus_or_dot(pieces):
    """Return a + if we don't already have one, else return a ."""
    return "." if "+" in pieces.get("closest-tag", "") else "+"
def chrelerr(fbest, stop):
    """
    checks whether the required tolerance for a test function with known
    global minimum has already been achieved

    Input:
        fbest      function value to be checked
        stop[0]    relative error with which a global minimum with not too
                   small absolute value should be reached
        stop[1]    global minimum function value of a test function
        stop[2]    if abs(fglob) is very small, we stop if the function
                   value is less than stop[2]
    Output:
        flag = 0   the required tolerance has been achieved
             = 1   otherwise
    """
    fglob = stop[1]
    tolerance = max(stop[0] * abs(fglob), stop[2])
    return 0 if fbest - fglob <= tolerance else 1
def get_vars(variants, loop_only=False):
    """For purposes of naming/identifying, provide a way of identifying which variables contribute
    to the matrix dimensionality"""
    special_keys = ('pin_run_as_build', 'zip_keys', 'ignore_version')
    first = variants[0]

    def varies(k):
        # A key "loops" when any later variant disagrees with the first.
        return any(variant[k] != first[k] for variant in variants[1:])

    return [k for k in first
            if k not in special_keys and (not loop_only or varies(k))]
def args_to_list(args):
    """Convert given args to a list.

    Each element is round-tripped through latin-1 -> utf-8 decoding to
    normalise mis-encoded characters, as in the original implementation.
    """
    # `is None` instead of the original `== None` identity-vs-equality bug.
    if args is None:
        args = ('', )
    elif not isinstance(args, tuple):
        args = [args]
    else:
        args = list(args)
    return [_filter.encode("latin-1").decode("utf-8") for _filter in args]
def sparse_indexes(coord, depth):
    """Generate sparse indexes from coordinate."""
    indexes = [0] * depth
    x, y, z = coord[0], coord[1], coord[2]
    # Fill from the deepest level up, peeling one base-4 digit per axis.
    for level in range(depth - 1, -1, -1):
        x, modx = divmod(x, 4)
        y, mody = divmod(y, 4)
        z, modz = divmod(z, 4)
        indexes[level] = modx + mody * 4 + modz * 16
    return indexes
def _is_json_mime(mime):
"""Is a key a JSON mime-type that should be left alone?"""
return mime == 'application/json' or \
(mime.startswith('application/') and mime.endswith('+json')) |
def get_min_max(ints):
    """
    Return a tuple (min, max) out of a list of unsorted integers.

    Args:
        ints(list): list of integers containing one or more integers
    """
    if ints is None:
        return None
    if len(ints) == 1:
        return (ints[0], ints[0])
    lo = hi = None
    for value in ints:
        if lo is None or value < lo:
            lo = value
        if hi is None or value > hi:
            hi = value
    return (lo, hi)
def get_kubevirt_cfg(k8s_conf):
    """
    Returns kubevirt enablement choice
    :return: true/false
    """
    # NOTE(review): implicitly returns None (not False) when the flag is
    # missing or falsy — callers appear to rely on truthiness only; confirm
    # before normalising to a strict bool.
    if k8s_conf.get('enable_kubevirt') :
        return k8s_conf['enable_kubevirt']
def str2bool(value):
    """Translate a string to a boolean value."""
    truthy = ("yes", "true", "y", "1")
    return str(value).lower() in truthy
def recur_fibonacci(k):
    """
    Return the Fibonacci number of order k.

    Computed iteratively in O(k) time; the original naive double recursion
    took exponential time (and stack) to produce the same values.
    """
    if k < 2:
        return k
    prev, curr = 0, 1
    for _ in range(k - 1):
        prev, curr = curr, prev + curr
    return curr
def find_match_characters(string, pattern):
    """Locate each pattern character, in order, inside string.

    Args:
        string: the string to search in
        pattern: characters to locate sequentially
    Returns:
        A list of (char, index) tuples for every pattern character, or []
        when the inputs are empty, the first characters differ, or a
        character cannot be found after the previous match.
    """
    if not string or not pattern or string[0] != pattern[0]:
        return []
    matched = []
    search_from = 0
    for ch in pattern:
        position = string.find(ch, search_from)
        if position < 0:
            return []
        matched.append((ch, position))
        search_from = position + 1
    return matched
def reg_extract(openfield, address):
    """
    Extract data from openfield. 'ip:port:pos' or with option 'ip2:port:pos2,reward=bis2a'
    :param openfield: str
    :param address: str
    :return: tuple (ip, port, pos, reward)
    """
    options = {}
    if "," in openfield:
        # Only allow for 1 extra param at a time. No need for more now, but beware if we add!
        main, *extras = openfield.split(",")
        openfield = main
        for extra in extras:
            key, value = extra.split("=")
            options[key] = value
    ip, port, pos = openfield.split(":")
    reward = options.get("reward", address)
    source = options.get("source")
    if source and source != address:
        raise ValueError("Bad source address")
    return ip, port, pos, reward
def flip(s, i, N):
    """Flip the bits of the state `s` at positions i and (i+1)%N."""
    mask = (1 << i) | (1 << ((i + 1) % N))
    return s ^ mask
def _remove_ids(elements):
"""
We remove the question and answer ids from the text (they are noise for the input)
prior to convert the corpus to JSON
"""
new_elements = []
for e in elements:
new_elements.append(e.split(".",1)[1].strip())
return new_elements |
def merge_clouds(pc1, pc2):
    """Merge two point clouds.

    NOTE: pc1 is mutated in place (pc2's points are appended to it) and is
    also returned as pc_out.

    inputs:
        pc1 - a list of numpy 3 x 1 matrices that represent one set of points.
        pc2 - a list of numpy 3 x 1 matrices that represent another set of points.
    outputs:
        pc_out - merged point cloud
    """
    pc_out = pc1
    for pt in pc2:
        pc_out.append(pt)
    return pc_out
def _str_template_fields(template):
"""
Return a list of `str.format` field names in a template string.
"""
from string import Formatter
return [
field_name
for _, field_name, _, _ in Formatter().parse(template)
if field_name is not None
] |
def new_measure_group(
    grpid, attrib, date, created, category, deviceid, more, offset, measures
):
    """Create simple dict to simulate api data."""
    group = dict(
        grpid=grpid,
        attrib=attrib,
        date=date,
        created=created,
        category=category,
        deviceid=deviceid,
        measures=measures,
        more=more,
        offset=offset,
    )
    group["comment"] = "blah"  # deprecated
    return group
def get_line_indent_num(line_str: str) -> int:
    """
    Get the number of indents (4 spaces each) of the target line.

    Parameters
    ----------
    line_str : str
        String of target line.

    Returns
    -------
    line_indent_num : int
        Number of indents.
    """
    leading_spaces: int = len(line_str) - len(line_str.lstrip(' '))
    return leading_spaces // 4
def fontInfoPostscriptBluesValidator(values):
    """
    Version 2+.
    """
    if not isinstance(values, (list, tuple)):
        return False
    # At most 14 entries, and they must come in pairs.
    if len(values) > 14 or len(values) % 2:
        return False
    return all(isinstance(v, (int, float)) for v in values)
def topo_sorted(depmap):
    """Return list of items topologically sorted.

    depmap: { item: [required_item, ...], ... }

    Raises ValueError if a required_item cannot be satisfied in any order.

    The per-item required_item iterables must allow revisiting on
    multiple iterations.
    """
    # Items with no requirements can go first.
    ordered = [item for item, requires in depmap.items() if not requires]
    satisfied = set(ordered)
    pending = {item: set(requires) for item, requires in depmap.items() if requires}
    while pending:
        ready = [item for item, requires in pending.items()
                 if requires.issubset(satisfied)]
        if not ready:
            # No progress possible: remaining deps form a cycle / are missing.
            raise ValueError(("unsatisfiable", pending))
        for item in ready:
            satisfied.add(item)
            del pending[item]
        ordered.extend(ready)
    return ordered
def supervised_hyperparams(lr=1e-3, wd=5e-5, scheduler=True):
    """
    Return a dictionary of hyperparameters for the Supervised algorithm.

    Default parameters are the best ones as found through a hyperparameter search.

    Arguments:
    ----------
    lr: float
        Learning rate.
    wd: float
        Weight decay for the optimizer.
    scheduler: bool
        Will use a OneCycleLR learning rate scheduler if set to True.

    Returns:
    --------
    hyperparams: dict
        Dictionary containing the hyperparameters. Can be passed to the `hyperparams` argument on Supervised.
    """
    return {
        'learning_rate': lr,
        'weight_decay': wd,
        'cyclic_scheduler': scheduler,
    }
def linear_search(list_to_search_in, key_to_search_for, field=lambda item: item["title"]):
    """
    Does a linear search through a list.
    Defaults to searching for "title" if no other field is specified.
    """
    if not list_to_search_in or key_to_search_for is None:
        return "Error"
    needle = str(key_to_search_for).lower()
    # Case-insensitive substring match against the chosen field.
    return [item for item in list_to_search_in
            if needle in str(field(item)).lower()]
def computeTicks(x, step=5):
    """
    Computes domain with given step encompassing series x
    @ params
        x - Required - A list-like object of integers or floats
        step - Optional - Tick frequency
    """
    import math
    xMax = math.ceil(max(x))
    xMin = math.floor(min(x))
    # Snap the lower bound down and the upper bound up to the step grid.
    dMin = xMin - abs(xMin % step)
    dMax = xMax + abs((xMax % step) - step) + (step if xMax % step != 0 else 0)
    return range(dMin, dMax, step)
def set_http_sts(test, sts):
    """Set http status code on the test's first 'extra' entry.

    :param test: test object to append response info to
    :param sts: http status code as String
    :return: updated test dict
    """
    extra_entry = test['extra'][0]
    extra_entry['http status'] = sts
    return test
def blend_color(a, b, r):
    """blends color a and b in r ratio."""
    # Per-channel linear interpolation, truncated to int.
    return tuple(
        int(a[i] + (b[i] - a[i]) * r[i]) for i in range(3)
    )
def trimCompressionSuffix(fileName):
    """
    Trim .gz, .bz2, .tar.gz and .tar.bz2
    """
    # Order matters: stripping .gz/.bz2 first exposes a possible .tar.
    # (The original's try/IndexError was dead code — slicing never raises.)
    for suffix in (".gz", ".bz2", ".tar"):
        if fileName.endswith(suffix):
            fileName = fileName[:-len(suffix)]
    return fileName
def normalize_label(label):
    """Normalizes a label to be used for data sent to performance dashboard.

    This replaces:
      '/' -> '-', as slashes are used to denote test/sub-test relation.
      ' ' -> '_', as there is a convention of not using spaces in test names.

    Returns:
      Normalized label.
    """
    return label.translate(str.maketrans({'/': '-', ' ': '_'}))
def wolfe(f_alpha, fk, alpha, gpk):
    """Check the sufficient-decrease (Armijo) condition.

    :param f_alpha: func(xk + alpha * pk)
    :param fk: func(xk)
    :param alpha: step length
    :param gpk: grad(xk) @ pk
    """
    c1 = 1e-4
    sufficient_decrease = fk + c1 * alpha * gpk
    return f_alpha <= sufficient_decrease
def translate(seq):
    """Translate a nucleotide (exon) sequence into its amino acid sequence.

    Returns '' when the sequence length is not a multiple of 3, when an
    unknown/ambiguous codon is encountered, or when seq is not a string.
    (The original used a bare ``except:`` and a malformed ``\"\"\"\"`` docstring.)
    """
    table = {
        'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M',
        'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',
        'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K',
        'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R',
        'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L',
        'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',
        'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q',
        'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',
        'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',
        'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',
        'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',
        'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',
        'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S',
        'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',
        'TAC':'Y', 'TAT':'Y', 'TAA':'', 'TAG':'',
        'TGC':'C', 'TGT':'C', 'TGA':'', 'TGG':'W',
    }
    protein = ""
    try:
        if len(seq) % 3 == 0:
            for i in range(0, len(seq), 3):
                codon = seq[i:i + 3]  # one codon = 3 bases
                protein += table[codon]
    except (KeyError, TypeError):
        # KeyError: unknown codon; TypeError: non-sequence input.
        protein = ""
    return protein
def first_line(st):
    """
    Extracts first line of string.
    """
    stripped = st.lstrip()
    return stripped.partition("\n")[0]
def get_ext(url):
    """Return the filename extension from url, or ''."""
    # Drop the query string before looking for an extension.
    name = str(url).split('?')[0]
    # The original returned the whole name when no '.' was present,
    # contradicting the documented '' fallback.
    if '.' not in name:
        return ''
    return name.split('.')[-1]
def align(alignment, x):
    """align(alignment, x) -> int

    Rounds `x` up to nearest multiple of the `alignment`.

    Example:
        >>> [align(5, n) for n in range(15)]
        [0, 5, 5, 5, 5, 5, 10, 10, 10, 10, 10, 15, 15, 15, 15]
    """
    quotient = (x + alignment - 1) // alignment
    return quotient * alignment
def status_str(status):
    """return status string from status code"""
    return {
        0: 'MATCH',
        10: 'OK',
        15: 'SKIP',
        20: 'FAIL',
        30: 'CRASH',
    }.get(status, 'UNKNOWN')
def _recall(tp, fn):
"""Calculate recall from true positive and false negative counts."""
if fn == 0:
return 1 # by definition.
else:
return tp / (tp + fn) |
def removeLines(lines, remove=('[[back to top]', '<a class="mk-toclify"')):
    """Removes existing [back to top] links and <a id> tags."""
    if not remove:
        # Nothing to filter: return a shallow copy, as before.
        return lines[:]
    # startswith accepts a tuple of prefixes.
    return [l for l in lines if not l.startswith(remove)]
def _prepare_versions(listen_versions, supported_versions):
"""Return listen versions."""
assert supported_versions
if not listen_versions:
return supported_versions
# Check if any desired versions are unsupported.
unsupported = set(listen_versions) - set(supported_versions)
if unsupported:
raise ValueError("Unsupported OpenFlow versions: %r" % unsupported)
return listen_versions |
def res_to_url(resource, action):
    """Convert resource.action to (url, HTTP_METHOD)"""
    httpmethod, sep, tail = action.partition("_")
    if not sep:
        # No underscore: the whole action is the HTTP method.
        return "/" + resource, action.upper()
    return "/%s/%s" % (resource, tail), httpmethod.upper()
def complement(sequence):
    """Provides the complement in the 5' - 3' direction

    Assumption: reference consists of A, G, C, T only
    complement(str) --> str
    """
    pairs = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}
    out = []
    # Walk right-to-left so the result reads 5' -> 3'.
    for base in reversed(sequence):
        out.append(pairs.get(base, base))
    return ''.join(out)
def byte_size(num, suffix='B'):
    """
    Return a formatted string indicating the size in bytes, with the proper
    unit, e.g. KB, MB, GB, TB, etc.

    :arg num: The number of bytes
    :arg suffix: An arbitrary suffix, like `Bytes`
    :rtype: str
    """
    for unit in ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z'):
        if abs(num) < 1024.0:
            return "%3.1f%s%s" % (num, unit, suffix)
        num /= 1024.0
    # Anything this large is expressed in yotta-units.
    return "%.1f%s%s" % (num, 'Y', suffix)
def time_metric(secs=60, mins=0):
    """Returns user-readable string representing given number of seconds.

    e.g. time_metric(3661) -> '1h1m1s'.
    """
    if mins:
        secs += mins * 60
    time = ''
    for metric_secs, metric_char in [(7 * 24 * 60 * 60, 'w'),
                                     (24 * 60 * 60, 'd'),
                                     (60 * 60, 'h'),
                                     (60, 'm')]:
        # >= so exact multiples roll over: 3600s -> '1h', not '60m'
        # (the original strict > mis-rendered every exact boundary).
        if secs >= metric_secs:
            time += '{}{}'.format(int(secs / metric_secs), metric_char)
            secs -= int(secs / metric_secs) * metric_secs
    if secs > 0:
        time += '{}s'.format(secs)
    return time
def sanitize_path(text: str) -> str:
    """Replaces all invalid characters for a path with valid ones.

    E.g. useful for creating files with the name of KEGG pathways,
    as these names may contain invalid characters.

    Argument
    ----------
    * text: str ~ The string that may contain invalid characters.
    """
    # Single-pass replacement of every invalid character with '_'.
    invalid = '\\/:*<>|'
    return text.translate(str.maketrans({c: "_" for c in invalid}))
def single_total(iterable, odd=False):
    """Sum the data contained in a single iterable, i.e. a list, tuple,
    set, pd.Series, etc.

    Args:
        iterable: Any iterable that holds data that can be added together.
        odd (bool): When True, only odd items contribute to the total.

    Returns:
        total (int): Sum of passed iterable.
    """
    if odd:
        return sum(int(item) for item in iterable if item % 2 != 0)
    return sum(int(item) for item in iterable)
def merge_dict(a, b):
    """Perform a merge of dictionaries a and b

    Any subdictionaries will be recursively merged.

    NOTE(review): for a key present in BOTH dicts whose values are not
    both dicts, the value from *b* wins (the first loop skips such keys
    and the second loop copies b's value) — this contradicts the original
    comment claiming `a` wins; confirm which behavior is intended before
    relying on it.
    """
    d = {}
    for k, v in a.items():
        if k not in b:
            # Key only in a: keep a's value.
            d[k] = v
        elif isinstance(v, dict) and isinstance(b[k], dict):
            # Both values are dicts: merge recursively.
            d[k] = merge_dict(v, b[k])
    for k, v in b.items():
        # Fills in b-only keys AND conflicting non-dict keys (b wins).
        if k not in d:
            d[k] = v
    return d
def numpy_data(data):
    """Extract feature rows from a per-function feature dictionary.

    NOTE(review): despite the name, this returns plain Python lists, not
    numpy arrays — presumably the caller converts `values` with
    np.array(); confirm.

    Args:
        data: mapping of function name -> feature dict containing every
            key listed in `keys` below.
    Returns:
        (functions, values): parallel lists — function names and, for
        each, its feature values in the fixed `keys` order.
    """
    # Fixed feature order; every per-function dict must contain all keys.
    keys = ['ft01_BBInMethod',
            'ft02_BBWithOneSuccessor',
            'ft03_BBWithTwoSuccessors',
            'ft04_BBWithMoreThanTwoSuccessors',
            'ft05_BBWithOnePredecessor',
            'ft06_BBWithTwoPredecessors',
            'ft07_BBWithMoreThanTwoPredecessors',
            'ft08_BBWithOnePredOneSuc',
            'ft09_BBWithOnePredTwoSuc',
            'ft10_BBWithTwoPredOneSuc',
            'ft11_BBWithTwoPredTwoSuc',
            'ft12_BBWithMoreTwoPredMoreTwoSuc',
            'ft13_BBWithInstructionsLessThan15',
            'ft14_BBWithInstructionsIn[15-500]',
            'ft15_BBWithInstructionsGreaterThan500',
            'ft16_EdgesInCFG',
            'ft17_CriticalEdgesInCFG',
            'ft18_AbnormalEdgesInCFG',
            'ft19_DirectCalls',
            'ft20_ConditionalBranch',
            'ft21_AssignmentInstructions',
            'ft22_ConditionalBranch',
            'ft23_BinaryIntOperations',
            'ft24_BinaryFloatPTROperations',
            'ft25_Instructions',
            'ft26_AverageInstruction',
            'ft27_AveragePhiNodes',
            'ft28_AverageArgsPhiNodes',
            'ft29_BBWithoutPhiNodes',
            'ft30_BBWithPHINodesIn[0-3]',
            'ft31_BBWithMoreThan3PHINodes',
            'ft32_BBWithArgsPHINodesGreaterThan5',
            'ft33_BBWithArgsPHINodesGreaterIn[1-5]',
            'ft34_SwitchInstructions',
            'ft35_UnaryOperations',
            'ft36_InstructionThatDoPTRArithmetic',
            'ft37_IndirectRefs',
            'ft38_AdressVarIsTaken',
            'ft39_AddressFunctionIsTaken',
            'ft40_IndirectCalls',
            'ft41_AssignmentInstructionsWithLeftOperandIntegerConstant',
            'ft42_BinaryOperationsWithOneOperandIntegerConstant',
            'ft43_CallsWithPointersArgument',
            'ft44_CallsWithArgsGreaterThan4',
            'ft45_CallsThatReturnPTR',
            'ft46_CallsThatReturnInt',
            'ft47_ConstantZero',
            'ft48_32-bitIntegerConstants',
            'ft49_ConstantOne',
            'ft50_64-bitIntegerConstants',
            'ft51_ReferencesLocalVariables',
            'ft52_DefUseVariables',
            'ft53_LocalVariablesReferred',
            'ft54_ExternVariablesReferred',
            'ft55_LocalVariablesPointers',
            'ft56_VariablesPointers']
    values = []
    functions = []
    for func, msf in data.items():
        # One row per function, columns in the fixed `keys` order.
        values.append([msf[key] for key in keys])
        functions.append(func)
    return functions, values
def _field_key(x):
"""Order field.min, field, field.max."""
if x.endswith('.min'):
return x[:-4] + '.0'
elif x.endswith('.max'):
return x[:-4] + '.2'
return x + '.1' |
def get_common_ancestor(name1, name2):
    """
    Args
    ----
    name1 : str
        Name of first system.
    name2 : str
        Name of second system.

    Returns
    -------
    str
        Absolute name of any common ancestor `System` containing
        both name1 and name2.  If none is found, returns ''.
    """
    common = []
    # Compare dotted path components until they first diverge.
    for p1, p2 in zip(name1.split('.'), name2.split('.')):
        if p1 != p2:
            break
        common.append(p1)
    return '.'.join(common)
def parse_list_of_doubles(string):
    """
    Ensure that the given input string is a list of comma-separated floats.

    Return a formatted version of the input string, preceded by 'INVALID: '
    if something is incorrect.
    """
    def is_double(token):
        try:
            float(token)
        except ValueError:
            return False
        return True

    if not all(is_double(token) for token in string.split(',')):
        return f'INVALID: {string}'
    return string.replace(' ', '').replace(',', ', ').upper()
def convert_value_to_list(dictionary, key):
    """
    Given a dictionary and a key, make sure the key value is a list type.
    If the key does not exist in the dictionary, just return None.

    :param dictionary: A dictionary instance.
    :param key: A dictionary key.
    :return: A list (or None when the key is absent).
    :raises ValueError: if *dictionary* does not support .get() lookup.
    """
    try:
        value = dictionary.get(key)
    except AttributeError:
        # Not a dict-like object; surface the same descriptive error the
        # original bare-except produced, without swallowing everything else.
        raise ValueError("The value associated with '{}' is expected to be a list type - {}.".format(key, dictionary))
    if value is None:
        return None
    # isinstance instead of `type(value) is list` so list subclasses pass.
    return value if isinstance(value, list) else [value]
def _process_binary( sig, base, max ):
    """
    Returns int value from a signal in 32b form. Used for testing.

    Example: input: 0b00000000000000000000000000000000 10
             output: 0

    :param sig: binary signal string, optionally prefixed with '0b'
    :param base: 10 for signed decimal output, anything else for hex
    :param max: desired hex digit width (only used on the hex path)
    """
    # Strip an optional '0b' prefix.
    if sig[1] == "b":
        sig = sig[2:]
    if base == 10:
        temp_int = int(sig,2)
        if sig[0] == '1':
            return temp_int -2 **32  # taking 2's complement.
                                     # leading 1 indicates a negative number
        else:
            return temp_int
    #hex number
    else:
        temp_hex = hex(int(sig,2))[2:]
        l = len(temp_hex)
        # Truncate to the low `max` digits, or left-pad with zeros.
        if l > max:
            temp_hex = temp_hex[l-max:]
        if l < max:
            temp_hex = '0'*(max-l) + temp_hex
        # NOTE(review): hex path returns a str, not an int, despite the
        # docstring's "Returns int value" — confirm callers expect this.
        return temp_hex
def parse_instances(instances):
    """Parse lists of instances or instance ranges into a sorted list.

    Examples:
      0-2
      0,1-3,5
      1,3,5
    """
    if not instances:
        return None
    found = set()
    for chunk in instances.split(','):
        bounds = chunk.split('-')
        # A bare number is a range of one: bounds[0] == bounds[-1].
        found.update(range(int(bounds[0]), int(bounds[-1]) + 1))
    return sorted(found)
def to_id_str(name: str) -> str:
    """Converts a full-name to its corresponding id string.

    :param name: The name to convert.
    :type name: str
    :return: The corresponding id string.
    :rtype: str
    """
    # Lowercase, then delete separators/punctuation in a single pass.
    return name.lower().translate(str.maketrans('', '', " -%:'."))
def shrink_seq(seq):
    """Remove consecutive repeated ids.

    Returns a new list with runs of equal adjacent elements collapsed to
    one. An empty input yields an empty list (the original raised
    IndexError on ``seq[0]``).
    """
    if not seq:
        return []
    collapsed = [seq[0]]
    for item in seq[1:]:
        if item != collapsed[-1]:
            collapsed.append(item)
    return collapsed
def safe_str(val):
    """
    Returns:
        str(val) or None if val is None.
    """
    return None if val is None else str(val)
def keyword_count(keyword, the_comments):
    """Return how many comments in *the_comments* contain *keyword*.

    The match is case-insensitive against each comment's ``body`` attribute.
    """
    # Hoisted: keyword.lower() is invariant across the loop.
    needle = keyword.lower()
    return sum(1 for comment in the_comments if needle in comment.body.lower())
def group_by_key(dict_list, key):
    """
    >>> data = [
    ...     {'a': 1, 'b': 2},
    ...     {'a': 1, 'b': 3}
    ... ]
    >>> group_by_key(data, 'a')
    {1: [{'a': 1, 'b': 2}, {'a': 1, 'b': 3}]}
    """
    grouped = {}
    for entry in dict_list:
        # Missing keys group under None, as before (dict.get).
        grouped.setdefault(entry.get(key), []).append(entry)
    return grouped
def flatten_lists_to_csv(data):
    """Converts the passed in data to csv.

    Assuming:
        x = [
            ["v1", 98, 23],
            ["v2", 0.25, 0.56],
        ]
    then flatten_lists_to_csv(x) will return the following string:
        v1,v2
        98,0.25
        23,0.56

    :param list data: A list of lists holding a flat view of the data
        to convert to csv.
    :return: A string representing the csv view of the passed in data.
    """
    # zip(*data) transposes and stops at the shortest column — matching
    # the old IndexError-driven loop, but without its infinite loop when
    # data is an empty list.
    return "\n".join(",".join(str(v) for v in row) for row in zip(*data))
def avg_word(sentence):
    """
    avg_word - calculates the average word length of a sentence
    """
    words = sentence.split()
    # max(..., 1) avoids ZeroDivisionError for an empty/whitespace sentence.
    return sum(len(word) for word in words) / max(len(words), 1)
def _all_indices(iterable, value):
"""
Return all indices of `iterable` that match `value`.
"""
return [i for i, x in enumerate(iterable) if x == value] |
def format_bytes(nbytes):
    """Format ``nbytes`` as a human-readable string with units"""
    # Largest binary unit first; fall through to plain bytes.
    for power, unit in ((50, "PiB"), (40, "TiB"), (30, "GiB"),
                        (20, "MiB"), (10, "KiB")):
        if nbytes > 2 ** power:
            return "%0.2f %s" % (nbytes / 2 ** power, unit)
    return "%d B" % nbytes
def xml_only_url(live_server):
    """Return the XML-RPC specific endpoint URL. See 'testsite.urls' for additional info."""
    endpoint = '/xml-only/'
    return live_server + endpoint
def read_results(infile, parser, num_elements, allow_early_end=False):
    """DEPRECATED function, replaced by npread() which runs faster.

    Parses a binary file multiple times, allowing for control if the
    file ends prematurely.

    Arguments:
        infile {file-like} -- The binary file to read the select number of bytes.
        parser {callable} -- Parsing function applied iteratively over the
            infile; expected to raise EOFError at end of input.
        num_elements {integer} -- The number of elements to parse.
    Keyword Arguments:
        allow_early_end {bool} -- Whether it is ok to reach the end of the file early. (default: {False})
    Raises:
        EOFError: If the end of the file is reached before the number of elements have
            been processed (and allow_early_end is False).
    Returns:
        [list(any)] -- A list of the parsed values.
    """
    results = []
    while len(results) < num_elements:
        try:
            results.append(parser(infile))
            continue
        except EOFError:
            if allow_early_end:
                # Tolerated: return the partial result list.
                break
        raise EOFError('End of file reached before number of results parsed')
    return results
def getpage(page):
    """To change pages number into desired format for saving"""
    text = str(page)
    # Zero-pad single digits so filenames sort correctly.
    if int(text) < 10:
        text = '0' + text
    return text
def union_crops(crop1, crop2):
    """Union two (x1, y1, x2, y2) rects."""
    x11, y11, x21, y21 = crop1
    x12, y12, x22, y22 = crop2
    left = min(x11, x12)
    top = min(y11, y12)
    right = max(x21, x22)
    bottom = max(y21, y22)
    return [left, top, right, bottom]
def is_optional(pattern):
    """ An optional pattern is enclosed in brackets.
    """
    return pattern.startswith("(") and pattern.endswith(")")
def parent2DB(value, options):
    """Map a parent's display value back to its database value.

    (Original docstring was Portuguese: "transforms a parent tuple into a
    value to store in the db".)

    :param value: display value to look up
    :param options: iterable of (db_value, display_value, ...) tuples
    :return: the db value of the first matching option, or *value* unchanged
    """
    for option in options:
        if value == option[1]:
            # Return at the first match so the mapped value cannot itself
            # be re-matched by a later option (chained rewrites were
            # possible in the original, which kept iterating).
            return option[0]
    return value
def degdiff(angle1, angle2):
    """
    The difference of two angles given in degrees. The answer is an angle from
    -180 to 180. Positive angles imply angle2 is clockwise from angle1 and -ve
    angles imply counter-clockwise.

    >>> int(degdiff(40, 30))
    -10
    >>> int(degdiff(30, 40))
    10
    >>> int(degdiff(361, 40))
    39
    >>> int(degdiff(40, 361))
    -39
    >>> degdiff(40,250)
    -150
    >>> degdiff(40,200)
    160
    >>> degdiff(40, 219)
    179
    >>> degdiff(40, 220)
    180
    >>> degdiff(40, 221)
    -179
    """
    # Bring the raw difference into the 0..360 range.
    delta = ((angle2 - angle1) + 360) % 360
    if delta > 180:
        # Shift (180, 360) down so e.g. 181 -> -179, 200 -> -160.
        delta -= 360
    return delta
def factorial(n):
    """Computes factorial of n."""
    if n == 0:
        return 1
    return n * factorial(n - 1)
def compute_name(path, char="_"):
    """
    Compute a name from a list of components.

    Blank entries are filtered out.
    Used to find C_error_pattern.

    Args:
        path - list of name components.
    """
    return char.join(part for part in path if part)
def match_by_split(split: str) -> dict:
    """Get the $match query by split one of ['train', 'valid', 'test']."""
    conditions = [{"is_AF": {"$exists": True}}, {"split": split}]
    return {"$and": conditions}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.