content stringlengths 42 6.51k |
|---|
def is_dict(klass: type):
    """Determine whether *klass* is a typing.Dict / dict generic alias.

    Typing generics expose an ``__origin__`` attribute; plain classes
    without it yield None and therefore False.
    """
    # Identity comparison: __origin__ of Dict[...] is exactly the builtin dict.
    return getattr(klass, "__origin__", None) is dict
def resnet_params(model_name):
    """Look up (input resolution, dropout rate) for a CIFAR ResNet name.

    Raises KeyError for unsupported model names.
    """
    # Every supported depth shares the same 32px resolution / 0.2 dropout.
    coefficients = (32, 0.2)
    supported = ("resnet20", "resnet32", "resnet44",
                 "resnet56", "resnet110", "resnet1202")
    params_dict = {name: coefficients for name in supported}
    return params_dict[model_name]
def _build_dropdown(options):
"""Convert a list to (value, caption) tuples."""
return [(x, x) for x in options] |
def format_version_code(version_code: int) -> str:
    """Convert a TheoTown version code into a dotted version name.

    Codes of 3/4/5 digits are split into major.minor.patch; any other
    length is returned unchanged as a string.
    """
    text = str(version_code)
    if len(text) == 3:
        # Three digits imply an implicit major version of 1.
        return f"1.{text[0]}.{text[1:]}"
    if len(text) == 4:
        return f"{text[0]}.{text[1:2]}.{text[2:]}"
    if len(text) == 5:
        return f"{text[0]}.{text[1:3]}.{text[3:]}"
    return text
def format_coord(x, y):
    """Status-bar coordinate formatter for a Matplotlib axes.

    If for some reason the matplotlib backend doesn't visualize your
    mouse position, assign this function to ``ax.format_coord``.
    Code modified from user tacaswell on Stack Overflow post:
    https://stackoverflow.com/questions/15876011/add-information-to-matplotlib-navigation-toolbar-status-bar
    """
    # The original also computed rounded row/col values but never used
    # them; only the formatted raw coordinates are returned.
    return 'x=%1.4f, y=%1.4f' % (x, y)
def breakdate(date):
    """Split a 'YYYYMMDD' string into (day, month, year) integers."""
    year = int(date[:4])
    month = int(date[4:6])
    day = int(date[6:8])
    return day, month, year
def calTrArea(a, b, c):
    """Compute the area of a triangle from three 2-D points.

    Uses the shoelace formula; the absolute value makes the result
    independent of vertex winding order.

    Args:
        a: (x, y) coordinates of the first vertex.
        b: (x, y) coordinates of the second vertex.
        c: (x, y) coordinates of the third vertex.

    Returns:
        The non-negative triangle area (0 for collinear points).
    """
    return abs(a[0] * (b[1] - c[1]) + b[0] * (c[1] - a[1]) + c[0] * (a[1] - b[1])) / 2
def is_legacy_brief_response(brief_response, brief=None):
    """Detect a brief response created under the legacy (DOS 1) flow.

    Legacy responses live on the 'digital-outcomes-and-specialists'
    framework and carry 'essentialRequirements' answers without an
    'essentialRequirementsMet' flag, because evaluation happened at the
    end of the application instead of via form validation.

    :param brief: allows brief to be specified manually (for cases where
        brief can't automatically be extracted from the brief_response).
    """
    target_brief = brief or brief_response['brief']
    if target_brief['framework']['slug'] != 'digital-outcomes-and-specialists':
        return False
    return ('essentialRequirements' in brief_response
            and 'essentialRequirementsMet' not in brief_response)
def lung_concepts2labels(report_concepts):
    """
    Convert concepts extracted from lung reports to the pre-defined
    classification labels.

    Params:
        report_concepts (dict(list)): per-report extracted concepts.

    Returns: a dict mapping each report to {label: 0/1} where 1 marks
    the presence of the corresponding diagnosis; 'no_cancer' is set
    when no diagnosis label matched.
    """
    # Diagnosis phrase (lower-cased) -> classification label.
    diagnosis2label = {
        'small cell lung carcinoma': 'cancer_scc',
        'lung adenocarcinoma': 'cancer_nscc_adeno',
        'clear cell adenocarcinoma': 'cancer_nscc_adeno',
        'metastatic neoplasm': 'cancer_nscc_adeno',
        'non-small cell squamous lung carcinoma': 'cancer_nscc_squamous',
        'lung large cell carcinoma': 'cancer_nscc_large',
    }
    report_labels = {}
    for rid, rconcepts in report_concepts.items():
        labels = {'cancer_scc': 0, 'cancer_nscc_adeno': 0,
                  'cancer_nscc_squamous': 0, 'cancer_nscc_large': 0,
                  'no_cancer': 0}
        # Deduplicate diagnosis concept names, case-insensitively.
        for diag in {concept[1].lower() for concept in rconcepts['Diagnosis']}:
            if diag in diagnosis2label:
                labels[diagnosis2label[diag]] = 1
        # Fall back to 'no_cancer' when nothing matched.
        if not any(labels.values()):
            labels['no_cancer'] = 1
        report_labels[rid] = labels
    return report_labels
def get_default_extension_max_count(loan):
    """Return the default maximum number of loan extensions (unlimited)."""
    # Unlimited extensions are modelled as positive infinity.
    return float("inf")
def dd_duration_map_nb(record):
    """`map_func_nb` that returns a drawdown's total duration (end - start)."""
    start, end = record['start_idx'], record['end_idx']
    return end - start
def sizeof_fmt(num):
    """Render a byte count in human-readable form.

    The size is rounded to the closest integer value (for simplicity).

    >>> sizeof_fmt(100)
    '100 B'
    >>> sizeof_fmt(1024)
    '1 KiB'
    >>> sizeof_fmt(1024*1024 + 100)
    '1 MiB'
    """
    # Scale down by 1024 until the value fits the current unit; anything
    # still >= 1024 MiB falls through to GiB.
    for unit in ('B', 'KiB', 'MiB'):
        if abs(num) < 1024.0:
            break
        num /= 1024.0
    else:
        unit = 'GiB'
    return "%.0f %s" % (num, unit)
def identify_word_classes(tokens, word_classes):
    """
    Match word classes to the token list.

    :param list tokens: List of tokens
    :param dict word_classes: Dictionary of word lists to find and tag with the
        respective dictionary key; may be None (treated as empty)
    :return: Keys of all word classes matched by at least one token
    :rtype: set
    """
    # Docstring fix: the function returns a set, not a list as the
    # original :rtype: claimed. None now maps to an empty dict rather
    # than an empty list for type consistency (both iterate empty).
    if word_classes is None:
        word_classes = {}
    classes = set()
    for key in word_classes:
        for token in tokens:
            # Membership is tested against the word list stored under
            # each class key, with the token lower-cased.
            if token.lower() in word_classes[key]:
                classes.add(key)
    return classes
def intersect_1d(p1, p2, p3, p4):
    """Find the intersection, if any, of two line segments on the same line.

    Returns a range covering the overlap (inclusive), or [] when the
    segments are disjoint.
    """
    lo1, hi1 = min(p1, p2), max(p1, p2)
    lo2, hi2 = min(p3, p4), max(p3, p4)
    # Disjoint when one segment ends before the other begins.
    if hi1 < lo2 or hi2 < lo1:
        return []
    # Overlap runs from the larger low end to the smaller high end.
    return range(max(lo1, lo2), min(hi1, hi2) + 1)
def _smooth_curve(vals, beta=0.98):
"""From fastai"""
avg_val = 0
smoothed = []
for (i,v) in enumerate(vals):
avg_val = beta * avg_val + (1-beta) * v
smoothed.append(avg_val/(1-beta**(i+1)))
return smoothed |
def sents_to_vec(vec_dict, sentences):
    """Embed sentences word-by-word via a word-to-vector lookup.

    Args:
        vec_dict: a dict mapping words to vectors.
        sentences: A list of ConllSent objects (iterables of words).

    Returns:
        embeddings: a list (one entry per sentence) of word-vector lists.
    """
    return [[vec_dict[word] for word in sentence] for sentence in sentences]
def file_extension(filename):
    """
    Get the lower-cased file extension from *filename*.

    Returns the text after the last dot, or '' when the name contains
    no dot. (The original raised IndexError for dot-less names; the
    empty string is backward-compatible for all inputs that worked.)
    """
    _, sep, ext = filename.rpartition('.')
    return ext.lower() if sep else ''
def flax_fori_loop(start, stop, body_fun, init_val):  # pragma: no cover
    """
    Pure-Python drop-in for jax.lax.fori_loop, for debugging purposes.
    """
    carry = init_val
    for index in range(start, stop):
        carry = body_fun(index, carry)
    return carry
def get_overlapping_adjectives_raw_results(merged_results):
    """
    Collect adjectives that appear for both genders across the corpus,
    together with their occurrences. FORMAT - {'adjective': [male, female]}

    :param merged_results: dict with 'male' and 'female' adjective->count maps
    :return: dict of adjectives present in both maps
    """
    male_counts = merged_results['male']
    female_counts = merged_results['female']
    # Intersecting the dict key views is O(n) overall; the original
    # scanned a list per adjective (O(n*m)).
    return {
        adjective: [male_counts[adjective], female_counts[adjective]]
        for adjective in male_counts.keys() & female_counts.keys()
    }
def cmpVersion(ver1, ver2):
    """
    return -1:ver1<ver2, 0:ver1==ver2, 1:ver1>ver2
    version format: * - 1 1.2 1.2.3 1.2.3ah 2018-01-16 v3 4.0\(1h\ 8.3(0)sk(0.39) 1.00(aaxm.6)c0

    Each dot-separated item is split into a numeric prefix and an alpha
    suffix, compared numerically first, then suffix by character.
    NOTE(review): when every compared item is equal and both versions
    have the same number of parts (e.g. '1.02' vs '1.2'), this returns
    None rather than 0 — confirm callers handle that.
    """
    # '-' is treated as an alias for the '*' wildcard.
    if ver1 == "-":
        ver1 = "*"
    if ver2 == "-":
        ver2 = "*"
    #
    if ver1 == ver2:
        return 0
    #
    # '*' sorts above any concrete version.
    if ver2 == "*":
        return -1
    elif ver1 == "*":
        return 1
    ver1 = ver1.split(".")
    ver2 = ver2.split(".")
    for i in range(min(len(ver1), len(ver2))):
        # parse ver item: grow the digit prefix until a non-digit appears;
        # the for/else falls through when the whole item is numeric.
        # NOTE(review): an item that STARTS with a non-digit (e.g. 'v3')
        # reaches int('') here and raises ValueError — confirm inputs.
        for j in range(1, len(ver1[i])+1):
            if ver1[i][:j].isdigit() == False:
                v1 = int(ver1[i][:j-1])
                v1a = ver1[i][j-1:]
                break
        else:
            v1 = int(ver1[i])
            v1a = ""
        for j in range(1, len(ver2[i])+1):
            if ver2[i][:j].isdigit() == False:
                v2 = int(ver2[i][:j-1])
                v2a = ver2[i][j-1:]
                break
        else:
            v2 = int(ver2[i])
            v2a = ""
        # comp a ver item: numeric prefix decides first, then the suffix.
        if v1 == v2:
            if v1a == v2a:
                continue
            elif len(v1a) == len(v2a):
                # cmp alpha of a ver item, character by character.
                for j in range(len(v1a)):
                    if ord(v1a[j]) < ord(v2a[j]):
                        return -1
                    elif ord(v1a[j]) > ord(v2a[j]):
                        return 1
                    else:
                        continue
            elif len(v1a) < len(v2a):
                # Shorter suffix sorts lower (e.g. '3a' < '3ab').
                return -1
            else:
                return 1
        elif v1 < v2:
            return -1
        else:
            return 1
    # All shared items equal: more parts means a later version.
    if len(ver1) < len(ver2):
        return -1
    elif len(ver1) > len(ver2):
        return 1
    return None
def _process_childs(obj, base_path):
    """
    Recursively resolve imports of an object.

    Dict nodes are dispatched to a module-level handler named
    '_handle_<type>' (looked up via globals()) when one exists, then
    their values are recursed into; list elements are recursed
    element-wise; any other value is returned unchanged.
    """
    # A list of types for which recursion is already done by the handler
    recursive_handlers = ['import']
    if isinstance(obj, dict):
        original_obj_type = obj.get('type')
        # Call specific handler if it exists (convention: _handle_<type>
        # defined at module scope).
        handler = globals().get('_handle_{}'.format(original_obj_type))
        if handler is not None:
            obj = handler(obj, base_path)
        # Process all childs if the specific handler did not do it.
        # NOTE(review): the guard keys on the ORIGINAL type even though
        # the handler may have replaced `obj` entirely — confirm intended.
        if original_obj_type not in recursive_handlers:
            for k in obj:
                obj[k] = _process_childs(obj[k], base_path)
    elif isinstance(obj, list):
        # Process all childs (in place).
        for k in range(len(obj)):
            obj[k] = _process_childs(obj[k], base_path)
    return obj
def trigger_delay(session, Type='Real64', RepCap='', AttrID=1150006, buffsize=0, action=None):
    """[Trigger Delay (seconds)]
    Specifies the trigger delay, in seconds. The range of allowable values is -1*acquisition time to +1000 seconds. Reset value: 0 seconds.

    Returns the call parameters unchanged as a tuple. *action* defaults
    to ['Get', ''], created per call — the original used a shared
    mutable default list, which callers could accidentally mutate.
    """
    if action is None:
        action = ['Get', '']
    return session, Type, RepCap, AttrID, buffsize, action
def to_spans(sequence, lut, strict_iob2=False):
    """Turn a sequence of IOB chunks into single tokens.

    sequence: iterable of label indices.
    lut: lookup table mapping a label index to its IOB tag string.
    strict_iob2: when True, IOB2 format violations print a warning.

    Returns a set of 'TYPE@i@j@...' strings, one per chunk, giving the
    chunk type followed by the token positions it covers.
    """
    iobtype = 2 if strict_iob2 else 1
    chunks = []
    current = None
    for i, y in enumerate(sequence):
        label = lut[y]
        #if label.startswith('B-'):
        # Any tag that is neither 'I-*' nor 'O' starts a new chunk
        # (covers 'B-*' and bare type tags alike).
        if not label.startswith('I-') and not label == 'O':
            if current is not None:
                chunks.append('@'.join(current))
            current = [label.replace('B-', ''), '%d' % i ]
        elif label.startswith('I-'):
            if current is not None:
                base = label.replace('I-', '')
                if base == current[0]:
                    # Same type: extend the open chunk.
                    current.append('%d' % i)
                else:
                    # Type switch without a B- tag: close and restart.
                    chunks.append('@'.join(current))
                    if iobtype == 2:
                        print('Warning, type=IOB2, unexpected format ([%s] follows other tag type [%s] @ %d)' % (label, current[0], i))
                    current = [base, '%d' % i]
            else:
                # I- with no open chunk: treat as an implicit begin.
                current = [label.replace('I-', ''), '%d' % i]
                if iobtype == 2:
                    print('Warning, unexpected format (I before B @ %d) %s' % (i, label))
        else:
            # 'O' closes any open chunk.
            if current is not None:
                chunks.append('@'.join(current))
                current = None
    # Flush a chunk still open at the end of the sequence.
    if current is not None:
        chunks.append('@'.join(current))
    return set(chunks)
def next_permutation(arr):
    """Advance *arr* in place to its next lexicographic permutation.

    :param arr: mutable sequence of comparable items
    :return: True when a next permutation exists; False when *arr* was
        already the last (non-increasing) permutation.
    """
    # Locate the rightmost ascent: everything after it is non-increasing.
    pivot = len(arr) - 1
    while pivot > 0 and arr[pivot - 1] >= arr[pivot]:
        pivot -= 1
    if pivot <= 0:
        return False
    # Swap the pivot's predecessor with the rightmost strictly larger element.
    successor = len(arr) - 1
    while arr[successor] <= arr[pivot - 1]:
        successor -= 1
    arr[pivot - 1], arr[successor] = arr[successor], arr[pivot - 1]
    # Restore the suffix to ascending order by reversing it.
    arr[pivot:] = arr[:pivot - 1:-1]
    return True
def get_test_metadata(mod):
    """Create a dictionary of test metadata defined in an example.

    Every top-level constant prefixed with "_TEST_META_" is treated as
    metadata which may control test behavior. The prefix is stripped
    and the rest lowercased to form the key, e.g.
    '_TEST_META_SKIP_FULL_VALIDATION' -> 'skip_full_validation'.
    """
    prefix = '_TEST_META_'
    metadata = {}
    for name, value in vars(mod).items():
        if name.startswith(prefix):
            metadata[name[len(prefix):].lower()] = value
    return metadata
def get_field_model_type(field_type):
    """
    Convert the simple field type string to a field model type, ie
    float -> FloatField
    iso8601 -> DateTimeField
    unixtime_float_second -> DateTimeField
    unixtime_int_microsecond -> DateTimeField
    boolean -> BooleanField
    These are then used to look up the database data type from the connection.

    :param field_type: incoming simple field type (string)
    :return: the field model type
    """
    # Explicit overrides; everything else capitalizes to '<Type>Field'.
    special_cases = {
        'string': 'CharField',
        'iso8601': 'DateTimeField',
        'unixtime_float_second': 'DateTimeField',
        'unixtime_int_microsecond': 'DateTimeField',
        'nullboolean': 'NullBooleanField',
    }
    return special_cases.get(field_type, field_type.capitalize() + 'Field')
def _ptrs_having_accessor_methods( ptrs ):
"""some pointers we don't know to build accessor methods, we need
to filter thme out. this method return a list of
pointers that can have accessor methods"""
# pointer to pointer (double **, for example) is not yet handled
return [p for p in ptrs if not p.type[-2:] == '**'] |
def subdivide(verstr):
    """subdivide takes a version or release string and attempts to subdivide
    it into components to facilitate sorting.

    The string is split on '.' into major parts, and each major part is
    further divided into runs of digits (converted to int), runs of
    letters, and runs of other characters (typically '_').

    Example: '4.0.2b3' is represented as [[4], [0], [2, 'b', 3]].
    """
    from itertools import groupby

    def _char_class(ch):
        # Classify a character so groupby yields digit/alpha/other runs.
        if ch.isdigit():
            return 'digit'
        if ch.isalpha():
            return 'alpha'
        return 'other'

    parts = []
    for major_part in verstr.split('.'):
        minor_parts = []
        # groupby replaces the original hand-rolled index-walking loops.
        for char_class, run in groupby(major_part, key=_char_class):
            text = ''.join(run)
            minor_parts.append(int(text) if char_class == 'digit' else text)
        parts.append(minor_parts)
    return parts
def prod_iter(iterable, start=1, inplace=False):
    """Compute the product of a series of elements.

    This function works with any type that implements __mul__
    (or __imul__ if inplace is True). In particular, it works with
    tf.Tensor objects.

    Parameters
    ----------
    iterable : series of elements
    start : starting value, default=1
    inplace : bool, default=False
        Use in-place multiplication (*=) instead of rebinding.

    Returns
    -------
    prod_of_values : product of the elements
    """
    result = start
    if inplace:
        for element in iterable:
            result *= element
    else:
        for element in iterable:
            result = result * element
    return result
def min_max(a, b):
    """Return the two arguments as a (smaller, larger) tuple."""
    return (a, b) if a <= b else (b, a)
def pca_kwargs(n_dims):
    """Argument options for PCA.

    Args:
        n_dims (int): Number of dimensions to reduce the data down to.

    Returns:
        dict: Dictionary for kwargs for PCA ({'no_dims': n_dims}).
    """
    return dict(no_dims=n_dims)
def despecced(text):
    """
    Split a '<name>==<version>' spec into its parts.

    Args:
        text (str): Text of form <name>==<version>, or just <name>

    Returns:
        (str, str | None): Name, and version (None when absent or empty)
    """
    # Without a '==' (or for falsy input) the text passes through untouched.
    if not text or "==" not in text:
        return text, None
    name, _, spec = text.strip().partition("==")
    return name.strip(), spec.strip() or None
def averaged(list1):
    """
    Normalise rows of [name, oms-counts..., total].

    Divides each of the six counts (columns 1-6) by the total in
    column 7 (a zero total divides by 1 instead) and keeps the total
    as the last column.
    """
    list2 = []
    for entry in list1:
        # Guard against dividing by a zero total.
        denom = entry[7] if entry[7] != 0 else 1
        row = [entry[0]]
        row.extend(entry[idx] / denom for idx in range(1, 7))
        row.append(entry[7])
        list2.append(row)
    return list2
def calc_proximity(variant, tx, segment, annotation, segments_annotation):
    """Calculate the proximity to the exon boundary, exon1 and exon3 only.

    :param variant: interval object; .start and .end are read
    :param tx: transcript; only .strand ('+'/'-') is read
    :param segment: the annotated segment (.start/.end read) for the
        exon1/exon3 branches
    :param annotation: one of 'exon1', "5'UTR", 'exon3', "3'UTR";
        anything else yields 'NA'
    :param segments_annotation: iterable of (segment, annotation) pairs,
        searched for 'exon1'/'exon3' in the UTR branches
    :return: signed distance, or the string 'NA'
    """
    if annotation == 'exon1':
        # '+' strand measures from segment.start; '-' strand from segment.end.
        if tx.strand == '+':
            proximity = variant.start - segment.start
        else:
            proximity = segment.end - variant.end
    elif annotation == "5'UTR":
        # Measure against the exon1 segment from the annotation list
        # (raises StopIteration if no exon1 entry exists).
        exon1 = next(seg for seg, anno in segments_annotation if anno == 'exon1')
        if tx.strand == '+':
            proximity = variant.end - exon1.start
        else:
            proximity = exon1.end - variant.start
    elif annotation == 'exon3':
        if tx.strand == '+':
            proximity = segment.end - variant.end
        else:
            proximity = variant.start - segment.start
    elif annotation == "3'UTR":
        # Prefer exon3; fall back to exon1 when no exon3 is annotated.
        try:
            exon = next(seg for seg, anno in segments_annotation if anno == 'exon3')
        except StopIteration:
            exon = next(seg for seg, anno in segments_annotation if anno == 'exon1')
        if tx.strand == '+':
            proximity = exon.end - variant.start
        else:
            proximity = variant.end - exon.start
    else:
        proximity = 'NA'
    return proximity
def validate_integer_input(param, param_type):
    """Validate that *param* fits the given unsigned integer type.

    :param param: integer value to validate
    :param param_type: type name, e.g. 'uint8', 'uint16', ... 'uint256'
    :return: (True, '') when the value fits; otherwise (False, message)
    """
    # use hex()
    # address payable 160
    # address 256
    # Maximum value per unsigned type (2**bits - 1); replaces the
    # original hard-coded elif chain.
    max_values = {
        'uint8': 2 ** 8 - 1,
        'uint16': 2 ** 16 - 1,
        'uint32': 2 ** 32 - 1,
        'uint64': 2 ** 64 - 1,
        'uint128': 2 ** 128 - 1,
        'uint160': 2 ** 160 - 1,
        'uint256': 2 ** 256 - 1,
    }
    limit = max_values.get(param_type)
    # Bug fix: unsigned values must be non-negative; the original
    # accepted any negative input for every uint type.
    if limit is not None and 0 <= param <= limit:
        return True, ''
    return False, 'Not a valid {0} (Does not fit the current type for the function input)'.format(param_type)
def fmap(f, xs):
    """
    Apply function f to each value of iterable xs and return a list.

    In effect the same as `map`, only returning a list for convenience.
    """
    return [f(x) for x in xs]
def validate_tags(data: list) -> bool:
    """
    Validate input for the ``tags`` field.

    * It must be a list of strings
    * Each entry must be at least 3 characters
    * Entries may not end nor start with whitespace

    Args:
        data (list): The data to be validated.

    Returns:
        bool: Validation passed.

    Raises:
        ValueError: Validation failed.
    """
    if not isinstance(data, list):
        raise ValueError(f"Not a list ({data})")
    for tag in data:
        if not isinstance(tag, str):
            raise ValueError(f"All list entries must be str ({tag})")
        if len(tag) < 3:
            raise ValueError("Must be at least three characters")
        # Leading/trailing whitespace changes the value under strip().
        if tag != tag.strip():
            raise ValueError("May not start nor end with whitespace")
    return True
def is_switchport_default(existing):
    """Determine if a switchport has a default config based on mode.

    Args:
        existing (dict): existing switchport configuration from Ansible mod

    Returns:
        boolean: True if switchport has OOB Layer 2 config, i.e.
        vlan 1 and trunk all and mode is access
    """
    return (
        str(existing["access_vlan"]) == "1"
        and str(existing["native_vlan"]) == "1"
        and existing["trunk_vlans"] == "1-4094"
        and existing["mode"] == "access"
    )
def sumfeat(obj_list, feat):
    """Sum a (possibly dotted) attribute over a list of objects.

    Args:
        obj_list: a list of objects
        feat: a string naming the target attribute, e.g. 'weight'
            or a dotted path like 'box.area'

    Returns:
        the sum of the target feature over the given list of objects
        (0 for an empty list, matching the original accumulator start)
    """
    from operator import attrgetter

    # attrgetter replaces the original eval('obj.' + feat): same dotted
    # attribute lookup without executing arbitrary code.
    getter = attrgetter(feat)
    return sum(getter(obj) for obj in obj_list)
def left(str, length, pad=" "):
    """Return the left of *str* cut or padded (with *pad*) to *length*.

    Note: a multi-character *pad* can overshoot *length*; this mirrors
    the historical behaviour of repeating the pad string whole.
    """
    # Truncate when too long, otherwise append enough pad repetitions.
    return str[0:length] if length < len(str) else str + pad * (length - len(str))
def _mgrid_slice(n, shifted, normalized):
    """
    Util to generate a `slice` representing a 1d linspace
    as expected by `np.mgrid`.

    :param n: number of grid points.
    :param shifted: shifted by half of grid or not when n is even.
    :param normalized: normalize the grid in the range of (-1, 1) or not.
    :return: `slice` to be used by `np.mgrid`.
    """
    # An imaginary step (n * 1j) is np.mgrid's convention for "n points,
    # end inclusive" (linspace semantics).
    num_points = n * 1j
    start = -n // 2 + 1
    end = n // 2
    if shifted and n % 2 == 0:
        # Even n, shifted: move both bounds down by half a step.
        start -= 1 / 2
        end -= 1 / 2
    elif n % 2 == 0:
        # Even n, unshifted: move both bounds down by a full step.
        start -= 1
        end -= 1
    if normalized:
        # Compute the denominator for normalization
        denom = n / 2
        if shifted and n % 2 == 0:
            denom -= 1 / 2
        # Apply the normalization
        start /= denom
        end /= denom
    return slice(start, end, num_points)
def field_has_keyword(field, keywords):
    """For internal use. Determine whether the field has at least one
    of the given keywords."""
    return any(field.has_keyword(keyword) for keyword in keywords)
def print_leaf(counts):
    """A nicer way to print the predictions at a leaf.

    Maps each label's count to an integer-truncated percentage string.
    """
    total = sum(counts.values()) * 1.0
    return {label: f"{int(count / total * 100)}%" for label, count in counts.items()}
def bit2int(bits):
    """
    Return the integer encoded by a big-endian sequence of bits.
    """
    value = 0
    for bit in bits:
        # Horner's scheme: shift the accumulator left, append the bit.
        value = value * 2 + bit
    return value
def timeframe_int_to_str(timeframe: int) -> str:
    """
    Convert timeframe from integer to string.

    :param timeframe: minutes per candle (240)
    :return: string representation for API (4h)
    """
    minutes_per_hour = 60
    minutes_per_day = 1440
    if timeframe < minutes_per_hour:
        return f"{timeframe}m"
    if timeframe < minutes_per_day:
        return f"{int(timeframe / minutes_per_hour)}h"
    return f"{int(timeframe / minutes_per_day)}d"
def _espeak_code_corrections(espeak_code):
    """
    Manually curated list of replacements for specific espeak-ng encodings
    which espeak-ng does not process as expected otherwise.

    :param espeak_code: a single espeak_code code string
    :return: the espeak_code code with replacements in case
    """
    # NOTE: the replacement order is significant — 'V"' must be handled
    # before the bare 'V' rule, '&' -> 'E' runs before 'E~' -> 'W', and
    # '#' -> ' ' feeds the final ' ' -> '||' rewrite.
    return espeak_code \
        .replace('Y', 'Y:') \
        .replace('V"', '@r') \
        .replace('V', '@') \
        .replace('#', ' ') \
        .replace('&', 'E') \
        .replace('<trl>', '') \
        .replace('<o>', '') \
        .replace('.', '') \
        .replace('E~', 'W') \
        .replace(' ', '||')
def get_task(benchmark, env_id):
    """Get a task by env_id. Return None if the benchmark doesn't have the env."""
    for task in benchmark['tasks']:
        if task['env_id'] == env_id:
            return task
    return None
def diff(b: list, d: list) -> list:
    """
    Return the sorted symmetric difference of two number lists.

    >>> diff([1, 2, 3, 6, 4, 7], [1, 2, 36, 96, 78, 99])
    [3, 4, 6, 7, 36, 78, 96, 99]
    """
    # ^ collects elements present in exactly one of the two sets.
    return sorted(set(b) ^ set(d))
def convert_time(t):
    """
    Convert a time in seconds to a minutes string.

    Under a minute yields 'Ns'; otherwise 'M:SS' with zero-padded
    seconds. Strings are passed through unchanged.

    :param t: int
    :return: string
    """
    if type(t) == str:
        return t
    if int(t) < 60:
        return f"{t}s"
    minutes, seconds = divmod(t, 60)
    # Zero-pad single-digit seconds (e.g. 2:05).
    return f"{minutes}:{seconds:02d}"
def _expand_string_into_dict(string, value, sep='.'):
"""
Converts a encapsulated string-dict to a sequence of dict. Use separator (default '.') to split the string.
Example:
string1.string2.stringN : value -> {string1: {string2: {string3: value}}
:param string: The encapsulated "string-dict"
:param value: Value associated to the last field of the "string-dict"
:param sep: Separator character. Default: '.'
:rtype: dict
"""
if sep not in string:
return {string: value}
key, val = string.split(sep, 1)
return {key: _expand_string_into_dict(val, value)} |
def _uniq_stable(elems):
"""uniq_stable(elems) -> list
Return from an iterable, a list of all the unique elements in the input,
maintaining the order in which they first appear.
From ipython_genutils.data
"""
seen = set()
return [x for x in elems if x not in seen and not seen.add(x)] |
def _to_ffmpeg_time(n):
""" Format number of seconds to time expected by FFMPEG.
:param n: Time in seconds to format.
:returns: Formatted time in FFMPEG format.
"""
m, s = divmod(n, 60)
h, m = divmod(m, 60)
return '%d:%02d:%09.6f' % (h, m, s) |
def internal_params_1D(
    pixels_per_clock: int,
    window_width: int,
    image_size: int,
    output_stride: int,
    origin: int
):
    """Calculate "internal" parameters of linebuffer based on user parameters.

    This includes the window_count (number of total windows outputted),
    the parallelism (width of the output bus in number of windows),
    and the valid_count (number of times valid should be asserted).
    Return as tuple (window_count, parallelism, valid_count).
    """
    # Total number of windows across the image.
    window_count = image_size // output_stride
    # Windows emitted per clock; clamp to 1 when px/clk < stride.
    parallelism = max(pixels_per_clock // output_stride, 1)
    if pixels_per_clock >= output_stride:
        assert parallelism * output_stride == pixels_per_clock, \
            "Expected integer throughput (stride evenly dividing px/clk)."
    # Number of valid assertions needed to emit every window.
    valid_count = window_count // parallelism
    assert valid_count * parallelism == window_count, \
        "Expected window count (img/stride) to be divisible by parallelism " \
        "(px/clk / stride)"
    return window_count, parallelism, valid_count
def p10(docs, n=10):
    """Precision at N: fraction of the top *n* docs marked relevant."""
    relevant = sum(1 for doc in docs[:n] if doc['relevant'] == "true")
    return relevant / n
def is_int(s):
    """
    Decide whether a string represents an integer.

    :param s: input string
    :return: boolean result
    """
    try:
        int(s)
    except ValueError:
        return False
    return True
def snake2title(var_name):
    """Turn a snake_case variable name into a Title Case phrase."""
    return var_name.replace('_', ' ').title()
def _get_dict_value_with_default(dict_object, key, default):
"""
Returns default if the dict doesn't have the key or if the value is Falsey.
Otherwise, returns the dict's value for the key.
"""
value = dict_object.get(key, None)
return value if value else default |
def is_json_filename(filename: str) -> bool:
    """Check whether filename represents a json file.

    Args:
        filename:
            UNIX filename of .json file,
            for example - '/Users/king/urls.json'.

    Returns:
        Boolean, True if the name ends with the '.json' extension,
        otherwise False.

    Example:
        >>> is_json_filename(filename='/Users/king/urls.json')
        >>> True
        >>> is_json_filename(filename='/Users/king/urls.incorrectext')
        >>> False
    """
    return filename.endswith('.json')
def tau_model(y, t, io, tau, p_nom):
    """First-order lag ODE using a time constant: dy/dt = (p_nom*io - y) / tau."""
    return (p_nom * io - y) / tau
def per_token_accuracy(gold_seq, pred_seq):
    """Return the per-token accuracy comparing two token lists (recall).

    Inputs:
        gold_seq (list of str): A list of gold tokens.
        pred_seq (list of str): A list of predicted tokens.

    Returns:
        float accuracy; gold positions beyond len(pred_seq) count as wrong.
    """
    # zip truncates at the shorter sequence, matching the original
    # i < len(pred_seq) bounds check.
    matches = sum(1 for gold, pred in zip(gold_seq, pred_seq) if gold == pred)
    return float(matches) / len(gold_seq)
def convertFloat(s):
    """
    Parse *s* as a float. Blank strings give 0.0; unparsable strings
    are returned unchanged.

    :param s: string representing a number
    """
    if not s.strip():
        return 0.
    try:
        return float(s)
    except ValueError:
        return s
def to_pos_strand(sstart, send, slen):
    """
    Flip blast position info scaled on the negative strand to the
    positive strand; positions already positive pass through.

    Returns (start, end, strand) with strand '+' or '-'.
    """
    if sstart <= send:
        # Already on the positive strand: nothing to convert.
        return sstart, send, '+'
    return slen - sstart, slen - send, '-'
def sc(txt, shft_amt=3, isDeq=False):
    """Caesar-shift the letters of *txt* by *shft_amt* positions.

    Non-letters pass through unchanged and case is preserved. Set
    isDeq=True to shift backwards (decipher).

    Bug fix: the original bit-twiddling (`(ord & 0x1F) + shift) % 26`
    then OR-ing the case bits) mapped any shift landing on position 0
    to '`'/'@' instead of 'z'/'Z' — e.g. sc('w') returned '`'. Proper
    modular arithmetic on the alphabet index fixes the wrap-around.
    """
    if isDeq:
        shft_amt = -shft_amt
    result = ""
    for c in txt:
        if c.isalpha():
            base = ord('A') if c.isupper() else ord('a')
            c = chr(base + (ord(c) - base + shft_amt) % 26)
        result += c
    return result
def normalizeMessage(message):
    """
    Un-tab and strip an informational message (given as utf-8 bytes).

    Tab-to-space replacement and stripping helps with repeatable doctests.
    """
    lines = message.decode('utf-8').split('\n')
    return '\n'.join(line.strip().replace('\t', ' ') for line in lines)
def rotate_until_not_in_candidates(t, v, direction, candidates):
    """ rotate around a vertex, starting from triangle t
    until a triangle is found that is not in the candidates list
    this triangle is then returned

    :param t: starting triangle; .vertices and .neighbours are read
    :param v: vertex to rotate around
    :param direction: callable mapping a vertex's side index to the
        neighbour index to step to next
    :param candidates: collection of triangles to skip over
    :return: the first triangle not in candidates; returns None
        implicitly when the walk terminates (t becomes None or a
        triangle is revisited) without finding one
    """
    # `seen` guards against cycling forever around the vertex.
    seen = set()
    while t is not None and t not in seen:
        seen.add(t)
        side = t.vertices.index(v)
        t = t.neighbours[direction(side)]
        # Note: this also accepts t is None when None is not a candidate.
        if t not in candidates:
            return t
def parser_linkage_Descriptor(data,i,length,end):
    """\
    parser_linkage_Descriptor(data,i,length,end) -> dict(parsed descriptor elements).

    This descriptor is not parsed at the moment; the raw payload (the
    bytes after the 2-byte descriptor header up to *end*) is returned:
    { "type": "linkage", "contents" : unparsed_descriptor_contents }

    (Defined in ETSI EN 300 468 specification)
    """
    payload = data[i + 2:end]
    return {"type": "linkage", "contents": payload}
def none_or_string_flag(argument_value):
    """Map the strings 'none'/'null' (any case, surrounding whitespace
    ignored) to None; return every other string unchanged."""
    normalized = argument_value.strip().lower()
    if normalized in ("none", "null"):
        return None
    return argument_value
def get_nested_compat_files(compat_api_versions):
    """Return __init__.py file paths for files under nested compat modules.

    A nested compat module contains two __init__.py files:
    1. compat/vN/compat/vK/__init__.py
    2. compat/vN/compat/vK/compat/__init__.py

    Args:
        compat_api_versions: list of compat versions.

    Returns:
        List of __init__.py file paths to include under nested compat modules.
    """
    templates = ("compat/v%d/compat/v%d/__init__.py",
                 "compat/v%d/compat/v%d/compat/__init__.py")
    files = []
    for outer in compat_api_versions:
        # For each outer version, emit all inner versions per template,
        # template by template (matches the original ordering).
        for template in templates:
            files.extend(template % (outer, inner)
                         for inner in compat_api_versions)
    return files
def remove_keys(dic, keys=()):
    """
    Remove a list of keys from a dict and return a new one.

    Parameters
    ----------
    dic : dict
        The input dictionary.
    keys : list of str or str
        Keys to remove; a single string is treated as one key.
        (Default changed from a shared mutable [] to an immutable ().)

    Returns
    -------
    dict
        Dictionary with removed keys.

    Raises
    ------
    KeyError
        If any key is missing from *dic*.
    """
    dic_copy = dict(dic)
    if isinstance(keys, str):
        keys = [keys]
    for key in keys:
        del dic_copy[key]
    return dic_copy
def square_to_condensed(i: int, j: int, n: int) -> int:
    """Convert a square matrix position (i, j) to a condensed distance matrix index.

    Args:
        i: Index i.
        j: Index j.
        n: The dimension of the matrix.

    See Also:
        https://stackoverflow.com/questions/13079563/how-does-condensed-distance-matrix-work-pdist

    Returns:
        Condensed index as an int. (The original used true division,
        returning a float; indices must be integers. j*(j+1) is always
        even, so floor division is exact.)
    """
    assert i != j, 'No diagonal elements in condensed matrix'
    if i < j:
        i, j = j, i
    return n * j - j * (j + 1) // 2 + i - 1 - j
def buprint(str):
    """Wrap <str> in ANSI bold + underline escape sequences."""
    return f"\033[1m\033[4m{str}\033[0m"
def isqrt_sumonly(n: int) -> int:
    """Integer square root of a non-negative int, using addition only.

    >>> isqrt_sumonly(9)
    3
    >>> isqrt_sumonly(15)
    3
    """
    def mul(a, b):
        # Multiplication via repeated addition (the "sum only" gimmick).
        result = 0
        for _ in range(b):
            result += a
        return result

    # Bug fix: the original condition `mul(p, p) <= n < mul(p+1, p+1)`
    # stopped as soon as n reached (p+1)**2, so e.g. isqrt(9) returned 1
    # and isqrt(2) returned 2. Grow p while (p+1)**2 still fits in n.
    p = 0
    while mul(p + 1, p + 1) <= n:
        p += 1
    return p
def custom_mapping(cls):
    """
    Method to provide custom mapping to parser.

    Configuration of this method in manifest.json should be:
        "uploader" : {
            "mapping" : "mapping:custom_mapping"
        }

    This is a class method but @classmethod decorator is not necessary.
    See https://docs.biothings.io/en/latest/tutorial/studio_guide.html#manifest-based-data-plugins
    """
    def lowercase_keyword():
        # Property block shared by all lower-cased keyword fields.
        return {
            "normalizer": "keyword_lowercase_normalizer",
            "type": "keyword"
        }

    subject_props = {
        field: lowercase_keyword()
        for field in ("id", "name", "ENSEMBL", "type")
    }

    # Build object properties preserving the original field order:
    # id, name, then the remaining keyword identifiers.
    object_props = {
        "id": lowercase_keyword(),
        "name": {"type": "text"},
    }
    for field in ("CHEBI", "type", "CHEMBL.COMPOUND",
                  "PUBCHEM", "HMS_LINCS_ID", "CID"):
        object_props[field] = lowercase_keyword()

    return {
        "subject": {"properties": subject_props},
        "association": {
            "properties": {
                "edge_label": {"type": "text"}
            }
        },
        "object": {"properties": object_props},
    }
def column_name(axis_name):
    """
    Return the column names for the dataset.

    One '<axis>_<feature>' column per (axis, feature) pair, plus a
    trailing 'label' column.

    Parameters:
        axis_name (list): list of axis names

    Returns:
        list: the column names
    """
    features = ['mean', 'count above mean', 'count below mean', 'mean absolute diff', 'sum absolute diff', 'median',
                'sum', 'absolute energy', 'standard deviation', 'variation coefficient', 'variance', 'skewness',
                'kurtosis', 'number peaks', 'maximum', 'minimum', '25quantile', '75quantile', 'Complexity-Invariant Distance ']
    col_names = [f"{axis}_{feature}" for axis in axis_name for feature in features]
    col_names.append('label')
    return col_names
def calc_fpr(fp: float, tn: float) -> float:
    """
    Compute the false positive rate fp / (fp + tn).

    :param fp: false positive or false alarm
    :param tn: true negative or correct rejection
    :return: false positive rate, or 0 when fp + tn is zero
    """
    denominator = fp + tn
    if denominator == 0:
        # No negatives observed at all: define the rate as 0.
        return 0
    return fp / denominator
def _convert_bounding_box_to_geo_json(bbox):
"""Converts a lng/lat bounding box to a GeoJSON string."""
lng_min = bbox[0]
lat_min = bbox[1]
lng_max = bbox[2]
lat_max = bbox[3]
return ('{{"type":"Polygon","coordinates":'
'[[[{0},{1}],[{2},{1}],[{2},{3}],[{0},{3}],[{0},{1}]]]}}'.format(
lng_min, lat_min, lng_max, lat_max)) |
def _fmt(string: str) -> bytes:
"""Return a string that escapes 'C printf' side effects."""
return string.encode("utf-8").replace(b"%", b"%%") |
def isValidWord(word, hand, wordList):
    """
    Returns True if word is in the wordList and is entirely
    composed of letters in the hand. Otherwise, returns False.
    Does not mutate hand or wordList.

    word: string
    hand: dictionary (string -> int)
    wordList: list of lowercase strings
    """
    if word not in wordList:
        return False
    # Work on a copy so the caller's hand is untouched.
    remaining = hand.copy()
    for letter in word:
        # Each use of a letter consumes one copy from the hand; the original
        # code had an unreachable inner `<= 0` re-check that is dropped here.
        if remaining.get(letter, 0) <= 0:
            return False
        remaining[letter] -= 1
    return True
def rotated_array_search(input_list, number):
    """Find the index of a given number in a sorted but rotated list.

    Uses a modified binary search.

    Args:
        input_list: A list of ints representing the list to search in
        number: An int representing the value to search for
    Returns:
        index: An int representing the index of the number in the list, -1 if
        the number is not found
    """
    # Base case: nothing left to search.
    if len(input_list) == 0:
        return -1
    # Probe the middle of the current slice.
    guess = len(input_list) // 2
    # Recurse into the LEFT half when the target must lie there:
    #  - first condition: the left half is sorted and brackets the target;
    #  - second condition: the rotation point is inside the left half
    #    (first element > middle element) and the target is above the first
    #    element, so it sits before the wrap-around.
    # NOTE: each recursive call slices the list, so this is O(n) extra
    # memory per level rather than an in-place index-based search.
    if (input_list[0] <= number < input_list[guess]) or (
        number > input_list[0] > input_list[guess]
    ):
        # The recursive result is already in this slice's coordinates
        # (left half starts at index 0); -1 propagates unchanged.
        guess = rotated_array_search(input_list[:guess], number)
    elif input_list[guess] != number:
        # Otherwise search the RIGHT half and translate the returned index
        # back into this slice's coordinates (offset by guess + 1).
        added = rotated_array_search(input_list[guess + 1 :], number)
        if added == -1:
            return -1
        guess += added + 1
    # Falls through directly when input_list[guess] == number.
    return guess
def rightOfDeciSeperatorToDeci(a):
    """Convert the binary digits right of a decimal separator to a decimal fraction.

    Digit k (0-based) contributes digit * 2**-(k+1), e.g. '11' -> 0.75.
    """
    return sum(int(digit) * 2 ** -(position + 1) for position, digit in enumerate(a))
def adjust_config(config, mode):  # pylint: disable=unused-argument
    """Hot-patch the experiment configuration for a specific mode.

    Intentionally a pass-through. In particular, do NOT shrink the batch
    size for eval modes: results are better when batchnorm always runs in
    'training' mode and normalizes the observed distribution, which
    requires the batch size to stay unchanged.
    """
    return config
def append_querystring(next_url: str, **kwargs):
    """
    Append each key/value pair as a querystring parameter to next_url.

    The first parameter is joined with '?' unless the URL already has a
    query, after which '&' is used.
    Ex. append_querystring("game/?t=47326443",**{'next_url':'/registration/',session_id="43276832"})
    """
    if not next_url:
        # Empty/None URL: nothing to append to.
        return next_url
    for key, value in kwargs.items():
        joiner = '&' if '?' in next_url else '?'
        next_url = '{}{}{}={}'.format(next_url, joiner, key, value)
    return next_url
def topoheaderwrite(topoheader, outputfile, closefile=True):
    """
    Open an ascii topography data file and write the header from the
    dictionary ``topoheader``.

    The header has one "<value> <key>" line per field, in this order:
        int   ncols
        int   nrows
        float xll
        float yll
        float cellsize
        float nodata_value
    If closefile is True the file is closed; otherwise the open file
    object is returned so the caller can keep writing.
    """
    fout = open(outputfile, "w")
    # (key, converter) pairs in the required header order; None keeps the
    # raw value, float forces float formatting for the coordinate fields.
    header_spec = (
        ("ncols", None),
        ("nrows", None),
        ("xll", float),
        ("yll", float),
        ("cellsize", float),
        ("nodata_value", None),
    )
    for key, convert in header_spec:
        value = topoheader[key]
        if convert is not None:
            value = convert(value)
        fout.write("%s %s\n" % (value, key))
    if closefile:
        fout.close()
        return None
    return fout
# end headerwriter=========================================================================
def flat_keys(iterable, max_iterations=1000):
    """Return the list of keys in *iterable* and nested dicts/lists.

    Traversal is breadth-first. It stops early (returning whatever keys
    were collected so far) after ``max_iterations`` visits, and objects
    already seen are skipped so cyclic structures cannot loop forever.
    """
    keys = []
    queue = [iterable]
    cursor = 0
    visited_ids = set()
    iteration = 0
    while cursor < len(queue):
        iteration += 1
        # Guard against huge or deeply nested structures.
        if iteration >= max_iterations:
            break
        current = queue[cursor]
        cursor += 1
        # Cycle protection: identity-based, since containers are unhashable.
        if id(current) in visited_ids:
            continue
        visited_ids.add(id(current))
        if isinstance(current, dict):
            # Collect this dict's keys and queue its values for later.
            queue.extend(current.values())
            keys.extend(current.keys())
        elif isinstance(current, list):
            queue.extend(current)
    return keys
def get_time_string( totalSeconds ):
    """
    Converts seconds to a "N days N hours N minutes N seconds" string,
    omitting zero-valued units (so 0 yields an empty string).

    :param totalSeconds: int, or anything coercible to int; inputs that
        cannot be converted are returned unchanged
    :return: str
    """
    try:
        totalSeconds = int( totalSeconds )
    except (ValueError, TypeError):
        # Not convertible (e.g. "abc" or None): hand the input back unchanged.
        return totalSeconds
    # we work in seconds
    minute = 60
    hour = 60 * minute
    day = 24 * hour
    if totalSeconds >= 0:
        # divmod replaces the original repeated-subtraction while loops.
        dayCount, remainder = divmod( totalSeconds, day )
        hourCount, remainder = divmod( remainder, hour )
        minuteCount, secondCount = divmod( remainder, minute )
    else:
        # Negative input: mirror the historical behaviour (all counting
        # loops skipped, so everything stays in the seconds bucket).
        dayCount = hourCount = minuteCount = 0
        secondCount = totalSeconds
    def addUnit( dateStr, value, unit ):
        """Append "<value> <unit>[s]" to dateStr, skipping zero values."""
        if value == 0:
            return dateStr
        if value > 1:
            unit += 's'
        # first unit we add, don't need a space before it
        if dateStr == '':
            return '{} {}'.format( value, unit )
        return '{} {} {}'.format( dateStr, value, unit )
    time = addUnit( '', dayCount, 'day' )
    time = addUnit( time, hourCount, 'hour' )
    time = addUnit( time, minuteCount, 'minute' )
    time = addUnit( time, secondCount, 'second' )
    return time
def v3_multimax(iterable):
    """Return a list of all maximum values.

    Bonus 1: returns an empty list when given an empty iterable.

    Fix: the iterable is materialized once up front, so one-shot
    iterators (generators) work correctly — the original iterated the
    argument twice and silently returned [] for them.
    """
    items = list(iterable)
    if not items:
        return []
    max_item = max(items)
    return [item for item in items if item == max_item]
def get_image_path(pokemon_index):
    """Return the image path for a pokemon index, zero-padded to 3 digits
    (e.g. 7 -> "static/img/007.png")."""
    return f"static/img/{pokemon_index:03d}.png"
def shift_letter(letter, shift):
    """Shift letter by shift number, wrapping around the alphabet.

    Lowercase behaviour is unchanged from the original; uppercase letters
    are now shifted within the uppercase alphabet (the original produced
    garbage for them because it always used ord('a') as the base).
    Negative shifts rotate backwards.
    """
    num_letters_in_alphabet = 26
    base = ord('A') if letter.isupper() else ord('a')
    return chr(base + (ord(letter) - base + shift) % num_letters_in_alphabet)
def RemoveElements(x: list, y: list) -> list:
    """Remove elements (y) from a list (x).

    Mutates *x* in place, removing the FIRST occurrence of each element
    of *y*, and returns the same list. Raises ValueError if any element
    of *y* is not present in *x*.
    """
    for unwanted in y:
        x.remove(unwanted)
    return x
def h(level: int, text: str) -> str:
    """Wrap text into an HTML `h` tag.

    Parameters
    ----------
    level : int
        HTML `h` level tag
    text : str
        Contents for `h` level tag

    Returns
    -------
    str
        HTML code as string
    """
    tag = f"h{level}"
    return f"<{tag}>{text}</{tag}>"
def pick_alternates(alist):
    """
    Split alternating [count, item, count, item, ...] entries into two lists.

    Given a list like ["1", "hey", "2", "come"] returns
    {"count": [1.0, 2.0], "items": ["hey", "come"]}.

    Even-indexed entries become floats under "count"; odd-indexed entries
    are returned as-is under "items". Extended slices replace the two
    original index loops.
    """
    return {
        "count": [float(value) for value in alist[0::2]],
        "items": list(alist[1::2]),
    }
def shortSizeToInt(size_str):
    """ Go from 16kB -> 16384.

    Supports the suffixes kB, MB, GB and plain B; raises ValueError for
    anything else.
    """
    if size_str.endswith("kB"):
        return int(size_str[:-2]) * 1024
    elif size_str.endswith("MB"):
        return int(size_str[:-2]) * 1024 * 1024
    elif size_str.endswith("GB"):
        return int(size_str[:-2]) * 1024 * 1024 * 1024
    elif size_str.endswith("B"):
        # Plain bytes: strip only the single 'B' suffix character.
        # (Bug fix: the original stripped two characters, so "16B" -> 1.)
        return int(size_str[:-1])
    else:
        raise ValueError("Size \"%s\" cannot be converted into bytes." % size_str)
def first(s):
    """ Return the first whitespace-separated word of *s*, or '' if none. """
    # str.split() with no arguments already ignores leading whitespace.
    words = s.split()
    return words[0] if words else ''
def get_case_insensitive(dictionary, key):
    """Get `key` in `dictionary` case-insensitively.

    Returns the value of the first key that matches ignoring case,
    or None when no key matches.
    """
    target = key.lower()
    for candidate, value in dictionary.items():
        if candidate.lower() == target:
            return value
    return None
def decode_link_code(p_code):
    """
    LinkCode - 0=PLM is Responder, 1=PLM is Controller, FF=Deleted
    """
    known_codes = {
        0: 'PLM=Responder',
        1: 'PLM=Controller',
        0xFF: 'Link Deleted',
    }
    return known_codes.get(p_code, 'Unknown code {}'.format(p_code))
def geometricMean(values):
    """Calculates the geometric mean.

    Geometric Mean is a type of average which indicates a typical value
    in a set of numbers by using the product of values in the set.
    Returns NaN (Not A Number) if passed an empty sequence.

    Args:
        values (list[float]): A Sequence of numerical values. Accepts
            both Integers and Floats. The sequence may not contain None
            type values. However, passing a None type object instead of
            a Sequence of numerical values will return nan.

    Returns:
        float: The geometric mean, or nan if the input was empty or
        null. Because this uses logs to compute the geometric mean,
        will return nan if any entries are negative.
    """
    # Local import keeps this function drop-in for modules that do not
    # already import math at the top.
    import math

    # Empty sequence or None input -> nan, per the documented contract.
    if not values:
        return float('nan')
    # Logs of negative numbers are undefined -> nan, per the docstring.
    if any(v < 0 for v in values):
        return float('nan')
    # Any zero drives the product (and hence the mean) to exactly zero,
    # and math.log(0) would raise, so handle it explicitly.
    if any(v == 0 for v in values):
        return 0.0
    # exp(mean(log(v))) == nth root of the product; fsum keeps the
    # log-sum numerically accurate.
    return math.exp(math.fsum(math.log(v) for v in values) / len(values))
def cross(A, B):
    """
    Cross product of elements in A and elements in B.

    Concatenates every element of A with every element of B, in A-major
    order.

    Returns:
        A list containing the cross product of A and B
    """
    return [left + right for left in A for right in B]
def create_tranform_config(training_config):
    """
    Transform config specifies input parameters for the transform job.

    The training job name is reused as both the model name and the
    corresponding transform job name.
    """
    job_name = training_config['TrainingJobName']
    return {
        'TransformJobName': job_name,
        'ModelName': job_name,
        'S3OutputPath': training_config['S3OutputPath'],
    }
def get_list_of_types(stats):
    """
    Helper function to get list of types registered in stats.

    Args:
        stats (dict): dictionary whose keys expose a ``.type`` attribute
    Returns:
        list: unique types in order of first appearance
    """
    type_list = []
    # Iterate keys directly: the original looped over .items() and
    # discarded every value.
    for key in stats:
        if key.type not in type_list:
            type_list.append(key.type)
    return type_list
def cmp_lines(path_1, path_2):
    """Compare two text files line by line.

    Both files are opened in text mode, so platform line-ending
    translation applies. Returns True when every line matches (including
    reaching EOF together), False otherwise.
    """
    with open(path_1, 'r') as file_1, open(path_2, 'r') as file_2:
        while True:
            line_1 = file_1.readline()
            line_2 = file_2.readline()
            if line_1 != line_2:
                return False
            if not line_1:
                # Both files hit EOF at the same point with no mismatch.
                return True
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.