content stringlengths 42 6.51k |
|---|
def _correction(v, N):
"""protects input to ltqnorm"""
# used to protect input to ltqnorm
# v is assumed to be a probability between 0 and 1
if 0 < v < 1:
return v
elif N is None or v < 0 or v >1:
raise ValueError('v should be >= 0 and <= 1')
# at this point we know v must be 0 or 1 and N is not None
return (1/(2*N), 1-1/(2*N))[int(v)] |
def get_corners(cont):
    """Return the closed corner path of a box as complex x+iy points.

    ``cont`` holds (x0, y0, x1, y1); the result traces the four corners
    and repeats the first one so the polygon closes when plotted.
    """
    x0, y0, x1, y1 = cont[0], cont[1], cont[2], cont[3]
    return [x0 + 1j * y0, x1 + 1j * y0, x1 + 1j * y1,
            x0 + 1j * y1, x0 + 1j * y0]
def bit_list_to_int(bitList):
    """
    In input list LSB first, in result little endian ([0, 1] -> 0b10)
    """
    # Each entry contributes its low bit shifted to its position.
    return sum((bit & 0x1) << position for position, bit in enumerate(bitList))
def extract_yaourt_pkgs_to_update(json: dict):
    """Extract the list of yaourt's packages from the parsed json dict.

    Returns None when the 'yaourt' key is absent.
    """
    return json.get('yaourt', None)
def payoff_bull_spread(underlying, lower_strike, upper_strike, gearing=1.0):
    r"""payoff_bull_spread
    Buy call option with lower_strike :math:`K_{\mathrm{lower}}`
    and sell call option with upper_strike :math:`K_{\mathrm{upper}}`.
    As the name denotes, lower_strike is lower than upper_strike.
    Payoff formula is as follows:
    .. math::
        g(\max(S - K_{\mathrm{lower}}, 0) - \min(S - K_{\mathrm{upper}}, 0))
        = g\min(K_{\mathrm{upper}}, \max(S - K_{\mathrm{lower}}, 0))
    where :math:`S` is underlying, :math:`g` is gearing.
    :param float underlying:
    :param float lower_strike:
    :param float upper_strike:
    :param float gearing: coefficient of this option. Default value is 1.
    :return: payoff of bull spread option.
        If lower_strike >= upper_strike, then return 0.
    :rtype: float
    """
    if lower_strike >= upper_strike:
        # Degenerate spread: defined to pay nothing.
        return 0.0
    return gearing * min(upper_strike, max(underlying - lower_strike, 0))
def _parse_mods(mods):
"""
Parse modules.
"""
if isinstance(mods, str):
mods = [item.strip() for item in mods.split(",") if item.strip()]
return mods |
def previous_key(tuple_of_tuples, key):
    """Processes a tuple of 2-element tuples and returns the key which comes
    before the given key.

    Returns None when the key is first (there is no predecessor) or when the
    key is not present at all.
    """
    for i, pair in enumerate(tuple_of_tuples):
        if pair[0] == key:
            # The original indexed [i - 1] unconditionally, which for i == 0
            # silently wrapped to the LAST element (negative indexing never
            # raises IndexError, so the old `except IndexError` was dead code).
            return tuple_of_tuples[i - 1][0] if i > 0 else None
    return None
def get_forward_content(paragraph):
    """Take the first 80 characters of *paragraph*, then keep going until
    (and including) the first sentence terminator '.', '!' or '?'.
    """
    collected = []
    for index, char in enumerate(paragraph):
        collected.append(char)
        # Only start looking for a sentence end once past the 80-char head.
        if index >= 80 and char in '.!?':
            break
    return ''.join(collected)
def generate_command(tup, etup=None):
    """Build a painting-robot command string.

    args:
        tup - Coordinate tuple.
              format: (r1, c1, r2, c2)
        etup - Erase cell tuple.
               format: (r, c)

    Returns one of 'ERASE_CELL r c', 'PAINT_SQUARE r c s' or
    'PAINT_LINE r1 c1 r2 c2'.
    """
    if etup is not None:
        # erase a single cell.
        return 'ERASE_CELL ' + str(etup[0]) + ' ' + str(etup[1])
    if (tup[0] == tup[2]) and (tup[1] == tup[3]):
        # draw a single cell.
        return 'PAINT_SQUARE ' + str(tup[0]) + ' ' + str(tup[1]) + ' 0'
    elif (tup[0] == tup[2]) or (tup[1] == tup[3]):
        # draw a line.
        return 'PAINT_LINE %s %s %s %s' % (str(tup[0]), str(tup[1]),
                                           str(tup[2]), str(tup[3]))
    else:
        # draw a square. Use floor division: under Python 3 the original
        # true division produced float artifacts like 'PAINT_SQUARE 2.0 2.0 1.0'.
        s = tup[2] - tup[0] + 1
        s = (s - 1) // 2
        c = ((tup[0] + tup[2]) // 2, (tup[1] + tup[3]) // 2)
        return 'PAINT_SQUARE %s %s %s' % (str(c[0]), str(c[1]), s)
def split_envvar(envvar):
    """Splits str formatted as `key=val` into [key, val]
    if string is missing an `=val` it will return [key, None]
    """
    key, separator, val = envvar.partition('=')
    # No separator found -> value slot is None (not empty string).
    return [key, val if separator else None]
def is_close(a: float, b: float, relative_tolerance: float = 1e-09, absolute_tolerance: float = 0.0) -> bool:
    """
    Same as ``math.isclose()`` but also works with Python versions before 3.5.
    """
    difference = abs(a - b)
    threshold = max(relative_tolerance * max(abs(a), abs(b)), absolute_tolerance)
    return difference <= threshold
def make_space(space_padding=0):
    """
    Return string with x number of spaces. Defaults to 0.
    """
    # String repetition is O(n); the old character-by-character `+=` loop
    # was quadratic. Negative counts yield '' exactly as the loop did.
    return ' ' * space_padding
def variable(_printer, ast):
    """Prints a variable in an expression."""
    # format(x) is exactly equivalent to f'{x}'.
    return format(ast["name"])
def _extended_euclidean(q, r):
    """Return a tuple (p, a, b) such that p = aq + br,
    where p is the greatest common divisor.
    """
    # see [Davenport], Appendix, p. 214
    # Normalize so |q| >= |r|; swap back the Bezout coefficients afterwards.
    if abs(q) < abs(r):
        p, a, b = _extended_euclidean(r, q)
        return p, b, a
    # Q and R accumulate the Bezout coefficients of q and r respectively.
    Q = 1, 0  # noqa: N806
    R = 0, 1  # noqa: N806
    while r:
        quot, t = divmod(q, r)
        # Standard extended-Euclid coefficient update.
        T = Q[0] - quot*R[0], Q[1] - quot*R[1]  # noqa: N806
        q, r = r, t
        Q, R = R, T  # noqa: N806
    # q now holds the gcd; Q holds its coefficients.
    return q, Q[0], Q[1]
def relu(x):
    """
    Implements a rectified linear (ReLU) activation function.
    :param x: 2D numpy array equal to
    (the dot product of the input and hidden layer weights) + hidden layer bias
    :return: 2D numpy array that is zeroed out where x <= 0 and equal to the original value if x > 0
    """
    # Boolean mask multiplies elementwise: False zeroes, True keeps.
    positive_mask = x > 0
    return positive_mask * x
def get_fabric_design(fabric_design_uri, rest_obj):
    """
    Get the fabric design name from the fabric design uri which is returned from GET request
    :param fabric_design_uri: fabric design uri
    :param rest_obj: session object
    :return: dict such as {"Name": <design name>}, or {} when no uri given
    """
    if not fabric_design_uri:
        return {}
    # Strip everything up to and including '/api/' to get the relative path.
    relative_path = fabric_design_uri.split('/api/')[-1]
    response = rest_obj.invoke_request("GET", relative_path)
    return {"Name": response.json_data.get("Name")}
def dedupe_and_sort(sequence, first=None, last=None):
    """De-dupe a sequence and pin chosen items to the front/back.

    Items listed in ``first`` (and present in ``sequence``) come first, in
    the order ``first`` gives them; items listed in ``last`` come at the end
    likewise. Everything else keeps its original relative order, duplicates
    removed. The result has the same type as ``sequence``.

    For example, `INSTALLED_APPS` and `MIDDLEWARE_CLASSES` settings.
    """
    first = first or []
    last = last or []
    # Front section: pinned-first items actually present in the input.
    ordered = [item for item in first if item in sequence]
    # Middle section: remaining items in input order, skipping dupes and
    # anything reserved for the tail.
    for item in sequence:
        if item not in ordered and item not in last:
            ordered.append(item)
    # Tail section: pinned-last items actually present in the input.
    ordered += [item for item in last if item in sequence]
    return type(sequence)(ordered)
def _ensure_tuple(value):
"""Returns a tuple if `value` isn't one already"""
if isinstance(value, int):
if value == 1:
return ()
else:
return (value, )
elif isinstance(value, tuple):
if value == (1,):
return ()
return tuple(value)
else:
return tuple(value) |
def format_bool(form_data, key):
    """Read form_data[key] as a '0'/'1'-style boolean.

    :param form_data: mapping of form fields
    :param key: field name to read
    :return: True/False, or None when the key is missing or not numeric
    """
    if key not in form_data:
        return None
    try:
        return bool(int(form_data[key]))
    except (TypeError, ValueError):
        # Non-numeric values are treated as absent; the original bare
        # `except:` also swallowed KeyboardInterrupt/SystemExit.
        return None
def check_unique_possible_value(possible_value_, solution_):
    """
    :param possible_value_: the dict of storing all possible numbers of each cell
    :param solution_: the list of existing solution
    For each cell, if there is only one possible number, update solution_ and remove from possible_value_
    """
    for cell, candidates in possible_value_.items():
        if len(candidates) == 1:
            # Cell keys are 1-based (row, col); the solution grid is 0-based.
            row, col = cell[0] - 1, cell[1] - 1
            solution_[row][col] = candidates[0]
            possible_value_[cell] = []
    return 0
def validate_pairs(password):
    """
    It contains a pair of any two letters that appears at least twice in the
    string without overlapping, like xyxy (xy) or aabcdefgaa (aa), but not like
    aaa (aa, but it overlaps).
    """
    # Searching only past i+2 guarantees the repeat does not overlap.
    return any(password[i:i + 2] in password[i + 2:]
               for i in range(len(password) - 2))
def merge_configs(*configs, differentiators=("type",)):
    """Merge configuration dictionaries following the given hierarchy
    Suppose function is called as merge_configs(A, B, C). Then any pair (key, value) in C would
    overwrite any previous value from A or B. Same apply for B over A.
    If for some pair (key, value), the value is a dictionary, then it will either overwrite previous
    value if it was not also a dictionary, or it will be merged following
    `merge_configs(old_value, new_value)`.
    .. warning:
        The first config is modified in place and returned; pass a copy if
        the caller must keep the original intact.
        Redefinition of subdictionaries may lead to confusing results because merges do not remove
        data.
        If for instance, we have {'a': {'b': 1, 'c': 2}} and we would like to update `'a'` such that
        it only have `{'c': 3}`, it won't work with {'a': {'c': 3}}.
        merge_configs({'a': {'b': 1, 'c': 2}}, {'a': {'c': 3}}) -> {'a': {'b': 1, 'c': 3}}
    Examples
    --------
    .. code-block:: python
        :linenos:
        a = {'a': 1, 'b': {'c': 2}}
        b = {'b': {'c': 3}}
        c = {'b': {'c': {'d': 4}}}
        m = resolve_config.merge_configs(a, b, c)
        assert m == {'a': 1, 'b': {'c': {'d': 4}}}
        a = {'a': 1, 'b': {'c': 2, 'd': 3}}
        b = {'b': {'c': 4}}
        c = {'b': {'c': {'e': 5}}}
        m = resolve_config.merge_configs(a, b, c)
        assert m == {'a': 1, 'b': {'c': {'e': 5}, 'd': 3}}
    """
    # NOTE: this aliases (does not copy) the first config; all writes below
    # mutate it in place.
    merged_config = configs[0]

    def _can_be_merged(dict_a, dict_b):
        # Two sub-dicts are mergeable unless a differentiator key (e.g.
        # 'type') is set in dict_a and disagrees with dict_b's value.
        for differentiator in differentiators:
            if dict_a.get(differentiator, None) and dict_a[
                differentiator
            ] != dict_b.get(differentiator, None):
                return False
        return True

    for config_i in configs[1:]:
        for key, value in config_i.items():
            if (
                isinstance(value, dict)
                and isinstance(merged_config.get(key), dict)
                and _can_be_merged(merged_config[key], value)
            ):
                # Both sides are compatible dicts: recurse instead of replace.
                merged_config[key] = merge_configs(
                    merged_config[key], value, differentiators=differentiators
                )
            elif value is not None:
                # Scalars/lists (and incompatible dicts) overwrite; None is
                # skipped so later configs cannot erase earlier values.
                merged_config[key] = value
    return merged_config
def chunkify(seq, n):
    """Split seq into n roughly equally sized lists.
    https://stackoverflow.com/questions/2130016/splitting-a-list-of-arbitrary-size-into-only-roughly-n-equal-parts
    """
    step = len(seq) / float(n)
    chunks = []
    position = 0.0
    # Advance by a fractional step; int() truncation spreads the remainder.
    while position < len(seq):
        chunks.append(seq[int(position):int(position + step)])
        position += step
    return chunks
def urlmaker_sec(queryDic):
    """
    Produces the URL, which can be entered into the search (Designed for SEC.gov)
    Parameters
    ----------
    queryDic : dict
        searchText (str): Company name to be searched (Default: '*')
        formType (str): Type of the document to be retrieved (Default: '1')
        sic (str): SIC code for the companies to be searched (Default: '*')
        cik (str): CIK code for the company to be searched (Default: '*')
        startDate (str): Start date of the produced results (YYYYMMDD) (Default: '*')
        endDate (str): End date of the produced results (YYYYMMDD) (Default: '*')
        sortOrder (str): Ascending (Value = 'Date') or Descending (Value = 'ReverseDate') retrieval of results, (Default: 'Date')
    Returns
    -------
    str
        URL to be searched on the SEC website
    """
    # Pull each field with its default; CIK additionally loses leading zeros.
    search_text = queryDic.get('searchText', '*')
    form_type = queryDic.get('formType', '1')
    sic = queryDic.get('sic', '*')
    cik = queryDic['cik'].lstrip('0') if 'cik' in queryDic else '*'
    start_date = queryDic.get('startDate', '*')
    end_date = queryDic.get('endDate', '*')
    sort_order = queryDic.get('sortOrder', 'Date')
    return "https://searchwww.sec.gov/EDGARFSClient/jsp/EDGAR_MainAccess.jsp?search_text={}&sort={}&formType=Form{}&isAdv=true&stemming=true&numResults=100&fromDate={}&toDate={}&queryCik={}&querySic={}&numResults=100".format(search_text, sort_order, form_type, start_date, end_date, cik, sic)
def normalize(text):
    """Collapse every run of whitespace in *text* to a single space and trim."""
    # split() with no argument already discards leading/trailing whitespace.
    words = text.strip().split()
    return " ".join(words)
def to_list(data):
    """Convert data to a list.
    Args:
        data: Input data, with or without a python container.
    Returns:
        list: Replace python container with list or make input a list.
    """
    if isinstance(data, list):
        return data
    if isinstance(data, (tuple, set)):
        return list(data)
    # Bare scalar (or any other object): wrap it.
    return [data]
def orderdictoffvalues(dict):
    """Return a new dict with the same items, ordered by value descending.

    Assumes each value is a number. Ties keep their original insertion
    order (sorted() is stable, matching the old repeated-max extraction,
    but in O(n log n) instead of O(n^2)). The old length-mismatch branch
    was unreachable (a dict always has as many keys as values) and has
    been dropped.
    """
    newdict = {}
    for key, value in sorted(dict.items(), key=lambda item: item[1], reverse=True):
        newdict[key] = value
    return newdict
def quote_sql_string(value):
    """
    If "value" is a string type, escapes single quotes in the string
    and returns the string enclosed in single quotes.
    Thank you to https://towardsdatascience.com/a-simple-approach-to-templated-sql-queries-in-python-adc4f0dc511
    """
    if not isinstance(value, str):
        return value
    escaped = value.replace("'", "''")
    return "'" + escaped + "'"
def normalize_timestamp(timestamp):
    """Normalize a timestamp to integer seconds since the epoch.

    13-digit values (or scientific-notation 'E12' strings) are treated as
    milliseconds and divided by 1000.
    """
    looks_like_millis = ('E12' in str(timestamp)
                         or len(str(int(float(timestamp)))) == 13)
    if looks_like_millis:
        return int(float(timestamp) / 1000.0)
    return int(float(timestamp))
def nth_fib(n):
    """Return the nth fibonacci number. Per the kata, f(1) is supposed to be
    0 so the fibonacci sequence for this kata was not indexed at 0."""
    previous, current = 0, 1
    for _ in range(n - 1):
        previous, current = current, previous + current
    return previous
def bottom_lift(f, args):
    """Calls f on the arguments, returns None if there is
    an error of any sort. USE WITH CAUTION
    Arguments:
    - `f`: a function
    - `args`: a tuple of arguments
    """
    try:
        result = f(*args)
    except Exception:
        # Any failure collapses to the bottom value, None.
        return None
    return result
def get_minimal_representation(pos, ref, alt):
    """
    ExAC - MIT License (MIT)
    Copyright (c) 2014, Konrad Karczewski, Daniel MacArthur, Brett Thomas, Ben Weisburd
    Get the minimal representation of a variant, based on the ref + alt alleles in a VCF
    This is used to make sure that multiallelic variants in different datasets,
    with different combinations of alternate alleles, can always be matched directly.
    Args:
        pos (int): genomic position in a chromosome (1-based)
        ref (str): ref allele string
        alt (str): alt allele string
    Returns:
        tuple: (pos, ref, alt) of remapped coordinate
    """
    pos = int(pos)
    # A simple SNV is already minimal.
    if len(ref) == 1 and len(alt) == 1:
        return pos, ref, alt
    # Trim shared trailing bases (keep at least one base in each allele).
    while alt[-1] == ref[-1] and min(len(alt), len(ref)) > 1:
        alt, ref = alt[:-1], ref[:-1]
    # Trim shared leading bases, advancing the position accordingly.
    while alt[0] == ref[0] and min(len(alt), len(ref)) > 1:
        alt, ref, pos = alt[1:], ref[1:], pos + 1
    return pos, ref, alt
def _listify(ids):
"""convert string to list of unit length"""
if isinstance(ids, str):
ids = [ids]
return ids |
def line_intersection(line1, line2):
    """Find the intersection coordinate between two lines.

    Args:
        line1: `tuple` of two (X, Y) endpoints [pix]
        line2: `tuple` of two (X, Y) endpoints [pix]
    Returns:
        (x, y) intersection rounded to ints, or (0, 0) for parallel lines.
    """
    (x1, y1), (x2, y2) = line1
    (x3, y3), (x4, y4) = line2

    def cross(a, b):
        # 2D cross product / determinant of two vectors.
        return a[0] * b[1] - a[1] * b[0]

    xdiff = (x1 - x2, x3 - x4)
    ydiff = (y1 - y2, y3 - y4)
    denominator = cross(xdiff, ydiff)
    if denominator == 0:
        # Parallel (or coincident) lines: no unique intersection.
        return 0, 0
    d = (cross((x1, y1), (x2, y2)), cross((x3, y3), (x4, y4)))
    x = cross(d, xdiff) / denominator
    y = cross(d, ydiff) / denominator
    return int(round(x)), int(round(y))
def evaluate(labels, predictions):
    """
    Given a list of actual labels and a list of predicted labels,
    return a tuple (sensitivity, specificity).
    Assume each label is either a 1 (positive) or 0 (negative).
    `sensitivity` is the "true positive rate": the proportion of
    actual positive labels that were accurately identified.
    `specificity` is the "true negative rate": the proportion of
    actual negative labels that were accurately identified.
    If a class is absent from `labels`, its rate is reported as 0.0
    instead of raising ZeroDivisionError.
    """
    actual_pos = correct_pos = 0
    actual_neg = correct_neg = 0
    for label, prediction in zip(labels, predictions):
        if label == 1:
            actual_pos += 1
            if prediction == 1:
                correct_pos += 1
        elif label == 0:
            actual_neg += 1
            if prediction == 0:
                correct_neg += 1
    # Guard the divisions: the original crashed when either class was empty.
    sensitivity = correct_pos / actual_pos if actual_pos else 0.0
    specificity = correct_neg / actual_neg if actual_neg else 0.0
    return sensitivity, specificity
def does_classes_contain_private_method(classes, method):
    """
    Check if at least one of provided classes contains a method.
    If one of the classes contains the method and this method has private access level
    (i.e. it was wrapped by 'private_wrapper'), return (True, class); otherwise
    (False, None).
    """
    for cls in classes:
        candidate = getattr(cls, method.__name__, None)
        # '==' rather than the original substring test `in 'private_wrapper'`,
        # which wrongly matched any substring such as 'wrapper' or 'private'.
        if candidate is not None and getattr(candidate, '__name__', '') == 'private_wrapper':
            return True, cls
    return False, None
def vanishing_line(n, focal):
    """ Returns the equation of the vanishing line given a normal """
    a, b, c = n[0], n[1], n[2]
    # Only the third coefficient is scaled by the focal length.
    return (a, b, c * focal)
def group_from(type):
    """Get the group part of an event type name.
    E.g.::
        >>> group_from('task-sent')
        'task'
        >>> group_from('custom-my-event')
        'custom'
    """
    group, _sep, _rest = type.partition('-')
    return group
def _merge_strings(a, b, append=False):
"""
Merge two strings.
"""
merged = b
if append:
merged = a + b
return merged |
def correct_box(box, z):
    """Normalize box corners to (min, min, max, max) clamped to [0, 2**z - 1]."""
    x0, y0, x1, y1 = box
    limit = 2**z - 1
    low_x, high_x = sorted((x0, x1))
    low_y, high_y = sorted((y0, y1))
    return (max(0, low_x), max(0, low_y), min(limit, high_x), min(limit, high_y))
def _frontside_location(frontside_location):
"""
Location (for csv)
"""
if frontside_location == True:
return 'Yes'
else:
return 'No' |
def block_device_properties_root_device_name(properties):
    """get root device name from image meta data.
    Falls back to scanning 'mappings' for the entry whose virtual device
    is 'root'; returns None when neither source is available.
    """
    if 'root_device_name' in properties:
        return properties['root_device_name']
    if 'mappings' in properties:
        for bdm in properties['mappings']:
            if bdm['virtual'] == 'root':
                return bdm['device']
    return None
def number_of_routes(max_i, max_j):
    """Pascal triangle implementation to compute combinations."""
    # Seed the two border rows/columns of the grid with 1s.
    grid = {(i, 0): 1 for i in range(1, max_i + 1)}
    grid.update({(0, j): 1 for j in range(1, max_j + 1)})
    # Each interior cell is the sum of its upper and left neighbours.
    for i in range(1, max_i + 1):
        for j in range(1, max_j + 1):
            grid[(i, j)] = grid[(i - 1, j)] + grid[(i, j - 1)]
    return grid[(max_i, max_j)]
def xorNA(x):
    """Return str(x), or the literal 'NA' when x is None."""
    if x is None:
        return 'NA'
    return str(x)
def _APINameFromCollection(collection):
"""Get the API name from a collection name like 'api.parents.children'.
Args:
collection: str, The collection name.
Returns:
str: The API name.
"""
return collection.split('.')[0] |
def thousands_separator(value):
    """
    Using settings.THOUSANDS_SEPARATOR generic way has two problems:
    a) it then is not possible to use DATE_FORMAT as we want
    b) all the numbers have thousand separators not only amounts
    """
    if value is None:
        return None
    # Format with '_' grouping, then swap in the apostrophe separator.
    formatted = '{:_.2f}'.format(float(value))
    return formatted.replace('_', "'")
def D(u, dfs_data):
    """The DFS-numbering function: look up u's position in the ordering."""
    ordering = dfs_data['ordering_lookup']
    return ordering[u]
def expand_errors(data):
    """ Cleans up the error data of forms to enable proper json serialization """
    # Stringify every error entry so json.dumps never chokes on lazy objects.
    return {field: [str(error) for error in errors]
            for field, errors in data.items()}
def van_der_corput(n_sample, base=2):
    """Van der Corput sequence.
    :param int n_sample: number of element of the sequence.
    :param int base: base of the sequence.
    :return: sequence of Van der Corput.
    :rtype: list (n_samples,)
    """
    sequence = []
    for index in range(n_sample):
        value, denominator = 0.0, 1.0
        remaining = index
        # Reverse the base-`base` digits of the index across the radix point.
        while remaining > 0:
            remaining, digit = divmod(remaining, base)
            denominator *= base
            value += digit / denominator
        sequence.append(value)
    return sequence
def AppendPatternsToFilter(test_filter, positive_patterns=None,
                           negative_patterns=None):
    """Returns a test-filter string with additional patterns.
    Args:
      test_filter: test filter string ('pos1:pos2-neg1:neg2' form)
      positive_patterns: list of positive patterns to add to string
      negative_patterns: list of negative patterns to add to string
    """
    # A single '-' splits the filter into its positive and negative halves.
    halves = test_filter.split('-', 1)
    positive_half = halves[0]
    negative_half = halves[1] if len(halves) == 2 else ''
    positives = [p for p in positive_half.split(':') if p]
    negatives = [n for n in negative_half.split(':') if n]
    positives.extend(positive_patterns or [])
    negatives.extend(negative_patterns or [])
    # '#' in a pattern is normalized to '.'.
    result = ':'.join(p.replace('#', '.') for p in positives)
    if negatives:
        result += '-' + ':'.join(n.replace('#', '.') for n in negatives)
    return result
def even_chars(st):
    """
    Finds all the even characters in a string.
    :param st: string value.
    :return: a sequence (index begins with 1) of all the even characters from a string. If the string is smaller than
    two characters or longer than 100 characters, the function should return "invalid string".
    """
    if not 2 <= len(st) <= 100:
        return "invalid string"
    # 1-based even positions are the 0-based odd indices: slice from 1, step 2.
    return list(st[1::2])
def pluralize(word, count):
    """
    Given a word and a count, return the pluralized version of the word.
    >>> pluralize('cat', 1)
    'cat'
    >>> pluralize('cat', 2)
    'cats'
    """
    return word if count == 1 else word + "s"
def _make_extension_entry(
name,
description,
url,
enabled,
core,
latest_version,
installed_version,
status,
pkg_type,
installed=None,
install=None,
):
"""Create an extension entry that can be sent to the client"""
ret = dict(
name=name,
description=description,
url=url,
enabled=enabled,
core=core,
latest_version=latest_version,
installed_version=installed_version,
status=status,
pkg_type=pkg_type,
)
if installed is not None:
ret["installed"] = installed
if install is not None:
ret["install"] = install
return ret |
def _is_string_like(obj):
"""Check whether obj behaves like a string."""
try:
obj + ''
except (TypeError, ValueError):
return False
return True |
def feet_to_cm(feet, inches):
    """
    Converts feet and inches to centimeters.
    Accepts ints or strings such as "5'" and '11"'.
    """
    if type(feet) is not int:
        feet = int(feet.replace("'", ""))
    if type(inches) is not int:
        inches = int(inches.replace('"', ''))
    # 1 ft = 30.48 cm; 1 in = 2.54 cm.
    return 30.48 * feet + 2.54 * inches
def critical_pressure(Po, uni_comp_str, k):
    """
    Calculates and returns the critical pressure given Uniaxial Compressive Strength, Pressure (Vertical/Overburden), and k value.
    """
    numerator = 2 * Po - uni_comp_str
    return numerator / (1 + k)
def encode_topic_name(topic_names, to_byte=True):
    """Create topic name.
    Mainly used for creating a topic name for publisher.
    # Arguments
        topic_names: list
            a list of strings
    # Returns
        topic_name: byte string (or str when to_byte is not True)
            the topic name separated by "/"
    """
    joined = "/".join(topic_names)
    # `is True` kept deliberately: only the exact boolean triggers encoding.
    return joined.encode("utf-8") if to_byte is True else joined
def bar(x, greeting="hello"):
    """bar greets its input"""
    return "{} {}".format(greeting, x)
def sum_digits(s):
    """Assumes s is a string
    Returns the sum of the decimal digits in s
    For example, if s is 'a2b3c' it returns 5"""
    total = 0
    for char in s:
        try:
            total += int(char)
        except (TypeError, ValueError):
            # Non-digit characters are simply skipped.
            pass
    return total
def _reverse_task_map(task_map: dict) -> dict:
"""
Given a map {oozie_node: [airflow_node1, airflow_node2]} it returns
reversed map {airflow_node1: oozie_node, airflow_node2: oozie_node}.
:param task_map: oozie to airflow task map
:return: reversed task map
"""
new_map = dict()
for oozie_node, airflow_tasks in task_map.items():
new_map.update({t: oozie_node for t in airflow_tasks})
return new_map |
def parse_repository_tag(repo_path):
    """Splits image identification into base image path, tag/digest
    and it's separator.
    Example:
    >>> parse_repository_tag('user/repo@sha256:digest')
    ('user/repo', 'sha256:digest', '@')
    >>> parse_repository_tag('user/repo:v1')
    ('user/repo', 'v1', ':')
    """
    # Digest form ('@') takes precedence over tag form (':').
    if "@" in repo_path:
        repo, _sep, digest = repo_path.rpartition("@")
        return repo, digest, "@"
    repo, sep, tag = repo_path.rpartition(":")
    # A '/' after the last ':' means the colon belongs to a registry port,
    # not a tag (e.g. 'host:5000/repo').
    if sep and "/" not in tag:
        return repo, tag, ":"
    return repo_path, "", ":"
def reconstruct_path(came_from, current_node):
    """Reconstruct the path from the end node back to the beginning using the
    mapping of previous nodes.

    :param came_from: dict mapping each node to its predecessor
    :param current_node: the end node to walk back from
    :return: list of nodes from start to end (inclusive)
    """
    path = [current_node]
    while current_node in came_from:
        current_node = came_from[current_node]
        path.append(current_node)
    # Append + single reverse is O(n); the old insert(0, ...) was O(n^2).
    path.reverse()
    return path
def sequence_similarity_fraction(sequence, listofsequences, tolerance,
                                 aboveorbelow):
    """WEV this will count the number of sequences from the list which appear
    within the test insulating sequence, and compare the hit fraction to
    *tolerance*. With aboveorbelow == 'above' the result is True when the
    fraction is >= tolerance; with 'below' the truth value is inverted.
    Any other aboveorbelow value yields None."""
    total = len(listofsequences)
    hits = sum(1 for candidate in listofsequences if candidate in sequence)
    meets_tolerance = float(hits) / total >= tolerance
    if aboveorbelow == 'above':
        return meets_tolerance
    if aboveorbelow == 'below':
        return not meets_tolerance
    return None
def convert_to_tuples(features):
    """Convert feature dictionary to (image, label) tuples."""
    image = features["image"]
    label = features["label"]
    return image, label
def dict_merger(dict1, dict2):
    """
    Merge recursively two nested python dictionaries and
    if key is in both dictionaries tries to add the entries in both dicts.
    (merges two subdicts, adds lists, strings, floats and numbers together!)
    :param dict1: dict
    :param dict2: dict
    :return dict: Merged dict
    """
    new_dict = dict1.copy()
    # Empty inputs short-circuit to the other dict (returned by reference,
    # not copied).
    if not dict1:
        return dict2
    if not dict2:
        return dict1
    keys1 = list(dict1.keys())
    keys2 = list(dict2.keys())
    # add uncommon
    for key in keys2:
        if key not in keys1:
            new_dict[key] = dict2[key]
    # merge common: dispatch on the type of dict1's value.
    for key, val in dict1.items():
        if isinstance(val, dict):
            new_dict[key] = dict_merger(val, dict2.get(key, {}))
        elif isinstance(val, list):
            new_dict[key] = val + dict2.get(key, [])
        elif isinstance(val, str):
            new_dict[key] = val + dict2.get(key, '')
        elif isinstance(val, int):
            # NOTE: bool is a subclass of int, so True/False values are also
            # summed arithmetically by this branch.
            new_dict[key] = val + dict2.get(key, 0)
        elif isinstance(val, float):
            new_dict[key] = val + dict2.get(key, 0.0)
        else:
            # Unknown types (e.g. None, tuples) keep dict1's value unchanged
            # and only emit a warning on stdout.
            print(f"don't know what to do with element : {key}")
    return new_dict
def _split_name(name):
    """Splits given state name (model or optimizer state name) into the param_name, optimizer_key, view_num and the fp16_key"""
    # A trailing '_view_<n>' marks a sharded view; capture its number.
    name_split = name.split('_view_')
    view_num = None
    if(len(name_split) > 1):
        view_num = int(name_split[1])
    optimizer_key = ''
    fp16_key = ''
    # Detect which optimizer-state prefix (if any) the name carries;
    # otherwise check for an fp16 suffix. The checks are mutually exclusive.
    if name_split[0].startswith('Moment_1'):
        optimizer_key = 'Moment_1_'
    elif name_split[0].startswith('Moment_2'):
        optimizer_key = 'Moment_2_'
    elif name_split[0].startswith('Update_Count'):
        optimizer_key = 'Update_Count_'
    elif name_split[0].endswith('_fp16'):
        fp16_key = '_fp16'
    param_name = name_split[0]
    # Strip the optimizer prefix first, then any '_fp16' suffix, to recover
    # the bare parameter name.
    if optimizer_key != '':
        param_name = param_name.split(optimizer_key)[1]
    param_name = param_name.split('_fp16')[0]
    return param_name, optimizer_key, view_num, fp16_key
def pixel_color(x, y):
    """Return the Bayer color at position (x, y): 'R', 'G1', 'G2' or 'B'.

    The sensor uses an RGGB Bayer superpixel, laid out (as seen in ds9,
    with the origin in the lower-left corner) as::

        R  G1
        G2 B

    so the parity of the integer coordinates determines the color::

        row (y) | col (x) | color
        --------|---------|------
        odd     | even    | R
        odd     | odd     | G1
        even    | even    | G2
        even    | odd     | B

    Non-integer inputs are truncated with int() before the parity test.

    Returns:
        str: one of 'R', 'G1', 'G2', 'B'
    """
    column_parity = int(x) % 2
    row_parity = int(y) % 2
    if column_parity == 0:
        return 'G2' if row_parity == 0 else 'R'
    return 'B' if row_parity == 0 else 'G1'
def inside_obstacle(node):
    """
    This function checks if the point is inside an obstacle.
    Args:
        node: (x, y) location of a node on the map
    Returns:
        1 if the node lies inside any obstacle, 0 otherwise.
        (The ellipse/circle conditions use <= 1 / <= r**2, i.e. they are
        true when the point is inside, so 1 means "inside an obstacle".)
    """
    x = node[0]
    y = node[1]
    # Rectangle bar half plane conditions
    # NOTE(review): the slope/intercept constants below encode a specific
    # map; they are taken as given and not re-derived here.
    if y<=(8/5)*x+28 and y<=(-37/70)*x+(643/7) and y>=(9/5)*x-141 and y>=(-19/35)*x+(571/7): return 1
    # Ellipse half plane conditions (center (150, 100), semi-axes 40 and 20)
    if ((x-150)/(40))**2+((y-100)/(20))**2<=1: return 1
    # Non convex half plane conditions: inside the outer hull but outside
    # the notch cut out by the two inner half planes.
    if y<=13*x-140 and y<=185 and y<=(-7/5)*x+290 and y>=(6/5)*x+30:
        if (y<=x+100 and y<=(-6/5)*x+210):
            # Inside the notch: not an obstacle hit (f is a no-op placeholder).
            f=0
        else:
            return 1
    # Rhombus half plane conditions
    if y<=(3/5)*x-95 and y>=(3/5)*x-125 and y<=(-3/5)*x+175 and y>=(-3/5)*x+145: return 1
    # Circle half plane conditions (center (225, 150), radius 25)
    if (x-225)**2+(y-150)**2<=(25)**2: return 1
    return 0
def equal_partitions(a, b):
    """Check whether two partitions represent the same grouping of students."""
    def as_group_sets(partition):
        # frozenset makes each group hashable so groups compare order-free.
        return {frozenset(group) for group in partition}
    return as_group_sets(a) == as_group_sets(b)
def _clean_join(content):
"""
Joins a list of values together and cleans (removes newlines)
:param content: A str or list of str to process
:return: The joined/cleaned str
"""
if not isinstance(content, str):
content = ''.join(content) if content else ''
return content.replace('\n', '') |
def _make_slice_object_a_tuple(slc):
"""
Fix up a slc object to be tuple of slices.
slc = None returns None
slc is container and each element is converted into a slice object
Parameters
----------
slc : None or sequence of tuples
Range of values for slicing data in each axis.
((start_1, end_1, step_1), ... , (start_N, end_N, step_N))
defines slicing parameters for each axis of the data matrix.
"""
if slc is None:
return None # need arr shape to create slice
fixed_slc = list()
for s in slc:
if not isinstance(s, slice):
# create slice object
if s is None or isinstance(s, int):
# slice(None) is equivalent to np.s_[:]
# numpy will return an int when only an int is passed to
# np.s_[]
s = slice(s)
else:
s = slice(*s)
fixed_slc.append(s)
return tuple(fixed_slc) |
def build_update_mask(params):
    """Creates an update mask list from the given dictionary.

    Nested dicts expand into dotted paths ('parent.child'); the result is
    sorted.
    """
    mask = []
    for key, value in params.items():
        if not isinstance(value, dict):
            mask.append(key)
            continue
        # Recurse and prefix every child path with this key.
        mask.extend('{0}.{1}'.format(key, child)
                    for child in build_update_mask(value))
    return sorted(mask)
def get_component_status(obj, module, component_name: str):
    """
    get_component_status returns a boolean to indicate if a certain component is enabled or disabled.
    obj can be either a dict of a MCH CR, or a dict of a MCE CR.
    If the component_name does not exist in the spec.overrides.components list, returns False.
    """
    if obj is None:
        return False
    node = obj
    # Walk down to spec.overrides.components, bailing out if any level is
    # missing. (Renamed the loop variable: the original shadowed the
    # builtin `next`.)
    for part in ('spec', 'overrides', 'components'):
        child = node.get(part)
        if child is None:
            return False
        node = child
    try:
        for component in node:
            if component.get('name', '') == component_name:
                return component.get('enabled', False)
    except (TypeError, AttributeError) as e:
        # components was not a list of dicts; report via the Ansible module.
        module.fail_json(
            msg=f'failed to get enablement status of component {component_name}: {e}', exception=e)
    return False
def is_tuple(value):
    """Return True when *value* is a tuple (or tuple subclass)."""
    result = isinstance(value, tuple)
    return result
def _remove_none_values(dictionary):
""" Remove dictionary keys whose value is None."""
return list(map(dictionary.pop, [i for i in dictionary if dictionary[i] is None])) |
def gcd(a, b):
    """Compute the greatest common divisor (gcd) using the Euclid algorithm.

    Uses the iterative modulo form of Euclid's algorithm: the previous
    subtraction-based recursion hit RecursionError for inputs with a large
    ratio (e.g. gcd(10**6, 1)) and recursed forever when either argument
    was 0.

    :param a: first non-negative integer
    :param b: second non-negative integer
    :return: the greatest common divisor; gcd(n, 0) == n by convention
    """
    while b:
        a, b = b, a % b
    return a
def leading_zero(in_string):
    """Add a leading zero to a string with only one character.
    :param in_string: string of min length 1 and max length 2
    :return: string with length of 2 and with leading zero where applies
    :raises ValueError: when the input is empty or longer than two characters
    """
    if not 1 <= len(in_string) <= 2:
        raise ValueError("Value must have only one or two characters. Input '{}' has {}".format(in_string, len(in_string)))
    # rjust pads on the left only when the string is a single character.
    return in_string.rjust(2, "0")
def SerializeProfiles(profiles):
    """Returns a serialized string for the given |profiles|.
    |profiles| should be a list of (field_type, value) string pairs.
    Pairs whose field_type is "ignored" are skipped.
    """
    lines = []
    for profile in profiles:
        # Fixed separator line marks the start of each profile.
        lines.append("---")
        lines.extend("%s: %s" % (ftype, val)
                     for (ftype, val) in profile
                     if ftype != "ignored")
    return '\n'.join(lines)
def suffix(num: int) -> str:
    """
    Returns the ordinal suffix ("st", "nd", "rd", "th") for an integer.
    """
    num = abs(num)
    # Only the last two digits matter; the teens (11-13) always take "th".
    tens_digit = (num // 10) % 10
    units_digit = num % 10
    if tens_digit == 1:
        return "th"
    return {1: "st", 2: "nd", 3: "rd"}.get(units_digit, "th")
def allowed_file(filename):
    """Returns `True` if file extension is `.tar`"""
    # rpartition yields an empty separator when no '.' is present.
    _, sep, ext = filename.rpartition('.')
    return bool(sep) and ext == 'tar'
def _undefined_pattern(value, fn, undefined):
"""
If ``fn(value) == True``, return `undefined`, else `value`.
"""
if fn(value):
return undefined
return value |
def get_body(data):
    """
    Turns snake's body data into a coordinate list.
    :param data: request payload containing data['you']['body'] segments
    :return: list of (x, y) tuples, one per body segment
    """
    return [(segment['x'], segment['y']) for segment in data['you']['body']]
def set_param(input_param):
    """Converts input param to a dict of param_name: init value.

    Each value is either a list (whose first element is the default) or a
    dict carrying the initial value under the 'init' key.

    Uses isinstance instead of the ``type(v) == list`` anti-pattern so list
    subclasses are handled too, and assigns directly instead of building a
    throwaway dict per item.
    """
    new_param = {}
    for name, spec in input_param.items():
        if isinstance(spec, list):
            new_param[name] = spec[0]  # First value is default.
        else:
            new_param[name] = spec['init']
    return new_param
def maybe_singleton(py_object):
    """Returns `True` if `py_object` might be a singleton value .
    Many immutable values in python act like singletons: small ints, some strings,
    Bools, None, the empty tuple.
    We can't rely on looking these up by their `id()` to find their name or
    duplicates.
    This function checks if the object is one of those maybe singleton values.
    Args:
      py_object: the object to check.
    Returns:
      A bool, True if the object might be a singleton.
    """
    # isinstance accepts nested tuples of types.
    immutable_types = (int, str, bytes, float, complex, bool, type(None))
    if isinstance(py_object, immutable_types):
        return True
    # The empty tuple is interned by CPython, so it also behaves like a
    # singleton. The previous `py_object is ()` relied on that interning,
    # which is implementation-defined and emits a SyntaxWarning ("is" with a
    # literal) since Python 3.8 -- test for emptiness explicitly instead.
    return isinstance(py_object, tuple) and not py_object
def format_job_matrix_collection_specification(specification):
    """Formatter function for creating a format for new settings of job matrix.

    :param dict specification: dictionary containing section, name and setting value
    :return: list of formatted job-matrix item dictionaries
    """
    formatted_items = []
    for item in specification['value']:
        entry = {'name': item['name']}
        if 'parameters' in item:
            entry['params'] = []
            for param in item['parameters']:
                if 'options' in param:
                    options = param['options']
                    # A multi-option parameter keeps its list; a single
                    # option collapses to the bare value.
                    if len(options) > 1:
                        value = list(options)
                    else:
                        value = options[0]
                    entry['params'].append({param['param']: value})
                else:
                    # No options: record just the parameter name.
                    entry['params'].append(param['param'])
        formatted_items.append(entry)
    return formatted_items
def convert_args_list_to_float(*args_list):
    """Converts inputs to floats, returns a list in the same order as the input.

    :raises ValueError: if any input cannot be converted to float; the
        original conversion error is chained so the offending value stays
        visible in the traceback.
    """
    try:
        return [float(arg) for arg in args_list]
    except ValueError as exc:
        # Chain the cause instead of discarding it (PEP 3134).
        raise ValueError("Unable to convert inputs to floats") from exc
def SOD(fp):
    """Parse an SOD marker segment.
    SOD - Start of data segment 0xFF 0x93
    Last marker in a tile-part header. Bitstream data between a SOD and
    the next SOT or EOC shall be a multiple of 8 bits.
    """
    # The SOD marker carries no parameters, so there is nothing to read.
    return {}
def character_frequency(filename):
    """Counts the frequency of each character in the given file.

    :param filename: path of the text file to analyse
    :return: dict mapping character -> occurrence count, or None when the
        file could not be opened

    Fixes over the previous version: the bare ``except`` (which also
    swallowed KeyboardInterrupt/SystemExit) is gone; the ``return`` inside
    ``finally`` -- which silently discarded any exception raised while
    reading and returned a partial dict -- is replaced by a normal return;
    and a ``with`` block guarantees the file is closed even if reading fails.
    """
    try:
        f = open(filename)
    except FileNotFoundError:  # most specific failure first
        print("File not found")
        return None
    except OSError:  # any other OS-level error (permissions, disk full, ...)
        print("Other error raised by OS. Maybe disk full?")
        return None

    print("Jeeeh, it worked!!")
    characters = {}
    with f:
        for line in f:
            for char in line:
                characters[char] = characters.get(char, 0) + 1
    return characters
def ds2423(rd_val):
    """Converts the counter.ALL file value of the DS2423 dual counter to an
    A and a B counter reading.

    :param rd_val: comma-separated string of exactly two counter values
    :return: list of (count, channel) tuples for channels 'A' and 'B'
    """
    first, second = rd_val.split(',')
    return [(int(first), 'A'), (int(second), 'B')]
def true_report(report):
    """Converts a boolean report into a string for output.
    Only used when the --boolean option is used. Joins every key of the
    report whose value is truthy with linebreaks (\\n).
    Arguments:
        report (dict): mapping of report keys to boolean values
    Returns:
        str: the truthy keys joined by linebreaks (\\n)
    """
    truthy_keys = [key for key, value in report.items() if value]
    return '\n'.join(truthy_keys)
def combine_periods_and_elements(periods, elements):
    """
    Combine information on periods and orbital elements.

    Raises KeyError when any id in `periods` lacks an entry in `elements`.
    """
    missing = set(periods) - set(elements)
    if missing:
        raise KeyError('missing orbital elements for: {:}'.format(missing))
    combined = dict()
    for id_, (period, flag) in periods.items():
        entry = {'period': period, 'period_flag': flag}
        # Element keys are merged last, matching the original {**...} order.
        entry.update(elements[id_])
        combined[id_] = entry
    return combined
def celciusToFarenheit(celcius):
    """ Convert a temperatur in Celcius to Farenheit """
    # None passes through untouched; anything else is coerced to float.
    return None if celcius is None else float(celcius) * 1.8 + 32.0
def get_all_tunables(search_space_json):
    """
    Extract the search-space fields needed by the hyperparameter optimization
    module from the first entry of the Autotune search-space JSON array.

    Parameters: search_space_json (json array): A JSON array containing the
        input search space to hyperparameter optimization module.
    Returns:
        application_name (str): The name of the application that is being optimized.
        direction (str): Direction of optimization, minimize or maximize.
        hpo_algo_impl (str): Hyperparameter optimization library to perform Bayesian Optimization.
        id_ (str): The id of the application that is being optimized.
        objective_function (str): The objective function that is being optimized.
        tunables (list): A list containing the details of each tunable in a dictionary format.
        value_type (string): Value type of the objective function.
    """
    # Only the first search-space entry is consumed.
    space = search_space_json[0]
    return (
        space["application_name"],
        space["direction"],
        space["hpo_algo_impl"],
        space["id"],
        space["objective_function"],
        space["tunables"],
        space["value_type"],
    )
def _get_request_uri(environ):
"""Returns REQUEST_URI from WSGI environ
Environ variable REQUEST_URI is not specified in PEP 333 but provided
by most servers. This function tries the server generated value and
fallbacks to reconstruction from variables specified in PEP 333.
"""
try:
rv = environ['REQUEST_URI']
except KeyError:
parts = [environ.get('SCRIPT_NAME', ''),
environ.get('PATH_INFO', '')]
query = environ.get('QUERY_STRING')
if query:
parts.extend(['?', query])
rv = ''.join(parts)
return rv |
def euclidean_dist_vec(y1, x1, y2, x2):
    """
    Calculate Euclidean distances between pairs of points.
    Vectorized function to calculate the Euclidean distance between two
    points' coordinates or between arrays of points' coordinates. For accurate
    results, use projected coordinates rather than decimal degrees.
    Parameters
    ----------
    y1 : float or numpy.array of float
        first point's y coordinate
    x1 : float or numpy.array of float
        first point's x coordinate
    y2 : float or numpy.array of float
        second point's y coordinate
    x2 : float or numpy.array of float
        second point's x coordinate
    Returns
    -------
    dist : float or numpy.array of float
        distance from each (x1, y1) to each (x2, y2) in coordinates' units
    """
    dx = x1 - x2
    dy = y1 - y2
    # Pythagorean theorem; `**` keeps this elementwise for numpy arrays
    # (math.hypot would not broadcast).
    return (dx ** 2 + dy ** 2) ** 0.5
def get_bits(register, index, length=1):
    """
    Get selected bit(s) from register while masking out the rest.
    Returns as boolean if length==1
    :param register: Register value
    :type register: int
    :param index: Start index (from right)
    :type index: int
    :param length: Number of bits (default 1)
    :type length: int
    :return: Selected bit(s)
    :rtype: Union[int, bool]
    """
    # Build a mask of `length` ones, then shift the field down to bit 0.
    mask = (1 << length) - 1
    bits = (register >> index) & mask
    if length == 1:
        return bits == 1
    return bits
def createLoghostUndoConfig(IMCDevRunCfg, LOGHOST_DEFAULT_ADRESS):
    """
    Build the list of "undo" commands for every loghost line in the running
    config that does not point at the default loghost address.
    """
    return [
        f"undo {line.strip()}"
        for line in IMCDevRunCfg.splitlines()
        if "info-center loghost " in line and LOGHOST_DEFAULT_ADRESS not in line
    ]
def _regroup(args, fmt):
    """Reconstruct the structured arguments based on the flattened version.
    Parameters
    ----------
    args : NDArray, Symbol, or (nested) list of Symbol or NDArray
        We allow None inside the args.
    fmt : (nested) list of ints
        Stores the format information of the original structured args.
        Encoding (as consumed below): 0 -> a single non-list argument,
        -1 -> a None placeholder, n > 0 -> a flat list of n arguments,
        a nested list -> a correspondingly nested structure.
    Returns
    -------
    ret : NDArray, Symbol, or (nested) list of Symbol or NDArray
    """
    def _merger(args, fmt):
        """Recursive call to merge the arguments"""
        if isinstance(fmt, int):
            if fmt < -1:
                raise ValueError("Unsupported encoded format {}.".format(fmt))
            if fmt == 0:
                # Single argument: consume exactly one element of args.
                return args[0], args[1:]
            if fmt == -1:
                # None placeholder: the consumed slot must itself be None.
                if args[0] is not None:
                    raise ValueError('We do not support passing types that are not None'
                                     ' when the initial HybridBlock has received NoneType and'
                                     ' has been hybridized.'
                                     ' Received arg = {}, fmt = {}.'.format(args[0], fmt))
                return None, args[1:]
            else:
                # Flat sub-list of `fmt` arguments: consume that many at once.
                return args[:fmt], args[fmt:]
        # `fmt` is a (nested) list, so `args` must be a sequence to regroup.
        if not isinstance(args, (list, tuple)):
            raise ValueError("When hybridized, the output of HybridBlock must be (nested)"
                             " list of Symbol or NDArray, "
                             "but got {} of type {}".format(args, type(args)))
        ret = []
        for i in fmt:
            # Each recursive call returns (merged item, remaining args).
            res, args = _merger(args, i)
            ret.append(res)
        return ret, args
    # Discard any leftover args; return only the regrouped structure.
    return _merger(args, fmt)[0]
def to_bool(value):
    """Take a value and convert it to a boolean type.
    :param value: string or int signifying a bool
    :type value: str
    :returns: converted string to a real bool
    :raises ValueError: if the value maps to neither True nor False
    """
    text = str(value).lower()
    if text in ("yes", "y", "true", "t", "1"):
        return True
    if text in ("no", "n", "false", "f", "0", "0.0", "", "none", "[]", "{}"):
        return False
    # ValueError is a subclass of Exception, so callers catching the old
    # generic Exception still work.
    raise ValueError('Invalid value for boolean conversion: ' + str(value))
def reformat_n(n):
    """
    reformat_n(n)
    Returns reformatted n argument, converting ranges to lists.
    Required args:
        - n (str):
            number or range (e.g., "1-1", "all")
    Returns:
        - n (str or list):
            number or range (e.g., [1, 2, 3], "all")
    """
    if isinstance(n, list):
        return [int(item) for item in n]
    text = str(n)
    if "-" not in text:
        # Pass through plain values such as "all" or a single number.
        return n
    bounds = text.split("-")
    if len(bounds) != 2:
        raise ValueError("If n is a range, must have format 1-3.")
    start, stop = int(bounds[0]), int(bounds[1])
    # Inclusive range: "1-3" -> [1, 2, 3].
    return list(range(start, stop + 1))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.