content stringlengths 42 6.51k |
|---|
def deepmixdicts(*dicts):
    """
    Deeply mix dictionaries: later dicts win, nested dicts are merged.
    >>> deepmixdicts(
    ...     {'a': 1, 'b': 2, 'sub': {'x': 1, 'y': 2, 'sub': {'v': 1}}},
    ...     {'a': 2, 'c': 3, 'sub': {'x': 2, 'z': 3, 'sub': {'v': 2}}},
    ... ) == {
    ...     'a': 2, 'b': 2, 'c': 3,
    ...     'sub': {
    ...         'x': 2, 'y': 2, 'z': 3,
    ...         'sub': {'v': 2},
    ...     },
    ... }
    True
    """
    merged = {}
    for current in dicts:
        for key, value in current.items():
            if isinstance(value, dict):
                # Recurse so nested dicts are combined instead of replaced.
                merged[key] = deepmixdicts(merged.get(key, {}), value)
            else:
                merged[key] = value
    return merged
def int2round(src):
    """Recursively convert *src* to rounded integers.

    Floats are rounded to the nearest int; tuples and lists are converted
    element-wise (preserving the container type); ints pass through
    unchanged; numeric strings are parsed with ``int()``. Any other type
    falls through and returns None (matching the original behaviour).

    :param src: value, or nested tuple/list of values, to convert
    :return: rounded integer(s) mirroring the input structure
    """
    if isinstance(src, float):
        return int(round(src))
    elif isinstance(src, tuple):
        # Bug fix: recurse per element. The old code applied int(round(x))
        # directly, which crashed on nested containers or strings in a tuple.
        return tuple(int2round(item) for item in src)
    elif isinstance(src, list):
        return [int2round(item) for item in src]
    elif isinstance(src, int):
        return src
    if isinstance(src, str):
        return int(src)
def do_classtype_lookup_as2type(asn_query, d_as2type_data):
    """
    Execute lookup into CAIDA as2type and return Org type.
    """
    # Each record is indexable; position 1 holds the organisation type.
    record = d_as2type_data[asn_query]
    return record[1]
def handle_includes(defns):
    """Recursively flatten include lists inside a list of definitions.

    The pyyaml include reader inserts included definitions as nested lists;
    those are expanded in place while everything else is kept in order.
    """
    flattened = []
    for entry in defns:
        if isinstance(entry, list):
            flattened += handle_includes(entry)
        else:
            flattened.append(entry)
    return flattened
def prep_rg_names(item, config, fc_name, fc_date):
    """Generate read group names from item inputs.

    The lane name combines lane, flowcell date and flowcell name when both
    are available; otherwise the item's description is used.
    """
    if fc_name and fc_date:
        lane_name = "{}_{}_{}".format(item["lane"], fc_date, fc_name)
    else:
        lane_name = item["description"]
    # Platform: item-level override, then config-level, then 'illumina'.
    default_platform = config.get("algorithm", {}).get("platform", "illumina")
    platform = item.get("algorithm", {}).get("platform", default_platform)
    return {"rg": item["lane"],
            "sample": item["description"],
            "lane": lane_name,
            "pl": platform.lower(),
            "pu": lane_name}
def get_channel_name(sample_rate, is_acceleration=True,
                     is_vertical=False, is_north=True):
    """Create a SEED compliant channel name.
    SEED spec: http://www.fdsn.org/seed_manual/SEEDManual_V2.4_Appendix-A.pdf
    Args:
        sample_rate (int): Sample rate of sensor in Hz.
        is_acceleration (bool): Is this channel from an accelerometer.
        is_vertical (bool): Is this a vertical channel?
        is_north (bool): Is this channel vaguely pointing north or the channel
            you want to be #1?
    Returns:
        str: Three character channel name according to SEED spec.
    """
    # Band code: 'B' (broad band) for 10 <= rate < 80 Hz, 'H' otherwise.
    band = 'B' if 10 <= sample_rate < 80 else 'H'
    # Instrument code: accelerometer 'N'; otherwise high-gain velocity 'H'
    # (low-gain velocity sensors are very rare).
    code = 'N' if is_acceleration else 'H'
    # Orientation code: 'Z' vertical, else '1' (north-ish) or '2'.
    if is_vertical:
        orientation = 'Z'
    elif is_north:
        orientation = '1'
    else:
        orientation = '2'
    return band + code + orientation
def do_apply(ctx, lst):
    """Given a list, do apply"""
    # Repeatedly fold the last two items with the context's 'ap(' callable.
    if len(lst) == 1:
        return lst[0]
    head, pair = lst[:-2], lst[-2:]
    applied = ctx['ap('](pair[0], pair[1])
    if not head:
        return applied
    return do_apply(ctx, head + [applied])
def risk_level(value):
    """
    Returns a string based risk level from a number.
    1: Low
    2: Medium
    3: Medium
    4: High
    """
    # Unknown values fall through to None, as before.
    return {1: 'low', 2: 'medium', 3: 'medium', 4: 'high'}.get(value)
def unpack_tensor_from_indices(inputs, indices):
    """Separated from function above because it can be useful in the case
    were a packed_sequence.data has been modified (ex, through a neural
    network) but is not an instance of packed_sequence anymore."""
    # Gather inputs at each listed index, preserving the order of `indices`.
    gathered = []
    for pos in range(len(indices)):
        gathered.append(inputs[indices[pos]])
    return gathered
def extract_keys(dic, keys):
    """Return two copies of the dict. The first has only the keys specified.
    The second has all the *other* keys from the original dict.

    >>> regular, extra = extract_keys({"From": "F", "To": "T", "Received": "R"}, ["To", "From"])
    >>> sorted(regular.keys())
    ['From', 'To']
    >>> sorted(extra.keys())
    ['Received']
    """
    # Fail fast on the first requested key that is absent (in `keys` order).
    missing = [k for k in keys if k not in dic]
    if missing:
        raise KeyError("key %r is not in original mapping" % missing[0])
    wanted = {k: v for k, v in dic.items() if k in keys}
    extra = {k: v for k, v in dic.items() if k not in keys}
    return wanted, extra
def remove_config_prefix(config, prefix, skip=None):
    """Iterate over keys in dict and remove given prefix.
    Arguments
    ---------
    config : dict
        The configuration data.
    prefix : str
        The prefix to remove.
    skip : List[str], optional
        A list of keys which should not be altered.
    Returns
    -------
    ret : dict
        The transformed configuration (only keys containing "<prefix>."
        and not listed in `skip` are kept).
    """
    if skip is None:
        skip = []
    marker = f"{prefix}."
    result = {}
    for key, value in config.items():
        if marker in key and key not in skip:
            result[key.split(marker)[-1]] = value
    return result
def deg_to_qt(deg):
    """
    Converts from degrees to QT degrees.
    16 deg = 1 QTdeg
    Parameters
    ----------
    deg : float
        The value to convert.
    Returns
    -------
    float
        The value converted.
    """
    # Qt expresses angles in units of 1/16 of a degree.
    QT_UNITS_PER_DEGREE = 16
    return deg * QT_UNITS_PER_DEGREE
def tokenize(lines, token='word'):
    """Split text lines into word or character tokens.
    Defined in :numref:`sec_utils`"""
    assert token in ('word', 'char'), 'Unknown token type: ' + token
    if token == 'word':
        return [line.split() for line in lines]
    return [list(line) for line in lines]
def helper(n, x, ans):
    """
    Recursively scan the digits of n (starting at 10**x) for the largest one.
    :param n: int, original input
    :param x: int, index of the digit currently examined (power of ten)
    :param ans: int, largest digit seen so far
    :return: the largest digit of n
    """
    place = 10 ** x
    if n // place == 0:
        # All digits have been examined.
        return ans
    # (n // place) % 10 isolates the digit at this decimal place.
    current = (n // place) % 10
    return helper(n, x + 1, max(ans, current))
def _custom_tbl_dtype_compare(dtype1, dtype2):
    """This is a custom equality operator for comparing table data types that
    is less strict about units when unit is missing in one and dimensionless in
    the other.

    Both arguments are iterables of per-column dicts compared pairwise in
    order; returns True when every compared pair matches under the relaxed
    rules below.
    """
    # NOTE: zip() stops at the shorter input, so trailing extra columns in
    # one dtype are silently ignored here.
    for d1, d2 in zip(dtype1, dtype2):
        # Iterate the union of keys from both column descriptors.
        for k in set(list(d1.keys()) + list(d2.keys())):
            if k == 'unit':
                # A non-empty unit present on one side only is a mismatch...
                if d1.get(k, '') != '' and k not in d2:
                    return False
                if d2.get(k, '') != '' and k not in d1:
                    return False
                # ...but a missing unit vs '' (dimensionless) compare equal
                # because both default to ''.
                if d1.get(k, '') != d2.get(k, ''):
                    return False
            else:
                # Distinct fallback defaults ('1' vs '2') make a key that is
                # missing on one side count as a mismatch for non-unit keys.
                if d1.get(k, '1') != d2.get(k, '2'):
                    return False
    return True
def massage_error_list(error_list, placeholder_description):
    """
    Returns a best-effort attempt to make a nice error list
    """
    massaged = []
    for entry in (error_list or []):
        if entry.get('message'):
            # Already shaped like an error dict; make sure 'error' is set.
            entry.setdefault('error', True)
            massaged.append(entry)
        else:
            # Wrap anything without a message into a standard error dict.
            massaged.append({'message': entry, 'error': True})
    # Guarantee at least one entry so callers always have something to show.
    return massaged or [{'message': placeholder_description}]
def make_comma_list_a_list(elements_to_rocess):
    """Split comma-joined entries and return the unique elements.
    For example:
        ['elem1','elem2,elem3'] => ['elem1', 'elem2', 'elem3']
    :param elements_to_rocess: list to process
    :return: list of the separated, de-duplicated elements (order unspecified)
    """
    pieces = [part for element in elements_to_rocess
              for part in element.split(',')]
    return list(set(pieces))
def walk(node1, node2, parent_name=''):
    """Walk through each node in tree and compare difference.

    Returns a (diff_list, ok) tuple where ok is True only when no
    differences were found. WARNING: dict nodes are mutated in place
    (added/removed keys are popped) and this function is only meant to be
    called on nodes that are expected to differ — scalar pairs always
    report a [Value] diff without an equality check.
    """
    diff_results = []
    if type(node1) != type(node2):
        return (["[Type] {} vs {}".format(node1, node2)], False)
    elif type(node1) not in [list, dict] and type(node2) not in [list, dict]:
        # Scalars: reported as a value conflict unconditionally (callers
        # skip equal values before recursing).
        return (["[Value] {} vs {}".format(node1, node2)], False)
    elif type(node1) is dict and type(node2) is dict:
        # Record and remove keys present on only one side so the remaining
        # key sets are identical.
        for k in [x for x in node1.keys() if x not in node2.keys()]:
            diff_results.append("[Key added] {}".format(k))
            node1.pop(k)
        for k in [x for x in node2.keys() if x not in node1.keys()]:
            diff_results.append("[Key removed] {}".format(k))
            node2.pop(k)
        # Dict same length, with same keys
        for k, v in node1.items():
            if v == node2[k]:
                continue
            if type(v) not in [list, dict] or \
               type(node2[k]) not in [list, dict]:
                diff_results.append(
                    "[Dict] key '{}' value conflict: {} != {}".format(
                        k, v, node2[k]))
            else:
                (walk_result, ok) = walk(node1[k],
                                         node2[k],
                                         '/'.join([parent_name, k]))
                if not ok:
                    diff_results.append(walk_result)
    elif type(node1) is list and type(node2) is list:
        # Find the different items in both lists
        intersect = [x for x in node1 if x in node2]
        node1 = [x for x in node1 if x not in intersect]
        node2 = [x for x in node2 if x not in intersect]
        if len(node1) != len(node2):
            diff_results.append(
                "[List length different] {}, {} vs {}".format(
                    parent_name, len(node1), len(node2)))
            return (diff_results, False)
        # Compare remaining (non-shared) items positionally.
        for k in range(len(node1)):
            v1 = node1[k]
            v2 = node2[k]
            if type(v1) != type(v2):
                diff_results.append(
                    "[List item type][{}] {} != {}".format(
                        parent_name, type(v1), type(v2)))
            elif type(v1) not in [list, dict] or type(v2) not in [list, dict]:
                diff_results.append(
                    "[List][{}] value conflict: {} != {}".format(
                        '/'.join([parent_name, "[{}]".format(k)]), v1, v2))
            else:
                (walk_result, ok) = walk(v1, v2, '/'.join([parent_name,
                                                           "[{}]".format(k)]))
                if not ok:
                    diff_results.append(walk_result)
    if len(diff_results) == 0:
        return ([], True)
    else:
        return (diff_results, False)
def get_items_except(seq, indices, seq_constructor=None):
    """Returns all items in seq whose positions are not in indices.

    The result has the same type as the input unless seq_constructor
    overrides it; strings are re-joined into a str.
    """
    items = list(seq)
    excluded = set(indices)
    kept = [items[pos] for pos in range(len(seq)) if pos not in excluded]
    if not seq_constructor:
        if isinstance(seq, str):
            return ''.join(kept)
        seq_constructor = seq.__class__
    return seq_constructor(kept)
def cat2(l):
    """
    Concatenate the list repeated 100 times into one string.
    %timeit cat2(slist)
    10000 loops, best of 3: 23.3 us per loop
    """
    repeated = l * 100
    return ''.join(repeated)
def write_data_file(data, filename):
    """Write lines of string tokens to *filename*, one space-joined line each.

    :param data: iterable of lists of strings; each inner list becomes one line
    :param filename: path of the output file (overwritten)
    :raises AssertionError: if a line is not a list of strings
    :return: None
    """
    # 'with' guarantees the handle is closed even if validation fails
    # mid-way (the original open()/close() pair leaked it on error).
    with open(filename, 'w') as data_file:
        for line in data:
            assert isinstance(line, list)
            assert all(isinstance(item, str) for item in line)
            # Join tokens with spaces and terminate the line.
            data_file.write(' '.join(line) + '\n')
    return None
def syllable_count(word):
    """Calculates and returns the number of syllables in a given word"""
    VOWELS = "aeiouy"
    # Drop a single trailing punctuation mark, then normalise case.
    stripped = word[:-1] if word[-1] in ".,?!" else word
    stripped = stripped.lower()
    # Count vowel-group starts: a vowel not preceded by another vowel.
    total = 1 if stripped[0] in VOWELS else 0
    for prev, cur in zip(stripped, stripped[1:]):
        if cur in VOWELS and prev not in VOWELS:
            total += 1
    # A silent trailing 'e' does not add a syllable.
    if stripped.endswith("e"):
        total -= 1
    # Every word has at least one syllable.
    if total == 0:
        total = 1
    return total
def correct_score(list_of_possible_mvs, probable_mvs):
    """
    Corrects original scores by comparing string distance to probable_mvs
    INPUT:
        list_of_possible_mvs: triples, ex: [(val, score, origin), ...]
        probable_mvs: ex ['nan', 'none'] (currently unused here; known
            missing-value boosting is handled in mv_from_usual_forms)
    OUTPUT:
        list of triples (value, summed_score, [origins])
    """
    # Aggregate score and origins for values detected by several methods.
    aggregated = {}
    for (val, coef, orig) in list_of_possible_mvs:
        if val in aggregated:
            aggregated[val]['score'] += coef
            aggregated[val]['origin'].append(orig)
        else:
            aggregated[val] = {'score': coef, 'origin': [orig]}
    # Reformat output like input.
    return [(val, info['score'], info['origin'])
            for val, info in aggregated.items()]
def human_readable_time(seconds):
    """Return a human readable "HH hour(s) MM minute(s) SS second(s)" string.

    :param seconds: amount of seconds to format (any int-convertible value).
    :return: formatted string.

    Bug fix: the previous version used true division, so the ``{:02.0f}``
    format rounded up (e.g. 3599 seconds rendered as "01 hour(s)
    60 minute(s) 59 second(s)"). Integer divmod is exact.
    """
    seconds = int(seconds)
    hours, remainder = divmod(seconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    return "{:02.0f} hour(s) {:02.0f} minute(s) {:02.0f} second(s) ".format(
        hours, minutes, seconds
    )
def max_n_consecutive_1s(binary_str):
    """This function takes a number's binary representation
    and returns the maximum number of consecutive 1 digits in the string.
    Parameters
    ----------
    binary_str : str
        Binary representation of an integer.
    Returns
    -------
    max_n_1s : int
        Maximum number of consecutive 1 digits in the string.
    """
    # Splitting on '0' leaves only the runs of consecutive '1's.
    return max(len(run) for run in binary_str.split("0"))
def unpack_error_data(data):
    """Unpack the dictionary-like structure used in Error and Notice
    responses. Keys are 1 character codes followed by a string value;
    a NUL ('\\0') key signals the end of the data."""
    result = {}
    pos = 0
    size = len(data)
    while pos < size:
        code = data[pos]
        if code == '\0' or not code:
            # Terminator (or falsy byte) reached.
            break
        pos += 1
        # Value runs up to the next NUL (or the end of the buffer).
        end = data.find('\0', pos)
        if end < 0:
            end = size
        result[code] = data[pos:end]
        pos = end + 1
    return result
def get_account_info(connection_string):
    """Get Account info from a connection string.

    Expects the second ';'-field to be AccountName=... and the third to be
    AccountKey=...; returns (account_name, account_key).
    """
    parts = connection_string.split(";")
    name = parts[1].split("=")[-1]
    key = parts[2].replace("AccountKey=", "")
    return (name, key)
def deparameterize(inp, sep='+'):
    """ Somewhat-undo parameterization in string. Replace separators (sep) with spaces.
    :param inp: (str)
    :param sep: (str) default: '+'
    :return: "deparameterized" string
    """
    return ' '.join(inp.split(sep))
def replace_string_contents(raw_string, renaming_dictionary):
    """
    Apply every key -> value substitution from the renaming dictionary to a
    string, in dictionary iteration order.
    :param raw_string: string to transform
    :param renaming_dictionary: mapping of substrings to their replacements
    :return: transformed string
    """
    result = raw_string
    for old, new in renaming_dictionary.items():
        # str.replace is a no-op when `old` is absent, so no guard is needed.
        result = result.replace(old, new)
    return result
def remove_xa0(text):
    """Replace non-breaking spaces (U+00A0) with regular spaces."""
    return text.translate({0xa0: ' '})
def isnumeric(a):
    """
    Return ``True`` if an array contains numeric values.

    :param array a: an array (or anything ``np.asarray`` accepts).
    :return: bool
    """
    import numpy as np
    if isinstance(a, str):
        return False
    # np.asarray is a no-op for ndarrays and lets plain lists/scalars work
    # too (the old version crashed on inputs without a .dtype attribute).
    return bool(np.issubdtype(np.asarray(a).dtype, np.number))
def skip_special_members(app, what, name, obj, skip, options):
    """Skip some special members in the documentation (autodoc-style hook).

    Returns True when `name` is one of the members to hide.
    """
    hidden = ('__module__', '__doc__', '__dict__', '__weakref__',
              '__init__', '_params')
    return name in hidden
def combine_privacy_values(user_privacy_value: float, data_point_privacy_value: float) -> float:
    """ Combine privacy values of user and user for data point
    Parameters
    ----------
    user_privacy_value
        privacy value of user
    data_point_privacy_value
        privacy value of user for a data point
    Returns
    -------
    float
        the combined privacy value (multiplicative; a simple additive
        combination was considered and left commented out historically)
    """
    combined = user_privacy_value * data_point_privacy_value
    return combined
def validate_enum(datum, schema, **kwargs):
    """
    Check that the data value matches one of the enum symbols.
    i.e "blue" in ["red", green", "blue"]
    Parameters
    ----------
    datum: Any
        Data being validated
    schema: dict
        Schema
    kwargs: Any
        Unused kwargs
    """
    symbols = schema["symbols"]
    return datum in symbols
def determine_range(prices):
    """Approximates the range
    to eliminate accessories"""
    max_price = max(prices)
    # Only prices above half the maximum qualify as non-accessory; the
    # 100000000 sentinel reproduces the original "no qualifying price" value.
    candidates = [p for p in prices if p / max_price > 0.5]
    min_price = min(candidates + [100000000])
    return {"min": min_price, "max": max_price}
def _validate_data_codons(dataset):
""" Helper function to validate user input as codon sequence. """
if len(dataset) % 3 != 0:
return -1 |
def get_len(filename):
    """get len of wordtype from the filename"""
    # NOTE: rstrip('.pt') strips any trailing '.', 'p' or 't' characters
    # (a character set, not the literal suffix) — behaviour preserved as-is.
    trimmed = filename.rstrip('.pt')
    return int(trimmed.split('len')[-1])
def boundary_distance(i: int, elv: float, n: int) -> float:
    """Calculates the distance of the boundaries to an elevated origin point
    :param i: index of the boundary of interest
    :type i: int
    :param elv: elevation of the point
    :type elv: float
    :param n: distance granularity
    :type n: int
    :raises ValueError: if i is out of [0, 2n] or odd (odd indices denote
        sectors bounded by boundaries i - 1 and i + 1, not boundaries)
    :return: Distance of boundary i to the origin point
    :rtype: float
    """
    if not 0 <= i <= 2 * n:
        raise ValueError(
            'Region index {} is either greater than 2n ({}) or less than 0'.format(i, 2 * n)
        )
    if i % 2:
        raise ValueError("region i is not a boundary (i must be even)")
    # Below the midpoint the distance grows linearly; beyond it, it grows
    # hyperbolically, reaching infinity at the far boundary i == 2n.
    if i <= n:
        return i * elv / n
    if i < 2 * n:
        return (n * elv) / (2 * n - i)
    return float('inf')
def rem(x, a):
    """
    x: a non-negative integer argument
    a: a positive integer argument
    returns: integer, the remainder when x is divided by a.
    """
    # Iterative form of the original tail recursion: subtract a until x < a.
    while x >= a:
        x -= a
    return x
def make_links_dict(pairs_dict):
    """
    Prune pairs_dict down to links_dict: keep only mutual best links, i.e.
    pairs of scaffold ends that each rank the other as their best partner.
    """
    links_dict = {}

    def best(end):
        # Partner with the highest pair score for this end.
        return max(pairs_dict[end], key=pairs_dict[end].get)

    for end in pairs_dict:
        if not pairs_dict[end]:
            continue
        partner = best(end)
        if pairs_dict.get(partner) and best(partner) == end:
            links_dict[end] = partner
            links_dict[partner] = end
    return links_dict
def rotate_90(grid):
    """
    Rotate a given grid by 90 degrees.
    Args:
        grid: Grid to rotate.
    Returns:
        Grid after being rotated by 90 degrees.
    """
    # Transpose (one new row per column index), then reverse the row order.
    transposed = [[row[col] for row in grid] for col in range(len(grid))]
    return transposed[::-1]
def find_max_subarray_linear(A, low, high):
    """
    Find the maximum subarray of A[low..high] in O(n) using Kadane's
    algorithm, tracking the boundary indices as well as the sum.
    # Also listed as Ex4.1-5
    :param A: array
    :param low: leftmost index of the array
    :param high: rightmost index of the array
    :return: (leftmost index, rightmost index, sum) of the max subarray
    """
    cur_sum = best_sum = A[low]
    cur_l = cur_r = best_l = best_r = low
    for idx in range(low + 1, high + 1):
        if cur_sum >= 0:
            # Extend the current run with this element.
            cur_r = idx
            cur_sum += A[idx]
        else:
            # A negative prefix can only hurt: restart the run at idx.
            cur_l = cur_r = idx
            cur_sum = A[idx]
        if best_sum <= cur_sum:
            best_l, best_r, best_sum = cur_l, cur_r, cur_sum
    return (best_l, best_r, best_sum)
def time_switch(x):
    """
    Convenience codes to convert for time text to pandas time codes.
    """
    codes = {
        'min': 'Min', 'mins': 'Min', 'minute': 'Min', 'minutes': 'Min',
        'hour': 'H', 'hours': 'H',
        'day': 'D', 'days': 'D',
        'week': 'W', 'weeks': 'W',
        'month': 'M', 'months': 'M',
        'year': 'A', 'years': 'A',
        'water year': 'A-JUN', 'water years': 'A-JUN',
    }
    # Unknown inputs default to annual ('A').
    return codes.get(x, 'A')
def extract_top_level_dict(current_dict):
    """
    Build a one-level-nested graph dict from flat dotted parameter names.
    Useful for dynamically passing external params.
    :param current_dict: flat dict whose keys are dotted variable names
        (possibly containing 'layer_dict.', 'block_dict.' or 'module-'
        tokens, which are stripped).
    :return: dict mapping each top-level name either to its value or to a
        {sub_name: value} dict of its children.
    """
    output_dict = {}
    strip_tokens = ("layer_dict.", "block_dict.", "module-")
    for key in current_dict:
        name = key
        for token in strip_tokens:
            name = name.replace(token, "")
        # Split off the first dotted component; the remainder (if any) is
        # the sub-level path.
        top, _, sub = name.partition(".")
        if top not in output_dict:
            output_dict[top] = current_dict[key] if sub == "" else {sub: current_dict[key]}
        else:
            # Copy-and-extend so shared sub-dicts are not mutated in place.
            merged = dict(output_dict[top])
            merged[sub] = current_dict[key]
            output_dict[top] = merged
    return output_dict
def spherical_index_k(l, m=0):
    """ returns the mode k from the degree l and order m """
    if abs(m) > l:
        raise ValueError('m must lie between -l and l')
    return l * (l + 1) + m
def _build_module_seperator(module, submodule=''):
"""
Creates a string line with a seperator for module and submodule.
Args:
- module (String): Name of the module for the seperator
- submodule (String): Name of the submodule for the seperator (default:
empty string)
Returns:
- (String): Seperator of the form: // Defines for module->submodule
"""
line = '\n// Defines for {}'.format(module)
if submodule:
line += '-> {}'.format(submodule)
line += '\n'
return line |
def find_test_index(test, selected_tests, find_last_index=False):
    """Find the index of the first or last occurrence of a given test/test
    module in the list of selected tests.

    :attr:`selected_tests` can contain multiple consecutive entries of the
    same module, e.g. ['autograd', 'cuda', 'torch.TestTorch.test_acos',
    'torch.TestTorch.test_tan', 'torch.TestTorch.test_add', 'utils'];
    for test='torch' the first index is 2 and the last is 4.

    Arguments:
        test (str): Name of test to lookup
        selected_tests (list): List of tests
        find_last_index (bool, optional): look up the last occurrence
            instead of the first (first is default)
    Returns:
        index of the first or last occurrence, or -1 if absent
    """
    found = -1
    for position, name in enumerate(selected_tests):
        if name.startswith(test):
            found = position
            if not find_last_index:
                break
    return found
def activate(line, active='none'):
    """Checks if we are reading nodes, elements, or something else
    Parameters
    ----------
    line : string
        The line to check for activation
    active : string, optional
        The current reading state ('none' by default); 'step'/'boundary'
        states are sticky until an END STEP line is seen.
    Returns
    -------
    active : string
        'nodes' if we are going to read nodes, 'elems' if we are going to read
        elements, 'prop' if properties, 'boundary'/'step'/'nset'/'gennset'/
        'elset'/'genelset'/'amplitude' for the corresponding keyword blocks,
        or 'none' otherwise
    """
    # While inside a *STEP (or *BOUNDARY) block, stay there until the
    # closing END STEP line; *BOUNDARY switches the sub-state.
    if active == 'step' or active == 'boundary':
        if 'END' in line.upper() and 'STEP' in line.upper():
            return 'none'
        elif '*BOUNDARY' in line.upper():
            return 'boundary'
        else:
            return 'step'
    active = 'none'
    # Fixed-length prefix comparisons match Abaqus-style keyword lines;
    # the slice lengths are exactly the keyword lengths (incl. commas).
    if line.upper() == '*NODE':
        active = 'nodes'
    elif line[:14].upper() == '*UEL PROPERTY,':
        active = 'prop'
    elif line[:9].upper() == '*ELEMENT,':
        active = 'elems'
    elif line[:5].upper() == '*NSET':
        # GENERATE variants are flagged separately.
        if 'GENERATE' in line.upper():
            active = 'gennset'
        else:
            active = 'nset'
    elif line[:6].upper() == '*ELSET':
        if 'GENERATE' in line.upper():
            active = 'genelset'
        else:
            active = 'elset'
    elif line[:5].upper() == '*STEP':
        active = 'step'
    elif line[:10].upper() == '*AMPLITUDE':
        active = 'amplitude'
    return active
def cartesian_to_ccw_from_north(angle):
    """Convert a Cartesian angle to CCW-from-north: angle minus 90, in degrees."""
    return angle - 90
def letters_only(s):
    """
    :param s: string of characters (could be a word or a set of words separated by delimiters)
    :type s: str
    :returns: s without any delimiter, space, or punctuation
    :rtype: str
    :Example:
    >>> letters_only("Hello World !")
    'HelloWorld'
    .. seealso:: bstools.remove(), bstools.remove_all()
    """
    # Same delimiter set as before (duplicates and the unmatchable ''
    # entry collapse harmlessly in a set).
    delim = {"'", "(", ")", "[", "]", "{", "}", "-", "\n", "_", "/", "*",
             "+", ".", ",", ";", ":", "!", "?", '"', '', ' '}
    return "".join(ch for ch in s if ch not in delim)
def classpath(klass):
    """Return the full dotted class path (module.name).
    Args:
        klass (class): A class
    """
    return "{}.{}".format(klass.__module__, klass.__name__)
def UV_transmision(zenith, obstruction, SVF):
    """
    This function is used to estimate the percentage of UV radiation reaching the
    street canyons based on hemispherical images and the sun path, there are two
    major parts of UV radiation reaching the ground, direct beam and diffusion
    the direct beam can be estimated based on the obstruction, and the diffuseion
    can be estimated based on the SVF. The returned ratio will need to multiply the
    UV index to get the actual UV index in the street canyons
    Reference:
        https://www.epa.gov/enviro/web-services#hourlyzip
        https://www.epa.gov/sunsafety
        PhD dissertation of Roberto Hernandez, 2015
    Parameters:
        zenith: the sun zenith angle, in degree
        obstruction: 1 for obstructed, 0 open sky
        SVF: the sky view factor value
    return:
        UV_transRatio: the ratio of the UV reaching the street canyon
    Last modified by Xiaojiang Li, MIT Senseable City Lab, Jan 8th, 2018
    """
    import sys
    import math
    import numpy as np
    # if the point at the time, sun is blocked, then shade is 0 or is 1
    opensky_bool = 1 - obstruction
    # calcualte the percentages of the direct radiation and the diffuse radiation
    dif_rad_ratio_list = []
    # convert the zenith angle to radians
    zenith = np.pi*zenith/180.0
    # band width of UV radiation, based on EPA, UV band width ranges from 280-400nm
    numda_range = range(280,400,3)
    for numda in numda_range:
        # Empirical per-wavelength diffuse fraction. NOTE(review): the
        # 255.0 and 0.79 constants presumably come from the Hernandez 2015
        # dissertation cited above — confirm against that source.
        exponent = math.pow(280.0/numda, 4*0.79)
        exponent = exponent * math.pow(1 - (255.0/numda)**10, 1 - math.tan(zenith))
        exponent = -exponent/math.cos(zenith)
        dif_rad_ratio = 1 - math.exp(exponent)
        dif_rad_ratio_list.append(dif_rad_ratio)
    # convert the list to array and calculate the mean value of the diff/total ratio
    dif_rad_ratioArr = np.asarray(dif_rad_ratio_list)
    diffRatio = np.mean(dif_rad_ratioArr)
    print ('The mean diffuse ratio is:', diffRatio)
    # estimate the accumulated exposure to UV radiation
    # The total UV exposure should be calculated as: (opensky_bool*(1 - diffRatio) + SVF*diffRatio)*UV_value
    # Direct beam passes only under open sky; diffuse part scales with SVF.
    UV_transRatio = opensky_bool*(1 - diffRatio) + SVF*diffRatio
    return UV_transRatio
def _deep_update(main_dict, update_dict):
"""Update input dictionary with a second (update) dictionary
https://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth
Parameters
----------
main_dict: dict
Input dictionary
update_dict: dict
Update dictionary
Returns
-------
updated_dict : dict
Updated dictionary
"""
for key, val in update_dict.items():
if isinstance(val, dict):
main_dict[key] = _deep_update(main_dict.get(key, {}), val)
else:
main_dict[key] = val
# return updated main_dict
return main_dict |
def get_num_layer_for_replknet(var_name):
    """
    Divide [2, 2, 18, 2] layers into 12 groups; each group is 2 RepLK blocks
    + 2 ConvFFN blocks, including possible neighboring transition;
    adapted from https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py

    Mapping: stem -> 0; stages.0/1 -> 1/2; stages.2 -> 3..11 in groups of 4
    blocks; stages.3 -> 12; transitions.0/1 -> 1/2, later transitions -> 11;
    anything else -> 13.
    """
    parts = var_name.split('.')
    if var_name.startswith("stem"):
        return 0
    if var_name.startswith("stages"):
        stage, block = int(parts[1]), int(parts[3])
        if stage <= 1:
            return stage + 1
        if stage == 2:
            # Stage 2 is split into groups of 4 blocks: layers 3..11.
            return 3 + block // 4
        return 12
    if var_name.startswith('transitions'):
        transition = int(parts[1])
        return transition + 1 if transition <= 1 else 11
    return 13
def factorial_iterative(n):
    """
    :param n: Integer
    :return: n! == n * (n-1) * (n-2) ... 1 (1 for n <= 1)
    """
    product = 1
    # Multiplying by 1 is a no-op, so start the range at 2.
    for factor in range(2, n + 1):
        product *= factor
    return product
def decodeDegreesStr(valStr):
    """
    Return a signed latitude/longitude value from a string. Only copes with
    the integer values used in grid cell names (e.g. '10S' -> -10).
    """
    magnitude = int(valStr[:-1])
    # South and West hemispheres are negative.
    sign = -1 if valStr[-1] in ("S", "W") else 1
    return sign * magnitude
def intersperse(ls, elem, first=False, last=False):
    """
    Args:
        ls: A list of elements
        elem: The element to insert in between each element
        first: Whether to add the element at the beginning of the sequence
        last: Whether to add the element at the end of the sequence
    Returns:
        ``ls`` interspersed with ``elem``
    Examples:
        >>> intersperse([1, 2, 3], 0)
        [1, 0, 2, 0, 3]
        >>> intersperse([1, 2, 3], 0, first=True)
        [0, 1, 0, 2, 0, 3]
        >>> intersperse([1, 2, 3], 0, first=True, last=True)
        [0, 1, 0, 2, 0, 3, 0]
        >>> intersperse([], 0)
        []
    """
    out = []
    if first:
        out.append(elem)
    # Bug fix: guard the ls[-1] access — the old version raised IndexError
    # on an empty input list.
    if ls:
        for item in ls[:-1]:
            out.append(item)
            out.append(elem)
        out.append(ls[-1])
    if last:
        out.append(elem)
    return out
def get_callable_name(c):
    """Get a displayable name for the callable even if __name__
    is not available.

    :param c: any object; callables with a __name__ render as "name()".
    :return: display string.
    """
    try:
        return c.__name__ + '()'
    except AttributeError:
        # No __name__ (e.g. functools.partial, plain instances): fall back
        # to str(). The old bare `except:` also swallowed KeyboardInterrupt
        # and SystemExit.
        return str(c)
def allowed_file(filename):
    """
    Utility function that checks that the filename has an allowed extension.
    Used when uploading the file.
    :param filename: The name of the file that is being uploaded.
    :type filename: `str`
    :example:
    >>> allowed_file('my_file.txt')
    False
    >>> allowed_file('my_file')
    False
    >>> allowed_file('my_file.pdf')
    True
    """
    ALLOWED_EXTENSIONS = {"pdf"}
    if "." not in filename:
        return False
    extension = filename.rsplit(".", 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
def get_ban_query(bans, team, enemy):
    """
    Helper function for ban query: build an AND-joined Matchup exclusion
    clause from the bans list and the non-empty team/enemy values.
    Returns "" when there is nothing to exclude.
    """
    names = list(bans) if bans is not None else []
    names += [t for t in team.values() if t != ""]
    names += [t for t in enemy.values() if t != ""]
    if not names:
        return ""
    clauses = ['(Matchup!="{}")'.format(name) for name in names]
    return "(" + " AND ".join(clauses) + ")"
def generateDimText(dim, dataType):
    """ Generate a string describing the (nested) dimensions of an array,
    e.g. ' [2 x [3 x int]]' for dim=[2, 3]. """
    opening = "".join(" [{} x".format(d) for d in dim)
    # One closing bracket per opened dimension.
    return opening + " " + dataType + "]" * len(dim)
def normalize(columns, atts):
    """Creates an output record according to the columns schema from a set of attributes.
    Args:
        columns: the columns of the output record.
        atts: the attribute values of the output record ((name, value) pairs).
    Returns:
        row: a normalized row ready for dataframe integration ('' for
        columns with no matching attribute).
    """
    # First occurrence wins, matching the original break-on-first-match scan.
    lookup = {}
    for att in atts:
        lookup.setdefault(att[0], att[1])
    return [lookup.get(col, '') for col in columns]
def normalize_url(url):
    """If passed url doesn't include schema return it with default one - http."""
    if url.lower().startswith('http'):
        return url
    return 'http://%s' % url
def flattenList(listOfLists):
    """
    flatten 2D list
    return [1, 2, 3, 4, ...] for input [[1, 2], [3, 4, ...], ...]
    """
    flat = []
    for sub in listOfLists:
        flat.extend(sub)
    return flat
def check_directions(y, x, lst):
    """
    Check all directions of a seat for the first seen seats.
    Args:
        y: The column index of the seat.
        x: The row index of the seat.
        lst: The list (grid of seat rows) the seat is in.
    Returns:
        The sum of the first visible occupied seats in all
        eight directions of an occupied seat
    """
    occupied = 0
    # The original list input is divided into three parts:
    # rows before the seat, rows after the seat, and the row of the seat.
    # Each direction is then checked, and the value of the first encountered
    # seat is stored in a dictionary. All '#' (occupied seats) in the
    # dictionary are then added up to get the total
    for count, half_lst in enumerate([lst[y+1:], lst[y-1::-1],
                                      [lst[y]] * len(lst[y])]):
        # left/right walk one column per row step, x stays for the straight
        # direction; dct_seats holds the first seat seen per diagonal/column.
        left = right = x
        dct_seats = {0: '.', 1: '.', 2: '.'}
        if y == 0 and count == 1:
            # lst[-1::-1] would wrap around for y == 0; skip the upward scan.
            continue
        for row in half_lst:
            if '.' not in dct_seats.values():
                # First seat found in every direction of this group.
                break
            left -= 1
            right += 1
            for idx, item in enumerate([left, x, right]):
                try:
                    if item < 0:
                        # Negative indices would wrap; treat as off-grid.
                        continue
                    elif dct_seats.get(idx) == '.':
                        dct_seats[idx] = row[item]
                except IndexError:
                    pass
        occupied += sum(value == '#' for value in dct_seats.values())
    # The horizontal pass scans the seat's own row and counts the seat
    # itself when occupied; subtract that self-count.
    if occupied != 0 and lst[y][x] == '#':
        occupied -= 1
    return occupied
def get_item_charge(amount_of_items: int) -> float:
    """
    Return the extra-item surcharge for a basket.

    The first four items carry no surcharge; once the basket reaches five
    items, every item beyond the fourth costs 50.

    ---
    Args:
        amount_of_items (int): amount of items in the basket
    Returns:
        item_charge: fee for extra items (0 below the threshold; note the
            value is an int at runtime, which compares equal to the
            annotated float)
    """
    surcharge_threshold = 5
    free_of_charge = 4
    per_item_fee = 50
    if amount_of_items < surcharge_threshold:
        return 0
    return (amount_of_items - free_of_charge) * per_item_fee
def convert_float(string):
    """Return *string* converted to float when possible, else unchanged."""
    try:
        result = float(string)
    except (ValueError, TypeError):
        # Not a numeric string (or not a string-like at all): pass through.
        result = string
    return result
def _psd_params_checker(params):
"""Utility function to check parameters to be passed to `power_spectrum`.
Parameters
----------
params : dict or None
Optional parameters to be passed to
:func:`mne_features.utils.power_spectrum`. If `params` contains a key
which is not an optional parameter of
:func:`mne_features.utils.power_spectrum`, an error is raised.
Returns
-------
valid_params : dict
"""
if params is None:
return dict()
elif not isinstance(params, dict):
raise ValueError('The parameter `psd_params` has type %s. Expected '
'dict instead.' % type(params))
else:
expected_keys = ['welch_n_fft', 'welch_n_per_seg', 'welch_n_overlap']
valid_keys = list()
for n in params:
if n not in expected_keys:
raise ValueError('The key %s in `psd_params` is not valid and '
'will be ignored. Valid keys are: %s' %
(n, str(expected_keys)))
else:
valid_keys.append(n)
valid_params = {n: params[n] for n in valid_keys}
return valid_params |
def intersection(lst1, lst2):
    """
    Return the deduplicated items of lst1 that also appear in lst2.

    >>> intersection([1, 2, 3], [2, 4, 6])
    [2]
    >>> intersection([1, 2, 3], [4, 5, 6])
    []
    >>> intersection([2, 3, 2, 4], [2, 2, 4])
    [2, 4]
    """
    shared = []
    for candidate in lst1:
        if candidate in lst2 and candidate not in shared:
            shared.append(candidate)
    return shared
def title_case(title, minor_words=''):
    """
    Convert a string into title case, leaving given minor words lowercase
    (unless one of them opens the title).

    :param title: a string of words.
    :param minor_words: a space-separated string of words to keep lowercase.
    :return: the title in title case form.
    """
    exceptions = minor_words.lower().split()
    words = []
    for position, word in enumerate(title.lower().split()):
        # The first word is always capitalized, minor or not.
        if position == 0 or word not in exceptions:
            words.append(word.capitalize())
        else:
            words.append(word)
    return " ".join(words)
def acc_stats(stats_list):
    """Accumulate a list of sufficient statistics.

    Parameters
    ----------
    stats_list : list
        List of sufficient statistics (each a dict of dicts whose leaf
        values support ``+=``).

    Returns
    -------
    stats : dict
        Accumulated sufficient statistics.
    """
    accumulated = {}
    for stats in stats_list:
        for model_key, model_stats in stats.items():
            target = accumulated.setdefault(model_key, {})
            for stat_key, value in model_stats.items():
                if stat_key in target:
                    # In-place += matches the original accumulation.
                    target[stat_key] += value
                else:
                    target[stat_key] = value
    return accumulated
def _get_sm_proj_id(proj: str, namespace='main'):
"""
Matching the project ID to a sample-metadata project.
"""
if proj == 'csiro-als': # We don't have a project for ALS yet
proj = 'nagim'
if namespace != 'main':
proj = f'{proj}-test'
return proj |
def _has_missing_exe_output(output: str) -> bool:
"""Take output and check for exe missing errors"""
if "is not recognized as an internal or external command" in output:
return True
# AOD linux match
if ": not found" in output:
return True
return False |
def sort(nodes, total_order, dedup=False):
    """Sorts nodes according to order provided.

    Args:
        nodes: nodes to sort
        total_order: list of nodes in correct order
        dedup: if True, also discards duplicates in nodes

    Returns:
        Iterable of nodes in sorted order.
    """
    # Map each node to its position; lookup raises KeyError for nodes
    # absent from total_order (same as the original lambda).
    rank = {node: position for position, node in enumerate(total_order)}
    candidates = set(nodes) if dedup else nodes
    return sorted(candidates, key=rank.__getitem__)
def get_epoch_num_from_model_file_name(model_file_name):
    # Unchanged from original work
    """
    Extract the epoch number from a model file name.

    Expects the third underscore-separated token to begin with the epoch
    number, e.g. "model_foo_12.ckpt" -> 12.

    :param model_file_name: string
    """
    epoch_token = model_file_name.split("_")[2]
    return int(epoch_token.split(".")[0])
def is_point_in_rect_circular_boundary(distance, circular_radius, boundary_range):
    """
    judge whether a point is in boundary area for top center rect

    :param distance: distance from the point to the boundary centre
    :param circular_radius: radius of the circular boundary
    :param boundary_range: extra margin added around the radius
    :return: True when distance < circular_radius + boundary_range
    """
    # `if cond: return True else: return False` collapsed to the condition.
    return distance < circular_radius + boundary_range
def wrap_argument(text):
    """
    Wrap command argument in quotes and escape when this contains special characters.
    """
    needs_quoting = any(special in text for special in (' ', '"', "'", '\\'))
    if not needs_quoting:
        return text
    # Escape backslashes first, then double quotes, then wrap.
    escaped = text.replace('\\', r'\\').replace('"', r'\"')
    return '"%s"' % (escaped, )
def overlap(x1, w1, x2, w2):
    """
    Length of the 1-D overlap between two centred intervals
    (negative when they are disjoint).

    :param x1: center_x
    :param w1: bbox_w
    :param x2: center_x
    :param w2: bbox_w
    :return: overlap length
    """
    left = max(x1 - w1 / 2.0, x2 - w2 / 2.0)
    right = min(x1 + w1 / 2.0, x2 + w2 / 2.0)
    return right - left
def remove_heterotachy_info(l):
    """Remove any information in brackets - ete3
    does not support this format of newick

    Args:
        l: newick string, or a float (NaN placeholder) for a missing tree.

    Returns:
        The newick string with every [...] span removed, the input
        unchanged when it holds no brackets, or "NoTree" for float input.
    """
    # --- Ensure tree is NaN value, if so return NoTree ---
    if type(l) == float:
        return "NoTree"
    if ("[" not in l) and ("]" not in l):
        # Nothing to strip; return unchanged.
        return l
    # Positions of all opening / closing brackets. NOTE(review): the slicing
    # below assumes brackets are balanced and non-nested (strictly
    # alternating [...]...[...]); nested or unbalanced brackets would
    # mis-slice — confirm inputs guarantee this.
    open_brackets = [i for i, x in enumerate(l) if x == "["]
    close_brackets = [i for i, x in enumerate(l) if x == "]"]
    # Keep the text before the first bracket ...
    final_string = f'{l[:open_brackets[0]]}'
    # ... plus each stretch between a closing bracket and the next opening ...
    for ob, cb in zip(open_brackets[1:], close_brackets[:-1]):
        final_string += l[cb+1:ob]
    # ... plus the tail after the last closing bracket.
    final_string += l[close_brackets[-1]+1:]
    return final_string
def createdecryptedfilename(filename):
    """
    Determine a filename to save the decrypted diskimage to.

    Inserts "-decrypted" just before the last extension, or appends it
    when the name contains no dot at all.
    """
    stem, dot, extension = filename.rpartition(".")
    if not dot:
        # No "." found: rpartition returned the whole name in `extension`.
        return filename + "-decrypted"
    return stem + "-decrypted" + dot + extension
def get_chunk_type(tok, idx_to_tag):
    """
    Args:
        tok: id of token, ex 4
        idx_to_tag: dictionary {4: "B-PER", ...}

    Returns:
        tuple: "B", "PER"
    """
    parts = idx_to_tag[tok].split('-')
    # Class is everything before the first dash, type everything after the
    # last one (both equal the whole tag when there is no dash at all).
    return parts[0], parts[-1]
def stripnull(string):
    """Return string truncated at first null character.

    Clean NULL terminated C strings.

    >>> stripnull(b'string\\x00')
    b'string'
    """
    # partition yields the full input as the head when no NUL is present.
    return string.partition(b'\x00')[0]
def copyfile(infile, outfile, chunksize=8192):
    """Read all data from infile and write them to outfile.

    Reads in chunks of `chunksize` and returns the total number of
    bytes (or characters, for text streams) copied.
    """
    copied = 0
    chunk = infile.read(chunksize)
    while chunk:
        outfile.write(chunk)
        copied += len(chunk)
        chunk = infile.read(chunksize)
    return copied
def force_str_2_bool(bool_str, raise_if_unknown=False):
    """convent 'True' or 'False' to bool, using on query_param

    :param bool_str: raw query-parameter string.
    :param raise_if_unknown: when True, unknown strings raise ValueError
        instead of silently becoming False.
    """
    truthy_values = ["True", "true"]
    falsy_values = ["False", "false"]
    if bool_str in truthy_values:
        return True
    if bool_str in falsy_values:
        return False
    if raise_if_unknown:
        raise ValueError("str should be 'True/true' or 'False/false' ")
    # unknown str regard as False
    return False
def is_backtrack(previous_label, next_label):
    """If we've already processes a header with 22(c) in it, we can assume
    that any following headers with 1111.22 are *not* supposed to be an
    analysis of 1111.22

    :param previous_label: list of label components already processed (or None)
    :param next_label: list of label components of the upcoming header (or None)
    :return: True when next_label is a non-empty strict prefix of
        previous_label, else False
    """
    previous_label = previous_label or []
    next_label = next_label or []
    trimmed = previous_label[:len(next_label)]
    # Bug fix: the old `next_label and ...` expression leaked `[]` (a falsy
    # list, not False) when next_label was empty; bool() guarantees the
    # function always returns an actual boolean.
    return (bool(next_label)
            and len(previous_label) > len(next_label)
            and trimmed == next_label)
def int_to_balt(n):
    """
    Convert an integer to a tuple of its digits in balanced ternary
    (most significant trit first, digits drawn from {-1, 0, 1}).
    """
    digits = []
    while n != 0:
        n, remainder = divmod(n, 3)
        if remainder == 2:
            # A remainder of 2 is written as -1 with a carry into the
            # next trit.
            digits.append(-1)
            n += 1
        else:
            digits.append(remainder)
    digits.reverse()
    return tuple(digits)
def chain_max_non_cyclables(non_cyclables, cyclabe_increase: int, minimal_cyclable: int):
    """
    clean the non_cyclables so that each chain has only one value, the highest, and each value below minimal is removed

    :param non_cyclables: the disequalities whether we allow taking the cycle
    :param cyclabe_increase: the amount with which the countervalue increased when taking the cycle
    :param minimal_cyclable: the minimal countervalue to take the value
    :return: a dict mapping each chain (residue modulo cyclabe_increase) to
        its highest surviving value

    O(V) if maximum amount of disequalities per node is fixed
    # the amount of chains that are bounded is also limited by the minimum between the positive_cycle_value
    # and the amount of disequalities, which makes us bounded by O(V) as long as
    # the amount of disequalities is fixed
    """
    best_per_chain = dict()
    for candidate in non_cyclables:
        # Drop values below the minimal countervalue.
        if candidate < minimal_cyclable:
            continue
        chain = candidate % cyclabe_increase
        # Keep only the maximum per chain.
        if chain not in best_per_chain or candidate > best_per_chain[chain]:
            best_per_chain[chain] = candidate
    return best_per_chain
def clamp(value, min_value, max_value):
    """Clamps the given value between min and max"""
    # Upper bound is checked first, mirroring the original branch order.
    return max_value if value > max_value else (
        min_value if value < min_value else value)
def index_table_from_name_table(elements, name_table):
    """Converts a table (list of lists) of strings into a table (list of lists) of ints.

    Each name is replaced by its index in *elements* (ValueError when a
    name is absent, as list.index raises).
    """
    index_table = []
    for row in name_table:
        index_table.append([elements.index(name) for name in row])
    return index_table
def image_alt_value_passthrough(image, *args, **kwargs):
    """Passthrough replacement for v1.jinja2tags.image_alt_value.

    This is needed because, as written, the info unit template assumes that it
    will get passed a Wagtail image object. We want to pass a dict which
    contains the various image properties, including alt text, if defined.
    """
    # Extra positional/keyword arguments keep the template-tag signature
    # but are deliberately ignored.
    alt_text = image.get("alt", "")
    return alt_text
def search_list(l, k, v):
    """Search a list for an entry with a specific value.

    Returns the first mapping in *l* whose key *k* maps to *v*,
    or None when no entry matches (entries lacking *k* are skipped).
    """
    return next((entry for entry in l if k in entry and entry[k] == v), None)
def get_orthogonal_scope_name(backbone_name):
    """Returns scope name of convolutions for orthogonal regularization.

    :param backbone_name: Name of backbone
    :return: Name of scope
    :raises Exception: for an unknown backbone name
    """
    scope_names = {
        'rmnet': 'dim_red',
        'twinnet': 'dim_red',
        'shufflenetv2': 'inner_map',
    }
    if backbone_name not in scope_names:
        raise Exception('Unknown backbone name: {}'.format(backbone_name))
    return scope_names[backbone_name]
def capitalLettersCipher(ciphertext):
    """
    Returns the capital letters in the ciphertext

    Example:
        Cipher Text:
            dogs are cuter than HorsEs in a LooP.
        Decoded Text: HELP
    """
    return "".join(filter(str.isupper, ciphertext))
def _handle_to_bytearray(handle):
"""Packs the 16-bit handle into a little endian bytearray"""
assert handle <= 0xFFFF
assert handle >= 0
return bytearray([handle & 0xFF, (handle >> 8) & 0xFF]) |
def dict_from_mappings(data, mappings):
    """create a dict in Activitypub format, using mappings supplies by
    the subclass"""
    result = {}
    for mapping in mappings:
        field = mapping.local_field
        # sometimes there are multiple mappings for one field; keep the
        # first truthy value written instead of overwriting it
        if result.get(field):
            continue
        result[field] = mapping.get_value(data)
    return result
def parse_physloc_adapter_output(output):
    """
    Parses the physical location command output from an IVM command.

    :param output: The output from an IVM physical location command;
        one string per adapter, e.g.
        'ent4:Virtual I/O Ethernet Adapter (l-lan): U9117.MMC.0604C17-V100-C2-T1'
    :returns: Dict keyed by device name; each value is the list of the
        remaining colon-separated fields, typically
        [<description>, <physloc>]. Returns False for empty output.
    """
    if len(output) == 0:
        return False
    virt_eths = {}
    for line in output:
        # Split once: head is the device name, the tail becomes the value.
        fields = line.split(':')
        virt_eths[fields[0]] = fields[1:]
    return virt_eths
def autoalign(netapi, selection):
    """ Autoalign nodes or nodespaces.

    Args:
        netapi: net API object providing get_nodespace / get_node /
            autoalign_nodespace / autoalign_entities.
        selection: list of selected entity uids.

    Returns:
        A dict with an 'error' key on failure; None on success.
    """
    if len(selection) == 1:
        # if there's only one item selected, we assume it's a nodespace
        # so we align its contents. If it's not, we return an error
        try:
            nodespace = netapi.get_nodespace(selection[0])
        except:  # any lookup failure is treated as "not a nodespace"
            return {'error': 'nothing to align'}
        netapi.autoalign_nodespace(nodespace.uid)
    else:
        # otherwise, we retrieve the parent nodespace from the first selected
        # entity, and autoalign the selected nodes in the given nodespace
        nodespace = None
        if len(selection):
            # Try the selection head as a node first, then as a nodespace;
            # the second lookup overrides the first when both succeed.
            try:
                nodespace = netapi.get_node(selection[0]).parent_nodespace
            except:  # fall through to the nodespace lookup
                pass
            try:
                nodespace = netapi.get_nodespace(selection[0]).parent_nodespace
            except:  # keep whatever the node lookup produced (possibly None)
                pass
        if nodespace is None:
            return {'error': 'unknown entity in selection'}
        netapi.autoalign_entities(nodespace, selection)
def gcd(x, y):
    """
    Euclid's algorithm with step-by-step printing of each division.

    assume always x > y (the arguments are swapped otherwise)

    :param x:
    :param y:
    :return: gcd value
    """
    if x < y:
        # Tuple swap keeps x as the larger operand.
        x, y = y, x
    if y == 0:
        print(f'gcd = {x}\n')
        return x
    print(f'{x} = {(x - x % y) / y}*{y} + {x % y}')
    return gcd(y, x % y)
def int2bytes(a, b):
    """ Converts a given integer value (a) its b-byte representation, in hex format.

    :param a: Value to be converted.
    :type a: int
    :param b: Byte size to be filled.
    :type b: int
    :return: The b-bytes representation of the given value (a) in hex format.
    :rtype: hex str
    :raises Exception: when a does not fit into b bytes.
    """
    max_value = (1 << (8 * b)) - 1
    if a > max_value:
        raise Exception(str(a) + " is too big to be represented with " +
                        str(b) + " bytes. Maximum value is " + str(max_value) + ".")
    # Zero-pad to two hex digits per byte.
    return ('%0' + str(2 * b) + 'x') % a
def relu(x: float) -> float:
    """A testing implementation of relu.

    Returns x for non-negative inputs (preserving its type) and 0 otherwise.
    """
    return x if x >= 0 else 0
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.