content stringlengths 42 6.51k |
|---|
def _to_flat_dict_key(keys):
"""Converts a list of nested keys to flat keys used in state.as_dict().
Args:
keys: List of keys from outmost to innermost.
Returns:
Corresponding flat dictionary for the given list of keys.
"""
return '/' + '/'.join(keys) |
def linear_range(start_value, end_value, step_count):
    """Return `step_count` evenly spaced values from start_value to end_value.

    Each point is computed directly from its index instead of accumulating a
    float step: the original accumulating loop could drop the final endpoint
    when rounding error pushed the running value past `end_value` (e.g.
    linear_range(0, 0.3, 4) returned 3 points), and it looped forever when
    start_value == end_value.

    Args:
        start_value: First value of the range.
        end_value: Last value of the range (inclusive).
        step_count: Total number of points to generate (must be >= 1).

    Returns:
        List of `step_count` floats, each rounded to 6 decimal places.

    Raises:
        ValueError: if step_count < 1.
    """
    if step_count < 1:
        raise ValueError("step_count must be >= 1")
    if step_count == 1:
        # A single point can only be the start value; also avoids the
        # division by zero the original hit here.
        return [round(1.0 * start_value, 6)]
    step_size = (end_value - start_value) / (step_count - 1)
    return [round(start_value + i * step_size, 6) for i in range(step_count)]
def _map_access_string(access_string):
"""Map an access string to a value Blogger will understand.
In this case, Blogger only cares about "is draft" so 'public' gets mapped to
False, everything else to True.
Returns:
Boolean indicating True (is a draft) or False (is not a draft).
"""
if not access_string:
return False
if access_string == 'public':
return False
return True |
def sortedDictValues(adict):
    """
    Return the dictionary's values ordered by ascending key.
    """
    return [adict[key] for key in sorted(adict)]
def lighten_color(r, g, b, factor):
    """
    Blend an RGB color towards white.

    factor=0 leaves the color unchanged; factor=1 yields pure white.
    Returns a list [r, g, b] of ints.
    """
    def blend(channel):
        # Shrink the remaining distance to 255 by `factor`.
        return int(255 - (255 - channel) * (1.0 - factor))

    return [blend(r), blend(g), blend(b)]
def is_pds4_identifier(identifier):
    """
    Determine whether `identifier` follows the PDS4 LIDVID format.

    Parameters
    ----------
    identifier : str
        The identifier to check.

    Returns
    -------
    bool
        True if the identifier starts with the PDS4 URN prefix.
    """
    return identifier.startswith("urn:nasa:pds")
def round_up(number: int, multiple: int) -> int:
    """Round a number up to a multiple of another number. Only works for
    positive numbers.

    Uses pure integer arithmetic: the original float division silently lost
    precision for numbers above 2**53.

    Example:
        >>> round_up(57, 100)
        100
        >>> round_up(102, 100)
        200
        >>> round_up(43, 50)
        50
        >>> round_up(77, 50)
        100

    Raises:
        ValueError: if multiple is 0 (was an assert, which disappears
            under `python -O`).
    """
    if multiple == 0:
        raise ValueError("multiple must be non-zero")
    return (number + multiple - 1) // multiple * multiple
def sub(x, y):
    """Element-wise difference of two vectors.

    Args:
        x: The minuend sequence.
        y: The subtrahend sequence (must be at least as long as x).

    Returns:
        List z with z_i = x_i - y_i for every index of x.
    """
    return [value - y[i] for i, value in enumerate(x)]
def is_cased(s):
    """Return True if string s contains at least one cased character."""
    return s.upper() != s.lower()
def parse_codec_list(line, codec_type='video'):
    """
    Parse one line of ffmpeg's codec listing.

    :param line: string to parse
    :param codec_type: 'video' to look for video codecs, anything else audio
    :returns: codec name (columns 8-28, stripped), or '' when no match
    """
    flag = "V" if codec_type == 'video' else "A"
    flags_field = line[1:7]
    # Either an encoder entry carrying our flag, or the flag leads the field.
    is_encoder_entry = "E" in flags_field and flag in flags_field
    leads_with_flag = flags_field[0] == flag
    if is_encoder_entry or leads_with_flag:
        return line[8:29].strip()
    return ''
def getPdbLink(pdb_code):
    """Return the PDB file name and its download URL on the EBI server.

    Returns:
        Tuple (file_name, url).
    """
    file_name = f'pdb{pdb_code}.ent'
    pdb_loc = f'https://www.ebi.ac.uk/pdbe/entry-files/download/{file_name}'
    return file_name, pdb_loc
def make_markdown_url(line_string, s):
    """
    Turn every space-separated token starting with `s` into a markdown link.

    Tokens matching the prefix become [token](token); others pass through.
    """
    tokens = []
    for token in line_string.split(' '):
        if token.startswith(s):
            tokens.append(f'[{token}]({token})')
        else:
            tokens.append(token)
    return ' '.join(tokens)
def info(iterable):
    """
    ESSENTIAL FUNCTION
    Return the flattened union of an iterable of iterables.
    info([[1,2], [1,3],[2]]) --> {1,2,3}
    """
    merged = set()
    for element in iterable:
        merged.update(element)
    return merged
def safe_cast_to_list(string):
    """
    Wrap a value in a one-element list unless it already is a list.

    Needed for some pandas operations; avoids double-wrapping lists.

    :param string: value to wrap
    :return: the value itself if it is a list, else [value]
    """
    return string if isinstance(string, list) else [string]
def _parseProperties (projectObj):
""" Parse maven properties
"""
properties = {}
allProperties = projectObj.get ('properties', {})
if allProperties is None:
allProperties = {}
for k, v in allProperties.items():
# when v is a list means that the same property is specified more
# than once, on those scenarios just get the last one defined
#
# XML is like:
# <prop>myvalue1</prop>
# <prop>myvalue2</prop>
#
# then v is like:
# v = ['myvalue1', 'myvalue2']
if isinstance (v, list):
v = v[-1]
properties [k] = v if (v is not None) else ''
return properties |
def frame_has_key(frame, key: str):
    """Return whether `frame` supports lookup of `key`.

    Any failure of `frame[key]` (missing key, unhashable/incompatible key
    type, ...) is treated as "not present".
    """
    try:
        frame[key]
        return True
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit are no longer swallowed.
        return False
def mse(original_data, degradated_data):
    """
    Compute the per-channel Mean Squared Error (MSE) between two images.

    :param original_data: pixel sequence as Pillow gives it with getdata
    :param degradated_data: pixel sequence as Pillow gives it with getdata
    :return: [mse_y, mse_cb, mse_cr, weighted_avg] where the average weights
             the first channel four times as much as each of the other two
    """
    sum_y = 0
    sum_cb = 0
    sum_cr = 0
    for i, orig in enumerate(original_data):
        degr = degradated_data[i]
        d_y = orig[0] - degr[0]
        d_cb = orig[1] - degr[1]
        d_cr = orig[2] - degr[2]
        sum_y += d_y * d_y
        sum_cb += d_cb * d_cb
        sum_cr += d_cr * d_cr
    n = len(original_data)
    mse_y = sum_y / n
    mse_cb = sum_cb / n
    mse_cr = sum_cr / n
    return [mse_y, mse_cb, mse_cr, (mse_y * 4 + mse_cb + mse_cr) / 6]
"""
Obtains the PSNR for a list of MSE values
:param mse_list: A list of mse quantities
:return: List containing the PSNR. If MSE is 0 the output PSNR is infinite
""" |
def find_object_with_matching_attr(iterable, attr_name, value):
    """
    Return the first item whose attribute `attr_name` equals `value`.

    Items lacking the attribute are skipped.

    Returns:
        Matching item or None
    """
    for candidate in iterable:
        try:
            attr = getattr(candidate, attr_name)
        except AttributeError:
            continue
        if attr == value:
            return candidate
    return None
def merge_two_dicts(config, base, path=None):
    """Recursively merge `config` onto `base`; config wins on conflicts.

    Keys present in only one dict are copied; nested dicts present in both
    are merged recursively.  A dict/non-dict conflict raises ValueError.
    `path` is only used to build the error message.
    """
    assert config is not None
    assert base is not None
    merged = {}
    for key in set(config) | set(base):
        in_config = key in config
        in_base = key in base
        if in_config and not in_base:
            merged[key] = config[key]
        elif in_base and not in_config:
            merged[key] = base[key]
        elif isinstance(config[key], dict) and isinstance(base[key], dict):
            # Present in both and both are dicts: merge recursively.
            merged[key] = merge_two_dicts(config[key], base[key],
                                          path=f"{path}.{key}")
        elif isinstance(config[key], dict) or isinstance(base[key], dict):
            # Exactly one side is a dict: incompatible structures.
            raise ValueError(f"Types don't match: {path}: {key}")
        else:
            # Neither is a dict: config has preference.
            merged[key] = config[key]
    return merged
def str2bool(value):
    """Convert common truthy/falsy strings to bool; pass anything else through."""
    if not isinstance(value, str):
        return value
    lowered = value.lower()
    if lowered in ('yes', 'true', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'n', '0'):
        return False
    return value
def toggle_modal(n_open, n_close, n_load, is_open):
    """Toggle the DigitalRF playback request form modal.

    Flips `is_open` when any trigger fired (open click, close click, or
    the first element of `n_load`); otherwise returns the state unchanged.
    """
    triggered = bool(n_open) or bool(n_close) or bool(n_load[0])
    return (not is_open) if triggered else is_open
def nth_line(src: str, lineno: int) -> int:
    """
    Compute the starting index of the n-th line (where n is 1-indexed)

    >>> nth_line("aaa\\nbb\\nc", 2)
    4
    """
    assert lineno >= 1
    pos = 0
    remaining = lineno - 1
    while remaining:
        # find() returns -1 past the last line, resetting pos to 0,
        # matching the original behavior.
        pos = src.find('\n', pos) + 1
        remaining -= 1
    return pos
def rotation(s1, s2):
    """Check if s2 is a rotation of s1 using a single substring check.

    A rotation of s1 is always a substring of s1 concatenated with itself.
    """
    if len(s1) != len(s2):
        return False
    doubled = s1 + s1
    return s2 in doubled
def make_divisible(v, divisor=8, min_value=None):
    """Round `v` to the nearest multiple of `divisor`, with a floor.

    The result never falls below `min_value` (defaulting to `divisor`) and
    never drops more than 10% below `v`.
    """
    floor = min_value if min_value else divisor
    rounded = int(v + divisor / 2) // divisor * divisor
    new_v = max(floor, rounded)
    if new_v < 0.9 * v:
        # Rounding down removed more than 10%; bump up one divisor step.
        new_v += divisor
    return new_v
def get_data_type(param):
    """
    Convert WhiteboxTools data types to ArcGIS data types

    `param` is either a plain WhiteboxTools type-name string, or a dict
    wrapping one (e.g. {"OptionList": [...]}, {"ExistingFileOrFloat":
    "Raster"}, {"VectorAttributeField": [...]}).

    Returns a dict with keys 'data_type', 'data_filter', 'filter_type',
    'multi_value' and 'dependency_field'; string values are pre-quoted
    literals intended to be pasted into generated ArcGIS toolbox code.
    """
    data_type = '"GPString"' # default data type
    data_filter = "[]" # https://goo.gl/EaVNzg
    filter_type = '""'
    multi_value = False
    dependency_field = ""
    # ArcGIS data types: https://goo.gl/95JtFu
    data_types = {
        "Boolean": '"GPBoolean"',
        "Integer": '"GPLong"',
        "Float": '"GPDouble"',
        "String": '"GPString"',
        "StringOrNumber": '["GPString", "GPDouble"]',
        "Directory": '"DEFolder"',
        "Raster": '"DERasterDataset"',
        "Csv": '"DEFile"',
        "Text": '"DEFile"',
        "Html": '"DEFile"',
        "Lidar": '"DEFile"',
        "Vector": '"DEShapefile"',
        "RasterAndVector": '["DERasterDataset", "DEShapefile"]',
        "ExistingFileOrFloat": '["DERasterDataset", "GPDouble"]',
    }
    # Geometry filters applied when the parameter is a shapefile.
    vector_filters = {
        "Point": '["Point"]',
        "Points": '["Point"]',
        "Line": '["Polyline"]',
        "Lines": '["Polyline"]',
        "Polygon": '["Polygon"]',
        "Polygons": '["Polygon"]',
        "LineOrPolygon": '["Polyline", "Polygon"]',
        "Any": "[]",
    }
    if type(param) is str:
        # Simple case: param is the WhiteboxTools type name itself.
        data_type = data_types[param]
    else:
        # param is a wrapper dict; inspect each entry.
        for item in param:
            if item == "FileList":
                # FileList parameters become multi-value inputs.
                multi_value = True
            elif item == "OptionList":
                # Closed set of choices rendered as an ArcGIS ValueList.
                filter_type = '"ValueList"'
                data_filter = param[item]
            # File-extension filters for specific file-backed types.
            if param[item] == "Csv":
                data_filter = '["csv"]'
            elif param[item] == "Lidar":
                data_filter = '["las", "zip"]'
            elif param[item] == "Html":
                data_filter = '["html"]'
            if type(param[item]) is str:
                data_type = data_types[param[item]]
            elif type(param[item]) is dict:
                # Nested wrapper, e.g. {"FileList": {"Vector": "Point"}}:
                # resolve the inner type and, for shapefiles, its geometry.
                sub_item = param[item]
                for sub_sub_item in sub_item:
                    data_type = data_types[sub_sub_item]
                    if data_type == '"DEShapefile"':
                        data_filter = vector_filters[sub_item[sub_sub_item]]
            elif item == "VectorAttributeField":
                # Field picker driven by another (vector) parameter.
                data_type = '"Field"'
                dependency_field = param[item][1].replace("--", "")
            else:
                data_type = '"GPString"'
        # Special-cased combination that the loop above cannot express.
        if param == {"ExistingFileOrFloat": "Raster"}:
            data_type = '["DERasterDataset", "GPDouble"]'
    ret = {}
    ret["data_type"] = data_type
    ret["data_filter"] = data_filter
    ret["filter_type"] = filter_type
    ret["multi_value"] = multi_value
    ret["dependency_field"] = dependency_field
    return ret
def settings_value(value):
    """Returns the evaluated string.

    Evaluates `value` as a Python expression with empty globals/locals,
    falling back to the raw string when it does not parse or references
    unknown names.

    SECURITY NOTE(review): `eval` executes arbitrary expressions and
    builtins remain reachable even with empty namespaces — never call this
    on untrusted input.  Consider `ast.literal_eval` if callers only pass
    literals; confirm before changing behavior.
    """
    try:
        return eval(value, {}, {})
    except (NameError, SyntaxError):
        return value
def maybe_route_func(func, count):
    """
    Return `count` instances of `func`, copying each when its type supports it.

    Parameters
    ----------
    func : `callable`
        The callable to route.
    count : `int`
        The expected amount of functions to return.

    Returns
    -------
    result : `list` of `func`
        Independent copies when `type(func)` defines a `copy` method;
        otherwise the same object repeated.
    """
    copier = getattr(type(func), 'copy', None)
    if copier is None:
        return [func] * count
    return [copier(func) for _ in range(count)]
def validate_bool_kwarg(value, arg_name):
    """Ensure `value` is a bool or None and return it unchanged.

    Raises:
        ValueError: when value is neither bool nor None.
    """
    if isinstance(value, bool) or value is None:
        return value
    raise ValueError(
        f'For argument "{arg_name}" expected type bool, received '
        f"type {type(value).__name__}."
    )
def resolve_continuation(l, char='\\'):
    """Join list elements ending in the continuation character onto the next.

    Trailing spaces/tabs/backslashes are stripped at each join point.

    Raises:
        EOFError: if the final element still expects a continuation.
    """
    result = []
    pending = ''
    for line in l:
        if line.endswith(char):
            # Accumulate; the continuation marker itself is stripped.
            pending += line.rstrip(" \t\\")
        else:
            result.append(pending + line if pending else line)
            pending = ''
    if pending:
        raise EOFError(pending)
    return result
def num_deriv(r, func, h=0.1e-5):
    """Central-difference numerical derivative of `func` at `r`.

    :param r: Value at which the derivative of `func` is evaluated.
    :param func: Function whose gradient is to be evaluated.
    :param h: Step size for the finite difference.
    :return: Numerical derivative of func at `r`.
    """
    lo = r - (h / 2.0)
    hi = r + (h / 2.0)
    return (func(hi) - func(lo)) / (hi - lo)
def tree_consistent(b):
    """Return True if no two spans in `b` cross each other.

    Two spans cross when they overlap without one containing the other.
    FIXME: move this to the bracketing package.
    """
    def _crosses(first, second):
        lo1, hi1 = first
        lo2, hi2 = second
        return (lo1 < lo2 < hi1 < hi2) or (lo2 < lo1 < hi2 < hi1)

    for i in range(len(b)):
        for j in range(i + 1, len(b)):
            if _crosses(b[i], b[j]):
                return False
    return True
def process_related_id(plan):
    """Collect reference identifiers from a plan record.

    Inspects plan['geonetwork'], plan['rda'] and every entry of
    plan['related']; anything containing an http(s) URL becomes a reference
    dict with the relation type matching its source key.
    """
    relationship = {'geonetwork': 'isReferencedBy', 'rda': 'isAlternateIdentifier',
                    'related': 'describes'}

    def _is_url(text):
        return 'http://' in text or 'https://' in text

    rids = []
    for key in ('geonetwork', 'rda'):
        if _is_url(plan[key]):
            rids.append({'identifier': plan[key], 'relation': relationship[key]})
    for rel in plan['related']:
        if _is_url(rel):
            rids.append({'identifier': rel, 'relation': relationship['related']})
    return rids
def convert_letters(Letter):
    """
    Description
    -----------
    Convert an uppercase letter to its alphabet position ("A" -> 1).

    Parameters
    ----------
    Letter : str, e.g. "A", "B"

    Context
    ----------
    Called in wrapp_ProcessUnits and wrapp_SystemData.

    Returns
    -------
    int
    """
    return ord(Letter) - 64
def get_icon_info(family_name, icons, host, asset_url_pattern):
    """Return (icon name, asset URL) tuples for icons supporting the family.

    Icons listing `family_name` in their 'unsupported_families' are skipped.
    """
    icon_info = []
    for icon in icons:
        if family_name in icon['unsupported_families']:
            continue
        params = {
            'family': family_name,
            'icon': icon['name'],
            'version': icon['version'],
            'asset': '24px.xml'
        }
        url = 'http://' + host + asset_url_pattern.format(**params)
        icon_info.append((icon['name'], url))
    return icon_info
def reverse_bits(n, bit_count):
    """
    Return `n` with the order of its lowest `bit_count` bits reversed.

    :param n: number whose bits are reversed
    :param bit_count: number of bits used for the passed number
    :return: the bit-reversed value
    """
    rev = 0
    for _ in range(bit_count):
        # Shift the accumulator left and bring in n's lowest bit.
        rev = (rev << 1) | (n & 1)
        n >>= 1
    return rev
def gcd(a, b):
    """Greatest common divisor of integers 'a' and 'b' (Euclid's algorithm)."""
    while b != 0:
        a, b = b, a % b
    return a
def get_unique_lines(text_hashes):
    """Deduplicate a list of text-hashes.

    Args:
        text_hashes (list): A list of (hashes of) strings.

    Returns:
        unique_indices (list): same length as text_hashes; for each element,
            the index of its entry in unique_text_hashes
            (i.e. "which unique text-hash does this correspond to?").
        unique_text_hashes (list): unique elements in first-seen order.
    """
    unique_text_hashes = []
    index_of = {}
    unique_indices = []
    for text_hash in text_hashes:
        if text_hash not in index_of:
            index_of[text_hash] = len(unique_text_hashes)
            unique_text_hashes.append(text_hash)
        unique_indices.append(index_of[text_hash])
    return unique_indices, unique_text_hashes
def percentage(part, whole):
    """ This function calculates the percentage of a given set """
    # NOTE(review): this computes `part` percent OF `whole`
    # (e.g. percentage(50, 200) == 100.0), NOT part/whole*100.
    # Callers appear to pass `part` as a percentage value — confirm their
    # expectation before "fixing" the formula.
    return float(part)*float(whole)/100
def element(elem):
    """Two-way element symbol lookup.

    An integer argument is treated as an atomic number and returns the
    symbol; a string argument returns its 1-based position in the table,
    case-insensitively.
    """
    symbols = ["H", "He", "Li", "Be", "B", "C", "N", "O", "F", "Ne",
               "Na", "Mg", "Al", "Si", "P", "S", "Cl", "Ar", "K", "Ca",
               "Sc", "Ti", "V", "Cr", "Mn", "Fe", "Co", "Ni", "Cu", "Zn",
               "Ga", "Ge", "As", "Se", "Br", "Kr", "Rb", "Sr", "Y", "Zr",
               "Nb", "Mo", "Tc", "Ru", "Rh", "Pd", "Ag", "Cd", "In", "Sn",
               "Sb", "Te", "I", "Xe", "Cs", "Ba", "La", "Ce", "Pr", "Nd",
               "Pm", "Sm", "Eu", "Gd", "Tb", "Dy", "Ho", "Er", "Tm", "Yb",
               "Lu", "Hf", "Ta", "W", "Re", "Os", "Ir", "Pt", "Au", "Hg",
               "Tl", "Pb", "Bi", "Po", "At", "Rn", "Fr", "Ra", "Ac", "Th",
               "Pa", "U"]
    try:
        # Atomic number -> symbol (1-based index).
        return symbols[elem - 1]
    except TypeError:
        # `elem - 1` failed, so elem is a symbol string: return its number.
        return symbols.index(elem.capitalize()) + 1
def _leading_zero(l1):
"""
Add leading 0 if necessary
"""
return l1 if len(l1) == 2 else '0' + l1 |
def NoCaseCmp(x, y):
    """Case-insensitive three-way comparison: -1, 0 or 1."""
    a, b = x.lower(), y.lower()
    # Standard cmp() replacement idiom.
    return (a > b) - (a < b)
def bool_to_str(value: bool) -> str:
    """Convert a boolean to 'ON' or 'OFF'.

    Raises:
        TypeError: when `value` is not a bool.  (Was a generic Exception;
            TypeError is the conventional type and is still caught by any
            caller handling Exception.)
    """
    if not isinstance(value, bool):
        raise TypeError(f"bool_to_str: unsupported variable type '{type(value)}', value '{value}'. Only boolean values are supported.")
    return 'ON' if value else 'OFF'
def get_recursive_content_as_str(doc):
    """
    THIS METHOD IS DEPRECATED!

    Flatten nested dicts/lists of strings into one newline-terminated blob.

    Raises:
        Exception: on any node that is not str, dict or list.
    """
    if isinstance(doc, str):
        return doc.strip() + '\n'
    if isinstance(doc, dict):
        return ''.join(get_recursive_content_as_str(v) for v in doc.values())
    if isinstance(doc, list):
        return ''.join(get_recursive_content_as_str(item) for item in doc)
    raise Exception('cannot parse document recursively, ' + str(type(doc)))
def interpolate(errors1, prob1, errors2, prob2, alpha):
    """
    Linearly interpolate the number of errors with accumulated probability
    1 - alpha between two (errors, probability) points.

    The result is clamped at 0: for very short high-quality sequences the
    probability of 0 errors can already exceed 1 - alpha, which would
    otherwise give a negative count.
    """
    target = 1 - alpha
    fraction = (target - prob1) / (prob2 - prob1)
    value = errors1 + (errors2 - errors1) * fraction
    return max(value, 0)
def overlap(a, b):
    """Check if a overlaps b.

    Typically used to check if ANY of a list of sentences is in the ngrams
    returned by an lf_helper.

    :param a: A collection of items
    :param b: A collection of items
    :rtype: boolean
    """
    return bool(set(a) & set(b))
def _interp_evaluate(coefficients, t0, t1, t):
"""Evaluate polynomial interpolation at the given time point.
Args:
coefficients: list of Tensor coefficients as created by `interp_fit`.
t0: scalar float64 Tensor giving the start of the interval.
t1: scalar float64 Tensor giving the end of the interval.
t: scalar float64 Tensor giving the desired interpolation point.
Returns:
Polynomial interpolation of the coefficients at time `t`.
"""
assert (t0 <= t) & (t <= t1), 'invalid interpolation, fails `t0 <= t <= t1`: {}, {}, {}'.format(t0, t, t1)
x = (t - t0) / (t1 - t0)
total = coefficients[0] + x * coefficients[1]
x_power = x
for coefficient in coefficients[2:]:
x_power = x_power * x
total = total + x_power * coefficient
return total |
def find_anagrams_of(word):
    """Find all anagrams.

    Params
    ------
    word: str
        Characters whose orderings are generated.

    Returns
    -------
    list
        All orderings of the characters (duplicates included for repeated
        letters).  NOTE: single-character input returns the bare string,
        matching the original recursive base case.
    """
    if len(word) == 1:
        return word
    head = word[0]
    results = []
    for tail_perm in find_anagrams_of(word[1:]):
        # Insert the head character at every possible position.
        results.extend(tail_perm[:k] + head + tail_perm[k:]
                       for k in range(len(word)))
    return results
def unindent_text(text, pad):
    """
    Remove `pad` from the beginning of each line of `text` that starts
    with it; other lines are left untouched.

    @type text: str
    @type pad: str
    """
    width = len(pad)
    stripped = [line[width:] if line.startswith(pad) else line
                for line in text.splitlines()]
    return '\n'.join(stripped)
def letters_no_answer(word, answers, indices_non_letters):
    """
    If the get_words function returns more than one word, then this does
    not automatically decrypt any of the letters in the word.  However, if
    every candidate word has the same letter in the same location, that
    letter can be decrypted.

    Takes the encoded `word`, the list of candidate `answers` and the
    positions to inspect, and returns two parallel lists: the numbers that
    have been decoded and the letters they correspond to.
    """
    l = 0
    letters_determined = []
    numbers_determined = []
    for index in indices_non_letters:
        # Count adjacent candidate pairs agreeing at this position.
        for k in range(len(answers)-1):
            if answers[k][index] == (answers[k+1])[index]:
                l += 1# Two consecutive words share the letter at this location.
        if l + 1 == len(answers):# all words have the same letter in the same location.
            letters_determined.append(answers[0][index])
            numbers_determined.append(word[index])
        l = 0# reset counter for the next position.
    """
    !!BUG!!
    This function has a slight bug in that it can return the same letter twice
    this is compensated for in solver_1, but should be sorted here.
    """
    return numbers_determined, letters_determined
def get_res_string(res):
    """Convert a resolution in bp to a short string (e.g. 10000 -> "10kb")."""
    kb = res // 1000
    if kb < 1000:
        return f"{kb}kb"
    return f"{int(kb / 1000)}mb"
def str_denumify(string, pattern):
    """
    Format `string` according to `pattern`, where each letter X is replaced
    by the next character from `string`.

    >>> str_denumify("8005551212", "(XXX) XXX-XXXX")
    '(800) 555-1212'
    """
    remaining = list(string)
    out = []
    for c in pattern:
        if c == "X":
            # Consume the next input character (IndexError if exhausted,
            # as in the original).
            out.append(remaining.pop(0))
        else:
            out.append(c)
    return ''.join(out)
def omni_tether_script(amount):
    """
    Build an OMNI tether output script.

    :param amount: (display amount) * (10 ** 8)
    :return: script in hex: fixed prefix + 16-hex-digit zero-padded amount
    """
    prefix = "6a146f6d6e69000000000000001f"
    # '016x' = lowercase hex, zero-padded to 16 characters.
    return prefix + format(amount, '016x')
def get_node(fasta_dic):
    """Get nodes with base and path name from multiple sequences.

    For each alignment column, groups sequences by their base: one node per
    distinct non-gap base (listing the sequences carrying it), plus one
    node per gapped sequence (with an empty base).  Returns a dict mapping
    1-based node ids to node dicts with keys 'base', 'seq_name', 'pos'.
    """
    def get_coordinate_info(fasta_dic):
        """Divide the sequences into one base units: per sequence, a list of
        (seqid, base) tuples."""
        coordinate_info_list = []
        for seqid, seq in fasta_dic.items():
            tmp_list = [(seqid, base) for base in seq]
            coordinate_info_list.append(tmp_list)
        return coordinate_info_list
    coordinate_info_list = get_coordinate_info(fasta_dic)
    node_list = []
    msaIndexPos = 0
    # zip(*...) iterates alignment columns across all sequences.
    for tmp_coordinate_info in zip(*coordinate_info_list):
        if '-' in (i[1] for i in tmp_coordinate_info):
            # Column contains at least one gap: handle non-gap bases first...
            base_set = set([i[1] for i in tmp_coordinate_info if i[1] != '-'])
            for base in base_set:
                node_list.append({
                    'base': base.upper(),
                    'seq_name': [
                        i[0] for i in tmp_coordinate_info if i[1] == base
                    ],
                    'pos': msaIndexPos
                })
            # ...then one empty-base node per gapped sequence.
            for seqid, tmp_base in tmp_coordinate_info:
                if tmp_base == '-':
                    node_list.append({'base': '',
                                      'seq_name': [
                                          seqid
                                      ],
                                      'pos': msaIndexPos
                                      })
        else:
            # Gap-free column: one node per distinct base.
            base_set = set([i[1] for i in tmp_coordinate_info])
            for base in base_set:
                node_list.append({
                    'base': base.upper(),
                    'seq_name': [
                        i[0] for i in tmp_coordinate_info if i[1] == base
                    ],
                    'pos': msaIndexPos
                })
        msaIndexPos+=1
    # Node ids are 1-based.
    return {i: j for i, j in enumerate(node_list, 1)}
def close(session_attributes, fulfillment_state, message):
    """
    Build a 'Close' dialog-action (slot type) response.

    Args:
        session_attributes: session attributes echoed back to the caller.
        fulfillment_state: fulfillment state string.
        message: message object shown to the user.
    """
    dialog_action = {
        "type": "Close",
        "fulfillmentState": fulfillment_state,
        "message": message,
    }
    return {
        "sessionAttributes": session_attributes,
        "dialogAction": dialog_action,
    }
def GetPartitionName(dev, index):
    """Get partition name from device name and index.

    Returns:
        `<dev><index>` for devices named like sata disks ('sd' prefix);
        otherwise `<dev>p<index>`.
    """
    separator = '' if dev.startswith('sd') else 'p'
    return f'{dev}{separator}{index}'
def find_holder(card, hands):
    """Return the index of the (last) hand containing `card`, or -1."""
    matches = [i for i, hand in enumerate(hands) if card in hand]
    return matches[-1] if matches else -1
def all_pairs(elements):
    """
    Generate all possible pairs from the list of given elements.
    Pairs have no order: (a, b) is the same as (b, a).

    The output ordering matches the original recursive formulation: pairs
    from the tail of the list come first.

    :param elements: an array of elements
    :return: a list of pairs, for example [('a', 'b')]
    """
    pairs = []
    for i in range(len(elements) - 2, -1, -1):
        head = elements[i]
        pairs.extend((head, other) for other in elements[i + 1:])
    return pairs
def reducer(s):
    """Return (wrapped string, line count): "+"/"-" markers splice the
    following item onto the previous line with a blank; other items get
    their own line.

    The marker itself is dropped; its following item is consumed.  A
    dangling trailing marker stops processing early (matching the
    original's IndexError-driven exit).  Unlike the original, the caller's
    list is no longer mutated and the bare `except:` is gone.
    """
    work = list(s)  # copy so consumed items are marked without mutating `s`
    ss = [work[0]]
    for i in range(1, len(work)):
        token = work[i]
        if token == "+" or token == "-":
            if i + 1 >= len(work):
                # Dangling marker at the end: stop, keeping what we have.
                break
            ss[-1] = ss[-1] + " " + work[i + 1]
            work[i + 1] = None  # mark as consumed
        elif token is not None:
            ss.append(token)
    return "\n".join(ss), len(ss)
def midi2hz(note_number):
    """Convert a (fractional) MIDI note number to its frequency in Hz.

    Parameters
    ----------
    note_number : float
        MIDI note number, can be fractional.

    Returns
    -------
    note_frequency : float
        Frequency of the note in Hz, in A440 tuning (MIDI 69 == 440 Hz).
    """
    semitones_from_a4 = note_number - 69
    return 440.0 * 2.0 ** (semitones_from_a4 / 12.0)
def splitConsecutive(collection, length):
    """
    Split the elements of @collection into consecutive disjoint lists of
    length @length (the last chunk may be shorter).  If @length is greater
    than the number of elements, the collection is returned as is (note:
    NOT wrapped in a list).
    """
    if len(collection) < length:
        return collection
    return [collection[start:start + length]
            for start in range(0, len(collection), length)]
def are_broadcastable(*shapes):
    """
    Check whether an arbitrary list of array shapes are broadcastable.

    Dimensions are compared right-aligned, numpy style: at each position
    every shape's dimension must equal the others or be 1 (missing leading
    dimensions are ignored).

    :Parameters:
        *shapes: tuple or list
            A set of array shapes.

    :Returns:
        broadcastable: bool
    """
    if len(shapes) < 2:
        # A single shape (or none) is trivially broadcastable.
        return True
    reversed_shapes = [shape[::-1] for shape in shapes]
    for dims in zip(*reversed_shapes):
        distinct = set(dims) - {1}
        if len(distinct) > 1:
            # Two different non-1 sizes at the same position: incompatible.
            return False
    return True
def spikesFromVm(t, vm, **kwargs):
    """ Extract the spike times from a voltage trace.

    Parameters
    ----------
    t : array or list of int or float
        time points (in ms)
    vm : array or list of int or float
        voltage points
    threshold : int or float, optional keyword
        voltage at which we consider a spike to be fired (default 0)
    min_t : int or float, optional keyword
        only spikes after this moment are considered (default 0)
    max_t : int or float, optional keyword
        only spikes before this moment are considered (default max(t))

    Returns
    -------
    spiketimes : list of int or float
        spike times (in ms), one per upward threshold crossing
    """
    threshold = kwargs.get('threshold', 0)
    min_t = kwargs.get('min_t', 0)
    max_t = kwargs.get('max_t', max(t))
    spiketimes = []
    in_spike = False
    for i, time in enumerate(t):
        voltage = vm[i]
        if voltage > threshold:
            # Record only the first sample of each crossing, inside the window.
            if not in_spike and min_t <= time <= max_t:
                spiketimes.append(time)
            in_spike = True
        elif in_spike and voltage < threshold:
            in_spike = False
    return spiketimes
def add_globals(env):
    """ Adds built-in arithmetic/comparison procedures and True/False to env. """
    import operator as op
    env.update({
        '+': op.add, '-': op.sub, '*': op.mul, '/': op.truediv,
        '>': op.gt, '<': op.lt, '>=': op.ge, '<=': op.le, '=': op.eq,
        'True': True, 'False': False,
    })
    return env
def proc(config: dict) -> int:
    """The number of processes to use for multiprocess operations."""
    return config["proc"]
def expand_feed_dict(feed_dict):
    """Expand tuple keys: split the value batch across the tuple's placeholders.

    A key that is a tuple of placeholders gets its value split along the
    first dimension into len(key) nearly-equal consecutive chunks (earlier
    placeholders receive one extra row each until the remainder is used up).
    Plain keys pass through unchanged.
    """
    expanded = {}
    for key, value in feed_dict.items():
        if type(key) is not tuple:
            expanded[key] = value
            continue
        parts = len(key)
        batch_size = value.shape[0]
        assert batch_size > 0
        span, remainder = divmod(batch_size, parts)
        start = 0
        for i, placeholder in enumerate(key):
            stop = start + span + (1 if i < remainder else 0)
            expanded[placeholder] = value[start: stop]
            start = stop
    return expanded
def middle(shape):
    """ Given the 2D vertices of a shape, return the coordinates of the
    middle (centroid of the vertices) as an (x, y) tuple. """
    count = len(shape)
    x_total = sum(p[0] for p in shape)
    y_total = sum(p[1] for p in shape)
    return (x_total / count, y_total / count)
def merge_string_without_overlap(string1, string2):
    """
    Join two strings that share no common substring with a newline.
    """
    return f"{string1}\n{string2}"
def oget(optional, default=None):
    """Return `optional` unless it is None, in which case return `default`."""
    if optional is None:
        return default
    return optional
def dragLossUpToAltitude(altitude):
    """Return the drag loss (m/s) accumulated up to `altitude` meters.

    Linear model valid for 0..20000 m; raises outside that range.
    """
    if not (0 <= altitude <= 20000):
        raise Exception("Invalid at given altitude: {0}".format(altitude))
    return 150 - 0.0075 * altitude  # m/s
def intOrNone(val):
    """
    Cast `val` to int, passing None through unchanged.
    """
    return None if val is None else int(val)
def vec2num(vec):
    """Combine a sequence of decimal digits into a single number (MSD first)."""
    total = 0
    for position, digit in enumerate(reversed(vec)):
        total += digit * 10 ** position
    return total
def make_wkt_point(x_centre, y_centre):
    """Create a well known text (WKT) point geometry for database insertion."""
    return "POINT({} {})".format(x_centre, y_centre)
def _flatten(t: list) -> list:
"""
Flatten nested list
"""
return [item for sublist in t for item in sublist] |
def get_port_ether(port_name):
    """
    Build the shell command that prints a host port's Ethernet address.

    Input:
        - port_name: name of the port (e.g. as shown in ifconfig)

    Returns the command string; it is NOT executed here.
    """
    return f"ifconfig {port_name} | grep \"ether \" | xargs | cut -d ' ' -f 2"
def present_value(value, year, discount_rate, compounding_rate=1):
    """
    Present value of a future value (positive-sign variant of numpy.pv,
    which gave weird negative values).

    :param value: The future value to get the present value of
    :param year: How many years away is it?
    :param discount_rate: The discount rate for all periods in between
    :param compounding_rate: Compounding periods per year (1=annual, 365=daily)
    :return: present value of the provided value
    """
    rate_per_period = float(discount_rate) / float(compounding_rate)
    periods = year * compounding_rate
    return value * (1 + rate_per_period) ** (-periods)
def flip(dictionary):
    """
    Flips a dictionary with unique values.

    :param dictionary: a mapping assumed to have unique values.
    :returns: a dict whose keys are the mapping's values and whose values
        are the mapping's keys.
    """
    return dict((value, key) for key, value in dictionary.items())
def map_range(value, from_lower, from_upper, to_lower, to_upper):
    """Map a value from one range to another, clamped to the target range
    and rounded to the nearest integer."""
    scaled = (value - from_lower) * (to_upper - to_lower) / (
        from_upper - from_lower
    )
    clamped = min(max(scaled + to_lower, to_lower), to_upper)
    return round(clamped)
def reselect_truncate_seq_pair(tokens_a, tokens_b, max_length):
    """Truncates a sequence pair in place to the maximum length.

    Pops from the front of `tokens_a` when it is longer, else from the back
    of `tokens_b`, then re-anchors the "[SOT]" start-of-text token at the
    front of `tokens_a` if truncation removed it.

    NOTE(review): `tokens_a` is both mutated (pop) and rebound (the
    "[SOT]" re-anchor builds a new list), so callers must use the returned
    lists rather than rely purely on in-place mutation — confirm call sites.
    Also assumes tokens_a is non-empty after truncation (tokens_a[0]).
    """
    # This is a simple heuristic which will always truncate the longer sequence
    # one token at a time. This makes more sense than truncating an equal percent
    # of tokens from each, since if one sequence is very short then each token
    # that's truncated likely contains more information than a longer sequence.
    while True:
        total_length = len(tokens_a) + len(tokens_b)
        if total_length <= max_length:
            break
        if len(tokens_a) > len(tokens_b):
            tokens_a.pop(0)
        else:
            tokens_b.pop()
    if tokens_a[0] != "[SOT]":
        # Truncation ate the start token: drop one more and re-prepend it.
        tokens_a.pop(0)
        tokens_a = ["[SOT]"] + tokens_a
    return tokens_a, tokens_b
def move_angle_to_angle(theta1, theta2, p):
    """
    Move a number a fraction of the way toward another number (originally
    used for angles).

    args:
        theta1: The starting value
        theta2: The target value
        p: The extent of movement; 1 means full movement.
    """
    delta = theta2 - theta1
    return theta1 + delta * p
def percent(values, p=0.5):
    """Return the value a fraction `p` of the way from min(values) to max(values)."""
    lo = min(values)
    hi = max(values)
    return lo + p * (hi - lo)
def fromSpectralType(spectralType):
    """Gets info from spectral type.

    Returns a (group, color) tuple: 'sd' prefixes (subdwarfs) map to
    "main sequence" with the remainder as color, 'D' prefixes to
    ("white dwarf", "white"), and anything else is parsed as a two-character
    color code followed by an optional luminosity class (I..V).
    """
    group = None
    if spectralType.startswith("sd"):
        group = "main sequence"
        color = spectralType[2:]
        return group, color
    elif spectralType.startswith("D"):
        group = "white dwarf"
        return group, "white"
    else:
        try:
            color = spectralType[:2]
        except KeyError:
            # NOTE(review): string slicing never raises KeyError, so this
            # branch appears dead — confirm whether a mapping type was once
            # passed here.
            return "unknown", "unknown"
        rest = spectralType[2:]
        # Code to get the group. It's always at the start of rest.
        # Longest-match test of the Roman-numeral luminosity class.
        if rest.startswith("I"):
            if rest.startswith("II"):
                if rest.startswith("III"):
                    group = "III"
                else:
                    group = "II"
            elif rest.startswith("IV"):
                group = "IV"
            else:
                group = "I"
        elif rest.startswith("V"):
            group = "V"
        else:
            group = ""
        # Map the luminosity class to a human-readable group name.
        table = {
            "I": "supergiant",
            "II": "giant",
            "III": "giant",
            "IV": "subgiant",
            "V": "main sequence",
            "": "unknown"
        }
        group = table[group]
        return group, color
def scaleND(v, s):
    """Scale each component of an nD vector by the factor s."""
    return [s * component for component in v]
def is_msc_dep(dep):
    """ Given dep str, return True if it is one of the MSc departments
    offered by IIT KGP. """
    return dep in ("GG", "EX", "MA", "CY", "HS", "PH")
def get_training_image_size(original_size, multiple=32):
    """
    Our inputs to the network must be multiples of 32 (or `multiple`).

    For each dimension, returns the smallest multiple of `multiple` that is
    strictly greater than the dimension (an exact multiple is rounded up to
    the next one, matching the original `>` comparison).

    Args:
        original_size: Iterable of dimension sizes (e.g. (height, width)).
        multiple: The required divisor of every output dimension.

    Returns:
        tuple of int: The rounded-up sizes, one per input dimension.
    """
    # Direct arithmetic replaces the original bounded search loop, which
    # capped candidates at multiple * 19 and raised for larger images.
    return tuple(multiple * (dimension_size // multiple + 1)
                 for dimension_size in original_size)
def pretty_repr(obj, linebreaks=True):
    """Pretty repr for an Output
    Parameters
    ----------
    obj : any type
    linebreaks : bool
        If True, each attribute is rendered on its own indented line
    """
    cls_name = obj.__class__.__name__
    # Namedtuples expose _asdict(); anything else is left as-is.
    try:
        obj = obj._asdict()
    except AttributeError:
        pass
    # Prefer mapping items, then the instance __dict__, else plain repr.
    try:
        pairs = list(obj.items())
    except AttributeError:
        try:
            pairs = list(obj.__dict__.items())
        except AttributeError:
            return repr(obj)
    pad = "    " if linebreaks else ""
    rendered = [
        "{indent}{key}={value}".format(
            indent=pad,
            key=name,
            value=pretty_repr(val, linebreaks=False),
        )
        for name, val in pairs
    ]
    attr_sep = ",\n" if linebreaks else ", "
    body = attr_sep.join(rendered)
    glue = "\n" if linebreaks else ""
    return glue.join([cls_name + "(", body, ")"])
def create_type_map(cls, type_map=None):
    """ Helper function for creating type maps.

    If `type_map` is falsy, returns a fresh empty dict; if it is callable,
    returns `type_map(cls)`; otherwise returns a shallow copy of it.
    """
    if not type_map:
        return {}
    if callable(type_map):
        return type_map(cls)
    return type_map.copy()
def revers_str2method(input_string):
    """
    Input:
        input_string is str() type sequence
    Output:
        reversed input_string by str() type
    """
    # A negative-step slice reverses the string directly, replacing the
    # hand-rolled list/reverse/join round-trip.
    return input_string[::-1]
def filt_last_arg(list_, func):
    """Like filt_last but return index (arg) instead of value.

    Scans from the end and returns at the first match, so only the elements
    after the last match are tested (the original traversed the whole list).

    Args:
        list_: Sequence to search.
        func: Predicate applied to each element.

    Returns:
        int index of the last element for which `func` is truthy, or None
        if no element matches.
    """
    for i in range(len(list_) - 1, -1, -1):
        if func(list_[i]):
            return i
    return None
def expand_axes_in_transpose(transpose, num_new_axes):
    """Prepend `num_new_axes` identity axes to a transpose permutation,
    shifting every existing axis index up by that amount."""
    prefix = list(range(num_new_axes))
    shifted = [num_new_axes + axis for axis in transpose]
    return tuple(prefix + shifted)
def is_anagram(s, t):
    """
    Find if strings are anagrams. t anagram of s.

    Args:
        s, t: input strings.

    Returns:
        bool: True when the strings contain exactly the same characters
        with the same multiplicities.
    """
    # Bug fix: the original compared a sorted *list* of s's characters
    # against the raw string t, which is always False for non-empty input.
    # Sorting both sides (O(n log n)) makes the comparison meaningful.
    return sorted(s) == sorted(t)
def gather_flex_fields(row, flex_data):
    """ Getting the flex data, formatted for the error and warning report, for a row.
    Args:
        row: the dataframe row to get the flex data for (read via 'Row Number')
        flex_data: the dataframe containing flex fields for the file, with
            'row_number' and 'concatted' columns, or None
    Returns:
        The concatenated flex data for the row if there is any, an empty string otherwise.
    """
    if flex_data is None:
        return ''
    matches = flex_data.loc[flex_data['row_number'] == row['Row Number'], 'concatted'].values
    # Bug fix: indexing values[0] unconditionally raised IndexError when the
    # row had no flex data, despite the documented empty-string contract.
    return matches[0] if len(matches) else ''
def try_enum(cls, val):
    """A function that tries to turn the value into enum ``cls``.
    If it fails it returns the value instead.
    """
    mapping = getattr(cls, '_enum_value_map_', None)
    if mapping is None:
        return val
    try:
        return mapping[val]
    except (KeyError, TypeError, AttributeError):
        return val
def fibo(iterations):
    """
    Calculate the Fibonacci series value at position `iterations`.

    fibo(0) == fibo(1) == 1 and fibo(n) == fibo(n-1) + fibo(n-2),
    matching the original recursive definition.

    Args:
        iterations: Non-negative position in the series.

    Returns:
        int: The Fibonacci number at that position.
    """
    # Iterative form replaces the exponential double recursion and drops the
    # leftover debug print (whose message also mis-stated the computed sum).
    a, b = 1, 1
    for _ in range(iterations):
        a, b = b, a + b
    return a
def print_scientific_8(value: float) -> str:
    """
    Prints a value in 8-character scientific notation.

    The 'e' is omitted: the field is mantissa + exponent-sign + exponent
    digits, e.g. 1.2345e+06 -> '1.2345+6'. This is a sub-method and
    shouldn't typically be called directly.
    NOTE(review): this looks like a NASTRAN-style small-field format --
    confirm against the caller before relying on that.

    Parameters
    ----------
    value : float
        The number to format.

    Returns
    -------
    str
        An 8-character, right-justified field.

    Notes
    -----
    print_float_8 : a better float printing method
    """
    if value == 0.0:
        # Zero gets a fixed right-justified '0.' field.
        return '%8s' % '0.'
    # Full-precision scientific string, e.g. '1.23456789012e-04'.
    python_value = '%8.11e' % value
    svalue, sexponent = python_value.strip().split('e')
    exponent = int(sexponent)  # int() drops leading zeros and '+' in the exponent
    # Exponent sign inferred from the magnitude: |value| < 1 means a
    # negative exponent; |value| >= 1 (exponent >= 0) gets '+'.
    sign = '-' if abs(value) < 1. else '+'
    # the exponent digits are appended after the mantissa below
    exp2 = str(exponent).strip('-+')
    value2 = float(svalue)
    # Digits available for the mantissa's fractional part: 8 chars total
    # minus the leading digit, '.', the exponent sign and exponent digits.
    leftover = 5 - len(exp2)
    if value < 0:
        # A negative mantissa needs one extra character for its '-' sign.
        fmt = "%%1.%sf" % (leftover - 1)
    else:
        fmt = "%%1.%sf" % leftover
    svalue3 = fmt % value2
    # Strips zeros from BOTH ends (e.g. '0.5000' -> '.5') to shorten the field.
    svalue4 = svalue3.strip('0')
    field = "%8s" % (svalue4 + sign + exp2)
    return field
def get_eligible_craters(crater_list):
    """Create eligible crater list from crater_list.

    A crater tuple is kept when all of these hold (bounds inclusive):
      * -40 <= crater_tuple[2] <= 50
      *  40 <= crater_tuple[3] <= 135
      *  crater_tuple[4] >= 60
    NOTE(review): tuple fields are presumably (.., lat, lon, diameter) --
    confirm against the producer of crater_list.

    Args:
        crater_list: Iterable of crater tuples with numeric fields 2-4.

    Returns:
        list: The eligible crater tuples, in input order.
    """
    # Cleanup: dropped the unused `checker` variable, the trailing
    # semicolon and the per-field boolean flag in favor of chained
    # comparisons; the accepted set is identical.
    return [
        crater for crater in crater_list
        if -40 <= crater[2] <= 50
        and 40 <= crater[3] <= 135
        and crater[4] >= 60
    ]
def parse_field_ref(field_ref):
    """Split a field reference into a model label and a field name.
    Args:
        field_ref (str): a label for the model field to clean, following the
            convention `app_name.ModelName.field_name`
    Return:
        2-tuple of str: (`app_name.ModelName`, `field_name`)
    """
    # Exact three-way unpack so a malformed reference raises ValueError.
    app_name, model_name, field_name = field_ref.split('.')
    return '{}.{}'.format(app_name, model_name), field_name
def kgtk_null_to_empty(x):
    """If 'x' is NULL (None) map it onto the empty string; otherwise
    return 'x' unmodified."""
    return '' if x is None else x
def isnormaldataitem(dataitem):
    """
    Detects if the data item is in standard form, i.e. it contains all of
    the 'Table', 'Action' and 'Data' members.
    """
    required = ('Table', 'Action', 'Data')
    return all(key in dataitem for key in required)
def delimit(delimiters, content):
    """
    Surround `content` with the first and last characters of `delimiters`.

    >>> delimit('[]', "foo")
    '[foo]'
    >>> delimit('""', "foo")
    '"foo"'

    Raises:
        ValueError: if `delimiters` is not exactly two characters long.
    """
    # Doc fix: the first doctest's expected output previously lacked the
    # string quotes ('[foo]' was shown as [foo]), so it could never pass.
    if len(delimiters) != 2:
        raise ValueError(
            "`delimiters` must be of length 2. Got %r" % delimiters
        )
    return ''.join([delimiters[0], content, delimiters[1]])
def _short_string(data, length=800):
"""
Truncate a string if it exceeds a max length
"""
data_short = str(data)[:length]
if len(str(data)) > len(str(data_short)):
data_short += "!!SHORTENED STRING!!"
return data_short |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.