content stringlengths 42 6.51k |
|---|
def get_max_array_dimension(signature):
    """Return the number of array dimensions encoded in this signature.

    Probes the signature string for runs of 'a' characters of increasing
    length ('a', 'aa', 'aaa', ...) and returns the longest run found.
    """
    depth = 0
    while 'a' * (depth + 1) in signature:
        depth += 1
    return depth
def is_prefix(x, pref):
    """Check whether ``pref`` is a proper prefix of ``x``.

    Args:
        x (list): token id sequence
        pref (list): token id sequence
    Returns:
        bool: True iff pref is strictly shorter than x and matches its head.
    """
    if len(pref) >= len(x):
        return False
    return all(p == t for p, t in zip(pref, x))
def merge_option_dicts(old_opts, new_opts):
    """
    Update the old_opts option dictionary with the options defined in
    new_opts. Instead of a shallow update as would be performed by calling
    old_opts.update(new_opts), this updates the dictionaries of all option
    types separately.
    Given two dictionaries
    old_opts = {'a': {'x': 'old', 'y': 'old'}}
    and
    new_opts = {'a': {'y': 'new', 'z': 'new'}, 'b': {'k': 'new'}}
    this returns a dictionary
    {'a': {'x': 'old', 'y': 'new', 'z': 'new'}, 'b': {'k': 'new'}}

    Bug fix: the original took a shallow copy of old_opts, so updating the
    per-type dicts mutated old_opts' inner dictionaries in place; the inner
    dicts are now copied as well, leaving both inputs untouched.
    """
    merged = {opt_type: dict(opts) for opt_type, opts in old_opts.items()}
    for opt_type, opts in new_opts.items():
        merged.setdefault(opt_type, {}).update(opts)
    return merged
def compute_image_size(args):
    """Compute the spatial size of a convolutional layer's output.

    ``args`` unpacks to (old_size, i, o, k, s, p, d): input size, input and
    output channel counts (unused by the size formula), kernel size, stride,
    padding, dilation. Uses the standard conv arithmetic:
    floor((old + 2p - d*(k-1) - 1) / s) + 1.
    """
    old_size, _in_ch, _out_ch, kernel, stride, pad, dilation = args
    numerator = old_size + 2 * pad - dilation * (kernel - 1) - 1
    return numerator // stride + 1
def maxVelocity(l1, l2, v2):
    """
    Compute the maximum orbital velocity (AU/year) at distance l1 given the
    minimum velocity v2 at distance l2, from conservation of angular
    momentum: l1 * v1 = l2 * v2. Equation taken from the Lab4 handout.
    """
    return l2 * v2 / l1
def _check_sectors(sect):
    """Validate and normalize the sector input for the partitioning helpers.

    :param set/list sect: The input sectors. Any of *'Transportation'*,
        *'Residential'*, *'Commercial'*, *'Industrial'*, or *'All'*
        (case-insensitive).
    :return: (*set*) -- The formatted set of sectors.
    :raises TypeError: if sect is not a set or list, or any entry is not str.
    :raises ValueError: if any entry is not a valid sector.
    """
    valid_sectors = {"Transportation", "Residential", "Commercial", "Industrial"}
    if not isinstance(sect, (set, list)):
        raise TypeError("Sector inputs must be input as a set or list.")
    for entry in sect:
        if not isinstance(entry, str):
            raise TypeError("Each individual sector must be input as a str.")
    # Normalize casing, e.g. "residential" -> "Residential".
    formatted = {entry.capitalize() for entry in sect}
    # "All" expands to every sector (any other entries are then irrelevant).
    if "All" in formatted:
        return set(valid_sectors)
    invalid_sect = formatted - valid_sectors
    if invalid_sect:
        raise ValueError(f'Invalid sectors: {", ".join(invalid_sect)}')
    return formatted
def GetMultiRegionFromRegion(region):
    """Gets the closest multi-region location ('us', 'eu', 'asia') to the region."""
    prefix_map = (
        (('us', 'northamerica', 'southamerica'), 'us'),
        (('europe',), 'eu'),
        (('asia', 'australia'), 'asia'),
    )
    for prefixes, multi_region in prefix_map:
        if region.startswith(prefixes):
            return multi_region
    raise Exception('Unknown region "%s".' % region)
def g_iter(n):
    """Return the value of G(n), computed iteratively (no recursion).

    G(n) = n for n <= 3, otherwise
    G(n) = G(n-1) + 2*G(n-2) + 3*G(n-3).

    >>> g_iter(1)
    1
    >>> g_iter(4)
    10
    >>> g_iter(5)
    22
    """
    if n <= 3:
        return n
    # a, b, c track G(k-3), G(k-2), G(k-1) as k advances from 4 to n.
    a, b, c = 1, 2, 3
    for _ in range(n - 3):
        a, b, c = b, c, c + 2 * b + 3 * a
    return c
def in_bin(input_num, bin_len):
    """Render ``input_num`` ("in binary") as a zero-padded binary string.

    Asserts the number's binary representation fits within ``bin_len`` digits.
    """
    assert len(bin(input_num)) - 2 <= bin_len
    return format(input_num, "0%db" % bin_len)
def stage_changer(stage_string):
    """
    Input: A string specifying the stage of the given tumor, e.g.
        'stage ii', 'iia', 'stage x'.
    Output: An integer stage (1-4, or 10 for stage x), or None when the
        string is not recognized.
    """
    numerals = {'i': 1, 'ii': 2, 'iii': 3, 'iv': 4}
    lookup = {}
    # Each numeral accepts an optional a/b/c sub-stage and an optional
    # 'stage ' prefix; 'x' has no sub-stages.
    for numeral, stage in numerals.items():
        for suffix in ('', 'a', 'b', 'c'):
            lookup[numeral + suffix] = stage
            lookup['stage ' + numeral + suffix] = stage
    lookup['x'] = lookup['stage x'] = 10
    return lookup.get(stage_string)
def bright_color(color: str):
    """Return the bright variant of an ANSI color code; RESET passes through.

    For non-RESET codes every 'm' becomes ';1m' (an ANSI color code carries
    a single trailing 'm').
    """
    if color == "\u001b[0m":
        return color
    return color.replace("m", ";1m")
def to_collection(val, val_type, col_type):
    """
    Validate and cast a value or values to a collection.
    Args:
        val (object): Value or values to validate and cast.
        val_type (type): Type of each value in collection, e.g. ``int`` or ``str``.
        col_type (type): Type of collection to return, e.g. ``tuple`` or ``set``.
    Returns:
        object: Collection of type ``col_type`` with values all of type ``val_type``,
        or None when ``val`` is None.
    Raises:
        TypeError
    """
    if val is None:
        return None
    # A bare value is wrapped into a singleton collection.
    if isinstance(val, val_type):
        return col_type([val])
    if not isinstance(val, (tuple, list, set, frozenset)):
        raise TypeError(
            "values must be {} or a collection thereof, not {}".format(
                val_type, type(val),
            )
        )
    for item in val:
        if not isinstance(item, val_type):
            raise TypeError("not all values are of type {}".format(val_type))
    return col_type(val)
def sort(array):
    """Sort ``array`` in place with selection sort and return it.

    Repeatedly selects the minimum of the unsorted tail and swaps it into
    position; O(n^2) comparisons. The input list is mutated and returned.

    Cleanup: removed the unused local ``length`` and the repeated
    ``len(array)`` calls of the original.
    """
    n = len(array)
    for i in range(n):
        # Locate the minimum element in the unsorted tail array[i:].
        min_idx = i
        for j in range(i + 1, n):
            if array[j] < array[min_idx]:
                min_idx = j
        # Swap it to the front of the unsorted region.
        array[i], array[min_idx] = array[min_idx], array[i]
    return array
def guard_none(obj):
    """
    Return a value, or the placeholder "-" to draw instead if it is None.
    """
    return "-" if obj is None else obj
def slope(x1, y1, x2, y2):
    """Slope of the line through (x1, y1) and (x2, y2).

    Raises ZeroDivisionError for a vertical line (x1 == x2).
    """
    rise = y2 - y1
    run = x2 - x1
    return rise / run
def build_person(first_name, last_name, age=''):  # Returning a dictionary.
    """Return a dictionary of information about a person.

    The 'age' key is included only when an age was supplied. The check is
    against the '' sentinel rather than truthiness so that a legitimate
    age of 0 is still recorded (the original dropped it).
    """
    person = {'first': first_name, 'last': last_name}
    if age != '':
        person['age'] = age
    return person
def check_choice(choice):
    """Validate that ``choice`` is a yes/no answer ('y' or 'n')."""
    return choice in ('y', 'n')
def replace(key: str, value: str, line: str) -> str:
    """ Replaces a key with a value in a line if it is not in a string or a comment and is a whole
    word.
    Complexity is pretty bad, so might take a while if the line is vvveeeerrrrryyyyyy long.
    """
    # Linear scan with parser state: `in_string` flips on double quotes,
    # `in_comment` is set by ' or ; and cleared at a newline
    # (' and ; look like assembly/BASIC comment markers — TODO confirm the
    # target language).
    i = 0
    in_string = False
    in_comment = False
    while i < len(line) - len(key) and len(line) >= len(key): # Line length may change, so re evaluate each time
        if(line[i] == "\""):
            # Start or end of a string
            in_string = not in_string
        elif(line[i] == "'" or line[i] == ";"):
            # Start of a comment
            in_comment = True
        elif(line[i] == "\n"):
            # New line. Reset comment
            in_comment = False
        elif not in_comment and not in_string:
            # We can check for the key starting at this position: the key must
            # match exactly, must not be preceded by a letter/underscore, and
            # must not be followed by a letter/underscore/digit — i.e. only
            # whole-word occurrences are replaced.
            if (line[i:i+len(key)] == key) and not (i > 0 and (line[i-1].isalpha() or line[i-1] == "_")) and not (i+len(key) < len(line) and (line[i+len(key)].isalpha() or line[i+len(key)] == "_" or line[i+len(key)].isnumeric())):
                line = line[:i] + str(value) + line[i+len(key):] # Replace that appearance
                i += len(value) # Skip over the value we replaced it with
        i += 1
    return line
def _label(label):
    """
    Returns a query item matching a label.

    Args:
        label (str): The label the message must have applied.

    Returns:
        str: The query string, e.g. "label:inbox".
    """
    return f"label:{label}"
def modular_exponentiation(b, e, m):
    """Compute (b ** e) % m by right-to-left binary exponentiation.
    https://en.wikipedia.org/wiki/Modular_exponentiation

    :param b: a base number.
    :param e: a non-negative exponent.
    :param m: a modulo.
    :return: the remainder of b**e modulo m.

    Bug fixes vs. the original:
    - The accumulator must be multiplied in when the current exponent bit
      is 1 (e odd); the original condition was inverted, so e.g.
      modular_exponentiation(2, 3, 5) returned 1 instead of 3.
    - Halve with ``e //= 2`` instead of ``int(e / 2)``, which goes through
      a float and loses precision for very large exponents.
    """
    x = 1  # running result
    y = b  # holds b^(2^i) mod m as the bits of e are scanned
    while e > 0:
        if e % 2 == 1:
            x = (x * y) % m
        y = (y * y) % m
        e //= 2
    return x % m
def dart_web_application_outputs(output_js, dump_info, emit_tar, script_file):
    """Returns the expected output map for dart_web_application.

    output_js falls back to "<script name>.js"; info/tar entries are added
    only when requested via dump_info / emit_tar.
    """
    js_name = output_js if output_js else "%s.js" % script_file.name
    outputs = {
        "js": js_name,
        "deps_file": "%s.deps" % js_name,
        "sourcemap": "%s.map" % js_name,
        "packages_file": "%{name}.packages",
    }
    if dump_info:
        outputs["info_json"] = "%s.info.json" % js_name
    if emit_tar:
        outputs["tar"] = "%s.tar" % js_name
    return outputs
def _is_xml(s):
    """Return ``True`` if string looks like an XML document (starts with an XML declaration)."""
    normalized = s.strip().lower()
    return normalized.startswith('<?xml ')
def has_imm(opcode):
    """Return True if bit 3 of ``opcode`` is set, marking an immediate operand."""
    return (opcode & 0b1000) != 0
def fetch_priority(repset, args_array, **kwargs):
    """Function: fetch_priority

    Description: Stub holder for the mongo_rep_admin.fetch_priority
        function; always returns a (False, "Error Message") status tuple.

    Arguments:
        (input) repset -> Replication set instance.
        (input) args_array -> Array of command line options and values.
        (input) **kwargs -> mail -> optional mail instance.
    """
    err_msg = "Error Message"
    status = (False, err_msg)
    # Both branches produce the same status; the condition just mirrors the
    # argument usage of the real function being stubbed.
    if args_array and repset and kwargs.get("mail", None):
        status = (False, err_msg)
    return status
def str2bool(val):
    """
    Convert string expression to boolean
    :param val: Input value
    :returns: True iff val is (case-insensitively) "yes", "true", "t" or "1"
    :rtype: bool
    """
    truthy = {"yes", "true", "t", "1"}
    return val.lower() in truthy
def int_if_close(floating_number, tolerance=0.0001):
    """
    Convert a float (or numeric string) to an int when it is close enough
    to its nearest integer; otherwise return the float unchanged.

    :type floating_number: float | str
    :param floating_number: Number which may be better represented as an integer.
    :type tolerance: float
    :param tolerance: Max distance from the nearest integer for int conversion.
    :rtype: int | float
    :return: Integer when close enough, otherwise the original float.

    Bug fix: the original returned ``round(x, 0)``, which is a *float*, so
    an int was never actually produced despite the documented contract.
    """
    floating_number = float(floating_number)
    nearest = round(floating_number)  # round() with no digits yields an int
    if abs(nearest - floating_number) <= tolerance:
        return nearest
    return floating_number
def get_string_trailing(line, begin_at):
    """Get the trailing part of ``line`` starting from the provided index,
    with surrounding whitespace stripped.

    Args:
        line (str): string line
        begin_at (int): sub-str start point
    Raises:
        TypeError: if the input cannot be sliced/stripped as a string.
    Returns:
        str: sub-string
    """
    try:
        return line[begin_at:].strip()
    except (TypeError, AttributeError) as exc:
        # Narrowed from a bare except; chain the original cause so the
        # real failure is visible in tracebacks.
        raise TypeError("Unrecognized Input") from exc
def __get_int_ordinals(string):
    """
    Return the integer ordinals of a string as a comma-separated list.

    Each ordinal is right-justified to width 3, e.g. "A1" -> " 65,  49".
    """
    # ", ".join replaces the original's quadratic append-then-rstrip string
    # building while producing an identical result (including "" for "").
    return ", ".join(str(ord(char)).rjust(3, " ") for char in string)
def clean_text(text):
    """
    Split ``text`` on single spaces and drop the empty fragments.

    :param text: text to tokenize.
    :return: list of non-empty space-separated tokens.
    """
    return [token for token in text.split(" ") if token]
def factorial(n: int) -> int:
    """Compute n! iteratively.

    Raises:
        TypeError: if ``n`` is not an int (bools included).
        ValueError: if ``n`` is negative.
    """
    if type(n) is not int:
        raise TypeError("n isn't integer")
    if n < 0:
        raise ValueError("n is negative")
    product = 1
    for factor in range(2, n + 1):
        product *= factor
    return product
def make_version_string(version_info):
    """
    Turn a version tuple in to a version string, taking in to account any pre,
    post, and dev release tags, formatted according to PEP 440.
    """
    version_info = list(version_info)
    # Leading ints form the release segment, e.g. (1, 2, 3) -> "1.2.3".
    numbers = []
    while version_info and isinstance(version_info[0], int):
        numbers.append(str(version_info.pop(0)))
    version_str = '.'.join(numbers)
    if not version_info:
        return version_str
    # Whatever remains must be (suffix_type, suffix_number) pairs.
    assert len(version_info) % 2 == 0
    while version_info:
        suffix_type = version_info.pop(0)
        suffix_number = version_info.pop(0)
        if suffix_type in {'a', 'b', 'rc'}:
            # Pre-release tags attach directly: "1.2a3" (PEP 440 pre-release).
            suffix = f'{suffix_type}{suffix_number}'
        elif suffix_type in {'dev', 'post'}:
            # Dev/post tags are dot-separated: "1.2.dev3" / "1.2.post1".
            suffix = f'.{suffix_type}{suffix_number}'
        else:
            raise ValueError(f"Unknown suffix type '{suffix_type}'")
        version_str += suffix
    return version_str
def ClearAllIntegers(data):
    """
    Used to prevent known bug; sets all integers in data recursively to 0.

    Handles ints, lists and dicts (mutated in place); any other type is
    returned unchanged.
    """
    if type(data) == int:
        return 0
    if type(data) == list:
        for i in range(0, len(data)):
            data[i] = ClearAllIntegers(data[i])
    if type(data) == dict:
        # Bug fix: iterate .items() — iterating a dict directly yields only
        # keys, so the original ``for k, v in data`` failed to unpack.
        for k, v in data.items():
            data[k] = ClearAllIntegers(v)
    return data
def add_variables_to_expression(query_dict: dict, variables: dict) -> dict:
    """Attempt to make it easier to develop a query.

    For each (k, v) in ``variables``, registers "#k" -> k under
    ExpressionAttributeNames and ":k" -> v under ExpressionAttributeValues
    of ``query_dict``; a duplicate token raises ValueError. Returns the
    updated query_dict.
    """
    attr_names = query_dict.get("ExpressionAttributeNames", {})
    attr_values = query_dict.get("ExpressionAttributeValues", {})
    for var_key, var_value in variables.items():
        name_token = f"#{var_key}"
        if name_token in attr_names:
            raise ValueError(
                f"Cannot add a duplicate expression attribute "
                f"name {name_token} to your query {query_dict}"
            )
        attr_names[name_token] = var_key
        value_token = f":{var_key}"
        if value_token in attr_values:
            raise ValueError(
                f"Cannot add a duplicate expression attribute "
                f"value {value_token} to your query {query_dict}"
            )
        attr_values[value_token] = var_value
    query_dict["ExpressionAttributeNames"] = attr_names
    query_dict["ExpressionAttributeValues"] = attr_values
    return query_dict
def convert_annotation_to_actions(annotations):
    """Convert annotations to an action string.

    annotations: dict mapping a string to a list of annotated sections;
    each section is a list of ordered frame indices.

    NOTE(review): currently an unimplemented stub that always returns "".
    """
    return ""
def genomic_dup5_rel_38(genomic_dup5_loc):
    """Create test fixture relative copy number variation."""
    fixture = {
        "type": "RelativeCopyNumber",
        "_id": "ga4gh:VRC.vy8SSVFuaeZTkUCCv6izNCkF0zgbBG7G",
        "subject": genomic_dup5_loc,
        "relative_copy_class": "partial loss",
    }
    return fixture
def _check_delimiter(output_filename, delim=None):
    """Detect delimiter by filename extension if not set."""
    if output_filename and delim is None:
        extension = output_filename.rsplit(".", 1)[-1].lower()
        delim = {"tsv": "\t", "csv": ","}[extension]
    assert delim, "File output delimiter not known. Cannot proceed."
    return delim
def wikilinks_files(path):
    """
    :param path: directory where wikilinks files are stored
    :return: list of the ten wikilinks shard files, named data-0000x-of-00010
    """
    return [f"{path}/data-0000{shard}-of-00010" for shard in range(10)]
def convertb2d_(res, decimals):
    """Round ``res`` to ``decimals`` places and return it as a string."""
    rounded = round(res, decimals)
    return str(rounded)
def find_factors(num):
    """Find factors of num, in increasing order.

    Runs in O(sqrt(num)) by pairing every divisor d <= sqrt(num) with its
    cofactor num // d, instead of trial-dividing by every integer up to num.

    >>> find_factors(10)
    [1, 2, 5, 10]
    >>> find_factors(11)
    [1, 11]
    >>> find_factors(111)
    [1, 3, 37, 111]
    >>> find_factors(321421)
    [1, 293, 1097, 321421]
    """
    small, large = [], []
    d = 1
    while d * d <= num:
        if num % d == 0:
            small.append(d)
            cofactor = num // d
            if cofactor != d:  # avoid listing a perfect-square root twice
                large.append(cofactor)
        d += 1
    return small + large[::-1]
def _replace_pairwise_equality_by_equality(pc):
    """Rewrite pairwise equality constraints to equality constraints.

    Args:
        pc (list): List of constraint dictionaries (selectors assumed
            already processed).
    Returns:
        list: Constraints with every "pairwise_equality" entry expanded
        into one "equality" constraint per zipped index group, appended
        after the remaining constraints.
    """
    pairwise = [c for c in pc if c["type"] == "pairwise_equality"]
    rewritten = [c for c in pc if c["type"] != "pairwise_equality"]
    for constraint in pairwise:
        rewritten.extend(
            {"index": list(group), "type": "equality"}
            for group in zip(*constraint["indices"])
        )
    return rewritten
def prune_empty(d):
    """
    Remove empty lists and empty dictionaries from d
    (similar to jsonnet std.prune but faster)

    Returns None for an empty list/dict (callers drop None entries), any
    non-container value unchanged, and otherwise a pruned copy.
    NOTE(review): this is a single pass — a container that only becomes
    empty after its children are pruned is kept, unlike std.prune.
    """
    # Scalars (and anything that is not a list/dict) pass through untouched.
    if not isinstance(d, (dict, list)):
        return d
    if isinstance(d, list):
        if len(d) > 0:
            # Prune children, dropping those that pruned away to None.
            return [v for v in (prune_empty(v) for v in d) if v is not None]
    if isinstance(d, dict):
        if len(d) > 0:
            return {k: v for k, v in ((k, prune_empty(v)) for k, v in d.items()) if v is not None}
    # Empty list/dict: fall through, implicitly returning None so the
    # parent comprehension filters this entry out.
def station_data(filename):
    """
    Parse a station filename of the form
    "<route>-<order>-<id> <name>.xls" into a dict with the keys
    route_nr, order, id and name.
    """
    parts = filename.split('-')
    remainder = ''.join(parts[2:])
    id_and_name = remainder.split(' ')
    return {
        'route_nr': parts[0],
        'order': parts[1],
        'id': id_and_name[0],
        # Name tokens are concatenated (spaces dropped) and .xls stripped,
        # matching the original behavior.
        'name': ''.join(id_and_name[1:]).replace('.xls', ''),
    }
def contains_recursive(text, pattern, index=None):
    """Return a boolean indicating whether pattern occurs in text.

    Recursively slides a window of len(pattern) characters across ``text``
    starting at ``index`` (defaults to 0).

    Fixes vs. the original: the local ``max`` shadowed the builtin, and the
    second assert's message formatted ``text`` instead of ``pattern``.
    """
    assert isinstance(text, str), 'text is not a string: {}'.format(text)
    assert isinstance(pattern, str), 'pattern is not a string: {}'.format(pattern)
    if index is None:
        index = 0
    # Last start index at which the pattern could still fit.
    last_start = len(text) - (len(pattern) - 1)
    if index > last_start:
        return False
    window = text[index:index + len(pattern)]
    if window == pattern:
        return True
    return contains_recursive(text, pattern, index + 1)
def roundPrecision(number, precision=4):
    """Round ``number`` to ``precision`` digits of scientific-notation
    mantissa (i.e. precision+1 significant figures), for output."""
    formatted = '{:.{p}E}'.format(number, p=precision)
    return float(formatted)
def cmp_compat(a, b):
    """
    Three-way comparison (Python 2 ``cmp``-style).
    :param a: first value
    :param b: second value
    :return: 1, -1 or 0 as ``a`` is greater than, less than, or neither
    """
    if a > b:
        return 1
    if a < b:
        return -1
    return 0
def kaldi_lvl_to_logging_lvl(lvl: int) -> int:
    """Convert kaldi level to a Python logging level.

    Levels <= 1 map linearly (0 -> 20, 1 -> 10); higher verbosity levels
    map to values below 10 (2 -> 9, 3 -> 8, ...).
    """
    if lvl <= 1:
        return 20 - 10 * lvl
    return 11 - lvl
def is_palindrome(s):
    """
    Decide if a string is a palindrome or not.

    >>> is_palindrome("abba")
    True
    >>> is_palindrome("python")
    False
    """
    half = len(s) // 2
    return all(s[i] == s[-1 - i] for i in range(half))
def split_in_half(input_points):
    """
    Split a list of points into two halves; with an odd length the first
    half gets the extra element.

    Parameters
    ----------
    input_points : list
        The set of points to be split

    Returns
    -------
    left : list
        The first half of the input points
    right : list
        The second half of the input points
    """
    cut = (len(input_points) + 1) // 2
    return input_points[:cut], input_points[cut:]
def noll_to_zernike(j):
    """
    Convert linear Noll index to tuple of Zernike indices.
    j is the linear Noll coordinate, n is the radial Zernike index, and m is the azimuthal Zernike index.
    Parameters
    ----------
    j : int
        j-th Zernike mode Noll index
    Returns
    -------
    (n, m) : tuple
        Zernike azimuthal and radial indices
    Notes
    -----
    See <https://oeis.org/A176988>.
    """
    if j == 0:
        raise ValueError("Noll indices start at 1, 0 is invalid.")
    n = 0
    j1 = j - 1
    # Walk down the triangular rows: radial order n contains n+1 modes, so
    # subtract successive row sizes until j1 lands inside row n.
    while j1 > n:
        n += 1
        j1 -= n
    # Azimuthal index: the sign alternates with the original Noll index j,
    # and the magnitude is derived from j1's position within row n (with a
    # parity correction for even/odd n) — see the OEIS reference above.
    m = (-1)**j * ((n % 2) + 2 * int((j1 + ((n + 1) % 2)) / 2.0))
    return (n, m)
def eval_func_tuple(f_args):
    """Takes a tuple of a function and args, evaluates and returns result."""
    func, *args = f_args
    return func(*args)
def header(data):
    """Returns the netstring length header ("<len>:") for ``data`` as bytes.

    data -- A string you want to produce a header for.
    """
    length = len(data)
    return "{}:".format(length).encode('utf8')
def normalize_knot_vector(knot_vector, decimals=4):
    """ Normalizes the input knot vector between 0 and 1.
    :param knot_vector: knot vector to be normalized
    :type knot_vector: list, tuple
    :param decimals: rounding number
    :type decimals: int
    :return: normalized knot vector
    :rtype: list
    """
    try:
        # len() raises TypeError for non-sized inputs; empty vectors are
        # rejected explicitly with ValueError.
        if knot_vector is None or len(knot_vector) == 0:
            raise ValueError("Input knot vector cannot be empty")
    except TypeError as e:
        print("An error occurred: {}".format(e.args[-1]))
        raise TypeError("Knot vector must be a list or tuple")
    except Exception:
        # Re-raise the ValueError above (and anything else) unchanged.
        raise
    first_knot = float(knot_vector[0])
    last_knot = float(knot_vector[-1])
    denominator = last_knot - first_knot
    # Shift each knot to start at 0, scale by the knot range, and round to
    # `decimals` places via %-formatting, parsing the string back to float.
    knot_vector_out = [(float(("%0." + str(decimals) + "f") % ((float(kv) - first_knot) / denominator)))
                       for kv in knot_vector]
    return knot_vector_out
def cnn_pooling_output_length(input_length, filter_size, pooling_size, border_mode, stride,
                              dilation=1):
    """ Compute the length of the output sequence after 1D convolution along
    time followed by 1D max pooling; in line with Keras' Convolution1D.

    Params:
        input_length (int): Length of the input sequence (None passes through).
        filter_size (int): Width of the convolution kernel.
        pooling_size (int): Size of the 1D max pooling layer kernel.
        border_mode (str): Only supports `same` or `valid`.
        stride (int): Stride size used in 1D convolution.
        dilation (int)
    """
    if input_length is None:
        return None
    # Effective receptive field of a dilated kernel: (k - 1) * d + 1.
    effective_filter = (filter_size - 1) * dilation + 1
    if border_mode == 'valid':
        conv_length = (input_length - effective_filter + stride) // stride
        return conv_length // pooling_size
    return input_length // pooling_size
def wrap(string, left="[", right="]"):
    """Wrap a string in two delimiters iff the string is non empty (or None)."""
    if not string:
        return ""
    return left + string + right
def rgb_to_xy(red, green, blue):
    """ conversion of RGB colors to CIE1931 XY colors
    Formulas implemented from: https://gist.github.com/popcorn245/30afa0f98eea1c2fd34d
    Args:
        red (float): a number between 0.0 and 1.0 representing red in the RGB space
        green (float): a number between 0.0 and 1.0 representing green in the RGB space
        blue (float): a number between 0.0 and 1.0 representing blue in the RGB space
    Returns:
        xy (list): x and y
    """
    # gamma correction (sRGB linearization)
    red = pow((red + 0.055) / (1.0 + 0.055), 2.4) if red > 0.04045 else (red / 12.92)
    green = pow((green + 0.055) / (1.0 + 0.055), 2.4) if green > 0.04045 else (green / 12.92)
    blue = pow((blue + 0.055) / (1.0 + 0.055), 2.4) if blue > 0.04045 else (blue / 12.92)
    # convert rgb to xyz (matrix from the referenced gist)
    x = red * 0.649926 + green * 0.103455 + blue * 0.197109
    y = red * 0.234327 + green * 0.743075 + blue * 0.022598
    z = green * 0.053077 + blue * 1.035763
    # convert xyz to xy chromaticity. Bug fix: compute the normalizer once —
    # the original overwrote x before dividing y, so the y coordinate was
    # divided by the wrong total.
    total = x + y + z
    return [x / total, y / total]
def splitFirstLine(data):
    """Extracts the first line from `data' and returns a tuple: (firstline, rest)."""
    first, _sep, rest = data.partition("\n")
    return (first, rest)
def hide_graph(input):
    """Only show graph if there is data."""
    display = "block" if input else "none"
    return {"display": display}
def euclidean_gcd(a, b):
    """
    Euclidean algorithm, iterative form.
    Complexity: O(log(min(A, B)))
    Uses the property GCD(a, b) = GCD(b, a % b).
    Cool property: GCD(a, b) * LCM(a, b) = a * b
    """
    while b != 0:
        a, b = b, a % b
    return a
def SplitNewStyleEmpty(newstyle):
    """Splits according to one of the forms of path elements this code uses.
    See tests/test-path_utils.py for semantics.
    Args:
        newstyle: 'newstyle' path.
    Returns:
        List of tokens from path argument.
    """
    tokens = newstyle.split(".")
    newstyle_tokens = []
    # Consume tokens right-to-left so that a "[]" token can be fused onto
    # the token that precedes it, e.g. "a.[].b" -> ["a.[]", "b"].
    while tokens:
        token = tokens.pop(-1)
        if token == "[]":
            # NOTE(review): raises IndexError if the path *starts* with "[]"
            # — presumably inputs never do; confirm against callers.
            token = tokens.pop(-1) + ".[]"
        newstyle_tokens.insert(0, token)
    return newstyle_tokens
def rob(nums):
    """
    House-robber DP: maximum sum of non-adjacent elements of ``nums``.

    Tracks two running totals — the best haul that includes the previous
    house and the best that excludes it — updating both in one pass.

    Args:
        nums: list[int] of non-negative amounts per house.
    Returns:
        int: the maximum amount obtainable without robbing adjacent houses.
    """
    incl = excl = 0
    for amount in nums:
        # Robbing this house forces us to have skipped the previous one.
        incl, excl = excl + amount, max(incl, excl)
    return max(incl, excl)
def items_to_text(items: list) -> str:
    """
    Join items with ", " into one string, for easy use in prettify.

    Uses str.join instead of the original quadratic loop of string
    concatenations; the result — including "" for an empty list — is
    identical.
    """
    return ', '.join(items)
def lookup_config_from_database(database):
    """
    Read configuration values that might be already defined in the database
    configuration file; every field falls back to an inert default when
    ``database`` is None (memory_demanding also defaults to False when the
    attribute is absent).
    """
    if database is None:
        return None, None, False
    return (
        database.annotation_type,
        database.fixed_positions,
        getattr(database, "memory_demanding", False),
    )
def reduced_mass(mass1, mass2):
    """
    Calculates the reduced mass m1*m2 / (m1 + m2).
    :param mass1: mass 1
    :param mass2: mass 2
    :return: reduced mass
    """
    return (mass1 * mass2) / (mass1 + mass2)
def delete_before(list, key):
    """
    Return a cons-style list (nested ``(head, tail)`` pairs ending in ``()``)
    with the item immediately before the first occurrence of ``key`` (if
    any) deleted. (Parameter name kept for interface compatibility even
    though it shadows the builtin.)
    """
    if list == ():
        return ()
    head, tail = list
    if tail == ():
        return list
    successor = tail[0]
    if successor == key:
        # Drop ``head`` by returning the tail, whose first item is the key.
        return tail
    return (head, delete_before(tail, key))
def get_detector_type(meta):
    """
    Gets the IRIS detector type from a meta dictionary, collapsing
    FUV1 and FUV2 to just "FUV".

    Parameters
    ----------
    meta: dict-like
        Dictionary-like object containing entry for "detector type"

    Returns
    -------
    detector_type: `str`
        Detector type.
    """
    raw = meta["detector type"]
    return "FUV" if "FUV" in raw else raw
def strip_endlines(in_arg):
    """Remove eol characters Linux/Win/Mac (input can be string or list of strings).

    bytes values are additionally decoded as UTF-8; unknown types pass
    through unchanged.
    """
    if isinstance(in_arg, (tuple, list)):
        buff = []
        for x in in_arg:
            if isinstance(x, bytes):
                buff.append(x.rstrip().decode('utf-8'))
            elif isinstance(x, str):
                buff.append(x.rstrip())
            else:
                # Bug fix: append the unknown *element* unchanged — the
                # original appended the whole input list here.
                buff.append(x)
        return buff
    elif isinstance(in_arg, bytes):
        return in_arg.rstrip().decode('utf-8')
    elif isinstance(in_arg, str):
        return in_arg.rstrip()
    else:
        # Unknown scalar input passes through untouched.
        return in_arg
def normalize_0_1_min_max(data, _min, _denominator, reverse=False):
    """
    Min-max normalize ``data`` into [0, 1]: subtract ``_min`` and divide by
    ``_denominator`` (assumed max - min). With ``reverse=True`` the inverse
    (denormalization) is applied instead; the input is never modified in
    situ when it supports out-of-place arithmetic.

    :param data: the data to normalize (or denormalize)
    :param _min: the minimum value; may be a more 'global' value than min(data)
    :param _denominator: denominator, assumed to be maximum - minimum
    :param reverse: if True, denormalize data assumed to lie in [0, 1]
    :return: the normalized (or denormalized) data
    """
    if reverse:
        return data * _denominator + _min
    return (data - _min) / _denominator
def s3_key_for_revision_metadata(wiki, pageid, revid):
    """Computes the key for the S3 object storing metadata about a revision."""
    prefix = wiki['s3_prefix']
    return f"{prefix:s}page_{pageid:08d}/rev_{revid:08d}.yaml"
def consistent_typical_range_stations(stations):
    """Return the station objects whose typical range data is consistent.

    Applies each station's ``typical_range_consistent()`` predicate and
    keeps only the stations for which it holds.

    Cleanup: replaced the index-based loop and the ``== True`` comparison
    with a comprehension over the objects themselves.
    """
    return [station for station in stations if station.typical_range_consistent()]
def is_master(config):
    """True if the code running the given pytest.config object is running in a xdist master
    node or not running xdist at all (i.e. the config has no 'slaveinput').
    """
    running_as_worker = hasattr(config, 'slaveinput')
    return not running_as_worker
def isUniqueSFW(str):
    """
    Given a string, checks if the string has unique characters.

    A string has all-unique characters iff the set of its characters has
    the same size as the string — O(n), replacing the original O(n^2)
    pairwise XOR scan. (Parameter name kept for interface compatibility
    even though it shadows the builtin.)
    """
    return len(set(str)) == len(str)
def circulation_cds_extension_max_count(loan):
    """Return a default extensions max count: the loan's current
    extension_count (0 when absent) plus one."""
    current = loan.get("extension_count", 0)
    return current + 1
def read_vocabulary(vocab_file, threshold):
    """read vocabulary file produced by get_vocab.py, and filter according to frequency threshold.

    Each line is "<word> <freq>". Words with frequency below ``threshold``
    are skipped; ``threshold=None`` disables filtering.
    """
    vocabulary = set()
    for line in vocab_file:
        word, freq = line.split()
        freq = int(freq)
        # PEP 8: `is None` instead of `== None`; behavior is unchanged.
        if threshold is None or freq >= threshold:
            vocabulary.add(word)
    return vocabulary
def correlate_lists(lst1, lst2, ignore_pin_nums=False):
    """Return the Jaccard similarity of two lists of strings.

    When ``ignore_pin_nums`` is true, every entry is truncated at its first
    '.' (dropping the trailing pin-number part). Note: like the original,
    this truncation mutates both input lists in place.

    :return: |intersection| / |union| of the (possibly truncated) entries.

    Cleanup: removed the large blocks of commented-out IGNORE_PINS code.
    """
    if ignore_pin_nums:
        for k in range(len(lst1)):
            lst1[k] = lst1[k].split(".")[0]
        for k in range(len(lst2)):
            lst2[k] = lst2[k].split(".")[0]
    set1, set2 = set(lst1), set(lst2)
    num_in_common = len(set1 & set2)
    num_not_in_common = len(set2 - set1) + len(set1 - set2)
    return float(num_in_common) / (num_in_common + num_not_in_common)
def get_companies_house_number(activity):
    """Returns the companies house number of an activity"""
    attributed = activity['object']['attributedTo']
    return attributed['dit:companiesHouseNumber']
def get_version(iterable) -> str:
    """
    Get the version of the WDL document.
    :param iterable: An iterable that contains the lines of a WDL document.
    :return: The WDL version used in the workflow.
    """
    lines = iterable.split('\n') if isinstance(iterable, str) else iterable
    for raw_line in lines:
        stripped = raw_line.strip()
        # Skip blanks and comments; the first substantive line decides.
        if not stripped or stripped.startswith('#'):
            continue
        if stripped.startswith('version '):
            return stripped[len('version '):].strip()
        break
    # Only draft-2 predates the version declaration.
    return 'draft-2'
def convert_bool(value):
    """Coerce ``value`` to a real bool.

    Actual bools pass through, None is False, and anything else is truthy
    only when its string form is "true" or "1" (case-insensitive).

    Bug fix: the original ``value in (True, False)`` also matched the
    integers 1 and 0 (because 1 == True), returning them un-coerced;
    ``isinstance`` restricts the pass-through to genuine bools.
    """
    if isinstance(value, bool):
        return value
    if value is None:
        return False
    return str(value).lower() in ('true', '1')
def reduce_list(data_set):
    """ Reduce duplicate items in a list and preserve order """
    # dict preserves insertion order (3.7+), so fromkeys deduplicates while
    # keeping the first occurrence of each item.
    return list(dict.fromkeys(data_set))
def day_of_week_one_line(y, m, d):
    """Oneliner just for fun.

    Sakamoto-style day-of-week computation: ``y - (m < 3)`` adjusts the
    year down for January/February, the //4 - //100 + //400 terms count
    leap days, and ``ord('-bed=pen+mad.'[m])`` encodes the per-month
    offset table in the character codes (index m is 1-based). Result is a
    0-6 day-of-week index.
    """
    return (y - (m < 3) + (y - (m < 3)) // 4 - (y - (m < 3)) // 100 + (y - (m < 3)) // 400 + ord('-bed=pen+mad.'[m]) + d) % 7
def bytes_to_str(data: bytes) -> str:
    """Converts bytes to a string of 1s and 0s (8 bits per byte, MSB first).

    Bug fix: each byte must be zero-padded on the LEFT to 8 bits
    (e.g. 0x01 -> "00000001"); the original used ``ljust``, which padded on
    the right and corrupted the bit pattern of every byte below 0x80.
    """
    return ''.join(format(b, '08b') for b in data)
def get_current_version(config):
    """Return the current version of the config.
    :return: current config version or 0 if not defined
    """
    version_info = config.get('CONFIG_VERSION', {})
    return version_info.get('CURRENT', 0)
def combine_segments(segment_list):
    """Combines a list of segments (each a list of strings) into a single
    list of strings: row i of the result is the concatenation of row i of
    every segment. Segments are assumed to all have the same length."""
    combined = []
    for index in range(len(segment_list[0])):
        combined.append("".join(segment[index] for segment in segment_list))
    return combined
def _google_temp_unit(units):
    """Return Google temperature unit: "F" when ``units`` is truthy, else "C"."""
    return "F" if units else "C"
def wild_card_find_doc(inverted_index, wild_card_tokens):
    """Look up wildcard-expanded tokens in an inverted index.

    Arguments:
        inverted_index {dict} -- token -> posting-list mapping (may be None)
        wild_card_tokens {list} -- candidate tokens matching the wildcard

    Returns:
        (dict, list) -- the tokens found (with their posting lists) and the
        tokens absent from the index. Both are empty when the index is None.
    """
    found = {}
    missing = []
    if inverted_index is not None:
        for token in list(wild_card_tokens):
            if token in inverted_index:
                found[token] = inverted_index[token]
            else:
                missing.append(token)
    return found, missing
def calculateBounds(sentences):
    """Given sentence strings, return their character-offset bounds from 0.

    Each line of the returned string is "start,end" for one sentence; a
    single separator character is assumed between consecutive sentences.
    """
    lines = []
    offset = 0
    for sentence in sentences:
        end = offset + len(sentence)
        lines.append('%d,%d' % (offset, end))
        offset = end + 1  # +1 skips the separator between sentences
    return '\n'.join(lines)
def lat_fixed_formatter(y):
    """Simple-minded latitude formatter.

    Formats a latitude as a TeX-style degree string with a thin space
    (``\\,``) before the hemisphere letter, e.g. ``10\\degree\\,N``.
    Intended to feed a matplotlib.ticker.FixedFormatter elsewhere.

    Parameters
    ----------
    y: int or float
        Latitude value.

    Returns
    -------
    str with the hemisphere suffix N/S, or no suffix for the equator.
    """  # noqa
    if y < 0:
        return r'%g\degree\,S' % -y
    if y > 0:
        return r'%g\degree\,N' % y
    return r'%g\degree' % y
def deduplicate_non_disjoint_tuple_spans(list_tuple_span):
    """Auxiliary function for find_all_with_dict_regex.

    Given tuples whose first element is a (start, end) span, drop any tuple
    whose span starts inside the span of the tuple ordered just before it
    (the smaller of two overlapping spans, after sorting).
    """
    # Two stable sorts: start ascending, ties broken by end descending,
    # so a longer span precedes any span it contains.
    ordered = sorted(list_tuple_span, key=lambda t: t[0][1], reverse=True)
    ordered.sort(key=lambda t: t[0][0])
    # Mark each element whose start falls before its predecessor's end.
    dropped = {i + 1
               for i, (cur, nxt) in enumerate(zip(ordered, ordered[1:]))
               if cur[0][1] > nxt[0][0]}
    return [span for i, span in enumerate(ordered) if i not in dropped]
def get_position(pair, s_mention):
    """Position of the antecedent in the sentence: beginning (0), middle (1), end (2).

    The antecedent index is decoded from the last character of pair[0]
    (a 1-based digit) -- assumes at most 9 mentions per sentence.
    """
    idx = int(pair[0][-1]) - 1
    if idx == 0:
        return 0
    return 2 if idx == len(s_mention) - 1 else 1
def getFirstPlist(textString):
    """Extract the first text-style plist from a string that may contain several.

    Returns a tuple: (the first plist found, the remainder of the string
    after it). When no complete plist is present, returns ("", textString).
    """
    header = '<?xml version'
    footer = '</plist>'
    start = textString.find(header)
    if start == -1:
        # no plist header at all
        return ("", textString)
    end = textString.find(footer, start + len(header))
    if end == -1:
        # header without a closing tag: treat as no plist
        return ("", textString)
    end += len(footer)  # include the footer in the extracted plist
    return (textString[start:end], textString[end:])
def max_profit(stocks: list) -> int:
    """Best profit from one buy followed by one sell of the given price list.

    Two-pointer scan: buy at the current position, walk forward while prices
    stay above the buy price, then restart from the first non-profitable day.

    Time Complexity: O(n)
    Space Complexity: O(1)
    """
    best = 0
    i, last = 0, len(stocks) - 1
    while i < last:
        floor = stocks[i]
        j = i + 1
        run_best = 0
        while j <= last and stocks[j] > floor:
            run_best = max(run_best, stocks[j] - floor)
            j += 1
        # stocks[j] <= floor (or past the end): any later buy is at least as
        # good as `floor`, so restart the scan there.
        i = j
        best = max(best, run_best)
    return best
def recurrence_str_to_sec(recurrence_str):
    """Convert a recurrence string to a value in seconds.

    Args:
        recurrence_str: The execution recurrence formatted as a positive
            numeric value plus a unit suffix -- 'w' (weeks), 'd' (days),
            'H' (hours) or 'M' (minutes), e.g. '1d' for a daily recurrence.

    Returns:
        Recurrence in seconds, or None if the input is misformatted
        (too short, non-numeric, non-positive, or an unknown unit).
    """
    # Seconds per supported unit suffix.
    unit_seconds = {
        'w': 7 * 24 * 60 * 60,
        'd': 24 * 60 * 60,
        'H': 60 * 60,
        'M': 60,
    }
    if not recurrence_str or len(recurrence_str) < 2:
        return None
    try:
        value = int(recurrence_str[:-1])
    except ValueError:
        # The original let ValueError escape although the docstring
        # promises None for misformatted input.
        return None
    if value <= 0:
        # The original used `assert value > 0`, which is stripped under -O;
        # honour the documented contract instead.
        return None
    multiplier = unit_seconds.get(recurrence_str[-1])
    return None if multiplier is None else multiplier * value
def shape_attr_name(name, length=6, keep_layer=False):
    """
    Format an array name to a bounded length to conform with the ESRI
    shapefile maximum attribute name length (10 characters).

    Parameters
    ----------
    name : string
        data array name
    length : int
        maximum length of string to return. Value passed to function is
        overridden and set to 10 if keep_layer=True. (default is 6)
    keep_layer : bool
        Boolean that determines if layer number in name should be retained.
        (default is False)

    Returns
    -------
    String

    Examples
    --------
    >>> import flopy
    >>> name = flopy.utils.shape_attr_name('averylongstring')
    >>> name
    >>> 'averyl'
    """
    # kludges
    if name == 'model_top':
        name = 'top'
    # replace spaces with "_"
    n = name.lower().replace(' ', '_')
    if keep_layer:
        # keep the layer number but compress "_layer" to "_"; ESRI limit
        # forces the maximum width to 10
        length = 10
        n = n.replace('_layer', '_')
    else:
        # drop everything from "_layer" onward; str.find replaces the
        # original bare `except:` around str.index (which could mask
        # unrelated errors)
        idx = n.find('_layer')
        if idx != -1:
            n = n[:idx]
    # slicing is a no-op when the name is already short enough
    return n[:length]
def passenger_spawn(state, next_state):
    """Return True iff a new passenger appeared between *state* and *next_state*.

    A spawn is detected when slots 7 and 8 transition from unset (-1) in
    the current state to set (!= -1) in the next state.
    """
    was_empty = state[7] == -1 and state[8] == -1
    now_filled = next_state[7] != -1 and next_state[8] != -1
    return was_empty and now_filled
def should_skip(cls, res):
    """Determine if the subnetwork is especially small, and if we should skip it.

    Only the 'Property' and 'Gene' classes have a size threshold; any other
    class is never skipped.
    """
    size_thresholds = {'Property': 4000, 'Gene': 125000}
    threshold = size_thresholds.get(cls)
    return threshold is not None and len(res) < threshold
def is_atom(unknown_object):
    """Determine whether an object is an atom (True) or a collection (False).

    Anything exposing a ``shape`` attribute (e.g. an array) counts as an
    atom; sized non-mapping objects count as collections; everything else
    is an atom.
    """
    if hasattr(unknown_object, 'shape'):
        return True
    is_sized = hasattr(unknown_object, '__len__')
    is_mapping = hasattr(unknown_object, 'keys')
    return not is_sized or is_mapping
def modular_pow(base, exponent, modulus):
    """Return (base ** exponent) % modulus.

    Source: https://en.wikipedia.org/wiki/Modular_exponentiation

    Uses the built-in three-argument pow(), which performs square-and-multiply
    in O(log exponent) instead of the original O(exponent) multiplication loop;
    results are identical for non-negative exponents.
    """
    # Everything is congruent to 0 mod 1 (kept explicit, as in the original).
    if modulus == 1:
        return 0
    return pow(base, exponent, modulus)
def serialize_persona_fisica_moral(persona):
    """
    '#/components/schemas/tipoPersona'
    """
    # Truthy -> natural person, falsy -> legal entity.
    return "FISICA" if persona else "MORAL"
def build_dir_list(project_dir, year_list, product_list):
    """Create a list of full directory paths for downloaded MODIS files.

    Fixes the invalid escape sequence "\\{" in the original format string
    (a SyntaxWarning on modern Python) by escaping the backslash explicitly;
    output is byte-identical to the original.

    NOTE(review): the separator is a hard-coded Windows backslash -- consider
    os.path.join if these paths must also work on POSIX systems.

    :param project_dir: base project directory
    :param year_list: years, iterated per product
    :param product_list: MODIS product names
    :return: list of "project\\product\\year" strings
    """
    return ['{}\\{}\\{}'.format(project_dir, product, year)
            for product in product_list
            for year in year_list]
def format_bytes_size(val):
    """
    Take a number of bytes and convert it to a human readable number.

    :param int val: The number of bytes to format.
    :return: The size in a human readable format.
    :rtype: str
    :raises OverflowError: if the value exceeds the largest supported unit.
    """
    if not val:
        return '0 bytes'
    units = ('bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB')
    idx = 0
    while idx < len(units):
        if val < 1024.0:
            return f"{val:.2f} {units[idx]}"
        val /= 1024.0
        idx += 1
    raise OverflowError()
def convert_el(edges):
    """
    Convert from a relation dictionary to a single edgelist representation.

    :param edges: mapping relation -> (source list, target list), where the
        two lists are aligned element-wise
    :return: list of (source, relation, target) triples
    """
    return [(src, rel, dst)
            for rel, (froms, tos) in edges.items()
            for src, dst in zip(froms, tos)]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.