content stringlengths 42 6.51k |
|---|
def find_most_similar_paragraph(similar_paragraphs):
    """Pick the similar-paragraph entry with the highest similarity ratio.

    Each entry's third element (index 2) is its similarity ratio.  Ties go
    to the earliest entry; if no ratio exceeds 0, the first entry wins.
    """
    best_index = 0
    best_ratio = 0
    for index, entry in enumerate(similar_paragraphs):
        if entry[2] > best_ratio:
            best_index, best_ratio = index, entry[2]
    return similar_paragraphs[best_index]
def sumEvenNum(x):
    """Sum even numbers from 1 up to and including ``x``."""
    # range(2, x + 1, 2) walks the even numbers directly instead of
    # testing the parity of every integer; this also avoids shadowing
    # the builtin `sum` with a local variable as the original did.
    return sum(range(2, int(x) + 1, 2))
def _get_short_language_description(full_language_description):
    """Given one of the descriptions in constants.ALL_LANGUAGE_CODES, generates
    the corresponding short description.

    Args:
        full_language_description: str. Full description of the language.

    Returns:
        str. Short description: the text before the first ' (' if present,
        otherwise the whole description.
    """
    # str.partition returns the full string as the head when the
    # separator is absent, which matches the original find()-based logic.
    return full_language_description.partition(' (')[0]
def calculate_position(step_number, overlap, image_size=2048, resolution_factor=0.6215):
    """Return the absolute stage position (in microns) for a given step.

    :param step_number: 1-based step (x or y)
    :param overlap: percent overlap (whole number)
    :param image_size: resolution of image in pixels
    :param resolution_factor: microns-per-pixel of images (default 0.6215)
    :return: absolute position given a step
    """
    # Fraction of the image advanced each step after accounting for overlap.
    offset_fraction = (100 - overlap / 2) / 100
    step_microns = image_size * offset_fraction * resolution_factor
    return float(step_number) * float(step_microns)
def get_next(it):
    """Return the next meaningful line from iterator ``it``, skipping pdf
    junk that delineates pages as well as blank lines.

    Skipped lines: form feeds, the SAE download banner, indented "J1587"
    page headers, underscore horizontal rules, and blank lines.
    """
    junk_markers = (
        "\x0c",                                    # form-feed page break
        "Downloaded from SAE International by",    # download banner
        " " * 34 + "J1587",                        # page header
        "_" * 5,                                   # horizontal rule
    )
    line = next(it)
    # A blank stripped line, or any junk marker inside it, means skip and
    # pull the next line; checking order does not matter since every
    # condition leads to the same action.
    while not line.strip() or any(marker in line.strip() for marker in junk_markers):
        line = next(it)
    return line
def calculateIncomeHelper(income, pay_period):
    """Return annual income based on income received each ``pay_period``.

    Known pay periods: weekly (52/yr), biweekly (26/yr), semimonthly
    (24/yr), monthly (12/yr).  Any other pay_period is treated as a
    numeric pay count per week: income * pay_period * 52.
    """
    # Bug fix: biweekly pay (every two weeks) occurs 26 times a year,
    # not 25 as the original table claimed.
    pay_multipliers = {'weekly': 52, 'biweekly': 26, 'semimonthly': 24, 'monthly': 12}
    if pay_period in pay_multipliers:
        return income * pay_multipliers[pay_period]
    return income * pay_period * 52
def __removeUnpopularIds(listOfIds, popularIds):
    """Filter ``listOfIds`` down to the ids that are popular.

    :param listOfIds: List of ids
    :param popularIds: dict whose keys are the most popular agents over
        the whole population
    :return: listOfIds without agents that aren't popular
    """
    # Membership against the dict itself is O(1); the original's
    # `in popularIds.keys()` and append loop are unnecessary.
    return [agent_id for agent_id in listOfIds if agent_id in popularIds]
def missing_functions(messages):
    """Names of missing functions."""
    prefix = "no body for function"
    names = []
    for warning in messages:
        if warning.startswith(prefix):
            names.append(warning[len(prefix):].strip())
    return names
def length_wu(length, logprobs, alpha=0.):
    """
    NMT length re-ranking score from
    "Google's Neural Machine Translation System" :cite:`wu2016google`.
    """
    # Length penalty ((5 + |Y|) ** a) / ((5 + 1) ** a); kept as two
    # separate powers to match the paper's formulation exactly.
    numerator = (5 + length) ** alpha
    denominator = (5 + 1) ** alpha
    return logprobs / (numerator / denominator)
def get_unique_ids_from_several_lists(*args) -> list:
    """Merge several lists of ids into one list of unique ids.

    :param args: any number of lists of ids
    :return: all ids, deduplicated, in first-seen order
    """
    # Bug fix: the original simply concatenated the lists and never
    # removed duplicates, contradicting the function's name.
    # dict.fromkeys deduplicates while preserving insertion order.
    merged = []
    for id_list in args:
        merged.extend(id_list)
    return list(dict.fromkeys(merged))
def sql_number_list(target):
    """
    Returns a list of numbers suitable for placement after the SQL IN operator in
    a query statement, as in "(1, 2, 3)".

    A single non-list value is wrapped; an empty/falsy target raises
    ValueError.
    """
    if not target:
        raise ValueError
    if not isinstance(target, list):
        target = [target]
    formatted = ", ".join("%d" % (number,) for number in target)
    return "(%s)" % (formatted,)
def get_products_of_all_ints_except_at_index(ints):
    """For each index, compute the product of every *other* element.

    Time Complexity: O(n)
    Space Complexity: O(n)
    n: number of ints (length of the list)

    Raises:
        IndexError: if fewer than two numbers are supplied.
    """
    if len(ints) < 2:
        raise IndexError('Getting the product of numbers at other indices requires at least 2 numbers')
    n = len(ints)
    # prefix[i] = product of ints[0..i]; suffix[i] = product of ints[i..n-1]
    prefix = [0] * n
    suffix = [0] * n
    running = 1
    for i in range(n):
        running *= ints[i]
        prefix[i] = running
    running = 1
    for i in range(n - 1, -1, -1):
        running *= ints[i]
        suffix[i] = running
    # Stitch the answer together: everything after index 0, then
    # prefix * suffix pairs, then everything before the last index.
    products = [suffix[1]]
    for i in range(1, n - 1):
        products.append(prefix[i - 1] * suffix[i + 1])
    products.append(prefix[n - 2])
    return products
def get_target_for_label(label: str) -> int:
    """Convert a label to `0` or `1`.

    Args:
        label(string) - Either "POSITIVE" or "NEGATIVE".

    Returns:
        `1` for "POSITIVE", `0` for anything else.
    """
    if label == "POSITIVE":
        return 1
    return 0
def any_item_in_string(items, test_string):
    """Return True if any item in ``items`` is a substring of ``test_string``."""
    # A generator lets any() short-circuit on the first hit; the original
    # built a full intermediate list first.
    return any(item in test_string for item in items)
def thermal_conductivity_carbon_steel(temperature):
    """Thermal conductivity of carbon steel [W/m/K].

    DESCRIPTION:
        [BS EN 1993-1-2:2005, 3.4.1.3]
    PARAMETERS:
        temperature - steel temperature in degrees Celsius
    OUTPUTS:
        thermal conductivity in W/m/K; 0 outside the code's 20-1200 C range
    REMARKS:
        Bug fix: the original added 273.15 (Celsius -> Kelvin) before
        comparing, but the Eurocode expression 54 - 3.33e-2 * theta and
        its 20 <= theta < 800 validity range are defined with theta in
        degrees Celsius, so no unit conversion belongs here.
    """
    if 20 <= temperature <= 800:
        return 54 - 0.0333 * temperature
    elif 800 <= temperature <= 1200:
        return 27.3
    else:
        return 0
def isnumeric(numStr):
    """Return True if ``numStr`` (any object) converts to a number.

    Hack to determine if a non-unicode string is numeric or not: the
    value is first coerced with str(), so e.g. isnumeric(3) is True.
    """
    try:
        # float() accepts every string int() does, so a single attempt
        # suffices; the original's nested bare excepts hid real errors.
        float(str(numStr))
        return True
    except ValueError:
        return False
def prompt(question):
    """
    Ask a question (e.g 'Are you sure you want to do this (Y or N)? >')
    and return the raw answer string.

    Returns None when stdin is exhausted (EOF); KeyboardInterrupt is
    re-raised so Ctrl-C still aborts the caller.
    """
    try:
        print(question)
        return input()
    except KeyboardInterrupt:
        # Preserve Ctrl-C semantics for the caller.
        raise
    except EOFError:
        # Narrowed from the original bare `except:` which silently
        # swallowed every error; EOF (piped input exhausted, Ctrl-D)
        # is the expected "no answer" case.
        return None
def mod10(list):
    """Implements the Luhn Algorithm (a.k.a. mod10), which
    is a checksum formula to validade a variety of
    identification numbers, such as credit card numbers.
    Requires a list of integers with the numbers to be
    validated.
    """
    total = 0
    # Walk right-to-left; every other digit (starting with the last)
    # is doubled, casting out nines from two-digit results.
    for position, digit in enumerate(reversed(list)):
        if position % 2 == 0:
            digit *= 2
            if digit > 9:
                digit -= 9
        total += digit
    remainder = total % 10
    # Zero means the checksum already balances; otherwise return the
    # digit needed to reach the next multiple of ten.
    return (10 - remainder) if remainder else remainder
def parse_num(tokens):
    """Parser function for numerical data.

    :param tokens: The grammar tokens
    :type tokens: list
    :return: the first token converted to float
    """
    first_token = tokens[0]
    return float(first_token)
def signed2unsigned(value, width=32):
    """Convert a signed value to its two's-complement unsigned encoding."""
    if value < 0:
        # Negative values wrap around modulo 2**width.
        return int(value + (1 << width))
    return int(value)
def extended_gcd(aa, bb):
    """Extended greatest common divisor
    from https://rosettacode.org/wiki/Modular_inverse#Python

    Returns (g, x, y) with g = gcd(aa, bb) and aa*x + bb*y == g
    (Bezout coefficients).
    """
    # Work on absolute values; the signs of the inputs are re-applied
    # to the coefficients at the very end.
    lastremainder, remainder = abs(aa), abs(bb)
    x, lastx, y, lasty = 0, 1, 1, 0
    while remainder:
        # Iterative extended Euclid: divide, then advance the remainder
        # and both coefficient pairs in lock-step.
        lastremainder, (quotient, remainder) = remainder, divmod(lastremainder, remainder)
        x, lastx = lastx - quotient * x, x
        y, lasty = lasty - quotient * y, y
    return lastremainder, lastx * (-1 if aa < 0 else 1), lasty * (-1 if bb < 0 else 1)
def rounding_filters(filters, w_multiplier):
    """Calculate and round number of filters based on width multiplier."""
    if not w_multiplier:
        return filters
    divisor = 8
    scaled = filters * w_multiplier
    # Round to the nearest multiple of `divisor`, but never below it.
    rounded = max(divisor, int(scaled + divisor / 2) // divisor * divisor)
    # prevent rounding by more than 10%
    if rounded < 0.9 * scaled:
        rounded += divisor
    return int(rounded)
def get_price_padding(closing_price: float) -> float:
    """
    Calculate how far above and below to place your entry/stop.

    Padding increases with price across fixed bands.
    """
    # (exclusive upper bound of band, padding), checked in ascending order.
    bands = ((5, 0.01), (10, 0.02), (50, 0.03), (100, 0.05))
    for upper_bound, padding in bands:
        if closing_price < upper_bound:
            return padding
    return 0.1
def is_unknown(value: str) -> bool:
    """Returns True if value represents an unknown value."""
    if not isinstance(value, str):
        raise TypeError
    if not value or value.upper() in ("UNKN", "UNK", "UKN"):
        return True
    # A string made up entirely of placeholder characters is unknown too.
    return all(char in ("/", "X", ".") for char in value)
def flatten(list_of_lists):
    """
    >>> flatten([[1,2], [3,4,5]])
    [1, 2, 3, 4, 5]
    """
    flat = []
    for sublist in list_of_lists:
        flat.extend(sublist)
    return flat
def vel_final_dist_helper(initial_velocity, acceleration, dist):
    """Return the final velocity via v^2 = u^2 + 2*a*d.

    Helper function to be called by vel_final_dist().

    :param initial_velocity: Integer initial velocity
    :param acceleration: Integer acceleration
    :param dist: Integer distance traveled
    :return: final velocity
    """
    velocity_squared = initial_velocity ** 2 + 2 * acceleration * dist
    return velocity_squared ** 0.5
def sort_list_of_dicts(lst_of_dct, keys, reverse=False, **sort_args):
    """
    Sort list of dicts in place by one or multiple keys.
    Dicts missing a key are sorted to the end.

    :param lst_of_dct: input structure. List of dicts.
    :param keys: one key or a list of keys
    :param reverse: sort descending when True
    :param sort_args: extra keyword arguments forwarded to list.sort
    :return: the same (now sorted) list
    """
    # isinstance is the idiomatic type check (was `type(keys) != list`);
    # the dead commented-out variants were removed.
    if not isinstance(keys, list):
        keys = [keys]

    def sort_key(dct):
        # (False, value) sorts before (True, 0), pushing dicts that are
        # missing the key to the end.
        return [(False, dct[key]) if key in dct else (True, 0) for key in keys]

    lst_of_dct.sort(key=sort_key, reverse=reverse, **sort_args)
    return lst_of_dct
def interpret_field(data):
    """
    Convert data to int, if not possible, to float, otherwise return
    data itself.

    Parameters
    ----------
    data : object
        Some data object

    Returns
    -------
    data : int, float or same data type as the input
        Return the data object casted as an int, float or return
        data itself.
    """
    # Bug fix: int(None) / float([]) raise TypeError, not ValueError;
    # both must fall through to returning the data unchanged.
    try:
        return int(data)
    except (ValueError, TypeError):
        try:
            return float(data)
        except (ValueError, TypeError):
            return data
def quaternion_wxyz_to_xyzw_order(quat):
    """
    Changes to {q.x, q.y, q.z, q.w} quaternion order from Isaac's {q.w, q.x, q.y, q.z}
    """
    w, x, y, z = quat[0], quat[1], quat[2], quat[3]
    return [x, y, z, w]
def get_all_descendants(root, children_map):
    """
    Returns all descendants in the tree of a given root node (including
    the root itself), based on the map from parents to children.
    """
    # Iterative depth-first traversal instead of recursive set unions.
    descendants = set()
    stack = [root]
    while stack:
        node = stack.pop()
        descendants.add(node)
        stack.extend(children_map.get(node, []))
    return descendants
def _create_group_to_col_position(column_groups):
    """Get mapping from column groups to column positions.

    Args:
        column_groups (list or None): The column groups to display in the
            estimation table.

    Returns:
        dict or None: mapping from column group titles to lists of column
        positions, or None when no groups were supplied.
    """
    if column_groups is None:
        return None
    # setdefault groups positions in one pass; the original's
    # list(set(...)) pre-seeding round-trip is unnecessary.
    group_to_col_index = {}
    for position, group in enumerate(column_groups):
        group_to_col_index.setdefault(group, []).append(position)
    return group_to_col_index
def argmaxIndexWithTies(l, f = lambda x: x):
    """
    @param l: C{List} of items
    @param f: C{Procedure} that maps an item into a numeric score
    @returns: tuple of (list of all indices of C{l} sharing the highest
              score, that highest score)
    """
    best_indices = []
    best_score = f(l[0])
    for index, item in enumerate(l):
        score = f(item)
        if score > best_score:
            # New maximum: restart the tie list.
            best_indices = [index]
            best_score = score
        elif score == best_score:
            best_indices.append(index)
    return (best_indices, best_score)
def try_coerce_ascii(string_utf8):
    """Check that the given utf8-encoded byte string is pure 7-bit ASCII.

    Returns the byte string unchanged when it decodes as ASCII, and
    ``None`` when it contains non-ASCII bytes.
    """
    try:
        string_utf8.decode('ascii')
    except UnicodeDecodeError:
        return None
    return string_utf8
def truncated(text, max_length=100, msg='...'):
    """Return ``text``, cut to ``max_length`` characters with ``msg``
    appended when it is too long.

    >>> truncated("hello world!", 5)
    'hello...'
    >>> truncated("hello world!", 25)
    'hello world!'
    >>> truncated("hello world!", 5, " [truncated]")
    'hello [truncated]'
    """
    # Bug fix: text of exactly max_length characters needs no
    # truncation, so the comparison must be <=, not <.
    if len(text) <= max_length:
        return text
    return text[:max_length] + msg
def stations_by_river(stations):
    """Given a list of station objects, return a dictionary that maps river
    names to a list of station objects on that river.
    """
    mapping = {}
    for station in stations:
        # setdefault replaces the redundant `in mapping.keys()` lookup
        # plus two-branch insert of the original.
        mapping.setdefault(station.river, []).append(station)
    return mapping
def point_dist(p0, p1):
    """
    Calculate distance between two points in 2D.

    :param p0: first point, array like coordinates
    :param p1: second point, array like coordinates
    :return: distance, float
    """
    dx = p1[0] - p0[0]
    dy = p1[1] - p0[1]
    return (dx ** 2 + dy ** 2) ** 0.5
def remove_2x_compat_notes(matcher):
    """
    A number of the files end in
    {{{
    #!html
    <h2 class='compatibility'>Older versions</h2>
    }}}
    and everything after is garbage, so just remove it.

    Always returns the empty string as the replacement text; the
    ``matcher`` argument is not used.
    """
    # NOTE(review): this raw string is a no-op expression statement.  It
    # looks like the regex that was meant to select the block described
    # above, but it is never passed to re.sub/re.match -- confirm intent
    # against the callers before removing or wiring it up.
    r'\{\{\{\n#!html\n<h2(.|\n)*'
    return ''
def prepare_folders(output_directory: str, delete_existing: bool, add: bool) -> str:
    """
    A general folder prep function to help with the preprocess steps; handles
    creation of the output/error directories and the delete_existing / add
    flag conflicts.

    Parameters
    ----------
    output_directory: The name of the output directory that should be made.
    delete_existing: If the existing output directory should be overwritten
    add: If the user is trying to add molecules to an existing output folder.

    Returns
    -------
    error_dir: The name of the error directory that has been made/found.
    """
    import os
    import shutil
    error_dir = os.path.join(output_directory, 'error_mols')
    if delete_existing and add:
        raise Exception("Can not specify BOTH --delete-existing AND --add flags")
    if delete_existing:
        # Wipe any previous run, then recreate output and error dirs.
        if os.path.exists(output_directory):
            shutil.rmtree(output_directory)
        os.makedirs(output_directory)
        os.makedirs(error_dir)
    elif add:
        # Adding requires the output directory from a previous run.
        if not os.path.exists(output_directory):
            raise Exception(f'--add flag was specified but directory {output_directory} not found')
    else:
        # Fresh run: refuse to clobber an existing directory.
        if os.path.exists(output_directory):
            raise Exception(f'Output directory {output_directory} already exists. '
                            f'Specify `--delete-existing` to remove.')
        os.makedirs(output_directory)
        os.makedirs(error_dir)
    return error_dir
def num2alpha(num: int) -> str:
    """Turn a positive integer into spreadsheet-style letters
    (1 -> 'A', 26 -> 'Z', 27 -> 'AA'); 0 maps to the empty string."""
    # Iterative bijective base-26 conversion (the original recursed).
    letters = ""
    while num:
        num, remainder = divmod(num - 1, 26)
        letters = chr(remainder + ord('A')) + letters
    return letters
def generateSuperItemsets(base_itemsets):
    """
    combine tuples in the base itemsets list to generate the immediate super itemsets list
    (classic Apriori candidate generation with subset pruning)

    :param base_itemsets - [(a,b), (b,c), (a,c) ...] list of k-itemsets
    :return super_itemsets - [(a,b,c), ...] list of (k+1)-itemsets whose
        every k-element subset appears in base_itemsets
    """
    if base_itemsets == []:
        return []
    # sort: make sure, in (a,b), a < b
    # NOTE: sorted() also converts each tuple into a list, in place.
    for n in range(len(base_itemsets)):
        base_itemsets[n] = sorted(base_itemsets[n])
    num_base = len(base_itemsets[0])
    num_super = num_base + 1
    super_itemsets = []
    len_itemsets = len(base_itemsets)
    for n_x in range(len_itemsets):
        x = base_itemsets[n_x]
        for n_y in range(n_x+1, len_itemsets):
            y = base_itemsets[n_y]
            # Join step: merge two k-itemsets that share the same k-1
            # prefix (x[-1] < y[-1] avoids duplicate candidates).
            if x[:-1] == y[:-1] and x[-1] < y[-1]:
                xy_list = x + y[-1:]
                # Prune step: every k-subset of the candidate (drop one
                # element at a time) must itself be a base itemset.
                count_ = 0
                for i in range(len(xy_list)):
                    if xy_list[:i]+xy_list[i+1:] in base_itemsets:
                        count_ += 1
                    else:
                        break
                if count_ == num_super:
                    super_itemsets.append(tuple(xy_list))
    return super_itemsets
def solution(S, P, Q):
    """
    We cum sum all impacts for each nucleotide at each position
    then, for each query, we simply subtract the sum of the end minus the
    sum until the start and start to check if this subtraction is higher
    than 0.
    If it is, then it means that that nucleotide exists within the query range
    and therefore, if we assert starting from A until T, we can therefore
    return the minimal impact value.
    """
    m = len(P)  # number of queries
    n = len(S)  # length of the DNA sequence
    res = [0] * m
    # Mount the prefix sum of each nucleotide in the sequence
    a_prefix_sum = [0] * (n + 1)
    c_prefix_sum = [0] * (n + 1)
    g_prefix_sum = [0] * (n + 1)
    t_prefix_sum = [0] * (n + 1)
    for i in range(1, n + 1):
        # Carry all four running counts forward, then bump the one for
        # the nucleotide at position i-1.  Only positivity of a range
        # difference is tested below, so the 1/2/3/4 weights are benign.
        a_prefix_sum[i] = a_prefix_sum[i - 1]
        c_prefix_sum[i] = c_prefix_sum[i - 1]
        g_prefix_sum[i] = g_prefix_sum[i - 1]
        t_prefix_sum[i] = t_prefix_sum[i - 1]
        if S[i - 1] == 'A':
            a_prefix_sum[i] += 1
        elif S[i - 1] == 'C':
            c_prefix_sum[i] += 2
        elif S[i - 1] == 'G':
            g_prefix_sum[i] += 3
        elif S[i - 1] == 'T':
            t_prefix_sum[i] += 4
    # Now, perform the queries
    for i in range(m):
        start = P[i]
        stop = Q[i]
        # Check from lowest impact (A=1) upward; the first nucleotide
        # present in [start, stop] gives the minimal impact factor.
        if a_prefix_sum[stop + 1] - a_prefix_sum[start] > 0:
            res[i] = 1
        elif c_prefix_sum[stop + 1] - c_prefix_sum[start] > 0:
            res[i] = 2
        elif g_prefix_sum[stop + 1] - g_prefix_sum[start] > 0:
            res[i] = 3
        else:
            res[i] = 4
    return res
def hex_to_ipv6(hex):
    """
    Takes a 128 bit hexidecimal string and returns that string formatted for IPv6

    :param hex: Any 128 bit hexidecimal passed as string
    :return: String formatted in IPv6
    """
    groups = [hex[start:start + 4] for start in range(0, len(hex), 4)]
    return ':'.join(groups)
def _sequence_to_index(seq, dim_list):
    """
    Inverse of _index_to_sequence: mixed-radix positional encoding.

    Parameters
    ----------
    seq : list of ints
        List of coordinates for each particle.
    dim_list : list of int
        List of dimensions of consecutive particles.

    Returns
    -------
    i : int
        Flat index in a matrix.
    """
    index = 0
    for coordinate, dimension in zip(seq, dim_list):
        index = index * dimension + coordinate
    return index
def BitwiseOr(value1, value2) -> int:
    """Returns the result of the bitwise OR operation."""
    left = int(value1)
    right = int(value2)
    return left | right
def is_set(value):
    """
    Checks if the given value is a set object or not.

    Args:
        value (mixed): Value passed in by the user.

    Returns:
        bool: True if the given value is a set else False.

    Example:

        >>> is_set(set([1, 2]))
        True
        >>> is_set([1, 2, 3])
        False

    .. versionadded:: 4.0.0
    """
    # Note: frozenset is NOT a subclass of set, so it returns False here.
    value_is_set = isinstance(value, set)
    return value_is_set
def get_patch_values(patch, path):
    """Get the patch values corresponding to the specified path.

    If there are multiple values specified for the same path
    (for example the patch is [{'op': 'add', 'path': '/name', 'value': 'abc'},
    {'op': 'add', 'path': '/name', 'value': 'bca'}])
    return all of them in a list (preserving order).

    :param patch: HTTP PATCH request body.
    :param path: the path to get the patch values for.
    :returns: list of values for the specified path in the patch.
    """
    values = []
    for operation in patch:
        if operation['path'] == path and operation['op'] != 'remove':
            values.append(operation['value'])
    return values
def fasta_to_histo(fastalines):
    """Reads fastaline tuples as produced by read_fasta(...) and returns a
    histogram (a list indexed by sequence length) of
    dict(tally=..., at_bases=..., gc_bases=...)
    """
    histo = list()
    for fline in fastalines:
        # Grow the histogram until index `fline.length` exists.
        while len(histo) <= fline.length:
            histo.append(dict(tally=0, at_bases=0, gc_bases=0))
        bucket = histo[fline.length]
        bucket['tally'] += 1
        bucket['at_bases'] += fline.at_bases
        bucket['gc_bases'] += fline.gc_bases
    return histo
def n64_to_n32(nonce):
    """
    A 64 bit nonce in Python is actually constructed by n2 << 32 | n1
    with n1 and n2 the next 32-bit random numbers.
    This function takes a nonce and returns n1 and n2, the two 32-bit
    random numbers composing it.
    """
    # divmod splits the value into the high and low 32-bit halves.
    n2, n1 = divmod(nonce, 1 << 32)
    assert nonce == (n2 << 32) | n1
    return n1, n2
def expand_template(tmpl_text, values):
    """Simplest template expander (string.Template $-substitution)."""
    from string import Template
    template = Template(tmpl_text)
    expanded = template.substitute(values)
    return expanded
def search_event(query, hits, n_total_hits, query_time_ms, source_id=None):
    """Format the properties of the ``search`` event.

    :param query: a dictionary that specifies the query and it's options
    :type query: dict
    :param hits: a list of the returned hits. Each item in the list should
                 contain a dictionary with the document and source ID.
    :type hits: list
    :param n_total_hits: number of total hits that matched the query
    :type n_total_hits: int
    :param query_time_ms: duration of the query in milliseconds
    :type query_time_ms: int
    :param source_id: specifies which index was targeted. If ``source_id``
                      is ``None``, the search was executed against the
                      combined index.
    :type source_id: str or None
    """
    event = dict(
        source_id=source_id,
        query=query,
        hits=hits,
        n_total_hits=n_total_hits,
        query_time_ms=query_time_ms,
    )
    return event
def get_colour(image, p):
    """
    Returns a char with the colour of given point.

    NOTE(review): currently a stub -- it always returns the empty string
    and never reads `image` or `p`; the commented line below hints at the
    intended pixel lookup.  Confirm before relying on the return value.
    """
    # print('\n\n', image[p[0]+3, p[1]+3])
    return ''
def unzip(zipped_list, n):
    """returns n lists with the elems of zipped_list unsplitted.

    The general case could be solved with zip(*zipped_list), but here we
    are also dealing with:
     - un-zipping an empty list to n independent empty lists
     - ensuring that all zipped items in zipped_list have length n,
       raising ValueError if not.
    """
    if not zipped_list:
        # Bug fix: `[[]] * n` repeated ONE list object n times, so an
        # append to any returned list showed up in all of them.  Build n
        # independent lists instead.
        return tuple([] for _ in range(n))
    if not all(isinstance(x, tuple) and len(x) == n for x in zipped_list):
        raise ValueError
    return zip(*zipped_list)
def ascertain_list(x):
    """Return ``x`` as a list.

    Iterables (except dicts) are converted with list(); everything else,
    including dicts, is wrapped as ``[x]``.  Useful when a function
    expects a list but you want to also pass a single element without
    wrapping it yourself.
    """
    if isinstance(x, list):
        return x
    # "All but where it's a problem" approach: convert any non-dict
    # iterable, wrap everything else.  (Dead commented-out alternative
    # removed.)
    if hasattr(x, '__iter__') and not isinstance(x, dict):
        return list(x)
    return [x]
def cyclic(lst1, lst2):
    """Return True if ``lst2`` is a cyclic rotation of ``lst1``."""
    if len(lst1) != len(lst2):
        return False
    # Bug fix: two empty lists are trivially rotations of each other;
    # the original fell through to False.
    if not lst1:
        return True
    # lst2 is cyclic with lst1 iff some rotation of lst1 equals lst2.
    # This replaces the original's per-element membership scan plus
    # offset re-checks with one direct rotation comparison.
    return any(lst1[shift:] + lst1[:shift] == lst2
               for shift in range(len(lst1)))
def set_fd_inheritable(fd, inheritable):
    """
    Enable or disable the "inheritability" of a file descriptor.

    See Also:
        https://docs.python.org/3/library/os.html#inheritance-of-file-descriptors
        https://github.com/python/cpython/blob/65e6c1eff3/Python/fileutils.c#L846-L857
    """
    from fcntl import ioctl
    if inheritable:
        # FIONCLEX clears close-on-exec -> fd IS inherited across exec.
        from termios import FIONCLEX
        request = FIONCLEX
    else:
        # FIOCLEX sets close-on-exec -> fd is NOT inherited.
        from termios import FIOCLEX
        request = FIOCLEX
    return ioctl(fd, request)
def remove_duplicates(list_, func=None):
    """ Remove all duplicates in a list, preserving first-seen key order.

    Set func to use another value than a self hash for determining if duplicate.
    Values must be hashable (If func is None) as they are passed through as
    dict keys. (Lists work but not Dicts) """
    if func is None:
        return list(dict.fromkeys(list_))
    # With a key function, later items overwrite earlier ones that share
    # the same key; the surviving values come out in key-insertion order.
    keyed = {func(obj): obj for obj in list_}
    return list(keyed.values())
def _get_provenance_record(attributes, ancestor_files):
    """Return a provenance record describing the diagnostic data."""
    # Copy so the caller's attributes dict is never mutated.
    record = dict(attributes)
    record['ancestors'] = ancestor_files
    record['realms'] = ['land']
    record['domains'] = ['global']
    return record
def _maybe_overlapping_memory(shape, strides):
    """Returns bool value indicating the array with such shape and strides
    might have overlapping memory.

    Args:
        shape (tuple of int): The shape of output.
        strides (tuple of int): The strides of output, given in the unit
            of steps.

    Returns:
        bool: Existence of the overlapping memory
    """
    # Visit dimensions from the smallest |stride| upward.  If a stride
    # does not jump past everything reachable with the smaller strides,
    # two different index tuples can address the same element.
    reachable_span = 0
    for stride, size in sorted(zip([abs(s) for s in strides], shape)):
        if stride <= reachable_span:
            return True
        reachable_span += stride * (size - 1)
    return False
def parse_header(line):
    """
    Get all sample names present in the vcf header.
    Assume each column is in format 'extra_info.samplename'.

    Args:
        line (str): Header line.

    Returns:
        samples (list of str): List of unique sample names, in order.
    """
    columns = line.strip().split("\t")[9:]
    samples = []
    for column in columns:
        sample = column.split('.')[-1]
        if sample not in samples:
            samples.append(sample)
    return samples
def __check_same(res, name, mdel, binel):
    """
    Helper function for binary metadata cross-validation with JSON.

    If @mdel (JSON) and @binel (Binary) differ (compared via str()),
    print a formatted error mentioning @name and return False;
    otherwise return @res unchanged.
    """
    if str(mdel) == str(binel):
        return res
    print("Element '{}' are not the same in metadata and binary data"
          .format(name))
    return False
def is_clef(annotation_token: str) -> bool:
    """Returns true if the token is a clef annotation ("clef." prefix)."""
    prefix = "clef."
    return annotation_token.startswith(prefix)
def is_intel_email(email):
    """Checks that email is valid Intel email"""
    if not email:
        # Preserve the original short-circuit: falsy input is returned as-is.
        return email
    return (len(email) > 10
            and ' ' not in email
            and email.lower().endswith('@intel.com'))
def transpose_report(report):
    """ Splits the report on newlines (dropping empty lines) and
    transposes it into per-column lists of characters.
    """
    rows = [list(line) for line in report.split("\n") if len(line)]
    return [list(column) for column in zip(*rows)]
def compound_interest(principal: int, interest: float, periods: int) -> float:
    """
    Calculates the total return on a standard deposit and interest rate every period.

    Args
        principal: amount to be deposited every period
        interest: expressed in decimal 3% = .03
        periods: the number of periods to calculate for
    """
    # Each deposit compounds for a different number of remaining periods;
    # sum the future value of every deposit.  (Replaces the original's
    # `periods = periods` no-op and manual while-loop accumulator.)
    return sum(principal * (1 + interest) ** p for p in range(1, periods + 1))
def heavy(t):
    """ Take sample from the heavyside function (a.ka. unit step function).
    """
    return 1.0 if t > 0 else 0.0
def get_total(puntaje: str):
    """Strip the 'Total: ' prefix from the score string and swap decimal
    commas for dots."""
    without_prefix = puntaje.replace("Total: ", "")
    return without_prefix.replace(",", ".")
def from_base_alphabet(value: str, alphabet: str) -> int:
    """Returns value in base 10 using base len(alphabet)
    [bijective base]"""
    base = len(alphabet)
    total = 0
    for digit in value:
        total = total * base + alphabet.find(digit)
    return total
def positive_sum3(a: int, b: int, c: int=0):
    """Sum of up to three non-negative integers.

    Parameters
    ----------
    a: int
    b: int
    c: int, default 0
        All parameters are positive values.
        If negative, an exception is raised.

    Returns
    ----------
    int

    Raises
    ----------
    ValueError
        If any argument is negative.
    """
    # ValueError is the idiomatic exception for a bad argument value; it
    # subclasses Exception, so existing `except Exception` callers still
    # catch it.
    if a < 0 or b < 0 or c < 0:
        raise ValueError('Error: Negative argument')
    return a + b + c
def is_requirement(line):
    """
    Return True if the requirement line is a package requirement.

    Returns:
        bool: True if the line is not blank, a comment, a URL, or
            an included file
    """
    if not line:
        # Preserve the original short-circuit on falsy input.
        return line
    skip_prefixes = ('-r', '#', '-e', 'git+', '-c')
    return not line.startswith(skip_prefixes)
def _get_tips_from_string(tips_str):
    """Get the list of tips from a '*'-delimited tips string."""
    parts = tips_str.split('*')
    return [part.strip() for part in parts if part]
def fib_memoization(ar, n):
    """
    Top down approach
    https://www.geeksforgeeks.org/tabulation-vs-memoization/

    NOTE(review): despite the name, the recurrence ar[n] = n * f(n-1)
    with base case f(0) = 1 computes the FACTORIAL of n, not Fibonacci.
    `ar` is the memo table and must have at least n+1 slots, pre-filled
    with None for uncomputed entries.
    """
    if n == 0:
        return 1
    if ar[n] is not None:
        # Memo hit: reuse the previously computed value.
        return ar[n]
    else:
        # Debug trace showing which subproblems are actually computed.
        print("call---" + str(n))
        ar[n] = n * fib_memoization(ar, n - 1)
        return ar[n]
def getRGBListFromRGBStr(rgbStr):
    """ returns [red, green, blue] that represents whether each led is on or off.

    Raises ValueError if the string does not contain exactly three
    comma-separated components.
    """
    components = rgbStr.split(',')
    red, green, blue = components
    return [red, green, blue]
def fix_read_order_keys(key, start_value=7):
    """fix reading restored ckpt order_dict keys, by Hao.

    Drops the first ``start_value`` characters of ``key`` (7 matches the
    length of the "module." prefix).
    """
    trimmed = key[start_value:]
    return trimmed
def insertion_sort(l):
    """Insertion sort: place each item before the first element of the
    growing sorted list that is >= it."""
    result = []
    for value in l:
        position = 0
        while position < len(result) and result[position] < value:
            position += 1
        result.insert(position, value)
    return result
def cal_num_data_points(data: dict) -> int:
    """ Calculate the number of data points in a dataset

    Parameters
    ----------
    data
        dataset: mapping from a user to that user's list of data points

    Returns
    -------
    int
        the number of data points in a dataset
    """
    # .values() instead of .items() (the keys were unused), and a
    # generator instead of an intermediate list.
    return sum(len(data_u) for data_u in data.values())
def int_to_string(ints, inv_vocab):
    """
    Output a machine readable list of characters based on a list of indexes in the machine's vocabulary

    Arguments:
    ints -- list of integers representing indexes in the machine's vocabulary
    inv_vocab -- dictionary mapping machine readable indexes to machine readable characters

    Returns:
    list of characters corresponding to the indexes of ints thanks to the inv_vocab mapping
    """
    characters = []
    for index in ints:
        characters.append(inv_vocab[index])
    return characters
def SGD(lr=0.001, momentum=0):
    """SGD Optimiser.

    :param lr: Learning rate
    :type lr: float
    :param momentum: Momentum
    :type momentum: float
    """
    opt_args = {"lr": lr, "momentum": momentum}
    return {"optimiser": "SGD", "opt_args": opt_args}
def GroupCheckBool(TagDict, TagGroupList):
    """
    GroupCheckBool is a function to check whether the tag in TagDict.keys()
    is in the TagGroupList. The key in TagDict is the tags and the value is
    the corresponding colNum. TagGroupList is a List of List to specify the
    group of the tag string.

    Returns a list of lists of bools mirroring TagGroupList: True where
    the tag is present in TagDict with a colNum other than -1.
    """
    # A tag checks out only when it is in the dict AND its column number
    # is not the -1 sentinel; everything else stays False.  This nested
    # comprehension replaces the pre-seeded container plus two
    # mutate-in-place loops (with an unused loop variable) of the
    # original.
    return [[tag in TagDict and TagDict[tag] != -1 for tag in group]
            for group in TagGroupList]
def value_and_ldj(fn, args):
    """Compute the value and log-det jacobian of ``fn`` evaluated at ``args``.

    Assumes ``fn`` returns ``(value, (extra, ldj))`` where ``ldj`` is the
    log determinant of the jacobian of the transformation.

    Args:
      fn: Function to evaluate.
      args: Arguments to `fn`.

    Returns:
      ret: First output of `fn`.
      extra: Second output of `fn` (the ``(extra, ldj)`` pair).
      ldj: Log-det jacobian of `fn`.

    #### Example

    ```python
    def scale_by_two(x):
      # Return x unchanged as the extra output for illustrative purposes.
      return 2 * x, (x, np.log(2))

    y, y_extra, y_ldj = value_and_ldj(scale_by_two, 3.)
    assert y == 6
    assert y_extra == (3, np.log(2))
    assert y_ldj == np.log(2)
    ```
    """
    primary, payload = fn(args)
    _, ldj = payload
    return primary, payload, ldj
def SortClassAdsByElement(classads, element_name):
    """
    Sort the classads (or any dictionary really) by an attribute,
    compared numerically.

    @param classads: list of classad objects
    @param element_name: string of element name
    """
    def numeric_key(ad):
        return int(ad[element_name])
    return sorted(classads, key=numeric_key)
def format_header_text(string: str):
    """Returns a header string that is centered within a space of 39 characters, bordered by "#".

    Examples:
        >>> format_header_text('ARRAY FUNCTIONS')\n
        '########### ARRAY FUNCTIONS ###########'
    """
    padded = f" {string} "
    # '#^39' centers within 39 characters, filling with '#'.
    return format(padded, "#^39")
def open_range_sublist(the_list, from_index, to_index):
    """
    Returns an open range sublist of 'the_list', 'to_index' is not included.

    Indexes are clamped into range, so out-of-range values yield a
    (possibly empty) list rather than raising.

    :param the_list:
    :param from_index:
    :param to_index:
    :return: sublist of 'the_list' or empty list if indexes are out of range
    """
    list_len = len(the_list)
    from_index = max(from_index, 0)
    if to_index < 0 or to_index > list_len:
        to_index = list_len
    to_index = max(to_index, from_index)
    # Slicing copies the clamped range in one step; the original built
    # the result element by element with an append loop.
    return the_list[from_index:to_index]
def seconds_to_hour_min_sec(secs):
    """Format a duration in seconds as 'HH:MM:SS'.

    :param secs: duration in seconds
    :return: formatted string
    """
    remaining = secs
    hours = int(remaining / 3600)
    remaining -= hours * 3600
    mins = int(remaining / 60)
    remaining -= mins * 60
    return '{:02d}:{:02d}:{:02d}'.format(hours, mins, int(remaining))
def replenerate_hostname(h):
    """
    Ensure ``h`` is a FQDN: a hostname without any dot gets the
    wikimedia.org domain appended; anything already dotted is returned
    unchanged.
    """
    if "." in h:
        return h
    return f"{h}.wikimedia.org"
def _buildTreeString(root, curr_index):
    """Recursively walk down the binary tree and build a pretty-print string.

    In each recursive call, a "box" of characters visually representing the
    current (sub)tree is constructed line by line. Each line is padded with
    whitespaces to ensure all lines in the box have the same length. Then the
    box, its width, and start-end positions of its root node value repr string
    (required for drawing branches) are sent up to the parent call. The parent
    call then combines its left and right sub-boxes to build a larger box etc.

    Args:
        root - Node, root node to build string on
        curr_index - int , top-down index of root

    Returns:
        (new_box, len(new_box[0]), new_root_start, new_root_end) - tuple
    """
    # Generate representation string for current root node.
    if root is None:
        return [], 0, 0, 0
    line1 = []
    line2 = []
    # Nodes carrying a position are rendered as "pos,value".
    if root.pos != None:
        node_repr = '{},{}'.format(root.pos, root.value)
    else:
        node_repr = str(root.value)
    new_root_width = gap_size = len(node_repr)
    # Get the left and right sub-boxes, their widths, and root repr positions.
    l_box, l_box_width, l_root_start, l_root_end = \
        _buildTreeString(root.left, 2 * curr_index + 1)
    r_box, r_box_width, r_root_start, r_root_end = \
        _buildTreeString(root.right, 2 * curr_index + 2)
    # Draw the branch connecting the current root node to the left sub-box.
    # Pad the line with whitespaces where necessary.
    if l_box_width > 0:
        l_root = (l_root_start + l_root_end) // 2 + 1
        line1.append(' ' * (l_root + 1))
        line1.append('_' * (l_box_width - l_root))
        line2.append(' ' * l_root + '/')
        line2.append(' ' * (l_box_width - l_root))
        new_root_start = l_box_width + 1
        gap_size += 1
    else:
        new_root_start = 0
    # Draw the representation of the current root node.
    line1.append(node_repr)
    line2.append(' ' * new_root_width)
    # Draw the branch connecting the current root node to the right sub-box.
    # Pad the line with whitespaces where necessary.
    if r_box_width > 0:
        r_root = (r_root_start + r_root_end) // 2
        line1.append('_' * r_root)
        line1.append(' ' * (r_box_width - r_root + 1))
        line2.append(' ' * r_root + '\\')
        line2.append(' ' * (r_box_width - r_root))
        gap_size += 1
    new_root_end = new_root_start + new_root_width - 1
    # Combine the left and right sub-boxes with the branches drawn above.
    gap = ' ' * gap_size
    new_box = [''.join(line1), ''.join(line2)]
    for i in range(max(len(l_box), len(r_box))):
        # Missing lines on the shorter side are padded with spaces so
        # both columns stay aligned.
        l_line = l_box[i] if i < len(l_box) else ' ' * l_box_width
        r_line = r_box[i] if i < len(r_box) else ' ' * r_box_width
        new_box.append(l_line + gap + r_line)
    # Return the new box, its width and its root repr positions.
    return new_box, len(new_box[0]), new_root_start, new_root_end
def validate_args(numargs, args):
    """
    Check that `args` contains at least `numargs` elements and return it.

    Note: the list is returned unchanged — no truncation is performed.

    :param numargs: minimum number of elements required
    :param args: the list to validate
    :return: `args`, unmodified
    :raises ValueError: if `args` has fewer than `numargs` elements
    """
    if len(args) < numargs:
        raise ValueError("Not enough elements in list {}, need "
                         "{}.".format(args, numargs))
    return args
def get_layer_idx_for_vit(name, num_layers):
    """Map a ViT parameter name to its layer index (for layer-wise LR decay).

    Embedding-level parameters map to 0, transformer blocks map to their
    block index + 1, and everything else (e.g. the final norm/head) maps
    to `num_layers`.
    Following BEiT: https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py#L33
    """
    # Token/position embeddings and the patch embedding sit before block 0.
    if name in ("cls_token", "pos_embed") or name.startswith("patch_embed"):
        return 0
    if name.startswith("blocks"):
        # Parameter names look like "blocks.<i>.<...>".
        block_index = int(name.split(".")[1])
        return block_index + 1
    return num_layers
def isWordGuessed(secretWord: str, lettersGuessed: list) -> bool:
    """Return whether every letter of the secret word has been guessed.

    Args:
        secretWord: the word the user is guessing
        lettersGuessed: the letters guessed so far
    Returns:
        bool: True when each character of secretWord appears in
        lettersGuessed (trivially True for an empty word), else False.
    """
    return set(secretWord).issubset(lettersGuessed)
def _get_tolerance_line(line):
"""get a data item for a tolerance line with format (each line only one item):
i: type=rel, 1e-3
"""
assert line, 'Empty line!'
line = line.strip().replace(' ','')
stmp = line.split(':')
key = stmp[0]
_type, _val = stmp[1].split(',')
_type = _type.split('=')[-1]
tol={key:{'type':_type, 'val':float(_val)}}
return tol |
def convert_bbox_coord(coord, im_height_or_width=512):
    """Convert a TF-style fractional bounding-box coordinate into a
    PASCAL VOC pixel coordinate (top-left origin).

    Designed to be used inside a pd.apply() statement.
    NOTE: assumes a square image unless the caller explicitly passes the
    correct im_height_or_width.
    """
    pixel_value = coord * im_height_or_width
    return int(pixel_value)
def sub_add(a, b, c=100):
    """Compute ``a - b + c``, print the calculation, and return the result."""
    result = a - b + c
    print(f"Subadd {a}-{b}+{c}={a-b+c}")
    return result
def str_to_list(data):
    r"""Convert a string delimited by \n and \t into a list of lists.

    Single-quote characters are stripped and one trailing newline is
    ignored. A list input passes through unchanged; a string that is empty
    (after quote removal) yields None.

    :param data: newline/tab delimited text, or an already-built list
    :type data: str
    :return: one inner list per line, each line split on tabs
    :rtype: List[List[str]]
    """
    if isinstance(data, list):
        return data
    cleaned = data.replace("'", '')
    if not cleaned:
        # Matches the original behavior of returning None for empty input.
        return None
    if cleaned.endswith('\n'):
        cleaned = cleaned[:-1]
    return [row.split('\t') for row in cleaned.split('\n')]
def calculateBBCenter(bb):
    """
    **SUMMARY**
    (Dev Zone)
    Compute the center point of a bounding box.
    **PARAMETERS**
    bb - Bounding Box represented through 2 points (x1,y1,x2,y2)
    **RETURNS**
    center - A tuple of two floating points
    """
    x1, y1, x2, y2 = bb[0], bb[1], bb[2], bb[3]
    return ((x1 + x2) * 0.5, (y1 + y2) * 0.5)
def range_check(value, min_value, max_value, inc_value=0):
    """
    :brief Determine if the input parameter is within range
    :param value: input value
    :param min_value: minimum allowed value
    :param max_value: maximum allowed value
    :param inc_value: step size; value must be a multiple of it (0 disables
        the step check), default=0
    :return: True/False
    """
    if value < min_value:
        return False
    if value > max_value:
        return False
    # When a step is given, the value must land exactly on a multiple of it.
    # NOTE(review): int(value / inc_value) truncates toward zero, so this
    # check may behave unexpectedly for negative or non-integral floats —
    # confirm against the expected inputs.
    if inc_value != 0 and value != int(value / inc_value) * inc_value:
        return False
    return True
def direct(deps):
    """Return the set of direct dependencies.

    A dependency triple (a, b, flag) is direct when its flag is exactly
    the value False (identity check, not just falsy).
    """
    result = set()
    for source, target, flag in deps:
        if flag is False:
            result.add((source, target))
    return result
def default_error_encode(
        errors, encoding, msg, u, startingpos, endingpos):
    """A default encode-error handler, for tests.

    'replace' substitutes a single '?', 'ignore' drops the bad span, and
    any other policy raises ValueError.
    """
    assert endingpos >= 0
    if errors == 'ignore':
        return '', endingpos
    elif errors == 'replace':
        return '?', endingpos
    raise ValueError
def _require_version(server, version):
"""Check version of server
server[in] Server instance
version[in] minimal version of the server required
Returns boolean - True = version Ok, False = version < required
"""
if version is not None and server is not None:
major, minor, rel = version.split(".")
if not server.check_version_compat(major, minor, rel):
return False
return True |
def date_mapper(date: float):
    """
    Map a YYYYMMDD-style date (from 20140101 onward) to an increasing
    natural number per month: January 2014 -> 1, February 2014 -> 2, ...
    """
    yyyymm = date / 100            # drop the day: YYYYMM.DD
    month = int(yyyymm) % 100      # last two digits -> MM
    year = int(yyyymm / 100) - 2014
    return year * 12 + month
def clean_postcode(postcode):
    """
    Normalise `postcode` for comparison: lowercase with all spaces removed.
    Falsy inputs (None, empty string) are returned unchanged.
    """
    if not postcode:
        return postcode
    return postcode.replace(' ', '').lower().strip()
def clear_process_data(process_data) -> dict:
    """Return a copy of the process data with all None-valued keys removed
    from every 'order' and 'station' entry; 'factory' is carried over as-is.
    """
    def _drop_none(entry):
        # Keep only the keys whose value is present.
        return {key: val for key, val in entry.items() if val is not None}

    return {
        'order': [_drop_none(order) for order in process_data['order']],
        'station': [_drop_none(station) for station in process_data['station']],
        'factory': process_data['factory'],
    }
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.