content stringlengths 42 6.51k |
|---|
def get_trimmed_path(location, num_segments=2):
    """Return *location* trimmed to its trailing ``num_segments`` path segments.

    A trailing slash on the input is preserved on the output. Falsy input
    (None or '') returns None.

    >>> get_trimmed_path('a/b/c')
    'b/c'
    >>> get_trimmed_path('/x/a/b/c/', 3)
    'a/b/c/'
    """
    if not location:
        return None
    keeps_slash = location.endswith('/')
    kept = location.strip('/').split('/')[-num_segments:]
    trimmed = '/'.join(kept)
    return trimmed + '/' if keeps_slash else trimmed
def address_fixup(a):
    """Correct known Kern Co. address typos; unknown addresses pass through."""
    # map of misspelled address -> corrected address
    corrections = {
        "2901 Silent Ave Suite 201, Bakersfield, CA 93308": "2901 Sillect Ave Suite 201, Bakersfield, CA 93308",
        "3300 BUENA VISTA RD A, Bakersfield, CA 93311": "3300 Buena Vista Rd Bldg A, Bakersfield, CA 93311",
        "8000 WHITE LANE, Bakersfield, CA 93301": "8000 WHITE LANE, BAKERSFIELD, CA 93309",
        "Rite Aid Store 06303, Bakersfield, CA 93313": "3225 PANAMA LANE, BAKERSFIELD, CA 93313",
        "3500 Stine Rd Bakersfield, Bakersfield, CA 93309": "3500 Stine Rd, Bakersfield, CA 93309",
    }
    return corrections.get(a, a)
def convert_time(time):
    """Convert a time in seconds to an SRT-format timestamp HH:MM:SS,mmm."""
    hours = time / 3600
    minutes = (time % 3600) / 60
    seconds = time % 60
    millis = (time % 1) * 1000
    # %d truncates the float components to integers
    return '%02d:%02d:%02d,%03d' % (hours, minutes, seconds, millis)
def get_primes(n_primes, st=2):
    """Return the first *n_primes* prime numbers that are >= st.

    :param n_primes: how many primes to collect
    :param st: first candidate to test (values below 2 are skipped — the
        original accepted 0 and 1 as "primes" when st < 2, which was a bug)
    :return: list of primes in increasing order
    """
    primes = []
    candidate = max(st, 2)  # numbers below 2 are never prime
    while len(primes) < n_primes:
        # trial division up to sqrt(candidate) instead of candidate - 1
        if all(candidate % d for d in range(2, int(candidate ** 0.5) + 1)):
            primes.append(candidate)
        candidate += 1
    return primes
def _levenshtein(a, b):
"""Calculates the Levenshtein distance between a and b."""
n, m = len(a), len(b)
if n > m:
return _levenshtein(b, a)
current = range(n + 1)
for i in range(1, m + 1):
previous, current = current, [i] + [0] * n
for j in range(1, n + 1):
add, delete = previous[j] + 1, current[j - 1] + 1
change = previous[j - 1]
if a[j - 1] != b[i - 1]:
change += 1
current[j] = min(add, delete, change)
return current[n] |
def comment_radii(r_inner, r_outer, r):
    """Build a human-readable comment for the cluster / sub-sphere radii.

    Args:
        r_inner (float): Inner sub-sphere radius.
        r_outer (float): Outer sub-sphere radius.
        r (float): Radius of the entire cluster.
    Returns:
        str: A comment describing the cluster radii.
    """
    values = ' '.join([str(r_inner), str(r_outer), str(r)])
    return 'inner, outer, and cluster radii = ' + values + '; '
def calculateBchange(nuc, target, tt_ratio):
    """Return the mutation score for changing *nuc* to *target*.

    Based on p(transition) = tt_ratio * p(transversion); identical bases score
    0 (removed from the polymorphism table); non-DNA characters return an
    error string.
    """
    if nuc == target:
        # no change at all
        return 0
    transitions = {("a", "g"), ("g", "a"), ("c", "t"), ("t", "c")}
    if (nuc, target) in transitions:
        # transitions are tt_ratio times more likely than transversions
        return 1 * tt_ratio
    bases = {"a", "c", "g", "t"}
    if nuc in bases and target in bases:
        # any other distinct base pair is a transversion (baseline score)
        return 1
    return "Error: Non-DNA character encountered"
def string_split_single_line(line):
    """Wrap *line* at word boundaries, inserting literal ``\\n`` separators.

    Lines are limited to 28 characters; the empty string is returned as-is.
    """
    if line == "":
        return line
    max_width = 28
    words = line.split()
    result = words[0]
    width = len(result)
    for token in words[1:]:
        if width + len(token) + 1 > max_width:
            # start a new (escaped) line; width resets to this word alone
            result += '\\n' + token
            width = len(token)
        else:
            result += ' ' + token
            width += len(token) + 1
    return result
def _gr_ymin_ ( graph ) :
""" Get minimal y for the points
>>> graph = ...
>>> ymin = graph.ymin ()
"""
ymn = None
np = len(graph)
for ip in range( np ) :
x , y = graph[ip]
if None == ymn or y <= ymn : ymn = y
return ymn |
def pad_time(x):
    """Zero-pad a time string to 4 characters so it parses as a datetime."""
    length = len(x)
    if length < 2:
        return '000' + x
    if length < 4:
        return '0' + x
    return x
def is_power_of_two(n):
    """Check whether `n` is a power of two
    >>> is_power_of_two(0)
    False
    >>> is_power_of_two(1)
    True
    >>> is_power_of_two(2)
    True
    >>> is_power_of_two(3)
    False
    >>> is_power_of_two(16)
    True
    """
    # fixed doctest: the last example called `if_power_of_two`, which would
    # raise NameError when doctests run.
    # n & (n - 1) clears the lowest set bit; zero result means exactly one bit
    # was set, i.e. a power of two (n != 0 excludes zero).
    return n != 0 and ((n & (n - 1)) == 0)
def _split(stdout):
"""
stdout is result.stdout where result
is whatever is returned by subprocess.run
"""
decoded_text = stdout.decode(
'utf-8',
# in case there are decoding issues, just replace
# problematic characters. We don't need text verbatim.
'replace'
)
lines = decoded_text.split('\n')
return lines |
def argRead(ar, default=None):
    """Normalize a string argument to a bool ("0"/"False" or "1"/"True").

    None falls back to *default* if truthy, else False; anything else raises.
    """
    if ar in ("0", "False"):
        return False
    if ar in ("1", "True"):
        return True
    if ar is None:
        # falsy defaults (including None) collapse to False
        return default if default else False
    raise ValueError("Argument value not recognised.")
def identifyChannels(channel_names):
    """Give each channel an id (its list position) and return a name->id dict.

    Duplicated names keep the id of their last occurrence, matching the
    original index-loop behavior.
    """
    # enumerate replaces the range(len(...)) anti-idiom
    return {name: idx for idx, name in enumerate(channel_names)}
def generate_static_obj_def(name,
                            look,
                            mesh,
                            world_xyz=(0.0, 0.0, 0.0),
                            world_rpy=(0.0, 0.0, 0.0),
                            material='Neutral'):
    """Generate an XML <static> tag for a static object.

    :param name: object name
    :param look: look name
    :param mesh: mesh filename
    :param world_xyz: world position, 3 floats (default changed from a mutable
        list to an equivalent tuple to avoid the shared mutable-default pitfall)
    :param world_rpy: world roll/pitch/yaw, 3 floats
    :param material: material name
    :return: the XML snippet as a string
    """
    def _fmt(vec):
        # two-decimal, space-separated components
        return ' '.join(f'{i:.2f}' for i in vec)
    return (
        f'\n'
        f'<static name="{name}" type="model">\n'
        f'\t<look name="{look}"/>\n'
        f'\t<material name="{material}"/>\n'
        f'\t<world_transform xyz="{_fmt(world_xyz)}" rpy="{_fmt(world_rpy)}"/>\n'
        f'\t<physical>\n'
        f'\t\t<mesh filename="{mesh}" scale="1.0"/>\n'
        f'\t\t<origin xyz="0.0 0.0 0.0" rpy="0.0 0.0 0.0"/>\n'
        f'\t</physical>\n'
        f'</static>\n')
def get_group(name, match_obj):
    """Return match group *name*, or '' when the group is absent or None.

    :param name: group name (or index) to extract
    :param match_obj: an ``re.Match`` (or None)
    """
    try:
        obj = match_obj.group(name)
    except (IndexError, AttributeError):
        # narrowed from a bare except: IndexError = unknown group name,
        # AttributeError = match_obj is None (no match)
        return ''
    return obj if obj is not None else ''
def manhattan_distance(point1_x, point1_y, point2_x, point2_y):
    """Return the Manhattan (L1) distance between points 1 and 2."""
    dx = abs(point1_x - point2_x)
    dy = abs(point1_y - point2_y)
    return dx + dy
def InvertMapping(x_to_ys):
    """Given a map x -> [y1, y2...] return the inverse map y -> [x1, x2...]."""
    inverse = {}
    for x, ys in x_to_ys.items():
        for y in ys:
            if y not in inverse:
                inverse[y] = []
            inverse[y].append(x)
    return inverse
def format_memory_size(n_bytes: float, suffix: str = "B"):
    """Format a byte count as a human-readable size string.

    Parameters
    ----------
    n_bytes : float
        bytes to format
    suffix : string
        suffix of the memory

    Notes
    -----
    Thanks Fred @ Stackoverflow:
    https://stackoverflow.com/questions/1094841/get-human-readable-version-of-file-size
    """
    value = n_bytes
    for unit in ("", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"):
        if abs(value) < 1024.0:
            return "%3.1f%s%s" % (value, unit, suffix)
        value /= 1024.0
    # anything past Zi collapses to Yi
    return "%.1f%s%s" % (value, "Yi", suffix)
def _create_statement(name, colnames):
"""Create table if not exists foo (...).
Note:
Every type is numeric.
Table name and column names are all lowercased
"""
# every col is numeric, this may not be so elegant but simple to handle.
# If you want to change this, Think again
schema = ', '.join([col + ' ' + 'numeric' for col in colnames])
return "create table if not exists %s (%s)" % (name, schema) |
def slqs(x_entr, y_entr):
    """Compute the SLQS score from two entropy values.

    :param x_entr: entropy value of x
    :param y_entr: entropy value of y
    :return: SLQS score (-1.0 when y's entropy is zero)
    """
    if y_entr == 0.0:
        return -1.0
    return 1 - (x_entr / y_entr)
def decorate_table(table_text, convert_fun, d_cols=" & ", d_rows="\\\\\n"):
    """Transforms text of the table by applying converter function to each element of this table.
    :param table_text: (str) text of the table.
    :param convert_fun: (str => str) a function to be applied to each element of the table.
    :param d_cols: (str) delimiter between columns.
    :param d_rows: (str) delimiter between rows.
    :return: (str) text of the converted table.
    """
    def process_cell(s):
        # convert a cell and coerce the result to str
        return str(convert_fun(s))
    if d_cols not in table_text:
        return table_text  # delimiter was not present
    splitted = table_text.split(d_cols)
    new_text = ""
    for i in range(0, len(splitted)):
        s = splitted[i]
        last_in_row = d_rows in s
        if last_in_row:
            # piece straddles a row boundary: convert the cell before the row
            # delimiter, re-insert the delimiter, then convert the (non-empty)
            # cell after it
            two_elems = s.split(d_rows)
            decorated = process_cell(two_elems[0]) + d_rows
            if len(two_elems) > 1 and two_elems[1] != '':
                decorated += process_cell(two_elems[1])
        else:
            # NOTE(review): unlike process_cell, this branch does not wrap the
            # result in str() — presumably convert_fun always returns str here;
            # confirm before relying on non-str converters
            decorated = convert_fun(s)
        new_text += decorated
        if i < len(splitted)-1:
            # restore the column delimiter between converted pieces
            new_text += d_cols
    return new_text
def dekatrian_week(dek_day: int, dek_month: int) -> int:
    """Return the Dekatrian week day for a Dekatrian date.

    The year is never needed, and the month only matters to detect the
    Achronian day (month 0).

    Args:
        dek_day (int): Day of the month.
        dek_month (int): Month of the year.
    Return:
        int: 0 = Achronian; 1..7 = first..seventh week day.
    """
    if dek_month == 0:
        # month 0 is the Achronian day, outside any week
        return 0
    return (dek_day - 1) % 7 + 1
def fmt_mate(mate_score):
    """Format a mate value as a signed string, e.g. '+M3' or '-M2'.

    Negative scores mean mate for black, non-negative mate for white.
    """
    sign = "-" if mate_score < 0 else "+"
    return "{}M{:d}".format(sign, abs(mate_score))
def get_clues(guess, secret_num):
    """Return the pico/fermi/bagels clue string for a guess vs. the secret.

    Both arguments are equal-length digit strings.
    """
    if guess == secret_num:
        return 'You got it!'
    clues = []
    for guessed, actual in zip(guess, secret_num):
        if guessed == actual:
            # correct digit in the correct place
            clues.append('Fermi')
        elif guessed in secret_num:
            # correct digit in the wrong place
            clues.append('Pico')
    if not clues:
        # no correct digits at all — fixed the original 'Bagles' typo
        return 'Bagels'
    # sort so clue order doesn't leak positional information
    clues.sort()
    return ' '.join(clues)
def radix_sort(arr, radix=10):
    """
    In-place LSD (least-significant-digit) radix sort.

    :param arr: Iterable of elements to sort.
    :param radix: Base of input numbers
    :return: Sorted list of input.

    NOTE(review): the digit extraction uses `//` and `%`, which assumes
    non-negative integers — confirm callers never pass negatives.

    Time complexity: O(d * (n + b))
        where, n is the size of input list.
        b is base of representation.
        d is number of digits in largest number in that base.
    Space complexity: O(n + k)
        where, k is the range of input.
    """
    # max_length flips to False while any element still has digits left at the
    # current position; the loop ends once every quotient has reached 0
    max_length = False
    tmp, digit = -1, 1
    while not max_length:
        max_length = True
        # declare and initialize buckets, one per possible digit value
        buckets = [[] for _ in range(radix)]
        # split arr between lists keyed by the current digit
        for i in arr:
            tmp = i // digit
            buckets[tmp % radix].append(i)
            if max_length and tmp > 0:
                # some element still has higher digits: another pass is needed
                max_length = False
        # empty lists back into arr, preserving bucket (digit) order
        a = 0
        for b in range(radix):
            buck = buckets[b]
            for i in buck:
                arr[a] = i
                a += 1
        # move to next digit position
        digit *= radix
    return arr
def map_values_func(data_list):
    """
    Return a list of data_list's elements with all None values removed.
    """
    # `is not None` (identity) instead of the original `!= None`: correct even
    # for elements whose __eq__ is nonstandard; the intermediate list() copy
    # was unnecessary since the comprehension already builds a new list
    return [x for x in data_list if x is not None]
def convert_age(age_str):
    """Convert k8s abbreviated-style datetime str e.g. 14d2h to an integer.

    The unit letters d/h/m/s are textually rewritten into an arithmetic
    expression ("14d2h" -> "14*60*60*24+2*60*60") which is then eval'd to a
    number of seconds.

    NOTE(review): eval() on the input string is a code-execution risk — only
    safe if age_str always comes from trusted kubectl output; confirm.
    NOTE(review): the final [:-1] assumes the rewritten string ends with a
    trailing '+' (i.e. the input ends with a unit letter); a bare number like
    "99" would lose its last digit — TODO confirm inputs always carry units.
    """
    # age_str_org = age_str
    def age_subst(age_str, letter, factor):
        # replace "<N><letter><rest>" with "<N>*<factor>+<rest>"
        parts = age_str.split(letter)
        if len(parts) == 2:
            age_str = parts[0] + "*" + factor + "+" + parts[1]
        return age_str
    age_str = age_subst(age_str, "d", "60*60*24")
    age_str = age_subst(age_str, "h", "60*60")
    age_str = age_subst(age_str, "m", "60")
    age_str = age_subst(age_str, "s", "1")
    # drop the trailing '+' left after the last substituted unit
    age_str = age_str[:-1]
    # print(
    #     f"convert_age({repr(age_str_org)}) --> {repr(age_str)} --> {eval(age_str)}" # nosec
    # ) # nosec
    return eval(age_str)
def waste_mass_series(isotopes, mass_timeseries, duration):
    """Build a dict mapping each isotope to its mass timeseries.

    Parameters
    ----------
    isotopes: list
        all the isotopes from the resources table
    mass_timeseries: list
        list of lists; each outer entry corresponds to one isotope and holds
        (time, mass) tuples for that isotope's transactions
    duration: integer
        simulation duration (kept for interface compatibility; unused here)

    Returns
    -------
    waste_mass: dict
        key = isotope, value = list of masses for that isotope
    """
    waste_mass = {}
    for isotope in isotopes:
        # list.index gives the first occurrence directly, replacing the
        # original O(n) enumerate-comprehension scan per isotope
        position = isotopes.index(isotope)
        waste_mass[isotope] = [entry[1] for entry in mass_timeseries[position]]
    return waste_mass
def f11(xx):
    """
    Example of an analytic expression replacing the external point number.

    :param xx: the distance between two bodies (or markers)
    """
    denominator = 0.5 * xx * xx + 1.0
    return 20.0 / denominator
def producto_complejos(num1: list, num2: list) -> list:
    """Multiply two complex numbers given as [real, imag] lists.

    :param num1: first complex number as [real, imag]
    :param num2: second complex number as [real, imag]
    :return: the product as a [real, imag] list
    """
    real = num1[0] * num2[0] - num1[1] * num2[1]
    imag = num1[0] * num2[1] + num1[1] * num2[0]
    return [real, imag]
def removeSpecialsCharacters(text):
    """
    Remove special character sequences from *text*: newline, carriage return
    and the literal two-character sequence backslash-l.
    """
    # method-call form instead of str.replace(text, ...); '\\l' is written
    # with an explicit escaped backslash — the original '\l' is an invalid
    # escape that only happens to mean the same thing and warns on modern
    # Python (SyntaxWarning since 3.12)
    return text.replace('\n', '').replace('\r', '').replace('\\l', '')
def normalize_file_permissions(st_mode):
    """
    Normalize the permission bits of a stat st_mode to 644/755.

    Popular VCSs only track whether a file is executable, and exact bits vary
    with the umask; normalizing makes builds reproducible. Higher (file-type)
    bits of st_mode are preserved.
    https://github.com/takluyver/flit/blob/6a2a8c6462e49f584941c667b70a6f48a7b3f9ab/flit_core/flit_core/common.py#L257
    """
    owner_executable = bool(st_mode & 0o100)
    # force rw-r--r-- and strip group/other write+execute
    normalized = (st_mode | 0o644) & ~0o133
    if owner_executable:  # no cov
        normalized |= 0o111  # 644 -> 755
    return normalized
def quick_sort(data):
    """Sort a list of unique numbers in ascending order using quick sort.

    Recursively splits the list into a pivot, a smaller side and a larger
    side. Note: values equal to the pivot are dropped, which is fine under
    the documented unique-input contract.

    Args:
        data: data to sort (list of int)
    Returns:
        sorted list
    """
    if len(data) < 2:
        # zero or one element is already sorted
        return data
    pivot, rest = data[0], data[1:]
    # removed the original's unused enumerate index
    left = [value for value in rest if value < pivot]
    right = [value for value in rest if value > pivot]
    return quick_sort(left) + [pivot] + quick_sort(right)
def complement(dna: str) -> str:
    """Return the complementary DNA strand.

    >>> complement("AATTGGCC")
    'TTAACCGG'
    """
    # fixed doctest: the expected output must be the repr ('TTAACCGG'), not a
    # double-quoted literal, or the doctest fails
    pairs = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}
    # join at C speed instead of quadratic string concatenation
    return ''.join(pairs[base] for base in dna)
def euler114(l=50, size_min=3):
    """Solution for problem 114 (Project Euler).

    Counts the ways to fill a row of length *l* with blocks of length at
    least *size_min*, where blocks must be separated by at least one free
    cell.

    :param l: row length
    :param size_min: minimum block length
    :return: total number of arrangements
    """
    # no_block / block = number of combinations of the current length that end
    # without / with a block cell
    no_block, block = 1, 0
    nb_comb = []  # history of `no_block` per length, needed to start new blocks
    for i in range(l):
        # Dynamic programming
        # Combinations of length n ending with no block are generated taking all combinations of length n-1.
        # Combinations of length n ending with a block are generated by either :
        # - adding a block to combinations of length n-1 ending with a block
        # - starting a new block out of combinations of length n-size_min not ending with a block
        nb_comb.append(no_block)
        no_block, block = (no_block + block), (block + (nb_comb[i - size_min + 1] if i + 1 >= size_min else 0))
    return no_block + block
def get_mentioned_string(user: str) -> str:
    """Return the Slack mention markup for a user id, e.g. "<@U123>".

    Args:
        user (str): user id
    Return:
        str: mention-formatted string
    """
    return "<@" + user + ">"
def nested_compare(t, u):
    """
    Return whether the nested structure of t and u matches.

    Only structure (container types, lengths, dict keys) is compared —
    leaf values are never compared for equality.
    """
    if isinstance(t, (list, tuple)):
        # u must be the same sequence type with the same length
        if not isinstance(u, type(t)) or len(t) != len(u):
            return False
        return all(nested_compare(a, b) for a, b in zip(t, u))
    if isinstance(t, dict):
        # u must be a dict with the same key set
        if not isinstance(u, dict) or set(t.keys()) != set(u.keys()):
            return False
        return all(nested_compare(t[k], u[k]) for k in t)
    # leaves always match
    return True
def calculate_n_inputs(inputs, config_dict):
    """
    Calculate the total input size for a model from its list of input names.

    Unrecognized names contribute nothing; config keys are only read for the
    names actually present.
    """
    total = 0
    for name in inputs:
        if name == 'action':
            total += config_dict['prior_args']['n_variables']
        elif name == 'state':
            total += config_dict['misc_args']['state_size']
        elif name == 'reward':
            total += 1
        elif name in ('params', 'grads'):
            # constant scale -> one value per variable, else mean+scale (2x)
            factor = 1 if config_dict['approx_post_args']['constant_scale'] else 2
            total += factor * config_dict['prior_args']['n_variables']
    return total
def _clean_values(values):
"""
Clean values to the state that can be used in Sheets API
:type values: list
:param values: Row values to clean
:rtype: list
:return: Cleaned values, in the same order as given in function argument
"""
return [value if value is not None else '' for value in values] |
def filterbox_iou(rec1, rec2):
    """
    Compute the IoU of two boxes.

    :param rec1: (y0, x0, y1, x1) == (top, left, bottom, right)
    :param rec2: (y0, x0, y1, x1)
    :return: scalar IoU; 0 when the boxes do not overlap
    """
    area1 = (rec1[2] - rec1[0]) * (rec1[3] - rec1[1])
    area2 = (rec2[2] - rec2[0]) * (rec2[3] - rec2[1])
    # edges of the intersection rectangle
    inter_left = max(rec1[1], rec2[1])
    inter_right = min(rec1[3], rec2[3])
    inter_top = max(rec1[0], rec2[0])
    inter_bottom = min(rec1[2], rec2[2])
    if inter_left >= inter_right or inter_top >= inter_bottom:
        # no overlap
        return 0
    intersect = (inter_right - inter_left) * (inter_bottom - inter_top)
    return (intersect / (area1 + area2 - intersect)) * 1.0
def dep_graph_parser_parenthesis(edge_str):
    """Parse a 'parenthesis'-format dependency edge.

    Args:
        edge_str: edge text in the format
            edge_label(parent_word-parent_index, child_word-child_index)
    Returns:
        tuple of (parent_index, edge_label, child_index), 0-based
    """
    pieces = edge_str.split("(")
    label = pieces[0]
    args = pieces[1].split(", ")
    # indices in the text are 1-based; convert to 0-based
    parent = int(args[0].split("-")[-1]) - 1
    # rejoin in case the child word itself contained ", "; strip the ')' tail
    child = int(",".join(args[1:]).split("-")[-1][:-1]) - 1
    return (parent, label, child)
def arg_host(string_args):
    """
    Return the host part of string_args, e.g. example.com:/tmp/file -> example.com

    :param str string_args: opennebula string args
    :return: the host part of string_args
    """
    # split once at the first colon; [0] is the host (or the whole string
    # when no colon is present)
    return string_args.split(":", 1)[0]
def shorten_device_string(long_device_string):
    """Turn a long device string into a short one like "gpu:0".

    Raises ValueError (from str.index) if "/device:" is absent.
    """
    prefix = "/device:"
    start_pos = long_device_string.index(prefix)
    # removed the dead `assert start_pos >= 0`: str.index raises rather than
    # returning a negative value, so the assert could never fire
    short_device_string = long_device_string[start_pos + len(prefix):]
    assert short_device_string
    return short_device_string.lower()
def is_well_formed(expression):
    """Verify that the expression's parentheses are properly nested.

    Non-parenthesis characters are ignored; an expression with no
    parentheses is considered well formed.
    """
    depth = 0
    for ch in expression:
        if ch == "(":
            depth += 1
        elif ch == ")":
            depth -= 1
            if depth < 0:
                # a closer with no matching opener
                return False
    # bug fix: the original returned True as soon as the running count hit
    # zero mid-scan, wrongly accepting strings like "()("; balance must be
    # checked only after the whole expression is scanned
    return depth == 0
def reset_counts_repeated(shots, hex_counts=True):
    """Sampling optimization counts (hex or binary keyed)."""
    key = '0x1' if hex_counts else '01'
    return [{key: shots}]
def remove_duplicates(list1):
    """
    Eliminate duplicates in a sorted list.

    Returns a new sorted list with the same elements as list1, but with no
    duplicates. Since the input is sorted, equal elements are adjacent, so
    keeping each element only when it differs from the previously kept one
    suffices. (This replaces the original's convoluted look-ahead /
    last-element special-casing, which produced the same result for sorted
    input.)
    """
    if len(list1) == 0:
        return list1
    result = [list1[0]]
    for value in list1[1:]:
        if value != result[-1]:
            result.append(value)
    return result
def get_style(format):
    """Infer the rendering style from the output format name."""
    if format == 'simple-html':
        return 'html'
    if format in ('tex', 'latex', 'pdf'):
        return 'markdown_tex'
    # everything else renders as plain markdown
    return 'markdown'
def get_file_path_as_parts(fname):
    """Split a full data-file path into (data_dir, file prefix, file suffix).

    Handles both backslash and forward-slash separators.
    """
    # position just after the last path separator (0 when none found)
    sep_idx = fname.rfind('\\') + 1
    if sep_idx == 0:
        sep_idx = fname.rfind('/') + 1
    dot_idx = fname.rfind('.')
    return (fname[:sep_idx], fname[sep_idx:dot_idx], fname[dot_idx:])
def set_namespace_root(namespace):
    """
    Return the GO ID(s) of the root term(s) for the selected namespace.

    Parameters
    ----------
    namespace : str
        Desired namespace: biological_process, cellular_component or
        molecular_function.

    Returns
    -------
    list
        The GO IDs of the root terms; all three roots for any other value.
    """
    roots = {
        'biological_process': ['GO:0008150'],
        'cellular_component': ['GO:0005575'],
        'molecular_function': ['GO:0003674'],
    }
    return roots.get(namespace, ['GO:0008150', 'GO:0005575', 'GO:0003674'])
def mean(vals):
    """Compute the arithmetic mean of a list of values."""
    return sum(vals) / len(vals)
def map_coords(func, obj):
    """Return coordinates, mapped pair-wise using the provided function.

    *obj* is a GeoJSON-like geometry dict with 'type' and 'coordinates';
    *func* is applied to every individual ordinate. A new geometry dict of
    the same type is returned; raises ValueError for unsupported types.
    """
    if obj['type'] == 'Point':
        # one position: a flat sequence of ordinates
        coordinates = tuple(map(func, obj['coordinates']))
    elif obj['type'] in ['LineString', 'MultiPoint']:
        # one level of nesting: a list of positions
        coordinates = [tuple(map(func, c)) for c in obj['coordinates']]
    elif obj['type'] in ['MultiLineString', 'Polygon']:
        # two levels: a list of curves, each a list of positions
        coordinates = [[
            tuple(map(func, c)) for c in curve]
            for curve in obj['coordinates']]
    elif obj['type'] == 'MultiPolygon':
        # three levels: parts -> curves -> positions
        coordinates = [[[
            tuple(map(func, c)) for c in curve]
            for curve in part]
            for part in obj['coordinates']]
    else:
        raise ValueError("Invalid geometry object %s" % repr(obj))
    return {'type': obj['type'], 'coordinates': coordinates}
def _auth_mongo_cmd(cmd, username, password, auth_db):
"""takes a command string and adds auth tokens if necessary"""
if username != "":
cmd.append("--username")
cmd.append(username)
if password != "":
cmd.append("--password")
cmd.append(password)
if auth_db != "":
cmd.append("--authenticationDatabase")
cmd.append(auth_db)
return cmd |
def calculate_N50(list_of_lengths):
    """Calculate N50 for a sequence of numbers.

    N50 is computed here as the length-weighted median: each length is
    expanded into that many copies of itself and the median of the expanded
    list is taken.

    Args:
        list_of_lengths (list): List of numbers.
    Returns:
        float: N50 value, or None (with a printed message) for empty input.
    """
    if len(list_of_lengths) == 0:
        print("list is empty. Cannot compute N50.")
        return
    # expand directly from the list; the original iterated set(...) and called
    # list.count per distinct value, which was O(n^2) for no benefit
    expanded = []
    for length in list_of_lengths:
        expanded.extend([length] * length)
    expanded.sort()
    mid = len(expanded) // 2
    if len(expanded) % 2 == 0:
        return (expanded[mid - 1] + expanded[mid]) / 2
    return expanded[mid]
def to_version_tuple(version):
    """Split a dotted version (string or number) into a tuple of ints."""
    parts = str(version).split(".")
    return tuple(map(int, parts))
def normalize_factors(factors):
    """Flatten a list of comma-separated factor strings into individual factors.

    The factor argument has "append" behavior (-f foo -f bar), and each
    argument may itself be a comma-separated list. e.g.,

    >>> normalize_factors(['py37', 'lint,isort'])
    ['py37', 'lint', 'isort']

    Args:
        factors: A list of comma-separated factor strings.
    Returns:
        The flattened list of individual, non-empty factors.
    """
    assert isinstance(factors, list), (
        'Expected `factors` list to be a list, got `{cls}`.'
        .format(cls=type(factors).__name__))
    result = []
    for chunk in factors:
        for piece in chunk.split(','):
            piece = piece.strip()
            if piece:  # drop empty strings produced by stray commas
                result.append(piece)
    return result
def remove_recurring_characters(sorted_string: str) -> str:
    """Return the string with adjacent repeats collapsed; sorted input required."""
    # keep a character only when it differs from its predecessor
    return ''.join(
        ch for pos, ch in enumerate(sorted_string)
        if pos == 0 or ch != sorted_string[pos - 1]
    )
def valid_passport1(passport):
    """
    Part 1 passport validation: all required fields must be present.
    """
    required = ('byr', 'ecl', 'eyr', 'hgt', 'hcl', 'iyr', 'pid')
    return all(field in passport for field in required)
def split(total, num_people):
    """
    Split a total to the nearest whole cent plus the remainder.

    `total` is a Money() type, so floating point errors are not a concern.

    return (2-tuple): base amount owed, remainder of cents which couldn't be
    evenly split

    Example: >>> split(1.00, 6)
             (0.16, 0.04)
    """
    per_person = total * 100 // num_people / 100
    remainder = total - num_people * per_person
    # sanity check the arithmetic round-trips
    assert per_person * num_people + remainder == total, "InternalError:" + \
        " something doesnt add up here: %d * %d + %d != %d" % (per_person, num_people, remainder, total)
    return per_person, remainder
def merge_pips(pip_list):
    """Merge pip requirement lists the same way as `merge_dependencies` works."""
    merged = set()
    for reqs in pip_list:
        merged.update(reqs)
    return {'pip': sorted(merged)}
def fx(x, y):
    """
    Sample function: 2x - 2y.
    """
    return 2 * (x - y)
def make_loglist(jobs):
    """Return metrics log file paths for completed jobs in the s3 outputs bucket.

    e.g. ['outputs/j6d508o6q/preview_metrics.txt',
          'outputs/j6d508o6q/process_metrics.txt']
    """
    log_files = [
        f"outputs/{ipst}/{kind}_metrics.txt"
        for ipst in jobs
        for kind in ("preview", "process")
    ]
    print("LogFiles: ", len(log_files))
    return log_files
def format_size(num, suffix='B'):
    """Format an integer memory size as human-readable text."""
    value = num
    for unit in ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi'):
        if value < 1024:
            return f"{value}{unit}{suffix}"
        value //= 1024  # integer scaling, matching the original behavior
    return f"{value}Yi{suffix}"
def point_is_in(bbox, point):
    """
    Check whether an EPSG:4326 point (x, y) lies inside bbox
    (min_x, min_y, max_x, max_y); boundaries are inclusive.
    """
    #bbox = normalize(bbox)[0]
    x_inside = bbox[0] <= point[0] <= bbox[2]
    y_inside = bbox[1] <= point[1] <= bbox[3]
    return x_inside and y_inside
def get_variant_label(v_conf):
    """
    Generate the variant image name from its MAX_SIZE (width, height) setting.

    A None width yields 'h<height>', a None height yields 'w<width>',
    otherwise '<width>x<height>'.
    """
    width = v_conf['MAX_SIZE'][0]
    height = v_conf['MAX_SIZE'][1]
    if width is None:
        return 'h{}'.format(height)
    if height is None:
        return 'w{}'.format(width)
    return '{}x{}'.format(width, height)
def isint(obj):
    """
    Return True if *obj* is an int instance.

    Note: bool is a subclass of int, so isint(True) is True.
    """
    return isinstance(obj, int)
def str_2_num(s: str):
    """
    Strip '+' characters from numeric-ish strings, e.g. '100+' -> '100'.

    Non-string inputs are returned unchanged. (Note: despite the name, the
    result for string input is still a string.)

    :param s: input value
    :return: the cleaned string, or the input unchanged if not a str
    """
    # removed the original try/except KeyError: neither isinstance nor
    # str.replace can raise KeyError, so the handler was unreachable
    if isinstance(s, str):
        return s.replace('+', '')
    return s
def int2bool(x):
    """Convert an integer string (or number) to a boolean."""
    return int(x) != 0
def average_5_kernel(current_index, data):
    """
    Denoise by averaging data[current_index] with up to two neighbors on each
    side (a 5-point kernel, truncated at the array boundaries).

    :param current_index: index to denoise
    :param data: data to denoise
    :return: the denoised value at current_index, as an int
    """
    total = data[current_index]
    count = 1
    # bug fix: the original guarded the +2 neighbor with
    # `current_index + 22 < len(data)`, so that sample was (almost) never
    # included; the correct bound is current_index + 2
    for offset in (-2, -1, 1, 2):
        neighbor = current_index + offset
        if 0 <= neighbor < len(data):
            total += data[neighbor]
            count += 1
    return int(total / count)
def is_probably_inside_string_or_comment(line, index):
    """Return True if *index* may be inside a string or comment.

    Heuristic only: a quote or '#' occurring at or before index counts,
    regardless of pairing or escaping.
    """
    for marker in ('"', "'", '#'):
        first = line.find(marker)
        if first != -1 and first <= index:
            return True
    return False
def locale_to_lower_upper(locale):
    """
    Take a locale, regardless of separator style, and format it like "en_US"
    (lowercase language, underscore, uppercase country).

    The original docstring claimed "en-US" output, but both branches emit the
    underscore form; a bare language code is simply lowercased.
    """
    for sep in ('-', '_'):
        if sep in locale:
            lang, country = locale.split(sep, 1)
            return '%s_%s' % (lang.lower(), country.upper())
    return locale.lower()
def _build_kwargs(keys, input_dict):
"""
Parameters
----------
keys : iterable
Typically a list of strings.
adict : dict-like
A dictionary from which to attempt to pull each key.
Returns
-------
kwargs : dict
A dictionary with only the keys that were in input_dict
"""
kwargs = {}
for key in keys:
try:
kwargs[key] = input_dict[key]
except KeyError:
pass
return kwargs |
def extended_gcd(phi, e):
    """
    Gives us the inverse (d)

    Computes d with (d * e) % phi == 1 via an iterative Euclid-style descent.
    NOTE(review): presumably requires gcd(e, phi) == 1 — if e shares a factor
    with phi, e never reaches 1 and the loop does not terminate; confirm
    callers guarantee coprimality (standard for RSA-style usage).
    """
    d = 1
    # top1/top2 track the previous remainder and coefficient pair
    top1 = phi
    top2 = phi
    while e != 1:
        k = top1 // e  # quotient of the current division step
        oldTop1 = top1
        oldTop2 = top2
        top1 = e
        top2 = d
        # standard Euclid update: remainder and Bezout coefficient
        e = oldTop1 - e * k
        d = oldTop2 - d * k
    if d < 0:
        # normalize the coefficient into [0, phi)
        d = d % phi
    return d
def sides_parallel(coords, clockwise=True):
    """
    Takes a 5-tuple of (x, y) coordinate tuples for a clockwise or
    counterclockwise quadrilateral. Assumes coordinates start at
    (min_x, min_y) and end at (min_x, min_y). Returns True if the
    coordinates define a rectangle. Otherwise returns False.

    NOTE(review): the checks compare raw x/y equality of corner pairs, so
    this accepts only axis-aligned rectangles (vertical left/right sides,
    horizontal top/bottom) with nonzero width and height — a rotated
    rectangle returns False. Confirm that is the intended contract.
    """
    if len(coords) != 5:  # pragma: no cover
        raise ValueError('quadrilateral must contain exactly 5 coordinates')
    # get coordinates for [bottom left, top left, top right, bottom right];
    # counterclockwise input is re-ordered to the clockwise corner layout
    if clockwise:
        corners = [coords[0], coords[1], coords[2], coords[3]]
    else:
        corners = [coords[0], coords[3], coords[2], coords[1]]
    # get (x, y) values for each corner
    (c0_min_x, c0_min_y) = corners[0]  # bottom left
    (c1_min_x, c1_max_y) = corners[1]  # top left
    (c2_max_x, c2_max_y) = corners[2]  # top right
    (c3_max_x, c3_min_y) = corners[3]  # bottom right
    # left and right sides vertical; top and bottom sides horizontal
    left_rt_parallel = (c0_min_x == c1_min_x and c2_max_x == c3_max_x)
    top_btm_parallel = (c0_min_y == c3_min_y and c1_max_y == c2_max_y)
    # reject degenerate (zero-area) quadrilaterals
    has_width = (c0_min_x != c2_max_x and c1_min_x != c3_max_x)
    has_height = (c0_min_y != c1_max_y and c3_min_y != c2_max_y)
    return left_rt_parallel and top_btm_parallel and has_width and has_height
def get_first_or_list(from_result):
    """Return the sole element of a single-item list, else the value unchanged."""
    # isinstance replaces `type(...) == list` so list subclasses behave the same
    if isinstance(from_result, list) and len(from_result) == 1:
        return from_result[0]
    return from_result
def isName(string):
    """Return True if *string* is an author name (contains an underscore)
    rather than a sequence of NCBI codes."""
    return string.find('_') != -1
def polynomiale_carre2(a: int, b: int, c: int, x: int) -> int:
    """Return a*x^4 + b*x^2 + c, evaluated Horner-style on x^2."""
    x_squared = x * x
    return (a * x_squared + b) * x_squared + c
def ordinal(value):
    """Cardinal to ordinal conversion for the edition field.

    Non-numeric strings return their first word (assumed already ordinal,
    e.g. '2nd Edition' -> '2nd'); values below 1 are returned as the parsed
    int unchanged.
    """
    try:
        digit = int(value)
    except (ValueError, TypeError):
        # narrowed from a bare except: int() raises ValueError for
        # non-numeric strings and TypeError for unconvertible types
        return value.split(' ')[0]
    if digit < 1:
        return digit
    # 11, 12 and 13 take 'th' despite ending in 1/2/3
    if digit % 100 in (11, 12, 13):
        return value + 'th'
    suffixes = {1: 'st', 2: 'nd', 3: 'rd'}
    return value + suffixes.get(digit % 10, 'th')
def generate_path_dict(images_path, labels_path, partial_f_name):
    """
    Generate the split-data path dict, including the label directories.

    :param images_path: str - image top path
    :param labels_path: str - label top path
    :param partial_f_name: str - ending annotation name
    :return: dict - resulting path dict
    """
    splits = ('train', 'test', 'val')
    return {
        'images': {split: images_path + split + '/' for split in splits},
        'labels': {split: labels_path + split + partial_f_name for split in splits},
        'labels_path': labels_path,
    }
def get_unexpanded_list(conf_dict):
    """
    Return the templates marked as unexpanded in a configuration dict
    (an empty tuple when the key is absent).
    """
    try:
        return conf_dict['unexpanded_templates']
    except KeyError:
        return ()
def convert_to_array(scalar_or_list, parameter_name, num_dims):
    """Convert a scalar or list to a list with one entry per dimension.

    :param scalar_or_list: a scalar, a 1-element list, or a num_dims-long list
    :param parameter_name: parameter name used in the error message
    :param num_dims: number of image dimensions
    :raises ValueError: when a list of any other length is given
    """
    if not isinstance(scalar_or_list, list):
        return [scalar_or_list] * num_dims
    if len(scalar_or_list) == 1:
        return scalar_or_list * num_dims
    if len(scalar_or_list) == num_dims:
        return scalar_or_list
    raise ValueError(
        # fixed message: the original was missing the space before "parameter"
        'The ' + parameter_name + ' parameter must be a scalar, or a list '
        'containing one entry for '
        'each image dimension')
def left_column(matrix):
    """
    Return the first (leftmost) column of a matrix as an immutable tuple.
    """
    return tuple(row[0] for row in matrix)
def highest(dictionary, key):
    """
    Get highest value from a dictionary in a template
    @param dictionary: iterable of dicts to scan
    @param key: what key to look for
    @return: the maximum value found under *key*
    """
    return max(entry[key] for entry in dictionary)
def dict_to_string(packet):
    """ Convert dictionary {"epoch": 10, "lr": 0.01} to string "epoch=10 lr=0.01"
    """
    return " ".join(f"{key}={value}" for key, value in packet.items())
def generate_extern(component, key, alias, suffix, specify_comp=True):
    """
    This function generates a type trait label for C++
    """
    macro = f"TIMEMORY_{key.upper()}_EXTERN_{suffix.upper()}"
    if specify_comp:
        component_arg = f"::tim::component::{component}"
        return f"{macro}({alias}, {component_arg})"
    return f"{macro}({alias})"
def arglis(seq):
    """Return the indices of a Longest Increasing Subsequence of *seq*.

    Patience-sorting variant running in O(n log n): ``tail_idx[j]`` holds
    the index of the smallest known tail of an increasing subsequence of
    length ``j``, and ``prev_idx[i]`` links element ``i`` back to its
    predecessor so the index chain can be reconstructed at the end.
    """
    size = len(seq)
    prev_idx = [0] * size
    tail_idx = [0] * (size + 1)
    best_len = 0
    for i in range(size):
        # binary search: longest subsequence whose tail is below seq[i]
        lo, hi = 1, best_len
        while lo <= hi:
            mid = (lo + hi) // 2
            if seq[tail_idx[mid]] < seq[i]:
                lo = mid + 1
            else:
                hi = mid - 1
        length = lo
        prev_idx[i] = tail_idx[length - 1]
        tail_idx[length] = i
        if length > best_len:
            best_len = length
    # walk the predecessor links backwards to recover the indices
    chain = []
    k = tail_idx[best_len]
    for _ in range(best_len):
        chain.append(k)
        k = prev_idx[k]
    return chain[::-1]
def lcase(i):
    """Return *i* converted to lowercase.

    >>> lcase("Cat")
    'cat'
    """
    return i.lower()
def sm_arn_from_execution_arn(arn):
    """
    Get the State Machine Arn from the execution Arn
    Input: Execution Arn of a state machine
    Output: Arn of the state machine
    """
    # drop the trailing execution-name segment, then swap the resource
    # type (index 5) from 'execution' to 'stateMachine'
    parts = arn.split(':')[:-1]
    parts[5] = 'stateMachine'
    return ':'.join(parts)
def compute_number_of_simulations(number_of_flowrates_analyzed, number_of_angles_analyzed):
    """Using the number of angles and flow rates described by user, calculate the number of unique simulations.
    Args:
        number_of_angles_analyzed (int): user-defined number of angles to be analyzed.
        number_of_flowrates_analyzed (int): user defined number of flow rates to be analyzed.
    Returns:
        N_simulations (int): Number of unique simulations to be run. Also the number of rows in the global simulation array.
    """
    # every (flow rate, angle) pair is one simulation
    total = number_of_angles_analyzed * number_of_flowrates_analyzed
    print("number of unique simulations:")
    print(total)
    return total
def ANSWER_3_testanswer(val, original_val = None): #TEST 43
    """
    (1) If no leaves were pruneable in the tree, swapping two children could
    definitely help. For example, what would happen if it's MAX's turn and the
    tree initially had monotonically increasing leaves, but then you swapped the
    two top-level children?
    (2) Similarly, that idea can be applied at any level of the tree. Especially
    in a tree with distinct, monotonically increasing/decreasing leaves,
    swapping any two children is likely to change whether any nodes will get
    pruned in that subtree.
    (3) Because depth_limit=INF, the heuristic_fn is never even used.
    Hence, changing the heuristic_fn will have no effect.
    Thus, the final answer is (4).
    """
    # an empty string means the student has not answered yet
    if val == '':
        raise NotImplementedError
    answer = str(val)
    return answer == '4'
def prep_msg(msg):
    """Prepare a message for sending: append a NUL terminator and UTF-8 encode."""
    terminated = msg + '\0'
    return terminated.encode('utf-8')
def get_sex_choices(id=1):
    """Return the fixed list of (code, salutation) choice tuples."""
    return [('w', 'Frau'), ('m', 'Herr')]
def check_fields(data, fields):
    """
    Checks if the attributes present in the fields list are present in data.
    Returns False (and prints the first missing field) when data is falsy or
    any field is absent; True otherwise.
    """
    if not data:
        return False
    missing = next((f for f in fields if f not in data), None)
    if missing is not None:
        print(f"The {missing} is not present")
        return False
    return True
def predict_progress_data(progress):
    """
    Wrap a progress value in the payload dict:
    {
        "progress": 0.2  # 0-1 float
    }
    :param progress: progress fraction in [0, 1]
    :return: dict with the single 'progress' key
    """
    return {'progress': progress}
def is_leap(year):
    """Return True when *year* is a leap year under the Gregorian rules:
    divisible by 4, except centuries, except centuries divisible by 400."""
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
def compare_dictionaries (dict1, dict2):
    """Compare two dictionaries and return a list of keys having
    different values, restricted to the timestamp keys of interest."""
    watched = ('mtime', 'atime', 'ctime', 'crtime')
    return [key for key in dict1
            if key in watched and dict1[key] != dict2[key]]
def every_n_steps(step, n):
    """Return True on every n-th step, where step counting starts at 0
    (i.e. steps n-1, 2n-1, ... fire)."""
    return step % n == n - 1
def find_highest_calorie_cereal(_, cereals):
    """Example of a Dagster solid that takes input and produces output.

    Sorts ascending by calories and returns the name of the last entry,
    so on ties the latest-occurring cereal wins.
    """
    ranked = sorted(cereals, key=lambda item: item["calories"])
    return ranked[-1]["name"]
def __shift_rows_decrypt(data_block):
    """the rows 1/2/3/4 of the matrix are shifted cyclically to the right by offsets 0/1/2/3"""
    # AES InvShiftRows on a 16-byte state stored column-major:
    # output position i takes input position (i - 4*(i % 4)) mod 16,
    # i.e. row r (= i % 4) rotates right by r cells. The table below is
    # that permutation written out explicitly, one state column per line.
    return [data_block[0], data_block[13], data_block[10], data_block[7],
            data_block[4], data_block[1], data_block[14], data_block[11],
            data_block[8], data_block[5], data_block[2], data_block[15],
            data_block[12], data_block[9], data_block[6], data_block[3]]
def removeTrailingColumnNumbering(column_list):
    """
    When pandas finds columns with same name, it numbers them.
    This function receives a list of column names and removes the numbering
    if found. Looks for columns that end with .1, .2, .3 and so on.

    :param column_list: iterable of column-name strings
    :return: list of names with any trailing '.<digits>' suffix removed
    """
    import re
    # fixed: pattern was '\.{1}\d+' unanchored, which truncated at the FIRST
    # '.digits' occurrence anywhere in the name (e.g. 'a.1b' -> 'a'), contrary
    # to the documented "ends with" behavior; anchor to the end of the string.
    suffix = re.compile(r'\.\d+$')
    cleaned = []
    for name in column_list:
        match = suffix.search(name)
        cleaned.append(name[:match.start()] if match else name)
    return cleaned
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.