content stringlengths 42 6.51k |
|---|
def get_bottom_right_inner(sprites):
    """Return the sprite used for the bottom-right inner corner.

    :param sprites: mapping of sprite name -> sequence of frames.
    :return: which sprite to use (first "block" frame).
    """
    block_frames = sprites["block"]
    return block_frames[0]
def _clean_code(code):
    """
    Cleans the received code (the parser does not like extra spaces nor a VALUE
    statement). Returns the cleaned code as a list of lines.
    :param code: The COBOL code to clean
    :return The list of code lines (cleaned)
    """
    lines = []
    # cleanup lines, the parser is very sensitive to extra spaces,...
    for l in code.splitlines():
        # strip the trailing period; it is re-attached after normalisation below
        if l.endswith('.'):
            l = l[:-1]
        # the parser does not like VALUE xxx. -- drop everything from VALUE onward
        if "VALUE" in l:
            l = l[:l.find("VALUE")]
        # the parser does not like extra spaces between "PIC X(xxx)" and "."
        # remember the original indentation, then collapse interior space runs
        indent = len(l) - len(l.lstrip())
        tokens = l.split(" ")
        while "" in tokens:
            tokens.remove("")
        # re-attach the terminating period to the last token (skip blank lines)
        if tokens and not tokens[-1].endswith("."):
            tokens[-1] += "."
        lines.append(" " * indent + " ".join(tokens))
    return lines
def get_float_list(range_max:int, div:int=100) -> list:
    """Return [0/div, 1/div, ..., (range_max-1)/div] as floats.

    To span 0 -> 1, range_max must be the same order of magnitude as div.
    """
    values = []
    for step in range(int(range_max)):
        values.append(float(step) / div)
    return values
def extract_comp_reaction_ids(comp):
    """Extracts all reaction IDs from a MINE compound object.

    :param comp: dict-like MINE compound record; the optional keys
        'Reactant_in' and 'Product_of' each hold a list of reaction IDs.
    :return: list of all reaction IDs found (empty if neither key present).
    """
    rxn_id_list = []
    # Either key may be absent; dict.get with an empty-list default replaces
    # the duplicated try/except KeyError blocks of the original.
    rxn_id_list.extend(comp.get('Reactant_in', []))
    rxn_id_list.extend(comp.get('Product_of', []))
    return rxn_id_list
def render_dashboard(category, tabs, prefix):
    """Renders a dashboard config string.
    Follows this format:
    {
        name = 'dashboard_name'
        dashboard_tab = [
            tab('tab-name', 'test-group-name'),
            ...
        ]
    }

    :param category: dashboard category; combined with prefix for the name.
    :param tabs: iterable of (tab, path) pairs, rendered sorted.
        NOTE(review): the validation loop applies ``'\'' in tab`` to each
        element; for tuple elements that is tuple membership, not substring
        search -- confirm tabs' element type against callers.
    :param prefix: dashboard name prefix.
    :raises ValueError: if prefix, category or a tab entry contains a quote.
    """
    if '\'' in prefix:
        raise ValueError(prefix)
    if '\'' in category:
        raise ValueError(category)
    for tab in tabs:
        if '\'' in tab:
            raise ValueError(tab, tabs)
    return """{
  name = '%(prefix)s-%(category)s'
  dashboard_tab = [
    %(tabs)s
  ]
},""" % dict(
    prefix=prefix,
    category=category,
    tabs='\n    '.join('tab(\'%s\', \'%s\'),' % (tab, path)
                       for (tab, path) in sorted(tabs)))
def build_signature(function_name, function_parameters):
    """
    Build the ``def`` line for a function.

    :param function_name: the name of the function
    :param function_parameters: the parameter text placed between the brackets
    :return: the complete signature line, e.g. ``def f(a, b):``
    """
    return f"def {function_name}({function_parameters}):"
def remove_relationship(relationship_type: str, var: str = 'r') -> str:
    """
    Build a Neo4j statement that deletes every relationship of a type.

    Parameters
    ----------
    relationship_type : str
        The type of the relationship
    var : str, optional
        Name of the relationship when referred in another statement
    Returns
    -------
    out: str
        Neo4j statement
    """
    statement_lines = [
        f"    MATCH ()-[{var}:{relationship_type}]-> ()",
        f"    DELETE {var}",
        "    ",
    ]
    return "\n".join(statement_lines)
def calculate_delta_x(a: float, b: float, n: int) -> float:
    """
    Return delta_x = (b - a) / n.

    :param a: min x value of the integral
    :param b: max x value of the integral
    :param n: number of sample points
    """
    interval_width = b - a
    return interval_width / n
def align8to32(bytes, width, mode):
    """
    Pad each scanline of 8-bit-aligned data out to a 32-bit boundary.

    :param bytes: raw pixel data, one scanline after another.
    :param width: image width in pixels.
    :param mode: pixel mode, one of "1", "L", "P", "I;16".
    :return: the padded data (or the input unchanged if already aligned).
    """
    bits_per_pixel = {"1": 1, "L": 8, "P": 8, "I;16": 16}[mode]
    # Round the line width in bits up to whole bytes.
    bits_per_line = bits_per_pixel * width
    bytes_per_line = (bits_per_line + 7) // 8
    extra_padding = -bytes_per_line % 4
    if not extra_padding:
        # Every scanline already ends on a 32-bit boundary.
        return bytes
    pad = b"\x00" * extra_padding
    padded_lines = []
    for line_no in range(len(bytes) // bytes_per_line):
        start = line_no * bytes_per_line
        padded_lines.append(bytes[start : start + bytes_per_line] + pad)
    return b"".join(padded_lines)
def List_Combine_to_List(*listmember):
    """
    Collect all positional arguments ("listmembers") into a single list.
    """
    return list(listmember)
def new_seating_chart(size=22):
    """Create a new seating chart.

    :param size: int - number of seats in the seating chart.
    :return: dict - seats numbered 1..size, each mapped to None (empty).
    """
    chart = {}
    for seat in range(1, size + 1):
        chart[seat] = None
    return chart
def validate_float_or_None(s):
    """Convert s to float, return None, or raise ValueError.

    Values coming from an rc file are strings, so the literal string
    "None" (case-sensitive, to avoid clashing with parameters where
    'none' is a meaningful string) is mapped to the None object.
    """
    if s == 'None' or s is None:
        return None
    try:
        return float(s)
    except ValueError:
        raise ValueError('Could not convert "%s" to float or None' % s)
def comma_labels(x_list):
    """Format each number in x_list with thousands separators."""
    return [format(int(item), ',') for item in x_list]
def istuple(n):
    """
    Report whether n is exactly a tuple (subclasses excluded).

    Returns: Boolean
    """
    # Deliberate exact-type check (not isinstance), so tuple subclasses
    # such as namedtuples are rejected, matching the original behavior.
    return type(n) is tuple
def get_indeces(a, b):
    """
    Extracts the indices of multiple items in a list.

    Items in b that are not found in a are silently skipped.

    Parameters:
    a: list. where we want to get the index of the items.
    b: list. the items we want to get index of.
    #example:
    x = ['SBS1', 'SBS2', 'SBS3', 'SBS5', 'SBS8', 'SBS13', 'SBS40']
    y = ['SBS1', 'SBS5']
    get_indeces(x, y)
    #result
    >>> [0, 3]
    """
    indeces = []
    for item in b:
        try:
            indeces.append(a.index(item))
        except ValueError:
            # Item not present in a: skip it. (The original used a bare
            # except with a no-op `next` expression; catch only the
            # exception list.index actually raises.)
            pass
    return indeces
def kaldi_id_to_channel(example_id):
    """
    Extract the channel token from a Kaldi example id.

    >>> kaldi_id_to_channel('P28_S09_LIVING.R-0714562-0714764')
    'R'
    >>> kaldi_id_to_channel('P28_S09_LIVING.L-0714562-0714764')
    'L'
    >>> kaldi_id_to_channel('P05_S02_U02_KITCHEN.ENH-0007012-0007298')
    'ENH'
    >>> kaldi_id_to_channel('P09_S03_U01_NOLOCATION.CH1-0005948-0006038')
    'CH1'
    """
    try:
        # Expect exactly one '.' and exactly three '-'-separated fields after it.
        _prefix, tail = example_id.split('.')
        channel, _start, _end = tail.split('-')
    except Exception as e:
        raise ValueError(example_id) from e
    return channel
def create_list_versions(list_of_versions):
    """Create a list of package versions available in a CodeArtifact repository.

    :param list_of_versions: iterable of dicts each carrying a 'version' key.
    :return: list of the version strings in the same order.
    """
    return [entry['version'] for entry in list_of_versions]
def get_r( x, y = 0, z = 1):
    """Compute r, the distance from the origin to the point (x, y, z).

    Returns r (scalar or array, depending on inputs)."""
    squared_sum = x ** 2 + y ** 2 + z ** 2
    return squared_sum ** 0.5
def capture_group(s):
    """
    Wrap s in parentheses to form a capture group (a tagged piece of a
    regular expression), unless it already looks like one.
    """
    if s.startswith('(') and s.endswith(')'):
        return s
    return '(%s)' % s
def metric_to_ips(d, min_depth, max_depth):
    """
    Map metric depth to an inverse-style parameterisation.

    Args:
        d: metric depth in [min_depth, max_depth]
        min_depth: in meter
        max_depth: in meter
    Returns:
        the transformed depth value
    """
    # d = d.clamp(min_depth, max_depth)
    numerator = max_depth * d - max_depth * min_depth
    denominator = (max_depth - min_depth) * d
    return numerator / denominator
def _score_mers(mers: list, consensus: str) -> int:
"""
Calculate the score of a mer list
"""
result = 0
for mer in mers:
for i in range(len(mer)):
if mer[i] != consensus[i]:
result += 1
return result |
def is_testharness_output_passing(content_text):
    """Checks whether |content_text| is a passing testharness output.

    Under a relatively loose/accepting definition of passing testharness
    output, we consider any output with at least one PASS result and no
    FAIL result (or TIMEOUT, NOTRUN, or harness error).
    """
    saw_pass = False
    # Leading and trailing whitespace are ignored, per-line and overall.
    for line in (raw.strip() for raw in content_text.strip().splitlines()):
        if line.startswith('PASS'):
            saw_pass = True
        elif line.startswith(('FAIL', 'TIMEOUT', 'NOTRUN', 'Harness Error.')):
            return False
    return saw_pass
def _get_ensemble_component_file_name(component_index: int) -> str:
"""Returns the file name for the checkpoint of the ith model in the ensemble."""
return f"ensemble_{component_index}" |
def get_header(header: str, sep: str) -> str:
    """
    Build a header in Sphinx rst format.

    :param header: The text of the header
    :param sep: The separator character used to underline the header
    """
    underline = sep * len(header)
    return header + "\n" + underline + "\n"
def drusen(drusize: int) -> int:
    """
    DRSZWI: MAXIMUM DRUSEN W/I GRID
    0=None
    1=Quest
    2=<C0 (63)
    3=<C1 (125)
    4=<C2 (250)
    5=>=C2
    8=CG
    Returns:
        0, 1, 2, 88
    Raises:
        KeyError: for any code outside the recognised set.
    """
    code = drusize
    if 0 <= code <= 2:
        return 0
    if code == 3:
        return 1
    if 4 <= code <= 5:
        return 2
    if code in (8, 88):
        return 88
    raise KeyError('drarwi: %s' % code)
def integer_to_bytestring(n):
    """Return the bytestring represented by an integer.

    Bytes are emitted least-significant first; n == 0 yields ''.
    """
    chunks = []
    while n > 0:
        # n & 0xFF is the low byte, same as n - ((n >> 8) << 8).
        chunks.append(chr(n & 0xFF))
        n >>= 8
    return ''.join(chunks)
def create_rules_dict(raw_rules_input: str) -> dict:
    """
    Creates dict with rules from raw str input
    :param raw_rules_input: String of written rules
    :type raw_rules_input: str
    :return: Dictionary with processed rules, mapping
        outer bag colour -> {contained bag colour: amount string}
    :rtype: dict
    """
    bag_rules: dict = {}
    # One rule per line, e.g. "light red bags contain 1 bright white bag, 2 muted yellow bags."
    for rule_row in raw_rules_input.split(".\n"):
        is_color: bool = True
        rules_for_color: str = ""
        # Splitting on " bags contain " yields [outer colour, contents text].
        for rule_part in rule_row.split(" bags contain "):
            try:
                if is_color:
                    # First part: the outer bag colour; start its rule dict.
                    rules_for_color = rule_part
                    bag_rules[rules_for_color] = {}
                else:
                    # Second part: comma-separated contents entries like
                    # "1 bright white bag"; drop the trailing "bag(s)" word.
                    for bag_rule in rule_part.split(", "):
                        cleaned_bag_rule = bag_rule.split()[:-1]
                        # "no other bags" marks an empty bag; skip it.
                        # NOTE(review): an empty entry would raise IndexError
                        # here, which is NOT caught below -- confirm inputs.
                        if cleaned_bag_rule[0] != "no":
                            bag_amount = cleaned_bag_rule[0]
                            bag_color = cleaned_bag_rule[1] + " " + cleaned_bag_rule[2]
                            bag_rules[rules_for_color][bag_color] = bag_amount
                # Alternate between colour part and contents part.
                is_color = not is_color
            except ValueError:
                pass
    return bag_rules
def strings_differ(string1: str, string2: str) -> bool:
    """Check whether two strings differ while avoiding timing attacks.
    This function returns True if the given strings differ and False
    if they are equal. It's careful not to leak information about *where*
    they differ as a result of its running time, which can be very important
    to avoid certain timing-related crypto attacks:
    http://seb.dbzteam.org/crypto/python-oauth-timing-hmac.pdf
    >>> strings_differ('one', 'one')
    False
    >>> strings_differ('one', 'two')
    True
    :param string1:
    :param string2:
    """
    # Length differences are allowed to short-circuit: length is not secret.
    if len(string1) != len(string2):
        return True
    # Accumulate mismatches over the FULL length instead of returning at the
    # first difference, so the comparison time does not depend on where the
    # strings first diverge. Do not "optimise" this into an early return.
    invalid_bits = 0
    for a, b in zip(string1, string2):
        invalid_bits += a != b
    return invalid_bits != 0
def isBinary(inputString):
    """
    Verify if a sequence is binary.

    Args:
        inputString: input string to be verified.
    Returns:
        boolean: True if every character is '0' or '1' (vacuously True
        for an empty string); False otherwise.
    Raises:
        None
    """
    for character in inputString:
        if character not in '01':
            return False
    return True
def partial(f, args):
    """
    Apply f to args, propagating None.

    Arguments:
    - `f`: a function of a value tuple to values
    - `args`: a tuple of arguments for f, may contain None elements

    Returns None if any argument is None, otherwise f(*args).
    """
    if None in args:
        return None
    return f(*args)
def operation_mode(session, Type='Int32', RepCap='', AttrID=1250005, buffsize=0, action=['Get', '']):
    """[Operation Mode <int32>]
    Sets/Gets how the function generator produces output. When set to Continuous mode, the waveform will be output continuously.
    When set to Burst mode, the ConfigureBurstCount function is used to specify how many cycles of the waveform to output.
    RepCap: < channel# (1-2) >
    Attribute value (mode):
    0: AGM933X_VAL_OPERATE_CONTINUOUS
    1: AGM933X_VAL_OPERATE_BURST

    This function performs no driver call itself -- it only echoes its
    arguments back as a tuple; presumably a dispatcher consumes them --
    TODO confirm against callers.
    NOTE(review): `action` is a mutable default list shared across calls;
    it is not mutated here, but every caller receives the same object.
    """
    return session, Type, RepCap, AttrID, buffsize, action
def is_image(file_path):
    """
    Checks if the specified file is an image.

    :param file_path: Path to the candidate file
    :return: Whether or not the file has an image extension
    """
    # str.endswith accepts a tuple of suffixes, replacing the any() scan.
    return file_path.endswith((".png", ".jpg", ".jpeg"))
def get_data_file_names(config):
    """ Used by the models to find the correct pickles for a specific fold. """
    fold = config['fold']
    stems = ('train_data', 'pred_data', 'raw_data')
    return tuple(f'{stem}_{fold}.pickle' for stem in stems)
def get_dlp_results_sql(project_id, dataset_id, table_id, min_count=0) -> str:
    """ Generate sql to query the DLP results table: https://cloud.google.com/dlp/docs/querying-findings
    and counts the number of finding in each field,info-type, likelihood grouping
    :param project_id: Project Id
    :param dataset_id: Dataset
    :param table_id: Table
    :param min_count: only groupings with more than this many findings are kept
    :return:a sql query that generates a result set with the following columns [field_name, info_type_name, likelihood, count_total]

    NOTE(review): identifiers and min_count are interpolated directly into the
    SQL; safe only if all arguments come from trusted configuration -- confirm.
    """
    return """SELECT
            locations.record_location.field_id.name AS field_name,
            info_type.name as info_type_name,
            likelihood as likelihood,
            COUNT(*) AS count_total
            FROM {}.{}.{},
            UNNEST(location.content_locations) AS locations
            GROUP BY
            locations.record_location.field_id.name,
            info_type.name,
            likelihood
            HAVING count_total > {}
    """.format(
        project_id, dataset_id, table_id, min_count
    )
def configureTrajectoryImage(dim, size=512, nTraj=100):
    """configureTrajectoryImage(dim, size=512, nTraj=100)
    Configure trajectory images of the specified dim in meters and size in pixels.

    :param dim: image dimension in meters.
    :param size: image size in pixels.
    :param nTraj: number of trajectories to record.
    :return: configuration dict with keys 'Trajectories', 'TrajSize', 'TrajCount'.
    """
    # Bug fix: 'TrajSize' previously hard-coded 512 and ignored the `size`
    # parameter; use the parameter so callers can actually configure it.
    return {'Trajectories': dim, 'TrajSize': size, 'TrajCount': nTraj}
def gen_type_help_txt(types: str, target: str = 'Input') -> str:
    """Generate a type help txt.
    """
    message = f'Select type of {target} files from {types}'
    message += '[Automatically detected by those extension]'
    return message
def flattenlabel(label, order='C'):
    """
    Flatten label in row-major order 'C' (default) or column-major order 'F'.
    Code taken (but modified) from http://code.activestate.com/recipes/496807

    :param label: iterable of per-axis label sequences.
    :param order: 'C' or 'F'.
    :return: a one-element list containing the list of label tuples.
        NOTE(review): the result is wrapped in an extra outer list (`[idx]`)
        -- confirm callers expect that rather than `idx` itself.
    :raises ValueError: if order is not 'C' or 'F'.
    """
    if order not in ('C', 'F'):
        raise ValueError("order must be 'C' or 'F'")
    label = list(label)
    # For 'C' order, process the axes reversed and un-reverse each tuple at
    # the end, mirroring row-major enumeration.
    if order == 'C':
        label = label[::-1]
    # Incrementally build the cartesian product of all axis labels.
    idx = [[]]
    for x in label:
        t = []
        for y in x:
            for i in idx:
                t.append(i+[y])
        idx = t
    if order == 'C':
        idx = [i[::-1] for i in idx]
    idx = [tuple(i) for i in idx]
    return [idx]
def print_dict(d, header=''):
    """
    Formats a dictionary for printing.

    :param d: Dictionary to print.
    :param header: Optional text placed before the opening brace.
    :return: String containing the formatted dictionary, keys sorted.
    """
    body = "\n\t".join(str(k) + ": " + str(d[k]) for k in sorted(d.keys()))
    return str(header) + '{ \n\t' + body + '\n' + '}'
def check_uniqueness_in_rows(board: list):
    """
    Check buildings of unique height in each row.
    Return True if buildings in a row have unique length, False otherwise.

    The first and last rows and the first and last character of each row
    (the outer hint ring) are ignored.

    >>> check_uniqueness_in_rows(['***21**', '412453*', '423145*', '*543215',\
    '*35214*', '*41532*', '*2*1***'])
    True
    >>> check_uniqueness_in_rows(['***21**', '452453*', '423145*', '*543215',\
    '*35214*', '*41532*', '*2*1***'])
    False
    >>> check_uniqueness_in_rows(['***21**', '412453*', '423145*', '*553215',\
    '*35214*', '*41532*', '*2*1***'])
    False
    """
    # Bug fix: the original destructively deleted rows from the caller's
    # board (del board[0] / del board[-1]); slice instead so the input list
    # is left untouched.
    for row in board[1:-1]:
        interior = row[1:-1]
        for ch in interior:
            if interior.count(ch) >= 2:
                return False
    return True
def w(a, cosmo):
    """
    Effective equation of state parameter for the CPL parameterisation:
    w(a) = w0 + wa * (1 - a).
    """
    w0 = cosmo['w0']
    wa = cosmo['wa']
    return w0 + wa * (1 - a)
def BCD_calc(AM, POP):
    """
    Calculate Bray-Curtis dissimilarity (BCD).

    :param AM: Automatic/Manual
    :type AM: int
    :param POP: population or total number of samples
    :type POP: int
    :return: BCD as float, or the string "None" if it cannot be computed
    """
    try:
        denominator = 2 * POP
        return abs(AM) / denominator
    except (ZeroDivisionError, TypeError, AttributeError):
        # Invalid / zero inputs yield the sentinel string "None".
        return "None"
def euler_step(u, t, f, dt):
    """
    Advance the solution one time-step with Euler's method.

    Parameters
    ----------
    u : array of float
        solution at the previous time-step.
    t : float
        the time at the previous time-step.
    f : function
        right-hand side of the system of equations.
    dt : float
        time-increment.

    Returns
    -------
    u_n_plus_1 : array of float
        approximate solution at the next time step.
    """
    increment = dt * f(u, t)
    return u + increment
def StringLiteral(s):
    """Returns a double-quoted string literal with backslashes and quotes escaped."""
    escaped = s.replace('\\', '\\\\').replace('"', '\\"')
    return '"{}"'.format(escaped)
def filter_dupes(entries, id_comparator=None):
    """Partitions the list of entries into two lists: one containing uniques, and
    one containing the duplicates.

    entries = [(datetime(...), UUID()), (datetime(...), UUID())]
    keys, dupes = filter_dupes(entries)
    keys  # => [UUID().bytes, UUID().bytes]
    dupes # => [(datetime(...), UUID()), (datetime(...), UUID())]

    entries - A List of indexed Messages represented as Tuples, each
              containing a DateTime timestamp and a UUID Message key.
    id_comparator - Function applied to IDs before being returned in the List
                    of keys. Default: str()

    Returns a Tuple of a List of String UUID byte Message keys and a List of
    indexed Message Tuples.
    """
    # Idiom fix: identity comparison with None (was `== None`).
    if id_comparator is None:
        id_comparator = str
    keys = []
    dupes = []
    existing = set()
    for timestamp, id in entries:
        if id in existing:
            dupes.append((timestamp, id))
        else:
            existing.add(id)
            keys.append(id_comparator(id))
    return (keys, dupes)
def default_hash_key_function(key, number_of_buckets, max_key_length=32):
    """Takes a key and returns a value that represents the bucket identifier.

    The key's string form is truncated to max_key_length characters, summed
    as (code point + position), then reduced modulo number_of_buckets.
    """
    string_key = str(key)[:max_key_length]
    the_hash = sum(ord(ch) + position for position, ch in enumerate(string_key))
    return the_hash % number_of_buckets
def optional_d(text):
    """Parse an 'optional' integer: empty/None-ish input yields None."""
    return int(text) if text else None
def _anonymous_model_data(ops_data):
"""Returns a dict representing an anonymous model.
ops_data must be a dict representing the model operations. It will
be used unmodified for the model `operations` attribute.
"""
return {
"model": "",
"operations": ops_data
} |
def remove_non_deployable_operators(operators: list):
    """Removes operators that are not part of the deployment pipeline.

    An operator with a None "notebookPath" is considered non-deployable.
    If the non-deployable operator is dependent on another operator, it will be
    removed from that operator's dependency list, and any operator that
    depended on it inherits its dependencies instead.

    Args:
        operators (list): original pipeline operators; each is a dict with at
            least "notebookPath", "operatorId" and "dependencies" keys.
            NOTE(review): the dicts in the returned list are mutated in place.
    Returns:
        A list of all deployable operators.
    """
    deployable_operators = [operator for operator in operators if operator["notebookPath"]]
    non_deployable_operators = list()
    for operator in operators:
        if operator["notebookPath"] is None:
            # checks if the non-deployable operator has dependency
            if operator["dependencies"]:
                dependency = operator["dependencies"]
                # looks for who has the non-deployable operator as dependency
                # and assign the dependency of the non-deployable operator to this operator
                for op in deployable_operators:
                    if operator["operatorId"] in op["dependencies"]:
                        op["dependencies"] = dependency
            non_deployable_operators.append(operator["operatorId"])
    # Finally, strip any remaining references to removed operators.
    # (Set subtraction also deduplicates and may reorder the dependency list.)
    for operator in deployable_operators:
        dependencies = set(operator["dependencies"])
        operator["dependencies"] = list(dependencies - set(non_deployable_operators))
    return deployable_operators
def get_shortest_unique_filename(filename, filenames):
    """Get a representation of filename in a way that makes it look
    unique compared to the other given filenames. The most unique part
    of the path is used, and every directory in between that part and the
    actual filename is represented with a slash.

    :param filename: the path to shorten (either slash style accepted).
    :param filenames: the other paths it must look distinct from.
    :return: a display name such as "project/…/file.py" built from the base
        name plus the most distinguishing ancestor component.
    """
    # Normalize and avoid having filename itself in filenames
    filename1 = filename.replace("\\", "/")
    filenames = [fn.replace("\\", "/") for fn in filenames]
    filenames = [fn for fn in filenames if fn != filename1]
    # Prepare for finding uniqueness: start every component at the maximum
    # possible score (number of other filenames) and decrement on collisions.
    nameparts1 = filename1.split("/")
    uniqueness = [len(filenames) for i in nameparts1]
    # Establish what parts of the filename are not unique when compared to
    # each entry in filenames.
    for filename2 in filenames:
        nameparts2 = filename2.split("/")
        nonunique_for_this_filename = set()
        for i in range(len(nameparts1)):
            if i < len(nameparts2):
                # Compare both left-aligned (index i) and right-aligned
                # (index -1-i) so shared prefixes AND shared suffixes count.
                if nameparts2[i] == nameparts1[i]:
                    nonunique_for_this_filename.add(i)
                if nameparts2[-1 - i] == nameparts1[-1 - i]:
                    nonunique_for_this_filename.add(-i - 1)
        # Each colliding component loses one point of uniqueness.
        for i in nonunique_for_this_filename:
            uniqueness[i] -= 1
    # How unique is the filename? If its not unique at all, use only base name
    max_uniqueness = max(uniqueness[:-1])
    if max_uniqueness == 0:
        return nameparts1[-1]
    # Produce display name based on base name and last most-unique part
    displayname = nameparts1[-1]
    for i in reversed(range(len(uniqueness) - 1)):
        displayname = "/" + displayname
        if uniqueness[i] == max_uniqueness:
            displayname = nameparts1[i] + displayname
            break
    return displayname
def _CompletionsFromArgs(fn_args):
"""Takes a list of fn args and returns a list of the fn's completion strings.
Args:
fn_args: A list of the args accepted by a function.
Returns:
A list of possible completion strings for that function.
"""
completions = []
for arg in fn_args:
arg = arg.replace('_', '-')
completions.append('--{arg}'.format(arg=arg))
return completions |
def check_json(data, name):
    """
    Check if 'name' should be inhibited according to json 'data'.

    Force-desinhibit entries take precedence over inhibit entries.
    :return: True if the box should go down, False if it is inhibited.
    """
    if any(force in name for force in data['force_desinhibit']):
        return True
    if any(inhib in name for inhib in data['inhibit']):
        return False
    return True
def define_token(token):
    """Return the mandatory definition for a token to be used within cisp
    where cisp expects a valid token.

    For example, in order to use an identifier as a method name in a
    cisp type, the definition generated by this function must be written
    for that specific identifier.

    :raises TypeError: if token is not a str.
    """
    if not isinstance(token, str):
        raise TypeError("{} is not of type {}".format(token, str))
    spaced_characters = " ".join(token)
    return "#define CISP_TOKEN_{} ({})".format(token, spaced_characters)
def stringify(grid: dict, n: int) -> str:
    """Stringify with (0, 0) in the lower-left corner.

    Missing cells are rendered as "-".
    """
    lines = []
    for y in range(n - 1, -1, -1):
        lines.append("".join(grid.get((x, y), "-") for x in range(n)))
    return "\n".join(lines)
def merge_dicts(a, b):
    """Return a new dict with items from two other dicts.

    This function exists for backward compatibility to replace Python 3.9's a|b.
    For performance reasons, there are no guarantees that a and b won't be modified.

    :param a: base dict; it is updated in place and returned (not a copy).
    :param b: dict whose items win on key collisions.
    :return: the dict `a` itself, with b's items merged in.
    """
    a.update(b)
    return a
def has_python_value(var, value):
    """Used during typing to check that variable var was resolved as Python type and has specific value.

    Singletons (None) and bools are compared by identity; everything else
    by equality, after an exact-type-family check via isinstance.
    """
    if not isinstance(var, type(value)):
        return False
    # Bug fix: the original tested isinstance(value, type(bool)), i.e.
    # isinstance(value, type), which is True for classes, never for bools.
    # The intent was to identity-compare None and bool singletons.
    if value is None or isinstance(value, bool):
        return var is value
    return var == value
def is_assymetric_msg(msg_type: int) -> bool:
    """Check if an OpenFlow message type is assymetric."""
    # Asymmetric types: 1, 10, 11, 12.
    return msg_type in (1, 10, 11, 12)
def pad_extra_whitespace(string, pad):
    """Given a multiline string, prefix every line with `pad` spaces."""
    prefix = ' ' * pad
    padded_lines = [prefix + line for line in string.split('\n')]
    return '\n'.join(padded_lines)
def tl_to_string(exp):
    """ Converts Python objects to a TL-readable string (lists become
    space-separated parenthesised groups, recursively). """
    if isinstance(exp, list):
        inner = ' '.join(tl_to_string(item) for item in exp)
        return '(' + inner + ')'
    return str(exp)
def bmh_nic_names(bmh):
    """Return the interface names of a BareMetalHost as a sorted,
    de-duplicated list."""
    unique_names = {nic["name"] for nic in bmh["status"]["hardware"]["nics"]}
    return sorted(unique_names)
def list_deployed_clusters(pipeline, actual_deployments):
    """Returns a list of clusters that a service is deployed to given
    an input deploy pipeline and the actual deployments.

    Order follows the first appearance of each cluster in the pipeline.
    """
    clusters_seen = []
    # Namespaces look like "cluster.instance", in deploy.yaml order.
    for namespace in pipeline:
        cluster, _instance = namespace.split('.')
        if namespace in actual_deployments and cluster not in clusters_seen:
            clusters_seen.append(cluster)
    return clusters_seen
def _size_arg_converter(size_arg):
""" If it succeeds to interpret, then float is returned,
otherwise, ``None`` is returned.
"""
size_arg = size_arg.replace('"', '')
size_arg = size_arg.replace("'", '')
size_arg = size_arg.lower().replace("px", "")
size_arg = size_arg.lower().replace("pt", "")
try:
return float(size_arg)
except Exception as e:
print("Exception", e, type(e), size_arg)
return None |
def string_interleave(s1, s2):
    """Interleave the characters of two strings, longer string first.

    Args:
        s1 (str): input string
        s2 (str): input string
    Returns:
        (str): the interleaved string; leftover characters of the longer
        string are appended at the end. When lengths are equal, s2 leads.
    Raises:
        TypeError: if s1 or s2 is an int
    Examples:
        >>> string_interleave("abc", "mnopq")
        'manbocpq'
        >>> string_interleave("mnopq", "abc")
        'manbocpq'
        >>> string_interleave("Mine", "Thai")
        'TMhianie'
        >>> string_interleave("Theprove", "Puchonggggggggg")
        'PTuhcehpornogvgeggggggg'
        >>> string_interleave("itumelabmaidai", "mairooruengloei")
        'miatiurmoeolraubemnagildoaeii'
        >>> string_interleave(25269, "mnop")
        Traceback (most recent call last):
        ...
        TypeError: s1 is not a string
        >>> string_interleave("mnop", 25269)
        Traceback (most recent call last):
        ...
        TypeError: s2 is not a string
    """
    # raise error (same int-only checks as before)
    if type(s1) == int:
        raise TypeError("s1 is not a string")
    if type(s2) == int:
        raise TypeError("s2 is not a string")
    # The strictly longer string leads; on a tie s2 leads (matches the
    # original's else branch).
    if len(s1) > len(s2):
        longer, shorter = s1, s2
    else:
        longer, shorter = s2, s1
    pieces = [a + b for a, b in zip(longer, shorter)]
    pieces.append(longer[len(shorter):])
    return "".join(pieces)
def split_operations_by_type(operations: list) -> tuple:
    """
    Split input operations into sub-lists per transformation type.

    The normalization operation (if any) is appended last to the color list
    so the other color operations apply before it.
    :param operations: list of pipeline operations
    :return: tuple of lists (color, geometry, independent)
    """
    color, geometry, independent = [], [], []
    normalize = None
    for op in operations:
        kind = op.get_op_type()
        if kind == 'color':
            color.append(op)
        elif kind == 'geometry':
            geometry.append(op)
        elif kind == 'normalize':
            normalize = op
        else:
            independent.append(op)
    if normalize is not None:
        color.append(normalize)  # normalization must be the last color op
    return color, geometry, independent
def printSubsequence(Matrix, s1, s2, i, j, seq):
    """ Returns the longest common subsequence of two strings
    (2D-array, str, str, int, int, str) -> (str)

    Walks the LCS dynamic-programming table `Matrix` backwards from cell
    (i, j), accumulating matched characters (back-to-front) in `seq`,
    a list used as an accumulator -- pass [] on the initial call.
    Returns None when there is no common subsequence.
    """
    if i == 0 or j == 0:
        # Reached the table edge: emit the accumulated characters reversed.
        if seq == []: return None
        return ''.join(seq[::-1])
    # If inputs for s1, s2 are numbers uncomment below line.
    # return ' '.join([str(i) for i in seq][::-1])
    if s1[i-1] == s2[j-1]:
        # Matching characters belong to the LCS; step diagonally.
        seq.append(s1[i-1])
        return printSubsequence(Matrix, s1, s2, i-1, j-1, seq)
    # Otherwise follow whichever neighbouring cell holds the longer LCS.
    if Matrix[i-1][j] > Matrix[i][j-1]:
        return printSubsequence(Matrix, s1, s2, i-1, j, seq)
    else:
        return printSubsequence(Matrix, s1, s2, i, j-1, seq)
def create_buckets(lower_bound, upper_bound, bins):
    """
    Create a list of bucket dicts covering [lower_bound, upper_bound].

    :param lower_bound: low range
    :param upper_bound: high range
    :param bins: number of buckets
    :return: list of dicts with keys "lower", "upper", "bucket"
    """
    step = (upper_bound - lower_bound) / bins
    buckets = []
    if bins == 1:
        buckets.append({"lower": lower_bound, "upper": lower_bound + 1, "bucket": 0})
    else:
        low = lower_bound
        for index in range(bins):
            buckets.append({"lower": low, "upper": low + step, "bucket": index})
            low = low + step
        # Pin the last upper edge exactly to upper_bound: accumulated
        # floating-point error could otherwise miss it.
        buckets[-1]["upper"] = upper_bound
    return buckets
def is_empty(elem):
    """Helper to check whether the passed element (e.g. a list) is empty.

    Args:
        elem (:obj:): Any structured data object (i.e. list).
    Returns:
        bool: True if the element is falsy (empty), False otherwise.
    """
    return not elem
def is_png(filename):
    """Checks whether the file is a .png.

    Parameters:
    -----------
    filename:
        string: The filename of the image.
    Returns:
    --------
    Boolean: True when the name ends in ".png".
    """
    return filename.endswith(".png")
def onroot_vd(t, y, ydot, solver):
    """
    onroot callback: continue (0) while time <= 28, stop (1) afterwards.
    """
    return 1 if t > 28 else 0
def stringify_dict(datum):
    """
    Return the log data with every value converted to its string form.
    """
    return {key: str(value) for key, value in datum.items()}
def is_binary_format(content, maxline=20):
    """
    Parse the file header to judge whether the format is binary.

    :param content: file content as a list of byte lines
    :param maxline: maximum number of header lines to inspect
    :return: True iff a b'format' line containing b'binary' is found
    """
    for header_line in content[:maxline]:
        if b'format' in header_line:
            # The first 'format' line decides either way.
            return b'binary' in header_line
    return False
def de2hex(obj):
    """
    Convert a decimal number into its (uppercase) hexadecimal representation.

    Parameters
    ----------
    obj : int or str
        A number in decimal format
    Returns
    -------
    str
        The hexadecimal representation of the input number
    """
    number = int(obj)
    # Keep hex()[2:] (not format) so negative inputs behave as before.
    return hex(number)[2:].upper()
def reflect(lower, current, modifier, upper):
    """
    Reflects a projected position if it escapes the bounds.

    :param lower: lower bound of the interval.
    :param current: current position.
    :param modifier: step to add to the position.
    :param upper: upper bound of the interval.
    :return: tuple (next position, step negated if a bounce occurred).
    """
    next, modified = current, modifier
    if next + modified < lower:
        # Undershoot: negate the step, then mirror the projected position
        # about `lower` (equivalent to 2*lower - (current + modifier)).
        modified *= -1
        next = lower + (modified - next + lower)
    elif upper < next + modified:
        # Overshoot: mirror the projected position about `upper`
        # (equivalent to 2*upper - (current + modifier)), then negate the step.
        next = upper - (next + modified - upper)
        modified *= -1
    else:
        # In bounds: simply advance.
        next += modified
    # escaped = escapes(lower, current, modifier, upper)
    # if escaped < 0:
    #     next = lower - escaped
    #     modified *= -1
    # elif 0 < escaped:
    #     next = upper - escaped
    #     modified *= -1
    # else:
    #     next += modified
    return (next, modified)
def _find_match(needle: dict, haystack: list, keys: list):
"""Find a dictionary in a list of dictionary based on a set of keys"""
for item in haystack:
for key in keys:
if item.get(key) != needle[key]:
break
else:
return item
return None |
def _is_valid_glob(glob):
"""Check whether a glob pattern is valid.
It does so by making sure it has no dots (path separator) inside groups,
and that the grouping braces are not mismatched. This helps doing useless
(or worse, wrong) work on queries.
Args:
glob: Graphite glob pattern.
Returns:
True if the glob is valid.
"""
depth = 0
for c in glob:
if c == "{":
depth += 1
elif c == "}":
depth -= 1
if depth < 0:
# Mismatched braces
return False
elif c == ".":
if depth > 0:
# Component separator in the middle of a group
return False
# We should have exited all groups at the end
return depth == 0 |
def comment(*text):
    """
    Wrap the concatenated text pieces in an HTML comment.
    """
    joined = ''.join(text)
    return '<!-- ' + joined + ' -->'
def is_fin_st(id):
    """Used in p_one_line()
    ---
    Checks if id begins with f or F or with if or IF.
    """
    if id[0] in ('f', 'F'):
        return True
    return len(id) > 1 and id[0:2] in ('if', 'IF')
def _parse_url(time, inputnames, inputvalues, outputnames):
"""
Ensure that inputs has the right type
"""
data = {str(key):float(value)
for key, value in
zip(inputnames.split(','), inputvalues.split(','))}
data['time'] = float(time)
data['outputnames'] = outputnames.split(',')
return data |
def applyF_filterG(L, f, g):
    """
    Assumes L is a list of integers
    Assume functions f and g are defined for you.
    f takes in an integer, applies a function, returns another integer
    g takes in an integer, applies a Boolean function,
    returns either True or False

    Mutates L in place so that it keeps exactly the original elements i
    for which g(f(i)) is True.
    Returns the largest element of the mutated L, or -1 if it is empty.
    """
    if L != []:
        # Slice-assignment mutates the caller's list object in place.
        L[:] = [element for element in L if g(f(element))]
    if L:
        return max(L)
    return -1
def solution(N):
    """
    Return the classic FizzBuzz list for 1..N (N >= 1):
    multiples of 3 become 'Fizz', of 5 'Buzz', of both 'FizzBuzz'.

    >>> solution(15)
    [1, 2, 'Fizz', 4, 'Buzz', 'Fizz', 7, 8, 'Fizz', 'Buzz', 11, 'Fizz', 13, 14, 'FizzBuzz']
    """
    result = []
    for number in range(1, N + 1):
        if number % 15 == 0:
            result.append('FizzBuzz')
        elif number % 3 == 0:
            result.append('Fizz')
        elif number % 5 == 0:
            result.append('Buzz')
        else:
            result.append(number)
    return result
def in_domain(x, y):
    """
    Check if a point (x, y) is within the geometry domain.

    The domain is the square [0, l] x [0, l] with the corner region
    where both x > h and y > h cut out.

    :return: True if the point is inside the domain, False otherwise.
    """
    l = 0.1
    h = 0.04
    xc = x - h
    yc = y - h
    # Outside the bounding square.
    if (x > l) or (x < 0.0) or (y > l) or (y < 0.0):
        return False
    # Inside the cut-out corner region.
    if (xc > 0.0) and (yc > 0.0):
        return False
    # Bug fix: the original fell through here without a return statement,
    # so valid in-domain points yielded None (falsy) instead of True.
    return True
def to_string(val):
    """ Convert a value into a string, recursively for lists and tuples.

    Args:
        val: Value to convert
    Returns:
        String representation of the value (single-element tuples keep
        their trailing comma).
    """
    if isinstance(val, tuple):
        if len(val) == 1:
            return "(%s,)" % to_string(val[0])
        return "(%s)" % ", ".join(to_string(item) for item in val)
    if isinstance(val, list):
        return "[%s]" % ", ".join(to_string(item) for item in val)
    return str(val)
def one_or_all(mixed):
    """
    Evaluate the truth value of a single bool or of a list of bools.

    A bare bool is returned unchanged; a list is True only if every
    element is truthy (an empty list counts as True). Any other type
    yields None, matching the original implicit fall-through.

    :param mixed: bool or list
    :type mixed: bool or [bool]
    :return: truth value
    :rtype: bool
    """
    if isinstance(mixed, bool):
        return mixed
    if isinstance(mixed, list):
        for element in mixed:
            if not element:
                return False
        return True
    return None
def camelcase_to_snake_case(_input):
    """Convert a camel case string to a snake case string: CamelCase -> camel_case.

    An underscore is inserted before an uppercase letter only when it
    starts or ends a run of capitals, so 'HTTPResponse' becomes
    'http_response' rather than 'h_t_t_p_response'.

    Args:
        _input (str): The string to convert.

    Returns:
        str: The snake_case version of the input.
    """
    # https://codereview.stackexchange.com/a/185974
    # Bug fix: guard the empty string, which previously raised
    # IndexError on `_input[0]`.
    if not _input:
        return _input
    res = _input[0].lower()
    for i, letter in enumerate(_input[1:], 1):
        if letter.isupper():
            try:
                # Insert '_' only at the boundary of an uppercase run.
                if _input[i - 1].islower() or _input[i + 1].islower():
                    res += "_"
            except IndexError:
                # Trailing uppercase letter: no lookahead available.
                pass
        res += letter.lower()
    return res
def convert_list_elements_to_list(list_to_convert):
    """Flatten one level of nested lists into a single sequence.

    Example: [1, 2, 3, [4, 5], 6, [7]] -> [1, 2, 3, 4, 5, 6, 7]
    """
    flattened = []
    for item in list_to_convert:
        # Splice nested lists in place; keep everything else as one item.
        flattened.extend(item if isinstance(item, list) else [item])
    return flattened
def bam_indexing(samtools, variable_name):
    """Return bash code that creates a BAM index with samtools.

    Args:
        samtools: Path to the samtools executable.
        variable_name: Name of the bash variable holding the BAM file.

    Returns:
        A bash snippet running ``samtools index`` on ``$variable_name``,
        appending stdout/stderr to ``$logFile``.
    """
    return "# Indexing\n{} index ${} >> $logFile 2>&1".format(samtools, variable_name)
def int_or_tuple_3d(value):
    """Converts `value` (int or tuple) to a 5-element list for 3d ops.

    This function normalizes the input value by always returning a
    5-element list of the form [1, d0, d1, d2, 1] (or the value itself
    when it is already a well-formed 5-element list).

    Args:
        value: A list of 3 ints, 5 ints, or a single int.
    Returns:
        A list with 5 values.
    Raises:
        ValueError: If `value` is not well formed.
        TypeError: If the `value` type is not supported.
    """
    if isinstance(value, int):
        return [1, value, value, value, 1]
    if isinstance(value, (tuple, list)):
        len_value = len(value)
        if len_value == 3:
            return [1, value[0], value[1], value[2], 1]
        if len_value == 5:
            # Raise instead of assert: asserts are stripped under
            # `python -O`, and the docstring promises ValueError for
            # malformed input.
            if not (value[0] == value[4] == 1):
                raise ValueError('Must have strides[0] = strides[4] = 1')
            return [value[0], value[1], value[2], value[3], value[4]]
        raise ValueError('This operation does not support {} values list.'.format(len_value))
    raise TypeError('Expected an int, a list with 3/5 ints or a TensorShape of length 3, '
                    'instead received {}'.format(value))
def dt_controller(current_control_output_value, previous_control_output_value, derivative_gain_value):
    """Compute the derivative (D) term of a discrete controller.

    Multiplies the change in the control output between two consecutive
    samples by the derivative gain. (Replaces the placeholder docstring.)

    Args:
        current_control_output_value: Control output at the current step.
        previous_control_output_value: Control output at the previous step.
        derivative_gain_value: Derivative gain to scale the difference by.

    Returns:
        The derivative term: (current - previous) * gain.
    """
    return (current_control_output_value - previous_control_output_value) * derivative_gain_value
def get_tail(page, marker):
    """Return the part of *page* that follows the first *marker*.

    Returns None when *page* is None or when the marker is absent.
    """
    if page is None:
        return None
    _, found, tail = page.partition(marker)
    return tail if found else None
def _ends_overlap(left, right):
"""Returns whether the left ends with one of the non-empty prefixes of the right"""
for i in range(1, min(len(left), len(right)) + 1):
if left.endswith(right[:i]):
return True
return False |
def bbox_horz_aligned(box1, box2):
    """
    Return True if the two boxes' vertical ranges overlap after each
    range is shrunk by a 1.5-unit margin on both ends; False when
    either box is missing/falsy.
    """
    if not (box1 and box2):
        return False
    # NEW: any overlap counts
    # Shrink each box by 1.5 at top and bottom, then do a standard
    # interval-overlap test (De Morgan of the original negated form).
    top1, bottom1 = box1.top + 1.5, box1.bottom - 1.5
    top2, bottom2 = box2.top + 1.5, box2.bottom - 1.5
    return top1 <= bottom2 and top2 <= bottom1
def get_link(text):
    """Turn a markdown headline into a link slug.

    Lowercases the text, turns spaces into hyphens, then drops every
    character that is not a-z, '-' or '_' (so 'Hello World' becomes
    'hello-world').
    """
    slug = text.lower().replace(' ', '-')
    return ''.join(ch for ch in slug if ch in '-_' or 'a' <= ch <= 'z')
def get_crowd_selection_counts(input_id, task_runs_json_object):
    """
    Count how many times the crowd picked each option for a given task
    :param input_id: the id for a given task
    :type input_id: int
    :param task_runs_json_object: all of the input task_runs from json.load(open('task_run.json'))
    :type task_runs_json_object: list
    :return: number of responses for each selection
    :rtype: dict
    """
    # Map each recognized selection onto the counter it increments;
    # anything else (including a missing 'selection' key) is an ERROR.
    selection_to_key = {'fracking': 'n_frk_res',
                        'unknown': 'n_unk_res',
                        'other': 'n_oth_res'}
    counts = {'n_frk_res': 0,
              'n_unk_res': 0,
              'n_oth_res': 0,
              'ERROR': 0}
    for task_run in task_runs_json_object:
        if task_run['task_id'] != input_id:
            continue
        try:
            selection = task_run['info']['selection']
        except KeyError:
            selection = 'ERROR'
        counts[selection_to_key.get(selection, 'ERROR')] += 1
    return counts
def canonicalize(val):
    """
    Recursively convert every 'str' inside the given value to UTF-8
    'bytes'. Lists are processed element by element; any non-str,
    non-list value is returned unchanged.
    """
    if isinstance(val, list):
        converted = []
        for element in val:
            converted.append(canonicalize(element))
        return converted
    return val.encode("utf8") if isinstance(val, str) else val
def forgiving_float(value):
    """Convert *value* to a float, returning it unchanged on failure."""
    try:
        result = float(value)
    except (ValueError, TypeError):
        # Not convertible: hand the original value back untouched.
        result = value
    return result
def dual_cc_device_library_of(label):
    """
    Given the label of a dual_cc_library, returns the label for the
    on-device library. This library is private to the Bazel package that
    defines it.
    """
    return label + "_on_device_do_not_use_directly"
def a_mystery_function_3(binary_string):
    """
    binary_string is a string that is at least 4 characters long with 1s and 0s with the rightmost character representing the 0th bit
    """
    # Parse the characters into ints and reverse, so index 0 holds the
    # least significant bit (the rightmost character of the input).
    binary_list=[int(i) for i in binary_string]
    binary_list.reverse()
    # a, b, c are bits 0, 1 and 2 respectively.
    a,b,c=binary_list[0:3]
    # CNF formula over (a, b, c). By enumeration of all eight cases it is
    # satisfied only by (a, b, c) == (1, 1, 0), i.e. when the string ends
    # in '011'.
    # NOTE(review): because `and`/`or` return their operands, a falsy
    # result may be the int 0 rather than the bool False.
    return (a or b or not c) and \
           (a or b or c) and \
           (a or not b or c) and \
           (a or not b or not c) and \
           (not a or b or not c) and \
           (not a or b or c) and \
           (not a or not b or not c)
def zeroPad(data):
    """
    ZeroPadding: pad *data* with NUL bytes up to the next multiple of
    the 8-byte block size. Input already on a block boundary (including
    empty input) is returned unchanged.
    """
    block_size = 8
    remainder = len(data) % block_size
    if remainder:
        # Append exactly the number of zero bytes needed in one step.
        data = data + b"\0" * (block_size - remainder)
    return data
def pack(word, pattern):
    """Return a packed word given a spaced seed pattern.

    Keeps exactly the characters of *word* whose position in *pattern*
    is truthy.

    >>> pack('actgac', [True, False, True, True, False, True])
    'atgc'
    """
    # Index the pattern explicitly so a too-short pattern still raises
    # IndexError, matching the original behavior.
    return "".join(char for i, char in enumerate(word) if pattern[i])
def _str(item, encoding='utf-8', default=''):
"""_str
Args:
item (Any): item
encoding (str, optional): encoding
default (str, optional): default
Returns:
str: item in str type.
"""
if isinstance(item, bytes):
try:
result = item.decode(encoding)
except Exception as e:
result = default
return result
try:
result = str(item)
except Exception as e:
result = default
return result |
def kin2dyn(kin, density):
    """
    Convert from kinematic to dynamic viscosity.

    Dynamic viscosity is the kinematic viscosity multiplied by the
    lubricant density.

    Parameters
    ----------
    kin: ndarray, scalar
        The kinematic viscosity of the lubricant.
    density: ndarray, scalar
        The density of the lubricant.

    Returns
    -------
    dyn: ndarray, scalar
        The dynamic viscosity of the lubricant.
    """
    return kin * density
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.