def _stable_hash(s):
"""A simple string hash that won't change from run to run."""
ret = 0
for c in s:
ret = ret * 97 + ord(c)
return ret
|
def tolist(textlist):
""" ' a, b,c,d, e,f ' -> [a, b, c, d, e, f]"""
return [x.strip() for x in textlist.split(",")] if textlist else []
|
def fasta_formatted_string(name, sequence, description=None, line_width=None):
"""Returns the name and character sequence in the FASTA format.
Parameters
----------
name : str
        Name describing the sequence. Usually the ID of the sequence in a FASTA file.
sequence : str
Characters describing nucleotide sequences in a FASTA file.
description : str, optional
Longer description and other notes about the sequence.
line_width : int, optional
The number of characters in a line.
Returns
-------
str
        String in FASTA format: the first line is the '>' header built from `name`
        (and `description`, if given), followed by `sequence`, wrapped to
        `line_width` characters per line when `line_width` is given.
"""
string = '>' + name
if description:
string += ' ' + description
string += '\n'
if line_width:
last = 0
for i in range(line_width, len(sequence), line_width):
string += sequence[i-line_width:i] + '\n'
last = i
string += sequence[last:]
return string + '\n'
string += sequence
return string + '\n'
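# Hedged usage sketch (made-up name and sequence): with line_width=10 the record wraps;
# without it the whole sequence stays on a single line after the header.
assert fasta_formatted_string("seq1", "ACGTACGTACGTAC", description="demo", line_width=10) == \
    ">seq1 demo\nACGTACGTAC\nGTAC\n"
assert fasta_formatted_string("seq1", "ACGT") == ">seq1\nACGT\n"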
|
def replace_all(text, dic, filtered=None):
""" Searches for specific keys in TEXT and replaces them with the
value from DIC.
Uses a dictionary to find specific keys and replace them with the
relevant value pair. DIC holds the key:value look up information.
Args:
        text: an iterable of lines to search through
        dic: the dictionary containing the key:value pairs to search for and substitute
        filtered: optional list to append the replaced lines to; defaults to a new empty list
"""
if filtered is None:
filtered = []
for line in text:
for key, value in dic.items():
line = line.replace(key, value)
filtered.append(line)
return filtered
|
def var_is_false(var):
"""
    Returns True only if var is the boolean False, else False. Falsy values that are
    not the boolean False (e.g. 0 or an empty string) should still return False here.
:param var: any variable.
:return: boolean
"""
return not var and isinstance(var, bool)
|
def is_reachable(peer):
"""
Return true if the peer has an endpoint i.e.
it is reachable from the outside.
In other words, returns false for dynamic peers
"""
return "endpoint" in peer
|
def swap_uuid_prefix_and_suffix(uuid_string: str) -> str:
"""Swap the first 12 and last 12 hex digits of a uuid string.
Different databases implement uuid comparison differently (see UUIDOrdering). This function
is useful as a helper method to implement the LastSixBytesFirst ordering method based on the
LeftToRight ordering method.
args:
uuid_string: uuid string
Returns:
the input with the first and last 12 hex digits swapped
"""
segments = uuid_string.split("-")
segment_lengths = tuple(len(segment) for segment in segments)
expected_segment_lengths = (8, 4, 4, 4, 12)
if expected_segment_lengths != segment_lengths:
raise AssertionError(f"Unexpected segment lengths {segment_lengths} in {uuid_string}")
new_segments = [
segments[4][:8],
segments[4][8:],
segments[2],
segments[3],
segments[0] + segments[1],
]
return "-".join(new_segments)
|
def getSpectroscopicParmLabel(expt_type):
"""
Returns the label for the spectroscopic parameter in the plot group.
Parameters
----------
expt_type : str
Type of the experiment - found in the parms.txt file
Returns
-------
str
label for the spectroscopic parameter axis in the plot
"""
if expt_type in ['DC modulation mode', 'current mode']:
return 'DC Bias'
elif expt_type == 'AC modulation mode with time reversal':
return 'AC amplitude'
return 'User Defined'
|
def _convert_pascal_to_camel(pascal_case_string: str) -> str:
"""
Convert a string provided in PascalCase to camelCase
"""
return pascal_case_string[:1].lower() + pascal_case_string[1:]
|
def _get_python_function_arguments(f):
"""
Helper to get the parameter names and annotations of a Python function.
"""
# Note that we only return non-optional arguments (we assume that any optional args are not specified).
    # This allows, e.g., accepting max(a, b, *more, name='') as a binary function
from inspect import getfullargspec
param_specs = getfullargspec(f)
annotations = param_specs.annotations
arg_names = param_specs.args
# "if this tuple has n elements, they correspond to the last n elements listed in args"
defaults = param_specs.defaults
if defaults:
# we allow Function(functions with default arguments),
# but those args will always have default values since CNTK Functions do not support this
arg_names = arg_names[:-len(defaults)]
return arg_names, annotations
|
def rewrite_path(path, template):
"""Converts source path to destination path based on template
:param path: string, example: a.#0.name
    :param template: template string with {index} placeholders, example: b.{1}.name
:return: string, example: b.#0.name
"""
path_parts = path.split('.')
template_parts = template.split('.')
for t in template_parts:
if t.startswith('{') and t.endswith('}'):
key_idx = int(t[1:-1])
if key_idx < len(path_parts):
key_name = path_parts[key_idx]
template = template.replace(t, key_name, 1)
return template
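# Hedged usage sketch: the code expects {index}-style placeholders in the template
# (e.g. "b.{1}.name", a hypothetical template), each replaced by the path part at that index.
assert rewrite_path("a.#0.name", "b.{1}.name") == "b.#0.name"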
|
def change_digits(digits, num):
"""Change digits by num"""
n = len(digits) - 1
digits[n] += num
while digits[n] < 0:
while digits[n] < 0:
digits[n] += 10
digits[n - 1] -= 1
n -= 1
return digits
|
def create_callback_zone(action, num):
""" Create the callback data associated to each button"""
return ";".join([action, str(num)])
|
def parse_nick(name):
""" parse a nickname and return a tuple of (nick, mode, user, host)
<nick> [ '!' [<mode> = ] <user> ] [ '@' <host> ]
"""
try:
nick, rest = name.split('!')
except ValueError:
return (name, None, None, None)
try:
mode, rest = rest.split('=')
except ValueError:
mode, rest = None, rest
try:
user, host = rest.split('@')
except ValueError:
        return (nick, mode, rest, None)
    return (nick, mode, user, host)
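# Hedged usage sketch with made-up IRC prefixes, assuming the 4-tuple described in the docstring.
assert parse_nick("alice!ident@example.org") == ("alice", None, "ident", "example.org")
assert parse_nick("alice") == ("alice", None, None, None)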
|
def rotate_right(head, k):
"""
Rotate the given linked list to the right by k places
:param head: head node of given linked list
:type head: ListNode
:param k: position to rotate
:type k: int
:return: head node of rotated linked list
:rtype: Node
"""
# basic case
if head is None:
return head
# get length of linked list
length = 1
end = head
while end.next is not None:
length += 1
end = end.next
# get actual k
k %= length
# get k-1 node
node_k_pre = head
for _ in range(length - k - 1):
node_k_pre = node_k_pre.next
# rotate list
if node_k_pre.next is not None:
node_k = node_k_pre.next
node_k_pre.next = None
end.next = head
else:
node_k = head
return node_k
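# Hedged usage sketch: _ExampleListNode is a stand-in for the ListNode type named in the
# docstring (assumed to expose only `val` and `next`); rotating 1-2-3-4-5 right by 2 gives 4-5-1-2-3.
class _ExampleListNode:
    def __init__(self, val, next=None):
        self.val = val
        self.next = next

_head = _ExampleListNode(1, _ExampleListNode(2, _ExampleListNode(3, _ExampleListNode(4, _ExampleListNode(5)))))
_node = rotate_right(_head, 2)
_vals = []
while _node is not None:
    _vals.append(_node.val)
    _node = _node.next
assert _vals == [4, 5, 1, 2, 3]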
|
def user_can_create_edit_delete(user, obj):
"""A simple function to see if a user is authorized to create, edit,
or update events or upload event objects. Great Lakes stocking
Coordinator (based at USWFS Green Bay Office) can create, edit, or
    delete anyone's event (True). Agency Stocking Coordinators are only able
    to create, edit, or delete their own events (True); other users cannot (False).
Arguments:
- `user`: a django user object
- `obj`: either a stocking event or stocking event uploads object
"""
if not hasattr(user, "role"):
return False
if type(obj) is dict:
lake = obj.get("lake", "")
agency = obj.get("agency", "")
else:
lake = obj.lake
agency = obj.agency
if user.role == "glsc":
return True
elif user.role == "asc" and agency == user.agency and lake in user.lakes.all():
return True
else:
return False
|
def test_callback(container, text=''):
"""
A callback used for basic testing.
"""
return {
'actions':
[
{
'action': 'chat.postMessage',
'kwargs': {
'text': '{}'.format(text)
}
}
]
}
|
def padding_format(padding):
"""
    Checks that the given padding corresponds to a supported format and normalizes it.
Parameters
----------
padding : str
Must be one of the following:"same", "SAME", "VALID", "valid"
Returns
-------
str "SAME" or "VALID"
"""
if padding in ["SAME", "same"]:
padding = "SAME"
elif padding in ["VALID", "valid"]:
padding = "VALID"
    elif padding is None:
padding = None
elif isinstance(padding, tuple) or isinstance(padding, int):
return padding
else:
raise Exception("Unsupported padding: " + str(padding))
return padding
|
def sigmoid_derivative(x):
"""
Actual derivative: S'(x) = S(x)(1-S(x)) but the inputs have already gone
through the sigmoid function.
"""
return x * (1 - x)
|
def GetMinValue(list):
"""Get the min value in a list.
Args:
list: a value list.
Returns:
the min value in the list.
"""
minv = list[0]
for x in list:
if x < minv:
minv = x
return minv
|
def bin_data(x, t, bin_width, bin_start):
    """
    Bin data.
    Bin the provided data into evenly-distributed time bins.
    """
    # Define time grid
    t_bins = []
    x_bins = []
    # Iterate through time bins (minimal sketch, assuming samples are grouped into
    # half-open bins [edge, edge + bin_width) and each bin is labelled by its centre).
    edge = bin_start
    while t and edge <= max(t):
        x_bins.append([xi for xi, ti in zip(x, t) if edge <= ti < edge + bin_width])
        t_bins.append(edge + bin_width / 2.0)
        edge += bin_width
    return x_bins, t_bins
|
def forcetoint(src) -> int:
"""Forces the input value to an int, on error, returns 0"""
try:
return int(src)
except Exception:
return 0
|
def append_slash(dev):
"""Append a final slash, if needed."""
if not dev.endswith("/"):
dev += "/"
return dev
|
def binding_str(binding):
"""Handles string conversion of either dictionary or Unifier."""
if isinstance(binding, dict):
s = ",".join(["{}: {}".format(str(var), str(val))
for var, val in binding.items()])
return '{' + s + '}'
else:
return str(binding)
|
def get_key_recursive(search_dict, field):
"""
Takes a dict with nested lists and dicts,
and searches all dicts for a key of the field
provided.
- modified from: https://stackoverflow.com/a/20254842
"""
fields_found = []
for key, value in search_dict.items():
if key == field:
fields_found.append(value)
elif isinstance(value, dict):
results = get_key_recursive(value, field)
for result in results:
fields_found.append(result)
elif isinstance(value, list):
for item in value:
if isinstance(item, dict):
more_results = get_key_recursive(item, field)
for another_result in more_results:
fields_found.append(another_result)
return fields_found
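# Hedged usage sketch on a small made-up nested structure: every value stored under the
# key "id" is collected, regardless of how deeply it is nested in dicts or lists.
_example = {"id": 1, "child": {"id": 2, "items": [{"id": 3}, {"name": "x"}]}}
assert sorted(get_key_recursive(_example, "id")) == [1, 2, 3]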
|
def split(versionstring):
"""Split the version string 'X.Y.Z' and return tuple (int(X), int(Y), int(Z))"""
assert versionstring.count('.') == 2, "Version string must be of the form str('X.Y.Z')"
return tuple([int(x) for x in versionstring.split('.')])
|
def _MethodCallRepr(message):
"""Gives a string representation of |obj|.|method|(*|args|, **|kwargs|)
Args:
message: A MetricCall object.
"""
if not message:
return repr(message)
obj = message.metric_name
method = message.method
args = message.method_args
kwargs = message.method_kwargs
args_strings = ([repr(x) for x in args] +
[(str(k) + '=' + repr(v))
for k, v in kwargs.items()])
return '%s.%s(%s)' % (repr(obj), method, ', '.join(args_strings))
|
def average(values):
"""Computes the arithmetic mean of a list of numbers.
>>> print(average([20, 30, 70]))
40.0
"""
return sum(values) / len(values)
|
def create_environment(**kwargs):
""" Format args for AWS environment
Writes argument pairs to an array {name, value} objects, which is what AWS wants for
environment overrides.
"""
return [{'name': k, 'value': v} for k, v in kwargs.items()]
|
def set_indent(x: str, n: int = 2):
"""
Args:
x:
n:
Returns:
"""
indent = ' ' * n
x = indent + x
x = x.replace('\n', '\n' + indent)
return x
|
def normalize_ensembl_genes(ensgenes):
"""
:param ensgenes:
:return:
"""
ens_genes = []
for ensid, trans in ensgenes.items():
strands = set(t['strand'] for t in trans)
assert len(strands) == 1, 'Switching strands: {}'.format(trans)
strand = strands.pop()
feat_types = set([t['feature_type'] for t in trans])
assert len(feat_types) == 1, 'Mixed feature types: {}'.format(trans)
feat_type = feat_types.pop()
gene_names = set([t['gene_name'] for t in trans])
assert len(gene_names) == 1, 'Different gene names: {}'.format(trans)
gene_name = gene_names.pop()
chrom = trans[0]['chrom']
left = str(min([int(t['start']) for t in trans]))
right = str(max([int(t['end']) for t in trans]))
infos = {'chrom': chrom, 'start': left, 'end': right, 'strand': strand,
'feature_type': feat_type, 'feature': 'gene', 'score': '0',
'name': ensid, 'gene_name': gene_name}
ens_genes.append(infos)
return ens_genes
|
def le_calibration_func(etr, kc, ts):
"""Latent heat flux at the calibration points
Parameters
----------
etr : scalar or array_like
kc : scalar or array_like
ts : scalar or array_like
Surface temperature [K].
Returns
-------
scalar or array_like
Notes
-----
1000000 / 3600 in [1] was simplified to 2500 / 9
References
----------
.. [1] Allen, R., Tasumi, M., & Trezza, R. (2007). Satellite-Based Energy
Balance for Mapping Evapotranspiration with Internalized Calibration
(METRIC)-Model. Journal of Irrigation and Drainage Engineering, 133(4).
https://doi.org/10.1061/(ASCE)0733-9437(2007)133:4(380)
"""
return etr * kc * (2.501 - 2.361E-3 * (ts - 273)) * 2500 / 9
|
def keyValueList(d, key=None):
"""
    Iterates over all key-value pairs of a dictionary (recursing into nested dicts and lists)
    and returns a list of (key, value) tuples whose values are primitive (i.e., not a list or
    dict), e.g. strings or numbers.
d -- a dictionary to iterate through
"""
if not isinstance(d, dict) and not isinstance(d, list):
return []
keyvalues = []
if isinstance(d, list):
for entry in d:
if isinstance(entry, dict):
keyvalues.extend(keyValueList(entry))
else:
keyvalues.append((key, entry))
else:
for k, v in d.items():
if k is None or v is None:
continue
            if not isinstance(v, dict) and not isinstance(v, list):
keyvalues.append((k,v))
elif isinstance(v, list):
keyvalues.extend(keyValueList(v, k))
else:
keyvalues.extend(keyValueList(v))
return keyvalues
|
def retry_on_connection_error(exc: Exception):
"""Return True if there is an connection error exception."""
return isinstance(exc, (ConnectionError, TimeoutError, IOError))
|
def attr_list(x):
"""
parser for attribute lists on cli
"""
if ',' in x:
return [y for y in x.split(',') if y.strip()]
else:
return [x]
|
def cleanup_favorite(favorite):
"""Given a dictionary of a favorite record, return a new dictionary for
output as JSON."""
return str(favorite["post"])
|
def shorten_name(name):
"""Shortens a parkour room name."""
if name[0] == "*":
return name.replace("#parkour", "", 1)
return name.replace("-#parkour", "", 1)
|
def modevaltohex(mode_val):
""" convert mode_val value that can be either xeh string or int to a hex string """
if isinstance(mode_val, int):
return (hex(mode_val)[2:]).upper()
if isinstance(mode_val, str):
return mode_val
return None
|
def _get_values(dct: dict) -> dict:
"""Remove description / value metadata from dictionary recursively."""
return {
k: v["value"]
if isinstance(v, dict) and "value" in v
else _get_values(v)
if isinstance(v, dict)
else v
for k, v in dct.items()
}
|
def intersect(a1, b1, a2, b2):
"""finds the intersection of two lines"""
x = (b1 - b2)/(a2 - a1)
y = a1*x + b1
return x,y
|
def aod_type(dataID, stdoutFLG=True):
"""
input_parameters
=> dataID: label used to identify data set in the obsys_rc file
=> stdoutFLG: set to False, if calling directly from Python code
purpose
Identify AOD data type
return value
=> aod_type_val: AOD data type
"""
# modis terra
#------------
if dataID[0:3] == "mod":
if dataID.find("land") > 1:
aod_type_val = "terra_land"
elif dataID.find("ocean") > 1:
aod_type_val = "terra_ocean"
else:
aod_type_val = "terra_L2"
# modis aqua
#-----------
elif dataID[0:3] == "myd":
if dataID.find("land") > 1:
aod_type_val = "aqua_land"
elif dataID.find("ocean") > 1:
aod_type_val = "aqua_ocean"
else:
aod_type_val = "aqua_L2"
# others
#-------
else:
aod_type_val = dataID
# return value
#-------------
if stdoutFLG:
print(aod_type_val)
return aod_type_val
|
def number_to_name(number):
"""Take integer number as input (0-1-2-3-4) and returns string (rock-spock-paper-lizard-scissor)
"""
if number == 0:
return "rock"
elif number == 1:
return "spock"
elif number == 2:
return "paper"
elif number == 3:
return "lizard"
elif number == 4:
return "scissor"
else:
return "Error"
|
def _is_gradient_task(task_id, num_tasks):
"""Returns True if this task should update the weights."""
if num_tasks < 3:
return True
return 0 <= task_id < 0.6 * num_tasks
|
def ip_to_int(a, b, c, d):
""" Returns an integer
For example 192.168.43.43 returns 3232246571
"""
rv = (a * 16777216) + (b * 65536) + (c * 256) + (d)
return rv
|
def black_invariant(text, chars=None):
"""Remove characters that may be changed when reformatting the text with black"""
if chars is None:
chars = [" ", "\t", "\n", ",", "'", '"', "(", ")", "\\"]
for char in chars:
text = text.replace(char, "")
return text
|
def djangocms_misc_page_link(context, lookup, css_class='', link_text='', link_text_attr=''):
"""
link_text_attr is not working (yet)
"""
if not link_text_attr:
link_text_attr = 'title'
context.update({
'lookup': lookup,
'css_class': css_class,
'link_text': link_text,
'link_text_attr': link_text_attr,
})
return context
|
def check_dependent_step_names(workflow):
"""
What it does:
I. check if every calculation step copies from the correct parent calculation step (tag: copy_which_step)
    II. check if every calculation step has an array of correct additional dependent steps (tag: additional_cal_dependence)
If there is an error, raise it; Otherwise, return True
"""
if len(workflow) > 1:
cal_name_list = [firework["firework_folder_name"] for firework in workflow]
#Task I @ copy_which_step
for firework in workflow[1:]:
if firework["copy_which_step_full_name"] == "None":
#this is the case where it is not the first step but creates its vasp input files from scratch.
continue
assert firework["copy_which_step_full_name"] in cal_name_list, "tag copy_which_step in {} refers to a non-existent parent step: {}".format(firework["firework_folder_name"],
firework["copy_which_step_full_name"])
#Task II @ additional_cal_dependence
for firework in workflow[1:]:
for dep_cal_name in firework["additional_dependence_full_name"]:
assert dep_cal_name in cal_name_list, "tag additional_cal_dependence in {} refers to a non-existent additional dependent calculation step: {}".format(
firework["firework_folder_name"], dep_cal_name)
return True
|
def execEmbCode(SCOPE, NAME, VAL, TEAL, codeStr):
""" .cfgspc embedded code execution is done here, in a relatively confined
space. The variables available to the code to be executed are:
SCOPE, NAME, VAL, PARENT, TEAL
The code string itself is expected to set a var named OUT
"""
PARENT = None
if TEAL:
PARENT = TEAL.top
OUT = None
ldict = locals() # will have OUT in it
exec(codeStr, globals(), ldict) # nosec
return ldict['OUT']
|
def format_value(number):
"""
Format the value with ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y').
Defines the format of the value that will be shown on the /_health endpoint,
this could be K, M, G ,etc
    :param number: The value to be used.
:return: Formatted value.
"""
symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
prefix = {}
for index, symbol in enumerate(symbols):
prefix[symbol] = 1 << (index + 1) * 10
for symbol in reversed(symbols):
if number >= prefix[symbol]:
value = float(number) / prefix[symbol]
return '%.1f%s' % (value, symbol)
return "%sB" % number
|
def block_comments_begin_with_a_space(physical_line, line_number):
"""There should be a space after the # of block comments.
There is already a check in pep8 that enforces this rule for
inline comments.
Okay: # this is a comment
Okay: #!/usr/bin/python
Okay: # this is a comment
K002: #this is a comment
"""
MESSAGE = "K002 block comments should start with '# '"
# shebangs are OK
if line_number == 1 and physical_line.startswith('#!'):
return
text = physical_line.strip()
if text.startswith('#'): # look for block comments
if len(text) > 1 and not text[1].isspace():
return physical_line.index('#'), MESSAGE
|
def get_intersection(features_list1: list, features_list2: list) -> list:
"""
Get intersection between two list of features as strings list
:param features_list1: first list of features
:param features_list2: second list of features
:return: a list of string, composed of all elements both in features_list1 and features_list2
"""
features_intersection = [value for value in features_list1 if value in features_list2]
return features_intersection
|
def cycle_check(cycle_line):
"""find the cycle
This is not efficient, but for a small sequence (50 ints) it works ok.
Consider deque.
Pop the first item from the front of the list. See if that item exists
still in the list. If so, construct the loop list to return.
"""
while cycle_line:
check1 = cycle_line.pop(0)
if check1 in cycle_line:
loop_point = cycle_line.index(check1)
loop = cycle_line[:loop_point]
loop.insert(0, check1)
return loop
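# Hedged usage sketch: the first value seen again marks the start of the loop, which is
# returned in order (here 2 -> 3 -> 4 -> back to 2).
assert cycle_check([1, 2, 3, 4, 2]) == [2, 3, 4]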
|
def convert_title(original_title):
"""Remove underscores from string"""
new_title = original_title.replace("_", " ").title()
new_title = new_title.replace("Api", "API")
return new_title
|
def _get_datapoint(datapoints, name, tags=None):
"""Find a specific datapoint by name and tags
:param datapoints: a list of datapoints
:type datapoints: [dict]
:param name: the name of the required datapoint
:type name: str
:param tags: required tags by key and value
:type tags: dict
:return: a matching datapoint
:rtype: dict
"""
for datapoint in datapoints:
if datapoint['name'] == name:
if tags is None:
return datapoint
dtags = datapoint.get('tags', {})
tag_match = True
for k, v in tags.items():
tag_match = tag_match and dtags.get(k) == v
if tag_match:
return datapoint
|
def ascii_identify(origin, *args, **kwargs):
"""Check whether given filename is ASCII."""
return (isinstance(args[0], str) and
args[0].lower().split('.')[-1] in ['txt', 'dat'])
|
def max_sum_subarr(arr_a: list, arr_b: list) -> list:
"""
Time Complexity: O(m+n)
    Space Complexity: O(n)
"""
set_b: set = set(arr_b)
start, end, temp_start = -1, -1, 0
max_so_far, max_ending_here = 0, 0
for i, elem in enumerate(arr_a):
if elem not in set_b:
if max_ending_here + elem > 0:
max_ending_here += elem
elif elem > max_ending_here:
max_ending_here = elem
temp_start = i
else:
temp_start = i + 1
if max_ending_here > max_so_far:
max_so_far = max_ending_here
end = i
start = temp_start
else:
max_ending_here = 0
temp_start = i + 1
return arr_a[start : end + 1]
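# Hedged usage sketch: the maximum-sum contiguous run of arr_a that skips any value
# present in arr_b (here -3 is excluded, so [4, 5] beats [2, 1]).
assert max_sum_subarr([2, 1, -3, 4, 5], [-3]) == [4, 5]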
|
def validate_operator(operator_id, operator_config):
"""Method to validate operator_id recevd against defined operators in config."""
operator_ids = [op.id for op in operator_config]
    return operator_id in operator_ids
|
def sigmoid_derivative(x):
"""
The derivative of the Sigmoid function.
This is the gradient of the Sigmoid curve.
It indicates how confident we are about the existing weight.
"""
return x * (1 - x)
|
def split_list(l, n_part):
"""
:param list l:
:param int n_part:
:rtype: list[list]
"""
ret = list()
for start_idx in range(n_part):
ret.append(l[start_idx::n_part])
return ret
|
def dict_assign(obj, key, value):
"""Chainable dictionary assignment. Returns a copy.
Parameters
----------
obj : dict
A dictionary
key : string
Which attribute to set. May be a new attribute, or overwriting an existing one
value
A value of any type
Returns
-------
dict
A new dictionary, with the given attribute set to the given value
"""
new_dict = dict(obj)
new_dict[key] = value
return new_dict
|
def rectangle(f, a, b, n, height='left'):
"""Uses a rectangle method for integrating f. The height of
each rectangle is computed either at the left end, middle or
right end of each sub-interval"""
h = float(b-a)/n
if height == 'left':
start = a
elif height == 'mid':
start = a + h/2.0
else: # Must be right end
start = a + h
result = 0
for i in range(n):
result += f((start) + i*h)
result *= h
return result
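# Hedged usage sketch: the midpoint rule integrates f(x) = x over [0, 1] essentially exactly.
assert abs(rectangle(lambda x: x, 0.0, 1.0, 100, height='mid') - 0.5) < 1e-9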
|
def format_arguments(*args):
"""
Converts a list of arguments from the command line into a list of
positional arguments and a dictionary of keyword arguments.
Handled formats for keyword arguments are:
* --argument=value
* --argument value
Args:
*args (list): a list of arguments
Returns:
([positional_args], {kwargs})
"""
positional_args = []
kwargs = {}
split_key = None
for arg in args:
if arg.startswith('--'):
arg = arg[2:]
if '=' in arg:
key, value = arg.split('=', 1)
kwargs[key.replace('-', '_')] = value
else:
split_key = arg.replace('-', '_')
elif split_key:
kwargs[split_key] = arg
split_key = None
else:
positional_args.append(arg)
return positional_args, kwargs
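# Hedged usage sketch covering a positional argument and both keyword styles
# (dashes in keys are normalised to underscores).
assert format_arguments('run', '--dry-run=yes', '--log-level', 'debug') == \
    (['run'], {'dry_run': 'yes', 'log_level': 'debug'})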
|
def parse_bags(bag_restrictions: list) -> list:
"""Parse the list of raw bag restrictions given by the input file
:param bag_restrictions: List of raw bag restrictions
:return: Parsed bag restrictions
"""
parsed_restrictions = []
for line in bag_restrictions:
parent_str, children_str = line.split(' contain ')
parent_str = parent_str[:-5]
children_list = children_str.split(', ')
children_list = [child.split(' bag')[0] for child in children_list]
if children_list[0] == 'no other':
children_list = []
else:
children_list = [child.split(' ', maxsplit=1)
for child in children_list]
parsed_restrictions.append((parent_str, children_list))
return parsed_restrictions
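# Hedged usage sketch on two Advent-of-Code-style rules with made-up colours; a parent
# maps to [count, colour] pairs, and "no other bags" maps to an empty list.
_rules = [
    "light red bags contain 1 bright white bag, 2 muted yellow bags.",
    "faded blue bags contain no other bags.",
]
assert parse_bags(_rules) == [
    ("light red", [["1", "bright white"], ["2", "muted yellow"]]),
    ("faded blue", []),
]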
|
def to_sql_list(xs):
"""stringify lists for SQL queries
    >>> to_sql_list([1, 2, 3])
    '1, 2, 3'
"""
def to_sql_literal(x):
if isinstance(x, str):
return "'{}'".format(x)
return str(x)
res = ", ".join(map(to_sql_literal, xs))
return res
|
def is_empty(string):
"""
Determines whether the provided string is empty or None or consists of only empty spaces
:param string:
:type str:
:return: bool
"""
return string is None or len(string) == 0 or not bool(string.replace(' ', ''))
|
def get_source_path(path):
"""
If the path is for the .pyc, then removes the character 'c'.
:param path: string
:return: corrected path
"""
if path[-1] == 'c':
return path[:-1]
return path
|
def ascii_to_hex(__ascii):
"""
translates ASCII string into an array of hex ASCII codes
"""
return [hex(ord(c)).replace('0x', '') for c in __ascii]
|
def RTrim(text):
"""Strip spaces from the right of the text"""
return str(text).rstrip()
|
def list_to_dict(list):
"""creates a dictionary out of a list
assuming the list is written in alternating kwarg, arg,...
the dictionary will be written as {kwarg:arg,..}. The first entry in the list is
    omitted, because that is usually a name"""
dictio = {}
    for i in range(1, len(list), 2):  # iterate through every second item of the list, starting at the second value (the first entry is skipped)
        dictio[list[i]] = list[i+1]
return dictio
|
def s2s(s):
"""convert set to string"""
return "{%s}" % ", ".join([str(x) for x in s])
|
def in_decimalhours(value):
""" Returns decimal time in hours.
e.g. 120 min = 2"""
    try:
        temp = int(value)
    except (TypeError, ValueError):
        raise ValueError('Error in "in_decimalhours" filter. Variable must be convertible to int.')
    output = temp / 60
    return output
|
def calc_permutation(m, mm, _accuracy):
"""
Evaluates the permutation expression
"""
value = 1
return value
|
def get_significant_parts(line):
""" line is list of strings
function returns list of nonempty elements"""
wyn = []
for element in line:
if element != '':
wyn.append(element)
return wyn
# end get_significant_parts
|
def pad_sentences(sentences, padding_word="<PAD/>", maxlen=0):
"""
Pads all the sentences to the same length. The length is defined by the longest sentence.
Returns padded sentences.
"""
print('pad sentences...')
if maxlen > 0:
sequence_length = maxlen
else:
sequence_length = max(len(s) for s in sentences)
print('max sequence length: {}'.format(sequence_length))
padded_sentences = []
for i in range(len(sentences)):
sentence = sentences[i]
num_padding = sequence_length - len(sentence)
replaced_newline_sentence = []
for char in list(sentence):
if char == "\n":
replaced_newline_sentence.append("<NEWLINE/>")
elif char == " ":
replaced_newline_sentence.append("<SPACE/>")
else:
replaced_newline_sentence.append(char)
new_sentence = replaced_newline_sentence + [padding_word] * num_padding
# new_sentence = sentence + [padding_word] * num_padding
padded_sentences.append(new_sentence)
return padded_sentences
|
def implies(cond1: bool, cond2: bool) -> bool:
"""Logical Implication, i.e. cond1 => cond2"""
return (not cond1) or cond2
|
def reverse_number(num):
"""
Reverse a number.
"""
return int(str(num)[::-1])
|
def causal(effective_kernel_size: int):
"""Pre-padding such that output has no dependence on the future."""
return [effective_kernel_size - 1, 0]
|
def get_most_likely(emo, likelihood, most_likely_emotion):
"""Dado dos emociones conserva la de mayor probabilidad"""
likelihood = likelihood.replace('Likelihood.', '')
if likelihood == "VERY_LIKELY":
return (emo,likelihood)
if (likelihood == "LIKELY" and most_likely_emotion[1] != "VERY_LIKELY" ):
return (emo,likelihood)
if (likelihood == "POSSIBLE" and most_likely_emotion[1] == "-" ):
return (emo,likelihood)
return most_likely_emotion
|
def sift(expr, keyfunc):
"""Sift the arguments of expr into a dictionary according to keyfunc.
INPUT: expr may be an expression or iterable; if it is an expr then
it is converted to a list of expr's args or [expr] if there are no args.
OUTPUT: each element in expr is stored in a list keyed to the value
of keyfunc for the element.
EXAMPLES:
>>> from sympy.utilities import sift
>>> from sympy.abc import x, y
>>> from sympy import sqrt, exp
>>> sift(range(5), lambda x: x%2)
{0: [0, 2, 4], 1: [1, 3]}
It is possible that some keys are not present, in which case you should
    use dict's .get() method:
>>> sift(x+y, lambda x: x.is_commutative)
{True: [y, x]}
>>> _.get(False, [])
[]
Sometimes you won't know how many keys you will get:
>>> sift(sqrt(x) + x**2 + exp(x) + (y**x)**2,
... lambda x: x.as_base_exp()[0])
{E: [exp(x)], x: [x**(1/2), x**2], y: [y**(2*x)]}
>>> _.keys()
[E, x, y]
"""
d = {}
if hasattr(expr, 'args'):
expr = expr.args or [expr]
for e in expr:
d.setdefault(keyfunc(e), []).append(e)
return d
|
def _get_subtypes(sub_hier, subtypes=None):
"""
    A recursive function (I'm sorry) that lists all subtypes at the terminal nodes of a hierarchical branch.
...
Parameters
__________
sub_hier : Dict
Hierarchical dictionary (a branch in the hierarchy)
subtypes : List
A list of all leaf nodes within the supplied hierarchical branch.
Returns
__________
subtypes : List
A list of all leaf nodes within the supplied hierarchical branch.
"""
    if subtypes is None:  # avoid sharing a mutable default list across calls
        subtypes = []
    if sub_hier == False:  # Recursion stop condition
        return False
for parent in sub_hier.keys():
if isinstance(sub_hier[parent], dict):
_get_subtypes(sub_hier[parent], subtypes)
else:
subtypes.append(parent)
return subtypes
|
def hazard_id(value):
"""
>>> hazard_id('')
()
>>> hazard_id('-1')
(-1,)
>>> hazard_id('42')
(42,)
>>> hazard_id('42,3')
(42, 3)
>>> hazard_id('42,3,4')
(42, 3, 4)
>>> hazard_id('42:3')
Traceback (most recent call last):
...
ValueError: Invalid hazard_id '42:3'
"""
if not value:
return ()
try:
return tuple(map(int, value.split(',')))
except:
raise ValueError('Invalid hazard_id %r' % value)
|
def trim_float(value: float, places: int = 2) -> float:
"""Trim a float to N places.
Args:
value: float to trim
places: decimal places to trim value to
"""
if isinstance(places, int):
value = float(f"{value:.{places}f}")
return value
|
def parse_line(line):
"""Takes a line formatted as three comma seperated integers. Returns three
integers.
"""
n, p1, p2 = line.strip().split(',')
return int(n), int(p1), int(p2)
|
def BuildReachableFileSet(entry_classes, reachability_tree, header_mapping):
"""Builds a set of reachable translated files from entry Java classes.
Args:
entry_classes: A comma separated list of Java entry classes.
reachability_tree: A dict mapping translated files to their direct
dependencies.
header_mapping: A dict mapping Java class names to translated source files.
Returns:
A set of reachable translated files from the given list of entry classes.
Raises:
Exception: If there is an entry class that is not being transpiled in this
j2objc_library.
"""
transpiled_entry_files = []
for entry_class in entry_classes.split(','):
if entry_class not in header_mapping:
raise Exception(entry_class +
                      ' is not in the transitive Java deps of included ' +
'j2objc_library rules.')
transpiled_entry_files.append(header_mapping[entry_class])
# Translated files from package-info.java are also added to the entry files
# because they are needed to resolve ObjC class names with prefixes and these
# files may also have dependencies.
for transpiled_file in reachability_tree:
if transpiled_file.endswith('package-info'):
transpiled_entry_files.append(transpiled_file)
reachable_files = set()
for transpiled_entry_file in transpiled_entry_files:
reachable_files.add(transpiled_entry_file)
current_level_deps = []
# We need to check if the transpiled file is in the reachability tree
# because J2ObjC protos are not analyzed for dead code stripping and
# therefore are not in the reachability tree at all.
if transpiled_entry_file in reachability_tree:
current_level_deps = reachability_tree[transpiled_entry_file]
while current_level_deps:
next_level_deps = []
for dep in current_level_deps:
if dep not in reachable_files:
reachable_files.add(dep)
if dep in reachability_tree:
next_level_deps.extend(reachability_tree[dep])
current_level_deps = next_level_deps
return reachable_files
|
def convertToGenomicCoordinate(transcriptomic_coordinate, exon_list_genomic, transcript_id):
    """Map a 1-based transcriptomic coordinate to its genomic coordinate.
    exon_list_genomic is a list of (start, end) pairs for the transcript's exons;
    returns None if the coordinate falls outside all exons.
    """
    # Build the transcriptomic intervals covered by each genomic exon.
    exon_list_transcriptomic = []
    for exon in exon_list_genomic:
        exon_start, exon_end = exon
        exon_length = exon_end - exon_start + 1
        if len(exon_list_transcriptomic) == 0:
            exon_list_transcriptomic.append([1, exon_length])
        else:
            exon_list_transcriptomic.append([exon_list_transcriptomic[-1][1] + 1,
                                             exon_length + exon_list_transcriptomic[-1][1]])
    # Locate the exon containing the coordinate and project it back onto the genome.
    for exon_num, exon in enumerate(exon_list_transcriptomic):
        exon_start, exon_end = exon
        if exon_start <= transcriptomic_coordinate <= exon_end:
            return exon_list_genomic[exon_num][0] + transcriptomic_coordinate - exon_start
|
def rchop(text, end):
""" Removes the end from the text if the text ends with it. """
if text.endswith(end):
return text[:-len(end)]
return text
|
def _to_tornado_pattern(transmute_path):
"""convert a transmute path to
a tornado pattern.
"""
return (
transmute_path.replace("{", "(?P<")
# .replace("}", ">[^\/]+)"))
.replace("}", ">.*)")
)
|
def write_the_species_tree(annotated_species_tree, output_file):
"""
this function writes the species tree to file
args:
annotated_species_tree : a string of annotated species tree in .newick format
output_file : a file name to write to
output:
a file containing the annotated species tree
"""
with open(output_file, "w") as out:
out.write(annotated_species_tree)
print("wrote the annotated species besttree to "+output_file)
return output_file
|
def _qualify_optional_type(cpp_type):
# type: (str) -> str
"""Qualify the type as optional."""
return 'boost::optional<%s>' % (cpp_type)
|
def _get_hidden_node_location(flattened_index, num_rows, num_columns):
"""Converts the flattened index of a hidden node to its index in the 3D array.
Converts the index of a hidden node in the first convolution layer (flattened)
into its location- row, column, and channel in the 3D activation map. The
3D activation map has dimensions: (num_channels, num_rows, num_columns).
Args:
flattened_index: int, index of a hidden node in the first convolution
layer after it is flattened.
num_rows: int, number of rows in the activation map produced by each
kernel.
num_columns: int, number of columns in the activation map produced by each
kernel.
Returns:
channel: int, channel number of the activation map to which the hidden node
belongs to.
row: int, row number of the hidden node in the activation map.
column: int, column number of the hidden node in the activation map.
"""
total = num_rows * num_columns
output_activation_map_row = (flattened_index % total) // num_columns
output_activation_map_column = (flattened_index % total) % num_columns
return (flattened_index // total,
output_activation_map_row,
output_activation_map_column)
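# Hedged usage sketch for a made-up 4x5 activation map: flattened index 27 lands in
# channel 1 (27 // 20), row 1 and column 2 of that channel's map.
assert _get_hidden_node_location(27, num_rows=4, num_columns=5) == (1, 1, 2)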
|
def simple_hash(s: str) -> int:
"""A ridiculously simple hashing function"""
basic_hash = ord(s[0])
return basic_hash % 10
|
def _ExtractTestsFromFilter(gtest_filter):
"""Returns the list of tests specified by the given filter.
Returns:
None if the device should be queried for the test list instead.
"""
# Empty means all tests, - means exclude filter.
if not gtest_filter or '-' in gtest_filter:
return None
patterns = gtest_filter.split(':')
# For a single pattern, allow it even if it has a wildcard so long as the
# wildcard comes at the end and there is at least one . to prove the scope is
# not too large.
# This heuristic is not necessarily faster, but normally is.
if len(patterns) == 1 and patterns[0].endswith('*'):
no_suffix = patterns[0].rstrip('*')
if '*' not in no_suffix and '.' in no_suffix:
return patterns
if '*' in gtest_filter:
return None
return patterns
|
def _getIfromRGB(rgb):
"""Retrieve if from a specific layer color.
Parameters
----------
rgb :
Returns
-------
"""
red = rgb[2]
green = rgb[1]
blue = rgb[0]
RGBint = (red << 16) + (green << 8) + blue
return RGBint
|
def calc_weight(judge_i, pairing_i):
""" Calculate the relative badness of this judge assignment
We want small negative numbers to be preferred to large negative numbers
"""
return -1 * abs(judge_i - (-1 * pairing_i))
|
def truncate_stats(stat, threshold, name='Other'):
"""
Combines all entries (name, count) with a count below the threshold and appends a new entry
:return: Truncated statistics with the last item being the addup of all truncated items.
"""
a, b = [], []
for s in stat:
(a, b)[s[1] < threshold].append(s)
c = 0
for s in b:
c += s[1]
if c > 0:
a.append([name, c])
return a
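# Hedged usage sketch: counts below the threshold are folded into a trailing "Other" entry.
assert truncate_stats([('a', 10), ('b', 2), ('c', 1)], threshold=5) == [('a', 10), ['Other', 3]]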
|
def findLowestFolder(list1, list2):
"""
    Compares two underscore-separated folder names numerically and returns True if the first
    sorts after the second, used to keep the project's folders in ascending order
"""
tmp_string1 = str(list1)
numbers = tmp_string1.split('_')
tmp_string2 = str(list2)
numbers2 = tmp_string2.split('_')
if int(numbers[0]) > int(numbers2[0]):
return True
elif int(numbers[0]) == int(numbers2[0]) and int(numbers[1]) > int(numbers2[1]):
return True
elif int(numbers[0]) == int(numbers2[0]) and int(numbers[1]) == int(numbers2[1]) and int(numbers[3]) > int(numbers2[3]):
return True
return False
|
def remove_duplicates(cascade_nodes,cascade_times):
"""
    # Some tweets have more than one retweet from the same person
# Keep only the first retweet of that person
"""
duplicates = set([x for x in cascade_nodes if cascade_nodes.count(x)>1])
for d in duplicates:
to_remove = [v for v,b in enumerate(cascade_nodes) if b==d][1:]
cascade_nodes= [b for v,b in enumerate(cascade_nodes) if v not in to_remove]
cascade_times= [b for v,b in enumerate(cascade_times) if v not in to_remove]
return cascade_nodes, cascade_times
|
def getValues(params):
"""Extracts the attribute data
Parameters:
params (dict) Dictionary (node_attribute: value)
Returns:
(tuple) Tuple of the node attributes
"""
path = params.get("path")
name = params.get("name")
order = params.get("order")
shape = params.get("shape")
color = params.get("color")
bottom_label = params.get("bottom_label")
side_label = params.get("side_label")
dashed_line = params.get("dash")
arrow = params.get("arrow")
jump = params.get("jump")
action_order = params.get("action_order")
left_edge_label = params.get("left_edge_label")
right_edge_label = params.get("right_edge_label")
return path,name,order,shape,color,bottom_label,side_label,dashed_line,arrow,jump,action_order,left_edge_label,right_edge_label
|
def truncate(content, length=100, suffix='...'):
"""
Smart string truncation
"""
if len(content) <= length:
return content
else:
return content[:length].rsplit(' ', 1)[0] + suffix
|
def load_ldap_settings(config):
""" Load all the ldap configuration settings into a dict
LDAP configuration settings contain an ldap_ prefix.
Args:
config (dict): the global config
Returns:
(dict) All the ldap_ settings
"""
ldap_config = {}
for key, value in config.items():
if key.lower().startswith("ldap_"):
ldap_config[key] = value
return ldap_config
|