content stringlengths 42 6.51k |
|---|
def get_minibatch_blob_names(is_training=True):
    """Return blob names in the order in which they are read by the data loader.

    :param is_training: unused; kept for interface compatibility.
    :return: list of blob name strings.
    """
    # 'data' holds a batch of N images (3 channels each); 'rois' and
    # 'labels' carry region proposals and their class labels.
    return ['data', 'rois', 'labels']
def key2param(key):
    """Convert an API key name into a valid Python identifier.

    Example: ``max-results`` -> ``max_results``. Every non-alphanumeric
    character becomes ``_``; a leading non-letter is prefixed with ``x``.
    """
    prefix = '' if key[0].isalpha() else 'x'
    body = ''.join(ch if ch.isalnum() else '_' for ch in key)
    return prefix + body
def parse_odbc(url):
    """
    Parse Redshift ODBC URL
    -----------------------
    :param url: Fully Qualified ODBC URL
    :type url: str
    :return parsed: ODBC fields parsed into respective fields.
    :rtype parsed: dict
    .. example::
        Driver={Amazon Redshift (x64)};
        Server=server_name.xxxxxx.us-east-1.redshift.amazonaws.com;
        Database=mydb;
        UID=myuser;
    """
    secrets = 'Database', 'Port', 'Server', 'UID'
    parsed = {}
    for field in url.strip().split(';'):
        if any(secret in field for secret in secrets):
            # maxsplit=1 so values that themselves contain '=' stay intact
            # (the original split('=') raised on such values)
            key, value = field.strip().split('=', 1)
            parsed[key] = value
    # Ensure all fields were serialized
    assert set(parsed) == set(secrets)
    return parsed
def is_quote(string: str, quote_index: int, is_looking_for_opening: bool) -> bool:
    """
    indicates whether the quote str[quote_index] is an opening/closing quote
    :param string: the character string where the quote is located
    :param quote_index: the index of the quote in string
    :param is_looking_for_opening: True if you are looking for an opening quote, False for a closing quote
    :return: True if it is a delimiting quote corresponding to the corresponding to the type requested, False otherwise
    """
    if is_looking_for_opening:
        # Scan left from the quote, skipping spaces: an opening quote is
        # preceded (ignoring spaces) by start-of-string or a ':'.
        j = quote_index - 1
        if j < 0:
            j = 0  # clamp so string[j] stays in bounds
        while j > 0 and string[j] == ' ':
            j -= 1
        if j == 0 or string[j] == ":":
            return True
        else:
            return False
    else:
        # Scan right from the quote, skipping spaces: a closing quote is
        # followed (ignoring spaces) by end-of-string or a ':' and must not
        # be escaped by a backslash immediately before it.
        j = quote_index + 1
        if j > len(string) - 1:
            j = len(string) - 1  # clamp to last valid index
        while j < len(string) - 1 and string[j] == ' ':
            j += 1
        temp = quote_index - 1  # index of the character preceding the quote
        if temp < 0:
            temp = 0  # clamp: a quote at index 0 cannot be escaped
        if (j == len(string) - 1 or string[j] == ":") and string[temp] != "\\":
            return True
        else:
            return False
def simple_div(n):
    """
    Calculate the divisors of n, in descending order. Used to split arrays
    into groups of integer size, or to assess how large the batch size
    should be when training a model.
    :param n: positive integer
    :return: list of divisors of n, largest first
    """
    divisors = []
    for candidate in range(n, 0, -1):
        if n % candidate == 0:
            divisors.append(candidate)
    return divisors
def handle_api_error(e):
    """Returns 500 internal server error when Giphy API doesn't respond successfully.

    :param e: the underlying exception/error to report
    :return: (message, status_code) tuple suitable for a Flask-style response
    """
    message = f"Failed to call Giphy API: {e}"
    return message, 500
def _inversion_simple(a3, a0):
"""Solve for degree 5 polynomial with coefficients a5=1, a3=0., a0."""
return (-a0)**(1./5.) |
def funded_by_grant(paper, grant_id):
    """
    Determine whether the paper was funded by the specified grant,
    and if specified, the rationale behind how the paper aligns with the grant.
    Parameters
    ----------
    paper : dict
        The 'paper' entry from the papers.yaml database.
    grant_id : str
        The grant id from grants.yaml to query for
    Returns
    -------
    rationale : bool or str
        If a rationale is provided, a str containing the rationale of how the paper aligns with the grant is provided.
        If no rationale is provided, True is returned if the paper contains the grant_id as a funding source, False otherwise.
    """
    try:
        # Funding entries are either plain grant-id strings or dicts with
        # an 'id' and an optional 'rationale'.
        for grant in paper['funding']:
            if isinstance(grant, dict):
                # .get avoids a KeyError aborting the whole scan when a
                # malformed entry lacks an 'id' key
                if grant.get('id') == grant_id:
                    # Return the rationale text when present, else just True
                    return grant.get('rationale', True)
            elif isinstance(grant, str):
                if grant == grant_id:
                    return True
    except (KeyError, TypeError):
        # No 'funding' entry, or it is not iterable — treat as unfunded.
        pass
    return False
def str_to_bool(string_in: str):
    """convert a string to boolean
    Args:
        string_in (string): Text from config file value
    Raises:
        ValueError: Cannot convert the string to a bool as it isn't True or False
    Returns:
        Boolean: True/False value
    """
    lowered = string_in.lower()
    if lowered == "true":
        return True
    if lowered == "false":
        return False
    raise ValueError(f"Cannot convert {string_in} to a bool")
def get_dict_buildin(dict_obj, _type=(int, float, bool, str, list, tuple, set, dict)):
    """
    Return dict_obj unchanged when every value is a built-in type;
    otherwise return a filtered copy with non-built-in values dropped.
    """
    if all(isinstance(value, _type) for value in dict_obj.values()):
        return dict_obj
    return {key: value for key, value in dict_obj.items() if isinstance(value, _type)}
def all_groups_completed(groups):
    """Returns groups if they are all completed, otherwise False.

    A group counts as completed when its 'completed' count equals the
    number of entries in its 'jobs' list.
    """
    for group in groups:
        if len(group.get('jobs')) != group.get('completed'):
            return False
    return groups
def pyAttrName2XmlAttrName(key):
    """
    The @pyAttrName2XmlAttrName@ converts the Python XML attribute name @key@ to an
    appropriate XML attribute identifier.
    If the *key* is 'cssClass' then it is translated into 'class'.
    If an HTML5 attribute *data_xxxx* is used, then change that to *data-xxxx*.
    """
    special_cases = {'cssClass': 'class', 'cssId': 'id'}
    if key in special_cases:
        return special_cases[key]
    if key.startswith('data'):
        return key.replace('_', '-')
    return key
def interpolate_value_from_list_of_dicts(value1, tag_of_val1, list_of_dicts, tag_of_result):
    """ returns the linear interpolation of y-value for x-value of 'value1'
    assumptions are:
    - x-values are saved with the tag 'tag_of_val1'
    - y-values are saved with the tag 'tag_of_result'
    - x-values are monotone (growing or falling) with index

    Bug fixes vs. the original:
    - the "falling" branch was dead (`elif():` is always False), so falling
      series were never searched
    - the out-of-range fallback indexed `list_of_dicts[len(list_of_dicts)]`
      (IndexError); extrapolation from the last segment is used instead
    - the duplicate-skipping loop could run past the end of the list
    """
    n = len(list_of_dicts)
    if n == 0:
        return 0  # list is empty
    if n == 1:
        return list_of_dicts[0][tag_of_result]  # only one dict element
    xs = [d[tag_of_val1] for d in list_of_dicts]
    ys = [d[tag_of_result] for d in list_of_dicts]
    ii = 0
    # skip over leading elements whose x-values are identical (bounded now)
    while ii < n - 1 and xs[ii] == xs[ii + 1]:
        ii += 1
    if ii < n - 1:
        if xs[ii] < xs[ii + 1]:
            # growing: advance to the first x >= value1
            while ii < n and xs[ii] < value1:
                ii += 1
        else:
            # falling: advance to the first x <= value1
            while ii < n and xs[ii] > value1:
                ii += 1
    if ii >= n:
        ii = n - 1  # extrapolate upwards from the last segment
    if ii == 0:
        ii = 1  # extrapolate downwards from the first segment
    if xs[ii] == xs[ii - 1]:
        return ys[ii]  # degenerate segment: no slope defined
    # y = a*x + b through the segment (ii-1, ii)
    AA = (ys[ii] - ys[ii - 1]) / (xs[ii] - xs[ii - 1])
    BB = (xs[ii] * ys[ii - 1] - xs[ii - 1] * ys[ii]) / (xs[ii] - xs[ii - 1])
    return AA * value1 + BB
# end interpolate_value_from_list_of_dicts
def is_non_strict_type(type1, compare_type):
    """
    Returns true if the given type is the same as a comparison type but does it less rigorous than issubclass().
    The main advantage is that classes of modules which where reloaded during runtime are still recognized.
    WARNING: If the name of a class in a different module is the same as in the module in question, this method will
    still return true.
    :param type1: type in question
    :param compare_type: type to compare the type in question to
    :return: True or False
    """
    return isinstance(type1, type) and type1.__name__ == compare_type.__name__
def find_in_list_by_condition(search_key_value_pairs, search_list):
    """Find all items that match the given search key value pairs.

    :param search_key_value_pairs: dictionary of search keys and values
    :param search_list: list of dictionary objects to search
    :return: list of matching items (possibly empty), or None when either
        input is empty.  (The original docstring claimed a single item was
        returned; the code has always returned a list.)
    :raises ValueError: if the arguments are not a dict and a list
    """
    valid = (isinstance(search_key_value_pairs, dict) and
             isinstance(search_list, list))
    if not valid:
        raise ValueError("Invalid argument was passed.")
    if not search_key_value_pairs or not search_list:
        return None
    return [
        item for item in search_list
        if all(item.get(key) == value
               for key, value in search_key_value_pairs.items())
    ]
def str2int(name):
    """Convert well address into a row-major index.

    A well address is a letter (row A-H) followed by a column number (1-12),
    e.g. 'A1' -> 0, 'H12' -> 95.
    """
    row_index = ord(name[0].upper()) - ord('A')
    col_index = int(name[1:]) - 1
    assert 0 <= row_index < 8, 'Invalid well address "%s"' % name
    assert 0 <= col_index < 12, 'Invalid well address "%s"' % name
    return row_index * 12 + col_index
def writefile(filename, contents):
    """ Write File
    This function will write, and if it does not exist create the
    given file. It will fill the file with the content array.

    :param filename: path of the file to (over)write
    :param contents: iterable of strings passed to writelines()
    :return: True on success, False when the file could not be written
    """
    try:
        # `with` closes the handle (flushing it) even if writelines raises,
        # fixing the leak in the original open/close sequence
        with open(filename, 'w') as outfile:
            outfile.writelines(contents)
    except IOError:
        return False
    return True
def _extent(x, y):
"""Get data extent for pyplot imshow.
Parameters
----------
x: list
Data array on X-axis.
y: list
Data array on Y-axis.
Returns
-------
[float, float, float, float]
X and Y extent.
"""
dx, dy = .5 * (x[1] - x[0]), .5 * (y[1] - y[0])
return [x[0] - dx, x[-1] + dx, y[-1] + dy, y[0] - dy] |
def calculate_brightness(brightness=1, color_code=(255, 255, 255)):
    """Given a color code (sequence of three ints where 0 <= int <= 255),
    apply the brightness multiplier to every channel.

    The default is a tuple (not a list) to avoid the shared mutable default
    argument pitfall; callers may still pass a list.

    :param brightness: multiplier applied to each channel
    :param color_code: RGB channel values
    :return: list of rounded, scaled channel values
    """
    return [round(channel * brightness) for channel in color_code]
def merge_dicts_with_function(dict_0, dict_1, function):
    """
    Merge dict_0 and dict_1, apply function to values keyed by the same key.
    Arguments:
        dict_0 (dict);
        dict_1 (dict):
        function (callable): combiner called as function(value_0, value_1)
    Returns:
        dict: merged dict
    """
    merged = dict(dict_0)
    for key, value in dict_1.items():
        if key in merged:
            merged[key] = function(merged[key], value)
        else:
            merged[key] = value
    return merged
def digest(cud):
    """Reads tuples. Returns a dictionary mapping each tuple's leading
    elements (all but the last) to the list of last elements seen."""
    corpus = {}
    for entry in cud:
        key, value = entry[:-1], entry[-1]
        if key not in corpus:
            corpus[key] = []
        corpus[key].append(value)
    return corpus
def vect3_prod_v(_V1, _V2):
    """
    Returns vector (cross) product of 3d vectors V1 and V2
    """
    cx = _V1[1] * _V2[2] - _V1[2] * _V2[1]
    cy = _V1[2] * _V2[0] - _V1[0] * _V2[2]
    cz = _V1[0] * _V2[1] - _V1[1] * _V2[0]
    return [cx, cy, cz]
def get_type(o):
    """
    Handy wrapper for logging purposes
    :param o: any object
    :return str: Nice type name
    """
    cls = type(o)
    return cls.__name__
def convert_to_celsius(fahrenheit: float) -> float:
    """Convert a Fahrenheit temperature to Celsius.

    :param fahrenheit: float
    :return: float
    """
    offset = fahrenheit - 32.0
    return offset * 5.0 / 9.0
def toBool(val):
    """
    Coerce a string value to a bool. Meant to be used to parse HTTP
    parameters, which are always sent as strings. The following string
    values will be interpreted as True:
      - ``'true'``
      - ``'on'``
      - ``'1'``
      - ``'yes'``
    All other strings will be interpreted as False. Bools are passed
    through unchanged. This function is case-insensitive.
    :param val: The value to coerce to a bool.
    :type val: str
    """
    if isinstance(val, bool):
        return val
    truthy_strings = {'true', 'on', '1', 'yes'}
    return val.lower().strip() in truthy_strings
def _params_to_ints(qs):
"""Convert a (string) list of string ids to a list of integers"""
return [int(str_id) for str_id in qs.split(',')] |
def merge(listA, listB):
    """Merges two lists together, ordering items by their first element.

    On ties the element from listB is taken first (matching the original
    comparison `listA[i][0] < listB[j][0]`).
    """
    if not listA:
        return listB
    if not listB:
        return listA
    merged = []
    ia = ib = 0
    while ia < len(listA) and ib < len(listB):
        if listA[ia][0] < listB[ib][0]:
            merged.append(listA[ia])
            ia += 1
        else:
            merged.append(listB[ib])
            ib += 1
    # One of the lists is exhausted; append the remainder of the other.
    merged.extend(listA[ia:])
    merged.extend(listB[ib:])
    return merged
def is_valid_go_command(cmd):
    """
    Test if it is a valid go command: ``.go 1.23 4.56 7.89 0``

    A valid command has no surrounding whitespace, starts with ``.go``,
    has exactly five space-separated chunks, three float coordinates and
    a canonical integer as the last chunk.
    """
    try:
        if cmd != cmd.strip():
            return False
        if not cmd.startswith(".go"):
            return False
        if cmd.count(" ") != 4:
            return False
        chunks = cmd.split(" ")
        if len(chunks) != 5:
            return False
        float(chunks[1])
        float(chunks[2])
        float(chunks[3])
        # round-trip through int() rejects things like '007' or '1.0'
        if str(int(chunks[4])) != chunks[4]:
            return False
        return True
    except (ValueError, AttributeError):
        # ValueError: a chunk is not numeric; AttributeError: cmd is not a
        # string. The original bare `except:` also swallowed KeyboardInterrupt.
        return False
def get_unique_office_name(element=None):
    """Provide an autogenerated unique <office:name> for the document.

    Scans the names already used in the document body (and in the current
    unplugged paragraph, if an element is given) and returns the first
    '__Fieldmark__lpod_<i>' not among them.
    """
    body = element.document_body if element is not None else None
    used = body.get_office_names() if body else []
    # unplugged current paragraph:
    if element is not None:
        used.extend(element.get_office_names())
    counter = 1
    name = '__Fieldmark__lpod_%s' % counter
    while name in used:
        counter += 1
        name = '__Fieldmark__lpod_%s' % counter
    return name
def pprint_row(formatter, data, returned_metrics):
    """
    Return a pretty printed string with a row of the metric streaming
    output: the timestamp of the first data point followed by the value of
    each requested metric ('-' when a metric has no data point).
    """
    try:
        first_point = next(iter(data.items()))[1]
        parts = [u'{0}'.format(first_point.timestamp)]
    except (StopIteration, AttributeError):
        # No data at all, or `data` is not a mapping of points.
        return u''
    for metric in returned_metrics:
        key = metric.str_raw(True)
        if key in data:
            parts.append(u'{0}'.format(data[key].value))
        else:
            parts.append(u'-')
    return formatter.format(*parts)
def _findfirst(s, substrs):
"""Finds whichever of the given substrings occurs first in the given string
and returns that substring, or returns None if no such strings occur.
"""
i = len(s)
result = None
for substr in substrs:
pos = s.find(substr)
if -1 < pos < i:
i = pos
result = substr
return i, result |
def compute_predecessors(nodes):
    """Build a transitive closure.
    For a list of nodes, compute all the predecessors of each node.
    Args:
        nodes: A list of nodes or blocks. Each node is expected to expose an
            ``outgoing`` iterable of successor nodes (edges must stay within
            ``nodes`` — assumed, TODO confirm at call sites).
    Returns:
        A dictionary that maps each node to a set of all the nodes that can reach
        that node.
    """
    # Our CFGs are reflexive: Every node can reach itself.
    predecessors = {n: {n} for n in nodes}
    discovered = set()  # Nodes found below some start node.
    # Start at a possible root and follow outgoing edges to update predecessors as
    # needed. Since the maximum number of times a given edge is processed is |V|,
    # the worst-case runtime is |V|*|E|. However, these graphs are typically
    # trees, so the usual runtime is much closer to |E|. Compared to using
    # Floyd-Warshall (|V|^3), this brings down the execution time on some files
    # from about 30s to less than 7s.
    for start in nodes:
        if start in discovered:
            # We have already seen this node from a previous start, do nothing.
            continue
        # Worklist of edges (from_node, to_node) still to propagate along.
        unprocessed = [(start, n) for n in start.outgoing]
        while unprocessed:
            from_node, node = unprocessed.pop(0)
            node_predecessors = predecessors[node]
            length_before = len(node_predecessors)
            # Add the predecessors of from_node to this node's predecessors
            node_predecessors |= predecessors[from_node]
            if length_before != len(node_predecessors):
                # All of the nodes directly reachable from this one need their
                # predecessors updated
                unprocessed.extend((node, n) for n in node.outgoing)
            discovered.add(node)
    return predecessors
def extract_capabilities(text):
    """Extract a capabilities list from a string, if present.
    Args:
        text: Bytes string to extract from
    Returns: Tuple with text with capabilities removed and list of capabilities
    """
    if b"\0" not in text:
        return text, []
    # Capabilities follow the NUL byte, separated by spaces.
    text, raw_capabilities = text.rstrip().split(b"\0")
    return (text, raw_capabilities.strip().split(b" "))
def _filter_contacts(ions, contacts, loose):
    """
    Filter the precalculated contacts to ensure they make sense
    Specifically:
    Sort the contacts by distance and then by residue and if possible
    include at most 1 atom per residue and at most 4 residues so max
    4 contacts per ion.
    If using strict settings (default - as opposed to loose) then only
    contacts with protein atoms will be considered. If not, then all
    atoms will be included.

    :param ions: iterable of ion objects exposing a ``resid`` attribute
    :param contacts: mapping of ion resid -> list of (atom, distance) pairs;
        atoms expose ``resname`` and ``resid`` (assumed — confirm with caller)
    :param loose: when False, drop atoms whose residue is not a standard one
    :return: dict mapping ion resid -> filtered list of (atom, distance)
    """
    # The 20 standard amino acids plus selenomethionine (MSE).
    standard_residues = (
        'ALA', 'ARG', 'ASN', 'ASP', 'CYS',
        'GLN', 'GLU', 'GLY', 'HIS', 'ILE',
        'LEU', 'LYS', 'MET', 'PHE', 'PRO',
        'SER', 'THR', 'TRP', 'TYR', 'VAL',
        'MSE',
    )
    filtered_contacts = {}
    for ion in ions:
        filtered_contacts[ion.resid] = []
        ion_contacts = contacts[ion.resid]
        # Closest contacts first, so the per-residue pick below keeps the
        # nearest atom of each residue.
        ion_contacts = sorted(ion_contacts, key = lambda x: float(x[1]))
        for ion_contact in ion_contacts:
            atom, dist = ion_contact
            # Strict mode: skip atoms that are not part of a protein residue.
            if atom.resname not in standard_residues and not loose:
                continue
            # At most one atom per residue.
            if atom.resid not in [_[0].resid for _ in filtered_contacts[ion.resid]]:
                filtered_contacts[ion.resid].append(ion_contact)
            # Cap at 4 contacts per ion.
            if len(filtered_contacts[ion.resid]) == 4:
                break
    return filtered_contacts
def _pressure_from_hybrid(psfc, hya, hyb, p0=100000.):
"""Calculate pressure at the hybrid levels."""
# This will be in Pa
return hya * p0 + hyb * psfc |
def getVersionString(plist, key=None):
    """Gets a version string from the plist.

    :param plist: a dict-like parsed plist
    :param key: optional explicit plist key to read verbatim

    If a key is explicitly specified, the value of that key is
    returned without modification, or an empty string if the
    key does not exist.
    If key is not specified:
    if there's a valid CFBundleShortVersionString, returns that.
    else if there's a CFBundleVersion, returns that
    else returns an empty string.
    """
    VersionString = ''
    if key:
        # admin has specified a specific key
        # return value verbatim or empty string
        return plist.get(key, '')
    # default to CFBundleShortVersionString plus magic
    # and workarounds and edge case cleanups
    key = 'CFBundleShortVersionString'
    if not 'CFBundleShortVersionString' in plist:
        if 'Bundle versions string, short' in plist:
            # workaround for broken Composer packages
            # where the key is actually named
            # 'Bundle versions string, short' instead of
            # 'CFBundleShortVersionString'
            key = 'Bundle versions string, short'
    if plist.get(key):
        # return key value up to first space
        # lets us use crappy values like '1.0 (100)'
        VersionString = plist[key].split()[0]
    if VersionString:
        # check first character to see if it's a digit
        if VersionString[0] in '0123456789':
            # starts with a number; that's good
            # now for another edge case thanks to Adobe:
            # replace commas with periods
            VersionString = VersionString.replace(',', '.')
            return VersionString
    # fall through: short version string missing or didn't start with a digit
    if plist.get('CFBundleVersion'):
        # no CFBundleShortVersionString, or bad one
        # a future version of the Munki tools may drop this magic
        # and require admins to explicitly choose the CFBundleVersion
        # but for now Munki does some magic
        VersionString = plist['CFBundleVersion'].split()[0]
        # check first character to see if it's a digit
        if VersionString[0] in '0123456789':
            # starts with a number; that's good
            # now for another edge case thanks to Adobe:
            # replace commas with periods
            VersionString = VersionString.replace(',', '.')
            return VersionString
    # nothing usable found
    return ''
def _retrieve_task_id(res_id, job_dict):
"""
internal function to retrieve a matching job id for create_bag_by_irods celery task from a specified celery
job dictionary including active jobs, reserved jobs, and scheduled jobs. Active jobs and reserved jobs have the
same dictionary format, while schedule jobs have a bit different format in which the job details are stored in the
'request' key of a sub dictionary. Refer to
http://docs.celeryproject.org/en/latest/userguide/workers.html?highlight=revoke#inspecting-workers for details.
"""
job_name = 'hs_core.tasks.create_bag_by_irods'
if job_dict:
workers = job_dict.keys()
for worker in workers:
for job in job_dict[worker]:
if 'name' in job:
if job['name'] == job_name:
if res_id in job['args']:
return job['id']
elif 'request' in job:
scheduled_job = job['request']
if 'name' in scheduled_job:
if scheduled_job['name'] == job_name:
if res_id in scheduled_job['args']:
return scheduled_job['id']
return None |
def _transient_string_in_exception_message(exc):
# type: (Exception) -> bool
"""Determines whether an exception's message contains a common message for transient errors.
The exception's message containing one of these substrings is sufficient to determine that it is
transient, but there can be transient exceptions whose messages do not contain these substrings.
"""
return ('The job encountered an internal error during execution' in str(exc) or
'Retrying the job may solve the problem' in str(exc)) |
def binarySearchBool(x, L):
    """Purpose: to search through a sorted list and return a boolean stating
    if a specific value is present in that list.
    Parameters: x, the value in question, and L, the sorted list to search
    Return: True if x is in L, False otherwise.

    The original also tested L[low] and L[high] on every iteration; those
    checks were redundant shortcuts (any hit there is found by the L[mid]
    comparison within O(log n) anyway) and have been removed.
    """
    low = 0
    high = len(L) - 1
    while low <= high:
        mid = (high + low) // 2
        if L[mid] == x:
            return True
        if L[mid] > x:
            high = mid - 1
        else:
            low = mid + 1
    return False
def check_name_length(feature_name):
    """ check the maximum name length for file geodatabase feature classes,
    if exceeded, only the first 160 characters are returned """
    # Slicing is a no-op for names already within the limit.
    return feature_name[:160]
def oracle_nvl2(expr1, expr2, expr3):
    """ NVL2(expr1, expr2, expr3)
    NVL2 lets you determine the value returned by a query based on whether a specified expression is null or not null.
    If expr1 is not null, then NVL2 returns expr2. If expr1 is null, then NVL2 returns expr3.

    NULL is modelled as None or '' (Oracle treats the empty string as NULL).
    The original used plain truthiness, which wrongly routed non-null falsy
    values such as 0 to expr3.
    """
    if expr1 is None or expr1 == '':
        return expr3
    return expr2
def total_speed(speed_list):
    """
    Compute the sum of all speeds in the speed_list.
    :param speed_list: A list of objects that expose a get_speed_mb() method.
    :returns: Speed as an integer; 0 for an empty/None list.
    """
    total = 0
    if speed_list:
        for port in speed_list:
            # Call get_speed_mb() once per port (the original called it
            # twice, doubling any cost/side effect of the accessor).
            speed = port.get_speed_mb()
            if speed:
                total += speed
    return total
def change(seq, start, end, change):
    """Return the sequence with ``seq[start:end]`` replaced by ``change``"""
    prefix = seq[:start]
    suffix = seq[end:]
    return prefix + change + suffix
def serialise_double(i, context=None):
    """Serialise a Double value.
    i -- integer (1 or 2)
    (unknown values are treated as 1)
    """
    return "2" if i == 2 else "1"
def add_fullstop(text, stop_chars='.?!', replace_chars=';:,-/'):
    """ Add a fullstop to the end of a string if it does not exist.

    Trailing characters from replace_chars are stripped (recursively) before
    the check. An empty or whitespace-only input returns '' (the original
    raised IndexError on text[-1]).
    """
    text = text.strip()
    if not text:
        return text  # guard: nothing to punctuate
    if replace_chars is not None and text[-1] in replace_chars:
        return add_fullstop(text[:-1], stop_chars=stop_chars, replace_chars=replace_chars)
    if text[-1] not in stop_chars:
        text += '.'
    return text
def destos_to_binfmt(key):
    """
    Returns the binary format based on the unversioned platform name,
    and defaults to ``elf`` if nothing is found.
    :param key: platform name
    :type key: string
    :return: string representing the binary format
    """
    binfmt_by_platform = {
        'darwin': 'mac-o',
        'win32': 'pe',
        'cygwin': 'pe',
        'uwin': 'pe',
        'msys': 'pe',
    }
    return binfmt_by_platform.get(key, 'elf')
def parse_metric(metric):
    """Wrap a raw metric value in a {"value": ...} dictionary.

    (The original wrapped the dict literal in a redundant dict() call.)
    """
    return {"value": metric}
def clean_bool_value_from_dict_object(dict_object, dict_name, dict_key, post_errors, no_key_allowed=False):
    """
    This function takes a target dictionary and returns the boolean value given by the given key.
    Returns None if key if not found and appends any error messages to the post_errors list
    :param dict_object: (type: dictionary) target object to get boolean from
    :param dict_name: (type: string) name of target dictionary
    :param dict_key: (type: string) target dictionary key
    :param post_errors: (type: list) list of error messages
    :param no_key_allowed: (type: boolean) whether the or not to allow for absence of target key in target dictionary,
                           default is False
    :return: (type: boolean or None) boolean type value for given target key, or None
    """
    if dict_key not in dict_object:
        if no_key_allowed:
            return None
        post_errors.append("{!r} key not found in {!r} object".format(dict_key, dict_name))
    elif dict_object[dict_key] is None:
        post_errors.append("Value for {!r} in {!r} object is Null".format(dict_key, dict_name))
    elif not isinstance(dict_object[dict_key], bool):
        # NOTE: the original also compared the value against '' after this
        # branch, but a string can never pass isinstance(..., bool), so that
        # "empty string" branch was unreachable and has been removed.
        post_errors.append("Value for {!r} in {!r} object is not type boolean".format(dict_key, dict_name))
    else:
        return dict_object[dict_key]
    return None  # an error message was appended above
def find_biggest_value_per_day(day_data):
    """
    Take pressure data per day and find the biggest value.
    Records are compared as (systolic, diastolic) pairs, so equal systolic
    values fall back to comparing diastolic.
    """
    best = max((record[2], record[3]) for record in day_data)
    return best
def sqrt_iterative(x, init, k):
    """using privpy's method for final result.
    for initial value from bit, we set k=4.
    for initial value from comp, we set k=7.

    Runs k Newton iterations refining `init` towards 1/sqrt(x), then
    multiplies by x to obtain sqrt(x).
    """
    estimate = init
    for _ in range(k):
        # Newton step for f(e) = 1/e**2 - x
        estimate = 0.5 * estimate * (3 - x * estimate * estimate)
    return estimate * x
def sortArrayByParity(A):
    """
    Return the elements of A with all evens first (in original order),
    followed by all odds (in original order).
    :type A: List[int]
    :rtype: List[int]
    """
    evens, odds = [], []
    for value in A:
        (evens if value % 2 == 0 else odds).append(value)
    return evens + odds
def _pop_message(message):
"""Pops the top-most space-separated component from the message."""
if " " in message:
component, message = message.split(" ", 1)
return component, message.lstrip(" ")
return message, "" |
def _get_subdict(master_dict, subkeys):
"""Helper method to get a set of keys from a larger dictionary"""
return {k: master_dict[k] for k in subkeys if k in master_dict and master_dict[k] is not None} |
def compare_two_hit_id_lists(ids1, ids2):
    """Take two lists of sequence IDs, and remove ids so that each list
    contains only unique ids.

    The lists are walked in lockstep; an id from one list is kept only while
    it has not already been kept from the other list, and the first
    cross-duplicate stops further additions to that list.
    """
    # Alias the input lists (they are only rebound below, never mutated
    # in place, so the originals are left untouched).
    ids1a = ids1
    ids2a = ids2
    # Make the lists the same length by padding the shorter with "Filler".
    if len(ids1a) > len(ids2a):
        ids2a = ids2a + ["Filler"]*(len(ids1a) - len(ids2a))
    elif len(ids1a) < len(ids2a):
        ids1a = ids1a + ["Filler"]*(len(ids2a) - len(ids1a))
    else:
        pass
    assert len(ids1a) == len(ids2a)
    # Zip through the lists and generate non-redundant lists.
    ids1a2 = []
    ids2a2 = []
    stop_adding_to_ids1a2 = False
    stop_adding_to_ids2a2 = False
    for x, y in zip(ids1a, ids2a):
        if not stop_adding_to_ids1a2:
            # Keep x only if it has not already been kept from list 2.
            if not x in ids2a2 and not x == "Filler":
                ids1a2.append(x)
            else:
                stop_adding_to_ids1a2 = True
        if not stop_adding_to_ids2a2:
            # Keep y only if it has not already been kept from list 1
            # (including x appended just above in this same iteration).
            if not y in ids1a2 and not y == "Filler":
                ids2a2.append(y)
            else:
                stop_adding_to_ids2a2 = True
        if stop_adding_to_ids1a2 and stop_adding_to_ids2a2:
            break
    # Remove all instances of "Filler" from both lists.
    ids1a2 = list(filter(lambda a: a != "Filler", ids1a2))
    ids2a2 = list(filter(lambda a: a != "Filler", ids2a2))
    # Return the new lists.
    return (ids1a2, ids2a2)
def refresh_voter_primary_email_cached_information_by_voter_we_vote_id(voter_we_vote_id):
    """
    Make sure this voter record has accurate cached email information.
    Currently a stub: always reports failure with a TO_BE_IMPLEMENTED status.
    :param voter_we_vote_id:
    :return: dict with 'success' and 'status' keys
    """
    return {
        'success': False,
        'status': "TO_BE_IMPLEMENTED",
    }
def get_contact_timings(contact_states):
    """
    Locate the frames where contact begins and ends.
    :param contact_states: sequence of per-frame booleans (True = in contact)
    :return: contact start timings, contact end timings
    """
    start_frames = []
    end_frames = []
    previous = contact_states[0]
    if previous is True:
        start_frames.append(0)
    for frame in range(1, len(contact_states)):
        state = contact_states[frame]
        if state != previous:
            (start_frames if state is True else end_frames).append(frame)
        previous = state
    # A contact still active at the last frame ends there.
    if contact_states[-1] is True:
        end_frames.append(len(contact_states) - 1)
    return start_frames, end_frames
def normalize_angle_deg(angle: float) -> float:
    """ Given an angle in degrees, normalises it into (-180, 180] """
    # ATTRIBUTION: https://github.com/Gor-Ren/gym-jsbsim
    wrapped = angle % 360
    return wrapped - 360 if wrapped > 180 else wrapped
def to_kwh(joules):
    """ Converts from watts used in a timeframe (in hours) to kwh """
    # NOTE(review): despite the parameter name, the value is treated as
    # watt-hours (divided by 1000, not by 3.6e6 as a true joules->kWh
    # conversion would require) — confirm what callers actually pass.
    watt_hours = joules
    return watt_hours / 1000
def u_global(name, prefix='u_', suffix=''):
    """Returns the uncertainty corresponding to a column. For example, the
    column ('roi_atoms', 'Fz', '', '') has uncertainties in the column
    ('roi_atoms', 'u_Fz', '', '')
    """
    # exact type checks preserved: subclasses (e.g. namedtuples) are meant
    # to fall through to the `None` branch, as in the original
    if type(name) == tuple:
        # locate the last non-empty component of the tuple
        last = len(name) - 1
        while not name[last]:
            last -= 1
        decorated = prefix + name[last] + suffix
        return name[:last] + (decorated,) + name[last + 1:]
    elif type(name) == str:
        return prefix + name + suffix
    else:
        return None
def cuda_infinity_norm(a, b):
    """
    Use @reduce decorator for converting a simple binary operation into a reduction kernel.
    :param a:
    :param b:
    :return: the larger of |a| and |b|
    """
    magnitude_a = abs(a)
    magnitude_b = abs(b)
    return magnitude_a if magnitude_a > magnitude_b else magnitude_b
def find_kmers(seq, k):
    """Return all length-k substrings (kmers) of seq, in order."""
    kmer_count = len(seq) - k + 1
    if kmer_count < 1:
        return []
    return [seq[start:start + k] for start in range(kmer_count)]
def parse_formatted_fftlen(fftlen: str) -> int:
    """Parse a formatted FFT length: a plain integer, optionally with a
    trailing 'k'/'K' (x 2**10) or 'm'/'M' (x 2**20) suffix."""
    suffix = fftlen[-1:].lower()
    if suffix == 'm':
        return int(fftlen[:-1]) * 2**20
    if suffix == 'k':
        return int(fftlen[:-1]) * 2**10
    return int(fftlen)
def find_par(s):
    """Return the contents of the first balanced parenthesised group in s
    (without the outer parentheses), or None when no such group closes."""
    depth = 0
    collected = ""
    started = False
    for ch in s:
        if not started and ch != '(':
            continue  # skip everything before the first '('
        if ch == '(':
            depth += 1
            started = True
        elif ch == ')':
            depth -= 1
        collected += ch
        if started and depth == 0:
            return collected[1:-1]
def cir_RsQ(w, Rs, Q, n):
    """
    Simulation Function: -Rs-Q-
    Inputs
    ----------
    w = Angular frequency [1/s]
    Rs = Series resistance [Ohm]
    Q = Constant phase element [s^n/ohm]
    n = Constant phase elelment exponent [-]
    """
    # Impedance of the constant phase element: 1 / (Q * (j*w)^n)
    z_cpe = 1 / (Q * (1j * w) ** n)
    return Rs + z_cpe
def deep_sort(obj):
    """
    Recursively sort list or dict nested lists
    """
    if isinstance(obj, dict):
        # insertion order of the new dict follows the sorted keys
        return {key: deep_sort(obj[key]) for key in sorted(obj)}
    if isinstance(obj, list):
        return sorted(deep_sort(item) for item in obj)
    return obj
def _escape_shell_chars(arg):
"""
An attempt to sanitize shell arguments without disabling
shell expansion.
>>> _escape_shell_chars('This (; file has funky chars')
'This \\(\\; file has funky chars'
"""
arg = arg.replace("(", "\\(")
arg = arg.replace(";", "\\;")
arg = arg.replace(")", "\\)")
return arg |
def to_seconds(s):
    """Takes str s in HH:MM:SS, MM:SS, or SS format and returns total seconds (integer)."""
    fields = [int(part) for part in s.split(":")]
    fields.reverse()  # fields is now [SS] or [SS, MM] or [SS, MM, HH]
    count = len(fields)
    if count == 1 and fields[0] >= 0:
        # bare seconds: any non-negative number is allowed
        return fields[0]
    if count == 2 and 0 <= fields[0] < 60 and 0 <= fields[1] < 60:
        return fields[0] + 60 * fields[1]
    if (count == 3
            and 0 <= fields[0] < 60
            and 0 <= fields[1] < 60
            and fields[2] >= 0):
        return fields[0] + 60 * fields[1] + 3600 * fields[2]
    raise ValueError(f"{s} is not a valid time.")
def calculate_intervals_from_range(list_range):
    """
    Merge a list of continuous indexes into list of intervals.
    Parameters:
        list_range -- A list with compartment indexes to be merged into intervals.
    Output:
        A list of lists with intervals.
    """
    intervals = []
    previous = None
    for item in list(list_range):
        if previous is not None and item - 1 == previous:
            # continues the current run
            intervals[-1].append(item)
        else:
            # starts a new run
            intervals.append([item])
        previous = item
    return intervals
def fully_qualified_stack_name(org: str, project: str, stack: str) -> str:
    """
    Returns a stack name formatted with the greatest possible specificity:
    org/project/stack or user/project/stack
    Using this format avoids ambiguity in stack identity guards creating or selecting the wrong stack.
    Note that filestate backends (local file, S3, Azure Blob) do not support stack names in this
    format, and instead only use the stack name without an org/user or project to qualify it.
    See: https://github.com/pulumi/pulumi/issues/2522
    :param org: The name of the org or user.
    :param project: The name of the project.
    :param stack: The name of the stack.
    :returns: The fully qualified stack name.
    """
    return "/".join((org, project, stack))
def extract_http_tags(event):
    """
    Extracts HTTP facet tags from the triggering event
    """
    tags = {}
    path = event.get("path")
    method = event.get("httpMethod")
    request_context = event.get("requestContext")
    if request_context and request_context.get("stage"):
        # API Gateway events: prefer the request context's values
        domain = request_context.get("domainName")
        if domain:
            tags["http.url"] = domain
        path = request_context.get("path")
        method = request_context.get("httpMethod")
        # Version 2.0 HTTP API Gateway
        http_info = request_context.get("http")
        if event.get("version") == "2.0" and http_info:
            path = http_info.get("path")
            method = http_info.get("method")
    if path:
        tags["http.url_details.path"] = path
    if method:
        tags["http.method"] = method
    headers = event.get("headers")
    if headers and headers.get("Referer"):
        tags["http.referer"] = headers.get("Referer")
    return tags
def calculate_edit_distance(org_code, cand_code):
    """Compute the token-level Levenshtein distance between two code snippets.

    Both snippets are tokenized on whitespace (so ``\n`` inside the text acts
    as a separator); the distance counts token insertions, deletions, and
    substitutions. The higher the score, the lower the similarity.

    :param org_code: original code string
    :param cand_code: candidate code string
    :return: edit distance (int) between the two token sequences
    """
    # str.split() with no arguments already splits on any whitespace and
    # drops empties, so no per-token stripping is needed.
    org_parts = org_code.split()
    cand_parts = cand_code.split()

    def _levenshtein(s1, s2):
        # Single-row DP; keep s1 the shorter sequence so the row stays small.
        if len(s1) > len(s2):
            s1, s2 = s2, s1
        distances = range(len(s1) + 1)
        for i2, c2 in enumerate(s2):
            row = [i2 + 1]
            for i1, c1 in enumerate(s1):
                if c1 == c2:
                    row.append(distances[i1])
                else:
                    row.append(1 + min(distances[i1], distances[i1 + 1], row[-1]))
            distances = row
        return distances[-1]

    # NOTE: the original ended with an unreachable `pass` after the return;
    # removed as dead code.
    return _levenshtein(org_parts, cand_parts)
def ps_good_mock(url, request):
    """
    Mock for PTCRB lookup, best case.
    """
    # Canned HTML snippet mimicking a successful PTCRB certification page.
    # `url` and `request` are accepted to match the mock signature but unused.
    thebody = "<table></table><table><td>CER-59665-001 - Rev2-x05-05</td><td>10.3.3.2205</td><td>snek</td><td>OS Version: 10.3.0.1052 Radio Version: 10.3.0.1053 SW Release Version: 10.3.0.675</td></table>"
    response = {'status_code': 200, 'content': thebody}
    return response
def one_of_in(lst, val):
    """
    Returns True if one of the items in the given list exists
    in the given value, False otherwise.
    """
    # any() short-circuits on the first containment hit, like the
    # explicit loop it replaces.
    return any(item in val for item in lst)
def get_checklist(full_name):
    """ Generate a tuple of names that may contain the 'full_name'.
    Notes:
        Eg. if full_name looks like "a.b.c", then the tuple
            ("a.b.c", "a.*", "a.b.*", "a.b.c.*")
        is generated. So either the full name itself may be found, or when
        full_name is included in some *-import.
    Args:
        full_name: The full module name
    Returns:
        Tuple of possible "containers"; empty tuple for empty input.
    """
    if not full_name:  # guard against nonsense
        # Previously returned a list here but a tuple below; normalize to
        # a tuple so the return type is consistent (both are falsy/empty).
        return ()
    mtab = full_name.split(".")  # separate components by dots
    checklist = [full_name]  # full name is always looked up first
    m0 = ""
    for m in mtab:  # generate *-import names
        m0 += "." + m if m0 else m
        checklist.append(m0 + ".*")
    return tuple(checklist)
def dot(coords1, coords2):
    """
    Find the dot product of two 3-dimensional points
    Parameters
        coords1: coordinates of form [x,y,z]
        coords2: coordinates of form [x,y,z]
    Returns
        value: Dot product coords2 and coords1 (float)
    """
    # Start the sum at 0.0 so the result is always a float, matching the
    # original accumulator initialization.
    return sum((coords1[i] * coords2[i] for i in range(3)), 0.0)
def reorder_proper_torsions(i0, i1, i2, i3):
    """Return the atom indices of a proper torsion after "flipping" the
    order so that the first atom is the smallest index.
    Parameters
    ----------
    i0, i1, i2, i3 : int,
        Atom indices of torsion
    Returns
    -------
    j0, j1, j2, j3 : int,
        Atom indices of torsion
    """
    # Keep forward order when the first end is already the smaller index;
    # otherwise reverse the whole torsion.
    if i0 < i3:
        return i0, i1, i2, i3
    return i3, i2, i1, i0
def make_minimal_cs_thread(overrides=None):
    """
    Create a dictionary containing all needed thread fields as returned by the
    comments service with dummy data and optional overrides
    """
    defaults = {
        "type": "thread",
        "id": "dummy",
        "course_id": "dummy/dummy/dummy",
        "commentable_id": "dummy",
        "group_id": None,
        "user_id": "0",
        "username": "dummy",
        "anonymous": False,
        "anonymous_to_peers": False,
        "created_at": "1970-01-01T00:00:00Z",
        "updated_at": "1970-01-01T00:00:00Z",
        "last_activity_at": "1970-01-01T00:00:00Z",
        "thread_type": "discussion",
        "title": "dummy",
        "body": "dummy",
        "pinned": False,
        "closed": False,
        "abuse_flaggers": [],
        "abuse_flagged_count": None,
        "votes": {"up_count": 0},
        "comments_count": 0,
        "unread_comments_count": 0,
        "children": [],
        "read": False,
        "endorsed": False,
        "resp_total": 0,
    }
    # Overrides (if any) win over the dummy defaults.
    return {**defaults, **(overrides or {})}
def combine_options(options=None):
    """ Returns the ``copy_options`` or ``format_options`` attribute with spaces in between and as
    a string. If options is ``None`` then return an empty string.
    Parameters
    ----------
    options : list, optional
        list of strings which is to be converted into a single string with spaces
        inbetween. Defaults to ``None``
    Returns
    -------
    str:
        ``options`` attribute with spaces in between
    """
    # Guard clause keeps the join unconditional below.
    if options is None:
        return ""
    return " ".join(options)
def parse_fasta(fastalines, zip_entries=True):
    """
    Interprets the input as a string encoding an entire FASTA file.

    Sequence lines between headers are concatenated; a sequence appearing
    before any header is recorded under the id 'UNKNOWN'.

    :param fastalines: full FASTA file contents as one string
    :param zip_entries: if True return a list of (id, seq) tuples;
        otherwise return two parallel lists (ids, seqs)
    """
    ids = []
    seqs = []
    curr_id = ''
    curr_seq = []

    def _flush():
        # Record the entry accumulated so far, if any. (This logic was
        # previously duplicated at the header branch and at end-of-input.)
        if curr_seq:
            ids.append(curr_id or 'UNKNOWN')
            seqs.append(''.join(curr_seq))

    for line in fastalines.splitlines():
        if line.startswith('>'):
            _flush()
            curr_id = line[1:].strip()
            curr_seq = []
        else:
            stripped = line.strip()
            if stripped:
                curr_seq.append(stripped)
    _flush()

    if zip_entries:
        return list(zip(ids, seqs))
    return ids, seqs
def create_group(key, name):
    """create a group of keys: node of our tree"""
    # Fresh lists per call so sibling nodes never share children.
    return {
        'name': name,
        'key': key,
        'keys': [],
        'groups': [],
        'children': [],
    }
def prep_filesize(file_size):
    """
    Convert file size to float.
    :param file_size: Number to parse; ``None`` is treated as ``0.0``.
    :type file_size: float
    """
    # Only None is defaulted; other falsy values (0, "0") still go
    # through float() exactly as before.
    return 0.0 if file_size is None else float(file_size)
def hamming_distance(v1, v2):
    """
    Get Hamming distance between integers v1 and v2.
    :param v1:
    :param v2:
    :return:
    """
    # XOR leaves a set bit exactly where the operands differ; count them
    # via the binary string representation.
    return format(v1 ^ v2, "b").count("1")
def _get_extra_configmaps_and_metrics(resource_name, resource_qsets):
"""
configmap used for logbeat
"""
extra_configmaps, metrics = [], []
for res in resource_qsets:
cms, mts = res.get_extra_configmaps_and_metrics(resource_name)
extra_configmaps.extend(cms)
metrics.extend(mts)
return extra_configmaps, metrics |
def reduce_vector(V, p):
    """
    Reduces a vector over finite field.

    Mutates ``V`` in place (each entry taken modulo ``p``) and also
    returns it for convenience.
    """
    for idx in range(len(V)):
        V[idx] %= p
    return V
def folder_name(unit_name):
    """
    Extract folder name from full unit name
    Example:
        Unit 01 - Example unit
    returns:
        Unit 01
    """
    # Take the part before the first " - " separator. The previous
    # implementation returned the whole stripped name, contradicting the
    # documented example above. Names without the separator are returned
    # unchanged (stripped), preserving the old behavior for that case.
    return unit_name.strip().split(" - ", 1)[0].strip()
def fs_get_file_id(filelist):
    """Returns a dict associating filename to figshare file id
    Parameters
    ----------
    filelist : list of dict
        HTTP request response from fs_get_file_list
    Returns
    -------
    response : dict
        keys are filname and values are file_id
    """
    mapping = {}
    for entry in filelist:
        # ids are stringified so callers can use them directly in URLs
        mapping[entry["name"]] = str(entry["id"])
    return mapping
def sort_by(attribute, iterable, new=False, reverse=False):
    """Sort an iterable by an attribute of the instance (or dictionary) within the iterable.
    :param attribute: The name of the attribute (or dictionary key) by which the iterable
                      should be sorted.
    :type attribute: str
    :param iterable: An iterable (such as a list or tuple).
    :param new: Indicates a new list should be returned. When ``False`` the list is sorted "in place".
    :type new: bool
    :param reverse: Indicates the list should be sorted in reverse.
    :type reverse: bool
    :returns: A new iterable when ``new`` is ``True``. Otherwise, ``None``.
    This is a shortcut for using lambda functions for sortation:
    .. code-block:: python
        # To sort the list in place ...
        some_list.sort(key=lambda x: x.sort_order)
        # Or to return a new list using the sorted() built-in function ...
        new_list = sorted(some_list, key=lambda x: x.sort_order)
    """
    def _get_attribute_value(instance):
        # isinstance (rather than `type(...) is dict`) also covers dict
        # subclasses such as OrderedDict/defaultdict.
        if isinstance(instance, dict):
            return instance[attribute]
        return getattr(instance, attribute)

    if new:
        return sorted(iterable, key=_get_attribute_value, reverse=reverse)
    # In-place sort; returns None by stdlib convention (like list.sort()).
    iterable.sort(key=_get_attribute_value, reverse=reverse)
def read_key_safe(obj, keys, default):
    """ Walk ``keys`` one level at a time into nested ``obj`` and return the
    value found; return ``default`` as soon as any key is missing. """
    current = obj
    for k in keys:
        # Membership test (not try/except) deliberately preserved: for
        # non-dict containers `in` and `[]` have distinct semantics.
        if k not in current:
            return default
        current = current[k]
    return current
def PrivacyChoiceStrength(privacy):
    """Returns privacy strength (stronger privacy means higher returned value)."""
    # .get() yields None for unrecognized choices, matching the original
    # fall-through behavior.
    strengths = {
        'public-contact-data': 0,
        'redacted-contact-data': 1,
        'private-contact-data': 2,
    }
    return strengths.get(privacy)
def do_range(stop):
    """
    Wrap the standard range() method, to enable things like
    {% for i in range(6) %} ...
    """
    # Materialize the range as a concrete list for template iteration.
    return [*range(stop)]
def read_metadata(line, search_text, current_value):
    """
    Function to read simple header items in DynAdjust files
    :param line: DynAdjust header line
    :param search_text: header field required.
    :param current_value: stored value. Updated when search_text is successfully found.
    :return: either current value or string corresponding to search_text
    """
    # Header labels occupy a fixed 35-character field; compare against
    # the space-padded label and read the value from column 35 onward.
    padded_label = search_text.ljust(35, ' ')
    if line[:35] == padded_label:
        return line[35:].strip()
    return current_value
def get_nested_dict_value(input_dict, key):
    """Uses '.' or '->'-splittable string as key to access nested dict."""
    # A literal key match (even one containing dots) wins over traversal.
    if key in input_dict:
        return input_dict[key]
    key = key.replace("->", ".")  # make sure no -> left
    head, sep, tail = key.partition('.')
    if not sep:  # no separator left, so the key simply is not present
        raise KeyError("'{:s}' not in {}".format(key, input_dict))
    # Recurse one level down with the remainder of the path.
    return get_nested_dict_value(input_dict[head], tail)
def compute_patk(matches, k):
    """
    Precision at k: fraction of truthy entries among the first k matches.
    :param matches: list
    :param k: float
    :return: float
    """
    hits = sum(1 for m in matches[:k] if m)
    return float(hits) / k
def make_all_seqs(l):
    """
    Makes all possible sequences, including Ns, of length l
    """
    # 5^l sequences are produced; warn for long barcodes since the list
    # grows exponentially.
    if l > 8:
        print("Warning - large barcodes detected!")
        print("It may be faster to use option '--dont_build_reference'!")
    alphabet = ['A', "C", "G", "T", "N"]
    seqs = alphabet
    for _ in range(l - 1):
        # Extend every current prefix by one letter, keeping the same
        # (prefix-major, alphabet-minor) ordering as repeated appends.
        seqs = [prefix + letter for prefix in seqs for letter in alphabet]
    return seqs
def sort_dic(dic, switch, is_reverse):
    """
    Function: sort dictionary according to keys or values.
    Input:
        - dic: Dictionary.
        - switch: str. "keys" or "values" to sort.
        - is_reverse: whether or not to sort reversely.
    Output: Dictionary. sorted (None for an unrecognized switch).
    """
    if switch == 'keys':
        position = 0
    elif switch == 'values':
        position = 1
    else:
        # Unrecognized switch: original fell off the end, yielding None.
        return None
    # dict() preserves the sorted insertion order (Python 3.7+).
    return dict(sorted(dic.items(), key=lambda item: item[position], reverse=is_reverse))
def extract_dependencies(nodes, dependencies):
    """
    Transform dependencies from the file into a dependency dictionary
    :param nodes: iterable of node names; each gets an (initially empty) entry
    :param dependencies: iterable of "A -> B" strings meaning A affects B
    :return: dict mapping each node to the list of nodes that affect it
    """
    graph = {node: [] for node in nodes}
    for entry in dependencies:
        source, target = entry.split(" -> ")
        graph[target].append(source)
    return graph
def Phi_mean_periodic_lorentz(gamma, A_mean):
    """returns the mean values of a process of periodic Lorentz pulses
    with duration time td = 1"""
    # Pulse-shape integral; equals 1 for duration time td = 1.
    pulse_integral = 1
    return gamma * A_mean * pulse_integral
def generate_numbers(count, mean, stdDev, ordering="Random"):
    """Generates a series of numbers with a uniform distribution scaled to the
    requested moments.

    :param count: how many numbers to generate
    :param mean: desired mean of the series
    :param stdDev: desired standard deviation of the series
    :param ordering: 'Random' (default), 'Sorted', or 'Reverse Sorted'
    :returns: list of floats, or ``float('NaN')`` for an unrecognized ordering
    """
    from random import random

    if ordering not in ("Random", "Sorted", "Reverse Sorted"):
        return float("NaN")
    # A uniform distribution on [0, 1) has mean 0.5 and stdDev ~0.2887
    # (= 1/sqrt(12)); shift/scale each draw to the requested moments.
    # NOTE(review): values are not clamped, so a high mean with a low
    # stdDev is handled only approximately, as in the original.
    numbers = [(random() - 0.5) * stdDev / 0.2887 + mean for _ in range(count)]
    if ordering == "Sorted":
        numbers.sort()
    elif ordering == "Reverse Sorted":
        numbers.sort(reverse=True)
    return numbers
def is_whitelist_file(file_name):
    """White list documents that are exempt from validation."""
    # str.endswith accepts a tuple of suffixes, so one call covers both.
    return file_name.endswith(("_markers_denormalized.tsv", "_Allen_markers.tsv"))
def delete_comments(input_text):
    """
    Completely delete all the comments in the input_text.
    Type 1 comments: "<# ... #>".
    Type 2 comments: "# ... \n", except for cases when # is surrounded with " or '.
    :param input_text: a string representing a script to work on.
    :return: an input_text freed from any comments.
    """
    output = ''
    # Delimiter pairs handled in order: block comments "<# ... #>" first,
    # then line comments "# ... \n" (PowerShell-style comment syntax).
    start_symbols = ['<#', '#']
    end_symbols = ['#>', '\n']
    assert len(start_symbols) == len(end_symbols)
    for i in range(len(start_symbols)):
        output = ''
        # 1. initial search
        start_index = input_text.find(start_symbols[i])
        while start_index >= 0:
            # If only spaces precede the comment on its line, pull start_index
            # back so the leading whitespace is removed along with it.
            if input_text[:start_index].split('\n')[-1].replace(" ", "") == "": # handling spaces before the comment
                if len(input_text[:start_index].split('\n')[-1]) > 0:
                    start_index = start_index - len(input_text[:start_index].split('\n')[-1])
            # 2. append everything up to start_index to the output
            output = output + input_text[:start_index]
            # 3. then, either:
            # A '#' counts as a real comment start unless it is escaped with a
            # backtick or a quote (' or ") appears both before it and after it
            # on the same line (i.e. it likely sits inside a string literal).
            # NOTE(review): this quote heuristic only inspects the current
            # line, so '#' inside multi-line strings may still be stripped.
            if i == 0 or (i == 1 and input_text[start_index - 1] != "`" and
                          ((input_text[:start_index].split('\n')[-1].find("'") == -1 or
                            input_text[start_index:].split('\n')[0].find("'") == -1) and
                           (input_text[:start_index].split('\n')[-1].find('"') == -1 or
                            input_text[start_index:].split('\n')[0].find('"') == -1))):
                # 3.1. skip the comment
                end_index = start_index + input_text[start_index:].find(end_symbols[i]) + len(end_symbols[i])
            else:
                # 3.2. or add the "false" positive '#' to the output
                end_index = start_index + 1
                output = output + '#' # we need '#' this time
            # 4. cut input_text from the end position
            input_text = input_text[end_index:]
            # 5. loop
            start_index = input_text.find(start_symbols[i])
        output = output + input_text
        input_text = output
    # Collapse runs of blank lines left behind by the removed comments.
    while output.find('\n\n\n') != -1:
        output = output.replace('\n\n\n', '\n\n')
    return output
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.