content stringlengths 42 6.51k |
|---|
def __read_class_labels(classes_file):
"""
Returns a function -> class-label dictionary. Only supports single class labels.
:param functionclasses_file: A file with the format function-name class-label
:return: Dictionary of the form {fn: class}
"""
classes = {}
with open(classes_file, 'r') as f:
for line in f:
function, class_label = line.strip().split()
if function in classes:
continue
# raise Exception("Duplicate function %s" % function)
classes[function] = class_label
return classes |
def write_idx_on_disk(index_dirty, index_clean, path, path_dirty):
    """
    Append the clean and dirty index names to their respective files.

    :param index_dirty: iterable of names appended to ``path_dirty``
    :param index_clean: iterable of names appended to ``path``
    :param path: destination file for the clean index (opened in append mode)
    :param path_dirty: destination file for the dirty index (opened in append mode)
    :return: an empty list (kept for backward compatibility with callers)
    """
    with open(path, 'a') as fd:
        fd.writelines("{}\n".format(name) for name in index_clean)
    with open(path_dirty, 'a') as fd:
        fd.writelines("{}\n".format(name) for name in index_dirty)
    return []
def isfuncdesc(desc):
    """Return True when *desc* is a function-type description, i.e. a
    non-None mapping carrying a 'signatures' entry."""
    if desc is None:
        return False
    return 'signatures' in desc
def is_tty (fp):
    """Check if a file object is a TTY."""
    isatty = getattr(fp, "isatty", None)
    return isatty is not None and isatty()
def get_private_key_user(user: str, index: int) -> str:
    """
    Build the keychain user string ("wallet-<user>-<index>") for a key index.
    """
    return "wallet-{}-{}".format(user, index)
def u16(x):
    """Unpack a 2-byte little-endian buffer into an unsigned integer."""
    import struct
    value, = struct.unpack('<H', x)
    return value
def create_particle(p_full, p_agent, p_patient):
    """
    Build a fantasy particle containing the requested number of
    transitive (p_full) and intransitive (agent-only / patient-only)
    situations, returned as a flat list of node tuples.
    """
    particle = []
    node = 0
    # Transitive situations: one event node plus an agent and a patient node.
    for _ in range(p_full):
        particle.append((node, [0, 1], [node + 1, node + 2], [], []))
        particle.append((node + 1, [], [], [0], [node]))
        particle.append((node + 2, [], [], [1], [node]))
        node += 3
    # Agent-only situations.
    for _ in range(p_agent):
        particle.append((node, [0], [node + 1], [], []))
        particle.append((node + 1, [], [], [0], [node]))
        node += 2
    # Patient-only situations.
    for _ in range(p_patient):
        particle.append((node, [1], [node + 1], [], []))
        particle.append((node + 1, [], [], [1], [node]))
        node += 2
    return particle
def keywithmaxval(dict):
    """Return the key whose value is largest.

    Ties are broken in favour of the first key in insertion order, matching
    the historical list-based implementation.  For an empty mapping the
    sentinel string "Check" is returned (kept for backward compatibility).

    NOTE: the parameter name shadows the builtin ``dict``; it is kept
    unchanged so keyword callers are not broken.
    """
    if not dict:
        return "Check"
    # max() returns the first key reaching the maximum value, exactly like
    # the previous list(values).index(max(values)) lookup did.
    return max(dict, key=dict.get)
def _cleanBlank(value):
""" Converts blank string to Python None """
if value == '':
return None
return value |
def reformat_wrd(wrd):
    """
    Normalise apostrophes in a word:
    1. a trailing standalone apostrophe is dropped;
    2. a word containing an apostrophe (other than the literal "'s") is
       split in two, re-attaching the apostrophe to the final piece.

    :return: list with the resulting word piece(s)
    """
    if wrd[-1] == "'":
        wrd = wrd[:-1]
    if "'" not in wrd or wrd == "'s":
        return [wrd]
    pieces = wrd.split("'")
    pieces[-1] = "'" + pieces[-1]
    return pieces
def prepare_results(cursor_description, rows):
    """
    Build a JSON-ready dict of entries, each entry being a mapping of
    column name to row value.

    :param cursor_description: cursor description tuples; element 0 of each
        tuple is the column name
    :param rows: list of returned sql query rows (sequences of values)
    :return: ``{"entries": [...]}``, or ``{"entries": None}`` when *rows*
        is None or empty
    """
    if not rows:
        return {"entries": None}
    # Column names from the SQL result become the dictionary keys.
    columns = [column[0] for column in cursor_description]
    return {"entries": [dict(zip(columns, row)) for row in rows]}
def lower_camel(name, split_char="_"):
    """Convert *name* into lower camelcase.

    :param name: The name to be converted
    :type name: str
    :param split_char: The character that separates words in the name.
    :type split_char: str
    :return: str
    """
    first, *rest = name.split(split_char)
    return first + "".join(word.capitalize() for word in rest)
def minioString(obj):
    """
    Cast an object to str and lower-case it; used to build the path names
    needed by the analysis.

    Args:
        - obj (Python Object): object to turn into a lowercase string.
    Returns:
        - str: the lower-cased string form of the object.
    """
    text = str(obj)
    return text.lower()
def row_str(dflen: int) -> str:
    """Format a row count as millions, e.g. 1500000 -> "1.5M rows".

    Args:
        dflen (int): number of rows.

    Returns:
        str: the count divided by 1,000,000, rounded to one decimal place,
        suffixed with "M rows".
    """
    return str(round(dflen / 1000000, 1)) + "M rows"
def ellipsis(text, maxlength=400):
    """Trim *text* to at most *maxlength* (default: 400) characters,
    appending "..." when something was cut."""
    if len(text) > maxlength:
        return text[:maxlength - 3] + "..."
    return text
def doOverlap(bbox1, bbox2):
    """
    :param bbox1: bounding box (x_min, y_min, x_max, y_max) of rectangle 1
    :param bbox2: bounding box of rectangle 2
    :return: True when the two rectangles overlap (touching edges count)
    """
    separated_x = bbox1[2] < bbox2[0] or bbox2[2] < bbox1[0]
    separated_y = bbox1[3] < bbox2[1] or bbox2[3] < bbox1[1]
    return not (separated_x or separated_y)
def create_search_url(base_url, keyword_list):
    """Create one Google search URL per keyword in keyword_list.

    Args:
        base_url (str): Google's base search url
        keyword_list (list): list of search keyword strings
    Returns:
        list: search urls like https://www.google.com/search?q=pizza
    """
    # Spaces become '+' so multi-word keywords form a valid query string.
    return [base_url + keyword.replace(" ", "+") for keyword in keyword_list]
def str_to_bool(str_bool):
    """ Convert a string to boolean: only the exact string "False" maps to
    False; every other value maps to True. """
    return str_bool != "False"
def time2str(s, last_unit='s'):
    """ Return human readable time string from seconds.

    Units are emitted from years (Y) down to nanoseconds (ns); zero-valued
    units are skipped, and the remainder is zeroed once *last_unit* has
    been processed, truncating the output at that unit.

    Examples
    --------
    >>> print(time2str(1230))
    20m30s
    >>> print(time2str(0.123))
    123ms
    >>> print(time2str(0.000123))
    123us
    """
    seconds_in_year = 31556925.9747 # a standard SI year
    orig_s = s  # kept for debugging; not otherwise used below
    years = int(s / (seconds_in_year))
    r = []
    if years:
        r.append ('%sY' % (years))
        s -= years * (seconds_in_year)
    # After each unit, zero the remainder if that unit is the last requested.
    if last_unit=='Y': s = 0
    months = int(s / (seconds_in_year/12.0))
    if months:
        r.append ('%sM' % (months))
        s -= months * (seconds_in_year/12.0)
    if last_unit=='M': s = 0
    days = int(s / (60*60*24))
    if days:
        r.append ('%sd' % (days))
        s -= days * 60*60*24
    if last_unit=='d': s = 0
    hours = int(s / (60*60))
    if hours:
        r.append ('%sh' % (hours))
        s -= hours * 60*60
    if last_unit=='h': s = 0
    minutes = int(s / 60)
    if minutes:
        r.append ('%sm' % (minutes))
        s -= minutes * 60
    if last_unit=='m': s = 0
    seconds = int(s)
    if seconds:
        r.append ('%ss' % (seconds))
        s -= seconds
    if last_unit=='s': s = 0
    mseconds = int(s*1000)
    if mseconds:
        r.append ('%sms' % (mseconds))
        s -= mseconds / 1000
    if last_unit=='ms': s = 0
    useconds = int(s*1000000)
    if useconds:
        r.append ('%sus' % (useconds))
        s -= useconds / 1000000
    if last_unit=='us': s = 0
    nseconds = int(s*1000000000)
    if nseconds:
        r.append ('%sns' % (nseconds))
        s -= nseconds / 1000000000
    # Nothing accumulated at all (input was 0 or below resolution).
    if not r:
        return '0'
    return ''.join(r)
def cloudtrail_root_account_usage(rec):
    """
    author: airbnb_csirt
    description: Root AWS credentials are being used;
                 This is against best practice and may be an attacker
    reference_1: https://aws.amazon.com/premiumsupport/knowledge-center/
                     cloudtrail-root-action-logs/
    reference_2: http://docs.aws.amazon.com/IAM/latest/UserGuide/id_root-user.html
    playbook: (a) identify who is using the Root account
              (b) ping the individual to determine if intentional and/or legitimate
    """
    # reference_1 contains details on the logic below
    identity = rec['detail']['userIdentity']
    if identity['type'] != 'Root':
        return False
    if identity.get('invokedBy') is not None:
        return False
    return rec['detail']['eventType'] != 'AwsServiceEvent'
def unquote(string):
    """remove optional quotes (simple or double) from the string

    :type string: str or unicode
    :param string: an optionally quoted string
    :rtype: str or unicode
    :return: the unquoted string (or the input string if it wasn't quoted)
    """
    if not string:
        return string
    if string[0] in '"\'':
        string = string[1:]
    # Guard the emptiness re-check: a lone quote character becomes empty
    # after the first strip (the original code raised IndexError here).
    if string and string[-1] in '"\'':
        string = string[:-1]
    return string
def _get_mixture_weights_constraints(n_mixtures):
"""Constrain mixture weights to be between 0 and 1 and sum to 1."""
if n_mixtures == 1:
msg = "Set the mixture weight to 1 if there is only one mixture element."
return [
{
"loc": "mixture_weights",
"type": "fixed",
"value": 1.0,
"description": msg,
}
]
else:
msg = "Ensure that weights are between 0 and 1 and sum to 1."
return [{"loc": "mixture_weights", "type": "probability", "description": msg}] |
def italics(line):
    """ Convert complete '__' marker pairs into html <em>/</em> tags.

    :param line: str, the line in markdown format
    :return: str, the line with paired markers converted; an unpaired
        trailing marker is left untouched
    """
    pairs = line.count('__') // 2
    for _ in range(pairs):
        line = line.replace('__', '<em>', 1)
        line = line.replace('__', '</em>', 1)
    return line
def remove_elements_from_text(text: str, elements: str) -> str:
    """
    Strip every character listed in *elements* out of *text*.

    Args:
        text (:obj:`str`):
            Text to process.
        elements (:obj:`str`):
            Characters to delete from the text.
    Returns:
        :obj:`str`:
            Text with the given characters removed.
    """
    # A deletion translation table removes all listed characters in one pass.
    return text.translate(str.maketrans("", "", elements))
def is_decorated(field_spec):
    """
    Tell whether this field spec is decorated, i.e. its config defines a
    prefix, suffix, or quote.

    :param field_spec: spec mapping to check
    :return: true or false
    """
    if 'config' not in field_spec:
        return False
    config = field_spec['config']
    return any(key in config for key in ('prefix', 'suffix', 'quote'))
def getChainID(line):
    """
    Read the chain ID (column index 21) from a PDB line, defaulting to "A"
    when the line is None or the column is blank.
    """
    # `is None` replaces the previous `== None` identity-by-equality check.
    if line is None:
        return "A"
    if line[21] == " ":
        return "A"
    return line[21]
def parseversion(version):
    """ Parse a version string from an AT or a BDP into ints so it can
    be easily compared.

    Parameters
    ----------
    version : str
        The string to parse: 1 to 3 dot separated integer components.

    Returns
    -------
    Tuple containing the major, minor, and sub version numbers (all ints);
    missing minor/sub components default to 0.

    Raises
    ------
    Exception
        If the string has more than 3 components.
    """
    parts = version.split(".")
    if not 1 <= len(parts) <= 3:
        raise Exception("Improperly formatted version string, it must contain 1, 2, or 3 ints.")
    # Convert what is present and zero-pad the missing components.
    numbers = [int(part) for part in parts] + [0, 0]
    return (numbers[0], numbers[1], numbers[2])
def user_feedback(result_id, flags, comment, query, source_id=None, doc_type=None):
    """Format the properties of the ``feedback`` event.

    :param result_id: the ES document ID the feedback applies to
    :type result_id: str
    :param flags: a dictionary with labels as keys and boolean values
    :type flags: dict
    :param comment: the (optional) free text provided by the user
    :type comment: str
    :param query: a dictionary that specifies the query and its options
    :type query: dict
    :param source_id: which index was targeted; ``None`` means the search
                      ran against the combined index
    :type source_id: str or None
    :param doc_type: the document type (if any)
    :type doc_type: str or None
    """
    event = dict(
        source_id=source_id,
        doc_type=doc_type,
        result_id=result_id,
        flags=flags,
        comment=comment,
        query=query,
    )
    return event
def enc_bool(val):
    r"""Encode a boolean as a single byte string: '\x01' or '\x00'."""
    # struct's '?' format is unavailable in py2.4, hence the manual encode.
    return '\x01' if val else '\x00'
def accuracy(y_true, y_predicted):
    """
    Fraction of positions where prediction coincides exactly with truth.

    Args:
        y_true: array of true values
        y_predicted: array of predicted values
    Returns:
        portion of absolutely coincidental samples (0 for empty input)
    """
    total = len(y_true)
    if not total:
        return 0
    matches = sum(1 for truth, guess in zip(y_true, y_predicted) if truth == guess)
    return matches / total
def _split_series_episode(title):
    """Return the series and the episode titles; if this is not a
    series' episode, the returned series title is empty.

    This function recognizes two different styles:
        "The Series" An Episode (2005)
        "The Series" (2004) {An Episode (2005) (#season.episode)}
    """
    series_title = ''
    episode_or_year = ''
    if title[-1:] == '}':
        # Title of the episode, as in the plain text data files.
        begin_eps = title.rfind('{')
        if begin_eps == -1: return '', ''
        series_title = title[:begin_eps].rstrip()
        # episode_or_year is returned with the {...}
        episode_or_year = title[begin_eps:].strip()
        # Suspended entries carry no usable episode information.
        if episode_or_year[:12] == '{SUSPENDED}}': return '', ''
    # XXX: works only with tv series; it's still unclear whether
    # IMDb will support episodes for tv mini series and tv movies...
    elif title[0:1] == '"':
        second_quot = title[1:].find('"') + 2
        if second_quot != 1: # a second " was found.
            episode_or_year = title[second_quot:].lstrip()
            first_char = episode_or_year[0:1]
            if not first_char: return '', ''
            if first_char != '(':
                # There is not a (year) but the title of the episode;
                # that means this is an episode title, as returned by
                # the web server.
                series_title = title[:second_quot]
    return series_title, episode_or_year
def get_label_from_data(labels, position, prototype):
    """ Return labels[position], mirroring the MATLAB reference
    implementation.  The *prototype* argument is accepted for interface
    compatibility but unused. """
    return labels[position]
def raster_calculation(raster_list, weight_list):
    """
    Weighted sum of the given rasters.

    Args:
        raster_list (list): input rasters
        weight_list (list): weight per raster (must match in length)
    Returns:
        the weighted-sum raster
    """
    assert len(raster_list) == len(weight_list), "Both list should have the same length!"
    # sum() starts at 0, matching the original accumulator initialisation.
    return sum(raster * weight for raster, weight in zip(raster_list, weight_list))
def can_double_bet(player_bets, player_cash):
    """
    Tell whether the player has at least as much money as the total of the
    first bet.

    :param player_bets: list of bets; each bet is an iterable of amounts
    :param player_cash: the player's available cash
    :return: True or False
    """
    return player_cash >= sum(player_bets[0])
def _aggregate_wsgi_filename(filename):
"""
The WSGI profiler outputs files like this:
GET.root.000003ms.1543612537.prof
For comparison in our plotter we want them to look like this:
GET.root
"""
return ".".join(filename.split(".")[:2]) |
def format_float_to_str(num):
    """Format *num* as a human-readable float string.

    The number is rendered with "%f" (six decimal places) and redundant
    trailing zeros are stripped from the fractional part, always keeping
    at least one digit:
        0.0000001 -> 0.0
        0.000000  -> 0.0
        37        -> 37.0
        1.0000019 -> 1.000002

    :param num: number to be formatted
    :return: string representation of the number
    """
    integer_part, fractional_part = ("%f" % num).split(".")
    trimmed = fractional_part.rstrip("0") or "0"
    return integer_part + "." + trimmed
def find_clones(population):
    """
    Find all individuals that have a clone.

    A clone is detected when two individuals share either:
      1) the same pattern and depot assignment (``depot_chromosome``), or
      2) the same cost (``length``).
    There is a chance of a clone with an identical depot_chromosome but a
    different solution, because the split algorithm and the crossover
    algorithm do not produce identical solutions; when this happens the
    version with the larger length is treated as the worse clone.

    Args:
        population: List of individuals
    Returns: List of individuals (the worse member of each detected pair)
    """
    tmp_pop = population.copy()
    clones = []
    while tmp_pop:
        individual1 = tmp_pop[0]
        clone_found = False
        for i, individual2 in enumerate(tmp_pop[1:]):
            # if its a clone, remove the worse clone
            szen1 = (individual1.depot_chromosome == individual2.depot_chromosome)
            szen2 = (individual1.length == individual2.length)
            if szen1 or szen2:
                if individual1.length > individual2.length:
                    index_pop = 0
                else:
                    index_pop = i + 1
                worst_clone = tmp_pop.pop(index_pop)
                clones.append(worst_clone)
                # NOTE(review): clone_found is (re)set to False here, so
                # the `del tmp_pop[0]` below always runs — after a pop at
                # index 0 this drops a second individual.  Presumably this
                # was meant to be `clone_found = True`; confirm intent.
                clone_found = False
                break
            # else remove the first object
        if not clone_found:
            del tmp_pop[0]
    return clones
def json_apply(fragment, check_func, func):
    """Recursively walk nested dicts/lists; wherever check_func(fragment)
    is True the fragment is replaced by func(fragment), otherwise leaves
    are returned unchanged."""
    if check_func(fragment):
        return func(fragment)
    if isinstance(fragment, list):
        return [json_apply(item, check_func, func) for item in fragment]
    if isinstance(fragment, dict):
        return {key: json_apply(value, check_func, func)
                for key, value in fragment.items()}
    return fragment
def _escape_template_string(template):
# type: (str) -> str
"""Escape the '$' in template strings unless followed by '{'."""
# See https://docs.python.org/2/library/string.html#template-strings
template = template.replace('${', '#{')
template = template.replace('$', '$$')
return template.replace('#{', '${') |
def dot_map_dict_to_nested_dict(dot_map_dict):
    """
    Convert a flat dict with dot separated keys, e.g.
    ``{'one.two.three.four': 4, 'five.zero': 'foo'}``,
    into the corresponding nested dict.

    http://stackoverflow.com/questions/16547643/convert-a-list-of-delimited-strings-to-a-tree-nested-dict-using-python

    :param dot_map_dict: mapping with dot separated string keys
    :return: nested dictionary
    :raises ValueError: on duplicate leaf keys
    :raises TypeError: when a dotted path traverses a non-dict leaf
    """
    tree = {}
    for key, item in dot_map_dict.items():
        *parents, leaf = key.split('.')
        if not parents:
            # Undotted key: assign directly at the top level.
            if key in tree:
                raise ValueError("Duplicate key: {}".format(key))
            tree[key] = item
            continue
        node = tree
        for part in parents:
            node = node.setdefault(part, {})
        if not isinstance(node, dict):
            raise TypeError(
                "Key inside dot map must point to dictionary: {}".format(
                    key
                )
            )
        if leaf in node:
            raise ValueError("Duplicate key: {}".format(leaf))
        node[leaf] = item
    return tree
def _get_relative_path(storage_location, path):
"""
Given a storage location and an absolute path, return
the relative path (i.e. the dump key)
"""
prefix_len = len(storage_location)
return path[prefix_len:] |
def get_deprt(lista):
    """Return the set of distinct items in *lista*."""
    return set(lista)
def _adjmatType(adjMat):
"""(helper function) retruns <class 'int'> if the adjacency matrix is a (0,1)-matrix, and
returns <class 'list'> if the adjacency matrix contains edge weights, and returns None if
neither of the cases occurs.
Args:
adjMat (2D - nested - list): the adjacency matrix.
Returns:
(type) the type of the adjacency matrix as explained above.
"""
checktype = {all(isinstance(entry, list) for entry in row) for row in adjMat}
if len(checktype) == 1 and checktype.pop() == True: return list
checktype = {all(isinstance(entry, int) for entry in row) for row in adjMat}
if len(checktype) == 1 and checktype.pop() == True: return int
return None |
def bounds(grid):
    """Return a list of tuples reporting the min and max value of each
    coordinate in the given grid (keyed by (x, y, z) tuples)."""
    xs, ys, zs = zip(*grid)
    return [(min(xs), max(xs)), (min(ys), max(ys)), (min(zs), max(zs))]
def _dictionary_product(dictionary):
"""Returns a named cartesian product of dictionary's values."""
# Converts {'a': [1, 2], 'b': [3, 4]} into
# [{'a': 1, 'b': 3}, {'a': 1, 'b': 4}, {'a': 2, 'b': 3}, {'a': 2, 'b': 4}]
product = [[]]
for values in dictionary.values():
# Iteratively grow the elements of the product.
product = [element + [value] for element in product for value in values]
dicts = [{k: v for k, v in zip(dictionary, element)} for element in product]
return dicts |
def checkComing( visdict, dayspan=8 ):
    """Check for any visits that might be scheduled for execution in the
    next <dayspan> days, and return a string reporting them to the user.
    Returns an empty string when nothing is coming up.
    """
    daynames = ['Mon','Tue','Wed','Thu','Fri','Sat','Sun']
    # Short spans only count firmly scheduled visits; longer spans also
    # include visits still in scheduling/implementation.
    if dayspan <= 8 :
        schedlist = ['Scheduled']
    else :
        schedlist = ['Scheduled','Scheduling','Implementation']
    # Keep visits whose start or end falls inside the window.
    comingSoon = [ k for k in visdict.keys()
                   if ( visdict[k]['status'] in schedlist )
                   and ( (0 < visdict[k]['daystostart'] < dayspan)
                         or (0 < visdict[k]['daystoend'] < dayspan) )
                   ]
    # Sort chronologically by end date.
    datekey = lambda x : visdict[x]['enddate'].isoformat()
    comingVisits = sorted(comingSoon,key=datekey,reverse=False)
    if len(comingVisits)==0 : return('')
    report = '\n Visits Scheduled (or schedulable) for the Next %i days:\n\n'%dayspan
    for vis in comingVisits :
        datestr = visdict[vis]['enddate'].date().isoformat()
        timestr = visdict[vis]['enddate'].time().isoformat()[:5]
        target = visdict[vis]['targets']
        weekday = daynames[ visdict[vis]['enddate'].date().weekday() ]
        report += '%s %s %s %s (%s)\n'%(
            vis, weekday, datestr,timestr,target)
    report += '\n'
    return( report )
def fill_dict_cols(collection):
    """
    Pad the columns of *collection* (a dict mapping column name -> list of
    values) with empty strings so every column has the same length.

    Note: the returned dict aliases (and extends in place) the original
    column lists, as the previous implementation did.
    """
    result = {name: collection[name] for name in collection}
    longest = max((len(column) for column in result.values()), default=0)
    for column in result.values():
        # fill the list with empty entries so we can use the pandas dataframe
        column.extend([""] * (longest - len(column)))
    return result
def zfill_to_collection_size(index: int, collection_size: int) -> str:
    """
    Zero-pad *index* so that indexes sort correctly as strings within a
    collection of *collection_size* items.

    Examples:
        for 10 items prepends up to 1 zero: 1 -> "01", 10 -> "10"
        for 100 items prepends up to 2 zeroes: 7 -> "007", 13 -> "013"
    """
    width = len(str(collection_size))
    return str(index).zfill(width)
def add_lists(*args):
    """
    Concatenate the given lists. Trivial, but kept for symmetry with
    add_dicts.
    """
    return [item for arg in args for item in arg]
def check_degree(num, edge, out_degree, in_degree):
    """
    Build out/in degree indicator vectors from *edge* and compare them
    against the expected *out_degree* / *in_degree* vectors.

    Returns 1 on an exact match, else 0.
    """
    outs, ins = [0] * num, [0] * num
    for e in edge:
        outs[e[0]] = 1
        ins[e[-1]] = 1
    return 1 if outs == out_degree and ins == in_degree else 0
def get_public_attributes_and_methods(obj, exclude_parent=False):
    """Return a sorted list of public attributes and methods for an object.

    Names are collected from the instance ``__dict__`` (when present) and
    from the class ``__dict__``; anything starting with an underscore is
    treated as non-public and skipped.

    Parameters
    ----------
    exclude_parent : bool
        If ``True``, only display public attributes and methods specific to
        the current class, excluding those inherited from the parent class.
        Overridden and extended methods are not excluded.
    """
    try:
        all_public = [i for i in obj.__dict__ if not i.startswith("_")]
    except AttributeError:
        # Objects without an instance __dict__ (e.g. __slots__ classes).
        all_public = []
    all_public += [
        i
        for i in obj.__class__.__dict__
        if i not in all_public and not i.startswith("_")
    ]
    if not exclude_parent:
        # NOTE(review): __base__() *instantiates* the parent class in order
        # to recurse on an instance of it — this fails for parents whose
        # __init__ requires arguments; confirm this is intended.
        parent_public = get_public_attributes_and_methods(
            obj.__class__.__base__(), exclude_parent=True
        )
        all_public += [i for i in parent_public if i not in all_public]
    return sorted(all_public, key=str.lower)
def Levenshtein_Distance(str1, str2):
    """
    Compute the edit distance between str1 and str2 via the classic
    dynamic-programming table.

    Args:
        str1: string A
        str2: string B
    Return:
        value of edit distance
    """
    rows, cols = len(str1), len(str2)
    # table[r][c] = edit distance between str1[:r] and str2[:c].
    table = [[row + col for col in range(cols + 1)] for row in range(rows + 1)]
    for row in range(1, rows + 1):
        for col in range(1, cols + 1):
            cost = 0 if str1[row - 1] == str2[col - 1] else 1
            table[row][col] = min(
                table[row - 1][col] + 1,
                table[row][col - 1] + 1,
                table[row - 1][col - 1] + cost,
            )
    return table[rows][cols]
def s2f(s):
    """ Convert a string to a float, tolerating ',' thousands separators.

    Returns None for None or the empty string; numeric inputs are
    returned unchanged.
    """
    if s is None or s == '':
        return None
    # Exact type check kept deliberately so bool (and other int/float
    # subclasses) fall through to the string path as before.
    if type(s) in (float, int):
        # Already a number
        return s
    if s:
        return float(s.replace(',', ''))
    return None
def restructure_output(_doc):
"""Restructure the API output"""
field_mapping = {'HGNC': 'hgnc',
'ensembl.gene': 'ensembl',
'MIM': 'omim',
'entrezgene': 'entrez',
'pharos.target_id': 'pharos',
'umls.cui': 'umls'}
# loop through mapping, change the field name one by one
for k, v in field_mapping.items():
if _doc.get(k):
_doc[v] = _doc.pop(k)
return _doc |
def median(x):
    """ Sample median of *x* without numpy (input is not mutated). """
    ordered = sorted(x)
    mid = len(ordered) // 2
    if len(ordered) % 2:
        return ordered[mid]
    return (ordered[mid] + ordered[mid - 1]) / 2.0
def Group(array):
    """Groups duplicate elements, e.g. [2, 1, 2, 2, 3] => [1, 2, 3], [1, 3, 1].

    Note: sorts *array* in place, as before.
    """
    array.sort()
    uniq, cnts = [], []
    for value in array:
        if uniq and value == uniq[-1]:
            cnts[-1] += 1
        else:
            uniq.append(value)
            cnts.append(1)
    return uniq, cnts
def identify_disruptive_sequences(ambiguous_positions, threshold):
    """
    Count, per sample, how many times it introduces a missing character
    into a position that would otherwise be core.

    Parameters
    ----------
    ambiguous_positions [dict] : {position: [sample list]}
    threshold [int] : positions with fewer than this many missing samples
        count as disruptive
    Returns
    -------
    disruptive_samples [dict] : sample_id -> number of disruptive positions
    """
    disruptive_samples = {}
    for pos, samples in ambiguous_positions.items():
        if len(samples) >= threshold:
            continue
        for sample_id in samples:
            disruptive_samples[sample_id] = disruptive_samples.get(sample_id, 0) + 1
    return disruptive_samples
def evaluate_tuple(columns, mapper, condition):
    """Evaluate a condition against *columns*/*mapper*.

    A tuple condition is interpreted as (callable, arg1, arg2) and invoked
    as ``callable(columns, mapper, arg1, arg2)``; a bare callable is
    invoked as ``callable(columns, mapper)``.
    """
    if not isinstance(condition, tuple):
        return condition(columns, mapper)
    func, first, second = condition[0], condition[1], condition[2]
    return func(columns, mapper, first, second)
def risk_color_for(probability, impact, num_risks):
    """
    Given a probability + impact, color a square.
    If there are no risks the square will be light grey (#f0f0f0).
    """
    # See https://www.colourlovers.com/palette/56122/Sweet_Lolly
    colors = ["#00C176", "#88C100", "#FABE28", "#FF8A00", "#FF003C", "#f0f0f0"]
    if num_risks <= 0:
        return colors[5]
    # Ordered (max_probability, max_impact, color_index) rules taken
    # directly off an internal slide; the first matching rule wins,
    # mirroring the original if-chain exactly.
    rules = (
        (5, 1, 0),  # green
        (3, 2, 0),  # green
        (5, 2, 2),  # yellow
        (2, 3, 0),  # green
        (4, 3, 2),  # yellow
        (5, 3, 4),  # red
        (1, 4, 0),  # green
        (3, 4, 2),  # yellow
        (5, 4, 4),  # red
        (2, 5, 2),  # yellow
        (5, 5, 4),  # red
    )
    for max_prob, max_impact, color_index in rules:
        if probability <= max_prob and impact <= max_impact:
            return colors[color_index]
    # Nothing else matched, use grey
    return colors[5]
def get_report_line(percentage, line_size):
    """Build a progress-bar string of *line_size* characters: '=' for the
    fraction already done, '-' for the remainder."""
    return "".join(
        "=" if float(pos) / line_size < percentage else "-"
        for pos in range(line_size)
    )
def get_pos(pk, token_index, pos_dict):
    """Look up the POS tag for (file, sentId, token_index); index -1
    denotes the ROOT node.

    input: pk = (file, sentId, eId, eiId), token_index and pos_dict
    output: pos_dict[(file, sentId, token_index)]
    """
    if token_index == -1:
        return "ROOT"
    file_id, sent_id = pk[0], pk[1]
    return pos_dict[(file_id, sent_id, token_index)]
def div(a, b):
    """Elementwise division of vector *a* by another vector, or by a scalar."""
    if not hasattr(b, '__iter__'):
        return [item / b for item in a]
    if len(a) != len(b):
        raise RuntimeError('Vector dimensions not equal')
    return [x / y for x, y in zip(a, b)]
def bbcommon(bb, bbother):
    """
    Return 1 when the two bounding boxes overlap, else 0.

    Element 0 is west, element 2 is east; elements 1 and 3 hold the other
    axis (the original comment was unsure whether 1 is north or south).
    The boxes overlap when they are separated along neither axis.
    """
    disjoint_ew = (bbother[2] < bb[0]) or (bbother[0] > bb[2])
    disjoint_ns = (bbother[3] < bb[1]) or (bbother[1] > bb[3])
    if disjoint_ew or disjoint_ns:
        return 0
    return 1
def _genfmt(size, endian, sign):
"""
Generates a format string that can be used with struct.pack()
and struct.unpack().
"""
if sign not in [True, False]:
raise ValueError('sign must be either True or False')
if endian == 'little':
fmt = '<'
elif endian == 'big':
fmt = '>'
else:
raise ValueError('endianness must be either "little" or "big"')
if size == 16:
fmt += 'h' if sign else 'H'
elif size == 32:
fmt += 'i' if sign else 'I'
elif size == 64:
fmt += 'q' if sign else 'Q'
else:
raise ValueError('supported sizes are 16, 32 and 64')
return fmt |
def uniq_vals_incommon(list1, list2):
    """Find the unique values in common between two lists.

    Uses set intersection (O(len(list1) + len(list2))) instead of the
    previous quadratic per-element membership scan; the returned list
    follows set iteration order, as before.
    """
    return list(set(list1) & set(list2))
def splitcomma(source, sep=","):
    """split a separator-delimited string into a list of stripped
    elements, tolerating a single trailing separator; blank input
    yields an empty list.
    """
    text = source.strip()
    if text.endswith(sep):
        text = text[:-1]
    if not text:
        return []
    return [part.strip() for part in text.split(sep)]
def command_type(current_command):
    """Classify an assembler command by its first character.

    A_Command for @Xxx where Xxx is a symbol or decimal number,
    L_Command for (Xxx) where Xxx is a symbol,
    C_Command for dest=comp;jump.
    """
    first = current_command[0]
    if first == '@':
        return 'A_Command'
    if first == '(':
        return 'L_Command'
    return 'C_Command'
def update_intersection_properties(inters, config):
    """
    Set each intersection's feature properties from its contributing
    segments, taking the max value available for each feature (as given by
    the config file), or None when no segment provides the feature.

    Args:
        inters - a list of intersection objects
        config - the configuration object, which has the features
    Returns:
        inters - updated intersection object list
    """
    for inter in inters:
        for feature in config.features:
            values = [segment[feature] for segment in inter.data if feature in segment]
            # Max of the feature across all intersection segments, else None.
            inter.properties[feature] = max(values) if values else None
        inter.properties['connected_segments'] = inter.connected_segments
    return inters
def filter_included_resources(include_list: list, resource_tuple_list: list) -> list:
    """
    Filter the (key, resource) tuples from get_resource_urls(), keeping
    only those whose resource name appears in *include_list*.
    """
    return [(key, resource) for key, resource in resource_tuple_list
            if resource.name in include_list]
def _unary_op(result_name, func_name, arg_name):
"""
Generates a function call to func_name with argument arg_name
storing the result in result_name.
"""
return f"{result_name} = {func_name}({arg_name})" |
def str_to_animate_params(s):
    """
    Parse animation parameters.

    :param s: A string of the form "<param> <start> <stop> <n_steps>"
    :return: (param: str, start: float, stop: float, n_steps: int)
    """
    fields = s.split(" ")
    param, start, stop, n_steps = fields
    return param, float(start), float(stop), int(n_steps)
def pytest_error_str(error):
    """Stringify a pytest error; across pytest versions the real exception
    may or may not be wrapped in a ``.value`` attribute."""
    wrapped = getattr(error, "value", error)
    return str(wrapped)
def add(x, y):
    """Print the operands, then return their sum."""
    print(x, y)
    return x + y
def ev_fill(decimal_part):
    """Zero-pad the first decimal part, honouring E / V code prefixes:
    V codes pad the remainder to 2 digits, E codes to 3, and plain
    parts to 3.
    """
    prefix = decimal_part[:1]
    if prefix == "V":
        return "V" + decimal_part[1:].zfill(2)
    if prefix == "E":
        return "E" + decimal_part[1:].zfill(3)
    return decimal_part.zfill(3)
def parse_events(props):
    """
    Pull the dashEvents out of the Component props.

    Parameters
    ----------
    props: dict
        Dictionary with {propName: propMetadata} structure
    Returns
    -------
    list
        List of Dash event strings (empty when there is no enum
        'dashEvents' prop)
    """
    if 'dashEvents' not in props:
        return []
    meta = props['dashEvents']['type']
    if meta['name'] != 'enum':
        return []
    return [entry['value'] for entry in meta['value']]
def label_mapper(label):
    """
    Map a DALES class label (float) onto a contiguous integer id.

    Args:
        label: one of 1.0, 2.0, 6.0, 9.0, 26.0
    Returns:
        int id in 0..4
    Raises:
        KeyError: for unknown labels
    """
    return {1.0: 0, 2.0: 1, 6.0: 2, 9.0: 3, 26.0: 4}[label]
def calculate_safe_offset(firewall: dict) -> int:
    """Return the smallest start delay that crosses the firewall uncaught.

    A scanner of depth d is back at the top every 2*d-2 picoseconds and
    layer L is reached L picoseconds after entering, so a delay is safe
    when no layer satisfies (L + delay) % (2*d - 2) == 0.
    """
    delay = 0
    while any((layer + delay) % (2 * depth - 2) == 0
              for layer, depth in firewall.items()):
        delay += 1
    return delay
def fix_relative(html, url):
    """Crude best-effort rewrite of relative and protocol-relative ``src``
    attributes in *html* into absolute ones derived from *url*."""
    try:
        base = "/".join(url.split("/")[:3])
        # Protocol-relative ('//') rules must run before the single-slash
        # ones so those URLs are not rewritten twice.
        for old, new in (
            ("src='//", "src='http://"),
            ('src="//', 'src="http://'),
            ("src='/", "src='%s/" % base),
            ('src="/', 'src="%s/' % base),
        ):
            html = html.replace(old, new)
    except Exception:
        # Best effort only: malformed input is returned untouched.
        pass
    return html
def _courses_from_container(container):
"""
Pluck nested courses out of a ``container`` dictionary,
which is either the ``curriculum`` field of a program, or
a program itself (since either may contain a ``courses`` list).
"""
return [
course.get('uuid')
for course in container.get('courses', [])
] |
def is_prime_v1(integer):
    """Return 'True' if integer is a prime. Else 'False'.

    Fixes over the original version:
    * divisibility now uses ``== 0`` instead of the identity test ``is 0``,
      which only happens to work for small ints in CPython and is a
      SyntaxWarning in modern Python;
    * all values below 2 (negatives, 0, and the unit 1) are rejected
      explicitly — the original returned True for negatives because
      ``range(2, integer)`` was empty.
    """
    if integer < 2:
        return False
    for candidate in range(2, integer):
        if integer % candidate == 0:
            # The number has a divisor other than itself and one.
            return False
    return True
def divide_chunks(a_list, n):
    """Divide a list into chunks of size n.

    :param a_list: an entry list.
    :param n: size of each chunk (the last chunk may be shorter).
    :return: chunks in a list object.
    """
    chunks = []
    for start in range(0, len(a_list), n):
        chunks.append(a_list[start:start + n])
    return chunks
def EW_G_curve(EW, FeH, fitted_vals, power):
    """Calculate curve in V/EW space for a given metallicity.

    ``fitted_vals`` supplies the five fit coefficients (a, b, c, d, e);
    ``power`` is the exponent applied to EW in the d-term.
    """
    a, b, c, d, e = fitted_vals
    numerator = a - FeH + d * EW ** power + c * EW
    return -numerator / (b + e * EW)
def get_two_by_two_edges(*edges):
    """ create the list of edges

    Parameters
    ----------
    * edges : list or tuple
        each consecutive elements will be an edge

    Returns
    -------
    list of 2-uple for the edges

    Raises
    ------
    TypeError
        if an argument is not a list or tuple
    ValueError
        if a path contains fewer than two nodes
    """
    # Examples :
    # G = test_graph_from_edges((1,2,3),(4,3))
    # 1 -> 2 -> 3, and 3 -> 4
    two_by_two_edges = []
    for list_of_edge in edges:
        # Type must be validated BEFORE calling len(): the original code
        # called len() first, so a non-sized argument (e.g. an int) raised
        # an unintended TypeError instead of this explicit one.
        if not isinstance(list_of_edge, (tuple, list)):
            raise TypeError("argument should be tuple or list, instead i got : '%s' " % type(list_of_edge))
        if len(list_of_edge) <= 1:
            raise ValueError("edges must be of length at least 2")
        for e1, e2 in zip(list_of_edge[:-1], list_of_edge[1:]):
            two_by_two_edges.append((e1, e2))
    return two_by_two_edges
def page_not_found(e):
    """Return a custom 404 error as a (body, status-code) pair."""
    return ('Sorry, nothing at this URL.', 404)
def to_list(tag):
    """
    Wrap `tag` in a list unless it is already a list/tuple.

    Args:
        tag (obj): Anything.

    Returns:
        list: ``tag`` itself when it is already a list or tuple,
        otherwise ``[tag]``.
    """
    return tag if isinstance(tag, (tuple, list)) else [tag]
def _DisplayValue(info, field, padding):
"""Gets the value of field from the dict info for display.
Args:
info: The dict with information about the component.
field: The field to access for display.
padding: Number of spaces to indent text to line up with first-line text.
Returns:
The value of the field for display, or None if no value should be displayed.
"""
value = info.get(field)
if value is None:
return None
skip_doc_types = ('dict', 'list', 'unicode', 'int', 'float', 'bool')
if field == 'docstring':
if info.get('type_name') in skip_doc_types:
# Don't show the boring default docstrings for these types.
return None
elif value == '<no docstring>':
return None
elif field == 'usage':
lines = []
for index, line in enumerate(value.split('\n')):
if index > 0:
line = ' ' * padding + line
lines.append(line)
return '\n'.join(lines)
return value |
def split_by_newline(value):
    """
    Decode UTF-8 bytes and return the text split on CRLF line endings.
    """
    text = value.decode('utf-8')
    return text.split('\r\n')
def float_else_zero(sstring):
    """Return converted string to float. If conversion fail, return zero.

    :param sstring: Value to be converted (normally a string)
    :return: ``float(sstrinq)`` if ``sstring`` can be converted to float
             (e.g. ``"3.14"``), else ``0``
    """
    try:
        return float(sstring)
    except (ValueError, TypeError):
        # TypeError covers non-string inputs such as None, honouring the
        # documented "return zero on failure" contract; the original let
        # that case propagate.
        return 0
def cir_RsQ(w, Rs, Q, n):
    """
    Simulation Function: -Rs-Q-
    Inputs
    ----------
    w = Angular frequency [1/s]
    Rs = Series resistance [Ohm]
    Q = Constant phase element [s^n/ohm]
    n = Constant phase element exponent [-]
    """
    # Impedance of the constant-phase element in series with Rs.
    cpe_impedance = 1 / (Q * (1j * w) ** n)
    return Rs + cpe_impedance
def str2seq(st, func=int, sep=None):
    """ "1 2 3" -> [func('1'), func('2'), func('3')]

    :param st: string to split
    :param func: converter applied to each field (default ``int``)
    :param sep: separator passed to ``str.split``; ``None`` means any
        run of whitespace, exactly as in the original two-branch version
    :return: list of converted values
    """
    # str.split(None) already means "split on whitespace", so the original
    # if/else duplication collapses to a single expression.
    return [func(field) for field in st.split(sep)]
def xywh_iou(box1, box2):
    """
    calculate iou between box1 and box2

    :param box1: (4, ), format (left, upper, width, height)
    :param box2: (4, ), format (left, upper, width, height)
    :return: float, iou score in [0, 1]

    The right/bottom edges are computed inclusively (left + width - 1), so
    the intersection extents must use the matching ``+1``; the original
    multiplied the exclusive extents, which made the IoU of two identical
    boxes come out below 1.
    """
    l_max = max(box1[0], box2[0])
    r_min = min(box1[0] + box1[2] - 1, box2[0] + box2[2] - 1)
    u_max = max(box1[1], box2[1])
    b_min = min(box1[1] + box1[3] - 1, box2[1] + box2[3] - 1)
    # Strictly greater: equal edges still mean a one-pixel-wide overlap
    # under the inclusive convention.
    if l_max > r_min or u_max > b_min:
        return 0.
    area1 = box1[2] * box1[3]
    area2 = box2[2] * box2[3]
    area_i = (r_min - l_max + 1) * (b_min - u_max + 1)
    return area_i * 1.0 / (area1 + area2 - area_i)
def get_modality(config_file):
    """
    Gets the list of modalities to be searched.
    Return None if the search is by date only (i.e. the Modality entry
    is empty/falsy).

    :param config_file: parsed configuration mapping
    :return: the ``StudyInfo -> Modality`` value, or None
    """
    modality = config_file['StudyInfo']['Modality']
    return modality if modality else None
def get_sizes(shares, amount):
    """Transform wallet shares to category sizes optimally
    catsizes = [cs1, cs2, cs3, cs4] - blocks numbers of each color category
    :param shares: list of wallet shares
    :type shares: list in range (4)
    :param amount: total amount of blocks
    :type amount: int
    :return: list of category sizes
    :rtype: list in range (4)
    """
    # Ideal (possibly fractional) size of every category.
    catsizes = [w * amount for w in shares]
    # NOTE(review): this loop returns inside BOTH branches, so only the
    # first element is inspected — a later fractional size after an
    # integer first element would be truncated; confirm this is intended.
    for catsize in catsizes:
        # if any block catsize is non-integer...
        if catsize - int(catsize) > 0:
            # ==============================================================
            # Round all shares optimally (0.5, +1, -1)
            # Exact halves are alternately rounded up and down (trig flips
            # each time) so they roughly cancel; all other values use
            # Python's round().
            trig = True
            for k, cs in enumerate(catsizes):
                if cs - int(cs) == 0.5:
                    if trig:
                        catsizes[k] = int(cs + 0.5)
                        trig = False
                    else:
                        catsizes[k] = int(cs - 0.5)
                        trig = True
                else:
                    catsizes[k] = round(cs)
            # ==============================================================
            # Rounding can leave the total one block off; nudge one
            # category to restore the exact total.
            # NOTE(review): at this point every cs is already an int, so
            # cs - int(cs) is 0 for all entries — maxcat/mincat are always
            # 0 and the first (resp. last) category absorbs the
            # correction; confirm that is the intended tie-break.
            if amount - sum(catsizes) == 1:
                maxcat = max([cs - int(cs) for cs in catsizes])
                for k, cs in enumerate(catsizes):
                    if cs - int(cs) == maxcat:
                        catsizes[k] += 1
                        break
            elif sum(catsizes) - amount == 1:
                mincat = min([cs - int(cs) for cs in catsizes])
                for k, cs in reversed(list(enumerate(catsizes))):
                    if cs - int(cs) == mincat:
                        catsizes[k] -= 1
                        break
            # ==============================================================
            return catsizes
        else:
            # First size is a whole number: return all sizes truncated.
            return [int(cs) for cs in catsizes]
def calculate_delay_per_req(
    num_total_requests: int, total_expected_execution_time: float
) -> float:
    """Calculate the delay to insert between each request sent by slave
    lambda, rounded to the millisecond."""
    return round(total_expected_execution_time / num_total_requests, 3)
def base41_decode(input):
    """Decode a Base41 string.

    input is the string to decode: a sequence of 3-char words, each char
    drawn from the 41-char alphabet (code points 41..81), each word
    encoding one 16-bit little-endian value.

    The decoded data is returned as a bytearray. A TypeError is raised if
    input is not valid (length not a multiple of 3, non-alphabet character
    present, invalid word). The original only enforced the length check;
    the alphabet and word-range checks promised by this docstring are now
    implemented.
    """
    rslt = bytearray()
    i = 0
    while i + 2 < len(input):
        digits = [ord(c) - 41 for c in input[i:i + 3]]
        if any(d < 0 or d > 40 for d in digits):
            raise TypeError("Invalid Base41 string: non-alphabet character")
        x = digits[0] + 41 * digits[1] + 1681 * digits[2]
        if x > 0xFFFF:
            # Word decodes to more than 16 bits.
            raise TypeError("Invalid Base41 string: invalid word")
        rslt.extend([x % 256, x // 256])
        i += 3
    if i != len(input):
        raise TypeError("Invalid Base41 string")
    return rslt
def binary_to_decimal(binary):
    """Converts a binary number(str) into a decimal(int).

    Each non-'0' character at reversed position i contributes 2**i to
    the running total ('0' characters are skipped).
    """
    total = 0
    # Walk from least-significant digit so the index is the power of 2.
    for power, digit in enumerate(reversed(binary)):
        if digit != "0":
            total += 2 ** power
    return total
def _gen_index_name(keys):
"""Generate an index name from the set of fields it is over.
"""
return u"_".join([u"%s_%s" % item for item in keys]) |
def p(*resistances: float) -> float:
    """Computes the total resistance of resistors in parallel."""
    # Conductances add in parallel; the total resistance is the reciprocal.
    conductance = sum(1 / r for r in resistances)
    return 1 / conductance
def _ref_potential_walls_plumed(cv, at, kappa, offset=0.0, exp=2.0, eps=1.0, upper_wall=True):
"""A reference implementation of PLUMED UPPER/LOWER_WALLS restraint for testing."""
if upper_wall and cv <= at - offset:
return 0.0
elif not upper_wall and cv >= at - offset:
return 0.0
dist = cv - at + offset
if not upper_wall:
dist = -dist
return kappa * (dist / eps)**exp |
def emission(height: int) -> int:
    """
    Emission rate (nanoERG/block) at given height.

    The first ``FIXED_RATE_PERIOD`` blocks emit the initial rate; after
    that the rate drops by ``STEP`` ERG at the start of every
    ``EPOCH_LENGTH``-block epoch.
    """
    INITIAL_RATE = 75
    FIXED_RATE_PERIOD = 525600
    EPOCH_LENGTH = 64800
    STEP = 3
    NANO = 10 ** 9
    if height <= FIXED_RATE_PERIOD:
        return INITIAL_RATE * NANO
    epochs_elapsed = (height - FIXED_RATE_PERIOD) // EPOCH_LENGTH + 1
    return (INITIAL_RATE - epochs_elapsed * STEP) * NANO
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.