content
stringlengths 42
6.51k
|
|---|
def get_sso_login_param(attribute_id):
    """
    Assemble params for get_sso_login.
    :param attribute_id: value stored under the 'Attritd' key
    :return: Params in dict
    """
    # NOTE(review): 'Attritd' looks like a typo but is kept as-is — the
    # consumer presumably expects this exact key; confirm before renaming.
    return {'Attritd': attribute_id}
|
def cumulative_sum(values):
    """Return the cumulative (running) sum of a collection of numbers.

    :param values: iterable of numbers
    :return: list where element i is the sum of values[0..i]
    """
    # itertools.accumulate replaces the manual accumulator loop and runs in C.
    from itertools import accumulate  # stdlib; local import keeps file self-contained
    return list(accumulate(values))
|
def validate_max_length(value, max_length):
    """Return True when ``value`` is no longer than ``max_length``."""
    length = len(value)
    return length <= max_length
|
def dedupe(urls):
    """ Gets rid of duplicates in a list (and returns a list).

    Result order is arbitrary (set semantics), exactly as before.
    """
    # The set constructor replaces the manual add-loop.
    return list(set(urls))
|
def list_without_None(*args):
    """ Drop None entries from *args, keeping order.

    Like list(filter(None, args)) but tests identity to None only, so
    tensors (which cannot compare to None elementwise) pass through. """
    kept = []
    for arg in args:
        if arg is not None:
            kept.append(arg)
    return kept
|
def _get_completed_count(action, completed_members):
"""returns the compelted member count."""
for member in completed_members:
if member['action'] == action.id:
return member['count']
return 0
|
def godel(read, out_array, godel_numbers):
    """Count godel numbers in a given read.
    Parameters
    ----------
    read : dictionary
        Dictionary whose keys are DNA sequence strings.
    out_array : array
        The array with the prime numbers.
    godel_numbers : dictionary
        Dictionary with the godel numbers; updated in place and returned.
    Returns
    -------
    godel_numbers : dictionary, {'string': float}
        A dictionary keyed by sequence string, accumulating each string's
        godel number.
    """
    # Base weights used in the godel sum (case-insensitive).
    weights = {'A': 1, 'T': 4, 'G': 3, 'C': 2}
    for elem in read.keys():
        if elem not in godel_numbers:
            godel_numbers[elem] = 0
        pos = 0
        for base in elem:
            weight = weights.get(base.upper())
            if weight is not None:
                # Only recognised bases consume a position in out_array;
                # unknown characters do not advance the index.
                godel_numbers[elem] += out_array[pos] * weight
                pos += 1
    return godel_numbers
|
def merge_bboxes(bboxes: list):
    """
    Merge bounding boxes into one enclosing box.
    :param bboxes: non-empty list of bbox dicts with keys
        x0/x1/y0/y1/top/bottom/page_index
    :return: merged bounding box dict; page_index comes from the first box
    """
    def _agg(fn, key):
        return fn(box[key] for box in bboxes)

    return {
        "x0": _agg(min, "x0"),
        "x1": _agg(max, "x1"),
        "y0": _agg(min, "y0"),
        "y1": _agg(max, "y1"),
        "top": _agg(min, "top"),
        "bottom": _agg(max, "bottom"),
        "page_index": bboxes[0]["page_index"],
    }
|
def parse_timeout(value):
    """Parse the timeout value from either the run or build section
    into an int (or None).
    :param Union[str,None] value: The value to parse.
    :return: int when value is a plain digit string, otherwise None
    """
    if value is None:
        return None
    if not value.strip().isdigit():
        # Non-numeric strings (including negatives/floats) yield None.
        return None
    return int(value)
|
def create_board(lines):
    """Convert the board into a list of lists of ints.
    For example::
        [[ 2, 77,  1, 37, 29],
         [50,  8, 87, 12, 76]]
    """
    # str.split() with no argument already ignores surrounding whitespace.
    return [[int(token) for token in line.split()] for line in lines]
|
def hex_sans_prefix(number):
    """Generates a hexadecimal string from a base-10 number without the standard '0x' prefix."""
    # f'{n:#x}' matches hex(n) exactly, so slicing off two chars is identical.
    prefixed = f"{number:#x}"
    return prefixed[2:]
|
def anonymise_data(json_data: dict) -> dict:
    """Replace parts of the logfiles containing personal information.

    Recurses through nested dicts and lists, mutating ``json_data`` in
    place. Leaf values whose key appears in the replacement table are
    overwritten; non-dict inputs are returned unchanged.
    """
    replacements = {
        'Latitude': 30.4865,
        'Longitude': 58.4892,
        'SerialNumber': 'ANON_SERIAL',
        'MacAddress': 'ANON_MAC',
        'HostName': 'WiserHeatXXXXXX',
        'MdnsHostname': 'WiserHeatXXXXXX',
        'IPv4Address': 'ANON_IP',
        'IPv4HostAddress': 'ANON_IP',
        'IPv4DefaultGateway': 'ANON_IP',
        'IPv4PrimaryDNS': 'ANON_IP',
        'IPv4SecondaryDNS': 'ANON_IP',
        'SSID': 'ANON_SSID',
        'DetectedAccessPoints': []
    }
    if not isinstance(json_data, dict):
        return json_data
    for key, value in json_data.items():
        if isinstance(value, dict):
            json_data[key] = anonymise_data(value)
        elif isinstance(value, list):
            # Lists are recursed element-wise, never replaced wholesale.
            json_data[key] = [anonymise_data(item) for item in value]
        elif key in replacements:
            json_data[key] = replacements[key]
    return json_data
|
def merge_dicts(base, updates):
    """
    Given two dicts, merge them into a new dict as a shallow copy.
    Parameters
    ----------
    base: dict
        The base dictionary; falsy values (None, {}) count as empty.
    updates: dict
        Secondary dictionary whose values override the base.
    """
    merged = dict(base) if base else {}
    if updates:
        merged.update(updates)
    return merged
|
def size_of(rect):
    """Return size of list|tuple `rect` (top, left, bottom, right) as tuple (width, height)"""
    top, left, bottom, right = rect[0], rect[1], rect[2], rect[3]
    return (right - left, bottom - top)
|
def check_eq_files(path1, path2, blocksize=65535, startpos1=0, startpos2=0):
    """ Return True if both files are identical, False otherwise.

    :param path1: first file path
    :param path2: second file path
    :param blocksize: number of bytes compared per read
    :param startpos1: byte offset to start from in the first file
    :param startpos2: byte offset to start from in the second file
    """
    # The original also tested `(buf1 and not buf2) or (buf2 and not buf1)`,
    # but those cases are already covered by `buf1 != buf2` — removed.
    with open(path1, 'rb') as f1, open(path2, 'rb') as f2:
        f1.seek(startpos1)
        f2.seek(startpos2)
        while True:
            buf1 = f1.read(blocksize)
            buf2 = f2.read(blocksize)
            if buf1 != buf2:
                # Content differs, or one file ended before the other.
                return False
            if not buf1:
                # Equal and empty: simultaneous EOF, files match.
                return True
|
def fib(n):
    """ Return the nth Fibonacci number (fib(0) == 0, fib(1) == 1).

    Iterative O(n) implementation: the original used n-deep recursion,
    which hits Python's recursion limit for large n.
    """
    prvprv, prv = 0, 1
    for _ in range(n):
        prvprv, prv = prv, prvprv + prv
    return prvprv
|
def convert_geographic_coordinate_to_pixel_value(lon, lat, transform):
    """
    Converts a longitude/latitude coordinate to a pixel coordinate given the
    geotransform of the image.
    Args:
        lon: Pixel longitude.
        lat: Pixel latitude.
        transform: The geotransform array of the image
            (origin x, pixel width, _, origin y, _, negative pixel height).
    Returns:
        Tuple of refx, refy pixel coordinates.
    """
    x_origin, pixel_width = transform[0], transform[1]
    y_origin, pixel_height = transform[3], -transform[5]
    refx = int(round((lon - x_origin) / pixel_width))
    refy = int(round((y_origin - lat) / pixel_height))
    return refx, refy
|
def parse_op2bdrc_work_id(op2bdrc_work_id):
    """Return op work id and bdrc work id.
    Args:
        op2bdrc_work_id (str): comma separated openpecha/bdrc work id pair,
            possibly a stringified bytes literal (hence the b'/' cleanup)
    Returns:
        list: bdrc work id and op work id
    """
    parts = str(op2bdrc_work_id).split(",")
    op_work_id = parts[0].replace("b'", "")
    bdrc_work_id = parts[1].replace("'", "")
    return [bdrc_work_id, op_work_id]
|
def is_escaped_newline(line):
    """ Check whether the final newline in a string is escaped.
    Parameters
    ----------
    line : string
        String that may end with a newline
    Returns
    -------
    bool
        True iff the string ends with '\\n' preceded by an odd number of
        backslashes (i.e. the newline itself is escaped).
    """
    if line[-1] != '\n':
        return False
    body = line[:-1]
    # Counting trailing backslashes via rstrip replaces the manual
    # reversed-scan loop; an odd count means the newline is escaped.
    trailing_backslashes = len(body) - len(body.rstrip('\\'))
    return trailing_backslashes % 2 == 1
|
def remove_duplicates(l):
    """
    Removes duplicates from l, where l is a List of Lists, keeping the
    first occurrence of each row and preserving order.
    :param l: a List of lists (rows must be hashable once tuple-ized)
    :return: a new list with later duplicate rows dropped
    """
    seen = set()
    result = []
    for row in l:
        key = tuple(row)  # computed once per row; the original hashed it twice
        if key not in seen:
            seen.add(key)
            result.append(row)
    return result
|
def century(year):
    """
    The first century spans from the year 1 up to and including the year
    100, the second from 101 to 200, etc.
    :param year: an integer value.
    :return: the century containing that year (ceiling of year/100).
    """
    # Ceiling division: (year + 99) // 100 == (year - 1) // 100 + 1.
    return (year + 99) // 100
|
def allowed_file(filename, extensions):
    """
    Check whether a file's extension is in the allowed set.
    :param filename: name of file
    :param extensions: allowed file extensions (lowercase)
    :return: True if the extension is allowed, else False
    """
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in extensions
|
def filename_from_analysis(line):
    """
    Obtain the file name value (third whitespace-separated field) from the
    relative line in the final analysis output text file.
    :param line: string from log file
    :return: string containing file name
    """
    fields = line.split()
    # split() already removes surrounding whitespace; strip kept for parity.
    return fields[2].strip()
|
def get_features(bgr, hsv, contours, ground_truths, length=0, debug=True,
                 scale=1.0, extra_args=None, ignore_objects=False):
    """Loop over each pixel in a contour getting the overcomplete
    >40-feature set of features, and then save the mean averages
    of those features as the edge feature for each edge (contour).
    Returns: A dictionary containing lists of values for each
    feature.
    """
    # NOTE(review): this is a stub — despite the docstring, no features are
    # computed, every parameter is ignored, and an empty dict is returned.
    features = {}
    return features
|
def square_root(num):
    """
    Question 12.5
    Integer square root: for num >= 1 returns floor(sqrt(num)) by growing
    a bracket geometrically, then binary-searching inside it.
    """
    prev_root, cur_root, cur_square = 1, 2, 4
    # Square repeatedly until cur_square >= num; cur_root == sqrt(cur_square).
    while cur_square < num:
        prev_root = cur_root
        cur_root = cur_square
        cur_square *= cur_square
    if cur_square == num:
        return cur_root
    # Binary search for the proper root within [prev_root, cur_root].
    low, high = prev_root, cur_root
    while low <= high:
        mid = (low + high) // 2
        mid_square = mid * mid
        if mid_square > num:
            high = mid - 1
        elif mid_square == num:
            return mid
        else:
            low = mid + 1
    return low - 1
|
def get_record_id(rec):
    """
    Extracts the most suitable identifier from a JSON-serialized record on
    the ENCODE Portal: 'accession' if present, else the first alias of a
    non-empty 'aliases' list, else 'uuid'.
    Args:
        rec: `dict`. The JSON-serialization of a record on the ENCODE Portal.
    Returns:
        `str`: The extracted record identifier.
    Raises:
        `Exception`: An identifier could not be extracted from the input record.
    """
    if "accession" in rec:
        return rec["accession"]
    # BUG FIX: the original indexed aliases[0] whenever the key existed,
    # raising IndexError for an empty list; an empty list now falls through.
    if rec.get("aliases"):
        return rec["aliases"][0]
    if "uuid" in rec:
        return rec["uuid"]
    # Typo fix: "uptream" -> "upstream".
    raise Exception("Could not extract an upstream identifier for ENCODE record '{}'.".format(rec))
|
def tensors_debug_string(tensors):
    """Debug string of tensors.
    Args:
        tensors: tensor list; each element exposes `.dtype` (with `.size`)
            and `.shape.num_elements()` — TODO confirm this is the TF API.
    Returns:
        debug string summarising tensor count, distinct dtypes, total
        statically-known size in MB, and the number of dynamic-shaped
        (unknown/empty) tensors.
    """
    distinct_dtypes = {t.dtype for t in tensors}
    sizes = []
    for t in tensors:
        num_elements = t.shape.num_elements()
        if num_elements is not None and num_elements > 0:
            sizes.append(t.dtype.size * num_elements)
        else:
            # Unknown or empty shape: counted as dynamic below.
            sizes.append(None)
    static_sizes = [s for s in sizes if s]
    return "{} tensors ({}): {:.2f} MB and {} dynamic-shaped tensors".format(
        len(tensors),
        ', '.join(repr(dt) for dt in distinct_dtypes),
        sum(static_sizes) / 1024.0 / 1024.0,
        len(sizes) - len(static_sizes))
|
def is_confirming(drone):
    """Check if the drone is in confirmation state."""
    status = drone["State"]["Status"]
    return status == "Confirming"
|
def is_list_like(obj):
    """True if object type is similar to list, tuple etc."""
    return isinstance(obj, tuple) or isinstance(obj, list)
|
def is_ec_earth(sheet_cell):
    """Is this variable produced by `institute` using ECEarth?"""
    if not sheet_cell:
        return False
    cell = sheet_cell.upper()  # uppercase once instead of per comparison
    if cell in ('X', 'LIMITED'):
        return True
    if cell in ('FALSE', 'NO'):
        return False
    # Unrecognised markers are reported and treated as "no".
    print('Unknown EC-Earth status: {}. Ignoring.'.format(sheet_cell))
    return False
|
def find_regions(directives):
    """Looks for ##sequence-region directives in a list of GFF3 directives.

    Returns {accession: (start, end)} for each matching directive.
    """
    regions = {}
    for entry in directives:
        if not entry.startswith("sequence-region"):
            continue
        # Expected form: "sequence-region <accession> <start> <end>".
        _, accession, start, end = entry.split(" ")
        regions[accession] = (int(start), int(end))
    return regions
|
def typename(obj):
    """Returns the type of obj as a string. More descriptive and specific than
    type(obj), and safe for any object, unlike __class__."""
    cls = getattr(obj, '__class__', None)
    if cls is not None:
        return cls.__name__
    # Fallback for objects without a __class__ attribute.
    return type(obj).__name__
|
def sorted_symbols(seq, symbs_first=('C', 'H'), symbs_last=()):
    """ Produce a sorted list of atomic symbols; some elements given priority.
    By default, C placed first, then H, then others in alphabetical order.
    :param seq: formula or sequence of atomic symbols
    :type seq: dict, list, or tuple
    :param symbs_first: atomic symbols to place first
    :type symbs_first: sequence of strings
    :param symbs_last: atomic symbols to place last
    :type symbs_last: sequence of strings
    :rtyp: tuple(str)
    """
    n_first = len(symbs_first)

    def _priority(symbol):
        # Priority rank: symbs_first by position, then everything else,
        # then symbs_last by position; ties broken alphabetically.
        if symbol in symbs_first:
            return (symbs_first.index(symbol), symbol)
        if symbol in symbs_last:
            return (n_first + 1 + symbs_last.index(symbol), symbol)
        return (n_first, symbol)

    return tuple(sorted(seq, key=_priority))
|
def is_etx_message(message: str) -> bool:
    """Checks if a message contains information about some neighbour ETX."""
    # The membership test is already a bool; no if/return-True/False needed.
    return "ETX" in message
|
def slash_tokenize(message):
    """Carefully discover tokens.

    Splits ``message`` on "/" while copying quoted spans (single- or
    double-quoted) verbatim, including any "/" they contain.
    """
    tokens = []
    accum = ""
    i = 0
    size = len(message)
    # In-line comments are already gone at this point
    while i < size:
        char = message[i]
        if char == "/":
            # Token boundary: emit the accumulator (possibly empty).
            tokens.append(accum)
            accum = ""
        elif char in ['"', "'"]:
            # We are in description block, find the end of this madness
            pos = message[i + 1 :].find(char)
            if pos > -1:
                # Copy the whole quoted span including both quote chars;
                # together with the i += 1 below this skips past it.
                accum += message[i : i + pos + 2]
                i += pos + 1
            # NOTE(review): an unterminated quote falls through without
            # being accumulated — presumably intentional; confirm.
        else:
            accum += char
        i += 1
    if len(accum) > 0:
        tokens.append(accum)
    return tokens
|
def client_host(server_host):
    """Return the host on which a client can connect to the given listener."""
    # INADDR_ANY / IN6ADDR_ANY wildcards answer on loopback; ::0 and
    # ::0.0.0.0 are non-canonical but common ways to write IN6ADDR_ANY.
    wildcard_to_loopback = {
        '0.0.0.0': '127.0.0.1',
        '::': '::1',
        '::0': '::1',
        '::0.0.0.0': '::1',
    }
    return wildcard_to_loopback.get(server_host, server_host)
|
def format_number(n, accuracy=6):
    """Formats a number in a friendly manner (removes trailing zeros and unneccesary point."""
    text = f"{float(n):.{accuracy}f}"
    if '.' in text:
        text = text.rstrip('0').rstrip('.')
    # A tiny negative value can round to "-0"; normalise it.
    if text == "-0":
        text = "0"
    return text
|
def nlines_back(n):
    """
    return escape sequences to move character up `n` lines
    and to the beginning of the line
    """
    # ANSI "CSI <k> A" moves the cursor up k lines; "\r" returns to column 0.
    # NOTE(review): this emits n+1, one more than the docstring states —
    # presumably compensating for a trailing newline already printed;
    # confirm against callers before changing.
    return "\033[{0}A\r".format(n+1)
|
def check_bounds(chrom, start, end, bounds, fname):
    """
    Validate that a genomic region lies within known chromosome bounds.

    :param chrom: chromosome name, used as key into ``bounds``
    :param start: region start; must parse as int
    :param end: region end; must parse as int and be <= bounds[chrom]
    :param bounds: dict mapping chromosome name -> maximum coordinate
    :param fname: originating file name, used in messages only
    :return: True when the region parses and is in bounds; False when the
        chromosome is not present in ``bounds`` (region is skipped)
    :raises ValueError: when start or end is not an integer
    :raises AssertionError: when the region exceeds the chromosome bound
    """
    good = False
    try:
        # NOTE(review): assert is stripped under `python -O`, silently
        # disabling the bounds check there. int(end) may raise ValueError
        # inside this expression, handled below.
        assert int(end) <= bounds[chrom],\
            'Region out of bounds: {} - {} - {} : {} in file {}'.format(chrom, start, end, bounds[chrom], fname)
        _ = int(start)  # parse check only; the value is unused
        good = True
    except ValueError:
        raise ValueError('Non-integer coordinates {} - {} - {} in file {}'.format(chrom, start, end, fname))
    except KeyError:
        # chromosome not in check file, skip
        print('Skipped ', chrom)
        pass
    return good
|
def is_url(text):
    """ Check if the given text looks like a URL (http/https/urn/file).

    None is treated as "not a URL".
    """
    if text is None:
        return False
    # startswith accepts a tuple of prefixes — one call replaces the
    # chained `or` expressions.
    return text.lower().startswith(('http://', 'https://', 'urn:', 'file://'))
|
def create_satellite_string(mission_id):
    """Convert mission_id to scihub's search url platformname attribute
    :param mission_id: an OST scene mission_id attribute (e.g. S1)
    :return: Copernicus' scihub compliant satellite query string
    :rtype: str
    """
    # First matching digit wins, mirroring the original elif chain order.
    for digit, platform in (('1', 'Sentinel-1'), ('2', 'Sentinel-2'),
                            ('3', 'Sentinel-3'), ('5', 'Sentinel-5')):
        if digit in mission_id:
            return 'platformname:' + platform
    raise ValueError('No satellite with mission_id')
|
def percent_change(old, new):
    """Computes the relative (fractional) change from old to new:
    .. math::
        change = \\frac{new - old}{|old|}
    NOTE(review): despite the name, the result is NOT multiplied by 100
    (the previous docstring claimed it was) — callers wanting a percentage
    must scale it themselves. Raises ZeroDivisionError when old == 0.
    """
    return float(new - old) / abs(old)
|
def get_file_extension(filepath):
    """Return full file extension from filepath.

    The extension starts at the FIRST dot of the final path component,
    e.g. 'dir/archive.tar.gz' -> '.tar.gz'. Returns '' when the file name
    contains no dot (the original raised ValueError via str.index).
    """
    filename = filepath.split('/')[-1]
    dot = filename.find('.')
    return filename[dot:] if dot != -1 else ''
|
def _countFollowingZeros(l):
"""Return number of elements containing 0 at the beginning of the list."""
if len(l) == 0:
return 0
elif l[0] != 0:
return 0
else:
return 1 + _countFollowingZeros(l[1:])
|
def current_red(before, after):
    """
    Checks if the red light works well.
    :param before: has to be None or "yellow"
    :param after: has to be None or "yellow"
    :return: if any constraints not met will return False else True
    """
    print(f'{before}--red--{after}')
    accepted = (None, "yellow")
    return before in accepted and after in accepted
|
def ipv4_cidr_to_netmask(bits):
    """ Convert CIDR bits (0-32) to a dotted-quad netmask string """
    octets = []
    remaining = bits
    for _ in range(4):
        if remaining >= 8:
            # Fully-covered octet.
            octets.append(255)
            remaining -= 8
        else:
            # Partially-covered octet: top `remaining` bits set.
            octets.append(256 - 2 ** (8 - remaining))
            remaining = 0
    return '.'.join(str(octet) for octet in octets)
|
def str2p( s ):
    """ Convert string to packet (list of character ordinals) """
    # Strings are already iterable; the intermediate list() was redundant.
    return [ord(ch) for ch in s]
|
def to_list(value):
    """Convert `value` to a list.

    None stays None, lists pass through unchanged, anything else is wrapped.
    """
    if value is None or isinstance(value, list):
        return value
    return [value]
|
def safe_filename(infilename):
    """
    Take a filename and replace special characters (asterisk, slash) that
    mess things up. Warning: a directory path passed here will have its
    slashes removed too — do not do that.
    :param infilename: filename to be processed.
    :type infilename: string
    :return: string with asterisk and slash replaced by underscores
    :rtype: string
    """
    return infilename.replace('/', '_').replace('*', '_')
|
def _match_against(docs, docs_meta, by_meta):
"""Return the list of values to match against in filtering functions."""
if by_meta:
if not docs_meta or not isinstance(docs_meta[0], dict) or by_meta not in docs_meta[0].keys():
raise ValueError('`docs_meta` is required and must be a list of dicts containing the key `%s`' % by_meta)
return [dmeta[by_meta] for dmeta in docs_meta]
else:
return docs
|
def get_car_unchanging_properties(car):
    """
    Gets car properties that are expected to not change at all
    for a given car VIN/ID during a reasonable timescale (1 week to 1 month)
    :param car: car info in original system JSON-dict format
    :return: dict with keys mapped to common electric2go format
    """
    props = dict()
    props['vin'] = car['Id']
    props['license_plate'] = car['Name']
    # This provider runs a single model fleet, hence the constant.
    props['model'] = 'Toyota Prius C'
    return props
|
def split_at(n, coll):
    """ Returns a tuple of (coll[:n], coll[n:]).
    e.g. split_at(1, ['Hallo', 'Welt']) -> (['Hallo'], ['Welt'])
    """
    head = coll[:n]
    tail = coll[n:]
    return (head, tail)
|
def group_member_ids(ppl_coll, grpname):
    """Get the set of all group member ids.
    Parameters
    ----------
    ppl_coll: collection (list of dicts)
        The people collection that should contain the group members
    grpname: string
        The id of the group in groups.yml (the docstring previously
        documented this parameter under the wrong name "grp")
    Returns
    -------
    set:
        The set of ids of the people in the group
    Notes
    -----
    - People belong to a group during an education or employment period:
      such an item carries a "group" key whose value is the group id.
    - This function returns everyone who was assigned to ``grpname``
      during any such period.
    """
    grpmembers = set()
    for person in ppl_coll:
        for section in ("education", "employment"):
            # Default changed from `{}` to `[]`: both iterate to nothing
            # when absent, but a list matches the value's actual schema.
            for position in person.get(section, []):
                if position.get("group") == grpname:
                    grpmembers.add(person["_id"])
    return grpmembers
|
def clean_str(x):
    """ Normalise null-like strings to None; return other strings as-is. """
    null_tokens = {'#n/a', '#n/a n/a', '#na', '-1.#ind', '-1.#qnan', '-nan',
                   '1.#ind', '1.#qnan', '<na>', 'n/a', 'na', 'null', 'nan',
                   '<n/a>', '<null>', '<nan>'}
    if x.lower() in null_tokens:
        return None
    if not x.strip():
        # Blank / whitespace-only strings are also treated as null.
        return None
    return x
|
def strip_textpad(text):
    """
    Attempt to intelligently strip excess whitespace from the output of a
    curses textpad.

    Returns None for None input. Lines ending in a space are treated as
    deliberate line breaks; lines without one are assumed to have been
    wrapped by curses and are re-joined. Trailing empty lines are pruned.
    """
    if text is None:
        return text
    # Trivial case where the textbox is only one line long.
    if '\n' not in text:
        return text.rstrip()
    # Allow one space at the end of the line. If there is more than one space,
    # assume that a newline operation was intended by the user
    stack, current_line = [], ''
    for line in text.split('\n'):
        if line.endswith(' '):
            # Deliberate break: flush the accumulated (re-joined) line.
            stack.append(current_line + line.rstrip())
            current_line = ''
        else:
            # Wrapped by curses: glue onto the next physical line.
            current_line += line
    stack.append(current_line)
    # Prune empty lines at the bottom of the textbox.
    for item in stack[::-1]:
        if len(item) == 0:
            stack.pop()
        else:
            break
    out = '\n'.join(stack)
    return out
|
def chroms_from_build(build):
    """ Get list of chromosomes from a particular genome build
    Args:
        build (str): genome build name, 'grch37' or 'hg19'
    Returns:
        list: autosome names (1-22) in the build's naming convention
    Raises:
        ValueError: when the build is not recognized
    """
    # Autosomes only; sex chromosomes are intentionally excluded.
    # (Removed the commented-out alternative that was left inside the
    # dict literal.)
    chroms = {
        'grch37': [str(i) for i in range(1, 23)],
        'hg19': ['chr{}'.format(i) for i in range(1, 23)],
    }
    try:
        return chroms[build]
    except KeyError:
        raise ValueError("Oops, I don't recognize the build {}".format(build))
|
def evaluate_activity_detection(groundtruth, predicted):
    """
    Evaluate activity detection (precision, recall, f1-score, accuracy).

    :param groundtruth: sequence of 0/1 labels
    :param predicted: sequence of 0/1 predictions, same length
    :return: tuple (precision, recall, f1_score, accuracy); all zeros in
        the degenerate cases the original defined (no positives predicted
        and none missed, or none predicted and none correct)
    """
    TP = TN = FN = FP = 0
    for truth, pred in zip(groundtruth, predicted):
        if truth == pred:
            if pred == 1:
                TP += 1
            else:
                TN += 1
        elif pred == 1:
            FP += 1
        else:
            FN += 1
    if (TP == 0 and FP == 0) or (TP == 0 and FN == 0):
        # Original convention: degenerate confusion matrices score zero.
        return 0, 0, 0, 0
    precision = TP / (TP + FP)
    recall = TP / (TP + FN)
    # BUG FIX: with TP == 0 but FP > 0 and FN > 0 the original divided by
    # zero computing f1 (precision + recall == 0); guard that case.
    if precision + recall == 0:
        f1_score = 0
    else:
        f1_score = 2 * ((precision * recall) / (precision + recall))
    accuracy = (TP + TN) / (TP + TN + FP + FN)
    return precision, recall, f1_score, accuracy
|
def defaultFilter(filePath:str) -> bool:
    """
    Default filter: keep only .py files that are not inside a dunder
    ("/__...") path component.
    """
    in_dunder = "/__" in filePath
    is_python = filePath.endswith('.py')
    return is_python and not in_dunder
|
def textOutput(count, cc) -> str:
    """Returns an appropriate text output depending on
    `count` and `cc`."""
    # (2, 2) is checked first so it wins over the count-only rules.
    if (count, cc) == (2, 2):
        return "SCISSOR"
    if count == 0:
        return "ROCK"
    if count == 5:
        return "PAPER"
    return "NOTHING"
|
def get_filters(filter_dict):
    """
    Returns the filters in string form for a given dictionary.

    Each entry becomes a `name:"value"` line. Non-dict input yields ''.
    """
    # isinstance replaces `type(...) is dict` (accepts dict subclasses),
    # and join replaces the quadratic `+=` string concatenation.
    if not isinstance(filter_dict, dict):
        return ''
    return ''.join(
        '{}:"{}"\n'.format(name, value) for name, value in filter_dict.items()
    )
|
def long_array(array, truncation=10, display=3):
    """
    Format an array as a string, eliding the middle of long arrays.
    Parameters:
        array (array_like): array-like data
        truncation (int, optional): truncate if length exceeds this threshold
        display (int, optional): elements shown at each end when truncated
    """
    if len(array) <= truncation:
        return "%s" % array
    head = str(array[:display])[:-1]   # drop the closing bracket
    tail = str(array[-display:])[1:]   # drop the opening bracket
    return "%s, ..., %s" % (head, tail)
|
def prepend_all(flag, params):
    """
    Returns a list where every element of "params" is preceded by the given
    flag. For example with flag "-f" and params ['a', 'b', 'c'] the result
    is ['-f', 'a', '-f', 'b', '-f', 'c'].
    """
    return [item for param in params for item in (flag, param)]
|
def absoluteBCPIn(anchor, BCPIn):
    """convert relative incoming bcp value to an absolute value"""
    ax, ay = anchor[0], anchor[1]
    dx, dy = BCPIn[0], BCPIn[1]
    return (ax + dx, ay + dy)
|
def tle_scientific_2_float(number):
    """
    This method transforms a scientific suffix from the TLE format into a
    string format that can be parsed into a float by Python. The format is the
    following:
    15254-5 = 0.15254 >>> 15254E-5
    -333-3 = -0.333 >>> 333E-3
    @param number String where the decimal is expected
    @returns float parsed from the reconstructed "<mantissa>E-<digit>" string
        (the previous doc said a string was returned; float() is applied here)
    """
    # Drop the trailing "-<digit>" and splice in "E-" so float() can parse it.
    # NOTE(review): only single-digit negative exponents are handled; a
    # positive-exponent suffix would be mis-parsed — confirm TLE inputs.
    return float(number[:-2] + 'E-' + number[-1])
|
def span_to_bit_coverage(start, end, token_to_node, reference_coverage):
    """
    Construct a bit coverage for token span start:end (inclusive) according
    to the reference coverage.
    :param start: Start token index
    :param end: End token index (inclusive)
    :param token_to_node: Token to node alignment dictionary
    :param reference_coverage: Reference coverage list
    :return: Bit coverage string ('1' where a node is covered, else '0')
    """
    covered_nodes = set()
    for token in range(start, end + 1):
        covered_nodes.update(token_to_node[token])
    return ''.join('1' if node_id in covered_nodes else '0'
                   for node_id in reference_coverage)
|
def get_total_open_threads(feedback_thread_analytics):
    """Returns the count of all open threads for the given
    FeedbackThreadAnalytics domain objects."""
    total = 0
    for analytics in feedback_thread_analytics:
        total += analytics.num_open_threads
    return total
|
def as_property(fact):
    """Convert a fact name to the name of the corresponding property"""
    return 'is_{}'.format(fact)
|
def linux_translator(value):
    """Translates a "linux" target to linux selections."""
    # Same six select() keys as before, built from the shared label prefix.
    prefix = "@com_github_renatoutsch_rules_system//system:linux_"
    arches = ("arm", "ppc", "ppc64", "s390x", "piii", "k8")
    return {prefix + arch: value for arch in arches}
|
def average(values):
    """
    Computes the arithmetic mean of a list of numbers.
    >>> print(average([20, 30, 70]))
    40.0
    """
    total = sum(values)
    count = len(values)
    return total / count
|
def min_max_scaling(x, min_x, max_x):
    """ Scale x into [0, 1] relative to the range [min_x, max_x] """
    span = max_x - min_x
    return (x - min_x) / span
|
def create_alignment(sequences):
    """ Makes alignment:
    takes: a list of equal-length sequences
    returns: the character sequence of an alignment as a str
        (X) = no alignment
        (|) = alignment, identical bases in this position"""
    reference = sequences[0]
    marks = []
    for column in range(len(reference)):
        # A single distinct base across all sequences means agreement.
        bases = {seq[column] for seq in sequences}
        marks.append('|' if len(bases) == 1 else 'X')
    return ''.join(marks)
|
def write_the_bootstrap_tree(list_of_annotated_strings, output_file):
    """
    Write every annotated bootstrap tree to one file, newline separated.
    args:
        list_of_annotated_strings : a list of annotated bootstrap gene trees
        output_file : a file name to write to
    output:
        a file containing all bootstraps annotated; the file name is returned
    """
    with open(output_file, "w") as out:
        out.writelines(tree + "\n" for tree in list_of_annotated_strings)
    print("wrote the annotated bootstraps gene tree to "+output_file)
    return output_file
|
def pluralize(num=0, text=''):
    """
    Format ``num`` (comma-grouped) followed by ``text``, appending "s"
    unless num is exactly 1.
    :rtype: str
    """
    suffix = "" if num == 1 else "s"
    return "{:,} {}{}".format(num, text, suffix)
|
def strip_module_names(testcase_names):
    """Examine all given test case names and strip them to the minimal
    names needed to distinguish each. A name is reduced to its final dotted
    segment only when that segment is unique across the whole list; clashes
    keep their full dotted path.
    """
    from collections import Counter  # stdlib; local import keeps file self-contained
    # Count final segments once — the original rescanned (and copied) the
    # whole list for every element, O(n^2).
    tails = Counter(name.split(".")[-1] for name in testcase_names)
    return [
        name.split(".")[-1] if tails[name.split(".")[-1]] == 1 else name
        for name in testcase_names
    ]
|
def LogMessage(name, message):
    """Prefix the message with the (function-) name."""
    return f'{name} - {message}'
|
def unescape_bytes(value8_uint64be):
    """Unescapes seven bytes from eight bytes.

    Each of the eight input bytes contributes its low 7 bits; byte i lands
    at bit offset 7*i of the result (equivalent to the original staged
    pairwise bit-compaction).

    Args:
        value8_uint64be(int): Bytes as a 64-bit bigendian unsigned integer.
    Returns:
        int: Unescaped bytes as a 56-bit bigendian unsigned integer.
    """
    result = 0
    for byte_index in range(8):
        low7 = (value8_uint64be >> (8 * byte_index)) & 0x7F
        result |= low7 << (7 * byte_index)
    return result
|
def is_numeric(xycell):
    """
    Given an xycell return True if the .value attribute of the cell is numeric
    """
    try:
        float(xycell.value)
    except (TypeError, ValueError, AttributeError):
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit. AttributeError is kept so cells
        # without a .value still report False, as before.
        return False
    return True
|
def _len(n: int):
"""Digit length."""
return len(str(n))
|
def grabKmer(seq, starti, k=9):
    """Grab the kmer from seq starting at position starti with length k.

    Parameters
    ----------
    seq : str
        Sequence from which the peptide will be grabbed.
    starti : int
        Starting position of the kmer (zero-based indexing).
    k : int
        Length of the peptide to return.

    Returns
    -------
    gapped : str or None
        The raw k-length slice seq[starti:starti+k]; None when starti is
        out of range or seq[starti] is a gap ('-').
    nonGapped : str or None
        The k-length peptide after removing gaps from seq[starti:]; None
        under the same conditions as `gapped`, or when fewer than k
        non-gap characters remain.
    """
    if not isinstance(starti, int):
        starti = int(starti)
    # Out-of-range start, or not enough characters left for a full kmer.
    if starti < 0 or starti + k > len(seq):
        return None, None
    rest = seq[starti:]
    gapped = rest[:k]
    if gapped[0] == '-':
        # A kmer may not begin on a gap.
        return None, None
    if '-' not in gapped:
        return gapped, gapped
    ungapped = rest.replace('-', '')
    non_gapped = ungapped[:k] if len(ungapped) >= k else None
    return gapped, non_gapped
|
def build_recursive_localize_env(destination, inputs):
    """Return a multi-line string with export statements for the variables.
    Arguments:
        destination: Folder where the data will be put.
            For example /mnt/data
        inputs: a list of InputFileParam
    Returns:
        a multi-line string with a shell script that sets environment
        variables for each recursive input that has a docker_path.
    """
    dest = destination.rstrip('/')
    lines = []
    for param in inputs:
        if param.recursive and param.docker_path:
            lines.append('export {0}={1}/{2}'.format(
                param.name, dest, param.docker_path.rstrip('/')))
    return '\n'.join(lines)
|
def not_empty(cell):
    """Check if a given paragraph is empty"""
    # Falsy 'text' (e.g. '') deliberately falls through to 'msg' via `or`.
    content = cell.get("text") or cell.get('msg')
    return content is not None
|
def safeEqual( obj1, obj2 ):
    """True if both objects are None or both are equals."""
    try:
        return obj1 == obj2
    except (ValueError, TypeError):
        # Some types raise on comparison (e.g. arrays); treat as unequal.
        return False
|
def restrictToDataDir(Path):
    """
    Restrict a request path to the data directory.

    Returns the path relativised to 'data/...', or False when the path is
    outside /data/ or tries to escape via '../'.
    >>> restrictToDataDir('/data/garage.json')
    'data/garage.json'
    >>> restrictToDataDir('/data/../garage.json')
    False
    >>> restrictToDataDir('/inc/img.jpg')
    False
    """
    candidate = Path.strip()
    if not candidate.startswith('/data/'):
        return False
    candidate = candidate[1:]  # drop the leading slash -> 'data/...'
    if '../' in candidate:
        return False
    return candidate
|
def emission_probability(symbol, word, emission_probabilities, symbol_counts):
    """
    Takes a symbol, a word, a nested dictionary of emission probabilities,
    and a dictionary of symbol counts, and returns the emission probability
    for that symbol and word. If the word was never seen in training (under
    any symbol), a fixed probability based on the symbol count is returned.
    """
    # any() short-circuits on the first hit; the original always scanned
    # every symbol even after finding the word.
    seen = any(
        word in emission_probabilities[sym] for sym in emission_probabilities
    )
    if not seen:
        # Fixed smoothing probability for out-of-vocabulary words.
        return 1 / (1 + symbol_counts[symbol])
    # Seen under some symbol but not this one -> probability 0.
    return emission_probabilities[symbol].get(word, 0)
|
def primers_to_fasta(name, seq_list):
    """return fasta string of primers with tracing newline"""
    records = [f">{name}[{idx}]\n{seq}\n" for idx, seq in enumerate(seq_list)]
    return "".join(records)
|
def tokenize_akkadian_signs(word):
    """
    Takes tuple (word, language) and splits the word up into individual
    sign tuples (sign, language) in a list.

    Signs are separated by '-'. Text inside '{...}' is emitted as a
    'determinative' sign, and '_' toggles the language: in the base
    ('akkadian') state it switches to 'sumerian', and while 'sumerian'
    it switches back to the word's base language.

    input: ("{gisz}isz-pur-ram", "akkadian")
    output: [("gisz", "determinative"), ("isz", "akkadian"),
            ("pur", "akkadian"), ("ram", "akkadian")]
    :param: tuple created by word_tokenizer2
    :return: list of tuples: (sign, function or language)
    """
    word_signs = []
    sign = ''  # characters of the sign currently being accumulated
    language = word[1]  # language tag to attach to the sign in progress
    determinative = False  # True while scanning inside a '{...}' span
    for char in word[0]:
        if determinative is True:
            # Inside '{...}': collect characters until the closing brace.
            if char == '}':
                determinative = False
                if len(sign) > 0:  # pylint: disable=len-as-condition
                    word_signs.append((sign, 'determinative'))
                sign = ''
                language = word[1]
                continue
            else:
                sign += char
                continue
        else:
            if language == 'akkadian':
                if char == '{':
                    # Flush the pending sign, then enter determinative mode.
                    if len(sign) > 0:  # pylint: disable=len-as-condition
                        word_signs.append((sign, language))
                        sign = ''
                    determinative = True
                    continue
                elif char == '_':
                    # '_' switches the following signs to Sumerian.
                    if len(sign) > 0:  # pylint: disable=len-as-condition
                        word_signs.append((sign, language))
                        sign = ''
                    language = 'sumerian'
                    continue
                elif char == '-':
                    # Sign boundary: flush and reset to the base language.
                    if len(sign) > 0:  # pylint: disable=len-as-condition
                        word_signs.append((sign, language))
                        sign = ''
                    language = word[1]  # or default word[1]?
                    continue
                else:
                    sign += char
            elif language == 'sumerian':
                if char == '{':
                    if len(sign) > 0:  # pylint: disable=len-as-condition
                        word_signs.append((sign, language))
                        sign = ''
                    determinative = True
                    continue
                elif char == '_':
                    # Closing '_' returns to the word's base language.
                    if len(sign) > 0:  # pylint: disable=len-as-condition
                        word_signs.append((sign, language))
                        sign = ''
                    language = word[1]
                    continue
                elif char == '-':
                    if len(sign) > 0:  # pylint: disable=len-as-condition
                        word_signs.append((sign, language))
                        sign = ''
                    language = word[1]
                    continue
                else:
                    sign += char
    # Flush the trailing sign, if any.
    if len(sign) > 0:
        word_signs.append((sign, language))
    return word_signs
|
def transform_rnn_head_single_agent(batch):
    """Used in pred_mode='rnn_head_multi', predict only single agent."""
    # Pull the model inputs/targets out of the batch in a fixed order.
    keys = (
        "image",
        "history_positions",
        "history_availabilities",
        "target_positions",
        "target_availabilities",
    )
    return tuple(batch[key] for key in keys)
|
def fake_ips(num):
    """Generate simple IPv4 addresses given the amount to create.

    :param num: number of address tuples to generate.
    :return: list of getaddrinfo-shaped tuples
             ``(None, None, None, None, ('x.x.x.x', 0))`` where all four
             octets of address i equal ``i``.
    """
    return [
        # BUG FIX: the original `'.'.join(str(x) * 4)` repeated the *string*
        # and joined its characters, producing e.g. '1.0.1.0.1.0.1.0' for
        # x=10. Joining a list of four copies gives '10.10.10.10'.
        (None, None, None, None, ('.'.join([str(x)] * 4), 0))
        for x in range(num)
    ]
|
def is_power_of_two(n):
    """Return True if n is a power of two."""
    # A positive power of two has exactly one bit set, so n & (n-1) clears it.
    return n > 0 and (n & (n - 1)) == 0
|
def _link_nodes(first_id, second_id, rel_type, props_str):
"""
Using ids of two nodes and rel type, create code for linking nodes
Why MATCHing first? Cause the following approach does not work:
MERGE node1 with props p1
MERGE node2 with props p2
Creating relationship with node1 and node2 creates a relationship with nodes
having the same type and props with node1 and node2. But node1 and node2 themselves
won't be connected.
"""
return """
MATCH (n1 {id: "%s"})
MATCH (n2 {id: "%s"})
WITH n1, n2
MERGE ((n1)-[:%s %s]->(n2));
""" % (first_id, second_id, rel_type, props_str)
|
def adjust_fields(prefix, task):
    """
    Prepend the prefix to every field name of a task.

    :param prefix: string prepended to task fields
    :type prefix: str
    :param task: a JSON task object from task.json
    :type task: dict
    :return: a new JSON task object with prefixed field names
    :rtype: dict
    """
    # A comprehension yields a fresh dict, matching the original's copy.
    return {prefix + field: content for field, content in task.items()}
|
def base62_encode(num):
    """Encode a non-negative number in base 62.

    `num`: the number to encode (coerced with int()).
    Digits are 0-9, a-z, A-Z in ascending value order.
    Adapted from: http://stackoverflow.com/a/1119769/1144479
    """
    num = int(num)
    alphabet = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    if num == 0:
        return alphabet[0]
    base = len(alphabet)
    digits = []
    # Least-significant digit first; reverse at the end.
    while num:
        num, rem = divmod(num, base)
        digits.append(alphabet[rem])
    return ''.join(reversed(digits))
|
def reverse_xyz(t):
    """Point a 3d vector to the opposite direction (negate x, y, z)."""
    x, y, z = t[0], t[1], t[2]
    return [-x, -y, -z]
|
def cmpValue(subInfo1, subInfo2):
    """
    Returns True if value in (value, work) tuple subInfo1 is GREATER than
    value in (value, work) tuple in subInfo2
    """
    value1 = subInfo1[0]
    value2 = subInfo2[0]
    return value1 > value2
|
def F_hydro(R):
    """
    Fraction of all free-free emissions due to hydrogen.

    R is the ratio of the number of heliums to the number of hydrogens,
    which is approximately 0.08.
    """
    return 1 - R
|
def partition(A, lo, hi, idx):
    """
    Partition using A[idx] as value. Note lo and hi are INCLUSIVE on both
    ends and idx must be valid index. Count the number of comparisons
    by populating A with RecordedItem instances.

    Hoare-style partition: the pivot ends at the returned index j.
    NOTE(review): the exact order and number of `<` comparisons is part of
    the contract when A holds counting RecordedItem instances — do not
    reorder or merge the comparison expressions.
    """
    if lo == hi:
        return lo
    # swap into position
    A[idx], A[lo] = A[lo], A[idx]
    i = lo
    j = hi + 1
    while True:
        # Advance i rightward until it hits hi or finds A[i] > pivot (A[lo]).
        while True:
            i += 1
            if i == hi:
                break
            if A[lo] < A[i]:
                break
        # Retreat j leftward until it hits lo or finds A[j] < pivot.
        while True:
            j -= 1
            if j == lo:
                break
            if A[j] < A[lo]:
                break
        # doesn't count as comparing two values
        if i >= j:
            break
        A[i], A[j] = A[j], A[i]
    # Place the pivot at its final position j.
    A[lo], A[j] = A[j], A[lo]
    return j
|
def reduce_to_nonempty(objs):
    """Keep only objects whose ``empty`` attribute is False.

    Every object must expose an ``empty`` property (asserted).

    Returns a tuple ``(kept_objects, kept_indices)`` where the indices
    refer to positions in the original ``objs`` sequence.
    """
    kept = []
    kept_ids = []
    for idx, obj in enumerate(objs):
        assert hasattr(obj, "empty"), (
            "Expected object with property 'empty'. Got type %s." % (
                type(obj),))
        if not obj.empty:
            kept.append(obj)
            kept_ids.append(idx)
    return kept, kept_ids
|
def get_outdir_min(outdir_min: str) -> int:
    """
    Get disk requirement.
    Value is always given in GiB.
    Covers an Int type input and a string input without suffix only.
    """
    # Strip a quoted wrapper, e.g. '"3"' -> '3', before converting.
    if '"' in outdir_min:
        start = outdir_min.index('"') + 1
        outdir_min = outdir_min[start:-1]
    # GiB -> MiB (value * 1024), truncated to int.
    return int(float(outdir_min.strip()) * 1024)
|
def distinct_records(query_res):
    """
    Collect the unique substrate, phosphosite and kinase record info from
    the tuples returned by a sqlalchemy join query.

    :param query_res: sqlalchemy join query result (list of tuples)
    :return: three sets of unique substrate, phosphosite and kinase records'
        info (unique_subs, unique_phos, unique_kin; sets)
    """
    unique_subs = set()
    unique_phos = set()
    unique_kin = set()
    for rec in query_res:
        # (subs_gene, subs_acc) is always present.
        unique_subs.add((rec[0], rec[1]))
        # Rows where the phosphosite was not found in the DB are length 2;
        # full 3-table join rows are length 6.
        if len(rec) > 2:
            if rec[2]:
                # (phos_rsd, phos_id) — skipped when None
                unique_phos.add((rec[2], rec[3]))
            if rec[4]:
                # (kin_gene, kin_acc) — skipped when None
                unique_kin.add((rec[4], rec[5]))
    return unique_subs, unique_phos, unique_kin
|
def global_maxima(mylist):
    """
    Returns the max index and global maxima of the list.

    Items are coerced with float(); ties keep the earliest index. An empty
    list (or one whose values never exceed -1) yields (-1, -1), matching
    the sentinel initialization.
    """
    best_value = -1
    best_index = -1
    for position, item in enumerate(mylist):
        value = float(item)
        if value > best_value:
            best_value = value
            best_index = position
    return best_index, best_value
|
# NOTE(review): the lines below are dataset-page scraping residue, not source
# code; commented out so the file remains valid Python.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.