content stringlengths 42 6.51k |
|---|
def data_validation(state_input, county_input, states_array, counties_array):
    """Check that (state_input, county_input) exists as a paired entry.

    The two arrays are parallel: index i of states_array corresponds to
    index i of counties_array.  Returns True only when the pair appears
    at the same index in both.

    PARAMETERS: two strings, two parallel arrays of strings
    RETURNS: A boolean

    Improvement over the original: a single zip() pass replaces the
    membership pre-check plus manual reverse index loop, and cannot
    raise IndexError when the arrays have unequal lengths.
    """
    return any(state == state_input and county == county_input
               for state, county in zip(states_array, counties_array))
def set_pen_attr(element, attributes):
    """Extract pen attributes from *element*.

    :param element: mapping exposing ``.get('attr')``
    :param attributes: unused here; kept for interface compatibility
    :return: dict with lineStyle/lineWidth/lineColor, or {} when the
        element carries no 'attr' entry
    """
    attr = element.get('attr')
    if attr is None:
        return {}
    return {
        'lineStyle': attr.get('lineStyle'),
        'lineWidth': float(attr.get('lineWidth', '0')),
        'lineColor': attr.get('lineColor'),
    }
def search_high(left, right, tuples, weight):
    """Binary-search for the first index in [left, right] whose element's
    second field exceeds *weight*; returns right+1 if none does.

    :param left: search limit
    :param right: search limit
    :param tuples: array of elements
    :param weight: predefined value
    :return: index of array
    """
    # Iterative equivalent of the original recursive halving.
    while left < right:
        mid = left + (right - left) // 2
        if tuples[mid][1] > weight:
            right = mid          # answer is at mid or to its left
        else:
            left = mid + 1       # answer is strictly to the right
    return left if tuples[left][1] > weight else left + 1
def is_done(op):
    """Report whether the operation dict carries a 'done' flag."""
    flag = op.get('done', False)
    return flag
def compute_pad(image_shape, kernel_size, enforce_odd=True):
    """Computes a padding length for a given image shape and kernel.

    Args:
      image_shape: A tuple or list of 2 integers.
      kernel_size: A positive integer.
      enforce_odd: When True, one is removed from the left/top padding as
        necessary so the padded image has odd width/height.

    Returns:
      A tuple of 2 tuples, each with 2 integers: (left, right) padding and
      (top, bottom) padding.
    """
    half = kernel_size // 2
    if enforce_odd:
        # Shift by one on sides whose current size is already even.
        shift = (1 - image_shape[0] % 2, 1 - image_shape[1] % 2)
    else:
        shift = (0, 0)
    return ((half - shift[0], half), (half - shift[1], half))
def find_no_match(match_list, all_end2end_nums, type='end2end'):
    """
    Find out no match end2end bbox in previous match list.
    :param match_list: matching pairs; m[0] is end2end index, m[1] is master index.
    :param all_end2end_nums: numbers of end2end_xywh
    :param type: 'end2end' corresponding to idx 0, 'master' corresponding to idx 1.
    :return: no match pse bbox index list
    :raises ValueError: if type is neither 'end2end' nor 'master'.
    """
    if type == 'end2end':
        idx = 0
    elif type == 'master':
        idx = 1
    else:
        raise ValueError
    # Set gives O(1) membership instead of a list scan per candidate.
    matched = {m[idx] for m in match_list}
    return [n for n in range(all_end2end_nums) if n not in matched]
def rgb_to_hex(red, green, blue):
    """Return color as #rrggbb for the given color values."""
    return "#%02x%02x%02x" % tuple(int(channel) for channel in (red, green, blue))
def get_departments(courses):
    """
    Creates a dictionary of departments and courses in those departments.

    Parameters:
    -----------
    courses (list): All courses formatted LLNNN (LL is a 2 character department code)

    returns (dict): Departments where the keys are LL and the values are lists of
    the courses in the departments
    """
    departments = {}
    for course in courses:
        # setdefault inserts the empty list on first sight of a prefix,
        # replacing the explicit membership test.
        departments.setdefault(course[:2], []).append(course)
    return departments
def get_kwargs(args, exp_keys, exp_elems):
    """Collect user-specified keyword args into one dict.

    Keys listed in *exp_keys* keep their truthy value; keys listed in
    *exp_elems* are recorded as the flag True.  Falsy values are dropped.
    """
    collected = {}
    for name, value in args.items():
        if exp_keys is not None and name in exp_keys and value:
            collected[name] = value
        elif exp_elems is not None and name in exp_elems and value:
            collected[name] = True
    return collected
def predict_heuristic(previous_predict, month_predict, actual_previous_value, actual_value):
    """Heuristic that tries to mark the deviance from real and prediction as
    suspicious or not.

    Suspicious (True) when the current value undershoots its prediction by
    more than 30% while the previous month did not.
    """
    current_undershoots = (
        actual_value < month_predict and
        abs((actual_value - month_predict) / month_predict) > .3)
    if not current_undershoots:
        return False
    previous_undershoots = (
        actual_previous_value < previous_predict and
        abs((previous_predict - actual_previous_value) / actual_previous_value) > .3)
    return not previous_undershoots
def isInt( str ):
    """Utility function: Is the given string an integer?

    Returns True/False instead of the original 1/0; bool subclasses int,
    so callers comparing against 0/1 keep working.  Non-string inputs
    that raise TypeError still propagate, as before.
    """
    try:
        int( str )
    except ValueError:
        return False
    return True
def o_count(board):
    """
    count how many 'o'-es are on the board
    """
    return sum(1 for cell in board if cell == 'o')
def list_dict(l):
    """
    Return a dictionary whose keys are the items of *l*, all mapping to None.
    """
    # dict.fromkeys does exactly this in one C-level call.
    return dict.fromkeys(l)
def _adjust_component(r, g , b) -> tuple:
"""
Author: PRAYANSHU NARAYAN S# 101144277
Returns an image but filtered to posterize:
Examples:
>>> posterize('p2-original.jpg')
sets all the pixels in the file in posterize
"""
r2 = 0
g2 = 0
b2 = 0
if r <= 63 and r > 0:
r2 = 32
elif r <= 127 and r > 64:
r2 = 96
elif r <= 191 and r > 128:
r2 = 160
elif r <= 255 and r > 192:
r2 = 224
if g <= 63 and g > 0:
g2 = 32
elif g <= 127 and g > 64:
g2 = 96
elif g <= 191 and g > 128:
g2 = 160
elif g <= 255 and g > 192:
g2 = 224
if b <= 63 and b > 0:
b2 = 32
elif b <= 127 and b > 64:
b2 = 96
elif b <= 191 and b > 128:
b2 = 160
elif b <= 255 and b > 192:
b2 = 224
return r2,g2,b2 |
def position_within_overlap(pos, strand, boundary):
    """ Check if given position is within the overlap range """
    if pos is None:
        return False
    if strand == "fwd":
        return pos >= boundary
    if strand == "rev":
        return pos <= boundary
    return False
def scale_to_control(x, axis_scale=350., min_v=-1.0, max_v=1.0):
    """
    Normalize raw HID readings to target range.

    Args:
        x (int): Raw reading from HID
        axis_scale (float): (Inverted) scaling factor for mapping raw input value
        min_v (float): Minimum limit after scaling
        max_v (float): Maximum limit after scaling

    Returns:
        float: Clipped, scaled input from HID
    """
    scaled = x / axis_scale
    clipped = min(max(scaled, min_v), max_v)
    return clipped
def validate_placeholders(placeholders):
    """Validate placeholders is a dict-like structure.

    Each element must expose exactly the keys 'placeholder' and 'replace'.
    Returns False when *placeholders* is not iterable (TypeError).
    """
    keys = ['placeholder', 'replace']
    try:
        # sorted() accepts the keys view directly; the list() wrapper in
        # the original was redundant.
        return all(sorted(p.keys()) == keys for p in placeholders)
    except TypeError:
        return False
def bin_filter(input: float, abs_boundary: float) -> float:
    """
    Simple binary filter: returns +/-|abs_boundary| depending on the sign
    of the input (non-negative input yields the positive magnitude).

    :param input : input value
    :param abs_boundary : abs boundary value
    :return : output binary value
    """
    magnitude = abs(abs_boundary)
    return -magnitude if input < 0 else magnitude
def amino_function(weight: float, sport_a_week: int) -> float:
    """
    Calculates 24hour calory need from weight and weekly sport frequency.

    :param weight: float, kilograms
    :param sport_a_week: int, times. do sport times a week amount.
    :return: weight multiplied by an activity factor
    """
    # NOTE(review): the original docstring claimed -1 on error, but every
    # branch produces a value; behavior preserved here.
    if sport_a_week <= 0:
        factor = 1.4
    elif 1 <= sport_a_week <= 3:
        factor = 2.2
    elif 3 < sport_a_week <= 5:
        factor = 3.4
    elif 5 < sport_a_week <= 7:
        factor = 4
    else:
        factor = 1
    return factor * weight
def isPalindrome(n):
    """Takes in an integer and determines whether it is a palindrome.

    Idiomatic form: a string equals its reverse iff it is a palindrome;
    replaces the manual two-pointer loop.  Negative numbers compare the
    leading '-' against a digit and so return False, as before.
    """
    s = str(n)
    return s == s[::-1]
def getFibonacciIterative(n: int) -> int:
    """
    Calculate the fibonacci number at position n iteratively.
    """
    prev, curr = 0, 1
    for _ in range(n):
        prev, curr = curr, prev + curr
    return prev
def run_command(cmd):
    """Run a shell-style command string with Popen; return (stdout, stderr) bytes."""
    from shlex import split
    from subprocess import PIPE, Popen
    process = Popen(split(cmd), stdout=PIPE, stderr=PIPE)
    return process.communicate()
def lesser_of_two_evens(a, b):
    """
    a function that returns the lesser of two given numbers if both numbers are even,
    but returns the greater if one or both numbers are odd
    :param a: int
    :param b: int
    :return: int
    lesser_of_two_evens(2,4) --> 2
    lesser_of_two_evens(2,5) --> 5
    """
    both_even = a % 2 == 0 and b % 2 == 0
    return min(a, b) if both_even else max(a, b)
def accounting(value):
    """Format a numeric string in accounting style.

    Negative amounts are rendered as the positive magnitude wrapped in
    parentheses (as a float, e.g. '-5' -> '(5.0)'); non-negative input is
    returned unchanged.

    NOTE(review): the original docstring ("Converts a string into all
    lowercase") did not match the code.
    """
    points = float(value)
    if points < 0:
        return "({0})".format(-points)
    return value
def check_positive_int(string):
    """Convert a string to integer and check if it's positive.

    Raise a RuntimeError if not positive (a non-numeric string still
    raises ValueError from int()).  Return the int.
    """
    number = int(string)  # converted once, not twice
    if number > 0:
        return number
    raise RuntimeError("Integer must be positive.")
def _standardise_proc_param_keys(key):
"""Convert to lowercase, replace any spaces with underscore."""
return key.lower().replace(" ", "_") |
def get_config_option(option_name, opts, optional=False):
    """Given option_name, checks if it is in appconfig. Raises ValueError if a
    mandatory option is missing.

    NOTE(review): a falsy-but-present value (0, '') is treated as missing —
    presumably intended; confirm with callers.
    """
    option = opts.get(option_name)
    missing_mandatory = not option and optional is False
    if missing_mandatory:
        err = "'{0}' is mandatory and is not set in the app.config file. You must set this value to run this function".format(option_name)
        raise ValueError(err)
    return option
def find(val, values):
    """
    Find all instances of val in the iterable *values*. Return their indices.
    """
    # enumerate replaces the manual counter; works on any iterable, so the
    # list() materialization is unnecessary.
    return [i for i, elt in enumerate(values) if elt == val]
def regex_pattern_to_url(pattern):
    """
    Take a url regex pattern from urlconf and return a url that matches it
    """
    url = pattern.replace('^', '').rstrip('$')
    return url if url.startswith('/') else '/' + url
def get_space_from_string(space_str):
    """
    Convert space with P, T, G, M suffix to int (powers of 1024).
    """
    # Checked in the same order as the original if/elif chain.
    units = (('M', 1024), ('G', 1024 ** 2), ('T', 1024 ** 3), ('P', 1024 ** 4))
    for suffix, factor in units:
        if suffix in space_str:
            return int(float(space_str.split(suffix)[0]) * factor)
    return int(space_str)
def generate_parenthesis(n):
    """
    Given n pairs of parentheses, generate all combinations of well-formed
    parentheses.

    For n=3 the solution is:
        "((()))", "(()())", "(())()", "()(())", "()()()"

    At each step we may open a new bracket (while fewer than n are open)
    or close one (while more are open than closed).
    """
    if n == 0:
        return []

    def expand(prefix, n_open, n_close):
        if len(prefix) >= 2 * n:
            yield prefix
            return
        if n_open < n:
            yield from expand(prefix + ["("], n_open + 1, n_close)
        if n_close < n_open:
            yield from expand(prefix + [")"], n_open, n_close + 1)

    return ["".join(combo) for combo in expand([], 0, 0)]
def str2bool(value):
    """
    Tries to transform a string supposed to represent a boolean to a boolean.

    Parameters
    ----------
    value : str
        The string that is transformed to a boolean (case independent).

    Returns
    -------
    boolval : bool
        The boolean representation of `value`.

    Raises
    ------
    ValueError
        If the string is not 'True' or 'False' (case independent).
    """
    normalized = value.upper()
    if normalized == 'TRUE':
        return True
    if normalized == 'FALSE':
        return False
    raise ValueError("Invalid boolean")
def tolist_if_not(x):
    """Wrap *x* in a list unless it already is one."""
    return x if isinstance(x, list) else [x]
def measure_lexical_diversity(tokenized_string):
    """
    Given a tokenized string (list of tokens), return the fraction of unique
    tokens to total tokens.  An empty token list yields 0.0 instead of
    raising ZeroDivisionError.
    """
    if not tokenized_string:
        return 0.0
    return len(set(tokenized_string)) / len(tokenized_string)
def get_partition_count_for_writing(is_sampled):
    """
    Return a reasonable partition count.

    is_sampled: boolean
        One day is O(140MB) if filtering down to a single sample_id, but
        O(14GB) if not. Google reports 256MB < partition size < 1GB as ideal.
    """
    return 1 if is_sampled else 25
def space_calculation_AC( maxrec, blocksize=2544, rabnsize=3):
    """ Calculate space requirements for an Adabas file in
    in the Address Converter.

    :param maxrec: number of records
    :param blocksize: ASSO blocksize and
    :param rabnsize: DATA storage RABN size defined for database (3 or 4)

    >>> space_calculation_AC(5000,blocksize=2004)
    8
    >>> space_calculation_AC(5000,blocksize=2004,rabnsize=4)
    10
    >>> space_calculation_AC(10**9,rabnsize=4)
    1572328
    >>> space_calculation_AC(10**9,rabnsize=3)
    1179246
    """
    isns_per_block = blocksize // rabnsize
    # Ceiling division: number of AC blocks needed for maxrec ISNs.
    blocks = (maxrec + isns_per_block - 1) // isns_per_block
    return blocks
def linenr_column_line(text, offset):
    """Return line number, column and the whole line
    in which text[offset] lies.
    Line number and column are in one-based indexing.
    Each tab is counted as one column.
    """
    # Clamp offset into [0, len(text)] so out-of-range offsets are safe.
    offset = min(max(0, offset), len(text))
    textbegin = text[:offset]
    if not textbegin:
        # Offset at (or clamped to) the start of the text: no line content.
        return 1, 1, None
    # keepends=True so the last element ends exactly at `offset`.
    lines = textbegin.splitlines(True)
    # len(lines) >= 1 here since textbegin is non-empty; max(1, ...) is defensive.
    linenr, column = max(1, len(lines)), len(lines[-1]) + 1
    line = lines[-1]
    # Extend the partial last line up to the next newline (inclusive) or EOF.
    nlpos = text.find('\n', offset)
    if nlpos >= 0:
        line += text[offset:nlpos+1]
    else:
        line += text[offset:]
    return linenr, column, line
def filterIfEmpty(l):
    """ If l is one line (comment), filter."""
    return [] if len(l) == 1 else l
def getTimeString(seconds):
    """Converts an integer number of seconds to a time string.

    Credit: http://stackoverflow.com/questions/775049/python-time-seconds-to-hms

    Args:
        seconds: integer time value in seconds
    Returns:
        time string of the format HH:MM (seconds are discarded)
    """
    total_minutes, _remainder = divmod(seconds, 60)
    hours, minutes = divmod(total_minutes, 60)
    return "%02d:%02d" % (hours, minutes)
def select_basis(atomlist, attempt=0):
    """
    Given a list of atoms, generates a list of molecules
    that is best suited to serve as a basis for those atoms
    INPUT:
    atomlist - list of atoms
    attempt  - retry counter; higher values progressively disable the
               preferred candidates and enable fallbacks
    OUPUT:
    basis - recommended basis as a list of stoichiometries (SMILES strings)

    NOTE(review): the candidate list from '[H][H]' (second occurrence)
    onward repeats earlier S/H/O/C/CO entries with different `attempt`
    thresholds, so duplicates (e.g. 'O=S=O', 'CC') can appear twice in
    the returned basis — confirm this is intended.
    """
    # `count` caps how many basis entries may be added (len(atomlist)-1,
    # compared with `i <= count`, i.e. at most len(atomlist) entries).
    count = len(atomlist)-1
    basis = []
    i = 0
    # --- preferred candidates, gated by low `attempt` values ---
    if 'N' in atomlist and i <= count and attempt < 2:
        basis.append('N#N')
        i += 1
    if 'N' in atomlist and 'H' in atomlist and i <= count and attempt > 1:
        basis.append('N')
        i += 1
    if 'S' in atomlist and i <= count:
        basis.append('O=S=O')
        i += 1
    if 'H' in atomlist and i <= count and attempt < 2:
        basis.append('[H][H]')
        i += 1
    elif 'H' in atomlist and 'C' not in atomlist and i <= count and attempt < 3:
        basis.append('[H][H]')
        i += 1
    if 'O' in atomlist and i <= count and attempt < 3:
        basis.append('[O][O]')
        i += 1
    if 'C' in atomlist and i <= count and attempt < 4:
        basis.append('C')
        i += 1
    if 'O' in atomlist and 'H' in atomlist and i <= count and attempt < 4:
        basis.append('O')
        i += 1
    if 'C' in atomlist and 'O' in atomlist and i <= count and attempt < 5:
        basis.append('C(=O)=O')
        i += 1
    if 'C' in atomlist and 'O' in atomlist and i <= count and attempt < 5:
        basis.append('C=O')
        i += 1
    if 'C' in atomlist and 'O' in atomlist and i <= count:
        basis.append('CO')
        i += 1
    if 'C' in atomlist and i <= count:
        basis.append('CC')
        i += 1
    # --- second round of (partly duplicated) candidates with tighter
    # `attempt` thresholds; see NOTE in the docstring ---
    if 'S' in atomlist and i <= count:
        basis.append('O=S=O')
        i += 1
    if 'H' in atomlist and i <= count and attempt < 1:
        basis.append('[H][H]')
        i += 1
    elif 'H' in atomlist and 'C' not in atomlist and i <= count and attempt < 3:
        basis.append('[H][H]')
        i += 1
    if 'O' in atomlist and i <= count and attempt < 2:
        basis.append('[O][O]')
        i += 1
    if 'C' in atomlist and i <= count and attempt < 3:
        basis.append('C')
        i += 1
    if 'O' in atomlist and 'H' in atomlist and i <= count and attempt < 3:
        basis.append('O')
        i += 1
    if 'C' in atomlist and 'O' in atomlist and i <= count and attempt < 4:
        basis.append('C(=O)=O')
        i += 1
    if 'C' in atomlist and 'O' in atomlist and i <= count and attempt < 4:
        basis.append('C=O')
        i += 1
    if 'C' in atomlist and 'O' in atomlist and i <= count:
        basis.append('CO')
        i += 1
    if 'C' in atomlist and i <= count:
        basis.append('CC')
        i += 1
    return basis
def GetFileBaseName(filename):
    """Removes all extensions from a file.
    (Note: os.path.splitext() only removes the last extension).
    """
    # partition returns the text before the first '.' — or the whole
    # string when there is no dot.
    return filename.partition('.')[0]
def flatten_list(list_of_lists):
    """Flatten one level of nesting."""
    flat = []
    for sub in list_of_lists:
        flat.extend(sub)
    return flat
def nombre_sommets(G):
    """Return the number of vertices of a graph (dict of adjacency)."""
    # len(G) counts the keys directly; no need to build the keys view.
    return len(G)
def pytest_make_parametrize_id(config, val, argname):
    """
    Return parametrization info when running test with verbose option.
    Only dict values get a custom id; anything else returns None so pytest
    falls back to its default.  `config` is part of the hook signature but
    unused here.
    """
    if not isinstance(val, dict):
        return None
    pairs = ', '.join('{}={}'.format(k, v) for k, v in val.items())
    return '{}({})'.format(argname, pairs)
def get_object(object_list, object_value):
    """Get Ironic object by value from list of Ironic objects.

    :param object_list: the output of the cmd
    :param object_value: value to get
    :return: the first matching object, or None when nothing matches
    """
    for candidate in object_list:
        if object_value in candidate.values():
            return candidate
    return None
def compare_rule_hits(r1, r2, diff):
    """Compare 'read' rule hits and fill-in diff structure accordingly.

    True (and diff["same_hits"] == "yes") when every rule_id hit in r1 is
    also hit in r2.  A set lookup replaces the original O(n*m) nested scan
    whose inner loop never broke early.
    """
    d1 = r1["report"]["data"]
    d2 = r2["report"]["data"]
    hit_ids_2 = {hit["rule_id"] for hit in d2}
    all_found = all(hit["rule_id"] in hit_ids_2 for hit in d1)
    # result of comparison
    diff["same_hits"] = "yes" if all_found else "no"
    return all_found
def write_csv_string(data):
    """
    Takes a data object (list of dicts sharing the keys of data[0]) and
    returns a string in CSV format (no quoting/escaping is performed).

    Values are coerced with str(), so non-string cell values no longer
    raise TypeError; rows are assembled with join instead of repeated
    string concatenation.
    """
    keys = list(data[0].keys())
    lines = [','.join(keys)]
    for row in data:
        lines.append(','.join(str(row[key]) for key in keys))
    return '\n'.join(lines) + '\n'
def tags_from_src(src_data):
    """
    :param src_data: A source data file (either column or table)
    :return: Set of all tags found in the file (empty-string tags dropped).
    """
    found = {tag for entry in src_data for tag in entry['tags'].split(',')}
    found.discard('')
    return found
def _filter(gvar, qs):
"""
Internal function to filter a query set by the specified group name.
"""
for _ix in range(len(qs)-1, -1, -1):
if 'job-id' in gvar['command_args'] and qs[_ix]['global_job_id'] != gvar['command_args']['job-id']:
del(qs[_ix])
elif 'job-target-alias' in gvar['command_args'] and qs[_ix]['target_alias'] != gvar['command_args']['job-target-alias']:
del(qs[_ix])
elif 'job-status' in gvar['command_args'] and str(qs[_ix]['job_status']) != gvar['command_args']['job-status']:
del(qs[_ix])
elif 'job-request-cpus' in gvar['command_args'] and str(qs[_ix]['request_cpus']) != gvar['command_args']['job-request-cpus']:
del(qs[_ix])
elif 'job-request-ram' in gvar['command_args'] and str(qs[_ix]['request_ram']) != gvar['command_args']['job-request-ram']:
del(qs[_ix])
elif 'job-request-disk' in gvar['command_args'] and str(qs[_ix]['request_disk']) != gvar['command_args']['job-request-disk']:
del(qs[_ix])
elif 'job-request-swap' in gvar['command_args'] and str(qs[_ix]['request_swap']) != gvar['command_args']['job-request-swap']:
del(qs[_ix])
elif 'job-requirements' in gvar['command_args'] and qs[_ix]['requirements'] != gvar['command_args']['job-requirements']:
del(qs[_ix])
elif 'job-priority' in gvar['command_args'] and str(qs[_ix]['job_priority']) != gvar['command_args']['job-priority']:
del(qs[_ix])
elif 'job-user' in gvar['command_args'] and qs[_ix]['user'] != gvar['command_args']['job-user']:
del(qs[_ix])
elif 'job-image' in gvar['command_args'] and qs[_ix]['image'] != gvar['command_args']['job-image']:
del(qs[_ix])
elif 'job-hold' in gvar['command_args'] and str(qs[_ix]['js_held']) != gvar['command_args']['job-hold']:
del(qs[_ix])
return qs |
def list_to_lowercase(l):
    """Given a list, lowercase the string items; non-strings are dropped."""
    lowered = []
    for item in l:
        # exact-type check (not isinstance) preserved from the original
        if type(item) is str:
            lowered.append(item.lower())
    return lowered
def noneToDefaultOrValue(value, default=''):
    """Convert a NoneType to a default or pass back the value.

    Parameters
    ----------
    value : anything
        The value to possibly convert.
    default : anything, optional
        The default to return if value is NoneType.

    Returns
    -------
    anything
        The converted value.
    """
    if value is None:
        return default
    return value
def black(s):
    """Black text.

    Colouring is currently disabled; this is a UTF-8-encoding pass-through.
    """
    return s.encode('utf-8')
def get_modified_files_list(diff):
    """Get list of modified or newly added files from a diff payload."""
    # Comprehension + membership tuple replaces the or-chained loop.
    return [f["filename"] for f in diff["files"]
            if f["status"] in ("modified", "added")]
def validate_elements(new_elements, known_elements):
    """
    Check new elements (entities or intents) against already identified
    elements.

    Returns (element_list, success_code) where element_list holds the
    offending elements on failure (duplicates and/or overlaps with the
    known set) and the unique new elements on success.

    >>> validate_elements([1,2,3], [])
    ([1, 2, 3], True)
    >>> validate_elements([1,2,2], [])
    ([2], False)
    >>> validate_elements([], [])
    ([], True)
    >>> validate_elements([1,2,2], [2])
    ([2], False)
    >>> validate_elements([1,2], [2])
    ([2], False)
    """
    unique = set(new_elements)
    has_duplicates = sorted(unique) != sorted(new_elements)
    overlap = unique.intersection(known_elements)
    already_known = bool(overlap)
    if has_duplicates:
        dupes = {e for e in new_elements if new_elements.count(e) > 1}
        overlap = overlap.union(dupes)
    failed = already_known or has_duplicates
    return (list(overlap) if failed else list(unique), not failed)
def exists_lod(lst, key, val):
    """Return True if any dict in *lst* maps *key* to *val*."""
    for entry in lst:
        if entry[key] == val:
            return True
    return False
def _tokenize(s):
"""An interface for tokenization, currently we rely on external tokenizers
i.e. We assume all the inputs have been well-tokenized
"""
return s.split() |
def convert_to_ascii(lengths):
    """Map each character of *lengths* to its ordinal (code point) value.

    NOTE(review): despite the name, ord() converts character -> number,
    not the other way around.
    """
    return [ord(character) for character in lengths]
def geoPointInsideBox(p, edges):
    """
    returns True only if p lies inside or on the sides of the box;
    p is (y, x) and edges is a pair of (y, x) corners in any order.
    """
    lat, lon = p
    (lat0, lon0), (lat1, lon1) = edges
    # Normalize corners so (lat0, lon0) is the minimum corner.
    if lat0 > lat1:
        lat0, lat1 = lat1, lat0
    if lon0 > lon1:
        lon0, lon1 = lon1, lon0
    return lon0 <= lon <= lon1 and lat0 <= lat <= lat1
def fastFib(n, memo=None):
    """
    Assumes n is an int >= 0; memo used only by recursive calls.
    Returns Fibonacci of n (1-based: fastFib(0) == fastFib(1) == 1).

    Fix: the original mutable default `memo={}` shared one cache dict
    across all top-level calls; a None sentinel gives each call tree its
    own cache while keeping the interface backward compatible.
    """
    if memo is None:
        memo = {}
    if n == 0 or n == 1:
        return 1
    if n not in memo:
        memo[n] = fastFib(n-1, memo) + fastFib(n-2, memo)
    return memo[n]
def _traverse_report(data):
"""Recursively traverse vehicle health report."""
if 'items' not in data:
return {}
out = {}
for item in data['items']:
skip = (item['severity'] == 'NonDisplay' or
item['itemKey'] == 'categoryDesc' or
item['value'] in [None, 'Null', 'N/A', 'NULL'])
if skip:
continue
value = 'Ok' if item['value'] == '0.0' else item['value']
out[item['itemKey']] = value
out.update(_traverse_report(item))
return out |
def import_from_string(path):
    """
    Import object from given dot-path string.

    Args:
        path<str>: Dot-path to Class, Function or other object in a module
            (e.g. foo.bar.Klass).
    """
    module_path, obj_name = path.rsplit('.', 1)
    # fromlist forces __import__ to return the leaf module, not the root.
    module = __import__(module_path, fromlist=[str(obj_name)])
    return getattr(module, obj_name)
def check_ignore(source):
    """ This function checks if a file is on the ignore list; returns False
    when it should be ignored. """
    lowered = source.lower()
    for name in ("interrogate_module.cpp", "interrogate_wrapper.cpp"):
        if name in lowered:
            return False
    return True
def get_column_letter(col_idx):
    """Convert a column number into a column letter (3 -> 'C').

    Repeatedly divides by 26 (bijective base-26); remainders 1..26 map to
    'A'..'Z' via ASCII ordinals (+64).  Valid indices cover A -> ZZZ.
    """
    if not 1 <= col_idx <= 18278:
        raise ValueError("Invalid column index {0}".format(col_idx))
    letters = []
    while col_idx:
        col_idx, rem = divmod(col_idx, 26)
        if rem == 0:
            # exact multiple of 26: borrow so the digit becomes 'Z'
            rem = 26
            col_idx -= 1
        letters.append(chr(64 + rem))
    return ''.join(letters[::-1])
def polstring_version_required(str_in):
    """
    What SICD version does the pol string require?

    Parameters
    ----------
    str_in : None|str
        The tx/rcv polarization string.

    Returns
    -------
    tuple
        One of `(1, 1, 0)`, `(1, 2, 1)`, `(1, 3, 0)`
    """
    if str_in is None or str_in in ['OTHER', 'UNKNOWN']:
        return (1, 1, 0)
    parts = str_in.split(':')
    if len(parts) != 2:
        return (1, 1, 0)
    tx, rcv = parts
    extended = ['S', 'E', 'X', 'Y', 'OTHER']
    if tx in extended or rcv in extended:
        return (1, 3, 0)
    linear, circular = ['V', 'H'], ['RHC', 'LHC']
    mixed = ((tx in linear and rcv in circular) or
             (rcv in linear and tx in circular))
    return (1, 2, 1) if mixed else (1, 1, 0)
def convert_compartment_format_to_list(compartments):
    """Converts compartment to a list.

    Parameters
    ----------
    compartments : list of str or str
        Cell Painting compartment(s).

    Returns
    -------
    compartments : list of str
        List of lowercased Cell Painting compartments.  Any other input
        type is returned unchanged.
    """
    if isinstance(compartments, str):
        return [compartments.lower()]
    if isinstance(compartments, list):
        return [name.lower() for name in compartments]
    return compartments
def is_note_in_seq(note,seq):
    """
    note: (onset,pitch) tuple
    seq: list of (onset,pitch) tuples

    Returns True if note is in seq (matching on the first two fields).
    """
    # any() replaces the manual scan; comparing fields individually keeps
    # the original semantics even if elements carry extra fields.
    return any(n[0] == note[0] and n[1] == note[1] for n in seq)
def build_indices(stream):
    """ Parses the stream of logs and indexes it.

    Args:
        stream, list of string, format <metric_name>|<value>|<coma_separated_tags>
    Returns:
        pair of hashes,
        first hash has the format {tag -> set([line_indices])}
        second hash has the format {line_index -> set([tags])}
    """
    tag_index = {}   # tag -> set of line ids
    line_index = {}  # line id -> set of tags
    for i, line in enumerate(stream):
        tags = line.split("|")[2].split(",")
        line_index[i] = set(tags)
        for tag in tags:
            # setdefault replaces the explicit membership check.
            tag_index.setdefault(tag, set()).add(i)
    return tag_index, line_index
def remove_chars(string: str) -> str:
    """Keep only alphabetic and whitespace characters of *string*.

    Args:
        string: <str> input string sequence.
    Returns:
        string: <str> letters-and-spaces-only string.
    Examples:
        >>> assert remove_chars('.tree1') == 'tree'
    """
    kept = [ch for ch in string if ch.isalpha() or ch.isspace()]
    return ''.join(kept)
def resolve_keywords_array_string(keywords: str):
    """ Transforms the incoming keywords string into its single keywords and
    returns them in a list.

    Args:
        keywords(str): The keywords as one string. Sometimes separated by
            ',', sometimes only by ' '.
    Returns:
        The keywords in a nice list (empty when *keywords* is None).
    """
    if keywords is None:
        return []
    # Normalize comma separators to spaces before splitting.
    normalized = keywords.replace(",", " ")
    return [word for word in (w.strip() for w in normalized.split(" ")) if word]
def get_unit_prods(non_ter, pgrammar):
    """
    Input: non_terminal, new_grammar
    Output: unit productions of the non terminal (other non-terminals
    appearing in its productions), or None when there are none or when
    no other non-terminals exist.
    """
    other_non_terms = [nt for nt in pgrammar if nt != non_ter]
    if not other_non_terms:
        return None
    prod = pgrammar[non_ter]
    units = [symbol for symbol in prod if symbol in other_non_terms]
    return units if units else None
def get_errno(err):
    """
    Returns errno for select.error, handles differences between Py2 and Py3.

    :param err: instance of select.error or OSError
    :return: Errno number
    """
    if isinstance(err, OSError):
        return err.errno
    # Older Pythons: select.error is a tuple-like object, errno first.
    return err[0]
def residue_points(m):
    """Return a dictionary mapping each quadratic residue (as a string key,
    matching the original interface) to the list of numbers producing it."""
    D = {}
    for i in range(m):
        # setdefault replaces the explicit membership test.
        D.setdefault(str((i**2) % m), []).append(i)
    return D
def getColor(val, minval, maxval):
    """ Convert val in range minval..maxval to the range 0..120 degrees which
    correspond to the colors Red and Green in the HSV colorspace; returned as
    a 'h s v' string with hue normalized to 0..1.
    """
    fraction = float(val - minval) / (maxval - minval)
    hue_degrees = fraction * 120
    return str(hue_degrees / 360) + ' 1.000 1.000'
def fix_url(url):
    """ do things like escape the & in intersection names, ala "17th %26 Center"
    """
    return url.replace(" & ", " %26 ")
def index_settings(shards=5, refresh_interval=None):
    """Configure an index in ES with support for text transliteration."""
    # Analysis section: lowercase tokenizer feeding an ICU latinization filter.
    analysis = {
        "analyzer": {
            "icu_latin": {
                "tokenizer": "lowercase",
                "filter": ["latinize"]
            }
        },
        "filter": {
            "latinize": {
                "type": "icu_transform",
                "id": "Any-Latin; NFD; [:Nonspacing Mark:] Remove; NFC"  # noqa
            }
        }
    }
    return {
        "index": {
            "number_of_shards": shards,
            "refresh_interval": refresh_interval,
            "analysis": analysis
        }
    }
def template_replace(content, replacements):
    """
    Replace text in rendered page with their replacements, for example to ensure
    absolute paths, or replace links of the type:
        href="page/section1/page1/"
    with
        href="/page/section1/page1/"
    when 'page' is pathprefix.

    *replacements* is a dict of {needle: replacement}.
    """
    # items() yields key and value together; removes the per-needle lookup
    # and the dead commented-out loop header.
    for needle, replacement in replacements.items():
        content = content.replace(needle, replacement)
    return content
def collide(particles, positions):
    """Mark colliding particles as not active.

    Returns True if any particles collide, otherwise returns False."""
    colliding_groups = [group for group in positions.values() if len(group) > 1]
    if not colliding_groups:
        return False
    for group in colliding_groups:
        for particle_id in group:
            particles[particle_id]['active'] = False
    return True
def _ev_charge_level_value(data, unit_system):
"""Get the charge level value."""
return round(data["evStatus"]["chargeInfo"]["batteryLevelPercentage"]) |
def time_to_seconds(time):
    """Convert timestamp string of the form 'hh:mm:ss' to seconds.

    A leading '-' makes the whole result negative; commas are stripped.

    :param time: Timestamp of the form 'hh:mm:ss'
    :type time: str
    :return: The corresponding number of seconds
    :rtype: int
    """
    if not time:
        return 0
    negative = time[0] == '-'
    fields = time.replace(',', '').split(':')
    # Rightmost field is seconds (60**0), then minutes, hours, ...
    total = sum(abs(int(part)) * 60 ** power
                for power, part in enumerate(reversed(fields)))
    return int(total * (-1 if negative else 1))
def filter_prop_and_pickle(sample_dic, props):
    """
    Filter the SMILES strings to exclude those that don't have a known value
    of all `props`, or do not have a known path to a pickle file with conformer
    information.

    Args:
        sample_dic (dict): Sample of `summary_dic` that will be used in this dataset
        props (list[str]): list of property names that you want to predict
    Returns:
        sample_dic (dict): Updated `sample_dic` with the above filters applied.
    """
    def keep(sub_dic):
        # Every requested property present AND a usable pickle path.
        return (all(prop in sub_dic for prop in props)
                and sub_dic.get("pickle_path") is not None)

    return {smiles: sub_dic for smiles, sub_dic in sample_dic.items() if keep(sub_dic)}
def list_process(items):
    """From WIL, converts lists generated from an NM command with unicode
    strings to lists with ascii strings.

    Flattens one level of nesting and drops local-label symbols (names
    beginning with '.L'); all kept symbols are coerced with str().
    """
    result = []

    def _keep(symbol):
        # Skip assembler local labels; coerce everything else to str.
        if not symbol.startswith('.L'):
            result.append(str(symbol))

    for entry in items:
        if isinstance(entry, list):
            for sub in entry:
                _keep(sub)
        else:
            _keep(entry)
    return result
def _get_file_as_string(path):
"""Get the contents of the file as a string."""
with open(path, 'r') as f:
data = f.read()
return data |
def fake_idtoken_processing_hook4(id_token, user, **kwargs):
    """
    Fake function for checking kwargs passed to processing hook.
    """
    captured = {name: repr(value) for name, value in kwargs.items()}
    id_token['kwargs_passed_to_processing_hook'] = captured
    return id_token
def redis_key(project_slug, key, *namespaces):
    """
    Generates project dependent Redis key; falsy segments are dropped.

    >>> redis_key('a', 'b')
    'a:b'
    >>> redis_key('a', 'b', 'c', 'd')
    'a:c:d:b'
    >>> redis_key('a', 1, 'c', None)
    'a:c:1'
    """
    parts = [project_slug]
    parts.extend(namespaces)
    parts.append(key)
    return ':'.join(str(part) for part in parts if part)
def escape_text(s):
    """Convert a string to a raw Text property value that represents it.

    s -- 8-bit string, in the desired output encoding.

    Returns an 8-bit string which passes is_valid_property_value().
    Backslashes are escaped first so the escapes added for ']' are not
    themselves re-escaped.

    Normally text_value(escape_text(s)) == s, but there are the following
    exceptions:
    - all linebreaks are normalised to \n
    - whitespace other than line breaks is converted to a single space
    """
    escaped = s.replace(b"\\", b"\\\\")
    return escaped.replace(b"]", b"\\]")
def apply(f, *args):
    """
    Applies a function to sequence values or dicts of values.
    Args:
        f: The function to apply to ``x`` or all items in ``x``.
        *args: Sequence of arguments to be supplied to ``f``. If all arguments
            are dicts, the function ``f`` is applied key-wise to all elements
            in the dict. Otherwise the function is applied to all provided
            arguments.
    Return:
        ``{k: f(x_1[k], x_2[k], ...) for k in x}`` or ``f(x)`` depending on
        whether ``x_1, ...`` are a dicts or not.
    """
    dict_args = [arg for arg in args if isinstance(arg, dict)]
    if not dict_args:
        return f(*args)
    # Keys are taken from the first dict argument; non-dict args are
    # broadcast unchanged to every key.
    return {
        k: f(*(arg[k] if isinstance(arg, dict) else arg for arg in args))
        for k in dict_args[0]
    }
def triple_to_rgb(triple):
    """
    Returns rgb color in format #xxxxxx from triple of ints.

    Each channel is zero-padded to two hex digits, so the result is
    always a valid 7-character color string (the previous ``hex()[2:]``
    implementation dropped leading zeros, yielding e.g. '#0bd28').
    >>> triple_to_rgb((0, 189, 40))
    '#00bd28'
    """
    r, g, b = triple
    return "#{:02x}{:02x}{:02x}".format(r, g, b)
def dms2dd(d, m, s):
    """Convert degrees, minutes, seconds to decimal degrees.

    The sign of *d* determines the sign of the result; *m* and *s* are
    presumably non-negative — negative minutes/seconds would partially
    cancel the magnitude (TODO confirm with callers).
    """
    sign = -1 if d < 0 else 1
    return sign * (int(abs(d)) + float(m) / 60 + float(s) / 3600)
def get_psg_start_stop_events(events):
    """
    :param events: iterable of (init_sec, duration, description) entries
    :return:
        List of START/STOP events. Length 2 list of format:
        [(start_sec, duration, "PSG START"), (stop_sec, duration, "PSG STOP")]
    """
    # Keep only events whose description mentions PSG (case-insensitive).
    psg_events = [ev for ev in events if "psg" in ev[-1].lower()]
    assert len(psg_events) == 2, "Found != 2 start/stop PSG events: {}".format(psg_events)
    # Order by init time so the start event comes first.
    psg_events.sort(key=lambda ev: ev[0])
    first, last = psg_events
    assert "start" in first[-1].lower() and "stop" in last[-1].lower()
    # Standardized naming regardless of the source labels.
    return [
        (first[0], first[1], "PSG START"),
        (last[0], last[1], "PSG STOP"),
    ]
def formatSlurmJobState(stateId):
    """
    Convert the Job State ID from Slurm acctdb to the friendly name.

    Unrecognized IDs map to "UNKNOWN".
    """
    return {
        3: 'COMPLETED',
        5: 'FAILED',
    }.get(stateId, "UNKNOWN")
def site_is_subsample(site):
    """
    Determines whether a trace site originated from a subsample statement inside an `iarange`.

    True only for sample-type sites whose fn is a `_Subsample` instance
    (checked by class name to avoid importing the type).
    """
    if site["type"] != "sample":
        return False
    return type(site["fn"]).__name__ == "_Subsample"
def __isStringType__(obj):
    """
    Returns true if the obj is a str type.

    Uses duck typing: anything exposing ``capitalize`` (str, and also
    bytes) is treated as string-like.
    """
    try:
        return hasattr(obj, 'capitalize')
    # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt are
    # not swallowed; hasattr can still propagate non-AttributeError errors
    # raised by exotic attribute lookups.
    except Exception:
        return False
def completeDict(targetDict:dict, otherDict:dict) -> dict:
    """
    Completes target with the keys of other, if they are not present,
    stores the values of other. If they are, target keeps their values.
    Nested dicts are merged recursively. Does not use copy (mutates and
    returns targetDict; nested dicts copied from other are shared).
    """
    for key, load in otherDict.items():
        # Membership test (not ``get(key) is not None``) so that keys
        # explicitly set to None in target are kept, per the docstring.
        if key in targetDict:
            target = targetDict[key]
            if isinstance(target, dict) and isinstance(load, dict):
                targetDict[key] = completeDict(target, load)
        else:
            targetDict[key] = load
    return targetDict
def get_ganesha_config(user_id, access_key, secret_key, rgw_hostname, nfs_version):
    """
    Build an NFS-Ganesha export configuration for the Ceph RGW FSAL.
    Parameters:
        user_id(char): uid of the user (filled into FSAL User_Id)
        access_key(char): access key of the user — presumably RGW/S3
            credentials; verify against caller
        secret_key(char): secret key of the user — presumably RGW/S3
            credentials; verify against caller
        rgw_hostname(char): name of the rgw host running ganesha; used to
            form the "client.rgw.<host>" client name
        nfs_version(char): version of nfs (filled into NFS_Protocols)
    Returns:
        ganesha_conf: returns the ganesha configuration as a single string
    """
    # Template with %-placeholders; filled positionally below in the order:
    # NFS_Protocols, User_Id, Access_Key_Id, Secret_Access_Key, and the
    # host suffix of the "client.rgw.%s" name.
    ganesha_conf = """
    EXPORT
    {
        Export_ID=77;
        Path = "/";
        Pseudo = "/";
        Access_Type = RW;
        SecType = "sys";
        NFS_Protocols = %s;
        Transport_Protocols = TCP;
        FSAL {
            Name = RGW;
            User_Id = %s;
            Access_Key_Id ="%s";
            Secret_Access_Key = "%s";
        }
    }
    NFSV4 {
        Allow_Numeric_Owners = true;
        Only_Numeric_Owners = true;
    }
    Cache_Inode {
        Dir_Max = 10000;
    }
    RGW {
        name = "client.rgw.%s";
        ceph_conf = "/etc/ceph/ceph.conf";
        init_args = "-d --debug-rgw=16";
    }
    """ % (
        nfs_version,
        user_id,
        access_key,
        secret_key,
        rgw_hostname,
    )
    return ganesha_conf
def group_data_by_columns(datasets, columns):
    """
    Regroup series-major rows into column-major rows.
    :param datasets: [CxNxSxF]
    :param columns: F
    :return: CxNxFxS
    """
    regrouped = []
    for dataset in datasets:
        table, label = dataset[0][0], dataset[0][1]
        # Each row holds S series of F features; swap to F lists of S values.
        datalist = [
            [[series[col_idx] for series in row]
             for col_idx in range(len(columns))]
            for row in table
        ]
        regrouped.append((datalist, label))
    return regrouped
def word_count(phrase):
    """
    given a phrase, count the occurrences of each word in that phrase
    words comprise alphanumeric characters and are separated by anything else
    """
    # Lowercase alphanumerics, blank out everything else, then split on runs
    # of whitespace to get the words.
    normalized = ''.join(c.lower() if c.isalnum() else ' ' for c in phrase)
    counts = {}
    for word in normalized.split():
        counts[word] = counts.get(word, 0) + 1
    return counts
def mosaic_or_horizontal(all_series: dict):
    """Take a mosaic or horizontal model and return series or models.

    Inspects the first value only: a nested dict means "mosaic",
    anything else means "horizontal".
    Args:
        all_series (dict): dict of series: model (or list of models)
    """
    sample = next(iter(all_series.values()))
    return "mosaic" if isinstance(sample, dict) else "horizontal"
def calculate_chunk_slices(items_per_chunk, num_items):
    """Calculate slices for indexing an adapter.
    Parameters
    ----------
    items_per_chunk: int
        Approximate number of items per chunk.
    num_items: int
        Total number of items.
    Returns
    -------
    list of slices
        Consecutive slices covering [0, num_items); the last one may be
        shorter than items_per_chunk.
    """
    assert items_per_chunk > 0
    assert num_items > 0
    chunk_slices = []
    for start in range(0, num_items, items_per_chunk):
        stop = min(start + items_per_chunk, num_items)
        chunk_slices.append(slice(start, stop))
    return chunk_slices
def annotation_to_dict(annotation):
    """Read BRAT annotation line by line and transform it in a dictionary.
    Events are currently no supported.
    Args:
        annotation (string): String formatted in BRAT format (content of .ann file)
    Returns:
        dictionary: contains 'entities' and 'relations' as separated nested dictionaries
    Raises:
        RuntimeError: on lines without exactly 3 tab-separated fields, or
            on annotation types other than T (entity) / R (relation).
    """
    result = {'entities': {}, 'relations': {}}
    for line in annotation.split('\n'):
        if not line.rstrip():
            continue  # skip blank/whitespace-only lines
        fields = line.split('\t')
        # NOTE(review): relation lines are also required to carry 3 tab
        # fields here — confirm this matches the producing tool's output.
        if len(fields) != 3:
            raise RuntimeError('Line in unsupported format: "{}"'.format(line))
        ann_id, ann_body, ann_text = fields
        if line.startswith('T'):
            label, beg, end = ann_body.split()
            result['entities'][ann_id] = {
                'label': label,
                'beg': int(beg),
                'end': int(end),
                'string': ann_text.rstrip(),
            }
        elif line.startswith('R'):
            rel_label, arg1, arg2 = ann_body.split()
            result['relations'][ann_id] = {
                'label': rel_label,
                'arg1': arg1.split(':')[-1],
                'arg2': arg2.split(':')[-1],
            }
        else:
            raise RuntimeError('Got unsupported annotation type in line "{}"'.format(line))
    return result
def flattenList(inputList):
    """Flattens shallow nested lists into a single list.
    Note that this will only work for nesting that is one level deep.
    Strings and bytes are treated as atomic items, not iterables.
    """
    outputList = []
    for item in inputList:
        # str/bytes define __iter__ in Python 3, so a plain hasattr check
        # would explode them into individual characters — exclude them.
        if hasattr(item, '__iter__') and not isinstance(item, (str, bytes)):
            outputList += item
        else:
            outputList += [item]  # non-iterable item: append as-is
    return outputList
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.