content
stringlengths 42
6.51k
|
|---|
def partition_list(array, pivot):
    """Split ``array`` into two lists: elements smaller than ``pivot`` and
    the rest (>= pivot).

    O(n) time & space.
    """
    below = [element for element in array if element < pivot]
    at_or_above = [element for element in array if not (element < pivot)]
    return [below, at_or_above]
|
def replace(text: str, replacements: dict) -> str:
    """
    Returns a copy of text with the replacements applied.

    Longest keys are NOT preferred: at each position the *shortest* prefix
    of the remaining text that is a key in ``replacements`` wins.  A value
    of ``None`` (or any falsy value) deletes the matched key.

    >>> replace('abc.-.', {'.': '*', '-': '<===>'})
    'abc*<===>*'
    >>> replace('Hola1 que2 ase3', {'Hola': 'Hi', 'que': 'ase', 'ase': None})
    'Hi1 ase2 3'
    """
    if not replacements:
        return text
    result: list[str] = []
    start = 0
    while start < len(text):
        # Grow a candidate key one character at a time from `start`.
        pattern: list[str] = []
        for char in text[start:]:
            pattern.append(char)
            try:
                new_value = replacements[''.join(pattern)]
            except KeyError:
                pass
            else:
                # Match found: emit replacement (falsy -> delete) and
                # jump past the matched key.
                result.append(new_value or '')
                start += len(pattern)
                break
        else:
            # No key matched any prefix: copy one character verbatim.
            result.append(text[start])
            start += 1
    return ''.join(result)
|
def error_response(error_message):
    """Return a well-formed error payload for the s3_png_metadata test."""
    payload = {
        "success": False,
        "test_name": "s3_png_metadata",
    }
    payload["error_message"] = error_message
    return payload
|
def extract_expand_info(kernel_info):
    """Convert the kernel-info json dict into a friendlier flat structure."""
    # Flatten the list-of-lists of input descriptors (if any).
    flat_inputs = []
    for desc_group in kernel_info.get('input_desc') or []:
        flat_inputs += desc_group
    # Turn the attr entries into a simple name -> value mapping.
    attr_map = {}
    for entry in kernel_info.get('attr') or []:
        attr_map[entry["name"]] = entry["value"]
    return {
        "name": kernel_info["name"],
        "input_desc": flat_inputs,
        "output_desc": kernel_info["output_desc"],
        "attr": attr_map,
        "process": kernel_info["process"],
    }
|
def unify_units(dic):
    """Unifies names of units.

    Some units have different spellings although they are the same unit;
    this replaces variant spellings under the 'unit' and 'units' keys with
    the canonical (first-listed) spelling.

    Example of British English spelling replaced by US English spelling::
        >>> unify_units({'units': ['metres'], 'unit': ['metre']})  # doctest: +SKIP
        {'units': ['meters'], 'unit': ['meter']}

    :param dic: dictionary with 'unit' and 'units' entries (str or list of str)
    :return: a copy of ``dic`` with unified spellings (input left unmodified)
    """
    # Each inner list holds all accepted spellings; the first is canonical.
    # (The old dead `import types` inside the loop has been removed.)
    lookup = [['meter', 'metre'], ['meters', 'metres'],
              ['Meter', 'Metre'], ['Meters', 'Metres'],
              ['kilometer', 'kilometre'], ['kilometers', 'kilometres'],
              ['Kilometer', 'Kilometre'], ['Kilometers', 'Kilometres'],
              ]

    def canonical(value):
        # Map one spelling to its canonical form, or return it unchanged.
        for spellings in lookup:
            if value in spellings:
                return spellings[0]
        return value

    dic = dict(dic)
    for key in ('unit', 'units'):
        value = dic[key]
        if isinstance(value, str):
            dic[key] = canonical(value)
        else:
            # Build a new list: the old code mutated the caller's list
            # in place even though the outer dict was copied.
            dic[key] = [canonical(v) for v in value]
    return dic
|
def fit_simulaid(phi):
    """
    DEPRECATED AND WORKING FOR SMALL NUMBER OF SAMPLES
    --
    Fit theta such that:
        phi_i = theta * i + phi_0   (E)
    by solving the normal equations:
        | a11 * theta + a12 * phi_0 = b1
        | a21 * theta + a22 * phi_0 = b2
    ---
    Parameters:
        phi : sequence of samples
    ---
    Return:
        theta : fitted slope
    """
    n = len(phi) - 1
    # Normal-equation coefficients (sums over i = 0..n).
    a11 = (2 * n + 1) * (n + 1) * n / 6    # sum of i**2
    a21 = n * (n + 1) / 2                  # sum of i
    a12 = a21
    # Bug fix: the number of samples is n + 1, not n (the old a22 = n made
    # a constant input fit a nonzero slope).
    a22 = n + 1
    # Right-hand side.
    b1 = 0
    b2 = 0
    for i, phi_i in enumerate(phi):  # renamed: the old loop shadowed `phi`
        b1 += phi_i * i
        b2 += phi_i
    theta = (a22 * b1 - a12 * b2) / (a22 * a11 - a12 * a21)
    return theta
|
def _hex_to_bin(hexstring):
"""Convert hexadecimal readouts (memory) to binary readouts."""
return str(bin(int(hexstring, 16)))[2:]
|
def _first_available(color_list):
"""
(https://en.wikipedia.org/wiki/Greedy_coloring)
Return smallest non-negative integer not in the given list of colors.
:param color_list: List of neighboring nodes colors
:type color_list: list of int
:rtype: int
"""
color_set = set(color_list)
count = 0
while True:
if count not in color_set:
return count
count += 1
|
def readCurrent(obj, container=True):
    """
    Persistence safe wrapper around zodb connection readCurrent;
    also has some built in smarts about typical objects that need
    to be read together.

    :param obj: persistent object to register for readCurrent checking
    :param container: if True, also register the BTree data behind a
        ``_SampleContainer``-style object (presumably a zope container —
        NOTE(review): confirm against callers)
    :return: ``obj`` unchanged
    """
    # Per notes from session_storage.py, remember to activate
    # the objects first; otherwise the serial that gets recorded
    # tends to be 0 (if we had a ghost) which immediately changes
    # which leads to false conflicts
    try:
        obj._p_activate()
        obj._p_jar.readCurrent(obj)
    except (TypeError, AttributeError):
        # Not a persistent object, or not attached to a jar: best effort.
        pass
    if container:  # BTree containers
        try:
            data = obj._SampleContainer__data
            data._p_activate()
            data._p_jar.readCurrent(data)
        except AttributeError:
            pass
    return obj
|
def ens_to_indx(ens_num, max_start=1000000):
    """
    Get the index related to the ensemble number : e.g 101 => 0
    :param ens_num: ensemble number, int
    :param max_start: max number of ensembles, int
    :return: index, int
    """
    start = 100
    while start < max_start:
        ind = ens_num % start
        # NOTE(review): `ind` is always < `start` by definition of the
        # modulo, so this branch is taken on the very first iteration and
        # the function always returns (ens_num % 100) - 1; the
        # `start *= 10` fallback below is dead code.  Confirm intent.
        if ind < start:
            return ind - 1
        # Otherwise, try with bigger number of ensembles
        start *= 10
    print("Error: ens_to_index function: ensemble number cannot be converted to index")
|
def is_valid_location(location, bounds):
    """Check if location is inside the drone bounds (both axes)."""
    x_ok = min(bounds[0]) <= location[0] <= max(bounds[0])
    y_ok = min(bounds[1]) <= location[1] <= max(bounds[1])
    return x_ok and y_ok
|
def _get_template_disk_size(template):
"""Get disk size from template."""
return int(
template['properties']['disks'][0]['initializeParams']['diskSizeGb'])
|
def unwrap_error_with_hosts(error_obj):
    """Extract the original error from a wrapper that lists the hosts
    where the error occurred.

    :return: (error_id, description, details, nodes) tuple
    """
    error_id = error_obj.get('id')
    description = error_obj.get('description')
    details = error_obj.get('details', {})
    nodes = []
    if error_id == 'errorOnNodes' and 'error' in details:
        inner = details['error']
        nodes = details['hostnames']
        error_id = inner['id']
        description = inner['description']
        details = inner.get('details', {})
    return error_id, description, details, nodes
|
def is_attorney(descr):
    """Return True if the string description looks like an attorney description."""
    upper = descr.upper()
    return 'ATTORNEY' in upper or 'ATTNY' in upper
|
def find(func, iterable):
    """
    Find the first item in iterable for which func returns something true'ish.
    @returns None if no item in iterable fulfills the condition
    """
    return next((item for item in iterable if func(item)), None)
|
def spread(nodes, n):
    """Distribute master instances across different nodes.

    {
        "192.168.0.1": [node1, node2],
        "192.168.0.2": [node3, node4],
        "192.168.0.3": [node5, node6]
    } => [node1, node3, node5]

    Note: ``nodes`` is consumed — emptied groups are popped from the dict.
    """
    picked = []
    while len(picked) < n and nodes:
        # Round-robin over a snapshot so we can pop exhausted IPs.
        for ip, group in list(nodes.items()):
            if not group:
                nodes.pop(ip)
                continue
            picked.append(group.pop(0))
            if len(picked) >= n:
                break
    return picked
|
def fizzGame(num):
    """Compute fizz/buzz for a number.

    Args:
        num: number checked for divisibility by 3 and/or 5
    Returns:
        (str) "fizz", "buzz" or "fizz buzz" when divisible by 3, 5, or
        both; otherwise (int) num unchanged.
    """
    by_three = num % 3 == 0
    by_five = num % 5 == 0
    if by_three and by_five:
        return "fizz buzz"
    if by_three:
        return "fizz"
    if by_five:
        return "buzz"
    return num
|
def get_fn_names(component):
    """Collect the names registered by ``function``-decorated methods.

    For every callable attribute of ``component`` whose ``function``
    attribute is truthy, all names from its ``names`` tuple are collected.

    :param component: the component object to inspect
    :type component: object
    :return: the names of each function handler found (empty list if none)
    :rtype: list
    """
    # (The old `assert isinstance(component, object)` was vacuous — every
    # Python object passes it — so it has been removed.)
    fn_names = []
    # Walk every callable attribute of the object.
    for attr_name in dir(component):
        method = getattr(component, attr_name)
        if not callable(method):
            continue
        if getattr(method, "function", False):
            fn_decorator_names = method.names
            # A decorated handler receives its names via *args, so `names`
            # must be a tuple; fail loudly if the decorator logic changes
            # (a bare str here would be silently iterated per-character).
            assert isinstance(fn_decorator_names, tuple)
            fn_names.extend(fn_decorator_names)
    return fn_names
|
def create_text(lines):
    """
    Create a single string from a list of strings, one element per line.

    :param lines: list of strings
    :return: string
    """
    newline = '\n'
    return newline.join(lines)
|
def parse_messages(messages):
    """Split a newline-separated string into whitespace-stripped messages."""
    parsed = []
    for raw in messages.split('\n'):
        parsed.append(raw.strip())
    return parsed
|
def convert_input_to_dice(to_re_roll):
    """
    Parse the user input into a list of dice.

    :param to_re_roll: the raw comma-separated string received from the user
    :return: a list of valid dice values (ints 1-6); [] for empty input
    """
    if not to_re_roll:
        return []
    valid_faces = (1, 2, 3, 4, 5, 6)
    return [int(token) for token in to_re_roll.split(",") if int(token) in valid_faces]
|
def STRING_BOUNDARY(e):
    """
    :return: expr anchored so it matches an entire line
    """
    return "^" + e + "$"
|
def is_valid(glyph_str):
    """Validate that glyph_str is alphanumeric and contains unique chars.

    Parameters
    ----------
    glyph_str : string
        glyph alphabet to be used for number encoding

    Returns
    -------
    bool
        True when glyph_str is alphanumeric and each char occurs only once.
    """
    all_unique = len(set(glyph_str)) == len(glyph_str)
    return all_unique and glyph_str.isalnum()
|
def clean_string(string):
    """
    Collapse runs of spaces to a single space and drop one leading and
    trailing space.
    """
    # Loop: a single replace() pass turns "a   b" into "a  b", not "a b".
    while "  " in string:
        string = string.replace("  ", " ")
    if string.startswith(" "):
        string = string[1:]
    # Bug fix: the old code did string[-2:], which *kept only the last two
    # characters* instead of removing the trailing space.
    if string.endswith(" "):
        string = string[:-1]
    return string
|
def parse(prog):
    """Parse a Chicken program into bytecode.

    Each line's opcode is the number of "chicken" occurrences on it.
    """
    return [line.count("chicken") for line in prog.split('\n')]
|
def unify(facts, query):
    """
    Simplistic unification of a query with one of several facts.
    Query and each fact should be a tuple; "variables" are indicated by
    None in the query.  Returns the set of valid substitutions (tuples of
    fact literals, one per variable position).
    """
    matches = set()
    for fact in facts:
        if len(fact) != len(query):
            continue
        compatible = all(q in (None, f) for q, f in zip(query, fact))
        if compatible:
            matches.add(tuple(f for q, f in zip(query, fact) if q is None))
    return matches
|
def fix_broken_format(x):
    """
    Parse a float, fixing the broken "a*^b" exponent notation
    (mantissa times ten to the b-th power).
    """
    if "*^" not in x:
        return float(x)
    tokens = x.split("*^")
    mantissa = float(tokens[0])
    exponent = int(tokens[1])
    return mantissa * 10 ** exponent
|
def head(list1: list) -> object:
    """Return the head of a list, or None when the list is empty.

    :param list list1: input list
    :return: the first item
    :rtype: object

    >>> head([])
    None
    >>> head([1,2,3])
    1
    """
    return list1[0] if list1 else None
|
def _boolify_envvar(val):
"""Interpret boolean environment variables.
True whenever set/exported, even if value is an empty string,
"null", or "none".
"""
falsey = ("false", "nil", "no", "off", "0")
return (val if val is not None else "false").lower() not in falsey
|
def _len_guards(m: int):
"""Handle small or incorrect window lengths"""
if int(m) != m or m < 0:
raise ValueError("Window length m must be a non-negative integer")
return m <= 1
|
def line(x, a, b):
    """
    Line equation y = a*x + b, used by the curve-fitting algorithm.

    Args:
        x: x value
        a: line coefficient (slope)
        b: line constant term (intercept)
    Returns:
        The y coordinate of the point
    """
    return b + x * a
|
def lin_portfolio(q1, q2, c1=2, c2=1, *args):
    """Simple linear function with analytic EE solution for the next test."""
    return c2 * q2 + c1 * q1
|
def gpr_to_abi(gpr):
    """Convert a general purpose register to its corresponding ABI name.

    Covers the RISC-V integer registers x0-x31 and floating-point
    registers f0-f31; returns "na" for anything else.
    """
    int_abi = ("zero ra sp gp tp t0 t1 t2 s0 s1 a0 a1 a2 a3 a4 a5 a6 a7 "
               "s2 s3 s4 s5 s6 s7 s8 s9 s10 s11 t3 t4 t5 t6").split()
    fp_abi = ("ft0 ft1 ft2 ft3 ft4 ft5 ft6 ft7 fs0 fs1 fa0 fa1 fa2 fa3 fa4 fa5 "
              "fa6 fa7 fs2 fs3 fs4 fs5 fs6 fs7 fs8 fs9 fs10 fs11 ft8 ft9 ft10 ft11").split()
    mapping = {"x%d" % i: name for i, name in enumerate(int_abi)}
    mapping.update(("f%d" % i, name) for i, name in enumerate(fp_abi))
    return mapping.get(gpr, "na")
|
def recursive_dict_to_list(dict_data):
    """Return a flat list of all values from a dictionary and any nested
    dictionaries (depth-first, insertion order).
    """
    list_data = []
    for value in dict_data.values():
        # isinstance (rather than `type(v) == dict`) also descends into
        # dict subclasses such as OrderedDict / defaultdict.
        if isinstance(value, dict):
            list_data.extend(recursive_dict_to_list(value))
        else:
            list_data.append(value)
    return list_data
|
def knotvector_normalize(knotvector=()):
    """Normalize the input knot vector to the [0, 1] range.

    :param knotvector: input knot vector
    :type knotvector: tuple
    :return: normalized knot vector (the input itself when empty)
    :rtype: list
    """
    if len(knotvector) == 0:
        return knotvector
    lo = float(knotvector[0])
    hi = float(knotvector[-1])
    return [(float(kv) - lo) / (hi - lo) for kv in knotvector]
|
def to3(p):
    """
    Return a string representation of p with at least 3 chars,
    adding leading 0's if necessary.

    Parameter p: number to pad
    Precondition: p is an int
    """
    assert type(p) == int, repr(p) + ' is not an int'  # get in the habit
    digits = str(p)
    if p < 10:
        return '00' + digits
    if p < 100:
        return '0' + digits
    return digits
|
def json_or_empty(response):
    """
    Return response JSON as a python dict, or {} when decoding fails.

    :param response: object exposing a .json() method
    :return: decoded payload or empty dict
    """
    try:
        return response.json()
    except Exception:
        return {}
|
def stricmp(sFirst, sSecond):
    """
    Compare two strings case-insensitively, returning -1/0/1.

    Python doesn't seem to have any way of doing this correctly, so this
    is just an approximation using lower().
    """
    if sFirst == sSecond:
        return 0
    lhs = sFirst.lower()
    rhs = sSecond.lower()
    if lhs == rhs:
        return 0
    return -1 if lhs < rhs else 1
|
def preOrder(node, mx):
    """Pre-order walk counting nodes whose value is >= the max seen so far.

    visit node, process left subtree, process right subtree, return results.

    :param node: current tree node (with .data, .left, .right) or None
    :param mx: maximum value observed on the path from the root
    :return: (running max for this subtree, count of qualifying nodes)
    """
    count = 0
    # node empty
    if (node == None):
        return mx, 0
    # If the current node value is greater or equal to the max value,
    # then update the count variable and also update the max variable.
    if (node.data >= mx):
        count += 1
        mx = max(node.data, mx)
    # descend left subtree
    (lmx, lcount) = preOrder(node.left, mx)
    # descend right subtree
    (rmx, rcount) = preOrder(node.right, mx)
    # if lmx > mx: mx = lmx
    # if rmx > mx: mx = rmx
    return mx, count + lcount + rcount
|
def elide_string_middle(text, max_length):
    """Replace the middle of the text with ' ... ' to fit max_length.

    Args:
        text (str): Text to shorten.
        max_length (int): Maximum allowable length of the string
            (assumed >= 7 so at least one char survives on each side).
    Returns:
        (str) The elided text, e.g. "Some really long tex ... the end."
    """
    if len(text) <= max_length:
        return text
    # Bug fix: use integer division — true division produced a float,
    # which raises TypeError when used as a slice index on Python 3.
    half_len = (max_length - 5) // 2  # Length of text kept on either side.
    return '{} ... {}'.format(text[:half_len], text[-half_len:])
|
def get_client_msg(message, client_id):
    """Return the client-scoped message: "<message>@<client_id>"."""
    return ''.join([message, '@', client_id])
|
def distance(strand_a, strand_b):
    """
    Calculate the Hamming distance between two strands.

    :param str strand_a: first strand
    :param str strand_b: second strand
    :raises ValueError: if the strands differ in length
    """
    if len(strand_a) != len(strand_b):
        raise ValueError('Strands must be of equal length.')
    differences = 0
    for a, b in zip(strand_a, strand_b):
        if a != b:
            differences += 1
    return differences
|
def try_classmro(fn, cls):
    """
    Try `fn` on each class in `cls`'s MRO until a non-None result is returned.
    eg, `try_classmro(field_dict.get, forms.CharField)`

    Returns None implicitly when no class in the MRO produces a result.
    """
    # Iterate under a distinct name: the old loop shadowed the `cls`
    # parameter, which invited subtle bugs on later edits.
    for candidate in cls.mro():
        result = fn(candidate)
        if result is not None:
            return result
|
def vcd_convert(data, signals):
    """
    `data` obtained from .get_signals() has a form of a dictionary with strange
    key names (e.g. '7#'), then, for each key, it has a dictionary with keys:
    'references' - here we will have the actual signal name
    'size' - N bytes
    'var_type' - e.g. 'wire'
    'tv' - actual data stored as list of tuples (time: int, value: str)
    each `value` is a binary number as a string
    This function converts that to a dictionary that we can reference by normal names.

    :param data: raw VCD signal dict keyed by identifier codes
    :param signals: iterable of signal names to extract
    :return: dict mapping each requested signal name to its raw data entry
    :raises AssertionError: when a requested signal is not found
    """
    conv = {}
    for sig in signals:
        # First raw entry whose 'references' mentions the name wins.
        # NOTE(review): this is a substring match, so a signal whose name
        # is a prefix of another could bind the wrong entry — confirm the
        # reference names are unambiguous.
        for data_key in data.keys():
            if sig in data[data_key]['references']:
                conv[sig] = data[data_key]
                break
        assert sig in conv, 'Signal "%s" not found' % sig
    return conv
|
def sublist(list, start, len):
    """
    Returns the sub-list of List1 starting at Start and with (max) Len elements. It is not an error for Start+Len to exceed the length of the list.

    NOTE(review): the parameters shadow the builtins `list` and `len`;
    renaming them would break keyword-argument callers, so they are kept.

    >>> x
    [-1, 2, 10, 23, 23.23]
    >>> f.sublist(x, 1, 3)
    [2, 10, 23]
    >>> f.sublist(x, 2, 3)
    [10, 23, 23.23]
    >>>
    >>>
    """
    return list[start:(start + len)]
|
def clip(data, start_idx):
    """Drop the first start_idx elements and return the remainder."""
    clipped = data[start_idx:]
    return clipped
|
def task_request_statistics(contributions):
    """Return a dict holding the contributions whose category contains "task"."""
    task_requests = [c for c in contributions if "task" in c["category"]]
    return {"task_requests": task_requests}
|
def cpp_string(s: str) -> str:
    """Convert a python string into a C++ string literal.

    Backslash is escaped first so later escapes are not double-processed.
    """
    s = s.replace('\\', '\\\\')
    s = s.replace('"', '\\"')
    s = s.replace('\a', '\\a')
    s = s.replace('\b', '\\b')
    s = s.replace('\f', '\\f')
    s = s.replace('\n', '\\n')
    # Bug fix: carriage return was previously left unescaped, producing a
    # raw CR inside the emitted literal.
    s = s.replace('\r', '\\r')
    s = s.replace('\v', '\\v')
    s = s.replace('\t', '\\t')
    return f'"{s}"'
|
def choose_max_efficiency(efficiencies):
    """
    Given a single DOM efficiency or a list/tuple of them, return the
    highest as a float.
    """
    # isinstance (vs `type(...) ==`) also accepts list/tuple subclasses.
    if isinstance(efficiencies, (list, tuple)):
        return max(map(float, efficiencies))
    return float(efficiencies)
|
def _filter_subs(lst):
"""Return a copy of the list with any subvars of basevars in the list
removed.
"""
bases = [n.split('[',1)[0] for n in lst]
return [n for i,n in enumerate(lst)
if not (bases[i] in lst and n != bases[i])]
|
def hasTemplate(s):
    """Return True if string s contains both '{' and '}' template markers."""
    return ('{' in s) and ('}' in s)
|
def find_index(text, pattern, index=0, iterator=0):
    """Return the starting index of the first occurrence of pattern in text,
    or None if not found.

    :param index: current position in text
    :param iterator: number of pattern characters matched so far
    """
    assert isinstance(text, str), 'text is not a string: {}'.format(text)
    # Bug fix: the old message formatted `text` here instead of `pattern`.
    assert isinstance(pattern, str), 'pattern is not a string: {}'.format(pattern)
    if text == '':
        return None
    elif pattern == '':
        return 0
    if index < len(text):
        if text[index] == pattern[iterator]:
            if iterator == len(pattern) - 1:
                return index - iterator
            return find_index(text, pattern, index + 1, iterator + 1)
        elif iterator > 0:
            # Bug fix: restart just after where the partial match began
            # (index - iterator + 1), not at the current index — restarting
            # at `index` skipped candidates (e.g. "aab" in "aaab").
            return find_index(text, pattern, index - iterator + 1)
        else:
            return find_index(text, pattern, index + 1)
    else:
        return None
|
def strip_whitespace(value):
    """
    Remove leading and trailing whitespace from strings; return non-strings
    unchanged.

    :param value: any value
    :return: stripped string or the original value
    """
    return value.strip() if isinstance(value, str) else value
|
def gray(n):
    """
    Calculate the n-bit Gray code sequence as a list of ints.
    """
    code = [0, 1]
    for bit in range(1, int(n)):
        half = 2 ** bit
        reflected = code + code[::-1]        # mirror the current code
        prefix = [0] * half + [half] * half  # new top bit: 0 / 2**bit
        code = [reflected[j] + prefix[j] for j in range(2 * half)]
    return code
|
def phase_flip(phase):
    """
    Flip phasing: swap the two components of every pair.
    """
    flipped = []
    for left, right in phase:
        flipped.append((right, left))
    return flipped
|
def _get_glsl(to_insert, shader_type=None, location=None, exclude_origins=()):
"""From a `to_insert` list of (shader_type, location, origin, snippet), return the
concatenated snippet that satisfies the specified shader type, location, and origin."""
return '\n'.join((
snippet
for (shader_type_, location_, origin_, snippet) in to_insert
if shader_type_ == shader_type and location_ == location and
origin_ not in exclude_origins
))
|
def get_rgba_from_color(rgba):
    """Return tuple of R, G, B, A components from the given packed color.

    Arguments:
        rgba - 32-bit color packed as 0xRRGGBBAA
    """
    red = rgba >> 24 & 0xFF
    green = rgba >> 16 & 0xFF
    blue = rgba >> 8 & 0xFF
    alpha = rgba & 0xFF
    return red, green, blue, alpha
|
def walk_json(obj, *fields, default=None):
    """Walk nested containers by a sequence of keys/indices.

    for example a=[{"a": {"b": 2}}]
    walk_json(a, 0, "a", "b") will get 2
    walk_json(a, 0, "not_exist") will get None

    :param default: value returned when any step of the path is missing
    """
    try:
        for f in fields:
            obj = obj[f]
        return obj
    # Narrowed from a bare `except:` — only indexing failures mean "path
    # not present"; anything else (incl. KeyboardInterrupt) propagates.
    except (KeyError, IndexError, TypeError):
        return default
|
def attributes_dict_to_string(attributes_dict):
    """
    Convert an attributes dict back into GTF string format.

    Args:
        attributes_dict: a dict mapping attribute keywords to their values
    Returns:
        a string of the given attributes in GTF format

    >>> attributes_dict_to_string({
    ...     'gene_name': 'ESR1',
    ...     'gene_biotype': 'protein_coding',
    ... })
    'gene_name "ESR1"; gene_biotype "protein_coding";'
    """
    return ' '.join('{} "{}";'.format(key, value)
                    for key, value in attributes_dict.items())
|
def check_complexity(check_bytes: bytes, complexity: int) -> bool:
    """Check that a byte stream has the required number of leading 0 bits.

    Args:
        check_bytes: byte stream to check for complexity bits
        complexity: number of leading bits that must be 0 to pass
    Returns:
        True when the stream passes the complexity check, else False.
    """
    full_bytes, spare_bits = divmod(complexity, 8)
    # Every fully-covered byte must be zero.
    if any(check_bytes[i] != 0 for i in range(full_bytes)):
        return False
    # Complexity a multiple of 8: nothing left to check.
    if spare_bits == 0:
        return True
    # The next byte must have its top `spare_bits` bits clear.
    return check_bytes[full_bytes] < 2 ** (8 - spare_bits)
|
def part2(data):
    """
    Spiral-memory walk (Advent of Code 2017 day 3 style): each cell gets the
    sum of its already-written neighbours; return the first value written
    that exceeds `data`.

    >>> part2(140)
    142
    >>> part2(800)
    806
    >>> part2(INPUT)
    349975
    """
    x = 0
    y = 0
    i = 1
    value = 1
    # Bounding box of the ring walked so far; crossing it triggers a turn.
    max_x = 0
    max_y = 0
    min_x = 0
    min_y = 0
    direction = 'right'
    values = {}
    values[(0, 0)] = 1

    def adjacent(x, y):
        # Value previously written at (x, y); untouched cells count as 0.
        try:
            return values[(x, y)]
        except KeyError:
            return 0

    while value < data:
        i += 1
        # Step once in the current direction, turning counter-clockwise
        # whenever the walk pushes past the current ring boundary.
        if direction == 'right':
            x += 1
            if x > max_x:
                max_x = x
                direction = 'up'
        elif direction == 'up':
            y += 1
            if y > max_y:
                max_y = y
                direction = 'left'
        elif direction == 'left':
            x -= 1
            if x < min_x:
                min_x = x
                direction = 'down'
        elif direction == 'down':
            y -= 1
            if y < min_y:
                min_y = y
                direction = 'right'
        # New cell value: sum of all eight neighbours written so far.
        value = adjacent(x - 1, y - 1) + adjacent(x, y - 1) + adjacent(x + 1, y - 1) + adjacent(x - 1, y) + \
            adjacent(x + 1, y) + adjacent(x - 1, y + 1) + adjacent(x, y + 1) + adjacent(x + 1, y + 1)
        values[(x, y)] = value
    return value
|
def get_span_labels(sentence_tags, inv_label_mapping=None):
    """
    Convert token-level labels to a list of entity spans; it does not matter
    whether the tagging scheme is BMES, BIO, or BIOES.

    :param sentence_tags: sequence of tags such as "B-PER", "I-PER", "O"
    :param inv_label_mapping: optional dict mapping raw tag ids to tag strings
    :return: list of (start, end, label) tuples, end index inclusive
    """
    if inv_label_mapping:
        sentence_tags = [inv_label_mapping[i] for i in sentence_tags]
    span_labels = []
    last = "O"
    start = -1
    # traverse the sentence tags
    for i, tag in enumerate(sentence_tags):
        pos, _ = (None, "O") if tag == "O" else tag.split("-")
        # Close the running span when a new entity starts or an O is hit.
        if (pos == "S" or pos == "B" or tag == "O") and last != "O":
            span_labels.append((start, i - 1, last.split("-")[-1]))
        if pos == "B" or pos == "S" or last == "O":
            start = i
        last = tag
    if sentence_tags[-1] != "O":
        # Bug fix: the label was computed as split("-"[-1]) — a misplaced
        # bracket that appended the whole split list instead of the suffix.
        span_labels.append((start, len(sentence_tags) - 1,
                            sentence_tags[-1].split("-")[-1]))
    return span_labels
|
def make_filename(parts_list):
    """
    Assemble items in a list into a full path.

    Creates a full path file name from a list of parts such as might
    have been created by full_path.split('/')

    @param parts_list : list of str
    @return: str
    """
    return ''.join('/' + part for part in parts_list)
|
def square_int_list(int_list):
    """Square every number of int_list *in place* and return the list.

    Args:
        int_list ([integer]): list whose numbers get squared in place
    Returns:
        [list]: the same list object with all numbers squared
    """
    for position, value in enumerate(int_list):
        int_list[position] = value * value
    return int_list
|
def raw_name_to_display(raw_name):
    """
    Convert the given raw application command name to its display name.

    Parameters
    ----------
    raw_name : `str`
        The name to convert.

    Returns
    -------
    display_name : `str`
        The converted name.
    """
    normalized = raw_name.strip('_ ').lower().replace(' ', '-').replace('_', '-')
    words = [word for word in normalized.split('-') if word]
    return '-'.join(words)
|
def InfoValuesFromAPI(values):
    """Convert a list of API JsonValue objects to plain strings.

    Args:
        values: the list of JsonValue objects to be converted
    Returns:
        An equivalent list of strings
    """
    result = []
    for value in values:
        result.append(value.string_value)
    return result
|
def _merge_dictionaries(dictionaries):
"""
Collapse a sequence of dictionaries into one, with collisions resolved by
taking the value from later dictionaries in the sequence.
:param [dict] dictionaries: The dictionaries to collapse.
:return dict: The collapsed dictionary.
"""
result = {}
for d in dictionaries:
result.update(d)
return result
|
def eval_string(s):
    """Evaluate an "eval(...)"-wrapped python expression (string).

    SECURITY NOTE: this calls eval() on the embedded text — never feed it
    untrusted input.

    Returns the original string unchanged when it is not eval-wrapped or
    when evaluation fails.
    """
    if not s.startswith("eval("):
        return s
    try:
        return eval(s[5:-1])
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit escape.
    except Exception:
        return s
|
def extract_error_msg_from_ldap_exeception(e):
    """
    When the LDAP library raises an exception the error message contains a
    description and additional info; return those separately instead of a
    single string.

    :param e: exception whose str() is a dict literal with 'desc'/'info' keys
    :return: (description, info) tuple
    :raises ValueError/SyntaxError: if str(e) is not a literal dict
    """
    import ast
    # ast.literal_eval replaces eval(): it parses the dict literal without
    # executing arbitrary code embedded in the exception text.
    d = ast.literal_eval(str(e))
    return d['desc'], d['info']
|
def mnemonic_to_string(mnemonic_str):
    """
    Convert a space-separated mnemonic to a numbered, comma-separated
    string for terminal display, wrapping after every sixth word.
    """
    words = mnemonic_str.split()
    pieces = []
    for number, word in enumerate(words, start=1):
        pieces.append(f"{number}) {word}")
        if number != len(words):
            pieces.append(", ")
        if number % 6 == 0:
            pieces.append("\n")
    return "".join(pieces)
|
def get_lookup_title(window):
    """Get the lookup title for a window: the class first, name parts that
    merely repeat the class dropped, and any mark appended in brackets."""
    parts = window.get("name").split(" - ")
    wclass = window.get("window_properties", {}).get("class")
    mark = window.get("mark")
    if wclass:
        deduped = [part for part in parts if part.lower() != wclass.lower()]
        parts = [wclass] + deduped
    title = " - ".join(parts)
    if mark:
        title += " [{}]".format(mark)
    return title
|
def blinks_count(smiles_list):
    """
    Count blinks in a 0/1 sample stream.

    The samples are grouped into chunks of four, each chunk binarized
    (1 when at least two samples are set), single-chunk spikes are smoothed
    away, and the remaining 1-runs are counted.

    :param smiles_list: flat list of 0/1 samples
    :return: number of blinks detected
    """
    chunks = [smiles_list[i:i + 4] for i in range(0, len(smiles_list), 4)]
    res = [1 if sum(chunk) >= 2 else 0 for chunk in chunks]
    # Remove one-chunk blips: a value differing from both neighbours is noise.
    for idx in range(1, len(res) - 1):
        if res[idx - 1] != res[idx] and res[idx] != res[idx + 1]:
            res[idx] = res[idx - 1]
    # Run-length-compress, then count the 1-runs.
    compressed = []
    for value in res:
        if not compressed or value != compressed[-1]:
            compressed.append(value)
    return sum(compressed)
|
def get_motion(Letter, Word, i):
    """Only consider 'motions' of one character: return -1/+1 when the
    letter sits one position before/after i, 0 otherwise.  A letter moved
    by one position counts as half an error."""
    word_len = len(Word)
    if i >= word_len - 1:
        return 0
    target = Letter.lower()
    if Word[i].lower() == target:
        return 0
    if i > 0 and Word[i - 1].lower() == target:
        return -1
    if i < word_len and Word[i + 1].lower() == target:
        return 1
    return 0
|
def get_recipes_in_node(node):
    """Get the names of all recipes present in the run_list of a node."""
    recipes = []
    for entry in node.get('run_list'):
        if entry.startswith("recipe"):
            # "recipe[apache2]" -> "apache2"
            recipes.append(entry.split('[')[1].split(']')[0])
    return recipes
|
def reverse(string: str) -> str:
    """Return the string reversed."""
    # Slicing is the idiomatic O(n) reverse; the old char-by-char `+=`
    # loop was worst-case quadratic.
    return string[::-1]
|
def pangrams(s):
    """Hackerrank Problem: https://www.hackerrank.com/challenges/pangrams/problem

    Given a sentence, determine whether it is a pangram (contains every
    letter of the alphabet), ignoring case.

    Args:
        s (str): String to check to see if it's a pangram or not
    Returns:
        str: "pangram" or "not pangram" based on the contents of the string
    """
    # Bug fix: the old version counted *any* 26 distinct non-space chars,
    # so digits/punctuation could fake a pangram; compare against the
    # actual alphabet instead.
    alphabet = set('abcdefghijklmnopqrstuvwxyz')
    if alphabet <= set(s.lower()):
        return "pangram"
    return "not pangram"
|
def ord_if_char(value):
    """
    Helper for casts to simple builtin types: if the argument is a string
    type, return its ordinal value; otherwise return it unchanged.

    Raises an exception if the argument is a string with more than one
    character.
    """
    if isinstance(value, (bytes, str)):
        return ord(value)
    return value
|
def GetCols(x, *columns):
    """
    iterable >> GetCols(*columns)

    Extract elements of indexable `x` in the given order; also useful to
    reorder or clone elements.  Columns may be given as separate args or
    as a single tuple of indices.

    :param indexable container x: any indexable input
    :param int|tuple|args columns: indices of elements/columns in x to
        extract, or a tuple with these indices
    :return: extracted elements
    :rtype: tuple
    """
    if len(columns) == 1 and isinstance(columns[0], tuple):
        columns = columns[0]
    return tuple(map(x.__getitem__, columns))
|
def binary_search(arr, item):
    """Search the sorted list `arr` for `item`.

    :param arr: sorted list of comparable elements
    :param item: element to locate
    :return: index of item, or -1 when not present
    """
    beg, end = 0, len(arr) - 1
    # (the old pre-loop `mid` computation was dead code and is removed)
    while beg <= end:
        mid = (beg + end) // 2
        if arr[mid] == item:
            return mid
        if arr[mid] > item:
            end = mid - 1
        else:
            beg = mid + 1
    return -1  # element not present in the array
|
def successor(node):
    """
    Return the value of the in-order successor (the next highest value) of
    `node` in a BST, or None when there is none.

    In a bst, every value in the left tree is smaller than the node and
    every value in the right is greater.
                 8
           4          13
        2     6    10     16
       1 3   5 7  9 11  15 18

    Nodes are expected to expose .value, .left, .right and .parent.
    """
    if not node:
        return None
    if node.right:
        # Successor is the leftmost node of the right subtree.
        result = node.right
        while result.left:
            result = result.left
        return result.value
    # No right subtree: climb until we arrive from a left child; that
    # parent is the next-larger value.
    while node.parent:
        if node == node.parent.left:
            return node.parent.value
        node = node.parent
    return None
|
def get_correct_narrative_timestep(
        sim_yr,
        narrative_timesteps
):
    """Based on the simulated year, select the correct narrative timestep,
    i.e. the year whose values are to be used from the narrative.

    Arguments
    ---------
    sim_yr : int
        Simulation year
    narrative_timesteps : list
        All defined timesteps in narrative (left unmodified)

    Returns
    -------
    timestep : int
        Narrative timestep to use for calculation

    Example
    -------
    If we have a two-step narrative such as:
    year: 2015 - 2030, 2030 - 2050
    for the sim_yr 2020, the calculated values for 2030 would need to be
    used.  For the year 2031, the values for 2050 would need to be used.
    """
    # Sort a copy: the old in-place .sort() mutated the caller's list.
    ordered = sorted(narrative_timesteps)
    if len(ordered) == 1:
        return ordered[0]
    # Simulation year beyond the narrative: use the last defined timestep.
    if sim_yr > ordered[-1]:
        return ordered[-1]
    # Otherwise the first timestep at or after the simulation year applies.
    for year_narrative in ordered:
        if sim_yr <= year_narrative:
            return year_narrative
|
def trimmed_split(s, seps=(";", ",")):
    """Split string s on the first separator from seps that occurs in it,
    stripping whitespace from every piece; return [s] when none applies."""
    for sep in seps:
        if sep in s:
            return [piece.strip() for piece in s.strip().split(sep)]
    return [s]
|
def detect_roi(u_array, c_array, u_cutoff, min_length=50):
    """Report 1-based [start, end] regions of u_array that are above u_cutoff.

    A region starts at the first base whose score reaches ``u_cutoff``
    and its ``end`` records the first base position after the score
    drops below the cutoff again.  Regions spanning fewer than
    ``min_length`` positions (end - start + 1) are discarded.

    NOTE(review): ``c_array`` is zipped with ``u_array`` (so it bounds
    the iteration length) but its scores are not otherwise used —
    presumably intentional; confirm against callers.
    """
    roi = list()
    in_region = False
    base_pos = 1
    for u_score, _c_score in zip(u_array, c_array):
        if not in_region and u_score >= u_cutoff:
            in_region = True  # turn on recording
            roi.append([base_pos, 0])
        elif in_region and u_score < u_cutoff:
            in_region = False  # turn off recording
            roi[-1][1] = base_pos
        base_pos += 1
    # Bug fix: close a region that is still open at the end of the array.
    # Previously its end stayed 0, giving a negative length, so such
    # regions were always silently dropped by the filter below.
    if in_region:
        roi[-1][1] = base_pos
    # Keep only regions long enough to report.
    return [region for region in roi
            if (region[1] - region[0] + 1) >= min_length]
|
def WriteDubious(outfile, infile, code, station, time):
    """
    Write note to dubious file list.

    :param string outfile: filename to be written to
    :param string infile: filename of dubious file
    :param string code: text identifier of variables being tested
    :param string station: station ID being processed
    :param string time: time of the dubious data
    :returns: int of flag status (1 on success; I/O errors propagate).
    """
    # Mode 'a' already creates the file when it does not exist, so the old
    # try/except-IOError fallback that reopened with 'w' was dead code, and
    # the explicit of.close() inside the 'with' block was redundant.
    with open(outfile, 'a') as of:
        of.write(station + ' ' + time + ' ' + code +
                 ' variables are first, but not nec. only problem ' + infile + '\n')
    return 1
|
def mem_text(trial):
    """Return the on-screen instruction text for a memory trial.

    Trial 0 gets the full instruction screen; every later trial
    (``trial`` >= 1) gets the short block header.

    input: current memory trial # (int)
    output: memory instruction text (string)
    """
    full_instructions = (
        " Now we're going to test your memory. "
        "\n Just like the practice round, you will rate single images using the following scale: "
        "\n\n (1) I definitely have not seen the image before"
        "\n (2) I probably have not seen the image before"
        "\n (3) I probably have seen the image before"
        "\n (4) I definitely have seen the image before"
        "\n\n You will need to make your responses quickly -- you'll have just 2 seconds. "
        " If you aren't sure what to say for a particular image, make your best guess! "
        "\n\n Press any key to begin."
    )
    block_header = (
        " MEMORY BLOCK. "
        "\n\n Press any key to begin."
    )
    return block_header if trial >= 1 else full_instructions
|
def get_user_group(dest):
    """
    Split the "user:group" ownership string out of a late bind config's
    dest dictionary.

    Args:
        dest: dict from the late bind config's parameters file,
              e.g. dest["user_group"] = "Bob:devops"
    Returns:
        A list whose first element is the user and second the group the
        late bind config belongs to.
    """
    ownership = dest["user_group"]
    return ownership.split(":")
|
def _catc_tags(start_num, end_num):
"""
Return a list of CATC tags corresponding to the start test number and end
test number. For example, start=1 and end=3 would return:
["CATC-001", "CATC-002", "CATC-003"].
"""
return ["CATC-0{0:02d}".format(i) for i in range(start_num, end_num + 1)]
|
def seven_byte_length(value):
    """
    Return the minimum number of bytes required to represent the integer
    if we can use seven bits per byte.

    Positive integers only, please!  (Zero still occupies one byte.)
    """
    full_groups, leftover_bits = divmod(value.bit_length(), 7)
    if leftover_bits == 0 and full_groups > 0:
        return full_groups
    # A partial group of bits — or value == 0 — still needs one byte.
    return full_groups + 1
|
def html_header(title, page_heading=None, highlight_targets=False):
    """Build and return an XHTML 1.0 Strict file header as a string.

    TITLE and PAGE_HEADING are expected to already be HTML-escaped if
    needed; PAGE_HEADING defaults to TITLE.  When HIGHLIGHT_TARGETS is
    true, a style rule is included that draws a red border around anchor
    targets when they are jumped to.
    """
    heading = page_heading if page_heading else title
    parts = [
        '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"\n',
        ' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\n',
        '<html><head>\n',
        '<meta http-equiv="Content-Type"',
        ' content="text/html; charset=UTF-8" />\n',
    ]
    if highlight_targets:
        parts += [
            '<style type="text/css">\n',
            ':target { border: 2px solid red; }\n',
            '</style>\n',
        ]
    parts += [
        '<title>%s</title>\n' % title,
        '</head>\n\n',
        '<body style="text-color: black; background-color: white">\n\n',
        '<h1 style="text-align: center">%s</h1>\n\n' % heading,
        '<hr />\n\n',
    ]
    return ''.join(parts)
|
def tabout(*args):
    """Return a tab-delimited string of the arguments, with None
    rendered as 'NA' and everything else via str()."""
    return "\t".join("NA" if value is None else str(value) for value in args)
|
def initialize_contral_vars(ubr_index, slabs, keys):
    """
    Flatten per-slab fitting parameters into one list for a
    multi-dimensional minimiser (which needs a flat list/array),
    together with the bookkeeping needed to reverse the flattening.

    Walks ``ubr_index[slab][key]`` for every slab in ``slabs`` and key
    in ``keys``.  Scalar keys contribute one value, list keys their
    elements (coerced to float), and matrix keys a row-major flattening.

    Returns
    -------
    slab_ubr : list
        Flat list of parameter values.
    slab_ubr_index : list
        Per-slab entries ``[slab, [[key, n_values], ...]]`` recording
        how many flat values each key contributed, so the original
        structure can be recovered by replaying the same path.
    """
    def _flatten_matrix(mat):
        # Row-major walk: left to right within each row, top row first.
        # Recovering the matrix must replay this exact path in reverse.
        n_rows, n_cols = mat.shape
        values = []
        for row in range(n_rows):
            for col in range(n_cols):
                values.append(mat[row, col])
        return values

    slab_ubr = []
    slab_ubr_index = []
    for slab in slabs:
        per_key_index = []
        slab_params = ubr_index[slab]
        for key in keys:
            value = slab_params[key]
            if key in ['absorption', 'beta', 'roughness', 'scale']:
                # Single-number parameters.
                per_key_index.append([key, 1])
                slab_ubr.append(value)
            elif key in ['vacancy', 'lattice_abc']:
                # List-valued parameters.
                per_key_index.append([key, len(value)])
                slab_ubr.extend(float(item) for item in value)
            else:
                # Matrix-valued parameters.
                per_key_index.append([key, value.size])
                slab_ubr.extend(_flatten_matrix(value))
        slab_ubr_index.append([slab, per_key_index])
    return slab_ubr, slab_ubr_index
|
def _solve_datasets_references(dsets: list) -> dict:
    """
    Solve datasets/dictionary internal references

    Each dict in ``dsets`` may hold string values containing "{field}"
    templates referring to other flat string fields of the same dict;
    those are resolved, and the resolved dicts are returned keyed by
    their (resolved) 'id' value.
    """
    def _solve_self_references(mapin: dict) -> dict:
        """
        Return version of 'maps_in' where simple "{internal}-{reference}" are resolved.
        Example:
        input:
        ```
        {
            "id": "{target}/{mission}/{instrument}/{product_type}",
            "target": "some_body",
            "mission": "some_spacecraft",
            "instrument": "some_sensor",
            "product_type": "some_dataset"
        }
        ```
        output:
        ```
        {
            "id": "some_body/some_spacecraft/some_sensor/some_dataset",
            "target": "some_body",
            "mission": "some_spacecraft",
            "instrument": "some_sensor",
            "product_type": "some_datalevel"
        }
        ```
        """
        def _resolved(value):
            """
            Return True if 'value' has NO more "{}": all fields are resolved.
            """
            import re
            return not re.match('.*{.+}.*', value)
        from copy import deepcopy
        # Work on a deep copy so the caller's dict is never mutated.
        mappings = deepcopy(mapin)
        # Fields/Keys to skip dereferencing: we only map flate/self references,
        # values that are not "str" (ie, data-structures) are moved to 'map_out'.
        # 'map_aux' is composed by all values already "resolved" (without "{}")
        keys_bypass = [k for k,v in mappings.items() if not isinstance(v,str)]
        mapout = {k:mappings.pop(k) for k in keys_bypass}
        mapaux = {k:v for k,v in mappings.items()
                    if _resolved(v)}
        cnt = 0
        # Fixed-point iteration: each pass substitutes the already-resolved
        # fields into the remaining templates, so chained references
        # (a -> b -> c) resolve one link per pass.
        while len(mapaux) < len(mappings):
            # if 'mapaux' is smaller than 'mappings' it means non-resolved values still there
            cnt += 1
            # Guard: at least one field must resolve per pass, so needing more
            # passes than fields indicates a circular or dangling reference.
            assert cnt <= len(mappings), f"Apparently going for an infinite loop: {mappings} {mapaux}"
            # Keys still unresolved on this pass.
            _reset = set(mappings.keys()) - set(mapaux.keys())
            _aux = {k:mappings[k].format(**mapaux) for k in _reset}
            mapaux.update({ k:v for k,v in _aux.items()
                            if _resolved(v) })
        # Once 'mapaux' has same size as 'mappings' (all fields resolved),
        # update 'map-out' with flat-fields resolved.
        mapout.update(mapaux)
        return mapout
    _dsets = [_solve_self_references(d) for d in dsets]
    # Make the output a pivot table with dataset/objects for keyword "dataset 'id'"
    _pivot = {d['id']:d for d in _dsets}
    return _pivot
|
def sfrd(z, theta):
    """Compute the star formation history for the chosen parametrisation.

    Evaluates the double power law
        rho_sfr = a * (1 + z)**b / (1 + ((1 + z) / c)**d)
    at redshift ``z`` with ``theta = (a, b, c, d)``.
    """
    a_par, b_par, c_par, d_par = theta
    one_plus_z = 1. + z
    numerator = a_par * one_plus_z ** b_par
    denominator = 1. + (one_plus_z / c_par) ** d_par
    return numerator / denominator
|
def format_number(number):
    """Format a number to a more readable string with thousands separators.

    Args:
        number: The number to format.

    Returns:
        str: A string containing the formatted number.
        (The original docstring listed this under "Args".)

    Example:
        >>> format_number(321)
        '321'
        >>> format_number(5_432)
        '5,432'
        >>> format_number(7_654_321)
        '7,654,321'
        >>> format_number(9_876_543_210)
        '9,876,543,210'
    """
    return '{:,}'.format(number)
|
def merge_dicts(*dictionaries):
    """Merge multiple dictionaries into a new dict.

    The final positional argument may be a bool controlling overwrites:
    when True, later dictionaries may erase earlier entries; when False
    (the default), the first occurrence of a key wins.

    Examples
    --------
    >>> dict1 = {'label': 'string'}
    >>> dict2 = {'c': 'r', 'label': 'something'}
    >>> merge_dicts(dict1, dict2, False)
    {'label': 'string', 'c': 'r'}
    >>> merge_dicts(dict1, dict2, True)
    {'label': 'something', 'c': 'r'}
    >>> merge_dicts(dict1, dict2)
    {'label': 'string', 'c': 'r'}"""
    # A trailing bool flags "force" (overwrite) mode.
    if isinstance(dictionaries[-1], bool):
        overwrite = dictionaries[-1]
        sources = dictionaries[:-1]
    else:
        overwrite = False
        sources = dictionaries
    merged = {}
    for source in sources:
        if overwrite:
            merged.update(source)
        else:
            for key, value in source.items():
                merged.setdefault(key, value)
    return merged
|
def ansible_host(server, host_vars):
    """Get the ansible host. This will be used to ssh into the host.
    Change this to whatever you need.

    Returns the first IP from the preferred network if present; otherwise
    the first IP found on any network; otherwise None.

    :param server: Server object from nova api (not used here)
    :type server: ?
    :param host_vars: Dictionary of host variables so far.
    :type host_vars: dict
    :returns: Single ip address, or None when no address exists
    :rtype: str or None
    """
    # NOTE(review): the original assigned preferred = 'provider-790' and then
    # immediately shadowed it with 'somethingweird', so only the latter ever
    # took effect.  The dead first assignment was removed; restore
    # 'provider-790' here if that was the intended preferred network.
    preferred = 'somethingweird'
    addresses = host_vars.get('addresses', {})

    # Look for first ip on preferred network.
    if preferred in addresses:
        addrs = addresses.get(preferred, [])
        if addrs:
            return addrs[0]

    # Look for first ip if not successful above.
    for addr_list in addresses.values():
        if addr_list:
            return addr_list[0]

    # No ips, return None
    return None
|
def ifpure(list2judge: list, typeexpected: type):
    """Return True when every element of ``list2judge`` is of exactly
    the type ``typeexpected``.

    This is a strict ``type() is`` check, so instances of subclasses
    (e.g. ``bool`` for ``int``) do not count.  An empty list is pure.
    """
    return all(type(element) is typeexpected for element in list2judge)
|
def transform_prediction_to_scidtb_format(edus, arcs):
    """
    Convert predicted EDUs and dependency arcs into the SciDTB dict layout.

    Parameters
    ----------
    edus: list[list[str]]
        Tokenised EDUs, indexed by EDU id (id 0 is the implicit ROOT).
    arcs: list[(int, int, str)]
        (head id, dependent id, relation label) triples; no arc may have
        the ROOT (id 0) as its dependent.

    Returns
    -------
    dict[str, list[str, any]]
        {"root": [entry, ...]} where each entry holds id/parent/text/relation.
    """
    entries = [{"id": 0,
                "parent": -1,
                "text": "ROOT",
                "relation": "null"}]
    for head, dep, rel in arcs:
        assert dep != 0
        entries.append({"id": dep,
                        "parent": head,
                        "text": " ".join(edus[dep]),
                        "relation": rel})
    return {"root": entries}
|
def is_scalar(value):
    """
    Return True unless ``value`` is a list, tuple or dict.

    Primitive version, relying on the fact that JSON cannot contain any
    more complicated data structures.
    """
    if isinstance(value, (list, tuple, dict)):
        return False
    return True
|
def cmp_column_indices(x, y):
    """Comparision function for column indices

    x and y are XLS-style column indices e.g. 'A', 'B', 'AA' etc.
    Returns -1 if x is a column index less than y, 1 if it is
    greater than y, and 0 if it's equal.
    """
    # Spreadsheet column order: shorter labels always sort first
    # ('Z' < 'AA'), and equal-length labels compare lexicographically
    # ('AB' < 'BA').  Bug fix: the previous reversed-string comparison
    # got both cases wrong (e.g. it ranked 'B' after 'AA' and 'AB'
    # after 'BA').  Comparing (length, label) tuples implements the
    # documented ordering.
    key_x = (len(x), x)
    key_y = (len(y), y)
    return (key_x > key_y) - (key_x < key_y)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.