content stringlengths 42 6.51k |
|---|
def safe_div(a, b):
    """Divide a by b, returning 0 when b is zero.

    :param a: numerator
    :param b: denominator
    :return: a / b, or 0 if b is zero
    """
    return 0 if b == 0 else a / b
def make_shard_endpoints(total_length, shard_size=int(1e6)):
    """Partition [0, total_length) into consecutive half-open shards.

    Every shard has length shard_size, except possibly the last, which is
    truncated at total_length.

    :param total_length: exclusive upper bound of the interval
    :param shard_size: desired length of each shard
    :return: list of (start, end) pairs covering [0, total_length)
    """
    endpoints = []
    start = 0
    while start < total_length:
        end = min(start + shard_size, total_length)
        endpoints.append((start, end))
        start = end
    return endpoints
def get_chart_data(score_list):
    """Construct data dictionary to create histogram with chart.js."""
    dataset = {
        "label": "Buyer Ratings",
        "data": score_list,
        "backgroundColor": 'rgba(96, 4, 122, 0.6)',
        "hoverBackgroundColor": 'rgba(96, 4, 122, 1)',
        "borderWidth": 5,
    }
    return {
        "labels": ["1", "2", "3", "4", "5"],
        "datasets": [dataset],
    }
def lastLocker(theDictionary):
    """Identifies the last locker number.

    Assumes locker numbers within theDictionary are sequential with no
    gaps, and relies on dicts preserving insertion order.

    :param dict[str, str] theDictionary:
        key: locker number / value: student name or "open"
    :return: The last locker number in theDictionary
    :rtype: str
    """
    return list(theDictionary)[-1]
def res_ene(alpha, beta):
    """Return resonance energy (Er) and width (G) from alpha and beta."""
    return beta**2 - alpha**4, 4*alpha**2 * abs(beta)
def parse_definition_ref(ref: str) -> str:
    """Strip the '#/definitions/' prefix from ref and return the name.

    :raises ValueError: if ref does not start with the expected prefix.
    """
    prefix = '#/definitions/'
    if ref.startswith(prefix):
        return ref[len(prefix):]
    raise ValueError("Expected a ref with prefix {!r}, but got: {!r}".format(prefix, ref))
def is_self_tackle_and_oppo_dispossessed(event_list, team):
    """Return True when the first two events show the given team making a
    tackle (type 7) while the opposing team is dispossessed (type 50)."""
    first_two = event_list[:2]
    tackled = any(e.type_id == 7 and e.team == team for e in first_two)
    lost_ball = any(e.type_id == 50 and e.team != team for e in first_two)
    return tackled and lost_ball
def get_years(ncfiles, sep='-'):
    """
    Retrieve the set of years from a list of netCDF filenames.

    Each year is the first four characters after the final occurrence of
    `sep` in the filename.

    Parameters
    ----------
    ncfiles : list
        Filenames from which years will be extracted.
    sep : str, optional
        Separator. The default is '-'.

    Returns
    -------
    years : set
        Set of year strings.
    """
    return {str(name).rsplit(sep, 1)[-1][:4] for name in ncfiles}
def nb_murs(lab):
    """
    Get the number of walls in a given lab.

    The lab dict holds "nlines"/"ncolumns" metadata plus, under each cell
    key, the list of adjacent cells it is connected to (i.e. no wall
    between them).  Walls = total interior borders - distinct connections.
    """
    n = lab["nlines"]
    m = lab["ncolumns"]
    # Total interior borders of an n x m grid: vertical + horizontal.
    murs_max = n*(m-1) + m*(n-1)
    non_murs = []
    for k in lab.keys():
        if type(k)==str:
            # Skip the "nlines"/"ncolumns" metadata entries.
            continue
        for connect in lab[k]:
            # Count a connection once even when listed in both directions
            # (only the reversed pair is checked before appending).
            if [connect,k] in non_murs:
                continue
            non_murs.append([k,connect])
    return murs_max - len(non_murs)
def f(x: bool = True) -> bool:
    """Return the logical negation of x.

    :param bool x: the boolean to negate.
    :return: the opposite of x.
    :rtype: bool
    :raises TypeError: if x is not exactly a bool.
    """
    # bool cannot be subclassed, so isinstance is equivalent to type(x) == bool.
    if not isinstance(x, bool):
        raise TypeError("No!")
    return not x
def issequence(object):
    """Return 1 if object supports integer indexing (is a sequence), else 0.

    An IndexError from an empty sequence still counts as a sequence;
    TypeError or KeyError means the object is not one.
    """
    try:
        object[0]
        return 1
    except IndexError:
        return 1
    except (TypeError, KeyError):
        return 0
def listcomp_const_condition_false():
    """
    Exercise a list comprehension whose filter is the constant falsy
    literal 0, so no elements are ever produced.

    >>> listcomp_const_condition_false()
    []
    """
    # The `if 0` filter is intentionally constant-false: this function
    # exists to test compilation of a constant comprehension condition.
    return [x*2 for x in range(3) if 0]
def ensure_bits_count(number: int, bits: int) -> int:
    """Truncate number to its lowest `bits` bits.

    Args:
        number (int): value to mask.
        bits (int): maximum desirable bit count.
    Returns:
        int: number with at most the specified number of bits kept.
    """
    mask = (1 << bits) - 1
    return number & mask
def splitSentence(s):
    """Split s on whitespace (split_sentence per PEP8; mixedCase forced by Codewars)."""
    words = s.split()
    return words
def get_data(filename):
    """Return the full text contents of the given file."""
    with open(filename, 'r') as handle:
        contents = handle.read()
    return contents
def cookiecutter_project(answer):
    """
    Return True when the prompt's 'tasks' answer includes deploying a
    cookiecutter template.
    """
    tasks = answer['tasks']
    return "cookiecutter" in tasks
def generate_response(response):
    """Return the server response truncated at the first "\\r\\n".

    :param response: raw response string from the server
    :return: the first line of the response
    :raises Exception: if response is None
    """
    if response is None:
        raise Exception("response of server is none, please confirm it.")
    first_line, _, _ = response.partition('\r\n')
    return first_line
def error_message(error_log):
    """Return a string that contains the error message.

    Entries with type_name 'DTD_UNKNOWN_ID' are filtered out (false
    positives related to IDREF attributes), and the remaining errors are
    reported earliest-first.
    """
    kept = [str(entry) for entry in error_log
            if entry.type_name != 'DTD_UNKNOWN_ID']
    # error_log is most-recent-first; reverse so earliest failures lead.
    return "\n".join(reversed(kept))
def to_fqdn(name: str) -> str:
    """Append a trailing dot to FQDN-like DNS names that lack one.

    Names without any dot (bare labels) are returned unchanged.

    :param name: the DNS record name to attempt to convert to a FQDN.
    """
    if name.endswith(".") or "." not in name:
        return name
    return name + "."
def maps_won(D):
    """Return both teams' map scores: (scores[0].value, scores[1].value)."""
    first = D["scores"][0]["value"]
    second = D["scores"][1]["value"]
    return first, second
def get_line_ABC_inter(line1, line2):
    """Intersect two lines given in general form A*x + B*y + C = 0.

    line1: [A1, B1, C1], line2: [A2, B2, C2].
    Returns the intersection point (x, y), or None when the determinant
    B1*A2 - A1*B2 is zero (parallel lines).
    """
    A1, B1, C1 = line1[0], line1[1], line1[2]
    A2, B2, C2 = line2[0], line2[1], line2[2]
    det = B1*A2 - A1*B2
    if det == 0:
        return None
    x = (C1*B2 - B1*C2) / det
    y = (A1*C2 - A2*C1) / det
    return (x, y)
def find_two_entry_product(numbers):
    """
    Return the product of the first two entries of numbers that sum to 2020.

    numbers (list): A list of integers.
    Return (int): The product of two entries in numbers that sum to 2020.
    Raises:
        ValueError if no two entries in numbers sums to 2020.

    >>> l = [1721, 675, 299]
    >>> find_two_entry_product(l)
    514579
    """
    count = len(numbers)
    for i in range(count):
        for j in range(i + 1, count):
            if numbers[i] + numbers[j] == 2020:
                return numbers[i] * numbers[j]
    raise ValueError('No two entries in numbers sums to 2020')
def axis_translation(n_structures, distance=-10, axis=0):
    """
    Build translation vectors spacing n_structures evenly along one axis,
    centred around the origin.

    - distance: distance between each structure
    - axis: axis selection for translation (0: x-axis, 1: y-axis, 2: z-axis)
    """
    half_span = (n_structures - 1) * distance / 2
    vectors = []
    for index in range(n_structures):
        vector = [0, 0, 0]
        vector[axis] = index * distance - half_span
        vectors.append(vector)
    return vectors
def to_list(collection):
    """Converts the collection to a list.

    Args:
        collection (list|dict): Collection to iterate over. For a dict,
            its values are used.

    Returns:
        list: Collection converted to list.

    Example:

        >>> results = to_list({'a': 1, 'b': 2, 'c': 3})
        >>> assert set(results) == set([1, 2, 3])
        >>> to_list((1, 2, 3, 4))
        [1, 2, 3, 4]

    .. versionadded:: 1.0.0
    """
    if isinstance(collection, dict):
        # Wrap in list(): a dict_values view previously leaked through,
        # contradicting the declared list return type (not indexable).
        return list(collection.values())
    return list(collection)
def code_from_int(size, num):
    """Decompose num into a factorial-base code of length `size`.

    Successive divmods use radices size, size-1, ..., 1.

    :type size: int
    :type num: int
    :rtype: list
    """
    digits = []
    remaining = num
    for radix in range(size, 0, -1):
        remaining, digit = divmod(remaining, radix)
        digits.append(digit)
    return digits
def normalize(x: complex) -> complex:
    """
    Return x scaled to unit modulus: same argument, |result| == 1.

    To normalize a number, divide it by its own modulus.  Zero has no
    argument, so 0 is returned unchanged.
    """
    modulus = abs(x)
    if modulus == 0:
        return 0
    return x / modulus
def _ContainIp(ip_list, target_ip):
"""Returns true if target ip is in the list."""
for ip in ip_list:
if ip.RelativeName() in target_ip:
return True
return False |
def ddc_key_to_uri(key: str):
    """Convert a ddc key to a uri.

    Parameters
    ----------
    key: str
        the ddc key, e.g. `123.123`, `123` or `001`

    Returns
    -------
    str
        a URI built from the ddc key, e.g. `ddc:123.123`
    """
    return "ddc:" + key
def merge_configs(*configs):
    """
    Merges dictionaries of dictionaries, by combining top-level dictionaries with last value taking
    precedence.

    For example:

    >>> merge_configs({'a': {'b': 1, 'c': 2}}, {'a': {'b': 2, 'd': 3}})
    {'a': {'b': 2, 'c': 2, 'd': 3}}
    """
    merged = {}
    for config in configs:
        for key, section in config.items():
            if key not in merged:
                # Shallow-copy so the input dicts are never mutated.
                merged[key] = section.copy()
            else:
                merged[key].update(section)
    return merged
def _fullname(obj):
"""
Get the fullname from a Python object
"""
return obj.__module__ + "." + obj.__name__ |
def is_close(a, b, rel_tol=1e-09, abs_tol=0.0):
    """
    Determines whether one float value is approximately equal or "close"
    to another float value.

    Copied from PEP 485.

    Args:
        a (float): First value to be tested for relative closeness.
        b (float): Second value to be tested for relative closeness.
        rel_tol (float): Relative tolerance -- the amount of error allowed
            relative to the larger absolute value of a or b.  E.g. use
            rel_tol=0.05 for 5%.  The default 1e-9 matches to roughly 9
            decimal digits; must be greater than 0.0.
        abs_tol (float): Minimum absolute tolerance level -- useful for
            comparisons near zero.

    Returns:
        bool: Whether a is approximately equal or "close" to b.
    """
    scale = max(abs(a), abs(b))
    tolerance = max(rel_tol * scale, abs_tol)
    return abs(a - b) <= tolerance
def find_item(where, what):
    """
    Tries to locate the item with either the id, or checks to see if it matches the name in the dict.

    :param where: dict to be searched
    :param what: what to look for (a key, a 10-char truncated key, or a value's 'name')
    :return: a tuple of the key and item found, or (None, None)
    """
    # Membership test rather than truthiness: a key whose stored value is
    # falsy (e.g. {}) was previously skipped and reported as missing.
    if what in where:
        return what, where[what]
    if len(what) > 10:
        key = what[:10]
        if key in where:
            return key, where[key]
    for k, v in where.items():
        if 'name' in v and v['name'] == what:
            return k, v
    return None, None
def toStrish(s):
    """
    Method aimed to convert a string in str type
    @ In, s, string, string to be converted
    @ Out, response, str, the casted value
    """
    # Plain str values pass through untouched.
    if type(s) == type(""):
        return s
    # NOTE(review): bytes are returned unchanged, NOT decoded to str,
    # despite the docstring -- confirm whether callers rely on this.
    elif type(s) == type(b""):
        return s
    else:
        # Anything else (numbers, etc.) is stringified.
        return str(s)
def reverse(s):
    """
    Return reverse of 's' (elements joined back into a str).
    """
    reversed_chars = reversed(s)
    return "".join(reversed_chars)
def _swap_2opt(route, i, k):
""" Swapping the route """
new_route = route[0:i]
new_route.extend(reversed(route[i:k + 1]))
new_route.extend(route[k + 1:])
return new_route |
def process_fastq_record(lines):
    """Map the four lines of a FASTQ record to a dict.

    Keys: name (leading '@' stripped), sequence, strand, quality.
    """
    keys = ('name', 'sequence', 'strand', 'quality')
    record = dict(zip(keys, lines))
    record['name'] = record['name'][1:]
    return record
def count_vowels(s):
    """ (str) -> int

    Return the number of vowels (a, e, i, o, and u) in s.

    >>> count_vowels('Happy Anniversary!')
    5
    >>> count_vowels('xyz')
    0
    """
    return sum(1 for ch in s if ch in 'aeiouAEIOU')
def value_constraint(state, district, n_districts, values, index, threshold):
    """Return whether values[index] violates the constraint (exceeds threshold).

    state, district and n_districts belong to the shared constraint-function
    signature and are unused by this particular constraint.
    """
    violates = values[index] > threshold
    return violates
def _list_to_zeros(xs):
"""Takes a list-of-lists, with arbitrary nesting level;
returns a list-of-lists of the same shape but with every non-list
element replaced with zero."""
if isinstance(xs, list):
return [_list_to_zeros(x) for x in xs]
return 0 |
def in_bisect(s, target):
    """Return True if target is in sorted list s, via recursive bisection.

    s: sorted list
    target: item to search for
    """
    # Guard added: an empty list contains nothing.  Previously an empty
    # input (possible on the initial call) raised IndexError on s[0].
    if not s:
        return False
    # Base case, down to one element.
    if len(s) == 1:
        if s[0] == target:
            return True
        return False
    if s[0] <= target <= s[-1]:
        mid_index = len(s) // 2
        mid = s[mid_index]
        if target < mid:
            return in_bisect(s[:mid_index], target)
        return in_bisect(s[mid_index:], target)
    return False
def create_idx(start, end):
    """Return index pairs closing a ring of boundary vertices.

    Consecutive pairs [i, i+1] for i in [start, end), plus the closing
    segment [end, start], so segments can feed a constrained triangulation.
    """
    segments = [[i, i + 1] for i in range(start, end)]
    segments.append([end, start])
    return segments
def exponential_moving_average(samples, new_sample_wt):
    """Compute the exponential moving average of a sample sequence.

    Each output point is (1 - new_sample_wt) * previous + new_sample_wt * s;
    the first sample seeds the average.

    :param samples: sequence of numeric samples
    :param new_sample_wt: weight given to each new sample
    :return: list of filtered values, or None when samples is empty
    """
    # Fixed: `len(samples) is 0` compared by identity, not equality --
    # implementation-dependent and a SyntaxWarning on modern Python.
    if len(samples) == 0:
        return None
    filtered = samples[:1]
    for sample in samples[1:]:
        smoothed = ((1.0 - new_sample_wt) * filtered[-1]) + \
                   (new_sample_wt * sample)
        filtered.append(smoothed)
    return filtered
def busca_sequencial_sentinela(lista, elemento):
    """
    Realiza busca sequencial com sentinela: the element itself is appended
    as a sentinel so the scan loop needs no bounds check.

    lista -- unordered list of integers (restored before returning)
    elemento -- element to search for
    :return: index of the first occurrence, or -1 when absent
    """
    # Bug fix: the sentinel must be the element being searched for; the
    # original appended the counter value 0, so the sentinel never worked
    # and the loop relied on an extra length check each iteration.
    lista.append(elemento)
    indice = 0
    while lista[indice] != elemento:
        indice += 1
    del lista[-1]
    # Stopping at the (removed) sentinel position means "not found".
    return -1 if indice == len(lista) else indice
def stirling_number_of_the_first_kind(n: int, k: int) -> int:
    """
    Returns one of the coefficients s(n, k) in the expansion of the falling factorial

    s(3,3)=1, s(3,2)=-3, s(3,1)=2, because x(x-1)(x-2) = 1x^3 - 3x^2 + 2x

    :param n: The depth of the falling factorial
    :param k: The coefficient in the expansion
    :return: The desired coefficient s(n, k) in the expansion of the falling factorial
    """
    if k == n:
        return 1
    if k == 0 or k > n:
        return 0
    # Recurrence: s(n, k) = s(n-1, k-1) - (n-1) * s(n-1, k)
    previous = stirling_number_of_the_first_kind(n - 1, k)
    diagonal = stirling_number_of_the_first_kind(n - 1, k - 1)
    return diagonal - (n - 1) * previous
def cubes(l=[1, 2, 3]):
    """
    task 0.5.29

    One-line procedure taking a list of integers 'l'; the output list's
    i-th element is the cube of the i-th element of 'l'.

    e.g. input [1, 2, 3] -> output [1, 8, 27]
    """
    return [value ** 3 for value in l]
def to_jsondict(obj, view=''):
    """Convert a Python object to a JSON-encodable dict via its to_jsondict
    hook; objects without the hook are returned unchanged."""
    if hasattr(obj, 'to_jsondict'):
        return obj.to_jsondict(view)
    return obj
def sort_012(input_list):
    """
    Sort an array consisting only of 0s, 1s and 2s, in place, in a single
    traversal (Dutch national flag three-way partition).

    Args:
        input_list(list): List to be sorted
    Returns:
        The sorted list (same object), or the string
        'please provide input list' for empty/None input.
    """
    if not input_list:
        return 'please provide input list'
    # The previous del/insert/append approach cost O(n) per element
    # (O(n^2) total) and was not actually a single traversal.
    low = 0                       # everything before `low` is 0
    high = len(input_list) - 1    # everything after `high` is 2
    current = 0
    while current <= high:
        value = input_list[current]
        if value == 0:
            input_list[low], input_list[current] = input_list[current], input_list[low]
            low += 1
            current += 1
        elif value == 2:
            input_list[high], input_list[current] = input_list[current], input_list[high]
            high -= 1
            # do not advance `current`: the swapped-in value is unexamined
        else:
            current += 1
    return input_list
def get_placename_and_unique_alt_names(place_dict):
    """Extract placename, alt-name list and (lat, lon) location from a
    place record (location is currently unused by callers)."""
    name = place_dict['placename']
    alt_names = list(place_dict['altnames'])
    location = (place_dict["lat"], place_dict["lon"])
    return name, alt_names, location
def coding_problem_29(rle):
    """
    Run-length encoding is a fast and simple method of encoding strings. The basic idea is to represent repeated
    successive characters as a single count and character. Implement run-length encoding and decoding. You can assume
    the string to be encoded have no digits and consists solely of alphabetic characters. You can assume the string to
    be decoded is valid.

    Examples:

    >>> coding_problem_29('AAAABBBCCDAA')
    '4A3B2C1D2A'
    >>> coding_problem_29('4A3B2C1D2A')
    'AAAABBBCCDAA'
    """
    if not rle.isalpha():  # contains digits (or is empty): decode
        return ''.join(ch * int(count) for count, ch in zip(rle[::2], rle[1::2]))
    # encode: collapse each run of identical characters into "<len><char>"
    pieces = []
    pos = 0
    while pos < len(rle):
        run_end = pos
        while run_end < len(rle) and rle[run_end] == rle[pos]:
            run_end += 1
        pieces.append(str(run_end - pos) + rle[pos])
        pos = run_end
    return ''.join(pieces)
def escape_json(json_string):
    """Escape double quotes so JSON strings can sit inside html attributes."""
    escaped = json_string.replace('"', '&quot;')
    return escaped
def should_connect_to(rules, our_type, peer_type):
    """
    Decide from the config connection rules whether a peer of our_type
    should connect to peer_type.

    A "*" wildcard matches any peer type; when our_type has no
    "connect_to" entry, the default rule is ["*"].
    """
    allowed = rules[our_type].get("connect_to", ["*"])
    if "*" in allowed:
        return True
    return peer_type in allowed
def update_max_list(l, e, n):
    """Append e to l (in place) and return the last n elements.

    :param l: list to update (mutated by the append)
    :param e: element to append
    :param n: maximum length of the returned tail
    :return: a new list of at most n trailing elements; [] when n <= 0
    """
    l.append(e)
    # Guard: `l[-0:]` would slice from index 0 and return the WHOLE list,
    # violating the maximum-length contract for n == 0.
    if n <= 0:
        return []
    return l[-n:]
def mat_is_void(a):
    """
    Check whether a given matrix is void

    A matrix is considered as "void" if it is None or []

    Parameters
    ----------
    a: list[list]
        The matrix to be checked

    Returns
    -------
    bool
        True if the matrix is void, False otherwise
    """
    if a is None:
        return True
    return len(a) == 0
def is_seq(o):
    """Check if the object is a sequence.

    Parameters
    ----------
    o : any object
        The object to check

    Returns
    -------
    is_seq : bool, scalar
        True if *o* defines __len__ (has a length), False otherwise
    """
    has_length = hasattr(o, '__len__')
    return has_length
def wind_speed(tws):
    """Wind speed failure indicator: 1 when tws exceeds 25, else 0."""
    return 1 if tws > 25 else 0
def _read_bytes(socket, message_size):
"""Read message_size bytes from the given socket.
The method will block until enough bytes are available.
:param socket: the socket to read from.
:param message_size: size (in bytes) of the message to read.
:returns: received message.
"""
message = b""
while len(message) < message_size:
received = socket.recv(message_size - len(message))
message += received
if not received:
return message
return message |
def make_uniform(planes_dict, uniques, padding):
    """ Ensure each section has the same number of images

    This function makes the output collection uniform in
    the sense that it preserves same number of planes across
    sections. It also captures additional planes based
    on the value of the padding variable

    Args:
        planes_dict (dict): planes to keep in different sections
        uniques (list): unique values for the major grouping variable
        padding (int): additional images to capture outside cutoff

    Returns:
        dictionary: dictionary containing planes to keep
    """
    # max no. of planes; every section is grown towards this count
    max_len = max([len(i) for i in planes_dict.values()])
    # max planes that can be added on each side, limited by the positions
    # of the overall smallest/largest plane within `uniques`
    min_ind = min([min(planes_dict[k]) for k in planes_dict])
    max_ind = max([max(planes_dict[k]) for k in planes_dict])
    max_add_left = uniques.index(min_ind)
    max_add_right = len(uniques) - (uniques.index(max_ind)+1)
    # add planes in each section based on padding and max number of planes
    for section_id, planes in planes_dict.items():
        len_to_add = max_len - len(planes)
        # NOTE(review): `int(len_to_add)/2` is float (true) division; the
        # later int() on left_ind truncates the total, but `// 2` may have
        # been intended here -- confirm.
        len_add_left = min(int(len_to_add)/2+padding, max_add_left)
        len_add_right = min(len_to_add - len_add_left+padding, max_add_right)
        # slice bounds into `uniques` for this section (right end inclusive)
        left_ind = int(uniques.index(min(planes)) - len_add_left)
        right_ind = int(uniques.index(max(planes)) + len_add_right)+1
        planes_dict[section_id] = uniques[left_ind:right_ind]
    return planes_dict
def split_in_k(l, k):
    """
    Split the provided list into k sub-lists.

    The first k-1 sub-lists hold len(l) // k elements each; the last one
    also receives any remainder.

    :param l: the list to split
    :param k: the number of sub-lists to create
    :return: a list containing all the sub-lists
    """
    size = len(l) // k
    parts = [l[i * size:(i + 1) * size] for i in range(k - 1)]
    parts.append(l[(k - 1) * size:])
    return parts
def is_template_better(just_print, can_be_better_than_template, h2o_att, template_att, bigger_is_better):
    """
    Determine whether the template model still performs at least as well as
    the system under test.

    :param just_print: bool, we only want to print attribute values; the
        comparison is then irrelevant and True is returned.
    :param can_be_better_than_template: bool, whether the system under test
        may legitimately beat the template (part of the shared signature).
    :param h2o_att: number, the h2o attribute under test
    :param template_att: number, the template attribute
    :param bigger_is_better: bool, True when a larger metric value is better
    :return: bool indicating if the template attribute is better.
    """
    if just_print:  # no comparison wanted; the return value is unused
        return True
    # `not (>)` / `not (<)` are kept as-is (rather than <= / >=) to
    # preserve the original comparison semantics exactly.
    if bigger_is_better:
        return not (h2o_att > template_att)
    return not (h2o_att < template_att)
def fix_static_global_kernels(in_txt):
    """Static global kernels in HIP results in a compilation error."""
    # Drops the `static` qualifier from `__global__ static` declarations.
    # NOTE(review): the replacement also swallows the leading space (the
    # pattern begins with " " but the replacement does not), gluing
    # "__global__" onto the preceding token -- confirm this is intended.
    in_txt = in_txt.replace(" __global__ static", "__global__")
    return in_txt
def strongly_connected_components(
        successors_by_node,
        omit_single_node_components=True,
        low_infinite=2**30):
    """
    Find the strongly connected components of a directed graph.

    successors_by_node = {
        "node1": ["successor1", "successor2"],
        "node2": ["successor1", "successor3"]
    }

    http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
    http://www.logarithmic.net/pfh-files/blog/01208083168/sort.py

    Original implementation (by Paul Harrison), modified to accommodate
    successors that do not appear as a key in successors_by_node.

    :param successors_by_node: dict mapping each node to its successors
    :param omit_single_node_components: drop components of size 1
    :param low_infinite: sentinel low-link value marking finished nodes
    :return: list of components, each a tuple of nodes
    """
    result = []
    stack = []
    # node -> discovery index / low-link; set to low_infinite once the
    # node's component has been emitted.
    low = {}
    def visit(node):
        # Skip nodes already visited (or already assigned to a component).
        if (node in low):
            return
        num = len(low)
        low[node] = num
        stack_pos = len(stack)
        stack.append(node)
        # Recursive DFS; low-links propagate back up from successors.
        # NOTE(review): recursion depth equals the longest successor chain,
        # so very deep graphs may hit the interpreter recursion limit.
        for successor in successors_by_node.get(node, []):
            visit(successor)
            low[node] = min(low[node], low[successor])
        # `node` is a component root when its low-link equals its own index.
        if (num == low[node]):
            component = tuple(stack[stack_pos:])
            del stack[stack_pos:]
            if (not omit_single_node_components or len(component) != 1):
                result.append(component)
            # Mark the emitted nodes so later visits ignore them.
            for item in component:
                low[item] = low_infinite
    for node in successors_by_node:
        visit(node)
    return result
def get_pid(node_name):
    """Return the '<prefix>-<pid>' part of a '<prefix>-<pid>-<nid>' node
    name, i.e. everything before the last '-'."""
    parts = node_name.split('-')
    return '-'.join(parts[:-1])
def add(x, y):
    """
    Function that adds two arguments.

    :param x: first addend
    :param y: second addend
    :return: x + y
    """
    # Fixed: previously returned the constant (2 + 5) regardless of input.
    return x + y
def ritu(masa_num):
    """Map a masa (month) number to its ritu (season): 0 = Vasanta, ..., 5 = Shishira."""
    season = (masa_num - 1) // 2
    return season
def exec_string(string, language="python", decorators=None):
    """Execute a string as python code.

    The languages available are ``python`` and ``mel``.

    During the process, creates a new function and calls it using the
    :func:`exec` builtin function.

    With this process, it is possible to apply decorators to the string to be
    executed. Even if the language is set to "python" or "mel", because in the
    case where the string is written in "mel", a python function is still
    created and called the :func:`mel.eval` command.

    Also, like any python function, it can have a :obj:`return` statement.
    If specified in the string to be executed, the value will be returned.
    See the examples for more details.

    Warning:
        The :obj:`return` statement only works for the python language.

    Examples:
        >>> from maya import cmds
        >>> _ = cmds.file(new=True, force=True)
        >>> command = \"\"\"
        ... from maya import cmds
        ... return cmds.polyCube(name="pythonCube")[0]
        ... \"\"\"
        >>> exec_string(command)
        'pythonCube'
        >>> cmds.objExists("pythonCube")
        True
        >>> command = \"\"\"
        ... polyCube -name "melCube";
        ... \"\"\"
        >>> exec_string(command, language="mel")
        >>> cmds.objExists("melCube")
        True

    Arguments:
        string (str): The command to execute as string.
        language (str, optional): The language in which the object provided in
            the ``string`` parameter is written.
        decorators (list, optional): The python decorators to apply at runtime.

    Returns:
        any: Anything that the string will return.

    Raises:
        ValueError: The specified language is not supported by the function.
    """
    # Build the source of a wrapper function so that decorators and
    # `return` statements work for the executed snippet.
    lines = ["def _callback():\n"]
    if language == "python":
        lines.extend(string.splitlines(True))
    elif language == "mel":
        # For mel, the generated python body simply forwards to mel.eval.
        line = "from maya import mel;mel.eval('{}')"
        lines.append(line.format(string.replace("\n", "")))
    else:
        msg = "The language '{}' is not supported.".format(language)
        raise ValueError(msg)
    # Joining with four spaces indents every line into the function body.
    exec((" " * 4).join(lines))  # pylint: disable=exec-used
    # The exec above defined `_callback` in this function's local scope.
    callback = locals()["_callback"]
    for decorator in decorators or []:
        try:
            # Decorator-factory style: decorator()(callback).
            callback = decorator()(callback)
        except TypeError:
            # Plain decorator style: decorator(callback).
            callback = decorator(callback)
    return callback()
def label_set_match(object_labels, returned_labels):
    """Return True if at least one of the object labels is contained
    (case-insensitively) in at least one of the returned labels."""
    return any(
        obj.lower() in ret.lower()
        for obj in object_labels
        for ret in returned_labels
    )
def integrate(i, dx):
    """
    Left-endpoint Riemann sum of the sampled interval.

    Arguments:
        i -- sequence of sample values; the final sample is unused because
             only len(i) - 1 left endpoints exist
        dx -- spacing between each step
    :return: the approximated integral (0 for empty or single-sample input)
    """
    # The old `if i == []` test inside the loop was dead code: an empty
    # sequence never entered the loop in the first place.
    integral = 0
    for value in i[:-1]:
        integral += value * dx
    return integral
def objective(k, p):
    """ Default objective function.

    Scores 1.5 - p, plus a 50x penalty on the shortfall when k drops
    below 1.1.
    """
    shortfall = k - 1.1 if k < 1.1 else 0
    return 1.0 * (1.5 - p) + 50.0 * shortfall
def _endpoint_from_view_func(view_func):
"""Internal helper that returns the default endpoint for a given
function. This always is the function name.
"""
assert view_func is not None, "expected view func if endpoint is not provided."
return view_func.__name__ |
def _GetFailureTestRefs(test_summary):
    """Gets a list of test summaryRef id of all failure test.

    Args:
        test_summary: dict, a dict of test summary object.

    Returns:
        A list of failure test case's summaryRef id.
    """
    failure_test_refs = []
    if 'subtests' in test_summary:
        # Non-leaf node: recurse into each subtest and collect its failures.
        for sub_test_summary in test_summary['subtests']['_values']:
            failure_test_refs.extend(_GetFailureTestRefs(sub_test_summary))
    else:
        # Leaf node: record it when it did not succeed (or carries no
        # status at all) and has a summaryRef to report.
        if (('testStatus' not in test_summary or
             test_summary['testStatus']['_value'] != 'Success') and
                'summaryRef' in test_summary):
            summary_ref_id = test_summary['summaryRef']['id']['_value']
            failure_test_refs.append(summary_ref_id)
    return failure_test_refs
def mapparms(old, new):
    """Linear map between domains.

    Return the parameters (off, scl) of the linear map ``off + scl*x``
    taking the `old` domain onto the `new` domain: the left end of the old
    domain maps to the left end of the new domain, and likewise for the
    right ends.

    Parameters
    ----------
    old, new : array_like
        Each should convert as a 1D array containing two values.

    Returns
    -------
    off, scl : scalars

    See Also
    --------
    getdomain, mapdomain
    """
    old_span = old[1] - old[0]
    new_span = new[1] - new[0]
    off = (old[1]*new[0] - old[0]*new[1])/old_span
    scl = new_span/old_span
    return off, scl
def edit_diff(start, goal, limit):
    """A diff function that computes the substitution distance from START
    to GOAL, stopping early once the count exceeds LIMIT.

    Mismatched positions cost 1 each; leftover characters of the longer
    string also cost 1 apiece.
    """
    def count_changes(lst_start, lst_goal, total):
        # Base case (previously missing: the recursion ran off the end of
        # the lists and raised IndexError on every call): any leftover
        # characters each count as one change.
        if not lst_start or not lst_goal:
            return total + len(lst_start) + len(lst_goal)
        if lst_start[0] == lst_goal[0]:
            return count_changes(lst_start[1:], lst_goal[1:], total)
        total += 1
        if total > limit:
            return total  # over budget: give up early
        return count_changes(lst_start[1:], lst_goal[1:], total)
    return count_changes(list(start), list(goal), 0)
def update_group_lr(schedulers, epoch):
    """Step every scheduler in the dict and build a printable summary of
    the resulting learning rates.

    :param schedulers: dict mapping name -> learning-rate scheduler
    :param epoch: epoch index forwarded to each scheduler.step()
    :return: message string listing the current learning rates
    """
    lr = []
    for k, scheduler in schedulers.items():
        # Advance the schedule first, then read the updated rate(s).
        scheduler.step(epoch)
        # suitable for optimizer contains only one params_group
        lr.extend(scheduler.get_lr())  # get_lr return a list contain current lr of params_group in scheduler
    message = 'learning rate = '
    for item in lr:
        message += '%.7f ' % (item)
    return message
def _divisible_slice(perm, divisor, start_index):
"""Check if a three digit slice of a permutation is divisible by divisor"""
perm_slice = perm[start_index:start_index + 3]
number = int(''.join(perm_slice))
return number % divisor == 0 |
def rcumode2band(rcumode):
    """
    Map rcumode to band string as used in beamctl arguments

    Parameters
    ----------
    rcumode: int or str
        The RCU mode (3..7).

    Returns
    -------
    band: str
        The band name.

    Raises
    ------
    ValueError
        If the rcumode has no associated band.
    """
    bands = {"3": "10_90", "4": "30_90", "5": "110_190",
             "6": "170_230", "7": "210_250"}
    rcumode = str(rcumode)
    try:
        return bands[rcumode]
    except KeyError:
        # Fixed message: the original mixed %-formatting with str.format
        # ('%{}'), leaving a stray '%' in the rendered error text.
        raise ValueError('Undefined rcumode {}'.format(rcumode))
def _BackendPremultiplication(color):
"""Apply premultiplication and unpremultiplication to match production.
Args:
color: color tuple as returned by _ArgbToRgbaTuple.
Returns:
RGBA tuple.
"""
alpha = color[3]
rgb = color[0:3]
multiplied = [(x * (alpha + 1)) >> 8 for x in rgb]
if alpha:
alpha_inverse = 0xffffff / alpha
unmultiplied = [(x * alpha_inverse) >> 16 for x in multiplied]
else:
unmultiplied = [0] * 3
return tuple(unmultiplied + [alpha]) |
def egcd(a, b):
    """Extended Euclidean algorithm.

    Returns (g, x, y) such that a*x + b*y == g == gcd(a, b).
    """
    if a == 0:
        return b, 0, 1
    quotient, remainder = divmod(b, a)
    g, y, x = egcd(remainder, a)
    return g, x - quotient * y, y
def test(predictions, labels, k=1):
"""
Return precision and recall modeled after fasttext's test
"""
precision = 0.0
nexamples = 0
nlabels = 0
for prediction, labels in zip(predictions, labels):
for p in prediction:
if p in labels:
precision += 1
nexamples += 1
nlabels += len(labels)
return (precision / (k * nexamples), precision / nlabels) |
def has_payment_id(extra_nonce):
    """
    Returns true if payment id is present: a 33-byte extra nonce whose
    first byte is 0.

    :param extra_nonce:
    :return:
    """
    if len(extra_nonce) != 33:
        return False
    return extra_nonce[0] == 0
def _BreadthFirstSearch(to_visit, children, visited_key=lambda x: x):
"""Runs breadth first search starting from the nodes in |to_visit|
Args:
to_visit: the starting nodes
children: a function which takes a node and returns the nodes adjacent to it
visited_key: a function for deduplicating node visits. Defaults to the
identity function (lambda x: x)
Returns:
A list of nodes which are reachable from any node in |to_visit| by calling
|children| any number of times.
"""
to_visit = list(to_visit)
seen = set(map(visited_key, to_visit))
for node in to_visit:
for child in children(node):
key = visited_key(child)
if key not in seen:
seen.add(key)
to_visit.append(child)
return to_visit |
def migrate_packetbeat(content):
    """
    Changes things like `interfaces:` to `packetbeat.interfaces:`
    at the top level.
    """
    prefixes = ("interfaces:", "protocols:", "procs:",
                "runoptions:", "ignore_outgoing:")
    migrated = []
    for line in content.splitlines():
        if line.startswith(prefixes):
            migrated.append("packetbeat." + line)
        else:
            migrated.append(line)
    return "\n".join(migrated) + "\n"
def model_root(obj):
    """
    Finds model root element for the given object by walking the `parent`
    chain until an object without a `parent` attribute is reached.
    """
    p = obj
    # NOTE(review): if the topmost element stores `parent = None` (rather
    # than lacking the attribute), this walks onto None and returns None --
    # confirm the model guarantees the root has no `parent` attribute.
    while hasattr(p, 'parent'):
        p = p.parent
    return p
def count_by(x, n):
    """Return a sequence of numbers counting by `x` `n` times."""
    multiples = []
    for step in range(1, n + 1):
        multiples.append(step * x)
    return multiples
def strBool(bool_str):
    """
    Take the string "true" or "false" of any case and returns a
    boolean object.
    """
    lowered = bool_str.lower()
    if lowered == "true":
        return True
    if lowered == "false":
        return False
    raise Exception(
        "argument for strBool must be string either 'True' or 'False'.")
def simplify_whitespace(name):
    """Strip spaces and remove duplicate spaces within names"""
    if not name:
        # None / empty string pass through unchanged.
        return name
    return ' '.join(name.split())
def make_special_identifier(ln, ed, ms, aliquot=None):
    """Build the identifier string '<ln>-<ed>-<ms>[-<aliquot>]'.

    ln: str or int label number
    ed: extract device id (ints are zero-padded to 2 digits)
    ms: mass spectrometer id (ints are zero-padded to 2 digits)
    aliquot: optional aliquot; ints are zero-padded to 2 digits and a
        falsy aliquot is omitted entirely.
    """
    if isinstance(ed, int):
        ed = "{:02d}".format(ed)
    if isinstance(ms, int):
        ms = "{:02d}".format(ms)
    parts = ["{}".format(ln), ed, ms]
    if aliquot:
        if not isinstance(aliquot, str):
            aliquot = "{:02d}".format(aliquot)
        parts.append(aliquot)
    return "-".join(parts)
def get_cloud_directive(key, task_directives, cloud_args_key='cloud_args'):
    """
    Helper to fetch a directive nested one layer underneath ``cloud_args``.

    :param key: str Directive key
    :param task_directives: dict The dictionary of task directives
    :param cloud_args_key: str Key for the first level
    :return: object The requested ``cloud_args`` directive (None if absent)
    """
    cloud_args = task_directives.get(cloud_args_key, {})
    return cloud_args.get(key)
def convert_int_to_str(integer, keys):
    """Return the first appliance name mapped to `integer` in keys, or
    None when no entry matches.

    keys: dict mapping appliance name -> integer index.
    """
    return next((name for name, idx in keys.items() if idx == integer), None)
def reconcile_artists(headers, tracks, titles):
    """De-duplicate the scraped artists and return a completed list.

    Track artists (falling back to header artists when tracks is empty)
    are tagged "main"; title artists are tagged "guest".  First-appearance
    order is preserved.
    """
    artists = []
    main_source = tracks if tracks else headers
    for name in main_source:
        entry = (name, "main")
        if entry not in artists:
            artists.append(entry)
    for name in titles:
        entry = (name, "guest")
        if entry not in artists:
            artists.append(entry)
    return artists
def get_indentation(line):
    """Return the indentation of `line`: the count of leading spaces and
    tabs."""
    for position, character in enumerate(line):
        if character not in (' ', '\t'):
            return position
    return len(line)
def transform_operator(left_c, premise, mid_c, hypothesis, right_c):
    """Concatenate premise and hypothesis with the three conjunctions.

    left_c : left conjunction
    mid_c : middle conjunction
    right_c : right conjunction
    """
    return left_c + premise + mid_c + hypothesis + right_c
def is_entry_a_header(key, value, entry):
    """Returns whether the given entry in the header is an expected header.

    The check is case-insensitive on both the key and the value.
    """
    entry_lower = entry.lower()
    return key.lower() in entry_lower or value.lower() in entry_lower
def good_head_angle(y, p, r, angle_min, angle_max):
    """
    good head angle would be looking directly to the camera, give or take
    some degree each: yaw/pitch/roll must each lie strictly between
    angle_min[i] and angle_max[i].
    """
    angles = (y, p, r)
    return all(angle_min[i] < angles[i] < angle_max[i] for i in range(3))
def _extract_doc_comment_continuous(content, line, column, markers):
    """
    Extract a documentation that starts at given beginning with continuous
    layout.

    The property of the continuous layout is that the each-line-marker and the
    end-marker do equal. Documentation is extracted until no further marker is
    found. Applies e.g. for doxygen style python documentation::

        ## main
        #
        # detailed

    :param content: Presplitted lines of the source-code-string.
    :param line: Line where the documentation comment starts (behind the
                 start marker). Zero-based.
    :param column: Column where the documentation comment starts (behind the
                   start marker). Zero-based.
    :param markers: The documentation identifying markers.
    :return: If the comment matched layout a triple with end-of-comment
             line, column and the extracted documentation. If not
             matched, returns None.
    """
    marker_len = len(markers[1])
    # First (partial) line: everything behind the start marker.
    doc_comment = content[line][column:]
    line += 1
    while line < len(content):
        pos = content[line].find(markers[1])
        if pos == -1:
            # No further each-line marker: the comment ended on the
            # previous line; column 0 of this line is where code resumes.
            return line, 0, doc_comment
        else:
            # Append the text behind this line's marker and continue.
            doc_comment += content[line][pos + marker_len:]
            line += 1
    # Ran off the end of the document while still inside the comment.
    if content[line - 1][-1] == '\n':
        column = 0
    else:
        # This case can appear on end-of-document without a ``\n``.
        line -= 1
        column = len(content[line])
    return line, column, doc_comment
def _rshift_nearest(x, shift):
    """Given an integer x and a nonnegative integer shift, return closest
    integer to x / 2**shift; use round-to-even in case of a tie.
    """
    # b = 2**shift; q = truncated quotient x >> shift.
    b, q = 1 << shift, x >> shift
    # Round up when twice the discarded low bits exceed b (more than half),
    # or equal b with q odd (ties-to-even).  Note `x & b - 1` parses as
    # `x & (b - 1)`, i.e. the discarded remainder bits.
    return q + (2 * (x & b - 1) + (q & 1) > b)
def flatten(item):
    """
    Convert data returned from dynamodb to raw python objects.

    Handles the "S" (string), "N" (number) and "L" (list) attribute types;
    lists are flattened recursively.
    """
    if "S" in item:
        return item["S"]
    if "N" in item:
        # NOTE(review): DynamoDB "N" values may be non-integers; int()
        # raises on e.g. "1.5" -- confirm only whole numbers occur here.
        return int(item["N"])
    if "L" in item:
        flattened_list = []
        for i in item["L"]:
            flattened_list.append(flatten(i))
        return flattened_list
    # NOTE(review): any other attribute type ("M", "BOOL", ...) falls
    # through and implicitly returns None.
def _thread_worker_fn(samples, batchify_fn, dataset):
"""Threadpool worker function for processing data."""
return batchify_fn([dataset[i] for i in samples]) |
def column_to_list(data: list, prop: str):
    """
    Collect one column of values from a list of dict-like items.

    args:
        data (list): list of dict-like items
        prop (str): the key to extract
    return (list): the value of 'prop' in each item (None when missing)
    """
    column = []
    for item in data:
        column.append(item.get(prop, None))
    return column
def format_BUILD_MAP_UNPACK_WITH_CALL(oparg):
    """The lowest byte of oparg is the count of mappings, the relative
    position of the corresponding callable f is encoded in the second byte
    of oparg."""
    count = oparg & 0xFF
    rel_func_pos = oparg >> 8
    return "%d mappings, function at %d" % (count, count + rel_func_pos)
def Sorted(lst):
    """Equivalent of sorted(), but not dependent on python version."""
    copied = list(lst)
    copied.sort()
    return copied
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.