content stringlengths 42 6.51k |
|---|
def get_evidence_docs(doc_json: dict):
    """Collect the distinct document ids referenced by the evidence.

    Args:
        doc_json (dict): A data dict from the FEVEROUS dataset

    Returns:
        List[str]: Unique document ids, in first-seen order
    """
    seen = []
    # Element ids look like "<doc name>_<element>_<index>"; the doc name is
    # everything before the first underscore.
    for element_id in doc_json["evidence"][0]["content"]:
        name = element_id.split("_")[0]
        if name not in seen:
            seen.append(name)
    return seen
def get_id2label(labels):
    """
    Build an id -> label mapping from an ordered list of labels.

    Args:
        labels: list of labels.
    Return:
        dict mapping the stringified index to each label.
    """
    id2label = {}
    for index, label in enumerate(labels):
        id2label[str(index)] = label
    return id2label
def json_formatter(data):
    """Render *data* as a pretty-printed JSON string (sorted keys, 4-space indent)."""
    import json
    return json.dumps(data, sort_keys=True, indent=4, separators=(',', ': '))
def _is_junk(line, t_strs):
    """Ignore empty/whitespace-only lines and '# <t_str>' blast-info lines."""
    if not line or not line.strip():
        return True
    return any(line.startswith("# %s" % t_str) for t_str in t_strs)
def checkNextActions(grid):
    """
    Check a 4x4 2048-style grid for a remaining move.

    return True : if there is one/+ possible action (an empty cell, or two
                  equal adjacent tiles in a row or column)
    return False : if there is no possible action

    Fixes over the previous version:
    - the column scan tested ``case == 0`` (the outer/column index) instead of
      the inner row index, so column 0 was never checked and the comparison
      value leaked from one column into the next, producing spurious matches.
    """
    if any(None in line for line in grid):
        return True
    # Check rows: any two horizontally adjacent equal tiles can merge.
    for line in range(4):
        for case in range(1, 4):
            if grid[line][case] == grid[line][case - 1]:
                return True
    # Check columns: any two vertically adjacent equal tiles can merge.
    for case in range(4):
        for column in range(1, 4):
            if grid[column][case] == grid[column - 1][case]:
                return True
    return False
def GetBootDiskConfig(disk_name, source_image):
    """Build the boot-disk property dict for a compute instance.

    Args:
        disk_name: name given to the persistent disk.
        source_image: image the disk is initialized from.
    """
    init_params = {
        'diskName': disk_name,
        'sourceImage': source_image,
    }
    return {
        'deviceName': 'boot',
        'type': 'PERSISTENT',
        'boot': True,
        'autoDelete': True,
        'initializeParams': init_params,
    }
def get_platzhalter_ES(tokens):
    """Mark expletive 'es' tokens (Platzhalter) and return how many were found.

    Each matching token gets 'Placeholder es' appended to its pattern_color list.
    """
    count = 0
    for token in tokens:
        if token.lemma == 'es' and token.function == 'expl':
            token.pattern_color.append('Placeholder es')
            count += 1
    return count
def get_file_less_ext(full_filename):
    """Return the last path component of *full_filename* without its final extension."""
    basename = full_filename.split("/")[-1]
    parts = basename.split(".")
    if len(parts) == 1:
        # no dot at all -> keep the name unchanged
        return parts[0]
    return ".".join(parts[:-1])
def GetTokenRange(start_token, end_token):
    """Returns a list of tokens between the two given, inclusive.

    Args:
        start_token: Start token in the range.
        end_token: End token in the range.
    Returns:
        A list of tokens, in order, from start_token to end_token (including
        both ends). Implicitly returns None when end_token is not reachable
        by following the .next chain from start_token.
    """
    collected = []
    current = start_token
    while current:
        collected.append(current)
        if current == end_token:
            return collected
        current = current.next
def _encode(data, name='data'):
    """Encode *data* as Latin-1, re-raising with a more helpful error message."""
    try:
        return data.encode('latin-1')
    except UnicodeEncodeError as err:
        message = (
            "%s (%.20r) is not valid Latin-1. Use %s.encode('utf-8') if you want to send it encoded in UTF-8."
            % (name.title(), data[err.start:err.end], name)
        )
        raise UnicodeEncodeError(
            err.encoding, err.object, err.start, err.end, message
        ) from None
def _valdiate_ArrayEntries(strInput : str, acttyp) -> bool:
    """
    Description
    -----------
    Helper for validating entry widgets that expect an array as input.
    On an insert action (acttyp == '1') only digits and the characters
    ',', '.', ' ', '+', '-', 'e' are accepted; every other action type
    is always allowed.

    Parameters
    ----------
    `strInput` : string
        Input string which should be validated
    `acttyp` :
        Action type code; '1' presumably is the Tk "insert" action — verify
        against the widget configuration.

    Return
    ------
    bool
        True when the edit is acceptable
    """
    if acttyp != '1':
        return True
    return strInput in (",", ".", " ", "+", "-", "e") or strInput.isdigit()
def fib_iter(n):
    """Return the n-th Fibonacci number iteratively in O(n).

    Arguments:
        n {int} -- non-negative index; fib(0) == 0, fib(1) == 1.

    Returns:
        int -- the n-th Fibonacci number.
    """
    # precondition
    assert n >= 0, 'n must be positive integer'
    if n <= 1:
        return n
    # Fixed: the old accumulator was named `sum`, shadowing the builtin.
    prev, curr = 0, 1
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return curr
def bool_from_text(text):
    """Map 'true'/'false' (case-insensitive, trimmed) to a bool; anything else -> None."""
    if text is None:
        return None
    normalized = text.strip().lower()
    if normalized == 'true':
        return True
    if normalized == 'false':
        return False
    return None
def sqlName( nameStr ):
    """Make a proper token out of a string with embedded spaces and '#'.

    ' ' -> '_'
    '#' -> 'Num'
    """
    return nameStr.replace(" ", "_").replace("#", "Num")
def strvalidator(value):
    """Cast *value* to str, passing None through unchanged."""
    return None if value is None else str(value)
def gcd_rec(a, b):
    """Greatest common divisor of a and b via recursive Euclid's algorithm."""
    return a if b == 0 else gcd_rec(b, a % b)
def generate_policy(principal_id, effect, method_arn):
    """
    Build an API Gateway authorizer policy document.

    Args:
        principal_id (str): request's user
        effect (str): Allow or Deny
        method_arn (str): resource's ARN
    Returns: dict, or None when effect or method_arn is missing
    """
    if not effect or not method_arn:
        return None
    statement = {
        "Action": "execute-api:Invoke",
        "Effect": effect,
        "Resource": method_arn,
    }
    return {
        "principalId": principal_id,
        "policyDocument": {
            "Version": "2012-10-17",
            "Statement": [statement],
        },
    }
def _normalize_cmd_args(cmd):
    """
    Normalize subprocess arguments: split a string command on whitespace and
    stringify every element of a list command.
    """
    args = cmd.split() if isinstance(cmd, str) else cmd
    # NOTE: pipe/pipefail handling is deliberately disabled here:
    # - Ubuntu /bin/sh points to dash, which doesn't support pipefail
    # - spawning a separate bash process has issues inheriting the environment
    return [str(arg) for arg in args]
def dict_to_lower(dict):
    """
    Return a copy of the dictionary with all keys lowercased.

    :param dict: The dictionary (note: the parameter shadows the builtin `dict`)
    :return: The lowercased dictionary
    """
    return {key.lower(): value for key, value in dict.items()}
def sjoin(strs, s=''):
    """
    Join the strings in *strs*, separated by *s* (empty by default).

    Examples
    --------
    >>> sjoin(["abc", "def"])
    'abcdef'
    >>> sjoin(["abc", "def"], " ")
    'abc def'
    """
    return s.join(strs)
def to_plural(elt_nam, num_nod_lst):
    """
    Append a plural suffix to *elt_nam* when *num_nod_lst* != 1, following
    basic English rules. Non-alphabetic endings are left untouched.
    """
    if num_nod_lst == 1:
        return elt_nam
    last = elt_nam[-1]
    if not last.isalpha():
        return elt_nam
    # e.g. Win32_ReliabilityRecordses / Win32_LogicalFileAccesses
    if last == "s":
        if len(elt_nam) > 1 and elt_nam[-2] != "s":
            # A single trailing 's' — maybe it is already plural.
            return elt_nam
        return elt_nam + "es"
    if last == "y":
        return elt_nam[:-1] + "ies"
    return elt_nam + "s"
def enum_value(value):
    """Extract the underlying value of an enum member.

    :param Enum value: An enum member (or a falsy placeholder such as None).
    :returns: The member's .value, or None for a falsy input.
    """
    if not value:
        return None
    return value.value
def EdgeWeights(setOfEdges, multiSetOfEdges):
    """Weight each edge by its multiplicity in the multiset.

    Improvement: count the multiset once with Counter (O(n + m)) instead of
    calling list.count per edge (O(n * m)).

    @param setOfEdges: iterable of distinct, hashable edges.
    @param multiSetOfEdges: list of edges with repetitions.
    @return: dict mapping edge -> occurrence count (0 when absent).
    """
    from collections import Counter
    counts = Counter(multiSetOfEdges)
    # a way to normalize edge weights
    # edgeWeights = map((1 / max(list(weights.values()))), weights.values())
    return {edge: counts[edge] for edge in setOfEdges}
def format_dict_str(record):
    """
    Convert every value of *record* to str, mutating it in place; returns the
    same dict for convenience.
    """
    for key in record:
        record[key] = str(record[key])
    return record
def comma_sep(input: str) -> list:
    """Split a comma-separated string into a list of whitespace-stripped items."""
    return [part.strip() for part in input.split(",")]
def exclamation_mark_finder(sentence):
    """
    Returns 1 if sentence contains an exclamation mark, 0 otherwise.
    (Docstring corrected: the previous text wrongly said "question mark".)
    """
    return 1 if "!" in sentence else 0
def wrap_errors(errors, function, args, kwargs):
    """
    .. deprecated:: 1.1a2
       Previously used by ThreadPool.apply_e.

    Call function(*args, **kwargs); return (True, result) on success, or
    (False, exception) when one of *errors* is raised.
    """
    try:
        result = function(*args, **kwargs)
    except errors as ex:
        return False, ex
    return True, result
def insertion_sort(array):
    """Sort *array* in place with insertion sort and return it."""
    for idx in range(1, len(array)):
        current = array[idx]
        pos = idx
        # Shift larger elements one slot right until `current` fits.
        while pos > 0 and array[pos - 1] > current:
            array[pos] = array[pos - 1]
            pos -= 1
        array[pos] = current
    return array
def getlinktags(link):
    """
    Return the tags of *link* as a list (empty list when no 'tags' key).
    """
    raw = link.get('tags')
    if raw is None:
        return list()
    return raw.split(',')
def validacampo(campo):
    """Validate an empty field: True when *campo* is blank or whitespace-only."""
    return campo.strip() == ''
def results(person, result, rate):
    """
    Accumulate *rate* (coerced to int) for *person* in the *result* dict,
    creating the entry when missing.

    :param person: the current person
    :param result: the output dict (mutated in place)
    :param rate: the current tax rate
    :return: the resulting dict
    """
    result[person] = result.get(person, 0) + int(rate)
    return result
def odd(number):
    """Returns `True` if `number` is odd. `False` otherwise.

    :param number: number to check if it is odd; anything not convertible
        to int yields False.
    """
    try:
        value = int(number)
    except (TypeError, ValueError):
        # Narrowed from a bare `except`, which also swallowed
        # KeyboardInterrupt / SystemExit.
        return False
    return value & 0x1 == 1
def validate_iyr(value: str) -> bool:
    """issue year must be an int between 2010 and 2020, inclusive"""
    try:
        year = int(value)
    except (TypeError, ValueError):
        return False
    return 2010 <= year <= 2020
def fused_conv2d_alter_layout(attrs, inputs, tinfos, out_type):
    """Change Fused Conv2D layout.
    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of current convolution
    inputs : tvm.relay.Expr
        Grouped input symbols
    tinfos : list
        Input shape and dtype
    out_type: type
        The output type
    Note
    ----
    Unlike other TOPI functions, this function operates on both graph level and operator level.
    """
    # not to change by default — returning None signals the caller to keep
    # the current layout (presumably overridden per-target; verify upstream).
    return None
def remove_overlap(spans):
    """
    Remove overlapped spans greedily for flat NER.

    Fix: the old test was ``any(x for x in range(...)) in occupied`` which
    compared a *bool* against the occupied set (True == 1), so real overlaps
    were never detected.

    Args:
        spans: list of tuple (start, end), meaning [start, end] is a NER span
    Returns:
        spans without overlap (earlier spans win)
    """
    output = []
    occupied = set()
    for start, end in spans:
        if any(x in occupied for x in range(start, end + 1)):
            continue
        output.append((start, end))
        occupied.update(range(start, end + 1))
    return output
def parse_entry_point(entry_point):
    """
    Parse a Python entry point into (module, function).

    'path.to.module'          -> ('path.to.module', None)
    'path.to.module:function' -> ('path.to.module', 'function')
    """
    module, sep, function = entry_point.partition(":")
    if module and sep and function:
        return (module, function)
    return (module, None)
def get_worksheet_keys(data_dict, result_info_key):
    """Sorted keys of *data_dict*, excluding *result_info_key* and 'meta'.

    Args:
        data_dict: dict to pull keys from
        result_info_key: key to exclude (must be present; raises KeyError
            otherwise, matching set.remove semantics)
    Returns:
        sorted list of the remaining keys
    """
    keys = set(data_dict)
    keys.remove(result_info_key)
    keys.discard('meta')
    return sorted(keys)
def _af_invert(a):
    """
    Finds the inverse, ~A, of a permutation, A, given in array form.
    Examples
    ========
    >>> from sympy.combinatorics.permutations import _af_invert, _af_rmul
    >>> A = [1, 2, 0, 3]
    >>> _af_invert(A)
    [2, 0, 1, 3]
    >>> _af_rmul(_, A)
    [0, 1, 2, 3]
    See Also
    ========
    Permutation, __invert__
    """
    inverse = [0] * len(a)
    for position, image in enumerate(a):
        inverse[image] = position
    return inverse
def _format_list_items(list_items):
    """Generate an indented, bracketed string out of a list of items."""
    if not list_items:
        return '[]'
    inner = ",\n ".join("'" + item + "'" for item in list_items)
    return "[\n " + inner + "\n]"
def figshare_stem(stem: str = '', production: bool = True) -> str:
    """
    Construct Grouper figshare stems.

    :param stem: string corresponding to the sub-stem.
        Options are: 'quota', 'portal'. Default: root stem
    :param production: Bool to use production stem.
        Otherwise a stage/test is used. Default: ``True``
    :return: Grouper stem/folder string

    Usage:
      ``figshare_stem('quota')``  -> "arizona.edu:dept:LBRY:figshare:quota"
      ``figshare_stem('portal')`` -> "arizona.edu:dept:LBRY:figshare:portal"
      ``figshare_stem()``         -> "arizona.edu:dept:LBRY:figshare"
    """
    root = 'arizona.edu:dept:LBRY:figshare' if production else 'arizona.edu:dept:LBRY:figtest'
    # Append the sub-stem only when one was given.
    return f'{root}:{stem}' if stem else root
def sanitize_ctx(request, ctx):
    """Keep just the known context fields.

    Pops 'nb_path' from *ctx* (mutating it) and logs any leftover fields
    before discarding them.
    """
    known_path = ctx.pop("nb_path", None)
    if ctx:
        request.log.debug("Ignoring context fields: %s", ", ".join(ctx.keys()))
    return {"nb_path": known_path}
def strip_brackets(text) -> str:
    """
    Strip one pair of surrounding square brackets from the input string.

    :param text: Text string to strip the brackets from (None yields "").
    :return: Text string without the surrounding brackets.
    """
    if text is None:
        return ""
    if text.startswith("[") and text.endswith("]"):
        return text[1:-1]
    return text
def is_ascii(string):
    """
    True when *string* contains only ASCII-encodable characters.
    """
    try:
        string.encode('ascii')
        return True
    except (UnicodeEncodeError, UnicodeDecodeError):
        return False
def parse_ens(ens_str):
    """Build the member strings for the ensemble answer.

    'member:1-3,control' -> ['m1', 'm2', 'm3', 'control']

    Fix: the old code used ``c.lstrip('member:')``, which strips a *character
    set* (m, e, b, r, :) rather than the literal prefix; the prefix is now
    removed explicitly.
    """
    out = []
    for component in ens_str.split(','):
        if 'member:' in component:
            numbers = component.split('member:', 1)[1]
            if '-' in numbers:
                start, end = numbers.split('-')
                member_ids = range(int(start), int(end) + 1)
            else:
                member_ids = (int(numbers),)
            for n in member_ids:
                out.append('m{}'.format(n))
        else:
            out.append(component)
    return out
def text_justification(words, max_width):
    """
    Greedily pack words into lines of exactly max_width characters, padding
    the gaps with spaces; extra spaces are distributed left-first. The last
    line (and any single-word line) is left-justified (LeetCode 68 style).

    :type words: list
    :type max_width: int
    :rtype: list
    :raises ValueError: when a single word is longer than max_width
    """
    ret = []  # return value
    row_len = 0  # current length of strs in a row
    row_words = []  # current words in a row
    index = 0  # the index of current word in words
    is_first_word = True  # is current word the first in a row
    while index < len(words):
        # Greedy phase: keep taking words while they (plus a separating
        # space) still fit in max_width.
        while row_len <= max_width and index < len(words):
            if len(words[index]) > max_width:
                raise ValueError(
                    "there exists word whose length is larger than max_width"
                )
            tmp = row_len
            row_words.append(words[index])
            tmp += len(words[index])
            if not is_first_word:
                tmp += (
                    1
                )  # except for the first word, each word should have at least a ' ' before it.
            if tmp > max_width:
                row_words.pop()
                break
            row_len = tmp
            index += 1
            is_first_word = False
        # here we have already got a row of str , then we should supplement enough ' ' to make sure the length is max_width.
        row = ""
        # if the row is the last
        if index == len(words):
            for word in row_words:
                row += word + " "
            row = row[:-1]
            row += " " * (max_width - len(row))
        # not the last row and more than one word
        elif len(row_words) != 1:
            space_num = max_width - row_len
            space_num_of_each_interval = space_num // (len(row_words) - 1)
            space_num_rest = space_num - space_num_of_each_interval * (
                len(row_words) - 1
            )
            for j in range(len(row_words)):
                row += row_words[j]
                if j != len(row_words) - 1:
                    row += " " * (1 + space_num_of_each_interval)
                    # The leftmost gaps absorb the remainder, one extra
                    # space each.
                    if space_num_rest > 0:
                        row += " "
                        space_num_rest -= 1
        # row with only one word
        else:
            row += row_words[0]
            row += " " * (max_width - len(row))
        ret.append(row)
        # after a row , reset those value
        row_len = 0
        row_words = []
        is_first_word = True
    return ret
def generate_parenthesis(n):
    """
    Generate all strings of n well-formed parenthesis pairs.

    :type n: int
    :rtype: List[str]
    """
    result = []

    def build(current, opened, closed):
        # A complete string has n of each; record it.
        if opened == closed == n:
            result.append(current)
            return
        # Always try '(' first so output order matches DFS over openings.
        if opened < n:
            build(current + "(", opened + 1, closed)
        if closed < opened:
            build(current + ")", opened, closed + 1)

    build("", 0, 0)
    return result
def list_to_html_table(list_of_data):
    """Build an HTML-table-body string (rows of <tr>/<td>) from nested lists."""
    rows = []
    for sublist in list_of_data:
        cells = "".join(" <td>" + str(data) + "</td>\n" for data in sublist)
        rows.append(" <tr>\n" + cells + " </tr>")
    return "\n".join(rows)
def tissue_vol_in_samp_gr(term_vol_in_grid, br_vol_in_grid):
    """Total tissue volume of the whole tree (branches plus terminal conduits).

    Inputs:
    - term_vol_in_grid: total volume of terminal conduits
    - br_vol_in_grid: total volume of branches before terminal conduits
    Return:
    - tissue_vol: total tissue volume of whole tree

    Example: term_vol_in_grid=0.444, br_vol_in_grid=0.008 -> 0.452
    """
    return term_vol_in_grid + br_vol_in_grid
def merge_span_token(master_token_list):
    """
    Merge the span style token (row span or col span).

    Walks the token list and fuses each '<td' opening with its following
    span attribute token(s), '>' and '</td>' into one combined token; all
    other tokens pass through unchanged. A trailing '</tbody>' sentinel is
    appended when missing and is always present on the output.

    :param master_token_list: flat list of table-structure tokens
    :return: new token list with span sequences merged
    """
    new_master_token_list = []
    pointer = 0
    if master_token_list[-1] != '</tbody>':
        master_token_list.append('</tbody>')
    while master_token_list[pointer] != '</tbody>':
        try:
            if master_token_list[pointer] == '<td':
                if master_token_list[pointer+1].startswith(' colspan=') or master_token_list[pointer+1].startswith(
                        ' rowspan='):
                    if master_token_list[pointer + 2].startswith(' colspan=') or master_token_list[pointer + 2].\
                            startswith(' rowspan='):
                        """
                        example:
                        pattern <td rowspan="2" colspan="3">
                        '<td' + 'rowspan=" "' + 'colspan=" "' + '>' + '</td>'
                        """
                        # tmp = master_token_list[pointer] + master_token_list[pointer+1] + \
                        #       master_token_list[pointer+2] + master_token_list[pointer+3] + \
                        #       master_token_list[pointer+4]
                        # five tokens fused: '<td', two span attrs, '>', '</td>'
                        tmp = ''.join(master_token_list[pointer:pointer + 4 + 1])
                        pointer += 5
                        new_master_token_list.append(tmp)
                    else:
                        """
                        example:
                        pattern <td colspan="3">
                        '<td' + 'colspan=" "' + '>' + '</td>'
                        """
                        # tmp = master_token_list[pointer] + master_token_list[pointer+1] + \
                        #       master_token_list[pointer+2] + master_token_list[pointer+3]
                        # four tokens fused: '<td', one span attr, '>', '</td>'
                        tmp = ''.join(master_token_list[pointer:pointer+3+1])
                        pointer += 4
                        new_master_token_list.append(tmp)
                else:
                    new_master_token_list.append(master_token_list[pointer])
                    pointer += 1
            else:
                new_master_token_list.append(master_token_list[pointer])
                pointer += 1
        except:
            # NOTE(review): bare except — presumably guards against an
            # IndexError from a truncated token sequence; it swallows
            # everything else too. Consider narrowing.
            print("Break in merge...")
            break
    new_master_token_list.append('</tbody>')
    return new_master_token_list
def to_int(byte):
    """
    Interpret a big-endian byte sequence as an unsigned integer.

    :type byte: bytes
    :rtype: int
    """
    value = 0
    for b in byte:
        value = value * 256 + b
    return value
def is_val_desc(p, pltype):
    """
    Value descriptions are a continuation of a previous variable
    value or a (long) value description: optional whitespace, a period,
    then the description. Only meaningful after a 'Blank', 'Var Value'
    or 'Val Desc' line.
    """
    import re
    if pltype in ('Blank', 'Var Value', 'Val Desc'):
        # Equivalent to the old split-based check: the pattern must match
        # at the very start of the line.
        return re.match(r"[\t ]*\.", p) is not None
    return False
def get_all_interval_ids(region_datasets):
    """Returns ids of all intervals associated with a list of region datasets."""
    return [
        entry.id
        for region_dataset in region_datasets
        for entry in region_dataset.intervals.all()
    ]
def read_variant(variant):
    """Read and parse a binary protobuf varint value.

    Returns (value, remaining_bytes); raises ValueError when no byte clears
    the 0x80 continuation bit (truncated varint).
    """
    value = 0
    for index, byte in enumerate(variant):
        # Each byte contributes 7 payload bits, least-significant group first.
        value |= (byte & 0x7F) << (7 * index)
        if not byte & 0x80:
            return value, variant[index + 1:]
    raise ValueError("invalid variant")
def moveCommand(a, b):
    """ Build the argv list that moves a file from `a' to `b' """
    return ['mv', a, b]
def key_value_list(d):
    """
    Recursively collect (key, value) tuples from a dict (or a list of dicts).

    d -- a dictionary (or list) to iterate through; anything else yields [].
    Pairs with a None key or None value are skipped; nested dicts/lists are
    descended into after their own pair is recorded.
    """
    if isinstance(d, list):
        pairs = []
        for entry in d:
            if isinstance(entry, dict):
                pairs.extend(key_value_list(entry))
        return pairs
    if not isinstance(d, dict):
        return []
    pairs = []
    for key, value in d.items():
        if key is None or value is None:
            continue
        pairs.append((key, value))
        pairs.extend(key_value_list(value))
    return pairs
def compute_boost(duty_cycle, min_duty_cycle, max_boost=10):
    """
    Evaluate the boost function.

    @param duty_cycle: The duty cycle of the current column.
    @param min_duty_cycle: The minimum duty cycle for the column's region.
    @param max_boost: The max boost.
    """
    if min_duty_cycle == 0:
        return max_boost
    if duty_cycle > min_duty_cycle:
        return 1
    # Linear ramp from max_boost at duty_cycle == 0 down to 1 at
    # duty_cycle == min_duty_cycle.
    slope = (1 - max_boost) / min_duty_cycle
    return duty_cycle * slope + max_boost
def has_duplicates(t):
    """Returns True if any element appears more than once in a sequence.

    t: list    returns: bool
    (t itself is never modified — a sorted copy is inspected.)
    """
    ordered = sorted(t)
    return any(a == b for a, b in zip(ordered, ordered[1:]))
def get_callable_params(obj):
    """Get callable params of an object"""
    # Introspect the signature; fall back to '()' when inspection fails
    # (builtins, C functions, non-callables).
    try:
        signature = safe_str(inspect.signature(obj))
    except Exception:
        signature = '()'
    # Drop the surrounding parentheses before returning.
    return signature[1:-1]
def InvertDirection(direction: int) -> int:
    """Returns the opposite direction to the one given.

    0 <-> 1 and 2 <-> 3; any other value maps to 2 (as before).
    """
    opposites = {0: 1, 1: 0, 2: 3}
    return opposites.get(direction, 2)
def getFileType(filename):
    """Determine if uploaded file is an image or video."""
    if filename.endswith(('.jpg', '.png', '.gif')):
        return 'image'
    if filename.endswith(('.mp4', '.webm')):
        return 'video'
    return 'invalid'
def uv2spd_dir(u,v):
    """
    converts u, v meteorological wind components to speed/direction
    where u is velocity from N and v is velocity from E (90 deg)
    usage spd, dir = uv2spd_dir(u, v)

    Cleanup: removed the dead np.zeros_like pre-allocations that were
    immediately overwritten.
    """
    import numpy as np
    spd = np.sqrt(u**2 + v**2)
    dir = np.arctan2(v, u)*180.0/np.pi
    return (spd, dir)
def board_data(board_build_dict):
    """Raw data as given by the client (returned as a shallow copy)."""
    return board_build_dict.copy()
def Interpolate(a, b, p, limit = False):
    """Interpolate between values a and b at float position p (0-1).

    Limit: when True, clamp the result to [a, b] (no extrapolation).
    """
    result = a + (b - a) * p
    if limit:
        if result < a:
            return a
        if result > b:
            return b
    return result
def format_total_seconds(raw_time):
    """Convert a total time in seconds to '[h:]mm:ss.fff' format.

    The hours prefix is omitted when the duration is under one hour.
    """
    minutes, seconds = divmod(raw_time, 60)
    hours, minutes = divmod(minutes, 60)
    prefix = '{}:'.format(int(hours)) if hours > 0 else ''
    return '{}{:02d}:{:06.3f}'.format(prefix, int(minutes), seconds)
def eh_dificuldade(dificuldade): # str -> bool
    """
    Check whether the argument is a valid difficulty level.

    :param dificuldade: "facil", "normal", "dificil"
    :return: True or False
    """
    return dificuldade in ('facil', 'normal', 'dificil')
def read_file(filename):
    """Read the magazine source file and return its non-blank lines,
    right-stripped and lowercased.

    Args:
        filename (str): The file name of the magazine source file
    Returns:
        list: a lowercase string for each non-blank line of the source file
    """
    with open(filename) as file:
        return [
            line.rstrip().lower()
            for line in file
            if line.strip()  # skip blank / whitespace-only lines
        ]
def lag(x,y,samps):
    """
    Shift x samps to the left and y samps to the right (samps must be even);
    a negative samps shifts x right and y left. Both traces are truncated by
    samps, preserving centrality, so windowing must follow to keep even
    trace lengths when measuring splitting.
    """
    if samps == 0:
        return x, y
    if samps > 0:
        # positive shift: drop the head of x and the tail of y
        shifted = (x[samps:], y[:-samps])
    else:
        # negative shift: drop the tail of x and the head of y
        shifted = (x[:samps], y[-samps:])
    return shifted
def remove_ignores(file_paths, ignore_list):
    """
    Remove files that match gitignore patterns.

    Fix: the old version extended an accumulator with the partially filtered
    list after *every* pattern, duplicating surviving paths whenever
    ignore_list had more than one entry (and returning [] for an empty
    ignore_list). Now the filtered list itself is returned.

    :param file_paths: candidate paths
    :param ignore_list: fnmatch-style ignore patterns
    :return: the paths that survive every ignore pattern
    """
    # https://stackoverflow.com/a/25230908/5549
    from fnmatch import fnmatch
    for ignore in ignore_list:
        file_paths = [
            n for n
            in file_paths
            if n.startswith('#') or not fnmatch(n, ignore)
        ]
    return file_paths
def check_shape(a, b):
    """ Checks if the shape of `a` and `b` is the same.

    Objects without a .shape attribute, and empty-tuple (scalar) shapes,
    are treated as compatible with anything.
    """
    if not (hasattr(a, "shape") and hasattr(b, "shape")):
        # automatically true if one of them is a scalar
        return True
    if a.shape == () or b.shape == ():
        return True
    return a.shape == b.shape
def find_left_anchor_index(fragment_info, fragments):
    """
    Description:
        Find which fragment is the left anchor — the first info entry with
        no left matches.
    :param fragment_info: [list[dict]] the list of fragment information
    :param fragments: [list] the list of fragments being searched
    :return: [int] index of the left-anchor fragment in *fragments*
             (implicitly None when no entry has empty 'left_matches')
    """
    for info in fragment_info:
        if not info['left_matches']:
            return fragments.index(info['anchor'])
def __checkIfKey(key):
    """
    Helper-Method: check whether *key* is framed by the PGP public key block
    markers (after stripping surrounding whitespace).
    @param key: The key (bytes) to check.
    @return: True if it is a valid PGP key, else False.
    """
    if key is None:
        return False
    trimmed = key.strip()
    if trimmed.startswith(b'-----BEGIN PGP PUBLIC KEY BLOCK-----'):
        return trimmed.endswith(b'-----END PGP PUBLIC KEY BLOCK-----')
    return False
def letterCombinations(digits):
    """
    Phone-keypad letter combinations for a digit string.

    :type digits: str
    :rtype: List[str]
    """
    if not digits:
        return []
    keypad = {
        '2': 'abc', '3': 'def', '4': 'ghi', '5': 'jkl',
        '6': 'mno', '7': 'pqrs', '8': 'tuv', '9': 'wxyz',
    }
    combos = []

    def expand(prefix, index):
        if index == len(digits):
            combos.append(prefix)
            return
        for letter in keypad[digits[index]]:
            expand(prefix + letter, index + 1)

    expand('', 0)
    return combos
def simple_values_from_events(events, tags):
    """Parse summaries from events with simple_value.
    Args:
        events: List of tensorflow.Event protos.
        tags: List of string event tags corresponding to simple_value summaries.
    Returns:
        dict of tag:value, keeping only the value from the highest step seen
        for each tag.
    Raises:
        ValueError: if a summary with a specified tag does not contain simple_value.
    """
    # Track the latest step per tag so a later (higher-step) value wins.
    step_by_tag = {}
    value_by_tag = {}
    for e in events:
        if e.HasField('summary'):
            for v in e.summary.value:
                tag = v.tag
                if tag in tags:
                    if not v.HasField('simple_value'):
                        raise ValueError('Summary for %s is not a simple_value.' % tag)
                    # The events are mostly sorted in step order, but we explicitly check
                    # just in case.
                    if tag not in step_by_tag or e.step > step_by_tag[tag]:
                        step_by_tag[tag] = e.step
                        value_by_tag[tag] = v.simple_value
    return value_by_tag
def mutate_string(string, pos, change_to):
    """
    Return *string* with the character at *pos* replaced by *change_to*
    (list round-trip; fastest approach found, ~736 ns).

    >>> mutate_string('anthony', 0, 'A')
    'Anthony'

    :param string: source string
    :param pos: index to replace (negative indices work)
    :param change_to: replacement text for that position
    :return: the mutated string
    """
    chars = list(string)
    chars[pos] = change_to
    return ''.join(chars)
def empty_iter(iterable):
    """
    Probe an iterator: return None when it is empty/exhausted, otherwise an
    equivalent iterator with the probed first element re-attached.
    """
    import itertools
    try:
        head = next(iterable)
    except StopIteration:
        return None
    return itertools.chain([head], iterable)
def _make_divisible(v, divisor, min_value=None):
    """Round channel count *v* to a multiple of *divisor*.

    This function is taken from the original tf repo; it ensures that all
    layers have a channel number divisible by 8:
    https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py

    :param v: raw channel count
    :param divisor: required divisor
    :param min_value: lower bound (defaults to divisor)
    :return: adjusted channel count
    """
    floor = divisor if min_value is None else min_value
    rounded = int(v + divisor / 2) // divisor * divisor
    new_v = max(floor, rounded)
    # Make sure that rounding down does not go down by more than 10%.
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v
def conv_output_shape(h_w, kernel_size=1, stride=1, pad=0, dilation=1):
    """
    Compute the (h, w) output size of a 2-D convolution.
    Scalar arguments are broadcast to (v, v) tuples.
    """
    def _pair(value):
        # type() check (not isinstance) to mirror the original exactly.
        return value if type(value) is tuple else (value, value)

    h_w = _pair(h_w)
    kernel_size = _pair(kernel_size)
    stride = _pair(stride)
    pad = _pair(pad)
    dilation = _pair(dilation)
    h = (h_w[0] + 2 * pad[0] - dilation[0] * (kernel_size[0] - 1) - 1) // stride[0] + 1
    w = (h_w[1] + 2 * pad[1] - dilation[1] * (kernel_size[1] - 1) - 1) // stride[1] + 1
    return h, w
def calc_clean_up_default_res(crop, clean_up, allow_incomplete):
    """Logic for choosing whether to automatically clean up a crop, and what,
    if any, the default all-nan result should be.
    """
    # clean_up=None means "decide for me": clean up unless incompleteness
    # is allowed.
    if clean_up is None:
        clean_up = not allow_incomplete
    default_result = crop.all_nan_result if allow_incomplete else None
    return clean_up, default_result
def pdf_row_limiter(rows, limits=None, **kwargs):
    """
    Slice *rows* by [upper, lower) limits. No best-effort heuristics are
    attempted since the possibilities with pdf-extracted text structures
    are endless.
    """
    limits = limits or [None, None]
    start = limits[0]
    stop = limits[1] if len(limits) > 1 else None
    return rows[start:stop]
def problem_10_5(square1, square2):
    """ Given two squares on a two dimensional plane, find a line that would
    cut these two squares in half.

    Solution: compute the equation of the line passing through the centers
    of the two squares.

    Fix: the intercept is now derived from the center line itself; the old
    formula used square1's corner diagonal and returned a wrong intercept.

    Args:
        square1: tuple, of dicts, format ({x, y}, {x, y})
        square2: tuple, of dicts, format ({x, y}, {x, y})
    Returns:
        tuple, format (slope, intercept)
    Note: raises ZeroDivisionError when the centers are vertically aligned.
    """
    (p1, p2) = square1
    (p3, p4) = square2
    c1 = {'x': float(p1['x'] + p2['x']) / 2, 'y': float(p1['y'] + p2['y']) / 2}
    c2 = {'x': float(p3['x'] + p4['x']) / 2, 'y': float(p3['y'] + p4['y']) / 2}
    slope = float(c2['y'] - c1['y']) / (c2['x'] - c1['x'])
    intercept = c1['y'] - slope * c1['x']
    return (slope, intercept)
def get_circle_base_url(project_target):
    """Create the circle base url used against the v1.1 API.

    Arguments:
        project_target {string} -- project target in circleCi <Org>/<Project>
    Returns:
        string -- base url for the project
    """
    return f"https://circleci.com/api/v1.1/project/github/{project_target}"
def _is_number(s):
    """
    True when *s* is an optionally signed run of digits (for indexes in dict).
    @param s: string
    """
    body = s[1:] if s[:1] in ('-', '+') else s
    return body.isdigit()
def integer_from_str(src):
    """Decodes an integer
    If string is not a valid lexical representation of an integer then
    ValueError is raised. This uses XML Schema's lexical rules which
    are slightly different from Python's native conversion."""
    # `sign` really means "no further sign character allowed": it flips True
    # after the first '+'/'-' OR after any digit, so a sign may appear at
    # most once and only before the digits.
    sign = False
    for c in src:
        v = ord(c)
        if v == 0x2B or v == 0x2D:
            # '+' or '-'
            if sign:
                raise ValueError(
                    "Invalid lexical representation of integer: %s" % src)
            else:
                sign = True
        elif v < 0x30 or v > 0x39:
            # Anything outside ASCII '0'-'9' is rejected (no whitespace,
            # no underscores, no unicode digits — stricter than int()).
            raise ValueError(
                "Invalid lexical representation of integer: %s" % src)
        else:
            # a digit means we've got an empty sign
            sign = True
    # An empty or sign-only src falls through to int(), which raises its
    # own ValueError.
    return int(src)
def three_way_radix_quicksort(sorting: list) -> list:
    """
    Three-way radix quicksort:
    https://en.wikipedia.org/wiki/Quicksort#Three-way_radix_quicksort
    Partition around the first element into less/equal/greater, then
    recursively sort the outer two partitions.
    >>> three_way_radix_quicksort([])
    []
    >>> three_way_radix_quicksort([1])
    [1]
    >>> three_way_radix_quicksort([-5, -2, 1, -2, 0, 1])
    [-5, -2, -2, 0, 1, 1]
    >>> three_way_radix_quicksort([1, 2, 5, 1, 2, 0, 0, 5, 2, -1])
    [-1, 0, 0, 1, 1, 2, 2, 2, 5, 5]
    """
    if len(sorting) <= 1:
        return sorting
    pivot = sorting[0]
    smaller = [item for item in sorting if item < pivot]
    equal = [item for item in sorting if item == pivot]
    larger = [item for item in sorting if item > pivot]
    return three_way_radix_quicksort(smaller) + equal + three_way_radix_quicksort(larger)
def get_curly_brace_scope_end(string, start_pos):
    """Given a string and the position of an opening curly brace, find the
    position of the matching closing brace.

    Raises ValueError if ``string[start_pos]`` is not "{".
    Returns -1 if the brace is never closed.
    """
    if string[start_pos] != "{":
        raise ValueError("string must have \"{\" at start pos")
    depth = 1
    for pos in range(start_pos + 1, len(string)):
        ch = string[pos]
        if ch == "{":
            depth += 1
        elif ch == "}":
            depth -= 1
            if depth == 0:
                return pos
    return -1
def convert_str2list(str_data, space_mark=' '):
    """Split *str_data* on *space_mark* and return the pieces.

    :param str_data: the string to be converted, tokens separated by space_mark
    :param space_mark: the separator between tokens (defaults to a single space)
    :return: a list of strings
    """
    tokens = str_data.split(space_mark)
    return tokens
def _pandas_style_to_css(style, uuid, separator=""):
"""Convert pandas.Styler translated styles entry to CSS.
Parameters
----------
style : dict
pandas.Styler translated styles entry.
uuid: str
pandas.Styler UUID.
separator: str
A string separator used between table and cell selectors.
"""
declarations = []
for css_property, css_value in style["props"]:
declaration = css_property.strip() + ": " + css_value.strip()
declarations.append(declaration)
table_selector = "#T_" + str(uuid)
cell_selector = style["selector"]
selector = table_selector + separator + cell_selector
declaration_block = "; ".join(declarations)
rule_set = selector + " { " + declaration_block + " }"
return rule_set |
def unique_chars_iterator(smile):
    """Return every character of *smile* as a list.

    NOTE(review): despite the name, this does NOT deduplicate -- the
    original loop appended every character in order, which is exactly
    ``list(smile)``. Kept behavior-identical for existing callers.

    :param smile: SMILES string (or any string) to split into characters
    :return: list of single-character strings, in input order
    """
    return list(smile)
def break_instance_ids_into_chunks(instance_ids):
    """
    Returns successive chunks from instance_ids, at most three items each.
    """
    size = 3
    return [instance_ids[start:start + size]
            for start in range(0, len(instance_ids), size)]
def drop(num, iterator):
    """Drop the first n elements on an iterator"""
    # Consume up to *num* items; stop early if the iterator runs dry.
    for _ in range(num):
        try:
            next(iterator)
        except StopIteration:
            break
    return iterator
def trSrc(transition):
    """Given a transition represented as ((Q,symb),Q'),
    return Q.
    """
    # The left-hand side of a transition is the (state, symbol) pair.
    lhs = transition[0]
    return lhs[0]
def _get_tags(ws_info):
"""Get the tags relevant to search from the ws_info metadata"""
metadata = ws_info[-1]
if metadata.get('searchtags'):
if isinstance(metadata['searchtags'], list):
return metadata['searchtags']
else:
return [metadata['searchtags']]
else:
return [] |
def unique_list(l):
    """Return unique list form list (first occurrence wins, order kept).

    Uses ``==`` membership rather than a set, so elements need not be
    hashable.
    """
    return [item for index, item in enumerate(l) if item not in l[:index]]
def _supports_alg(user_alg: bytes, peer_alg: bytes) -> bool:
"""
Return whether the peer supports the algorithm specified by the user.
:param user_alg: Service ID requested by user
:param peer_alg: Service ID offered by privacy peer
:return: True, if algorithm is supported by the peer
"""
return (user_alg == peer_alg) |
def next_perm(v: int) -> int:
    """
    Compute the lexicographically next bit permutation
    Generates next permutation with a given amount of set bits,
    given the previous lexicographical value.
    Taken from http://graphics.stanford.edu/~seander/bithacks.html#NextBitPermutation
    """
    # v | (v - 1) fills in all bits below v's lowest set bit; adding 1 then
    # clears v's lowest run of 1s and sets the first 0 bit above it.
    t = (v | (v - 1)) + 1
    # (t & -t) and (v & -v) isolate the lowest set bit of t and v; their
    # quotient, shifted right once and decremented, restores the right
    # number of 1 bits at the low end of the word.
    # NOTE(review): divides by (v & -v), so v == 0 raises ZeroDivisionError;
    # callers must pass a value with at least one set bit.
    w = t | ((((t & -t) // (v & -v)) >> 1) - 1)
    return w
def is_multiple(base, multiple):
    """Determine if base is a multiple of multiple

    Args:
        base (Integer): Base value
        multiple (Integer): Multiple value

    Returns:
        Boolean: True if base is a multiple of multiple, false otherwise
        (zero is never treated as a valid multiple)
    """
    # Short-circuit guards against division by zero.
    return multiple != 0 and base % multiple == 0
def temporalRanges(start, end, step):
    """
    Return a list of tuples representing the temporal boundaries
    """
    boundaries = []
    lower = start
    # Emit full-width windows while another whole step fits before *end*.
    while lower + step < end:
        boundaries.append((lower, lower + step))
        lower += step
    # The final window is clipped to *end* (it may be shorter than *step*).
    boundaries.append((lower, end))
    return boundaries
def triple_split(triple):
    """Split target triple into parts.

    A triple is "<arch>-<vendor>-<os>[-<env>]"; env defaults to ''.
    """
    arch, vendor, tail = triple.split('-', 2)
    # partition yields ('', '') for the separator/env when tail has no '-'.
    os_name, _, env = tail.partition('-')
    return arch, vendor, os_name, env
def get_rt(user_input, bound):
    """
    Get reproduction rate from scenario choice.
    """
    strategy = user_input["strategy"]
    # An unknown scenario name is a KeyError, raised before rt_values
    # is ever indexed.
    if strategy not in ("estavel", "positivo", "negativo"):
        raise KeyError(strategy)
    value = user_input["rt_values"][bound]
    if strategy == "estavel":   # cenario estavel
        return value
    if strategy == "positivo":  # cenario positivo
        return value / 2
    return value * 2            # cenario negativo
def straight_line(abscissa, gradient, intercept):
    """
    A one dimensional straight line function.

    Args:
        abscissa (:py:attr:`array_like`): The abscissa data.
        gradient (:py:attr:`float`): The slope of the line.
        intercept (:py:attr:`float`): The y-intercept of the line.

    Returns:
        :py:attr:`array_like`: The resulting ordinate.
    """
    ordinate = abscissa * gradient + intercept
    return ordinate
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.