content stringlengths 42 6.51k |
|---|
def sum_node_list(node_list):
    """Sum nodes without the redundant intermediate nodes Python's sum() creates.

    Returns None when every entry is None (or the list is empty).
    """
    from functools import reduce
    from operator import add
    remaining = [node for node in node_list if node is not None]
    return reduce(add, remaining) if remaining else None
def extract_major_version(scala_version):
    """Return the major Scala version, e.g. "2.11.11" -> "2.11"."""
    second_dot = scala_version.find(".", 2)
    return scala_version[:second_dot]
def serialize_beneficiarios_programa(beneficiario):
    """
    # $ref: '#/components/schemas/beneficiariosPrograma'
    """
    # Only direct beneficiaries ('DC') expose their real code/relation;
    # any other code — or a missing beneficiary — serializes as OTRO.
    if beneficiario and beneficiario.codigo == 'DC':
        return {
            "clave": beneficiario.codigo,
            "valor": beneficiario.tipo_relacion
        }
    return {"clave": "OTRO", "valor": "Otro"}
def get_chebi_synonyms(chebi_ent):
    """
    Return the synonym strings of a ChEBI entity.

    :param chebi_ent: entity that may carry a ``Synonyms`` attribute
    :return: list of each synonym entry's ``data``; empty list if absent
    """
    if not hasattr(chebi_ent, 'Synonyms'):
        return []
    return [entry.data for entry in chebi_ent.Synonyms]
def find_keywords_sentences(keywords, cleaned_lines):
    """
    Find the lines that contain any of the keywords (case-insensitive).

    :param keywords: keywords to search for in the lines
    :param cleaned_lines: lines of the document
    :return: deduplicated list of matching lines (order unspecified,
             since duplicates are removed via a set, as before)
    """
    lowered = [kw.lower() for kw in keywords]
    matches = {
        line
        for line in cleaned_lines
        for kw in lowered
        if kw in line.lower()
    }
    return list(matches)
def flatten_cond_gen_values(gen_eval: dict):
    """
    Flatten gen_eval['cond'][l][s][g] into {'<l>_<s>__<g>': value}.
    """
    flattened = {}
    # Outer keys are processed in sorted order, matching the original.
    for l_key in sorted(gen_eval['cond']):
        for s_key, group in gen_eval['cond'][l_key].items():
            for g_key, value in group.items():
                flattened[l_key + '_' + s_key + '__' + g_key] = value
    return flattened
def to_sbp_file_name(identifier):
    """
    Create a file name from an SBP message identifier by stripping the
    'swiftnav.sbp.' prefix (identifiers without it pass through unchanged).
    """
    prefix = 'swiftnav.sbp.'
    return identifier[len(prefix):] if identifier.startswith(prefix) else identifier
def info_from_header(header):
    """Return (mb, ms, sequence) extracted from a logfile header string.

    Missing magnitude fields default to NaN; a missing sequence is ''.
    """
    from numpy import nan

    def _field(text, tag):
        # The value sits between '<tag>' and the next comma (or end of string).
        return text.split(tag)[1].split(',')[0]

    compact = str(header).replace(' ', '')  # remove spaces before splitting
    mb = float(_field(compact, 'mb=')) if 'mb=' in compact else nan
    ms = float(_field(compact, 'ms=')) if 'ms=' in compact else nan
    sequence = _field(compact, 'sequence=') if 'sequence=' in compact else ''
    return mb, ms, sequence
def build_description(summary, description):
    """
    Combine a summary and a description into a single description string.

    Falls back to the summary when the description is empty; otherwise the
    summary is prepended (on its own line) unless already contained.
    """
    summary = (summary or '').strip()
    description = (description or '').strip()
    if not description:
        return summary
    if summary and summary not in description:
        return summary + '\n' + description
    return description
def try_parse_float(text):
    """
    Try to parse a value to a float; return None on failure.

    Also catches TypeError so non-string/non-numeric inputs (e.g. None,
    lists) return None instead of raising, matching the "try" contract.
    """
    try:
        return float(text)
    except (ValueError, TypeError):
        return None
def single_timeseries_to_json(value=None, unit="", label="", asset_type=""):
    """Format the information about a single timeseries as a JSON-ready dict."""
    return {
        "value": [] if value is None else value,
        "unit": unit,
        "label": label,
        "asset_type": asset_type,
    }
def _GenerateEstimatorConstructor(estimator_class_name, variable_types, variable_names, extension_class_name):
"""
Generates the consructor for the estimator class.
"""
code = ["\n\npublic {0}(IHostEnvironment env".format(estimator_class_name)]
# Generate the Constructor parameters
for variable_type, variable_name in zip(variable_types, variable_names):
code.append(", {0}.TransformParameter<{1}> {2}".format(extension_class_name, variable_type, variable_name))
code.extend(
[
", string outputColumn",
")\n{"
]
)
# Generate assigning the values in the constructor
for variable_name in variable_names:
code.append("\n_{0} = {0};".format(variable_name))
# Add assignments that are always required
code.extend(
[
"\n_outputColumn = outputColumn;",
"\n_host = env.Register(nameof({0}));".format(estimator_class_name),
"\n}"
]
)
return "".join(code) |
def compute_poi_email_ratio(poi_messages, all_messages):
    """ FEATURE
    Fraction of a person's messages exchanged with a POI.

    Returns 0. unless both counts are positive ints (dataset NaN markers
    arrive as non-int values and deliberately fall through to 0.).
    """
    valid = (
        type(poi_messages) is int
        and type(all_messages) is int
        and poi_messages > 0
        and all_messages > 0
    )
    return float(poi_messages) / float(all_messages) if valid else 0.
def selu_initialization_std(shape):
    """
    Init std for use with the SELU non-linearity: sqrt(1 / fan_in),
    where fan_in is shape[0].
    """
    fan_in = shape[0]
    std = (1.0 / fan_in) ** 0.5
    print("selu init std", shape, std)
    return std
def HelperParseOutput(lst):
    """'object_pairs_hook' for json.loads() that preserves duplicate keys.

    Values of a repeated key are collected into a list, e.g.
    {"key":1, "key":2, "key2":3, "key2":4} parses to
    {"key": [1, 2], "key2": [3, 4]}; unique keys keep their single value.

    Args:
        lst: A list of (key, value) tuples from the JSON decoder.
    Returns:
        A dict containing the output of benchmark tests.
    """
    result = {}
    for key, val in lst:
        if key not in result:
            result[key] = val
        elif isinstance(result[key], list):
            result[key].append(val)
        else:
            # Second occurrence: promote the scalar to a list.
            result[key] = [result[key], val]
    return result
def fuzzy_not(arg):
    """
    Not in fuzzy (three-valued) logic.

    Returns the boolean negation of arg, or None when arg is None
    (the "unknown" value).
    Examples:
        >>> from sympy.logic.boolalg import fuzzy_not
        >>> fuzzy_not(True)
        False
        >>> fuzzy_not(None)
        >>> fuzzy_not(False)
        True
    """
    return None if arg is None else not arg
def write_key(key, value):
    """Render a `key = value` line for an LTA file.

    Scalars (str/int/float) are written directly; any other iterable value
    is written as space-separated items.

    Parameters
    ----------
    key : str
    value : int or float or str or iterable
    Returns
    -------
    str
    """
    if isinstance(value, (str, int, float)):
        return f'{key} = {value}'
    rendered = ' '.join(str(item) for item in value)
    return f'{key} = ' + rendered
def enumerate(lst):
    """
    Pair each item of lst with its index, returned as an eager list
    (a list-producing variant of the builtin enumerate).

    >>> enumerate(["a", "b", "c"])
    [(0, 'a'), (1, 'b'), (2, 'c')]
    """
    return list(zip(range(len(lst)), lst))
def get_user_plist_path():
    """
    Return the path of the macOS local-node user account property lists.
    """
    return "/private/var/db/dslocal/nodes/Default/users/"
def argif(cond, if_true_value, else_value):
    """
    Return if_true_value when cond is truthy, else else_value.
    Roughly equivalent to a lisp (if cond true false).
    """
    return if_true_value if cond else else_value
def splitext(fname):
    """Splits filename and extension (.gz safe)
    >>> splitext('some/file.nii.gz')
    ('file', '.nii.gz')
    >>> splitext('some/other/file.nii')
    ('file', '.nii')
    >>> splitext('otherext.tar.gz')
    ('otherext', '.tar.gz')
    >>> splitext('text.txt')
    ('text', '.txt')
    Source: niworkflows
    """
    from pathlib import Path
    basename = str(Path(fname).name)
    # BUGFIX: str.rstrip(".gz") strips a *character set* {'.','g','z'}, not
    # the suffix, so names ending in 'g'/'z' were mangled (e.g. 'fizz.gz'
    # -> stem 'fi'). Strip the literal '.gz' suffix instead.
    trimmed = basename[:-3] if basename.endswith(".gz") else basename
    stem = Path(trimmed).stem
    return stem, basename[len(stem):]
def get_AC(GT):
    """
    Convert a single sample's GT string to an allele count (AC) string.

    'None/None' genotypes yield 'NA'; otherwise the '/'-separated alleles
    are summed.
    """
    if GT == 'None/None':
        return 'NA'
    return str(sum(int(allele) for allele in GT.split('/')))
def unhappy_f_point_lin(list, index, other_index):
    """
    Checks if a new point will be unhappy. Returns False if will be happy.

    Temporarily flips list[other_index] (0 <-> 1) so happiness is evaluated
    "as if" that move had been made, then flips it back before every
    return, leaving the list unchanged.
    """
    # Flip the candidate cell for the duration of the check.
    if list[other_index] == 1:
        list[other_index] = 0
    else:
        list[other_index] = 1
    if index == 0:
        # Left edge: happy if it differs from the right neighbour.
        if list[index] != list[index + 1]:
            # Undo the flip before returning.
            if list[other_index] == 1:
                list[other_index] = 0
            else:
                list[other_index] = 1
            return False
    if index == len(list)-1:
        # Right edge: happy if it differs from the left neighbour.
        if list[index] != list[index - 1]:
            if list[other_index] == 1:
                list[other_index] = 0
            else:
                list[other_index] = 1
            return False
    else:
        # Interior point: happy if it differs from either neighbour.
        # NOTE(review): when index == 0 this branch also runs (the first
        # `if` above does not chain into this one), so list[index - 1]
        # wraps to the last element via negative indexing — confirm that
        # wrap-around is intended.
        if list[index] != list[index - 1] or list[index] != list[(index + 1) % len(list)]:
            if list[other_index] == 1:
                list[other_index] = 0
            else:
                list[other_index] = 1
            return False
    # No happy condition triggered: undo the flip and report unhappy.
    if list[other_index] == 1:
        list[other_index] = 0
    else:
        list[other_index] = 1
    return True
def left_join(phrases):
    """
    Join the phrases with commas, replacing every "right" with "left".
    """
    swapped = (phrase.replace('right', 'left') for phrase in phrases)
    return ','.join(swapped)
def remove_optimized_key_from_ai_list(key_list: list, optimized_dict: dict):
    """
    Remove (in place) the first occurrence of each optimized value from key_list.

    :param key_list: candidate keys, mutated in place
    :param optimized_dict: dict whose *values* are removed from key_list
    :return: the same key_list object
    """
    for value in optimized_dict.values():
        if value in key_list:
            # list.remove drops the first occurrence, same as index+pop.
            key_list.remove(value)
    return key_list
def find_number(value):
    """Convert value to a number, if possible, and report success.

    Parameters:
        value: The variable to convert (could be _anything_)
    Return values: a (number, valid) tuple — the converted float/int
    (0 on failure) and whether conversion succeeded.
    """
    try:
        return float(value), True
    except (ValueError, TypeError):
        pass
    try:
        # Second chance: treat strings like 'ff' / '0x1A' as hexadecimal.
        return int(value, 16), True
    except (ValueError, TypeError):
        return 0, False
def cat_matrices2D(mat1, mat2, axis=0):
    """Concatenate two 2-D matrices along axis 0 (rows) or 1 (columns).

    Returns None when the shapes are incompatible for the requested axis.
    """
    if axis == 0 and len(mat1[0]) == len(mat2[0]):
        return mat1 + mat2
    if axis == 1 and len(mat1) == len(mat2):
        return [row1 + row2 for row1, row2 in zip(mat1, mat2)]
    return None
def _tupleindex(index):
"""Convert 'line.column' to (line, column)."""
line, column = index.split('.')
return (int(line), int(column)) |
def read_leaf_header(leaf):
    """
    Parse a Merkle leaf header (bytes) into its fields.

    Multi-byte fields are big-endian; the remainder after byte 12 is the
    raw entry payload.
    """
    return {
        "version": int(leaf[0]),
        "merkle_leaf_type": int(leaf[1]),
        "timestamp": int.from_bytes(leaf[2:10], "big"),
        "LogEntryType": int.from_bytes(leaf[10:12], "big"),
        "Entry": leaf[12:],
    }
def advance_version(v, bumprule):
    """
    Advance a 'vMAJ.MIN.PATCH' (or 'MAJ.MIN.PATCH') version string per
    bumprule ('major', 'minor' or 'patch'); unknown rules leave the
    numbers unchanged. The leading 'v' is always dropped.
    """
    major, minor, patch = (int(part) for part in v.replace('v', '').split('.'))
    if bumprule == "major":
        major, minor, patch = major + 1, 0, 0
    if bumprule == "minor":
        minor, patch = minor + 1, 0
    if bumprule == "patch":
        patch += 1
    return "{}.{}.{}".format(major, minor, patch)
def moving_average(lst, size):
    """Return the simple moving averages of lst over windows of `size`.

    Raises TypeError for a non-int size, ValueError for size < 1 or a
    list shorter than size.
    """
    if not isinstance(size, int):
        raise TypeError('size must be integer. {} given'.format(size))
    if not size > 0:
        raise ValueError('size must be greater than zero. {} given'.format(size))
    if len(lst) < size:
        raise ValueError(('length of list must be greater than size.'
                          ' list: {} size: {}'.format(lst, size)))
    window_sum = sum(lst[:size])
    averages = [window_sum / size]
    # Slide the window: add the entering element, subtract the leaving one.
    for end in range(size, len(lst)):
        window_sum += lst[end] - lst[end - size]
        averages.append(window_sum / size)
    return averages
def flatten_research(research):
    """Flatten nested research data {player: {tech: values}} into row dicts."""
    return [
        dict(values, player_number=pid, technology_id=tid)
        for pid, techs in research.items()
        for tid, values in techs.items()
    ]
def check_password(option, opt, value):
    """Check a password value (can't be empty).

    No actual checking is performed — monkey patch if you want more.
    """
    return value
def getFIndex(field_names, field_name):
    """Return the index of field_name within field_names for an arcpy
    da.cursor, matching case-insensitively.

    Prints a warning and returns None when the field is absent.
    """
    try:
        # Lower-case both sides so the match is case-insensitive.
        return [str(i).lower() for i in field_names].index(str(field_name).lower())
    except ValueError:
        # Narrowed from a bare `except`, which also swallowed unrelated errors.
        print("Couldn't retrieve index for {0}, check arguments.".format(str(field_name)))
        return None
def priviledged_instructions(instr, bv, isa_specific_data):
    """
    Return True when the instruction's mnemonic (instr[0][0].text) appears
    in the ISA's 'privileged_instructions' list; False otherwise, including
    when no ISA-specific data is available.
    """
    if not isa_specific_data:
        return False
    privileged = isa_specific_data.get('privileged_instructions')
    if privileged and instr[0][0].text in privileged:
        return True
    return False
def sort_list(data_list, index, reverse):
    """Sort data_list; when index is an int, sort by that element of each item."""
    key = (lambda item: item) if index is None else (lambda item: item[index])
    return sorted(data_list, key=key, reverse=reverse)
def extend(value, tupleo):
    """
    Append the elements of tuple `value` after the elements of tuple
    `tupleo`, returned as a new list. Both arguments must be tuples.
    """
    if type(tupleo) != tuple:
        raise TypeError("{} is not tuple".format(tupleo))
    if type(value) != tuple:
        raise TypeError("{} is not tuple".format(value))
    return list(tupleo) + list(value)
def var_name(var):
    """Return the first module-global name bound (by identity) to `var`,
    or None when no global references it."""
    matches = (name for name, value in globals().items() if value is var)
    return next(matches, None)
def decrement_items(inventory, items):
    """
    :param inventory: dict - inventory dictionary (mutated in place).
    :param items: list - items to decrement from the inventory.
    :return: dict - the same inventory with counts decremented, floored at 0.
    """
    for item in items:
        current = inventory[item]
        inventory[item] = current - 1 if current > 0 else 0
    return inventory
def count_same(sequence1, sequence2):
    """
    Return how many indices hold equal items in the two equal-length
    sequences (tuples, lists or strings). No side effects.

    Examples:
        (11, 33, 83, 18, 30, 55) vs (99, 33, 83, 19, 30, 44) -> 3
        (same items at indices 1, 2 and 4)
        'how are you today?' vs 'HOW? r ex u tiday?' -> 8
    """
    return sum(1 for a, b in zip(sequence1, sequence2) if a == b)
def _convert_german_time_to_iso(date_string: str):
"""Convert a german date input as a string to ISO format (DD.MM.YYYY -> YYYY-MM-DD). ONLY DD.MM.YYYY FORMAT."""
return date_string[-4:] + '-' + date_string[3:5] + '-' + date_string[:2] |
def set_optional_arguments(options, option_values, command_line_options):
    """ Validate and set optional command line arguments.

    Options wrapped in angle brackets ('<name>') are optional; bare names
    are required. Each option is resolved from option_values first by its
    full name, then by its first character (short-flag form).
    command_line_options is mutated in place. Returns None on success, or
    (True, error_message) as soon as a required option is missing.
    """
    option_value_keys = option_values.keys()
    for option in options:
        if option[0] == '<':
            # Optional option: strip the surrounding '<' and '>'.
            non_required_option = option[1:len(option) - 1]
            if non_required_option in option_value_keys:
                command_line_options[non_required_option] = option_values[non_required_option]
            elif non_required_option[0] in option_value_keys:
                # Short-flag form: the value is keyed by the first character.
                command_line_options[non_required_option] = option_values[non_required_option[0]]
        else:
            if option in option_value_keys:
                command_line_options[option] = option_values[option]
            elif option[0] in option_value_keys:
                command_line_options[option] = option_values[option[0]]
            else:
                # Required option absent in both long and short form.
                return (True, f'Missing required option: {option}')
    return None
def ATGC_content(dna_sequence):
    """Count A/T/G/C occurrences in a DNA sequence (dict result).

    Symbols outside ATGC are silently ignored.
    """
    counts = {'A': 0, 'T': 0, 'G': 0, 'C': 0}
    for nucleotide in dna_sequence:
        if nucleotide in counts:
            counts[nucleotide] += 1
    return counts
def numberIn(s):
    """Return a Decimal from a string that might contain currency symbols
    and commas, or None when the string has no digits or decimal point."""
    digits = [ch for ch in s if ch in '0123456789.']
    if not digits:
        return None
    from decimal import Decimal
    return Decimal(''.join(digits))
def compute_peg(profit, infos_boursiere):
    """
    Approximate the PEG ratio (PER / profit, rounded to 1 decimal).

    Returns 0 when the PER is unavailable or profit is non-positive,
    and None when profit itself is unknown.
    """
    if 'PER' not in infos_boursiere:
        return 0
    if profit is None:
        return None
    # PER strings look like '10 x'; only the leading number matters.
    per = float(infos_boursiere['PER'].split()[0])
    if profit <= 0:
        return 0
    return round(per / profit, 1)
def count_ways(target, max_integer=None, cache=None):
    """
    Count the number of ways of making target from sum of at least 1 integers
    with max_integer as the largest integer in the sum. If max_integer is not
    provided, count all possible ways from sum of at least 2 integers.

    cache is a (target+1) x target table of memoized results indexed as
    cache[target][max_integer]; it is created on the outermost call and
    threaded through the recursion.
    """
    # Special cases: because we use this recursively, we'll special-case the 1 or 2 targets
    # and the 1 max_integer
    if max_integer == 1:
        return 1
    if target == 1:
        return 1
    elif target == 2 and max_integer == 2:
        return 2
    if not max_integer:
        # Top-level call: exclude the trivial one-term sum (target itself).
        max_integer = target - 1
    if max_integer > target:
        max_integer = target
    if not cache:
        # Create a cache for storing results for all combinations of (target, max_integer)
        # NOTE(review): rows have `target` columns, so cache[target][max_integer]
        # with max_integer == target would IndexError; reachable only if a caller
        # passes max_integer >= target explicitly with a fresh cache — confirm
        # that call pattern cannot occur.
        cache = [[None] * target for i in range(target + 1)]
    elif cache[target][max_integer] is not None:
        return cache[target][max_integer]
    remainder = target
    ways = 0
    # How many ways are there to make target using max_integer at least once?
    while remainder > max_integer:
        remainder -= max_integer
        ways += count_ways(remainder, max_integer - 1, cache=cache)
    # Special case: we have just 1 way to make this last case!
    if remainder == max_integer:
        ways += 1
    # Now recurse with the next smaller max_integer
    if max_integer > 0:
        ways += count_ways(target, max_integer - 1, cache=cache)
    cache[target][max_integer] = ways
    return ways
def baby_names_list_from_table_rows(a_table_rows):
    """
    a_table_rows is a list of dictionaries.
    Return a list of [name, rank] pairs — one for the boy name and one for
    the girl name of each row (gender information discarded) — sorted by name.
    """
    print('baby_names_list_from_table_rows')
    pairs = []
    for row in a_table_rows:
        pairs.append([row['name_boy'], row['rank']])
        pairs.append([row['name_girl'], row['rank']])
    # Sort alphabetically on the name (first element of each pair).
    pairs.sort(key=lambda pair: pair[0])
    return pairs
def build_bankruptcy_definition(years):
    """Build a bankruptcy definition
    Notes:
        This function is set according to a line of best fit from Year 0 at -10% ROI to 10% ROI by Year 7.
    Args:
        years (int): No. of years for analysis
    Returns:
        Bankruptcy definition (list): A timeseries of the bankruptcy threshold graph
        NOTE(review): when years > 7 the early `return` inside the loop fires
        at y == 8 and yields a *tuple* (definition, balance_threshold) with a
        truncated series, while years <= 7 returns just the list — confirm
        which return shape callers expect before fixing.
    TO DO:
        Allow flexibility of bankruptcy definition by allowing input of two data points rather than assuming (-10%, 0) and (10%, 7)
    """
    bankruptcy_definition =[]
    for y in range(years+1):
        # Threshold for bankruptcy
        if y <= 7:
            bankruptcy_definition.append(y*2.8571 - 10) # Year 0 - below 10% ROI, Year 7 - 10% ROI
        elif y > 7:
            bankruptcy_definition.append(10)
            balance_threshold = 0
            return bankruptcy_definition, balance_threshold
    return bankruptcy_definition
def get_iou(bb1, bb2):
    """ Intersection over Union of two [xmin, ymin, xmax, ymax] boxes. """
    # Corners of the intersection rectangle.
    ix_min, iy_min = max(bb1[0], bb2[0]), max(bb1[1], bb2[1])
    ix_max, iy_max = min(bb1[2], bb2[2]), min(bb1[3], bb2[3])
    # No overlap at all.
    if ix_max < ix_min or iy_max < iy_min:
        return 0.0
    inter = (ix_max - ix_min) * (iy_max - iy_min)
    area1 = (bb1[2] - bb1[0]) * (bb1[3] - bb1[1])
    area2 = (bb2[2] - bb2[0]) * (bb2[3] - bb2[1])
    return inter / float(area1 + area2 - inter)
def get_varinfo_from_table(discipline,parmcat,parmnum):
    """
    Return the GRIB2 variable information given values of `discipline`,
    `parmcat`, and `parmnum`. NOTE: This functions allows for all arguments
    to be converted to a string type if arguments are integer.
    Parameters
    ----------
    **`discipline`**: `int` or `str` of Discipline code value of a GRIB2 message.
    **`parmcat`**: `int` or `str` of Parameter Category value of a GRIB2 message.
    **`parmnum`**: `int` or `str` of Parameter Number value of a GRIB2 message.
    Returns
    -------
    **`list`**: containing variable information. "Unknown" is given for item of
    information if variable is not found.
    - list[0] = full name
    - list[1] = units
    - list[2] = short name (abbreviated name)
    """
    # Normalize all three keys to strings so the table lookup is uniform.
    if isinstance(discipline,int): discipline = str(discipline)
    if isinstance(parmcat,int): parmcat = str(parmcat)
    if isinstance(parmnum,int): parmnum = str(parmnum)
    try:
        tblname = 'table_4_2_'+discipline+'_'+parmcat
        modname = '.section4_discipline'+discipline
        # Dynamically star-import the per-discipline table module, then read
        # the table dict back out of this frame's locals().
        # NOTE(review): exec + locals() at function scope is fragile in
        # CPython (and the relative import requires package context) —
        # presumably this relied on the original package layout; confirm
        # before refactoring.
        exec('from '+modname+' import *')
        return locals()[tblname][parmnum]
    except(ImportError,KeyError):
        return ['Unknown','Unknown','Unknown']
def matrix_from_vectors(op, v):
    """
    Build the matrix M with M[i][j] = op(v[i], v[j]), as a list of lists.
    For a commutative op the matrix equals its transpose (redundant halves).
    """
    size = len(v)
    return [[op(v[row], v[col]) for col in range(size)] for row in range(size)]
def flatten(iterable, n=1):
    """
    Remove n levels of (uniform) list nesting.
    :param iterable: nested list of lists to be flattened
    :param n: how many levels of nesting to remove
    :return: flattened list
    """
    result = iterable
    for _ in range(n):
        one_level = []
        for sublist in result:
            one_level.extend(sublist)
        result = one_level
    return result
def find_identical_rects(frames):
    """Unify ChangedRects that share the same key() across all frames.

    Each frame's content list is rewritten in place so identical rects are
    represented by one shared object (which records its occurrences).
    Returns (total raw rect count, number of unique rects).
    """
    total_rects = 0
    seen = {}
    for frame in frames:
        content = frame.content()
        total_rects += len(content)
        deduped = []
        for rect in content:
            # First rect with a given key becomes the canonical instance.
            rect = seen.setdefault(rect.key(), rect)
            rect.addOccurrence(frame)
            deduped.append(rect)
        content[:] = deduped
    return total_rects, len(seen)
def request_id(request, flavor_id):
    """Flavor id for the URL call: the fixture's param when present,
    otherwise the supplied flavor_id."""
    return getattr(request, 'param', flavor_id)
def ref_seq_output(in_seq_name, in_ref_name, ext):
    """
    Generate an output file name of the form '<ref>_<seq>.<ext>'.
    :param in_seq_name: treatment name (str)
    :param in_ref_name: reference name (str)
    :param ext: extension (e.g. csv or pdf)
    :return: output filename (str)
    """
    return "%s_%s.%s" % (in_ref_name, in_seq_name, ext)
def unknown_id_to_symbol(unknown_id, header="X"):
    """Build the display symbol for unknown number |unknown_id|.

    The id is rendered in base 26 using letters 'a'-'z' and prefixed with
    |header|; id 0 maps to '<header>a'.

    :type unknown_id: int
    :type header: str
    :rtype : str
    :return: A string that contains the symbol.
    """
    letters = "abcdefghijklmnopqrstuvwxyz"
    if unknown_id == 0:
        return header + "a"
    suffix = ""
    remaining = unknown_id
    # Peel base-26 digits from least to most significant.
    while remaining:
        remaining, digit = divmod(remaining, len(letters))
        suffix = letters[digit] + suffix
    return header + suffix
def make_config( image_h, image_w, image_d, train_split=0.8, batch_size=128, cache_images=True, crop_top=0, crop_bot=0, data_path=None, max_thr=0.5 ):
    """ Build a DonkeyCar-style config object when no config file exists.

    The returned namedtuple exposes exactly the attributes read by
    gather_records (DATA_PATH), collate_records
    (MODEL_CATEGORICAL_MAX_THROTTLE_RANGE, TRAIN_TEST_SPLIT) and
    vae_generator / load_scaled_image_arr / normalize_and_crop
    (BATCH_SIZE, CACHE_IMAGES, IMAGE_H, IMAGE_W, IMAGE_DEPTH,
    ROI_CROP_TOP, ROI_CROP_BOTTOM).
    """
    from collections import namedtuple
    fields = {
        'DATA_PATH': data_path,
        'MODEL_CATEGORICAL_MAX_THROTTLE_RANGE': max_thr,
        'TRAIN_TEST_SPLIT': train_split,
        'BATCH_SIZE': batch_size,
        'CACHE_IMAGES': cache_images,
        'IMAGE_H': image_h,
        'IMAGE_W': image_w,
        'IMAGE_DEPTH': image_d,
        'ROI_CROP_TOP': crop_top,
        'ROI_CROP_BOTTOM': crop_bot,
    }
    CFG = namedtuple('Config', list(fields))
    return CFG(**fields)
def extract_row(key: str, clientresult) -> dict:
    """ Extract one row from the client result and return it as a dict. """
    return dict(clientresult[key])
def trimArray(array):
    """Drop trailing zeros; an all-zero (or empty) array collapses to [0].

    >>> trimArray([0 for _ in range(10)]) == [0]
    True
    >>> trimArray([1, 0, 0, 0, 0, 0, 0, 0])
    [1]
    """
    last_nonzero = next((i for i in range(len(array) - 1, -1, -1) if array[i] != 0), None)
    return [0] if last_nonzero is None else array[:last_nonzero + 1]
def RPL_YOURESERVICE(sender, receipient, message):
    """ Reply Code 383: format as '<sender>: message' (receipient unused). """
    return "<{}>: {}".format(sender, message)
def _texture(number):
"""Generates ARB assembly for a texture."""
return 'texture[{}]'.format(number) |
def solution(value: int) -> str:
    """
    Return "Value is VALUE" where VALUE is the number zero-padded to at
    least 5 digits.

    BUGFIX: the original `while len(result) != 5` looped forever for values
    with more than 5 digits; left-padding with rjust only when shorter
    produces identical output for shorter values and terminates for longer.
    :param value:
    :return:
    """
    return 'Value is {}'.format(str(value).rjust(5, '0'))
def _get_axis_num(nrow, row, col):
"""
computes axis number
Parameters
----------
nrow : int
number of rows
row : int
row number
col : int
column number
"""
return (nrow * row + col) + 1 |
def _list_clean(tweet):
"""Splits the tweet into words based on spaces and returns this list
along with a concatenated string without spaces.
Args:
tweet (str): Tweet text
Returns:
tuple
"""
# Tweet split into a list of words
tweet_word_list = tweet.split(" ")
# Tweet rejoined into string thus removing all spaces
tweet_clean = "".join(tweet_word_list)
return tweet_word_list, tweet_clean |
def plist_set_version(plist_data, version_str):
    """
    Replace the <string> values following CFBundleShortVersionString and
    CFBundleVersion keys with version_str, e.g.:

    <key>CFBundleShortVersionString</key>
    <string>0.1</string>
    <key>CFBundleVersion</key>
    <string>0.1</string>
    """
    plist_lines = plist_data.split("\n")
    plist_num_lines = len(plist_lines)
    out_lines = []
    line_idx = 0
    while line_idx < plist_num_lines:
        l = plist_lines[line_idx]
        line_idx += 1
        out_lines.append(l)
        # When a line mentions either version key, emit a fresh <string>
        # value line and skip the original one by advancing line_idx.
        if -1 != l.find("CFBundleShortVersionString") or \
            -1 != l.find("CFBundleVersion"):
            out_lines.append(" <string>%s</string>" % version_str)
            line_idx += 1
    return "\n".join(out_lines)
def safe_import(origin, funk1, funk2):
"""Safely import a function whose name was changed from a module whose name was not.
This function is specially useful if the function name is not known at runtime.
Args:
origin (str): name of the module where the function to be imported is located
funk1 (str): name of the first function name to try to import
funk2 (str): name of the second function name to try to import
Returns:
function: function object imported from `origin`
Example:
# instead of writting this
try:
from itertools import filterfalse
except ImportError:
from itertools import ifilterfalse as filterfalse
# write this
filterfalse = safe_import('itertools', 'filterfalse', 'ifilterfalse')
"""
try:
hook = __import__(origin, globals(), locals(), [funk1], 0)
return getattr(hook, funk1)
except:
hook = __import__(origin, globals(), locals(), [funk2], 0)
return getattr(hook, funk2) |
def same_rank(hand, n_of_a_kind):
    """
    Count the distinct ranks appearing exactly n_of_a_kind times in `hand`
    (cards are strings whose first char is the suit, the rest the rank).
    With n_of_a_kind=2 this returns the number of pairs.
    """
    ranks = [card[1:] for card in hand]
    return sum(1 for rank in set(ranks) if ranks.count(rank) == n_of_a_kind)
def filtered_map(method, object_list):
    """
    Map `method` over object_list, then drop results that evaluate to False.
    """
    return [mapped for mapped in map(method, object_list) if mapped]
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word.

    Word is represented as a tuple of symbols (symbols being
    variable-length strings). An empty word now yields an empty set
    instead of raising IndexError on word[0].
    """
    if not word:
        return set()
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
def uniquify_tuples(tuples):
    """
    Uniquify Mimikatz tuples on (credType, domain, username, password),
    keeping first occurrences in order.
    cred format- (credType, domain, username, password, hostname, sid)
    """
    seen = set()
    unique = []
    for cred in tuples:
        fingerprint = "%s%s%s%s" % (cred[0], cred[1], cred[2], cred[3])
        if fingerprint not in seen:
            seen.add(fingerprint)
            unique.append(cred)
    return unique
def version_splitter(s):
    """Split a version string into a tuple of integers.

    The letters a/b/c map to -3/-2/-1, permitting Python-style version
    strings such as "1.4b3" -> (1, 4, -2, 3). Any other non-digit,
    non-'.' character raises ValueError.
    """
    parts = []
    digits = []

    def emit_number():
        # Every '.' or letter must be preceded by at least one digit.
        if not digits:
            raise ValueError('Malformed version string: ' + repr(s))
        parts.append(int(''.join(digits)))
        digits.clear()

    for ch in s:
        if ch.isdigit():
            digits.append(ch)
        elif ch == '.':
            emit_number()
        elif ch in 'abc':
            emit_number()
            parts.append('abc'.index(ch) - 3)
        else:
            raise ValueError('Illegal character ' + repr(ch) + ' in version string ' + repr(s))
    emit_number()
    return tuple(parts)
def layer_type(flags):
    """
    Extract the layer type (bits 1 to 3) from a feature classification flag:
    0 = invalid (bad or missing data)
    1 = "clear air"
    2 = cloud
    3 = aerosol
    4 = stratospheric feature
    5 = surface
    6 = subsurface
    7 = no signal (totally attenuated)
    """
    return flags & 0b111
def generate(output, aid_to_ans):
    """
    Look up the answer for a given answer ID/label.
    :param output: the answer label
    :param aid_to_ans: mapping (or sequence) from label to answer
    :return: the answer
    """
    return aid_to_ans[output]
def improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text):
    """Return a tokenized answer span that better matches the annotated answer.

    SQuAD annotations are character based and first projected onto
    whitespace-tokenized words; after WordPiece tokenization a tighter
    sub-span often matches the answer exactly (e.g. the answer "1895"
    inside the token "(1895-1943)." becomes matchable once split into
    "( 1895 - 1943 ) ."), so every sub-span of [input_start, input_end]
    is compared against the tokenized answer text. When no sub-span
    matches — e.g. the annotation "Japan" is a character sub-span of the
    unsplit token "Japanese" — the original span is returned unchanged.

    Parameters
    ----------
    doc_tokens: list
        A list of doc tokens
    input_start: int
        start position of the answer
    input_end: int
        end position of the answer
    tokenizer: callable function
    orig_answer_text: str
        origin answer text.
    Returns
    -------
    tuple: (improved start position, improved end position)
    """
    target = ' '.join(tokenizer(orig_answer_text))
    for start in range(input_start, input_end + 1):
        # Try the widest end first, shrinking toward `start`.
        for end in range(input_end, start - 1, -1):
            candidate = ' '.join(doc_tokens[start:(end + 1)])
            if candidate == target:
                return (start, end)
    return (input_start, input_end)
def cidr_to_common(cidr_mask):
    """Return the dotted-decimal mask (e.g. "255.255.255.0") for a CIDR
    prefix length (e.g. 24).

    Accepts an int or a numeric str in [0, 32]; anything outside that
    range raises ValueError. The /0 entry was previously missing, so a
    mask of 0 passed the range check but raised KeyError.
    """
    cidrtocommon = {
        0: "0.0.0.0",  # BUGFIX: /0 is valid per the range check below
        1: "128.0.0.0",
        2: "192.0.0.0",
        3: "224.0.0.0",
        4: "240.0.0.0",
        5: "248.0.0.0",
        6: "252.0.0.0",
        7: "254.0.0.0",
        8: "255.0.0.0",
        9: "255.128.0.0",
        10: "255.192.0.0",
        11: "255.224.0.0",
        12: "255.240.0.0",
        13: "255.248.0.0",
        14: "255.252.0.0",
        15: "255.254.0.0",
        16: "255.255.0.0",
        17: "255.255.128.0",
        18: "255.255.192.0",
        19: "255.255.224.0",
        20: "255.255.240.0",
        21: "255.255.248.0",
        22: "255.255.252.0",
        23: "255.255.254.0",
        24: "255.255.255.0",
        25: "255.255.255.128",
        26: "255.255.255.192",
        27: "255.255.255.224",
        28: "255.255.255.240",
        29: "255.255.255.248",
        30: "255.255.255.252",
        31: "255.255.255.254",
        32: "255.255.255.255",
    }
    mask = int(cidr_mask)
    if 0 <= mask <= 32:
        return cidrtocommon[mask]
    raise ValueError("Incorrect CIDR mask entered")
def _parse_versions_json_field(line):
"""Parses a line like '"key": "value",' into a key and value pair."""
if line.endswith(","):
line = line[:-1]
k, sep, v = line.partition('": "')
if not sep or not k.startswith('"') or not v.endswith('"'):
return "", ""
return k[1:], v[:-1] |
def generate_gateway_url(project, gateway):
    """Format the project/gateway pair as a global gateway resource URI."""
    return f'projects/{project}/global/gateways/{gateway}'
def is_ambiguous_align(tags, multi_align_tag):
    """Return True if the read aligns to multiple locations, i.e. any
    (tag, value) pair carries the mapper's multi-alignment tag.
    For bowtie2 that tag is XS."""
    return any(tag[0] == multi_align_tag for tag in tags)
def normalize_index(i):
    """Clamp i into the inclusive range [0, 2].

    Note: the previous docstring claimed ``0 <= i < 2``, but
    ``min(2, i)`` permits 2 itself — the implemented (and intended,
    given the max/min pair) contract is ``0 <= i <= 2``.
    """
    return max(0, min(2, i))
def get_qualified_name(obj):
    """Retrieve the full module path of an object.

    Works for classes, functions and plain instances: prefers
    ``__qualname__`` (present on classes/functions), falls back to the
    instance's class name, then to ``__name__``. When no ``__module__``
    can be read the bare name is returned without a module prefix.

    Example:
    .. code-block:: python
        from watson.http.messages import Request
        request = Request()
        name = get_qualified_name(request) # watson.http.messages.Request
    """
    try:
        # Classes and functions expose __qualname__; plain instances do
        # not (it lives on the metaclass), which raises AttributeError.
        name = obj.__qualname__
    except AttributeError:
        try:
            # Instance: use the name of its class instead.
            name = obj.__class__.__name__
        except Exception:  # pragma: no cover
            name = obj.__name__  # pragma: no cover
    try:
        module = obj.__module__
        return '{0}.{1}'.format(module, name)
    except Exception:  # pragma: no cover
        # No __module__ available (e.g. some builtins): bare name only.
        return name
def response_with_headers(headers, code=200):
    """Build an HTTP response header block.

    :param headers: mapping of header name -> value, e.g.
        Content-Type: text/html
        Set-Cookie: user=gua
    :param code: status code placed on the status line
    :returns: status line plus one line per header, each CRLF-terminated
    """
    lines = ['HTTP/1.x {} VERY OK\r\n'.format(code)]
    for name, value in headers.items():
        lines.append('{}: {}\r\n'.format(name, value))
    return ''.join(lines)
def parse_int(value, fallback=0):
    """Convert a value to an integer, returning ``fallback`` on failure.

    :param value: the value to convert (string, number, or None)
    :param fallback: returned when conversion is impossible (default 0)

    Note: int() raises TypeError (not ValueError) for inputs such as
    lists or dicts; the original only caught ValueError, so those
    inputs escaped the fallback contract and crashed the caller.
    """
    if value is None:
        return fallback
    try:
        return int(value)
    except (ValueError, TypeError):
        return fallback
def adjust_ifs(content):
    """Append ':' to IF statements so each becomes a block header.

    For every line starting with 'IF', inserts ':' right after the
    closing ')' of the condition, or just before a trailing '#'
    comment, or at the end of the line when there is no parenthesis.
    Mutates ``content`` in place and also returns it.

    NOTE(review): parentheses are not balance-counted — the first ')'
    found is treated as the end of the condition; nested parens in the
    condition will mis-place the ':'. Confirm inputs never nest.
    """
    for n,line in enumerate(content):
        # NOTE(review): count is reset on every iteration, so the
        # continuation branch below (count > 0) is unreachable and
        # multi-line IF conditions are never completed — confirm intent.
        count = 0
        if line.strip().startswith('IF'):
            i = line.find('(')
            if i >= 0:
                count = 1
                # Scan forward from '(' for the closing ')' or a comment.
                for k in range(i,len(line)):
                    #print(k, line[k])
                    if line[k] == '#':
                        break
                    if line[k] == ')':
                        count = 0
                        if k==len(line)-1 :
                            # ')' is the last character: append ':'.
                            content[n] = line[:k+1] + ':' + line[k+1:]
                        elif line[k+1] != ':':
                            # Trailing text after ')': insert ':' and a
                            # newline so the remainder moves to its own line.
                            content[n] = line[:k+1] + ':\n' + line[k+1:]
                        break
            else:
                count = 0
                i = line.find('#')
                if i >= 0:
                    # No parentheses: place ':' just before the comment.
                    content[n] = line[:i] + ':' + line[i:]
                else:
                    # No parentheses, no comment: ':' at end of line.
                    content[n] = line + ':'
        else:
            if count > 0:
                # Intended to close a multi-line IF condition started on a
                # previous line (see NOTE above — currently unreachable).
                i = 0
                for k in range(i,len(line)):
                    if line[k] == '#':
                        break
                    if line[k] == ')':
                        count = 0
                        content[n] = line[:k+1] + ':' + line[k+1:]
                        break
    return content
def attack(p, k, a1, f, x, y):
    """
    Recovers the shared secret when Shamir coefficients are generated
    deterministically and a single share is known.

    :param p: the prime used for Shamir's secret sharing
    :param k: the amount of shares needed to unlock the secret
    :param a1: the first coefficient of the polynomial
    :param f: maps a coefficient to the next coefficient in the polynomial
    :param x: the x coordinate of the given share
    :param y: the y coordinate of the given share
    :return: the shared secret
    """
    # Subtract every regenerated term a_i * x^i from y; the remainder
    # (mod p) is the constant term, i.e. the secret.
    remainder = y
    coefficient = a1
    power = x  # x^1, multiplied up each round instead of recomputing x**i
    for _ in range(1, k):
        remainder -= coefficient * power
        coefficient = f(coefficient)
        power *= x
    return remainder % p
def invertIntervalList(inputList, maxValue=None):
    """
    Inverts the segments of a list of intervals
    e.g.
    [(0,1), (4,5), (7,10)] -> [(1,4), (5,7)]

    Each input interval is a (start, end) pair; the result lists the
    gaps between consecutive intervals as [start, end] lists. When
    maxValue is given, a leading gap from 0 and a trailing gap up to
    maxValue are included where applicable.

    NOTE(review): an empty inputList with maxValue=None falls through
    to the indexing below and raises IndexError — confirm callers never
    pass that combination.
    """
    inputList = sorted(inputList)
    # Special case -- empty lists
    if len(inputList) == 0 and maxValue is not None:
        invList = [
            (0, maxValue),
        ]
    else:
        # Insert garbage head/tail sentinels ("" placeholders) so the
        # pairwise inversion below also emits the leading gap from 0 and
        # the trailing gap up to maxValue when the intervals don't span
        # the full range. Only the non-sentinel slots are ever read.
        if inputList[0][0] != 0:
            inputList.insert(0, ["", 0])
        if maxValue is not None and inputList[-1][1] < maxValue:
            inputList.append((maxValue, ""))
        # Gap i runs from the end of interval i to the start of interval i+1.
        invList = [
            [inputList[i][1], inputList[i + 1][0]] for i in range(0, len(inputList) - 1)
        ]
    return invList
def get_labeled_spans(prop):
    """
    Extract labeled argument spans from bracketed SRL-style tags.

    :param prop: 1D: n_words; elem=bracket label (e.g. "(A0*", "*)", "(V*)")
    :return: list of spans, each a list [label, index, index, ...] with
        continuation ("C-") spans merged into their antecedent when present.

    NOTE(review): the original ':return:' said "BIO label", but the code
    returns [label, start, end] span lists, not per-word BIO tags —
    description looked stale.
    """
    def _concat_c_spans(_spans):
        # Merge spans whose label starts with "C" (continuations,
        # presumably "C-XXX") into the span labeled "XXX" when one
        # exists; otherwise keep them as standalone spans under the
        # bare label.
        labels = [_span[0] for _span in _spans]
        c_indices = [i for i, _span in enumerate(_spans) if _span[0].startswith('C')]
        non_ant_c_spans = []
        for c_index in c_indices:
            c_span = _spans[c_index]
            _label = c_span[0][2:]  # drop the "C-" prefix
            if _label in labels:
                # Antecedent found: extend it with this span's indices.
                _spans[labels.index(_label)].extend(c_span[1:])
            else:
                non_ant_c_spans.append([_label] + c_span[1:])
        concated_spans = [span for i, span in enumerate(_spans) if i not in c_indices]
        _spans = concated_spans + non_ant_c_spans
        return _spans
    labeled_spans = []
    labeled_span = []
    for i, arg in enumerate(prop):
        if arg.startswith('('):
            if arg.endswith(')'):
                # Single-token span, e.g. "(A0*)": opens and closes here.
                label = arg.split("*")[0][1:]
                labeled_span = [label, i, i]
            else:
                # Span opens here, e.g. "(A0*"; strip '(' and trailing '*'.
                label = arg[1:-1]
                labeled_span = [label, i]
        elif arg.endswith(')'):
            # Span closes here: append the end index.
            labeled_span.append(i)
        if len(labeled_span) == 3 and labeled_span[0] != "V" and labeled_span[0] != "C-V":
            # Completed span; keep everything except verb (V / C-V) spans.
            labeled_spans.append(labeled_span)
            labeled_span = []
    labeled_spans = _concat_c_spans(labeled_spans)
    return labeled_spans
def is_pod_type(type_name):
    """
    Return True for plain-old-data type names, i.e. those for which no
    class should be generated.
    """
    pod_names = {
        "Nil", "void", "bool", "real_t", "float", "double", "int",
        "int8_t", "uint8_t", "int16_t", "uint16_t",
        "int32_t", "int64_t", "uint32_t", "uint64_t",
    }
    return type_name in pod_names
def pad(string):
    """Decongest statements: drop '\\r', turn '\\t' into a space, and
    surround common punctuation with spaces (open paren gets a trailing
    space only)."""
    # One C-level pass replaces the chain of .replace() calls; each
    # mapped symbol becomes itself wrapped in spaces.
    table = str.maketrans({
        "\r": "",
        "\t": " ",
        "#": " # ", "%": " % ", "*": " * ", ")": " ) ", "+": " + ",
        "-": " - ", "=": " = ", "{": " { ", "}": " } ", "]": " ] ",
        '"': ' " ', "'": " ' ", "<": " < ", ">": " > ",
        "(": "( ",
    })
    return string.translate(table)
def file_finder(filepath):
    """
    Takes a filepath (str) and extracts: (1) its path, (2) its name, and
    (3) its extension, filtered into a dictionary.

    For example, 'folder/directory/repository/file_name.txt' returns:
        prefix:    'folder/directory/repository/'
        file_name: 'file_name'
        extention: '.txt'

    :param filepath: string of a filepath (from current working directory)
    :returns: dictionary with original filepath as well as sorted metadata

    Fixes vs the original: the computed directory path was assigned to a
    dead local and the 'prefix' key always came back empty; paths without
    '/' were never parsed at all; the extension was taken from the last
    dot anywhere in the path (so dotted directory names broke it); and
    the no-extension branch left the leading '/' on the file name.
    """
    prefix = ""
    file_name = filepath
    file_ext = ""
    # Everything up to and including the last '/' is the directory prefix.
    if "/" in filepath:
        last_slash = filepath.rfind("/")
        prefix = filepath[:last_slash + 1]
        file_name = filepath[last_slash + 1:]
    # Split the extension on the last '.' of the base name only, so dots
    # in directory names are not mistaken for an extension.
    if "." in file_name:
        last_dot = file_name.rfind(".")
        file_ext = file_name[last_dot:]
        file_name = file_name[:last_dot]
    return {"full_path": filepath, "file_name": file_name,
            "prefix": prefix, "extention": file_ext}
def get_object_type(objects: list, types: list) -> list:
    """Select the objects whose 'type' field matches one of the given types.

    Args:
        objects: a list of dict-like objects.
        types: a list of acceptable type values.

    Returns:
        A list containing only the matching objects.
    """
    return list(filter(lambda entry: entry.get('type') in types, objects))
def pipe(arg, *args):
    """Thread ``arg`` through each function in ``args`` left to right.

    Short-circuits: if any step produces None, returns None immediately.
    """
    result = arg
    for step in args:
        intermediate = step(result)
        if intermediate is None:
            return None
        result = intermediate
    return result
def format_boolean(value):
    """Return the value unchanged (None stays None).

    Despite the name, no coercion to bool happens — the original
    branched on ``value is not None`` but returned the value as-is in
    both arms, which is an identity.
    """
    return value
def svm_grid():
    """Return the hyperparameter grid for RandomSearchCV with an SVM.

    :return: dict mapping SVC parameter names to candidate value lists
    """
    return {
        'C': [0.1, 1, 10, 100],
        'kernel': ['linear', 'poly', 'rbf', 'sigmoid'],
        'degree': [2, 3, 4, 5, 6, 7, 8],
        'coef0': [0.1, 1, 10, 100],
        'gamma': ['scale', 'auto'],
        'shrinking': [True, False],
        'probability': [True, False],
    }
def recursive_max(items):
    """Return the maximum element of a non-empty list, recursively.

    O(n) comparisons and O(n) recursion depth. The previous version
    sliced ``items[1:]`` on every call, copying the tail each time and
    making it O(n^2) time and space despite its "O(n)" docstring;
    recursing by index avoids the copies.
    """
    def _max_from(i):
        # Base case: last element is the max of the one-element suffix.
        if i == len(items) - 1:
            return items[i]
        tail_max = _max_from(i + 1)
        return tail_max if tail_max > items[i] else items[i]
    return _max_from(0)
def rotate(string, bits):
    """
    Rotate the string right by ``bits`` positions using slice notation,
    which is the easiest approach given that words are stored as strings.

    :param string: the word to rotate
    :param bits: number of positions to rotate by
    :return: the rotated string
    """
    # Split point measured from the left; the tail moves to the front.
    split_at = len(string) - bits
    return string[split_at:] + string[:split_at]
def PlmIndex(l, m):
    """
    Return the flat-array index of the associated Legendre function of
    degree l and angular order m: index = l * (l + 1) / 2 + m.

    The arrays indexed this way are produced by routines such as PlmBar,
    PlmBar_d1, PlmSchmidt, PlmSchmidt_d1, PlmON, PlmON_d1, PLegendreA,
    and PLegendreA_d1.

    Parameters
    ----------
    l : integer, array_like
        The spherical harmonic degree.
    m : integer, array_like
        The angular order.

    Returns
    -------
    index : integer, ndarray
        Index into an array of associated Legendre functions.
    """
    return m + l * (l + 1) // 2
def _Join(*args):
"""Join parts of a gs:// Cloud Storage or local file:// path."""
# URIs always uses '/' as separator, regardless of local platform.
return '/'.join([arg.strip('/') for arg in args]) |
def obj_type(key):
    """Classify a storage key: a trailing slash marks a 'directory',
    anything else (including the empty string) is a 'file'."""
    return 'directory' if key.endswith('/') else 'file'
def watch(name, timespec, tag=None, user=None, job=None, unique_tag=False):
    """
    .. versionadded:: 2017.7.0

    Add an at job when triggered by a watch requisite.

    job : string
        Command to run.
    timespec : string
        The 'timespec' follows the format documented in the at(1) manpage.
    tag : string
        Make a tag for the job.
    user : string
        The user to run the at job.
        .. versionadded:: 2014.1.4
    unique_tag : boolean
        If set to True the job will not be added if a job with the tag
        already exists.
        .. versionadded:: 2017.7.0

    .. code-block:: yaml

        minion_restart:
          at.watch:
            - job: 'salt-call --local service.restart salt-minion'
            - timespec: 'now +1 min'
            - tag: minion_restart
            - unique_tag: true
            - watch:
              - file: /etc/salt/minion
    """
    # The watch trigger itself performs no work here; return the
    # standard state return structure unchanged.
    ret = {
        "name": name,
        "changes": {},
        "result": True,
        "comment": "",
    }
    return ret
def get_policy_published(reportee, policy, **kw):
    """Build the "policy_published" section for ``reportee`` with ``policy``.

    The adkim/aspf alignment modes are hardcoded to "r" (relaxed) and
    pct to 100 everywhere, for now; ``policy`` fills both p and sp.
    """
    published = dict(
        domain=reportee,
        adkim="r",
        aspf="r",
        p=policy,
        sp=policy,
        pct=100,
    )
    return published
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.