content stringlengths 42 6.51k |
|---|
def get_defects(defects, n):
    """Return the defect name at class index ``n``.

    The dict's insertion order defines the class index (Python 3.7+).

    Arguments:
        defects : dict mapping every possible defect name to its data.
        n : target class index predicted by the model.
    Returns:
        The defect name (key) at position ``n``, or None when ``n`` is
        out of range (the original returned None implicitly).
    """
    # Original docstring documented a nonexistent `target_class` parameter;
    # iterate the dict directly instead of `.keys()`.
    for index, defect_name in enumerate(defects):
        if index == n:
            return defect_name
    return None
def factory_name(obj):
    """Return a ``module.ClassName`` string describing a DisCoPy object."""
    cls = type(obj)
    return '{}.{}'.format(cls.__module__, cls.__name__)
def maximumToys(prices, budget):
    """Return the maximum number of toys purchasable within *budget*.

    Args:
        prices (list): toy prices (integers)
        budget (int): total amount available to spend
    Returns:
        int: number of toys bought
    """
    remaining = budget
    count = 0
    # Greedy: buying cheapest-first maximizes the count.
    for price in sorted(prices):
        if remaining <= 0:
            break
        if price <= remaining:
            remaining -= price
            count += 1
    return count
def _sanitize_name(name):
"""
Sanitize input of chars that can't be used in a module map token.
"""
for c in "-/.":
name = name.replace(c, "_")
return name |
def maybe_snowflake(value):
    """
    Converts the given `value` to `snowflake` if applicable. If not returns `None`.

    Parameters
    ----------
    value : `str`, `int`, `Any`
        A value which might be a snowflake.

    Returns
    -------
    value : `None`, `int`

    Raises
    ------
    AssertionError
        - If `value` was passed as `str` and cannot be converted to `int`.
        - If the `value` is negative or its bit length is over 64.
    """
    if isinstance(value, int):
        pass
    elif isinstance(value, str):
        if not value.isdigit():
            return None
        # Snowflakes have 7-21 decimal digits; checked only in debug builds.
        if __debug__ and not (7 <= len(value) <= 21):
            raise AssertionError(f"An `id` was given as `str`, but it's value is out of 64uint range, got {value!r}.")
        value = int(value)
    else:
        return None
    # Final range check applies to both int and converted-str inputs.
    if __debug__ and not (0 <= value <= (1 << 64) - 1):
        raise AssertionError(f"An `id` was given as `str`, but it's value is out of 64uint range, got {value!r}.")
    return value
def countingSort(A, k):
    """Counting sort: bucket each value of A (0 <= value < k), then concatenate."""
    buckets = [[] for _ in range(k)]
    for value in A:
        buckets[value].append(value)
    result = []
    for bucket in buckets:
        result.extend(bucket)
    return result
def _add_index(i, bands):
"""Appends the index number `i` at the end of each element of `bands`."""
return [f'{band}_{i}' for band in bands] |
def get_intercept(x, y, slope):
    """Compute the line intercept from the first (x, y) sample and *slope*."""
    x0, y0 = x[0], y[0]
    return y0 - slope * x0
def green(msg: str) -> str:
    """Wrap *msg* in rich markup so it renders green."""
    return "[green]{}[/green]".format(msg)
def read_linked_entities(data):
    """Extract linked-entity IDs and names from a Wikidata SPARQL result dict.

    Args:
        data (dict): JSON result with ``results.bindings`` entries.
    Returns:
        tuple: (list of entity IDs, list of entity names); both empty when
        *data* is an empty dict.
    """
    entity_ids = []
    entity_names = []
    if data == {}:
        return entity_ids, entity_names
    for binding in data.get("results").get("bindings"):
        entity_url = binding.get("valUrl").get("value")
        # Strip the Wikidata URL prefix to keep just the Q-id.
        entity_ids.append(entity_url.replace("http://www.wikidata.org/entity/", ""))
        entity_names.append(binding.get("valLabel").get("value"))
    return entity_ids, entity_names
def getPositions(data):
    """
    Return the dance positions predicted by the neural-network model.

    :param data: JSON object containing a ``pos`` entry
    :return: array of dance positions
    """
    return data['pos']
def flatten_dxarray(listish):
    """Ensure an array-of-refs is not an array-of-arrays.

    Specifically, a list of job outputs might resolve to a list-of-lists if
    each job output is itself a list -- but it's hard to know or handle that
    until the blocking job completes.
    Use this function within the subjob that takes an array as input.

    A non-list input is wrapped in a single-element list; nested lists are
    flattened recursively.
    """
    if not isinstance(listish, list):
        return [listish]
    out = []
    for elem in listish:
        # Bug fix: the original tested ``isinstance(listish, list)`` (always
        # true at this point); test the *element* so scalars are appended
        # directly instead of taking a redundant recursive round-trip.
        if isinstance(elem, list):
            out.extend(flatten_dxarray(elem))
        else:
            out.append(elem)
    return out
def _flatten_fontspec(fontspecs):
"""
Flatten a list of fontspec elements into a mapping dict
"""
fontspec_map = dict()
for fontspec in fontspecs:
fs_attrs = dict()
fs_attrs['size'] = fontspec.get('size')
fs_attrs['family'] = fontspec.get('family')
fs_attrs['color'] = fontspec.get('color')
fontspec_map[fontspec.get('id')] = fs_attrs
return fontspec_map |
def process_input(input_string, max_depth):
    """
    Clean the raw input, split it into a list, and track the longest list
    seen so far (per feature type).

    Returns the (possibly empty) list and the updated maximum depth.
    """
    # Strip quotes, collapse ", " into "," and trim surrounding whitespace.
    cleaned = input_string.replace('"', '').replace(', ', ',').strip()
    parts = cleaned.split(',') if cleaned else []
    # Grow max_depth when this list is the longest seen so far.
    return parts, max(max_depth, len(parts))
def write_image(filename, color_array):
    """
    Write `color_array` out to `filename` as a 24-bit uncompressed BMP.

    `color_array` is indexed as color_array[row][col]; each pixel is an
    iterable of byte-string color components that are joined verbatim
    (presumably 3 one-byte BGR components -- TODO confirm with the reader
    side of this module).

    NOTE(review): rows are emitted without 4-byte alignment padding; the
    two extra bytes appended at the end (and counted in image_size) look
    like an ad-hoc substitute -- verify against images whose row size is
    not a multiple of 4.
    """
    def break_array(color_array):
        """
        Flatten the 2D pixel array into a flat list of per-pixel byte strings,
        row by row.
        """
        line = []
        for y_index in range(len(color_array)):
            for x_index in range(len(color_array[0])):
                line.append(b''.join(color_array[y_index][x_index]))
        return line
    def bmpInfo_gen(color_array):
        """
        Build the BMP file header (BITMAPFILEHEADER) and info header
        (BITMAPINFOHEADER) fields as a list of little-endian byte strings.
        """
        # Pixel payload size: width * height * 3 bytes, plus the 2 trailing
        # padding bytes appended below.
        image_size = (len(color_array) * len(color_array[0]) * 3) + 2
        zero = b'\x00\x00\x00\x00'
        bfType = b'\x42\x4D'  # magic "BM"
        bfReserved1 = b'\x00\x00'
        bfReserved2 = b'\x00\x00'
        bfOffBits = b'\x36\x00\x00\x00'  # pixel data starts at byte 54
        biSize = b'\x28\x00\x00\x00'  # info header is 40 bytes
        biWidth = len(color_array[0]).to_bytes(4, 'little')
        biHeight = len(color_array).to_bytes(4, 'little')
        biPlanes = b'\x01\x00'
        biBitCount = b'\x18\x00'  # 24 bits per pixel
        biCompression = zero  # BI_RGB (uncompressed)
        biSizeImage = image_size.to_bytes(4, 'little')
        biXPelsPerMeter = b'\x12\x0B\x00\x00'  # 2834 px/m ~= 72 DPI
        biYPelsPerMeter = b'\x12\x0B\x00\x00'
        biClrUsed = zero
        biClrImportant = zero
        # Total file size: 54 header bytes + pixel bytes (image_size minus
        # the 2 padding bytes already included in it).
        bfSize = (54 + image_size - 2).to_bytes(4, 'little')
        return [bfType, bfSize, bfReserved1, bfReserved2, bfOffBits, biSize, biWidth, biHeight, biPlanes, biBitCount, biCompression, \
            biSizeImage, biXPelsPerMeter, biYPelsPerMeter, biClrUsed, biClrImportant]
    # Headers, then pixel rows, then two trailing padding bytes.
    toWrite_array = bmpInfo_gen(color_array) + break_array(color_array) + [b'\x00', b'\x00']
    toWrite_file = open(filename, "wb")
    # range() is evaluated once, so popping inside the loop still writes
    # every element exactly once (front-to-back).
    for write_index in range(len(toWrite_array)):
        toWrite_file.write(toWrite_array.pop(0))
    toWrite_file.close()
def flag(s):
    """Turn 'flag_name' into `--flag-name`."""
    return '--{}'.format(str(s).replace('_', '-'))
def rescale(data, new_min=0, new_max=1, data_min=None, data_max=None):
    """Rescale/normalize *data* into the range ``[new_min, new_max]``.

    If ``data_min``/``data_max`` are given they override the observed
    min/max of *data* (unlike ``scipy.MinMaxScaler``).

    Args:
        data (numpy.ndarray): 1d scores vector or 2d score matrix (users x items).
        new_min (int|float): minimum of the new scale.
        new_max (int|float): maximum of the new scale.
        data_min (None|number): old minimum; inferred from *data* when None.
        data_max (None|number): old maximum; inferred from *data* when None.
    Returns:
        numpy.ndarray: the rescaled data.
    """
    old_min = data.min() if data_min is None else data_min
    old_max = data.max() if data_max is None else data_max
    normalized = (data - old_min) / (old_max - old_min)
    return normalized * (new_max - new_min) + new_min
def convert_attr_name(s):
    """Turn an underscore_name into space-separated Capitalized Words."""
    words = s.strip().split("_")
    return " ".join(word.capitalize() for word in words)
def build_image_args(version):
    """
    Build the list of ``--build-arg`` arguments for ``docker build``.

    Arguments:
    - version: a str (used as the PRODUCT build arg) or a dict mapping
      build-arg names to values.

    Raises:
        ValueError: for any other type.
    """
    if isinstance(version, dict):
        args = []
        for key, val in version.items():
            args += ['--build-arg', f'{key.upper()}={val}']
        return args
    if isinstance(version, str):
        return ['--build-arg', f'PRODUCT={version}']
    raise ValueError(f'Unsupported version object: {version}')
def format_lines(matched_lines, files, flags):
    """
    Format search matches into a single result string.

    When more than one file was searched, each line is prefixed with its
    file name; with the ``-n`` flag the line number is prefixed as well.

    :param matched_lines: tuples of (file_name, line_number, line)
    :param files: list of files that were searched
    :param flags: search flags (``-n`` prints line numbers)
    :return: formatted search result
    :rtype: str
    """
    show_file = len(files) > 1
    show_number = "-n" in flags
    pieces = []
    for file_name, line_number, line in matched_lines:
        prefix = f"{file_name}:" if show_file else ""
        if show_number:
            prefix += f"{line_number}:"
        pieces.append(prefix + line)
    return "".join(pieces)
def get_percentages(MAT, INS, DEL, numref):
    """Convert match/insertion/deletion counts into percentages of *numref*."""
    denom = float(numref)
    return [float(count) / denom * 100.0 for count in (MAT, INS, DEL)]
def _cid2c(cid):
"""Gets just the component portion of a cid string
e.g. main_page/axis_controls|x_column.value => x_column
"""
return cid.split("|")[-1].split(".")[0] |
def MostSimilar(caseAttrib, queryValue, weight):
    """
    Build an ES ``match`` query named "mostsimilar" (default similarity for
    strings, exact match for other types) boosted by *weight*.
    """
    clause = {
        "query": queryValue,
        "boost": weight,
        "_name": "mostsimilar",
    }
    return {"match": {caseAttrib: clause}}
def kpoint_path(ikpt):
    """Build the HDF5 path for a kpoint, e.g. ``electrons/kpoint_0``.

    Args:
        ikpt (int): kpoint index
    Returns:
        str: path within the HDF5 file
    """
    return 'electrons/kpoint_%d' % ikpt
def regex_escape(val):
    """Jinja filter: double every backslash (helps with Windows file paths)."""
    return val.replace('\\', '\\\\')
def split_name(name):
    """Split a GEDCOM-style full name into its pieces at the slashes.

    The text between slashes is the surname; text outside the slashes is
    given name, e.g.::

        "First /Last/"      -> ("First", "Last", "")
        "/Last/ First"      -> ("", "Last", "First")
        "First /Last/ Jr."  -> ("First", "Last", "Jr.")
        "First Jr."         -> ("First Jr.", "", "")

    :param str name: Full name string.
    :return: 3-tuple ``(given1, surname, given2)``; missing pieces are
        empty strings.
    """
    before, _, after_first_slash = name.partition("/")
    between, _, after_second_slash = after_first_slash.partition("/")
    return before.strip(), between.strip(), after_second_slash.strip()
def parse_filename(filename: str, pat: str) -> str:
    """Insert ``-<pat>`` before the last extension of *filename*.

    Args:
        filename (str): file to process
        pat (str): pattern to add
    Returns:
        str: generated filename, e.g. ``a.txt`` + ``x`` -> ``a-x.txt``
    """
    # Naive split at the last dot (mirrors the original behaviour for
    # names without an extension).
    dot = filename.rfind(".")
    return f"{filename[:dot]}-{pat}{filename[dot:]}"
def _to_packing_fmt(dds_types):
""" Calculate packing format for struct.pack
Args:
dds_types(list): list of member types
Returns:
packing format string
"""
result = []
for val in dds_types:
if 'Short' == val:
result.append('h')
elif 'UShort' == val:
result.append('H')
elif 'Boolean' == val:
result.append('?')
elif 'Long' == val:
result.append('i')
elif 'ULong' == val:
result.append('I')
elif 'LongLong' == val:
result.append('q')
elif 'ULongLong' == val:
result.append('Q')
elif 'Float' == val:
result.append('f')
elif 'Double' == val:
result.append('d')
elif 'Char' == val:
result.append('c')
elif 'Octet' == val:
result.append('B')
elif 'String' == val:
result.append('P')
elif 'Enum' == val:
result.append('i')
elif 'Sequence' == val:
result.append('iiP?')
return ' '.join(result) |
def _generate_widget_parameters(bot_name, user_photo, size, corner_radius, access_write):
"""
Generate common widget embed code parameters.
"""
user_photo_bool = str(user_photo).lower()
data_telegram_login = 'data-telegram-login="{}" '.format(bot_name)
data_size = 'data-size="{}" '.format(size)
data_userpic = 'data-userpic="{}" '.format(user_photo_bool) if not user_photo else ''
data_radius = 'data-radius="{}" '.format(corner_radius) if corner_radius else ''
data_request_access = 'data-request-access="write"' if access_write else ''
return data_telegram_login, data_size, data_userpic, data_radius, data_request_access |
def _is_path_reversed(path, reversed_values):
"""
Determine if the order of nodes in a path should be reversed.
Parameters
----------
path : dict
a path's tag:value attribute data
reversed_values : set
the values OSM uses in its 'oneway' tag to denote travel can only
occur in the opposite direction of the node order
Returns
-------
bool
"""
if "oneway" in path and path["oneway"] in reversed_values:
return True
else:
return False |
def split_message(message):
    """Split *message* into chunks of at most 640 characters.

    Facebook imposes a 640-character text limit; splitting happens on
    newline boundaries and assumes no single line exceeds 640 characters.
    """
    if len(message) <= 640:
        return [message]
    chunks = []
    buffer = ""
    # Re-attach the delimiter so each piece keeps its trailing newline
    # (note: this also appends one to the final piece, as before).
    for piece in (part + "\n" for part in message.split("\n")):
        if len(buffer) + len(piece) <= 640:
            buffer += piece
        else:
            chunks.append(buffer)
            buffer = piece
    if buffer:
        chunks.append(buffer)
    return chunks
def _split_two_q_clifford_idx(idx: int):
"""Decompose the index for two-qubit Cliffords."""
idx_0 = int(idx / 480)
idx_1 = int((idx % 480) * 0.05)
idx_2 = idx - idx_0 * 480 - idx_1 * 20
return (idx_0, idx_1, idx_2) |
def _get_asl_pipeline(aml_model):
""" Extract the pipeline object from an autosklearn_optimizer model.
"""
# this is from the old development branch
# the model *contained* a pipeline
#aml_model_pipeline = aml_model.pipeline_
# this is the updated 0.1.3 branch
# that is, the model simply *is* a pipeline now
asl_pipeline = aml_model
return asl_pipeline |
def build_menu(buttons, n_cols, header_buttons=None, footer_buttons=None):
    """Arrange *buttons* into rows of *n_cols*, with optional header/footer rows.

    :return: list of button rows
    """
    rows = [buttons[start:start + n_cols] for start in range(0, len(buttons), n_cols)]
    if header_buttons:
        rows = [[header_buttons]] + rows
    if footer_buttons:
        rows = rows + [[footer_buttons]]
    return rows
def precision(xs, ys):
    """Fraction of elements of `xs` that also appear in `ys` (0. when xs is empty)."""
    if not xs:
        return 0.
    hits = sum(1 for x in xs if x in ys)
    return hits / float(len(xs))
def get_submod(channel):
    """
    Convert a K2 channel number to its module/submodule pair.

    returns
        mod: 2-24
        submod: 1-4
    """
    # The module index is channel // 4 plus a correction that accounts for
    # the gaps in the focal-plane numbering.
    correction = 2
    if channel % 4 == 0:
        correction -= 1
    if channel > 12:
        correction += 1
    if channel > 72:
        correction += 1
    mod = channel // 4 + correction
    submod = channel - (mod - correction) * 4
    # A remainder of 0 means the 4th submodule.
    return (mod, 4) if submod == 0 else (mod, submod)
def search4vowels(phrase:str) ->set:
    """Return the set of vowels found in the supplied word."""
    return {ch for ch in set(phrase) if ch in 'aeiou'}
def parse_cigar(cigar):
    """Parse a CIGAR string into {operation: total base count}.

    A CIGAR string is a run-length encoding of a sequence alignment, e.g.
    ``2S20M1I2M5D``: 2 bases "S"oft-clipped from the 5' end, 20 bases
    "M"atched against the reference (aligned, not necessarily identical),
    1 "I"nserted base (in the read, not the reference), and 5 "D"eleted
    bases (in the reference, not the read). Counts for repeated operation
    letters are summed.
    """
    counts = {}
    pending_digits = []
    for ch in cigar:
        try:
            # int() both validates the digit and normalizes it to ASCII.
            pending_digits.append(str(int(ch)))
        except ValueError:
            # Non-digit: it's an operation letter; flush the run length.
            counts[ch] = counts.get(ch, 0) + int("".join(pending_digits))
            pending_digits = []
    return counts
def linear_gain(db: float) -> float:
    """Convert a decibel value to the equivalent linear gain factor."""
    exponent = db / 20
    return 10 ** exponent
def is_not_empty_value(value):
    """
    True unless *value* is one of the "empty" responses: "", [], or [""].
    (Demisto recommends returning None instead of empty strings/lists.)
    """
    return value not in ("", [], [""])
def make_param_dict(fit_param, *args):
    """Helper for IB.py: collect the named keys of *fit_param* that are set
    (present and not None) into a keyword dict for IB_single.py."""
    return {arg: fit_param[arg] for arg in args if fit_param.get(arg) is not None}
def make_key(*criteria):
    """Join the criteria into a colon-separated string key (None -> '')."""
    return ':'.join(str(criterion or '') for criterion in criteria)
def isNumber (any):
    """
    Check if the data type is exactly int or float.

    Uses ``type`` (not ``isinstance``) on purpose, so bool — a subclass of
    int — is NOT considered a number.
    """
    return type (any) in (int, float)
def convert_tokens_to_ids(vocab, tokens):
    """Map each token to its id via *vocab* (raises KeyError for unknown tokens)."""
    return [vocab[token] for token in tokens]
def is_numeric(elem):
    """
    Check whether *elem* is numeric, by trying to add 1 to it.

    Duck-typed: anything supporting ``+ 1`` (int, float, bool, numpy
    scalars, ...) counts as numeric.
    """
    try:
        elem + 1  # result discarded; only whether the operation works matters
    except TypeError:
        return False
    return True
def get_version_bump(diff):
    """Extract the (old, new) version strings from a DESCRIPTION-file diff.

    Returns (None, None) when the version did not change, or when either
    side of the change is absent (e.g. a brand-new package push).
    """
    old_versions = [line.replace("-Version:", "") for line in diff if line.startswith("-Version")]
    new_versions = [line.replace("+Version:", "") for line in diff if line.startswith("+Version")]
    if old_versions == new_versions or not old_versions or not new_versions:
        return None, None
    return old_versions[0].strip(), new_versions[0].strip()
def get_exploration_memcache_key(exploration_id, version=None):
    """Return the memcache key for an exploration (or a specific version of it).

    Args:
        exploration_id: str. Id of the exploration.
        version: int or None. Optional version number (note: a falsy
            version such as 0 is treated like None, as before).

    Returns:
        str. Memcache key for the exploration.
    """
    if not version:
        return 'exploration:%s' % exploration_id
    return 'exploration-version:%s:%s' % (exploration_id, version)
def remove_stop_words(content, stopwords):
    """Remove stopwords from an article's tokens.

    :param content: tokens of an article ([]str)
    :param stopwords: the stopword list ([]str)
    :return: tokens that are not stopwords ([]str)
    """
    return list(filter(lambda token: token not in stopwords, content))
def moeda(n=0, moeda='R$'):
    """
    Format *n* as a currency string with a comma decimal separator.

    :param n: monetary value
    :param moeda: currency symbol prefix
    :return: formatted value, e.g. ``R$3,50``
    """
    # NOTE: the replace runs over the whole string, so a '.' inside the
    # currency symbol is converted too (kept for compatibility).
    texto = '{}{:.2f}'.format(moeda, float(n))
    return texto.replace('.', ',')
def _process_chunk(fn, chunk):
""" Processes a chunk of an iterable passed to map.
Runs the function passed to map() on a chunk of the
iterable passed to map.
This function is run in a separate process.
"""
return [fn(*args) for args in chunk] |
def mul(m, n):
    """Multiply *m* by a non-negative integer *n* via repeated addition.

    The original recursed with base case ``n == 1`` only, so ``n <= 0``
    recursed forever (RecursionError) and large ``n`` overflowed the call
    stack; this iterative form returns 0 for ``n <= 0`` and handles
    arbitrarily large ``n``.
    """
    if n <= 0:
        return 0
    total = m
    for _ in range(n - 1):
        total += m
    return total
def double_sort(pathways_dictionary):
    """
    Return the keys of the dictionary sorted by value, largest first, with
    ties broken alphabetically by key.
    """
    sorted_keys=[]
    # Value of the previous key; used to detect runs of equal values.
    prior_value=""
    # Buffer of keys sharing the current value, re-sorted alphabetically
    # before being flushed into the output.
    store=[]
    for pathway in sorted(pathways_dictionary, key=pathways_dictionary.get, reverse=True):
        if prior_value == pathways_dictionary[pathway]:
            # Tie with the previous key: pull that key back off the output
            # (only once per run) so the whole run can be re-sorted.
            if not store:
                store.append(sorted_keys.pop())
            store.append(pathway)
        else:
            # Value changed: flush any buffered tie-run alphabetically.
            if store:
                sorted_keys+=sorted(store)
                store=[]
            prior_value=pathways_dictionary[pathway]
            sorted_keys.append(pathway)
    # Flush a tie-run that extends to the end of the iteration.
    if store:
        sorted_keys+=sorted(store)
    return sorted_keys
def pascal_to_camelcase(argument):
    """Convert a PascalCase identifier to its camelCase equivalent."""
    first, rest = argument[0], argument[1:]
    return first.lower() + rest
def SubStringExistsIn(substring_list, string):
    """Return True if any substring in the list occurs in |string|."""
    return any(substring in string for substring in substring_list)
def average(sequence):
    """Calculate the mean of a sequence of values (e.g. read depths).

    Prefers the fast ``numpy.ndarray.mean`` when available and falls back
    to the plain-Python mean for regular lists.

    Args:
        sequence (list): ``numpy.array`` or list of values
    Returns:
        float: calculated average value
    """
    try:
        mean_method = sequence.mean
    except AttributeError:
        # Plain sequence without a .mean() method (e.g. list).
        return sum(sequence) / len(sequence)
    return mean_method()
def process_coco_keypoints(keypoints):
    """Convert a flat COCO (x, y, v) keypoint list into [[x, y], ...] pairs.

    Keypoints with visibility v == 0 (not labelled) are dropped. 0.5 is
    added to each coordinate, per
    https://github.com/facebookresearch/detectron2/pull/175#issuecomment-551202163

    Arguments:
        keypoints (list): flat list of x, y, v triples
    Returns:
        list: keypoints as [[x1, y1], [x2, y2], ...]
    """
    xs = keypoints[0::3]
    ys = keypoints[1::3]
    vs = keypoints[2::3]
    return [[x + 0.5, y + 0.5] for x, y, v in zip(xs, ys, vs) if v != 0]
def shunt(infix):
    """Return the infix regular expression in postfix (shunting-yard algorithm).

    Operators handled (by descending precedence): ``*`` ``+`` ``?`` ``$``
    ``.`` (concatenation) ``|``; parentheses group subexpressions. All
    other characters are treated as literals and copied straight through.
    """
    #Convert input to a stack-ish list (reversed so pop() takes from the front).
    infix = list(infix)[::-1]
    #Operator Stack.
    opers = []
    #Postfix regular expression (output list).
    postfix = []
    #Operator Precedence. Brackets get the lowest values so they never get
    #popped by the precedence comparison below — only by a matching ')'.
    prec = {'*': 80,'+': 70,'?':60, '$':50,'.': 40, '|': 30, ')': 20, '(': 10}
    # Loop through input one character at a time
    while infix:
        # Pop Character from the input
        c = infix.pop()
        # Decide What to do With the Character
        if c == '(':
            # Push An Open Bracket to the opers stack
            opers.append(c)
        elif c == ')':
            # Pop the operators stack until the matching '(' is on top.
            while opers[-1] != '(':
                postfix.append(opers.pop())
            # Get rid of the '('.
            opers.pop()
        elif c in prec:
            # Pop any operators with strictly higher precedence to the output.
            # (Equal precedence stays, so same-level operators stack up.)
            while opers and prec[c] < prec[opers[-1]]:
                postfix.append(opers.pop())
            # Push C to Operator Stack
            opers.append(c)
        else:
            # Literal character: goes straight to the output.
            postfix.append(c)
    # Pop all remaining operators to the output.
    while opers:
        postfix.append(opers.pop())
    # Convert output list to string.
    return ''.join(postfix)
def vcgencmd(*args):
    """
    Build the vcgencmd command-line tuple from the given arguments.
    """
    return ('vcgencmd', *args)
def generate_2d_array(width, height, value=None):
    """Build a height x width 2D list with every cell set to *value*.

    Each row is a distinct list object (safe to mutate independently).
    """
    return [[value] * width for _ in range(height)]
def kind(n, ranks):
    """Return the first rank that this hand has exactly n-of-a-kind of,
    or None when no rank occurs exactly n times."""
    return next((rank for rank in ranks if ranks.count(rank) == n), None)
def sample_cloudwatch_events_rule(rec):
    """Match any activity on EC2 (records whose source is aws.ec2)."""
    return rec['source'] == 'aws.ec2'
def iso_string_to_sql_utcdate_sqlite(x: str) -> str:
    """
    Build SQLite SQL converting expression *x* (e.g. a column name) to a
    ``DATE`` in UTC.
    """
    return "DATE({})".format(x)
def update_unq_date_counter(counter:dict, name:str, constraint_id:str, date:str)->dict:
    """Increment the per-month unique-occurrence counter for a constraint.

    ``counter[name][date]`` holds ``{"count": int, "set": set of ids}``;
    the count goes up only the first time *constraint_id* is seen for
    that (name, date) bucket.

    Args:
        counter (dict): nested counter structure described above
        name (str): constraint name, e.g. "lesson", "line", or "speaker"
        constraint_id (str): id for the constraint given by `name`
        date (str): year-month bucket in YYYY-MM format, e.g. "2019-08"

    Returns:
        dict: the updated counter
    """
    # Lazily create the month bucket and its id-set.
    bucket = counter[name].setdefault(date, dict())
    seen_ids = bucket.setdefault("set", set())
    # Only a previously unseen id bumps the count.
    if constraint_id not in seen_ids:
        bucket["count"] = bucket.get("count", 0) + 1
        seen_ids.add(constraint_id)
    return counter
def bytes_(s, encoding = 'utf-8', errors = 'strict'):
    """Coerce *s* to bytes where possible.

    str and int values are encoded with *encoding*/*errors*; bytes pass
    through unchanged; anything else is returned as-is.
    """
    if isinstance(s, bytes):
        return s
    if isinstance(s, int):
        s = str(s)
    return s.encode(encoding, errors) if isinstance(s, str) else s
def get_first_item_that_startswith(items, starts_with):
    """Get the first item in the list that starts with a specific string.

    Parameters
    -----------
    items: list of str
        Candidate strings.
    starts_with: str
        Prefix to search for.

    Returns
    -------
    str
        First item that starts with the prefix. Raises IndexError when no
        item matches (same as the original's empty-list indexing).
    """
    for item in items:
        if item.startswith(starts_with):
            return item
    raise IndexError('list index out of range')
def tokenize_metric_path(path):
    """
    Tokenize a JMX metric path for InfluxDB into (metric_name, value_prefix, tags).

    Example:
        path = "kafka.server:delayedOperation=Fetch,name=NumDelayedOperations,type=DelayedOperationPurgatory"
    yields:
        - metric_name = kafka.server
        - value_prefix = DelayedOperationPurgatory (the 'type' token)
        - tags = {'delayedOperation': 'Fetch', 'name': 'NumDelayedOperations'}
    """
    metric_name, attr_string = path.split(":")
    tags = {}
    for token in attr_string.split(','):
        parts = token.split("=")
        tags[parts[0]] = parts[1]
    # 'type' becomes the value prefix; everything else stays a tag.
    value_prefix = tags.pop('type')
    return metric_name, value_prefix, tags
def distribute_maciej( lightMasterWeightX, lightMasterWeightY, boldMasterWeightX, boldMasterWeightY, interpolationWeightX ):
    """
    Weight-distribution interpolation, algorithm by Maciej Ratajski
    (http://jsfiddle.net/Dm2Zk/1/).

    Returns the interpolated X weight rounded to one decimal place.
    """
    # Normalized position of the requested weight between the two masters.
    t_x = (interpolationWeightX - lightMasterWeightX) / (boldMasterWeightX - lightMasterWeightX)
    # Y/X aspect of each master; blend them linearly and scale by the input.
    light_ratio = lightMasterWeightY / lightMasterWeightX
    bold_ratio = boldMasterWeightY / boldMasterWeightX
    weight_y = ((1 - t_x) * (light_ratio - bold_ratio) + bold_ratio) * interpolationWeightX
    # Map the resulting Y weight back onto the X axis.
    t_y = (weight_y - lightMasterWeightY) / (boldMasterWeightY - lightMasterWeightY)
    return round((boldMasterWeightX - lightMasterWeightX) * t_y + lightMasterWeightX, 1)
def hasAdminAccess(user):
    """
    User is Superuser or member of the admin group.
    Return: {bool}
    """
    try:
        return user.is_superuser or user.groups.filter(name='admins').exists()
    except Exception:
        # A bare ``except:`` also swallowed SystemExit/KeyboardInterrupt;
        # restrict to Exception. Anonymous/invalid users resolve to False.
        return False
def changeColumnName(name) -> str:
    """Map a raw dataframe column name to its canonical form.

    Known names use a fixed mapping; anything else is lowercased with
    spaces removed.
    """
    renames = {
        "FOREIGN NAME": "childname",
        "INVENTORY UOM": "childuom",
        "Item MRP": "mrp",
        "Last Purchase Price": "childrate",
    }
    fallback = name.lower().replace(" ", "")
    return renames.get(name, fallback)
def assert_group_keys(group):
    """Check whether a testgroup can be migrated.

    A group is migratable only when every key it contains is in the
    allowed set.
    """
    allowed = {"name", "gcs_prefix", "alert_stale_results_hours",
               "num_failures_to_alert", "num_columns_recent"}
    return all(key in allowed for key in group)
def sjuncChunk(key, chunk):
    """
    Parse a Super Junction (SJUNC) chunk into a field-name -> token dict.

    Token 0 is the record tag; tokens 1-9 map positionally to the fields
    below (an IndexError is raised for short records, as before).
    """
    tokens = chunk[0].strip().split()
    field_names = ('sjuncNumber', 'groundSurfaceElev', 'invertElev',
                   'manholeSA', 'inletCode', 'linkOrCellI', 'nodeOrCellJ',
                   'weirSideLength', 'orificeDiameter')
    return {name: tokens[position] for position, name in enumerate(field_names, start=1)}
def sol(s, n, lookup):
    """
    Count n-digit numbers (no leading zero) whose digits sum to s,
    modulo 1000000007, memoized via the 2D *lookup* table.

    Bug fix: the base cases are now checked BEFORE touching the table.
    The original read ``lookup[s][n]`` first, so a recursive call with
    ``n == -1`` read ``lookup[s][-1]`` — Python negative indexing aliased
    the *last* column and could return a stale cached value instead of 0.
    Also uses ``is not None`` rather than ``!= None``.
    """
    if n == 0 and s == 0:
        return 1
    # Leading zeroes are not counted because s reaches zero before n does.
    if n < 0 or s <= 0:
        return 0
    if lookup[s][n] is not None:
        return lookup[s][n]
    total = 0
    for digit in range(10):
        if digit <= s:
            total += sol(s - digit, n - 1, lookup)
    lookup[s][n] = total % 1000000007
    return lookup[s][n]
def formatHelp(route):
    """Return the ``__doc__`` text of the module-level function named *route*,
    split into stripped, non-empty lines for easier viewing.

    :param route: name of a function in this module's global namespace
    :return: list of cleaned docstring lines, or the string
        "No Help Provided." when the route is unknown or undocumented
    """
    target = globals().get(str(route))
    doc = getattr(target, "__doc__", None)
    # Bug fix: an unknown route resolved to None, and ``None.__doc__`` is
    # NoneType's own (non-empty) docstring, so the original returned that
    # instead of the fallback message.
    if target is None or doc is None:
        return "No Help Provided."
    stripped = [line.rstrip() for line in doc.split("\n")]
    return [line for line in stripped if line]
def lower(text):
    """Tag/function/filter converting `text` to lowercase (inner tag names and attributes included)."""
    return text.lower()
def no(seq, pred=None):
    """Is ``pred(elm)`` false for all elements?

    With the default predicate, this returns true if all elements are false.
    >>> no(["A", "B"])
    False
    >>> no(["A", ""])
    False
    >>> no(["", ""])
    True
    >>> no(["A", "B", "C"], lambda x: x <= "C")
    False
    >>> no(["X", "Y", "Z"], lambda x: x <="C")
    True
    From recipe in itertools docs.
    """
    # filter keeps the elements satisfying pred (truthy ones when pred is
    # None); the first survivor disproves "no".
    return next((False for _ in filter(pred, seq)), True)
def pow2_ru(n):
    """Given an integer >= 1, return the next power of 2 >= n.

    Classic bit-smearing trick: propagate the highest set bit of n-1 into
    every lower position, then add 1.
    """
    assert n <= 2**31
    n -= 1
    # Doubling shifts cover all 32 bits of the (asserted) input range.
    for shift in (1, 2, 4, 8, 16):
        n |= n >> shift
    return n + 1
def create_table_name(
    table_type: str, command_id: str, context_id: str, node_id: str
) -> str:
    """
    Build a lower-cased table name of the form
    ``<tableType>_<commandId>_<contextId>_<nodeId>``.

    Raises:
        TypeError: when *table_type* is not one of table/view/merge.
    """
    if table_type not in ("table", "view", "merge"):
        raise TypeError(f"Table type is not acceptable: {table_type} .")
    parts = (table_type, command_id, context_id, node_id)
    return "_".join(parts).lower()
def deep_get(target_dict, key_list):
    """
    Walk nested dict keys, returning None as soon as a key is missing.

    Example: target_dict={a: {b: 5}}; key_list=[a,b] returns 5; both
    key_list=[a,b,c] and key_list=[f,h] return None.

    :param target_dict: dictionary to read
    :param key_list: keys to follow, outermost first
    :return: the nested value when present, None otherwise
    """
    current = target_dict
    for key in key_list:
        if not isinstance(current, dict) or key not in current:
            return None
        current = current[key]
    return current
def optional_id(record, context):
    """
    Create an optional id for mirbase entries, based on the record's name
    (the miRBase gene name).

    Returns the name when the record has a description and a space-free
    name, or — unconditionally — when the database is MIRBASE; None
    otherwise.
    """
    has_simple_name = (
        "description" in record
        and "name" in record
        and " " not in record["name"]
    )
    if has_simple_name or context.database == "MIRBASE":
        return record["name"]
    return None
def get_ip_v4_class(ip_int):
    """Return the classful-addressing letter (A-E) for a 32-bit IPv4 integer."""
    # Each class is identified by its leading bit pattern.
    classes = (
        ("A", 0x80000000, 0x00000000),
        ("B", 0xC0000000, 0x80000000),
        ("C", 0xE0000000, 0xC0000000),
        ("D", 0xF0000000, 0xE0000000),
    )
    for letter, mask, pattern in classes:
        if (ip_int & mask) == pattern:
            return letter
    return "E"
def map_inv(succ):
    """Invert a multimap.

    Given a successor edge map, produce the corresponding predecessor
    edge map (every node of *succ* appears as a key, even with no
    predecessors).
    """
    inverted = {node: [] for node in succ}
    for source, targets in succ.items():
        for target in targets:
            inverted[target].append(source)
    return inverted
def unquote_colors(context):
    """
    URL-unquote hex color values in *context* in place (``%23rrggbb`` -> ``#rrggbb``).

    Non-string values are skipped: the original called ``len(v)`` on every
    value and raised TypeError for ints, None, etc.
    """
    for key, value in context.items():
        # Only URL-quoted "#rrggbb" values: "%23" + 6 hex digits = 9 chars.
        if isinstance(value, str) and len(value) == 9 and value.startswith("%23"):
            context[key] = "#" + value[3:]
    return context
def get_surname(author_name):
    """Return the substring starting two positions past the last period
    in *author_name* (e.g. "A. Smith" -> "Smith")."""
    last_period = author_name.rfind('.')
    return author_name[last_period + 2:]
def apply_args_and_kwargs(fn, args, kwargs):
    """Invoke ``fn``, unpacking ``args`` positionally and ``kwargs`` by keyword.

    See https://stackoverflow.com/a/53173433/13747259
    """
    result = fn(*args, **kwargs)
    return result
def fizzbuzz(num):
    """Test divisibility."""
    divisible_by_3 = num % 3 == 0
    divisible_by_5 = num % 5 == 0
    if divisible_by_3 and divisible_by_5:
        return "Fizz Buzz"
    if divisible_by_3:
        return "Fizz"
    if divisible_by_5:
        return "Buzz"
    # Not divisible by either: echo the number back unchanged.
    return num
def remove_leading_trailing_whitespace_lines(src):
    """
    Remove any leading or trailing whitespace lines.

    Parameters
    ----------
    src : str
        Input code.

    Returns
    -------
    str
        Code with leading/trailing whitespace lines removed; an empty
        string when ``src`` contains no non-whitespace lines.
    """
    lines = src.splitlines()
    non_whitespace_lines = [i for i, l in enumerate(lines)
                            if l and not l.isspace()]
    if not non_whitespace_lines:
        # Previous version crashed with ValueError: min() of an empty
        # sequence when the input was empty or all whitespace.
        return ''
    imin = non_whitespace_lines[0]
    imax = non_whitespace_lines[-1]
    return '\n'.join(lines[imin: imax + 1])
def _error_message(failed):
"""Construct error message for failed scanners.
Args:
failed (list): names of scanners that failed
Returns:
str: error message detailing the scanners that failed
"""
return 'Scanner(s) with errors: %s' % ', '.join(failed) |
def vector_subtract(v, w):
    """subtracts corresponding elements"""
    difference = []
    for left, right in zip(v, w):
        difference.append(left - right)
    return difference
def Madd(M1, M2):
    """Matrix addition (elementwise)"""
    result = []
    for row1, row2 in zip(M1, M2):
        result.append([x + y for x, y in zip(row1, row2)])
    return result
def common_head(list):
    """Return common head of all strings in `list`.

    >>> common_head ([])
    ''
    >>> common_head (["a"])
    'a'
    >>> common_head (["a", "b"])
    ''
    >>> common_head (["ab", "ac", "b"])
    ''
    >>> common_head (["ab", "ac", "ab"])
    'a'
    >>> common_head (["abc", "abcde", "abcdxy"])
    'abc'
    """
    result = ""
    strings = sorted(list)
    if strings:
        # After sorting, the common prefix of ALL strings equals the common
        # prefix of just the lexicographic minimum and maximum.
        lo, hi = strings[0], strings[-1]
        length = 0
        for a, b in zip(lo, hi):
            if a != b:
                break
            length += 1
        # Counting matching positions directly replaces the previous "\0"
        # sentinel, which truncated too early whenever both boundary strings
        # contained a real NUL character at the same position.
        result = lo[:length]
    return result
def like_suffix(value, end='%'):
    """
    gets a copy of string with `%` or couple of `_` values attached to end.
    it is to be used in like operator.

    :param str value: value to be processed.
    :param str end: end place holder to be appended.
                    it could be `%` or couple of `_` values
                    for exact matching.
                    defaults to `%` if not provided.

    :rtype: str
    """
    # None passes straight through so callers can chain optional filters.
    if value is None:
        return None
    return f'{value}{end}'
def roman_to_num(string):
    """Return the numerical representation of the roman numeral.

    Applies the subtractive rule: a symbol whose value is smaller than
    the one following it is subtracted (e.g. "IV" -> 4), otherwise added.

    :param string: roman numeral such as "XIV" (uppercase symbols I V X L C D M)
    :return: integer value of the numeral; 0 for an empty string
    """
    # define numerical mapping to roman numerals
    roman_dic = {'I': 1, 'V': 5, 'X': 10,
                 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
    if not string:
        # Previous version raised IndexError on string[-1] for empty input.
        return 0
    # keep cumsum of nums
    total = 0
    for index, val in enumerate(string[:-1]):
        if roman_dic[val] < roman_dic[string[index + 1]]:
            total -= roman_dic[val]
        else:
            total += roman_dic[val]
    # The final symbol is always added.
    total += roman_dic[string[-1]]
    return total
def firstOccurTable(firstCol):
    """
    creates a lookup table for the first occurence
    """
    # Map each nucleotide to the index of its first appearance in the
    # first column (raises ValueError if a base is absent, as before).
    return {base: firstCol.index(base) for base in ('A', 'C', 'G', 'T')}
def trace_methods(cls, name, until_cls=None):
    """Look for a method called `name` in cls and all of its parent classes."""
    found = []
    previous = None
    # Sentinel distinguishes "attribute missing" from a genuinely-None attribute.
    _missing = object()
    for klass in cls.__mro__:
        method = getattr(klass, name, _missing)
        if method is _missing:
            # Classes lacking the attribute are skipped entirely, including
            # the until_cls stop-check (matches the original continue).
            continue
        # Only record a method once when several classes share the same object.
        if method != previous:
            found.append(method)
            previous = method
        if klass is until_cls:
            break
    return found
def determine_trial_result(CI_lower, CI_upper, value):
    """
    Classify where ``value`` falls relative to a confidence interval.

    :param CI_lower: confidence interval lower bound
    :param CI_upper: confidence interval upper bound
    :param value: value to check (does the confidence interval include the value?)
    :return: 0 if the CI covers the value (bounds inclusive), -1 if the
        value lies below the interval, 1 if it lies above it.
        (The previous docstring claimed 1/0, contradicting the code.)
    """
    if value < CI_lower:
        return -1
    elif value > CI_upper:
        return 1
    else:
        return 0
def quizn_to_index(quizn):
    """See: https://github.com/fielddaylab/jo_wilder/blob/master/src/scenes/quiz.js

    For some reason there are 5 quizzes, but there is no quiz numbered 1.

    Returns:
        The correct quiz number for quizzes 2-5, or 0 for quiz 0.
    """
    if quizn >= 2:
        # Shift down by one to close the gap left by the missing quiz 1.
        return quizn - 1
    return quizn
def get_job_tasks(rank, ranks, tasks_tot):
    """
    Return a tuple of job task indices for a particular rank.

    This function distributes the job tasks in tasks_tot over all the
    ranks: each rank gets an equal-sized contiguous chunk, and the
    first ``rest`` ranks each receive one extra task from the tail.

    Note
    ----
    This is primarily an MPI helper function.

    Parameters
    ----------
    rank : int
        Current MPI rank/worker.
    ranks : int
        Number of MPI ranks/workers in total.
    tasks_tot : list
        List of task indices.
        Length is the total number of job tasks.
    """
    n_tot = len(tasks_tot)
    base, rest = divmod(n_tot, ranks)
    # Contiguous chunk of `base` tasks belonging to this rank.
    chunk = list(tasks_tot[base * rank: base * (rank + 1)])
    # The first `rest` ranks each pick up one leftover task from the tail.
    if rank < rest:
        chunk.append(tasks_tot[n_tot - rest + rank])
    return tuple(chunk)
def denormalize_11(x, low, high):
    """
    Denormalize [-1, 1] to [low, high]

    low and high should either be scalars or have the same dimension as the
    last dimension of x
    """
    # Shift x into [0, 2], scale by half the target span, then offset by low.
    return low + (x + 1) * (high - low) / 2
def generate_recursive_rules(limit):
    """
    Note this is not a general solution for ALL possible recursive calls.
    The generated inputs have recursive rules appear only at root in `0: 8 11`.
    Thus I only need to create a rule that has extracted recursion to some
    levels until I see a difference on matched number in my results.
    """
    rule8_alternatives = []
    rule11_alternatives = []
    for depth in range(1, limit + 1):
        # 8: 42 | 42 8 - recursion creates the pattern: 42 | 42 42 | 42 42 42 etc...
        forty_twos = ' '.join(['42'] * depth)
        rule8_alternatives.append(forty_twos)
        # 11: 42 31 | 42 11 31 - recursion creates the pattern: 42 31 | 42 42 31 31 etc...
        thirty_ones = ' '.join(['31'] * depth)
        rule11_alternatives.append(forty_twos + ' ' + thirty_ones)
    return [
        '8: ' + ' | '.join(rule8_alternatives),
        '11: ' + ' | '.join(rule11_alternatives),
    ]
def possible_bipartition(dislikes):
    """ Will return True or False if the given graph
    can be bipartitioned without neighboring nodes put
    into the same partition.

    ``dislikes`` is an adjacency list: dislikes[i] is the list of nodes
    that node i cannot share a partition with.

    Fixes over the previous version:
    - iterates every connected component, so an unreachable odd cycle no
      longer yields a false True;
    - no "current+1" probing hack, which raised IndexError when the last
      node had an empty dislike list (e.g. input [[]]);
    - the start node is colored immediately, so it is never re-enqueued;
    - an index cursor replaces list.pop(0), keeping the BFS truly O(N+E).

    Time Complexity: O(N+E)
    Space Complexity: O(N)
    """
    # color[node] is 0 or 1: the partition assigned during BFS 2-coloring.
    color = {}
    for start in range(len(dislikes)):
        if start in color:
            continue
        color[start] = 0
        queue = [start]
        head = 0  # cursor into queue; avoids O(N) pop(0)
        while head < len(queue):
            node = queue[head]
            head += 1
            for neighbor in dislikes[node]:
                if neighbor not in color:
                    # Put the neighbor in the opposite partition.
                    color[neighbor] = 1 - color[node]
                    queue.append(neighbor)
                elif color[neighbor] == color[node]:
                    # Two mutual dislikers forced into the same partition.
                    return False
    return True
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.