content stringlengths 42 6.51k |
|---|
def get_manual_finding_report_detail(manual_finding_reports):
    """
    Build a list of display-ready dicts from manual finding report entries.

    :param manual_finding_reports: iterable of raw report dicts from the response
    :return: list of dicts with normalized keys; missing fields default to ''.
    """
    field_map = (
        ('ID', 'id'),
        ('Title', 'title'),
        ('Label', 'label'),
        ('Pii', 'pii'),
        ('Source', 'source'),
        ('IsManualExploit', 'isManualExploit'),
        ('EaseOfExploit', 'easeOfExploit'),
    )
    details = []
    for report in manual_finding_reports:
        details.append({out_key: report.get(src_key, '')
                        for out_key, src_key in field_map})
    return details
def find_root_domain(soa):
    """
    Return the domain at the top of the zone served by ``soa``.

    :param soa: A zone's :class:`SOA` object (may be ``None``).
    :type soa: :class:`SOA`
    :returns: The domain in ``soa.domain_set`` with the fewest dot-separated
        labels (the shallowest name), or ``None`` when ``soa`` is ``None`` or
        owns no domains.

    NOTE(review): the original examples called
    ``find_root_domain('forward', domain.soa)`` with an extra first argument;
    the function only takes ``soa``. Example::

        >>> find_root_domain(domain.soa)
    """
    if soa is None:
        return None
    # Django-style reverse relation manager: all domains pointing at this SOA.
    domains = soa.domain_set.all()
    if domains:
        key = lambda domain: len(domain.name.split('.'))
        return sorted(domains, key=key)[0]  # Sort by number of labels
    else:
        return None
def ints(int_list):
    """coerce a list of strings that represent integers into a list of integers"""
    return list(map(int, int_list))
def jaro_similarity(s1, s2):
    """
    Computes the Jaro similarity between 2 sequences from:

        Matthew A. Jaro (1989). Advances in record linkage methodology
        as applied to the 1985 census of Tampa Florida. Journal of the
        American Statistical Association. 84 (406): 414-20.

    The Jaro distance between is the min no. of single-character transpositions
    required to change one word into another. The Jaro similarity formula from
    https://en.wikipedia.org/wiki/Jaro%E2%80%93Winkler_distance :

        jaro_sim = 0 if m = 0 else 1/3 * (m/|s_1| + m/s_2 + (m-t)/m)

    where:
        - |s_i| is the length of string s_i
        - m is the no. of matching characters
        - t is the half no. of possible transpositions.
    """
    # First, store the length of the strings
    # because they will be re-used several times.
    len_s1, len_s2 = len(s1), len(s2)
    # The upper bound of the distance for being a matched character.
    match_bound = max(len_s1, len_s2) // 2 - 1
    # Initialize the counts for matches and transpositions.
    matches = 0  # no.of matched characters in s1 and s2
    transpositions = 0  # no. of transpositions between s1 and s2
    flagged_1 = []  # positions in s1 which are matches to some character in s2
    flagged_2 = []  # positions in s2 which are matches to some character in s1
    # Iterate through sequences, check for matches and compute transpositions.
    for i in range(len_s1):  # Iterate through each character.
        # Characters only count as matching within a window of
        # +/- match_bound positions (inclusive on both ends).
        upperbound = min(i+match_bound, len_s2-1)
        lowerbound = max(0, i-match_bound)
        for j in range(lowerbound, upperbound+1):
            # Each position in s2 may be matched at most once (flagged_2).
            if s1[i] == s2[j] and j not in flagged_2:
                matches += 1
                flagged_1.append(i)
                flagged_2.append(j)
                break
    # Pair up matched positions in order; each out-of-order pair increments
    # the (doubled) transposition count, which is halved in the formula below.
    flagged_2.sort()
    for i, j in zip(flagged_1, flagged_2):
        if s1[i] != s2[j]:
            transpositions += 1
    if matches == 0:
        return 0
    else:
        return 1/3 * (matches/len_s1 +
                      matches/len_s2 +
                      (matches-transpositions//2)/matches
                      )
def merge_nodes(a, b):
    """Recursively and non-destructively merges two nodes. Returns the newly
    created node.

    Nodes are either ``None`` or 3-tuples whose element 0 is the ordering
    key. NOTE(review): the exact meaning of elements 1 and 2 (child vs.
    sibling links) is not visible here — confirm against the node
    constructor before relying on this description.
    """
    if a is None:
        return b
    if b is None:
        return a
    # Ensure `a` carries the smaller key (ties keep the original `a`).
    if a[0] > b[0]:
        a, b = b, a
    # Build a fresh tuple: smaller key on top, `b` merged with a[2],
    # and a[1] carried across — the inputs are never mutated.
    return a[0], merge_nodes(b, a[2]), a[1]
def reverse_loop(value):
    """Reverse string using a loop."""
    chars = []
    for ch in value:
        chars.insert(0, ch)
    return ''.join(chars)
def perm(n, k):
    """Return P(n, k), the number of permutations of length k drawn from n
    choices.

    Generalized to accept ``k == 0`` (the empty permutation), returning 1.

    :param n: number of choices.
    :param k: permutation length (must be non-negative).
    :raises ValueError: if ``k`` is negative.  (The original used
        ``assert k > 0``, which is stripped under ``python -O`` and would
        then loop forever on negative ``k``.)
    """
    if k < 0:
        raise ValueError("k must be non-negative")
    result = 1
    # Multiply n * (n-1) * ... * (n-k+1).
    for i in range(k):
        result *= n - i
    return result
def checklen(astring):
    """
    (str) -> Boolean
    Returns True if astring is at least 5 characters long, else False.

    NOTE: the original doctests passed a bogus second argument; the
    function takes a single string.

    >>> checklen('')
    False
    >>> checklen('four')
    False
    >>> checklen('check')
    True
    >>> checklen('check6')
    True
    """
    return len(astring) >= 5
def dummy_address_check(address):
    """
    Determine whether the given data contains a dummy (placeholder) address.

    Args:
        address: dictionary, list, or string containing addresses; nested
            containers are searched recursively.
    Returns:
        True if any contained string is '', '0.0.0.0' or '::'; False otherwise.
    """
    placeholders = ("", "0.0.0.0", "::")
    if isinstance(address, str):
        return address in placeholders
    if isinstance(address, dict):
        return any(dummy_address_check(v) for v in address.values())
    if isinstance(address, list):
        return any(dummy_address_check(item) for item in address)
    # Any other type cannot hold an address.
    return False
def process_builder_convert(data, test_name):
    """Converts 'test_name' to run on Swarming in 'data'.

    Mutates every matching entry in data['gtest_tests'] so that
    swarming.can_use_on_swarming_builders is truthy.

    Returns True if 'test_name' was found.
    """
    found = False
    for entry in data['gtest_tests']:
        if entry['test'] != test_name:
            continue
        swarming = entry.setdefault('swarming', {})
        if not swarming.get('can_use_on_swarming_builders'):
            swarming['can_use_on_swarming_builders'] = True
        found = True
    return found
def hosoya(height, width):
    """ Value at (height, width) of the Hosoya triangle, computed recursively.

    height -- row of the triangle
    width  -- position within the row
    Cells outside the triangle evaluate to 0.
    """
    # The two seed columns: H(w, w..w+1) == 1 for w in {0, 1}.
    if width in (0, 1) and height in (width, width + 1):
        return 1
    if height > width:
        return hosoya(height - 1, width) + hosoya(height - 2, width)
    if height == width:
        return hosoya(height - 1, width - 1) + hosoya(height - 2, width - 2)
    return 0
def get_domains(resolution, frequency, variable_fixed, no_frequencies):
    """Get the domains for the arguments provided.

    Args:
        resolution (int): Resolution in km (one of 50, 5, 10, 12).
        frequency (str): Frequency; the value 'cordex' selects CORDEX output.
        variable_fixed (bool): Is the variable fixed?
        no_frequencies (bool): True if no frequencies were provided.

    Raises:
        AssertionError: When ``resolution`` is not a known value.
        Exception: When resolution 12 is requested outside CORDEX.

    Returns:
        dict: Dictionary of domain information (domains, frequencies,
        resolution_dir, degrees); entries stay ``None`` when not applicable.
    """
    is_cordex = frequency == 'cordex'
    domains, frequencies, resolution_dir, degrees = None, None, None, None
    # Quickly bail if necessary (NOTE: assert is stripped under ``python -O``).
    assert resolution in [50, 5, 10, 12]
    if resolution == 50:
        degrees = 0.5
        resolution_dir = f'{resolution}km'
        if not is_cordex:
            domains = ['AUS-50']
        else:
            domains = ['AUS-44i']
            frequencies = ['1D', '1M']
            if variable_fixed:
                frequencies = ['1D']
            # no_frequencies takes precedence over variable_fixed.
            if no_frequencies:
                frequencies = ['3H', '1D', '1M']
    elif resolution == 5:
        domains = ['VIC-5']
        degrees = 0.05
        resolution_dir = f'{resolution}km'
    elif resolution == 10:
        domains = ['SEA-10', 'TAS-10']
        # BUG FIX: a trailing comma here previously turned resolution_dir
        # into the 1-tuple ('5km',) instead of the string '5km'.
        # TODO(review): confirm '5km' (rather than '10km') is the intended dir.
        resolution_dir = '5km'
        degrees = 0.1
    elif resolution == 12:
        if not is_cordex:
            raise Exception('Domain not known.')
        domains = ['AUS-44i']
        frequencies = ['1D', '1M']
        if variable_fixed:
            frequencies = ['1D']
    # Return everything
    return dict(
        domains=domains,
        frequencies=frequencies,
        resolution_dir=resolution_dir,
        degrees=degrees
    )
def filter_keyphrases_brat(raw_ann):
    """Receive raw content in brat standoff format and return keyphrases.

    Only entity lines (id starting with ``T``) are kept. Each such line has
    three tab-separated fields: id, "LABEL start end" (possibly a
    discontinuous span such as "LABEL s1 e1;s2 e2"), and the surface text.
    Discontinuous spans are merged into one covering span.

    BUG FIX: offsets are now compared numerically. Previously ``min``/``max``
    compared the offset *strings*, so e.g. '9' sorted after '100' and merged
    spans could come out inverted.

    :param raw_ann: raw ``.ann`` file content.
    :return: dict mapping annotation id to keyphrase info.
    """
    entity_rows = (line.split("\t") for line in raw_ann.split("\n")
                   if line[:1] == "T")
    keyphrases = {}
    for row in entity_rows:
        key = row[0]
        # ';' separates the fragments of a discontinuous span; treating it
        # as whitespace lets us take the overall extent uniformly.
        label_span = row[1].replace(';', ' ').split()
        label = label_span[0]
        offsets = [int(tok) for tok in label_span[1:]]
        span = (min(offsets), max(offsets))
        keyphrases[key] = {"keyphrase-label": label,
                           "keyphrase-span": span,
                           "keyphrase-text": row[2],
                           "tokens-indices": []}
    return keyphrases
def formatter(n):
    """formatter for venn diagram, so it can be easily turned off."""
    # Fixed two decimal places.
    return format(n, ".02f")
def ctext(text, colour="green"):
    """Colour some terminal output.

    Accepts the full colour name or its short alias ('bl' for black,
    otherwise the first letter); uses high-intensity ANSI codes.
    """
    palette = {}
    for code, name, alias in ((90, "black", "bl"), (91, "red", "r"),
                              (92, "green", "g"), (93, "yellow", "y"),
                              (94, "blue", "b"), (95, "purple", "p"),
                              (96, "cyan", "c"), (97, "white", "w")):
        palette[name] = palette[alias] = "\033[0;{}m".format(code)
    palette["off"] = "\033[0m"
    return f"{palette[colour]}{text}{palette['off']}"
def deleteFirstRow(array):
    """Return a copy of the 2D array without its first row."""
    remaining = array[1:]
    return remaining
def user_name_for(name):
    """ Returns a "user-friendly" name for a specified trait.

    Underscores become spaces, the first letter is capitalized, and a
    space is inserted at each lower-to-upper camelCase boundary.
    """
    spaced = name.replace('_', ' ')
    spaced = spaced[:1].upper() + spaced[1:]
    pieces = []
    prev_was_lower = False
    for ch in spaced:
        if ch.isupper() and prev_was_lower:
            pieces.append(' ')
        prev_was_lower = ch.islower()
        pieces.append(ch)
    return ''.join(pieces)
def _verify_type(val, default, the_type, name, instance):
    """
    Validate that the input is an instance of the provided type.

    Parameters
    ----------
    val
        The prospective value; ``None`` selects the default.
    default : None|float
        The default value returned when ``val`` is ``None``.
    the_type : Type
        The desired type for the value.
    name : str
        The bound variable name (used in the error message).
    instance
        The instance to which the variable belongs (used in the error message).

    Returns
    -------
    The default (for ``None``) or ``val`` itself.

    Raises
    ------
    TypeError
        If ``val`` is neither ``None`` nor an instance of ``the_type``.
    """
    if val is None:
        return default
    if isinstance(val, the_type):
        return val
    raise TypeError('The attribute {} of class {} is required to be an instance of type {}, '
                    'but we got type {}'.format(name, instance.__class__.__name__, the_type, type(val)))
def pointInPolygon(pt, poly, bbox=None):
    """Returns `True` if the point is inside the polygon.

    Uses the even-odd (ray casting) rule: ``c`` tracks the parity of polygon
    edges crossed by a horizontal ray from the point.

    If `bbox` is passed in (as ``(x0,y0,x1,y1)``), that's used for a quick check first.
    Main code adapted from http://www.ecse.rpi.edu/Homepages/wrf/Research/Short_Notes/pnpoly.html

    :param pt: ``(x, y)`` pair.
    :param poly: sequence of ``(x, y)`` vertices.
    :param bbox: optional bounding box for an early-out test.
    :return: truthy when inside. NOTE: the value is ``0``, ``True`` or
        ``False`` (not always a strict bool).
    """
    x, y = pt
    if bbox:
        x0, y0, x1, y1 = bbox
        # Outside the bounding box -> certainly outside the polygon.
        if not (x0 <= x <= x1) or not (y0 <= y <= y1): return 0
    c = 0
    i = 0
    nvert = len(poly)
    j = nvert-1  # j trails i by one vertex (edge j -> i), wrapping around
    while i < nvert:
        # Edge straddles the horizontal through y, and the point lies left of
        # the edge's crossing with that horizontal -> toggle parity.
        if (((poly[i][1]>y) != (poly[j][1]>y)) and (x < (poly[j][0]-poly[i][0]) * (y-poly[i][1]) / (poly[j][1]-poly[i][1]) + poly[i][0])):
            c = not c
        j = i
        i += 1
    return c
def deleteRules(parentRule,ruleName):
    """
    Delete rules matching ``ruleName`` from a rule tree, recursing into
    each rule's ``children``.

    Parameters
    ----------
    parentRule : list
        List of rule dicts, each with at least ``name`` and ``children``.
    ruleName : str
        Name of the rule(s) to delete.

    Returns
    -------
    parentRule : list
        The same (mutated in place) rule list.

    NOTE(review): only the *first* element of each level is compared and
    deleted, so matching later siblings survive; an empty ``parentRule``
    raises IndexError; and the ``'eachRule' in locals()`` guard below is
    redundant (always True inside the loop body). Verify these are
    intentional before relying on this function.
    """
    if parentRule[0]['name'] == ruleName:
        del parentRule[0]
    for eachRule in parentRule:
        #Check whether we have child rules, where in again behavior might be found
        # (the locals() test is redundant: eachRule is bound by this loop)
        if 'eachRule' in locals() and len(eachRule['children']) != 0:
            deleteRules(eachRule['children'],ruleName)
    #Awesome, we are done updating rules, lets go back
    return parentRule
def get_row_col(num_pic: int):
    """
    Choose a subplot grid (rows, cols) that fits num_pic figures:
    rows is the rounded square root; an extra column is added when the
    rounded value undershoots.
    """
    root = num_pic ** 0.5
    rows = round(root)
    cols = rows if root <= rows else rows + 1
    return rows, cols
def safe_division_d(number, divisor, **kwargs):
    """
    safe_division_d

    Rejects any unexpected keyword options with TypeError; otherwise
    always returns True (placeholder behaviour).

    :param number:
    :param divisor:
    :param kwargs: must be empty.
    :return: True
    """
    if kwargs:
        raise TypeError('Unexpected **kwargs:{0}'.format(kwargs))
    return True
def _parse_ref_dict(reference_dict, strict=True):
    """Parse the referenced dict into a tuple (TYPE, ID).

    The ``strict`` parameter controls if the number of keys in the
    reference dict is checked strictly (exactly one) or not.
    Returns ``None`` for an empty dict in non-strict mode; in non-strict
    mode with several keys, the first key wins.
    """
    keys = list(reference_dict)
    if strict and len(keys) != 1:
        raise ValueError(
            "Reference dicts may only have one property! "
            f"Offending dict: {reference_dict}"
        )
    if not keys:
        return None
    ref_type = keys[0]
    return (ref_type, reference_dict[ref_type])
def clean_texmath(txt):
    r"""
    Clean a TeX math string: control sequences inside ``$ $`` are preserved
    verbatim (including ``\n``, so also ``'\nu'``), while ``\n`` and ``\t``
    outside math segments are converted to real newline/tab characters.

    BUG FIX: the original sliced math segments with ``s[i:j+2]`` although
    ``j`` was an offset into ``s[i+1:]``, mangling any ``$...$`` segment
    that did not start at position 0; the slice is now ``s[i:i+j+2]``.

    :param txt: input text.
    :return: cleaned, stripped text.
    """
    s = "%s " % txt  # sentinel space so s[i+1] is always valid in the loop
    out = []
    i = 0
    while i < len(s) - 1:
        if s[i] == '\\' and s[i+1] in ('n', 't'):
            # Literal backslash-n / backslash-t outside math: make it real.
            out.append('\n' if s[i+1] == 'n' else '\t')
            i += 2
        elif s[i] == '$':
            j = s[i+1:].find('$')  # offset of the closing '$' past position i
            if j < 0:
                # Unbalanced '$': keep the rest verbatim.
                out.append(s[i:])
                i = len(s)
            else:
                out.append(s[i:i + j + 2])  # include both delimiters
                i += j + 2
        else:
            out.append(s[i])
            i += 1
        if i > 5000:
            break  # safety valve against pathological inputs
    return ''.join(out).strip()
def collect_all_methods(cls, method_name):
    """Return list of all `method_name` methods for cls and its superclass chain.

    List is in MRO order, with no duplicates. Methods are unbound.
    (This is used to simplify mixins and subclasses that contribute to a
    method set, without requiring superclass chaining, and without
    requiring cooperating superclasses.)
    """
    collected = []
    for klass in cls.__mro__:
        # Ancestors that don't define the method are simply skipped.
        candidate = getattr(klass, method_name, None)
        if candidate is None:
            continue
        if candidate not in collected:
            collected.append(candidate)
    return collected
def cobs_encode(data):
    """COBS-Encode bytes.

    Consistent Overhead Byte Stuffing: every zero byte is replaced by a
    "code" byte giving the distance to the next zero, and an extra code
    byte is inserted after every 254 consecutive non-zero bytes. Worst-case
    overhead is 1 byte plus 1 per 254 input bytes, hence the sizing below.

    :param data: input bytes
    :return: cobs-encoded bytearray
    """
    # Pre-size for the worst case; trimmed to the written length on return.
    out = bytearray(len(data) + 1 + (len(data) // 254))
    ci = ri = 0  # ci: index of the pending code byte; ri: read index
    c = wi = 1   # c: running code value; wi: write index into out
    while ri < len(data):
        if not data[ri]:
            # Zero byte: finalize the pending code byte, open a new group.
            out[ci] = c
            c = 1
            ci = wi
            wi += 1
            ri += 1
        else:
            out[wi] = data[ri]
            ri += 1
            wi += 1
            c += 1
            if c == 0xFF:
                # Max group length (254 data bytes): emit code, start anew.
                out[ci] = c
                c = 1
                ci = wi
                wi += 1
    out[ci] = c  # finalize the last (possibly empty) group
    return out[:wi]
def _stats_source(year):
    """Returns the path to the stats source file of the given year."""
    return 'stats/stats.{}.txt'.format(year)
def replace_in_list(my_list, idx, element):
    """
    Replace the element at ``idx`` in place; out-of-range (or negative)
    indices leave the list untouched. Returns the list either way.
    """
    if 0 <= idx < len(my_list):
        my_list[idx] = element
    return my_list
def rake_to_mech(rake):
    """
    Convert rake to mechanism.

    Args:
        rake (float): Rake angle in degrees.

    Returns:
        str: Mechanism ('SS', 'NM', 'RS', or 'ALL' when unknown/unclassified).
    """
    if rake is None:
        return 'ALL'
    # The three classes cover disjoint rake ranges.
    if -120 <= rake <= -60:
        return 'NM'
    if 60 <= rake <= 120:
        return 'RS'
    if (-180 <= rake <= -150) or (-30 <= rake <= 30) or (150 <= rake <= 180):
        return 'SS'
    return 'ALL'
def sub_binary_search(sorted_array, test_value, low, high):
    """Recursive binary-search helper over sorted_array[low:high+1].

    :return: index of test_value in sorted_array, or -1 when absent.

    BUG FIX: the not-found branch read ``False`` without ``return``, so the
    function returned None instead of the documented -1; a stray debug
    ``print`` of the found index was also removed.
    """
    if low > high:
        return -1
    mid = (low + high) // 2
    if test_value == sorted_array[mid]:
        return mid
    if test_value < sorted_array[mid]:
        return sub_binary_search(sorted_array, test_value, low, mid - 1)
    return sub_binary_search(sorted_array, test_value, mid + 1, high)
def fisbHexErrsToStr(hexErrs):
    """
    Render the per-block FIS-B error counts as a display string.

    Args:
        hexErrs (list): List of 6 items, one for each FIS-B block. Each entry
            is the number of errors in the message (0-10), 98 for a packet
            that failed, or 99 for a packet that wasn't tried.

    Returns:
        str: colon-separated, zero-padded two-digit counts.
    """
    return ':'.join('{:02}'.format(hexErrs[i]) for i in range(6))
def remove_comment(line, marker="##"):
    """Return the given line truncated at the first occurrence of the
    comment marker (the marker itself is dropped too)."""
    head, _, _ = line.partition(marker)
    return head
def config_from_defaults(struct: tuple) -> dict:
    """Build a config dict from (name, _, default) triples."""
    return dict((name, default) for name, _, default in struct)
def getOverlapSetSim(concepts_1: set, concepts_2: set):
    """ Returns the Overlap (Szymkiewicz–Simpson) coefficient for the given
    concept sets: |A ∩ B| / min(|A|, |B|).

    Returns 0.0 when either set is empty (previously this raised
    ZeroDivisionError).
    """
    smaller = min(len(concepts_1), len(concepts_2))
    if smaller == 0:
        return 0.0
    return len(concepts_1 & concepts_2) / smaller
def time_convert(input_time):
    """
    Convert input time from sec to "MM minutes, SS seconds" format,
    discarding whole days and hours.

    :param input_time: input time in sec
    :type input_time: float
    :return: converted time as str
    """
    seconds = float(input_time) % 3600  # drop whole days and hours
    minutes, seconds = divmod(seconds, 60)
    return "{:02.0f} minutes, {:02.0f} seconds".format(minutes, seconds)
def keep_aa(attentions):
    """Keep only the 'aa' entry of the attentions dict.

    Last-minute change: network transfer is very slow, so other keys are
    dropped from the JSON to speed up rendering.
    """
    return {'aa': attentions['aa']}
def welcome(location):
    """Return a greeting of the form 'Welcome to the <location>'."""
    return "".join(["Welcome to the ", location])
def package_name(package):
    # type: (str) -> str
    """
    Returns the parent package name of the given dotted module name
    (the name itself when it contains no dot, '' for empty input).
    """
    if not package:
        return ""
    head, sep, _ = package.rpartition(".")
    return head if sep else package
def concatenate_title_and_text(title, text):
    """Concatenate the title and text content of an article, separated by
    a single blank space.

    :param title: The title of an article
    :type title: str
    :param text: The text content of an article
    :type text: str
    :return: The blank-space concatenation of the inputs.
    :rtype: str
    """
    return title + " " + text
def _format_kwargs(kwargs):
    """Render a dict as space-separated key="value" pairs for use in tags.

    Usage:
        >>> _format_kwargs({'a': 1, 'b': "2"})
        'a="1" b="2"'
    """
    return ' '.join('{}="{}"'.format(key, value)
                    for key, value in kwargs.items())
def __makenumber(value):
    """
    Coerce poorly formatted numbers to floats.

    Examples:
        > an int / float value -> equivalent float
        > '1,000' -> 1000.0
        > '1 000 000.00' -> 1000000.0

    Parameters
    ----------
    value : mixed type
        Can be an int, float, or string.

    Returns
    -------
    float
        The coerced value.

    Raises an Exception for any other type (note: exact type() checks are
    used on purpose, so e.g. bool and numpy scalars are rejected).
    """
    value_type = type(value)
    if value_type is float:
        return value
    if value_type is int:
        return float(value)
    if value_type is str:
        return float(value.replace(',', '').replace(' ', ''))
    raise Exception('Incompatible data type. Review logic.')
def get_content_length(environ):
    """Return the content length from the WSGI environment as an integer.

    ``None`` is returned when the value is unavailable, unparsable, or when
    chunked transfer encoding is in use; negative values are clamped to 0.

    .. versionadded:: 0.9

    :param environ: the WSGI environ to fetch the content length from.
    """
    if environ.get('HTTP_TRANSFER_ENCODING', '') == 'chunked':
        return None
    raw = environ.get('CONTENT_LENGTH')
    if raw is None:
        return None
    try:
        length = int(raw)
    except (ValueError, TypeError):
        return None
    return max(0, length)
def get_prop(obj, prop, mytype='str'):
    """Get a property of a dict, for example device['uptime'], and handle None-values.

    :param obj: mapping to read from.
    :param prop: key to look up.
    :param mytype: 'str' returns an encoded value ('' when missing/None);
        any other value returns the raw value (None when missing/None).
    """
    if mytype == 'str':
        if prop in obj:
            if obj[prop] is not None:
                # NOTE(review): .encode('utf-8') returns *bytes* under
                # Python 3 — this looks like a Python 2 leftover; confirm
                # whether callers really expect bytes here.
                return obj[prop].encode('utf-8')
        return ''
    else:
        if prop in obj:
            if obj[prop] is not None:
                return obj[prop]
        return None
def closeEnough(tolerance, length, string):
    """
    Return True when the string's length is within ``tolerance`` of the
    target ``length``.

    tolerance: the tolerance of the string length
    length: the target length
    string: the string to evaluate
    """
    return abs(len(string) - length) <= tolerance
def _requires_dist_to_pip_requirement(requires_dist):
    """Parse "Foo (v); python_version == '2.x'" from Requires-Dist.

    Returns a pip-style requirement string suitable for requirements.txt;
    the environment marker (empty when absent) is re-appended after ';'.
    """
    name_version, _, env_mark = requires_dist.partition(';')
    if '(' in name_version:
        # Turn 'name (X)' / 'name (<X.Y)' into 'name==X' / 'name<X.Y'.
        name, _, version = name_version.partition('(')
        name = name.strip()
        version = version.replace(')', '').strip()
        if not any(op in version for op in '=<>'):
            version = '==' + version
        name_version = name + version
    # re-add environment marker
    return ';'.join([name_version, env_mark])
def binary_search(start, end, intervals):
    """Performs a binary search.

    Narrows a search window over ``intervals`` (assumed sorted list of
    ``(start, end)`` pairs — NOTE(review): sortedness is assumed, not
    checked) to bracket any interval overlapping the query ``[start, end)``.

    :param start: query start.
    :param end: query end.
    :param intervals: sorted ``(start, end)`` tuples.
    :return: ``(start_search, end_search)`` window indices; the loop exits
        early on the first interval found to overlap the query.
    """
    start_search = 0
    end_search = len(intervals)
    while start_search < (end_search - 1):
        mid = start_search + (end_search - start_search) // 2
        (interval_start, interval_end) = intervals[mid]
        if interval_end <= start:
            # Interval lies entirely before the query: discard left half.
            start_search = mid
        elif interval_start >= end:
            # Interval lies entirely after the query: discard right half.
            end_search = mid
        else:
            # Overlap found; stop narrowing.
            break
    return start_search, end_search
def enum_name(name):
    """Shorten an enumeration name by stripping its 'GL_' prefix."""
    prefix = 'GL_'
    assert name.startswith(prefix)
    return name[len(prefix):]
def _replace_oov(original_vocab, line):
    """Replace out-of-vocab words with "<UNK>".

    This maintains compatibility with published results.

    Args:
        original_vocab: a set of strings (the standard vocabulary for the dataset)
        line: a unicode string - a space-delimited sequence of words.

    Returns:
        a unicode string - a space-delimited sequence of words.
    """
    replaced = (word if word in original_vocab else u"<UNK>"
                for word in line.split())
    return u" ".join(replaced)
def speed_func(t_n):
    """
    Returns the normalised velocity U(t)/U0: ramps linearly with t_n and
    saturates at 1.
    """
    return t_n if t_n < 1 else 1
def return_min(list_of_dims):
    """
    Return the dimensions that produce the minimum area.
    In the event of a tie, the first match wins (``min`` is stable).

    :param list_of_dims: A list of (width, height)-style dimension pairs.
    :return: The dimensions with the minimum area.
    """
    def area(dims):
        return dims[0] * dims[1]
    return min(list_of_dims, key=area)
def html_decode(s):
    """
    Returns the ASCII decoded version of the given HTML string. This does
    NOT remove normal HTML tags like <p>.
    """
    # '&amp;' is decoded last so that e.g. '&amp;lt;' becomes '&lt;'
    # rather than being double-decoded to '<'.
    entities = (
        ("'", '&#39;'),
        ('"', '&quot;'),
        ('>', '&gt;'),
        ('<', '&lt;'),
        ('&', '&amp;'),
    )
    for plain, entity in entities:
        s = s.replace(entity, plain)
    return s
def _normalize_encoding(encoding):
    """Return the normalized name for <encoding>.

    See dist/src/Parser/tokenizer.c 'get_normal_name()' for implementation
    details / reference.

    NOTE: for now, parser.suite() raises a MemoryError when
    a bad encoding is used. (SF bug #979739)
    """
    if encoding is None:
        return None
    # lower() + '_' / '-' conversion
    normalized = encoding.replace('_', '-').lower()
    if normalized == 'utf-8' or normalized.startswith('utf-8-'):
        return 'utf-8'
    latin_names = ('latin-1', 'iso-latin-1', 'iso-8859-1')
    if normalized in latin_names:
        return 'iso-8859-1'
    if normalized.startswith(tuple(v + '-' for v in latin_names)):
        return 'iso-8859-1'
    return normalized
def _true(*args):
    """Default rerun filter function: accepts anything, always True."""
    # pylint:disable=unused-argument
    return True
def get_calmag(inimag, distance, ebv_val, magext_val):
    """Apply absorption and distance modulus to an instrumental magnitude.

    inimag -- magnitude from star[chip[FILTER]] (should equal star[chip[BAND]])
    distance -- distance modulus used to calculate the calibrated magnitude
    ebv_val -- value of E(B-V)
    magext_val -- magnitude extinction coefficient
    RETURNS:
     * calibrated magnitude: inimag + magext_val * ebv_val + distance
    """
    return inimag + magext_val * ebv_val + distance
def detokenize_text(src):
    """Join single-character tokens back into the resulting text.

    This function is the reverse of `tokenize_text`: the literal token
    '<space>' becomes a space, everything else is concatenated directly.

    :param src: source token list (whitespace-separated string).
    :return: the resulting text, stripped.
    """
    pieces = [u' ' if token == u'<space>' else token for token in src.split()]
    return u''.join(pieces).strip()
def _is_no_rec_name(info_name):
    """
    Return True when no recommendation should be provided for *info_name*.

    BUG FIX: previously returned an implicit ``None`` (instead of ``False``)
    for every other name; now returns an explicit bool in all cases.
    """
    return info_name == "last_boot_time"
def prod(x):
    """
    Computes the product of the elements of an iterable.

    :param x: iterable
    :type x: iterable
    :return: product of the elements of x (1 for an empty iterable)
    """
    result = 1
    for factor in x:
        # factor * result (not result * factor) preserves the original
        # operand order for non-commutative types.
        result = factor * result
    return result
def temporal_filter(start_date, end_date=None):
    """TemporalFilter data model.

    Parameters
    ----------
    start_date : str
        ISO 8601 formatted date.
    end_date : str, optional
        ISO 8601 formatted date; defaults to ``start_date`` when falsy.

    Returns
    -------
    dict
        TemporalFilter data model as a dictionary.
    """
    effective_end = end_date if end_date else start_date
    return {'startDate': start_date, 'endDate': effective_end}
def inflate_dict(dct, sep=".", deep=-1):
    """Inflates a flattened dict.

    Will look in simple dict of string key with string values to
    create a dict containing sub dicts as values.

    Samples are better than explanation:

    >>> from pprint import pprint as pp
    >>> pp(inflate_dict({'a.x': 3, 'a.y': 2}))
    {'a': {'x': 3, 'y': 2}}

    The keyword argument ``sep`` allows to change the separator used
    to get subpart of keys:

    >>> pp(inflate_dict({'etc/group': 'geek', 'etc/user': 'bob'}, "/"))
    {'etc': {'group': 'geek', 'user': 'bob'}}

    Warning: you cannot associate a value to a section:

    >>> inflate_dict({'section.key': 3, 'section': 'bad'})
    Traceback (most recent call last):
    ...
    TypeError: 'str' object does not support item assignment

    Of course, dict containing only keys that doesn't use separator will be
    returned without changes:

    >>> inflate_dict({})
    {}
    >>> inflate_dict({'a': 1})
    {'a': 1}

    Argument ``deep``, is the level of deepness allowed to inflate dict:

    >>> pp(inflate_dict({'a.b.c': 3, 'a.d': 4}, deep=1))
    {'a': {'b.c': 3, 'd': 4}}

    Of course, a deepness of 0 won't do anychanges, whereas deepness of -1 is
    the default value and means infinite deepness:

    >>> pp(inflate_dict({'a.b.c': 3, 'a.d': 4}, deep=0))
    {'a.b.c': 3, 'a.d': 4}
    """
    def mset(dct, k, v, sep=".", deep=-1):
        # Base case: depth exhausted or no separator left -> plain assign.
        if deep == 0 or sep not in k:
            dct[k] = v
        else:
            # Split off the first path component and recurse into (creating
            # if needed) the corresponding sub-dict; deep counts down,
            # negative values meaning unlimited depth.
            khead, ktail = k.split(sep, 1)
            if khead not in dct:
                dct[khead] = {}
            mset(dct[khead], ktail, v,
                 sep=sep,
                 deep=-1 if deep < 0 else deep - 1)
    res = {}
    ## sorting keys ensures that colliding values if any will be string
    ## first set first so mset will crash with a TypeError Exception.
    for k in sorted(dct.keys()):
        mset(res, k, dct[k], sep, deep)
    return res
def config_shim(args):
    """Make new argument parsing method backwards compatible.

    A lone positional (non-flag) argument is rewritten as
    ['--config-file', <arg>]; anything else yields None.
    """
    if len(args) != 2:
        return None
    candidate = args[1]
    if candidate[0] == '-':
        return None
    return ['--config-file', candidate]
def unpad(string):
    """
    From: https://github.com/CharlesBlonde/libpurecoollink
    Copyright 2017 Charles Blonde
    Licensed under the Apache License

    Un-pad a string: the last character encodes the pad length to strip.
    """
    pad_len = ord(string[-1:])
    return string[:-pad_len]
def pydiff(text1, text2, text1_name='text1', text2_name='text2',
           prefix_diff_files='tmp_diff', n=3):
    """
    Use Python's ``difflib`` module to compute the difference
    between strings `text1` and `text2`.

    Produce text and html diffs in files with `prefix_diff_files` as prefix
    (``<prefix>.txt`` and ``<prefix>.html``). The `text1_name` and
    `text2_name` arguments can be used to label the two texts in the diff
    output files. `n` is the number of context lines.

    No files are produced if the texts are equal.

    :return: ``True`` when the texts differ (and the files were written),
        ``False`` otherwise.
    """
    if text1 == text2:
        return False
    # Else:
    import difflib, time, os  # NOTE(review): `time` and `os` look unused here
    text1_lines = text1.splitlines()
    text2_lines = text2.splitlines()
    diff_html = difflib.HtmlDiff().make_file(
        text1_lines, text2_lines, text1_name, text2_name,
        context=True, numlines=n)
    diff_plain = difflib.unified_diff(
        text1_lines, text2_lines, text1_name, text2_name, n=n)
    filename_plain = prefix_diff_files + '.txt'
    filename_html = prefix_diff_files + '.html'
    f = open(filename_plain, 'w')
    # Need to add newlines despite doc saying that trailing newlines are
    # inserted...
    diff_plain = [line + '\n' for line in diff_plain]
    f.writelines(diff_plain)
    f.close()
    f = open(filename_html, 'w')
    f.writelines(diff_html)
    f.close()
    return True
def str2tuple(s, sep=',', converter=None, *, maxsplit=-1):
    """Convert a string to a tuple.

    If ``converter`` is given and not ``None``, it must be a callable that
    takes a string parameter and returns an object of the required type,
    or else a tuple with string elements will be returned. Whitespace
    around each element is stripped; an empty/falsy ``s`` gives ``()``.

    >>> str2tuple('1, 2, 3,4', converter=int)
    (1, 2, 3, 4)
    >>> str2tuple('a, b, , d')
    ('a', 'b', '', 'd')

    :param str s: the string
    :param str sep: the separator (whitespace around ``sep`` will be ignored)
    :param converter: the converter function
    :type converter: callable(str)
    :param int maxsplit: max. number of splits (-1 means no limit)
    :return: tuple with elements of the required type
    :rtype: tuple

    .. versionchanged:: 0.14.0 Add parameter ``maxsplit``
    """
    if not s:
        return ()
    convert = str if converter is None else converter
    return tuple(convert(part.strip()) for part in s.split(sep, maxsplit))
def downcase(string):
    """Return a copy of `string` with all alphabetic characters lowercased.

    :param string: string to downcase.
    """
    return string.lower()
def minmax(min_value, value, max_value):
    """
    Restrict value to [min_value; max_value].

    >>> minmax(-2, -3, 10)
    -2
    >>> minmax(-2, 27, 10)
    10
    >>> minmax(-2, 0, 10)
    0
    """
    if value < min_value:
        return min_value
    if value > max_value:
        return max_value
    return value
def makeSafeString(someString: str) -> str:
    """
    Replace potentially unsafe angle-bracket characters so the string is
    safe for display ('<' -> '_LT_', '>' -> '_GT_').
    """
    replacements = (('<', '_LT_'), ('>', '_GT_'))
    for unsafe, safe in replacements:
        someString = someString.replace(unsafe, safe)
    return someString
def M_TO_N(m, n, e):
    """
    Build a regex quantifier matching m to n occurrences of e.

    :param:
     - `m`: the minimum required number of matches
     - `n`: the maximum number of matches
     - `e`: the expression to match
    """
    return f"{e}{{{m},{n}}}"
def _convert_obj_ids_to_strings(data):
    """Convert ObjectIds to hexadecimal strings.

    Takes a dictionary or a list of dictionaries of MongoDB BSON documents
    and replaces each '_id' value with str(value), making the documents
    JSON serializable. Mutates (and returns) the input.
    """
    if isinstance(data, dict):
        data['_id'] = str(data['_id'])
    elif isinstance(data, list):
        for document in data:
            document['_id'] = str(document['_id'])
    return data
def flatten_meal_item(meal_item):
    """
    Group meal items by category.

    Input: <meal_item> is a sequence of [category, title, extra] triples.
    Output: dict mapping each category to a list of
    {'title': ..., 'extra': ...} dicts, in input order, e.g.::

        {'stockpot': [{'title': 'oatmeal', 'extra': 'decent'}, ...],
         'classics': [...]}
    """
    categories = {}
    for entry in meal_item:
        record = {'title': entry[1], 'extra': entry[2]}
        categories.setdefault(entry[0], []).append(record)
    return categories
def sumDigits(s):
    """
    :Assumes s is a string:
    :Returns the sum of the decimal digits in s:
    :For example, if s is 'a2b3c' it returns 5:
    """
    # str.isdecimal() matches exactly the characters int() can parse.
    return sum(int(ch) for ch in s if ch.isdecimal())
def lr_schedule_adam(epoch):
    """Learning Rate Schedule for the Adam optimizer.

    # Arguments
        epoch (int): The number of epochs
    # Returns
        lr (float32): learning rate
    """
    # Thresholds are checked from highest to lowest; the first match wins.
    schedule = ((350, 1e-5), (300, 1e-4), (200, 2e-4), (100, 5e-4))
    lr = 1e-3
    for threshold, rate in schedule:
        if epoch > threshold:
            lr = rate
            break
    print('Learning rate: ', lr)
    return lr
def index_name(i, j=None, k=None, l=None):
    """
    Format up to 4 indices as a parenthesized, comma-separated name.

    Parameters:
        i, j, k, l: index values (later ones optional).
    Returns:
        str like "(i, j)"; None when i is missing or when a later index
        is supplied without all of the earlier ones.
    """
    given = (i, j, k, l)
    # Count the contiguous non-None prefix.
    count = 0
    for idx in given:
        if idx is None:
            break
        count += 1
    # Reject empty input and "gaps" (a value after the first None).
    if count == 0 or any(idx is not None for idx in given[count:]):
        return None
    return "({})".format(", ".join(str(idx) for idx in given[:count]))
def calc_avg(varlist):
    """
    Collecting the statistics for descriptives, including number of elements, number of functions,
    number of variables, number of constants.

    :param varlist: list of dicts describing variables. Each dict is assumed
        to carry the keys 'type', 'no of sub_ins', 'Number of elements',
        'no of functions' and 'unit' — TODO(review): confirm against the
        code that produces these dicts.
    :return: total variables, average elements per equation, number of functions and average, constants, empty units
    """
    tot, els, avg, funcs, cons, e_unit, sl_tot = 0, 0, 0, 0, 0, 0, 0
    # two different count types, once with subscript and once without (i.e. number of elements with
    # and without subscripts)
    for var in varlist:
        if var['type'] != 'constant' and var['type'] != 'subscripted constant':
            # Non-constants: weight counts by the number of subscript instances.
            tot = tot + 1 * var['no of sub_ins']
            els = els + var['Number of elements'] * var['no of sub_ins']
            funcs = funcs + var['no of functions'] * var['no of sub_ins']
        if var['type'] == 'constant' or var['type'] == 'subscripted constant':
            cons = cons + 1 * var['no of sub_ins']
        if var['type'] != 'subscript list':
            sl_tot = sl_tot + 1
            if var['unit'] is None:
                e_unit = e_unit + 1
    try:
        avg = els / tot
        f_avg = funcs / tot
        unit_per = e_unit / sl_tot
    except ZeroDivisionError:
        # Empty varlist (or only constants/subscript lists): neutral stats.
        avg = 0
        f_avg = 0
        unit_per = 1
    return tot, avg, funcs, f_avg, cons, unit_per
def unpad(bytestring, k=16):
    """
    Remove the PKCS#7 padding from a text bytestring.

    :param bytestring: padded input (non-empty bytes).
    :param k: cipher block size; valid pad values are 1..k.
    :raises ValueError: if the padding is absent or corrupt.

    BUG FIX: the original only rejected pad values > k. As PKCS#7 requires,
    a pad byte of 0, an empty input, and padding whose bytes don't all
    equal the pad value now also raise ValueError.
    """
    if not bytestring:
        raise ValueError("Input is not padded or padding is corrupt")
    val = bytestring[-1]
    if not 1 <= val <= k:
        raise ValueError("Input is not padded or padding is corrupt")
    if bytestring[-val:] != bytes([val]) * val:
        raise ValueError("Input is not padded or padding is corrupt")
    return bytestring[:-val]
def find_min_loc(L):
    """Return the minimum of L and the index of its first occurrence.

    Argument L: a nonempty list of numbers.
    Results: the smallest value in L, its location (index)
    """
    minval = L[0]
    minloc = 0
    # enumerate replaces the wasteful `for i in list(range(len(L)))` pattern;
    # strict `<` keeps the FIRST occurrence on ties, as before.
    for index, value in enumerate(L):
        if value < minval:
            minval = value
            minloc = index
    return minval, minloc
def _format_tracestate(tracestate):
"""Parse a w3c tracestate header into a TraceState.
Args:
tracestate: the tracestate header to write
Returns:
A string that adheres to the w3c tracestate
header format.
"""
return ','.join(key + '=' + value for key, value in tracestate.items()) |
def _running_locally(coreapi_url, jobs_api_url):
"""Check if tests are running locally."""
return not (coreapi_url and jobs_api_url) |
def merge_dicts(*args, **kwargs):
    """Merge any number of dicts (and keyword arguments) into one new dict.

    Later positional dicts win on key collisions; keyword arguments win last.
    The input dicts are not modified.

    :return: a new dict containing all key/value pairs
    """
    final = {}
    for element in args:
        final.update(element)
    # Bug fix: **kwargs was accepted by the signature but silently ignored;
    # fold it in last so explicit keywords take precedence.
    final.update(kwargs)
    return final
def empty_cache(max_T, labeling_with_blanks):
    """Create an empty cache: max_T rows of len(labeling_with_blanks) Nones."""
    width = len(labeling_with_blanks)
    cache = []
    for _ in range(max_T):
        # Each row is a fresh list so mutating one row never affects another.
        cache.append([None] * width)
    return cache
def _score(estimator, x, y, scorers):
"""Return a dict of scores"""
scores = {}
for name, scorer in scorers.items():
score = scorer(estimator, x, y)
scores[name] = score
return scores |
def gcd(a, b):
    """
    >>> gcd(3,6)
    3
    >>> gcd(10,15)
    5
    """
    # Euclid's algorithm on magnitudes; gcd(x, 0) == |x| (including
    # gcd(0, 0) == 0) falls out of the loop naturally.
    a, b = abs(a), abs(b)
    while b:
        a, b = b, a % b
    return a
def permute(string):
    """permute(str) -> list of str
    Return a list of all permutations of a string.
    Note: If a character is repeated, each occurrence is distinct.
    Bug fix: permute('') now returns [''] (one empty permutation) instead
    of the old, incorrect [].
    >>> permute('abc')
    ['abc', 'acb', 'bac', 'bca', 'cab', 'cba']
    >>> permute('dog')
    ['dog', 'dgo', 'odg', 'ogd', 'gdo', 'god']
    """
    if not isinstance(string, str):
        raise TypeError("Must be a string")
    from itertools import permutations
    # itertools.permutations yields character tuples in exactly the same
    # order as the old hand-rolled prefix recursion.
    return ["".join(p) for p in permutations(string)]
def reverse(s):
    """ (str) -> str
    Return a reversed version of s.
    >>> reverse('hello')
    'olleh'
    >>> reverse('a')
    'a'
    """
    # Negative-step slicing reverses the string in a single C-level pass.
    return s[::-1]
def create_grid(locked_positions=None):
    """Create the playfield's 20x10 grid of RGB color tuples.

    :param locked_positions: optional mapping of (column, row) -> color
        tuple for cells that are already occupied; omitted cells default
        to black (0, 0, 0). Keys outside the grid are ignored, as before.
    :return: 20-row x 10-column list of lists of (r, g, b) tuples
    """
    # Bug fix: the mutable default argument `locked_positions={}` is the
    # classic shared-state pitfall; default to None and create fresh.
    if locked_positions is None:
        locked_positions = {}
    grid = [[(0, 0, 0) for _ in range(10)] for _ in range(20)]
    # Iterate only the locked cells instead of scanning all 200 cells.
    for (col, row), color in locked_positions.items():
        # Keys are (column, row) while the grid is indexed [row][column].
        if 0 <= row < 20 and 0 <= col < 10:
            grid[row][col] = color
    return grid
def int_or_none(x):
    """Cast x to int, passing None through unchanged.
    :param x: the value to transform and return
    :returns: None if x is None, otherwise int(x)
    """
    return None if x is None else int(x)
def lambda_handler(event, context):
    """Lambda function which does no operation.
    Args:
        event (dict): event data passed to the handler (unused).
        context (bootstrap.LambdaContext): runtime information (unused).
    Returns:
        dict: a simple object with a status code and a greeting body
    """
    response = dict(status_code=200, body='Hello from Lambda!')
    return response
def get_role_name(account_id, role):
    """Shortcut to insert the `account_id` and `role` into the IAM ARN string."""
    return f"arn:aws:iam::{account_id}:role/{role}"
def has_pythonX_package(pkg_name, name_by_version, version):
    """Given the package name, check if python<version>-<pkg_name>
    or <pkg_name>-python<version> exists in name_by_version.
    Return: (bool) True if such a package name exists, False otherwise
    """
    candidates = (
        'python{}-{}'.format(version, pkg_name),
        '{}-python{}'.format(pkg_name, version),
    )
    names = name_by_version[version]
    return any(candidate in names for candidate in candidates)
def get_primes(start_value, end_value) -> list:
    """
    :param start_value: interval start_value (inclusive)
    :param end_value: interval end_value (inclusive)
    :return: List of primes in the given range, in ascending order
    """
    from math import isqrt
    primes_list = []
    for value in range(start_value, end_value + 1):
        if value < 2:
            # 0, 1 and negatives are not prime.
            continue
        # Trial-divide only up to sqrt(value): every composite has a factor
        # no larger than its square root (original scanned all the way to
        # value, which was needlessly O(n) per candidate).
        if all(value % n for n in range(2, isqrt(value) + 1)):
            primes_list.append(value)
    return primes_list
def constrain(x, lower, upper):
    """Limit the incoming value to the given lower and upper limits.

    Values above 6500 are forced to 0 — this looks like a domain-specific
    sensor/overflow guard inherited from the original; TODO confirm intent.
    """
    # Bug fix: in the original, the trailing `if x > 6500 / else: y = x`
    # unconditionally overwrote the clamped value, so the lower/upper
    # limits never took effect. Early returns make each rule exclusive.
    if x > 6500:
        return 0
    if x > upper:
        return upper
    if x < lower:
        return lower
    return x
def gen_I(n):
    """Returns an nxn identity matrix (list of lists of ints)."""
    # 1 on the diagonal, 0 elsewhere — replaces the original's
    # min(x // y, y // x) arithmetic trick with an explicit comparison.
    return [[1 if row == col else 0 for col in range(n)] for row in range(n)]
def padr(text, n, c):
"""
padr - right pad of text with character c
"""
text = str(text)
return text + str(c) * (n - len(text)) |
def myfun(x, binary=True):
    """
    Thresholding function:
    above theta -> 1; below theta -> min; exactly theta -> theta.
    Binary mode uses (min=0, theta=0.5); otherwise (min=-1, theta=0).
    """
    if binary:
        floor_value, threshold = 0, .5
    else:
        floor_value, threshold = -1, 0
    if x > threshold:
        return 1
    elif x == threshold:
        return threshold
    else:
        return floor_value
def convert_list_items(old_list: list, convert_type: type):
    """
    Info:
        Convert each item of a list to the specified type.
    Parameters:
        old_list: list - list whose items will be converted
        convert_type: type - type (or callable) applied to every item
    Usage:
        convert_list_items(old_list, convert_type)
    Returns:
        list - a new list of converted items
    """
    return [convert_type(item) for item in old_list]
def find_column(header_row, pattern="rs"):
    """Return the index of the first column whose name starts with pattern.

    Raises StopIteration when no column matches (behavior preserved from
    the generator-based implementation).
    """
    matching_indices = (
        idx for idx, name in enumerate(header_row) if name.startswith(pattern)
    )
    return next(matching_indices)
def invalid_route(path):
    """Catches all invalid routes; returns a JSON-style error payload and 404."""
    error = {"type": "RouteNotFoundError", "message": "No such route"}
    response = {"success": False, "error": error}
    return response, 404
def merge_outfiles(filelist, outfile_name):
    """Append the contents of every file in filelist onto outfile_name.

    Intended for merging headerless BLAST type-6 output files.
    NOTE(review): the original comment claimed "only grab .tab files", but
    no filtering happens here — callers must pass an already-filtered list.

    :param filelist: paths of input files, concatenated in order
    :param outfile_name: destination path (opened in append mode, so
        pre-existing content is preserved)
    :return: outfile_name
    """
    with open(outfile_name, "a") as outf:
        # The unused `enumerate` index from the original has been removed.
        for path in filelist:
            with open(path, "r") as inf:
                for line in inf:
                    outf.write(line)
    return outfile_name
def skip_mul(n):
    """Return the product of n * (n - 2) * (n - 4) * ...

    The empty product (n <= 0) is 1, which also subsumes the original's
    explicit n == 1 and n == 2 base cases.

    >>> skip_mul(5) # 5 * 3 * 1
    15
    >>> skip_mul(8) # 8 * 6 * 4 * 2
    384
    """
    # Bug fix: the original recursed forever for n <= 0 (e.g. skip_mul(0)
    # called skip_mul(-2), skip_mul(-4), ... with no base case).
    if n <= 0:
        return 1
    return n * skip_mul(n - 2)
def _bool2str(b):
"""
Convert boolean to string
Used by XML serialization
"""
return "true" if b else "false" |
def count_empty_fields(cur_elem):
    """
    Recursively count the fields holding empty values inside a nested
    structure: empty dicts/lists/strings, plus any falsy value (0, None,
    '', [], {}, ...) stored as a dict value or list item.

    Only dicts, lists and strings are inspected; any other type contributes 0.
    An empty container is counted once at its own level.
    """
    empty_elems = 0
    # Exact `type(...) is` checks are kept deliberately: subclasses such as
    # OrderedDict were not treated as dicts by the original either.
    if type(cur_elem) is dict:
        if len(cur_elem) == 0:
            empty_elems += 1
        for value in cur_elem.values():
            # Dead-code fix: the original assigned `c_e = v` and then
            # conditionally re-read `cur_elem[k]`, which is always the same
            # object as v; the redundant dance is removed.
            if not value:
                empty_elems += 1
            else:
                empty_elems += count_empty_fields(value)
    elif type(cur_elem) is list:
        if not cur_elem:
            empty_elems += 1
        for item in cur_elem:
            if not item:
                empty_elems += 1
            else:
                empty_elems += count_empty_fields(item)
    elif type(cur_elem) is str:
        if not cur_elem:
            empty_elems += 1
    return empty_elems
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.