content
stringlengths 42
6.51k
|
|---|
def license_header(year_from, year_to, holder, license_type):
    """Build the two SPDX header lines (copyright range + license id)."""
    copyright_line = f"// SPDX-FileCopyrightText: {year_from} - {year_to} {holder}\n"
    license_line = f"// SPDX-License-Identifier: {license_type}\n"
    return [copyright_line, license_line]
|
def get_unks(hyps_edits):
    """
    Map hypothesis edits to tokens, masking inserted lexical items as "UNK".

    Edits whose ``o`` attribute is "I" (insert) contribute the "UNK" token;
    all other edits contribute their reference token ``r``.
    """
    # FIXME: first just all inserts, without checking if they occur else-where
    return ["UNK" if edit.o == "I" else edit.r for edit in hyps_edits]
|
def calc_pident_ignore_gaps(a, b):
    """
    Calculate percent identity between two aligned sequences, ignoring gaps.

    Positions where either sequence carries a gap character ('-' or '.')
    are skipped entirely. Returns 0 when no comparable position exists
    (all gaps, or empty input).
    """
    gap_chars = ('-', '.')
    matches = 0
    mismatches = 0
    for x, y in zip(a, b):
        if x in gap_chars or y in gap_chars:
            # Gap in either sequence: position is not comparable.
            continue
        if x == y:
            matches += 1
        else:
            mismatches += 1
    try:
        return matches / (matches + mismatches) * 100
    except ZeroDivisionError:
        # Narrowed from a bare `except:` which could swallow any error;
        # only division by zero (no comparable positions) is expected here.
        return 0
|
def split_class_and_test_names(nodeid):
    """Split a pytest nodeid into (dotted class path, test name)."""
    parts = nodeid.split('::')
    # Module path becomes a dotted path; '()' markers are dropped.
    parts[0] = parts[0].replace('/', '.')
    cleaned = [part.replace('.py', '') for part in parts if part != '()']
    return ('.'.join(cleaned[:-1]), cleaned[-1])
|
def validate_time_string(index, text):
    """
    Validate substring for times

    `index` is assumed to point at the ':' of a candidate "HH:MM" substring
    inside `text`. Returns the 5-character time substring when valid,
    otherwise False.
    """
    # The character just before the hour plus the one just after the minute
    # must form one of the accepted boundary pairs.
    # NOTE(review): raises IndexError for candidates too close to the string
    # ends, and ValueError when HH/MM are not digits -- presumably the caller
    # guarantees or handles this; confirm.
    spaces = text[index-3] + text[index+3]
    if spaces in (" ", " ,", " ."):
        hour = int(text[index-2:index])  # two digits before ':'
        if -1 < hour < 24:
            minute = int(text[index+1:index+3])  # two digits after ':'
            if -1 < minute < 60:
                return text[index-2:index+3]
    return False
|
def flip(d):
    """In a dictionary, swap keys and values"""
    return dict(zip(d.values(), d.keys()))
|
def getElementNamesWithInterface(elements, searching_interface):
    """
    Return the names of all elements whose "interfaces" list contains the
    requested interface.
    Implementation of
    https://sardana-controls.org/_modules/sardana/taurus/core/tango/sardana/macroserver.html#BaseMacroServer.getElementNamesWithInterface
    """
    return [element["name"] for element in elements
            if searching_interface in element["interfaces"]]
|
def get_task_representation(task):
    """
    Render a task dict as a call string: ``func(arg1, arg2, key=value, ...)``.
    """
    pieces = [str(arg) for arg in task['args']]
    pieces += ['{}={}'.format(key, value) for key, value in task['kwargs'].items()]
    return '{}({})'.format(task['func'], ', '.join(pieces))
|
def firstLocker(theDictionary):
    """Identifies the first locker number.
    :param dict[str, str] theDictionary:
        key: locker number / value: student name or "open"
    :return:
        The first locker number in theDictionary
    :rtype: str
    :raises IndexError: if theDictionary is empty (as before).
    """
    # The original looped `for i in range(1)` and returned on the first
    # iteration -- equivalent to simply returning the first key.
    return list(theDictionary)[0]
|
def strip_right_above_below(chars):
    """Drop leading and trailing blank lines, and drop trailing spaces in each line"""
    stripped = [line.rstrip() for line in chars.splitlines()]
    start, end = 0, len(stripped)
    while start < end and not stripped[start]:
        start += 1
    while end > start and not stripped[end - 1]:
        end -= 1
    kept = stripped[start:end]
    if not kept:
        return ""
    return "\n".join(kept) + "\n"
|
def count_substrings_l(k, s, l):
    """
    Function returning the number of substrings that will be selected by the
    multi-match-aware selection scheme for theshold `k`, for a string of length
    `s` to match strings of length `l`.
    Args:
        k (int): Levenshtein distance threshold.
        s (int): Length of target strings.
        l (int): Length of strings to match.
    Returns:
        int: The number of selected substrings.
    """
    length_gap = abs(s - l)
    return (k * k - length_gap * length_gap) // 2 + k + 1
|
def pivotpoint(first, last):
    """Pivot point strategy: index of the middle element of [first, last],
    used to prevent quicksort's worst case on already-sorted input.

    Bug fix: `first + (last - first) >> 1` parsed as
    `(first + (last - first)) >> 1` because `+` binds tighter than `>>`,
    so it returned `last // 2` regardless of `first`. The shift is now
    parenthesized to yield the true midpoint.
    """
    return first + ((last - first) >> 1)
|
def norm_colname(colname):
    """Given an arbitrary column name, translate to a SQL-normalized column
    name a la CARTO's Import API will translate to
    Examples
    * 'Field: 2' -> 'field_2'
    * '2 Items' -> '_2_items'
    Args:
        colname (str): Column name that will be SQL normalized
    Returns:
        str: SQL-normalized column name
    """
    last_char_special = False
    char_list = []
    for colchar in str(colname):
        if colchar.isalnum():
            # Alphanumerics pass through, lower-cased.
            char_list.append(colchar.lower())
            last_char_special = False
        else:
            if not last_char_special:
                # First special character of a run becomes one underscore.
                char_list.append('_')
                last_char_special = True
            else:
                # NOTE(review): resetting the flag here makes it alternate, so
                # every third consecutive special char emits another '_'. If
                # the intent was to collapse any run of specials into a single
                # underscore this should remain True -- confirm against
                # CARTO's actual normalization before changing.
                last_char_special = False
    final_name = ''.join(char_list)
    # SQL identifiers may not start with a digit; prefix with '_' if needed.
    # NOTE(review): raises IndexError for an empty input string.
    if final_name[0].isdigit():
        return '_' + final_name
    return final_name
|
def build_owner_reference(body):
    """
    Construct an owner reference object for the parent-children relationships.
    The structure needed to link the children objects to the current object as a parent.
    See https://kubernetes.io/docs/concepts/workloads/controllers/garbage-collection/
    """
    metadata = body['metadata']
    return {
        'controller': True,
        'blockOwnerDeletion': True,
        'apiVersion': body['apiVersion'],
        'kind': body['kind'],
        'name': metadata['name'],
        'uid': metadata['uid'],
    }
|
def field_list(feature_list):
    """get the list of field names in features

    Field names are taken from the first feature's top-level keys plus its
    'properties' keys; returns a dict mapping each name to itself.
    """
    first = feature_list[0]
    names = list(first.keys()) + list(first['properties'].keys())
    return {name: name for name in names}
|
def insert_statement(table_name, columns, data=None):
    """
    Generates an INSERT statement for given `table_name`.
    :param str table_name: table name
    :param tuple columns: tuple of column names
    :param data: dict of column name => value mapping
    :type data: dict or None
    :return: SQL statement template suitable for sqlalchemy.execute()
    :rtype: str
    """
    if data is None:
        data = {}
    # Only columns actually present in `data` are emitted.
    present = [column for column in columns if column in data]
    placeholders = [":{column}".format(column=column) for column in present]
    return "INSERT INTO {table_name} ({columns_list}) VALUES ({values_list})".format(
        table_name=table_name,
        columns_list=', '.join(present),
        values_list=', '.join(placeholders)
    )
|
def safe_filename(filename):
    """Turn a URL into a safe filename by escaping ':' and '/'.

    NOTE(review): hex() keeps its '0x' prefix, so ':' becomes '%0x3a' and
    '/' becomes '%0x2f' -- not standard percent-encoding ('%3A'/'%2F').
    Presumably only uniqueness/reversibility matters here; do not "fix"
    without checking, since files already written with this scheme would
    no longer be found.
    """
    filename = filename.replace(':', "%"+hex(ord(':')))
    filename = filename.replace('/', "%"+hex(ord('/')))
    return filename
|
def isWhite(pixel):
    """Green pixels in the source image are mapped to white pixels."""
    # Pure green (R=0, G=255, B=0) is the sentinel for "white" output.
    return (pixel[0], pixel[1], pixel[2]) == (0, 255, 0)
|
def extract_features_base(curr_sign, next_sign, nextnext_sign, prev_sign, prevprev_sign, prev_trans, prevprev_trans):
    """
    Builds the features according to the sign context
    :param curr_sign: current sign
    :param next_sign: next sign
    :param nextnext_sign: the sign after the next sign
    :param prev_sign: previous sign
    :param prevprev_sign: the sign before the previous sign
    :param prev_trans: previous classified transliteration
    :param prevprev_trans: the classified transliteration before the previous
    :return: The word's features
    """
    #features["trigram"] = prevprev_trans + ',' + prev_trans
    return {
        'sign': curr_sign,
        'prev_sign': prev_sign,
        'prevprev_sign': prevprev_sign,
        'next_sign': next_sign,
        "nextnext_sign": nextnext_sign,
        # Flags derived from the previous transliteration's surface form.
        "lower": prev_trans[0].islower(),
        "deter": 1 if prev_trans[-1] == '}' else 0,
        "part": 1 if prev_trans[-1] == ')' else 0,
        "bigram": prev_trans,
    }
|
def from_pt(value, units, dpi=96):
    """
    convert length from pt to given units
    Arguments
    ---------
    value : float
        length in pt
    units : str
        unit type (e.g. "pt", "px", "in", "cm", "mm") to convert to
    dpi : float / int
        dots per inch (conversion between inches and px)
    Return
    ------
    length given units
    """
    if units not in ("pt", "cm", "mm", "in", "inches", "px"):
        raise ValueError("please constrain units string parameter to "
                         "options listed in doc string")
    if units == "pt":
        return value
    # Metric targets are first rescaled so the remaining chain
    # (inches -> px -> the final pt-to-px scaling) yields the metric result.
    if units == "cm":
        value = value * 2.54
        units = "in"
    elif units == "mm":
        value = value * 25.4
        units = "in"
    # inches to pixels
    if units in ("in", "inches"):
        value = value / dpi
        units = "px"
    # one pt is 4/3 of a px (96 px per inch vs 72 pt per inch)
    if units == "px":
        value = value * 4/3
    return value
|
def _default_tokenizer(s):
"""Default string tokenizer which splits on newlines."""
return s.split('\n')
|
def sieve(range_max):
    """
    Linear sieve: returns the list of primes strictly below range_max.

    Is is O(n) because every number is marked prime/composite exactly once.
    Lets say there is a number num = p1^e1 * p2^e2 * p3^e3.
    It will be marked only once when i = p1^(e1-1) * p2^e2 * p3^e3 and primes[j] = p1.
    Very clever, yet easy to implement.
    https://cp-algorithms.com/algebra/prime-sieve-linear.html

    Note: requires range_max >= 2 (indices 0 and 1 are assigned directly).
    """
    is_prime = [True] * range_max
    primes = []
    # spf[x] = smallest prime factor of x, filled in as numbers are marked.
    spf = [None] * range_max
    is_prime[0] = is_prime[1] = False
    for i in range(2, range_max):
        if is_prime[i]:
            primes.append(i)
            # A prime number is its own smallest prime factor
            spf[i] = i
        # Mark i * p composite for each prime p <= spf[i]; bounding p by
        # spf[i] guarantees each composite is produced from its smallest
        # prime factor exactly once (hence linear time).
        j = 0
        while j < len(primes) and i * primes[j] < range_max and primes[j] <= spf[i]:
            # This loop will only run once for even numbers.
            is_prime[i * primes[j]] = False
            spf[i * primes[j]] = primes[j]
            j += 1
    return primes
|
def filter_split(boxes, filter_fns):
    """ Split bounding boxes in 2 sets, based upon whether or not they pass the filters.
    Args:
        boxes (dict or list): Dictionary containing box objects per image ``{"image_id": [box, box, ...], ...}`` or list of bounding boxes
        filter_fns (list or fn): List of filter functions that get applied or single filter function
    Returns:
        (tuple of dict or list): pass,fail bounding boxes
    """
    if callable(filter_fns):
        filter_fns = [filter_fns]

    def _split(values, passed, failed):
        # Route each box to `passed` unless some filter rejects it; the first
        # failing filter wins and the remaining filters are not evaluated.
        for box in values:
            for fn in filter_fns:
                if not fn(box):
                    failed.append(box)
                    break
            else:
                passed.append(box)

    # The dict and list branches previously duplicated the filtering loop;
    # both now delegate to the shared helper above.
    if isinstance(boxes, dict):
        ok, nok = dict(), dict()
        for key, values in boxes.items():
            ok[key], nok[key] = [], []
            _split(values, ok[key], nok[key])
    else:
        ok, nok = [], []
        _split(boxes, ok, nok)
    return ok, nok
|
def setter_name(a):
    """Return the camelCase setter name for an attribute.

    >>> setter_name('userId')
    'setUserId'
    """
    return 'set' + a[0].upper() + a[1:]
|
def fmt_size(size):
    """Convert byte size to a more readable format (mb, etc.)."""
    remaining = [' b', 'kb', 'mb', 'gb']
    unit = remaining.pop(0)
    # Scale down by 1024 until the value fits or units run out (caps at gb).
    while size > 1024 and remaining:
        size /= 1024.0
        unit = remaining.pop(0)
    return '{} {}'.format(round(size, 2), unit)
|
def rchop(string, ending):
    """Removes a substring at the end of a string.

    Bug fix: an empty `ending` previously matched (endswith('') is True)
    and `string[:-0]` evaluated to '', wiping the whole string. An empty
    ending now leaves the string unchanged.
    """
    if ending and string.endswith(ending):
        return string[:-len(ending)]
    return string
|
def round_up_to_even(n, maxprimes=3):
    """
    Round up the given value to minimize the number of prime factors. Factors
    other than 2, 3, and 5 are not allowed, and the number of factors of 3 and
    5 cannot exceed maxprimes.
    """
    candidate = n + 1 if n % 2 else n
    while True:
        remainder = candidate
        odd_factors = 0
        # Factors of 2 are free; only 3s and 5s count toward the limit.
        while remainder > 1 and remainder % 2 == 0:
            remainder //= 2
        for prime in (3, 5):
            while remainder > 1 and remainder % prime == 0:
                remainder //= prime
                odd_factors += 1
        if remainder == 1 and odd_factors <= maxprimes:
            return candidate
        candidate += 2
|
def isDefinitelyEqual_Solver(state, a, b):
    """
    Does 'a' definitely equal 'b', i.e., is it impossible for them to be not equal.
    More expensive than isDefinitelyEqual() (above), because it takes into account the
    current context.
    May catch some cases where 'a' definitely equals 'b' in the current context but
    isDefinitelyEqual() returned False.
    """
    # Fast path: two concrete ints compare directly.
    if isinstance(a, int) and isinstance(b, int): return a == b
    # Otherwise ask the solver: if no model satisfies a != b under the
    # current constraints, the two values must be equal in this context.
    # NOTE(review): assumes `a != b` builds a symbolic constraint object
    # (e.g. a claripy AST) rather than evaluating to a plain bool -- confirm
    # the expected operand types.
    return not state.solver.satisfiable(extra_constraints=[a != b])
|
def time_str(t):
    """Format a duration in seconds as '<x>h', '<x>m' or '<x>s'.

    :param t: duration in seconds
    :return: human-readable duration string with one decimal place
    """
    for limit, suffix in ((3600, 'h'), (60, 'm')):
        if t >= limit:
            return '{:.1f}{}'.format(t / limit, suffix)
    return '{:.1f}s'.format(t)
|
def get_floor_distribution(N_floors, N_classes):
    """
    Distribute the number of classes evenly over the number of available floors.
    Parameters
    ----------
    N_floors : int
        Number of available floors.
    N_classes : int
        Number of classes in the school.
    Returns
    -------
    floors : dictionary
        Dictionary of the form {floor1:[class_1, class_2, ...], ...}
    floors_inv : dictionary
        Dictionary of the form {class1:floor1, ..., class_N:floor_N}
    """
    floors = {i:[] for i in range(N_floors)} # starts with 0 (ground floor)
    classes = list(range(1, N_classes + 1))
    classes_per_floor = int(N_classes / N_floors)
    # easiest case: the number of classes is divisible by the number of floors
    if N_classes % N_floors == 0:
        for i, floor in enumerate(range(N_floors)):
            floors[floor] = classes[i * classes_per_floor: \
                        i * classes_per_floor + classes_per_floor]
    # if there are leftover classes: assign them one-by-one to the existing
    # floors, starting with the lowest
    else:
        leftover_classes = N_classes % N_floors
        classes_per_floor += 1
        for i, floor in enumerate(range(N_floors)):
            # the first `leftover_classes` floors receive the incremented
            # classes_per_floor count each
            if i < leftover_classes:
                floors[floor] = classes[i * classes_per_floor: \
                        i * classes_per_floor + classes_per_floor]
            # hooray, index magic!
            else:
                # remaining floors get classes_per_floor - 1 classes each; the
                # start offset accounts for the larger floors already filled.
                # NOTE(review): verify totals for edge cases such as
                # N_classes < N_floors before relying on this slice math.
                floors[floor] = classes[leftover_classes * classes_per_floor + \
                            (i - leftover_classes) * (classes_per_floor - 1):
                            leftover_classes * (classes_per_floor) + \
                            (i - leftover_classes) * (classes_per_floor - 1) + \
                            classes_per_floor - 1]
    # invert dict for easier use
    floors_inv = {}
    for floor, classes in floors.items():
        for c in classes:
            floors_inv.update({c:floor})
    return floors, floors_inv
|
def dashes(k):
    """Transform to dashes:
    >>> dashes("class-name")
    'class-name'
    >>> dashes("class_name")
    'class-name'
    >>> dashes("Class_name_longer")
    'class-name-longer'
    >>> dashes("")
    ''
    >>> dashes("a")
    'a'
    >>> dashes("A")
    'a'
    """
    return "-".join(k.lower().split("_"))
|
def is_int(value):
    """
    Check if an object is an integer
    :param value: object to test
    :return: True if `value` is an instance of int. Note that bool is a
        subclass of int in Python, so is_int(True) is also True.
    """
    return isinstance(value, int)
|
def validate_row(row):
    """
    Helper function to validate a row read in from CSV.
    If validation fails, returns tuple of (False, error message).
    If validation succeeds, returns tuple of (True, (username, field_name, score))
    """
    def fail(message):
        return (False, message)

    if not isinstance(row, list):
        return fail("Badly formatted row")
    if len(row) != 3:
        return fail("Too few or too many items in row (should be 3)")
    username, field_name, score_str = row
    # Both text fields must be non-empty strings.
    for label, value in (("Username", username), ("Field name", field_name)):
        if not isinstance(value, str):
            return fail("{} is not a string".format(label))
        if not value:
            return fail("{} is empty string".format(label))
    try:
        score = int(score_str)
    except ValueError:
        return fail("Score cannot be converted to integer")
    if score > 10000 or score < -10000:
        return fail("The score must be between -10000 and 10000")
    return (True, (username, field_name, score))
|
def is_not_a_credit_line(line):
    """ sometimes there are lines that we would like to be automatically removed from the credits section. AKA for these lines, the system won't even ask if user wants to ignore them. """
    # Template: uncomment and adapt the pattern below to auto-reject lines
    # matching known noise (each clause is a substring conjunction).
    # if ( "a string" in line and "another" in line
    #     or
    #     "EFT" in line and 'payment' in line and "returned" in line
    #     or
    #     "another string" in line and 'Cr' in line and 'another' in line
    #     ):
    #     return True
    """ return False if there are not lines to remove in credits section"""
    return False
|
def format_time(seconds):
    """Transforms seconds in human readable time string
    Arguments:
        seconds {float} -- seconds to convert
    Returns:
        string -- seconds as human readable string (HH:MM:SS.mmm)
    """
    minutes, secs = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    return "{:02.0f}:{:02.0f}:{:02.3f}".format(hours, minutes, secs)
|
def reply_is_success(reply: dict):
    """
    Predicate to check if `reply` is a dict and contains the key-value pair
    "status" = "success".
    @param reply A python dict
    @return True if the dict contains "status" = "success", else False
    """
    # isinstance (rather than `type(reply) is dict`) also accepts dict
    # subclasses such as OrderedDict; the single .get() replaces the old
    # redundant truthiness-then-equality chain, and the result is always a
    # real bool (the old `and` chain could return a falsy operand itself).
    return isinstance(reply, dict) and reply.get("status") == "success"
|
def get_entry_size(dt_version):
    """Returns the entry size according to the dt version"""
    # v1 -> 20 bytes, v2 -> 24 bytes, any other version -> 40 bytes
    return {1: 20, 2: 24}.get(dt_version, 40)
|
def valid_arguments(valip, valch, valc, valii):
    """
    Valid the arguments

    Validates IP, channel, and index parameters, printing a message for each
    invalid one. `valch` and `valii` are converted with int() first (raises
    ValueError for non-numeric input, as before).
    Returns True if everything is valid, else False.
    """
    bvalid = True
    # Type converssion
    valch = int(valch)
    valii = int(valii)
    # Valid - IP: must be non-empty
    if valip == "":
        print("IP is invalid.")
        bvalid = False
    # Valid - Channel: 1..4
    # (logical `or` instead of bitwise `|` on comparison results)
    if valch < 1 or valch > 4:
        print("Channel number is invalid.")
        bvalid = False
    # Valid - Index: allowed range depends on the command `valc`
    if valc == 1:
        # Lamp index: 0..63
        if valii < 0 or valii > 63:
            print("Lamp index is invalid.")
            bvalid = False
    elif valc == 2:
        # Group index: 0..15
        if valii < 0 or valii > 15:
            print("Group index is invalid.")
            bvalid = False
    else:
        # Channel: index must be -1 (unused)
        if valii != -1:
            print("Channel index is invalid.")
            bvalid = False
    return bvalid
|
def extract_dict_to_ordered_key_lists(dictionary, data_name, key_name='times'):
    """
    Create a dictionary with two lists: one giving the sorted "times" (the
    input dictionary's keys, in ascending order) and one giving the data
    values those times refer to, in the same order.
    Args:
        dictionary: The dictionary containing the data to be extracted (N.B. Assumption is that the keys refer to times)
        data_name: The key of interest within the input dictionary
        key_name: String to assign to the dictionary element that is the sorted list of keys of the input dictionary
    Returns:
        Dictionary containing the extracted lists under `key_name` and `data_name`
    """
    # Sort once and reuse (the original sorted the keys twice).
    sorted_keys = sorted(dictionary.keys())
    return {key_name: sorted_keys, data_name: [dictionary[time] for time in sorted_keys]}
|
def remove_suffix(text: str, suffix: str) -> str:
    """Removes the suffix from the string if it exists, and returns the result."""
    has_suffix = bool(suffix) and text.endswith(suffix)
    return text[:len(text) - len(suffix)] if has_suffix else text
|
def get_language_to_text_map(element):
    """Return a map of languages to text in an InternationalizedText element."""
    if element is None:
        return {}
    language_map = {}
    for text_element in element.findall("Text"):
        content = text_element.text
        language = text_element.get("language")
        # Skip entries missing either the text body or the language attribute.
        if not content or not language:
            continue
        language_map[language] = content
    return language_map
|
def nifty_reg_resample(nifty_bin, ref, flo, trans=None, res=None, inter=None, pad=0):
    """
    Build the command line for niftyreg's reg_resample tool
    http://cmictig.cs.ucl.ac.uk/wiki/index.php/Reg_resample
    :param nifty_bin: the path of nifityreg bin
    :param ref: reference image path
    :param flo: floating image path
    :param trans: optional transform file (-trans)
    :param res: optional resampled output path (-res)
    :param inter: optional interpolation order (-inter)
    :param pad: padding value (-pad); emitted only when non-zero
    :return: the assembled command string (the command is not executed here)
    """
    executable = nifty_bin + '/reg_resample'
    cmd = executable + ' -ref ' + ref + ' -flo ' + flo
    # `is not None` (identity) instead of `!= None` (equality) per PEP 8.
    if trans is not None:
        cmd += ' -trans ' + trans
    if res is not None:
        cmd += ' -res ' + res
    if inter is not None:
        cmd += ' -inter ' + str(inter)
    if pad != 0:
        cmd += ' -pad ' + str(pad)
    return cmd
|
def try_ex(func):
    """
    Call passed in function in try block. If KeyError is encountered return None.
    This function is intended to be used to safely access dictionary.
    Note that this function would have negative impact on performance.
    """
    try:
        result = func()
    except KeyError:
        result = None
    return result
|
def byte_list_to_string(byte_list):
    """Convert a list to human readable string in HEX
    :returns: a string representing the list
    """
    return "[{0}]".format(' '.join(hex(value) for value in byte_list))
|
def ANY_IN_MULTISELECT(input, reference):
    """
    For 'this multiselect contains any one of these N items'

    `input` is a space-separated multiselect value (None is treated as
    empty); returns True if any item of `reference` is among the selected
    values.
    """
    # Split once instead of re-splitting `input` for every reference item,
    # and pass a generator so any() can short-circuit.
    selected = (input or '').split(' ')
    return any(subval in selected for subval in reference)
|
def get_function_input(inputs, input_name, optional=False, default=None):
    """Given input_name, checks if it defined. Raises ValueError if a mandatory input is None"""
    value = inputs.get(input_name, default)
    if value is None and optional is False:
        err = "'{0}' is a mandatory function input".format(input_name)
        raise ValueError(err)
    return value
|
def ear(APN, N):
    """
    EAR - Effective Annual Interest Rate
    Returns the EAR given the APN
    :param APN: APN Interest Rate as fraction
    :param N: compound Frequency thats is equal to a year
    :return: Effective annual interest rate
    Example:
    Consider a stated annual rate of 10%. Compounded
    yearly, this rate will turn $1000 into $1100.
    However, if compounding occurs monthly, $1000 would
    grow to $1104.70 by the end of the year, rendering
    an effective annual interest rate of 10.47%.
    >>> ear(0.1, 12) # 10% compound monthly, result 10.47%
    0.10471306744129683
    """
    periodic_rate = APN / N
    return (1.0 + periodic_rate) ** N - 1
|
def fontawesome(icon_name, size=""):
    """
    Generate fontawesome syntax for HTML.
    Usage:
        {% fontawesome "iconname" %}
        {% fontawesome "iconname" "size" %}
    Size values are: lg, 2x, 3x, 4x, 5x
    """
    size_class = "fa-%s" % size if size else size
    return '<i class="fa fa-%s %s"></i>' % (icon_name, size_class)
|
def character_frequency(filename):
    """Counts the frequency of each character in the given file.

    :return: dict mapping character -> count, or None if the file cannot
        be opened.
    """
    # First try to open the file
    try:
        f = open(filename)
    except OSError:
        return None
    # `with` guarantees the file is closed even if reading raises
    # (the original leaked the handle on a mid-iteration exception).
    with f:
        characters = {}
        for line in f:
            for char in line:
                characters[char] = characters.get(char, 0) + 1
    return characters
|
def getRevisedComboSplitSegs(comboSplitSegs, newSegsIgnore):
    """
    This function removes the segments to ignore from the non-overlapping list of ground truth and diarization
    segments.
    Inputs:
    - comboSplitSegs: list of ground truth and diarization segments after all iterations have reduced it to
      non-overlapping segments with a constant number of speakers in each segment
    - newSegsIgnore: the non-overlapping list of segments to ignore, form: "[[1270.14, 1270.64], [1274.63, 1275.88], ..."
    Outputs:
    - segs: the revised list of ground truth and diarization segments after removing segments within the collars
    """
    segs = comboSplitSegs.copy()
    newRows = []
    for row in newSegsIgnore:
        for j in range(len(segs)):
            tbeg, tend = segs[j]['tbeg'], segs[j]['tend']
            name = segs[j]['name']
            if row[0] <= tbeg and row[1] >= tend:
                # Ignore region swallows the whole segment: mark for deletion.
                segs[j] = {'tbeg': 0, 'tend': 0, 'name': {'oname': [], 'dname': []}}
            elif row[0] <= tbeg and tbeg < row[1] < tend:
                # Ignore region clips the start of the segment.
                segs[j] = {'tbeg': row[1], 'tend': tend, 'name': name}
            elif tbeg < row[0] < tend and row[1] >= tend:
                # Ignore region clips the end of the segment.
                segs[j] = {'tbeg': tbeg, 'tend': row[0], 'name': name}
            elif tbeg < row[0] < tend and row[1] < tend:
                # Ignore region splits the segment in two.
                # Bug fix: the remainder must end at the segment's ORIGINAL
                # tend. The old code truncated segs[j] first and then read
                # segs[j]['tend'], producing an inverted remainder
                # [row[1], row[0]] instead of [row[1], tend].
                segs[j] = {'tbeg': tbeg, 'tend': row[0], 'name': name}
                newRows.append({'tbeg': row[1], 'tend': tend, 'name': name})
    segs = segs + newRows
    segs.sort(key=lambda x: x['tbeg'])
    # Drop segments that were fully swallowed by an ignore region.
    for i in reversed(range(len(segs))):
        if segs[i] == {'tbeg': 0, 'tend': 0, 'name': {'oname': [], 'dname': []}}:
            del segs[i]
    return segs
|
def clean(expr):
    """
    cleans up an expression string
    Arguments:
        expr: string, expression
    """
    # Translate caret exponentiation into Python's ** operator.
    return expr.replace("^", "**")
|
def accuracy(gold, cand):
    """Compute fraction of equivalent pairs in two sequences.

    Pairs are compared up to the shorter sequence (zip truncates) but the
    denominator is len(gold); raises ZeroDivisionError for empty gold.
    """
    matches = sum(1 for g, c in zip(gold, cand) if g == c)
    return matches / len(gold)
|
def mask(n: int) -> int:
    """
    Return a bitmask of length n.
    >>> bin(mask(5))
    '0b11111'

    Bug fix: mask(0) previously evaluated ``2 << -1`` and raised
    ValueError; it now returns 0, consistent with negative n.
    """
    return (1 << n) - 1 if n > 0 else 0
|
def consistency_guarantees(X, y, model_generator, method_name):
    """ Consistency Guarantees
    transform = "identity"
    sort_order = 3
    """
    # Scores: 1.0 - perfect consistency; 0.8 - guarantees depend on sampling;
    # 0.6 - guarantees depend on approximation; 0.0 - no guarantees.
    perfect = ("linear_shap_corr", "linear_shap_ind",
               "tree_shap_tree_path_dependent", "tree_shap_independent_200",
               "mean_abs_tree_shap")
    sampling_based = ("kernel_shap_1000_meanref", "sampling_shap_1000",
                      "lime_tabular_regression_1000")
    approximate = ("deep_shap", "expected_gradients")
    no_guarantee = ("coef", "random", "saabas", "tree_gain")
    guarantees = {}
    for names, score in ((perfect, 1.0), (sampling_based, 0.8),
                         (approximate, 0.6), (no_guarantee, 0.0)):
        for name in names:
            guarantees[name] = score
    return None, guarantees[method_name]
|
def isPhysicalUnit(x):
    """
    @param x: an object
    @type x: any
    @returns: C{True} if x is a L{PhysicalUnit}
    @rtype: C{bool}
    """
    # Duck-typed check: a PhysicalUnit exposes both 'factor' and 'powers'.
    return all(hasattr(x, attr) for attr in ('factor', 'powers'))
|
def binaryStringDigitDiff(binstr1, binstr2):
    """
    Count the number of digits that differ between two
    same-length binary strings
    Parameters
    ----------
    binstr1 : string
        A binary (base-2) numeric string.
    binstr2 : string
        A binary (base-2) numeric string.
    Returns
    -------
    An integer, the number of digits that differ between
    the two strings.
    Raises
    ------
    ValueError
        If the two strings differ in length.
    """
    if len(binstr1) != len(binstr2):
        # ValueError (a subclass of Exception, so existing handlers still
        # catch it) instead of the over-broad bare Exception.
        raise ValueError("binaryStringDigitDiff requires arguments to have same length")
    # Idiomatic pairwise comparison instead of an index loop.
    return sum(d1 != d2 for d1, d2 in zip(binstr1, binstr2))
|
def rearrange_args(*args, **kwargs):
    """
    Given positional arguments and keyword arguments, return a list
    of tuples containing the type of argument and number of the
    argument as well as its value.
    Parameters:
        *args: Random positional arguments.
        **kwargs: Random keyword arguments.
    Returns:
        List of tuples where the first value of the tuple is the
        type of argument with its number and the second value is
        the value of the argument. Keyword arguments' first value
        also contains the key of the keyword argument.
    >>> rearrange_args(10, False, player1=[25, 30], player2=[5, 50])
    [('positional_0', 10), ('positional_1', False), \
('keyword_0_player1', [25, 30]), ('keyword_1_player2', [5, 50])]
    >>> rearrange_args('L', 'A', 'N', 'G', L='O', I='S')
    [('positional_0', 'L'), ('positional_1', 'A'), ('positional_2', 'N'), \
('positional_3', 'G'), ('keyword_0_L', 'O'), ('keyword_1_I', 'S')]
    >>> rearrange_args(no_positional=True)
    [('keyword_0_no_positional', True)]
    """
    positionals = [('positional_{}'.format(index), value)
                   for index, value in enumerate(args)]
    keywords = [('keyword_{}_{}'.format(index, key), value)
                for index, (key, value) in enumerate(kwargs.items())]
    return positionals + keywords
|
def aliased2seabed(aliased, rlog, tpi, f, c=1500,
                   rmax={18:7000, 38:2800, 70:1100, 120:850, 200:550}):
    """
    Estimate true seabed, given the aliased seabed range. It might provide
    a list of ranges, corresponding to seabed reflections from several pings
    before, or provide an empty list if true seabed occurs within the logging
    range or beyond the maximum detection range.
    Args:
        aliased (float): Range of aliased seabed (m).
        rlog (float): Maximum logging range (m).
        tpi (float): Transmit pulse interval (s).
        f (int): Frequency (kHz).
        c (int): Sound speed in seawater (m s-1). Defaults to 1500.
        rmax (dict): Maximum seabed detection range per frequency. Defaults
            to {18:7000, 38:2800, 70:1100, 120:850, 200:550}.
            (Mutable default, but treated as read-only here.)
    Returns:
        float: list with estimated seabed ranges, reflected from preceeding
        pings (ping -1, ping -2, ping -3, etc.).
    """
    ping = 0
    seabed = 0
    seabeds = []
    # Walk back one ping interval at a time until the candidate seabed range
    # exceeds the maximum detection range for this frequency.
    while seabed <= rmax[f]:
        ping = ping + 1
        seabed = (c * tpi * ping) / 2 + aliased
        # Logical `and` (via chained comparison) instead of bitwise `&`
        # on comparison results.
        if rlog < seabed < rmax[f]:
            seabeds.append(seabed)
    return seabeds
|
def stdout_step_format(step) -> str:
    """Create a step logging string for stdout.
    Args:
        step (list): a list, like [epoch, iteration, iterations]
    Returns:
        str: String prepared for logging to stdout.
        Example: `epoch 1 | iter 81/312`
    """
    # Strings pass through untouched.
    if isinstance(step, str):
        return step
    fields = []
    if step:
        fields.append("epoch {:>4}".format(step[0]))
    if len(step) >= 2:
        iter_field = "iter {:>3}".format(step[1])
        if len(step) >= 3:
            iter_field += "/{}".format(step[2])
        fields.append(iter_field)
    return " | ".join(fields)
|
def ceil_div(a, b):
    """Integer ceiling division: smallest integer >= a / b.
    Handy, from: https://stackoverflow.com/questions/14822184/
    """
    quotient, remainder = divmod(a, b)
    return quotient + bool(remainder)
|
def get_user_meta_prefix(server_type):
    """
    Returns the prefix for user metadata headers for given server type.
    This prefix defines the namespace for headers that will be persisted
    by backend servers.
    :param server_type: type of backend server i.e. [account|container|object]
    :returns: prefix string for server type's user metadata headers
    """
    return 'x-{}-meta-'.format(server_type.lower())
|
def build_message(result, action, data, time_stamp, error_code=None, error_message=None, path=None, line=None):
    """
    Result message format: Success message format: {'result', 'action', 'target', 'data:{'dirsize, }', 'timestamp'}
    Failure message format: {'result', 'action', 'error_code', 'error_message', 'path', 'linenumber', 'timestamp',
    'data:{}'}
    """
    message = {'result': result, 'action': action, 'target': path,
               'timestamp': time_stamp, 'data': data}
    if result != 'success':
        # Failure messages additionally carry the error details and location.
        message['error_code'] = error_code
        message['error_message'] = error_message
        message['linenum'] = line
    return message
|
def rgb_to_bgr_int(rgb_tuple):
    """Convert an ``(r, g, b)`` tuple into an integer BGR value.
    Converse of `bgr_int_to_rgb`.
    """
    red, green, blue = rgb_tuple
    # Red occupies the low byte, blue the high byte.
    return (blue << 16) | (green << 8) | red
|
def find_entity(entity_dict, entity_type, name):
    """
    Find an entity by its type and name.
    :param dict entity_dict: dictionary of parsed entities
    :param str entity_type: entity type to search for
    :param str name: entity name to search for
    :return: the matching entity, or None if the type or name is absent
    """
    entities_of_type = entity_dict.get(entity_type, {})
    return entities_of_type.get(name)
|
def isValidSymbol(command, sortedSymbols):
    """ isValidSymbol checks whether given command has Stock Symbol name and it is in the loaded Stock Symbols. """
    # Strip once instead of three separate strip() calls on the same string;
    # empty (whitespace-only) commands are never valid.
    symbol = command.strip()
    return len(symbol) > 0 and symbol.upper() in sortedSymbols
|
def unique(l):
    """Returns a new list with all the unique elements in l

    (order of the result is unspecified, as with set iteration)
    """
    deduplicated = set(l)
    return list(deduplicated)
|
def revert_escape(txt, transform=True):
    """
    Undo HTML escaping ('&amp;', '&lt;', '&gt;') and the '¶<br>' line marker,
    then optionally rewrite diff tags to plain divs.

    transform replaces the '<ins ' or '<del ' with '<div '
    :type transform: bool

    Bug fix: the entity names had themselves been HTML-unescaped in the
    source (e.g. replace("&", "&"), a no-op); the intended entity source
    strings are restored here.
    """
    html = txt.replace("&amp;", "&").replace("&lt;", "<").replace("&gt;", ">").replace("¶<br>", "\n")
    if transform:
        html = html.replace('<ins ', '<div ').replace('<del ', '<div ').replace('</ins>', '</div>')\
            .replace('</del>', '</div>')
    return html
|
def get_author_email(pypi_pkg):
    """Retrieve author email from a package's PyPI metadata blob."""
    return pypi_pkg["pypi_data"]["info"]["author_email"]
|
def sci_notation(num, decimal_digits=1, precision=None, exponent=None):
    """
    Returns a string representation of the scientific
    notation of the given number formatted for use with
    LaTeX or Mathtext, with specified number of significant
    decimal digits and precision (number of decimal digits
    to show). The exponent to be used can also be specified
    explicitly.

    Bug fix: num == 0 previously raised a math domain error via log10(0);
    zero is now rendered with exponent 0.
    """
    from math import floor, log10
    if exponent is None:
        exponent = int(floor(log10(abs(num)))) if num != 0 else 0
    coeff = round(num / float(10**exponent), decimal_digits)
    if precision is None:
        precision = decimal_digits
    return r"${0:.{2}f}\cdot10^{{{1:d}}}$".format(coeff, exponent, precision)
|
def cleanstr(value):
    """Replace every underscore in the given string with a space."""
    return ' '.join(value.split('_'))
|
def unescape_sql(inp):
    """
    :param inp: an input string to be unescaped
    :return: return the unescaped version of the string.
    """
    # Strip surrounding double quotes, then collapse doubled quotes and
    # doubled backslashes.
    quoted = inp.startswith('"') and inp.endswith('"')
    body = inp[1:-1] if quoted else inp
    return body.replace('""', '"').replace('\\\\', '\\')
|
def extsort(lst):
    """
    Sort filenames by extension (the part after the last '.').
    >>> extsort(['a.c', 'a.py', 'b.py', 'bar.txt', 'foo.txt', 'x.c'])
    ['a.c', 'x.c', 'a.py', 'b.py', 'bar.txt', 'foo.txt']
    """
    # [-1] instead of [1]: the old index raised IndexError for names with no
    # dot and picked the wrong segment for multi-dot names ('a.b.c' -> 'b').
    # A dotless name now sorts by the whole name.
    return sorted(lst, key=lambda filename: filename.split('.')[-1])
|
def fibonacci(n):
    """Return the first `n` Fibonacci numbers as a list.

    Bug fix: for n < 2 the original always returned the full seed [0, 1]
    (so fibonacci(0) and fibonacci(1) had too many elements); the seed is
    now truncated to at most n values (empty for n <= 0).
    """
    fib_values = [0, 1]
    for i in range(2, n):
        fib_values.append(fib_values[i-1] + fib_values[i-2])
    return fib_values[:max(n, 0)] if n < 2 else fib_values
|
def check_type(obj,
               base,
               *,
               name=None,
               func=None,
               allow_none=False,
               default=None,
               error_message=None):
    """Check whether an object is an instance of a base type.
    Parameters
    ----------
    obj : object
        The object to be validated.
    name : str
        The name of `obj` in the calling function.
    base : type or tuple of type
        The base type that `obj` should be an instance of.
    func: callable, optional
        A function to be applied to `obj` if it is of type `base`. If None, no
        function will be applied and `obj` will be returned as-is.
    allow_none : bool, optional
        Indicates whether the value None should be allowed to pass through.
    default : object, optional
        The default value to return if `obj` is None and `allow_none` is True.
        If `default` is not None, it must be of type `base`, and it will have
        `func` applied to it if `func` is not None.
    error_message : str or None, optional
        Custom error message to display if the type is incorrect.
    Returns
    -------
    base type or None
        The validated object.
    Raises
    ------
    TypeError
        If `obj` is not an instance of `base`.
    Examples
    --------
    >>> check_type(1, int)
    1
    >>> check_type(1, (int, str))
    1
    >>> check_type(1, str)
    Traceback (most recent call last):
    ...
    TypeError: Invalid type. Expected: str. Actual: int.
    >>> check_type(1, (str, bool))
    Traceback (most recent call last):
    ...
    TypeError: Invalid type. Expected: (str, bool). Actual: int.
    >>> print(check_type(None, str, allow_none=True))
    None
    >>> check_type(1, str, name='num')
    Traceback (most recent call last):
    ...
    TypeError: Invalid type for parameter 'num'. Expected: str. Actual: int.
    >>> check_type(1, int, func=str)
    '1'
    >>> check_type(1, int, func='not callable')
    Traceback (most recent call last):
    ...
    ValueError: Parameter 'func' must be callable or None.
    >>> check_type(2.0, str, error_message='Not a string!')
    Traceback (most recent call last):
    ...
    TypeError: Not a string!
    >>> check_type(None, int, allow_none=True, default=0)
    0
    """
    if allow_none and obj is None:
        if default is not None:
            # Validate the default through the same machinery, forwarding the
            # caller's custom error message (previously dropped on this path).
            return check_type(default,
                              base=base,
                              name=name,
                              func=func,
                              allow_none=False,
                              error_message=error_message)
        return None
    if isinstance(obj, base):
        if func is None:
            return obj
        elif callable(func):
            return func(obj)
        else:
            raise ValueError('Parameter \'func\' must be callable or None.')
    # Handle wrong type: build a descriptive TypeError message.
    if isinstance(base, tuple):
        expect = '(' + ', '.join(cls.__name__ for cls in base) + ')'
    else:
        expect = base.__name__
    actual = type(obj).__name__
    if error_message is None:
        error_message = 'Invalid type'
        if name is not None:
            error_message += f' for parameter \'{name}\''
        error_message += f'. Expected: {expect}. Actual: {actual}.'
    raise TypeError(error_message)
|
def replace_by_dict(mapping, node_str):
    """
    Replace each space-separated token of ``node_str`` according to the
    given replacement-rule dictionary; tokens without a rule pass through.

    :param mapping: The replacement rule dictionary we have built.
    :param node_str: The target node string to be replaced
    :return: The replaced string
    """
    tokens = node_str.split(' ')
    replaced = [mapping.get(token, token) for token in tokens]
    return ' '.join(replaced)
|
def get_source_line(node):
    """
    Return the "source" and "line" attributes from the `node` given or from
    its closest ancestor.
    """
    current = node
    while current:
        if current.source or current.line:
            return current.source, current.line
        current = current.parent
    return None, None
|
def split2float(source: str, idx: int, default: float = 0.0) -> float:
    """
    Helper function to split a source and to get its idx value, besides converting it to float.
    If the operations fail, the default value is returned
    :param source: Source string
    :param idx: Desired index
    :param default: Default value in case of failure
    :return: The float value obtained from the split source
    """
    try:
        return float(source.split()[idx])
    except (IndexError, ValueError):
        # Honor the documented contract: fall back to the default instead of
        # re-raising (the previous body raised, contradicting the docstring).
        return default
|
def evaluate_reverse_polish_notation(tokens):
    """
    Evaluate an arithmetic expression given in Reverse Polish Notation.

    Division rounds the true quotient to the nearest integer.

    :param tokens: Array of tokens that form the Reverse Polish Notation
    :return: result of the expression
    :raises ValueError: if the token stream is not a valid expression
    """
    ops = {
        "+": lambda a, b: a + b,
        "-": lambda a, b: a - b,
        "*": lambda a, b: a * b,
        "/": lambda a, b: int(round(a / b)),
    }
    stack = []
    for token in tokens:
        op = ops.get(token)
        if op is not None:
            rhs = stack.pop()
            lhs = stack.pop()
            stack.append(op(lhs, rhs))
        else:
            stack.append(int(token))
    if len(stack) != 1:
        raise ValueError("invalid expression")
    return stack.pop()
|
def encode_units(x):
    """Clamp an integer to the unit values 0 and 1.
    Args:
        x (int): integer to be examined
    Returns:
        1 when x >= 1, 0 when x <= 0
    """
    if x >= 1:
        return 1
    if x <= 0:
        return 0
|
def clean_string(text=""):
    """
    Strip surrounding whitespace and remove every newline character.

    :param text: the string to clean
    :return: the cleaned string
    """
    stripped = text.strip()
    return stripped.replace('\n', '')
|
def _reorder(xs, indexes):
"""Reorder list xs by indexes"""
if not len(indexes) == len(xs):
raise ValueError("xs and indexes must be the same size")
ys = [None] * len(xs)
for i, j in enumerate(indexes):
ys[j] = xs[i]
return ys
|
def prepare_kwargs(raw, string_parameter='name'):
    """
    Utility method to convert raw string/dict input into a dictionary to pass
    into a function. Always returns a dictionary.
    Args:
        raw: string or dictionary, string is assumed to be the name of the
            activation function. Dictionary will be passed through unchanged.
    Returns: kwargs dictionary for **kwargs
    """
    if isinstance(raw, dict):
        return dict(raw)
    if isinstance(raw, str):
        return {string_parameter: raw}
    return {}
|
def all_worddict(wordset, flairs):
    """Build a zeroed word-count dict for every flair.
    Returns:
        dict: key is flair, value is a fresh {word: 0} dict over wordset
    """
    return {flair: {word: 0 for word in wordset} for flair in flairs}
|
def max_distance(x1, y1, x2, y2):
    """Chebyshev distance between two points: max(abs(dx), abs(dy))."""
    dx = abs(x2 - x1)
    dy = abs(y2 - y1)
    return dx if dx > dy else dy
|
def get_BTD(tran1, tran2):
    """Poulis suggested to use BTD (Bit-vector Transaction Distance)
    to compute distance between transactions rather than Tum, as Tum
    causes huge running time.

    Both counters start at 1 (add-one smoothing), so the ratio is always
    defined even for disjoint or empty transactions.
    """
    universe = set(tran1) | set(tran2)
    shared = 1
    differing = 1
    for item in universe:
        in_first = item in tran1
        in_second = item in tran2
        if in_first and in_second:
            shared += 1
        elif in_first != in_second:
            differing += 1
    return float(differing) / shared
|
def write_journal(journal_file_path, journal_template, full_model_path, command):
    """
    Writes a journal to be used with revit, based on
    :param journal_file_path: journal output path.
    :param journal_template: journal template to be used as base.
    :param full_model_path: file path to the model.
    :param command: action to be performed on the model.
    :return: journal_file_path
    """
    quoted_model_path = '"{}"'.format(full_model_path)
    journal_content = journal_template.format(quoted_model_path, command)
    with open(journal_file_path, "w") as jrn_file:
        jrn_file.write(journal_content)
    return journal_file_path
|
def midiname2num(patch, rev_diva_midi_desc):
    """
    Convert a param dict {param_name: value, ...} to a librenderman
    patch list [(param_no, value), ...], coercing each value to float.
    """
    converted = []
    for param_name, raw_value in patch.items():
        converted.append((rev_diva_midi_desc[param_name], float(raw_value)))
    return converted
|
def dicts_deep_merge(one, two):
    """
    Deep-merge dict ``two`` into dict ``one`` in place and return ``one``.

    When both sides hold a dict under the same key, merge recursively;
    otherwise the value from ``two`` wins (it is only written when the key
    is missing from ``one`` or the values differ).
    """
    for key, incoming in two.items():
        if (key in one
                and isinstance(one[key], dict) and isinstance(incoming, dict)):
            dicts_deep_merge(one[key], incoming)
        elif key not in one or one[key] != incoming:
            one[key] = incoming
    return one
|
def rankCombineGenerateScores(ScoresList):
    """
    orders genes based on each scoring method in modesList,
    and returns their scores as sum of ranks when ordered by these methods

    Each entry of ScoresList appears to be a dict mapping a cell type to a
    list of (score, gene_name) pairs -- TODO confirm against callers.
    A gene's combined score is the negated sum of its ranks (descending by
    score) across all scoring methods, so a low total rank means a high
    combined score. The cell type assigned to each gene is taken from the
    FIRST scoring dict only.
    """
    # Map each gene to the cell type it appears under in the first score set.
    MaxCTDict = {}
    for ct in ScoresList[0]:
        for pair in ScoresList[0][ct]:
            MaxCTDict[pair[1]] = ct  # pair is (score, gene_name)
    # Accumulate each gene's rank across every scoring method.
    totalRanks = {}
    for curScores in ScoresList:
        # Flatten this method's scores across all cell types.
        listScores = []
        for ct in curScores:
            for pair in curScores[ct]:
                score = pair[0]
                geneName = pair[1]
                listScores.append([score,geneName])
        # Highest score first, so index i is the gene's rank for this method.
        sortedScores = sorted(listScores)[::-1]
        for i in range(len(sortedScores)):
            score = sortedScores[i][0]
            geneName = sortedScores[i][1]
            if geneName not in totalRanks:
                totalRanks[geneName] = 0
            totalRanks[geneName] += i
    # Regroup combined scores by the cell type recorded above.
    scoresByCT = {}
    for gene in totalRanks:
        ct = MaxCTDict[gene]
        if ct not in scoresByCT:
            scoresByCT[ct] = []
        totalRank = totalRanks[gene]
        scoresByCT[ct].append((-1*totalRank, gene))
        # we negate here so that low rank is highest score
    return scoresByCT
|
def _no_negative_zero(val):
"""Make sure -0 is never output. Makes diff tests easier."""
if val == 0:
return 0
return val
|
def get_class_identifier(qualified_name):
    """Takes com.some.thing.Class and returns Class."""
    return qualified_name.rsplit(".", 1)[-1]
|
def parse_percent(val):
    """Turns values like "97.92%" into 0.9792."""
    without_sign = val.replace('%', '')
    return float(without_sign) / 100
|
def write_mini_chains_file(s, outfile, enum):
    """Enumerate all mini chains in ``s`` and append them to ``outfile``.

    Lines starting with "chain" get their trailing id replaced by the running
    counter ``enum`` (tab-separated); all other lines are written verbatim.

    :param s: newline-separated chain text (empty lines are dropped)
    :param outfile: path of the file to append to
    :param enum: starting counter value
    :return: the counter after the last chain line
    """
    # `lines` instead of the builtin-shadowing `list`; context manager
    # guarantees the handle is closed even if a write fails.
    lines = [line + "\n" for line in s.split("\n") if line]
    with open(outfile, "a") as out:
        for line in lines:
            if line.startswith("chain"):
                line = " ".join(line.split()[:-1]) + "\t{}\n".format(enum)
                enum += 1
            out.write(line)
    return enum
|
def decode(raw: bytes):
    """Read a big-endian base-128 varint from `raw`; return (value, remainder).

    Each byte contributes its low 7 bits; a byte with the high bit clear
    terminates the number. Empty/None input decodes to (0, b"") so the
    return shape is consistent with the normal path (previously it returned
    a bare 0, breaking tuple-unpacking callers).
    """
    if raw is None or len(raw) == 0:
        return 0, b""
    result = 0
    data = raw
    while True:
        i, data = ord(data[:1]), data[1:]
        result <<= 7
        result += i & 0x7F
        if i < 0x80:
            # high bit clear: this was the last byte of the varint
            break
    return result, data
|
def check_zones(domain, zones):
    """
    Return the first zone that contains the given domain (either an exact
    match or a parent zone of it), or None when no zone matches.
    """
    for candidate in zones:
        if domain == candidate:
            return candidate
        if domain.endswith("." + candidate):
            return candidate
    return None
|
def convert_dms_to_decimal(elem):
    """Convert a latitude/longitude from DMS form to decimal degrees.
    Keyword arguments:
    elem -- latitude or longitude : (degree, min, sec)
    """
    degrees, minutes, seconds = elem[0], elem[1], elem[2]
    return degrees + minutes/60 + seconds/3600
|
def _ase_type(ase_canonical, ase_alternative):
"""
Return the type of ASE, it does not define mutual exclusivity.
"""
ase_type = ""
if len(ase_canonical) == 2:
ase_type = "insertion"
elif len(ase_alternative) == 2:
ase_type = "deletion"
elif ase_alternative[0] == "start":
if ase_alternative[-1] == "stop":
ase_type = "fully_alternative"
else:
ase_type = "alternative_start"
elif ase_alternative[-1] == "stop":
ase_type = "alternative_end"
else:
ase_type = "alternative"
return ase_type
|
def flatten(*seqs):
    """Flatten a sequence of sequences into a single tuple."""
    return tuple(item for seq in seqs for item in seq)
|
def modpow(k, n, m):
    """ Calculate "k^n" modular "m" efficiently via binary exponentiation.
    Even with python2 this is 100's of times faster than "(k**n) % m",
    particularly for large "n".
    Note however that python's built-in pow() also supports an optional 3rd
    modular argument and is faster than this.
    """
    result = 1
    base = k
    exp = n
    while exp:
        if exp & 1:
            result = (result * base) % m
        base = (base * base) % m
        exp >>= 1
    return result
|
def step_lr(lr_max, epoch, num_epochs):
    """Step scheduler: decay the learning rate by 0.2 at 30%, 60% and 80%
    of training progress."""
    progress = epoch / float(num_epochs)
    if progress >= 0.8:
        return lr_max * 0.2 * 0.2 * 0.2
    if progress >= 0.6:
        return lr_max * 0.2 * 0.2
    if progress >= 0.3:
        return lr_max * 0.2
    return lr_max
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.