content stringlengths 42 6.51k |
|---|
def rst_heading(value, arg):
    """Provide an underline for a reStructuredText heading.

    Syntax::
        {{ value|rst_heading:"=" }}

    Renders the value followed by a line of the underline character
    repeated to the value's length.
    """
    underline = arg * len(value)
    return f'{value}\n{underline}'
def mk_kws(name, ct, pk, verbs=None):
    """
    Build a query-filter dict for the given name, content type, pk and verbs.
    """
    query = {f'{name}_ct': ct}
    if pk is not None:
        query[f'{name}_pk'] = pk
    if verbs:
        query['verb__in'] = verbs
    return query
def country_code_transform(country_code):
    """ Transform a country code to the code used by VIES (Greece: GR -> EL). """
    vies_codes = {
        "GR": "EL",
    }
    if country_code in vies_codes:
        return vies_codes[country_code]
    return country_code
def fixed(_groups, colour, _period):
    """
    The Fixed pattern: a single always-on step in the given colour.
    The groups and period arguments are accepted but ignored.
    """
    step = (colour, 1)
    return [step]
def extract_pattern(fmt):
    """Extract the named keys used by a %(foo)s-style format string."""
    class _KeyRecorder(object):
        """Mapping stand-in that records every key it is asked for."""
        def __init__(self):
            self.seen_keys = set()
        def __getitem__(self, key):
            self.seen_keys.add(key)
            return ''
        def keys(self):
            return self.seen_keys
    recorder = _KeyRecorder()
    try:
        fmt % recorder
    except TypeError:
        # Formatting error (e.g. positional specifiers); keep keys seen so far.
        pass
    return set(recorder.keys())
def process_input_dict(input_dict, set_permissions=False):
    """
    Turn string values from checkboxes into booleans.

    :param input_dict: dict of raw form values, or a list of such dicts
    :param set_permissions: whether or not to insert missing permission
        flags as ``False``
    :return: the transformed dict (or list of transformed dicts)
    """
    # A list (valid JSON) is handled entry by entry.  Propagate
    # set_permissions: previously it was silently dropped for list entries.
    if isinstance(input_dict, list):
        return [process_input_dict(entry, set_permissions) for entry in input_dict]
    # 'true'/'false' strings (mostly from checkboxes) become real booleans.
    boolean_keys = {
        'all_can_read',
        'all_can_write',
        'group_can_read',
        'group_can_write',
        '_all_can_read',
        '_all_can_write',
        '_group_can_read',
        '_group_can_write'
    }
    # If a blank is passed from <select> for one of these, we set it to None.
    id_keys = {
        'user_group_id',
        'primary_user_group_id',
        'sample_ids',
        'collection_ids',
        'analysis_ids',
        '_user_group_id',
        '_primary_user_group_id',
        '_sample_ids',
        '_collection_ids',
        '_analysis_ids'
    }
    new_dict = {}
    for key, value in input_dict.items():
        if key in boolean_keys and isinstance(value, str):
            # Any string other than 'false' (case-insensitive) counts as True.
            new_dict[key] = value.lower() != 'false'
        elif key in id_keys and value == '':
            new_dict[key] = None
        else:
            new_dict[key] = value
    if set_permissions:
        # Unticked checkboxes are simply absent from the form data.
        for key in boolean_keys:
            new_dict.setdefault(key, False)
    return new_dict
def get_jetson_gstreamer_source(capture_width=1280, capture_height=720, display_width=1280, display_height=720, framerate=2, flip_method=0):
    """
    Build an OpenCV-compatible GStreamer pipeline description that captures
    video from the Jetson Nano camera and delivers BGR frames to an appsink.
    """
    pipeline_stages = [
        f'nvarguscamerasrc ! video/x-raw(memory:NVMM), '
        f'width=(int){capture_width}, height=(int){capture_height}, '
        f'format=(string)NV12, framerate=(fraction){framerate}/1',
        f'nvvidconv flip-method={flip_method}',
        f'video/x-raw, width=(int){display_width}, height=(int){display_height}, format=(string)BGRx',
        'videoconvert ! video/x-raw, format=(string)BGR ! appsink',
    ]
    return ' ! '.join(pipeline_stages)
def SplitPatch(data):
    """Splits a patch into separate pieces for each file.

    Args:
        data: A string containing the output of svn diff.

    Returns:
        A list of 2-tuple (filename, text) where text is the svn diff output
        pertaining to filename.
    """
    patches = []
    filename = None
    diff = []
    # splitlines(True) keeps line endings so the diff text round-trips exactly.
    for line in data.splitlines(True):
        new_filename = None
        if line.startswith('Index:'):
            # 'Index: <path>' marks the start of a new file's diff.
            _, new_filename = line.split(':', 1)
            new_filename = new_filename.strip()
        elif line.startswith('Property changes on:'):
            _, temp_filename = line.split(':', 1)
            # When a file is modified, paths use '/' between directories, however
            # when a property is modified '\' is used on Windows. Make them the same
            # otherwise the file shows up twice.
            temp_filename = temp_filename.strip().replace('\\', '/')
            if temp_filename != filename:
                # File has property changes but no modifications, create a new diff.
                new_filename = temp_filename
        if new_filename:
            # A new file begins: flush the accumulated diff for the previous one.
            if filename and diff:
                patches.append((filename, ''.join(diff)))
            filename = new_filename
            diff = [line]
            continue
        if diff is not None:
            diff.append(line)
    # Flush the final file's diff, if any.
    if filename and diff:
        patches.append((filename, ''.join(diff)))
    return patches
def dot3(v1, v2):
    """
    Dot product of two 3-element vectors; faster than np.dot for tiny inputs
    (dot3: 231 ns vs np.dot: 745 ns).
    """
    x1, y1, z1 = v1
    x2, y2, z2 = v2
    return x1 * x2 + y1 * y2 + z1 * z2
def _get_span_text(text, span):
"""Get the text that is demarcated by a span in a prodigy dict
"""
return text[span['start']:span['end']] |
def inorder_traversal_i(root):
    """
    Iterative in-order traversal of a binary tree.

    Returns node values in left-root-right order.  Nodes are expected to
    expose ``val``, ``left`` and ``right`` attributes.
    """
    output = []
    pending = []
    node = root
    while node or pending:
        # Walk as far left as possible, remembering the path taken.
        while node:
            pending.append(node)
            node = node.left
        # Visit the deepest unvisited node, then move to its right subtree.
        node = pending.pop()
        output.append(node.val)
        node = node.right
    return output
def pointerize(decl: str, name: str) -> str:
    """Given a C decl and its name, modify it to be a declaration to a pointer."""
    # Not a general C parser, but sufficient for all the types we generate.
    if '(' not in decl:
        # Plain declaration: prefix the name with '*'.
        return decl.replace(name, f'*{name}')
    # Function type: '(*name)' turns it into a function pointer.
    return decl.replace(name, f'(*{name})')
def attribs_from_raw(raw_credentials, app_meta):
    """Map an API credentials response onto credentials-file key names."""
    # FIXME OrderedDict?
    attribs = {
        "__appended_by_script__": app_meta["name"] + " " + app_meta["ver"],
        "__homepage__": app_meta["homepage"],
        "aws_access_key_id": raw_credentials['AccessKeyId'],
        "aws_secret_access_key": raw_credentials['SecretAccessKey'],
        "aws_session_token": raw_credentials['SessionToken'],
        "expiration": raw_credentials['Expiration'],
    }
    return attribs
def stringifyOptionValue(value):
    """
    Render an option value as a string; booleans become '1'/'0'.
    """
    if value is True:
        return '1'
    if value is False:
        return '0'
    return str(value)
def namespaces(labels):
    """
    Strip the name part from fully-qualified names, keeping the namespaces.

    namespaces(['clojure.core/map']) => ['clojure.core']
    """
    return [label.split('/')[0] for label in labels]
def cmp_rcsdates(date1, date2):
    """
    Return the earlier of two RCS dates, normalised to four-digit years.

    RCS dates are YY.mm.dd.HH.MM.SS before 2000 (17 characters) and
    YYYY.mm.dd.HH.MM.SS afterwards; short years get a '19' prefix first.
    """
    normalised = []
    for date in (date1, date2):
        if len(date) == 17:
            date = '19' + date
        normalised.append(date)
    return min(normalised)
def remove_empty_strings(x):
    """Strip ``x`` and map '', ':' and '.' to None.

    Arguments:
    - `x`: string to clean (may be None or empty; returned as None then)
    """
    if x:
        x = x.strip()
    if x in ("", ":", "."):
        return None
    return x
def exponential_ease_in(p):
    """Modeled after the exponential function y = 2^(10(x - 1))"""
    # The curve would give 1/1024 (not 0) at p == 0, so return 0 exactly there.
    return p if p == 0.0 else 2 ** (10 * (p - 1))
def validate_scheme(scheme):
    """ Validate that the scheme is identical to scheme(s) we support. (HTTPS.)
    >>> assert validate_scheme('https')
    >>> validate_scheme('http') # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    ValueError: ... HTTPS ...
    >>> validate_scheme('httpss') # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    ValueError: ... HTTPS ...
    >>> validate_scheme(None) # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    ValueError: ... HTTPS ...
    """
    normalized = (scheme or '').lower()
    if normalized != u'https':
        raise ValueError("Only HTTPS APIs are supported by Veracode.")
    return True
def str2bool(value):
    """
    Convert 'something' to a boolean; raises for unrecognised formats.

    Possible True values: 1, True, "1", "TRue", "yes", "y", "t"
    Possible False values: 0, False, None, [], {}, "", "0", "faLse", "no", "n", "f", 0.0, ...
    """
    text = str(value).lower()
    if text in ("yes", "y", "true", "t", "1"):
        return True
    if text in ("no", "n", "false", "f", "0", "0.0", "", "none", "[]", "{}"):
        return False
    raise Exception('Invalid value for boolean conversion: ' + str(value))
def get_individual(individual, ensembl_json):
    """Return the genotype entries whose sample name contains ``individual``."""
    return [genotype for genotype in ensembl_json["genotypes"]
            if individual in genotype["sample"]]
def mean(iterator, length):
    """ Arithmetic mean of the iterator's values; a zero length is treated as 1. """
    divisor = float(length or 1)
    return sum(iterator) / divisor
def get_parameter_for_sharding(sharding_instances):
    """Return the sharding flags for the given number of sharding instances.

    Args:
        sharding_instances: int. How many sharding instances to be running.
    Returns:
        list(str): parameters representing the sharding configuration
        (empty for a single instance).
    Raises:
        ValueError: if sharding_instances is not positive.
    """
    if sharding_instances <= 0:
        raise ValueError('Sharding instance should be larger than 0')
    if sharding_instances == 1:
        return []
    return [
        '--capabilities.shardTestFiles=True',
        '--capabilities.maxInstances=%s' % sharding_instances,
    ]
def lux2W(intensity, efficiency):
    """
    Convert illuminance to watts.

    intensity is in lux (lm/m2); efficiency is a factor.  683 is the lm/W
    luminous-efficacy constant used in the conversion.
    """
    luminous_efficacy = efficiency * 683
    return intensity / luminous_efficacy
def append_ext(id):
    """Render an id as a .jpg filename."""
    return f"{id}.jpg"
def connectCSES(aList, connectionSymbol="=", seperationSymbol=","):
    """ Render [name, value] pairs as 'name=value' strings joined by the separator. """
    if not aList:
        return ""
    rendered_pairs = [connectionSymbol.join(pair) for pair in aList]
    return seperationSymbol.join(rendered_pairs)
def volt_extremes(filtdat):
    """Find the maximum and minimum voltage values for
    the filtered ECG signal.

    Args:
        filtdat: array of filtered voltage values from
            the imported ECG signal
    Returns:
        voltage_extremes: tuple containing minimum and
            maximum lead voltages from the ECG signal
    """
    # Debug print and commented-out prints removed: a measurement helper
    # should not write to stdout.
    voltage_extremes = (min(filtdat), max(filtdat))
    return voltage_extremes
def allergens_from_text(text: str) -> dict:
    """Parse a food list into a mapping of allergens to ingredient sets.

    Each line of ``text`` describes one food as ``ing1 ing2 (contains a1, a2)``.
    The returned dict maps each allergen name to a list containing one set per
    food that mentions it: the set of that food's ingredients."""
    possible = {}
    for food in text.split('\n'):  # Foods start on separate lines.
        cleaned = food.replace('(', '').replace(')', '')
        ingredient_part, allergen_part = cleaned.split(' contains ')
        # Ingredients are delimited with spaces.
        ingredients = set(ingredient_part.split(' '))
        # Allergens are delimited with comma + space.
        for allergen in allergen_part.split(', '):
            possible.setdefault(allergen, []).append(ingredients)
    return possible
def get_grant_name(grant):
    """Get grant name based on Grantee type."""
    grantee = grant["Grantee"]
    name = ""
    if grantee["Type"] == "Group":
        # Group grants are named by the last path segment of their URI.
        name = grantee["URI"].rsplit("/", 1)[-1]
    if grantee["Type"] == "CanonicalUser":
        name = grantee["DisplayName"]
    return name
def convert_keys_to_string(dictionary):
    """Recursively converts dictionary keys to strings."""
    if isinstance(dictionary, dict):
        return {str(key): convert_keys_to_string(value)
                for key, value in dictionary.items()}
    # Non-dict values (including those nested in dicts) pass through as-is.
    return dictionary
def en_segundos(tiempo: str) -> int:
    """Convert a time string into seconds.

    :param tiempo: time expressed as days:hours:minutes:seconds
    :type tiempo: str
    :return: the time in seconds
    :rtype: int

    >>> en_segundos('1:0:0:0')
    86400
    >>> en_segundos('1:0:10:4')
    87004
    >>> en_segundos('2:12:46:29')
    218789
    """
    seconds_per_unit = (86400, 3600, 60, 1)
    amounts = (int(part) for part in tiempo.split(":"))
    return sum(unit * amount for unit, amount in zip(seconds_per_unit, amounts))
def get_time_string(seconds):
    """Return seconds as a Slurm-compatible HH:MM:SS time string.

    Hour values outside [0, 99999] collapse to "00:00:00".
    """
    minutes, secs = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    if not 0 <= hours <= 99999:
        return "00:00:00"
    return "{:02d}:{:02d}:{:02d}".format(int(hours), int(minutes), int(secs))
def constrain(val, min_val, max_val):
    """
    Clamp ``val`` into the range [min_val, max_val].

    Keyword arguments:
    val -- The unconstrained value
    min_val -- The lowest allowed value
    max_val -- The highest allowed value
    """
    lower_bounded = max(min_val, val)
    return min(max_val, lower_bounded)
def doesJSONPathExist(json_obj, tree_sequence):
    """
    Test whether a path of keys exists within a parsed JSON object.

    :param json_obj: parsed JSON object (nested dicts)
    :param tree_sequence: list of strings naming the keys to follow, in order
    :return: boolean — True if every step of the path exists
    """
    # The previous version accumulated an unused ``steps_str`` and tracked a
    # flag variable; early return makes the walk direct.
    current = json_obj
    for tree_step in tree_sequence:
        if tree_step not in current:
            return False
        current = current[tree_step]
    return True
def sir(
    s,
    v,
    i,
    i_v,
    r,
    vaccination_rate,
    beta,
    gamma_unvaccinated,
    gamma_vaccinated,
    vaccine_efficacy,
    n,
):
    """
    The SIR model, one time step
    :param s: Current amount of individuals that are susceptible
    :param v: Current amount of individuals that are vaccinated
    :param i: Current amount of individuals that are infectious
    :param i_v: Current amount of vaccinated individuals that are infectious
    :param r: Current amount of individuals that are recovered
    :param beta: The rate of exposure of individuals to persons infected with COVID-19
    :param gamma_unvaccinated: Rate of recovery for infected unvaccinated individuals
    :param gamma_vaccinated: Rate of recovery for infected vaccinated individuals
    :param vaccination_rate: The rate of vaccination of susceptible individuals
    :param vaccine_efficacy: The efficacy of the vaccine
    :param n: Total population size
    :return: tuple (s, v, i, i_v, r) of updated, rescaled compartment sizes
    """
    s_n = (
        -beta * s * i - beta * s * i_v - vaccination_rate * s
    ) + s  # Update to the amount of individuals that are susceptible ## sir_s_n_exp
    v_n = (
        vaccination_rate * s
        - beta * (1 - vaccine_efficacy) * v * i
        - beta * (1 - vaccine_efficacy) * v * i_v
    ) + v  # Update to the amount of individuals that are vaccinated ## sir_v_n_exp
    i_n = (
        beta * s * i + beta * s * i_v - gamma_unvaccinated * i
    ) + i  # Update to the amount of individuals that are infectious ## sir_i_n_exp
    i_v_n = (
        beta * (1 - vaccine_efficacy) * v * i
        + beta * (1 - vaccine_efficacy) * v * i_v
        - gamma_vaccinated * i_v
    ) + i_v  # Update to the amount of vaccinated individuals that are infectious ## sir_i_v_n_exp
    r_n = (
        gamma_vaccinated * i_v + gamma_unvaccinated * i + r
    )  # Update to the amount of individuals that are recovered ## sir_r_n_exp
    scale = n / (
        s_n + v_n + i_n + i_v_n + r_n
    )  # A scaling factor to compute updated disease variables ## sir_scale_exp
    # Rescale every compartment so the five of them sum back to n.
    s = s_n * scale  ## sir_s_exp
    v = v_n * scale  ## sir_v_exp
    i = i_n * scale  ## sir_i_exp
    i_v = i_v_n * scale  ## sir_i_v_exp
    r = r_n * scale  ## sir_r_exp
    return s, v, i, i_v, r
def delimit(items):
    """
    Join an iterable of strings with '; '.
    """
    return '; '.join(items)
def smooth_freqs(freqs):
    """
    Normalise a frequency vector so that its entries sum to 1.

    :param freqs: vector of frequencies
    :return: proportionally rescaled vector guaranteed to sum to 1
    """
    total = sum(freqs)
    return [freq / total for freq in freqs]
def one_of_k_encoding(x, allowable_set):
    """
    taken from https://github.com/thinng/GraphDTA

    One-hot encode ``x`` against ``allowable_set``; ``x`` must be a member.

    x:
        element from allowable_set
    allowable_set: list
        list of elements x is from
    """
    if x not in allowable_set:
        raise Exception("input {0} not in allowable set{1}:".format(x, allowable_set))
    return [x == element for element in allowable_set]
def correct_input(row):
    """
    :param row: str, user input
    :return: bool, True only when the input is exactly 4 single characters
    """
    # The length-4 check used to live inside the loop, so an empty input
    # skipped the loop entirely and was accepted.  Check the length first.
    if len(row) != 4:
        print('Illegal input')
        return False
    for item in row:
        # Each entry must be a single character (relevant when row is a list).
        if len(item) != 1:
            print('Illegal input')
            return False
    return True
def _censor_with(x, range, value=None):
"""
Censor any values outside of range with ``None``
"""
return [val if range[0] <= val <= range[1] else value
for val in x] |
def end(w, Indo = False):
    """
    Return the set of possible ending strings that ``w`` terminates with.

    Suffixes are built in layers: lapis0 are derivational endings
    ("kan", "i", plus "in" when Indo), lapis1 are clitic pronouns, and
    lapis2 are particles; combinations may also be prefixed with
    "an"/"anan".

    @param w: the word whose endings should be collected.
    @param Indo: if True, Indonesian "-in" is also included.
    """
    lapis0 = ["kan","i"]
    if Indo:
        lapis0 = ["kan","i", "in"]
    lapis1 = ["ku","mu","kau","nya","Nya"]
    lapis2 = ["lah","kah"]
    # Enumerate every layered combination (with optional "an"/"anan" prefix).
    endlist = set(["an", "anan"] + lapis0 + lapis1 + lapis2 + \
        ["an"+item0+item1+item2 for item0 in lapis0 for item1 in lapis1 for item2 in lapis2] + \
        ["an"+item0+item1 for item0 in lapis0 for item1 in lapis1] + \
        ["an"+item0+item2 for item0 in lapis0 for item2 in lapis2] + \
        ["an"+item1+item2 for item1 in lapis1 for item2 in lapis2] + \
        ["an"+item0 for item0 in lapis0] + \
        ["an"+item1 for item1 in lapis1] + \
        ["an"+item2 for item2 in lapis2] + \
        [item0+item1+item2 for item0 in lapis0 for item1 in lapis1 for item2 in lapis2] + \
        [item0+item1 for item0 in lapis0 for item1 in lapis1] + \
        [item0+item2 for item0 in lapis0 for item2 in lapis2] + \
        [item1+item2 for item1 in lapis1 for item2 in lapis2] + \
        ["anan"+item0+item1+item2 for item0 in lapis0 for item1 in lapis1 for item2 in lapis2] + \
        ["anan"+item0+item1 for item0 in lapis0 for item1 in lapis1] + \
        ["anan"+item0+item2 for item0 in lapis0 for item2 in lapis2] + \
        ["anan"+item1+item2 for item1 in lapis1 for item2 in lapis2] + \
        ["anan"+item0 for item0 in lapis0] + \
        ["anan"+item1 for item1 in lapis1] + \
        ["anan"+item2 for item2 in lapis2]
        )
    # Collect every candidate suffix that the (lowercased) word ends with.
    ending = set()
    for item in endlist:
        if w.lower().endswith(item):
            ending.add(item)
    return ending
def _plat_idx_to_val(idx, edge=0.5, FIO_IO_U_PLAT_BITS=6, FIO_IO_U_PLAT_VAL=64):
    """ Taken from fio's stat.c for calculating the latency value of a bin
    from that bin's index.
        idx  : the value of the index into the histogram bins
        edge : fractional value in the range [0,1]** indicating how far into
        the bin we wish to compute the latency value of.
    ** edge = 0.0 and 1.0 computes the lower and upper latency bounds
       respectively of the given bin index.

    Returns the (possibly fractional) latency value represented by the bin.
    """
    # MSB <= (FIO_IO_U_PLAT_BITS-1), cannot be rounded off. Use
    # all bits of the sample as index
    if (idx < (FIO_IO_U_PLAT_VAL << 1)):
        return idx
    # Find the group and compute the minimum value of that group
    error_bits = (idx >> FIO_IO_U_PLAT_BITS) - 1
    base = 1 << (error_bits + FIO_IO_U_PLAT_BITS)
    # Find its bucket number of the group
    k = idx % FIO_IO_U_PLAT_VAL
    # Return the mean (if edge=0.5) of the range of the bucket
    return base + ((k + edge) * (1 << error_bits))
def _transcript_matches(cur_id, id_list):
"""Check if a transcript matches one of the alternatively spliced names.
"""
for index, (test_id, test_re) in enumerate(id_list):
if cur_id == test_id or test_re.search(cur_id):
return index
return -1 |
def flag(value):
    """Display flag value"""
    labels = {
        'Y': 'Yes',
        'N': 'No',
        'U': 'Unknown',
    }
    return labels.get(value.strip(), 'Not set')
def order(get_order):
    """
    Order fixture: reduce a full order to its canonical fields.
    """
    full_order = get_order()
    wanted = ("userId", "products", "address", "deliveryPrice", "paymentToken")
    return {field: full_order[field] for field in wanted}
def clothoid(points):
    """Path instructions for a clothoid spline interpolating the control points.

    Args:
        points (list of 2-tuples): The control points for the spline
    Returns:
        string: Ipe path instructions
    """
    first_x, first_y = points[0]
    parts = [str(first_x), str(first_y), 'm']
    for x, y in points[1:]:
        parts.append(str(x))
        parts.append(str(y))
    parts.append('L ')
    return ' '.join(parts)
def cal_weight(from_x, from_y, to_x, to_y):
    """
    Euclidean distance between two points.

    Args:
        from_x, from_y: source coordinates
        to_x, to_y: destination coordinates
    Returns:
        the straight-line distance
    """
    dx = from_x - to_x
    dy = from_y - to_y
    return (dx * dx + dy * dy) ** 0.5
def _substitute(valueish, replacements, root=()):
"""Substitutes the values in valueish with those in replacements where the
keys are as in _flatten.
For example,
```
_substitute(
{a: x, b: (y, z)},
{(a,): X, (b, 0): Y, (b, 1): Z})
```
returns `{a: X, b: (Y, Z)}`.
"""
if isinstance(valueish, dict):
return type(valueish)({
k: _substitute(v, replacements, root + (k, ))
for (k, v) in valueish.items()
})
elif isinstance(valueish, (tuple, list)):
return type(valueish)((
_substitute(v, replacements, root + (ix, ))
for (ix, v) in enumerate(valueish)))
else:
return replacements[root] |
def mm_sorter(list_name):
    """Return a copy of the list sorted from min to max.

    Arguments:
        list_name {int, bool, float, string} -- list to be sorted
    Returns:
        a new sorted list; the input list is left unchanged.

    The previous implementation repeatedly called min()/max() and
    ``remove``d from the caller's list, emptying it as a side effect even
    though the docstring promised a copy.  sorted() is O(n log n) and
    non-destructive.
    """
    return sorted(list_name)
def MovingAvg(Ls, w=3):
    """ Moving average of input data, with time window of w days

    Output i averages the slice Ls[i-w:i+w] (clipped to the list bounds)
    divided by 2*w.
    NOTE(review): the divisor stays 2*w even when the window is truncated
    near the ends (biasing those values toward zero), and the window
    excludes index i+w — confirm this edge behavior is intended.
    """
    return [sum(Ls[max(0,i-w):min(len(Ls),i+w)])/(2*w) for i in range(len(Ls))]
def summarize_validation_report(report_json, report_uri, allValid):
    """Produce a summary json for a product validation report."""
    results = report_json.get('productLevelValidationResults')
    statuses = [entry.get('status') for entry in results]
    return {
        'allValid': allValid,
        'report': report_uri,
        'summary': {
            'validationSummary': report_json.get('summary'),
            'productsValidated': len(results),
            'passCount': statuses.count('PASS'),
            'failCount': statuses.count('FAIL'),
        }
    }
def get_range_around(range_value, current_item, padding):
    """
    Returns a range of numbers around the given number.

    This is useful for pagination, where you might want to show something
    like this::

        << < ... 4 5 (6) 7 8 .. > >>

    In this example `6` would be the current page and we show 2 items around
    that page (including the page itself).

    Usage::

        {% load libs_tags %}
        {% get_range_around page_obj.paginator.num_pages page_obj.number 5
          as pages %}

    :param range_value: Number of total items in your range (1 indexed)
    :param current_item: The item around which the result should be centered
      (1 indexed)
    :param padding: Number of items to show left and right from the current
      item.
    :returns: dict with 'range_items' plus 'left_padding'/'right_padding'
      booleans telling the template whether ellipses are needed.
    """
    # The window always spans the current item plus `padding` on each side.
    total_items = 1 + padding * 2
    left_bound = padding
    right_bound = range_value - padding
    if range_value <= total_items:
        # Everything fits: show the whole range, no ellipses.
        range_items = range(1, range_value + 1)
        return {
            'range_items': range_items,
            'left_padding': False,
            'right_padding': False,
        }
    if current_item <= left_bound:
        # Near the start: clamp the window to the first total_items pages.
        range_items = range(1, range_value + 1)[:total_items]
        return {
            'range_items': range_items,
            'left_padding': range_items[0] > 1,
            'right_padding': range_items[-1] < range_value,
        }
    if current_item >= right_bound:
        # Near the end: clamp the window to the last total_items pages.
        range_items = range(1, range_value + 1)[-total_items:]
        return {
            'range_items': range_items,
            'left_padding': range_items[0] > 1,
            'right_padding': range_items[-1] < range_value,
        }
    # Middle of the range: center the window on current_item.
    range_items = range(current_item - padding, current_item + padding + 1)
    return {
        'range_items': range_items,
        'left_padding': True,
        'right_padding': True,
    }
def compareTriplets(a, b):
    """Score two tuples: each index where one side is strictly larger earns
    that side a point; returns [a_score, b_score]."""
    a_score = 0
    b_score = 0
    for left, right in zip(a, b):
        if left > right:
            a_score += 1
        elif right > left:
            b_score += 1
    return [a_score, b_score]
def github_split_owner_project(url):
    """
    Parses the owner and project name out of a GitHub URL.

    Examples
    --------
    >>> github_split_owner_project("https://github.com/intel/dffml")
    {'owner': 'intel', 'project': 'dffml'}
    """
    # The old doctest showed a tuple, but the function has always returned a
    # dict; the join("/")-then-split("/") round-trip was also a no-op.
    return dict(zip(("owner", "project"), url.split("/")[-2:]))
def get_primary_stress_vector(phonemes_stress):
    """
    Return a 3-element list marking which syllable carries primary stress.

    :type phonemes_stress: str
    :param phonemes_stress: syllabified phoneme string with syllables
        separated by '-'; a syllable containing "'" is the stressed one.

    NOTE(review): the vector is fixed at 3 entries, so inputs with more
    than 3 syllables raise IndexError — confirm inputs are at most
    trisyllabic.  (Docstring previously claimed a dictionary was returned.)
    """
    phonemes_by_syllable = phonemes_stress.split('-')
    stress_markers_by_syllable = [0, 0, 0]
    for idx, s in enumerate(phonemes_by_syllable):
        if "'" in s:
            stress_markers_by_syllable[idx] = 1
    # function words may be unstressed; else, for all other words, there can only be one primary stress marker
    assert sum(stress_markers_by_syllable) == 0 or sum(stress_markers_by_syllable) == 1
    return stress_markers_by_syllable
def speed_section_bot(speed_limit_bot, D_bot, D):
    """
    Calculates the section speed of vapor at the bottom.

    Parameters
    ----------
    speed_limit_bot : float
        The limit speed of the vapor at the bottom of column, [m/s]
    D_bot : float
        The calculating bottom diameter of column, [m]
    D : float
        The choosing diameter of column, [m]
    Returns
    -------
    speed_section_bot : float
        The section speed of vapor at the bottom, [m/s]
    References
    ----------
    &&&&&
    """
    diameter_ratio = D_bot / D
    return speed_limit_bot * diameter_ratio ** 2
def norm_isbn_str(s):
    """
    Given an ISBN string, normalize the string so that it only contains
    the relevant digits.

    Drops ASCII whitespace (tab, space, CR, LF) and ASCII symbols that are
    not alphanumeric, and converts ASCII letters to uppercase (ISBN-10
    numbers may have an "X" check digit).  Characters outside the printable
    ASCII range are passed through unchanged, matching the original
    behaviour.

    This function does NOT guarantee that the value it returns is a valid
    ISBN.  Passing a non-string is equivalent to passing an empty string.

    Parameters:
        s : str | mixed - the ISBN number string to normalize
    Return:
        the normalized ISBN string, which is NOT guaranteed to be valid
    """
    if not isinstance(s, str):
        return ''
    kept = []
    for ch in s:
        if 'a' <= ch <= 'z':
            # Lowercase ASCII letter: keep the uppercase form.
            kept.append(ch.upper())
        elif 'A' <= ch <= 'Z' or '0' <= ch <= '9':
            kept.append(ch)
        elif 0x21 <= ord(ch) <= 0x7e or ch in '\t\r\n ':
            # Printable ASCII symbol or ASCII whitespace: drop it.
            pass
        else:
            # Control or extended character: transferred unchanged.
            kept.append(ch)
    return ''.join(kept)
def validate_nb(nb):
    """
    Validate that the given notebook JSON is importable:
    - nbformat must be 4
    - the kernel language must be python
    (Not a re-implementation of nbformat validation.)
    """
    if nb['nbformat'] != 4:
        return False
    kernelspec = nb.get('metadata', {}).get('kernelspec', {})
    return kernelspec.get('language', '').lower() == 'python'
def token_ep(auth_domain):
    """Construct the token endpoint URL, given the authentication domain."""
    separator = '' if auth_domain.endswith('/') else '/'
    return f'{auth_domain}{separator}oauth2/token'
def bpe_postprocess(string) -> str:
    """
    Post-processor for BPE output: recombine BPE-split tokens by removing
    the "@@ " continuation markers.

    :param string: BPE-segmented text
    :return: post-processed string
    """
    return string.replace("@@ ", "")
def get_set_with_most_elements(sets_dict, elements):
    """
    Return the key of the set that shares the most elements with ``elements``.

    ARGS:
        sets_dict(Dict{}): keys: set names
                           values: List[] items must be same type as items
                           in elements List
        elements(List[]): items must have same type as sets_dict values items
    RETURNS:
        key of the set with the largest overlap, or None when nothing overlaps
    """
    # Build the lookup set once; the old version rebuilt set(elements) on
    # every iteration of the loop.
    element_set = set(elements)
    max_set_name = None
    max_overlap = 0
    for set_name, members in sets_dict.items():
        overlap = len(element_set.intersection(members))
        if overlap > max_overlap:
            max_overlap = overlap
            max_set_name = set_name
    return max_set_name
def uri_leaf(uri):
    """
    Get the "leaf" - fragment id or last segment - of a URI. Useful e.g. for
    getting a term from a "namespace like" URI. Examples:

    >>> uri_leaf('http://example.org/ns/things#item')
    'item'
    >>> uri_leaf('http://example.org/ns/stuff/item')
    'item'
    >>> uri_leaf('http://example.org/ns/stuff/')
    >>>
    >>> uri_leaf('urn:example.org:stuff')
    'stuff'
    >>> uri_leaf('example.org')
    >>>
    """
    # The previous version only used the loop to test endswith() and then
    # split on whatever char the loop happened to stop at, which failed its
    # own doctests (e.g. '#item' URIs split on ':').  Partition per
    # separator instead, as the commented-out rpartition hinted.
    for char in ('#', '/', ':'):
        if uri.endswith(char):
            # A URI ending in a separator has no leaf.
            return None
        base, sep, leaf = uri.rpartition(char)
        if sep and leaf:
            return leaf
    return None
def resolve(name, module=None):
    """Resolve a dotted name to a python object (module or attribute).

    :param name: dotted path; a leading '.' makes it relative to ``module``,
        and each additional leading dot pops one level off ``module``.
    :param module: dotted base module path used for relative names.
    :return: the resolved module or attribute.
    :raises ValueError: for a relative name without a base module.
    """
    name = name.split('.')
    if not name[0]:
        # Relative name: empty leading components count how many package
        # levels of `module` to strip before joining.
        if module is None:
            raise ValueError('relative name without base module')
        module = module.split('.')
        name.pop(0)
        while not name[0]:
            module.pop()
            name.pop(0)
        name = module + name
    used = name.pop(0)
    found = __import__(used)
    for n in name:
        used += '.' + n
        try:
            found = getattr(found, n)
        except AttributeError:
            # Not yet an attribute: import the submodule, then retry.
            __import__(used)
            found = getattr(found, n)
    return found
def extract_data(selected):
    """Return the 9th tab-separated column from each selected sentence line."""
    return [line.split('\t')[8] for line in selected]
def get_var(input_dict, accessor_string):
    """Get data from a dictionary using a dotted accessor-string.

    Only the 'desired'/'reported' top-level sections are considered.
    Descent stops (returning whatever was reached so far) at an empty
    accessor chunk or a missing key.
    """
    current_data = {section: input_dict[section]
                    for section in ('desired', 'reported')
                    if section in input_dict}
    # Note: ''.split('.') yields [''], so an empty accessor returns the
    # filtered top-level data immediately.
    for chunk in accessor_string.split('.'):
        if chunk == '' or chunk not in current_data:
            return current_data
        current_data = current_data[chunk]
    return current_data
def get_delete_rows_data(column_names, row_deletion_data):
    """
    Build a query segment matching rows whose ``column_names`` equal any of
    the tuples in ``row_deletion_data``.

    :param column_names: List of column names to match
    :param row_deletion_data: Values corresponding to the column names
    :return: Segments like '(col1=val1 AND col2=val2) OR (col1=val3 AND col2=val4)'

    NOTE(review): values are interpolated verbatim with no quoting or
    escaping — confirm inputs are trusted/numeric before using in SQL.
    """
    clauses = []
    for row in row_deletion_data:
        assert len(column_names) == len(row)
        conditions = [str(column) + "=" + str(value)
                      for column, value in zip(column_names, row)]
        clauses.append("(" + " AND ".join(conditions) + ")")
    return " OR ".join(clauses)
def _shape_to_3d(shape):
"""Return a shape with 3-dimensions, even if lower dimensional
shape is provided.
>>> _shape_to_3d([5])
[5, 1, 1]
>>> _shape_to_3d([5, 2])
[5, 2, 1]
>>> _shape_to_3d([5, 3, 1])
[5, 3, 1]
>>> try:
... _shape_to_3d([5, 3, 3, 1])
... except ValueError:
... pass
"""
shape = list(shape)
L = len(shape)
if L > 3:
raise ValueError("Shape cannot be higher than 3-dimensional.")
shape += [1,]*(3 - L)
return shape |
def format_internal_exception_output(result):
    """
    Format any internal errors Ansible ran into while executing.

    :param result: result to inspect
    :return: formatted output message, or '' when no exception is present
    """
    if 'exception' not in result:
        return ''
    return 'An internal exception occurred:\n{}'.format(result['exception'])
def armlength(armlen):
    """ Length of a padlock probe arm: valid when at least 7. """
    if armlen > 6:
        return True
    print("Padlock arm length too short. Should be at least 7. Try again")
    return False
def decorator_noop(**kwargs):
    """NOOOOOOO OPERATION"""
    # Keyword arguments are accepted and discarded; respond 204 No Content.
    return '', 204
def discard_non_duplicates(field_occurrences):
    """
    Keep only fieldnames that are included in more than one form.

    Put differently: remove any fieldnames that appear in only one form.
    This also removes the cruft (download timestamp, etc.).

    Args:
        field_occurrences (dict): fieldname -> list of form names.
    Returns:
        The same mapping restricted to fieldnames with 2+ forms.
    """
    return {field: forms
            for field, forms in field_occurrences.items()
            if len(forms) >= 2}
def sorted_squares(nums):
    """Given an integer array nums sorted in non-decreasing order,
    return a new array of the squares of each number sorted in
    non-decreasing order.

    Fixes two issues with the previous version: it squared the
    caller's list in place (a surprising side effect), and it sorted
    in O(n log n) even though a two-pointer merge over the already
    sorted input runs in O(n).

    :type nums: List[int]
    :rtype: List[int]
    """
    n = len(nums)
    result = [0] * n
    lo, hi = 0, n - 1
    # The largest square is at one of the two ends; fill the output
    # from the back, consuming whichever end has the bigger square.
    for out in range(n - 1, -1, -1):
        left = nums[lo] * nums[lo]
        right = nums[hi] * nums[hi]
        if left > right:
            result[out] = left
            lo += 1
        else:
            result[out] = right
            hi -= 1
    return result
def isHost(obj):
    """Return whether the given object is a host (a Host or NestedHost).
    @return: If the object is a host
    @rtype: bool"""
    return type(obj).__name__ in ("NestedHost", "Host")
def rivers_with_station(stations):
    """Return the set of river names that have at least one
    monitoring station among the given MonitoringStation objects."""
    return set(station.river for station in stations)
def one(n = 10):
    """
    Approximate 1.0 by adding 1/n to an accumulator n times.

    Returns 0.0 when n == 0. Fixes two cosmetic defects of the
    previous version: the unused local ``denom`` and an accumulator
    that shadowed the builtin ``sum``.

    Parameters
    ----------
    :param n: int
        the number of points to test
    """
    # Keep the explicit term-by-term accumulation so the float
    # rounding behaviour matches the original implementation.
    total = 0.0
    for _ in range(n):
        total += 1 / n
    return total
def _strip_h5(value: str) -> str:
""" remove trailing .h5, if present """
if value.endswith(".h5"):
value = value[:-3]
return value |
def biot(h, r, k):
    """
    Dimensionless Biot number, Bi = h * r / k.

    Parameters
    ----------
    h = heat transfer coefficient, W/m^2K
    r = radius of particle, m
    k = thermal conductivity, W/mK

    Returns
    -------
    Bi = Biot number, -
    """
    return (h * r) / k
def _image(u, v, umap, vmap, qvars, bdd, forall, cache):
    """Recursive (pre)image computation.

    Conjoins nodes `u` and `v` while quantifying out the levels in
    `qvars` (universally when `forall` is truthy, existentially
    otherwise). The renaming `vmap` is applied to `v`'s variables
    before the conjunction; `umap` is applied afterwards.

    Renaming requires that in each pair
    the variables are adjacent.

    @param u, v: nodes
    @param umap: renaming of variables in `u`
        that occurs after conjunction of `u` with `v`
        and quantification.
    @param vmap: renaming of variables in `v`
        that occurs before conjunction with `u`.
    @param qvars: levels to quantify out (membership-tested against
        the current top level `z`)
    @param bdd: manager that owns `u` and `v`
    @param forall: truthy selects universal quantification
    @param cache: memo dict keyed on `(u, v)` node pairs
    """
    # controlling values for conjunction ?
    # NOTE(review): -1 and 1 appear to be the FALSE/TRUE terminals of a
    # complemented-edge BDD (negative ints denote negated nodes) —
    # confirm against the `bdd` implementation.
    if u == -1 or v == -1:
        return -1
    if u == 1 and v == 1:
        return 1
    # already computed ?
    t = (u, v)
    w = cache.get(t)
    if w is not None:
        return w
    # recurse (descend)
    # Top levels of u and v; `abs` strips the complemented-edge sign.
    iu, _, _ = bdd._succ[abs(u)]
    jv, _, _ = bdd._succ[abs(v)]
    # Rename v's top level before conjoining, when a renaming is given.
    if vmap is None:
        iv = jv
    else:
        iv = vmap.get(jv, jv)
    # Descend on the smaller (closer to the root) of the two levels.
    z = min(iu, iv)
    u0, u1 = bdd._top_cofactor(u, z)
    # Cofactor v at the pre-renaming level that corresponds to z.
    v0, v1 = bdd._top_cofactor(v, jv + z - iv)
    p = _image(u0, v0, umap, vmap, qvars, bdd, forall, cache)
    q = _image(u1, v1, umap, vmap, qvars, bdd, forall, cache)
    # quantified ?
    if z in qvars:
        # Level z is eliminated: combine the cofactor results.
        if forall:
            r = bdd.ite(p, q, -1)  # conjoin
        else:
            r = bdd.ite(p, 1, q)  # disjoin
    else:
        # Level z is kept, renamed through `umap` when given.
        if umap is None:
            m = z
        else:
            m = umap.get(z, z)
        # Node representing the (possibly renamed) variable m itself.
        g = bdd.find_or_add(m, -1, 1)
        r = bdd.ite(g, q, p)
    cache[t] = r
    return r
def try_int(intstr):
    """
    Try converting a string into int. Trims empty space.

    Returns 0 for anything that cannot be parsed — including
    non-string input such as None, which previously escaped as an
    AttributeError from ``.strip()`` instead of using the fallback.
    """
    try:
        return int(intstr.strip())
    except (ValueError, TypeError, AttributeError):
        return 0
def gen_idx_byclass(labels):
    """
    Neatly organize indices of labeled samples by their classes.

    Single O(n) pass over the labels; the previous version re-scanned
    the full label list once per distinct class (O(n * k)). The output
    is identical: keys appear in first-occurrence order and each index
    list is ascending.

    Parameters
    ----------
    labels : list
        Note that labels should be a simple Python list instead of a tensor.

    Returns
    -------
    idx_byclass : dictionary {[class_label (int) : indices (list)]}
    """
    idx_byclass = {}
    for idx, class_label in enumerate(labels):
        idx_byclass.setdefault(class_label, []).append(idx)
    return idx_byclass
def extract_records (json_query_result):
    """
    Return the list of record dictionaries stored under '_items' in
    the given query result, or an empty list when that key is absent.
    """
    try:
        return json_query_result['_items']
    except KeyError:
        return []
def tex_coord(x, y, n=4):
    """ Return the bounding vertices of the texture square.

    The atlas is an n-by-n grid; (x, y) selects one cell and the four
    corners are returned counter-clockwise starting at bottom-left.
    """
    size = 1.0 / n
    left = x * size
    bottom = y * size
    right = left + size
    top = bottom + size
    return left, bottom, right, bottom, right, top, left, top
def get_suspecious_items(_items):
    """Items with libraryCatalog==Zotero
    These items are suspicious, because they were imported from pdf
    files and Zotero may not have imported the metadata properly.
    :param _items: Zotero library items
    :type _items: list containing dicts
    :returns: list containing dicts
    """
    return [
        item for item in _items
        if item["data"].get("libraryCatalog") == "Zotero"
    ]
def calculateAthreshold(Afinal, kfinal, k, L=1):
    """ Calculate D-eff threshold at specified number of columns """
    # Number of model terms: intercept + main effects + two-factor
    # interactions, at the final and current column counts.
    mfinal = 1 + kfinal + kfinal * (kfinal - 1) / 2
    m = 1 + k + k * (k - 1) / 2
    # Scale the final criterion value down by L per removed column.
    Cthr = (Afinal ** mfinal) / (L ** (kfinal - k))
    return Cthr ** (1.0 / m)
def HPBW(feedtaper,wavelength,diameter):
    """
    Half-power beamwidth estimate
    @param feedtaper : feed pattern amplitude at edge of primary, in dB
    @type feedtaper : float
    @param wavelength : in same units as diameter
    @type wavelength : float
    @param diameter : of main aperture, in same units as wavelength
    @type diameter : float
    @return: HPBW in radians (float)
    """
    # Beamwidth factor grows linearly with the edge taper.
    factor = 1.02 + 0.0135 * feedtaper
    return factor * wavelength / diameter
def nano_mod(x, y):
    """
    Modulo computed on integer nanoseconds, avoiding the float noise
    of the builtin ``%`` operator. The result is cast back to the
    type of ``x``.

    >>> 0.7 % 0.2
    0.09999999999999992
    >>> -0.7 % 0.2
    0.10000000000000009
    >>> nano_mod(0.7, 0.2)
    0.1
    >>> nano_mod(-0.7, 0.2)
    0.1
    :param x:
    :param y:
    :return:
    """
    scale = 1000000000
    result_type = type(x)
    # Truncate both operands to integer nanoseconds, take the exact
    # integer remainder, then scale back down.
    remainder = int(scale * x) % int(scale * y)
    return result_type(remainder / scale)
def env_matches(declared, desired):
    """Determine if a declared env matches a desired env.

    A match means every '-'-separated factor of the desired env is
    present among the declared env's factors; extra declared factors
    do not prevent a match.
    """
    return set(desired.split('-')) <= set(declared.split('-'))
def makeHtmlInlineImage(text):
    """Create HTML code for an inline image.

    NOTE(review): *text* is interpolated verbatim (no HTML escaping) —
    only pass trusted values.
    """
    return '<IMG SRC="{0}" ALT="{0}">'.format(text)
def remove_trailing_slash(p):
    """Returns *p* with trailing slashes removed, preserving the input's
    type; falsy input (None, empty string) is returned unchanged."""
    if p:
        return type(p)(p.rstrip('/'))
    return p
def document_category(value):
    """Verify value of document/category follow rules.

    Returns an (error_code, message) pair: (0, '') on success,
    (1, reason) when the value is not text or is empty.
    """
    label = 'property document.category'
    if not isinstance(value, str):
        return 1, f'{label} present but no text'
    if not value:
        return 1, f'{label} present but empty'
    return 0, ''
def get_video_parts(video_path):
    """Given a full path to a video, return its parts.

    Expects a layout like root/train_or_test/classname/filename; the
    filename's extension is stripped for the third return value.
    """
    # Normalize Windows separators so the split works on both platforms.
    parts = video_path.replace('\\', '/').split('/')
    train_or_test = parts[1]
    classname = parts[2]
    filename = parts[3]
    filename_no_ext = filename.split('.')[0]
    return train_or_test, classname, filename_no_ext, filename
def strictly_equal(obj1: object, obj2: object) -> bool:
    """True only when the objects compare equal AND share the exact same type."""
    same_type = type(obj1) is type(obj2)
    return obj1 == obj2 and same_type
def latest_price(prices):
    """ Return the most recent price, i.e. the final entry.
    input: prices is a list of 1 or more numbers.
    """
    last_index = len(prices) - 1
    return prices[last_index]
def is_prime(x: int) -> bool:
    """Checks if integer is prime.

    Fixes two defects in the previous trial-division loop: the upper
    bound ``range(2, int(x ** 0.5))`` excluded the square root itself,
    so perfect squares such as 4, 9 and 25 were reported prime; and
    values below 2 (0, 1, negatives) were also reported prime.

    :param x: integer to test
    :return: True when x is prime, False otherwise
    """
    if x < 2:
        return False
    # Trial division up to and including floor(sqrt(x)), using exact
    # integer arithmetic (no float square root).
    i = 2
    while i * i <= x:
        if x % i == 0:
            return False
        i += 1
    return True
def tsnames_in_dct(pes_idx, chnl_idx, spc_dct, config_idxs=None):
    """ Get the names of all configuratons of a transition state
        for the channel of a PES.
    """
    # 1-based PES/channel indices form the name prefix 'ts_<p>_<c>_'.
    prefix = f'ts_{pes_idx+1:d}_{chnl_idx+1:d}_'
    if config_idxs is not None:
        return tuple(f'{prefix}{idx}' for idx in config_idxs)
    return tuple(name for name in spc_dct if prefix in name)
def get_common_shape(shape1, shape2):
    """ Get a common shape that fits both shapes. Dimensions that differ in size are set to None.
    Example: [None, 20, 100, 50], [None, 20, 200, 50] -> [None, 20, None, 50]
    """
    if len(shape1) != len(shape2):
        raise ValueError("Can't compute common shape. Ndims is different.")
    common = []
    for dim1, dim2 in zip(shape1, shape2):
        common.append(dim1 if dim1 == dim2 else None)
    return common
def vari(n, k):
    """Variations (partial permutations): V(n, k) = n! / (n - k)!.

    n = total number of elements
    k = number of elements chosen

    Counts the ordered arrangements of k distinct elements drawn from
    a set of n, without repetition. Practical example: the amount of
    3-digit numbers built from the digits 1, 2, 3, 4:
    123, 132, 213, 231, 312, 321, 124, 142, 214, 241, 412, 421, ...
    """
    # Multiply the k descending factors n, n-1, ..., n-k+1.
    result = 1
    factor = n
    while factor > n - k:
        result *= factor
        factor -= 1
    return result
def is_diagonal(i, j):
    """Identity-matrix entry: 1 on the 'diagonal' (i == j), 0 elsewhere."""
    return int(i == j)
def binary_tree_level_order_traversal(tree):
    """ Binary Tree level order traversal, a.k.a. breadth-first search.

    Returns a dict mapping each 1-based level to the set of node keys
    found at that level. Uses an explicit stack instead of recursion.
    """
    output = {}
    pending = [(tree, 1)]
    while pending:
        node, level = pending.pop()
        if node == None:  # preserve original equality-based None check
            continue
        output.setdefault(level, set()).add(node.key)
        for child in node.children:
            pending.append((child, level + 1))
    return output
def _join_char_list(alignment_tuple):
""" Post-process alignment results for unicode support """
gt_char_list, noise_char_list, score, start, end = alignment_tuple
return "".join(gt_char_list), "".join(noise_char_list), score, start, end |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.