content stringlengths 42 6.51k |
|---|
def get_topkeys(n_perc, my_dict):
    """Return the key/value pairs whose value is at least n_perc of the max value.

    Intended for the dictionaries produced by topwords_by_branch or
    topwords_by_frequency. Raises ValueError on an empty dictionary
    (max of an empty sequence).
    """
    maxvalue = max(my_dict.values())
    threshold = n_perc * maxvalue
    return {key: value for key, value in my_dict.items() if value >= threshold}
def find_secondary_structure_segments(sse_string, offset=0):
    """
    Identify segments of secondary structure elements in string
    Parameters
    ----------
    sse_string : str
        String with secondary structure states of sequence
        ("H", "E", "-"/"C")
    offset : int, optional (default: 0)
        Shift start/end indices of segments by this offset
    Returns
    -------
    start : int
        Index of first position (equal to "offset")
    end : int
        Index of last position
    segments : list
        List of tuples with the following elements:
        1. secondary structure element (str)
        2. start position of segment (int)
        3. end position of segment, exclusive (int)
    """
    if len(sse_string) < 1:
        raise ValueError("Secondary structure string must have length > 0.")
    end = len(sse_string) - 1
    sse_list = list(sse_string)
    # positions i where the state changes between character i and i+1
    change_points = [
        (i, (c1, c2)) for (i, (c1, c2)) in
        enumerate(zip(sse_list[:-1], sse_list[1:]))
        if c1 != c2
    ]
    segments = []
    last_start = 0
    # set s2 for the case of only one continuous segment
    s2 = sse_string[0]
    for (p, (s1, s2)) in change_points:
        # segment of state s1 ends at p (inclusive) -> exclusive end index p + 1
        segments.append((s1, offset + last_start, offset + p + 1))
        last_start = p + 1
    # close the final (or only) segment, running to the end of the string
    segments.append((s2, offset + last_start, offset + end + 1))
    return offset, end + offset, segments
def _dictContainsSubset(expected, actual):
"""Checks whether actual is a superset of expected.
Helper for deprecated assertDictContainsSubset"""
missing = False
mismatched = False
for key, value in expected.items():
if key not in actual:
missing = True
elif value != actual[key]:
mismatched = True
return False if missing or mismatched else True |
def find_empty_lines(line):
    """
    Return 1 for a line that is empty or whitespace-only, 0 otherwise.

    Bug fix: the original chained ``strip("\\t").strip(" ").strip("\\n")``,
    which only strips each character class once from the ends, so a mixed
    run such as " \\t\\n" was not recognized as empty. ``str.strip()`` with
    no argument removes all whitespace from both ends in one pass.

    :param line: the line of text to test
    :return: 1 if empty/whitespace-only, 0 otherwise
    """
    return 1 if line.strip() == '' else 0
def num(s):
    """Try to convert ``s`` to a float; return ``s`` unchanged if conversion fails.

    Bug fix: the original used a bare ``except``, which also swallowed
    KeyboardInterrupt/SystemExit. float() can only fail with ValueError
    (bad string) or TypeError (unsupported type), so catch exactly those.
    """
    try:
        return float(s)
    except (ValueError, TypeError):
        return s
def calcModDuration(duration, freq, ytm):
    """Return the modified duration: duration / (1 + ytm / freq)."""
    return duration / (1 + (ytm / freq))
def blklim(coord, blksiz, totsiz):
    """Return the block size, clipped so the block does not run past totsiz."""
    return min(blksiz, totsiz - coord)
def get_number_of_ones(n):
    """Return the number of 1 bits in the binary representation of integer n."""
    return format(n, 'b').count('1')
def RPL_LUSERCLIENT(sender, receipient, message):
    """Build the IRC reply-code 251 line. (``receipient`` is accepted but unused.)"""
    return f"<{sender}>: {message}"
def alpha_check(word, target_s):
    """
    Return True if every character of ``word`` occurs in ``target_s``.

    :param word: str, the word whose characters are checked
    :param target_s: str, the allowed character set
    :return: bool
    """
    return all(char in target_s for char in word)
def epsilon(l, S, J):
    """Scaling factor of the fine structure shift for given quantum numbers.

    Returns 0 for singlets (S == 0); for triplets (S == 1) the factor depends
    on whether J equals l+1, l, or l-1. Raises ValueError for any other S or J.
    """
    if S == 0:
        # singlet: no fine structure shift
        return 0.0
    if S != 1:
        raise ValueError("The total spin quantum number 'S' must be 0 or 1.")
    # triplet
    delta = int(l == 0)
    if J == l + 1:
        omega = (3*l + 4)/ ((l + 1) * (2*l + 3))
    elif J == l:
        omega = -1.0 / (l*(l + 1))
    elif J == l - 1:
        omega = - (3*l - 1.0)/ (l*(2*l - 1))
    else:
        raise ValueError("The total angular momentum quantum number 'J' must " +
                         "be in the range l - 1 < J < l + 1")
    return 7.0 / 6.0 * delta + (1 - delta) / (2.0 * (2 * l + 1)) * omega
def _get_axes_ndim(axes):
"""
Quick function to determine if an Axes object is 3D (can accept x, y, z data)
or 2d (can only accept x, y data)
"""
if hasattr(axes, "get_zlim"):
n = 3
else:
n = 2
return n |
def get_feature_list(feature_number):
    """
    Map a feature count to the list of price-feature names.

    :param feature_number: an int indicating the number of features
    :return: list of feature name strings
    :raises NotImplementedError: for feature_number == 2 (volume unsupported)
    :raises ValueError: for any other unknown feature_number
    """
    if feature_number == 2:
        # would be ["close", "volume"]
        raise NotImplementedError("the feature volume is not "
                                  "supported currently")
    known = {
        1: ["close"],
        3: ["close", "high", "low"],
        4: ["close", "high", "low", "open"],
    }
    if feature_number not in known:
        raise ValueError("feature number could not be %s" % feature_number)
    return known[feature_number]
def bounding_sphere(points):
    """A fast, approximate method for finding the sphere containing a set of points.
    See https://www.researchgate.net/publication/242453691_An_Efficient_Bounding_Sphere
    This method is approximate. While the sphere is guaranteed to contain all the points
    it is a few percent larger than necessary on average.

    Expects points as an indexable collection of 3-component (x, y, z) sequences;
    returns a tuple ``(center, radius)`` where center is an (x, y, z) tuple.
    """
    # euclidean metric
    def dist(a, b):
        return ((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2 + (a[2] - b[2]) ** 2) ** 0.5
    # midpoint of segment a-b
    def cent(a, b):
        return ((a[0] + b[0]) / 2, (a[1] + b[1]) / 2, (a[2] + b[2]) / 2)
    p0 = points[0]  # any arbitrary point in the point cloud works
    # choose point y furthest away from x
    p1 = max(points, key=lambda p: dist(p, p0))
    # choose point z furthest away from y
    p2 = max(points, key=lambda p: dist(p, p1))
    # initial bounding sphere
    center = cent(p1, p2)
    radius = dist(p1, p2) / 2
    # while there are points lying outside the bounding sphere, update the sphere by
    # growing it to fit
    for p in points:
        distance = dist(p, center)
        if distance > radius:
            # grow the radius by half the overshoot and shift the center toward p
            # so the far side of the old sphere stays covered
            delta = (distance - radius) / 2
            radius = (radius + distance) / 2
            cx, cy, cz = center
            x, y, z = p
            cx += (x - cx) / distance * delta
            cy += (y - cy) / distance * delta
            cz += (z - cz) / distance * delta
            center = (cx, cy, cz)
    return (center, radius)
def anudatta_apply(accent, syllabe):
    """
    Apply anudatta ("A") or svarita ("S") accentuation to a syllable;
    any other accent strips udatta markers ("/") from the syllable.
    """
    if accent == "A":
        return syllabe + "="
    if accent == "S":
        return syllabe + "\\"
    return syllabe.replace("/", "")
def Dadgostar_Shaw_terms(similarity_variable):
    """
    Return the three terms of the Dadgostar-Shaw heat capacity correlation.

    Parameters
    ----------
    similarity_variable : float
        Similarity variable, [mol/g]

    Returns
    -------
    tuple of float
        (first, second, third) correlation terms, [-]

    See Also
    --------
    Dadgostar_Shaw
    """
    a = similarity_variable
    a2 = a*a
    # Correlation coefficients (a_ij) from the original publication.
    # The 24.5 constant approximates
    # 3*R*(theta/T)**2*exp(theta/T)/(exp(theta/T)-1)**2; using more precision
    # did not improve agreement with the reference data.
    constant = 24.5
    first = constant * (-0.3416*a + 2.2671*a2)
    second = 0.1064*a + -0.3874*a2
    third = -9.8231E-05*a + 4.182E-04*a2
    return (first, second, third)
def scaleTuple(t, v):
    """Return a new tuple with every element of t multiplied by v."""
    return tuple(elem * v for elem in t)
def _format_function(func_name):
"""Provide better context for a "function" of the caller.
"""
if func_name is None:
return ""
elif func_name == "<module>":
return " (top level stmt)"
else:
return " in " + func_name |
def rotations(num):
    """Return all non-trivial digit rotations of num.

    Single-digit magnitudes return [num] itself; otherwise the identity
    rotation is excluded, matching the original behavior.
    """
    if abs(num) < 10:
        return [num]
    digits = str(num)
    return [int(digits[i:] + digits[:i]) for i in range(1, len(digits))]
def reverse(B):
    """
    Build in-neighbor lists from out-neighbor lists.

    B[i] is a list of 2-item edges whose second item is the target vertex;
    the returned list R satisfies: j appears in R[k] iff B[j] has an edge to k.
    """
    in_neighbors = [[] for _ in B]
    for src, out_edges in enumerate(B):
        for (_weight, dst) in out_edges:
            in_neighbors[dst].append(src)
    return in_neighbors
def partition(seq, n_bins):
    """
    Bin a sequence of floats into n_bins discrete integer labels.

    Parameters
    ----------
    seq : list/tuple of float
        Collection of floats.
    n_bins : int
        Number of bins/partitions to create.

    Returns
    -------
    list
        Integer bin labels in the range 1..n_bins for each element of seq.
    """
    assert (
        isinstance(n_bins, int) and n_bins > 1
    ), "ERROR: Number of bins should be a positive integer"
    lowest = min(seq)
    # reciprocal of the bin width (epsilon keeps the max element inside bin n_bins)
    scale = n_bins / (max(seq) - lowest + 1e-6)
    return [int((value - lowest) * scale) + 1 for value in seq]
def get_skel(s):
    """Get a tuple representing an instruction skeleton.
    Args:
        s: String of chars in {0, 1, x}: a binary prefix, a run of x's,
           a run of 0s, then a binary suffix
    Returns:
        Tuple (before, xlen, zerolen, after), where
        - before is the integer value of the binary digits before the x-mask
        - xlen is the length of the run of x's in the mask
        - zerolen is the number of 0s immediately following the x's
        - after is the integer value of the remaining binary digits
    """
    i = 0
    # get number before x's
    before = 0
    while i < len(s):
        if s[i] != 'x':
            assert s[i] == '0' or s[i] == '1'
            # accumulate binary: before = before * 2 + bit
            before += before
            before += int(s[i])
        else:
            break
        i += 1
    # get number of x's
    xlen = 0
    while i < len(s):
        if s[i] == 'x':
            xlen += 1
        else:
            break
        i += 1
    # get number of 0s after x's
    zerolen = 0
    while i < len(s):
        if s[i] == '0':
            zerolen += 1
        else:
            break
        i += 1
    # get number afer x's (same binary accumulation as above)
    after = 0
    while i < len(s):
        assert s[i] == '0' or s[i] == '1'
        after += after
        after += int(s[i])
        i += 1
    return (before, xlen, zerolen, after)
def convert_lr(eff_lr, momentum=0.0, beta1=0.0, beta2=0.0, batch_size=1):
    """Compute the optimizer learning rate giving rough SGD-equivalence.

    Scales the desired effective learning rate to compensate for momentum,
    Adam's moment coefficients, and mini-batch size, so one hyper-parameter
    sweep behaves similarly across optimizers.

    Args:
        eff_lr (float): The effective learning rate you want.
        momentum (float, optional): SGD momentum coefficient. Defaults to 0.0.
        beta1 (float, optional): Adam first moment coefficient. Defaults to 0.0.
        beta2 (float, optional): Adam second moment coefficient. Defaults to 0.0.
        batch_size (int, optional): Examples per mini-batch. Defaults to 1.

    Returns:
        float: The adjusted learning rate to supply to the optimizer.
    """
    adjusted = eff_lr
    if beta1 != 1.0 or beta2 != 1.0:
        # Adam-style bias correction ratio
        adjusted = adjusted * (1 - beta2) / (1 - beta1)
    if momentum != 0.0:
        adjusted = adjusted * (1 - momentum)
    if batch_size > 1:
        adjusted = adjusted * batch_size
    return adjusted
def replace_css_class(html_string, old_class, new_class):
    """
    Replace every occurrence of a css class name in the html element string.

    :param html_string: a string of html
    :param old_class: the text segment to replace (a css class designator)
    :param new_class: the text to substitute for the old css class
    :return: a string of html
    """
    updated = html_string.replace(old_class, new_class)
    return updated
def instance_or_id_to_snowflake(obj, type_, name):
    """
    Validates the given `obj` whether it is instance of the given `type_`, or is a valid snowflake representation.
    Parameters
    ----------
    obj : `int`, `str` or`type_`
        The object to validate.
    type_ : `type` of (`tuple` of `type`)
        Expected type.
    name : `str`
        The respective name of the object.
    Returns
    -------
    snowflake : `int`
    Raises
    ------
    TypeError
        If `obj` was not given neither as `type_`, `str`, `int`.
    ValueError
        If `obj` was given as `str`, `int`, but not as a valid snowflake.
    Notes
    -----
    The given `type_`'s instances must have a `.id` attribute.
    """
    obj_type = obj.__class__
    if issubclass(obj_type, type_):
        # already an instance of the expected type: use its own id
        snowflake = obj.id
    else:
        if obj_type is int:
            # exact int: accept as-is
            snowflake = obj
        elif issubclass(obj_type, str):
            # snowflakes are 7-21 digit decimal strings
            if 6 < len(obj) < 22 and obj.isdigit():
                snowflake = int(obj)
            else:
                raise ValueError(f'`{name}` was given as `str`, but not as a valid snowflake, got {obj!r}.')
        elif issubclass(obj_type, int):
            # int subclass (e.g. bool or IntEnum): normalize to plain int
            snowflake = int(obj)
        else:
            if type(type_) is tuple:
                type_name = ', '.join(t.__name__ for t in type_)
            else:
                type_name = type_.__name__
            raise TypeError(f'`{name}` can be given either as {type_name} instance, or as `int`, `str` representing '
                f'a snowflake, got {obj_type.__name__}.')
    # final range check: snowflakes must fit an unsigned 64-bit integer
    if (snowflake < 0) or (snowflake > ((1 << 64) - 1)):
        raise ValueError(f'`{name}` was given either as `int`, `str`, but not as representing a '
            f'`uint64`, got {obj!r}.')
    return snowflake
def split_with_square_brackets(input_str):
    """
    Split a string on "," while keeping text inside "[" ... "]" together.

    Args:
        input_str: Input string
    Returns:
        List of substrings
    """
    pieces = []
    depth = 0
    buf = []
    for ch in input_str + ",":
        if ch == "," and depth == 0:
            pieces.append("".join(buf))
            buf = []
            continue
        if ch == "[":
            depth += 1
        elif ch == "]":
            depth -= 1
        buf.append(ch)
    return pieces
def Q(lambda_0, lambda_, eps_c, Delta, norm_zeta2, nu):
    """
    Quadratic upper bound of the duality gap function initialized at lambda_0.
    """
    lmd = lambda_ / lambda_0
    linear_part = lmd * eps_c
    gap_part = Delta * (1. - lmd)
    quadratic_part = 0.5 * nu * norm_zeta2 * (1. - lmd) ** 2
    return linear_part + gap_part + quadratic_part
def gen_config(cycle, offset=None):
    """Generate a utility_meter configuration; offset (timedelta-like) is optional."""
    meter = {"source": "sensor.energy", "cycle": cycle}
    if offset:
        meter["offset"] = {"days": offset.days, "seconds": offset.seconds}
    return {"utility_meter": {"energy_bill": meter}}
def lstringstrip(s_orig, s_strip):
    """Strip the whole prefix string s_strip from s_orig (unlike str.lstrip,
    which strips any of a set of characters)."""
    return s_orig[len(s_strip):] if s_orig.startswith(s_strip) else s_orig
def complete_graph(vertices_number: int) -> dict:
    """
    Generate a complete graph on vertices_number vertices as an adjacency dict.
    @example:
    >>> print(complete_graph(3))
    {0: [1, 2], 1: [0, 2], 2: [0, 1]}
    """
    vertices = range(vertices_number)
    return {v: [u for u in vertices if u != v] for v in vertices}
def distance_to_miles(distance):
    """Convert a distance in meters to a 'X.X mi' string (1 decimal place)."""
    return f"{round(distance / 1609.3, 1)} mi"
def getVariablesAsSet(cnfFormula):
    """Return the set of variable names appearing in the given CNF formula.

    Args:
        cnfFormula (list of dict): a CNF formula; each clause maps
            variable names to values
    Returns:
        set of str: names of all variables that appear in cnfFormula
    """
    variables = set()
    for clause in cnfFormula:
        variables.update(clause)
    return variables
def opposite_direction(direction):
    """
    Return the opposite of "up", "down", "left" or "right" (KeyError otherwise).
    """
    opposites = {
        "up": "down",
        "down": "up",
        "left": "right",
        "right": "left",
    }
    return opposites[direction]
def query_or_command(op):
    """
    Return the 'command' field of a currentOp document when truthy,
    otherwise the 'query' field (MongoDB 3.2 renamed it for indexing ops:
    https://docs.mongodb.com/manual/reference/method/db.currentOp/#active-indexing-operations)
    """
    command = op.get('command')
    return command if command else op.get('query')
def bit_len(int_type):
    """Return the number of bits required to binary-encode an integer's magnitude.

    Bug fix: the original shifted the value right until it reached zero, but
    arithmetic right shift of a negative int never reaches zero (-1 >> 1 == -1),
    so negative inputs looped forever. Shifting the absolute value terminates
    and gives the magnitude's bit length; behavior for non-negative inputs is
    unchanged (bit_len(0) == 0).
    """
    length = 0
    value = abs(int_type)
    while value:
        value >>= 1
        length += 1
    return length
def merge(left, right):
    """Merge two sorted lists into one sorted list (merge step of merge sort).

    Performance fix: the original used list.pop(0), which is O(n) per pop and
    makes the merge quadratic overall; index cursors make it O(len(left) +
    len(right)). Side note: the inputs are no longer consumed/mutated — the
    merged result is identical.
    """
    i = j = 0
    result = []
    while i < len(left) and j < len(right):
        if left[i] < right[j]:
            result.append(left[i])
            i += 1
        else:
            result.append(right[j])
            j += 1
    # at most one of these extends with anything
    result.extend(left[i:])
    result.extend(right[j:])
    return result
def human_readable_size(bytes):
    """
    Converts bytes to human readable string.

    NOTE(review): the threshold is 1024 but each step divides by 1000
    (the inline comment suggests this mimics Apple's display convention) —
    the units are therefore slightly inflated near each boundary; confirm
    this mix is intentional before "fixing" it.
    """
    for x in ['bytes','KB','MB','GB']:
        if bytes < 1024.0:
            return "%3.1f %s" % (bytes, x)
        bytes /= 1000.0 # This seems to be the Apple default
    return "%3.1f %s" % (bytes, 'TB')
def stations_by_river(stations):
    """Map river names to sorted lists of the names of their monitoring stations.

    Stations whose ``river`` attribute is None are skipped.
    """
    rivers = {}
    for station in stations:
        if station.river is None:
            continue
        rivers.setdefault(station.river, []).append(station.name)
    # alphabetical station names per river
    for names in rivers.values():
        names.sort()
    return rivers
def _sf(string):
    """ Make a string CSV-safe by doubling embedded double quotes.

    NOTE(review): for falsy input this returns the str '' but otherwise
    returns *bytes* (``.encode('utf-8')``) — a Python-2-era pattern; on
    Python 3 callers receive mixed str/bytes. Confirm whether the encode
    is still wanted before changing it.
    """
    if not string:
        return ''
    return string.replace('"', '""').encode('utf-8')
def append_write(filename="", text=""):
    """Append text to a UTF-8 text file and return the number of characters written.

    Keyword Arguments:
        filename {str} -- file name (default: {""})
        text {str} -- text to append (default: {""})
    Returns:
        int -- number of characters added.
    """
    with open(filename, "a", encoding="utf-8") as handle:
        written = handle.write(text)
    return written
def lambda_green_highlight(val) -> str:
    """Wrap a value in rich-style green markup.

    Parameters
    ----------
    val
        value to color
    Returns
    -------
    str
        the value wrapped in [green]...[/green]
    """
    return "[green]{}[/green]".format(val)
def fix_old_requirements_txt(reqs_in):
    """Append compatibility pins when the old naucse_render<1.0 pin is present."""
    lines = [reqs_in]
    if 'naucse_render<1.0' in reqs_in.splitlines():
        lines += [
            '',
            '# compatibility requirements',
            'naucse_render < 1.4',
            'nbconvert < 6',
        ]
    return '\n'.join(lines)
def is_c2d_topic(topic, device_id):
    """
    Return True if topic contains the cloud-to-device pattern
    devices/<deviceId>/messages/devicebound.

    :param topic: The topic string
    :param device_id: The device identifier to look for
    """
    return "devices/{}/messages/devicebound".format(device_id) in topic
def issubset(list1, list2):
    """Return True if every element of list1 is contained in list2.

    Bug fix: the original collected the elements of list2 that appear in
    list1 and compared that to list2 — which actually tested whether list2
    was a subset of list1, the reverse of the documented contract.
    """
    return all(elem in list2 for elem in list1)
def is_sequence(arg):
    """Return True for list/tuple-like sequences, False for strings and scalars.

    Bug fix: the original `not A and B or C` parsed as `(not A and B) or C`
    because `and` binds tighter than `or`, so anything with `__iter__`
    (including strings, which the `strip` check was meant to exclude)
    returned True. Parenthesizing applies the string-exclusion to both tests.
    """
    return not hasattr(arg, "strip") and (
        hasattr(arg, "__getitem__") or hasattr(arg, "__iter__"))
def str_to_intfl(value):
    """Cast value to int if possible, else float, else return the stripped string.

    Falsy values (None, "", 0, ...) are returned unchanged.
    """
    if not value:
        return value
    for cast in (int, float):
        try:
            return cast(value)
        except ValueError:
            continue
    return value.strip()
def format_inbound_rules(rules):
    """Format security group ingress rules into a flat list of dicts.

    :param rules: security group rules as returned by EC2
        describe_security_groups (IpPermissions entries)
    :return: list of {"Source", "Ports", "Protocol"} dicts

    Fixes versus the original:
    - port 0 was treated as 'all' because of a truthiness check; use
      ``is not None`` so a real FromPort/ToPort of 0 renders as a range.
    - PrefixListIds entries were read with the 'GroupId' key; per the
      boto3 response structure the key is 'PrefixListId'.
    """
    formatted_rules = []
    for rule in rules:
        from_port = rule.get('FromPort')
        to_port = rule.get('ToPort')
        if from_port is not None and to_port is not None:
            ports = str(from_port) + "-" + str(to_port)
        else:
            # protocol '-1' rules carry no port range
            ports = 'all'
        protocol = 'all' if rule['IpProtocol'] == '-1' else rule['IpProtocol']
        sources = (
            [ip_addr['CidrIp'] for ip_addr in rule['IpRanges']]
            + [ip_prefix['PrefixListId'] for ip_prefix in rule['PrefixListIds']]
            + [sg_group['GroupId'] for sg_group in rule['UserIdGroupPairs']]
        )
        for source in sources:
            formatted_rules.append(
                {"Source": source, "Ports": ports, "Protocol": protocol})
    return formatted_rules
def wrap(data):
    """Wrap data in a one-element list unless it is already a list or tuple.

    :param data: data to wrap
    :returns: the original list/tuple, or [data]
    """
    if type(data) in (list, tuple):
        return data
    return [data]
def replace_special_characters(text):
    """Return a lowercased copy of text with spaces as hyphens and other
    non-alphanumeric characters as underscores.

    Useful for generating strings to use in HTML documents."""
    def _convert(c):
        if c.isspace():
            return "-"
        if not c.isalnum():
            return "_"
        return c
    return "".join(_convert(c) for c in str(text).lower())
def buildHeader(dst, src, cmd, buf=None):
    """Append the Teensy message header [dst, src, cmd, 0] to buf.

    A new list is created when buf is None; otherwise the given list is
    extended in place and returned. The trailing 0 is a reserved field.
    """
    header = [] if buf is None else buf
    header.extend((dst, src, cmd, 0))
    return header
def prepare_definitions(defs, prefix=None):
    """
    Build a definition list from a dictionary of key-value pairs.

    A key ``foo`` with value ``bar`` becomes ``'foo=bar'``; a truthy key
    with a falsy (non-None) value becomes just the key; keys whose value is
    ``None`` are skipped. When ``prefix`` is given, it is prepended to each
    key.

    Args:
        defs: the definitions to process (may be None/empty)
        prefix (optional): prefix value for each definition

    Returns:
        list of definition strings
    """
    final = []
    if not defs:
        return final
    for key, val in defs.items():
        if val is None:
            continue
        name = prefix + key if prefix else key
        final.append('{}={}'.format(name, val) if val else name)
    return final
def account_info(info):
    """Extract user information from a OneLogin IdP response.

    Reads the first entry of the 'User.email' and 'User.FirstName' lists;
    the first name doubles as both username and full name.
    """
    email = info["User.email"][0]
    first_name = info["User.FirstName"][0]
    return {
        "user": {
            "email": email,
            "profile": {"username": first_name, "full_name": first_name},
        },
        "external_id": email,
        "external_method": "onelogin",
        "active": True,
    }
def listparams_sampling(dms=(0,), sizes=(50,), windows=(5,), mincounts=(2,),
                        samples=(1e-5,), negatives=(5,), hses=(0,), workers=4, epochs=100):
    """
    Return a list of parameter dicts for every combination of the given
    hyper-parameter collections (gensim doc2vec-style keys).

    Fix: the defaults are now tuples rather than lists — mutable default
    arguments are shared across calls, which is a latent bug even though
    these were never mutated. Behavior is otherwise identical; callers can
    still pass lists.
    """
    return [{
        'dm': dm,
        'size': size,
        'window': window,
        'min_count': mincount,
        'sample': sample,
        'negative': neg,
        'hs': hs,
        'workers': workers,
        'iter': epochs
    } for neg in negatives for sample in samples for hs in hses
      for mincount in mincounts for window in windows
      for size in sizes for dm in dms]
def ssh_auth(username, address):
    """Render the 'user@host' part of an ssh target; host only when no user."""
    if not username:
        return '{}'.format(address)
    return '{}@{}'.format(username, address)
def add_hashtags(msg: str, area: str, max_length: int = 140):
    """Append '#hiihto' and '#<area>' hashtags when they fit within max_length;
    the result is always truncated to max_length."""
    for tag in ("#hiihto", "#" + area.lower()):
        if len(msg) + len(tag) + 1 <= max_length:
            msg = msg + " " + tag
    return msg[:max_length]
def vector_add(vector1, vector2):
    """
    Element-wise sum of two 3-component vectors.

    Args:
        vector1 (list): 3 value list
        vector2 (list): 3 value list
    Return:
        list: 3 value list
    """
    return [vector1[i] + vector2[i] for i in range(3)]
def isPalindrome(x):
    """Return True if the non-negative integer x reads the same backwards.

    Bug fixes: the original executed ``print(s[::0])`` — a slice step of 0
    raises ValueError, so every non-negative input crashed — and the loop
    could fall off the end returning None instead of a bool.

    :type x: int
    :rtype: bool
    """
    if x < 0:
        return False
    s = str(x)
    return s == s[::-1]
def idfn(val):
    """Generate names for parametrized tests: the stem for *.txt paths
    (val is expected to be a pathlib.Path in that case), '' otherwise."""
    name = str(val)
    if name.endswith(".txt"):
        return val.stem
    return ""
def distance(coord1, coord2):
    """
    Return the Manhattan distance between two 2D coordinates.
    """
    dx = abs(coord1[0] - coord2[0])
    dy = abs(coord1[1] - coord2[1])
    return dx + dy
def create_labels_RAVDESS(emotion, id, config, labels_merge=False):
    """ Create labels for the RAVDESS dataset by merging 'Calm' and 'Neutral' emotions
    resulting in 7 labels for each gender are created yielding 14 labels
    if labels_merge=True is passed then additional processing is done to also merge [Happy, Angry, Fear] are combined with [Surprise, Disgust, Sad]
    are combined respectively resulting in 4 emotion for each gender and in total 8 emotions.
    i.e. 2 with 7, 3 with 5, 4 with 6 are combined

    Parameters: ``emotion`` is the raw RAVDESS emotion code, ``id`` the actor id
    (even ids shift into the second/other-gender half of the label space),
    ``config`` a dict with an 'emotions' collection whose length sets the
    per-gender offset. Returns the integer label.
    """
    # handle neutral/calm emotion: codes above 1 shift down so calm (2)
    # collapses onto neutral (1)
    if (emotion > 1):
        emotion_label = emotion - 1
    else:
        emotion_label = emotion
    if labels_merge:
        # merge pairs 7->2, 6->4, 5->3 (surprise/disgust/sad into
        # happy/fear/angry respectively)
        if (emotion_label == 7):
            emotion_label = 2
        elif (emotion_label == 6):
            emotion_label = 4
        elif (emotion_label == 5):
            emotion_label = 3
        # even actor ids: offset into the second gender's label block
        if (id % 2 == 0):
            emotion_label = (len(config['emotions']) - 4) + emotion_label
    else:
        if (id % 2 == 0):
            emotion_label = (len(config['emotions']) - 1) + emotion_label
    return emotion_label
def speed_convert(size):
    """
    Convert a raw bytes-per-second value to a human readable rate string.

    Fix: the unit index is now clamped to the last known unit (Tb/s), so
    extremely large inputs no longer raise KeyError past index 4.
    """
    power = 2 ** 10
    units = ('', 'Kb/s', 'Mb/s', 'Gb/s', 'Tb/s')
    index = 0
    while size > power and index < len(units) - 1:
        size /= power
        index += 1
    return f"{round(size, 2)} {units[index]}"
def abbr(a: str, b: str) -> bool:
    """Return True if lowercase string ``a`` can be abbreviated to ``b``.

    An abbreviation may replace any subsequence of characters with their
    uppercase forms and drop the remaining lowercase characters; uppercase
    characters in ``b`` must be matched exactly.

    Dynamic programming over prefixes: dp[i][j] is True when the first i
    characters of ``a`` can produce the first j characters of ``b``.
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    # empty prefix of a matches empty prefix of b
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                # consume a[i] by matching it (uppercased) against b[j]
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                # or drop a[i] entirely — only allowed for lowercase chars
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
def filter_dpkg_arch(arch, to_dpkg=True):
    """Convert a processor arch (as from ``uname -m``) to the DPKG
    architecture name, or the reverse when to_dpkg is False."""
    dpkg_arch_table = {
        'aarch64': 'arm64',
        'x86_64': 'amd64',
    }
    if to_dpkg:
        return dpkg_arch_table[arch]
    reverse_table = {dpkg: proc for proc, dpkg in dpkg_arch_table.items()}
    return reverse_table[arch]
def humansize(nbytes, suffixes=('B', 'KB', 'MB', 'GB', 'TB', 'PB')):
    """
    Return a file size in a human-friendly format (base 1024, trailing
    zeros trimmed).
    """
    if nbytes == 0:
        return '0 B'
    index = 0
    while nbytes >= 1024 and index < len(suffixes) - 1:
        nbytes /= 1024.
        index += 1
    value = ('%.2f' % nbytes).rstrip('0').rstrip('.')
    return '%s %s' % (value, suffixes[index])
def kwot(s):
    """Single-quote a string, doubling any contained single quotes (SQL style)."""
    return "'" + s.replace("'", "''") + "'"
def construct_payload(build_number, build_result, build_result_color, build_url, jenkins_url, job_base_name,
                      job_name, timestamp):
    """Composes the payload for an MQTT message.

    Returns a multi-line YAML-like string of Jenkins build fields; all
    values except the timestamp are single-quoted. The leading newline and
    per-line indentation are part of the message format.
    """
    return f"""
    BUILD_NUMBER: '{build_number}'
    BUILD_RESULT: '{build_result}'
    BUILD_RESULT_COLOR: '{build_result_color}'
    BUILD_URL: '{build_url}'
    JENKINS_URL: '{jenkins_url}'
    JOB_BASE_NAME: '{job_base_name}'
    JOB_NAME: '{job_name}'
    TIMESTAMP: {timestamp}
    """
def check_results(results, expected):
    """
    Compare the two MoonLight results dictionaries: one from
    moonlight_solution.json and the other from the expected result.

    Both dicts must contain every key in the comparison table below; each
    value is coerced to the listed type before comparison. Prints PASSED or
    FAILED (with a per-key diff) and returns the boolean outcome.
    """
    # Mapping of result keys to their data types
    keys_to_compare = dict(corpus_size=int,
                           solution_size=int,
                           solution_weight=float,
                           initial_singularities=int,
                           num_basic_blocks=int,
                           solution=set)
    passed = True
    fail_msg = ''
    for key, type_ in keys_to_compare.items():
        # coerce both sides so e.g. a list 'solution' compares as a set
        results_value = type_(results[key])
        expected_value = type_(expected[key])
        if results_value != expected_value:
            # accumulate one "key: expected v actual" line per mismatch
            fail_msg = '{}\n  {}: {} v {}'.format(fail_msg,
                                                  key,
                                                  expected_value,
                                                  results_value)
            passed = False
    if passed:
        print('PASSED')
    else:
        print('FAILED {}'.format(fail_msg))
    return passed
def normalize_name(name):
    """Return the event/function name with any parenthesized signature removed."""
    return name.partition('(')[0]
def get_largest_number(n):
    """
    Return the largest integer representable with ``n`` decimal digits,
    where ``n`` is a digit-count given as a string ('1'..'7'); any other
    key falls back to 99999999999999.
    """
    max_limit = {
        '1': 9,
        '2': 99,
        '3': 999,
        '4': 9999,
        '5': 99999,
        '6': 999999,
        '7': 9999999
    }
    return max_limit.get(n, 99999999999999)
def validate_and_format_annotator_metadata(annotator_metadata_str: str):
    """Validate a 'key=<k>,value=<v>,...' metadata string and return {k: v}.

    Raises ValueError for malformed input. Bug fix: when fewer than two
    comma-separated parts were given, the original *returned* a ValueError
    instance instead of raising it, so callers silently received an
    exception object.

    NOTE(review): values containing '=' are truncated at the second '='
    by split('=')[1] — unchanged from the original; confirm if intended.
    """
    parts = annotator_metadata_str.split(',')
    if len(parts) < 2:
        raise ValueError('Incorrect annotator metadata argument')
    annotator_metadata = dict()
    key_value_array = [parts[i:i + 2] for i in range(0, len(parts), 2)]
    for key_str, val_str in key_value_array:
        if not (key_str.lower().startswith('key=') and val_str.lower().startswith('value=')):
            raise ValueError('Incorrect annotator metadata argument')
        annotator_metadata[key_str.split('=')[1]] = val_str.split('=')[1]
    return annotator_metadata
def search(seq, val):
    """Binary-search a sorted list for a value.

    Args:
        seq: A sorted list to search.
        val: The value to search for.

    Returns:
        The index of the value if found, otherwise -1.
    """
    low, high = 0, len(seq) - 1
    while low <= high:
        middle = low + (high - low) // 2
        current = seq[middle]
        if current == val:
            return middle
        if val < current:
            high = middle - 1
        else:
            low = middle + 1
    return -1
def mean_arithmetic(values):
    """Return the arithmetic mean of a list, or None for an empty list."""
    if not values:
        return None
    return sum(values) / len(values)
def vector( relation ):
    """ Return the first row of the relation (None if the relation is empty).
    """
    return next(iter(relation), None)
def replace_fields(field_list, *pairs):
    """Return a copy of field_list where each name mentioned in a
    (name, source) pair is replaced by the pair itself.

    fl = 'one two three'.split()
    replace_fields(fl, ('two', 'spam'))
    # ['one', ('two', 'spam'), 'three']
    """
    result = list(field_list)
    for name, source in pairs:
        result[field_list.index(name)] = (name, source)
    return result
def parse_string_literal(string):
    """Evaluate a string with certain special values, or return the string.

    'true', 'false' and 'none' (case-insensitive, surrounding whitespace
    ignored) map to True, False and None; anything else is returned
    unchanged. Any further parsing must be done outside this module.

    Improvements: strip().lower() is computed once instead of up to three
    times, and the final doctest now shows the actual repr (the original
    example wrongly quoted the print() output).

    Parameters
    ----------
    string : string

    Returns
    -------
    val : bool, None, or str

    Examples
    --------
    >>> parse_string_literal('true')
    True
    >>> parse_string_literal('False')
    False
    >>> print(parse_string_literal('none'))
    None
    >>> parse_string_literal('something else')
    'something else'
    """
    normalized = string.strip().lower()
    if normalized == 'true':
        return True
    if normalized == 'false':
        return False
    if normalized == 'none':
        return None
    return string
def strip_punctuation(text):
    """
    Return text with all ',' and '.' characters removed.
    """
    return "".join(ch for ch in text if ch != ',' and ch != '.')
def font_loader(context, font):
    """
    Template tag helper: expose the font spec to the template context.
    {% font_loader "Raleway:300,400,500,600,700,800|Ubuntu:300,400,500,700" %}
    """
    return dict(font=font)
def _GetLastWindow(series, window_size):
"""Returns the last "window" of points in the data series."""
if not window_size:
return series
return series[-window_size:] |
def expand_corepath(pathname, session=None, node=None):
    """
    Expand a file path given session information.

    :param str pathname: file path to expand
    :param session: core session object (provides user, id, session_dir)
    :param node: node object (provides id, name)
    :return: expanded path
    :rtype: str
    """
    if session is not None:
        # substitution order matters only for overlapping tokens; these don't overlap
        substitutions = (
            ("~", "/home/%s" % session.user),
            ("%SESSION%", str(session.id)),
            ("%SESSION_DIR%", session.session_dir),
            ("%SESSION_USER%", session.user),
        )
        for token, value in substitutions:
            pathname = pathname.replace(token, value)
    if node is not None:
        pathname = pathname.replace("%NODE%", str(node.id))
        pathname = pathname.replace("%NODENAME%", node.name)
    return pathname
def count_dt_focus(marked_dict):
    """Developing function, only used during development!
    Counts the words (determiners) which has been marked as "focused" in the dictionary (specifically in the word metadata).
    Used to compare to the count found in count_en_ett().
    Also counts nouns marked as focused. Number will be 0 if nouns were not marked in the dictionary.
    Found an issue with single word sentences (often only a bracket or another such character in the XML-text). The "isinstance()" check on word_meta is a bandaid-fix for this. It simply skips these one-word sentences, since they most likely are not relevant to the issue at hand and because I found no relatively quick fix for the issue.
    Args:
        marked_dict: a dictionary representation of the XML-text, with the added word metadata attribute "focus".
    Returns:
        dt: the count of "focus"-marked words (determiners).
        nn: the count of "focus"-marked nouns (0 if nouns were not marked)."""
    dt = 0
    nn = 0
    for paragraph in marked_dict['corpus']['text']['lessontext']['paragraph']:
        sentence_lvl = paragraph['sentence']
        # XML-to-dict conversion yields a dict for a single sentence,
        # a list of dicts for several sentences
        if isinstance(sentence_lvl, dict):
            for word_meta in sentence_lvl['w']:
                # skip one-word sentences where 'w' is not a list of dicts (bandaid, see docstring)
                if isinstance(word_meta, dict):
                    # focus == 1 marks determiners, focus == 2 marks nouns
                    if word_meta['focus'] == 1:
                        dt += 1
                    elif word_meta['focus'] == 2:
                        nn += 1
        elif isinstance(sentence_lvl, list):
            for sentence in sentence_lvl:
                for word_meta in sentence['w']:
                    if isinstance(word_meta, dict):
                        if word_meta['focus'] == 1:
                            dt += 1
                        elif word_meta['focus'] == 2:
                            nn += 1
        else:
            print("Found something that is not a dict/list!")
    return dt, nn
def generate_fw_query(project, seed, state, query=None):
    """Helper function to generate a query dictionary for filtering Fireworks.

    :param project: regex fragment matched against ``spec.project_name``
    :param seed: regex fragment matched against ``spec.seed_name``
    :param state: Firework state; upper-cased before use
    :param query: optional existing query dict to extend
    :return: the (possibly newly created) query dict
    """
    query = query or {}
    if project:
        query['spec.project_name'] = {'$regex': project}
    if seed:
        query['spec.seed_name'] = {'$regex': seed}
    if state:
        query['state'] = state.upper()
    return query
def f2(x):
    """Return exp(4*x) — an exponential function."""
    import numpy as np
    return np.exp(4.0 * x)
def is_triple_double(word):
    """Test whether *word* contains three consecutive double letters.

    A match is any six-character window of the form XXYYZZ, e.g.
    "bookkeeper" (oo-kk-ee).

    :param word: string to test
    :return: True if three back-to-back double letters occur, else False

    Note: the previous greedy scan advanced by 2 after each matched pair
    and so missed triples starting at an odd offset after a matched pair
    (e.g. "aaabbcc", where aa-bb-cc occupies indices 1-6, returned False).
    Checking every 6-character window fixes that.
    """
    for i in range(len(word) - 5):
        if (word[i] == word[i + 1]
                and word[i + 2] == word[i + 3]
                and word[i + 4] == word[i + 5]):
            return True
    return False
def convert_retention_to_seconds(desired_retention, retention_unit):
    """Convert a desired snapshot-schedule retention to seconds.

    :param desired_retention: the desired retention for the snapshot
        schedule; a falsy value (None or 0) yields None
    :param retention_unit: 'hours' — any other value is treated as days
    :return: integer number of seconds, or None when no retention given
    """
    if not desired_retention:
        return None
    # Any unit other than 'hours' is interpreted as days.
    hours = desired_retention if retention_unit == 'hours' else desired_retention * 24
    return hours * 60 * 60
def dot_esc(text):
    """Escape *text* for use inside a double-quoted DOT string.

    Only embedded double quotes are escaped; backslashes and newlines are
    passed through unchanged, which is sufficient for typical label text.

    :param text: raw string to embed in a DOT attribute
    :return: string with each '"' replaced by '\\"'

    Note: the parameter was renamed from ``str`` — shadowing the builtin
    made the body unable to use ``str()`` and confused readers.
    """
    return text.replace('"', '\\"')
def color_to_hex(rgb_color):
    """Convert an (r, g, b) float tuple to a web-style hex color code.

    Useful to convert occ colors to a web color code.

    Parameters
    ----------
    rgb_color : tuple of three floats, each between 0. and 1.

    Returns
    -------
    str
        Hex string of the form "0xrrggbb".
    """
    r, g, b = rgb_color
    # Reject out-of-range channels early.
    assert 0 <= r <= 1.
    assert 0 <= g <= 1.
    assert 0 <= b <= 1.
    # Scale to 0-255; int() truncates, matching the established output.
    channels = (int(r * 255.), int(g * 255.), int(b * 255.))
    return "0x%.02x%.02x%.02x" % channels
def hex_to_rgb(hex_val):
    """Convert a 6-digit hex color string (no leading '#') to an (r, g, b) tuple.

    https://stackoverflow.com/questions/29643352/converting-hex-to-rgb-value-in-python
    """
    red = int(hex_val[0:2], 16)
    green = int(hex_val[2:4], 16)
    blue = int(hex_val[4:6], 16)
    return (red, green, blue)
def decimal_to_float(num):
    """Convert a Decimal (or any float-convertible value) to float.

    :param num: value to convert, typically a decimal.Decimal
    :return: float(num), or 0 when the value cannot be converted
    """
    try:
        return float(num)
    # Only conversion failures fall back to 0. The previous bare
    # `except:` also swallowed KeyboardInterrupt/SystemExit.
    except (TypeError, ValueError, OverflowError):
        return 0
def split_thousands(s):
    """Format a number with thousands groups separated by single quotes.

    >>> split_thousands(1000012)
    "1'000'012"
    """
    # None is rendered as "0".
    if s is None:
        return "0"
    # Strings are parsed as floats first.
    value = float(s) if isinstance(s, str) else s
    # A float with no fractional part is displayed as an int.
    if isinstance(value, float) and value.is_integer():
        value = int(value)
    # Python's built-in grouping uses commas; swap them for single quotes.
    return "{:,}".format(value).replace(",", "'")
def convertor(value, fromunits, tounits):
    """Convert *value* from one set of units to another.

    >>> round(convertor(8, "eV", "cm-1"), 1)
    64524.8

    :param value: numeric value to convert
    :param fromunits: source unit name (e.g. "eV")
    :param tounits: target unit name (e.g. "cm-1")
    :raises KeyError: if the requested conversion pair is not supported
    """
    key = "%s_to_%s" % (fromunits, tounits)
    conversions = {
        "eV_to_cm-1": lambda v: v * 8065.6,
        "hartree_to_eV": lambda v: v * 27.2113845,
        "bohr_to_Angstrom": lambda v: v * 0.529177,
        "Angstrom_to_bohr": lambda v: v * 1.889716,
        # nm <-> cm-1 is reciprocal, not a fixed factor.
        "nm_to_cm-1": lambda v: 1e7 / v,
        "cm-1_to_nm": lambda v: 1e7 / v,
        "hartree_to_cm-1": lambda v: v * 219474.6,
        # Taken from GAMESS docs, "Further information",
        # "Molecular Properties and Conversion Factors"
        "Debye^2/amu-Angstrom^2_to_km/mol": lambda v: v * 42.255,
    }
    return conversions[key](value)
def db_convert_text(value: object) -> str:
    """Render a Python value as a PostgreSQL TEXT literal.

    None maps to 'NULL'; truthy values become a single-quoted string with
    embedded quotes doubled; falsy non-None values (0, '', False) map to
    the empty string.

    :param value: python object to convert
    :return: SQL TEXT literal fragment
    """
    if value is None:
        return 'NULL'
    if not value:
        # Falsy but not None: established behavior is an empty fragment.
        return ''
    escaped = str(value).replace("'", "''")
    return "'" + escaped + "'"
def get_layer_arn(layer: dict) -> str:
    """Build the fully qualified ARN for a Lambda layer version.

    :param layer: dict with 'Layer_arn' (str) and 'Layer_version' keys
    :return: '<Layer_arn>:<Layer_version>'
    """
    return ":".join([layer["Layer_arn"], str(layer["Layer_version"])])
def python_module_name(s):
    """Strip all "_" and "-" characters from a dotted module path.

    >>> python_module_name("aaa.bbb.ccc")
    'aaa.bbb.ccc'

    NOTE(review): underscores are valid in Python module names, yet this
    deletes them (e.g. "my_mod" -> "mymod"); presumably intentional
    normalization — confirm against callers.
    """
    return "".join(ch for ch in s if ch not in "_-")
def cociente(cu, cd):
    """(list, list) -> list

    Quotient of two complex numbers given as [real, imag] lists:
    (a+bi)/(c+di) = ((ac+bd) + (bc-ad)i) / (c^2 + d^2)

    :param cu: numerator as [a, b]
    :param cd: divisor as [c, d]
    :return: [real, imag] of the quotient
    """
    # Fix: the denominator is the divisor's squared modulus
    # cd[0]**2 + cd[1]**2 — previously it wrongly used cu[1]**2 + cd[1]**2.
    denom = cd[0] ** 2 + cd[1] ** 2
    real = (cu[0] * cd[0] + cu[1] * cd[1]) / denom
    imag = (cu[1] * cd[0] - cu[0] * cd[1]) / denom
    return [real, imag]
def get_filter_arg_boolean(f, arg):
    """Convert a boolean value to its Scheme string representation.

    :param f: filter object (unused here; kept for a uniform call signature)
    :param arg: boolean-ish value; only values equal to 1 (incl. True)
        map to true
    :return: '#t' if arg == 1, otherwise '#f'
    """
    return '#t' if arg == 1 else '#f'
def col2str(num, run=0):
    """Convert a 1-based column number to spreadsheet letters (27 -> 'AA').

    :param num: 1-based column index
    :param run: when falsy, *num* is returned unchanged (pass-through mode)
    :return: letter form of the column, or *num* itself when run is falsy
    """
    if not run:
        return num
    letters = []
    remaining = num
    while remaining > 0:
        # Bijective base-26: shift to 0-based before each division step.
        remaining, offset = divmod(remaining - 1, 26)
        letters.append(chr(offset + 65))
    return ''.join(reversed(letters))
def add_linebreaks(msg: str):
    """Prefix *msg* with an HTML "<br>" tag for the response message."""
    return "".join(("<br>", msg))
def fix_url_path(url: str) -> str:
    """Ensure *url* ends with "/".

    If a URL has a path that doesn't end with "/", urljoin would strip
    the last path segment; appending the slash prevents that.
    """
    if url.endswith("/"):
        return url
    return url + "/"
def get_available_months(months_list):
    """Build month options for the months form control.

    Xanthos output dates look like '198001' (year 1980, month 1); callers
    pass the list of two-digit month codes parsed from those dates.

    :params months_list: list of two-digit month strings, e.g. ["01", "03"]
    :type months_list: list
    :return: list of {'label': <name>, 'value': <code>} dicts, in calendar
        order, for each month present in *months_list*
    """
    month_names = ("January", "February", "March", "April", "May", "June",
                   "July", "August", "September", "October", "November",
                   "December")
    months = []
    # Data-driven replacement for twelve copy-pasted if-blocks; the
    # membership test (`value in months_list`) is unchanged.
    for index, name in enumerate(month_names, start=1):
        value = "%02d" % index
        if value in months_list:
            months.append({'label': name, 'value': value})
    return months
def force_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
    """Coerce *s* to bytes in the given encoding.

    Similar to smart_bytes, except that lazy instances are resolved to
    strings, rather than kept as lazy objects.
    If strings_only is True, don't convert (some) non-string-like objects.
    """
    # Common case first for performance: already bytes.
    if isinstance(s, bytes):
        if encoding == 'utf-8':
            return s
        return s.decode('utf-8', errors).encode(encoding, errors)
    if strings_only:
        return s
    if isinstance(s, memoryview):
        return bytes(s)
    if isinstance(s, str):
        return s.encode(encoding, errors)
    # Anything else: stringify, then encode.
    return str(s).encode(encoding, errors)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.