content stringlengths 42 6.51k |
|---|
def _merge_weights(spin_d1, spin_d2):
"""Sum the weights stored in two dictionaries with keys being the spins"""
if len(spin_d1) != len(spin_d2):
raise RuntimeError("Critical - mismatch spin-dict length")
out = {}
for spin in spin_d1:
out[spin] = spin_d1[spin] + spin_d2[spin]
return out |
def _sqrt_nearest(n, a):
"""Closest integer to the square root of the positive integer n. a is
an initial approximation to the square root. Any positive integer
will do for a, but the closer a is to the square root of n the
faster convergence will be.
"""
if n <= 0 or a <= 0:
raise ValueError("Both arguments to _sqrt_nearest should be positive.")
b=0
while a != b:
b, a = a, a--n//a>>1
return a |
def translate(x, y):
    """
    Generate an SVG transform statement representing a simple translation.
    """
    # %i truncates numeric inputs to integers, exactly as before.
    return "translate(%(x)i %(y)i)" % {"x": x, "y": y}
def bb_IoU(boxA, boxB):
    """Compute Intersection over Union (IoU) for two bboxes.

    Args:
        boxA: [x1, y1, x2, y2]
        boxB: [x1, y1, x2, y2]
    Returns:
        the iou score between 0-1
    """
    # corners of the intersection rectangle
    left = max(boxA[0], boxB[0])
    top = max(boxA[1], boxB[1])
    right = min(boxA[2], boxB[2])
    bottom = min(boxA[3], boxB[3])
    # pixel-inclusive intersection area (hence the +1 terms); clamped at 0
    intersection = max(0, right - left + 1) * max(0, bottom - top + 1)
    # individual pixel-inclusive box areas
    areaA = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
    areaB = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
    # union = sum of areas minus the doubly-counted intersection
    return intersection / float(areaA + areaB - intersection)
def cumulative_sum(t):
    """Return the running (prefix) sums of *t*.

    Ex: [1, 2, 3] returns [1, 3, 6]. An empty input now yields []
    instead of raising IndexError as the original `res = [t[0]]`
    seed did.
    """
    from itertools import accumulate
    return list(accumulate(t))
def postprocess_answer_extraction_output(answer_extraction_output: str):
    """Split a decoded answer-extraction string into unique answers.

    Args:
        answer_extraction_output (str): decoded answer extraction output
    Returns:
        answer_text_list (List[str]): non-empty answers, de-duplicated,
        original order preserved. Text after the last "<sep>" is dropped.
    """
    candidates = answer_extraction_output.split("<sep>")[:-1]
    unique_answers = []
    for candidate in candidates:
        # keep only first occurrence of each non-empty answer
        if candidate and candidate not in unique_answers:
            unique_answers.append(candidate)
    return unique_answers
def dockerize_windows_path(dkrpath: str) -> str:
    """Return a path usable as a docker volume mount on Windows.

    Docker uses non-standard formats for Windows mounts; the
    colon + forward-slash notation (``C:\\a\\b`` -> ``C:/a/b``) is
    *necessary* to build images with docker-py and works across
    docker cp, docker compose and command-line volume mounts.

    Args:
        dkrpath(str): a python path
    Returns:
        str: path that can be handed to Docker for a volume mount
    """
    return '/'.join(dkrpath.split('\\'))
def check_key_value(data, key, value):
    """Checks a key for the given value within a dictionary recursively.

    When ``key`` is a dict it acts as a path specification: the function
    descends into ``data[k]`` using the first key/value pair of ``key``
    (the ``return`` inside the loop means only the FIRST item is ever
    inspected). Otherwise ``data[key]`` is compared against ``value``.

    NOTE(review): raises KeyError when a lookup key is missing from
    ``data``; multi-entry ``key`` dicts silently ignore all but the
    first item — confirm this is the intended contract.
    """
    if isinstance(key, dict):
        for k, v in key.items():
            return check_key_value(data[k], v, value)
    if data[key] == value:
        return True
    return False
def single(sequence, condition=None):
    """
    Return the single item in a sequence that satisfies the condition.

    Args:
        sequence: iterable
            Sequence of items to go through.
        condition: callable, optional
            Predicate to test; when None, every item qualifies.

    Returns:
        any
            Single valid item.

    Raises:
        ValueError: if zero or more than one item qualifies. The bare
            `raise ValueError` now carries a diagnostic message.
    """
    items = list(sequence) if condition is None else [d for d in sequence if condition(d)]
    if len(items) == 1:
        return items[0]
    raise ValueError(f"expected exactly one matching item, found {len(items)}")
def exp(x) -> float:
    """Return e**x via its Taylor series.

    Uses e^x = sum_{k>=0} x^k / k!  (the original comment mis-stated
    this as "n^x / n!"). Terms are added until the running total stops
    changing by more than 1e-11.

    Args:
        x: the exponent (any real number).

    Returns:
        float: approximation of e raised to the power x.
    """
    if x == 0:
        return 1
    if x < 0:
        # e^-x = 1 / e^x keeps the series terms positive and stable.
        return 1 / exp(-x)
    total = 1
    denominator = 1
    last = float('inf')
    k_times = 1
    x_top = x
    while True:
        try:
            denominator *= k_times
            total += x_top / denominator
            x_top *= x
            diff = last - total
            last = total
            if abs(diff) < 0.00000000001:
                return last
            k_times += 1
        except OverflowError:
            # x_top overflowed for very large x; return the best estimate.
            return total
    # (the original had an unreachable `return last` after the while True)
def relativeBCPIn(anchor, BCPIn):
    """Convert an absolute incoming BCP point to coordinates relative to *anchor*."""
    delta_x = BCPIn[0] - anchor[0]
    delta_y = BCPIn[1] - anchor[1]
    return (delta_x, delta_y)
def merge_segments(lst):
    """Merge adjacent segments of *lst* in place where possible.

    Walks the list pairwise; whenever ``seg1.merge(seg2)`` succeeds and
    drains ``seg2`` (``seg2.empty()`` is true) the emptied segment is
    removed, otherwise the walk advances. Returns the same (possibly
    shortened) list.
    """
    idx = 0
    while idx + 1 < len(lst):
        current = lst[idx]
        following = lst[idx + 1]
        # empty() is only consulted when merge() succeeded, as before
        if current.merge(following) and following.empty():
            del lst[idx + 1]
        else:
            idx += 1
    return lst
def bytes_in_context(data, index):
    """Return a short window of *data* around *index* (10 before, 15 after)."""
    window_start = index - 10 if index > 10 else 0
    window_end = min(len(data), index + 15)
    return data[window_start:window_end]
def set_local_fonts_enabled(enabled: bool) -> dict:
    """Enables/disables rendering of local CSS fonts (enabled by default).

    Parameters
    ----------
    enabled: bool
        Whether rendering of local fonts is enabled.
    **Experimental**
    """
    params = {"enabled": enabled}
    return {"method": "CSS.setLocalFontsEnabled", "params": params}
def is_blank(x):
    """Return True when *x* contains only whitespace (or nothing at all)."""
    return x.strip() == ""
def extract_type(item):
    """Extract item possible types from jsonschema definition.
    >>> extract_type({'type': 'string'})
    ['string']
    >>> extract_type(None)
    []
    >>> extract_type({})
    []
    >>> extract_type({'type': ['string', 'null']})
    ['string', 'null']
    """
    if not item or "type" not in item:
        return []
    declared = item["type"]
    # always hand back a list, wrapping a lone type name
    return declared if isinstance(declared, list) else [declared]
def factorial_loop(number):
    """Calculate a factorial iteratively.

    Values below 2 (including negatives) yield 1.

    :param number: A number for which factorial should be calculated.
    :return: Factorial number.
    >>> factorial_loop(-1)
    1
    >>> factorial_loop(0)
    1
    >>> factorial_loop(1)
    1
    >>> factorial_loop(3)
    6
    >>> factorial_loop(5)
    120
    """
    product = 1
    while number > 1:
        product *= number
        number -= 1
    return product
def filter_units(line, units="imperial"):
    """Filter or convert units in a line of text between US/UK and metric.

    Args:
        line: a line of weather-report text.
        units: "imperial" to keep US/UK values, "metric" to keep or
            convert to metric values.

    Returns:
        The line with only the requested unit system's values.
    """
    import re
    # filter lines with both pressures in the form of "X inches (Y hPa)" or
    # "X in. Hg (Y hPa)"  (raw strings silence invalid-escape warnings)
    dual_p = re.match(
        r"(.* )(\d*(\.\d+)? (inches|in\. Hg)) \((\d*(\.\d+)? hPa)\)(.*)",
        line
    )
    if dual_p:
        preamble, in_hg, i_fr, i_un, hpa, h_fr, trailer = dual_p.groups()
        if units == "imperial":
            line = preamble + in_hg + trailer
        elif units == "metric":
            line = preamble + hpa + trailer
    # filter lines with both temperatures in the form of "X F (Y C)"
    dual_t = re.match(
        r"(.* )(-?\d*(\.\d+)? F) \((-?\d*(\.\d+)? C)\)(.*)",
        line
    )
    if dual_t:
        preamble, fahrenheit, f_fr, celsius, c_fr, trailer = dual_t.groups()
        if units == "imperial":
            line = preamble + fahrenheit + trailer
        elif units == "metric":
            line = preamble + celsius + trailer
    # if metric is desired, convert distances "X mile(s)" to "Y kilometer(s)"
    if units == "metric":
        imperial_d = re.match(r"(.* )(\d+)( mile\(s\))(.*)", line)
        if imperial_d:
            preamble, mi, m_u, trailer = imperial_d.groups()
            line = preamble + str(int(round(int(mi) * 1.609344))) \
                + " kilometer(s)" + trailer
    # filter speeds "X MPH (Y KT)" to just "X MPH"; if metric is desired,
    # convert to "Z KPH". (The original ran this identical match/replace a
    # second time; after the first rewrite the pattern can never match
    # again, so the duplicate block was removed.)
    imperial_s = re.match(r"(.* )(\d+)( MPH)( \(\d+ KT\))(.*)", line)
    if imperial_s:
        preamble, mph, m_u, kt, trailer = imperial_s.groups()
        if units == "imperial":
            line = preamble + mph + m_u + trailer
        elif units == "metric":
            line = preamble + str(int(round(int(mph) * 1.609344))) \
                + " KPH" + trailer
    # if imperial is desired, qualify forecast temperatures like "High 70."
    # with " F"; if metric is desired, convert to "Y C"
    imperial_t = re.match(r"(.* )(High |high |Low |low )(\d+)(\.|,)(.*)", line)
    if imperial_t:
        preamble, parameter, fahrenheit, sep, trailer = imperial_t.groups()
        if units == "imperial":
            line = preamble + parameter + fahrenheit + " F" + sep + trailer
        elif units == "metric":
            line = preamble + parameter \
                + str(int(round((int(fahrenheit) - 32) * 5 / 9))) + " C" \
                + sep + trailer
    # hand off the resulting line
    return line
def is_valid_key(key: str) -> bool:
    """Check if an exiftool key is valid and interesting.

    Keys describing ExifTool bookkeeping, the host system, or the file
    itself are filtered out.
    # https://exiftool.org/TagNames/Extra.html
    """
    # 'FileType' was listed twice in the original tuple; de-duplicated.
    file_keys = (
        'FileName', 'Directory', 'FileSize', 'FileModifyDate', 'FileAccessDate',
        'FileInodeChangeDate', 'FilePermissions', 'FileType',
        'FileTypeExtension', 'MIMEType', 'ExifByteOrder'
    )
    invalid_prefixes = ("ExifTool", "System", "SourceFile") + tuple(
        f"File:{k}" for k in file_keys
    )
    # str.startswith accepts a tuple of prefixes, replacing the loop
    return not key.startswith(invalid_prefixes)
def nb_year(p0, percent, aug, p):
    """
    Count whole years until a population reaches a target.

    Each year the population grows by ``percent`` percent (2 means 2%,
    hence the /100 conversion) and then gains (or loses) ``aug``
    inhabitants; the yearly gain is truncated to an integer.

    Args:
        p0: starting population (> 0).
        percent: yearly growth rate, as a percentage (>= 0).
        aug: net inhabitants arriving (or leaving) each year.
        p: population threshold to reach or surpass (> 0).

    Returns:
        int: number of entire years needed so the population >= p.
    """
    rate = percent / 100
    elapsed = 0
    population = p0
    while population < p:
        population += int(population * rate + aug)
        elapsed += 1
    return elapsed
def get_log_number_of_restricted_partitions(m, n):
    """Get the log of the number of restricted partitions of m into at most n parts.

    Small m comes from a precomputed lookup table; larger m falls back to
    asymptotic formulas (after Peixoto's graph-tool implementation).

    NOTE(review): relies on module-level globals
    (NUMBER_OF_RESTRICTED_PARTITIONS_PRECOMPUTED_LIMIT,
    LOG_NUMBER_OF_RESTRICTED_PARTITIONS_MATRIX) and external helpers
    log_binom, spence and math, none of which are visible here —
    confirm they are defined/imported in this module.
    """
    # degenerate sizes: one (empty) partition when m == 0 -> log 1 == 0;
    # otherwise no partition exists -> log 0 == -inf
    if n <= 0 or m <= 0:
        if m == 0:
            return 0
        return float('-inf')
    elif m < NUMBER_OF_RESTRICTED_PARTITIONS_PRECOMPUTED_LIMIT:
        # m < n behaves like unrestricted partitions, stored at [m][m]
        if m < n:
            return LOG_NUMBER_OF_RESTRICTED_PARTITIONS_MATRIX[m][m]
        return LOG_NUMBER_OF_RESTRICTED_PARTITIONS_MATRIX[m][n]
    else:
        if n == 1:
            # NOTE(review): exactly one partition exists here, whose log
            # would be 0, yet 1 is returned — confirm intended.
            return 1
        elif n < m ** (1.0 / 4):
            # value is always below zero... think about another option
            return log_binom(m - 1, n - 1) - math.lgamma(m + 1)
        else:
            # fixed-point iteration for v in the asymptotic expansion
            u = n / math.sqrt(m)
            v = u
            for _ in range(100):
                # formula from Peixoto's implementation
                # https://git.skewed.de/count0/graph-tool/blob/master/src/graph/inference/support/int_part.cc
                v_new = u * math.sqrt(spence(math.exp(-v)))
                if math.fabs(v - v_new) < .0001:
                    v = v_new
                    break
                v = v_new
            else:
                raise Exception("Fix Point iteration as search for v not converged in 100 steps:", v)
            log_f_u = math.log(v) - 1.5 * math.log(2) - math.log(math.pi) - math.log(u) - .5 * math.log(
                1 - (1 + u * u / 2) * math.exp(-v))
            # log_g_u = math.log(2 * v / u - u * math.log(1 - math.exp(-v)))
            # f_u = v / (2 ** 1.5 * math.pi * u * math.sqrt(1 - (1 + u * u / 2) * math.exp(-v)))
            g_u = 2 * v / u - u * math.log(1 - math.exp(-v))
            return log_f_u - math.log(m) + math.sqrt(m) * g_u
def get_broken_fuzz_targets(bad_build_results, fuzz_targets):
    """Pair each failing bad-build result with its fuzz target.

    Each item of |bad_build_results| is the result of bad_build_check on
    the corresponding element of |fuzz_targets|. Returns the
    (fuzz_target, result) pairs whose returncode is nonzero.
    """
    return [
        (target, result)
        for result, target in zip(bad_build_results, fuzz_targets)
        if result.returncode != 0
    ]
def increase_parameter_closer_to_value(old_value, target_value, coverage):
    """
    Move *old_value* toward *target_value* in proportion to *coverage*.

    Simple but commonly used calculation for interventions: increments
    the baseline value toward the full-coverage target. Values already
    at or above the target are returned unchanged.

    Args:
        old_value: Baseline or original value to be incremented
        target_value: Target value or value at full intervention coverage
        coverage: Intervention coverage or proportion of the intervention value to apply
    """
    if old_value >= target_value:
        return old_value
    return old_value + (target_value - old_value) * coverage
def trapezoid_area(base_minor, base_major, height):
    """Return the area of a trapezoid: the mean of its bases times its height."""
    mean_base = (base_minor + base_major) / 2
    return mean_base * height
def replace_all(text, dic):
    """
    Apply every old -> new replacement in *dic* to *text*, in dict order.
    """
    for old, new in dic.items():
        text = text.replace(old, new)
    return text
def find_gcd(number1: int, number2: int) -> int:
    """Return the greatest common divisor of number1 and number2.

    Iterative Euclidean algorithm; like the recursive original, raises
    ZeroDivisionError when number2 is 0.
    """
    remainder = number1 % number2
    while remainder:
        number1, number2 = number2, remainder
        remainder = number1 % number2
    return number2
def partition_at_level(dendogram, level):
    """Return the partition of the graph nodes at the given level.

    A dendogram is a tree where each level is a partition of the graph
    nodes: a list of dicts whose keys at level (i+1) are the values at
    level i. Level 0 holds the smallest communities; the best partition
    sits at height len(dendogram) - 1, and communities grow with the
    level. Merely processes an existing dendogram, as created by
    :mod:`estrangement.agglomerate.generate_dendogram`.

    Parameters
    ----------
    dendogram : list of dict
        A list of partitions.
    level : int
        Desired level, in [0..len(dendogram)-1].

    Returns
    -------
    partition : dictionary
        Keys are nodes, values the community each belongs to.

    Raises
    ------
    KeyError
        If the dendogram is not well formed or the level exceeds its height.

    See Also
    --------
    best_partition
    generate_dendogram
    """
    result = dendogram[0].copy()
    for depth in range(1, level + 1):
        # rewrite each node's community with its parent at this depth
        for node, community in result.items():
            result[node] = dendogram[depth][community]
    return result
def matrix_transpose(matrix: list) -> list:
    """
    Compute the transpose of a rectangular matrix (list of rows).

    The original read the column count from ``matrix[1]``, which raised
    IndexError for single-row matrices; ``matrix[0]`` is used instead.
    """
    transposed: list = []
    for col_idx in range(len(matrix[0])):
        transposed.append([row[col_idx] for row in matrix])
    return transposed
def get_subs(relativize_fn, links):
    """Return substitution pairs (original link, relativized link).

    Pairs where relativizing changes nothing (no-op substitutions) are
    dropped.
    """
    pairs = ((link, relativize_fn(link)) for link in links)
    return [pair for pair in pairs if pair[0] != pair[1]]
def wordSlices(a_string):
    """Return all slices of 2+ characters that can be taken from *a_string*.

    Assumes *a_string* has length 1 or greater. Strings shorter than 3
    characters are returned whole as a one-element list.
    """
    # short-string fast path
    if len(a_string) < 3:
        return [a_string]
    slices = []
    for start in range(len(a_string) - 1):
        for stop in range(start + 2, len(a_string) + 1):
            slices.append(a_string[start:stop])
    return slices
def filter_nbases(Seq):
    """Uppercase *Seq* and strip its N bases.

    Returns the cleaned sequence, or the string 'Invalid Seq' when any
    character outside AGCTN is present.
    """
    Seq = Seq.upper()
    if any(base not in 'AGCTN' for base in Seq):
        return 'Invalid Seq'
    return Seq.replace("N", "")
def Tree(data, *subtrees):
    """Build a list-based tree node: [data, subtree1, subtree2, ...]."""
    return [data, *subtrees]
def _add_dot(ext_list):
"""
PURPOSE:
This private function is used to add a dot ('.') to the beginning
of each file extension in an *_exts list; if a dot is not already
present.
"""
# LOOP THROUGH EXTENSIONS
for idx, ext in enumerate(ext_list):
# TEST FOR DOT (.ext) >> IF NOT, ADD IT AND UPDATE LIST
if not ext.startswith('.'): ext_list[idx] = '.%s' % ext
# RETURN MODIFIED EXTENSION LIST
return ext_list |
def equalizer(n: int, m: int, total: int):
    """
    Receives total, m and n [0..total].
    Returns a tuple (a, b) so that their sum -> total, and a / b -> 1.
    """
    remainder = total % 2
    half = total // 2
    lowest = min(n, m, half + remainder)
    if lowest == n:
        return (n, min(m, total - n))
    if lowest == m:
        return (min(n, total - m), m)
    return (half, half + remainder)
def traverse(start_cell, direction, num_steps):
    """
    Walk a grid linearly from *start_cell*, stepping by *direction*.

    start_cell is a (row, col) tuple where the iteration starts;
    direction holds the (row, col) difference between consecutive
    cells. Returns the list of (row, col, step) tuples visited.
    """
    row0, col0 = start_cell
    d_row, d_col = direction
    return [
        (row0 + d_row * step, col0 + d_col * step, step)
        for step in range(num_steps)
    ]
def latex_safe(value):
    """
    Filter that replaces LaTeX-forbidden characters with safe escapes.

    Escapes _, $, &, #, { and } in one pass via str.translate. Raw
    strings fix the invalid "\\_"-style escape sequences the chained
    str.replace version relied on (a SyntaxWarning on modern Python);
    the simultaneous translation is equivalent because no replacement
    output contains another character still to be replaced.
    """
    table = str.maketrans({
        '_': r'\_',
        '$': r'\$',
        '&': r'\&',
        '#': r'\#',
        '{': r'\{',
        '}': r'\}',
    })
    return str(value).translate(table)
def parse_line_contents(line):
    """Extract the first three float-like tokens from *line*.

    Splits on whitespace and collects tokens that parse as floats,
    stopping after three. (The original docstring mentioned sigma,
    gamma, lambda and m, but only three values are returned.)

    Returns:
        tuple of three floats.

    Raises:
        IndexError: if the line holds fewer than three numeric tokens.
    """
    values = []
    for token in line.split():
        try:
            # parse once instead of the original's duplicated float() call
            values.append(float(token))
        except ValueError:
            continue
        if len(values) == 3:
            break
    return values[0], values[1], values[2]
def classify_segment(height_r, dist_r, height_l, dist_l):
    """
    Classify a road segment from building heights and distances on each
    side. Used by the averaging method.

    Returns an int class 1-4; class 4 also covers segments with no
    building data at all (both heights zero), since nothing better can
    be said about them.
    """
    # Zero heights mean the dataset held no building data on that side.
    if height_r == 0 and height_l == 0:
        return 4
    if height_l == 0:
        # data only on the right side
        return 3 if (dist_r / height_r) < 3 else 4
    if height_r == 0:
        # data only on the left side
        return 3 if (dist_l / height_l) < 3 else 4
    ratio_r = dist_r / height_r
    ratio_l = dist_l / height_l
    # Class 1: one tight side, the other moderately tight
    if ratio_r < 3 and 1.5 < ratio_l < 3:
        return 1
    if ratio_l < 3 and 1.5 < ratio_r < 3:
        return 1
    # Class 2: both sides very tight
    if ratio_l < 1.5 and ratio_r < 1.5:
        return 2
    # Class 4: everything else
    return 4
def _get_opening_root_tag(html_input):
"""Read through html input and return the full html tag (< to >) of the opening tag
Args:
html_input (str or bytes): HTML string to read the opening tag from
Returns:
str: the full opening tag string, e.g. <div id="ires">
Raises:
ValueError: if the provided html does not contain a valid opening tag structure
"""
# for each character in the html_input, start reading, looking for opening and closing tags at start and end indices
start_index = None
end_index = None
cur_index = 0
# Make sure that if byte string is passed in, we modify it to be a string
html_input = html_input.decode() if isinstance(html_input, bytes) else html_input
for character in html_input:
if character == "<":
# If we've already seen an opening tag before seeing the closing tag, bomb out
if start_index is not None:
raise ValueError("Parameter html_input does not contain valid HTML - too many opening brackets")
start_index = cur_index
elif character == ">":
# If we haven't seen an opening tag yet, bomb out
if start_index is None:
raise ValueError("Parameter html_input does not contain valid HTML - no opening bracket seen")
end_index = cur_index
# Break out of the for loop as soon as we find the closing tag to the first tag we found
break
cur_index += 1
# If either an opening tag or a closing tag hasn't been seen,
# assume this is just text, and return None since this isn't valid HTML
if start_index is None or end_index is None:
return None
# Return the section of html_input that represents the first valid tag we found
return html_input[start_index:end_index + 1] |
def nCkModp(n, k, p):
    """
    Return nCk % p via Pascal's-triangle dynamic programming.

    Parameters
    ----------
    n : int
        denotes n in nCk%p
    k : int
        denotes k in nCk%p
    p : int
        the modulus
    return : int
        C(n, k) modulo p
    """
    # symmetry C(n, k) == C(n, n-k) keeps the DP row small
    if k > n - k:
        k = n - k
    row = [0] * (k + 1)
    row[0] = 1
    for i in range(1, n + 1):
        # walk right-to-left so each slot still holds row i-1's value
        for j in range(min(i, k), 0, -1):
            row[j] = (row[j] + row[j - 1]) % p
    return row[k]
def Mj_from_spt(x):
    """
    Lepine Mj to SpT relationship (Lepine et al. 2013, Equation 23, page 24).

    :param x: numpy array, SpT
    :return: Mj evaluated from the quadratic fit
    """
    # quadratic fit coefficients from the paper
    c0, c1, c2 = 5.680, 0.393, 0.040
    return c0 + c1 * x + c2 * x ** 2
def recurse_mirror(s, olds=""):
    """Attempt at mirror() recursively though i'm failing. olds gets pushed onto
    the stack before the final return value collection so the order of olds and s
    is reversed.

    NOTE(review): as written this reverses *s* and, once the recursion
    bottoms out, appends the lexicographically greatest value *olds*
    held along the way (via the `olds < s` reassignment). The comment
    above admits the result is not the intended mirror; left untouched
    pending clarification of intent.
    """
    if not s:
        return olds
    elif olds < s:
        # keep the lexicographically larger of the two strings
        olds = s
    # prepend the last character, recurse on the remainder
    return s[-1] + recurse_mirror(s[:-1], olds)
def is_ref(prop):
    """
    Return True when *prop* is a JSON-schema reference, i.e. a dict
    whose one and only key is '$ref'.
    """
    return set(prop.keys()) == {'$ref'}
def knapsack01(W, wt, val, n):
    """
    Solve the 0-1 knapsack problem with dynamic programming.

    Args:
        W (int): total weight capacity
        wt (list): weights of the items
        val (list): values of the items
        n (int): number of items
    Returns:
        the maximum value that can be obtained
    """
    # best[i][w] = best value using the first i items within capacity w
    best = [[0] * (W + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        weight = wt[i - 1]
        value = val[i - 1]
        for w in range(1, W + 1):
            if weight <= w:
                # take item i (value + best without it) or skip it
                best[i][w] = max(value + best[i - 1][w - weight], best[i - 1][w])
            else:
                best[i][w] = best[i - 1][w]
    return best[n][W]
def human_readable_size(byte_size):
    """Convert a number of bytes to a human-readable string like '1.5 KB'."""
    units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
    index = 0
    while byte_size >= 1024 and index < len(units) - 1:
        byte_size /= 1024.
        index += 1
    # trim trailing zeros (and a dangling dot) off the 2-decimal rendering
    rendered = ('{0:.2f}'.format(byte_size)).rstrip('0').rstrip('.')
    return '{0} {1}'.format(rendered, units[index])
def strlist(ls):
    """Format a list as a comma-separated string."""
    return ','.join(map(str, ls))
def wav2RGB(wavelength):
    """
    Converts a wavelength to RGB.
    Arguments:
        wavelength (float) : the wavelength (in nm).
    Returns:
        (tuple of int, int, int): the converted RGB values.
    """
    wavelength = int(wavelength)
    R, G, B, SSS = 0, 0, 0, 0
    # get RGB values: piecewise-linear ramps between the visible bands
    if wavelength < 380:
        # below the visible range: magenta
        R, G, B = 1.0, 0.0, 1.0
    elif (wavelength >= 380) and (wavelength < 440):
        # violet band; NOTE(review): the 350 in the denominator (rather
        # than 380) matches some circulating wav2RGB snippets but not
        # the band's lower edge — confirm it is intentional
        R = -(wavelength - 440.) / (440. - 350.)
        G, B = 0.0, 1.0
    elif (wavelength >= 440) and (wavelength < 490):
        # blue band, green ramping up
        R, B = 0.0, 1.0
        G = (wavelength - 440.) / (490. - 440.)
    elif (wavelength >= 490) and (wavelength < 510):
        # cyan band, blue ramping down
        R, G = 0.0, 1.0
        B = -(wavelength - 510.) / (510. - 490.)
    elif (wavelength >= 510) and (wavelength < 580):
        # green band, red ramping up
        R = (wavelength - 510.) / (580. - 510.)
        G, B = 1.0, 0.0
    elif (wavelength >= 580) and (wavelength < 645):
        # yellow-orange band, green ramping down
        R, B = 1.0, 0.0
        G = -(wavelength - 645.) / (645. - 580.)
    elif (wavelength >= 645) and (wavelength <= 780):
        # red band
        R, G, B = 1.0, 0.0, 0.0
    elif wavelength > 780:
        # beyond the visible range: red
        R, G, B = 1.0, 0.0, 0.0
    # intensity correction: fade out toward the edges of visibility
    if wavelength < 380:
        SSS = 0.6
    elif (wavelength >= 380) and (wavelength < 420):
        SSS = 0.3 + 0.7 * (wavelength - 350) / (420 - 350)
    elif (wavelength >= 420) and (wavelength <= 700):
        SSS = 1.0
    elif (wavelength > 700) and (wavelength <= 780):
        SSS = 0.3 + 0.7 * (780 - wavelength) / (780 - 700)
    elif wavelength > 780:
        SSS = 0.3
    # scale the 0-1 intensity into the 0-255 channel range
    SSS *= 255
    return int(SSS * R), int(SSS * G), int(SSS * B)
def parse_years(year_range):
    """Parse year_range into a list of years.

    Args:
        year_range: A string in the format aaaa-bbbb.
    Returns:
        A list of years from aaaa to bbbb, including both ends.
    Raises:
        ValueError: if the string does not split into exactly two ints.
    """
    start, end = year_range.split("-")
    start, end = int(start), int(end)
    # range already yields the sequence we want; no comprehension needed
    return list(range(start, end + 1))
def list_to_set(lst):
    """Build a dict keyed by the list items, each mapped to True.

    Note: despite the name, the return type is a dict used as a set.
    """
    return dict.fromkeys(lst, True)
def buildLabel(nodeId, labelText, labelLink):
    """Build a label cell (one HTML table row) for a node popup.

    nodeId -- name of the node that the popup refers to
    labelText -- text of the label itself
    labelLink -- link to follow when label is clicked (if any)

    Returns the HTML for a <tr> holding the hover-wired label anchor and
    a 1px spacer image cell.
    """
    # With a real link the anchor navigates normally; otherwise it is
    # neutralized with a no-op href plus an onclick returning false.
    if len(labelLink) > 0:
        onClick = ''
        href = labelLink
    else:
        onClick = '\nonclick="return false;"'
        href = 'javascript:;'
    # %-interpolates: href, nodeId twice (hide/show hover handlers),
    # the optional onclick snippet, and the label text.
    html = """<tr>
    <td class="labelscell" nowrap="nowrap">
    <a
    href="%s"
    onmouseout="javascript:hideNode('%s')"
    onmouseover="javascript:showNode('%s')"
    target="_top"
    class="labellink"%s>%s</a></td>
    <td><img src="img-static/no.gif" width="1" height="13" alt="" border="0" /></td>
    </tr>\n
    """ % (href, nodeId, nodeId, onClick, labelText)
    return html
def replace_key_value(lookup, new_value, expected_dict):
    """
    Replace the value stored under *lookup* anywhere inside *expected_dict*.

    When both the matched value and *new_value* are dicts they are
    merged via update(); otherwise the value is overwritten. Dict values
    under other keys are descended into recursively. The dict is
    modified in place and returned.
    """
    for key, current in expected_dict.items():
        if key == lookup:
            if isinstance(current, dict) and isinstance(new_value, dict):
                current.update(new_value)
            else:
                expected_dict[key] = new_value
        elif isinstance(current, dict):
            expected_dict[key] = replace_key_value(lookup, new_value, current)
    return expected_dict
def babylonian_sqrt(rough, n):
    """Refine *rough* toward sqrt(n) with 10 Babylonian iterations.

    https://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Babylonian_method
    """
    estimate = rough
    for _ in range(10):
        # average the estimate with n / estimate
        estimate = 0.5 * (estimate + (n / estimate))
    return estimate
def chunks(l, n):
    """Yield successive n-sized chunks from l.

    Parameters
    ----------
    l : list
        The list to split in chunks
    n : int
        The target number of items in each chunk

    Returns
    -------
    list
        List of chunks (the final chunk may be shorter than n)
    """
    # comprehension replaces the manual append loop (PERF401)
    return [l[i:i + n] for i in range(0, len(l), n)]
def force_correct(word):
    """
    arg: commonly misspelt word that the spell-checker cannot catch
    return: correct spelling of word (the word itself when not known)

    A dict lookup replaces the original 17-branch if/elif chain; the
    mapping restores the leading character(s) lost from common Latin
    liturgical words.
    """
    corrections = {
        'unc': 'nunc',
        'gnus': 'agnus',
        'yrie': 'kyrie',
        'redo': 'credo',
        'ominus': 'dominus',
        'remus': 'oremus',
        'ectio': 'lectio',
        'er': 'per',
        'eus': 'deus',
        'hriste': 'christe',
        'ector': 'rector',
        'niquo': 'iniquo',
        'ucis': 'lucis',
        'iliae': 'filiae',
        'isirere': 'misirere',
        'alva': 'salva',
        'ripe': 'eripe',
    }
    return corrections.get(word, word)
def nome(inpt):
    """
    Towards a tool that can get the name of a variable.

    Returns the name of a variable in the *caller's* scope whose value
    equals *inpt*, or None when no such variable exists.

    The original iterated this function's own locals(), which only ever
    contained ``inpt``, so it always returned the string 'inpt'.
    Inspecting the caller's frame makes the lookup meaningful.
    """
    import inspect
    caller = inspect.currentframe().f_back
    try:
        for name, value in caller.f_locals.items():
            if value == inpt:
                return name
    finally:
        # break the frame reference cycle promptly
        del caller
def make_bool(mixed):
    """
    Coerce *mixed* to a boolean.

    None -> False; bools pass through; anything int()-convertible is
    compared against zero; strings ''/'false'/'no' -> False and
    'true'/'yes' -> True (case-insensitive). Anything else raises
    ValueError.
    """
    if mixed is None:
        return False
    if isinstance(mixed, bool):
        return mixed
    try:
        # numbers and numeric strings: nonzero means True
        return int(mixed) != 0
    except ValueError:
        pass
    if isinstance(mixed, str):
        lowered = mixed.lower()
        if lowered in ('', 'false', 'no'):
            return False
        if lowered in ('true', 'yes'):
            return True
    raise ValueError
def split(list):
    """
    Divide the unsorted list at its midpoint into two sublists.

    Returns the (left, right) halves; with an odd length the right half
    gets the extra element. (Parameter name kept for caller
    compatibility even though it shadows the builtin.)
    """
    midpoint = len(list) // 2
    return list[:midpoint], list[midpoint:]
def make_valid_filename(str):
    """
    Replace every non-alphanumeric character of *str* with '_'.
    From http://stackoverflow.com/questions/295135/turn-a-string-into-a-valid-filename-in-python
    (Parameter name kept for caller compatibility despite shadowing the builtin.)
    """
    return "".join(ch if ch.isalnum() else "_" for ch in str)
def find_first_slice_value(slices, key):
    """For a list of slices, get the first non-None value for *key*, else None."""
    return next(
        (s[key] for s in slices if key in s and s[key] is not None),
        None,
    )
def remove_doubles(lst):
    """Given a sorted list, return a new sorted list with duplicates removed.

    An empty input now returns [] instead of raising IndexError on the
    `[lst[0]]` seed as the original did.
    """
    if not lst:
        return []
    deduped = [lst[0]]
    for item in lst[1:]:
        # sorted input: only need to compare against the last kept value
        if item != deduped[-1]:
            deduped.append(item)
    return deduped
def currency_filter(value):
    """Render *value* as a comma-separated, rounded-off integer string."""
    return "{:,}".format(int(round(float(value))))
def time_delta(t1: str, t2: str, fmt='%a %d %b %Y %X %z') -> int:
    """
    Absolute difference between two timestamp strings, in whole seconds.

    >>> time_delta('Sun 10 May 2015 13:54:36 -0700',
    ...            'Sun 10 May 2015 13:54:36 -0000')
    25200
    >>> time_delta('Sat 02 May 2015 19:54:36 +0530',
    ...            'Fri 01 May 2015 13:54:36 -0000')
    88200
    >>> time_delta('Wed 12 May 2269 23:22:15 -0500',
    ...            'Tue 05 Oct 2269 02:12:07 -0200')
    12527392
    """
    from datetime import datetime
    first = datetime.strptime(t1, fmt)
    second = datetime.strptime(t2, fmt)
    return abs(int((first - second).total_seconds()))
def corrections(mean_onbit_density):
    """Calculate corrections.

    See :func:`similarity` for explanation of corrections.

    Args:
        mean_onbit_density (float): Mean on bit density

    Returns:
        tuple of float: (S\ :sub:`T` correction, S\ :sub:`T0` correction)
    """
    density = mean_onbit_density
    st_correction = (2 - density) / 3
    st0_correction = (1 + density) / 3
    return st_correction, st0_correction
def get_default_extra_suffix(related_docs=True):
    """Return the default extra suffix for related vs random full-content docs."""
    if related_docs:
        return "-related-fullcontent"
    return "-random-fullcontent"
def dedent(string):
    """Remove the maximum common indent of the lines making up the string.

    Blank lines are ignored when computing the indent and preserved in
    the output. An empty string (or one of only blank lines) is now
    returned as-is instead of raising ValueError from min() over an
    empty sequence.
    """
    lines = string.splitlines()
    indent = min(
        (len(line) - len(line.lstrip()) for line in lines if line),
        default=0,
    )
    return "\n".join(
        line[indent:] if line else line
        for line in lines
    )
def get_next_satisfying(vector, starting_position, condition_fun):
    """Find the next pixel in *vector*, at or after *starting_position*,
    that satisfies the boolean *condition_fun*; return -1 if not found.

    The original returned len(vector) in the not-found case (its -1
    branch could never trigger); this now honours the documented -1.
    """
    position = starting_position
    length = len(vector)
    while position < length and not condition_fun(vector[position]):
        position += 1
    # fell off the end without a match -> documented sentinel
    return position if position < length else -1
def remove_first_space(x):
    """
    Strip a single leading space from word *x*, if present.

    :param x: word
    :type x: str
    :return: word without a space in front
    :rtype: str
    """
    # startswith handles the empty string, so no IndexError guard needed
    if x.startswith(" "):
        return x[1:]
    return x
def make_build_cmd(parameters, import_path_labels=()):
    """Build Cap'n Proto schema for Python packages.

    Returns the ``build`` command line; when *import_path_labels* is
    non-empty, adds the ``compile_schemas`` sub-command plus one
    ``--import-path <parameters[label]>`` pair per label.
    """
    cmd = ['build']
    if import_path_labels:
        cmd.append('compile_schemas')
        for label in import_path_labels:
            cmd.extend(('--import-path', parameters[label]))
    return cmd
def fileno(fil):
    """Return the file descriptor representation of the file.

    An int is returned unchanged. Otherwise fileno() is called on the
    object and its value returned, provided it is an int. In all other
    cases TypeError is raised.
    """
    if isinstance(fil, int):
        return fil
    if hasattr(fil, "fileno"):
        # renamed local (was `fileno`, shadowing this function's name)
        fd = fil.fileno()
        if not isinstance(fd, int):
            raise TypeError("expected fileno to return an int, not " + type(fd).__name__)
        return fd
    raise TypeError("expected int or an object with a fileno() method, not " + type(fil).__name__)
def make_tsv_line(vals, outfields, empty_string_replacement='', sep='\t'):
    """Join the selected fields of *vals* into one separator-delimited line.

    Does not append a trailing newline. Empty string values are swapped
    for *empty_string_replacement* when one is given; non-string values
    (strictly: anything whose type is not exactly str) are stringified.
    """
    cells = []
    for field in outfields:
        cell = vals[field]
        if type(cell) is str:
            if empty_string_replacement and not cell:
                cells.append(empty_string_replacement)
            else:
                cells.append(cell)
        else:
            cells.append(str(cell))
    return sep.join(cells)
def convert(s):
    """Convert *s* to an integer, returning -1 when conversion fails."""
    try:
        return int(s)
    except (ValueError, TypeError):
        # unparsable or wrong-typed input falls back to the sentinel
        return -1
def _show_capture_callback(x):
"""Validate the passed options for showing captured output."""
if x in [None, "None", "none"]:
x = None
elif x in ["no", "stdout", "stderr", "all"]:
pass
else:
raise ValueError(
"'show_capture' must be one of ['no', 'stdout', 'stderr', 'all']."
)
return x |
def tail_slices(l):
    """Returns the list of all slices anchored at tail of `l`
    >>> tail_slices ("abcdef")
    ['abcdef', 'bcdef', 'cdef', 'def', 'ef', 'f']
    """
    # range is already iterable; the original wrapped it in list() (PERF101)
    return [l[i:] for i in range(len(l))]
def unique(sequence):
    """
    Return a list of unique items found in sequence. Preserve the original
    sequence order.
    For example:
    >>> unique([1, 5, 3, 5])
    [1, 5, 3]
    """
    # membership test stays on the list (not a set) so unhashable
    # items keep working, exactly as before
    result = []
    for element in sequence:
        if element not in result:
            result.append(element)
    return result
def fixdotslashspacehyphen(to_translate):
    """For paths: map each of '.', '/' and ' ' to '-'.

    (The previous docstring claimed the replacement character was '_';
    the code -- and the function name -- use '-'.)
    """
    # str.maketrans builds the same {ord(char): '-'} table the old
    # hand-rolled dict comprehension produced.
    translate_table = str.maketrans({char: u'-' for char in u'./ '})
    return to_translate.translate(translate_table)
def iterative(array, element):
    """
    Perform Linear Search by Iterative Method.
    :param array: Iterable of elements.
    :param element: element to be searched.
    :return: returns value of index of element (if found) else return None.
    """
    # enumerate replaces the range(len(...)) indexing anti-pattern and
    # returns the index of the FIRST match, as before.
    for index, value in enumerate(array):
        if value == element:
            return index
    return None
def send_analytics_tracker(name, uid=None):
    """Send setup events to Google analytics (retired no-op).

    Usage is now reported through github issues or by 'starring' the
    project, so this just echoes its arguments back. Historical
    takeaway (as of Sept 2020): External, Replica3 and Replica1 were
    preferred, in that order.
    """
    return (name, uid)
def to_dict(object, classkey='__class__'):
    """
    Recursively convert an object graph into plain dictionaries.

    Mappings are converted value-by-value. Objects with a __dict__ are
    converted attribute-by-attribute, skipping callables and names that
    start with '_', and tagged with their class name under *classkey*
    (unless classkey is None). Everything else is returned unchanged.
    https://stackoverflow.com/questions/1036409/recursively-convert-python-object-graph-to-dictionary

    :param object: object or dict to convert
    :param classkey: key under which the class name is stored
    :return: the converted structure
    """
    if isinstance(object, dict):
        return {k: to_dict(v, classkey) for k, v in object.items()}
    if hasattr(object, '__dict__'):
        data = {
            key: to_dict(value, classkey)
            for key, value in object.__dict__.items()
            if not callable(value) and not key.startswith('_')
        }
        if classkey is not None and hasattr(object, '__class__'):
            data[classkey] = object.__class__.__name__
        return data
    return object
def is_tomodir(subdirectories):
    """Given the subdirectories of a directory, check whether it has the
    layout of a tomodir, i.e. contains every required subdirectory.
    """
    required = ('exe', 'config', 'rho', 'mod', 'inv')
    return all(subdir in subdirectories for subdir in required)
def midi_to_pitch(midi: int) -> float:
    """Returns the absolute pitch in Hz for the given MIDI note value."""
    # A4 (MIDI 69) is 440 Hz; each semitone is a factor of 2**(1/12).
    semitones_from_a4 = midi - 69
    return 440 * 2 ** (semitones_from_a4 / 12)
def wrap_server_method_handler(wrapper, handler):
    """Wrap the behavior function inside an RpcMethodHandler.

    The server implementation requires all handlers to be RpcMethodHandler
    objects; this helper rebuilds one whose behavior function has been
    passed through *wrapper*.

    Args:
        wrapper: callable taking the raw behavior function and returning
            the wrapped function.
        handler: the RpcMethodHandler to wrap (may be None/falsy).

    Returns:
        A newly created RpcMethodHandler, or None for a falsy *handler*.
    """
    if not handler:
        return None
    # NOTE(lidiz) _replace is a public API:
    # https://docs.python.org/dev/library/collections.html
    if handler.request_streaming:
        if handler.response_streaming:
            return handler._replace(stream_stream=wrapper(handler.stream_stream))
        return handler._replace(stream_unary=wrapper(handler.stream_unary))
    if handler.response_streaming:
        return handler._replace(unary_stream=wrapper(handler.unary_stream))
    return handler._replace(unary_unary=wrapper(handler.unary_unary))
def remove_empty(d):
    """
    Delete every key of *d* whose value is falsy.

    The dictionary is mutated in place and also returned for convenience.
    """
    # Collect doomed keys first so we never mutate while iterating.
    empties = [key for key, value in d.items() if not value]
    for key in empties:
        del d[key]
    return d
def convert_clip_ids_to_windows(clip_ids):
    """ Inverse function of convert_windows_to_clip_ids.
    Groups consecutive clip indices into inclusive [start, end] windows.
    Args:
        clip_ids: list(int), each is a index of a clip, starting from 0
    Returns:
        list(list(int)), each sublist contains two integers which are clip indices.
            [10, 19] meaning a 9 clip window [20, 40] (seconds), if each clip is 2 seconds.
    >>> test_clip_ids = [56, 57, 58, 59, 60, 61, 62] + [64, ] + [67, 68, 69, 70, 71]
    >>> convert_clip_ids_to_windows(test_clip_ids)
    [[56, 62], [64, 64], [67, 71]]
    """
    if not clip_ids:  # previously raised IndexError on an empty input
        return []
    windows = []
    _window = [clip_ids[0], None]
    last_clip_id = clip_ids[0]
    for clip_id in clip_ids:
        if clip_id - last_clip_id > 1:  # gap found: close the current window
            _window[1] = last_clip_id
            windows.append(_window)
            _window = [clip_id, None]
        last_clip_id = clip_id
    # Close the final (still open) window.
    _window[1] = last_clip_id
    windows.append(_window)
    return windows
def intersection_over_union(box1, box2):
    """Compute the IoU of two boxes given as [xmin, ymin, xmax, ymax].

    Coordinates are treated as inclusive pixel indices, hence the +1 in
    the width/height computations.
    """
    xmin1, ymin1, xmax1, ymax1 = box1
    xmin2, ymin2, xmax2, ymax2 = box2
    # Individual box areas (inclusive coordinates).
    area1 = (xmax1 - xmin1 + 1) * (ymax1 - ymin1 + 1)
    area2 = (xmax2 - xmin2 + 1) * (ymax2 - ymin2 + 1)
    # Overlap rectangle, clamped to zero when the boxes are disjoint.
    inter_w = max(0, min(xmax1, xmax2) - max(xmin1, xmin2) + 1)
    inter_h = max(0, min(ymax1, ymax2) - max(ymin1, ymin2) + 1)
    area_intersection = inter_w * inter_h
    return area_intersection / (area1 + area2 - area_intersection)
def coalesce(*xs):
    """
    Coalescing monoid operation: return the first non-null argument or None.

    When called with a single argument, that argument is treated as an
    iterable of candidates.

    Examples:
    >>> coalesce(None, None, "not null")
    'not null'
    """
    candidates = xs[0] if len(xs) == 1 else xs
    for candidate in candidates:
        if candidate is not None:
            return candidate
    return None
def extract_full_names(people):
    """Return list of names, extracting from first+last keys in people dicts.
    - people: list of dictionaries, each with 'first' and 'last' keys for
    first and last names
    Returns list of space-separated first and last names.
    >>> names = [
    ...     {'first': 'Ada', 'last': 'Lovelace'},
    ...     {'first': 'Grace', 'last': 'Hopper'},
    ... ]
    >>> extract_full_names(names)
    ['Ada Lovelace', 'Grace Hopper']
    """
    return [f"{person['first']} {person['last']}" for person in people]
def __discord_id_from_mention(discordid:str) -> str:
    """Strip Discord mention wrapping and return the bare Discord ID.

    Mentions look like <@!0123456789> (user has a nickname) or
    <@0123456789> (no nickname); a plain ID is returned untouched.
    """
    # Check the longer prefix first so '<@!' is not mis-handled by '<@'.
    for prefix in ("<@!", "<@"):
        if discordid.startswith(prefix):
            return discordid[len(prefix):-1]
    return discordid
def check_restraint_pairs_for_doubles(list):  # a1 and a2 may also be swapped
    """
    Check a list of restraint pairs for doubles.

    Two entries count as a double when they share the same (r1, r2) pair
    in either order, or when their distances are equal.

    Parameters
    ----------
    list : t.List[t.Tuple]
        A list of tuples

    Returns
    -------
    bool
        Does the list contain doubles?
    """
    for i, first in enumerate(list[:-1]):
        for second in list[i + 1:]:
            same_order = first.r1 == second.r1 and first.r2 == second.r2
            swapped = first.r1 == second.r2 and first.r2 == second.r1
            if same_order or swapped or first.distance == second.distance:
                return True
    return False
def count_char(char, text):
    """Return how many times *char* occurs in *text*."""
    occurrences = text.count(char)
    return occurrences
def strip_dash(text):
    """Strip surrounding dashes from 'text'; falsy input is returned unchanged."""
    return text.strip("-") if text else text
def arshift(x, disp):
    """Return x floor-divided (//) by two to the power of <disp>."""
    # // (not >>) keeps the original floor semantics, including for a
    # negative disp where 2 ** disp is a fractional float.
    divisor = 2 ** disp
    return x // divisor
def sim_file_to_run(file):
    """Extracts run number from a simulation file path
    Parameters
    ----------
    file : str
        Simulation file path.
    Returns
    -------
    run : int
        Run number for simulation file
    Examples
    --------
    >>> file = '/data/ana/CosmicRay/IceTop_level3/sim/IC79/7241/Level3_IC79_7241_Run005347.i3.gz'
    >>> sim_file_to_run(file)
    5347
    """
    # Six zero-padded digits follow the 'Run' marker in the file name.
    marker = file.find('Run')
    digits = file[marker + 3:marker + 9]
    return int(digits)
def _check_type(type_, value):
    """Return True when *value* is the specified type itself or an
    instance of it.
    """
    if value is type_:
        return True
    return isinstance(value, type_)
def axes_ticks_style(ticks=True):
    """toggle axes ticks on/off

    Returns a matplotlib-style rc dict with both tick keys set to the
    (boolean) value of *ticks*.
    """
    show = bool(ticks)
    return {
        "xtick.bottom": show,
        "ytick.left": show,
    }
def concatSC(m, n, k=1):
    """Worst case state complexity for concatenation
    :arg m: number of states
    :arg n: number of states
    :arg k: number of letters
    :type m: integer
    :type n: integer
    :type k: integer
    :returns: state complexity
    :rtype: integer"""
    # m * 2**n - k * 2**(n-1) factored as (2m - k) * 2**(n-1).
    half = 2 ** (n - 1)
    return (2 * m - k) * half
def get_index_action(index_name, document_type, document):
    """Build the bulk-API index action for one document.

    :param index_name: Elasticsearch index to use
    :type index_name: str
    :param document_type: Elasticsearch document type to use
    :type document_type: str
    :param document: Document to be indexed
    :type document: dict
    :return: Action to be passed in bulk request
    :rtype: dict
    """
    action = dict(_index=index_name, _type=document_type, _source=document)
    # Reuse the table's _id so Elasticsearch ids match the database.
    if '_id' in document:
        action['_id'] = document['_id']
    return action
def unblock_list(blocked_ips_list, to_block_list):
    """ This function creates list of IPs that are present in the firewall block list, but not in
    the list of new blockings which will be sent to the firewall.
    :param blocked_ips_list: List of blocked IPs (dicts with an 'ip' key).
    :param to_block_list: List of new blockings (dicts with ['host']['ip_address']).
    :return: List of IPs to be unblocked, in blocked-list order.
    """
    # Build the membership set once: O(n + m) instead of the previous
    # O(n * m) nested scan. Output order and contents are unchanged.
    still_blocked = {host['host']['ip_address'] for host in to_block_list}
    return [blocked['ip'] for blocked in blocked_ips_list
            if blocked['ip'] not in still_blocked]
def parse_parameters(parameters):
    """Parse job parameters into (working_directory, extra).

    Defaults are '.' and '' when the keys are absent.
    """
    working_directory = parameters.get('working_directory', '.')
    extra = parameters.get('extra', '')
    return working_directory, extra
def solution(N, A):
    """Codility MaxCounters: apply the operations in A to N counters.

    A value 1..N increases that counter by one; any other value sets
    every counter to the current maximum. To stay O(len(A) + N) we never
    rewrite all counters on a 'max counter' operation -- instead we
    remember the floor each counter must be lifted to ('applied_max')
    and apply it lazily, either on the counter's next increase or in the
    final pass.
    """
    counters = [0] * N
    applied_max = 0   # floor imposed on every counter by past max-ops
    running_max = 0   # largest counter value seen so far
    for op in A:
        if 1 <= op <= N:
            idx = op - 1
            # Lift the counter to the pending floor, then increase it.
            counters[idx] = max(counters[idx], applied_max) + 1
            if counters[idx] > running_max:
                running_max = counters[idx]
        else:
            # max counter: record the floor instead of touching N cells.
            applied_max = running_max
    # Final pass applies the floor to counters never touched since.
    return [max(c, applied_max) for c in counters]
def Levenshtein(s1,s2):
    """Return the Levenshtein (edit) distance between two sequences.

    Parameters
    ----------
    s1 : list
        list of activities, which is the first sequence to be aligned
    s2 : list
        list of activities, which is the second sequence to be aligned

    Returns
    -------
    score : float
        Distance score between two sequence
    """
    # Keep the shorter sequence as the rolling DP row to minimise memory.
    if len(s1) > len(s2):
        s1, s2 = s2, s1
    previous = list(range(len(s1) + 1))
    for row, item2 in enumerate(s2, start=1):
        current = [row]
        for col, item1 in enumerate(s1, start=1):
            if item1 == item2:
                current.append(previous[col - 1])
            else:
                # min of substitution, deletion and insertion costs.
                current.append(1 + min(previous[col - 1], previous[col], current[-1]))
        previous = current
    return float(previous[-1])
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.