content stringlengths 42 6.51k |
|---|
def header_level(line):
    """Count the leading '#' characters of a markdown heading line.

    Examples
    --------
    >>> header_level('# title')
    1
    >>> header_level('### title')
    3
    """
    level = 0
    for ch in line:
        if ch != "#":
            break
        level += 1
    return level
def ljust(value, arg):
    """Left-align ``value`` in a field of the given width.

    Argument: field size (coerced to int).
    """
    width = int(arg)
    return value.ljust(width)
def calc_labour(yield_required):
    """Estimate direct farm labour cost for a required yield.

    Notes
    -----
    Labour hours are generalised as 1.2 x the required yield (used when the
    actual labour requirement is unknown), costed at a flat hourly wage.
    """
    hours_needed = yield_required * 1.2
    hourly_wage = 7  # flat wage rate
    return hours_needed * hourly_wage
def _int(i, fallback=0):
""" int helper """
# pylint: disable=broad-except
try:
return int(i)
except BaseException:
return fallback |
def fibonacci(n: int) -> int:
    """Return the n-th Fibonacci number.

    Computed iteratively in O(n) time; the previous naive recursion was
    O(phi**n) and unusable for moderate n.

    Raises:
        TypeError: if ``n`` is not exactly an ``int`` (``bool`` is rejected).
        ValueError: if ``n`` is negative.
    """
    if type(n) is not int:  # deliberate exact-type check, also rejects bool
        raise TypeError("n isn't integer")
    if n < 0:
        raise ValueError("n is negative")
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a
def _extract_name(line: str) -> str:
"""Accepts a UniProt DE line (string) as input. Returns the name with
evidence tags removed.
"""
tokens = line[19:-2].split(" {")
name = tokens[0]
return name |
def saponificationIndex(sample_weight, HCl_molarity, HCl_fc, HCl_spent, blank_volume):
    """
    Calculate the saponification index in mg of KOH per gram of sample.
    """
    KOH_MOLAR_MASS = 56  # molecular weight of KOH
    titrated_volume = blank_volume - HCl_spent
    koh_mols = HCl_molarity * HCl_fc * titrated_volume
    koh_mass = koh_mols * KOH_MOLAR_MASS
    return koh_mass / sample_weight
def _get_reduce_batch_axis(axis, x_dim, x_ndim):
"""get batch_axis for reduce* operation."""
if not isinstance(axis, tuple):
axis = (axis,)
batch_axis = ()
if axis:
for index in axis:
if index < x_dim:
batch_axis = batch_axis + (index,)
else:
batch_axis = batch_axis + (index + 1,)
else:
batch_axis_list = [index for index in range(x_ndim)]
del batch_axis_list[x_dim]
batch_axis = tuple(batch_axis_list)
return batch_axis |
def get_font_color(average_color_metric):
    """Returns tuple of primary and secondary colors depending on average color metric"""
    # White text over dark backgrounds (metric[3] < 128), black otherwise.
    primary = (255, 255, 255) if average_color_metric[3] < 128 else (0, 0, 0)
    return primary, average_color_metric[:3]
def query_merge_rels_unwind(start_node_labels, end_node_labels, start_node_properties,
                            end_node_properties, rel_type, property_identifier=None):
    """
    Merge relationship query with explicit arguments.
    Note: The MERGE on relationships does not take relationship properties into account!
    UNWIND $rels AS rel
    MATCH (a:Gene), (b:GeneSymbol)
    WHERE a.sid = rel.start_sid AND b.sid = rel.end_sid AND b.taxid = rel.end_taxid
    MERGE (a)-[r:MAPS]->(b)
    SET r = rel.properties
    Call with params:
        {'start_sid': 1, 'end_sid': 2, 'end_taxid': '9606', 'properties': {'foo': 'bar} }
    Within UNWIND you cannot access nested dictionaries such as 'rel.start_node.sid'. Thus the
    parameters are created in a separate function.
    :param property_identifier: The variable used in UNWIND (defaults to 'rels').
    :return: Query
    """
    identifier = property_identifier or 'rels'
    a_labels = ':'.join(start_node_labels)
    b_labels = ':'.join(end_node_labels)
    # WHERE clauses match node properties against the unwound rel fields.
    conditions = ['a.{0} = rel.start_{0}'.format(prop) for prop in start_node_properties]
    conditions += ['b.{0} = rel.end_{0}'.format(prop) for prop in end_node_properties]
    parts = [
        "UNWIND ${0} AS rel \n".format(identifier),
        "MATCH (a:{0}), (b:{1}) \n".format(a_labels, b_labels),
        "WHERE " + ' AND '.join(conditions) + " \n",
        "MERGE (a)-[r:{0}]->(b) \n".format(rel_type),
        "SET r = rel.properties RETURN count(r) as cnt\n",
    ]
    return ''.join(parts)
def str_visible_len(s):
    """
    :param str s:
    :return: len without escape chars
    :rtype: int
    """
    import re
    # ANSI escape-sequence pattern, via:
    # https://github.com/chalk/ansi-regex/blob/master/index.js
    ansi_pattern = "[\x1b\x9b][\\[()#;?]*(?:[0-9]{1,4}(?:;[0-9]{0,4})*)?[0-9A-PRZcf-nqry=><]"
    visible = re.sub(ansi_pattern, "", s)
    return len(visible)
def getValue(struct, path, default=None):
    """
    Read a value from a nested data structure.

    Arguments:
        struct can comprise one or more levels of dicts and lists.
        path should be a string using dots to separate levels.
        default will be returned if the path cannot be traced.
    Example:
        getValue({'a': [1, 2, 3]}, "a.1") -> 2
        getValue({'a': [1, 2, 3]}, "a.3") -> None
    """
    current = struct
    try:
        for part in path.split("."):
            if isinstance(current, list):
                part = int(part)
            current = current[part]
        return current
    except (KeyError, IndexError, TypeError, ValueError):
        # Narrowed from a bare `except:` so unrelated errors (e.g.
        # KeyboardInterrupt) propagate.  These four cover: missing key,
        # out-of-range index, indexing a non-container, non-int list index.
        return default
def pagealign(data, address, page_size, data_size=1):
    """
    Align data to the start of a page.

    Prepends 0xFF bytes so the returned address falls on a page boundary,
    and appends 0xFF until the length is a multiple of data_size.

    Returns the (mutated) data list and the aligned address.
    """
    offset = address % page_size
    # One slice assignment instead of repeated insert(0, ...): each insert
    # shifted the whole list, making the prepend O(len * offset).
    data[:0] = [0xFF] * offset
    # In case of other data sizes, post-pad the data.
    while len(data) % data_size:
        data.append(0xFF)
    return data, address - offset
def best_of_if(first, second):
    """
    Tells which move is the best.

    Both arguments are dicts describing a candidate move with (string) keys:
        cleared: number of lines cleared by the move
        holes: number of holes of the resulting board
        bumpiness: bumpiness score of the resulting board
        height: maximum height of the resulting board (not compared here)
        aheight: sum of all column heights of the resulting board
        lines, score: totals used only as final tie-breakers

    Returns
    -------
    dict
        Either first or second
    """
    # A move clearing 3+ lines wins outright on cleared count.
    if first["cleared"] >= 3:
        if first["cleared"] > second["cleared"]:
            return first
        if first["cleared"] < second["cleared"]:
            return second
    # Fewer holes is better.
    if first["holes"] > second["holes"]:
        return second
    if first["holes"] < second["holes"]:
        return first
    # Lower bumpiness is better.
    if first["bumpiness"] > second["bumpiness"]:
        return second
    if first["bumpiness"] < second["bumpiness"]:
        return first
    # Lower aggregate height is better.
    if first["aheight"] > second["aheight"]:
        return second
    if first["aheight"] < second["aheight"]:
        # BUG FIX: this branch previously returned `second`, so the move
        # with the LOWER aggregate height lost, contradicting the branch
        # just above (both branches returned second).
        return first
    # More total lines is better.
    if first["lines"] > second["lines"]:
        return first
    if first["lines"] < second["lines"]:
        return second
    # Higher score wins; a complete tie goes to second.
    if first["score"] > second["score"]:
        return first
    return second
def unique_elements(values):
    """List the unique elements of a list."""
    # A set drops duplicates; note the original ordering is not preserved.
    deduplicated = set(values)
    return list(deduplicated)
def UTCSecondToLocalDatetime(utcsec, timezone="Europe/Berlin"):
    """
    Convert utc second to local datetime (specify correct local timezone with string).
    Parameters
    ----------
    utcsec: int
        Time in utc seconds (unix time).
    timezone: string
        Time zone string compatible with pytz format
    Returns
    -------
    dt: Datetime object corresponding to local time.
    Notes
    -----
    Adapted from a stackoverflow answer.
    To list all available time zones:
    >> import pytz
    >> pytz.all_timezones
    To print the returned datetime object in a certain format:
    >> from pyik.time_conversion import UTCSecondToLocalDatetime
    >> dt = UTCSecondToLocalDatetime(1484314559)
    >> dt.strftime("%d/%m/%Y, %H:%M:%S")
    """
    import pytz
    from datetime import datetime
    local_tz = pytz.timezone(timezone)
    # Interpret the unix timestamp as naive UTC, tag it as UTC, then convert.
    utc_dt = datetime.utcfromtimestamp(utcsec)
    local_dt = utc_dt.replace(tzinfo=pytz.utc).astimezone(local_tz)
    # normalize() corrects the UTC offset around DST transitions (pytz idiom).
    return local_tz.normalize(local_dt)
def fnr_score(conf_mtx):
    """Computes FNR (false negative rate) given confusion matrix"""
    # NOTE(review): assumes layout [[tp, fp], [fn, tn]] — verify against callers.
    (tp, _fp), (fn, _tn) = conf_mtx
    positives = tp + fn
    if positives > 0:
        return fn / positives
    return 0
def get_label_name_from_dict(labels_dict_list):
    """
    Parses the labels dict and returns just the names of the labels.
    Args:
        labels_dict_list (list): list of dictionaries
    Returns:
        str: name of each label separated by commas.
    """
    names = (entry["name"] for entry in labels_dict_list)
    return ",".join(names)
def get_cal_total_info(data):
    """Sum the calculation amounts of every entry in the origin data.

    Parameters
    ----------
    data : list[dict[str, dict[str, object]]]
        Original data; each entry carries a 'calculation_amount' dict.

    Returns
    -------
    dict
        Per-operation totals (fused_mul_add, mul, add, sub, exp, comp, div).
    """
    totals = dict.fromkeys(
        ("fused_mul_add", "mul", "add", "sub", "exp", "comp", "div"), 0)
    for entry in data:
        for op, amount in entry["calculation_amount"].items():
            totals[op] += amount
    return totals
def average_precision(y_true, y_pred):
    """Calculate the average precision score.

    Args:
        y_true (List(Permission)): A list of permissions used.
        y_pred (List(Permission)): A ranked list of permissions recommended.
    Returns:
        ap: Average precision (0.0 when either list is empty).
    """
    if not y_true or not y_pred:
        return 0.0
    score = 0.0
    hits = 0.0
    for rank, perm in enumerate(y_pred):
        # Count only the first occurrence of each relevant prediction.
        if perm in y_true and perm not in y_pred[:rank]:
            hits += 1.0
            score += hits / (rank + 1.0)
    return score / len(y_true)
def minima_in_list(lx, ly):
    """
    Scan (lx, ly) samples for points that may be local minima.

    A point is a candidate when its y value is <= both neighbours;
    endpoints compare against their single neighbour.

    Returns the list of x-guesses.
    """
    # Renamed from `np` (clashed with the conventional numpy alias) and
    # dropped the unused xi/xk/yj neighbour aliases from the original loop.
    n_points = len(lx)
    guesses = []
    # initial point
    if ly[0] <= ly[1]:
        guesses.append(lx[0])
    # mid points
    for idx in range(1, n_points - 1):
        if ly[idx] <= ly[idx - 1] and ly[idx] <= ly[idx + 1]:
            guesses.append(lx[idx])
    # final point
    if ly[-1] <= ly[-2]:
        guesses.append(lx[-1])
    return guesses
def GetChunks(data, size=None):
    """
    Split ``data`` into consecutive chunks of ``size`` elements.

    The final chunk may be shorter.  ``size=None`` (default) returns the
    whole sequence as a single chunk.
    """
    if size is None:  # identity comparison, not `== None`
        size = len(data)
    if not data:
        # Avoids range(0, 0, 0) when data is empty and size defaulted to 0.
        return []
    return [data[start:start + size] for start in range(0, len(data), size)]
def splitstring(data, maxlen):
    """ Split string by length """
    pieces = [data[start:start + maxlen] for start in range(0, len(data), maxlen)]
    return "\n".join(pieces)
def sanitize_title(title):
    """
    Remove forbidden characters from title that will prevent Windows
    from creating directory.
    Also change ' ' to '_' to preserve previous behavior.
    """
    forbidden_chars = ' *"/\<>:|(),'
    replace_char = "_"
    # One translate() pass instead of chained replace() calls.
    table = str.maketrans(forbidden_chars, replace_char * len(forbidden_chars))
    return title.translate(table)
def RemoveExonPermutationsFromFront(segments):
    """remove exon permutations from the front.
    Only permutations are removed, that are completely out of sync
    with query. Overlapping segments are retained, as they might
    correspond to conflicting starting points and both should be
    checked by genewise.
    """
    if len(segments) <= 1:
        return segments
    keep_from = 0
    while keep_from + 1 < len(segments):
        query_from = segments[keep_from][1]
        next_query_to = segments[keep_from + 1][2]
        # Stop skipping once this segment starts before the next one ends,
        # i.e. the pair is (at least partially) in sync with the query.
        if query_from < next_query_to:
            break
        keep_from += 1
    return segments[keep_from:]
def edge(geom):
    """
    Return a polygon boundary representing the edge of `geom`.

    Tries `geom.exterior` first; falls back to buffering by a tiny amount
    (for geometries without an exterior), and finally returns `geom`
    itself unchanged.
    """
    h = 1e-8
    # Narrowed from bare `except:` clauses, which also caught
    # KeyboardInterrupt/SystemExit.
    try:
        return geom.exterior
    except AttributeError:
        pass
    try:
        return geom.buffer(h).exterior
    except Exception:
        # buffer() failures are library-specific, so stay broad here —
        # but no longer broader than Exception.
        return geom
def find_in_list(l, pred):
    """Return the first element of `l` satisfying predicate `pred`, or None.
    """
    return next((item for item in l if pred(item)), None)
def get_byte_order(ei_data):
    """Get the endian-ness of the header.

    :param ei_data: the single EI_DATA identification byte.
    :return: 'little' or 'big'.
    :raises ValueError: for any other value — now with a diagnostic message
        (the original raised a bare, message-less ValueError).
    """
    if ei_data == b'\x01':
        return 'little'
    if ei_data == b'\x02':
        return 'big'
    raise ValueError("invalid EI_DATA byte: {!r}".format(ei_data))
def filter_common_words(tokens, word_list):
    """
    Filter out words that appear in many of the documents.
    Args:
        tokens: a list of word lemmas
        word_list: a list of common words
    Returns:
        a list of unique word lemmas with the common words removed
        (order is not preserved; the original docstring wrongly said "set")
    """
    # Set membership is O(1); testing against the raw list was O(len(word_list))
    # per token.
    common = set(word_list)
    return list({word for word in tokens if word not in common})
def build_fib_iterative(n):
    """
    n: number of elements in the sequence
    Returns a Fibonacci sequence of n elements by iterative method
    """
    if n == 1:
        return [0]
    if n == 2:
        return [0, 1]
    seq = [0, 1]
    while len(seq) < n:
        seq.append(seq[-1] + seq[-2])
    return seq
def get_file_offset(vfp: int) -> int:
    """Convert a block compressed virtual file pointer to a file offset."""
    ADDRESS_MASK = 0xFFFFFFFFFFFF  # low 48 bits hold the compressed offset
    return (vfp >> 16) & ADDRESS_MASK
def solve_by_dichotomy(f, objective, a, b, tolerance):
    """
    Apply the dichotomy method to solve the equation f(x)=objective, x being the unknown variable.

    a and b are initial x values satisfying the following inequation:
    (f(a) - objective) * (f(b) - objective) < 0, i.e. the objective is
    bracketed between them (f is assumed monotonic on [a, b]).
    The iterative method stops once f(x) is found sufficiently close to the
    objective, that is when |objective-f(x)| <= tolerance. Returns the value of x.
    """
    # check that (f(a) - objective) * (f(b) - objective) <= 0
    y_a, y_b = f(a), f(b)
    assert (y_a - objective) * (y_b - objective) <= 0.
    # check the order
    assert a < b
    # will be used as a multiplier in various tests that depend on the function monotony (incr. or decr.)
    monotony = -1. if y_a >= objective else 1.
    while min(abs(y_a - objective), abs(y_b - objective)) > tolerance:
        dist_a, dist_b = abs(y_a - objective), abs(y_b - objective)
        # the x update is based on the Thales theorem (linear interpolation
        # between the two bracketing points)
        # - that is, if f is linear, the exact solution is obtained in one step
        x = a + (dist_a * b - a * dist_a) / (dist_a + dist_b)
        y = f(x)
        # keep the bracket: replace whichever bound lies on the same side as y
        if (y - objective) * monotony >= 0:
            b, y_b = x, y
        else:
            a, y_a = x, y
    # return the bound whose image is closest to the objective
    if abs(y_a - objective) <= abs(y_b - objective):
        return a
    else:
        return b
def index_sets(items):
    """
    Create a dictionary of sets from an iterable of `(key, value)` pairs.
    Each value is added to the set stored at its key, so duplicates
    collapse; values must therefore be hashable.
    """
    index = {}
    for key, value in items:
        index.setdefault(key, set()).add(value)
    return index
def log_mean(T_hi, T_lo, exact=False):
    """
    compute the logarithmic mean
    """
    if exact:
        from numpy import log
        return (T_hi - T_lo) / log(T_hi / T_lo)
    # series approximation around T_hi, avoiding the log evaluation
    delta = T_hi - T_lo
    return T_hi - delta / 2 * (1 + delta / 6 / T_hi * (1 + delta / 2 / T_hi))
def is_triangular(k):
    """
    k, a positive integer
    returns True if k is triangular (k == 1+2+...+n for some n), else False
    """
    if k <= 0:  # merged the redundant `k == 0 or k < 0` test
        return False
    total = 0
    for n in range(1, k + 1):  # start at 1: adding 0 was a no-op
        total += n
        if total == k:
            return True
        if total > k:
            # Partial sums only grow, so we can stop early.
            return False
    return False
def coefficients(debug=False):
    """
    Generate Ranking System Coefficients.

    Parameters
    ----------
    debug : bool, optional
        When True, print the available ranking-system names. Default False.

    Returns
    -------
    ratingCoeff : dict
        Mapping of ranking-system name to its coefficient dict.
    """
    def make(**overrides):
        # Shared defaults with per-system overrides applied on top.
        coeff = {'initRating': 1500,
                 'avgRating': 1500,
                 'kRating': 25,
                 'regress': 0,
                 'hfAdvantage': 0,
                 'hiAdvantage': 0,
                 'goalDiffExp': 0}
        coeff.update(overrides)
        return coeff

    ratingCoeff = {
        'simpleElo': make(),
        'basicElo': make(initRating=1150, regress=0.1),
        'hfAdvElo': make(initRating=1300, kRating=30, regress=0.3,
                         hfAdvantage=30),
        'fullElo': make(initRating=1300, kRating=30, regress=0.3,
                        hfAdvantage=10, hiAdvantage=20, goalDiffExp=0.2),
    }
    if debug:
        print(list(ratingCoeff.keys()))
    return ratingCoeff
def getQueryString( bindings, variableName ):
    """ Collects a bunch of data about the bindings. Will return properly formatted strings for
    updating, inserting, and querying the SQLite table specified in the bindings dictionary. Will also
    return the table name and a string that lists the columns (properly formatted for use in an SQLite
    query).
    variableName is the name to use for the SQLiteC++ Statement variable in the generated methods.

    Each binding is a dict with a 'type' of either 'table' or 'column'; column
    bindings also carry 'column', 'variable', 'variableType' and optionally 'id'.
    Returns a dict of generated C++ snippet strings keyed by purpose
    (the 'whereClaus' key spelling is part of the public result — do not rename).
    """
    table = ''
    columns = []
    queryData = []
    insertData = []
    updateData = []
    whereClaus = []
    bindData = []
    index = 0  # running getColumn() index, incremented per column binding
    for b in bindings:
        # Process table
        if (b['type'] == 'table'):
            table = b['table']
        # Process column
        elif (b['type'] == 'column'):
            columns.append( b['column'] )
            # Process query data: strings need an explicit std::string(...)
            # wrap around getText(); everything else uses the implicit
            # getColumn() conversion.
            if (b['variableType'] == 'string'):
                text = '{variable} = std::string( {query}.getColumn({index}).getText() );'
                text = text.format(variable = b['variable'], index = index, query = variableName)
                queryData.append( text )
            else:
                text = '{variable} = {query}.getColumn({index});'
                text = text.format(variable = b['variable'], index = index, query = variableName)
                queryData.append( text )
            index = index + 1
            # Process insert data: string-ish values are emitted wrapped in
            # SQL single quotes inside the generated stream expression.
            if (b['variableType'] == 'string' or b['variableType'] == 'char*'):
                insertData.append( "\"'\" << " + b['variable'] + " << \"'\"" )
            else:
                insertData.append( b['variable'] )
            # Process id: id columns become `col = ?` placeholders plus the
            # matching 1-based query.bind(...) call.
            if (b.get('id')):
                whereClaus.append( b['column'] + ' = ?' )
                text = 'query.bind({index}, {variableName});'
                text = text.format(index = len(whereClaus), variableName = b['variable'])
                bindData.append( text )
    # Process update data: pair each column with its insert expression
    # to form `col=" << value` fragments.
    for i in range(0, len(columns)):
        t = columns[i] + '=" << ' + insertData[i]
        updateData.append(t)
    # Join the accumulated pieces into the final snippet strings.
    columns = ', '.join( columns )
    updateData = ' << ", '.join( updateData )
    insertData = ' << \", " << '.join( insertData )
    queryData = '\n'.join( queryData )
    whereClaus = ' AND '.join( whereClaus )
    bindData = '\n\t'.join( bindData )
    return {
        'table': table,
        'updateData': updateData,
        'columns': columns,
        'insertData': insertData,
        'queryData': queryData,
        'whereClaus': whereClaus,
        'bindData': bindData
    }
def double_factorial(k: int) -> int:
    """
    Return the double factorial k!!: the product of all positive integers
    with the same parity as k that are smaller or equal to k.
    By convention an empty product is 1, so double_factorial(0) returns 1.
    :param k: A positive integer
    :return: The double factorial of that integer
    """
    start = 1 if k % 2 == 1 else 2
    product = 1
    for factor in range(start, k + 1, 2):
        product *= factor
    return product
def si_prefix(prefix=None):
    """
    Return the SI-prefix label for a unit ('micro' yields LaTeX).
    Lookup is case-insensitive; unknown prefixes raise KeyError.
    """
    lookup = {
        'micro': '$\mu$',
        'milli': 'm',
        'none': '',
        'kilo': 'k',
        'mega': 'M',
    }
    key = 'none' if prefix is None else prefix.lower()
    return lookup[key]
def unpad_list(listA, val=-1):
    """Unpad a list (or list of lists) by removing values equal to 'val'.

    Parameters
    ----------
    listA : list
        List of lists of equal sizes (or a flat list).
    val : number, optional
        Value to unpad the lists.

    Returns
    -------
    list
        A list (of lists) without the padding values.

    Examples
    --------
    >>> from dbcollection.utils.pad import unpad_list
    >>> unpad_list([[1,2,3,-1,-1],[5,6,-1,-1,-1]])
    [[1, 2, 3], [5, 6]]
    >>> unpad_list([[5,0,-1],[1,2,3,4,5]], 5)
    [[0, -1], [1, 2, 3, 4]]
    """
    assert isinstance(listA, list), 'Input must be a list. Got {}, expected {}' \
        .format(type(listA), type(list))
    if isinstance(listA[0], list):
        # Dropped the unused enumerate() from the original; a plain
        # comprehension filters each sublist in one pass.
        return [[x for x in sublist if x != val] for sublist in listA]
    return [x for x in listA if x != val]
def parse_color(color):
    """Take any css color definition and give back a tuple containing the
    r, g, b, a values along with a type which can be: #rgb, #rgba, #rrggbb,
    #rrggbbaa, rgb, rgba

    For hex inputs the alpha channel is normalized to 0..1; for rgb(...)
    alpha defaults to 1, and for rgba(...) the trailing component is taken
    as a float.  Unrecognised inputs return (None,)*5.
    """
    r = g = b = a = type = None
    if color.startswith('#'):
        color = color[1:]
        # The three `if` blocks below deliberately cascade: a 3-digit value
        # grows to 4 digits, a 4-digit value to 8, a 6-digit value to 8.
        if len(color) == 3:
            type = '#rgb'
            # assume fully opaque: promote to #rgba with alpha nibble 'f'
            color = color + 'f'
        if len(color) == 4:
            type = type or '#rgba'
            # double every nibble: 'abcf' -> 'aabbccff'
            color = ''.join([c * 2 for c in color])
        if len(color) == 6:
            type = type or '#rrggbb'
            color = color + 'ff'
        assert len(color) == 8
        type = type or '#rrggbbaa'
        # split into byte pairs and parse each as hex
        r, g, b, a = [
            int(''.join(c), 16) for c in zip(color[::2], color[1::2])]
        # normalize alpha from 0..255 to 0..1
        a /= 255
    elif color.startswith('rgb('):
        # note: 'rgba(...' does NOT match this branch ('rgb(' vs 'rgba')
        type = 'rgb'
        color = color[4:-1]
        r, g, b, a = [int(c) for c in color.split(',')] + [1]
    elif color.startswith('rgba('):
        type = 'rgba'
        color = color[5:-1]
        r, g, b, a = [int(c) for c in color.split(',')[:-1]] + [
            float(color.split(',')[-1])]
    return r, g, b, a, type
def grading(score) -> str:
    """
    Converts percentage into letter grade.
    """
    # Guard-clause ladder: each branch implies the earlier bounds failed.
    if score < 50:
        return "F"
    if score < 60:
        return "D"
    if score < 70:
        return "C"
    if score < 80:
        return "B"
    if score <= 100:
        return "A"
    print("Please a positive number that is less than or equal to 100.")
    return "Unknown"
def henon_heiles_potential(x, y):
    r"""The value of the Henon-Heiles potential (\lambda = 1) at (x, y)."""
    quadratic = (x ** 2 + y ** 2) / 2
    cubic = x ** 2 * y - y ** 3 / 3
    return quadratic + cubic
def clean_headers_client(header):
    """
    Only allows through headers which are safe to pass to the server.

    Keys surviving the whitelist are lower-cased; everything else is
    dropped.  Mutates and returns `header`.
    """
    valid = ['user_agent', 'x-mining-extensions', 'x-mining-hashrate']
    # Iterate over a snapshot: deleting keys from a dict while iterating
    # it raises RuntimeError in Python 3 (the original did exactly that).
    for name, value in list(header.items()):
        del header[name]
        if name.lower() in valid:
            header[name.lower()] = value
    return header
def calculateSimularity(data1, data2):
    """
    On a scale of 0 to 1 returns how simular the two datasets are
    INPUTS
    -------
    data1 : list of numbers - ranging from 0 - 1
    data2 : list of numbers - ranging from 0 - 1
    OUTPUTS
    -------
    simularity : how similar are the two datasets?
                 0 is totally different 1 is the same data
    """
    if len(data1) != len(data2):
        return 0
    if not data1:
        # two empty datasets are identical (also avoids division by zero)
        return 1
    total = 0
    for i, d in enumerate(data1):
        total += abs(d - data2[i])
    # BUG FIX: the original returned the mean absolute difference directly,
    # which is 0 for identical data — the opposite of the documented scale.
    return 1 - total / len(data1)
def reverse_template(retro_template):
    """
    Reverse the reaction template to switch product and reactants.
    :param retro_template: the reaction template
    :type retro_template: str
    :return: the reverse template
    :rtype: str
    """
    sides = retro_template.split(">>")
    return ">>".join(reversed(sides))
def calculate_relative_metric(curr_score, best_score):
    """
    Calculate the difference between the best score and the current score,
    as a percentage relative to the best score.  A negative result means
    the current score is higher than the best score.

    Parameters
    ----------
    curr_score : float
        The current score
    best_score : float
        The best score (must be non-zero)

    Returns
    -------
    float
        The relative score in percent
    """
    gap = best_score - curr_score
    return (100 / best_score) * gap
def list2dict(l=None, keylist=None):
    """
    task 0.5.31 && task 0.6.3
    One-line procedure mapping a list of letters onto a keylist as a dict.
    e.g. l=['A', 'B', 'C'], keylist=['a', 'b', 'c']
         -> {'a': 'A', 'b': 'B', 'c': 'C'}
    """
    # Mutable default arguments are shared across calls; use None sentinels
    # and fall back to the original example defaults.
    if l is None:
        l = ['A', 'B', 'C']
    if keylist is None:
        keylist = ['a', 'b', 'c']
    return {k: l[i] for i, k in enumerate(keylist)}
def align_int_down(val, align):
    """Align a value down to the given alignment.

    Args:
        val: Integer value to align
        align: Integer alignment value (e.g. 4 to align to 4-byte boundary)

    Returns:
        integer value aligned to the required boundary, rounding down if
        necessary
    """
    # Pure integer floor division.  The previous int(val / align) went
    # through a float (losing precision for |val| above 2**53) and
    # truncated toward zero, which rounded negative values UP, not down.
    return val // align * align
def get_items(items_conf):
    """generic function creating a list of objects from a config specification
    objects are: analyses, robots, worlds, tasks, losses, ...
    """
    # Each conf entry names its own class; instantiate it with the conf dict.
    return [conf["class"](conf) for conf in items_conf]
def create_db_links(txt_tuple_iter, detail_page):
    """
    Format DB record info as URL links to detail pages.

    :param txt_tuple_iter: an iterable of (display_text, key) tuples, or
                           the literal string 'not in DB' when no
                           instances were found
    :param detail_page: the details page relevant to the entries (str)
    :return: string containing links to each entry, or 'not in DB' (str)
    """
    if txt_tuple_iter == 'not in DB':
        return 'not in DB'
    links = ['<a target="_blank" href="/%s/%s">%s</a>' % (detail_page, key, txt)
             for txt, key in txt_tuple_iter]
    return ', '.join(links)
def solr_field(name=None, type='string', multiValued=False, stored=True, docValues=False):
    """solr_field: convert python dict structure to Solr field structure.

    Booleans are rendered as the lowercase strings Solr expects.
    :raises TypeError: when no name is given.
    """
    if not name:
        # Fixed the message: it previously referred to a nonexistent
        # "solar()" function.
        raise TypeError('solr_field() missing 1 required positional '
                        'argument: "name"')
    lookup_bool = {True: 'true', False: 'false'}
    return {'name': name, 'type': type,
            'multiValued': lookup_bool[multiValued],
            'stored': lookup_bool[stored],
            'docValues': lookup_bool[docValues]}
def splitFullFileName(fileName):
    """
    split a full file name into path, fileName and suffix
    @param fileName
    @return a tuple containing the path (with a trailing slash added), the
            file name (without the suffix) and the file suffix (without the
            preceding dot)
    """
    directory, _, basename = fileName.rpartition('/')
    directory += '/'
    stem, _, suffix = basename.rpartition('.')
    return directory, stem, suffix
def find_largest_digit(n):
    """
    Recursively find the largest digit of n.
    n: (int) input number (negatives allowed; the sign is discarded).
    return: (int) the max digit.  BUG FIX: the base case previously
            returned a one-character *string*, not the int the docstring
            promised.
    """
    digits = str(n)
    if len(digits) == 1:  # base case
        return int(digits)
    if digits[0] <= digits[1]:
        # head digit is not larger than its neighbour: drop the head
        # (char comparison is fine: digits sort like their values, and
        # '-' sorts below every digit so a sign is trimmed here too)
        return find_largest_digit(digits[1:])
    # head digit beats its neighbour: drop the neighbour instead
    return find_largest_digit(digits[0] + digits[2:])
def xor(x, y):
    """Return truth value of ``x`` XOR ``y``."""
    return (not x) != (not y)
def _remove_filename_quotes(filename):
"""Removes the quotes from a filename returned by git status."""
if filename.startswith('"') and filename.endswith('"'):
return filename[1:-1]
return filename |
def has_recursive_magic(s):
    """
    Return ``True`` if the given string (`str` or `bytes`) contains the ``**``
    sequence
    """
    needle = b'**' if isinstance(s, bytes) else '**'
    return needle in s
def create_local_cluster_name(service: str, color: str, index: int) -> str:
    """Create the local service-color cluster name."""
    return f"local-{service}-{color}-{index}"
def scale_l2_to_ip(l2_scores, max_norm=None, query_norm=None):
    """
    Convert faiss squared-L2 distances to inner-product-style scores:
    sqrt(m^2 + q^2 - 2qx) -> m^2 + q^2 - 2qx -> qx - 0.5 (q^2 + m^2)
    faiss already returns the squared euclidean distance, so no squaring here.
    """
    if max_norm is None:
        return -0.5 * l2_scores
    assert query_norm is not None
    adjusted = l2_scores - query_norm ** 2 - max_norm ** 2
    return -0.5 * adjusted
def line_from_2_points(x1, y1, x2, y2):
    """
    Coefficients (a, b, c) of the line a*x + b*y + c = 0 through two points.
    :param x1: first point x
    :param y1: first point y
    :param x2: second point x
    :param y2: second point y
    :return: (a, b, c)
    """
    return (y1 - y2, x2 - x1, x1 * y2 - x2 * y1)
def build_auth_url(url, token=None):
    """build_auth_url
    Helper for constructing authenticated IEX urls
    using an ``IEX Publishable Token`` with a valid
    `IEX Account <https://iexcloud.io/cloud-login#/register/>`__.
    Appends the token as a query parameter when one is given; otherwise
    the url is returned unchanged.
    :param url: initial url to make authenticated
    :param token: optional - string ``IEX Publishable Token``
    """
    if not token:
        return url
    return f'{url}?token={token}'
def stream_type_kwargs(stream_type_param):
    """The kwargs for stream_type to pass when invoking a method on `Events`.
    :rtype:
        `dict`
    """
    return {'stream_type': stream_type_param} if stream_type_param else {}
def unpack_ushort(data: bytes) -> int:
    """Unpacks an unsigned little-endian number from bytes.

    Keyword arguments:
    data -- bytes to unpack number from
    """
    return int.from_bytes(data, "little", signed=False)
def list_2d(width, length, default=None):
    """ Create a 2-dimensional list of width x length """
    # Each row is a fresh list, so rows never alias each other.
    return [[default for _ in range(width)] for _ in range(length)]
def search(L, el):
    """
    Returns the index of `el` in sorted list `L` if it exists, or `None`.

    Binary search; when `el` occurs multiple times, an occurrence in the
    current (mid, right] window is preferred, scanning from the right.
    """
    lo, hi = 0, len(L) - 1
    # The window [lo, hi] is non-empty exactly while lo <= hi.
    while lo <= hi:
        mid = (lo + hi) // 2
        if L[mid] == el:
            for idx in range(hi, mid, -1):
                if L[idx] == el:
                    return idx
            return mid
        if L[mid] < el:
            lo = mid + 1
        else:
            hi = mid - 1
    return None
def rows_distributed(thelist, n):
    """
    Break a list into ``n`` rows, distributing columns as evenly as possible
    across the rows. For example::
        >>> l = range(10)
        >>> rows_distributed(l, 2)
        [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
        >>> rows_distributed(l, 3)
        [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]
        >>> rows_distributed(l, 4)
        [[0, 1, 2], [3, 4, 5], [6, 7], [8, 9]]
        >>> rows_distributed(l, 5)
        [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]
        >>> rows_distributed(l, 9)
        [[0, 1], [2], [3], [4], [5], [6], [7], [8], [9]]
    This filter will always return `n` rows, even if some are empty::
        >>> rows_distributed(range(2), 3)
        [[0], [1], []]
    """
    try:
        n = int(n)
        thelist = list(thelist)
    except (ValueError, TypeError):
        # non-numeric n or non-iterable input: return the input as one row
        return [thelist]
    list_len = len(thelist)
    split = list_len // n       # base row size
    remainder = list_len % n    # how many leading rows get one extra element
    offset = 0                  # extras already handed out to earlier rows
    rows = []
    for i in range(n):
        if remainder:
            # while extras remain, rows take split+1 elements each
            start, end = (split+1)*i, (split+1)*(i+1)
        else:
            # later rows take split elements, shifted by the extras used up
            start, end = split*i+offset, split*(i+1)+offset
        rows.append(thelist[start:end])
        if remainder:
            remainder -= 1
            offset += 1
    return rows
def group_chains(chain_list):
    """
    Group EPBC chains: merge pairs that share a vertex into connected sets.

    Consumes `chain_list` (it ends up empty) and returns a list of lists,
    one per connected group.
    """
    # Removed the commented-out debug prints and the `aux` size tally the
    # original computed and then discarded.
    chains = []
    while len(chain_list):
        chain = set(chain_list.pop(0))
        ii = 0
        while ii < len(chain_list):
            c1 = sorted(chain_list[ii])
            is0 = c1[0] in chain
            is1 = c1[1] in chain
            if is0 and is1:
                # Both ends already in the group: drop the redundant pair.
                chain_list.pop(ii)
            elif is0 or is1:
                # One end connects: absorb the pair and rescan from the start,
                # since earlier-skipped pairs may now connect.
                chain.update(c1)
                chain_list.pop(ii)
                ii = 0
            else:
                ii += 1
        chains.append(list(chain))
    return chains
def final_strategy(score, opponent_score):
    """Placeholder final strategy: always choose to roll 4 dice,
    regardless of either player's score.
    (Course scaffold — the real strategy description belongs here.)
    """
    # BEGIN PROBLEM 11
    return 4
    # END PROBLEM 11
def extract_bindings(config_str):
    """Extracts bindings from a Gin config string.

    Args:
        config_str (str): Config string to parse.

    Returns:
        List of (name, value) pairs of the extracted Gin bindings.
    """
    # Really crude parsing of gin configs: undo '\'-continuations and the
    # two indentation styles used after in-parentheses line breaks, then
    # treat every line containing ' = ' as a binding.
    normalized = config_str.replace('\\\n', '')
    normalized = normalized.replace('\n ', '')
    normalized = normalized.replace('\n ', '')
    sep = ' = '
    bindings = []
    for raw_line in normalized.split('\n'):
        stripped = raw_line.strip()
        if sep not in stripped:
            continue
        name, _, value = stripped.partition(sep)
        bindings.append((name.strip(), value.strip()))
    return bindings
def find_list_index(a_list, item):
    """
    Finds the index of the first occurrence of an item in a list.

    :param a_list: A list to find the index in.
    :type a_list: list
    :param item: The item to find the index for.
    :type item: str
    :return: The index of the item, or None if not in the list.
    :rtype: int | None
    """
    # Single scan: the original did an `in` membership test (one full scan)
    # followed by enumerate (a second scan).
    for i, value in enumerate(a_list):
        if value == item:
            return i
    return None
def get_provenance_record(attributes, ancestor_files, plot_type):
    """Create a provenance record describing the diagnostic data and plot."""
    caption = ("Correlation of {long_name} between {dataset} and "
               "{reference_dataset}.".format(**attributes))
    return {
        'caption': caption,
        'statistics': ['corr'],
        'domains': ['global'],
        'plot_type': plot_type,
        'authors': ['ande_bo'],
        'references': ['acknow_project'],
        'ancestors': ancestor_files,
    }
def _process_for_bulk_op(raw_value):
"""
Converts a raw value into the accepted format for jsonb in PostgreSQL.
"""
if isinstance(raw_value, str):
raw_value = raw_value.replace("\\","\\\\")
raw_value = raw_value.replace("\n","\\n")
raw_value = raw_value.replace('\"','\\"')
return f'"{raw_value}"'
if isinstance(raw_value, bool):
return f"{str(raw_value).lower()}"
return raw_value |
def max_height(v0):
    """
    Compute maximum height reached, given the initial vertical
    velocity v0. Assume negligible air resistance.
    """
    gravity = 9.81  # m/s^2
    return v0 * v0 / (2 * gravity)
def Average(lst):
    """
    Compute the average of a data distribution
    """
    total = sum(lst)
    count = len(lst)
    return total / count
def string_to_points(s):
    """Convert a PageXml valid string to a list of (x,y) values.

    :param s: space-separated list of comma-separated integer pairs,
        e.g. "10,20 30,40".
    :return: list of (int, int) tuples.

    On a malformed pair the error is printed and the process exits with
    status 1 (original CLI behavior, preserved).
    """
    l_xy = list()
    for s_pair in s.split(' '):  # s_pair = 'x,y'
        try:
            (sx, sy) = s_pair.split(',')
            l_xy.append((int(sx), int(sy)))
        except ValueError:
            print("Can't convert string '{}' to a point.".format(s_pair))
            # Fix: the bare builtin exit() is injected by the `site`
            # module and is not guaranteed to exist (e.g. under -S);
            # raising SystemExit(1) is the portable equivalent.
            raise SystemExit(1)
    return l_xy
def is_valid_classification(classification):
    """ Check if the classification is a MISRA defined classification """
    allowed = ('Rule',)
    return classification in allowed
def getValAfterZero(stepLength: int, nStep: int = 50000000) -> int:
    """
    Track only the value that would sit immediately after position 0
    (which always holds zero) instead of materialising the spinlock
    buffer itself.
    """
    cursor = 0
    answer = 0
    for inserted in range(1, nStep + 1):
        # Buffer length before this insertion is exactly `inserted`;
        # the new number lands one slot past where the cursor stops.
        cursor = (cursor + stepLength) % inserted + 1
        if cursor == 1:
            answer = inserted
    return answer
def format_val(val):
    """ format val (probably switch to one in ptt later)
    """
    if val == 'True':
        return True
    if val == 'False':
        return False
    if val.isdigit():
        return int(val)
    return val
def part1(data):
    """
    >>> part1([
    ...     '00100', '11110', '10110', '10111', '10101', '01111',
    ...     '00111', '11100', '10000', '11001', '00010', '01010'
    ... ])
    198
    """
    width = len(data[0])
    # Count the '1' bits in each column.
    ones = [sum(1 for string in data if string[i] == '1') for i in range(width)]
    half = len(data) // 2
    gamma = 0
    epsilon = 0
    # Walk columns from least significant; a majority of ones feeds
    # gamma, otherwise the bit goes to epsilon.
    for bit, tally in enumerate(reversed(ones)):
        weight = 1 << bit
        if tally > half:
            gamma += weight
        else:
            epsilon += weight
    return gamma * epsilon
def Dvalue2col(A, k, kout, Lmax=1):
    """ Convert D-efficiency to equivalent value at another number of columns """
    # Model sizes: intercept + main effects + two-factor interactions.
    m = 1 + k + k * (k - 1) / 2
    mout = 1 + kout + kout * (kout - 1) / 2
    extra_cols = kout - k
    return (A ** float(m) * Lmax ** extra_cols) ** (1.0 / mout)
def encrypt(string):
    """Encrypts a string with a XOR cipher.
    (assumes security is not necessarily required)
    params:
        string: A string to encrypt
    return: A bytes object
    """
    key = 175
    result = bytearray()
    for code in (ord(ch) for ch in string):
        # Running key: each output byte feeds the next XOR.
        key ^= code
        result.append(key)
    return bytes(result)
def sanitize_input(s):
    """Given an arbitrary string, percent-encode every '/' character."""
    return s.replace('/', '%2F')
def sort_annotations_by_offset(annotations):
    """
    Sort annotation dictionaries by their starting offset.

    Each annotation is expected to look like:
        {'ann_id': u'T34',
         'continuation': False,
         'entity_type': u'Registry',
         'positions': [{'end': 2465, 'start': 2448}],
         'surface': u'reg 38 Padavinus,'}

    When an annotation spans several tokens (several entries in
    `positions`), the start of the first token decides its order.
    """
    def offset_start(annotation):
        return annotation['positions'][0]['start']

    return sorted(annotations, key=offset_start)
def unit_to_agg(row):
    """
    args:
        row - should look like (joinkey, (ten, hhgq, geo))
    returns tuple with cnt appended
    """
    assert len(row) == 2, f"Unit row tuple {row} is not of length 2"
    _, (ten, hhgq, geo) = row
    # ten == 0 together with hhgq == 0 is remapped to category 8;
    # every other row keeps its hhgq code.
    category = 8 if (hhgq == 0 and ten == 0) else hhgq
    return ((geo, category), 1)
def square_matrix_sum(A, B):
    """
    Sum two square matrices of equal
    size.
    """
    size = len(A)
    return [
        [A[i][j] + B[i][j] for j in range(size)]
        for i in range(size)
    ]
def find_min(nums):
    """
    Find minimum element in rotated sorted array
    :param nums: given array
    :type nums: list[int]
    :return: minimum element
    :rtype: int
    """
    lo, hi = 0, len(nums) - 1
    while hi - lo > 1:
        mid = (lo + hi) // 2
        if nums[mid] < nums[hi]:
            # Right half is sorted; minimum is at mid or to its left.
            hi = mid
        elif nums[mid] > nums[hi]:
            # Rotation point is to the right of mid.
            lo = mid
        else:
            # nums[mid] == nums[hi]: duplicates hide which half is
            # sorted, so shrink the window from the right by one.
            hi -= 1
    return min(nums[lo], nums[hi])
def skyValue(value):
    """Return (value + 5) / 10 truncated to an int, as a string.

    Returns the sentinel "999" when *value* cannot be used in the
    arithmetic/int conversion (e.g. None, a non-numeric string, inf).
    """
    try:
        return str(int((value + 5) / 10.))
    except (TypeError, ValueError, OverflowError):
        # Fix: the bare `except:` also swallowed KeyboardInterrupt and
        # SystemExit; catch only the conversion failures.
        return "999"
def transpose(lists):
    """Transpose a list of lists.

    :param lists: rows to transpose; ragged rows are truncated to the
        shortest row (matching Python 3's multi-iterable map/zip).
    :return: a list of column lists ([] for empty input).
    """
    if not lists:
        return []
    # Fix: on Python 3 the original `map(lambda *row: ...)` returned a
    # lazy map object for non-empty input but a list for empty input;
    # always return a materialized list for a consistent type.
    return [list(row) for row in zip(*lists)]
def count_matching_pairs(arr, k):
    """
    given a unique sorted list, count the pairs of elements which sum to equal k.
    an element cannot pair with itself, and each pair counts as only one match.
    :param arr: a unique sorted list
    :param k: the target sum value
    :return: the number of elements which sum to equal k
    """
    lo = 0
    hi = len(arr) - 1
    # Fewer than two elements can never form a pair.
    if hi < 1:
        return 0
    matches = 0
    # Classic two-pointer walk over a sorted array.
    while lo < hi:
        pair_sum = arr[lo] + arr[hi]
        if pair_sum == k:
            # Found a pair; move both ends inward.
            matches += 1
            lo += 1
            hi -= 1
        elif pair_sum < k:
            # Too small: need a bigger left element.
            lo += 1
        else:
            # Too big: need a smaller right element.
            hi -= 1
    return matches
def remove_dups(orig):
    """Remove duplicates from a list but maintain ordering"""
    # dict preserves insertion order (Python 3.7+), so fromkeys keeps
    # exactly the first occurrence of each element.
    return list(dict.fromkeys(orig))
def lerp(point_a, point_b, fraction):
    """ linear interpolation between two quantities with linear operators """
    delta = point_b - point_a
    return point_a + fraction * delta
def underscored2camel_case(v):
    """converts ott_id to ottId."""
    pieces = v.split('_')
    out = []
    for idx, piece in enumerate(pieces):
        if not piece:
            continue
        if idx == 0:
            # First non-empty piece at index 0 keeps its case.
            out.append(piece)
        else:
            out.append(piece[0].upper())
            out.append(piece[1:])
    return ''.join(out)
def same_side(p1, p2, a, b):
    """
    Checks whether two points are on the same side of a line segment.
    :param p1: A point represented as a 2-element sequence of numbers
    :type p1: ``list`` or ``tuple``
    :param p2: A point represented as a 2-element sequence of numbers
    :type p2: ``list`` or ``tuple``
    :param a: One end of a line segment, represented as a 2-element sequence of numbers
    :type a: ``list`` or ``tuple``
    :param b: Another end of a line segment, represented as a 2-element sequence of numbers
    :type b: ``list`` or ``tuple``
    :return: True if ``p1``, ``p2`` are on the same side of segment ``ba``; False otherwise
    :rtype: ``bool``
    """
    import numpy as np
    # Fix: np.cross on 2-element vectors is deprecated (removal planned
    # in NumPy 2.x), so pad EVERY difference vector with z = 0, not just
    # `ba`. The cross products then point along z; a non-negative dot
    # product means both points fall on the same side of the line.
    ba = np.append(np.subtract(b, a), [0])
    pa1 = np.append(np.subtract(p1, a), [0])
    pa2 = np.append(np.subtract(p2, a), [0])
    cp1 = np.cross(ba, pa1)
    cp2 = np.cross(ba, pa2)
    return np.dot(cp1, cp2) >= 0
def calc_lighting_radiation(ppfd):
    """Map PPFD to lighting radiation flux using values from the paper.

    PPFD is measured in umol m-2 s-1; flux density in W m-2.
    Rule of thumb: 1 W m-2 ~= 4.57 umol m-2 s-1, so 140 ppfd ~= 30.6;
    the tabulated values below are guesses read off the paper rather
    than computed from photon energy (E = N_A * h * c / wavelength).
    # https://www.researchgate.net/post/Can_I_convert_PAR_photo_active_radiation_value_of_micro_mole_M2_S_to_Solar_radiation_in_Watt_m22

    :param ppfd: one of the tabulated PPFD values
    :return: lighting radiation in W m-2
    :raises AssertionError: for a PPFD value not in the table
    """
    # Guess from paper
    radiation_by_ppfd = {
        140: 28,
        200: 41,
        300: 59,
        400: 79.6,
        450: 90.8,
        600: 120,
    }
    try:
        return radiation_by_ppfd[ppfd]
    except KeyError:
        # Fix: the original `assert False` is stripped under `python -O`,
        # which made the function fall through and raise NameError on the
        # unbound result variable. Raise explicitly instead
        # (AssertionError kept for caller compatibility).
        raise AssertionError(f"unsupported ppfd value: {ppfd}") from None
def slug(value):
    """
    Slugify (lower case, replace spaces with dashes) a given value.
    :param value: value to be slugified
    :return: slugified value
    ex: {{ "Abc deF" | slug }} should generate abc-def
    """
    # split(' ') keeps empty chunks, so runs of spaces map to runs of
    # dashes exactly like str.replace would.
    return '-'.join(value.lower().split(' '))
def convert_from_submodel_by_value_to_plain(key_value: list) -> dict:
    """
    Convert from the structure where every key/value pair is a separate object
    into a simpler, plain key/value structure
    """
    flattened = {}
    for mapping in key_value:
        # Later objects overwrite earlier keys, same as per-key assignment.
        flattened.update(mapping)
    return flattened
def adapt_glob(regex):
    """
    Supply legacy expectation on Python 3.5

    Currently a pass-through: the regex is returned unchanged.
    """
    # Fix: everything after this return was unreachable dead code (a
    # leftover '(?s:' / r'\Z' rewrite); removed.
    return regex
def grad_refactor_4(a):
    """ if_test """
    return 3 * a if a > 3 else 0
def loop_params(params: dict, stop: int) -> dict:
    """Loops through the parameters and deletes until stop index."""
    print("params:", params)
    # Iterate over a snapshot of the keys so in-place deletion is safe;
    # keys at indices 0..stop (inclusive) are removed.
    for index, key in enumerate(list(params)):
        if index > stop:
            break
        del params[key]
    print("params:", params)
    return params
def miller_rabin(n):
    """ primality Test
    if n < 3,825,123,056,546,413,051, it is enough to test
    a = 2, 3, 5, 7, 11, 13, 17, 19, and 23.
    Complexity: O(log^3 n)
    """
    if n == 2:
        return True
    if n <= 1 or not n & 1:
        return False
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23]
    # Write n - 1 as d * 2**s with d odd.
    d = n - 1
    s = 0
    while not d & 1:
        d >>= 1
        s += 1
    for prime in primes:
        if prime >= n:
            continue
        x = pow(prime, d, n)
        if x == 1:
            # Fix: this was `break`, which exited the base loop entirely
            # and returned True after the FIRST base with x == 1 — so
            # strong pseudoprimes like 2047 = 23 * 89 were reported
            # prime. x == 1 only means THIS base passes; test the rest.
            continue
        for r in range(s):
            if x == n - 1:
                break  # this base passes the strong test
            if r + 1 == s:
                return False  # composite witness found
            x = x * x % n
    return True
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.