def producer_config(config):
"""Filter the producer config"""
for field in ["group.id", "partition.assignment.strategy", "session.timeout.ms", "default.topic.config"]:
if field in config:
del config[field]
return config
|
def get_normal_form(obj_name, with_space=True):
"""Transform object name to title form.
Example:
if with_space=True then risk_assessments -> Risk Assessments
if with_space=False then risk_assessments -> RiskAssessments
"""
normal = obj_name.replace("_", " ").title()
if with_space is True:
return normal
elif with_space is False:
return normal.replace(" ", "")
|
def last(l):
"""
Returns the last element of a collection.
.. note::
Useful when we do not want to have numbers in the code.
Furthermore, it can be directly used in the function composition.
Examples
--------
>>> last([1, 2, 3])
3
"""
return l[-1]
|
def meets_requirements(count, r):
"""Does the password have enough of each type of character to meet the
requirements?"""
    return (count['d'] >= r['d'] and count['l'] >= r['l'] and
            count['u'] >= r['u'] and count['s'] >= r['s'])
|
def getFactoryCreditMultiplier(factoryId):
"""
Returns the skill credit multiplier for a particular factory.
factoryId is the factory-interior zone defined in ToontownGlobals.py.
"""
# for now, there's only one factory
return 2.
|
def _frequency_of_pair(a, b, v, w, matrix):
    """Count the rows in which column a equals v and column b equals w.
    a, b: indices of the columns to be compared
    v, w: respective values of columns a and b we're looking at
    Rows too short to contain both columns are tallied in worry_count.
    (not cheap)
    """
    count = 0
    worry_count = 0
    for row in matrix:
        if a < len(row) and b < len(row):
            if row[a] == v and row[b] == w:
                count += 1
        else:
            # print("worry?", a, b, len(row), row)
            worry_count += 1
    return count
|
def format_month(month):
"""Formats a month to first 3 characters of the month input
Args:
month: user input month
Returns:
A ValueError if the input is not a month, or a 3 character month.
"""
months = ['Jan','Feb','Mar','Apr','May','Jun',
'Jul','Aug','Sep','Oct','Nov','Dec']
if (month.isdigit()):
if(len(month) > 2):
raise ValueError
else:
month = int(month)
if((month > 12) | (month <= 0)): raise ValueError
return months[month - 1]
elif not(month.istitle() | month.islower()| month.isupper()):
raise ValueError
elif(month.capitalize() in months):
return month.capitalize()
else:
raise ValueError
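
# A quick usage sketch (inputs invented for illustration); note that only
# 3-letter month names or numeric strings are accepted:
print(format_month("3"))    # 'Mar'
print(format_month("oct"))  # 'Oct'
# format_month("13") raises ValueError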
|
def sqrt(number):
"""
Calculate the floored square root of a number
Args:
number(int): Number to find the floored squared root
Returns:
int: Floored Square Root
"""
if number**2 == number:
return number
low, high = 0, number
while low <= high:
m = (low + high) // 2
if (m**2 == number) or (m**2 <= number and (m+1)**2 > number):
return m
elif (m**2 > number):
high = m
else:
low = m
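
# A short sanity check of the floored behaviour (values assumed for illustration):
assert sqrt(16) == 4
assert sqrt(27) == 5   # 5*5 = 25 <= 27 < 6*6 = 36
assert sqrt(1) == 1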
|
def get_status_from_msg(finished_msg):
"""Return a job status based on the finished job message.
'complete' if the job created both output files successfully.
'failed' otherwise.
"""
if(len(finished_msg['upload_files']) == 2):
return 'complete'
else:
return 'failed'
|
def _try_int(value):
"""Try to make some value into an int."""
try:
return int(value)
except (ValueError, TypeError):
return None
|
def ensure_set(value):
"""
Given a single value or multiple values, ensure that we return a set.
"""
if isinstance(value, int):
return {value}
return set(value)
|
def transform_with(sample, transformers):
"""Transform a list of values using a list of functions.
:param sample: list of values
:param transformers: list of functions
"""
assert not isinstance(sample, dict)
assert isinstance(sample, (tuple, list))
if transformers is None or len(transformers) == 0:
return sample
result = list(sample)
ntransformers = len(transformers)
for i in range(len(sample)):
f = transformers[i%ntransformers]
if f is not None:
result[i] = f(sample[i])
return result
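
# Minimal sketch (sample and transformers invented): transformers are applied
# cyclically, and None leaves the corresponding value untouched.
print(transform_with([1, 2, 3, 4], [str, None]))  # ['1', 2, '3', 4]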
|
def is_consecutive_rows(lst):
"""Check if a list of integers is consecutive.
Args:
lst (list): The list of integers.
Returns:
True/False: If the list contains consecutive integers.
Originally taken from and modified:
http://stackoverflow.com/
questions/40091617/test-for-consecutive-numbers-in-list
"""
assert 0 not in lst, '0th index is invalid!'
lst = list(set(lst))
if not lst:
return True
setl = set(lst)
return len(lst) == len(setl) and setl == set(range(min(lst), max(lst) + 1))
|
def getSafe(dict,key):
"""
    Returns the value stored under the given key if the key is present and its value is truthy;
    otherwise returns None.
    :param dict: dictionary to look up in
    :param key: key to retrieve
    :return: the value, or None
"""
if key in dict and dict[key]:
return dict[key]
else:
return None
|
def _check(key: str) -> bool:
"""Returns `True` if `key` is valid identifier string else `False`."""
return isinstance(key, str) and key.isidentifier()
|
def print_pipeline(message, args, pipeline_data):
""" Displays the current state of the pipeline in the console """
print("Message: ", message)
print("Data", pipeline_data)
return True
|
def euler1(lim=1000):
"""Solution for problem 1."""
# could use sum formula here
return sum(i for i in range(lim) if i % 3 == 0 or i % 5 == 0)
|
def genpixmap(targetlevel):
"""Generate pixel value map."""
v=256.0/(targetlevel+1)
m=[]
for i in range(256):
m.append(int(i/v))
return m
|
def sort_by_size(L):
"""
Return a copy of precinct list L, sorted into decreasing order by size.
"""
answer = L[:]
answer.sort()
answer.reverse()
return answer
|
def _format_key(key: str) -> str:
"""Internal function for formatting keys in Tensorboard format."""
return key.title().replace('_', '')
|
def luminace(color_component):
"""Luminance of an individual Red, Green, or Blue, color component.
:param color_component: Value between 0 and 255 (inclusive)
:type color_component: int
:return: Luminance value of the color component
:rtype: float
"""
i = float(color_component) / 255
if i < 0.03928:
return i / 12.92
else:
return ((i + 0.055) / 1.055) ** 2.4
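
# One common way to combine the per-channel values into a relative luminance,
# using WCAG-style weights; this combining helper is an assumption added for
# illustration, not part of the original snippet.
def relative_luminance(red, green, blue):
    """Relative luminance from 0-255 sRGB components (assumed helper)."""
    return 0.2126 * luminace(red) + 0.7152 * luminace(green) + 0.0722 * luminace(blue)

print(relative_luminance(255, 255, 255))  # ~1.0 for white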
|
def split_grad_list(grad_list):
"""
Args:
grad_list: K x N x 2
Returns:
K x N: gradients
K x N: variables
"""
g = []
v = []
for tower in grad_list:
g.append([x[0] for x in tower])
v.append([x[1] for x in tower])
return g, v
|
def sdf_count(file_obj):
"""Count the number of molecules in an SDF file.
Counts the number of times '$$$$' occurs at the start of lines
in the file.
Parameters
----------
file_obj: A file-like object
Returns
-------
count: The number of molecules in the file (int)
"""
return sum(1 for line in file_obj if line[:4] == b'$$$$')
|
def bold_first_italics(graph):
"""For a newly broken-up graph, convert the first italics text to bold."""
if graph.count('*') > 1:
return graph.replace('*', '**', 2)
else:
return graph
|
def reverse(current_block, *args):
"""Reverses the data of the current block."""
return current_block[::-1]
|
def create_ngrams(tokens, ngram=2, determiner='_'):
"""Create n_grams.
"""
new_tokens = []
for i in range(len(tokens)-ngram+1):
new_token = determiner.join(tokens[i:i+ngram])
new_tokens.append(new_token)
return new_tokens
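
# For illustration (token list invented): bigrams by default, trigrams with ngram=3.
print(create_ngrams(["new", "york", "city"]))        # ['new_york', 'york_city']
print(create_ngrams(["a", "b", "c", "d"], ngram=3))  # ['a_b_c', 'b_c_d']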
|
def research_order(technology_id, after_technology_id, civ_ids):
"""Report research that began after a specified research finished."""
query = """
select
imp.match_id, imp.player_number as number, imp.finished as imp_time, late.started::interval(0) as timestamp
from research as imp
join players on imp.match_id=players.match_id and imp.player_number=players.number
left join (
select match_id, player_number, started, finished from research
where technology_id=:technology_id
) as late on late.match_id=imp.match_id and imp.player_number=late.player_number
where imp.technology_id=:after_technology_id and imp.finished is not null and late.finished is null and not (players.civilization_id = any(:civ_ids))
"""
return query, dict(technology_id=technology_id, after_technology_id=after_technology_id, civ_ids=civ_ids)
|
def _num_pre_blank_frames(stim_dict):
"""
_num_pre_blank_frames(stim_dict)
Retrieves number of blank frames before stimulus starts.
Arguments:
stim_dict (dict): An Allen Institute session stimulus dictionary.
Returns:
num_pre_blank_frames (int): number of blank frames before stimulus starts.
"""
stimulus_fps = stim_dict["fps"]
num_pre_blank_frames = int(stim_dict["pre_blank_sec"] * stimulus_fps)
return num_pre_blank_frames
|
def check_valid_base_fraction(base_fraction):
"""
Checks that base fraction specified is either None, and therefore won't be used, or is between 0 and 1.
:param base_fraction: Base fraction, which should be either None or a float.
:return: True if base fraction is valid, False if not.
"""
if base_fraction is None:
return True
if 0 <= base_fraction <= 1:
return True
else:
return False
|
def neighbor_candidate_generator(src_cortical_area, src_neuron_id, dst_cortical_area):
"""
    Identifies the list of candidate neurons in the destination cortical area that, based on the rules
    defined by the source cortical area, are a suitable fit for synapse creation.
Args:
src_cortical_area:
src_neuron_id:
dst_cortical_area:
Returns:
List of candidate Neurons
"""
synapse_candidate_list = []
return synapse_candidate_list
|
def threshold_counts(counts, threshold=30, number=1):
""" Returns True
Makes sure that at least one of the samples meet a read count threshold.
"""
counter = 0
for i in counts:
if sum(i) > threshold: counter += 1
if counter >= number:return True
else: return False
|
def arg_keys(arg_name, keys):
"""Appends arg_name to the front of all values in keys.
Args:
arg_name: (string) String containing argument name.
keys: (list of strings) Possible inputs of argument.
Returns:
        List of strings with arg_name appended to the front of each key.
"""
return ["{0} {1}".format(arg_name, key) for key in keys]
|
def parse_int(token):
"""Convert a token of a integer literal to its integer value.
>>> parse_int("100")
100
>>> parse_int("0xA")
10
"""
if token.startswith("0x"):
return int(token, base=16)
elif token.startswith("0o"):
return int(token, base=8)
else:
return int(token)
|
def _make_iter(obj, make_fn, **options):
"""
    :param obj: An original container object (e.g. list or tuple); it is rebuilt with the same type
    :param make_fn: Function used to make/convert each item
"""
return type(obj)(make_fn(v, **options) for v in obj)
|
def countit(objs):
"""Return a dict with counts for each item in a list."""
out = {}
for el in objs:
out[el] = 1 + out.get(el, 0)
return out
|
def check_bidi_comp(cond0, cond1, arg0, arg1):
"""
Check whether conditions are True for two arguments regardless of order.
Parameters
----------
cond0, cond1 : callable
Function of one variable that evaluate to a bool.
arg0, arg1 : object
Arguments to pass to `cond0` and `cond1`.
Returns
-------
result : bool
True if `cond0` and `cond1` are respectively True for
`arg0` and `arg1` or `arg1` and `arg0`.
"""
assert callable(cond0) and callable(cond1)
return (cond0(arg0) and cond1(arg1)) or \
(cond1(arg0) and cond0(arg1))
|
def lorentz1D(x, x0, w):
"""Returns the probability density function of the Lorentzian (aka Cauchy) function with maximum at 1.
Parameters
----------
x: ndarray or list
A 1D vector of points for the dependent variable
x0: float
The center of the peak maximum
w: float
The parameter that modifies the width of the distribution.
Returns
-------
l: ndarray
A vector of size (N,) of the Lorentzian distribution evaluated
at the input x values.
"""
return w ** 2 / ((x - x0) ** 2 + w ** 2)
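
# Scalar sketch (values assumed) clarifying the role of w: the peak value is 1 at
# x0, and w is the half-width at half-maximum, so the FWHM is 2*w.
print(lorentz1D(5.0, 5.0, 2.0))  # 1.0
print(lorentz1D(7.0, 5.0, 2.0))  # 0.5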
|
def anyhasprefix(value, prefixes):
""" Check if `value` starts with on of the possible `prefixes` """
for p in prefixes:
if value.startswith(p):
return True
return False
|
def find_group_missing_numbers(squares: list, grid: list) -> list:
"""
Gets the missing numbers from the group of squares
:param squares: A list of tuples (row, column) coordinates of squares
:param grid: The sudoku grid as a 3D list
:return: A list containing all the numbers (1-9) missing from the box
"""
missing_nums = [1, 2, 3, 4, 5, 6, 7, 8, 9]
for square in squares:
if grid[square[0]][square[1]][0] != 0:
missing_nums.remove(grid[square[0]][square[1]][0])
return missing_nums
|
def number(lines):
"""Apply enumerate approach."""
# dict: {number->c}
# list: ['number: c']
# number_c_d = {i + 1: c for i, c in enumerate(lines)}
# return number_c_d
if lines == []:
return []
number_c_ls = [str(i + 1) + ': ' + c for i, c in enumerate(lines)]
return number_c_ls
|
def parsimony_pressure(fitness: float,
size: int,
p_coeff: float) -> float:
"""Parsimony pressure method.
Koza, 1992; Zhang & Muhlenbein, 1993; Zhang et al., 1993
:param fitness: Original fitness
:param size: Size of individual
:param p_coeff: Parsimony coefficient
    :return: Fitness penalized by size, i.e. fitness - p_coeff * size
"""
return fitness - p_coeff * size
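
# Toy example (numbers invented): a larger individual is penalized in proportion to its size.
print(parsimony_pressure(10.0, 4, 0.5))  # 10.0 - 0.5 * 4 = 8.0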
|
def merge_two_dicts(dict_01, dict_02):
"""
Merge 2 dictionaries and return the merged dictionary.
Compatible with python 3.4 or lower
"""
    merged_dict = dict_01.copy()  # start with dict_01's keys and values
    merged_dict.update(dict_02)   # overwrite/extend with dict_02's keys and values
return merged_dict
|
def concat(l1, l2):
""" Join two possibly None lists """
if l1 is None:
return l2
if l2 is None:
return l1
return l1 + l2
|
def score_calc_5(sumI, n, beta_a_b, beta_aB, beta_c_d, beta_w_x, beta_y_z_P, gamma, L, sumI_all):
"""
Scoring function with 5 ion series: sumI(matched) / sumI(all) * n / L * ( 1 + beta[a + b] + beta[a-B] + beta[c + d]
+ beta[w + x] + beta[y + z + y-P + z-P])
n = total number of matching MS2 ions for the given precursor, excluding non sequence-meaningful ions
(only 11 main series) within the specified m/z window, charge independent
beta is determined as:
if no consecutive matches in the same ion series: beta = 0
if at least one consecutive match in the same ion series: beta = sum(beta_increment +
(x_beta * consecutive_matches ) * beta_increment) over all consecutive matches in the same
ion series (a,a-b,b..) or
ion series group (e.g. a/b, w/x...). beta_increment and x_beta are tunable via input options
    L = normalization factor based on the total number of theoretically predicted ions for a precursor ion, excluding
        non-sequence-meaningful ions (only 11 main series), within the specified m/z window
"""
return round(sumI * n * (1 + beta_a_b + beta_aB + beta_c_d + beta_w_x + beta_y_z_P) / (L * sumI_all),
3)
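
# Worked toy example of the formula (all values invented purely for illustration):
# sumI=600 matched intensity, n=10 matched ions, consecutive-match bonuses summing
# to 0.25, L=30 theoretical ions, sumI_all=1000 total intensity.
print(score_calc_5(600, 10, 0.10, 0.05, 0.05, 0.05, 0.00,
                   gamma=0, L=30, sumI_all=1000))  # 600 * 10 * 1.25 / (30 * 1000) = 0.25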
|
def make_spaces(in_string: str) -> str:
"""
This filter takes a string and replaces all dashes and underscores with spaces
:param in_string: The string to change
:type in_string: str
:returns: A string with no dashes or underscores
:rtype: str
"""
return in_string.replace("_", " ").replace("-", " ")
|
def sanitize(name):
"""Make the name able to be a valid path name.
No spaces or slashes, everything lowercase"""
return name.lower().replace(" ", "_").replace("/", "-slash-")
|
def sum_of_matrices(matrix1, matrix2):
    """
    Sums the input matrices (matrix1 + matrix2). If the dimension sizes do not match, returns -1!
    Args:
        matrix1: 2D list
        matrix2: 2D list
    Return:
        result: 2D list. If the dimension sizes do not match, returns -1!
    """
    if len(matrix1) == len(matrix2) and len(matrix1[0]) == len(matrix2[0]):
        result = [[0 for _ in range(len(matrix1[0]))] for _ in range(len(matrix1))]
        for i in range(len(matrix1)):
            for j in range(len(matrix1[0])):
                result[i][j] = matrix1[i][j] + matrix2[i][j]
        return result
    else:
        print('Error: dimension sizes of the matrices are not the same')
        return -1
|
def map_pressure_to_coeff(pressure_str):
"""
    :param str pressure_str: pressure value as a string
    :return: pressure coefficient (1, 2 or 3)
"""
pressure = int(pressure_str)
if pressure <= 1008:
pressure_coeff = 3
elif pressure >= 1023:
pressure_coeff = 1
else:
pressure_coeff = 2
return pressure_coeff
|
def _revert(converter,
result,
converted=False,
result_conversion=None,
estimator=None,
**converter_args):
"""
Use the `converter` to convert the given `result` if necessary.
sklearn toolkit produces all its results as numpy format by default.
However here, as we need to convert inputs to H2OFrames,
and as we may chain H2O transformers and estimators,
then we apply some detection logic to return results in the same format as input by default.
This can be overridden at decorator level for specific methods (e.g. transform)
or at estimator level for other cases (predict on a pipeline with H2O transformers and numpy inputs).
"""
estimator_conversion = getattr(estimator, 'data_conversion', None)
do_convert = (
estimator_conversion is True
or (estimator_conversion in (None, 'auto')
and (result_conversion is True
or (result_conversion in (None, 'auto') and converted)
)
)
)
return converter(result, estimator=estimator, **converter_args) if do_convert else result
|
def human_format(num: int) -> str:
"""Returns num in human-redabale format
from https://stackoverflow.com/a/579376
Args:
num (int): number.
Returns:
str: Human-readable number.
"""
if num < 10000:
return str(num)
magnitude = 0
while abs(num) >= 1000:
magnitude += 1
num /= 1000.0 # type: ignore
if num == int(num):
formatter = "%.1d%s"
else:
formatter = "%.1f%s"
return formatter % (num, ["", "K", "M", "G", "T", "P"][magnitude])
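
# Example outputs (inputs chosen for illustration):
print(human_format(9999))     # '9999' (below the 10000 threshold)
print(human_format(25000))    # '25K'
print(human_format(1234567))  # '1.2M'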
|
def fix_segment_table(segment_table):
"""Given a list of dictionaries in the form [{"start":start_frequency,
"stop":stop_frequency,"number_points":number_points,"step":frequency_step}...] returns a table that is ordered by start
frequency and has no overlapping points"""
segment_table = sorted(segment_table, key=lambda x: x["start"])
i = 0
while (i + 1 < len(segment_table)):
if segment_table[i]["stop"] == segment_table[i + 1]["start"]:
segment_table[i + 1]["start"] = segment_table[i + 1]["start"] + segment_table[i + 1]["step"]
segment_table[i + 1]["number_points"] -= 1
i += 1
return segment_table
|
def _get_padding(num_bins, bin_size):
"""
For parallel iteration: gets the smallest number L' >= bin_size
such that num_bins is smaller than the lowest factor of L'.
"""
trial_size = bin_size
while True:
success_flag = True
for divisor in range(2, num_bins - 1):
if trial_size % divisor == 0:
success_flag = False
break
if success_flag:
return trial_size
trial_size += 1
|
def handle_domainlist(url, _domain, _method, **_):
"""
:param url: Incoming URL dictionary
:type url: dict
:param _domain: Incoming domain (it's not being used for this handler)
:type _domain: str
:param _method: Incoming request method (it's not being used for this handler)
:type _method: str
:param _: kwargs
:return: final url for domainlist endpoint
"""
return url["base"] + "domains"
|
def _split_name(name):
"""Splits a name in two components divided by '.'"""
comp = name.split('.')
if len(comp) > 1:
return (comp[0], '.'.join(comp[1:]))
return (None, name)
|
def decipher_all(decipher, objid, genno, x):
"""Recursively deciphers the given object.
"""
if isinstance(x, bytes):
return decipher(objid, genno, x)
if isinstance(x, list):
x = [decipher_all(decipher, objid, genno, v) for v in x]
elif isinstance(x, dict):
for (k, v) in x.items():
x[k] = decipher_all(decipher, objid, genno, v)
return x
|
def is_one_to_one(d):
"""
(dict of {str: int}) -> bool
Return True if and only if no two of d's keys
map to the same value.
>>> is_one_to_one({'a': 1, 'b': 2, 'c': 3})
True
>>> is_one_to_one({'a': 1, 'b': 2, 'c': 1})
False
>>> is_one_to_one({})
True
"""
values = []
for val in d.values():
if val in values:
return False
values.append(val)
return True
|
def csc_norm(n, Ap, Ax):
"""
Computes the 1-norm of a sparse matrix = max (sum (abs (A))), largest
column sum.
    @param n: number of columns
    @param Ap: column pointer array of the column-compressed matrix A
    @param Ax: numerical values of the column-compressed matrix A
    @return: the 1-norm
"""
norm = 0
for j in range(n):
s = 0
for p in range(Ap[j], Ap[j + 1]):
s += abs(Ax[p])
norm = max(norm, s)
return norm
|
def argument_name(name: str) -> str:
"""Standardises argument name.
Examples:
```python
argument_name("hello") == "--hello"
argument_name("hello_world") == "--hello-world"
```
Args:
name (str): Name of the argument.
Returns:
str: Standardised name of the argument.
"""
# Add '--', replace '_' with '-'
return f"--{name.replace('_', '-')}"
|
def scaling_transform(w1, h1, w2, h2):
"""Rescale rectangle (w1, h1) to fit in rectangle (w2, h2) without changing the ratio."""
r1 = w1 / h1
r2 = w2 / h2
if r1 <= r2:
h = h2
w = r1 * h2
else:
w = w2
h = w2 / r1
return w, h
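
# For illustration (sizes invented): fit a 200x100 rectangle into a 50x50 box
# while preserving the 2:1 aspect ratio.
print(scaling_transform(200, 100, 50, 50))  # (50, 25.0)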
|
def clipped(piece_idx, move_idx):
"""
    Determines whether the move is clipped given the 8x8 board geometry: returns True
    if move_idx is off the board or the displacement from piece_idx is too large.
"""
if move_idx not in range(64):
return True
move_x, move_y = (move_idx % 8, move_idx // 8)
piece_x, piece_y = (piece_idx % 8, piece_idx // 8)
x_diff = (piece_x - move_x)
y_diff = (piece_y - move_y)
return bool(x_diff**2 > 9 or y_diff**2 > 9)
|
def remove_trailing(number: int):
"""
    Removes the trailing zeros from an integer
https://stackoverflow.com/questions/52908011/better-way-to-remove-trailing-zeros-from-an-integer
"""
while number % 10 == 0 and number != 0:
number //= 10
return number
|
def apply_deltas(start,deltas):
""" Return a list of the sums of DELTAS starting from START. """
result = []
current = start
for delta in deltas:
current = current + delta
result.append(current)
return result
|
def buckle_thickness(D_o, P_p, sig_y):
"""Return the nominal buckle thickness [t] based on the propagation pressure.
Considers the worst case maximum external pressure and ignores internal
pressure - PD8010-2 Equation (G.21).
:param float D_o: Outside Diameter [m]
:param float P_p: Propagation pressure [Pa]
:param float sig_y: Yield strength [Pa]
"""
return D_o * (P_p / (10.7 * sig_y))**(4 / 9)
|
def get_content(context, target):
"""
    Reads the content of the given (auto-generated) data file and
    returns its lines as a list.
"""
lines = []
try:
with open(target,encoding='UTF-8') as file:
for line in file:
line = line.strip()
lines.append(line)
except FileNotFoundError:
print(f"{context} file missing")
return lines
|
def _clean_2007_text(s):
"""Replace special 2007 formatting strings (XML escaped, etc.) with
actual text.
@param s (str) The string to clean.
@return (str) The cleaned string.
"""
s = s.replace("&", "&")\
.replace(">", ">")\
.replace("<", "<")\
.replace("'", "'")\
.replace(""", '"')\
.replace("_x000d_", "\r")
return s
|
def min_board(list_of_board):
"""
get the board with the min cost
"""
cost = float("inf")
board = None
index = -1
for i in range(len(list_of_board)):
elem_cost, elem_board = list_of_board[i]
if (elem_cost < cost):
cost = elem_cost
board = elem_board
index = i
return (cost,board,index)
|
def idToMQTTClientID(id:str, isCSE:bool=True) -> str:
""" Convert a oneM2M ID to an MQTT client ID.
"""
return f'{"C::" if isCSE else "A::"}{id.lstrip("/")}'
|
def NormalizeEmail(email):
"""Normalizes the email from git repo.
    Some emails look like: test@chromium.org@bbb929c8-8fbe-4397-9dbb-9b2b20218538.
"""
parts = email.split('@')
return '@'.join(parts[0:2])
|
def hsl_to_rgb(hue, saturation, lightness):
"""
:param hue: degrees
:param saturation: percentage
:param lightness: percentage
:returns: (r, g, b) as floats in the 0..1 range
"""
hue = (hue / 360) % 1
saturation = min(1, max(0, saturation / 100))
lightness = min(1, max(0, lightness / 100))
# Translated from ABC: http://www.w3.org/TR/css3-color/#hsl-color
def hue_to_rgb(m1, m2, h):
if h < 0:
h += 1
if h > 1:
h -= 1
if h * 6 < 1:
return m1 + (m2 - m1) * h * 6
if h * 2 < 1:
return m2
if h * 3 < 2:
return m1 + (m2 - m1) * (2 / 3 - h) * 6
return m1
if lightness <= 0.5:
m2 = lightness * (saturation + 1)
else:
m2 = lightness + saturation - lightness * saturation
m1 = lightness * 2 - m2
return (
hue_to_rgb(m1, m2, hue + 1 / 3),
hue_to_rgb(m1, m2, hue),
hue_to_rgb(m1, m2, hue - 1 / 3),
)
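
# Two illustrative conversions (values assumed): pure red, and a saturation of 0,
# which yields equal channels.
print(hsl_to_rgb(0, 100, 50))   # (1.0, 0.0, 0.0)
print(hsl_to_rgb(120, 0, 50))   # (0.5, 0.5, 0.5)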
|
def format_assignments(text):
"""
Aligns assignment statements in the source file and return a text.
"""
lines = text.split("\n")
    # process the text line by line and store each line at its starting index
formated_text = []
a_block_left = []
a_block_right = []
    # these statements may contain '=' too but are not assignments
skip_tokens = ['if', 'for', 'while', '(', ')', 'else']
def format_assignment_block():
"""
Process an assignment block, returns formatted list of
assignment lines in that block.
"""
max_left = max([len(left) for left in a_block_left])
f_assignments = []
for left, right in zip(a_block_left, a_block_right):
new_line = left + ' '*(max_left-len(left)) + ' = ' + right
f_assignments.append(new_line)
return f_assignments
for line in lines:
# assignment should contain = and shouldn't contain anything from skip_tokens
# empty list is considered false
if "=" in line and not ["bad" for t in skip_tokens if t in line.split("=")[0]]:
left = line.split("=")[0]
right = "= ".join(line.split("=")[1:] )
# need to preserve spaces on left
a_block_left.append(left.rstrip())
a_block_right.append(right.strip())
else:
            # if not an assignment, process the block if it is not empty
if len(a_block_left) != 0:
f_assignments = format_assignment_block()
formated_text.extend(f_assignments)
a_block_left = []
a_block_right = []
            # if not an assignment, preserve the line
formated_text.append(line)
    # check if the block is non-empty at the end,
    # because the else branch will not trigger if the assignment lines are at the end
if len(a_block_left) != 0:
f_assignments = format_assignment_block()
formated_text.extend(f_assignments)
# join individual lines in list and returns as text string
return '\n'.join(formated_text)
|
def threads(threads: int, single_threaded: bool) -> int:
"""
Number of threads to run in each Gunicorn worker.
"""
if single_threaded:
return 1
return threads
|
def is_in_mapping_cache(referenced_revision_tag, referenced_section,
mapping_cache):
""" Checks if the referenced section is in the mapping cache
Supports both entry formats (1.2:3.4 and 1.2/3.4)
"""
s1 = ":".join(referenced_section)
s2 = "/".join(referenced_section)
if referenced_revision_tag in mapping_cache:
cache_tag = mapping_cache[referenced_revision_tag]
if s1 in cache_tag:
return cache_tag[s1]
if s2 in cache_tag:
return cache_tag[s2]
return None
|
def _find_room_helper(room_obj_list, room):
"""simple list search for finding object in first part of a tuple"""
for r_obj in room_obj_list:
if r_obj[0] == room:
return r_obj[1]
return None
|
def match(patternSuffix: str, wordSuffix: str):
"""Returns False if patternSuffix[0] does not match wordSuffix[0].
A match is when patternSuffix[0] == '?' or patternSuffix[0] == wordSuffix[0].
    This function recurses until one or both of the inputs are exhausted or a match
    of patternSuffix[0] to wordSuffix[0] fails.
"""
if len(patternSuffix) == 0 and len(wordSuffix) == 0:
# The inputs are exhausted
return True
try:
matches = patternSuffix[0] == '?' or patternSuffix[0] == wordSuffix[0]
if matches:
            # There are still more characters to check
return match(patternSuffix[1:], wordSuffix[1:])
else:
return False
except IndexError:
# The two inputs are not of equal length, and therefore cannot match
return False
|
def obj_of(k, v):
"""Creates an object containing a single key:value pair"""
return {k: v}
|
def extent(obj):
"""Get the start and end offset attributes of a dict-like object"""
return obj.get('startOffset', -1), obj.get('endOffset', -1)
|
def parse_boolean(value, length, cursor):
"""Typecast the postgres boolean to a python boolean.
Postgres returns the boolean as a string with 'true' or 'false'
"""
return value[:1] == b"t" if value is not None else None
|
def lagrange_poly(x, xp, fp):
""" given points (xp, fp), fit a lagrange polynomial and return
the value at point x """
f = 0.0
# sum over points
m = 0
while (m < len(xp)):
# create the Lagrange basis polynomial for point m
l = None
n = 0
while (n < len(xp)):
if n == m:
n += 1
continue
            if l is None:
l = (x - xp[n])/(xp[m] - xp[n])
else:
l *= (x - xp[n])/(xp[m] - xp[n])
n += 1
f += fp[m]*l
m += 1
return f
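
# Quick check (points invented): fitting y = x**2 through three points recovers
# the exact value in between.
print(lagrange_poly(1.5, [1, 2, 3], [1, 4, 9]))  # 2.25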
|
def pixels_to_figsize(opt_dim, opt_dpi):
"""Converts pixel dimension to inches figsize
"""
w, h = opt_dim
return (w / opt_dpi, h / opt_dpi)
|
def _get_proxy_type(type_id):
"""
Return human readable proxy type
Args:
type_id: 0=frontend, 1=backend, 2=server, 3=socket/listener
"""
proxy_types = {
0: 'frontend',
1: 'backend',
2: 'server',
3: 'socket/listener',
}
return proxy_types.get(int(type_id))
|
def prime_factors(n):
"""Returns all the prime factors of a positive integer"""
factors = []
d = 2
while n > 1:
while n % d == 0:
factors.append(d)
n //= d
d = d + 1
if d*d > n:
if n > 1: factors.append(n)
break
return factors
|
def confirm_membership(required_set, proposed):
"""
Return True if all elements in required set are present in proposed
>>> confirm_membership({'a', 'b'}, {'c': 0, 'a': 2, 'b': 0})
True
>>> confirm_membership({'a':3, 'b':4}, {'c': 0, 'a': 2, 'b': 0})
True
"""
return set(required_set).issubset(proposed)
|
def parse_ranges(s):
"""Parse s as a list of range specs."""
range_list = [] # return value is a list of doc indexes
ranges = s.split(",")
# list of ranges
for r in ranges:
try:
i = int(r)
range_list.append(i)
except ValueError:
# check if range
if "-" in r:
start, end = r.split("-")
start = int(start)
end = int(end)
range_list.extend([x for x in range(start, end + 1)])
return range_list
|
def modexp5(b,m):
"""e=5, use addition chain"""
b2=(b*b)%m
b4=(b2*b2)%m
b5=(b*b4)%m
assert(b5==pow(b,5,m))
return b5
|
def limit_labels_for_label_normalizer(
label_normalizer,
acceptable_labels):
"""Limits keys and values in label_normalizer to acceptable labels."""
limited_label_normalizer = {}
for k, v in label_normalizer.items():
if k in acceptable_labels:
limited_label_normalizer[k] = sorted(
acceptable_labels.intersection(set(v)))
return limited_label_normalizer
|
def ParseGTestListTests(raw_list):
"""Parses a raw test list as provided by --gtest_list_tests.
Args:
raw_list: The raw test listing with the following format:
IPCChannelTest.
SendMessageInChannelConnected
IPCSyncChannelTest.
Simple
DISABLED_SendWithTimeoutMixedOKAndTimeout
Returns:
A list of all tests. For the above raw listing:
[IPCChannelTest.SendMessageInChannelConnected, IPCSyncChannelTest.Simple,
IPCSyncChannelTest.DISABLED_SendWithTimeoutMixedOKAndTimeout]
"""
ret = []
current = ''
for test in raw_list:
if not test:
continue
if not test.startswith(' '):
test_case = test.split()[0]
if test_case.endswith('.'):
current = test_case
else:
test = test.strip()
if test and not 'YOU HAVE' in test:
test_name = test.split()[0]
ret += [current + test_name]
return ret
|
def _is_gse2(filename):
"""
Checks whether a file is GSE2.0 format.
:type filename: str
:param filename: Name of the GSE2.0 file to be checked.
:rtype: bool
:return: ``True`` if GSE2.0 file.
"""
try:
with open(filename, 'rb') as fh:
temp = fh.read(12)
except Exception:
return False
if temp != b'BEGIN GSE2.0':
return False
return True
|
def short_doc(obj):
"""
Returns the first line of the object's docstring
"""
if obj.__doc__:
lines = obj.__doc__.strip(' \n').splitlines()
if lines:
return lines[0]
return None
|
def atmDensPoly6th(ht, dens_co):
""" Compute the atmosphere density using a 6th order polynomial. This is used in the ablation simulation
for faster execution.
Arguments:
ht: [float] Height above sea level (m).
dens_co: [list] Coeffs of the 6th order polynomial.
Return:
atm_dens: [float] Atmosphere neutral mass density in kg/m^3.
"""
# Compute the density
rho_a = 1000*(10**(dens_co[0]
+ dens_co[1]*(ht/1000)
+ dens_co[2]*(ht/1000)**2
+ dens_co[3]*(ht/1000)**3
+ dens_co[4]*(ht/1000)**4
+ dens_co[5]*(ht/1000)**5))
return rho_a
|
def replace_ellipsis(n, index):
"""Replace ... with slices, :, : ,:
>>> replace_ellipsis(4, (3, Ellipsis, 2))
(3, slice(None, None, None), slice(None, None, None), 2)
>>> replace_ellipsis(2, (Ellipsis, None))
(slice(None, None, None), slice(None, None, None), None)
"""
# Careful about using in or index because index may contain arrays
isellipsis = [i for i, ind in enumerate(index) if ind is Ellipsis]
if not isellipsis:
return index
else:
loc = isellipsis[0]
extra_dimensions = n - (len(index) - sum(i is None for i in index) - 1)
return index[:loc] + (slice(None, None, None),) * extra_dimensions + index[loc + 1:]
|
def dev_merge_dicts(dicta: dict, dictb: dict):
"""merge two dicts.
under development.
"""
if not isinstance(dicta, dict) or not isinstance(dictb, dict):
return dicta
new_dicta = {**dicta, **dictb}
for k, v in new_dicta.items():
if isinstance(v, dict):
if (k in dicta) and isinstance(dicta[k], dict):
if dicta[k] != v:
new_dicta[k] = dev_merge_dicts(dicta[k], v)
return new_dicta
|
def area(r):
"""returns surface"""
return(333/106*r**2)
|
def _strip_double_quote(value: str) -> str:
"""Strip one leading/single trailing double-quote."""
if value[0] == '"':
value = value[1:]
if value[-1] == '"':
value = value[:-1]
return value
|
def capability(event, endpoint, name, description, others=None):
"""
A helper function to generate one capability line for the capabilities()
"""
path = event['path']
named_path_element = path.rfind('/capabilities')
if named_path_element>0:
prefix = path[0:named_path_element]
else:
prefix = path
ret = {'name': name, 'path': prefix+endpoint, 'description': description}
if others is not None:
for item in others:
if others[item] is not None:
ret[item] = others[item]
return ret
|
def add_file_to_dict(dictionary, file):
"""
Populates a dictionary with
key: file name
value: number of instances in the directory
Args:
dictionary: dictionary of file names
file: file name to be added to dictionary
Returns:
The modified dictionary
"""
# If file exists in dictionary
if file in dictionary:
# Add number of instances of the file
dictionary[file] += 1
# If file does not exist
else:
# Add it to our dictionary
dictionary[file] = 1
return dictionary
|
def flatten_keys(dictionary):
"""
Flattens the keys for a nested dictionary using dot notation. This
returns all the keys which can be accessed via `get_by_path`.
Example:
For example, {'a': None, 'b': {'x': None}} would return ['a', 'b.x']
Args:
dictionary (dict):
A dictionary which should be flattened.
Returns:
list[str]: list of flattened keys
"""
if not isinstance(dictionary, dict):
return []
keys = []
for k, v in dictionary.items():
if isinstance(v, dict):
for x in flatten_keys(v):
keys.append(k + '.' + x)
else:
keys.append(k)
return keys
|
def _make_triplets(seq, phase=0):
"""Select a valid amino acid sequence given a 3-letter code input (PRIVATE).
This function takes a single three-letter amino acid sequence and the phase
of the sequence to return the longest intact amino acid sequence possible.
Parts of the input sequence before and after the selected sequence are also
returned.
This is an internal private function and is meant for parsing Exonerate's
three-letter amino acid output.
>>> from Bio.SearchIO.ExonerateIO._base import _make_triplets
>>> _make_triplets('GlyThrSerAlaPro')
('', ['Gly', 'Thr', 'Ser', 'Ala', 'Pro'], '')
>>> _make_triplets('yThrSerAla', phase=1)
('y', ['Thr', 'Ser', 'Ala'], '')
>>> _make_triplets('yThrSerAlaPr', phase=1)
('y', ['Thr', 'Ser', 'Ala'], 'Pr')
"""
pre = seq[:phase]
np_seq = seq[phase:]
non_triplets = len(np_seq) % 3
post = "" if not non_triplets else np_seq[-1 * non_triplets:]
intacts = [np_seq[3 * i:3 * (i + 1)]
for i in range(len(np_seq) // 3)]
return pre, intacts, post
|
def get_suit_of_card(position_of_card, deck):
"""Returns the suit of the card that has the specific position in the deck"""
suit_int = deck[position_of_card][0]
if suit_int == 0:
return "Spades"
elif suit_int == 1:
return "Hearts"
|
def xeval(sexpr):
"""
>>> xeval('')
0
>>> xeval('1234567')
1234567
>>> xeval('+1234567')
1234567
>>> xeval('-1234567')
-1234567
>>> xeval('2+3')
5
>>> xeval('2-3')
-1
>>> xeval('2-23+4')
-17
>>> xeval('2-01+3-1')
3
>>> xeval('1-2-3-4-5')
-13
"""
if not sexpr:
return 0
size = len(sexpr)
start = 0
curr = 1 if sexpr[0] == '+' or sexpr[0] == '-' else 0
while curr < size and sexpr[curr] != '+' and sexpr[curr] != '-':
curr += 1
this = int(sexpr[start:curr])
if curr >= size - 1:
return this
return this + xeval(sexpr[curr:])
|
def duration(s):
"""Turn a duration in seconds into a human readable string"""
m, s = divmod(s, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
parts = []
if d:
parts.append('%dd' % (d))
if h:
parts.append('%dh' % (h))
if m:
parts.append('%dm' % (m))
if s:
parts.append('%ds' % (s))
return ' '.join(parts)
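
# Example conversions (values invented); note that an input of 0 seconds returns
# an empty string.
print(duration(90061))  # '1d 1h 1m 1s'
print(duration(3600))   # '1h'
print(duration(45))     # '45s'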
|