content stringlengths 42 6.51k |
|---|
def doubleSelectionSort(array):
    """Sort ``array`` in place by selecting both extremes each pass.

    Each outer iteration finds the minimum and maximum of the remaining
    unsorted middle section and swaps them to the front and back.

    Best / Average / Worst : O(n^2) time | O(1) space.
    Returns the (mutated) input list.
    """
    n = len(array)
    for left in range(n // 2):
        right = n - left - 1
        lo = hi = left
        for j in range(left + 1, right + 1):
            if array[j] >= array[hi]:
                hi = j
            if array[j] < array[lo]:
                lo = j
        array[left], array[lo] = array[lo], array[left]
        # If the maximum started at `left`, the first swap moved it to `lo`.
        if hi == left:
            hi = lo
        array[right], array[hi] = array[hi], array[right]
    return array
def checkpoint2epoch(_checkpoint_name):
    """Extract the epoch number from a checkpoint file name.

    :param _checkpoint_name: [str], e.g. 'epoch=2.ckpt' or 'epoch=2_v0.ckpt'
    :return: [int], e.g. 2
    """
    tail = _checkpoint_name.split("epoch=")[-1]
    number = tail.split("_")[0].replace(".ckpt", "")
    return int(number)
def _ParseAnswer(answer, options, allow_freeform):
"""Parses answer and returns 1-based index in options list.
Args:
answer: str, The answer input by the user to be parsed as a choice.
options: [object], A list of objects to select. Their str()
method will be used to select them via freeform text.
allow_freeform: bool, A flag which, if defined, will allow the user to input
the choice as a str, not just as a number. If not set, only numbers
will be accepted.
Returns:
int, The 1-indexed value in the options list that corresponds to the answer
that was given, or None if the selection is invalid. Note that this
function does not do any validation that the value is a valid index
(in the case that an integer answer was given)
"""
try:
# If this fails to parse, will throw a ValueError
return int(answer)
except ValueError:
# Answer is not an int
pass
# If the user has specified that they want to allow freeform selections,
# we will attempt to find a match.
if not allow_freeform:
return None
try:
return list(map(str, options)).index(answer) + 1
except ValueError:
# Answer not an entry in the options list
pass
# Couldn't interpret the user's input
return None |
def translate_microsoft(txt):
    """Replace common Word "smart" punctuation with plain ASCII.

    Handles curly quotes, invisible separators, ellipsis, bullets, the
    various long dashes, and non-breaking spaces.

    :param txt: input text possibly containing Word typography
    :return: the text with those characters mapped to ASCII equivalents

    Improvement: the original chained 18 separate ``str.replace`` passes;
    ``str.translate`` does the same simultaneous single-character mapping
    in one C-level pass (each key is a distinct single char, so sequential
    and simultaneous replacement are equivalent).
    """
    table = str.maketrans({
        # double quotes
        u"\u201c": '"', u"\u201d": '"',
        # single quotes
        u"\u2018": "'", u"\u2019": "'", u"\u02BC": "'",
        # invisible separator
        u"\u2063": " ",
        # ellipsis
        u"\u2026": "...",
        # bullets
        u"\u2022": "-", u"\u25cf": "-",
        # long dashes
        u"\u2012": "-", u"\u2013": "-", u"\u2014": "-", u"\u2015": "-",
        u"\u2053": "-", u"\u2E3A": "-", u"\u2E3B": "-",
        # space-like characters
        u"\u2025": ' ', u"\xa0": ' ',
    })
    return txt.translate(table)
def sort_words(boxes):
    """Group word boxes (x1, y1, x2, y2) into lines, each sorted left to right.

    Boxes are assumed to arrive roughly top-to-bottom; a new line starts
    whenever a box's top edge lies more than the mean box height below the
    current line's reference y.

    Returns a list of lines, each a list of boxes sorted by x1.
    """
    if not boxes:
        return []
    mean_height = sum(y2 - y1 for _, y1, _, y2 in boxes) / len(boxes)
    lines = []
    current = [boxes[0]]
    line_y = boxes[0][1]
    for box in boxes[1:]:
        if box[1] > line_y + mean_height:
            # Start a new line anchored at this box's top edge.
            lines.append(current)
            current = [box]
            line_y = box[1]
        else:
            current.append(box)
    lines.append(current)
    for line in lines:
        line.sort(key=lambda b: b[0])
    return lines
def draft_anchor(m) -> str:
    """Concatenate an iterable of letters into a single anchor string."""
    return "".join(letter for letter in m)
def ymd2jd(year, month, day):
    """Convert a calendar date (year, month, day) to a Julian day number.

    Standard integer algorithm (12h-based).  NOTE: ``int()`` truncates
    toward zero (unlike ``//`` which floors); the formula depends on that
    for the negative January/February month offset.
    """
    a = int((month - 14) / 12.)  # -1 for Jan/Feb, 0 for Mar..Dec
    term1 = int((1461 * (year + 4800 + a)) / 4.)
    term2 = int((367 * (month - 2 - 12 * a)) / 12.)
    term3 = int((3 * int((year + 4900 + a) / 100.)) / 4.)
    return term1 + term2 - term3 + day - 32075
def repack(data):
    """Recreate a nested dict structure from a flat dot-keyed dictionary.

    Example:
        repack({"p.x": 1}) -> {"p": {"x": 1}}
    """
    result = {}
    for key, value in data.items():
        *parents, leaf = key.split(".")
        node = result
        for part in parents:
            # Create intermediate dicts on demand, reuse existing ones.
            node = node.setdefault(part, {})
        node[leaf] = value
    return result
def split(value, sep=' '):
    """
    Template filter: split a string on *sep* (a single space by default).
    Provide the divider as the filter's argument to split on something else.

    .. code-block:: text
        {{ "foo bar"|split }}
        {{ "foo-bar"|split"-" }}
    """
    parts = value.split(sep)
    return parts
def format_percent(x, _pos=None):
    """Matplotlib tick formatter rendering fraction *x* as a LaTeX percent.

    Usage: plt.gca().yaxis.set_major_formatter(format_percent)
    One decimal is shown only when the value is not close to an integer.
    """
    pct = x * 100
    decimals = 1 if abs(pct - round(pct)) > 0.05 else 0
    return r"${:.{}f}\%$".format(pct, decimals)
def realign_shifted_streams(tokens, durations, F0s, shifts):
    """
    Realign parallel token/duration/F0 streams that were emitted with
    per-stream offsets, trimming padding so all streams line up.

    Durations are shifted by 1, F0 by 2
    >>> tokens = ["<s>", "t1", "t2", "t3", "</s>", "x", "x"]
    >>> durations = ["<0>", "<0>", "d1", "d2", "d3", "<0>", "x"]
    >>> F0s = ["<0>", "<0>", "<0>", "f1", "f2", "f3", "<0>"]
    >>> shifts = [1,2]
    >>> realign_shifted_streams(tokens, durations, F0s, shifts)
    (['<s>', 't1', 't2', 't3', '</s>'], ['<0>', 'd1', 'd2', 'd3', '<0>'], ['<0>', 'f1', 'f2', 'f3', '<0>'])
    """
    # shifts = (duration_shift, f0_shift); tokens themselves are unshifted.
    max_shift = max(shifts)
    if max_shift > 0:
        shift_durations, shift_F0s = shifts
        # Tokens: drop the trailing padding added for the most-shifted stream.
        tokens = tokens[:-max_shift]
        # Durations: drop this stream's leading padding, then any trailing
        # padding it carries relative to the most-shifted stream.
        durations = durations[shift_durations:]
        if shift_durations < max_shift:
            durations = durations[: -(max_shift - shift_durations)]
        # F0s are optional; same leading/trailing trim as durations.
        if F0s is not None:
            F0s = F0s[shift_F0s:]
            if shift_F0s < max_shift:
                F0s = F0s[: -(max_shift - shift_F0s)]
    assert len(tokens) == len(durations), f"{len(tokens)} =! {len(durations)}"
    if F0s is not None:
        assert len(tokens) == len(F0s), f"{len(tokens)} =! {len(F0s)}"
    return tokens, durations, F0s
def revComp(seq):
    """Return the reverse complement of a DNA string.

    Only A/C/G/T/N are accepted; any other character raises KeyError.
    """
    complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'N': 'N'}
    return ''.join(complement[base] for base in reversed(seq))
def get_patch_level(kernel_release):
    """Return the version portion of a kernel release string.

    e.g. '4.13.6-1-ARCH' -> '4.13.6'
    """
    version, _, _ = kernel_release.partition('-')
    return version
def i2b(i, wide):
    """Encode integer *i* as *wide* big-endian bytes (int.to_bytes wrapper)."""
    return i.to_bytes(wide, byteorder="big")
def all_in(phrases, list):
    """Check whether every phrase appears somewhere in the joined list.

    The list entries are concatenated and each phrase is looked up as a
    substring of that combined text (so a phrase may match text spanning
    a single entry; entries are joined with no separator).

    Useful for error checking of multi-line status output, e.g.
    all_in(['most recent snapshot of', 'does not'], statuses)
    """
    combined = ''.join(list)
    return all(phrase in combined for phrase in phrases)
def is_windows_path(path):
    """Heuristic check: the path contains a backslash, drive colon, or pipe."""
    return any(marker in path for marker in ('\\', ':', '|'))
def unique_id(ID, IDList):
    """
    Assign a unique ID to a spectral target.

    A target may appear in multiple files, so repeated IDs get a
    ``_<counter>`` suffix; the first occurrence is returned unchanged.

    Parameters
    ----------
    ID : String
        Spectral target ID.
    IDList : Dictionary
        Maps original IDs to the last counter used for each.

    Returns
    -------
    ID : String
        Unique ID.
    IDList : Dictionary
        Updated IDList (mutated in place).
    """
    if ID in IDList:
        IDList[ID] += 1
        return "{}_{}".format(ID, IDList[ID]), IDList
    IDList[ID] = 0
    return ID, IDList
def _get_params(locale):
"""HTTP GET params."""
return {'loc': locale} |
def make_bold(x, disable=False):
    """Wrap *x* in ANSI bold escape codes.

    :param x: string input
    :param disable: If *True*, return *x* unformatted.
    """
    if disable:
        return x
    return '\33[1m{}\33[0m'.format(x)
def is_ipv6(hosts):
    """
    Validate an IPv6 address or subnet (loose syntactic check).

    Every colon-separated group must be 1-4 hex digits, so compressed
    '::' notation is rejected by this check (unchanged from the original
    behavior — review if compressed addresses must be accepted).

    :param hosts: a single host or subnet string, e.g. '2001:db8:0:0:0:0:0:1/64'
    :return: True or False

    Fixes: ``len(group) is not 0`` compared ints by identity (a CPython
    small-int-cache accident, and a SyntaxWarning on modern Python);
    removed the unused ``mask`` variable and unused exception binding;
    narrowed the bare ``except Exception`` to ValueError.
    """
    hosts = hosts.strip()
    if "/" not in hosts:
        # Assume that if no mask is specified, use a single host mask
        hosts = hosts + "/128"
    if ":" not in hosts:
        return False
    groups = hosts.split("/")[0].split(":")
    for group in groups:
        if len(group) == 0 or len(group) > 4:
            return False
        try:
            int(group, 16)
        except ValueError:
            return False
    return True
def build_key_split(key, index):
    """
    Used for documentation.

    Maps an estimator name (or its repr, possibly of the form
    '``ClassName <...>``') to a coarse family label such as 'SVM',
    'Trees', 'Linear', or a trimmed class name.

    :param key: object whose str() is parsed for the class name
    :param index: unused by this function; kept for caller compatibility
    :return: str, the family label
    """
    try:
        # Pull the name out of a '``Name <...>``'-style repr; fall back to
        # the raw string when the backtick markup is absent.
        new_key = str(key).split('`')[1].split('<')[0].strip()
    except IndexError:
        new_key = str(key)
    if 'SVC' in new_key or 'SVR' in new_key or 'SVM' in new_key:
        return 'SVM'  # pragma: no cover
    if 'Neighbors' in new_key:
        return 'Neighbors'  # pragma: no cover
    if 'Scaler' in new_key:
        return 'Scaler'  # pragma: no cover
    if 'Normalizer' in new_key:
        return 'Scaler'  # pragma: no cover
    if new_key.endswith("NB"):
        return "...NB"  # pragma: no cover
    if new_key.endswith("RBM"):
        # NOTE(review): RBM maps to the "...NB" bucket as well — confirm
        # this grouping is intended rather than a copy/paste slip.
        return "...NB"  # pragma: no cover
    if "KMeans" in new_key:
        return "KMeans"  # pragma: no cover
    if ('XGB' in new_key or 'LGBM' in new_key or 'Tree' in new_key or
            'Forest' in new_key):
        return 'Trees'  # pragma: no cover
    if ('ARDRegression' in new_key or 'ElasticNet' in new_key or
            'HuberRegressor' in new_key or 'Lars' in new_key or
            'Lasso' in new_key or 'LinearRegression' in new_key or
            'LogisticRegression' in new_key or
            'Ridge' in new_key or 'SGD' in new_key or
            'TheilSen' in new_key):
        return 'Linear'  # pragma: no cover
    # Family-by-prefix buckets, rendered as 'Prefix...'.
    for begin in ["Lasso", "Select", "Label", 'Tfidf', 'Feature',
                  'Bernoulli', 'MultiTask', 'OneVs', 'PLS',
                  'Sparse', 'Spectral', 'MiniBatch',
                  'Bayesian']:
        if new_key.startswith(begin):
            return begin + '...'
    # Otherwise strip a common suffix and return the trimmed class name.
    for end in ['CV', 'Regressor', 'Classifier']:  # pragma: no cover
        if new_key.endswith(end):
            new_key = new_key[:-len(end)]
    return new_key
def normalize_knot_vector(knot_vector=(), decimals=4):
    """ Normalizes the input knot vector between 0 and 1.

    :param knot_vector: input knot vector
    :type knot_vector: tuple
    :param decimals: rounding number
    :type decimals: int
    :return: normalized knot vector (the input, unchanged, when empty)
    :rtype: list
    """
    if not knot_vector:
        return knot_vector
    start = float(knot_vector[0])
    span = float(knot_vector[-1]) - start
    # Round via %-formatting to `decimals` places, then back to float.
    fmt = "%0.{}f".format(decimals)
    return [float(fmt % ((float(kv) - start) / span)) for kv in knot_vector]
def getTranslation(m):
    """
    Extract the translation vector [tx, ty, tz] from a homogeneous 4x4
    row-major transformation matrix stored flat (elements 3, 7, 11).
    """
    return [m[i] for i in (3, 7, 11)]
def is_markdown_cpp_src(ipynb_cell):
    """
    True iff the cell is markdown whose stripped source is a fenced
    ``` block and the opening fence's info string mentions C++.
    """
    if ipynb_cell['cell_type'] != 'markdown':
        return False
    src = ipynb_cell['source'].strip()
    if not (src.startswith('```') and src.endswith('```')):
        return False
    # Check for "c++" right after the opening ``` fence.
    return "c++" in src.splitlines()[0].lower()
def isTrue(str):
    """Return True when *str* is one of "yes", "true", "t", "1".

    The comparison is case-sensitive.
    @param str: The string containing a bool expression
    @return: A bool object
    @since 0.0.1-beta
    """
    return str in {"yes", "true", "t", "1"}
def mock_algo_owns_shares_in_ticker_before_starting(
        obj,
        ticker):
    """mock_algo_owns_shares_in_ticker_before_starting

    Support mocking owned shares to test indicator selling: always
    reports 10000 owned shares with empty buy/sell histories.

    Alternatively, set a ``positions`` dictionary in the algorithm
    ``config_dict``:

    .. code-block:: python
        algo_config_dict = {
            # other values omitted for docs
            'positions': {
                'SPY': {
                    'shares': 10000,
                    'buys': [],
                    'sells': []
                }
            }
        }

    Use with your custom algorithm unittests:

    .. code-block:: python
        import mock
        import analysis_engine.mocks.mock_algo_trading as mock_trading
        @mock.patch(
            ('analysis_engine.algo.BaseAlgo.get_ticker_positions'),
            new=mock_trading.mock_algo_owns_shares_in_ticker_before_starting)

    :param obj: algorithm object (unused by the mock)
    :param ticker: ticker symbol (unused by the mock)
    """
    return 10000, [], []
def remove_unneccessary_words(lyrics: list) -> list:
    """Remove stop-words that are irrelevant to analytics.

    Args:
        lyrics: list of all words in lyrics of a song
    Return:
        The unique remaining words.

    Fix: the original returned ``list(set(lyrics) - set(words))``, whose
    ordering depends on string hash randomization and so varied between
    runs; the result now deterministically preserves first-seen order.
    """
    stop_words = {'', 'the', 'i', 'a', 'an', 'of', 'with', 'at', 'from',
                  'into', 'and', 'or', 'but', 'so', 'for', 'yet', 'as',
                  'because', 'since', 'this', 'that', 'these', 'those',
                  'in', 'to', 'on', 'all', 'you', 'my', 'it', 'me',
                  'your', 'when', 'out', 'up', 'be', 'is', 'if'}
    # dict.fromkeys dedupes while keeping first-occurrence order.
    return [word for word in dict.fromkeys(lyrics) if word not in stop_words]
def getCurrentPosition(grid, locationMarker='*'):
    """
    Return the (row, col) of the first cell equal to *locationMarker*,
    scanning row-major; returns -1 when the marker is absent.

    Note: column count is taken from the first row.
    """
    width = len(grid[0])
    for r, cells in enumerate(grid):
        for c in range(width):
            if cells[c] == locationMarker:
                return (r, c)
    return -1
def jacobi(a, b):
    """Calculates the value of the Jacobi symbol (a/b) where both a and b are
    positive integers, and b is odd
    """
    if a == 0: return 0
    result = 1
    while a > 1:
        if a & 1:
            # a odd: quadratic reciprocity — flip the sign when
            # a ≡ b ≡ 3 (mod 4), then reduce (a/b) -> (b mod a / a).
            if ((a-1)*(b-1) >> 2) & 1:
                result = -result
            a, b = b % a, a
        else:
            # a even: pull out a factor of 2; (2/b) = -1 iff b ≡ ±3 (mod 8),
            # tested via (b^2 - 1) / 8 being odd.
            if (((b * b) - 1) >> 3) & 1:
                result = -result
            a >>= 1
    # a reached 0 when gcd(a, b) > 1, in which case the symbol is 0.
    if a == 0: return 0
    return result
def rotate(shape, times=1):
    """Rotate a grid (tuple of rows) 90 degrees clockwise *times* times.

    Assumes times >= 0.
    """
    for _ in range(times):
        shape = tuple(zip(*shape[::-1]))
    return shape
def _get_group(username: str) -> str:
"""
Get the group from a username
Usernames are in the format <initials>-<group>. The group names are
therefore extracted by splitting the usernames about the hyphen.
"""
return username.split("-")[-1] |
def astrom_precision(fwhm, snr):
    """
    Approximate precision of astrometric measurements: seeing divided by SNR.

    Parameters
    ----------
    fwhm : float or numpy.ndarray
        The seeing (FWHMgeom) of the observation.
    snr : float or numpy.ndarray
        The SNR of the object.

    Returns
    -------
    float or numpy.ndarray
        The astrometric precision.
    """
    return fwhm / snr
def split(path):
    """Split the pathname *path* into a pair, ``(head, tail)`` where *tail* is the
    last pathname component and *head* is everything leading up to that. The
    *tail* part will never contain a slash; if *path* ends in a slash, *tail*
    will be empty. If there is no slash in *path*, *head* will be empty. If
    *path* is empty, both *head* and *tail* are empty. Trailing slashes are
    stripped from *head* unless it is the root (one or more slashes only). In
    all cases, ``join(head, tail)`` returns a path to the same location as *path*
    (but the strings may differ).

    Fix: the previous implementation was a stub that always returned
    ``('', '')`` regardless of input; this implements the documented
    POSIX semantics (mirroring posixpath.split).
    """
    cut = path.rfind('/') + 1
    head, tail = path[:cut], path[cut:]
    # Strip trailing slashes from head unless it is all slashes (the root).
    if head and head != '/' * len(head):
        head = head.rstrip('/')
    return (head, tail)
def gen_linedata(x0, y0, x1, y1):
    """
    Return (slope, y-intercept) of the line through (x0, y0) and (x1, y1).

    Raises ZeroDivisionError when x0 == x1 (vertical line).
    """
    dx = x1 - x0
    return ((y1 - y0) / dx, (x1 * y0 - x0 * y1) / dx)
def eval_frac(s):
    """
    Returns string s, which represents a fraction, as a float.

    If s does not represent a fraction (int over int), or the denominator
    is zero, returns None.

    Parameter s: The string to evaluate
    Precondition: s is a string with a / in it
    """
    assert type(s) == str, repr(s)+' is not a string'
    assert '/' in s, repr(s)+' is missing a /'
    # Split on the first '/' only, matching the original find()-based slicing.
    top_str, _, bot_str = s.partition('/')
    try:
        # Narrowed from a bare except: int() raises ValueError on bad
        # digits; division raises ZeroDivisionError on a zero denominator.
        return int(top_str) / int(bot_str)
    except (ValueError, ZeroDivisionError):
        return None
def check_ifbool(val):
    """
    Map a tkinter.CheckButton bool onto a dcc.Checklist value:
    True -> [1], False -> []; non-bool values pass through unchanged.
    """
    if not isinstance(val, bool):
        return val
    return [1] if val else []
def chunk(list, n):
    """Break *list* into consecutive chunks of size n (last may be shorter)."""
    return [list[start:start + n] for start in range(0, len(list), n)]
def extract_history(history):
    """Extract CANDLE benchmark model training/validation statistics.

    CANDLE models return a history dictionary of training/validation
    statistics (loss, R2, ...).  Each value is either a scalar or a
    per-epoch list; list values are replaced by their final (last-epoch)
    entry.

    Args
        history: model training/validation result dictionary
    Returns
        A dictionary (possibly empty) with the same keys and final values.
    """
    final = {}
    for key, value in history.items():
        # isinstance (rather than `type(value) is list`) also accepts
        # list subclasses, per standard type-check idiom.
        if isinstance(value, list):
            value = value[-1]
        final[key] = value
    return final
def bits_to_bytes(n):
    """Number of bytes required to represent an n-bit number: ceil(n / 8).

    Implemented as a shift rather than ``(n + 7) // 8`` — slightly faster,
    and this helper is called often enough for that to matter.
    """
    return (n + 7) >> 3
def parse_requirements(file_text):
    """
    Parse a python requirements.txt string into a list of package names.

    Fix: the original appended the full ``line.split("==")`` pair, so it
    returned ``[name, version]`` lists instead of the documented names.

    :param file_text: requirements.txt text
    :return: List of package names
    """
    packages = []
    for line in file_text.split("\n"):
        if not line:
            continue
        # Keep only the name, dropping any '==version' pin.
        packages.append(line.split("==")[0])
    return packages
def get_name(obj, dob):
    """Format a "<name> is born on <dob>" sentence."""
    return f"{obj} is born on {dob}"
def nest_array(json_prm, key):
    """Function that nests an array given a key and returns a dictionary

    Groups the list of dicts *json_prm* by the value of *key*: each
    distinct key value maps to the list of matching entries with *key*
    stripped out.  NOTE: O(n^2) — the whole list is rescanned for every
    entry (duplicate key values just overwrite with an identical group).
    """
    return dict((
        json_prm[y][key],
        list(dict((k, v) for k, v in x.items() if k != key)
             for x in json_prm if x[key] == json_prm[y][key])
        )
        for y in range(len(json_prm)))
def upper(s):
    """Template-filter style wrapper: return *s* converted to uppercase."""
    result = s.upper()
    return result
def dm_smear(dm, BW, center_freq):
    """
    Dispersion smearing time in seconds caused by dispersion measure *dm*
    over a bandwidth of *BW* MHz centered at *center_freq* MHz.
    """
    denom = 0.0001205 * center_freq * center_freq * center_freq
    return dm * BW / denom
def user(rel):
    """Add the 'users:' relationship-category prefix for user resources.

    Parameters
    ----------
    rel: string
        Link relationship identifier

    Returns
    -------
    string
    """
    return f'users:{rel}'
def CorrectPool(out_size,current_pool):
    """makes convolved size divisible by pooling kernel

    Keeps current_pool when it already divides out_size; otherwise
    derives a pool size from the rounded size/pool ratio.
    NOTE(review): round() uses banker's rounding and the final int()
    truncates, so the result may not divide out_size exactly — confirm
    callers tolerate that.
    """
    ratio=out_size/current_pool
    if (ratio)%1==0:#whole number
        return int(current_pool)
    else:
        whole_ratio=round(ratio)
        # Guard against the ratio rounding down to zero (pool > size/2).
        if whole_ratio==0:
            whole_ratio+=1
        return int(out_size/whole_ratio)
def _get_last_layer_units_and_activation(num_classes):
"""Gets the # units and activation function for the last network layer.
Args:
num_classes: Number of classes.
Returns:
units, activation values.
"""
if num_classes == 2:
activation = 'sigmoid'
units = 1
else:
activation = 'softmax'
units = num_classes
return units, activation |
def remove_namespace_from_string(name):
    """
    Removes the namespace from the given string.

    :param name: str, string we want to remove namespace from
    :return: str, the text after the last ':' (the input unchanged when
        no namespace separator is present)

    Simplified: ``str.split`` always returns a non-empty list, so the
    former ``if sub_name`` guard was dead code.
    """
    return name.split(':')[-1]
def allsame(iterable):
    """Return whether all elements of *iterable* equal the first element.

    Raises ValueError when the iterable is empty.
    """
    it = iter(iterable)
    for first_item in it:
        break
    else:
        # for-else runs only when the loop body never executed (empty input).
        raise ValueError("iterable cannot be empty")
    return all(element == first_item for element in it)
def rulejoin(class_rule, method_rule):
    """
    Join class and method rules. Used internally by :class:`ClassView` to
    combine rules from the :func:`route` decorators on the class and on the
    individual view handler methods::

        >>> rulejoin('/', '')
        '/'
        >>> rulejoin('/', 'first')
        '/first'
        >>> rulejoin('/first', '/second')
        '/second'
        >>> rulejoin('/first', 'second')
        '/first/second'
        >>> rulejoin('/first/', 'second')
        '/first/second'
        >>> rulejoin('/first/<second>', '')
        '/first/<second>'
        >>> rulejoin('/first/<second>', 'third')
        '/first/<second>/third'
    """
    # An absolute method rule replaces the class rule outright.
    if method_rule.startswith('/'):
        return method_rule
    # No separator needed when the method rule is empty or the class
    # rule already ends with '/'.
    if not method_rule or class_rule.endswith('/'):
        return class_rule + method_rule
    return class_rule + '/' + method_rule
def d4(depth: int, width: int, in_list=None):
    """Return a list of tcl subinterpreter name-paths for the given
    depth and width.

    To make an "x0 x0 x0" one must first have made "x0 x0": the ordering
    guarantees no tcl child interpreter is listed before all of its
    parents.  A 3x3 makes 39 subinterpreters; a 4x4 makes 390.

    :param depth: remaining nesting depth
    :param width: children per node
    :param in_list: path prefix accumulated by the recursion (internal)

    Fix: the default was a mutable ``list()`` shared across calls; it is
    now the conventional ``None`` sentinel.  (The original never mutated
    the default, so behavior is unchanged — this removes the hazard.)
    """
    if in_list is None:
        in_list = []
    depth -= 1
    new = list()
    for w in range(width):
        piece = in_list.copy()
        piece.append('x' + str(w))
        new.append(piece)
        if depth > 0:
            new.extend(d4(depth, width, in_list=piece))
    return new
def valid_state(floors):
    """Check if state is valid.

    A floor is invalid when it holds at least one generator together with
    a machine ('M') whose matching generator ('G') is absent.
    """
    for floor in floors:
        generators = {item[0] for item in floor if item[1] == 'G'}
        if not generators:
            continue  # no generators: machines are safe here
        for item in floor:
            if item[1] == 'M' and item[0] not in generators:
                return False
    return True
def slice_dict(d, keys):
    """
    Return the sub-dict of *d* restricted to *keys*; missing keys are ignored.

    :param d: dict
    :param keys: list
    :return: dict
    """
    wanted = set(keys)
    return {k: v for k, v in d.items() if k in wanted}
def calculate_age(year_of_birth: int, current_year: int) -> str:
    """Year report depending on year of birth and current year."""
    def _pluralize(y: int) -> str:
        # "1 year" singular; otherwise "<n> years".
        return "1 year" if y == 1 else "{} years".format(y)

    age = current_year - year_of_birth
    if age > 0:
        return "You are " + _pluralize(age) + " old."
    if age < 0:
        return "You will be born in " + _pluralize(-age) + "."
    return "You were born this very year!"
def patched_to_marshallable_type(obj):
    """adds __marshallable__ support; see https://github.com/twilio/flask-restful/pull/32

    The check order matters: indexable objects (dicts, lists, strings)
    short-circuit before the __marshallable__ hook is consulted; anything
    else falls back to its instance __dict__.
    """
    if obj is None:
        return None  # make it idempotent for None
    if hasattr(obj, '__getitem__'):
        return obj  # it is indexable it is ok
    if hasattr(obj, '__marshallable__'):
        return obj.__marshallable__()
    return dict(obj.__dict__)
def encode(value, is_py3_flag):
    """Helper for py3 interop: UTF-8-encode *value* when the flag is set,
    otherwise return it unchanged."""
    return value.encode('utf-8') if is_py3_flag else value
def html_escape(input_string: str) -> str:
    """Escape all HTML-special characters in a string.

    :param input_string: A string that may contain html tags
    :return: the string with HTML syntax escaped so it is safe to embed
        in an HTML document

    Fix: the escape table mapped every character to itself (the entity
    strings appear to have been lost to an earlier HTML-unescaping pass),
    making the function a no-op; restored the standard entities.
    """
    html_escape_table = {
        "&": "&amp;",
        '"': "&quot;",
        "'": "&#x27;",
        ">": "&gt;",
        "<": "&lt;",
    }
    return "".join(html_escape_table.get(c, c) for c in input_string)
def pad_array_index(low,high,segment_length,reverse=False):
    """
    make it so that it's good for the bin to calculate local mean?

    Pads the interval so its length becomes a multiple of segment_length,
    extending `high` upward, or `low` downward when reverse=True.
    NOTE(review): when (high - low) is already an exact multiple,
    remainder equals segment_length and a full extra segment is added —
    confirm that is intended rather than returning the bound unchanged.
    """
    remainder = (segment_length-(high-low)%segment_length)
    if not reverse:
        return high + remainder
    else:
        return low - remainder
def minmax_recursive(S, n):
    """Return (minimum, maximum) of the first n elements of S, recursively.

    Note: recursion depth is O(n), so very long sequences may hit the
    interpreter's recursion limit.
    """
    last = S[n-1]
    if n == 1:
        return (last, last)
    lo, hi = minmax_recursive(S, n-1)
    return (last if last < lo else lo, last if last > hi else hi)
def step(seq):
    """
    One "look-and-say" step: each run of identical characters becomes
    its length followed by the character.

    >>> step('1')
    '11'
    >>> step('11')
    '21'
    >>> step('21')
    '1211'
    >>> step('1211')
    '111221'
    >>> step('111221')
    '312211'
    """
    pieces = []
    run_char = seq[0]
    run_len = 0
    for ch in seq:
        if ch == run_char:
            run_len += 1
        else:
            pieces.append(str(run_len) + run_char)
            run_char = ch
            run_len = 1
    pieces.append(str(run_len) + run_char)
    return "".join(pieces)
def join_query_string(qs_dict):
    """
    Joins query string from a dictionary (no URL-encoding is performed).

    >>> join_query_string({'a': '1', 'y': '', 'b': '2', 'name': 'abc', 'z': 'ab23'})
    'a=1&y=&b=2&name=abc&z=ab23'
    """
    return "&".join("{}={}".format(k, v) for k, v in qs_dict.items())
def my_reverse(s):
    """Reverse a sequence via negative-step slicing (works on str, list, ...)."""
    return s[::-1]
def compare_log_to_resp(log, resp):
    """ Search the log list for the responses in the response list

    Search through the log list for the lines in the response list. The
    response list may contain substrings found in the log list lines. The
    response list lines must be found in the log list in the order they
    are specified in the response list (the log list may have extra lines
    which are ignored).

    Returns None if all the strings in the response list were found in the
    log list. Otherwise, returns the first missing response line.
    """
    response_line_no = 0
    response_line = resp[response_line_no].rstrip()
    for log_line in log:
        log_line = log_line.rstrip()
        # Substring match: the expected response may be a fragment of the
        # (whitespace-trimmed) log line.
        if response_line in log_line:
            # Found a match, step to the next non-blank line in the response
            # list
            while True:
                response_line_no += 1
                if response_line_no >= len(resp):
                    # We ran through all of our respones lines, success!
                    return None
                else:
                    response_line = resp[response_line_no].rstrip()
                    # Blank response lines are skipped, not matched.
                    if len(response_line) > 0:
                        break
    #print("Log missing '{0:s}'".format(response_line))
    return response_line
def get_year(date):
    """Get the year from a date string.

    :param date: 'YYYY-MM-DD' style text, or a bare year of up to 4 chars
    :return: the year as an int
    :raises ValueError: for any other format (narrowed from the former
        generic Exception; existing ``except Exception`` callers still
        catch it)

    Cleanup: the original reused a bool as an index variable and computed
    an unused ``size`` local.
    """
    if '-' in date:
        # Everything before the first '-' is the year.
        return int(date[:date.index('-')])
    if len(date) <= 4:
        return int(date)
    raise ValueError(
        'Incorrect date supplied. Date should be of the format YYYY-MM-DD or just the year')
def get_current_application(request):
    """Get current application.

    Returns the namespace of the resolved URL, falling back to "home"
    when the namespace is empty or there is no resolver match at all.

    Cleanup: narrowed the broad ``except Exception as e`` (with an unused
    binding) to AttributeError, which is what a missing/None
    ``resolver_match`` raises.
    """
    try:
        app_name = request.resolver_match.namespace
    except AttributeError:
        # resolver_match may be None (or absent) outside URL resolution.
        return "home"
    return app_name or "home"
def sanitize(number, minimum, maximum=None):
    """ Sanity check a given number.

    :param number: the number to sanitize
    :param minimum: the minimum acceptable number
    :param maximum: the maximum acceptable number (optional); when omitted
        only the lower bound is enforced
    :returns: the number clamped to the allowed minimum and maximum
    """
    if number < minimum:
        return minimum
    if maximum is not None and number > maximum:
        return maximum
    return number
def simple(k, n):
    """ Simple example function with no additional parameters.

    Adds 3 (even n) or 5 (odd n) to k, modulo 255.
    """
    offset = 3 if n % 2 == 0 else 5
    return (k + offset) % 255
def clues_too_many(text: str) -> bool:
    """ Check for any "too many connections" clues in the response code """
    lowered = text.lower()
    # 'download limit exceeded' / byte-quota messages are not connection
    # limits; that exclusion applies to every clue, so test it once.
    if "download" in lowered or "byte" in lowered:
        return False
    return any(clue in lowered
               for clue in ("exceed", "connections", "too many", "threads", "limit"))
def is_bot_ua(useragent, bots_ua_dict, bots_ua_prefix_dict, bots_ua_suffix_dict, bots_ua_re):
    """Check whether a user-agent string is blacklisted as a bot.

    Matching is tried in order: exact (dict lookup), prefix, suffix, and
    finally compiled regular expressions.
    """
    if not useragent:
        return False
    if useragent in bots_ua_dict:
        return True
    if any(useragent.startswith(prefix) for prefix in bots_ua_prefix_dict):
        return True
    if any(useragent.endswith(suffix) for suffix in bots_ua_suffix_dict):
        return True
    return any(ua_re.match(useragent) for ua_re in bots_ua_re)
def data_file_data(base_data_file_data, data_file):
    """Return data file creation data.

    Base fields are merged on top, so a "file" key in the base data
    overrides the *data_file* argument.
    """
    payload = {"file": data_file}
    payload.update(base_data_file_data)
    return payload
def timer_summary(timers, name):
    """
    Generate a summary of the timers that *name* has started.

    Args:
        timers: A dictionary whose values are Timer objects.
        name: The author's name, used to select timers whose key contains it.
    Returns:
        A string that summarizes name's timers ("**None**" when there are none).
    """
    owned = [t for t in timers.values() if name in t.key]
    header = "Active timers for __{}__:\n\n".format(name)
    if not owned:
        return header + "**None**"
    entries = [" **{}**) {}".format(i, t) for i, t in enumerate(owned, start=1)]
    return header + "".join(entries)
def calc_undervolt_mv(msr_value):
    """Return the offset voltage (in mV) from the given raw MSR 150h value.
    """
    # The offset occupies bits 21-31 as an 11-bit two's-complement field.
    offset = (msr_value & 0xFFE00000) >> 21
    # Values above 0x400 represent negative offsets in two's complement.
    offset = offset if offset <= 0x400 else -(0x800 - offset)
    # One hardware step is 1/1.024 mV.
    return int(round(offset / 1.024))
def pathway_names(pathways):
    """ Split the pathway names and descriptions

    Args:
        pathways (list): pathway "NAME: description" strings
            (from a pathway abundance or coverage file)
    Requires:
        None
    Returns:
        (dict): pathway name -> description ('' when none is present)

    Cleanup: removed a dead ``try/except ValueError`` — ``split``/``pop``
    on a split result can never raise ValueError, so the except branch
    was unreachable.
    """
    path_names = {}
    for path in pathways:
        # ignore stratified pathways
        if "|" in path:
            continue
        # Split on the first ':' only; the remainder keeps any later colons.
        name, _, description = path.partition(":")
        path_names[name] = description
    return path_names
def constructUserMovieHist(userRatingGroup):
    """ Construct the rating list of a user

    Returns:
        (user, ([movie], [rating]))
    """
    userID = userRatingGroup[0]
    movies = []
    ratings = []
    for pair in userRatingGroup[1]:
        movies.append(pair[0])
        ratings.append(pair[1])
    return (userID, (movies, ratings))
def binary_search_interval(key, ary, start, end):
    """
    Binary search for *key* in sorted *ary* within [start, end).

    Inputs:
        key : value to find
        ary : sorted array in which to find the key
        start, end : interval of the array in which to perform the search
    Outputs:
        On success, the (non-negative) index where the key exists in ary.
        On failure, a negative number n such that -(n + 1) is the index at
        which the key could be inserted keeping the array sorted.

    Fix: the midpoint used true division (``/ 2``), which yields a float
    on Python 3 and raises TypeError when used as an index; now uses
    floor division.
    """
    imin = start
    imax = end
    while imin < imax:
        imid = (imax + imin) // 2
        imid_val = ary[imid]
        if key < imid_val:
            imax = imid       # key lies before the midpoint
        elif key > imid_val:
            imin = imid + 1   # key lies after the midpoint
        else:
            return imid
    return -imin - 1
def extract_num_options(val):
    """
    convert a categorical to a number of options

    A multi-valued categorical reports its size.  A single-valued one
    reports the length of its value when that value is a tuple
    (note: a trailing comma is what makes ('beep',) a tuple; ('beep')
    is just a string), and 0 otherwise.
    """
    if len(val) != 1:
        return len(val)
    (only_value,) = val
    if isinstance(only_value, tuple):
        return len(only_value)
    return 0
def fib(n):
    """Fibonacci:
    fib(n) = fib(n-1) + fib(n-2) if n > 1
    fib(n) = 1 if n <= 1

    Rewritten iteratively: the naive double recursion was O(phi^n) and
    overflowed the recursion limit for large n; this is O(n) time,
    O(1) space, and returns identical values.
    """
    if n <= 1:
        return 1
    prev, cur = 1, 1
    for _ in range(n - 1):
        prev, cur = cur, prev + cur
    return cur
def _assert_date_prams(start, end, calendar, busy):
"""assertion for existence of all parameters"""
return start and end and calendar and busy != None and (end > start) and (end is not start) |
def poincare_ball_params(n_samples):
    """Generate poincare ball benchmarking parameters.

    Parameters
    ----------
    n_samples : int
        Number of samples to be used.

    Returns
    -------
    _ : list.
        List of params: one tuple per (manifold_arg, metric_arg) pairing,
        each of the form
        (manifold, module, metric, n_samples, kwargs, manifold_arg, metric_arg).
    """
    manifold = "PoincareBall"
    # Dimension arguments to benchmark; metric args mirror manifold args.
    manifold_args = [(3,), (5,)]
    module = "geomstats.geometry.poincare_ball"
    def poincare_ball_metric_params():
        params = []
        metric = "PoincareBallMetric"
        metric_args = manifold_args
        kwargs = {}
        # Shared leading fields of every generated param tuple.
        common = manifold, module, metric, n_samples, kwargs
        for manifold_arg, metric_arg in zip(manifold_args, metric_args):
            params += [common + (manifold_arg, metric_arg)]
        return params
    return poincare_ball_metric_params()
def variant_overlaps_interval(
        variant_start,
        n_ref_bases,
        interval_start,
        interval_end):
    """
    Does a variant overlap a given interval on the same chromosome?

    Parameters
    ----------
    variant_start : int
        Inclusive base-1 position of variant's starting location
        (or location before an insertion)
    n_ref_bases : int
        Number of reference bases affect by variant (used to compute
        end coordinate or determine whether variant is an insertion)
    interval_start : int
        Interval's inclusive base-1 start position
    interval_end : int
        Interval's inclusive base-1 end position

    Cleanup: removed a leftover triple-quoted code fragment that sat
    mid-function as a no-op string-literal statement.
    """
    if n_ref_bases == 0:
        # insertions only overlap intervals which start before and
        # end after the insertion point, they must be fully contained
        # by the other interval
        return interval_start <= variant_start and interval_end >= variant_start
    variant_end = variant_start + n_ref_bases
    # overlap means other interval starts before this variant ends
    # and the interval ends after this variant starts
    return interval_start <= variant_end and interval_end >= variant_start
def hex_to_rgb(color):
    """Convert a hex color code to an 8 bit rgb tuple.

    Parameters
    ----------
    color : string, must be in #112233 syntax

    Returns
    -------
    tuple : (red, green, blue) as 8 bit numbers

    Raises
    ------
    ValueError when the input is not of the form '#RRGGBB'.
    """
    # Fixed operator precedence: the old guard evaluated as
    # (not len(color) == 7) and (color[0] == "#"), so a 7-character
    # string without a leading '#' slipped past validation.
    if len(color) != 7 or not color.startswith("#"):
        raise ValueError("Color must be in #112233 format")
    color = color.lstrip("#")
    return tuple(int(color[i : i + 2], 16) for i in (0, 2, 4))
def mfacebookToBasic(url):
    """Rewrite a facebook URL to its mbasic variant.

    Returns the same string when the url contains neither the mobile nor
    the www facebook host.
    """
    for host in ("m.facebook.com", "www.facebook.com"):
        if host in url:
            return url.replace(host, "mbasic.facebook.com")
    return url
def ascii_str(str, ar):
    """Split a string into a list of its single-character strings.

    Note: the parameter `str` shadows the builtin, and `ar` is unused;
    both names are kept to preserve the existing call signature.

    :param str: the string to split
    :param ar: unused (kept for backward compatibility)
    :return: list of one-character strings, in order
    """
    # list() over a string yields its characters — replaces the manual
    # preallocate-and-index loop.
    return list(str)
def PatternCount(text, pattern):
    """
    Exercice 1.2.7 PatternCount
    Description: Counts the number of times Pattern happens in Text WITH OVERLAPS
    Input: Strings Text and Pattern.
    Output: Count(Text, Pattern).
    Sample Input:
        GCGCG
        GCG
    Sample Output:
        2
    """
    # Slide a window of len(pattern) across text; overlapping matches count.
    k = len(pattern)
    return sum(1 for start in range(len(text) - k + 1) if text[start:start + k] == pattern)
def _substr_for_error_message(content):
"""Returns content string to include in the error message"""
return content if len(content) <= 100 else content[0:97] + "..." |
def dim(x):
    """Format a string dimly.

    Returns:
        The string `x` made dim by terminal escape sequence.
    """
    # SGR code 2 = faint/dim; 0 resets attributes.
    return "\033[2m{}\033[0m".format(x)
def is_header_line(line):
    """Returns True if line is a header line"""
    # Stockholm-format '#=GF' prefix check via slice comparison.
    return line[:4] == '#=GF'
def from_tuple(tup):
    """Convert a tuple into a range with error handling.

    Parameters
    ----------
    tup : tuple (len 2 or 3)
        The tuple to turn into a range.

    Returns
    -------
    range : range
        The range from the tuple.

    Raises
    ------
    ValueError
        Raised when the tuple length is not 2 or 3.
    """
    if len(tup) not in (2, 3):
        # Fixed: the format string was missing the closing parenthesis
        # after %r.
        raise ValueError(
            'tuple must contain 2 or 3 elements, not: %d (%r)' % (
                len(tup),
                tup,
            ),
        )
    return range(*tup)
def rect_perimeter(w, h):
    """Calc perimeter of a rectangle

    :param w: width of rectangle
    :param h: height of rectangle
    :return: perimeter of rectangle
    """
    return 2 * (w + h)
def _list_comprehensions(obj, item=None, return_tuple=False, make_none=False):
"""
Generates a new list/tuple by list comprehension.
Args:
obj (Union[int, list, tuple]):
If integer, it will be the length of the returned tuple/list.
item: The value to be filled. Default: None.
If None, the values in the new list/tuple are the same as obj
or range(obj) when obj is integer.
return_tuple(bool): If true, returns tuple, else returns list.
Returns:
List or tuple.
"""
res = []
lst = obj
if isinstance(obj, int):
lst = range(obj)
if make_none:
res = [None for _ in lst]
elif item is None:
res = [i for i in lst]
else:
res = [item for i in lst]
if return_tuple:
return tuple(res)
return res |
def obter_pos_l(pos):  # posicao -> str
    """
    Return the row (line) of a position.

    :param pos: position; pos[1] is the 0-based row index (0..2)
    :return: row label '1', '2' or '3'
    :raises ValueError: if pos[1] is not 0, 1 or 2
    """
    if pos[1] == 0:
        return '1'
    elif pos[1] == 1:
        return '2'
    elif pos[1] == 2:
        return '3'
    else:
        # Fixed: the message previously named the sibling function
        # obter_pos_c instead of this one.
        raise ValueError('obter_pos_l: argumento invalido')
def segment(segment):
    """Select a single segment."""
    # Wrap the value in a one-key mapping via the dict constructor.
    return dict(segment=segment)
def getOrdIndicator(number):
    """Returns the "ordinal indicator" of number, e.g. 'st' for 1 and
    'rd' for 23, because we write them as "1st" and "23rd"."""
    digits = str(number)
    # 11, 12, 13 (and 111, 212, ...) all take 'th': any number whose
    # tens digit is 1 does.
    if len(digits) >= 2 and digits[-2] == '1':
        return 'th'
    return {'1': 'st', '2': 'nd', '3': 'rd'}.get(digits[-1], 'th')
def determineAlpha(mag_ratio):
    """Return (quasar_alpha, lens_alpha) blending weights from a magnitude ratio.

    The brighter component gets alpha 1 and the other gets the ratio
    (or its reciprocal), so both weights lie in (0, 1].

    Known issues noted by previous review (behavior unchanged here):
    - No proper docstring originally.
    - Each image should have its own alpha, because the different images
      have different magnifications (the OM10 "MAG" columns, which need to
      be applied to each image's source magnitude).
    - Name should be "determine_alpha" to be PEP8 compliant.
    """
    if mag_ratio > 1:
        return 1 / mag_ratio, 1
    return 1, mag_ratio
def half_up(n: int) -> int:
    """
    Divide by 2 and round up when input is odd.

    even -> n//2
    odd  -> n//2 + 1
    """
    # Ceiling division via negated floor division: ceil(n/2) == -(-n // 2).
    return -(-n // 2)
def removeDuplicates(list):
    """Return a set of the unique items in `list`.

    Note: the return value is a set (unordered), not a list, and the
    parameter `list` shadows the builtin; both are kept to preserve the
    existing interface.
    """
    # set() consumes the iterable directly — the generator wrapper was
    # redundant (flake8-comprehensions C401).
    return set(list)
def is_from_module(cls, module):
    """Return True if a class comes from a module."""
    # The class must both declare the module as its origin and be
    # reachable as an attribute of that module.
    declared_in_module = cls.__module__ == module.__name__
    return declared_in_module and cls.__name__ in dir(module)
def fermat_number(n: int) -> int:
    """
    https://en.wikipedia.org/wiki/Fermat_number
    https://oeis.org/A000215

    >>> [fermat_number(i) for i in range(5)]
    [3, 5, 17, 257, 65537]
    """
    # F(n) = 2**(2**n) + 1, written with shifts. This single expression
    # also covers n == 0 (1 << 1 == 2, giving 3), so no special case.
    return (1 << (1 << n)) + 1
def normalize_start(start, dim_size):
    """
    Clamp `start` into the valid index range [0, dim_size].

    None maps to 0; negative values are wrapped by dim_size (or clamped
    to 0 when beyond -dim_size); values past dim_size are clamped to
    dim_size.
    """
    if start is None:
        return 0
    if start < 0:
        # Too far negative clamps to 0; otherwise wrap around.
        return 0 if start < -dim_size else start % dim_size
    return min(start, dim_size)
def convert_optional_str_by_type(str_val, type_func, default_val):
    """Converts an optional string using the type_func, else uses the default.
    """
    # '<UNK>' is the sentinel for a missing value, alongside None.
    missing = (str_val == '<UNK>') or (str_val is None)
    source = default_val if missing else str_val
    return type_func(source)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.