content stringlengths 42 6.51k |
|---|
def convert_db_dict_into_list(db_dict):
    """Flatten a mapping of processes (keyed by "@id") into a list.

    Each value's "@id" field must match its key in the mapping.
    """
    for key, entry in db_dict.items():
        assert key == entry["@id"]
    return list(db_dict.values())
def remove_using_lower_case(body, body_lower, to_replace):
    """Remove every occurrence of `to_replace` from `body`, case-insensitively.

    `to_replace` must already be lower case; `body_lower` is `body.lower()`
    and is used to locate occurrences.

    Example input:
        body: Hello World World
        body_lower: hello world world
        to_replace: world
    Output:
        body: "Hello  " (and its lower-case twin)

    :param body: The original string
    :param body_lower: The original string after .lower()
    :param to_replace: The lower-case string to remove
    :return: A tuple containing:
        The original string without `to_replace`
        The string from the first item after .lower()
    """
    if not to_replace:
        # Searching for "" never advances and would loop forever.
        return body, body_lower
    to_replace_len = len(to_replace)
    idx = 0
    while idx < len(body):
        index_l = body_lower.find(to_replace, idx)
        if index_l == -1:
            break
        body = body[:index_l] + body[index_l + to_replace_len:]
        body_lower = body.lower()
        # Re-scan from the same position: removing a match can make the
        # two halves join into a new occurrence starting right here.
        # The original advanced by `index_l + 1` and so skipped it
        # (e.g. "abcabc" minus "abc" wrongly left "abc" behind).
        idx = index_l
    return body, body_lower
def lists_agree_up_to_ordering(l1, l2):
    """Check that two lists hold the same elements with the same
    multiplicities, ignoring order (dict-backed results may come back
    in any order, so order must be allowed to vary)."""
    if len(l1) != len(l2):
        return False
    remaining = list(l2)
    for item in l1:
        try:
            remaining.remove(item)
        except ValueError:
            return False
    return True
def normalize_string(s):
    """Normalize a resource field for the template engine.

    Different data sources (resource.md files, jekyll templates, ...)
    encode component names differently; this maps strings like
    "I Have Capital Letters and Spaces" to
    "i_have_capital_letters_and_spaces" so the jinja template can
    render anything thrown at it.
    """
    return "_".join(s.lower().split(" "))
def interpolate(a, b, weight):
    """Linear blend a*weight + b*(1-weight); returns b when a is None."""
    if a is None:
        return b
    return weight * a + (1 - weight) * b
def find_direction(start, end):
    """Return the direction code (1-8) of the step from start to end.

    The code is looked up from the signs of the coordinate deltas;
    coincident points map to code 1, matching the original branch order.
    """
    def _sign(delta):
        return (delta > 0) - (delta < 0)

    codes = {
        (0, 1): 5, (0, 0): 1, (0, -1): 1,
        (1, 0): 3, (-1, 0): 7,
        (1, 1): 4, (1, -1): 2,
        (-1, 1): 6, (-1, -1): 8,
    }
    return codes[(_sign(end[0] - start[0]), _sign(end[1] - start[1]))]
def as_latex(ordinal):
    """Render an Ordinal object (or a plain int) as a LaTeX string."""
    if isinstance(ordinal, int):
        return str(ordinal)
    parts = [r"\omega"]
    if ordinal.exponent != 1:
        parts.append(f"^{{{as_latex(ordinal.exponent)}}}")
    if ordinal.copies != 1:
        parts.append(rf"\cdot{as_latex(ordinal.copies)}")
    if ordinal.addend != 0:
        parts.append(f"+{as_latex(ordinal.addend)}")
    return "".join(parts)
def dot_product(a, b):
    """Dot product of two equal-length vectors.

    Generalized from the original 2-D-only version to any dimension.

    Args:
        a: vector 1 (tuple)
        b: vector 2 (tuple)
    Returns:
        dot product of the two vectors
    """
    return sum(x * y for x, y in zip(a, b))
def _bool_to_json(value):
"""Coerce 'value' to an JSON-compatible representation."""
if isinstance(value, bool):
value = 'true' if value else 'false'
return value |
def crisscross(xa, ya, xb, yb, xc, yc, xd, yd):
    """Return True when segments A-->B and C-->D intersect.

    Crossing node paths are an eyesore and must be avoided at all cost,
    so a shared start point (A & C) or shared end point (B & D) also
    counts as an intersection.  Coordinates must be integers; the
    equality check will fail in case of floats.

    The check validates a bunch of determinants, see
    http://stackoverflow.com/questions/3838329/how-can-i-check-if-two-segments-intersect
    """
    def _counter_clockwise(px, py, qx, qy, rx, ry):
        # Orientation test: is R counter-clockwise of the segment P->Q?
        return (ry - py) * (qx - px) > (qy - py) * (rx - px)

    # Same start point or same end point?
    if (xa, ya) == (xc, yc) or (xb, yb) == (xd, yd):
        return True
    # A & B on different sides of C-D, and C & D on different sides of A-B?
    straddle_1 = (_counter_clockwise(xa, ya, xc, yc, xd, yd)
                  != _counter_clockwise(xb, yb, xc, yc, xd, yd))
    straddle_2 = (_counter_clockwise(xa, ya, xb, yb, xc, yc)
                  != _counter_clockwise(xa, ya, xb, yb, xd, yd))
    return straddle_1 and straddle_2
def ParseDurationToSeconds(duration):
    """Parse an 'HH:MM:SS' duration string into seconds.

    Args:
        duration: A string such as '12:43:12' (representing in this case
            12 hours, 43 minutes, 12 seconds).
    Returns:
        An integer number of seconds.
    """
    hours, minutes, seconds = map(int, duration.split(':'))
    return (hours * 60 + minutes) * 60 + seconds
def get_line(string, idx):
    """Locate character `idx` of `string` within its line.

    Returns (1-based line number, offset of the character on that line,
    the line contents including its newline).  Raises IndexError when
    `idx` is past the end.  Spectacularly inefficient but only called
    in exception handling.
    """
    offset = idx
    for line_number, line in enumerate(string.splitlines(True), start=1):
        if offset < len(line):
            return line_number, offset, line
        offset -= len(line)
    raise IndexError()
def _interval_in_bb(interval_left_coord, interval_width, bbox):
""" 1D chunk_in_bb
"""
assert len(bbox) == 2
bbox_lo, bbox_hi = bbox
interval_right_coord = interval_left_coord + interval_width
if bbox_lo < interval_left_coord < bbox_hi:
return True
elif bbox_lo < interval_right_coord < bbox_hi:
return True
# only remaining case is bbox fully contained in interval
elif ((interval_left_coord < bbox_lo < interval_right_coord) and
(interval_left_coord < bbox_hi < interval_right_coord)):
return True
else:
return False |
def get_number_guess_len(value):
    """Length of the integer part of `value`, reduced to an odd count.

    Safety measure against the key getting one digit bigger (overflow)
    on decrypt, e.g. (5)=1 -> 5 + 8 = 13 -> (13)=2.

    Args:
        value: Number convertible to int to get its length.
    Returns:
        The digit count of int(value), minus one when that count is even
        (so the result is always odd).
    """
    digits = len(str(int(value)))
    return digits - 1 if digits % 2 == 0 else digits
def nodesFromEdge(edge):
    """Return the pair of node indices at either end of a standard edge.

    Unknown edge numbers yield None, matching the original fall-through.
    """
    edge_to_nodes = {
        0: (0, 1), 1: (2, 3), 2: (0, 2), 3: (1, 3),
        4: (4, 5), 5: (6, 7), 6: (4, 6), 7: (5, 7),
        8: (0, 4), 9: (1, 5), 10: (2, 6), 11: (3, 7),
    }
    return edge_to_nodes.get(edge)
def value_to_dict(val, val_type):
    """Dump a value and its type into a dict-AST node.

    Helper for building {"type": ..., "value": ...} nodes of the dict ast.
    """
    node = {"type": val_type}
    node["value"] = val
    return node
def safe_repr(obj, short=False):
    """repr(obj), truncated to 80 characters when `short` is set.

    Falls back to object.__repr__ when the object's own __repr__ raises.
    """
    _MAX_LENGTH = 80
    try:
        result = repr(obj)
    except Exception:
        result = object.__repr__(obj)
    if short and len(result) >= _MAX_LENGTH:
        return result[:_MAX_LENGTH] + ' [truncated]...'
    return result
def egcd(a, m):
    """Extended GCD: return (g, x, y) with g = gcd(a, m) and a*x + m*y = g."""
    if a == 0:
        # gcd(0, m) = m = 0*0 + m*1
        return (m, 0, 1)
    g, coeff_mod, coeff_a = egcd(m % a, a)
    return (g, coeff_a - (m // a) * coeff_mod, coeff_mod)
def is_number(value):
    """Return True when `value` is an int, float, or numeric string.

    Exact type check (not isinstance), so bool and other subclasses are
    rejected.
    """
    if type(value) not in (int, float, str):
        return False
    try:
        float(value)
    except ValueError:
        return False
    return True
def get_all_quadratic_residue(n):
    """Map each quadratic residue mod n to the list of its roots.

    An integer x is a quadratic residue mod n when a*a % n == x for some
    a in [1, n); if a is a root then so is n-a, hence the value lists.
    Returns {residue: [roots...]}, so for every r in the list,
    r*r % n == residue.  Complexity: O(n).
    """
    roots_by_residue = {}
    for root in range(1, n):
        roots_by_residue.setdefault(root * root % n, []).append(root)
    return roots_by_residue
def majority(L):
    """Return the element that appears most often in L, breaking ties
    with the smallest element.

    :param L: non-empty list of elements
    :complexity: :math:`O(nk)` in average, :math:`O(n^2k)` in worst case
        due to the use of a dictionary, where n = len(L) and
        k = max(w for w in L)
    """
    assert L  # majority is undefined on the empty set
    count = {}
    for word in L:
        count[word] = count.get(word, 0) + 1
    # Minimizing (-count, word) picks the highest count and, on ties,
    # the smallest word "for free".
    return min(count, key=lambda word: (-count[word], word))
def remove_comments(content: str) -> str:
    """Strip all // comments from a hotkeynet script.

    Blank lines and whole-line comments are dropped; inline comments are
    truncated (everything from the first "//" on the line is removed).
    """
    kept = []
    for line in content.split("\n"):
        stripped = line.strip()
        if not stripped or stripped.startswith("//"):
            continue
        kept.append(line.split("//", 1)[0])
    return "\n".join(kept)
def mask(bits):
    """Return a mask of `bits` ones in the low-order positions."""
    all_ones_plus_one = 2 ** bits
    return all_ones_plus_one - 1
def copy_infos(src_config):
    """Return a shallow copy of src_config as a plain dict."""
    return {key: src_config[key] for key in src_config.keys()}
def chunk_string(input_str, length):
    """Split a string into smaller chunks of at most `length` characters.

    NOTE: http://stackoverflow.com/questions/18854620/

    :param input_str: str the input string to chunk.
    :param length: int the length of each chunk.
    :return: list of input_str chunks.
    """
    return [input_str[start:start + length]
            for start in range(0, len(input_str), length)]
def default_in_transformer(row, lhs, rhs):
    """Membership check of lhs in rhs.

    If lhs has an `is_in` method it is used; otherwise the plain `in`
    operator is applied.

    :param row: The row being checked (not used)
    :param lhs: The left hand side of the operator
    :param rhs: The right hand side of the operator
    :return: True if lhs is in rhs, False otherwise
    """
    if hasattr(lhs, "is_in"):
        return lhs.is_in(rhs)
    return lhs in rhs
def column(matrix, col):
    """
    Return the `col`-th column of a two-dimensional list.
    """
    values = []
    for matrix_row in matrix:
        values.append(matrix_row[col])
    return values
def _find_duplicate_variables(strings):
"""Find all string variables that appear more than once."""
seen = set()
duplicates = []
for string in strings:
var = string.variable
if var in seen:
duplicates.append(var)
else:
seen.add(var)
return duplicates |
def read_pid_from_pidfile(pidfile_path):
    """ Read the PID recorded in the named PID file.

        Read and return the numeric PID recorded as text in the named
        PID file. If the PID file cannot be read, or if the content is
        not a valid PID, return ``None``.
        """
    # According to the FHS 2.3 section on PID files in /var/run:
    #
    #   The file must consist of the process identifier in
    #   ASCII-encoded decimal, followed by a newline character.
    #
    #   Programs that read PID files should be somewhat flexible
    #   in what they accept; i.e., they should ignore extra
    #   whitespace, leading zeroes, absence of the trailing
    #   newline, or additional lines in the PID file.
    try:
        # `with` guarantees the handle is closed even when readline
        # raises; the original open()/close() pair leaked it then.
        with open(pidfile_path, 'r') as pidfile:
            line = pidfile.readline().strip()
    except IOError:
        return None
    try:
        return int(line)
    except ValueError:
        return None
def clean_arxiv_id(possible_arxiv_id: str):
    """
    Remove a trailing version suffix (e.g. "v2") from an arXiv id.

    The original split on the first "v" anywhere in the id, which
    mangled ids whose base contains a "v" (e.g. the old-style archive
    "solv-int/9701001" became "sol").  Only a marker of the form
    "v<digits>" at the very end of the id is stripped now.
    """
    import re
    return re.sub(r"v\d+$", "", possible_arxiv_id)
def obfuscated_word(word, past_guesses):
    """ Return `word` with only correctly guessed characters shown.

    Characters of `word` contained in `past_guesses` are kept; every
    other character is replaced with an underscore.  For example,
    obfuscated_word('hello', ['e', 'o']) returns '_e__o'.

    Args:
        word: a string
        past_guesses: a list of strings
    Returns:
        a new string with guesses shown and non-guesses masked
    """
    return ''.join(ch if ch in past_guesses else '_' for ch in word)
def ensemble_prediction_avg_1(predictions):
    """
    Combine classifier outputs into a binary disruption decision.

    Predictions that are exactly 0 or 1 are treated as hard (SVM-style)
    votes; every other value is a soft (conv-style) score.  The soft
    scores are averaged into one extra vote, then the mean of all votes
    is thresholded at 0.5.

    :param predictions: non-empty list of predictions of all classifiers
    :type predictions: list
    :return: 1 if a disruptive situation has been detected, else 0
    :rtype: int
    """
    threshold = 0.5
    soft_scores = [p for p in predictions if p != 0 and p != 1]
    hard_votes = [p for p in predictions if p == 0 or p == 1]
    # Only fold in the soft average when there is at least one soft
    # score; the original divided by len(soft_scores) unconditionally
    # and raised ZeroDivisionError when every prediction was 0 or 1.
    if soft_scores:
        hard_votes.append(sum(soft_scores) / len(soft_scores))
    avg_prediction = sum(hard_votes) / len(hard_votes)
    return 1 * (avg_prediction >= threshold)
def _num_items_2_heatmap_one_day_figsize(n):
""" uses linear regression model to infer adequate figsize
from the number of items
Data used for training:
X = [2,4,6,10,15,20,30,40,50,60]
y = [[10,1],[10,2],[10,3],[10,4],[10,6],[10,8],[10,10],[10,12],[10,15],[10,17]]
Parameters
----------
n : int
number of items
Returns
-------
(w,h) : tuple
the width and the height of the figure
"""
w = 10
h = 0.27082*n + 1.38153
return (int(w), int(h)) |
def wdrvire(b5, b7, alpha=0.01):
    """
    Wide Dynamic Range Vegetation Index Red-edge (Peng and Gitelson, 2011).

    .. math:: t1 = (alpha * b7 - b5) / (alpha * b7 + b5)
              WDRVIRE = t1 + ((1 - alpha) / (1 + alpha))

    :param b5: Red-edge 1.
    :type b5: numpy.ndarray or float
    :param b7: Red-edge 3.
    :type b7: numpy.ndarray or float
    :returns WDRVIRE: Index value

    .. Tip::
        Peng, Y., Gitelson, A. A. 2011. Application of chlorophyll-related \
        vegetation indices for remote estimation of maize productivity. \
        Agricultural and Forest Meteorology 151(9), 1267-1276. \
        doi:10.1016/j.agrformet.2011.05.005.
    """
    scaled_b7 = alpha * b7
    ratio = (scaled_b7 - b5) / (scaled_b7 + b5)
    return ratio + ((1 - alpha) / (1 + alpha))
def canonical_system_alpha(goal_z, goal_t, start_t, int_dt=0.001):
    """Compute parameter alpha of the canonical system.

    Parameters
    ----------
    goal_z : float
        Value of the phase variable at the end of the execution (> 0).
    goal_t : float
        Time at which the execution should be done. Make sure that
        goal_t > start_t.
    start_t : float
        Time at which the execution should start.
    int_dt : float, optional (default: 0.001)
        Time delta that is used internally for integration.

    Returns
    -------
    alpha : float
        Value of the alpha parameter of the canonical system.

    Raises
    ------
    ValueError
        If input values are invalid.
    """
    if goal_z <= 0.0:
        raise ValueError("Final phase must be > 0!")
    if start_t >= goal_t:
        raise ValueError("Goal must be chronologically after start!")

    execution_time = goal_t - start_t
    n_phases = int(execution_time / int_dt) + 1
    n_steps = n_phases - 1
    # execution_time must be approximately divisible by int_dt
    assert abs(n_steps * int_dt - execution_time) < 0.05
    return (1.0 - goal_z ** (1.0 / n_steps)) * n_steps
def model_name_sanitize(model_name: str):
    """Return the model name sanitized for filename purposes.

    Lowercases the name and collapses every run of whitespace (including
    leading/trailing) into single underscores.  The original chained
    .strip().rstrip(), which was redundant: str.split() with no argument
    already ignores surrounding whitespace.
    """
    return '_'.join(model_name.lower().split())
def _remove_base_path_from_file(base_path, filename):
"""
Turn a file into a route. This will probably get more
complicated to account for multiple OS and strange
file names
Parameters
----------
base_path : str
normalized base path
filename : str
filename to remove base_path from
Returns
-------
s : filename with base_path removed
"""
return filename.replace(base_path, "", 1) |
def transformer_base_v1(configs):
    """ Configuration of transformer_base_v1.

    This is equivalent to `transformer_base_v1` in tensor2tensor.
    """
    # model configurations
    configs.setdefault("model_configs", {}).update({
        'model': "Transformer",
        'n_layers': 6,
        'n_head': 8,
        'd_model': 512,
        'd_word_vec': 512,
        'd_inner_hid': 2048,
        'dropout': 0.1,
        'label_smoothing': 0.1,
        "layer_norm_first": False,
        'tie_input_output_embedding': True,
    })
    # optimizer configurations
    configs.setdefault("optimizer_configs", {}).update({
        'optimizer': "adam",
        'learning_rate': 0.1,
        'grad_clip': -1.0,
        'optimizer_params': {"betas": [0.9, 0.98], "eps": 1e-9},
        'schedule_method': "noam",
        'scheduler_configs': {"d_model": 512, "warmup_steps": 4000},
    })
    return configs
def retrieve_pincodes_from_response(reverse_geocode_result):
    """Gather all possible pincodes from a raw GMaps API response.

    Args:
        reverse_geocode_result (list): Response from GMaps API; each
            entry carries an "address_components" list of dicts.

    Returns:
        List: "long_name" of every component typed "postal_code".

    The original indexed `reverse_geocode_result[0]` into a dead
    variable before the loop, crashing on an empty response; that
    statement is removed, so an empty response now yields [].
    """
    codes = []
    for result in reverse_geocode_result:
        for component in result["address_components"]:
            if "postal_code" in component["types"]:
                codes.append(component["long_name"])
    return codes
def split_into_lines(text,char_limit,delimiters=' \t\n',
                     sympathetic=False):
    """Split a string into multiple lines with maximum length

    Splits a string into multiple lines on one or more delimiters
    (defaults to the whitespace characters i.e. ' ',tab and newline),
    such that each line is no longer than a specified length.

    For example:

    >>> split_into_lines("This is some text to split",10)
    ['This is','some text','to split']

    If it's not possible to split part of the text to a suitable
    length then the line is split "unsympathetically" at the
    line length, e.g.

    >>> split_into_lines("This is supercalifragilicous text",10)
    ['This is','supercalif','ragilicous','text']

    Set the 'sympathetic' flag to True to include a hyphen to
    indicate that a word has been broken, e.g.

    >>> split_into_lines("This is supercalifragilicous text",10,
    ...                  sympathetic=True)
    ['This is','supercali-','fragilico-','us text']

    To use an alternative set of delimiter characters, set the
    'delimiters' argument, e.g.

    >>> split_into_lines("This: is some text",10,delimiters=':')
    ['This',' is some t','ext']

    Arguments:
      text: string of text to be split into lines
      char_limit: maximum length for any given line
      delimiters: optional, specify a set of non-default
        delimiter characters (defaults to whitespace)
      sympathetic: optional, if True then add hyphen to
        indicate when a word has been broken

    Returns:
      List of lines (i.e. strings).
    """
    lines = []
    hyphen = '-'
    while len(text) > char_limit:
        # Locate nearest delimiter before the character limit
        i = None
        splitting_word = False
        try:
            # Check if delimiter occurs at the line boundary
            if text[char_limit] in delimiters:
                i = char_limit
        except IndexError:
            pass
        if i is None:
            # Look for delimiter within the line
            for delim in delimiters:
                try:
                    j = text[:char_limit].rindex(delim)
                    # Keep the rightmost delimiter found so far across
                    # all delimiter types (i stays None until a hit)
                    i = max([x for x in [i,j] if x is not None])
                except ValueError:
                    # This delimiter doesn't occur within the line
                    pass
        if i is None:
            # Unable to locate delimiter within character
            # limit so set to the limit
            i = char_limit
        # Are we splitting a word?
        try:
            if text[i] not in delimiters and sympathetic:
                splitting_word = True
                # Step back one character so the appended hyphen still
                # fits within char_limit
                i = i - 1
        except IndexError:
            pass
        lines.append("%s%s" % (text[:i].rstrip(delimiters),
                               hyphen if splitting_word else ''))
        # Consume the emitted prefix plus any leading delimiters
        text = text[i:].lstrip(delimiters)
    # Append remainder
    lines.append(text)
    return lines
def months_surrounding(month, width=1):
    """ Create a tuple with the ordinal of the given month and the ones
    before and after it up to a certain width, wrapping around the
    calendar.

    Parameters
    ----------
    month : int
        Ordinal of month, e.g. July is 7
    width : int
        Amount of buffer months to include on each side

    Examples
    --------
    Grab July with June and August

    >>> months_surrounding(7, 1)
    (6, 7, 8)
    """
    # Edge case: a width of 6 or more already covers all twelve months.
    if width >= 6:
        return tuple(range(1, 13))
    # Wrap each ordinal into 1..12 with modular arithmetic.
    return tuple((m - 1) % 12 + 1
                 for m in range(month - width, month + width + 1))
def list_chunks(mylist, n):
    """Return successive n-sized chunks from mylist (last may be shorter)."""
    return [mylist[start:start + n] for start in range(0, len(mylist), n)]
def to_list(x):
    """Wrap x in a list unless it is already a list or tuple."""
    return x if isinstance(x, (list, tuple)) else [x]
def checkIfPointIsInTriangle(
    x1: float, y1: float, x2: float, y2: float, x3: float, y3: float, x: float, y: float
) -> bool:
    """
    Test whether the point (x, y) lies within the triangle whose
    vertices are (x1, y1), (x2, y2) and (x3, y3).

    Uses the barycentric method; see
    `http://totologic.blogspot.com/2014/01/accurate-point-in-triangle-test.html`

    Parameters
    ----------
    x1, y1 : float
        coordinates of the first vertex of the bounding triangle
    x2, y2 : float
        coordinates of the second vertex of the bounding triangle
    x3, y3 : float
        coordinates of the third vertex of the bounding triangle
    x, y : float
        coordinates of the point being tested
    """
    # Both barycentric coordinates a and b share the same denominator.
    denom = (y2 - y3) * (x1 - x3) + (x3 - x2) * (y1 - y3)
    a = ((y2 - y3) * (x - x3) + (x3 - x2) * (y - y3)) / denom
    b = ((y3 - y1) * (x - x3) + (x1 - x3) * (y - y3)) / denom
    c = 1.0 - a - b
    # Tolerance so points lying on an edge of the triangle still count.
    epsilon = 1e-10
    return all(coord + epsilon >= 0.0 and coord - epsilon <= 1.0
               for coord in (a, b, c))
def first_non_null(values):
    """Return the first value in the list that is not None, or None."""
    return next((v for v in values if v is not None), None)
def scheduler(epoch):
    """Learning Rate Schedule.

    Learning rate is scheduled to be reduced after 51, 101, 201 epochs.
    Called automatically every epoch as part of callbacks during
    training.

    # Arguments
        epoch (int): The number of epochs
    # Returns
        learning rate (float32)
    """
    for boundary, rate in ((51, 0.1), (101, 0.01), (201, 0.001)):
        if epoch < boundary:
            return rate
    return 0.0001
def create_dict_for_json(objs, listvalues):
    """Build the data dictionary for json insertion, pairing each key in
    `listvalues` with the value at the same position in `objs`."""
    return {key: objs[i] for i, key in enumerate(listvalues)}
def _display_name(pname, v):
"""returns (str) like 'pname/pid/host alias' """
pinfo = v['proc_info']
host = pinfo.get('host', 'non-def')
pid = pinfo.get('pid', 'non-def')
alias = pinfo.get('alias','non-def')
return '%s/%s/%s %s' % (pname, pid, host, alias) |
def is_empty(s):
    """
    True if None or a string that is empty or only whitespace

    >>> is_empty(None)
    True
    >>> is_empty("hello")
    False
    >>> is_empty(" \t ")
    True
    """
    if s is None:
        return True
    return len(s) == 0 or s.isspace()
def get_after(keyword, text):
    """Return everything in `text` after the first occurrence of `keyword`.

    The original advanced only one character past the *start* of the
    match (`index + 1`), so any keyword longer than one character left
    its own tail in the result; the whole keyword is skipped now.

    Raises ValueError when `keyword` is absent (str.index behaviour,
    unchanged).
    """
    return text[text.index(keyword) + len(keyword):]
def validate_comments(args):
    """Validate comment details.

    Returns "valid" on success, otherwise a (dict, 401) tuple whose dict
    carries the status code and an error message.  Missing fields are
    caught via the raised KeyError.
    """
    try:
        has_empty_field = (args["question_id"] == ''
                           or args["title"] == ''
                           or args["comment"] == '')
        if has_empty_field:
            return {
                "status": 401,
                "error": "Fields cannot be left empty"
            }, 401
        if args["title"].isdigit() or args["comment"].isdigit():
            return {
                "status": 401,
                "error": "The fields should be described in words"
            }, 401
        if args["question_id"].isalpha():
            return {
                "status": 401,
                "error": "The field should be an integer"
            }, 401
        return "valid"
    except Exception as error:
        return {
            "status": 401,
            "error": "please provide all the fields, missing " + str(error)
        }, 401
def _select_imlogdir(is_train):
"""Choose log name for image logging."""
if is_train:
return 'denoised/val/'
else:
return 'denoised/test/' |
def convert_datum(datum):
    """ Convert one normalized data value to acceleration (m/s^2). """
    acceleration = datum * 9.81 / 1024
    return acceleration
def write(fn, data):
    """
    Write string `data` to the file `fn`, replacing existing contents.

    Uses a context manager so the handle is closed even when the write
    raises (the original open()/close() pair leaked it on error).

    :param fn: path of the file to write
    :param data: string to write
    :return: True on success
    """
    with open(fn, "w") as f:
        f.write(data)
    return True
def remove_new_lines_in_paragraph(article):
    """When we publish articles to dev.to sometimes the paragraphs don't look very good.
    So we will remove all new lines from paragraphs before we publish them. This means we
    don't have to have very long lines in the document making it easier to edit.

    Some elements we don't want to remove the newlines from, like code blocks or frontmatter.
    So the logic is simple remove new lines from elements except specific ones like code blocks.
    Of course code blocks can span multiple lines so when we see a code block ``` we skip lines
    until we see the end of that code block ```. The same logic applies to all the elements
    we want to skip.

    Args:
        article (str): The article we want to publish.

    Returns:
        str: The article with new lines removed from article.
    """
    # Markers whose paragraphs must keep their newlines (code fences,
    # frontmatter, lists, images, admonitions).
    skip_chars = ["```", "---", "-", "*", "![", ":::"]
    # Non-empty while inside a multi-paragraph skip region; holds the
    # marker whose reappearance at a paragraph end closes the region.
    endswith_char = ""
    # Paragraphs are separated by blank lines ("\n\n").
    article_lines = article.split("\n\n")
    for index, line in enumerate(article_lines):
        line_startswith_skip_char = [char for char in skip_chars if line.startswith(char)]
        if line_startswith_skip_char or endswith_char:
            if line_startswith_skip_char:
                # NOTE(review): first matching marker wins; "---" is
                # checked before "-" only because of list order.
                endswith_char = line_startswith_skip_char[0]
            if line.endswith(endswith_char):
                # Marker also closes this paragraph (e.g. a fenced code
                # block contained in one paragraph) -> region ends here.
                endswith_char = ""
            continue
        article_lines[index] = line.replace("\n", " ")
    return "\n\n".join(article_lines)
def dzip(items1, items2, cls=dict):
    """
    Zips elementwise pairs between items1 and items2 into a dictionary.
    Values from items2 can be broadcast onto items1.

    Args:
        items1 (Iterable[KT]): full sequence
        items2 (Iterable[VT]):
            either a sequence of one item (broadcast over items1) or a
            sequence of equal length to ``items1``
        cls (Type[dict], default=dict): dictionary type to use.

    Returns:
        Dict[KT, VT]: similar to ``dict(zip(items1, items2))``.

    Example:
        >>> import ubelt as ub
        >>> assert ub.dzip([1, 2, 3], [4]) == {1: 4, 2: 4, 3: 4}
        >>> assert ub.dzip([1, 2, 3], [4, 4, 4]) == {1: 4, 2: 4, 3: 4}
        >>> assert ub.dzip([], [4]) == {}
    """
    def _sized(items):
        # Materialize iterables that do not support len().
        try:
            len(items)
        except TypeError:
            return list(items)
        return items

    keys = _sized(items1)
    values = _sized(items2)
    if len(keys) == 0 and len(values) == 1:
        # Corner case: an empty key list absorbs the broadcast value.
        # This means the equality check won't fire for the case where
        # items1 and items2 were meant to correspond but len(items2)==1.
        values = []
    elif len(values) == 1 and len(keys) > 1:
        values = values * len(keys)
    if len(keys) != len(values):
        raise ValueError('out of alignment len(items1)=%r, len(items2)=%r' % (
            len(keys), len(values)))
    return cls(zip(keys, values))
def no_disp_appearance_page(on=0):
    """Hide the video Appearance settings page.

    DESCRIPTION
    When enabled, this setting hides the video appearance
    configuration page.

    COMPATIBILITY
    All Windows versions.

    MODIFIED VALUES
    NoDispAppearancePage : dword : 00000000 = Disabled;
                           00000001 = Restriction enabled.
    """
    template = ('[HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\'
                'CurrentVersion\\Policies\\System]\n'
                '"NoDispAppearancePage"=dword:0000000%d')
    return template % (1 if on else 0)
def getSpecPath(baseName, baseDir, e0, nTraj):
    """
    Generate a file path for a Monte Carlo simulated spectrum.

    Parameters
    ----------
    baseName : str
        The base name for the simulation, e.g. "bulkC".
    baseDir : str
        The directory to write the spectrum to, e.g.
        "C:/username/Documents/work/project".
        Note: no trailing path separator!
    e0 : number
        The voltage (kV) for the simulation, e.g. 15.
    nTraj : number
        The number of trajectories for the simulation, e.g. 20000.

    Returns
    -------
    str
        The path of the .msa file to write, e.g.
        "C:/username/Documents/work/project/bulkC-15-kV-20000-Traj.msa".
    """
    stem = f"{baseName}-{e0:g}-kV-{nTraj:g}-Traj"
    return f"{baseDir}/{stem}.msa"
def sort_priority3(values, group):
    """
    Sort `values` in place, placing members of `group` first (each
    partition itself sorted ascending).

    :param values: list to sort (mutated in place)
    :param group: collection of high-priority items
    :return: True when at least one element of `values` was in `group`
    """
    found = False

    def priority_key(item):
        nonlocal found
        if item in group:
            found = True
            return (0, item)
        return (1, item)

    values.sort(key=priority_key)
    return found
def inherits_from(obj, a_class):
    """
    Return:
        True if obj's class is a strict subclass of a_class (i.e. it
        inherits from a_class but is not a_class itself).
    """
    obj_type = type(obj)
    return obj_type is not a_class and issubclass(obj_type, a_class)
def contains(text, pattern):
    """Return a boolean indicating whether pattern occurs in text."""
    assert isinstance(text, str), 'text is not a string: {}'.format(text)
    assert isinstance(pattern, str), 'pattern is not a string: {}'.format(pattern)
    # The original hand-rolled a naive scan with backtracking, which is
    # equivalent to plain substring search but O(n*m) worst case; the
    # `in` operator uses CPython's C-level search instead.  Also fixes
    # the second assert's message, which formatted `text` instead of
    # `pattern`.
    return pattern in text
def sigma_g(z, params):
    """
    Non-linear velocity rms, used as the smoothing scale in the Fingers
    of God expression for galaxies.  Constant in z; the `0.*z` term
    broadcasts the value to the shape of z.
    """
    smoothing_scale = params['sigma_g']  # Mpc
    return smoothing_scale + 0. * z
def rotate_turn(current, players):
    """
    Return the player after `current`, wrapping back to the first.

    :param current: the player whose turn is ending
    :param players: ordered list of players
    :return: the next player in rotation
    """
    return players[(players.index(current) + 1) % len(players)]
def read(filepath):
    """
    Read the contents from a file.

    :param str filepath: path to the file to be read
    :return: file contents
    """
    with open(filepath, 'r') as handle:
        return handle.read()
def updated_minimal_record(minimal_record):
    """Update fields (done after record create) for the Dublin Core
    serializer: mark access open and give unnamed creators a fallback
    name."""
    minimal_record["access"]["status"] = "open"
    for creator in minimal_record["metadata"]["creators"]:
        person = creator["person_or_org"]
        if not person.get("name"):
            person["name"] = "Name"
    return minimal_record
def split_phylogeny(p, level="s"):
    """
    Truncate a QIIME-formatted taxonomy string at a classification level.

    :type p: str
    :param p: A QIIME-formatted taxonomy string: k__Foo; p__Bar; ...
    :type level: str
    :param level: The level of identification: kingdom (k), phylum (p),
        class (c), order (o), family (f), genus (g) or species (s).
        Defaults to species.
    :rtype: str
    :return: The taxonomy string up to and including the classification
        given by `level`.
    """
    marker = level + "__"
    prefix, suffix = p.split(marker)[0], p.split(marker)[1]
    return prefix + marker + suffix.split(";")[0]
def bin_old(n):
    """bin() replacement that works with Python 2.4 (no '0b' prefix);
    values below 1 yield '0'."""
    if n < 1:
        return '0'
    bits = []
    while n:
        n, remainder = divmod(n, 2)
        bits.append('1' if remainder else '0')
    return ''.join(reversed(bits))
def Hello(name='everybody'):
    """Greet the named person (defaults to everybody)."""
    return ''.join(('Hello ', name))
def _gi_gr(gr,dr=False,dg=False):
"""(g-i) = (g-r)+(r-i), with Juric et al. (2008) stellar locus for g-r,
BOVY: JUST USES LINEAR APPROXIMATION VALID FOR < M0"""
if dg:
return 1.+1./2.34
elif dr:
return -1.-1./2.34
else:
ri= (gr-0.12)/2.34
return gr+ri |
def getitem(dictionary, keyvar):
    """Custom django template filter: look up an item of a dictionary
    through the key contained in a template variable.

    .. code-block:: python

        context_data = {
            'data':{
                'foo':'bar',
            },
            'key':'foo',
        }

        template = Template('{% load awltags %}{{data|getitem:key}}')
        context = Context(context_data)
        result = template.render(context)

        >>> result
        'bar'

    .. note::
        Any KeyErrors are ignored and return an empty string
    """
    try:
        value = dictionary[keyvar]
    except KeyError:
        return ''
    return value
def get_texts_old(site, titles):
    """Given a list of titles, map each title to the full text of its
    page on `site`."""
    return {title: site.pages[title].text() for title in titles}
def parens_around_char(label):
    """Wrap the first character of `label` in parentheses.

    :param str label: Must contain at least one character
    """
    return f"({label[0]}){label[1:]}"
def has_left_cocomponent_fragment(root, cocomp_index):
    """
    Return True if cocomponent at cocomp_index has a cocomponent to its left
    with same comp_num
    INPUT:
    - ``root`` -- The forest to which cocomponent belongs
    - ``cocomp_index`` -- Index at which cocomponent is present in root
    OUTPUT:
    ``True`` if cocomponent at cocomp_index has a cocomponent
    to its left with same comp_num else ``False``
    EXAMPLES::
        sage: from sage.graphs.modular_decomposition import Node, NodeType, \
              create_normal_node, has_left_cocomponent_fragment
        sage: forest = Node(NodeType.FOREST)
        sage: forest.children = [create_normal_node(2), \
              create_normal_node(3), create_normal_node(1)]
        sage: series_node = Node(NodeType.SERIES)
        sage: series_node.children = [create_normal_node(4), \
              create_normal_node(5)]
        sage: parallel_node = Node(NodeType.PARALLEL)
        sage: parallel_node.children = [create_normal_node(6), \
              create_normal_node(7)]
        sage: forest.children.insert(1, series_node)
        sage: forest.children.insert(3, parallel_node)
        sage: forest.children[0].comp_num = 1
        sage: forest.children[1].comp_num = 1
        sage: forest.children[1].children[0].comp_num = 1
        sage: forest.children[1].children[1].comp_num = 1
        sage: has_left_cocomponent_fragment(forest, 1)
        True
        sage: has_left_cocomponent_fragment(forest, 0)
        False
    """
    # Any earlier sibling sharing this component number means the
    # cocomponent is fragmented to the left.
    target = root.children[cocomp_index].comp_num
    return any(root.children[j].comp_num == target
               for j in range(cocomp_index))
def newtons_second_law_of_motion(mass: float, acceleration: float) -> float:
    """
    Compute force via Newton's second law, F = m * a.

    :param mass: mass of the body
    :param acceleration: acceleration of the body
    :return: the resulting force (product of the two inputs)

    >>> newtons_second_law_of_motion(10, 10)
    100
    >>> newtons_second_law_of_motion(2.0, 1)
    2.0
    """
    # The previous version pre-initialised a dead ``force = float()`` and
    # wrapped the multiplication in a broad try/except that silently
    # returned -0.0 on any error. Multiplying two numbers cannot fail
    # that way, so the guard only hid genuine type errors; it is removed.
    return mass * acceleration
def zset_score_pairs(response, **options):
    """
    When ``withscores`` is set in ``options``, convert a flat
    [value, score, value, score, ...] response into a list of
    (value, score) tuples, casting each score with ``score_cast_func``
    (default ``float``). Otherwise return the response untouched.
    """
    if not (response and options.get('withscores')):
        return response
    cast = options.get('score_cast_func', float)
    items = list(response)
    # Pair consecutive elements; a trailing unpaired value is dropped,
    # matching zip() truncation.
    return [(items[i], cast(items[i + 1]))
            for i in range(0, len(items) - 1, 2)]
def set_turn(brawl, identifier):
    """
    Mark exactly the monster matching ``identifier`` as having the
    current turn; every other monster's turn flag is cleared.
    Mutates and returns ``brawl``.
    """
    wanted = str(identifier)
    for monster in brawl:
        # Comparison result is the flag itself.
        monster['my_turn'] = str(monster['identifier']) == wanted
    return brawl
def assoc(d, key, val):
    """Return a shallow copy of ``d`` with ``key`` mapped to ``val``.

    The original mapping is left unmodified. Using ``.copy()`` keeps
    the concrete dict subclass of ``d``.
    """
    updated = d.copy()
    updated[key] = val
    return updated
def compact_capitalized_geography_string(s):
    """ Go from lowercase "county, state-abbrev" string to Capitalized string
    Args:
        s:
    Returns:
    Examples:
        "lancaster, pa"      --> "LancasterPA"
        "anne arundel, md"   --> "AnneArundelMD"
        "st. mary's, md"     --> "StMarysMD"
    """
    # Drop punctuation, title-case each word, squeeze out spaces.
    cleaned = s
    for ch in (',', '.', "'"):
        cleaned = cleaned.replace(ch, '')
    cleaned = cleaned.title().replace(' ', '')
    # Upper-case the trailing state-abbreviation letter.
    return cleaned[:-1] + cleaned[-1:].capitalize()
def rotational_energy(tire_moment, tire_d, v):
    """
    Rotational kinetic energy stored in a spinning tire.

    :param tire_moment: tire moment of inertia
    :param tire_d: tire diameter
    :param v: vehicle velocity (m/s)
    :return: energy in Joules (0.5 * I * omega^2)
    """
    # Angular velocity from linear speed: omega = v / r = 2v / d.
    angular_velocity = 2 * v / tire_d
    return (angular_velocity ** 2) * tire_moment / 2
def is_boring(cmd, boring_list):
    """Return True when ``cmd`` matches any pattern in ``boring_list``,
    i.e. it is an irrelevant part of the build process.

    See tuscan/boring_commands.yaml for details.
    """
    return any(pattern.match(cmd) for pattern in boring_list)
def cdist(x, y):
    """
    Returns the circular distance between two points on a unit circle. The
    points `x` and `y` must be given by their angle (in degree) on the unit
    circle.
    >>> cdist(90.0, 350.0)
    100.0
    >>> cdist(90.0, 260.0)
    170.0
    >>> cdist(90.0, 280.0)
    170.0
    >>> cdist(-20.0, 270.0)
    70.0
    """
    # Shorter of the clockwise and counter-clockwise arcs.
    diff = x - y
    return min(diff % 360, (-diff) % 360)
def normalize(img):
    """
    Center pixel values around zero: subtract 128 then divide by 128.
    """
    offset = 128
    return (img - offset) / offset
def format_time(seconds, with_hours=True):
    """Render ``seconds`` as a hh:mm:ss string (mm:ss when
    ``with_hours`` is false). Negative inputs get a leading '-'.
    """
    sign = ''
    if seconds < 0:
        sign = '-'
        seconds = -seconds
    m, s = divmod(seconds, 60)
    if with_hours:
        h, m = divmod(m, 60)
        return sign + '%02d:%02d:%02d' % (h, m, s)
    return sign + '%02d:%02d' % (m, s)
def calc_distance(l):
    """Calculates distance between list items in two lists"""
    # Collect the sense values that are present; the smallest one becomes
    # the zero point so positions are relative, not absolute.
    senses = [entry[1] for entry in l if entry[1] != '']
    offset = min(senses, default=0)
    shifted = [[entry[0], entry[1] - offset] for entry in l if entry[1] != '']
    total = 0
    for position, entry in enumerate(shifted):
        if len(entry) > 1 and entry[1] != '':  # a sense was found
            # How far the sense index strays from its slot in the token list.
            total += abs(entry[1] - position)
    return total
def parse_status(status):
    """Parse a gino database status string like ``"UPDATE 1"``.

    Returns True when the trailing row count is non-zero; any
    non-string input yields False.
    """
    if not isinstance(status, str):
        return False
    # Expect exactly "<VERB> <count>"; anything else raises ValueError.
    _, count = status.split(" ")
    return bool(int(count))
def order_dict(iter: dict) -> dict:
    """Return a new dict with the same items, ordered by sorted key.

    An empty input is returned as-is (same object).
    """
    if not iter:
        return iter
    return {key: iter[key] for key in sorted(iter)}
def has_value(value):
    """
    Treat 0 and False as real values, but None and blank strings as
    missing. Note: a truthy input is returned unchanged (not coerced
    to bool), matching the original short-circuit behaviour.
    """
    if value:
        return value
    return value == 0 or value is False
def description_cleanup(s):
    """Clean a description: trim leading/trailing '.' and '"' characters
    and collapse internal whitespace runs to single spaces.

    None passes through as None.
    """
    if s is None:
        return None
    trimmed = s.strip('."')
    # split() with no args already discards all whitespace, so joining
    # with single spaces normalises the run lengths.
    return ' '.join(trimmed.split())
def threshold_RMSE(upper_bound, lower_bound, q=3):
    """Compute threshold for RMSE metric.
    Args:
        upper_bound (int): Upper bound of regression outputs
        lower_bound (int): lower bound of regression outputs
        q (int): Quantification parameter
    Returns:
        threshold (float): The minimal threshold
    """
    span = upper_bound - lower_bound
    return span / q
def ld(s, t):
    """
    Levenshtein distance between ``s`` and ``t``.

    Based on the recursive implementation from Rosetta Code:
    https://rosettacode.org/wiki/Levenshtein_distance#Python

    The previous version was documented as "memoized" but had no cache,
    making it exponential; an ``lru_cache`` on an inner helper restores
    the intended O(len(s) * len(t)) behaviour with the same results.

    :param s: first string
    :param t: second string
    :return: minimal number of single-character insertions, deletions
        and substitutions turning ``s`` into ``t``
    """
    from functools import lru_cache

    @lru_cache(maxsize=None)
    def _dist(a, b):
        if not a:
            return len(b)
        if not b:
            return len(a)
        if a[0] == b[0]:
            return _dist(a[1:], b[1:])
        return 1 + min(_dist(a, b[1:]),      # Deletion.
                       _dist(a[1:], b),      # Insertion.
                       _dist(a[1:], b[1:]))  # Substitution.

    return _dist(s, t)
def get_dot_size(val, verbose=False):
    """
    Choose a marker size (pixels) for ``val``.

    :param val: the value to test
    :param verbose: more output (currently unused)
    :return: the marker size in pixels
    """
    sizes = [6, 9, 12, 15, 18]
    # One fewer cutoffs than sizes; values above the last cutoff get
    # the largest marker.
    cutoffs = [10, 20, 30, 40]
    for cutoff, size in zip(cutoffs, sizes):
        if val <= cutoff:
            return size
    return sizes[-1]
def groupOp(pair1, pair2, n1, n2):
    """Group operation on pairs (g1, h1) x (g2, h2):
    G is multiplicative mod n1, H is additive mod n2, giving
    (g1*g2 mod n1, h1+h2 mod n2)."""
    g = (pair1[0] * pair2[0]) % n1
    h = (pair1[1] + pair2[1]) % n2
    return (g, h)
def lineupPos(i, num, spacing):
    """
    Position of object ``i`` in a one-dimensional lineup of ``num``
    objects centered around zero, with ``spacing`` between neighbours.
    """
    assert num >= 1
    assert 0 <= i < num
    raw = float(i) * spacing
    # Shift so the whole lineup is centered on the origin.
    center_offset = (float(spacing) * (num - 1)) / 2.
    return raw - center_offset
def latexify(ticklabels):
    """Wrap each tick label in $...$ so it renders as LaTeX math."""
    return ["$%s$" % label for label in ticklabels]
def flatten(items):
    """Flattens a potentially nested sequence into a flat list.
    :param items: the sequence to flatten
    >>> flatten((1, 2))
    [1, 2]
    >>> flatten([1, (2, 3), 4])
    [1, 2, 3, 4]
    >>> flatten([1, (2, [3, 4]), 5])
    [1, 2, 3, 4, 5]
    """
    flat = []
    for element in items:
        # Recurse into the container types we know how to walk.
        if isinstance(element, (frozenset, list, set, tuple)):
            flat.extend(flatten(element))
        else:
            flat.append(element)
    return flat
def vector3d(vector):
    """Promote a 2d or 3d sequence to a 3-tuple of floats; a missing
    z component defaults to 0."""
    x = float(vector[0])
    y = float(vector[1])
    z = float(vector[2]) if len(vector) != 2 else 0.
    return x, y, z
def _create_train_test_split(training_images_list, label_images_list, train_split_percent):
"""
Divides the training images folder into training images and test images, with train_split
percent being the percent of images in training set and the rest in test set. Note that,
the Nerve Segmentation Challenge gives a set of images in the 'test' folder. That folder
is not touched at all during the training process. It is referred to as the validation
data throughout the documentation of this code. Test images in our case is a fraction of
the images in the 'train' folder
:param training_images_list: list of images from the training folder
:param label_images_list: list of label images from the training folder
:param train_split_percent: percentage of images used for training
:return:
"""
assert len(training_images_list) == len(label_images_list), ('Number of training images and label '
'images must be same. Please make sure '
'equal number of training images and '
'label images')
split_index = int(len(training_images_list) * train_split_percent)
train_split_images_list = training_images_list[0:split_index]
train_split_labels_list = label_images_list[0:split_index]
test_split_images_list = training_images_list[split_index:len(training_images_list)]
test_split_labels_list = label_images_list[split_index:len(training_images_list)]
print('Finished splitting data into %s training images and %s '
'test images' % (len(train_split_images_list), len(test_split_images_list)))
return train_split_images_list, train_split_labels_list, test_split_images_list, test_split_labels_list |
def get_event_rule_name(stackname, instanceId):
    """
    Build the CloudWatch event-rule name for a stack/instance pair,
    truncated from the left to at most 63 characters.

    :param stackname: stack name prefix
    :param instanceId: instance identifier (stringified)
    :return: str
    """
    rule_name = stackname + '-cw-event-rule-' + str(instanceId)
    # Keep only the trailing 63 characters so the suffix survives.
    return rule_name[-63:]
def add_args_to_json_and_context(alert_id, assigned_to, status, classification, determination, comment):
    """Build the request json body and the context entry from the given
    arguments; only truthy arguments are included.
    """
    json = {}
    context = {
        'ID': alert_id
    }
    # (json key, context key, value) for each optional field.
    optional_fields = (
        ('assignedTo', 'AssignedTo', assigned_to),
        ('status', 'Status', status),
        ('classification', 'Classification', classification),
        ('determination', 'Determination', determination),
        ('comment', 'Comment', comment),
    )
    for body_key, context_key, value in optional_fields:
        if value:
            json[body_key] = value
            context[context_key] = value
    return json, context
def map_complex_type(complex_type, type_map):
    """
    Allows to cast elements within a dictionary to a specific type
    Example of usage:
        DEPLOYMENT_CONFIGURATION_TYPE_MAP = {
            'maximum_percent': 'int',
            'minimum_healthy_percent': 'int'
        }
        deployment_configuration = map_complex_type(module.params['deployment_configuration'],
                                                    DEPLOYMENT_CONFIGURATION_TYPE_MAP)
    This ensures all keys within the root element are casted and valid integers

    :param complex_type: dict, list or scalar to cast (None passes through)
    :param type_map: dict of key -> builtin-type-name (or [map] for lists),
        or a builtin type name string for scalars
    :return: the casted structure; if a dict key is missing from type_map,
        the original dict is returned unchanged (original behaviour)
    """
    # Look casters up on the builtins module. The previous code used
    # globals()['__builtins__'][type_map], which only works when
    # __builtins__ happens to be a dict (imported modules) and breaks
    # when it is the builtins module itself (e.g. when run as __main__).
    import builtins

    if complex_type is None:
        return
    new_type = type(complex_type)()
    if isinstance(complex_type, dict):
        for key in complex_type:
            if key in type_map:
                if isinstance(type_map[key], list):
                    # Lists carry a single nested map applied to each element.
                    new_type[key] = map_complex_type(
                        complex_type[key],
                        type_map[key][0])
                else:
                    new_type[key] = map_complex_type(
                        complex_type[key],
                        type_map[key])
            else:
                return complex_type
    elif isinstance(complex_type, list):
        for i in range(len(complex_type)):
            new_type.append(map_complex_type(
                complex_type[i],
                type_map))
    elif type_map:
        return getattr(builtins, type_map)(complex_type)
    return new_type
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.