content
stringlengths 42
6.51k
|
|---|
def sumblock(sequence, right_index, total):
    """Find a contiguous block of ``sequence`` summing to ``total``.

    The block must end at ``right_index`` and is grown leftwards one
    element at a time.  If the running sum overshoots ``total``, the
    search restarts with the block ending one position further left.

    Returns the list of indices of the matching block, or [] when no
    block sums to ``total``.

    >>> sumblock([1, 2, 3, 4, 5], right_index=4, total=5)
    [4]
    >>> sumblock([1, 2, 3, 4, 5], right_index=3, total=7)
    [2, 3]
    >>> sumblock([1, 2, 3, 4, 5], right_index=3, total=9)
    [1, 2, 3]
    """
    running = sequence[right_index]
    if running == total:
        return [right_index]
    left_index = right_index - 1
    while left_index >= 0:
        running += sequence[left_index]
        if running == total:
            return list(range(left_index, right_index + 1))
        if running > total:
            # Overshot: retry with the block ending one step to the left.
            return sumblock(sequence, right_index - 1, total)
        left_index -= 1
    return []
|
def buble_sort_imp_1(l):
    """Sort ``l`` in place with an optimised bubble sort and return it.

    After each full pass the largest remaining element has bubbled to
    the end of the unsorted region, so the region shrinks by one per
    pass, and the loop stops early when a pass makes no swap.

    Complexity: O(n^2) worst/average case, O(n) best case (already
    sorted input stops after one pass).

    :param l: list of mutually comparable items (mutated in place)
    :return: the same list, sorted ascending
    """
    swap = True
    n = len(l)
    while swap:
        swap = False
        for i in range(n - 1):
            if l[i] > l[i + 1]:
                l[i], l[i + 1] = l[i + 1], l[i]
                swap = True
        # The largest element of this pass is now in its final slot.
        n -= 1
    return l
|
def is_std_logic_vector(value, bits):
    """Return whether ``value`` is the Python equivalent of an
    std_logic_vector of the given length, i.e. all bits above the low
    ``bits`` bits are either all zero or all one (sign extension)."""
    mask = (1 << bits) - 1
    return (value & ~mask) in (0, -1)
|
def get_exports(pe):
    """Collect exported symbols from a parsed PE file.

    @param pe: pefile.PE-like object (may or may not expose a
        DIRECTORY_ENTRY_EXPORT attribute).
    @return: list of dicts with "address", "name" and "ordinal" keys;
        an empty list when the PE exports nothing.  (The old docstring
        claimed "dict or None", which this function never returned.)
    """
    exp_list = []
    if hasattr(pe, "DIRECTORY_ENTRY_EXPORT"):
        # Hoist the invariant base address out of the loop.
        image_base = pe.OPTIONAL_HEADER.ImageBase
        for exported_symbol in pe.DIRECTORY_ENTRY_EXPORT.symbols:
            if exported_symbol.name is not None:
                export_name = exported_symbol.name.decode('utf-8', errors='ignore')
            else:
                # Some PEs export by ordinal only, with no name.
                export_name = 'n/a'
            exp_list.append({
                "address": hex(image_base + exported_symbol.address),
                'name': export_name,
                "ordinal": exported_symbol.ordinal})
    return exp_list
|
def signed2bytes(signed_list):
    """Encode signed 8-bit integers (-128..127) as two's-complement
    bytes (1 sign bit + 7 value bits)."""
    return bytes(value + 256 if value < 0 else value
                 for value in signed_list)
|
def points(start, end):
    """Rasterise the segment from ``start`` to ``end`` (both inclusive)
    with Bresenham's 2D line-drawing algorithm.

    :param start: (x, y) start point
    :param end: (x, y) end point
    :return: list of (x, y) integer points along the line
    """
    x0, y0 = start
    x1, y1 = end
    dx, dy = abs(x1 - x0), abs(y1 - y0)
    sx = 1 if x0 < x1 else -1
    sy = 1 if y0 < y1 else -1
    err = dx - dy
    result = [(x0, y0)]
    while (x0, y0) != (x1, y1):
        doubled = 2 * err
        if doubled > -dy:
            # Overshot in the y direction: step along x.
            err -= dy
            x0 += sx
        if doubled < dx:
            # Overshot in the x direction: step along y.
            err += dx
            y0 += sy
        result.append((x0, y0))
    return result
|
def knight_danger(row, column, attacking_row, attacking_column):
    """Return True when the attacking piece shares the square or sits a
    knight's move (any of the eight L-shaped offsets) away.

    Arguments:
    row -- first piece's row number
    column -- first piece's column number
    attacking_row -- second piece's row number
    attacking_column -- second piece's column number
    """
    offset = (attacking_row - row, attacking_column - column)
    knight_moves = {(-1, -2), (1, -2), (-2, -1), (2, -1),
                    (-2, 1), (2, 1), (-1, 2), (1, 2)}
    return offset == (0, 0) or offset in knight_moves
|
def isOdd(v):
    """Tell whether ``v`` is odd.

    >>> isOdd(1)
    True
    >>> isOdd(2)
    False
    """
    return bool(v % 2)
|
def get_count_opt(input_str: str) -> int:
    """Count vowels in the input string (efficient).

    Only lowercase 'aeiou' are counted, as in the original contract.

    Args:
        input_str: input string.
    Returns:
        Total amount of vowels.
    Examples:
        >>> assert get_count_opt('abracadabra') == 5
    """
    vowels = frozenset('aeiou')
    return sum(1 for letter in input_str if letter in vowels)
|
def map(value, low1, high1, low2, high2):
    """Linearly re-map ``value`` from range [low1, high1] to [low2, high2].

    CAUTION: this function overwrites Python's map builtin.
    """
    fraction = float(value - low1) / (high1 - low1)
    return fraction * (high2 - low2) + low2
|
def _is_list(s: str) -> bool:
    """Return True when ``s`` looks like a simple bracketed list literal."""
    return s.startswith('[') and s.endswith(']')
|
def check_numeric(diceamount, dicepips):
    """Validate that both roll parameters are numeric strings.

    :param diceamount: number of dice, as a string
    :param dicepips: sides per die, as a string
    :return: [int(diceamount), int(dicepips)]
    :raises TypeError: when either argument is not a numeric string
    """
    if diceamount.isnumeric() and dicepips.isnumeric():
        return [int(diceamount), int(dicepips)]
    # Give the caller a message instead of a bare TypeError.
    raise TypeError('dice amount and pips must be numeric strings, '
                    'got %r and %r' % (diceamount, dicepips))
|
def _make_param_bounds_standard(probe_counts, step_size=0.001):
    """Calculate bounds on parameter values for only mismatches and cover_extension.

    For each dataset d, this calculates bounds on the values of the
    mismatches and cover extension parameters based on which values
    have a number of probes calculated for them. Namely, we wish to
    ensure we can find a bounding box around an arbitrary point
    (mismatches, cover_extension); based on the values in probe_counts,
    the bounds should ensure this.

    Args:
        probe_counts: dict giving the number of probes for each dataset
            and choice of parameters; each parameter key is a
            (mismatches, cover_extension) tuple
        step_size: small value subtracted from each upper bound so
            that the minimizer does not exceed it

    Returns:
        [m_1, e_1, m_2, e_2, ...] where m_i is a tuple (lo, hi) that
        gives bounds on the number of mismatches for the i'th dataset,
        and likewise for e_i
    """
    bounds = []
    for dataset in sorted(probe_counts.keys()):
        params = probe_counts[dataset].keys()
        # This requires that mismatches and cover_extension be the
        # only two parameters
        for p in params:
            assert len(p) == 2
        # Bound cover_extensions by the lowest and highest value for
        # which we have a probe count result
        cover_extensions = [k[1] for k in params]
        cover_extensions_lo = min(cover_extensions)
        cover_extensions_hi = max(cover_extensions)
        # To ensure we can find a rectangular bounding box around an
        # arbitrary point ((mismatches, cover_extension)), our lower
        # bound on mismatches should have a corresponding cover extension
        # of min(cover_extensions) and of max(cover_extensions);
        # so should our upper bound on mismatches
        mismatches = [k[0] for k in params]
        mismatches_with_valid_cover_extension = \
            [m for m in mismatches if ((m, cover_extensions_lo) in params and
                (m, cover_extensions_hi) in params)]
        mismatches_lo = min(mismatches_with_valid_cover_extension)
        mismatches_hi = max(mismatches_with_valid_cover_extension)
        # step_size keeps the minimizer strictly inside the sampled grid
        # at the upper end of each range.
        bounds += [(mismatches_lo, mismatches_hi - step_size)]
        bounds += [(min(cover_extensions), max(cover_extensions) - step_size)]
    return bounds
|
def is_feature_icon(pull_req_data):
    """
    Check whether the PR data carries the "feature:icon" label.

    :param pull_req_data: the data on a specific pull request from GitHub.
    :return: True if the pull request has a label named "feature:icon".
    """
    return any(label["name"] == "feature:icon"
               for label in pull_req_data["labels"])
|
def filter_words(words: str, sep='/', k=2) -> str:
    """
    Split ``words`` on ``sep``, drop pieces shorter than ``k`` characters,
    lower-case the survivors and join them with single spaces.

    :param words: the raw string
    :param sep: separator to split on
    :param k: minimum piece length to keep
    :return: space-joined, lower-cased surviving pieces
    """
    kept = (piece.lower() for piece in words.split(sep) if len(piece) >= k)
    return " ".join(kept)
|
def scope_name_for_color(foreground, background):
    """Return the Sublime scope name encoding the two RGB colors as
    zero-padded uppercase hex."""
    return "lisp_highlight.{:06X}.{:06X}".format(foreground, background)
|
def stride(point, height):
    """
    Return length of higher "step" in the locus.

    A step is a set of points of the locus, adjacent, with limited height
    difference, and containing the lowest point of the locus.
    Please note that a step cannot be inclined. The result can be
    irrelevant for locus containing Diracs.

    :param point: sequence of (x, y) pairs describing the locus
    :param height: maximum allowed rise above the lowest point
    :return: the sub-sequence of ``point`` forming the step (the whole
        locus when every point stays within ``height`` of the minimum)
    """
    n_points = len(point)
    # Index of lowest point
    p_min = min(enumerate(point), key=lambda elem: elem[1][1])[0]
    left, right = p_min - 1, p_min + 1
    # Scan rightwards, wrapping past the end via modulo, until a point
    # rises more than `height` above the minimum.
    for right in range(p_min + 1, n_points + p_min + 1):
        if point[right % n_points][1] - point[p_min][1] > height:
            break
    if right == n_points + p_min:
        return point
    # No need to use last index: we know solution is not complete locus
    # Scan leftwards; indices may go negative, relying on Python's
    # negative-index wrap-around.
    for left in range(p_min - 1, p_min - n_points, -1):
        if point[left][1] - point[p_min][1] > height:
            break
    # NOTE(review): the slices keep the breaking `left` index but exclude
    # the breaking `right` index - confirm this asymmetry is intended.
    if left >= 0 and right < n_points:
        return point[left:right % n_points]
    return point[left:] + point[:right % n_points]
|
def strip_function(x: str):
    """Strip surrounding whitespace from ``x``.

    Returns None when ``x`` is exactly '' or a single space (preserving
    the original behaviour); any other value is returned stripped.
    """
    if x in ('', ' '):
        return None
    return x.strip()
|
def angular_distance(a1: float, a2: float) -> float:
    """Calculate the smallest angular distance between two angles, in
    degrees.  The result is always in [0, 180].
    """
    phi = abs(a2 - a1) % 360
    # The raw difference may go "the long way round"; fold it back.
    return 360 - phi if phi > 180 else phi
|
def fred_apikey_set(apikey, filename=None):
    """Store the Fred Token in $HOME/.updoon_fred

    Parameters:
    -----------
    apikey : str
        The API Key from the Fred Website.
        See https://research.stlouisfed.org/useraccount/apikeys
    filename : str
        Absolute path to the text file where the
        Fred API Key is stored (Optional)
    """
    # set default path
    if filename is None:
        import pathlib
        filename = str(pathlib.Path.home()) + "/.updoon_fred"
    # Context manager guarantees the handle is closed even if the
    # write fails (the old code leaked the handle on error).
    with open(filename, 'w') as fileptr:
        fileptr.write(apikey)
    return None
|
def commonprefix(a, b):
    """Find the longest common prefix of `a` and `b`.

    Returns the tuple (prefix length, b).
    """
    pos = 0
    for ch_a, ch_b in zip(a, b):
        if ch_a != ch_b:
            break
        pos += 1
    return pos, b
|
def warmup_linear(x, warmup=0.002):
    """Triangular learning-rate schedule: linear ramp from 0 to 1 over
    the first ``warmup`` fraction of training, then linear decay to 0
    at x == 1, clamped at 0 afterwards (x is the progress fraction as
    provided to BertAdam)."""
    if x < warmup:
        # Ramp-up leg.
        return x / warmup
    # Decay leg, floored at zero past the end of training.
    return max((x - 1.) / (warmup - 1.), 0)
|
def is_between(x, y, z):
    """Return True if x <= y <= z, False otherwise.

    (The old docstring read "x <= y <= x" - a typo.)
    """
    return x <= y <= z
|
def pot_sistema(P, h):
    """Compute the AC energy that must be supplied: power ``P`` times
    hours ``h``."""
    return P * h
|
def quicksort(inputArray):
    """input: array
    output: new sorted array

    method:
        Pick the last element in the array as the pivot.
        Separate values into arrays based on whether they are
        greater than, less than, or equal to the pivot.
        Recursively sort the greater than and less than arrays.
        Return a new array merging the sorted arrays and the pivot.

    efficiency: O(n^2) worst case (e.g. already-sorted input, since the
    pivot is the last element), O(n log n) average case.
    space complexity: O(n)
    Note: NOT stable for equal-comparing but distinguishable elements
    (the old docstring's "stable" and "O(n) best case" claims were wrong).
    """
    if len(inputArray) <= 1:
        return inputArray
    pivot = inputArray[-1]
    lesser, equal, greater = [], [], []
    for value in inputArray[:-1]:
        if value > pivot:
            greater.append(value)
        elif value < pivot:
            lesser.append(value)
        else:
            equal.append(value)
    return quicksort(lesser) + equal + [pivot] + quicksort(greater)
|
def popitem(d, last=True):
    """Remove and return a (key, value) pair from ``d``: the most
    recently inserted pair when ``last`` is true, otherwise the
    first-inserted one."""
    if not last:
        first_key = next(iter(d))
        return first_key, d.pop(first_key)
    return d.popitem()
|
def get_next_polygon_points(polygon_points, percentage):
    """
    For every edge of the polygon (including the closing edge from the
    last point back to the first) return the point lying ``percentage``
    of the way from the edge's start to its end.

    :param polygon_points: sequence of (x, y) vertices
    :param percentage: interpolation factor along each edge
    :return: list of interpolated (x, y) tuples, one per edge
    """
    count = len(polygon_points)
    result = []
    for idx, (ax, ay) in enumerate(polygon_points):
        # Modulo wraps the final index back to zero, closing the polygon.
        bx, by = polygon_points[(idx + 1) % count]
        result.append((ax + percentage * (bx - ax),
                       ay + percentage * (by - ay)))
    return result
|
def three_odd_numbers(nums):
    """Is the sum of any 3 sequential numbers odd?

    >>> three_odd_numbers([1, 2, 3, 4, 5])
    True
    >>> three_odd_numbers([0, -2, 4, 1, 9, 12, 4, 1, 0])
    True
    >>> three_odd_numbers([5, 2, 1])
    False
    >>> three_odd_numbers([1, 2, 3, 3, 2])
    False
    """
    # Slide a window of three over the list.  (The previous version
    # guarded with `if p1 and p2`, which wrongly skipped any window
    # containing a 0, e.g. [0, 1, 2].)
    for i in range(len(nums) - 2):
        if (nums[i] + nums[i + 1] + nums[i + 2]) % 2 != 0:
            return True
    return False
|
def chebyshev(point1, point2):
    """Compute the Chebyshev (L-infinity) distance between 2D points.

    :param point1: 1st point
    :type point1: list
    :param point2: 2nd point
    :type point2: list
    :returns: Distance between point1 and point2
    :rtype: float
    """
    dx = abs(point1[0] - point2[0])
    dy = abs(point1[1] - point2[1])
    return dx if dx > dy else dy
|
def gtf_kv(s):
    """Convert the last gtf section of key/value pairs into a dict.

    Handles both "key=value" and whitespace-separated "key value" pairs;
    values wrapped in double quotes have the quotes stripped.

    :param s: attribute section of a GTF line (';'-separated pairs)
    :return: dict mapping attribute keys to their (unquoted) values
    """
    d = {}
    a = s.split(";")
    for key_val in a:
        if key_val.strip():
            eq_i = key_val.find("=")
            # Use '=' as the separator unless it sits inside a quoted
            # value (i.e. is immediately preceded by a double quote).
            if eq_i != -1 and key_val[eq_i - 1] != '"':
                kvs = key_val.split("=")
            else:
                kvs = key_val.split()
            key = kvs[0]
            # Re-join multi-token values; strip surrounding quotes when
            # the value starts and ends with a double quote.
            if kvs[1][0] == '"' and kvs[-1][-1] == '"':
                val = (" ".join(kvs[1:]))[1:-1].strip()
            else:
                val = (" ".join(kvs[1:])).strip()
            d[key] = val
    return d
|
def var_is_1(var):
    """
    Boolean if var is equal to 1 and not True.

    (The previous implementation returned True for *any* truthy
    non-bool value - e.g. 2 or "x" - contradicting its own contract.)

    :param var: variable
    :return: boolean
    """
    return var == 1 and not isinstance(var, bool)
|
def positionVectorFromModel(model):
    """Return the model's (token, value) pairs as a tuple, fixing each
    token to a stable position usable as a vector index."""
    return tuple((token, value) for token, value in model.items())
|
def code(*args):
    """Wrap each value (or each pairwise sum for two sequences) in a
    singleton list, coercing values to int.

    >>> code()
    []
    >>> code([1,2,3])
    [[1], [2], [3]]
    >>> code(['1', '2', '3'])
    [[1], [2], [3]]
    >>> code((1, 2, 3), ('1', '2', '3', '4'))
    [[2], [4], [6]]
    """
    if not args:
        return []
    if len(args) == 1:
        return [[int(item)] for item in args[0]]
    if len(args) == 2:
        first, second = args
        # Extra trailing items in the second sequence are ignored.
        return [[int(first[i]) + int(second[i])] for i in range(len(first))]
|
def label(i, label):
    """Return ``label`` only for run 0; later runs get an empty string."""
    return label if i == 0 else ""
|
def size_splitter(size, part_size):
    """Receive file size and return a list of 'start-end' byte-range strings.

    Ranges are zero-based and inclusive; the final range absorbs any
    remainder that does not fill a whole part.

    :param size: total size in bytes (0 yields the single range '0-0')
    :param part_size: desired size of each part
    """
    result = []
    if size == 0:
        result.append('0-0')
        return result
    # decide num of parts
    span = part_size if part_size <= size else size
    # print(f'span={span}, part size = {part_size}')
    parts = max(size // span, 1) # will be one part if size < span
    x = 0
    size = size - 1 # when we start counting from zero the last byte number should be size - 1
    for i in range(parts):
        y = x + span - 1
        # Extend the final range to cover the leftover tail bytes.
        if size - y < span: # last remaining bytes
            y = size
        result.append(f'{x}-{y}')
        x = y + 1
    return result
|
def overlap_area(a, b):
    """Return the area of overlap between two (left, top, right, bottom)
    display rectangles.

    NOTE(review): the factors are not clamped at zero, so disjoint
    rectangles yield a signed, possibly meaningless product - confirm
    callers only pass overlapping boxes.
    """
    width = min(a[2] - b[0], b[2] - a[0])
    height = min(a[3] - b[1], b[3] - a[1])
    return width * height
|
def get_username(review):
    """
    Gets the username of the review creator.

    Parameters
    ----------------
    review : BeautifulSoup object
        The review from metacritic as a BeautifulSoup object.

    Returns
    ----------------
    username : string
        The username of the review creator, or "" when it cannot be
        extracted.
    """
    try:
        # find() returns None when the span is missing; .text then raises
        # AttributeError.  Catch only that - the old bare `except:` also
        # swallowed KeyboardInterrupt and SystemExit.
        username = review.find("span", class_="author").text
    except AttributeError:
        return ""
    return username if username else ""
|
def any(*args, span=None):
    """Create a new expression that is the union (OR) of all conditions
    in the arguments.

    Parameters
    ----------
    args : list
        List of symbolic boolean expressions
    span : Optional[Span]
        The location of this operator in the source code.

    Returns
    -------
    expr: Expr
        Expression
    """
    if not args:
        raise ValueError("Any must take at least 1 argument")
    if len(args) == 1:
        return args[0]
    # Fold left-to-right so the expression tree matches argument order.
    val = _ffi_api._OpOr(args[0], args[1], span)  # type: ignore
    for arg in args[2:]:
        val = _ffi_api._OpOr(val, arg, span)  # type: ignore
    return val
|
def quota_parse(quota_dict):
    """Extract just the results from the output of
    ``SESConnection.get_send_quota()``."""
    response = quota_dict['GetSendQuotaResponse']
    return response['GetSendQuotaResult']
|
def filter_roles_by_application_name(application_name, roles):
    """Filter the roles that should be scanned based on the application
    name returned by OAuth.

    Args:
        application_name (str): application name
        roles (list): user roles as "app:role" strings (may be None)

    Returns:
        list: roles whose prefix (before the first ':') matches
    """
    if roles is None:
        roles = []
    return [role for role in roles
            if role.split(":")[0] == application_name]
|
def increased_pigment(incpwi):
    """
    INCPWI: INCREASED PIGMENT AREA W/I GRID

    Maps the raw code to 0 (codes 0 and 7), 1 (codes 1-6) or 88
    (code 8); anything else raises KeyError.
    """
    if incpwi in (0, 7):
        return 0
    if 1 <= incpwi <= 6:
        return 1
    if incpwi == 8:
        return 88
    raise KeyError('incpwi: %s' % incpwi)
|
def spt_vcfcov(stringy):
    """Total coverage from a VCF sample field: sums the comma-separated
    numbers in the second ':'-separated subfield."""
    depths = stringy.split(":")[1].split(",")
    return sum(float(depth) for depth in depths)
|
def no_limits_amp(npeaks):
    """Return unconstrained (None, None) amplitude bounds, one per peak."""
    return [(None, None) for _ in range(npeaks)]
|
def objnm_to_dict(objnm):
    """ Convert an object name or list of them into a dict

    Parameters
    ----------
    objnm : str or list of str

    Returns
    -------
    odict : dict
        Object value or list of object values
    """
    if isinstance(objnm, list):
        # Merge the per-name dicts into one dict of lists; keys are
        # initialised from the first entry only.
        merged = {}
        for position, one_name in enumerate(objnm):
            single = objnm_to_dict(one_name)
            if position == 0:
                for key in single:
                    merged[key] = []
            for key in single:
                merged[key].append(single[key])
        return merged
    # Single name: "X1-Y23" -> {'X': 1, 'Y': 23}
    parsed = {}
    for part in objnm.split('-'):
        parsed[part[0]] = int(part[1:])
    return parsed
|
def simple_function(arg1, arg2=1):
    """
    Just a simple function.

    Args:
        arg1 (str): first argument
        arg2 (int): second argument

    Returns:
        List[str]: first argument repeated second argument times.
    """
    return [arg1 for _ in range(arg2)]
|
def bezier_poly(p, t):
    """
    Evaluate the cubic Bezier polynomial with control points ``p``
    at parameter ``t``.
    """
    u = 1 - t
    return u**3 * p[0] + 3 * u**2 * t * p[1] + 3 * u * t**2 * p[2] + t**3 * p[3]
|
def edd_x_spt_pre_sequencing(dataset, w_edd=0.5, w_spt=0.5, *args, **kwargs):
    """
    Generates an initial job sequence based on a weighted combination of
    the earliest-due-date and the shortest-processing-time
    dispatching strategy. The job sequence will be fed to the model.

    :param dataset: dict of job dicts; each job must provide "due date",
        "t_smd" and "t_aoi".  NOTE: job dicts are mutated - a "rank"
        key is written into each one.
    :param w_edd: weight of the earliest-due-date rank
    :param w_spt: weight of the shortest-processing-time rank
    :return: list of job dicts sorted by the combined rank
    """
    sequence = [job for job in dataset.values()]
    # sort and rank according to edd; jobs tying on due date share a rank
    sequence = sorted(sequence, key=lambda job: job["due date"])
    rank = 1
    prev_job = None
    for job in sequence:
        if prev_job and job["due date"] != prev_job["due date"]:
            rank += 1
        job["rank"] = rank
        prev_job = job
    # sort and rank according to spt and create joint edd_spt_rank
    sequence = sorted(sequence, key=lambda job: job["t_smd"] + job["t_aoi"])
    rank = 1
    prev_job = None
    for job in sequence:
        # NOTE(review): unlike the first loop, prev_job is never updated
        # here, so it stays None and `rank` never increments - every job
        # gets spt rank 1.  This looks like a bug; confirm intent before
        # relying on the spt weighting.
        if (
            prev_job
            and job["t_smd"] + job["t_aoi"] != prev_job["t_smd"] + prev_job["t_aoi"]
        ):
            rank += 1
        # Combine the stored edd rank with the current spt rank.
        job["rank"] = w_edd * job["rank"] + w_spt * rank
    # sort according to joint edd_spt_rank
    sequence = sorted(sequence, key=lambda job: job["rank"])
    return sequence
|
def remove_duplicates(array):
    """Return a new sorted list containing each distinct value of
    ``array`` exactly once; the input list is not modified.

    Sorting (rather than a set) is kept so unhashable-but-comparable
    elements still work.  (The previous version crashed with IndexError
    on an empty list.)
    """
    unique = []
    for item in sorted(array):
        # Sorted order puts equal values side by side.
        if not unique or item != unique[-1]:
            unique.append(item)
    return unique
|
def long_repeat(line):
    """
    Length of the longest substring consisting of a single repeated
    character.  Returns 0 for the empty string, 1 for one character.

    (The previous version printed its internal run list on every call -
    a stray debugging side effect, removed here.)
    """
    if len(line) <= 1:
        return len(line)
    longest = 1
    run = 1
    # Compare each character with its predecessor to track run lengths.
    for prev_ch, ch in zip(line, line[1:]):
        if ch == prev_ch:
            run += 1
            if run > longest:
                longest = run
        else:
            run = 1
    return longest
|
def to_upper_case(given: str) -> str:
    """Return ``given`` converted to upper case.

    >>> to_upper_case("foo")
    'FOO'
    >>> to_upper_case("Foo")
    'FOO'
    >>> to_upper_case("FOO")
    'FOO'
    >>> to_upper_case(" ") == " "
    True
    """
    result = given.upper()
    return result
|
def extract_initials(*forenames):
    """Return a list of initials for each name in the given forenames.

    Each forename may be a string ("john paul", "j.p", "mary-ann") or a
    sequence of strings; hyphenated names yield hyphen-joined initials.
    """
    initials = []
    for forename in forenames:
        # Polymorphism: forename can be a str or a sequence of str.
        # A non-string raises AttributeError (no .split method), not
        # TypeError, so the original `except TypeError` never fired for
        # sequences - both are caught now.
        try:
            names = forename.split()
        except (AttributeError, TypeError):
            names = [name.strip() for name in forename]
        for name in names:
            name = name.strip('.')
            # Names separated by dots only (no whitespace): recurse on
            # the space-joined form.
            if '.' in name:
                initials += extract_initials(' '.join(name.split('.')))
                continue
            if '-' in name:
                # Hyphenated name: one initial per sub-name, hyphen-joined.
                parts = name.split('-')
                initials.append('-'.join(part[0].upper() for part in parts))
            else:
                initials.append(name[0].upper())
    return initials
|
def parser_country_availability_Descriptor(data, i, length, end):
    """\
    parser_country_availability_Descriptor(data,i,length,end) -> dict(parsed descriptor elements).

    The descriptor body is not decoded: the two-byte descriptor header is
    skipped and the raw payload is returned as:
    { "type": "country_availability", "contents" : unparsed_descriptor_contents }

    (Defined in ETSI EN 300 468 specification)
    """
    payload = data[i + 2:end]
    return {"type": "country_availability", "contents": payload}
|
def dequote(string):
    """Remove one pair of matching outer quotes (single or double), if
    present, after stripping surrounding whitespace.

    (The previous version raised IndexError on empty or all-whitespace
    input; a length guard fixes that and also stops a lone quote
    character from being swallowed.)
    """
    stripped = string.strip()
    if (len(stripped) >= 2 and stripped[0] == stripped[-1]
            and stripped.startswith(("'", '"'))):
        return stripped[1:-1]
    return stripped
|
def is_fq_screen_header_row(fields):
    """
    Returns true if the input list represents a header row: empty,
    starting with a '#' comment, or the 'Library' column header.
    """
    if not fields:
        return True
    first = fields[0]
    return first[0] == '#' or first == 'Library'
|
def parse_credentials(service_label, credentials):
    """
    Returns the credentials that match the service label.

    pass credentials in the following format:
    {
        "watson_conversation_username": "username",
        "watson_conversation_password": "password",
    }

    :param string service_label: The service label
    :param object credentials: The credentials from starterkit
    """
    prefix = 'watson_' + service_label + '_'
    if credentials is None:
        return {}
    # Keep only keys containing the prefix, with the prefix removed.
    return {full_key.replace(prefix, ''): credentials[full_key]
            for full_key in credentials if prefix in full_key}
|
def _recursive_namedtuple_convert(data):
    """
    Recursively converts the named tuples in the given object to
    dictionaries.

    :param data: An object, possibly a named tuple or a list of them
    :return: The converted object
    """
    if isinstance(data, list):
        return [_recursive_namedtuple_convert(item) for item in data]
    if hasattr(data, '_asdict'):
        # Named tuple: convert it, then recurse into its values.
        return {key: _recursive_namedtuple_convert(value)
                for key, value in data._asdict().items()}
    # Standard object: returned untouched.
    return data
|
def forward_match(seq: str, recog: list) -> (bool):
    """
    Match the sequence with a recognition sequence.

    :param seq: the sequence to search in
    :param recog: a list where entry i holds the bases allowed at
        position i (a falsy entry matches anything)
    :return: True if the sequence matches the recognition sequence,
        False if not
    """
    if len(recog) > len(seq):
        return False
    return all(not bases or base in bases
               for base, bases in zip(seq, recog))
|
def get_optional_element(content, element_name, index=0):
    """
    Gets an optional element from a dict (e.g. parsed JSON).

    :param content: The mapping from which the element is retrieved.
    :param element_name: The name of the element.
    :param index: Optional index, used when the element is a list.
    :return: The element (or its ``index``-th entry for lists), or None
        when absent or null.
    """
    if element_name not in content:
        return None
    element = content[element_name]
    if element is None:
        return None
    if isinstance(element, list):
        return element[index]
    # NOTE(review): the original had an `isinstance(element, object)`
    # branch here, which is always true, making its str() fallback dead
    # code; both are removed without changing behaviour.
    return element
|
def _ParseClusterLocation(cluster_and_location):
    """Get the cluster and location for the Config Controller cluster.

    Args:
      cluster_and_location: The one line string that contains the Config
        Controller resource name and its location.

    Returns:
      The tuple of cluster and region, or None for an empty line or the
      'NAME...' header row.
    """
    if not cluster_and_location or cluster_and_location.startswith('NAME'):
        return None
    cluster, region = '', ''
    for token in cluster_and_location.split(' '):
        if token.startswith('krmapihost'):
            cluster = token
        elif token:
            # Last non-empty, non-cluster token wins as the region.
            region = token
    return (cluster, region)
|
def _count_libsvm_features(lines):
    """
    Determine how many features are present in a libsvm file.

    :param lines: the lines of a libsvm file
    :return: the number of features in the libsvm file
    :raises Exception: when no "col:value" entries are found at all
    """
    max_column_index = -1
    for line in lines:
        # Skip the label (first token); remaining tokens are "col:value".
        for element in line.split()[1:]:
            column_index = int(element.split(":")[0])
            max_column_index = max(max_column_index, column_index)
    if max_column_index == -1:
        raise Exception("LibSVM file contains no data")
    return max_column_index + 1
|
def summable(number: int, numbers: list) -> bool:
    """
    Return True when two distinct entries of ``numbers`` sum to
    ``number``.

    Unlike the previous version, the caller's list is left intact
    (it used to be emptied via repeated pop()).
    """
    for i, first in enumerate(numbers):
        for second in numbers[i + 1:]:
            if first + second == number:
                return True
    return False
|
def average(iterable):
    """
    Return the arithmetic mean of the numbers in ``iterable``.

    Materialises the input once, so one-shot iterators now work too
    (the old len() call restricted it to sized sequences despite the
    parameter name).

    :raises ZeroDivisionError: if the iterable is empty.
    """
    values = list(iterable)
    return sum(values) / len(values)
|
def manha_dist(a, b):
    """Measure the Manhattan (L1) distance between 2D points ``a`` and ``b``."""
    return abs(a[0] - b[0]) + abs(a[1] - b[1])
|
def _strip(lines):
    """
    Right-strip every line in ``lines`` and return the resulting list.

    (The previous docstring described splitting full text and returning
    a (lines, eol, ends_with_eol) triple - behaviour this function
    never had; it simply strips trailing whitespace, including the EOL,
    from each line.)

    :param lines: iterable of strings
    :return: list of lines with trailing whitespace removed
    """
    return [line.rstrip() for line in lines]
|
def return_npc(mcc, mnc):
    """
    Format MCC and MNC into a NPC (zero-padded codes followed by "30").

    :param mcc: Country code.
    :type mcc: int
    :param mnc: Network code.
    :type mnc: int
    """
    country = str(mcc).zfill(3)
    network = str(mnc).zfill(3)
    return country + network + "30"
|
def replace_string_chars(s1, pos, s2):
    """
    Overwrite characters of ``s1`` starting at ``pos`` with ``s2``.

    @param s1: Input string for replacement.
    @param pos: Index in ``s1`` where the overwrite starts.
    @param s2: Replacement string.
    @return: New resulting string; ``s1`` unchanged when ``pos`` is out
        of range or ``s2`` is empty.
    """
    width = len(s2)
    if pos > len(s1) or width == 0:
        return s1
    return s1[:pos] + s2 + s1[pos + width:]
|
def tuple_to_dict(t):
    """ Convert the results tuple to a labelled dict. """
    keys = ("quadkey", "ml_prediction", "osm_building_area")
    return {key: t[position] for position, key in enumerate(keys)}
|
def calc_basic_indispensability(p, n):
    """
    Look up the basic indispensability of pulse ``n`` within a prime
    meter ``p``, from a hard-coded table for primes up to 31.

    Raises KeyError when ``p`` is not a prime in the table and
    IndexError when ``n`` is outside range(p).

    NOTE(review): the values appear to follow Barlow's basic
    indispensability tables for prime meters - confirm against the
    source before extending.

    >>> calc_basic_indispensability(3, 0)
    2
    >>> calc_basic_indispensability(2, 1)
    0
    >>> calc_basic_indispensability(5, 4)
    2
    >>> calc_basic_indispensability(7, 5)
    2
    >>> calc_basic_indispensability(13, 4)
    10
    """
    # Row p lists the indispensability of each pulse 0..p-1.
    table = {
        2: [1, 0, ],
        3: [2, 0, 1, ],
        5: [4, 0, 1, 3, 2, ],
        7: [6, 0, 4, 1, 5, 2, 3, ],
        11: [10, 0, 6, 3, 9, 1, 7, 2, 8, 4, 5, ],
        13: [12, 0, 7, 3, 10, 1, 8, 4, 11, 2, 9, 5, 6, ],
        17: [16, 0, 9, 4, 13, 2, 11, 6, 15, 1, 10, 5, 14, 3, 12, 7, 8, ],
        19: [18, 0, 10, 3, 13, 6, 16, 1, 11, 4, 14, 7, 17, 2, 12, 5, 15, 8, 9, ],
        23: [22, 0, 12, 6, 18, 3, 15, 9, 21, 1, 13, 7, 19, 2, 14, 8, 20, 4, 16, 5, 17, 10, 11, ],
        29: [28, 0, 15, 7, 22, 4, 19, 11, 26, 1, 16, 8, 23, 5, 20, 12, 27, 2, 17, 9, 24, 3, 18, 10, 25, 6, 21, 13, 14, ],
        31: [30, 0, 16, 5, 21, 10, 26, 3, 19, 8, 24, 13, 29, 1, 17, 6, 22, 11, 27, 2, 18, 7, 23, 12, 28, 4, 20, 9, 25, 14, 15, ],
    }
    return table[p][n]
|
def simplify_benchmark_name(name):
    """Shortens or modifies the path of the benchmark in order to make
    the table more readable: keeps only the basename and zero-pads the
    "_3"/"_5" suffixes."""
    # rpartition yields empty head/sep when there is no '/', leaving
    # the whole name as the basename.
    _, _, basename = name.rpartition("/")
    return basename.replace("_3", "_03").replace("_5", "_05")
|
def _identifier(name):
    """
    :param name: string
    :return: ``name`` unchanged when already a valid identifier;
        otherwise lower-cased, with leading digits/dots/spaces stripped
        and '-' replaced by '_'
    :rtype: string
    """
    if name.isidentifier():
        return name
    cleaned = name.lower().lstrip('0123456789. ')
    return cleaned.replace('-', '_')
|
def __create_send_data(username, icon_emoji, message):
    """Create sending data.

    Arguments:
        username {string} -- Slack display user name
        icon_emoji {string} -- Slack display emoji-icon (e.g. :hogehoge:)
        message {string} -- Message contents

    Returns:
        [dict] -- Sending data for JSON payload
    """
    payload = {}
    # Only truthy fields make it into the payload.
    if username:
        payload['username'] = username
    if icon_emoji:
        payload['icon_emoji'] = icon_emoji
    if message:
        payload['text'] = message
    return payload
|
def compute_flow_parameter(L, V, rho_V, rho_L):
    """
    Return the flow parameter, F_LV = (L/V) * sqrt(rho_V/rho_L).

    Parameters
    ----------
    L : float
        Liquid flow rate by mass.
    V : float
        Vapor flow rate by mass.
    rho_V : float
        Vapor density.
    rho_L : float
        Liquid density.

    Notes
    -----
    The flow parameter is given by [3]_.
    """
    mass_ratio = L / V
    return mass_ratio * (rho_V / rho_L) ** 0.5
|
def point_on_rectangle(rect, point, border=False):
    """
    Return the point on which ``point`` can be projected on the
    rectangle. ``border = True`` will make sure the point is bound to
    the border of the rectangle. Otherwise, if the point is in the
    rectangle, it's okay.

    :param rect: (x, y, width, height) sequence or Rectangle-like object
    :param point: the (x, y) point to project
    :param border: when True, interior points are snapped to the nearest side

    >>> point_on_rectangle(Rectangle(0, 0, 10, 10), (11, -1))
    (10, 0)
    >>> point_on_rectangle((0, 0, 10, 10), (5, 12))
    (5, 10)
    >>> point_on_rectangle(Rectangle(0, 0, 10, 10), (12, 5))
    (10, 5)
    >>> point_on_rectangle(Rectangle(1, 1, 10, 10), (3, 4))
    (3, 4)
    >>> point_on_rectangle(Rectangle(1, 1, 10, 10), (0, 3))
    (1, 3)
    >>> point_on_rectangle(Rectangle(1, 1, 10, 10), (4, 3))
    (4, 3)
    >>> point_on_rectangle(Rectangle(1, 1, 10, 10), (4, 9), border=True)
    (4, 11)
    >>> point_on_rectangle((1, 1, 10, 10), (4, 6), border=True)
    (1, 6)
    >>> point_on_rectangle(Rectangle(1, 1, 10, 10), (5, 3), border=True)
    (5, 1)
    >>> point_on_rectangle(Rectangle(1, 1, 10, 10), (8, 4), border=True)
    (11, 4)
    >>> point_on_rectangle((1, 1, 10, 100), (5, 8), border=True)
    (1, 8)
    >>> point_on_rectangle((1, 1, 10, 100), (5, 98), border=True)
    (5, 101)
    """
    px, py = point
    rx, ry, rw, rh = tuple(rect)
    x_inside = y_inside = False
    # Clamp x into [rx, rx + rw]; remember when it was already inside.
    if px < rx:
        px = rx
    elif px > rx + rw:
        px = rx + rw
    elif border:
        x_inside = True
    # Clamp y into [ry, ry + rh]; remember when it was already inside.
    if py < ry:
        py = ry
    elif py > ry + rh:
        py = ry + rh
    elif border:
        y_inside = True
    if x_inside and y_inside:
        # Point lies strictly inside and border snapping was requested:
        # move it to whichever side is closest.
        # Find point on side closest to the point
        if min(abs(rx - px), abs(rx + rw - px)) > min(abs(ry - py), abs(ry + rh - py)):
            if py < ry + rh / 2.0:
                py = ry
            else:
                py = ry + rh
        else:
            if px < rx + rw / 2.0:
                px = rx
            else:
                px = rx + rw
    return px, py
|
def raw_coordinates(xc, yc, ma):
    """Return 2D coordinates on the specified cube face, in [0, 1]^2
    relative to the face's bottom-left corner (from the OpenGL spec);
    ``ma`` is the major-axis component."""
    scale = abs(float(ma))
    return (float(xc) / scale + 1) / 2, (float(yc) / scale + 1) / 2
|
def kleio_escape(v: str) -> str:
    """
    Checks for Kleio special characters and quotes if needed::

        >>> print(kleio_escape('normal string'))
        normal string
        >>> print(kleio_escape('oops we have a / in the middle'))
        "oops we have a / in the middle"
    """
    text = str(v)
    needs_quoting = any(special in text for special in '/;=$#%\n')
    return '"' + text + '"' if needs_quoting else text
|
def insertion_sort(nums_array: list) -> list:
    """
    Insertion sort (in place, ascending).

    :param nums_array: list of comparable items (mutated in place)
    :return: the same list, sorted
    """
    for right in range(1, len(nums_array)):
        current = nums_array[right]
        hole = right
        # Shift larger elements one slot to the right until current fits.
        while hole > 0 and nums_array[hole - 1] > current:
            nums_array[hole] = nums_array[hole - 1]
            hole -= 1
        nums_array[hole] = current
    return nums_array
|
def _remove_none_values(dictionary):
    """ Remove dictionary keys whose value is None; returns the popped
    values (a list of Nones, one per removed key). """
    # Materialise the keys first: popping while iterating the dict
    # directly would raise RuntimeError.
    none_keys = [key for key in dictionary if dictionary[key] is None]
    return [dictionary.pop(key) for key in none_keys]
|
def global_pct_sim(segments):
    """
    Overall percent similarity across ``segments``: per-segment similar
    residue counts (segment length * pct_sim / 100) are summed, divided
    by the total matched length, and expressed as a percentage.
    """
    lengths = [abs(seg['contig_start'] - seg['contig_end']) + 1
               for seg in segments]
    similar = sum(length * (seg['pct_sim'] / 100)
                  for length, seg in zip(lengths, segments))
    return (similar / sum(lengths)) * 100
|
def from_formula(formula):
    """R-style formula parsing: "y ~ x1 + x2" -> (['x1', 'x2'], 'y')."""
    parts = formula.replace(' ', '').split('~')
    y_col = parts[0].strip()
    x_cols = parts[1].strip().split('+')
    return x_cols, y_col
|
def setDefaultVal(myDict, name, value):
    """Return myDict[name] when present; otherwise store ``value`` under
    ``name`` and return it.  (This is exactly dict.setdefault.)

    :param myDict: a dictionary we want to update
    :param name: the name of the property
    :param value: the default value if it is not already set
    :return: the value, either the default or the one from myDict
    """
    return myDict.setdefault(name, value)
|
def replace_spaces(text: str) -> str:
    """Replace every space in the given text with '+'.

    :param text: The text to be formatted.
    :returns: Text with spaces replaced with '+'.
    """
    return "+".join(text.split(" "))
|
def get_row_column(play, board):
    """
    Prompt for user input until it is correct, catching letters or
    incomplete values in the input.

    :param play - string, user command ('q' quits; otherwise "row,column"):
    :param board - object, board (printed on invalid input):
    :return row, column - int, 1-based locations on board in 2-D:
    """
    # loop until there is valid user input
    while True:
        # NOTE(review): if `play` is ever not a str, the AttributeError
        # from .strip() below is NOT caught and the call crashes - confirm
        # callers always pass strings.
        if type(play) == str:
            if play.lower() == 'q':
                quit()
        try:
            play_list = play.strip().split(',')
            row = int(play_list[0])
            column = int(play_list[1])
            # row index out of range
            if row < 1 or column < 1:
                print('Invalid position.')
                print('Try again.')
                print(board)
                play = input("Input a row then column separated by a comma (q to quit): ")
            else:
                return row, column
        except (TypeError, ValueError, IndexError):
            # Non-numeric or missing values: re-prompt.
            print('Incorrect input.')
            print('Try again.')
            print(board)
            play = input("Input a row then column separated by a comma (q to quit): ")
|
def talk(text, is_yelling=False, trim=False, verbose=True):
    """Transform text and optionally print it.

    ``trim`` strips whitespace from both ends, ``is_yelling`` upper-cases
    the result, and ``verbose`` controls whether the transformed text is
    printed before being returned.

    Returns the transformed text.
    """
    result = text.strip() if trim else text
    if is_yelling:
        result = result.upper()
    if verbose:
        # Printing is a deliberate side effect of this helper.
        print(result)
    return result
|
def sizeof_fmt(num, suffix='B'):
    """Render a byte count as a human-readable string with binary prefixes.

    Based on https://stackoverflow.com/a/1094933/594760
    """
    value = num
    for prefix in ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi'):
        if abs(value) < 1024.0:
            return "%3.1f%s%s" % (value, prefix, suffix)
        value /= 1024.0
    # Anything beyond Zi is reported in yobi-units.
    return "%.1f%s%s" % (value, 'Yi', suffix)
|
def get_file_dir(location):
    """Split a Windows-style path at its last backslash.

    NOTE(review): despite the original summary ("returns the directory"),
    ``str.rpartition`` returns a 3-tuple ``(head, separator, tail)``:
    element 0 is the directory part, element 2 is the file name. When no
    backslash is present the result is ``('', '', location)``. Only
    backslash separators are handled; forward slashes are never split.

    Keyword arguments:
    location -- A file path.
    """
    return location.rpartition('\\')
|
def build_endpoint(route, target=None):
    """Build a REST endpoint string by joining route and target.

    Args:
        route (:obj:`str`):
            Route part of endpoint.
        target (:obj:`str`, optional):
            Target part of endpoint.
            Defaults to None.

    Returns:
        :obj:`str`
    """
    # Without a target the route alone is the endpoint; otherwise the
    # target is appended after a slash.
    if target is None:
        return route
    return "{}/{}".format(route, target)
|
def normalise_for_use_as_path(data_in):
    """Normalise the 'data_in' so it can be used as part of a path."""
    # One translation pass handles both substitutions:
    # '|' -> '_' and '.' -> '_dot_'.
    return data_in.translate(str.maketrans({'|': '_', '.': '_dot_'}))
|
def get_file_split_dir_index(file_index: int, num_split_dirs: int, num_files: int) -> int:
    """
    Get the split directory index where a specific file should be placed.

    Files are distributed evenly across the directories; any remainder
    files (indices beyond ``num_files_p_split * num_split_dirs``) all
    land in the last directory.

    :param file_index: File index - 1,2..,num_files
    :param num_split_dirs: Number of split directories.
    :param num_files: Number of files.
    :return: Split directory index (1-based).
    """
    num_files_p_split = num_files // num_split_dirs
    # Guard: with fewer files than directories the per-split count is 0,
    # which previously raised ZeroDivisionError. Give each file its own
    # directory instead.
    if num_files_p_split == 0:
        return file_index
    if file_index <= num_files_p_split * num_split_dirs:
        return (file_index - 1) // num_files_p_split + 1
    return num_split_dirs
|
def sqrt_newton(n, steps=20):
    """Approximate square root by Newton's Method.

    - Initial guess: old_guess = n / 2
    - Iterations: new_guess = 1/2 * (old_guess + n / old_guess)

    :param n: non-negative number whose square root is wanted
    :param steps: number of Newton iterations to run
    :return: approximation of sqrt(n)
    :raises ValueError: if n is negative (the iteration has no real root
        and previously crashed with ZeroDivisionError)
    """
    if n < 0:
        raise ValueError("sqrt_newton() requires a non-negative input")
    if n == 0:
        # The initial guess n / 2 would be 0 and n / sqrt would divide
        # by zero; the answer is exactly 0 anyway.
        return 0.0
    sqrt = n / 2
    for _ in range(steps):
        sqrt = 1 / 2 * (sqrt + n / sqrt)
    return sqrt
|
def _aggregations(search, definitions):
"""Add aggregations to query."""
if definitions:
for name, agg in definitions.items():
search.aggs[name] = agg if not callable(agg) else agg()
return search
|
def AIC(logLikelihood, nParams):
    """Calculate the original Akaike Information Criterion for a model fit
    to data, given the ln(likelihood) of the best-fit model and the number of
    model parameters nParams.

    Note that this should only be used for large sample sizes; for small
    sample sizes (e.g., nData < 40*nParams), use the corrected AIC function
    AICc [below].
    """
    goodness_of_fit = -2.0 * logLikelihood
    complexity_penalty = 2.0 * nParams
    return goodness_of_fit + complexity_penalty
|
def to_tuple(lists: list):
    """Convert a list of lists into an unpacked (transposed) tuple.

    Allows writing ``X, y = to_tuple([[x1, y1], [x2, y2], [x3, y3]])``.

    Parameters
    ----------
    lists: list
        list of objects to convert to unpacked tuple

    Examples
    --------
    >>> to_tuple([[x1, y1], [x2, y2], [x3, y3]])
    ([x1, x2, x3], [y1, y2, y3])
    >>> to_tuple([[x1, y1]])
    ([x1], [y1])
    >>> to_tuple([m1, m2, m3])
    [m1, m2, m3]
    """
    # Guard clauses: zero/one entry, or a flat list of objects, is
    # returned untouched.
    if len(lists) <= 1:
        return lists
    if not isinstance(lists[0], list):
        return lists
    width = len(lists[0])
    # Transpose: the j-th output list collects the j-th element of every row.
    return tuple([row[j] for row in lists] for j in range(width))
|
def TransformShortStatus(r, undefined=''):
    """Returns a short description of the status of a logpoint or snapshot.

    Status will be one of ACTIVE, COMPLETED, or a short error description. If
    the status is an error, there will be additional information available in
    the status field of the object.

    Args:
      r: a JSON-serializable object
      undefined: Returns this value if the resource is not a valid status.

    Returns:
      One of ACTIVE, COMPLETED, or an error description.

    Example:
      --format="table(id, location, short_status()())"
    """
    # Guard clauses replace the nested conditionals of the original.
    if not isinstance(r, dict):
        return undefined
    if not r.get('isFinalState'):
        return 'ACTIVE'
    status = r.get('status')
    # A missing/non-dict/non-error status still counts as a clean finish.
    if not isinstance(status, dict) or not status.get('isError'):
        return 'COMPLETED'
    refers_to = status.get('refersTo')
    return '{0}_ERROR'.format(refers_to) if refers_to else undefined
|
def make_grid(freq, m, n):
    """Make the geodesic pattern grid"""
    limit = (2 * freq) // (m + n)
    grid = {}
    for u in range(limit):
        for v in range(limit):
            # Lattice transform: same as x = u*(-n) + v*(m+n),
            #                            y = u*(m+n) + v*(-m).
            x = v * (m + n) - u * n
            y = u * (m + n) - v * m
            # Keep only points inside the triangular region.
            if x >= 0 and y >= 0 and x + y <= freq:
                grid[(u, v)] = (x, y)
    return grid
|
def zeros(n):
    """
    Returns a sequence of zeros with n elements.
    @type n: number
    @param n: length of sequence
    @rtype: list
    @return: sequence
    """
    return [0.0 for _ in range(n)]
|
def bubble_sort(arr):
    """ Sort Array Elements using Bubble Sort Algorithm

    Sorts in place and returns the same list object, ascending.

    :param arr: list of mutually comparable items (modified in place)
    :return: the sorted list

    Examples:
    >>> bubble_sort([1])
    [1]
    >>> bubble_sort([10,9,8,7,6,5,4,3,2,1])
    [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    """
    # Traverse through all array elements
    n = len(arr)
    for i in range(n):
        swapped = False
        # Last i elements are already in place
        for j in range(0, n - i - 1):
            # Swap if the element found is greater than the next element
            if arr[j] > arr[j + 1]:
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
                swapped = True
        # A pass with no swaps means the list is already sorted --
        # stop early instead of running the remaining passes.
        if not swapped:
            break
    return arr
|
def get_importance(s: str):
    """Count the number of ``!`` characters in the string.

    :param s: string to scan
    :return: number of exclamation marks in ``s``
    """
    # str.count does the scan in C without building a throwaway list.
    return s.count("!")
|
def jeffreys(a, b, u):
    """Given u in [0,1], return a Jeffreys random number in [a,b]."""
    # Log-uniform interpolation between a (u=0) and b (u=1),
    # written as a product of powers.
    lower_part = a ** (1 - u)
    upper_part = b ** u
    return lower_part * upper_part
|
def toalphanum(s):
    """
    gets rid of the unwanted characters
    """
    # Map every character in the unwanted set to '_' in one
    # C-level translation pass.
    table = str.maketrans(dict.fromkeys('\\ /(.)-', '_'))
    return s.translate(table)
|
def merge_dicts(*dicts):
    """ Merge an arbitrary number of dictionaries into one.

    The result's keys are the union of all input keys; each key maps to
    the list of values it held across the inputs, in argument order. """
    merged = {}
    for mapping in dicts:
        for key, val in mapping.items():
            # setdefault creates the per-key list on first sight.
            merged.setdefault(key, []).append(val)
    return merged
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.