content stringlengths 42 6.51k |
|---|
def get_treatments(ladders):
    """Collect the distinct treatments referenced by a set of ladders.

    Args:
        ladders: iterable of ladder rows; the treatment value is stored
            in the second column (index 1) of each row.

    Returns:
        list: unique treatment values (order not guaranteed).
    """
    # Deduplicate via a set comprehension over column 1 of every ladder.
    return list({ladder[1] for ladder in ladders})
def component_masses_to_symmetric_mass_ratio(mass_1, mass_2):
    """
    Convert the component masses of a binary to its symmetric mass ratio.

    Parameters
    ----------
    mass_1: float
        Mass of the heavier object
    mass_2: float
        Mass of the lighter object

    Returns
    -------
    float
        Symmetric mass ratio eta = m1 * m2 / (m1 + m2)**2.
    """
    total_mass = mass_1 + mass_2
    return (mass_1 * mass_2) / total_mass ** 2
def hamming_distance(list1, list2):
    """Calculate the Hamming distance between two lists.

    The distance is the number of positions at which corresponding
    elements differ; comparison stops at the shorter list's length.

    :param list1: first list
    :param list2: second list
    :return: number of differing positions (int)
    :raises TypeError: if either argument is not a list
    """
    # Validate explicitly instead of `assert`: asserts are stripped
    # when Python runs with -O, silently disabling the check.
    if not isinstance(list1, list) or not isinstance(list2, list):
        raise TypeError("hamming_distance expects two lists")
    # Count pairwise mismatches over the aligned prefix.
    return sum(1 for item1, item2 in zip(list1, list2) if item1 != item2)
def map_node2name(node_list, node_id):
    """Look up a node's name by its id.

    :param node_list: iterable of objects exposing ``id`` and ``name``
    :param node_id: the id to search for
    :return: the first matching node's name, or "none" when absent
    """
    matches = (candidate.name for candidate in node_list
               if candidate.id == node_id)
    return next(matches, "none")
def getANSIfgarray_for_ANSIcolor(ANSIcolor):
    """Return the SGR parameter list that selects *ANSIcolor* as foreground.

    Uses xterm 256-color mode (ESC[38;5;<n>m), which is available in
    xterm but not necessarily all terminals.  Returning the codes as a
    list lets callers merge several colour updates into one escape.
    """
    # '38', '5', N selects foreground colour N in 256-colour mode.
    codes = ['38', '5']
    codes.append(str(ANSIcolor))
    return codes
def _compute_breadcrumbs(path, show_hidden=False):
"""Returns a list of breadcrumb objects for a path."""
breadcrumbs = []
breadcrumbs.append(('[root]', '/'))
path_parts = path.split('/')[1:-1]
full_path = '/'
for part in path_parts:
full_path += part + "/"
url_append = ""
if show_hidden:
url_append = '?hidden=1'
breadcrumbs.append((part, full_path+url_append))
return breadcrumbs |
def SignExtend(val, nbits):
    """Sign-extend *val*, interpreted as an *nbits*-wide two's-complement value.

    val   -- value to be sign-extended
    nbits -- bit width of the value (8 for byte, 16 for word, etc.)
    """
    sign_bit = 1 << (nbits - 1)
    magnitude = val & (sign_bit - 1)      # low bits, without the sign bit
    # When the sign bit is set, subtracting its weight yields the
    # negative two's-complement value.
    return magnitude - (val & sign_bit)
def get_source_detail_hr(sources):
    """
    Build a comma-separated string of source names.

    :param sources: iterable of source dicts from the response
    :return: names joined by ', '; a source without a 'name' key
        contributes an empty string
    """
    names = (source.get('name', '') for source in sources)
    return ', '.join(names)
def normalise_encoding_name(option_name, encoding):
    """
    Normalise a user-supplied encoding name to a canonical form.

    Known aliases of ASCII and UTF-8 collapse to 'ascii'/'utf8', and
    'default' is lower-cased; unknown encodings pass through unchanged
    (they may still exist at runtime).  *option_name* is accepted for
    interface parity but is not used here.

    >>> normalise_encoding_name('c_string_encoding', 'ascii')
    'ascii'
    >>> normalise_encoding_name('c_string_encoding', 'AsCIi')
    'ascii'
    >>> normalise_encoding_name('c_string_encoding', 'us-ascii')
    'ascii'
    >>> normalise_encoding_name('c_string_encoding', 'utF8')
    'utf8'
    >>> normalise_encoding_name('c_string_encoding', 'utF-8')
    'utf8'
    >>> normalise_encoding_name('c_string_encoding', 'deFAuLT')
    'default'
    >>> normalise_encoding_name('c_string_encoding', 'default')
    'default'
    >>> normalise_encoding_name('c_string_encoding', 'SeriousLyNoSuch--Encoding')
    'SeriousLyNoSuch--Encoding'
    """
    # Empty/None encoding normalises to the empty string.
    if not encoding:
        return ''
    # Fast path: already a canonical name (case-insensitively).
    if encoding.lower() in ('default', 'ascii', 'utf8'):
        return encoding.lower()
    import codecs
    try:
        decoder = codecs.getdecoder(encoding)
    except LookupError:
        return encoding  # may exist at runtime ...
    # Alias detection: codecs resolves every alias of an encoding to
    # the same decoder object, so compare decoders.
    for name in ('ascii', 'utf8'):
        if codecs.getdecoder(name) == decoder:
            return name
    return encoding
def complement(dnaStrand):
    """
    Return the reverse complement of a DNA strand using base-pairing rules.

    Input:
        dnaStrand   DNA strand (string over "ACGT")
    Output:
        The complemented strand, reversed (5'->3' orientation).
    """
    pairs = {"A": "T", "T": "A", "C": "G", "G": "C"}
    # Complement each base, then reverse the whole strand.
    complemented = [pairs[base] for base in dnaStrand]
    return "".join(reversed(complemented))
def report_list_modes(inlist):
    """Report the mode(s) of an input list.

    Parameters
    ----------
    inlist : list
        List of hashable values that can be counted.

    Returns
    -------
    list
        The most frequent value(s).  If there is a tie the list has
        length > 1; an empty input yields an empty list.
    """
    counts = {value: inlist.count(value) for value in set(inlist)}
    if not counts:
        return []
    # Hoist the max out of the comprehension: the original recomputed
    # max(counts.values()) for every candidate, which is O(k^2).
    highest = max(counts.values())
    return [value for value, freq in counts.items() if freq == highest]
def edge_args(start, dest, direction, label):
    """
    Compute argument tuples for Edge construction from a direction flag.

    @param start str: name of the starting node
    @param dest str: name of the destination node
    @param direction str: 'i', 'o', or 'b' (in / out / bidirectional),
        interpreted relative to *start*
    @param label: label attached to every generated tuple
    @return: list of (from, to, label) tuples
    """
    args = []
    # 'o' and 'b' produce the outgoing edge; 'i' and 'b' the incoming one.
    if direction == 'o' or direction == 'b':
        args.append((start, dest, label))
    if direction == 'i' or direction == 'b':
        args.append((dest, start, label))
    return args
def to_id(s):
    """Convert a token string to its numeric id.

    Digit tokens map to value + 1; '+' maps to 11 and '*' to 12.
    """
    specials = {"+": 11, "*": 12}
    if s in specials:
        return specials[s]
    return int(s) + 1
def parse_ticket_labels(labels):
    """Get a generator over the names of the given labels, if any.

    :param labels: iterable of label dicts, or a falsy value
    :return: an empty list when *labels* is falsy, otherwise a generator
        yielding each label's "name" value (None when absent)
    """
    if not labels:
        return []
    return (entry.get("name") for entry in labels)
def _splitrange(a, b):
    """Split range with bounds a and b into two ranges at 0 and return two
    tuples of numbers for use as startdepth and stopdepth arguments of
    revancestors and revdescendants.
    >>> _splitrange(-10, -5)     # [-10:-5]
    ((5, 11), (None, None))
    >>> _splitrange(5, 10)       # [5:10]
    ((None, None), (5, 11))
    >>> _splitrange(-10, 10)     # [-10:10]
    ((0, 11), (0, 11))
    >>> _splitrange(-10, 0)      # [-10:0]
    ((0, 11), (None, None))
    >>> _splitrange(0, 10)       # [0:10]
    ((None, None), (0, 11))
    >>> _splitrange(0, 0)        # [0:0]
    ((0, 1), (None, None))
    >>> _splitrange(1, -1)       # [1:-1]
    ((None, None), (None, None))
    """
    # Depth ranges are half-open [start, stop); (None, None) means "empty".
    ancdepths = (None, None)
    descdepths = (None, None)
    # a == b == 0 selects exactly the node itself (ancestor depth 0 only).
    if a == b == 0:
        ancdepths = (0, 1)
    # Negative bounds describe ancestors: depths max(-b, 0) .. -a inclusive.
    if a < 0:
        ancdepths = (-min(b, 0), -a + 1)
    # Positive bounds describe descendants: depths max(a, 0) .. b inclusive.
    if b > 0:
        descdepths = (max(a, 0), b + 1)
    return ancdepths, descdepths
def is_remove(change):
    """Return True when *change*'s 'Action' field is 'Remove'."""
    action = change['Action']
    return action == 'Remove'
def get_bits(value, start_index, end_index):
    """
    Extract the bit field spanning start_index..end_index (inclusive).

    :param value: non-negative integer to read bits from
    :param start_index: first bit of the field, counted from bit 0
        (the least-significant bit, given how the mask is shifted)
    :param end_index: last bit of the field; must be >= start_index
    :return: the field's value, shifted down so its lowest bit is bit 0
    """
    width = end_index - start_index + 1
    field_mask = ((1 << width) - 1) << start_index
    return (value & field_mask) >> start_index
def composition_approxDP_static_hetero_basic(distance_is):
    """Basic composition of a list of individual approximate-DP distances.

    :param distance_is: list of (epsilon, delta) pairs, or ndarray of
        shape [k, 2]
    :return: (sum of epsilons, sum of deltas)
    """
    # Transpose the pairs into an epsilon column and a delta column.
    epsilons, deltas = zip(*distance_is)
    return sum(epsilons), sum(deltas)
def filter_paired(list):
    """
    Keep only reads whose both mates are mapped in the sam file.

    :param list: iterable of read ids of the form "<read>/<mate>"
    :return: set of ids whose read name occurs exactly twice
    """
    by_read = {}
    for read_id in list:
        # Everything before the first '/' identifies the read pair.
        base = read_id.rsplit('/')[0]
        by_read.setdefault(base, []).append(read_id)
    kept = []
    for ids in by_read.values():
        if len(ids) == 2:
            kept.extend(ids)
    return set(kept)
def display_label(f_class, catalog):
    """
    Map a predicted class index to its human-readable label.

    Parameters:
        f_class - class index (any value convertible with str())
        catalog - dict mapping stringified class indexes to label names
    Returns:
        The label stored in *catalog* under str(f_class).
    Raises:
        KeyError: when the class index is not present in the catalog.
    """
    key = str(f_class)
    return catalog[key]
def getLength(intervals):
    """Return the summed length of (start, end) intervals.

    >>> getLength([(10,20), (30,40)])
    20
    """
    return sum(end - start for start, end in intervals)
def _esc_quote(contents: str) -> str:
"""Escape string contents for DLNA search quoted values.
See ContentDirectory:v4, section 4.1.2.
"""
return contents.replace("\\", "\\\\").replace('"', '\\"') |
def version_array_to_string(version_array):
    """Render a version given as a number sequence, e.g. [1, 2, 3] -> "1.2.3"."""
    parts = [str(component) for component in version_array]
    return ".".join(parts)
def row_empty_spaces(row, puzzle):
    """
    List the empty (-1) cells in one row of a sudoku puzzle.

    Prints a human-readable summary of the available spaces, then
    returns them.

    :param row: row number to inspect
    :param puzzle: 9x9 grid where -1 marks an empty cell
    :return: list of [row, col] pairs for the empty cells
    """
    open_cells = [[row, col] for col in range(9) if puzzle[row][col] == -1]
    print("\nThere are {} available spaces on your selected row.".format(len(open_cells)))
    for number, cell in enumerate(open_cells, start=1):
        print("Space Number", str(number), ": ", cell)
    return open_cells
def makeExpectedFTypes(expectedPrecision,foundFType,foundFTypes=['real']):
    """A very application-specific mapping to construct an fType list
    for expected. Make sure that if we're looking at complex that we
    do not replicate real-real comparisons.

    expectedPrecision -- 'def', 32, or 64 (other values fall through and
        return the ERROR sentinel)
    foundFType        -- the single found type currently being processed
    foundFTypes       -- the full set of found types in play
        (NOTE(review): mutable default list is shared across calls; it
        is only read here, but confirm no caller mutates it)
    """
    # Sentinel returned when no branch below matches expectedPrecision.
    retTypes = ['makeExpectedFType::ERROR']
    if 'logical' in foundFTypes :
        # Logical comparisons only pair logical with logical.
        if foundFType == 'logical' :
            retTypes=['logical']
    elif expectedPrecision == 'def':
        #
        if not 'complex' in foundFTypes :
            retTypes=['integer']
        else :
            # If we're in AssertComplex and we're not duplicating reals...
            if foundFType == 'real' :
                retTypes=[]
            else :
                retTypes=['integer']
    elif expectedPrecision == 32 or expectedPrecision == 64:
        # This logic is probably not correct.
        if foundFType == 'integer' :
            # Processing integer-found.
            # mlr - fingers crossed.
            retTypes=['integer','real']
        elif not 'complex' in foundFTypes :
            # Processing case where we're not combining with complex.
            # mlr 2 - ???
            retTypes=['integer','real']
            # retTypes=['real']
        else :
            if foundFType == 'real' :
                # Tom asserts that finding a real when expecting complex should be an error.
                # retTypes=['complex']
                retTypes=[]
            else :
                retTypes=['integer','real','complex']
                #? retTypes=['real','complex']
    return retTypes
def map_components(notsplit_packages, components):
    """
    Translate component names into the packages to install.

    Components still bundled inside the master 'ceph' package (i.e.
    listed in *notsplit_packages*) map to 'ceph'; components that have
    been split out keep their own name.

    :return: de-duplicated list of package names (order not guaranteed)
    """
    packages = {'ceph' if name in notsplit_packages else name
                for name in components}
    return list(packages)
def gcd(x, y):
    """Return the greatest common divisor of x and y (Euclid's algorithm)."""
    while y:
        x, y = y, x % y
    return x
def parse_input_arg(input_arg: str) -> str:
    """Normalise an input URI into a valid JSON Schema ref.

    This tool accepts bare base URIs without a JSON Pointer, so a URI
    lacking a '#' fragment gets a root pointer appended; anything that
    already carries a fragment passes through unchanged.
    """
    return input_arg if "#" in input_arg else input_arg + "#/"
def subset(target, lst):
    """Decide whether *target* can be written as a sum of values in *lst*.

    Values may be positive, negative, or zero, and each value is used
    at most once ("use it or lose it" recursion).
    """
    if target == 0:
        return True
    if lst == []:
        return False
    head, tail = lst[0], lst[1:]
    # Either spend the head on the target, or skip it entirely.
    return subset(target - head, tail) or subset(target, tail)
def opt_arg(spec, property, default_value=None):
    """Get an optional property, possibly with a default value.

    @param spec: the JSON-like filter spec (a dict)
    @param property: the property name to retrieve
    @param default_value: fallback returned when the property is missing
        or explicitly None
    @returns: the stored value, otherwise default_value/None
    """
    found = spec.get(property)
    return default_value if found is None else found
def unify_1_md(bins, edges):
    """Unify 1- and multidimensional bins and edges.

    Multidimensional *edges* (first element is iterable) pass through
    unchanged; one-dimensional *edges* are wrapped in a list so callers
    can always treat the result as multidimensional.

    Return a tuple of (bins, edges).
    """
    multidimensional = hasattr(edges[0], '__iter__')
    return (bins, edges) if multidimensional else (bins, [edges])
def compute_roc_auc(xs, ys, positiveYValue):
    """
    Compute area under the receiver operating curve via the trapezoid
    rule over ROC points generated at every distinct score.

    INPUTS:
        xs : predicted floating point values for each sample
        ys : truth for each sample (must be a list; .count is used)
        positiveYValue : y value to treat as positive
    OUTPUT: computed AUC; 0.50 when all scores are equal (no ranking
        information), 0.0 when either class is absent
    """
    def trapezoid_area(X1, X2, Y1, Y2):
        # Area of the trapezoid between two consecutive ROC points.
        return 0.50*(X1 - X2)*(Y1 + Y2)
    # arrange the data, sorted by decreasing confidence xs values...
    data = sorted(zip(xs, ys), reverse = True)
    # check for degenerate case, all xs equal (or empty input)...
    if len(xs) == 0 or data[0][0] == data[-1][0]:
        # meaningless, so return 0.50 since no information is gained...
        return 0.50
    # count number of each class...
    P = float(ys.count(positiveYValue))
    N = len(ys) - P
    # check for degenerate cases: one class missing entirely...
    if P == 0.0 or N == 0.0:
        return 0.0
    # compute ROC points...
    TP = FP = 0.0
    TPprev = FPprev = 0.0
    fprev = None
    A = 0.0
    for x, y in data:
        # Emit a ROC point only when the score changes, so tied scores
        # contribute a single diagonal segment rather than a staircase.
        if x != fprev:
            A += trapezoid_area(FP, FPprev, TP, TPprev)
            FPprev = FP
            TPprev = TP
            fprev = x
        if y == positiveYValue:
            TP += 1.0
        else:
            FP += 1.0
    # Close the curve at (N, P), then normalise to the unit square.
    A += trapezoid_area(N, FPprev, P, TPprev)
    A = A/(P*N)
    return A
def gcd(p, q):
    """Return the greatest common divisor of p and q.

    Iterative Euclid: faster than the recursive form and uses constant
    stack space.

    >>> gcd(48, 180)
    12
    """
    while q != 0:
        p, q = q, p % q
    return p
def ci(_tuple):
    """Combine indices into a single dash-separated string."""
    return "-".join(map(str, _tuple))
def calculate_pcpu(utime, stime, uptime, start_time, hertz):
    """
    Percentage CPU utilisation for a process, following ps' algorithm:
    total jiffies consumed divided by seconds of process life, clamped
    to [0, 99] and rounded to one decimal place.

    :param utime: user-mode jiffies used by the process
    :param stime: kernel-mode jiffies used by the process
    :param uptime: seconds since boot
    :param start_time: process start time, in jiffies since boot
    :param hertz: jiffies per second
    :return: scaled %cpu as a float (0.0 when the process has no lifetime)
    """
    total_time = utime + stime
    lifetime = uptime - start_time / hertz
    usage = 0
    if lifetime:
        usage = total_time * 100 / hertz / lifetime
    clamped = min(usage, 99.0)
    return round(max(clamped, 0), 1)
def pathjoin(a, *p):
    """
    Join one or more pathname components, inserting '/' as needed.
    An absolute component (starting with '/') discards everything
    joined so far.

    .. note:: Equivalent to :func:`os.path.join` on POSIX systems,
       reproduced here so that Nodular can be used in non-POSIX
       environments.
    """
    result = a
    for part in p:  # pragma: no cover
        if part.startswith(u'/'):
            result = part
        elif result == u'' or result.endswith(u'/'):
            result += part
        else:
            result = result + u'/' + part
    return result
def string_boolean(value):
    """Determine the boolean value for a specified string.

    "false", "f", "0" and "" (compared case-insensitively) are False;
    every other string is True.
    """
    return value.lower() not in ("false", "f", "0", "")
def bare_msg_type(msg_type):
    """
    Compute the bare data type; for arrays this is the underlying item type.

    :param msg_type: ROS msg type (e.g. 'std_msgs/String'), ``str`` or None
    :returns: base type ``str``, or None when *msg_type* is None
    """
    if msg_type is None:
        return None
    bracket = msg_type.find('[')
    # No '[' present means the type is already bare.
    return msg_type if bracket == -1 else msg_type[:bracket]
def switch(*pairs):
    """Cond-style dispatch helper for use in Python lambda expressions.

    Part of a poor-man's functional programming suite used to define
    several pfainspector commands as one-liners.

    :type pairs: callables
    :param pairs: predicate1, consequent1, predicate2, consequent2, ...,
        alternate.  Predicates are evaluated in order (called first when
        callable); the consequent paired with the first truthy predicate
        is returned (called first when callable).  If none match, the
        trailing alternate is returned (called first when callable).
    :raises TypeError: when fewer than three arguments are given, or
        when the count is even (no alternate present)
    """
    if len(pairs) < 3 or len(pairs) % 2 == 0:
        raise TypeError
    *clauses, alternate = pairs
    for predicate, consequent in zip(clauses[0::2], clauses[1::2]):
        if callable(predicate):
            predicate = predicate()
        if predicate:
            if callable(consequent):
                consequent = consequent()
            return consequent
    if callable(alternate):
        alternate = alternate()
    return alternate
def normalize_commit_message(commit_message):
    """
    Return a (title, body) tuple extracted from a commit message.

    The title is the first line; the body is everything after it with
    leading blank lines removed.
    """
    title, _, rest = commit_message.partition("\n")
    return title, rest.lstrip("\n")
def grid_challenge(grid):
    """Hackerrank Problem: https://www.hackerrank.com/challenges/grid-challenge/problem

    Sort each row of a square grid of characters (ascii[a-z])
    alphabetically in place, then report whether every column also reads
    in ascending alphabetical order top to bottom.  Elements may only be
    rearranged within their own row.

    Args:
        grid (list): list of equal-length strings (mutated in place:
            each row is replaced by its sorted form)
    Returns:
        str: "YES" if all columns are sorted after row-sorting, else "NO"
    """
    # Sort every row first (rows are replaced in place).
    for idx in range(len(grid)):
        grid[idx] = "".join(sorted(grid[idx]))
    # A column is sorted iff every adjacent pair is non-decreasing.
    for col in range(len(grid[0])):
        for row in range(1, len(grid)):
            if grid[row - 1][col] > grid[row][col]:
                return "NO"
    return "YES"
def input_format(line):
    """Convert a string into a list of vertices and a point.

    Receives an entry of the form '1 1, 3 2, 1 4, 3 4 | 3 3'.

    Returns:
        list = [(1,1), (3,2), (1,4), (3,4)] and a point = (3,3)
    :param line: raw input line (a trailing newline is tolerated)
    :return: (list of vertex tuples, point tuple)
    """
    polygon_part, point_part = line.rstrip('\n').split(' | ')
    # Index (rather than unpack) so trailing spaces / extra tokens in
    # the point section are tolerated.
    tokens = point_part.split(' ')
    coord = (float(tokens[0]), float(tokens[1]))
    vertices = []
    for chunk in polygon_part.split(', '):
        parts = chunk.split(' ')
        vertices.append((float(parts[0]), float(parts[1])))
    return vertices, coord
def reported_br_params(path1, path2):
    """Return (SUBBRANCH_RPATH, SUBBRANCH_FULLPATH).

    Parameters are either (OUTER_BRANCH_FULLPATH, SUBBRANCH_RPATH) or,
    for a first-level branch, (SUBBRANCH_RPATH, None).  'FULLPATH' means
    relpath from the repo root; 'RPATH' means relpath from the outer
    branch.
    """
    if path2 is None:
        # First-level branch: rpath and fullpath coincide.
        return path1, path1
    return path2, path1 + '/' + path2
def mod_inverse(x, p):
    """Given 0 <= x < p, and p prime, returns y such that x * y % p == 1.
    >>> mod_inverse(2, 5)
    3
    >>> mod_inverse(3, 5)
    2
    >>> mod_inverse(3, 65537)
    21846
    """
    # Fermat's little theorem: for prime p, x**(p-1) == 1 (mod p), so
    # x**(p-2) is the multiplicative inverse of x.  Three-argument pow
    # performs the modular exponentiation efficiently.
    return pow(x, p - 2, p)
def _best_streak(year_of_commits):
"""
Return our longest streak in days, given a yeare of commits.
"""
best = 0
streak = 0
for commits in year_of_commits:
if commits > 0:
streak += 1
if streak > best:
best = streak
else:
streak = 0
return best |
def reverse(x):
    """Return *x* in reverse order, preserving its sequence type."""
    # A [::-1] slice reverses any sequence (list, str, tuple, ...).
    return x[::-1]
def get_sentences(text):
    """ (str) -> list
    The function takes as input a string. It returns a list of strings
    each representing one of the sentences from the input string.
    Sentences are split on '.', '!' and '?'; the terminator itself is
    dropped and surrounding whitespace is stripped.
    >>> t = "No animal must ever kill any other animal. All animals are equal."
    >>> get_sentences(t)
    ['No animal must ever kill any other animal', 'All animals are equal']
    >>> t = "Hello! How are you? I'm doing fine, thank you."
    >>> get_sentences(t)
    ['Hello', 'How are you', "I'm doing fine, thank you"]
    >>> t = "A sentence without ending punctuation"
    >>> get_sentences(t)
    ['A sentence without ending punctuation']
    >>> t = "Hey!I am Groot.Who are you?" # no space behind punctuation
    >>> get_sentences(t)
    ['Hey', 'I am Groot', 'Who are you']
    >>> t = ""
    >>> get_sentences(t)
    []
    >>> t = " "
    >>> get_sentences(t)
    []
    """
    # initialize variables
    sentences = []
    start = 0
    # locate the punctuation that separates sentences
    # by iterating through each character
    for index in range(len(text)):
        if text[index] in ".!?":
            # add the sentence to the list of sentences
            # (skip empty spans produced by consecutive terminators)
            sentence = text[start : index]
            if sentence != "":
                sentences.append(sentence.strip())
            # mark the start of the next sentence
            start = index + 1
    # in case the text does not end with terminating punctuation
    # (a trailing quote or space also counts as "already terminated")
    if (len(text) >= 1) and (text[-1] not in ".!?\" "):
        sentences.append(text[start : ])
    # return the list of sentences
    return sentences
def handler(event: dict, context: object) -> dict:
    """
    AWS Lambda entry point returning a static 200 "Hello, World!" reply.

    :param event: event data
    :type event: dict
    :param context: runtime information of the AWS Lambda function
    :type context: LambdaContext object
    :return: API-Gateway-style response dict
    """
    return {
        'isBase64Encoded': False,
        'statusCode': 200,
        'headers': {},
        'multiValueHeaders': {},
        'body': 'Hello, World!',
    }
def unpack_problem(message):
    """Extract and return the 'problem' field carried in *message*."""
    return message['problem']
def dot(i1, i2):
    """Dot product of two numeric iterables.

    Accepts any iterables, not only indexable sequences: pairs are
    consumed in lockstep with zip and the products summed (a generator
    also avoids the intermediate list the original built).

    :param i1: first iterable of numbers
    :param i2: second iterable of numbers
    :return: sum of elementwise products
    """
    return sum(a * b for a, b in zip(i1, i2))
def se_to_varcope(se):
    """Convert standard error values to sampling variance.

    .. versionadded:: 0.0.3

    Parameters
    ----------
    se : array_like
        Standard error of the sample parameter

    Returns
    -------
    array_like
        Sampling variance of the parameter.

    Notes
    -----
    Sampling variance is simply the standard error squared.
    """
    return se ** 2
def split(str, sep):
    """
    Provides a filter to interface with the string.split() method.

    :param str: the string to split (NOTE: the parameter name shadows
        the builtin ``str``; kept for interface compatibility)
    :param sep: separator to split on
    :return: list of substrings
    """
    return str.split(sep)
def guess_n_initial_points(params):
    """Guess a good value for n_initial_points given params.

    Uses twice the parameter count, capped at 10, but never fewer
    points than there are parameters.
    """
    n_params = len(params)
    capped_double = min(2 * n_params, 10)
    return max(n_params, capped_double)
def get_routable_nodes_and_ways(nodes, highways):
    """
    Extract the routable nodes and ways from all nodes and highways.

    A way is routable only when every one of its node ids resolves in
    *nodes*; nodes that do resolve are collected even when their way is
    ultimately rejected.

    :param nodes: all nodes as a dict keyed by node id
    :param highways: all ways that have the 'highway' tag, as a list
    :return: routable_nodes (dict), routable_ways (dict keyed by way id)
    """
    routable_nodes = dict()
    routable_ways = dict()
    for way in highways:
        complete = True
        for node_id in way.nodes:
            found = nodes.get(node_id, None)
            if found is None:
                complete = False
            elif node_id not in routable_nodes:
                routable_nodes[node_id] = found
        if complete:
            routable_ways[way.id] = way
    return routable_nodes, routable_ways
def create_layer(name, description, domain, techniques, version):
    """Build an ATT&CK Navigator layer dict.

    The score gradient spans the min/max technique scores; when every
    technique carries the same score the gradient is re-anchored at 0
    with a white-to-blue ramp.  *version* is reduced to its major
    component (a leading 'v' is stripped first).
    """
    scores = [technique["score"] for technique in techniques]
    low = min(scores) if scores else 0
    high = max(scores) if scores else 100
    gradient = ["#ACD0E6", "#08336E"]
    if high == low:
        # All techniques share one score count: anchor gradient at zero.
        low = 0
        gradient = ["#ffffff", "#66b1ff"]
    # Reduce the version string to just its major number.
    major_version = version[1:] if version.startswith("v") else version
    major_version = major_version.split(".")[0]
    return {
        "name": name,
        "versions": {
            "navigator": "4.3",
            "layer": "4.2",
            "attack": major_version,
        },
        "sorting": 3,  # descending order of score
        "description": description,
        "domain": domain,
        "techniques": techniques,
        "gradient": {
            "colors": gradient,
            "minValue": low,
            "maxValue": high,
        },
    }
def outer2D(v1, v2):
    """Magnitude of the outer (cross) product of two 2D vectors v1 and v2."""
    x1, y1 = v1[0], v1[1]
    x2, y2 = v2[0], v2[1]
    return x1 * y2 - y1 * x2
def reset_advanced(n, k, decay_factor, algorithm):
    """Callback for resetting advanced settings.

    *n* (the callback trigger count) is intentionally unused; the
    remaining settings are echoed back as a list.
    """
    del n  # unused trigger argument
    return [k, decay_factor, algorithm]
def normalize_ped_delay_hours(study_hours):
    """
    Normalize the given ped delay study hours, converting them to
    `StudyHours` enum values (see `bdit_flashcrow`, in particular
    `@/lib/Constants`): 'pedestrian' -> SCHOOL, 'routine' -> ROUTINE,
    anything else -> OTHER.  Comparison is case-insensitive.
    """
    mapping = {'pedestrian': 'SCHOOL', 'routine': 'ROUTINE'}
    return mapping.get(study_hours.lower(), 'OTHER')
def replicaset_statuses(members_document, module):
    """
    Return the list of member statuses.

    :param members_document: mapping of member name -> status value
    :param module: unused; kept for interface compatibility
    :return: list of status values, in mapping iteration order
    """
    # The original iterated keys only to re-index the dict per item;
    # .values() yields the same statuses directly.
    return list(members_document.values())
def to_bottom_right_memo(grid_size=3, off_limits=()):
    """Memoized reachability check from (grid_size, grid_size) to (0, 0).

    :param grid_size: height and width of the grid
    :param off_limits: collection of (x, y) points that may not be entered
    :return: True when the origin is reachable moving only toward it,
        else False
    """
    # A set gives O(1) membership tests; the original used a list,
    # making every failed-point lookup O(n).
    failed = set()

    def to_bottom_right_helper(m, n, moves):
        """Return True when (0, 0) is reachable from (m, n).

        :param m: x position
        :param n: y position
        :param moves: path taken so far (e.g. ["right", "down"])
        """
        if (m, n) in off_limits:
            return False
        elif m < 0 or n < 0:
            return False
        elif (m, n) in failed:
            return False
        elif (m, n) == (0, 0) or to_bottom_right_helper(m-1, n, moves + ["right"]) or \
                to_bottom_right_helper(m, n-1, moves + ["down"]):
            return True
        else:
            # Memoize dead ends so each point is explored at most once.
            failed.add((m, n))
            return False

    return to_bottom_right_helper(grid_size, grid_size, [])
def file2tuple(thing):
    """
    Convert a filename into a tuple (basename, file contents).

    Input can be a filename, or a (filename, contents) tuple which is
    passed through with the filename reduced to its final path segment.

    :param thing: filename string, or (filename, contents) tuple
    :return: (basename, contents) tuple
    :raises OSError: when a filename is given and cannot be read
    """
    if isinstance(thing, tuple):  # filename and content already supplied
        filename, contents = thing
    else:  # it is a filename: read the file
        filename = thing
        # `with` guarantees the handle is closed even when read() raises
        # (the original leaked the handle on error).
        with open(filename) as handle:
            contents = handle.read()
    return (filename.split('/')[-1], contents)
def p2f(x):
    """Convert a percentage string like '42%' to a float fraction."""
    numeric = x.strip('%')
    return float(numeric) / 100
def modulo_div(arg1, arg2):
    """ (float, float) -> float

    Modulo division of two numbers; returns arg1 % arg2.

    On failure a descriptive string is returned instead of raising:
    type mismatches and division by zero each produce a specific
    message, and any other error a generic one.
    """
    try:
        return arg1 % arg2
    except ZeroDivisionError as zero_error:
        return 'Unsupported operation: {0} % {1} -> {2}'.format(arg1, arg2, zero_error)
    except TypeError:
        return 'Unsupported operation: {0} % {1} '.format(type(arg1), type(arg2))
    except Exception as other_error:
        return 'Oops... {0}'.format(other_error)
def ranges_intersect(pair1, pair2, buffer=0):
    """True when the (start, end) ranges overlap by more than *buffer*."""
    overlaps_left = pair1[0] < pair2[1] - buffer
    overlaps_right = pair1[1] > pair2[0] + buffer
    return overlaps_left and overlaps_right
def percentage(part, total):
    """Percentage of *part* in *total*, rounded to 2 decimals.

    Returns None when *total* is zero (undefined percentage).
    """
    if total == 0:
        return None
    return round(part * 100 / float(total), 2)
def stringify(obj):
    """Convert any object into a string; a list is concatenated element-wise."""
    if isinstance(obj, list):
        return ''.join(obj)
    return str(obj)
def roman_to_int(s):
    """
    Convert a Roman numeral (symbols I, V, X, L, C, D, M) to an integer.

    :param s: string of Roman numeral symbols
    :return: integer value
    """
    values = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
    total = 0
    previous = 0
    # Scan right-to-left: a symbol strictly smaller than its right
    # neighbour is subtractive (e.g. the I in IV).
    for symbol in reversed(s):
        current = values[symbol]
        if current < previous:
            total -= current
        else:
            total += current
        previous = current
    return total
def plural(count=0):
    """Return the plural suffix 's' unless *count* is exactly 1."""
    return '' if count == 1 else 's'
def flatten(L):
    """Flatten an iterable of iterables into a single list."""
    flattened = []
    for inner in L:
        flattened.extend(inner)
    return flattened
def GetPatchJobUriPath(project, patch_job):
    """Returns the URI path of an osconfig patch job."""
    segments = ['projects', project, 'patchJobs', patch_job]
    return '/'.join(segments)
def visible_len(value: str) -> int:
    """The number of visible characters in a string.
    Use this to get the length of strings that contain escape sequences.
    """
    # Heuristic: each ESC (\x1b) is assumed to begin a sequence of 4.5
    # characters on average (e.g. '\x1b[31m'), with bold sequences
    # ('\x1b[1m') one character shorter -- hence the '+ count of \x1b[1'
    # compensation before truncating to int.
    # NOTE(review): this only holds for the fixed-width SGR sequences the
    # callers emit -- confirm against the actual escapes used.
    return int(len(value) - value.count('\x1b') * 4.5 + value.count('\x1b[1'))
def average_tweets_per_user(tweets, users_with_freq):
    """
    Return the average number of tweets per user from a list of tweets.

    :param tweets: the list of tweets
    :param users_with_freq: a Counter of usernames with the number of
        tweets in *tweets* from each user
    :return: float, average number of tweets per user
    """
    return len(tweets) / len(users_with_freq)
def validate_choice_arg(arg: dict, value: str) -> bool:
    """
    Check that *value* satisfies the constraints of a choice argument
    (from api.arg_type): it must be one of the declared "choices".

    :param arg: the choice argument definition
    :param value: the value to validate
    :return: True if the value is an allowed choice, else False
    """
    allowed = arg["choices"]
    return value in allowed
def quadKey_to_Bing_URL(quadKey, api_key):
    """Create a URL linking to a Bing tile server for the given quadkey."""
    base = "http://t0.tiles.virtualearth.net/tiles/a{}.jpeg"
    query = "g=854&mkt=en-US&token={}"
    return base.format(quadKey) + "?" + query.format(api_key)
def dsfr_summary(items: list) -> dict:
    """
    Return a summary item. Takes a list parameter with the structure:
    items = [{ "link": "item1", "label": "First item title"}, {...}]

    **Tag name**::
        dsfr_summary
    **Usage**::
        {% dsfr_summary items %}
    """
    payload = {"items": items}
    return {"self": payload}
def hex_to_ip(s):
    """
    Convert a hex string to dotted-decimal form, e.g. '7f000001' => '127.0.0.1'.

    :param s: hex string, two characters per octet
    :return: dotted-decimal string, or '' when *s* cannot be parsed
    """
    try:
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit) to the errors parsing can raise.
        octets = [int(s[i:i + 2], 16) for i in range(0, len(s), 2)]
        return '.'.join(str(octet) for octet in octets)
    except (ValueError, TypeError):
        return ''
def mean(data):
    """Calculate the arithmetic mean of a list of numbers.

    Parameters:
        *data*
            a list of numbers whose mean to calculate (must be non-empty)
    """
    total = float(sum(data))
    return total / len(data)
def leapyear(year):
    """
    Return 1 if the provided year is a leap year, 0 otherwise.

    Gregorian rule: divisible by 4, except century years, unless also
    divisible by 400.  (Flattens the original nested if/else pyramid
    into one boolean expression.)
    """
    is_leap = year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
    return 1 if is_leap else 0
def is_hcj(character):
    """Test if a single character is a HCJ character.

    HCJ is the U+3131..U+318E range, excluding the code point U+3164.
    """
    code_point = ord(character)
    return 0x3131 <= code_point <= 0x318E and code_point != 0x3164
def RGBToInteger(r, g, b):
    """Packs three 8-bit colour channels into one 24-bit integer (0xRRGGBB)."""
    return (r << 16) | (g << 8) | b
def get_port_from_environment_vars(env_name, pod_envs):
    """Return the port value matching *env_name*'s prefix.

    Searches *pod_envs* for a key that shares env_name's prefix
    (everything before the final underscore) and whose final segment is
    'port' (case-insensitively).

    :param env_name: environment variable name supplying the prefix
    :param pod_envs: mapping of pod environment variables
    :return: the matching value, or None when no key qualifies
    """
    def strip_last_segment(name):
        return '_'.join(name.split('_')[:-1])

    wanted_prefix = strip_last_segment(env_name)
    for key, value in pod_envs.items():
        last_segment = key.split('_')[-1]
        if last_segment.lower() == 'port' and strip_last_segment(key) == wanted_prefix:
            return value
    return None
def and_sum (phrase):
    """Intersect the elements of *phrase*, each treated as a set.

    Returns the set of items common to every element of *phrase*, or
    the empty set when *phrase* is empty.

    NOTE(review): the original docstring claimed "TRUE iff every element
    is TRUE" (a boolean AND), but the implementation computes a set
    intersection -- confirm which behaviour callers expect.
    """
    # Seed with the first element; an empty phrase intersects to the
    # empty set.
    if len(phrase) > 0:
        total = set(phrase[0])
    else:
        total = set()
    # Narrow the running intersection with every element (the first is
    # intersected with itself, which is a no-op).
    for x in phrase:
        total = total.intersection(x)
    return total
def ecl_valid(passport):
    """ Check that ecl is valid
    ecl (Eye Color) - exactly one of: amb blu brn gry grn hzl oth.
    :param passport: passport mapping with an 'ecl' key
    :return: boolean
    """
    return passport['ecl'] in ('amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth')
def unique_ordered(values):
    """Return the unique values of *values* without changing their order
    (list(set(.)) would scramble the order randomly).
    """
    # dict preserves insertion order (Python 3.7+) and silently drops
    # duplicate keys, giving an order-preserving de-duplication.
    return list(dict.fromkeys(values))
def search_for_item(project):
    """Generate a string search key for a project (currently its title)."""
    title = project['attributes']['title']
    return u' '.join([title])
def naive_ascii_encode(string_to_encode: str, string_length: int) -> str:
    """
    Encode an ASCII string with naive ascii encoding: each character is
    replaced by the decimal string of its code point and the pieces are
    concatenated.

    :param string_to_encode: the input string
    :param string_length: number of leading characters to encode
        (values beyond the string's length are truncated)
    :return: the encoded string
    """
    # join over a generator replaces the original per-character string
    # concatenation, slicing, and manual index loop.
    return ''.join(str(ord(char)) for char in string_to_encode[:string_length])
def dna_to_rna(seq):
    """(str) -> str

    Transcribe a DNA sequence: upper-case it, then replace every T with U.
    """
    return seq.upper().replace("T", "U")
def comment_out(text, comment="#"):
    """
    Comment out some text, prefixing each line with the given comment
    character(s).  Blank lines become a bare comment marker; the whole
    input and the result are stripped of surrounding whitespace.
    """
    commented = [comment + " " + line if line else comment
                 for line in text.strip().split("\n")]
    return ("\n".join(commented) + "\n").strip()
def parse_signing_keys(raw):
    """Parse a raw file into a dictionary of fingerprint and keys

    :param raw: the raw file contents as a string
    :return: dictionary mapping the fingerprints to a RSA key in pem format
    :raises ValueError: when a second 'fingerprint' line appears before
        the previous one's 'dir-signing-key' block
    :raises IndexError: if a key block is truncated before its PEM footer
    """
    assert raw is not None
    lines = raw.split('\n')
    count = 0
    keys = {}
    # Fingerprint seen on a previous line, still awaiting its key block.
    next_fingerprint = None
    while count < len(lines):
        if lines[count].startswith('fingerprint'):
            # Two fingerprints in a row means the file is malformed.
            if next_fingerprint is not None:
                raise ValueError("File has not the expected format")
            else:
                # The fingerprint is the second space-separated token.
                next_fingerprint = lines[count].split(" ")[1]
        elif lines[count] == 'dir-signing-key':
            # Accumulate the PEM body verbatim (newline-joined) up to
            # and including the footer line.
            count += 1
            key = lines[count] + '\n'
            count += 1
            while lines[count] != '-----END RSA PUBLIC KEY-----':
                key += lines[count] + '\n'
                count += 1
            key += '-----END RSA PUBLIC KEY-----'
            keys[next_fingerprint] = key
            next_fingerprint = None
        count += 1
    return keys
def sequence_to_one_hot(sequence: list) -> list:
    """One-hot encode a sequence over the alphabet 'acgt'.

    Each letter becomes a length-4 indicator vector, so an input of
    length L yields an L x 4 matrix, such that a = [1,0,0,0],
    c = [0,1,0,0], g = [0,0,1,0], t = [0,0,0,1].
    """
    alphabet = 'acgt'
    # map each symbol to its column index
    position = {letter: idx for idx, letter in enumerate(alphabet)}
    matrix = []
    for symbol in sequence:
        row = [0] * len(alphabet)
        row[position[symbol]] = 1
        matrix.append(row)
    return matrix
def convert_RGB_to_BGR(color):
    """Converts a RGB color to a BGR color.
    :param color: The RGB color.
    :type color: Tuple[int, int, int]
    :return: The corresponding BGR color.
    :rtype: Tuple[int, int, int]
    """
    blue, green, red = color[2], color[1], color[0]
    return blue, green, red
def find_flat_segment(polygon):
    """
    Find beginning and end indexes of a horizontal section in the polygon.
    This section can be made of several consecutive segments.
    If the polygon contains several flat sections, this function only
    identifies the first one.
    The polygon is a list of 2D coordinates (x, y).

    Returns [-1, -1] when no flat segment exists.
    """
    start = -1
    end = -1
    for i, (a, b) in enumerate(zip(polygon[:-1], polygon[1:])):
        if a[1] == b[1]:
            if start < 0:
                start = i
            end = i + 1
        elif start >= 0:
            # The first flat run has ended; stop so that later, separate
            # flat sections don't extend `end` past the first one (the
            # original code kept updating `end` for every flat segment).
            break
    return [start, end]
def sum_divisors(n):
    """
    Returns the sum of a number's proper divisors.
    For example sum_divisors(28) = 1 + 2 + 4 + 7 + 14 = 28
    """
    # 0 and 1 have no proper divisors; the original returned 2 for n=1
    # because it started from s=1 and then counted sqrt(1) as a divisor.
    if n < 2:
        return 0
    s = 1  # 1 divides every n >= 2
    limit = int(n ** 0.5)
    if limit ** 2 == n:
        # perfect square: count sqrt(n) once, then exclude it from the loop
        s += limit
        limit -= 1
    for i in range(2, limit + 1):
        if n % i == 0:
            # add the divisor and its cofactor in one step
            s += i + n // i
    return s
def get_collection_link(db_id, collection_id):
    """Create and return collection link based on values passed in"""
    # Link to the relevant CosmosDB Container/Document Collection
    return "/".join(("dbs", db_id, "colls", collection_id))
def reddening_correction_sf11(extinction_r):
    """ Compute the reddening values using the SF11 correction set.
    Parameters
    ----------
    extinction_r : array
        The uncorrected extinction values in the r-band.
    Returns
    -------
    tuple of arrays
        Corrected extinction values (A_u, A_g, A_r, A_i, A_z) for the
        u, g, r, i and z bands respectively.
    """
    # Per-band SF11 coefficients multiplying E(B-V), in u, g, r, i, z order.
    band_coefficients = (4.239, 3.303, 2.285, 1.698, 1.263)
    E_BV = extinction_r / 2.751
    return tuple(E_BV * coeff for coeff in band_coefficients)
def is_char(obj):
    """Return True if obj is a char (a str of length <= 1)."""
    if not isinstance(obj, str):
        return False
    return len(obj) <= 1
def out(coords, dims):
    """ Adjust coordinates against their corresponding dimensions.

    Per axis: a coordinate larger than its dimension is kept unchanged,
    a negative coordinate becomes abs(coord) + dim, and any other
    (in-range) coordinate is replaced by the dimension itself.
    Eg. For coords = (1,3,-2) and dims=(2,2,2), (2,3,4) is returned. """
    return tuple(
        c if c > d else (d - c if c < 0 else d)
        for c, d in zip(coords, dims)
    )
def set_value_rec(branch, keys, value):
    """
    Recursivelly traverse `branch` until the end of `keys` is
    reached and set the value.

    Intermediate dictionaries are created on demand (via ``setdefault``)
    for keys that are not yet present. Returns the (mutated) `branch`.

    NOTE: `keys` is consumed in place — ``pop(0)`` removes each
    traversed key, so the caller's list is mutated.

    :Parameters:
        - `branch`: dictionary
        - `keys`: a list of keys that define path to the key to be set
        - `value`: a value to store
    """
    # Base case: one key left — assign directly at this level.
    if len(keys) == 1:
        branch[keys[0]] = value
        return branch
    # Consume the next path component (mutates the caller's list).
    key = keys.pop(0)
    res = set_value_rec(branch.setdefault(key, {}), keys,
                        value)
    # `res` is the same dict object returned by setdefault above, so this
    # update is effectively a no-op self-merge; kept for byte-identical
    # behaviour.
    branch[key].update(res)
    return branch
def generate_fullname_for(o):
    """Produce a fully qualified class name for the specified instance.
    :param o: The instance to generate information from. Any falsy value
        (None, 0, empty containers) yields the string 'None'.
    :return: A string providing the package.module information for the
        instance.
    """
    if not o:
        return 'None'
    cls = o.__class__
    # Builtins (and module-less classes) are reported without a prefix.
    if cls.__module__ is None or cls.__module__ == str.__class__.__module__:
        return cls.__name__
    return f'{cls.__module__}.{cls.__name__}'
def find_category_subsets(subgraph_collection):
    """Finds subgraphs which are subsets of other subgraphs to remove redundancy, when specified.
    :param subgraph_collection: A dictionary of subgraph objects (keys: subgraph name, values: subgraph object).
    :return: A dictionary relating which subgraph objects are subsets of other subgraphs (keys: subset subgraph, values: superset subgraphs).
    :rtype: :py:obj:`dict`
    """
    is_subset_of = dict()
    for subgraph in subgraph_collection.values():
        # Only subgraphs whose category node has exactly one child qualify.
        children = subgraph.category_node.child_node_set
        if len(children) != 1:
            continue
        # Hoist the repeated next(iter(...)).id — the original recomputed
        # it up to six times per inner iteration.
        subset_id = next(iter(children)).id
        for candidate in subgraph_collection.values():
            candidate_children = candidate.category_node.child_node_set
            if len(candidate_children) != 1:
                continue
            superset_id = next(iter(candidate_children)).id
            # A subgraph is a subset of another if its root appears in the
            # other's root-id mapping (and they are distinct subgraphs).
            if subset_id != superset_id and subset_id in candidate.root_id_mapping:
                # setdefault replaces the original try/except KeyError grouping.
                is_subset_of.setdefault(subset_id, set()).add(superset_id)
    return is_subset_of
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.