content stringlengths 42 6.51k |
|---|
def get_resource_names_output(resources):
    """Build the output dict listing the names of all resources to be created.

    :param resources: iterable of dicts, each with a 'name' key.
    :return: dict of the form {'name': 'resources', 'value': [names...]}.
    """
    return {
        'name': 'resources',
        'value': [entry['name'] for entry in resources],
    }
def tuple_add(tuple1, tuple2):
    """Return the element-wise sum of two 2-tuples."""
    first = tuple1[0] + tuple2[0]
    second = tuple1[1] + tuple2[1]
    return first, second
def fib(n, start=(0, 1)):
    """Iterative Fibonacci function.

    Advances the pair `start` by `n` steps and returns the first element
    of the resulting pair, so with the default start, fib(k) is the k-th
    Fibonacci number (fib(0) == 0, fib(1) == 1, ...).

    Args:
        n: number of steps to advance (non-negative).
        start: initial (a, b) pair.

    Returns:
        The Fibonacci value after n steps. (Bug fix: the old code returned
        the loop counter `n`, which is always 0 after the loop.)
    """
    a, b = start
    while n > 0:
        a, b = b, a + b
        n -= 1
    return a
def reverse_neighbour_table(packages):
    """
    Build a neighbour table for reverse dependencies.

    Returns a dict mapping each package name to the set of names of the
    packages that depend on it.
    """
    table = {}
    for package in packages:
        # Ensure every package gets an entry even when nothing depends on it.
        if not package.name in table: table[package.name] = set()
        for dependency in package.alldepends():
            if not dependency.name in table: table[dependency.name] = set()
            # NOTE(review): the loop variable `provides` is never used — the
            # same `package.name` is added once per provided name (idempotent
            # on a set). Possibly `.add(provides)` was intended; confirm.
            for provides in [package.name] + [x.name for x in package.provides()]:
                table[dependency.name].add(package.name)
    return table
def category_bag_of_words(restaurants):
    """
    Build a bag-of-words count of restaurant categories.

    Parameters:
        restaurants - a list of restaurant dictionary objects, each with a
            "categories" iterable.
    Returns:
        dict mapping category -> number of occurrences across restaurants.
    """
    counts = {}
    for entry in restaurants:
        for category in entry["categories"]:
            if category in counts:
                counts[category] += 1
            else:
                counts[category] = 1
    return counts
def _entities_from_messages(messages):
    """Return all distinct entity names occurring in at least one message."""
    seen = set()
    for message in messages:
        for entity in message.data.get("entities", []):
            seen.add(entity["entity"])
    return list(seen)
def groups_score(str_group):
    """Score a stream of '{...}' groups and '<...>' garbage (AoC-style).

    >>> groups_score('<>')
    {'score': 0, 'groups': 0, 'garbage': 0}
    >>> groups_score('<<<<>')
    {'score': 0, 'groups': 0, 'garbage': 3}
    >>> groups_score('<{!>}>')
    {'score': 0, 'groups': 0, 'garbage': 2}
    >>> groups_score('<!!>')
    {'score': 0, 'groups': 0, 'garbage': 0}
    >>> groups_score('<!!!>>')
    {'score': 0, 'groups': 0, 'garbage': 0}
    >>> groups_score('<{o"i!a,<{i<a>')
    {'score': 0, 'groups': 0, 'garbage': 10}
    >>> groups_score('{}')
    {'score': 1, 'groups': 1, 'garbage': 0}
    >>> groups_score('{{{}}}')
    {'score': 6, 'groups': 3, 'garbage': 0}
    >>> groups_score('{{},{}}')
    {'score': 5, 'groups': 3, 'garbage': 0}
    >>> groups_score('{{{},{},{{}}}}')
    {'score': 16, 'groups': 6, 'garbage': 0}
    >>> groups_score('{<a>,<a>,<a>,<a>}')
    {'score': 1, 'groups': 1, 'garbage': 4}
    >>> groups_score('{{<a>},{<a>},{<a>},{<a>}}')
    {'score': 9, 'groups': 5, 'garbage': 4}
    >>> groups_score('{{<a!>},{<a!>},{<a!>},{<ab>}}')
    {'score': 3, 'groups': 2, 'garbage': 17}
    """
    in_garbage = False
    current_level = 0
    groups = 0
    score = 0
    garbage = 0
    n = 0
    while n < len(str_group):
        # '!' cancels the next character (checked first, even outside garbage).
        if str_group[n] == '!':
            n += 2
            continue
        # '>' always terminates garbage.
        if str_group[n] == '>':
            in_garbage = False
            n += 1
            continue
        # Inside garbage every other character just counts.
        if in_garbage:
            n += 1
            garbage += 1
            continue
        if str_group[n] == '<':
            in_garbage = True
            n += 1
            continue
        # A group scores its nesting depth.
        if str_group[n] == '{':
            groups += 1
            current_level += 1
            score += current_level
            n += 1
            continue
        if str_group[n] == '}':
            current_level -= 1
            n += 1
            continue
        n += 1
    return {'score': score, 'garbage': garbage, 'groups': groups}
def to_money(money_value: float) -> str:
    """Format a float as money: thousands separators, two decimal places."""
    return f'{money_value:,.2f}'
def draw_gif_all_frames(num_frames, start_frame, frame_tracker_groups, idsw_val):
    """Collect the objects whose id-switch box is drawn on each gif frame.

    Args:
        num_frames: number of frames in the gif.
        start_frame: index of the first frame in frame_tracker_groups.
        frame_tracker_groups: mapping of frame index -> list of objects,
            where each object's [1] element is a track id.
        idsw_val: pair of track ids; idsw_val[0] is matched on every frame
            except the last, idsw_val[1] on the last frame only.

    Returns:
        list: objects selected for drawing.
    """
    last = num_frames - 1
    selected = []
    for offset in range(num_frames):
        for obj in frame_tracker_groups[start_frame + offset]:
            wanted = idsw_val[1] if offset == last else idsw_val[0]
            if obj[1] == wanted:
                selected.append(obj)
    return selected
def command_eval(expr: str):
    """Evaluates a given expression.

    SECURITY: eval() executes arbitrary Python code — never pass untrusted
    input to this function. Left as-is because it is an explicit eval command.
    """
    # Command callbacks can return values which will be passed to the caller of `dispatch()`
    return eval(expr)
def parse_script_interpreter(source):
    """
    Extract the script interpreter and its sole argument from the module
    source code.

    :returns:
        Tuple of `(interpreter, arg)`, where `interpreter` is the script
        interpreter and `arg` is its sole argument if present, otherwise
        :py:data:`None`.
    """
    # Linux requires the first 2 bytes to be '#!' with no whitespace; see
    # binfmt_script.c.
    if not source.startswith('#!'):
        return None, None
    # Find the terminating newline within binprm_buf (128 bytes); fall back
    # to the buffer end if absent.
    end = source.find('\n', 0, 128)
    if end == -1:
        end = min(128, len(source))
    # Split once on the first run of whitespace; with no whitespace the
    # result holds only the interpreter path.
    tokens = source[2:end].strip().split(None, 1)
    if len(tokens) == 1:
        return tokens[0], None
    return tokens[0], tokens[1]
def format_tags(tag_string):
    """
    Parse a comma-separated tag string into a clean list of tags.

    :param tag_string: String of tags comma separated
    :return: list of lowercased, stripped, non-empty tag strings
    """
    # Nothing but commas and whitespace means there are no tags at all.
    if not tag_string.strip().replace(',', ''):
        return []
    # Bug fix: the old code returned a lazy `filter` object (single-use,
    # compares unequal to lists) even though the docstring promises a list.
    return [tag.strip() for tag in tag_string.lower().split(',') if tag.strip()]
def unwrap_classes(cls: 'tuple') -> 'tuple':
    """
    Returns a tuple of classes where all dependencies (superclasses) of tuple `cls` are also listed.
    """
    # Worklist pattern: `unwrapped` is intentionally extended while being
    # iterated, so superclasses appended later are themselves expanded.
    unwrapped = list(cls)
    for x in unwrapped:
        # NOTE(review): assumes each class exposes a `.super` sequence of
        # superclasses — project-specific attribute, confirm.
        if len(x.super) > 0:
            supcls = unwrap_classes(x.super)
            for y in supcls:
                # Membership is only tested against the ORIGINAL `cls`, not
                # against `unwrapped`, so duplicates can appear — verify
                # whether that is intended.
                if y not in cls:
                    unwrapped.append(y)
    return tuple(unwrapped)
def _get_command_prefix(properties):
"""
If multiple commands are registered with the same name, attempt to construct a unique
prefix from other information in the command's properties dictionary to distinguish one
command from another. Uses the properties' ``app`` and/or ``group`` keys to create the
prefix.
:param dict properties: Arbitrary key/value information related to a registered command.
:returns: A unique identifier for the command as a str.
"""
prefix_parts = []
if properties.get("app"):
# First, distinguish commands by app name.
prefix_parts.append(properties["app"].instance_name)
if properties.get("group"):
# Second, distinguish commands by group name.
prefix_parts.append(properties["group"])
return ":".join(prefix_parts) |
def get_date_time_from_timestamp(timestamp):
    """Render a dotted timestamp as a date-time string.

    The timestamp must have the format 'year.month.day.hour.minutes';
    anything with fewer than five dot-separated fields is returned unchanged.

    Example:
        '2017.12.20.10.31' -> '2017/12/20 10:31'
    """
    fields = timestamp.split('.')
    if len(fields) < 5:
        return timestamp
    return '{0}/{1}/{2} {3}:{4}'.format(fields[0], fields[1], fields[2],
                                        fields[3], fields[4])
def get_url(name):
    """Return the Project Gutenberg text-file URL for a known work.

    Raises KeyError for unknown names.
    """
    known = {
        'frankenstein': 'https://www.gutenberg.org/files/84/84-0.txt',
        'shakespeare': 'https://www.gutenberg.org/files/100/100-0.txt',
    }
    key = name.lower()
    return known[key]
def convert_country(name):
    """Convert a country name from urlcrazy format to dnstwist format.

    Title-cases every space-separated word, e.g. 'UNITED STATES' ->
    'United States'.
    """
    # str.title() already lowercases the non-initial letters of each word,
    # so the previous .lower() call was redundant.
    return " ".join(word.title() for word in name.split(" "))
def level_prefix(level, nub='+ '):
    """Determine the line prefix for the given indentation level.

    Args:
        level: [int] Depth to indent; 0 (or falsy) yields an empty prefix.
        nub: [string] trailing text at the end of the indent.
    """
    if not level:
        return ''
    preamble = ' |' * (level - 1)
    return '{preamble} {nub}'.format(preamble=preamble, nub=nub)
def trapezoidal_wing_area(c_r, c_t, b):
    """
    Area of a trapezoidal wing from root chord, tip chord and span.

    >>> trapezoidal_wing_area(0.5,0.2,2)
    0.7
    """
    # Mean chord times span (operation order kept for float stability).
    area = (c_r + c_t) * b / 2
    return area
def cvt(item) :
"""Attempt to convert string to value it represents"""
item = item.strip() # remove leading and trailing whitespace
if not item : return ""
if item[0] == "$": return float(item.strip("$,"))
if item.strip("0123456789,.") : return item
item = item.strip(",")
try: return int(item) # try to convert it to an integer
except ValueError:
return float(item) |
def get_markdown_title(markdown_src):
    """
    Get the title of a Markdown document, or None.

    The title is an H1 ('# ...') that occurs before any other content:
    iterate through the lines, stop at the first non-whitespace content,
    and return it if it is a title.
    """
    lines = markdown_src.replace('\r\n', '\n').replace('\r', '\n').split('\n')
    for line in lines:
        line = line.strip()
        if not line:
            continue
        if not line.startswith('# '):
            return None
        # Bug fix: slicing off the '# ' marker instead of lstrip('# '),
        # which also stripped '#' characters belonging to the title itself
        # (e.g. '# #1 hit' used to become '1 hit').
        return line[2:].strip()
    return None
def gnomad_filtered_func(raw_value):
    """Convert a gnomAD v3 (GRCh38) FILTER value back to a bool.

    True means the variant was filtered (any value other than None/'PASS').
    """
    passing = raw_value is None or raw_value == "PASS"
    return not passing
def is_valid_float(value):
    """Return float(value) if it is a valid cast to float, otherwise None."""
    try:
        return float(value)
    except (ValueError, TypeError):
        # ValueError: unparsable string. TypeError: non-numeric object such
        # as None — previously this escaped the handler and crashed callers.
        return None
def collectReviewTeams(reviewers):
    """collectReviewTeams(reviewers) -> dictionary

    Transform a mapping of file id -> {user: ...} (as returned by
    getReviewersAndWatchers() or getPendingReviewers()) into a dictionary
    mapping frozensets of users ("teams") to the sets of files those users
    share review responsibility for. The same user may appear in several
    teams, and the same file under several keys.

    A None key, if present, maps to the files that have changes with no
    assigned reviewer.
    """
    teams = {}
    for file_id, file_reviewers in reviewers.items():
        if None in file_reviewers:
            teams.setdefault(None, set()).add(file_id)
        team = frozenset(user for user in file_reviewers if user)
        if team:
            teams.setdefault(team, set()).add(file_id)
    return teams
def eliminate_rows(sudoku):
    """
    Remove already-certain values from the candidate lists of every row.

    A cell with exactly one candidate is "certain"; its value is removed
    from every multi-candidate cell in the same row.
    """
    solution = []
    for row in sudoku:
        certain = {cell[0] for cell in row if len(cell) == 1}
        updated = []
        for cell in row:
            if len(cell) == 1:
                updated.append(cell)
            else:
                updated.append(list(set(cell) - certain))
        solution.append(updated)
    return solution
def ijk_to_xyz(vec, patient_orientation=None):
    """
    Convert canonical quaternion unit-vector symbols to cartesian axes.

    :param vec: one of 'i', 'j', 'k', optionally negated as 'i-' or '-i'.
    :param patient_orientation: unused; kept for interface compatibility.
    :return: 'x', 'y' or 'z', with a trailing '-' for negated inputs.
    """
    base = {'i': 'x', 'j': 'y', 'k': 'z'}
    mapping = dict(base)
    # Accept both trailing- and leading-minus spellings of the negatives.
    for symbol, axis in base.items():
        mapping[symbol + '-'] = axis + '-'
        mapping['-' + symbol] = axis + '-'
    return mapping[vec]
def recycle_palette(palette, size):
    """Cycle through a palette (list) until it reaches the desired length (int)."""
    repeats = size // len(palette) + 1
    return (palette * repeats)[:size]
def solution(n: int = 4000000) -> int:
    """Returns the sum of all fibonacci sequence even elements that are lower
    or equals to n.

    >>> solution(10)
    10
    >>> solution(15)
    10
    >>> solution(2)
    2
    >>> solution(1)
    0
    >>> solution(34)
    44
    """
    # Walk the sequence with a rolling pair instead of materializing a list
    # (the old code built a list and then filtered it, with an extra
    # break-condition inside the loop).
    total = 0
    a, b = 0, 1
    while a <= n:
        if a % 2 == 0:
            total += a
        a, b = b, a + b
    return total
def _is_autonomous(indep, exprs):
""" Whether the expressions for the dependent variables are autonomous.
Note that the system may still behave as an autonomous system on the interface
of :meth:`integrate` due to use of pre-/post-processors.
"""
if indep is None:
return True
for expr in exprs:
try:
in_there = indep in expr.free_symbols
except:
in_there = expr.has(indep)
if in_there:
return False
return True |
def parse_time_string(date_str):
    """
    Parse an ISO-8601-like timestamp into a dict of integer fields.

    input:
        '2018-11-18T18:30:00-08:00'
    output:
        {'yr': 2018, 'mt': 11, 'dy': 18, 'hr': 18, 'mn': 30}

    A missing or malformed time portion defaults hour/minute to 0.
    (The docstring previously advertised wrong keys 'y'/'t'/'d'/'h'/'m'.)
    """
    t = {}
    tokens = date_str.split('T')
    # First half: date, hyphen-separated.
    date_fields = tokens[0].split('-')
    t['yr'] = int(date_fields[0])
    t['mt'] = int(date_fields[1])
    t['dy'] = int(date_fields[2])
    # Second half: time; split off the UTC-offset on '-', then on ':'.
    try:
        time_fields = tokens[1].split('-')[0].split(':')
        t['hr'] = int(time_fields[0])
        t['mn'] = int(time_fields[1])
    except (IndexError, ValueError):
        # IndexError: no 'T' part at all; ValueError: unparsable numbers.
        # (Was a bare except, which also hid real bugs.)
        t['hr'] = 0
        t['mn'] = 0
    return t
def database_oops(exc, _):
    """View triggered when the database falls over."""
    context = {'no_database': True, 'exception': exc}
    return context
def get_iface_speed(iface):
    """ Convert an interface speed from bps to a DNS-friendly label.

    Returns None for missing/zero speeds and for anything below T1 rate;
    'kt1' for T1 rates; otherwise '<value><unit>' such as '1g'.
    """
    raw = iface["iface_speed"]
    if not raw:
        return None
    speed = int(iface["iface_speed"])
    if speed < 1536000:
        return None
    if speed in (1536000, 1544000):
        return "t1"
    value = speed
    units = ["k", "m", "g", "t", "p"]
    steps = 0
    # Scale down by 1000 until at most three digits remain.
    while value > 100:
        value = value / 1000
        steps += 1
    return "{}{}".format(int(value), units[steps - 1])
def gen_identifier(identifier, scheme, url=None):
    """Generate an identifier dictionary, including IDURL only when given."""
    if url:
        return {'ID': identifier, 'IDScheme': scheme, 'IDURL': url}
    return {'ID': identifier, 'IDScheme': scheme}
def find_first1(l, pred):
    """
    Find the first occurrence in a list satisfying a predicate.

    :param l: list.
    :param pred: predicate on the list elements.
    :return: index of the first element satisfying pred; len(l) if none does.
    """
    return next((i for i, item in enumerate(l) if pred(item)), len(l))
def json_rpc(func_name, params=None, ID=None) -> dict:
    """Generate a JSON-RPC 2.0 call for `func_name` with parameters `params`.

    :param func_name: method name to invoke.
    :param params: parameters dict; defaults to an empty dict.
    :param ID: request id, or None for a notification-style payload.
    """
    # Avoid the mutable default argument `params={}`: a caller mutating the
    # returned payload's "params" would otherwise leak into later calls.
    if params is None:
        params = {}
    return {"jsonrpc": "2.0", "method": func_name, "params": params, "id": ID}
def task_count_badge(item):
    """
    Render a red (non-empty) or green (empty) pill badge showing the number
    of elements in an iterable.
    """
    context_class = 'danger' if item else 'success'
    return '<span class="badge badge-pill badge-{}">{}</span>'.format(
        context_class, len(item))
def get_preferred_indices(y_pos, labels, preferred_sensors):
    """Get the plot indices of preferred sensor sets.

    Args:
        y_pos (list): y positions used for sensors in the plot.
        labels (list-like): sensor set labels associated with y_pos.
        preferred_sensors (list-like): preferred sensor sets.

    Returns:
        list: y positions associated with the preferred sets.
    """
    labels = list(labels)
    return [y_pos[labels.index(sensor)] for sensor in preferred_sensors]
def make_punctuator(words, replace):
    """Insert hyphens and dots after each occurrence of `replace` in every word.

    Returns the hyphen variants followed by the dot variants, ala
    "del.ic.ious".
    """
    def _punctuate(mark):
        # Append the punctuation mark right after every match of `replace`.
        return [word.replace(replace, replace + mark) for word in words]
    return _punctuate('-') + _punctuate('.')
def _subgroup_pruning_step(pre_pruning_assembly):
    """
    Removes assemblies which are already all included in a bigger assembly.

    Parameters
    ----------
    pre_pruning_assembly : list
        contains the assemblies filtered by the significance value; each
        entry is a dict with at least a 'neurons' sequence.

    Returns
    --------
    final_assembly : list
        contains the assemblies filtered by inclusion
    """
    # reversing the semifinal_assembly makes the computation quicker
    # since the assembly are formed by agglomeration
    pre_pruning_assembly_r = list(reversed(pre_pruning_assembly))
    nns = len(pre_pruning_assembly_r)
    # boolean list with the selected assemblies
    selection = [True for _ in range(nns)]
    for i in range(nns):
        # check only in the range of the already selected assemblies
        if selection[i]:
            a = pre_pruning_assembly_r[i]['neurons']
            for j in range(i + 1, nns):
                if selection[j]:
                    b = pre_pruning_assembly_r[j]['neurons']
                    # check if a is included in b or vice versa; the subset
                    # gets deselected
                    if set(a).issuperset(set(b)):
                        selection[j] = False
                    if set(b).issuperset(set(a)):
                        selection[i] = False
                    # only for the case in which significance_pruning=False:
                    # identical assemblies are both kept
                    if set(a) == set(b):
                        selection[i] = True
                        selection[j] = True
    assembly_r = []
    # put into final_assembly only the selected ones
    for i in range(nns):
        if selection[i]:
            assembly_r.append(pre_pruning_assembly_r[i])
    # restore the original (agglomeration) order
    assembly = list(reversed(assembly_r))
    return assembly
def _adjust_lines(lines):
"""Adjust linebreaks to match ';', strip leading/trailing whitespace.
list_of_commandlines=_adjust_lines(input_text)
Lines are adjusted so that no linebreaks occur within a commandline
(except matrix command line)
"""
formatted_lines=[]
for l in lines:
#Convert line endings
l=l.replace('\r\n','\n').replace('\r','\n').strip()
if l.lower().startswith('matrix'):
formatted_lines.append(l)
else:
l=l.replace('\n',' ')
if l:
formatted_lines.append(l)
return formatted_lines |
def get_manifest_out_of_files(files):
    """
    Find a file ending with `androidmanifest.xml` (case-insensitive).

    :param Set[str] files: paths to files as absolute paths
    :return: the matching path if present in `files`, else None
    """
    return next(
        (name for name in files if name.lower().endswith("androidmanifest.xml")),
        None,
    )
def find_largest_number(numbers_list):
    """
    Return the largest number in a list of integers.

    The list is sorted in ascending order and the final element is returned.
    """
    ordered = sorted(numbers_list)
    return ordered[-1]
def _getSubjectivityFromScore( polarity_score ):
"""
Accepts the subjectivity score and returns the label
0.00 to 0.10 - Very Objective
0.10 to 0.45 - Objective
0.45 to 0.55 - Neutral
0.55 to 0.90 - Subjective
0.90 to 1.00 - Very Subjective
"""
status = "unknown"
if ( 0.00 <= polarity_score <= 0.10 ):
return "Very Objective"
elif( 0.10 < polarity_score < 0.45 ):
return "Objective"
elif( 0.45 <= polarity_score <= 0.55 ):
return "Neutral"
elif( 0.55 < polarity_score < 0.90 ):
return "Subjective"
elif( 0.90 <= polarity_score <= 1.00 ):
return "Very Subjective"
return status |
def fit_plot_points(slope, intercept, years):
    """
    Evaluate the best-fit line at each year.

    :param float slope: line slope
    :param float intercept: line intercept
    :returns: list of y values, one per year
    """
    return [slope * year + intercept for year in years]
def _filter_calibration(time_field, items, start, stop):
"""filter calibration data based on time stamp range [ns]"""
if len(items) == 0:
return []
def timestamp(x):
return x[time_field]
items = sorted(items, key=timestamp)
calibration_items = [x for x in items if start < timestamp(x) < stop]
pre = [x for x in items if timestamp(x) <= start]
if pre:
calibration_items.insert(0, pre[-1])
return calibration_items |
def compute_discounted_return(gamma, rewards):
    """
    Args:
        gamma: discount factor.
        rewards: list of rewards for the episode.

    Returns: sum over t of gamma^t * r_t.
    """
    total = 0
    factor = 1
    for reward in rewards:
        total += reward * factor
        # Multiplicative update keeps floats identical to repeated products.
        factor *= gamma
    return total
def _TransformOperationDone(resource):
"""Combines done and throttled fields into a single column."""
done_cell = '{0}'.format(resource.get('done', False))
if resource.get('metadata', {}).get('throttled', False):
done_cell += ' (throttled)'
return done_cell |
def count_mismatches(read):
    """
    Sum the NM:i:<N> optional-tag values of a SAM read (fields 12+).

    Returns False when `read` is False or when no NM tag is present.
    """
    if read is False:
        return False
    counts = [int(field.split(':')[2])
              for field in read[11:] if field.startswith('NM:i:')]
    return sum(counts) if counts else False
def search_comment(text, pos, start_token, end_token):
    """
    Search for the nearest comment in ``text``, starting from index ``pos``.

    @param text: Where to search
    @type text: str
    @param pos: Search start index (scanned backwards for the start token)
    @type pos: int
    @param start_token: Comment start string
    @type start_token: str
    @param end_token: Comment end string
    @type end_token: str
    @return: None if a complete comment wasn't found, (start, end) otherwise
    """
    start_ch = start_token[0]
    end_ch = end_token[0]
    comment_start = -1
    comment_end = -1

    def has_match(tx, start):
        return text[start:start + len(tx)] == tx

    # Scan backwards for the comment start token.
    while pos:
        pos -= 1
        if text[pos] == start_ch and has_match(start_token, pos):
            comment_start = pos
            break
    if comment_start != -1:
        # Scan forwards for the comment end token.
        pos = comment_start
        content_len = len(text)
        # Bug fix: the old bound `while content_len >= pos` let pos reach
        # len(text) and raised IndexError on unterminated comments.
        while pos + 1 < content_len:
            pos += 1
            if text[pos] == end_ch and has_match(end_token, pos):
                comment_end = pos + len(end_token)
                break
    if comment_start != -1 and comment_end != -1:
        return comment_start, comment_end
    return None
def _reconstruct_input_from_dict(x):
"""Reconstruct input from dict back to a list or single object.
Parameters
----------
x : dict
Returns
-------
out : pandas.DataFrame or pandas.Series or callable or list
"""
out = list(x.values())
if len(out) == 1:
out = out[0]
return out |
def int_to_dow(dayno):
    """ Convert an integer (0 = Monday) into a day-of-week string. """
    names = ('Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday',
             'Saturday', 'Sunday')
    return names[int(dayno)]
def dictincr(dictionary, element):
    """
    Increment `element` in `dictionary` (starting from 0 if absent) and
    return the new count.
    """
    dictionary[element] = dictionary.get(element, 0) + 1
    return dictionary[element]
def with_char_count(string: str) -> str:
    """Return the given string unchanged, unless it exceeds 280 characters."""
    if len(string) > 280:
        return "String too long to post on Twitter"
    return string
def gcd(m, n):
    """
    Compute the greatest common divisor of two non-negative integers
    using the Euclidean algorithm.
    """
    assert m >= 0 and n >= 0
    a, b = m, n
    while b:
        a, b = b, a % b
    return a
def median(x):
    """Median.

    When x has an even number of elements, the mean of the two central
    values of the distribution.

    Args:
        x (list): float (int) list

    Returns:
        float: Median value of x

    Raises:
        ValueError: when x is empty

    Examples:
        >>> median([5, 2, 4, 1, 3])
        3.0
        >>> median([5, 2, 6, 4, 1, 3])
        3.5
        >>> median([])
        Traceback (most recent call last):
        ...
        ValueError: An empty data list has no median []
        >>> median(42)
        Traceback (most recent call last):
        ...
        TypeError: object of type 'int' has no len()
    """
    if len(x) < 1:
        raise ValueError("An empty data list has no median {x}".format(x=x))
    ordered = sorted(x)
    # Integer division replaces the old float middle index plus int(±0.5)
    # rounding, which obscured the intent.
    mid = len(ordered) // 2
    if len(ordered) % 2 == 0:
        return (ordered[mid - 1] + ordered[mid]) / 2.0
    return float(ordered[mid])
def fibb(x: int) -> int:
    """
    pre: x>=0
    post[]: _ < 5
    """
    # Base cases: fibb(0) == fibb(1) == fibb(2) == 1.
    if x <= 2:
        return 1
    return fibb(x - 1) + fibb(x - 2)
def setup_network_mode(instance, compute, client, create_config, start_config):
    """
    Based on the network configuration we choose the network mode to set in
    Docker. We only really look for none, host, or container. For all
    other configurations we assume bridge mode.

    Mutates `create_config`/`start_config` in place and returns True when
    port mappings are supported by the chosen mode (bridge), False for
    host/none/container modes.

    NOTE(review): assumes `instance.nics[0].network.kind` exists; any
    KeyError/AttributeError/IndexError silently falls back to bridge mode.
    """
    ports_supported = True
    try:
        kind = instance.nics[0].network.kind
        if kind == 'dockerHost':
            ports_supported = False
            start_config['network_mode'] = 'host'
            # Links are meaningless outside bridge mode.
            del start_config['links']
        elif kind == 'dockerNone':
            ports_supported = False
            create_config['network_disabled'] = True
            del start_config['links']
        elif kind == 'dockerContainer':
            ports_supported = False
            # Prefer the Docker-reported container id over the uuid.
            id = instance.networkContainer.uuid
            other = compute.get_container(client, instance.networkContainer)
            if other is not None:
                id = other['Id']
            start_config['network_mode'] = 'container:{}'.format(id)
            del start_config['links']
    except (KeyError, AttributeError, IndexError):
        pass
    return ports_supported
def get_last_quarter(month, year):
    """
    Get the quarter preceding the given month and year.

    :param month: given month (1-12)
    :param year: given year
    :return: (quarter, year) of the previous quarter
    """
    quarter = (month - 1) // 3
    if quarter:
        return quarter, year
    # Months in Q1 roll back to Q4 of the previous year.
    return 4, year - 1
def zeta(z, x, beta):
    """
    Eq. (6) from Ref[1] (constant term)

    Note that 'x' here corresponds to 'chi = x/rho',
    and 'z' here corresponds to 'xi = z/2/rho' in the paper.
    """
    # Division order kept exactly as written to preserve floating-point
    # behavior: a / 4 / beta**2 / (1+x), not a / (4 * beta**2 * (1+x)).
    return 3 * (4* z**2 - beta**2 * x**2) / 4 / beta**2 / (1+x)
def returnNoImaging(file):
    """
    Return a dummy fits header dict with imaging set to false.

    Used for non-imaging instruments (FIES etc.); `file` is ignored.
    """
    return {'imaging': 0}
def hash_matches(doc, hash):
    """Check whether the stored input hash of `doc` equals the passed hash."""
    stored = doc["_input_hash"]
    return stored == hash
def check_bst_property(node, lower=None, upper=None) -> bool:
    """
    :param node: root of the (red-black) binary search tree
    :param lower: exclusive-ish lower bound inherited from ancestors
        (internal; equal values are tolerated, matching the old behavior)
    :param upper: upper bound inherited from ancestors (internal)
    :return: True if the tree rooted at `node` is a valid BST else False
    """
    if not node:  # empty subtree (reached past a leaf) is trivially valid
        return True
    # Bug fix: comparing only parent vs. direct children misses violations
    # deeper in the tree (e.g. a left-subtree node larger than the root),
    # so each node is checked against bounds inherited from ALL ancestors.
    if lower is not None and node.data < lower:
        return False
    if upper is not None and node.data > upper:
        return False
    # Left values must not exceed node.data; right values must not be below.
    return (check_bst_property(node.left, lower, node.data)
            and check_bst_property(node.right, node.data, upper))
def complete(repository):
    """Fill in missing parts of a repository URL.

    A string already containing ':' is assumed to be a full URL; otherwise
    an 'owner/repo' path is prefixed with the GitHub base URL.
    """
    if ':' in repository:
        return repository
    assert '/' in repository
    return 'https://github.com/' + repository.strip()
def xor(message, key):
    """
    Given bytes inputs `message` and `key`, return their byte-wise XOR,
    truncated to the shorter of the two inputs.
    """
    # Bug fix: the old `chr(m ^ k).encode()` UTF-8-encoded each result, so
    # any XOR value >= 0x80 produced TWO bytes instead of one. It also built
    # the output with quadratic `+=`. Construct the raw bytes directly.
    return bytes(m ^ k for m, k in zip(message, key))
def get_color_map_list(num_classes):
    """ Build the (PASCAL-VOC-style) color map for visualizing segmentation
    masks, supporting an arbitrary number of classes.

    Args:
        num_classes: Number of classes

    Returns:
        Flat list of length 3*num_classes with R, G, B per class.
    """
    color_map = [0] * (num_classes * 3)
    for class_idx in range(num_classes):
        bits = class_idx
        shift = 7
        # Spread the label's bits across the three channels, MSB first.
        while bits:
            color_map[class_idx * 3 + 0] |= ((bits >> 0) & 1) << shift
            color_map[class_idx * 3 + 1] |= ((bits >> 1) & 1) << shift
            color_map[class_idx * 3 + 2] |= ((bits >> 2) & 1) << shift
            shift -= 1
            bits >>= 3
    return color_map
def _cons8_88(m8, L88, d_gap, k, Cp, h_gap):
"""dz constrant for edge gap sc touching 2 edge gap sc"""
term1 = 2 * h_gap * L88 / m8 / Cp # conv to inner/outer ducts
term2 = 2 * k * d_gap / m8 / Cp / L88 # cond to adj bypass edge
return 1 / (term1 + term2) |
def save_text_file(i):
    """Save a string to a text file with all carriage returns removed.

    Target audience: end users

    Args:
        text_file (str): name of a text file
        string (str): string to write to a file (all CR chars are removed)
        (append) (str): if 'yes', append to the file

    Returns:
        (dict): Unified CK dictionary:
            return (int): return code = 0, if successful
                                      > 0, if error
            (error) (str): error text if return > 0
    """
    fn = i['text_file']
    s = i['string']
    # The value may be str or bytes; attempt both replacements best-effort.
    try:
        s = s.replace('\r', '')
    except Exception:
        pass
    try:
        s = s.replace(b'\r', b'')
    except Exception:
        pass
    mode = 'a' if i.get('append', '') == 'yes' else 'w'
    # Encode str payloads; bytes payloads fail silently and stay as-is.
    try:
        s = s.encode('utf8')
    except Exception:
        pass
    try:
        # Bug fix: `with` guarantees the handle is closed even when write()
        # raises — the old code only closed the file on the success path.
        with open(fn, mode + 'b') as f:
            f.write(s)
    except Exception as e:
        return {'return':1, 'error':'problem writing text file='+fn+' ('+format(e)+')'}
    return {'return':0}
def page_not_found(e):
    """Return a custom 404 error."""
    body = 'Oops, nothing up my sleeve! No content is at this URL.'
    return body, 404
def sort_steps(steps):
    """Arrange (dependency, step) pairs into a form

    {'A': ['B', 'D', 'F']}

    where B, D and F are the steps A needs complete before it can start.
    Every dependency also gets its own (possibly empty) entry.
    """
    step_dict = {}
    for dependency, step in steps:
        step_dict.setdefault(step, []).append(dependency)
        step_dict.setdefault(dependency, [])
    return step_dict
def append_overhead_costs(costs, new_id, overhead_percentage=0.15):
    """
    Append a 15% overhead item to the list of costs.

    Usage::

        from rapid_prototyping.context.utils import append_overhead_costs
        costs = [
            ....
        ]
        costs = append_overhead_costs(costs, MAIN_ID + get_counter(counter)[0])

    :param costs: Your final list of costs.
    :param new_id: The id that this new item should get.
    """
    total_time = sum(item['time'] for item in costs)
    costs.append({
        'id': new_id,
        'task': 'Overhead, Bufixes & Iterations',
        'time': total_time * overhead_percentage,
    })
    return costs
def gcd(a, b):
    """
    Compute the gcd of two integers with the Euclidean algorithm.

    Parameters
    ----------
    a : int
        The first integer
    b : int
        The second integer

    Returns
    -------
    int
        The GCD of the two numbers
    """
    # Iterative form of the tail recursion gcd(b, a % b).
    while b != 0:
        a, b = b, a % b
    return a
def nextLn(Ln):
    """ Return Gray code Ln+1, given Ln. """
    # Prefix '0' in order, then '1' over the reversed list (reflect step).
    zeros = ['0' + codeword for codeword in Ln]
    ones = ['1' + codeword for codeword in reversed(Ln)]
    return zeros + ones
def _strip_timestamp(text: str) -> str:
"""Remove timestamp that are added to end of some git vars.
I'm not sure how fixed the format is (w/o timezone etc.), that's why this logic
is a bit more complicated
"""
parts = text.split()
for _ in range(2):
if parts[-1].lstrip("+-").isdigit():
parts = parts[:-1]
return " ".join(parts) |
def area_of_rect(bounding_box):
    """
    Area of a bounding box produced by selective search (x, y, w, h).
    """
    width = bounding_box[2]
    height = bounding_box[3]
    return width * height
def normalized_node_id(node_id):
    """Normalize a node id: take the segment after the first '/', if any."""
    if "/" not in node_id:
        return node_id
    return node_id.split("/")[1]
def is_number(s):
    """
    Returns True if the string s can be cast to a float.

    Examples:
        is_number('1') is True
        is_number('a') is False
        is_number('1a') is False
        is_number('1.5') is True
        is_number('1.5.4') is False
        is_number('1e-6') is True
        is_number('1-6') is False

    Parameter s: The string to test
    Precondition: s is a string
    """
    try:
        float(s)
    except (ValueError, TypeError):
        # ValueError: unparsable string. TypeError kept defensively for
        # non-string input. (Was a bare except, which also swallowed
        # KeyboardInterrupt/SystemExit.)
        return False
    return True
def rc_read_write(conn, rc_write):
    """
    get/send new RC data from UART

    Writes `rc_write` (a sequence of channel values) as a comma-separated,
    '\\n'-terminated ASCII line with spaces removed, then reads one line
    back and parses it as '@'-separated integers.

    NOTE(review): assumes `conn` is a pyserial-like object exposing
    write()/readline() — confirm against callers.
    """
    try:
        # str(list)[1:-1] drops the surrounding brackets of the repr.
        i = str(rc_write)[1:-1].replace(" ", "") + "\n"
        conn.write(i.encode()) # definitely not great performance
        rc_read = conn.readline().decode() # example: "0@500@500@0@500@992@\n"
        # The trailing '@' produces an empty final token, hence [:-1].
        return list(map(int,rc_read.split('@')[:-1]))
    except Exception as e:
        # Best-effort: any failure yields a neutral all-zero channel set.
        print("exception in rc_read_write: {}".format(e))
        return [0,0,0,0,0,0]
def FormatUrlFieldValue(url_str):
    """Prepend 'http://' to a url string that lacks an http(s) prefix.

    NOTE(review): the original summary claimed 'https://' is added, but the
    code adds 'http://'. Also any string starting with 'http' (e.g.
    'httpx.example') is treated as already having a scheme — confirm intent.
    """
    if not url_str.startswith('http'):
        return 'http://' + url_str
    return url_str
def create_species_model(
    vaccination_states,
    non_vaccination_state,
    virus_states,
    areas,
    species_comp,
):
    """Create species of the model.

    Parameters
    ----------
    vaccination_states : list of strings
        Names of the vaccinations, including a state for non-vaccinated
        individuals.
    non_vaccination_state : str
        Name of the state indicating non-vaccinated individuals (unused
        here; kept for interface compatibility).
    virus_states : list of strings
        Names of the virus types.
    areas : list of strings
        Names of the areas.
    species_comp : list of strings
        Names of the compartments. Should not be changed.

    Returns
    -------
    species : dict
        Maps each species name to a dict of its parameters.
    """
    species = {}
    for compartment in species_comp:
        for area in areas:
            for vaccination in vaccination_states:
                if compartment == "susceptible":
                    # Susceptible species are virus-independent. (Fix: the
                    # old code rebuilt this same key once per virus state
                    # inside the virus loop.)
                    name = f"{compartment}_{area}_{vaccination}"
                    species[name] = {
                        "compartment": area,
                        "initial_amount": 0,
                    }
                else:
                    for virus in virus_states:
                        name = f"{compartment}_{area}_{vaccination}_{virus}"
                        species[name] = {
                            "compartment": area,
                            "initial_amount": 0,
                        }
    return species
def assert_dict(obj):
    """Return True if obj looks like a dict (duck-typed), False otherwise.

    Checks for the presence of characteristic dict methods rather than the
    concrete type, so dict-like objects also pass.
    """
    # hasattr replaces the old getattr chain wrapped in a bare `except:`
    # (which swallowed every exception and returned None, not False, when
    # the attributes existed but the chain short-circuited).
    required = ('fromkeys', 'popitem', 'setdefault', 'update', 'values')
    return all(hasattr(obj, attr) for attr in required)
def get_fields(fields):
    """
    Parse the last column of a GTF into an attribute dictionary.

    Parameters:
        fields (str): The last column of a GTF

    Returns:
        attributes (dict): key -> value mapping parsed from fields, with a
            'NULL' placeholder inserted for a missing gene_id.
    """
    attributes = {}
    for pair in fields.strip().split(";"):
        pair = pair.strip()
        if not pair:
            continue
        # Drop quotes, then split on whitespace into key and value.
        key, val = pair.replace('"', '').split()
        attributes[key] = val
    # Placeholder for the important gene_id attribute if it is absent.
    attributes.setdefault('gene_id', 'NULL')
    return attributes
def indent_block(block, padding):
    """Indent every non-empty line of a text block with `padding`."""
    out_lines = []
    for line in block.split("\n"):
        out_lines.append(padding + line if line else "")
    return "\n".join(out_lines)
def _null_truncate(s):
"""Given a string, returns a version truncated at the first '\0' if
there is one. If not, the original string is returned."""
i = s.find(chr(0))
if i != -1:
s = s[:i]
return s |
def correct_dist_matrix(distmat, maxdist=40, fardist=None):
    """Remove -1 and extremely large distances (> maxdist), replacing them
    with `fardist` (which defaults to the maximum in-range distance in the
    matrix).

    :param distmat: list of rows of numeric distances.
    :param maxdist: distances above this (and -1 sentinels) get replaced.
    :param fardist: explicit replacement value; computed from the matrix
        when None.
    """
    if fardist is None:
        # Fix: the old code ran this scan unconditionally, silently raising
        # a caller-supplied fardist up to the largest in-range value.
        fardist = max(
            (value for row in distmat for value in row if value < maxdist),
            default=0,
        )
    return [
        [fardist if value == -1 or value > maxdist else value for value in row]
        for row in distmat
    ]
def right_split(string, separator=None, max_splits=-1):
    """:yaql:rightSplit

    Split *string* from the right into a list of tokens.

    :signature: string.rightSplit(separator => null, maxSplits => -1)
    :receiverArg string: value to be split
    :argType string: string
    :arg separator: delimiter to split on; null (the default) means any
        run of whitespace characters
    :argType separator: string
    :arg maxSplits: maximum number of splits, performed rightmost-first;
        -1 (the default) means split everywhere
    :argType maxSplits: integer
    :returnType: list

    .. code::

        yaql> "abc de f".rightSplit()
        ["abc", "de", "f"]

        yaql> "abc de f".rightSplit(maxSplits => 1)
        ["abc de", "f"]
    """
    tokens = string.rsplit(separator, max_splits)
    return tokens
def celsius_to_fahr(temp_celsius):
    """Convert a Celsius temperature to Fahrenheit.

    Parameters
    ----------
    temp_celsius : int | float
        Temperature in degrees Celsius.

    Returns
    -------
    float
        Temperature in degrees Fahrenheit.
    """
    # Keep the original evaluation order (ratio first) so float results
    # match exactly.
    ratio = 9 / 5
    return ratio * temp_celsius + 32
def get_title(p_col_name):
    """Map a raw metrics column name to its paper-ready y-axis label.

    Raises KeyError for unknown column names (same as the lookup table).
    """
    labels = {
        f"P{p}_latency(ms)": f"{p}th Percentile Query Latency (ms)"
        for p in (50, 90, 95, 99)
    }
    labels["QPS"] = "QPS"
    return labels[p_col_name]
def user_model(username):
    """Build and return a minimal user model dict for *username*."""
    return dict(username=username)
def wrap_as_envelope(data=None, error=None, as_null=True):
    """Wrap *data* and *error* in a response-envelope dict.

    Parameters:
        data: payload exposed under the 'data' key.
        error: error information exposed under the 'error' key.
        as_null: if True, keys for null (None) values are created and
            assigned to None; if False, None-valued keys are omitted.

    Returns:
        dict with at most the keys 'data' and 'error'.
    """
    payload = {}
    # Compare against None explicitly: the previous truthiness test
    # silently dropped falsy-but-present payloads (0, '', [], {})
    # when as_null was False.
    if data is not None or as_null:
        payload['data'] = data
    if error is not None or as_null:
        payload['error'] = error
    return payload
def split(string, separator, keep_separator):
    """Split *string* on *separator*.

    When *keep_separator* is true, every piece except the last keeps the
    separator appended, and a trailing empty piece is dropped.
    """
    pieces = string.split(separator)
    if not keep_separator:
        return pieces
    head, tail = pieces[:-1], pieces[-1]
    head = [piece + separator for piece in head]
    if tail:
        return head + [tail]
    return head
def my_mystery_function(name='Joe'):
    """This function does black magic and can destroy your life.

    Keyword arguments:
    name -- The name of person to curse.
    """
    suffix = ' is cursed'
    return name + suffix
def calc_color_percent(min, max, temp):
    """
    Calculates the percentage of the color range that the temperature is in,
    clamped to the interval [0, 1].
    Arguments:
        min - minimum temperature
        max - maximum temperature
        temp - temperature
    """
    fraction = (temp - min) / (max - min)
    # Clamp by taking the middle of the three candidates; the builtin
    # min/max are shadowed by the (caller-visible) parameter names.
    return sorted((0, fraction, 1))[1]
def s2ms(data):
    """
    Convert the timestamp (first element) of every point from seconds
    to milliseconds.
    :param data: iterable of sequences; element 0 is a timestamp in seconds
    :return: list of lists with element 0 scaled by 1000
    """
    return [[point[0] * 1000, *point[1:]] for point in data]
def app_config(app_config):
    """Populate the application config dict for tests and return it."""
    schemas = [
        "acquisition",
        "document_requests",
        "documents",
        "eitems",
        "ill",
        "internal_locations",
        "items",
        "invenio_opendefinition",
        "invenio_records_files",
        "loans",
        "locations",
        "series",
        "vocabularies",
    ]
    app_config.update(
        APP_ALLOWED_HOSTS=["localhost"],
        CELERY_TASK_ALWAYS_EAGER=True,
        JSONSCHEMAS_SCHEMAS=schemas,
    )
    return app_config
def _compare_tuples(parta, partb):
"""Function for cachable dominance comparision."""
partial_sum_a = [
sum(parta[:k + 1]) for k in range(len(parta))]
partial_sum_b = [
sum(partb[:k + 1]) for k in range(len(partb))]
min_length = min(len(partial_sum_a), len(partial_sum_b))
comparing = [
partial_sum_a[x] - partial_sum_b[x] for x in range(min_length)]
compare_a = [x >= 0 for x in comparing]
compare_b = [x <= 0 for x in comparing]
if all(compare_a):
out = '>'
elif all(compare_b):
out = '<'
else:
out = 'Non-comparable'
return out |
def linear(lst: list, target: int or str) -> int:
    """Linear search over *lst*.

    Each item is compared against *target* in order; the INDEX of the
    first match is returned, or -1 when there is no match.

    >>> linear([5, 2, 1, 0, 4, 10], 0)
    3
    >>> linear(['a', 'b', 'c'], 'd')
    -1
    """
    for position, value in enumerate(lst):
        if value == target:
            return position
    return -1
def single_set(empty_set):
    """Add one element to *empty_set* and return it as a single-item HashSet."""
    element = "element 1"
    empty_set.add(element)
    return empty_set
def merge_sort(array):
    """Sort *array* in place using merge sort and return it.

    Parameters:
        array (list): list of mutually comparable items; it is MUTATED
            in place and also returned for convenience.

    Returns:
        list: the same list object, sorted ascending (stable).
    """
    if len(array) > 1:
        mid = len(array) // 2
        left_array = array[:mid]
        right_array = array[mid:]
        merge_sort(left_array)
        merge_sort(right_array)
        # Merge the two sorted halves back into `array`.
        i = j = k = 0
        while i < len(left_array) and j < len(right_array):
            # <= keeps the sort stable (left wins ties).
            if left_array[i] <= right_array[j]:
                array[k] = left_array[i]
                i += 1
            else:
                array[k] = right_array[j]
                j += 1
            k += 1
        # Copy whatever remains of either half.
        while i < len(left_array):
            array[k] = left_array[i]
            i += 1
            k += 1
        while j < len(right_array):
            array[k] = right_array[j]
            j += 1
            k += 1
    return array
def fib(n: int) -> int:
    """Return the Fibonacci number in the series at the given n number.

    fib(0) == 0 and fib(1) == 1; values of n below 2 are returned as-is.
    """
    if n < 2:
        return n
    prev, curr = 0, 1
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return curr
def get_model_scores(pred_boxes):
    """Invert predictions: map each model score to the image ids that have it.

    Args:
        pred_boxes (dict): dict of dicts of 'boxes' and 'scores'

    Returns:
        dict: keys are model scores, values are lists of image ids
        (usually filenames) whose predictions carry that score
    """
    score_to_images = {}
    for image_id, detections in pred_boxes.items():
        for score in detections['scores']:
            score_to_images.setdefault(score, []).append(image_id)
    return score_to_images
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.