content stringlengths 42 6.51k |
|---|
def traverse_dir(start, to, neighbors):
    """
    Traverse the branch starting at node 'start' in the direction of node 'to'
    (breadth-first), returning every node reached, in visit order.

    Args:
        start: start node
        to: destination node
        neighbors: dict mapping each node to an iterable of its neighbors

    Returns:
        list of visited nodes, beginning with [start, to].
    """
    from collections import deque  # O(1) popleft instead of O(n) list.pop(0)
    visited = [start, to]
    frontier = deque(n for n in neighbors[to] if n != start)
    while frontier:
        node = frontier.popleft()
        visited.append(node)
        frontier.extend(n for n in neighbors[node] if n not in visited)
    return visited
def getRepresentativeColor(
    value, startValue, endValue, startColor=(1, 1, 1), endColor=(0.7, 0.7, 0.9)
):
    """Map a numeric value onto a color interpolated between two endpoint colors.

    :param float value: Value to represent with the returned color.
    :param float startValue: Value represented by `startColor`.
    :param float endValue: Value represented by `endColor`.
    :param (float, float, float) startColor: RGB 3-tuple (floats in [0, 1]) for `startValue`.
    :param (float, float, float) endColor: RGB 3-tuple (floats in [0, 1]) for `endValue`.
    :return (float, float, float): RGB 3-tuple representing `value`; values
        outside the range are clamped to the nearest endpoint color.
    """
    fraction = (value - startValue) / (endValue - startValue)
    # clamp to [0, 1] so out-of-range values map to an endpoint color
    fraction = min(1, max(0, fraction))
    return tuple(
        lo + (fraction * (hi - lo)) for lo, hi in zip(startColor, endColor)
    )
def little_endian_to_int(b) -> int:
    """Interpret a byte sequence as a little-endian unsigned integer."""
    result = int.from_bytes(b, byteorder='little')
    return result
def _make_divisible(value, divisor, msg_on_change=None):
    """Round `value` to the nearest multiple of `divisor` (ties round up).

    The result is coerced to int when `divisor` is an int. A warning is
    printed when the result is 0, and `msg_on_change` (a format string which
    may reference {initial} and/or {final}) is printed whenever the value
    actually changed.
    """
    original = value
    remainder = value % divisor
    value -= remainder
    # round half (and above) upward to the next multiple
    if remainder >= divisor / 2:
        value += divisor
    if isinstance(divisor, int):
        value = int(value)
    if value == 0:
        print(f"\tWARNING (_make_divisible): value is rounded to 0, given {original} w/ divisor {divisor}")
    if msg_on_change and original != value:
        fmt_args = {}
        if "{initial}" in msg_on_change:
            fmt_args["initial"] = original
        if "{final}" in msg_on_change:
            fmt_args["final"] = value
        print("\tWARNING (_make_divisible): " + msg_on_change.format(**fmt_args))
    return value
def square_area(side):
    """Return the area of a square with the given side, rounded to 1 decimal."""
    return round(side * side, 1)
def check_disjoint_filter(record_id: str, disjoint_id_sets: dict, record_ids_map: dict) -> bool:
    """Check that a record shares no ids with the disjoint datasets (used in filter.py).

    Args:
        record_id (str): record id for a recording.
        disjoint_id_sets (Dict[str, Set[str]]): maps each id-name along which the
            output dataset must be disjoint to the set of ids in the disjoint datasets.
        record_ids_map (Dict[str, Dict[str, str]]): maps record_id to its other ids
            (e.g. speaker, lesson, line / target-sentence).

    Returns:
        bool: True iff none of the record's ids appear in any disjoint id set.
    """
    record_ids = record_ids_map[record_id]
    for id_name, disjoint_ids in disjoint_id_sets.items():
        # a single shared id means the record is not disjoint
        if record_ids[id_name] in disjoint_ids:
            return False
    return True
def multiple_split(string, delimiters=None):
    """Split `string` on any of the given delimiter substrings.

    Args:
        string: the string to split.
        delimiters: iterable of delimiter substrings; when None or empty the
            string is returned whole, in a one-element list. (The default was
            previously a mutable `[]`, a classic Python pitfall.)

    Returns:
        list of the resulting fragments.
    """
    if not delimiters:
        return [string]
    for delimiter in delimiters:
        string = string.replace(delimiter, ' ')
    return string.split()
def isPalindrome(s):
    """Assumes s is a str.
    Returns True if the letters in s form a palindrome, False otherwise.
    Non-letters and capitalization are ignored."""
    # keep only lowercase ascii letters, then compare against the reversal
    letters = [c for c in s.lower() if c in 'abcdefghijklmnopqrstuvwxyz']
    return letters == letters[::-1]
def solve(a, m, k):
    """
    Solve the simple linear congruence a*x = k (mod m), returning the least x.

    Raises ValueError when a == 0 and k != 0 (no solution exists).
    """
    if a == 0:
        # 0*x = k only solvable when k is 0
        if k == 0:
            return 0
        raise ValueError(f"{a}x%{m}={k} - No SOLUTIONS")
    if a == 1:
        return k % m
    if k % a == 0:
        # exact division, no wrap-around needed
        return k // a
    # reduce to a smaller congruence (Euclid-style descent)
    sub_solution = solve(
        a=m % a,
        m=a,
        k=(a - k) % a
    )
    return (k + sub_solution * m) // a
def round_to_nearest_increment(x: float, tick_size: float = 0.25) -> float:
    """
    Round a price to the nearest multiple of `tick_size`, never below one tick.

    Parameters
    ----------
    x : float
        the price value
    tick_size : float
        the size of the tick, e.g. 0.25 for ES, 0.01 for CL
    """
    ticks = round(float(x) / tick_size)
    snapped = round(tick_size * ticks, 2)
    # prices never snap below a single tick
    return snapped if snapped >= tick_size else tick_size
def path_to_uri(path):
    """Swap backslashes for forward slashes. Other stuff will happen here in the future."""
    return '/'.join(path.split('\\'))
def get_average_product_price(shop1: dict, shop2: dict, shop3: dict) -> dict:
    """
    Calculate the average price of each product across three shops.

    :param shop1: First e-shop (product name -> price)
    :param shop2: Second e-shop
    :param shop3: Third e-shop
    :return: dict of products and their average prices; products sold in two or
        three shops are averaged and rounded to 2 decimals, products sold in a
        single shop keep their price unrounded (matching the prior behavior).
        (The previous per-combination if/elif ladder, including its unreachable
        `"bug"` branch, is replaced by one generic computation.)
    """
    product_dict = {}
    for product_name in set(shop1) | set(shop2) | set(shop3):
        prices = [shop[product_name] for shop in (shop1, shop2, shop3) if product_name in shop]
        if len(prices) == 1:
            product_dict[product_name] = prices[0]
        else:
            product_dict[product_name] = round(sum(prices) / len(prices), 2)
    return product_dict
def alert2subject(alert_obj):
    """
    Transform an alert dict (with 'website' and 'status' keys) into a mail subject.
    :param alert_obj:
    :return: the subject string
    """
    website = alert_obj['website']
    status = alert_obj['status']
    return '[WEBMONITOR] WEBSITE : ' + website + ' IS ' + status
def select_by_perf(current_stats, candidate_stats,
                   split='val', measure='accuracy', greater=True):
    """
    Retain or update the best-seen performance statistics dictionary based on
    validation-set performance.

    :param current_stats: current best performance stats
    :param candidate_stats: new candidate performance stats
    :param split: name of the data split to follow performance on
    :param measure: name of the performance metric to track
    :param greater: if True the candidate wins when its perf. is greater
    :return: (selected stats dict, True iff the candidate was selected)
    """
    key = f'{split}/{measure}'
    # with no recorded metric yet, the candidate wins by default
    if key not in current_stats:
        return candidate_stats, True
    improved = candidate_stats[key] > current_stats[key]
    if not greater:
        improved = not improved
    return (candidate_stats, True) if improved else (current_stats, False)
def get_value_or_none(data, index):
    """
    Return data[index], or None when the index is outside the data range.
    Negative indices follow normal Python semantics.
    """
    try:
        value = data[index]
    except IndexError:
        value = None
    return value
def key_value_to_dict(key_value_list, sep='=', pair_sep=','):
    """
    Accept a key_value_list, like::
        key_value_list = ['a=1,b=2', 'c=3, d=4', 'e=5']
    Return a dict, like::
        {'a':'1', 'b':'2', 'c':'3', 'd':'4', 'e':'5'}

    `sep` separates a key from its value and `pair_sep` separates pairs
    within one list element. (Previously both parameters were accepted but
    silently ignored in favor of hard-coded '=' and ','.)
    """
    d = {}
    for speclist in key_value_list:
        for spec in speclist.strip().split(pair_sep):
            key, value = spec.strip().split(sep)
            d[key] = value
    return d
def parse_type(custom_str):
    """Parse a custom string to int, float, or bool when possible; otherwise
    return the string unchanged."""
    # numeric types first: int wins over float for e.g. "3"
    for caster in (int, float):
        try:
            return caster(custom_str)
        except ValueError:
            pass
    # boolean literals
    if custom_str == "True":
        return True
    if custom_str == "False":
        return False
    # fall back to the raw string
    return custom_str
def is_noEe(astring):
    """
    (str) -> Boolean
    Returns True if astring does NOT
    contain characters 'E' or 'e'
    else returns False.
    >>> is_noEe('')
    True
    >>> is_noEe('e')
    False
    >>> is_noEe('CHEM 101')
    False
    >>> is_noEe('abcd')
    True
    """
    return 'e' not in astring and 'E' not in astring
def summate2(phasevec):
    """Calculate values b'(j^vec) for combining 2 phase vectors.

    Parameter:
        phasevec: tuple of two phase vectors, each a (values, length) pair.

    Example:
        On input (([b_1(0),...,b_1(L-1)], L), ([b_2(0),...,b_2(L'-1)], L'))
        give output [b_1(0)+b_2(0), b_1(0)+b_2(1), ..., b_1(L-1)+b_2(L'-1)]
    """
    (values_a, len_a), (values_b, len_b) = phasevec
    # cartesian sum, first vector varying slowest
    return [values_a[i] + values_b[j] for i in range(len_a) for j in range(len_b)]
def fullyflatten(container):
    """
    Completely flatten a cluster into a one-dimensional list of its items.
    Useful when some items of the cluster are clusters in their own right and
    you only want the leaf items. An element is treated as a sub-cluster iff
    it exposes an `items` attribute.

    :param container: the container to flatten.
    """
    flat = []
    for element in container:
        if hasattr(element, 'items'):
            # recurse into sub-clusters
            flat.extend(fullyflatten(element.items))
        else:
            flat.append(element)
    return flat
def as_tuple(obj):
    """
    Normalize `obj` to a tuple:
    - a list/tuple is returned as a tuple of the same elements;
    - None becomes the empty tuple;
    - any other object becomes a one-element tuple.
    """
    if obj is None:
        return ()
    if isinstance(obj, tuple):
        return obj
    if isinstance(obj, list):
        return tuple(obj)
    return (obj,)
def CheckSum(data):
    """
    Calculate an ASTM checksum for the supplied data: the sum of all byte
    values mod 256, rendered as a two-digit uppercase hex string.

    Arguments:
        data -- whatever you want to calculate a checksum for.

    If manually calculating checksums, be mindful of whether you want to
    include the <2> (STX) that begins the frame; sometimes it is expected,
    sometimes not.
    """
    total = sum(ord(char) for char in data) % 256
    # %02X zero-pads to two hex digits, matching the manual padding it replaces
    return '%02X' % total
def color(cid, s, code):
    """Wrap `cid: s` in an HTML paragraph styled with the given color code."""
    template = "<p style='color: %s'>%d: %s</p>"
    return template % (code, cid, s)
def unzip(array):
    """Unzip [(a1, b1), (a2, b2), ...] into ([a1, a2, ...], [b1, b2, ...])."""
    firsts, seconds = [], []
    for first, second in array:
        firsts.append(first)
        seconds.append(second)
    return firsts, seconds
def _get_story_memcache_key(story_id, version=None):
    """Returns a memcache key for the story.

    Args:
        story_id: str. ID of the story.
        version: str. Schema version of the story; when falsy the key is
            unversioned.

    Returns:
        str. The memcache key of the story.
    """
    return ('story-version:%s:%s' % (story_id, version)) if version else ('story:%s' % story_id)
def advance_data_iter(data_iter, n):
    """Advance `data_iter` by `n` batches; used to warm up data for performance benchmarks.

    Args:
        data_iter: an iterator exposing a ``.next()`` method that raises
            StopIteration when exhausted — NOTE(review): this is not the
            builtin iterator protocol's ``__next__``; presumably an MXNet-style
            DataIter. Confirm against callers.
        n: non-negative number of batches to consume.

    Returns:
        The same iterator after consuming `n` batches, or (implicitly) None
        when the iterator is exhausted before `n` batches were consumed.
    """
    assert n >= 0
    if n == 0:
        return data_iter
    has_next_batch = True
    while has_next_batch:
        try:
            data_iter.next()
            n -= 1
            if n == 0:
                return data_iter
        except StopIteration:
            # exhausted early: fall through and return None implicitly
            has_next_batch = False
def get_reversed_first_level_tree(tree):
    """Create a one-level-deep inverted dependency tree: each dependency maps to
    the set of modules that depend on it. Self-references (by identity) are skipped."""
    reversed_tree = {}
    for module, dependencies in list(tree.items()):
        for dep in dependencies:
            if dep is module:
                continue
            reversed_tree.setdefault(dep, set()).add(module)
    return reversed_tree
def ellipsize(value, limit=32):
    """
    Truncate a string after `limit` chars, keeping whole words and appending '...'.
    Usage:
        {{ string|ellipsize }}
        {{ string|ellipsize:50 }}
    """
    if len(value) <= limit:
        return value
    truncated = value[:limit]
    # drop the (possibly cut-off) final word
    kept_words = truncated.split(' ')[:-1]
    return ' '.join(kept_words) + '...'
def filteroutNone(result_list):
    """
    Filter a results list (as produced by extract_step_metric), dropping
    entries whose third element is None.
    """
    return [entry for entry in result_list if entry[2] is not None]
def is_in_frame(frame_w, frame_h, lm):
    """Return whether landmark lm=(x, y) lies within [0, frame_w) x [0, frame_h)."""
    return 0 <= lm[0] < frame_w and 0 <= lm[1] < frame_h
def sub4(a, b):
    """Component-wise 4-vector difference `a - b`."""
    return [a[i] - b[i] for i in range(4)]
def _statXform(line):
    """
    Parse the response to a STAT command.

    @type line: L{bytes}
    @param line: The response from the server to a STAT command minus the
        status indicator.

    @rtype: 2-L{tuple} of (0) L{int}, (1) L{int}
    @return: The number of messages in the mailbox and the size of the mailbox.
    """
    fields = line.split(None, 1)
    return int(fields[0]), int(fields[1])
def remove_noise(spectrum: list, noise_level: float) -> list:
    """Zero out any frequencies with an amplitude under the specified noise level.

    Args:
        - spectrum: the spectrum to perform noise reduction on.
        - noise_level: the minimum power a bin must have to survive.
    """
    return [0 if amplitude < noise_level else amplitude for amplitude in spectrum]
def printTable(data: list) -> None:
    """Print `data` (a list of columns, each a list of strings) as a nicely
    formatted table, right-justifying each column to its widest entry."""
    widths = [max((len(word) for word in column), default=0) for column in data]
    for row in range(len(data[0])):
        for col, column in enumerate(data):
            print(column[row].rjust(widths[col]), end=" ")
        print()
    return None
def makeImmutable(x):
    """
    Return an immutable equivalent of x, using hashability as the test for
    immutability; mutable collections become tuples (singletons are unwrapped
    recursively).
    >>> makeImmutable(5) == 5
    True
    >>> makeImmutable('a') == 'a'
    True
    >>> makeImmutable((1, 2)) == (1, 2)
    True
    >>> makeImmutable([1, 2]) == [1, 2]
    False
    """
    try:
        # hashable objects are treated as already-immutable
        hash(x)
        return x
    except TypeError:
        pass
    # x is mutable; assume it is a collection (if it's a mutable class
    # instead, we're hosed either way)
    try:
        # unwrap singleton collections recursively; non-collections will
        # raise TypeError on len() and fall through below
        if len(x) == 1:
            return makeImmutable(x[0])
        # a multi-element collection becomes a tuple
        return tuple(x)
    except TypeError:
        return x  # not a collection
def get_dist_disp_lim(sim_desc):
    """
    Given a simulator descriptor, return the upper display limit for the
    distances histogram. Raises KeyError for unknown descriptors.
    """
    unbounded = float('inf')
    limits = {
        'gauss': unbounded,
        'mg1': 1.0,
        'lotka_volterra': unbounded,
        'hodgkin_huxley': unbounded,
    }
    return limits[sim_desc]
def IS(instance, other):  # noqa
    """
    Support the `future is other` use-case.
    Can't override the language so we built a function.
    Works on non-future objects too: each argument is unwrapped to its
    `_redpipe_future_result` when present, then compared by identity.

    :param instance: future or any python object
    :param other: object to compare.
    :return: bool
    """
    instance = getattr(instance, '_redpipe_future_result', instance)  # noqa
    other = getattr(other, '_redpipe_future_result', other)
    return instance is other
def path2key(path: list) -> str:
    """
    Serialize a path, e.g. [1, 3, 2], into a string key, e.g. "1,3,2".
    :param path:
    :return: comma-joined string of the path elements
    """
    return ','.join(str(node) for node in path)
def cleanup_value(v):
    """Cleanup utility: stringify `v` and strip a leading "\\??\\" prefix if present."""
    text = str(v)
    prefix = "\\??\\"
    if text.startswith(prefix):
        text = text[len(prefix):]
    return text
def url_ExoMol():
    """Return the base URL of the ExoMol database.

    Returns:
        URL for the ExoMol db
    """
    return u"http://www.exomol.com/db/"
def isabs(s):
    """Return true if a path is absolute.
    On the Mac, relative paths begin with a colon,
    but as a special case, paths with no colons at all are also relative.
    Anything else is absolute (the string up to the first colon is the
    volume name)."""
    if ':' not in s:
        return False
    return s[0] != ':'
def a2idx(j, n=None):
    """Coerce `j` to an int index, normalizing negatives and bounds-checking
    against `n` when given. Raises IndexError for non-index types or for
    indices outside [0, n)."""
    if type(j) is not int:
        try:
            # accept anything implementing the index protocol
            j = j.__index__()
        except AttributeError:
            raise IndexError("Invalid index a[%r]" % (j, ))
    if n is not None:
        if j < 0:
            j += n
        if not (0 <= j < n):
            raise IndexError("Index out of range: a[%s]" % (j, ))
    return int(j)
def get_relevant_files(session_data: dict):
    """
    Generate the pipeline's "starting node".

    Parameters
    ----------
    session_data : dict
        A dictionary with the locations of all necessary session data.

    Returns
    -------
    tuple
        The (dwi, fmap) locations; each is None when missing.
    """
    dwi = session_data.get("dwi")
    fmap = session_data.get("fmap")
    return dwi, fmap
def sum_of_multiples_of_3_and_5(n):
    """Return the sum of all multiples of 3 or 5 below `n`.

    Parameters
    ----------
    n : int
        Exclusive upper bound.

    Returns
    -------
    int
        The sum of all multiples of 3 and 5 below `n`.
    """
    return sum(i for i in range(n) if i % 3 == 0 or i % 5 == 0)
def _ComputeSpeedIndex(completeness_record):
    """Compute the speed-index from a completeness record.

    Args:
        completeness_record: list(CompletenessPoint), i.e. (time, completeness)
            pairs sorted by ascending timestamp.

    Returns:
        Speed-index value.

    Raises:
        ValueError: if timestamps are not sorted ascending.
    """
    speed_index = 0.0
    prev_time = completeness_record[0][0]
    prev_completeness = completeness_record[0][1]
    for timestamp, completeness in completeness_record:
        if timestamp < prev_time:
            raise ValueError('Completeness record must be sorted by timestamps.')
        # area above the completeness curve
        speed_index += (timestamp - prev_time) * (1.0 - prev_completeness)
        prev_time, prev_completeness = timestamp, completeness
    return speed_index
def _is_path_watched(filepath: str) -> bool:
    """
    Check whether a file should trigger a pytest run (only Python sources do).
    """
    return filepath[-3:] == ".py"
def tag(tag):
    """Select a single tag, wrapping it in a one-key dict."""
    return dict(tag=tag)
def _create_tf_idf_matrix(tf_matrix, idf_matrix):
    """Build the tf-idf matrix from tf and idf matrices.

    Args:
        tf_matrix (dict): term frequency per sentence
        idf_matrix (dict): document frequency per sentence

    Returns:
        dict: tf-idf matrix (entries are paired by position; both matrices are
        expected to share keys in the same order)
    """
    tf_idf_matrix = {}
    for (sentence, tf_table), (_, idf_table) in zip(tf_matrix.items(), idf_matrix.items()):
        tf_idf_matrix[sentence] = {
            word: float(tf * idf)
            for (word, tf), (_, idf) in zip(tf_table.items(), idf_table.items())
        }
    return tf_idf_matrix
def _min_index(b, h):
    """
    Returns: The index of the minimum value in b[h..]

    Parameter b: The sequence to search
    Precondition: b is a mutable sequence (e.g. a list).
    """
    # We typically do not enforce preconditions on hidden helpers.
    # Ties resolve to the earliest index.
    index = h
    for i in range(h + 1, len(b)):
        if b[i] < b[index]:
            index = i
    return index
def bin_to_int(b, lend=False):
    """
    Convert a binary tuple to an integer.

    Parameters
    ----------
    b : tuple of bool
        Binary tuple.
    lend : bool
        Endianness of the tuple (True = little-endian).

    Returns
    -------
    int
        Value as integer.
    """
    bits = reversed(b) if lend else b
    return int("".join("1" if bit else "0" for bit in bits), 2)
def isSet(obj):
    """
    Return a boolean indicating whether 'obj' is of type 'set' or 'frozenset'
    (including subclasses).
    """
    return isinstance(obj, (set, frozenset))
def ping_reply():
    """Construct the message used when replying to pings.

    :returns: ping_reply dict, ready to be sent over the wire.
    """
    message = {"type": "ping_reply"}
    return message
def rate(seq):
    """
    Rate of convergence in time: the list of successive differences
    seq[i+1] - seq[i].
    """
    return [nxt - cur for cur, nxt in zip(seq, seq[1:])]
def tip(bill):
    """Add a 15% tip to a restaurant bill; print and return the total."""
    total = bill * 1.15
    print("With tip: %f" % total)
    return total
def translate(value, left_min, left_max, right_min, right_max):
    """Linearly map `value` from [left_min, left_max] onto [right_min, right_max].
    (Thanks to the guy on stackoverflow.)"""
    # normalize into the 0-1 range of the source interval...
    normalized = float(value - left_min) / float(left_max - left_min)
    # ...then scale into the target interval
    return right_min + (normalized * (right_max - right_min))
def triple_step(n):
    """Triple Step: count how many ways a child can run up `n` stairs hopping
    1, 2, or 3 steps at a time.

    Uses the correct recurrence f(n) = f(n-1) + f(n-2) + f(n-3) with
    f(1)=1, f(2)=2, f(3)=4. (The original returned 6 for n=3 and fell off
    the end — returning None — for n > 3 because the else branch had no
    return and multiplied subproblem counts.)

    Returns 0 for negative n and 1 for n == 0 (the empty climb).
    """
    if n < 0:
        return 0
    # rolling window (f(k-3), f(k-2), f(k-1)), seeded so that f(0) == 1
    window = [0, 0, 1]
    for _ in range(n):
        window = [window[1], window[2], window[0] + window[1] + window[2]]
    return window[2]
def unFreezList(set_of_freezed_graphs):
    """
    Receive a set of frozen StratGraphs and return a two-element list:
    the unfrozen StratGraphs and their corresponding strings.
    """
    graphs = []
    labels = []
    for frozen in set_of_freezed_graphs:
        graphs.append(frozen.stratGraph)
        labels.append(frozen.string)
    return [graphs, labels]
def get_channel_columns(columns):
    """Filter leaderboard columns down to the channel column names.

    Args:
        columns (iterable): Iterable of leaderboard column names.

    Returns:
        list: channel column names (those starting with 'channel_').
    """
    channel_columns = []
    for column in columns:
        if column.startswith('channel_'):
            channel_columns.append(column)
    return channel_columns
def dict_item(dictionary, key):
    """Template filter to access a dictionary value by a variable key
    (None when absent).

    Example use::
        {{ mydict|dict_item:keyvar }}
    """
    return dictionary.get(key)
def get_new_values(values):
    """Running maximum of `values`; the output has the same length as the input.

    An empty input now yields an empty list (previously `values[0]` raised
    IndexError).
    """
    new_values = []
    running_max = None
    for value in values:
        if running_max is None or value > running_max:
            running_max = value
        new_values.append(running_max)
    return new_values
def get_part_of_day(hour, sunrise_hour, sunset_hour):
    """Classify `hour` as morning, afternoon, evening, or night, using the
    given sunrise/sunset hours as boundaries."""
    if sunrise_hour <= hour <= 11:
        return "morning"
    if 12 <= hour <= sunset_hour:
        return "afternoon"
    if sunset_hour + 1 <= hour <= 22:
        return "evening"
    return "night"
def transpose(matrix):
    """Return the transpose of a 3x3 matrix stored row-major as a 9-element tuple."""
    return tuple(matrix[col * 3 + row] for row in range(3) for col in range(3))
def list_to_filename(filelist):
    """Return the list itself when it has more than one element, otherwise
    return its first (and only) element."""
    return filelist if len(filelist) > 1 else filelist[0]
def search_pattern(alarms, searchFor):
    """Search for a pattern in the alarm names.

    Arguments:
        alarms {dict} -- dictionary with all SMC alarms
        searchFor {str} -- search pattern entered by the user

    Returns:
        list -- all matched alarm values, or None if nothing matched
    """
    matches = [value for name, value in alarms.items() if searchFor in name]
    return matches if matches else None
def is_manorm_header(line):
    """Return whether the line is a header line as used in MAnorm xls
    (a '#' comment line or a tab-separated row whose first field is 'chr')."""
    stripped = line.strip()
    return stripped.startswith('#') or stripped.split('\t')[0] == 'chr'
def loc_to_block_cell_num(y, x):
    """
    :param y: The y location of the cell on the board. Precondition: 0 <= y < 9
    :param x: The x location of the cell on the board. Precondition: 0 <= x < 9
    :return: The cell number offset inside a 3x3 block
    """
    row_in_block = y % 3
    col_in_block = x % 3
    return row_in_block * 3 + col_in_block
def find_largest(n: int, L: list) -> list:
    """Return the n largest values in L in order from smallest to largest.

    Returns [] when n <= 0. (Previously n == 0 returned the ENTIRE sorted
    list, because `copy[-0:]` is the full slice.)

    >>> L = [3, 4, 7, -1, 2, 5]
    >>> find_largest(3, L)
    [4, 5, 7]
    """
    if n <= 0:
        return []
    return sorted(L)[-n:]
def evalPoly(fit_param, Y):
    """
    Evaluate X from Y via the quadratic fit_param[0]*Y**2 + fit_param[1]*Y + fit_param[2].
    """
    a, b, c = fit_param[0], fit_param[1], fit_param[2]
    return a * Y ** 2 + b * Y + c
def parse_put_location_response(put_location_response):
    """
    Parse a response from RpcPutLocation.

    :returns: physical path (an object path)
    """
    phys_path = put_location_response["PhysPath"]
    return phys_path
def extract_classes(document):
    """Return the leading label field of a document line.
    Example: document = "545,32 8:1 18:2" -> "545,32"
    """
    tokens = document.split()
    return tokens[0]
def list_to_tuple(x):
""" Convert a list to a tuple recursively. """
assert isinstance(x, list)
return tuple(list_to_tuple(y) if isinstance(y, list) else y for y in x) |
def correct_braces(string):
    """
    Check whether the braces "{" and "}" in `string` are balanced, ignoring
    braces inside double-quoted sections.

    :param string: String with braces to be checked
    :return: True if braces are correct, otherwise False
    :raises ValueError: when string is None
    """
    if string is None:
        raise ValueError("String to check correct braces was None")
    depth = 0
    in_quotes = False
    for character in string:
        if character == '"':
            in_quotes = not in_quotes
        elif not in_quotes:
            if character == '{':
                depth += 1
            elif character == '}':
                depth -= 1
        # a closing brace before any opener is immediately invalid
        if depth < 0:
            return False
    return depth == 0
def hasattr_really(obj: object, attr) -> bool:
    """
    Examines whether an object really has an attribute, without calling
    __getattr__, which in states looks up the attribute in the monitor
    it is part of.

    :param obj: the object to look for the attribute.
    :param attr: the attribute.
    :return: True iff the attribute is really defined in that object.
    """
    try:
        obj.__getattribute__(attr)
        return True
    except Exception:
        # narrowed from a bare `except:` so that KeyboardInterrupt/SystemExit
        # propagate; any Exception from attribute lookup still means "no"
        return False
def words_normalize(words):
    """
    Normalize (index, word) pairs. Currently just lower-cases each word, but
    accent stripping, singularization, etc. could be added here.
    """
    return [(index, word.lower()) for index, word in words]
def _GenerateUniqueNames(lu, exts):
    """Generate suitable LV names for the given instance: one freshly
    generated unique ID per extension in `exts`, with the extension appended."""
    return ["%s%s" % (lu.cfg.GenerateUniqueID(lu.proc.GetECId()), ext)
            for ext in exts]
def _find_intervals(intervals, end):
    """
    Find a complete set of intervals partitioning [0, end), given a partial
    set of non-overlapping intervals: every interval endpoint (plus 0 and
    `end`) becomes a cut point, and consecutive cut points form the result.
    """
    boundaries = {0, end}
    for interval in intervals:
        boundaries.update(interval)
    cuts = sorted(boundaries)
    return [(lo, hi) for lo, hi in zip(cuts, cuts[1:])]
def unknown_option(argument):
    """
    Check for a valid flag option (no argument) and return ``True``,
    else return the argument stripped.
    (Directive option conversion function.)

    For unknown options we cannot know if they should be passed to the loader
    as flags or strings. We could pass ``None`` if the option string contains
    nothing except whitespace, but that would not be intuitive for keyword
    argument flags as ``bool(None) is False``.
    """
    stripped = argument.strip() if argument else None
    if stripped:
        return stripped
    return True
def filter_startswith(word, beginning=None):
    """Filter a word starting with a specified string (case-insensitive on the word).

    :param word (str): The word.
    :param beginning (str, optional): The beginning string to check. When
        omitted or None, every word matches. (Previously the None default
        crashed `str.startswith` with a TypeError.)
    :rtype: bool: The resulting check.
    """
    if beginning is None:
        return True
    return word.lower().startswith(beginning)
def get_int_list(input_str):
    """Return the comma-separated input string as a list of integers."""
    return [int(item) for item in input_str.split(",")]
def is_floatable(value):
    """Check whether an object can be cast to a float.

    Catches only the exceptions float() actually raises for bad input
    (ValueError for bad strings, TypeError for unsupported types) instead of
    the previous bare `except`, so KeyboardInterrupt etc. propagate.
    """
    try:
        float(value)
        return True
    except (ValueError, TypeError):
        return False
def split_by_whitespace(text):
    """A very basic functional version of str.split (splits on whitespace runs)."""
    return str.split(text)
def sanitize(text):
    """
    We write a tab-separated "CSV" understood by psql, so certain characters
    must not appear literally in values:
    - Newlines (which separate CSV entries)
    - Tabs (which separate our columns)
    - Null codepoints (not allowed in psql)
    Literal backslashes must be escaped, as backslashes are interpreted by psql.

    It's a good question whether this step should be necessary -- names of
    concepts shouldn't include control characters, and the few instances of
    backslashes in ConceptNet appear to be mistakes.
    """
    # one C-level pass: delete \n, \t, NUL; double every backslash
    table = str.maketrans({'\n': None, '\t': None, '\x00': None, '\\': '\\\\'})
    return text.translate(table)
def union(a, b):
    """Concatenate two lists/sequences into one list.
    Repetitions are preserved if they occur in the inputs."""
    combined = list(a)
    combined.extend(b)
    return combined
def getAlias(column, alias=''):
    """Compose "column AS alias" for the specified column; with no alias the
    column is returned unchanged."""
    if not alias:
        return column
    return column + ' AS ' + alias
def any(seq):
    """any(iterable) -> bool
    Return True if bool(x) is True for any x in the iterable."""
    for element in seq:
        if element:
            return True
    return False
def permutation_to_cosine(permutation, flip):
    """Get the mhd direction cosine (flattened 3x3, row-major) for this
    dimension permutation, with per-axis sign flips."""
    cosine = [0] * 9
    for axis in range(3):
        cosine[permutation[axis] * 3 + axis] = -1 if flip[axis] else 1
    return cosine
def get_sec(time_str):
    """Get seconds from a time string.

    Accepts "SS", "MM:SS", or "HH:MM:SS" (generalized from the original
    MM:SS-only version; plain integers and "MM:SS" behave exactly as before).
    This also drops the original's shadowing of the builtin `min`.
    """
    total = 0
    # fold left-to-right: each colon-separated field is worth 60x the previous
    for part in time_str.split(":"):
        total = total * 60 + int(part)
    return total
def invert_obj(object):
    """Return a new dict with the keys of the given object as values and its
    values as keys. Mappings are inverted via items(); other iterables via
    enumerate(). Iterable (non-str) values fan out, each element mapping back
    to the key. The last key found wins for duplicate values."""
    pairs = object.items() if hasattr(object, "items") else enumerate(object)
    inverted = {}
    for key, value in pairs:
        if hasattr(value, "__iter__") and type(value) != str:
            # iterable value: each element points back to the key
            for item in value:
                inverted[item] = key
        else:
            inverted[value] = key
    return inverted
def split_string0(buf):
    """Split a buffer of zero-terminated strings into a list of (non-terminated)
    bytes objects; the empty tail after the final terminator is dropped."""
    parts = buf.split(b'\0')
    return parts[:-1]
def get_wikipedia_multi_pattern(lang, date):
    """Return a regex pattern matching multi-part wiki pages-articles .bz2 dump
    files for the given language and dump date."""
    prefix = '{}wiki-{}'.format(lang, date)
    return r'(' + prefix + r'-pages-articles[0-9]+.xml.*bz2$)'
def decode_message(data):
    """Decode a channel topic into a (creator_id, channel_id, message_id) tuple.

    Returns (None, None, None) when `data` is malformed. The exception
    handling is narrowed from a bare `except` to the errors this parse can
    actually raise: ValueError for a wrong number of '|' fields, and
    AttributeError when `data` is not a string.
    """
    try:
        creator_id, channel_id, message_id = data.split('|')
    except (ValueError, AttributeError):
        return None, None, None
    return creator_id, channel_id, message_id
def replace_s3_invalid_characters(key):
    """Replace characters invalid for an S3 object key with underscores.

    Args:
        key: string where to replace characters

    Returns:
        string where any invalid characters were replaced with underscores
    """
    allowed_specials = " !-_'.,*()"
    return ''.join(
        ch if ch.isalpha() or ch.isdigit() or ch in allowed_specials else '_'
        for ch in key
    )
def _get_power(x_str, power):
    """
    Get the string for a power of a variable in `_build_formula`:
    the bare name for power 1, "name**power" above that, None otherwise.
    """
    if power == 1:
        return x_str
    if power > 1:
        return '{0}**{1}'.format(x_str, power)
    return None
def strike_temp(ambient_temp, target_temp, ratio):
    """
    Calculate the temperature of the strike liquor.

    :param ambient_temp: [float] Ambient temperature in any units (both temps
        must agree)
    :param target_temp: [float] Target wort resting temperature
    :param ratio: [float] desired ratio in quarts / pound
    :return: [float] strike temperature in the units given
    """
    correction = (target_temp - ambient_temp) / (5 * ratio)
    return correction + target_temp
def parrot_trouble(talking: bool, hour: int) -> bool:
    """The parrot is causing trouble iff it is talking outside hours 7-20."""
    is_off_hours = hour not in range(7, 21)
    return bool(talking and is_off_hours)
def filter_(func, seq):
    """Imitate the built-in `filter`, but return a tuple."""
    kept = filter(func, seq)
    return tuple(kept)
def transcribe(seq: str) -> str:
    """
    Transcribe DNA to RNA by generating the complement sequence with the
    T -> U replacement. Input is upper-cased first; raises ValueError for an
    empty sequence or for characters outside {A, C, G, T}.
    """
    dna = {'A', 'C', 'G', 'T'}
    if not seq:
        raise ValueError("You passed in an empty sequence.")
    seq = seq.upper()
    invalid = set(seq) - dna
    if invalid:
        raise ValueError("Your sequence can only contain A, C, G, and T, but you included a sequence with"
                         f" {invalid}.")
    complement = {"A": "U", "T": "A", "C": "G", "G": "C"}
    return ''.join(complement[base] for base in seq)
def bigger_price(limit: int, data: list) -> list:
    """Return the `limit` most expensive goods, most expensive first."""
    by_price_descending = sorted(data, key=lambda item: item['price'], reverse=True)
    return by_price_descending[:limit]
def __label(column: str, with_better=False) -> str:
    """Translate a column name into an axis label with a unit; optionally
    append whether lower or higher is better. Unknown columns fall back to
    their raw name (in both parts)."""
    labels = {
        'threads': '# of threads',
        'iodepth': 'iodepth',
        'bs': 'block size [B]',
        'lat_avg': 'latency [usec]',
        'lat_pctl_99.9': 'latency [usec]',
        'lat_pctl_99.99': 'latency [usec]',
        'bw_avg': 'bandwidth [Gb/s]',
        'cpuload': 'CPU load [%]'
    }
    directions = {
        'lat_avg': 'lower is better',
        'lat_pctl_99.9': 'lower is better',
        'lat_pctl_99.99': 'lower is better',
        'bw_avg': 'higher is better'
    }
    result = labels.get(column, column)
    if with_better:
        result += '\n(' + directions.get(column, column) + ')'
    return result
def fill_with_dflts(d, dflt_dict=None):
    """
    Merge default entries into a (possibly None) dict argument.

    Handy for function arguments that should default to a dict with preset
    keys: since mutable defaults are a trap in Python, callers accept
    `d=None` and write `d = fill_with_dflts(d, {'a': 7, 'b': 42})` at the top
    of the function body.

    ATTENTION: A shallow copy of the dict is made. Know how that affects you (or not).
    ATTENTION: This is not recursive: it won't fill any nested fields with defaults.

    Args:
        d: The dict you want to "fill" (or None).
        dflt_dict: {k: v, ...} of defaults; keys missing in d get value v.

    Returns:
        A new dict with d's entries plus defaults for keys d was missing.

    >>> fill_with_dflts(None)
    {}
    >>> fill_with_dflts(None, {'a': 7, 'b': 42})
    {'a': 7, 'b': 42}
    >>> fill_with_dflts({}, {'a': 7, 'b': 42})
    {'a': 7, 'b': 42}
    >>> fill_with_dflts({'b': 1000}, {'a': 7, 'b': 42})
    {'a': 7, 'b': 1000}
    """
    base = dflt_dict if dflt_dict is not None else {}
    overrides = d if d is not None else {}
    return dict(base, **overrides)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.