content stringlengths 42 6.51k |
|---|
def cap_absolute_value(value, max_absolute_value=1):
    """Clamp `value` to the interval [-max_absolute_value, max_absolute_value].

    Particularly useful before calling trigonometric functions where
    numerical error may push an argument slightly past 1.
    Modified from the pymatgen source code [1]_.

    Parameters
    ----------
    value : :class:`float`
        Value to cap.
    max_absolute_value : :class:`float`, optional
        Absolute value to cap `value` at. Defaults to 1.

    Returns
    -------
    :class:`float`
        `value` capped at `max_absolute_value` with sign preserved.

    References
    ----------
    .. [1] https://pymatgen.org/pymatgen.util.num.html
    """
    if value > max_absolute_value:
        return max_absolute_value
    if value < -max_absolute_value:
        return -max_absolute_value
    return value
def used_cached_results(metadata):
    """Return whether this call's metadata records a call-caching hit.

    Modified from: https://github.com/broadinstitute/dsde-pipelines/blob/develop/scripts/calculate_cost.py
    """
    if "callCaching" not in metadata:
        return False
    caching = metadata["callCaching"]
    if "hit" not in caching:
        return False
    # Return the recorded value itself, matching the original and-chain.
    return caching["hit"]
def are_same_length(list1, list2) -> bool:
    """Return True if the two sequences have the same length.

    Args:
        list1 (list or tuple): a sequence to be checked.
        list2 (list or tuple): a sequence to be checked.

    Returns:
        bool: True if the lengths match.
    """
    lengths_match = len(list1) == len(list2)
    return lengths_match
def remove_duplicates(results: list) -> list:
    """Keep only the first result for each distinct ``hit_id``.

    Results without a ``hit_id`` key all share the None id, so only the
    first such result survives.
    """
    seen_ids = set()
    unique = []
    for record in results:
        hit = record.get('hit_id')
        if hit in seen_ids:
            continue
        seen_ids.add(hit)
        unique.append(record)
    return unique
def _prioritize_pes(choices):
"""Prioritize and deprioritize paired environments based on names.
We're looking for multiprocessing friendly environments, so prioritize ones with SMP
in the name and deprioritize those with MPI.
"""
# lower scores = better
ranks = {"smp": -1, "mpi": 1}
sort_choices = []
for n in choices:
# Identify if it fits in any special cases
special_case = False
for k, val in ranks.items():
if n.lower().find(k) >= 0:
sort_choices.append((val, n))
special_case = True
break
if not special_case: # otherwise, no priority/de-priority
sort_choices.append((0, n))
sort_choices.sort()
return sort_choices[0][1] |
def spt_pre_sequencing(dataset, *args, **kwargs):
    """Generate an initial job sequence via shortest-processing-time dispatch.

    Jobs are inserted in ascending order of their total processing time
    ``t_smd + t_aoi``; the resulting sequence is fed to the model.

    Parameters
    ----------
    dataset : dict
        Mapping of job id to a job dict containing at least the numeric
        keys ``t_smd`` and ``t_aoi``.

    Returns
    -------
    list
        The job dicts ordered by ascending total processing time
        (stable for ties: earlier jobs stay first).
    """
    sequence = []
    for job in dataset.values():
        cost = job["t_smd"] + job["t_aoi"]
        # Insertion sort: place the job before the first queued job with a
        # strictly larger total processing time.  enumerate() avoids the
        # original's redundant O(n) sequence.index() lookup, and the
        # for/else replaces the `sequencing_successful == False` flag.
        for position, queued in enumerate(sequence):
            if cost < queued["t_smd"] + queued["t_aoi"]:
                sequence.insert(position, job)
                break
        else:
            sequence.append(job)
    return sequence
def argsort_list(seq):
    """Returns indices such that the list is sorted.
    Example: argsort_list([2, 1, 3]) = [1, 0, 2].
    """
    return sorted(range(len(seq)), key=lambda position: seq[position])
def plaintext(target):
    """Return `target` with every non-ASCII character removed."""
    return "".join(ch for ch in target if ord(ch) < 128)
def get_face_names_from_indices(mesh, indices):
    """Return face component names for the given list of face indices.

    Each index ``i`` becomes ``'<mesh>.f[<i>]'`` (Maya-style component
    naming, per the original usage — TODO confirm against callers).

    :param mesh: str, mesh node name
    :param indices: list(int), face indices
    :return: list(str), one name per index, in order
    """
    # Comprehension replaces the original manual append loop (PERF401).
    return ['{}.f[{}]'.format(mesh, index) for index in indices]
def bubbleSort_decr(array):
    """Bubble sort `array` in place into DESCENDING order and return it.

    Repeatedly swaps adjacent out-of-order elements; O(n^2) worst case.
    (The original docstring claimed "smaller numbers are sorted first",
    which contradicted the `<` swap condition — larger numbers come first.)
    An early exit ends the scan once a full pass makes no swap.
    """
    for i in range(len(array)):
        swapped = False
        for j in range(len(array) - i - 1):
            if array[j] < array[j + 1]:
                array[j], array[j + 1] = array[j + 1], array[j]
                swapped = True
        if not swapped:
            # Already sorted: no pass can change anything further.
            break
    return array
def key_type_validator(x):
    """
    Property: KeySchema.KeyType

    Return `x` unchanged when it is a valid key type, otherwise raise
    ValueError.
    """
    allowed = ["HASH", "RANGE"]
    if x in allowed:
        return x
    raise ValueError("KeyType must be one of: %s" % ", ".join(allowed))
def split_clusters(clusters):
    """Double the number of clusters by splitting each into its even-index
    and odd-index members (empty halves are dropped)."""
    out = []
    for members in clusters:
        halves = (members[0::2], members[1::2])
        # Sanity check: the split must not lose or duplicate members.
        assert sum(map(len, halves)) == len(members)
        for half in halves:
            if half:
                out.append(half)
    return out
def power(x, y):
    """Compute x**y for a non-negative integer y by binary exponentiation.

    Uses O(log y) multiplications. For y == 0 the loop never runs and the
    initial value 1 is returned (x**0 == 1).

    (The original used bare string literals as comments — those are
    runtime no-op statements, not comments — replaced with real ones.)
    """
    result = 1
    while y > 0:
        # If the current low bit of y is set, fold the current base in.
        if y & 1:
            result = result * x
        # Move to the next bit and square the base.
        y = y >> 1
        x = x * x
    return result
def bytes_fmt(value, precision=2):
    """Represent a bytes value in a human-readable format.

    Integers below 1024 print unrounded in 'B'; anything scaled (or a
    float input, e.g. a rate in B/s) is rounded to `precision` digits.
    """
    units = ('B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB')
    for unit in units:
        if value >= 1024 and unit != 'YB':
            value /= 1024
            continue
        if unit != 'B' or isinstance(value, float):
            value = round(value, precision)
        return '{} {}'.format(value, unit)
def get_slope(r, sy, sx):
    """
    Get the slope of a regression line having the given parameters.

    Parameters
    ----------
    r : float
        regression coefficient (correlation) of the line
    sy : float
        sample standard deviation of the y distribution
    sx : float
        sample standard deviation of the x distribution

    Returns
    -------
    float
        The slope r * (sy / sx) of the regression line.
    """
    return r * (sy / sx)
def format_fuzzy_result(hits):
    """Format fuzzy-search hits into a list of part-info dicts.

    @param hits: searched parts (each with an ``_source`` mapping holding
        ``part_name``, ``part_id`` and ``part_type``)
    @type hits: list
    @return: part informations
    @rtype: list
    """
    return [
        {
            'part_name': hit['_source']['part_name'],
            'part_id': hit['_source']['part_id'],
            'part_type': hit['_source']['part_type'],
        }
        for hit in hits
    ]
def bump_version(version: str, mode: str) -> str:
    """Bump a dotted version string according to `mode`.

    Eg.
        bump_version("1.0.3", "micro") -> "1.0.4"
        bump_version("1.0.3", "minor") -> "1.1.0"

    Any mode other than "major"/"minor" bumps the last (micro) component.
    """
    parts = [int(piece) for piece in version.split(".")]
    if mode == "major":
        parts[-3] += 1
        parts[-2:] = [0, 0]
    elif mode == "minor":
        parts[-2] += 1
        parts[-1] = 0
    else:
        parts[-1] += 1
    return ".".join(map(str, parts))
def PrettyOS(os, os_v1=None, os_v2=None, os_v3=None, os_v4=None):
    """Pretty os string.

    Joins whichever version components are given with dots; a non-digit
    third component (e.g. 'b3') is appended without a separating dot.
    """
    if os_v4:
        return f'{os} {os_v1}.{os_v2}.{os_v3}.{os_v4}'
    if os_v3:
        # Non-numeric third component (e.g. a beta tag) attaches directly.
        if os_v3[0].isdigit():
            return f'{os} {os_v1}.{os_v2}.{os_v3}'
        return f'{os} {os_v1}.{os_v2}{os_v3}'
    if os_v2:
        return f'{os} {os_v1}.{os_v2}'
    if os_v1:
        return f'{os} {os_v1}'
    return os
def path_to_edges(p: list):
    """
    Return all edges along a path.

    :param p: a path in a graph (sequence of vertices)
    :return: list of (u, v) edge tuples for consecutive vertices; a
        single-vertex path yields one self-loop edge, an empty path
        yields no edges.
    """
    if len(p) == 1:
        return [(p[0], p[0])]
    # zip pairs consecutive vertices, replacing the index-filter loop.
    return list(zip(p, p[1:]))
def lmap(v, x, y) -> float:
    """Linearly map value `v` from source range `x` to target range `y`."""
    scaled_offset = (v - x[0]) * (y[1] - y[0]) / (x[1] - x[0])
    return y[0] + scaled_offset
def rotate_clockwise(array_2d):
    """Rotate a 2-D list 90 degrees clockwise.

    Approach copied from: https://stackoverflow.com/a/48444999/3753724
    """
    rotated_columns = zip(*reversed(array_2d))
    return [list(column) for column in rotated_columns]
def CreateResourceName(parent, collection, resource_id):
    """Creates the full resource name.

    Args:
      parent: The project or organization id as a resource name, e.g.
        'projects/my-project' or 'organizations/123'.
      collection: The resource collection. e.g. 'logs'
      resource_id: The id within the collection, e.g. 'my-log'.

    Returns:
      resource, e.g. projects/my-project/logs/my-log.
    """
    # Each component of a resource name must contain no slashes (a
    # Stackdriver Logging API requirement), so the id is escaped.
    escaped_id = resource_id.replace('/', '%2F')
    return '/'.join((parent, collection, escaped_id))
def round_percent(num):
    """Return a customercare-format percentage from a number.

    Values under 10 keep one decimal place; larger values become ints.
    """
    if num < 10:
        return round(num, 1)
    return int(round(num, 0))
def midpoint_point_point(a, b):
    """Compute the midpoint of two points lying in the XY-plane.

    Parameters
    ----------
    a : sequence of float
        XYZ coordinates of the first point.
    b : sequence of float
        XYZ coordinates of the second point.

    Returns
    -------
    list
        XYZ coordinates of the midpoint.
    """
    return [0.5 * (a[axis] + b[axis]) for axis in range(3)]
def simplified_env_name(env_name: str) -> str:
    """Get the simplified version of a gym environment name.

    If the given environment name contains the exporting module
    ('mymodule:MyEnv-v0'), that module prefix is removed, giving
    'MyEnv-v0'.

    Args:
        env_name: The gym environment name.

    Returns:
        The environment name with its module part removed.
    """
    assert isinstance(env_name, str)
    _, separator, remainder = env_name.partition(":")
    return remainder if separator else env_name
def default_filter(files):
    """Filter folders based on content.

    Parameters
    ----------
    files : list
        A list containing strings of filenames in a directory.

    Returns
    -------
    bool
        True when the listing contains '1.mkv', '2.mkv' and 'Labels.json'.
    """
    # set.issubset expresses the three membership tests in one pass and
    # already yields a bool (replacing `if cond: return True / return False`).
    return {'1.mkv', '2.mkv', 'Labels.json'}.issubset(files)
def round_margin(margin: float, threshold: float) -> int:
    """Round margin according to threshold.

    The margin rounds up only when its fractional part exceeds
    `threshold`; otherwise it is truncated.

    :param margin: calculated margin
    :param threshold: threshold for rounding
    :return: rounded margin
    """
    whole = int(margin)
    fractional = margin - whole
    return whole + 1 if fractional > threshold else whole
def rivers_with_station(stations):
    """Return the set of rivers monitored by the given stations.

    (The original docstring said "list" but the function has always
    returned a set; the docstring now matches the behavior.)

    Parameters
    ----------
    stations : iterable
        Monitoring station objects, each with a ``river`` attribute.

    Returns
    -------
    set
        The distinct ``river`` values.
    """
    # Set comprehension replaces the manual add-loop.
    return {station.river for station in stations}
def _skewtent_onestep(value, threshold):
"""
Computes a single step of iteration through the skew-tent map given an
input (previous) value and a threshold. Returns the next value as output.
This function is called by _iterate_skewtent for iterating repeatedly.
Parameters
----------
value : scalar, float64
Input value to the skew-tent map.
threshold : scalar, float64
Threshold value of the skew-tent map.
Returns
-------
Output value as float64 from the skew-tent map.
Computed conditionally as follows:
If value < threshold, then output is value / threshold
Else, output is (1 - value)/(1 - threshold)
"""
if value < threshold:
return value / threshold
return (1 - value) / (1 - threshold) |
def empty_bytearray(preallocate: int) -> bytearray:
    """
    Return an empty bytearray built from a `preallocate`-byte buffer.

    Useful to avoid frequent reallocation; whether the capacity is
    actually retained after clearing is a CPython implementation detail.
    """
    buffer = bytearray(preallocate)
    del buffer[:]
    return buffer
def is_floating_number(number):
    """Return True when `number` can be converted with float().

    :param number: value to test (string, numeric, or anything else)
    :return: True when float(number) succeeds, False otherwise
    :rtype: bool
    """
    try:
        float(number)
    except (TypeError, ValueError):
        # TypeError covers non-numeric types such as None or lists;
        # the original only caught ValueError and crashed on those.
        return False
    return True
def adjust_ranges(bands, freq):
    """
    Trim leading and trailing zero-frequency entries so that the first and
    last frequencies in the range are non-zero, then renumber keys from 0.

    NOTE(review): both dicts are MUTATED in place (entries popped), and the
    returned dicts are fresh copies with renumbered keys. Assumes `freq`
    has consecutive integer keys in insertion order and that `bands` shares
    the same keys — TODO confirm against callers.

    :param bands: The bands dictionary.
    :param freq: The frequency dictionary.
    :return: Adjusted (bands, freq) with keys renumbered 0..m.
    """
    # Get the indices of the first and last non-zero elements.
    first = 0
    for k, v in freq.items():
        if v != 0:
            first = k
            break
    # Scan backwards for the last non-zero frequency.
    rev_keys = list(freq.keys())[::-1]
    last = rev_keys[0]
    for idx in list(freq.keys())[::-1]:
        if freq[idx] != 0:
            last = idx
            break
    # Now adjust the ranges.
    min_key = min(freq.keys())
    max_key = max(freq.keys())
    # Drop leading zero entries by key.
    for idx in range(min_key, first):
        freq.pop(idx)
        bands.pop(idx)
    # Drop trailing zero entries; popitem removes the most recently
    # inserted pair (relies on dict insertion order, Python 3.7+).
    for idx in range(last + 1, max_key + 1):
        freq.popitem()
        bands.popitem()
    # Rebuild both dicts with keys renumbered from 0.
    old_keys = freq.keys()
    adj_freq = dict()
    adj_bands = dict()
    for idx, k in enumerate(old_keys):
        adj_freq[idx] = freq[k]
        adj_bands[idx] = bands[k]
    return adj_bands, adj_freq
def UInt64(value):
    """Encode a 64-bit integer value.

    :param value: non-negative int fitting in 64 bits (`int.to_bytes`
        raises OverflowError for negatives or values too large).
    :return: 8-byte big-endian `bytes` representation.
    """
    return value.to_bytes(8, 'big')
def moving_avg(v, N):
    """
    Simple moving average over a window of N samples.

    Parameters
    ----------
    v : list
        data to average
    N : integer
        number of samples per average.

    Returns
    -------
    m_avg : list
        averaged data (len(v) - N + 1 entries; empty if len(v) < N).
    """
    # Running cumulative sums: cumsum[i] is the sum of the first i samples,
    # so each window average is one subtraction and one division.
    cumsum = [0]
    averaged = []
    for count, sample in enumerate(v, 1):
        cumsum.append(cumsum[count - 1] + sample)
        if count >= N:
            averaged.append((cumsum[count] - cumsum[count - N]) / N)
    return averaged
def _not_in(input, values):
"""Checks if the given input is not in the list of values
:param input: The input to check
:type input: int/float/string
:param values: The values to check
:type values: :func:`list`
:returns: True if the condition check passes, False otherwise
:rtype: bool
"""
try:
if input in values:
return False
except TypeError:
return True
return True |
def cropRect(rect, cropTop, cropBottom, cropLeft, cropRight):
    """
    Crops a rectangle by the specified number of pixels on each side.

    The input rectangle and return value are both a tuple of (x,y,w,h):
    the origin moves in by the left/top crops and the size shrinks by
    the crops on both opposing sides.
    """
    x, y, w, h = rect
    return (
        x + cropLeft,
        y + cropTop,
        w - cropLeft - cropRight,
        h - cropTop - cropBottom,
    )
def lpph2lpmm(lpph, n, pixelPitch):
    """Convert resolution from Line-Pair per Picture Height (LPPH)
    to Line-Pair per Millimeter (LPMM).

    Parameters
    ----------
    lpph : float
        resolution in Line-Pair per Picture Height (LPPH)
    n : integer
        number of pixels along the dimension (height or width) of
        consideration
    pixelPitch : float
        pixel pitch of the sensor in microns

    Returns
    -------
    lpmm : float
        resolution in Line-Pair per Millimeter (LPMM)
    """
    # n * pixelPitch is the picture size in microns; the factor 1000
    # converts microns to millimeters.
    scaled = lpph * 1000.0
    return scaled / (n * pixelPitch)
def format_repr(obj, max_len=50, ellipsis="..."):
    """Wrapper around `repr()` that shortens long representations.

    obj: The object to represent.
    max_len (int): Maximum string length. Longer strings are cut in the
        middle so only the beginning and end are shown, joined by ellipsis.
    ellipsis (unicode): Ellipsis character(s), e.g. "...".
    RETURNS (unicode): The formatted representation.
    """
    text = repr(obj)
    if len(text) < max_len:
        return text
    half = int(max_len / 2)
    return "{} {} {}".format(text[:half], ellipsis, text[-half:])
def strength(x):
    """
    Neuron-sense strength on a 0-100 scale, scaled down by 10.

    :param x: history record [x, y, z, k, g]; uses x[0] (percentage of the
        biggest population) and the magnitude of x[1] (sign is ignored —
        only the size of the number counts).
    """
    population, level = x[0], x[1]
    combined = population + abs(level)
    return ((combined * 100) / 100) / 10
def addition(s, t):
    """Return the mod-2 (bitwise XOR) sum of two equal-length n-bit tuples.

    Pairs elements with zip instead of the original index loop; the
    documented contract is that `s` and `t` have the same length.
    """
    return tuple((a + b) % 2 for a, b in zip(s, t))
def calc_target_joint_dimension_from_link_list(link_list):
    """Calculate total degrees of freedom from a link list.

    Parameters
    ----------
    link_list : list[skrobot.model.Link]

    Returns
    -------
    n : int
        Total degrees of freedom (links without a ``joint`` attribute
        contribute nothing).
    """
    return sum(link.joint.joint_dof for link in link_list
               if hasattr(link, 'joint'))
def add_prefix(name, prefix=None, split='.'):
    """Return `name` joined to `prefix` by `split` when a prefix is given,
    otherwise `name` unchanged."""
    if prefix is None:
        return name
    return '{}{}{}'.format(prefix, split, name)
def skewtent_onestep(value, threshold):
    """
    Single iteration step of the skew-tent map.

    Called by _iterate_skewtent for iterating repeatedly.

    Parameters
    ----------
    value : scalar, float64
        Input value to the skew-tent map.
    threshold : scalar, float64
        Threshold value of the skew-tent map.

    Returns
    -------
    float64
        value / threshold when value < threshold,
        else (1 - value) / (1 - threshold).
    """
    return value / threshold if value < threshold else (1 - value) / (1 - threshold)
def compare_pointlists(a, b, epsilon=0.001):
    """Check whether two stroke lists (a and b) are equal.

    Strokes are lists of points; points are dicts with "x", "y" and
    "time" keys compared coordinate-wise within `epsilon`.
    """
    if len(a) != len(b):
        return False
    for stroke_left, stroke_right in zip(a, b):
        if len(stroke_left) != len(stroke_right):
            return False
        mismatch = any(
            abs(pt_left[key] - pt_right[key]) > epsilon
            for pt_left, pt_right in zip(stroke_left, stroke_right)
            for key in ("x", "y", "time")
        )
        if mismatch:
            return False
    return True
def _set_default_temperature_rise(
subcategory_id: int,
family_id: int,
) -> float:
"""Set the default temperature rise.
:param subcategory_id: the subcategory ID of the inductive device with missing
defaults.
:param family_id: the family ID of the inductive device with missing defaults.
:return: _temperature_rise
:rtype: float
"""
return 30.0 if subcategory_id == 1 and family_id == 3 else 10.0 |
def wiggle_sort(nums: list) -> list:
    """
    In-place wiggle sort: swap neighbors so values alternate low/high.

    Example:
    >>> wiggle_sort([0, 5, 3, 2, 2])
    [0, 5, 2, 3, 2]
    >>> wiggle_sort([])
    []
    >>> wiggle_sort([-2, -5, -45])
    [-45, -2, -5]
    >>> wiggle_sort([-2.1, -5.68, -45.11])
    [-45.11, -2.1, -5.68]
    """
    for idx in range(len(nums)):
        at_odd_position = idx % 2 == 1
        previous_larger = nums[idx - 1] > nums[idx]
        # Swap when the pair violates the wiggle property for this parity.
        if at_odd_position == previous_larger:
            nums[idx - 1], nums[idx] = nums[idx], nums[idx - 1]
    return nums
def _compress_cmd(log_path):
"""Return bash command which compresses the given path to a tarball."""
compres_cmd = 'cd "$(dirname %s)" && ' % log_path
compres_cmd += 'f="$(basename %s)" && ' % log_path
compres_cmd += 'tar czf "$f.tgz" "$f" && '
compres_cmd += 'rm -rf %s' % log_path
return compres_cmd |
def GaussianPenalty(var, params):
    """Adds a Gaussian log penalty factor to a log-likelihood variable.

    params[0] is the mean and params[1] the standard deviation of the
    Gaussian prior; the returned value is the log-density up to a constant.
    """
    standardized = (var - params[0]) / params[1]
    return -0.5 * standardized ** 2
def convert_bytes(num):
    """
    Convert num to an idiomatic byte-unit string.

    :param num: the input number (bytes).
    :type num: int
    :return: str, e.g. '2.0 KB'
    """
    for unit in ['bytes', 'KB', 'MB', 'GB', 'TB']:
        if num < 1024.0:
            return "%3.1f %s" % (num, unit)
        num /= 1024.0
    # The original fell off the loop and silently returned None for
    # values >= 1024 TB; after the loop num is already scaled to PB.
    return "%3.1f %s" % (num, 'PB')
def cron_trigger_dict(ts_epoch):
    """Build a cron trigger as a dictionary.

    Fires every second of 2020, bounded by `ts_epoch` on both ends.
    """
    return dict(
        year='2020',
        month='*/1',
        day='*/1',
        week='*/1',
        day_of_week='*/1',
        hour='*/1',
        minute='*/1',
        second='*/1',
        start_date=ts_epoch,
        end_date=ts_epoch,
        timezone='utc',
        jitter=1,
    )
def _parse_shell_command(shell_command, list_needed=False):
"""Utility function."""
if type(shell_command) == list:
if list_needed:
return shell_command
cmd = ''
for i in shell_command:
cmd += i + ' '
else:
if list_needed:
cmd = []
s = shell_command.split(' ')
for w in s:
cmd.append(w)
else:
cmd = shell_command
return cmd |
def expand_id_map(id_map, all_ids):
    """Ensure every id in `all_ids` appears as a key in the mapping.

    Missing ids are mapped to themselves; `id_map` is mutated and returned.
    """
    for missing in set(all_ids).difference(id_map.keys()):
        id_map[missing] = missing
    return id_map
def parse_name(git_line: str) -> str:
    """
    Return the author/committer name from git show output.

    >>> parse_name('Author: John Doe <JohnDoe@email.com>')
    'John Doe'
    """
    # Take the field after the first colon, then drop the <email> part.
    fields = git_line.split(":")
    author_field = fields[1]
    return author_field.split("<")[0].strip()
def read_out_tweets(processed_tweets, speech_convertor=None):
    """
    Input - list of processed (user, text) 'Tweets'
    Output - list of spoken responses, numbered from 1.

    `speech_convertor` is accepted for interface compatibility but unused
    here.
    """
    spoken = []
    for number, (user, text) in enumerate(processed_tweets, start=1):
        spoken.append(
            "tweet number {num} by {user}. {text}.".format(
                num=number, user=user, text=text))
    return spoken
def print_from_template (s):
    """ Show the value of a string that is being processed in a
    Jinja template, for debugging.

    Returns `s` unchanged so the call can sit inline inside a template
    expression without altering its output.
    """
    print(s)
    return s
def package_name(url):
    """Get the user and project name from a github url.

    The project is the last path segment with any extension (e.g. '.git')
    removed; the user is the segment before it.
    """
    segments = url.split('/')
    project = segments[-1].split('.')[0]
    user = segments[-2]
    return {"project": project, "user": user}
def rangeheads(s):
    """Return first element of each range in a sorted sequence of numbers.
    >>> rangeheads( (0, 1, 3, 4, 6) )
    [0, 3, 6]"""
    members = set(s)
    heads = []
    for value in s:
        # A value starts a range when its predecessor is absent.
        if value - 1 not in members:
            heads.append(value)
    return heads
def max_actions(points, action_cost, recover_cost, limit):
    """
    Returns the max number of actions. It will weight the action cost against
    the available points, adding the recovery cost each time it runs out of
    points.

    :param points: available points to spend on actions
    :param action_cost: cost per action (non-positive yields 0 actions)
    :param recover_cost: cost to reload between actions, averaged per shot
    :param limit: capacity before a reload is needed (non-positive yields 0)
    :return: int number of affordable actions

    NOTE(review): when actions exceed `limit`, the reload cost is spread
    evenly over all shots and the count recomputed once — it is not
    iterated to a fixed point.
    """
    if limit <= 0:
        actions = 0
    elif action_cost <= 0:
        actions = 0
    else:
        fire_actions = int(points / action_cost)
        actions = fire_actions
        if actions > limit:
            # Shoots above capacity. Has to reload
            # We need to check the number of times the weapon has to reload
            reload_shots = fire_actions
            if reload_shots > 1:
                # Adds cost to reload between shots
                reload_cost = (reload_shots - 1) * recover_cost
                # The cost will be averaged to each shot
                reload_cost = reload_cost / reload_shots
            else:
                # A single shot. No reload needed
                reload_cost = 0
            cost = action_cost + reload_cost
            fire_actions = int(points / cost)
            actions = fire_actions
    return actions
def unpack_arch(architecture):
    """
    :param architecture: dict containing num. of units in every layer of a
        2-layer FC network ("state_dim", "hidden_units", "num_actions")
    :return: (input_size, hidden_size, output_size) of the NN
    """
    return (
        architecture["state_dim"],
        architecture["hidden_units"],
        architecture["num_actions"],
    )
def _get_literal_string_prefix_len(token_string: str) -> int:
"""
Getting the length of the literal string prefix.
Parameters
----------
token_string : str
String to check.
Returns
-------
int
Length of the literal string prefix.
Examples
--------
>>> example_string = "'Hello world'"
>>> _get_literal_string_prefix_len(example_string)
0
>>> example_string = "r'Hello world'"
>>> _get_literal_string_prefix_len(example_string)
1
"""
try:
return min(
token_string.find(quote)
for quote in (r"'", r'"')
if token_string.find(quote) >= 0
)
except ValueError:
return 0 |
def abbreviation_pattern(prefix, suffix, capturing=False):
    """Create a regex pattern matching the abbreviated word.

    `prefix` is the mandatory minimum abbreviation; each letter of
    `suffix` (upper-cased) becomes a nested optional group, so any
    prefix-plus-leading-letters form matches.
    """
    group_start = '(' if capturing else '(?:'
    optional_close = ')?'
    segments = [prefix]
    segments.extend(ch.upper() for ch in suffix)
    return group_start.join(segments) + optional_close * len(suffix)
def radialdesaturate(percentage=1.0, minimum=1.0):
    """
    Return GLSL fragment-shader source that desaturates the rendered
    texture radially (the center stays colored, edges go gray).

    percentage = strength of the desaturation ramp; can go above 1 to
        increase the effect even more.
    minimum = size of the untouched central area; lower numbers leave
        less of the screen in full color.

    NOTE(review): the embedded shader header says "16-Sample Radial Blur",
    but the code below mixes toward a luma gray — it desaturates, it does
    not blur.
    """
    return ("""
    // Name: 16-Sample Radial Blur
    // Author: SolarLune
    // Date Updated: 6/6/11
    //
    // Notes: Really, it's more of a diamond blur, but it's blurry enough for me not to care (LOL).
    uniform sampler2D bgl_RenderedTexture;
    void main(void)
    {
    float x = gl_TexCoord[3].st.x;
    float y = gl_TexCoord[3].st.y;
    float value;
    float min = 0.1 * """ + str(float(minimum)) + """;
    value = ((abs(x - 0.5) - min) + (abs(y - 0.5) - min)) * """ + str(float(percentage)) + """;
    if (value < 0.0)
    value = 0.0;
    if (value > 1.0)
    value = 1.0;
    vec4 color = texture2D(bgl_RenderedTexture, gl_TexCoord[0].st);
    float gray = dot(color.rgb, vec3(0.299, 0.587, 0.114));
    // The human eye is more sensitive to certain colors (like bright yellow) than others, so you need to use this specific color-formula to average them out to one monotone color (gray)
    vec4 desat = vec4(gray, gray, gray, color.a);
    gl_FragColor = mix(color, desat, value);
    }
    """)
# |
def find_last_word(s):
    """Find the last word in a string (letters and spaces only are kept)."""
    # Note: will break on \n, \r, etc.
    letters_and_spaces = "".join(ch for ch in s if ch.isalpha() or ch == " ")
    return letters_and_spaces.strip().split()[-1]
def _initialize_segments(lines):
"""Returns list([0, ...n]) for n = max length in lines."""
if lines:
return [0] * max([len(line) for line in lines])
return [] |
def to_classname(filename: str) -> str:
    """
    Map a divided mock class file name to its class name
    (inverse function of headersplit.to_filename),
    e.g. "test/mocks/server/admin_stream.h" -> "MockAdminStream".

    Args:
        filename: mock class header file name (may be the whole path
            instead of the base name)

    Returns:
        corresponding class name
    """
    base = filename.split('/')[-1].replace('.h', '')
    words = base.split('_')
    camel = ''.join(word[:1].upper() + word[1:] for word in words)
    return "Mock" + camel
def safe_boolcomp(value, expected):
    """Safely do a boolean comparison.

    This works even if the value you want to compare is a string such as
    "True" or "false".

    Args:
        value: what you want to safely compare.
        expected (bool): what you want to compare `value` with.

    Returns:
        bool: True if str(value).lower() equals str(expected).lower().
    """
    return str(value).lower() == str(expected).lower()
def _todict(obj, classkey=None):
"""Convert an object graph to dictionary.
Adapted from:
http://stackoverflow.com/questions/1036409/recursively-convert-python-object-graph-to-dictionary .
"""
if isinstance(obj, dict):
for k in obj.keys():
obj[k] = _todict(obj[k], classkey)
return obj
elif hasattr(obj, "__keylist__"):
data = {key: _todict(obj[key], classkey)
for key in obj.__keylist__
if not callable(obj[key])}
if classkey is not None and hasattr(obj, "__class__"):
data[classkey] = obj.__class__.__name__
return data
elif hasattr(obj, "__dict__"):
data = {key: _todict(value, classkey)
for key, value in obj.__dict__.iteritems()
if not callable(value)}
if classkey is not None and hasattr(obj, "__class__"):
data[classkey] = obj.__class__.__name__
return data
elif hasattr(obj, "__iter__"):
return [_todict(v, classkey) for v in obj]
else:
return obj |
def get_test_by_id(
    test_id
):  # noqa: E501
    """Get the status of a given test run.

    Stub handler (Connexion/OpenAPI style — presumably): always returns
    the tuple ('Not Implemented', 501) as (body, HTTP status).

    :param test_id: test ID
    :type test_id: str
    :rtype: ServiceTest
    """
    return 'Not Implemented', 501
def filename_from_filepath(filepath):
    """Strip the path off a filepath to get a filename.

    Paths without a '/' are returned unchanged; a trailing '/' yields ''.
    """
    return filepath.rsplit('/', 1)[-1]
def add_genotype(cell_genotypes, cell_barcode, umi, genotype):
    """
    Append genotype information for a cell and UMI to the dictionary.

    The nested structure is {cell_barcode: {umi: [genotype, ...]}}; missing
    levels are created on demand. The input dict is mutated and returned.

    dict.setdefault replaces the original's nested try/except probing with
    one expression and identical behavior.
    """
    cell_genotypes.setdefault(cell_barcode, {}).setdefault(umi, []).append(genotype)
    return cell_genotypes
def get_nbr_genes(person, one_gene, two_genes):
    """
    Return the number of genes (0, 1 or 2) for `person`, based on which
    group they belong to.
    """
    if person in two_genes:
        return 2
    if person in one_gene:
        return 1
    return 0
def check_param(var, datatype):
    """Checks if a variable is the correct data type.

    If it's not correct, return the error message to raise.

    Parameters
    -----------
    var
        the variable to check.
    datatype
        the data type to compare against.

    Returns
    ----------
    str
        '' when the type matches, otherwise the error message to raise.
    """
    if isinstance(var, datatype):
        return ''
    # The original scanned locals() for the argument's name, but inside
    # this function that scan can only ever resolve to 'var' (and broke on
    # values with odd __eq__), so the name is stated directly — the
    # produced message is unchanged.
    return f"var expects '{datatype.__name__}' not '{type(var)}'"
def problem_26(lim=1000):
    """ longest recurring cycle in decimal fractions

    Project-Euler-style search: among 1/n for n in [1, lim), return the n
    whose decimal expansion has the longest repeating cycle.
    """
    def div_cycle(n, print_cyc=False):
        """ helper function to determine the length of the cycle in 1/n.
        Essentially, step through long division and once we reach a remainder
        that has already been seen, we have found the cycle (between the
        two remainders)

        Returns -1 when the decimal terminates (remainder reaches 0).
        """
        base = 10
        remainder = 1
        # for debugging
        cyc = ""
        # track remainders and length of cycle before then
        rems = {}
        cycle_len = 0
        while remainder:
            # for debugging
            dec = remainder * base / n
            # long-division "carry"
            remainder *= base
            # already seen remainder is the start of next cycle
            if remainder in rems:
                if print_cyc:
                    print("cycle found: ", cyc[rems[remainder] :])
                return cycle_len - rems[remainder]
            # store the index at occurence of remainder, because there can be
            # digits before the cycle begins, need to subtract out
            rems[remainder] = cycle_len
            cycle_len += 1
            remainder %= n
            cyc += str(dec)
        # remainder hit 0: terminating decimal, no recurring cycle
        return -1
    max_len = 0
    max_n = 0
    # loop over all numbers and find the n with highest corresponding cycle len.
    for num in range(1, lim):
        val = div_cycle(num)
        if val > max_len:
            max_len = val
            max_n = num
    return max_n
def naive_fibonacci(n):
    """
    Return the n-th Fibonacci number using plain recursion.

    The basic math expression is Xn = (Xn-1) + (Xn-2).
    Complexity is O(2^n) — the subproblems overlap heavily, so this is
    deliberately naive.
    """
    n = int(n)
    if n in (0, 1):  # base case
        return n
    return naive_fibonacci(n - 2) + naive_fibonacci(n - 1)
def reduce_files(step, paths, times=None, timestamps=None):
    """Keep every STEP-th frame, always retaining the final frame.

    When both `times` and `timestamps` are given they are reduced the same
    way (duplicating their final entry if the last frame was re-appended);
    otherwise they pass through unchanged.
    """
    kept_paths = paths[::step]
    # Does the stride miss the absolute last frame?
    needs_tail = kept_paths[-1] != paths[-1]
    if needs_tail:
        kept_paths.append(paths[-1])
    if times is not None and timestamps is not None:
        if needs_tail:
            times = times[::step] + [times[-1]]
            timestamps = timestamps[::step] + [timestamps[-1]]
        else:
            times = times[::step]
            timestamps = timestamps[::step]
    return kept_paths, times, timestamps
def wrap_index(idx, vector_size):
    """Wrap `idx` so the result always lies inside [0, vector_size).

    Python's modulo already returns a non-negative result for any integer
    `idx`, which generalizes the original one-step arithmetic: indices more
    than one full vector length out of range now wrap correctly too
    (the original returned e.g. -2 for idx=-7, size=5).
    """
    return idx % vector_size
def mapFromTo(from_indices, to_indices):
    """Get a map from old index to new index.

    Each entry is a (key, index) pair; new keys with exactly one matching
    old key map old-index -> new-index. Keys with no old match map the new
    index to itself (with a diagnostic print); ambiguous matches are only
    reported.
    """
    from_to = {}
    # magic mapping function:
    for t in to_indices:
        # `==` replaces the original `is`: identity comparison broke for
        # equal-but-distinct keys (e.g. ints outside CPython's small-int
        # cache, or strings built at runtime).
        match = [f[1] for f in from_indices if f[0] == t[0]]
        if len(match) == 1:
            from_to[match[0]] = t[1]
        elif len(match) > 1:
            print("Found multiple indices of " + str(t) + ": " + str(match))
        else:
            print("Did not found index " + str(t) + "added mapping new to new")
            from_to[t[1]] = t[1]
    return from_to
def is_pandas_df(obj):
    """Check if an object is a Pandas DataFrame — without importing pandas.

    Returns
    -------
    bool
        True if the object is a Pandas DataFrame and False otherwise.
        (The original returned the bound `to_dict` method — truthy but not
        a bool, contradicting this docstring; hasattr yields real bools
        and keeps the same truthiness for all callers.)
    """
    return (
        obj.__class__.__module__ == "pandas.core.frame"
        and hasattr(obj, "to_records")
        and hasattr(obj, "to_dict")
    )
def dbsession(request):
    """Create a dict that represents the database / database session.

    `request` is accepted but unused (pytest-fixture signature —
    presumably; verify against the test suite's conftest).
    """
    database = {'value': 'Framework value'}
    return database
def ramp_1(t, old, new, l):
    """
    Simulate tardiness in compliance to social measures.

    Interpolates a parameter between 'old' and 'new' with a one-parameter
    ramp: linear until full compliance is reached at t == l, constant after.

    Parameters
    ----------
    t : float or int
        time since last checkpoint
    old : np.array
        parameter value before checkpoint
    new : np.array
        parameter value after checkpoint
    l : float
        time to reach full compliance

    Returns
    -------
    out : np.array
        interpolation between old and new parameter value
    """
    fraction = (1 / l) * t if t <= l else 1.0
    return old + fraction * (new - old)
def get_pred_class(lls):
    """
    Get the MAP prediction: the class with maximum assigned likelihood
    (i.e. the key of the largest value in the dict).

    :param lls: Map from class name to log likelihood
    """
    return max(lls, key=lambda cls: lls[cls])
def strip_mult(*args):
    """Strip surrounding whitespace from each argument.

    :param args: any number of strings
    :return: list of the stripped strings, in order
    """
    # Comprehension replaces the manual append loop (PERF401).
    return [element.strip() for element in args]
def isSubDict(dict_super, dict_sub):
    """
    Tests if the second dictionary is a subset of the first: every key of
    `dict_sub` must exist in `dict_super` with an equal value.

    :param dict dict_super:
    :param dict dict_sub:
    :return bool:
    """
    return all(
        key in dict_super and dict_sub[key] == dict_super[key]
        for key in dict_sub
    )
def convert_to_time(time):
    """Print (and return) the training time in days: hours: minutes: seconds
    format."""
    days, remainder = divmod(time, 24 * 60 * 60)
    hrs, remainder = divmod(remainder, 60 * 60)
    mins, secs = divmod(remainder, 60)
    msg = (f"Training completed in {days:.0f} days: {hrs:.0f} hours: "
           f"{mins:.0f} minutes: {secs:.0f} seconds")
    print(msg)
    return msg
def s2_dn2toa(I):
    """Scale Sentinel-2 digital numbers by the 10000 quantification factor
    used for top of atmosphere (TOA) values.

    Notes
    -----
    sentinel.esa.int/web/sentinel/technical-guides/sentinel-2-msi/
    level-1c/algorithm
    """
    return I * 10000
def _build_custom_vocab(tokens, vocab_length):
"""
Helper function for building custom vocab
"""
custom_vocab = {}
cur_key = vocab_length
for token in tokens:
custom_vocab[token] = cur_key
cur_key += 1
return custom_vocab |
def show_supported(supported):
    """
    Return "OK" (green) when `supported` is truthy, otherwise "NOT OK" (red).

    Color codes come from colorama when it is importable; if it is missing
    or fails to initialise, the plain uncolored text is returned.
    """
    try:
        from colorama import Fore, Style, init
        init()
        startcolor = Fore.GREEN if supported else Fore.RED
        stopcolor = Style.RESET_ALL
    except Exception:
        # `except Exception` instead of the original bare `except:` so
        # KeyboardInterrupt / SystemExit are no longer swallowed.
        startcolor = stopcolor = ""
    output = "OK" if supported else "NOT OK"
    return f"{startcolor}{output}{stopcolor}"
def rbsearch(list_, value, lower=None, upper=None):
    """Binary search, recursive.

    Returns an index of ``value`` in the sorted list ``list_``, or ``None``
    if it is not present.

    Bug fix: the midpoint used true division (``/``), which produces a float
    on Python 3 and raised ``TypeError`` when used as a list index. Floor
    division (``//``) is required.
    """
    # handle empty list
    if len(list_) == 0:
        return None
    # set defaults for lower, upper on the initial (non-recursive) call
    if lower is None or upper is None:
        lower = 0
        upper = len(list_) - 1
    if list_[lower] == value:
        return lower
    elif list_[upper] == value:
        return upper
    elif not lower < upper:
        return None
    else:
        # overflow-safe midpoint; // keeps the index an int
        mid = lower + (upper - lower) // 2
        if list_[mid] == value:
            return mid
        elif list_[mid] < value:
            return rbsearch(list_, value, mid + 1, upper)
        else:
            return rbsearch(list_, value, lower, mid - 1)
def evaluateSystem(axiom, rules, n):
    """Evaluates a regular bog-standard L-System.
    axiom is a sequence of symbols.
    rules is either:
        a dictionary type S->Symbols, like {1: [1, 2, 3]}
        a list of tuples as the ones produced by the function production()
        a single tuple as the ones produced by the function production()
    n is an integer specifying the number of iterations
    What data constitutes as a symbol is not really important, but it should
    support equality and hashing.
    Example for the simple Pythagoras tree fractal:
    >>> evaluateSystem("0", {"1":list("11"),"0":list("1[0]0")}, 2)
    ['1', '1', '[', '1', '[', '0', ']', '0', ']', '1', '[', '0', ']', '0']
    A cleaner version using production():
    >>> evaluateSystem("0", production("1->11", "0->1[0]0"), 2)
    ['1', '1', '[', '1', '[', '0', ']', '0', ']', '1', '[', '0', ']', '0']
    """
    # Bug fix: normalize a single tuple to a list FIRST, then a list to a
    # dict. The original checked list before tuple, so a lone production
    # tuple ended up as a one-tuple list and was silently ignored by the
    # dict-style lookups below.
    if isinstance(rules, tuple):
        rules = [rules]
    if isinstance(rules, list):
        rules = dict(rules)
    if n < 1:
        return axiom
    result = []
    for x in axiom:
        if x in rules:
            result += rules[x]
        else:
            result.append(x)
    return evaluateSystem(result, rules, n - 1)
def lyrics_to_dictionary(songlyrics):
    """
    argument: List, songlyrics
    returns: dictionary with -> word in songlyrics(key):numberofoccurences in song(value)
    """
    # collections.Counter replaces the hand-rolled counting loop; cast back
    # to a plain dict so the return type is unchanged for callers.
    from collections import Counter
    return dict(Counter(songlyrics))
def _KeyValueToDict(
pair
):
"""Converts an iterable object of key=value pairs to dictionary."""
d = dict()
for kv in pair:
(k, v) = kv.split('=', 1)
d[k] = v
return d |
def simple_decompression(compressed_string):
    """Decompression for `simple_compression(string)`.

    A run of digits following a character means "repeat the preceding
    character that many times in total" (e.g. "a3b" -> "aaab").

    Bug fix: the original ``while True`` loop indexed position 0 before any
    length check, raising IndexError on an empty input; the bound is now
    checked up front, so "" decompresses to "".
    """
    string = ""
    i = 0
    length = len(compressed_string)
    while i < length:
        if compressed_string[i].isdigit():
            # consume the full run of digits to build the repeat count
            start = i
            while i < length and compressed_string[i].isdigit():
                i += 1
            count = int(compressed_string[start:i])
            # one copy was already emitted, so add count - 1 more
            string += string[-1] * (count - 1)
        else:
            string += compressed_string[i]
            i += 1
    return string
def _blank_out_conflicting_opts(opt_list, opt_names, conflicting_opts=None):
"""Utility for :py:meth:`MRJobRunner._combine_opts()`: if multiple
configs specify conflicting opts, blank them out in all but the
last config (so, for example, the command line beats the config file).
This returns a copy of *opt_list*
"""
conflicting_opts = set(conflicting_opts or ()) | set(opt_names)
# copy opt_list so we can modify it
opt_list = [dict(opts) for opts in opt_list]
# blank out region/zone before the last config where they are set
blank_out = False
for opts in reversed(opt_list):
if blank_out:
for opt_name in opt_names:
opts[opt_name] = None
elif any(opts.get(opt_name) is not None
for opt_name in conflicting_opts):
blank_out = True
return opt_list |
def solution(number: int) -> int:
    """
    If we list all the natural numbers below 10 that are
    multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of
    these multiples is 23.
    Finish the solution so that it returns the sum of all
    the multiples of 3 or 5 below the number passed in.
    :param number: exclusive upper bound
    :return: sum of the multiples of 3 or 5 below ``number``
    """
    return sum(n for n in range(number) if n % 3 == 0 or n % 5 == 0)
def _scrape_available_devices(text):
"""Scrapes the output of the adb devices command into a list
:param text: Full output of adb devices command to scrape
"""
lines = text.split('\n')
available_devices = []
for line in lines:
words = line.split('\t')
if len(words[0]) > 5 and words[0].find(" ") == -1:
available_devices.append(words[0])
return available_devices |
def get_first(data, *keys):
    """Retrieve a normalized data item, looking first in 'msg'.

    The first key is tried inside ``data['msg']`` (when present); the
    remaining keys are tried on ``data`` itself. Returns the first
    non-None value found, else None.
    """
    lookup = keys
    if "msg" in data:
        head, *lookup = keys
        found = data["msg"].get(head)
        if found is not None:
            return found
    for key in lookup:
        found = data.get(key)
        if found is not None:
            return found
def extract_objects(f_t):
    """Get objects coordinates as a list.

    For each object kind, keys of the form '<kind>_<id>x' / '<kind>_<id>y'
    in ``f_t`` are paired up into (x, y) coordinate tuples.
    """
    objects = {}
    for kind in ('key', 'chest', 'food'):
        # unique key prefixes, e.g. 'key_1x'/'key_1y' -> 'key_1'
        prefixes = {name[:-1] for name in f_t if name.startswith(kind + '_')}
        objects[kind] = [(f_t[p + 'x'], f_t[p + 'y']) for p in prefixes]
    return objects
def pattern(n):
    """Build the numeric pattern for ``n`` as a newline-joined string.

    Line 1 is "1"; line i (for 2 <= i <= n) is "1" followed by i-1
    asterisks and then i, e.g. pattern(3) == "1\\n1*2\\n1**3".
    For n < 2 the result is just "1".
    """
    lines = ["1"]
    for i in range(2, n + 1):
        lines.append("1" + "*" * (i - 1) + str(i))
    return "\n".join(lines)
def substrings(a, b, n):
    """Return the set of substrings of length n present in both a and b.

    Bug fix: the original had a shortcut for ``a == b and len(a) == n``
    that returned a *list* (``[a]``) while every other path returned a
    set, giving an inconsistent return type. The shortcut was also
    redundant — the general loop produces ``{a}`` for that case — so it
    is removed and the return type is now always a set.
    """
    subs = set()
    # no substring of length n exists when n exceeds len(a)
    if n > len(a):
        return subs
    for i in range(len(a) - n + 1):
        candidate = a[i:i + n]
        if candidate in b:
            subs.add(candidate)
    return subs
def repeat_count_with_max_length(x, max_length, assert_at_least_one_rep=False):
    """
    The maximum number of times `x` can be repeated such that its length is <= `max_length`.

    Parameters
    ----------
    x : tuple or Circuit
        the operation sequence to repeat

    max_length : int
        the maximum length

    assert_at_least_one_rep : bool, optional
        if True, assert that number of repetitions is > 0.
        This can be useful when used within a create_circuits inner loop
        to build a operation sequence lists where a string must be repeated at
        least once to be added to the list.

    Returns
    -------
    int
        the number of repetitions.
    """
    length = len(x)
    if assert_at_least_one_rep:
        # a single repetition must fit, otherwise the count would be 0
        assert length <= max_length
    # an empty sequence can never be meaningfully repeated
    return max_length // length if length > 0 else 0
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.