content stringlengths 42 6.51k |
|---|
def format_duration(seconds: float) -> str:
    """Format a duration for display.

    Args:
        seconds: The duration, in seconds.

    Returns:
        The duration rendered with two decimals and a " sec" suffix.
    """
    return "{:.2f} sec".format(seconds)
def isempty(prt_r):
    """Return True when the parent of a missingness indicator has no entries."""
    parent_size = len(prt_r)
    return parent_size == 0
def factorial_recursive(n):
    """Return n! computed recursively.

    :param n: non-negative integer
    :return: n * (n-1) * (n-2) * ... * 1, with 0! == 1
    :raises ValueError: for negative input (the original recursed forever,
        i.e. hit RecursionError, for any n <= 0)
    """
    if n < 0:
        raise ValueError("factorial is undefined for negative numbers")
    if n <= 1:
        # Base case covers both 0! and 1! (the original only handled n == 1).
        return 1
    return n * factorial_recursive(n - 1)
def strip_outer_matching_chars(s, outer_char):
    """Remove one matching pair of `outer_char` wrapped around `s`.

    The string is whitespace-stripped first; the pair is removed only when the
    first and last characters match and equal `outer_char`.
    """
    s = s.strip()
    # Require at least two characters: the original indexed s[0] on an empty
    # (post-strip) string and raised IndexError.
    if len(s) >= 2 and s[0] == s[-1] and s.startswith(outer_char):
        return s[1:-1]
    return s
def path_to_history_file(ctypedir, ctype):
    """Return the path of the HISTORY file in the calib store.

    e.g. /some-path/calib/Jungfrau::CalibV1/CxiEndstation.0:Jungfrau.0/pedestals/HISTORY
    See parameters description in :py:meth:`history_record`.
    """
    return '{}/{}/HISTORY'.format(ctypedir, ctype)
def level_numeric_value(level):
    """Map a textual log level to its numeric value.

    :param level: 'debug', 'info' or 'warning'; any other value is returned
        unchanged (including numeric levels passed through).
    :return: the numeric level, or `level` unchanged when unrecognized.
    """
    # The original compared strings with `is`, which tests object identity
    # and only worked by accident of CPython small-string interning.
    mapping = {"debug": 10, "info": 30, "warning": 50}
    if isinstance(level, str) and level in mapping:
        return mapping[level]
    return level
def steps_cancel_out(prev_step: str, step: str) -> bool:
    """Return True when `step` undoes `prev_step`.

    >>> steps_cancel_out(None, "U")
    False
    >>> steps_cancel_out("U", "U'")
    True
    >>> steps_cancel_out("U'", "U")
    True
    >>> steps_cancel_out("U2", "U2")
    True
    >>> steps_cancel_out("U", "U")
    False
    Returns:
        True if the steps cancel each other out
    """
    if prev_step is None:
        return False
    # A repeated double turn (e.g. "U2" then "U2") is a no-op.
    if step.endswith("2") and step == prev_step:
        return True
    # "U'" followed by "U" is a no-op.
    if prev_step.endswith("'"):
        return not step.endswith("'") and prev_step[:-1] == step
    # "U" followed by "U'" is a no-op.
    return step.endswith("'") and step[:-1] == prev_step
def subsets(nums):
    """Return every subset of `nums` via include/exclude DFS.

    O(2**n) subsets are produced, include-first, so the full set comes first
    and the empty set last.
    """
    res = []

    def dfs(pos, picked):
        if pos == len(nums):
            res.append(picked[:])
            return
        # Branch 1: include nums[pos].
        picked.append(nums[pos])
        dfs(pos + 1, picked)
        picked.pop()
        # Branch 2: exclude nums[pos].
        dfs(pos + 1, picked)

    dfs(0, [])
    return res
def _average(values):
""" Return average of a list of numbers """
return sum(values)/len(values) |
def _contains(prop_value, cmp_value, ignore_case=False):
"""
Helper function that take two arguments and checks if :param cmp_value:
is in :param prop_value:.
:param prop_value: Property value that you are checking.
:type prop_value: :class:`str`
:param cmp_value: Value that you are checking if it is in the property
value.
:type cmp_value: :class:`str`
:param ignore_case: True to run using incase sensitive.
:type ignore_case: :class:`bool`
:returns: True if :param cmp_value: is in :param prop_value:
:rtype: class:`bool`
"""
if ignore_case is True:
prop_value = prop_value.lower()
cmp_value = cmp_value.lower()
return cmp_value in prop_value |
def hill_equation(l, emax, kd, n):
    """Hill receptor-response equation.

    Args:
        l (float, numpy.array): Ligand concentration (concentration units).
        emax (float): Maximum response (response units); fit bounds 0 <= emax.
        kd (float): Effective ligand-receptor dissociation constant
            (concentration units); fit bounds 0 <= kd.
        n (int, float): Hill coefficient (slope); fit bounds 0 <= n.

    Returns:
        float, numpy.array: Response for the given concentration(s).
    """
    numerator = emax * l**n
    denominator = l**n + kd**n
    return numerator / denominator
def checkPrime(number: int, i: int = 2):
    """Return True when `number` is prime (trial division up to sqrt).

    Arguments:
        number {int} -- number to be checked

    Keyword Arguments:
        i {int} -- starting divisor, kept for backward compatibility with the
            original recursive signature (default: {2})

    Raises:
        TypeError: when `number` does not support integer arithmetic.
    """
    try:
        if number <= 2:
            # 2 is the only prime <= 2; 0, 1 and negatives are not prime.
            return number == 2
        # Iterative trial division replaces the original recursion, which
        # raised RecursionError for primes beyond ~10**6.
        while i * i <= number:
            if number % i == 0:
                return False
            i += 1
        return True
    except TypeError:
        raise TypeError("Int required!")
def range_overlap(range1, range2):
    """Determine whether `range1` lies within `range2` (or equals it).

    :param range range1: a range
    :param range range2: another range
    :rtype: bool
    :return: True when range1 is a sub-span of range2, else False
    """
    starts_inside = range1.start >= range2.start
    ends_inside = range1.stop <= range2.stop
    return starts_inside and ends_inside
def _power_of_two(value):
"""Returns whether the given value is a power of two."""
return (value & (value - 1)) == 0 |
def get_denom(cc, related_chart=False):
    """Get the numerator -> denominator map for a chart config.

    Args:
        cc: chart config dict with 'statsVars' and optionally 'denominator'
            and 'relatedChart' entries.
        related_chart: when True, fall back to the related chart's
            denominator if it is scaled.

    Returns:
        A dict mapping each stat var to its denominator, a single denominator
        string for a scaled related chart, or None.

    Raises:
        ValueError: when the denominator list length does not match statsVars.
    """
    # If the chart requires denominators, use them for both primary and
    # related charts.
    if 'denominator' in cc:
        if len(cc['denominator']) != len(cc['statsVars']):
            # Interpolate the config into the message; the original passed it
            # as a second logging-style argument that was never formatted.
            raise ValueError('Denominator number not matching: %s' % (cc,))
        return dict(zip(cc['statsVars'], cc['denominator']))
    # For a related chart, use the denominator from 'relatedChart' if scaled.
    if related_chart and cc.get('relatedChart', {}).get('scale', False):
        return cc['relatedChart'].get('denominator', 'Count_Person')
    return None
def decintfix(decorint=0):
    """Normalize a number (or numeric string) to int when whole, else float.

    The original checked `str(x)[-2:] == '.0'`, which missed plain ints
    (returning 5 as 5.0) and strings such as '2.00'; `float.is_integer`
    handles every whole-number spelling.
    """
    value = float(decorint)
    if value.is_integer():
        return int(value)
    return value
def get_converter_type_path(*args, **kwargs):
    """Handle converter type "path".

    :param args: ignored, accepted for interface compatibility.
    :param kwargs: ignored, accepted for interface compatibility.
    :return: schema dict describing a path-formatted string.
    """
    return {
        'type': 'string',
        'format': 'path',
    }
def merge_dicts(lst):
    """Combine a list of dictionaries into one dictionary.

    Later dicts override earlier ones on key collisions.
    """
    return {key: value for mapping in lst for key, value in mapping.items()}
def get_import_errors(error_container, index_name, total_count):
    """Summarize import errors for a search index as a human-readable string.

    Args:
        error_container: dict with error information keyed by index name.
        index_name: string with the search index name.
        total_count: integer with the total amount of events indexed.

    Returns:
        A summary string, or '' when the index has no recorded errors.
    """
    index_dict = error_container.get(index_name)
    if not index_dict:
        return ''
    error_list = index_dict.get('errors', [])
    if not error_list:
        return ''
    error_count = len(error_list)

    def _most_common(counter):
        # counter is expected to be a collections.Counter (or falsy).
        if counter:
            top = counter.most_common()[0][0]
        else:
            top = 'Unknown Reasons'
        return top or 'Unknown Reasons'

    top_type = _most_common(index_dict.get('types'))
    top_details = _most_common(index_dict.get('details'))
    if total_count is None:
        total_count = 0
    return (
        '{0:d} out of {1:d} events imported. Most common error type '
        'is "{2:s}" with the detail of "{3:s}"').format(
            total_count - error_count, total_count,
            top_type, top_details)
def bondfilter(motif, bond, bondpattern):
    """Check bond orders along a linear atom sequence against a pattern.

    E.g. `bondpattern` can be (1, 1, 2, 1) for a 5-atom motif; the wildcard
    'X' matches any bond order. Returns 0 on match, -1 on mismatch.
    """
    for idx in range(len(motif) - 1):
        expected = bondpattern[idx]
        if expected == 'X':
            # Wildcard: any bond order is acceptable here.
            continue
        if bond[motif[idx]][motif[idx + 1]] != expected:
            return -1
    return 0
def individual_collate(batch):
    """Collate a batch of per-sample dicts into one dict of flattened lists.

    Assumes at least one sample; all samples share the keys of the first.
    """
    keys = batch[0].keys()
    collected = {key: [] for key in keys}
    for sample in batch:
        for key in keys:
            collected[key].extend(sample[key])
    return collected
def convert_em(element, text):
    """Wrap `text` in asterisks (markdown emphasis); empty input unchanged."""
    if not text:
        return text
    return "*%s*" % text
def _pad_post_array(input_array, length):
"""
Pads the input 2D list with 0's at the end so that is is of shape (None, length)
:param input_array: 2D list
:param length: length to pad to
:return: (None, length) padded array
"""
output_array = []
for arr in input_array:
padded = arr + (length - len(arr))*[0]
output_array.append(padded)
return output_array |
def check_format(json_string, correct_value):
    """Compare the first key of a JSON-formatted string to a desired value."""
    head = json_string.split(":", 1)[0]
    return head.strip("{") == correct_value
def percentage(value):
    """Render a number as a percentage with two-digit precision, or '' when
    the value is not numeric."""
    try:
        numeric = float(value)
    except (ValueError, TypeError, UnicodeEncodeError):
        return ''
    return '{0:.2%}'.format(numeric)
def get_cell_numbers(contained):
    """Retrieve non-overlapping cell numbers from the output of `get_overlapping`.
    None may appear at the ends of the output, indicating that the corresponding
    target cells are not overlapping with any source cells. These should be ignored
    when regridding.
    Cell numbers of 0 indicate that the corresponding target cells need to be regridded
    in combination with the previous non-zero cell number target cell.
    Returns:
        cell_numbers (list): The number of cells corresponding to the source
            dimension, as described above.
        overlap (bool): If True, this indicates that for at least one location, there
            is an overlap of depth 1 between adjacent operations.
    """
    cell_numbers = []
    overlap = False
    # Walk the entries pairwise: prev_elements is None for the first entry.
    for prev_elements, elements in zip([None] + contained[:-1], contained):
        # None stays in place for entries with no elements (non-overlapping
        # target cells, per the docstring).
        cell_number = None
        if (
            prev_elements is not None
            and elements
            and prev_elements
            # Depth-1 overlap: this entry starts on the cell the previous
            # entry ended on.
            and elements[0] == prev_elements[-1]
        ):
            overlap = True
            # Start at -1 so the shared cell is not counted twice below.
            cell_number = -1
        if elements:
            if cell_number is None:
                cell_number = 0
            # Inclusive span of source cells covered by this entry.
            cell_number += elements[-1] - elements[0] + 1
        cell_numbers.append(cell_number)
    return cell_numbers, overlap
def check_valid_key_name(name):
    """Ensure the key name provided is legal.

    :param name: a potential key name
    :return: True when `name` is a string containing no forbidden sequences
    :rtype: bool
    """
    # isinstance (rather than `type(name) not in [str]`) is the idiomatic
    # check and also accepts str subclasses.
    if not isinstance(name, str):
        return False
    bad_chars = ("*", ".", "&&&&")
    return not any(bad in name for bad in bad_chars)
def poly_area_calculation(points):
    """Compute a polygon's area and centroid via the shoelace formula.

    Returns [abs(area), (cx, cy)]; for a degenerate (zero-area) polygon the
    accumulated centroid sums are returned un-normalized.
    """
    n = len(points)
    if n < 3:
        raise ValueError('NOT SUFFICIENT POINTS!')
    centroid = [0.0, 0.0]
    area = 0
    for i in range(n):
        # Wrap around to the first vertex on the last edge.
        j = 0 if i == n - 1 else i + 1
        cross = points[i][0] * points[j][1] - points[j][0] * points[i][1]
        centroid[0] += (points[i][0] + points[j][0]) * cross
        centroid[1] += (points[i][1] + points[j][1]) * cross
        area += cross
    area = 0.5 * area
    if area != 0:
        centroid[0] = centroid[0] / (6 * area)
        centroid[1] = centroid[1] / (6 * area)
    return [abs(area), (centroid[0], centroid[1])]
def _get_all_committers(commits_info):
""" Returns all committers' information
The information includes name and Email
"""
committers_unduplicated = [{'name': commit_info['committer'], 'email': commit_info['committerEmail']}
for commit_info in commits_info]
all_names = []
committers = []
for committer in committers_unduplicated:
if committer['name'] not in all_names:
committers.append(committer)
all_names.append(committer['name'])
return committers |
def isPalindrome(s):
    """Return True when `s` is a palindrome, ignoring case and any character
    that is not an ASCII letter."""
    letters = [ch for ch in s.lower() if ch in "abcdefghijklmnopqrstuvwxyz"]
    return letters == letters[::-1]
def which(program):
    """Locate `program` on the user's PATH; return its path or None."""
    import os

    def _is_executable(candidate):
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)

    directory, _ = os.path.split(program)
    if directory:
        # An explicit path was given: accept it only if it is executable.
        return program if _is_executable(program) else None
    for entry in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(entry.strip('"'), program)
        if _is_executable(candidate):
            return candidate
    return None
def gps_format_datetime(datetime_str):
    """Convert a CGNS GGA date-time string (YYYYMMDDhhmmss) into the
    URL-encoded MM/DD/YYYY+hh:mm:ss form used for REST posts."""
    year = datetime_str[:4]
    # Split the remaining 10 characters into 2-char fields.
    month, day, hours, minutes, seconds = (
        datetime_str[i:i + 2] for i in range(4, 14, 2)
    )
    return '{}%2F{}%2F{}+{}%3A{}%3A{}'.format(
        month, day, year, hours, minutes, seconds
    )
def mean(mylist):
    """Return the mean value of a list.

    Raises ArithmeticError for an empty list.
    """
    length = len(mylist)
    if length == 0:
        raise ArithmeticError(
            "Attempting to compute the mean of a zero-length list")
    return 1.0 * sum(mylist) / length
def parse_individual(arg):
    """Return a list of individuals from `arg`.

    Tries to open `arg` as a file (one individual per line); when that fails,
    treats it as a comma-separated list.
    """
    try:
        # Context manager guarantees the handle is closed (the original file
        # object was never closed).
        with open(arg, "r") as handle:
            # rstrip("\n") instead of i[:-1]: the original dropped the last
            # character of a final line that had no trailing newline.
            return [line.rstrip("\n") for line in handle]
    except IOError:
        return arg.split(",")
def InvertDepthNorm(
    depth, maxDepth=1000.0, minDepth=10, transform_type='inverse'
):
    """Renormalize depth predictions back to target space.

    Args:
        depth: predicted (normalized) depth value(s).
        maxDepth: maximum depth used by the 'inverse' transform.
        minDepth: minimum depth used by the 'scaled' and 'log' transforms.
        transform_type: one of 'inverse', 'scaled' or 'log'.

    Raises:
        ValueError: for an unknown transform_type (the original silently
            returned None).
    """
    if transform_type == 'inverse':
        return maxDepth / depth
    if transform_type == 'scaled':
        return depth * minDepth
    if transform_type == 'log':
        return (10 ** depth) * minDepth
    raise ValueError('Unknown transform_type: {}'.format(transform_type))
def json_table_to_text(table):
    """Transform a JSON table (list of dicts) into plain text.

    :param table: table to transform into text
    :type table: list
    :return: a header line of keys followed by one line per row (the header
        row itself is repeated as a data row, matching the original format)
    :rtype: str
    """
    if not table:
        return ""
    lines = [' '.join(table[0].keys())]
    lines.extend(' '.join(map(str, row.values())) for row in table)
    return '\n'.join(lines) + '\n'
def get_run_and_tag_from_filename(filename):
    """Extract (run, tag) from a name like 'SFX_each_193441_79553414.h5'.

    Returns (-1, -1) when the stem has fewer than two '_'-separated parts.
    """
    import os
    stem = os.path.splitext(os.path.basename(filename))[0]
    parts = stem.split("_")
    if len(parts) < 2:
        return -1, -1
    return int(parts[-2]), int(parts[-1])
def is_valid_misra_violation(misra_violation):
    """Return True when the MISRA violation entry has the expected type
    (a string)."""
    return isinstance(misra_violation, str)
def fill_byte(byteA, n: int):
    """Left-pad `byteA` with zero bytes until it is at least `n` bytes long.

    Accepts anything `bytes()` accepts (bytes, bytearray, iterable of ints).
    Output: bytes
    """
    data = bytes(byteA)
    # Compute the prefix in one allocation; the original inserted one zero at
    # a time at index 0, which is O(n**2).
    pad = max(0, n - len(data))
    return b"\x00" * pad + data
def ternary_operator_v(x, iflogic, assertion, elselogic):
    """Apply ternary-operator logic by executing functions.

    Each function receives the single value `x`; equivalent to
    `iflogic(x) if assertion(x) else elselogic(x)`.

    Parameters
    ----------
    x
        The value to pass to each function.
    """
    if assertion(x):
        return iflogic(x)
    return elselogic(x)
def count_padding_bases(seq1, seq2):
    """Count the number of bases padded to report indels in .vcf.

    Args:
        seq1, seq2 (str): alleles in .vcf
    Returns:
        n (int): length of the common left-aligned prefix
    Examples:
        REF 'GCG' vs ALT 'GCGCG' share the left-aligned prefix 'GCG',
        so 3 is returned.
    """
    # Normalize so seq1 is the shorter allele.
    if len(seq2) < len(seq1):
        return count_padding_bases(seq2, seq1)
    n = 0
    while n < len(seq1) and seq1[n] == seq2[n]:
        n += 1
    return n
def peak_to_subpeak_list(chrom, start, end, size=60):
    """Split the peak (chrom, start, end) into consecutive subregions of at
    most `size` bp each; the last subregion is clamped to `end`.

    (The original also computed an unused `num_subpeaks` with a precedence
    bug — `end - start // size` instead of `(end - start) // size` — which is
    removed here.)
    """
    start_list = list(range(start, end, size))
    end_list = start_list[1:]
    # Close off the final subregion, clamping to the peak end.
    end_list.append(min(start_list[-1] + size, end))
    return [(chrom, s, e) for s, e in zip(start_list, end_list)]
def rgb_to_ansi(r, g, b):
    """Convert an RGB color to the nearest 256-color ANSI code.

    Grayscale values map onto the 24-step gray ramp (codes 232-255, with
    black/white clamped to 16/230); other colors map onto the 6x6x6 cube
    (codes 16-231).
    """
    r, g, b = int(r), int(g), int(b)
    # Grayscale ramp. The original wrote `r == g & g == b`, which only worked
    # because `&` binds tighter than `==`, making it the chained comparison
    # `r == g == b`; written explicitly here.
    if r == g == b:
        if r < 8:
            return 16
        if r > 248:
            return 230
        return int(round(((r - 8) / 247) * 24) + 232)

    def to_cube_axis(channel):
        # Map 0-255 onto one axis of the 6x6x6 color cube.
        return int(round(channel / 51.0))

    return 16 + 36 * to_cube_axis(r) + 6 * to_cube_axis(g) + to_cube_axis(b)
def deol(s):
    """Remove any EOL characters ("\\n", "\\r\\n", "\\r") from a line.

    The original chained `.rstrip("\\r").rstrip("\\n")`, which left the "\\r"
    of a CRLF line in place ("x\\r\\n" -> "x\\r"); stripping both characters
    as one set fixes that.
    """
    return s.rstrip("\r\n")
def transform_detection(p0, detections, detect_thresh):
    """Shift detections from cropped-image coordinates back to the original
    image coordinates.

    Parameters
    ----------
    p0 : tuple
        Top-left point (x, y) used for cropping the image.
    detections : list
        Detection entries of shape [top-left point, bottom-right point,
        probability, class id], as output by the detection network on the
        cropped frame.
    detect_thresh : float
        Minimum probability for a detection to be kept.

    Returns
    -------
    list
        The kept detections with both corner points translated by `p0`.
    """
    x0, y0 = p0[0], p0[1]
    output = []
    for detection in detections:
        top_left, bottom_right, prob, class_id = (
            detection[0], detection[1], detection[2], detection[3]
        )
        if prob > detect_thresh:
            output.append([
                (x0 + top_left[0], y0 + top_left[1]),
                (x0 + bottom_right[0], y0 + bottom_right[1]),
                prob, class_id,
            ])
    return output
def greet(name):
    """Return a greeting statement for the given person.

    Args:
        name (str): A person's name.

    Returns:
        str: "Hello, <name> how are you doing today?".
    """
    return f"Hello, {name} how are you doing today?"
def convert_from(client, denomination, amount):
    """Convert `amount` from 18-decimal wei to the desired precision.

    Args:
        client: object exposing `from_wei(amount, unit)`.
        denomination: 'nct', 'nct-gwei' or 'nct-wei'.
        amount: amount in wei.

    Raises:
        ValueError: for an unknown denomination (now with a message; the
            original raised a bare `ValueError()`).
    """
    if denomination == 'nct':
        return client.from_wei(amount, 'ether')
    if denomination == 'nct-gwei':
        return client.from_wei(amount, 'gwei')
    if denomination == 'nct-wei':
        return amount
    raise ValueError('Unknown denomination: {}'.format(denomination))
def get_slack_user_fields(user_info):
    """Extract SlackUser fields from a Slack 'user' object.

    https://api.slack.com/types/user
    """
    profile = user_info["profile"]
    return {
        "username": user_info["id"],
        "readable_name": profile["real_name"],
        "avatar": profile["image_24"],
    }
def time2sample(time: float, sample_rate: int) -> int:
    """Convert a time to the nearest sample index.

    Args:
        time (float): time in seconds to convert.
        sample_rate (int): sample rate to use.

    Returns:
        int: nearest sample number.
    """
    return round(sample_rate * time)
def remove_line_break(text):
    """Return `text` with every "\\n" removed."""
    return "".join(text.split("\n"))
def dict2tuple(d):
    """Build a tuple of sorted (key, value) pairs from a dict.

    :param d: The dict to coerce into a tuple.
    :returns: The dict `d` as a tuple of items, sorted by key.
    """
    return tuple(sorted(d.items()))
def sequence_pairs_of_minibatches(minibatches, direction="forward_val"):
    """Pair consecutive minibatches: ((x_i, y_i), (x_j, y_j)).

    "forward*" pairs each batch with its successor; "backward*" pairs each
    batch with its predecessor, starting from the end. Both members of a pair
    are truncated to the shorter batch length.
    """
    def _truncated_pair(i, j):
        xi, yi = minibatches[i][0], minibatches[i][1]
        xj, yj = minibatches[j][0], minibatches[j][1]
        m = min(len(xi), len(xj))
        return ((xi[:m], yi[:m]), (xj[:m], yj[:m]))

    if "forward" in direction:
        return [_truncated_pair(i, i + 1) for i in range(len(minibatches) - 1)]
    if "backward" in direction:
        return [_truncated_pair(i, i - 1) for i in range(len(minibatches) - 1, 0, -1)]
    raise NotImplementedError
def fformat(float_num, precision):
    """Format a float to `precision` decimals, trimming trailing zeros and a
    dangling decimal point.

    https://stackoverflow.com/a/44702621/3453033
    """
    assert isinstance(precision, int) and precision > 0
    text = '{:.{p}f}'.format(float_num, p=precision)
    return text.rstrip('0').rstrip('.')
def deepcopy_basic_type(obj: object) -> object:
    """Deep-copy basic containers (tuple/list/dict) while sharing any other
    object (e.g. to generate Qlib tasks that share a handler).

    NOTE:
        - This function can't handle recursive objects!!!!!

    Parameters
    ----------
    obj : object
        the object to be copied

    Returns
    -------
    object:
        The copied object
    """
    if isinstance(obj, dict):
        return {key: deepcopy_basic_type(val) for key, val in obj.items()}
    if isinstance(obj, list):
        return [deepcopy_basic_type(item) for item in obj]
    if isinstance(obj, tuple):
        return tuple(deepcopy_basic_type(item) for item in obj)
    # Anything else is shared, not copied.
    return obj
def get_order(order):
    """Clean up a concatenated order string.

    Input like "milkshakepizzachickenfriescokeburgerpizzasandwichmilkshakepizza"
    becomes a space-separated, capitalized string in fixed menu order, e.g.
    "Burger Fries Chicken Pizza Pizza Pizza Sandwich Milkshake Milkshake Coke".
    """
    menu = ['burger',
            'fries',
            'chicken',
            'pizza',
            'sandwich',
            'onionrings',
            'milkshake',
            'coke']
    words = []
    for item in menu:
        # Repeat each menu item as many times as it occurs in the raw order.
        words.extend([item.capitalize()] * order.count(item))
    cleaned = ' '.join(words)
    print(cleaned)
    return cleaned
def strip_string(val: str) -> str:
    """Return `val` as a string without leading or trailing whitespace.

    :param val: value to stringify and strip.
    """
    text = str(val)
    return text.strip()
def _element_fill_join(elements, width):
"""Create a multiline string with a maximum width from a list of strings,
without breaking lines within the elements"""
s = ''
if(elements):
L = 0
for i in range(len(elements) - 1):
s += str(elements[i]) + ', '
L += len(elements[i]) + 2
if(L + len(elements[i+1]) >= width - 2):
s += '\n'
L = 0
s += elements[-1]
return(s) |
def size(obj):
    """Return obj.__sizeof__() in bytes, or 0 when the object does not
    support it."""
    try:
        return obj.__sizeof__()
    except Exception:
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt and SystemExit.
        return 0
def echo(value, prompt="Value"):
    """Print a value for debugging, then return it unchanged.

    Talon debugging is limited; wrap any value with this function to echo it
    without affecting functionality.
    """
    message = f"{prompt}: {value}"
    print(message)
    return value
def sample(i):
    """Generate a meaningful sample value for index `i`."""
    return f'sample{i}'
def AppIdWithDefaultPartition(app_id, default_partition):
    """Prefix `app_id` with the default partition unless it already has one
    (marked by '~') or no default partition is given."""
    if not default_partition or '~' in app_id:
        return app_id
    return default_partition + '~' + app_id
def parse_game_features(s):
    """Parse the comma-separated game features to detect.

    Returns a boolean per known feature, in the fixed order
    target, enemy, health, weapon, ammo.
    """
    game_features = ['target', 'enemy', 'health', 'weapon', 'ammo']
    requested = [feature for feature in s.split(',') if feature]
    assert all(feature in game_features for feature in requested)
    return [feature in requested for feature in game_features]
def odd_even_sum(numbers):
    """Return a string with the sums of the odd and even digits of the
    given number."""
    digits = [int(ch) for ch in str(numbers)]
    evens = [d for d in digits if d % 2 == 0]
    odds = [d for d in digits if d % 2 != 0]
    return f"Odd sum = {sum(odds)}, Even sum = {sum(evens)}"
def netloc_parser(data):
    """Parse the netloc parameter.

    :returns: (username, url) — each None when empty; the split happens at
        the first '@'.
    """
    if not data or '@' not in data:
        return None, data or None
    username, _, url = data.partition('@')
    return username or None, url or None
def replace_escape_codes(input_str):
    """Replace HTML escape codes with their literal characters.

    Parameters
    ----------
    input_str : str
        String of input.

    Returns
    -------
    str
        String with &quot;, &#39; and &amp; decoded so it prints properly.

    Notes
    -----
    The source had been mangled into identity replacements (and a syntax
    error) by entity decoding; the entity names are restored here. `&amp;`
    is decoded last so it cannot create new entities to decode.
    """
    return input_str.replace('&quot;', '"').replace('&#39;', "'").replace('&amp;', '&')
def getfields(comm):
    """Return the entries of `comm` that contain 'field' (a dict key, or a
    substring for string entries)."""
    return [entry for entry in comm if 'field' in entry]
def merge_dict_of_dicts(list_of_dicts):
    """Merge a list of dicts whose values are lists.

    arg: list of dicts (to be merged)
    returns: merged dict — each key maps to the concatenation of that key's
        value-lists across all input dicts
    """
    merged = {}
    for mapping in list_of_dicts:
        for key, value in mapping.items():
            merged.setdefault(key, []).append(value)
    # Flatten the collected list-of-lists for every key.
    return {
        key: [item for sublist in values for item in sublist]
        for key, values in merged.items()
    }
def mode(data):
    """Return the most frequent element of `data`; on ties, the element that
    occurs first wins."""
    return max(data, key=data.count)
def _format_size(size):
"""
Format size in bytes, kb, or mb
"""
if size < 1000:
return f"{size} bytes"
if size >= 1000000:
return f"{size/1000000:.2f} MB"
if size >= 1000:
return f"{size/1000:.2f} kB" |
def unwrap(url):
    """unwrap('<URL:type://host/path>') --> 'type://host/path'."""
    text = str(url).strip()
    if text.startswith('<') and text.endswith('>'):
        text = text[1:-1].strip()
    if text.startswith('URL:'):
        text = text[4:].strip()
    return text
def isValidTCSresponse(response):
    """Return True when the TCS response is valid (does not contain the
    'UNR EOF' marker); print a hint otherwise."""
    valid = "UNR EOF" not in response
    if not valid:
        print('Request cannot be answered, try change language')
    return valid
def _get_p_survival(block=0, nb_total_blocks=110, p_survival_end=0.5, mode='linear_decay'):
"""
See eq. (4) in stochastic depth paper: http://arxiv.org/pdf/1603.09382v1.pdf
"""
if mode == 'uniform':
return p_survival_end
elif mode == 'linear_decay':
return 1 - ((block + 1) / nb_total_blocks) * (1 - p_survival_end)
else:
raise |
def constructUniformAllelicDistribution(numalleles):
    """Construct a uniform frequency distribution over N alleles.

    Args:
        numalleles (int): Number of alleles present in the initial population.

    Returns:
        (list): Floats giving the initial frequency of each of the N alleles.
    """
    frac = (100.0 / numalleles) / 100.0
    return [frac for _ in range(numalleles)]
def crc64(inpkt) :
    """ Returns 64 bit crc of inpkt binary packed string inpkt
        inpkt is bytes in python3 or str in python2
        returns tuple of two 32 bit numbers for top and bottom of 64 bit crc

        Implementation note: the 64-bit CRC register and polynomial are each
        split into two 32-bit halves (crctop/crcbot, polytop/polybot) and the
        shift-and-xor loop is carried out bit by bit, MSB first.
    """
    inpkt = bytearray(inpkt)
    # Polynomial 0x42f0e1eba9ea3693 split into high/low 32-bit halves.
    polytop = 0x42f0e1eb
    polybot = 0xa9ea3693
    # CRC register initialized to all ones.
    crctop = 0xffffffff
    crcbot = 0xffffffff
    for element in inpkt :
        i = 0
        #byte = ord(element)
        byte = element
        while i < 8 :
            # topbit: MSB of the 64-bit register before the shift.
            topbit = 0x0
            if (crctop & 0x80000000):
                topbit = 0x01
            # databit: MSB of the current input byte.
            databit = 0x0
            if (byte & 0x80):
                databit = 0x01
            # Shift the 64-bit register left by one: the high half shifts and
            # takes the carried-out MSB of the low half.
            crctop = crctop << 1
            crctop = crctop & 0xffffffff
            botbit = 0x0
            if (crcbot & 0x80000000):
                botbit = 0x01
            crctop = crctop | botbit
            crcbot = crcbot << 1
            crcbot = crcbot & 0xffffffff
            # XOR in the polynomial when the shifted-out bit differs from the
            # incoming data bit.
            if (topbit != databit):
                crctop = crctop ^ polytop
                crcbot = crcbot ^ polybot
            # Advance to the next input bit (MSB first).
            byte = byte << 1
            byte = byte & 0x00ff
            i += 1
    # Final inversion of both halves.
    crctop = crctop ^ 0xffffffff
    crcbot = crcbot ^ 0xffffffff
    return (crctop, crcbot)
def _validate_usecols_names(usecols, names):
"""
Validates that all usecols are present in a given
list of names. If not, raise a ValueError that
shows what usecols are missing.
Parameters
----------
usecols : iterable of usecols
The columns to validate are present in names.
names : iterable of names
The column names to check against.
Returns
-------
usecols : iterable of usecols
The `usecols` parameter if the validation succeeds.
Raises
------
ValueError : Columns were missing. Error message will list them.
"""
missing = [c for c in usecols if c not in names]
if len(missing) > 0:
raise ValueError(
f"Usecols do not match columns, columns expected but not found: {missing}"
)
return usecols |
def gradY(x, y):
    """
    Evaluates Y-gradient of Beale at x, y
    @ In, x, float, value
    @ In, y, float, value
    @ Out, gradY, float, Y-gradient of Beale
    """
    consts = (1.5, 2.25, 2.625)
    tot = 0
    for i, c in enumerate(consts, start=1):
        tot += 2 * i * x * (x * (y**i - 1) + c)
    return tot
def pre_measure(node, q, anc, mes):
    """Entangle qubit `q` with ancilla `anc` in the basis given by `mes`
    ("I", "X", "Y", or "Z"); "I" does nothing. `node` is unused but kept for
    interface compatibility."""
    if mes == "I":
        # Identity basis: nothing to prepare.
        return None
    if mes == "X":
        q.H()
        q.cnot(anc)
        q.H()
    elif mes == "Y":
        q.K()
        q.cnot(anc)
        q.K()
        # q.rot_X(-64)  # Rotation -pi/2
    elif mes == "Z":
        q.cnot(anc)
    else:
        raise NameError("The measurement {} does not exist.".format(mes))
def get_ground_truth_module_source(target_name, command_str, array_strs):
    """Gets the source of a module that will contain the ground truth.
    Args:
        target_name: Name of the target.
        command_str: String of the command name used to generate the ground truth
            values.
        array_strs: List of strings that encode the ground truth arrays. See
            `save_ground_truth_part`.
    Returns:
        module_source: Source of the module.
    """
    array_str = '\n'.join(array_strs)
    # The template below is emitted verbatim as a generated Python module:
    # it embeds the Apache-2.0 license header, the generating command, and
    # the array definitions spliced in at module top level via .format().
    module_source = r'''# Lint as: python3
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
r"""Ground truth values for `{target_name}`.
Automatically generated using the command:
```
{command_str}
```
"""
import numpy as np
{array_str}'''.format(
    target_name=target_name, array_str=array_str, command_str=command_str)
    return module_source
def ConvertPrivateIpv6GoogleAccess(choice):
    """Return the PrivateIpv6GoogleAccess enum defined in mixer.

    Args:
        choice: Enum value of PrivateIpv6GoogleAccess defined in gcloud.

    Returns:
        The corresponding mixer enum string, or None when unrecognized.
    """
    mapping = {
        'DISABLE': 'DISABLE_GOOGLE_ACCESS',
        'ENABLE_BIDIRECTIONAL_ACCESS': 'ENABLE_BIDIRECTIONAL_ACCESS_TO_GOOGLE',
        'ENABLE_OUTBOUND_VM_ACCESS': 'ENABLE_OUTBOUND_VM_ACCESS_TO_GOOGLE',
    }
    return mapping.get(choice)
def _merged_maxderiv(maxA,maxB):
"""
Calculate the maxderiv of a merged DFun.
Parameters
----------
maxA, maxB : int or None
maxderiv of merged DFun.
Returns
-------
int or None
The maxderiv of the merger.
"""
if maxA is None and maxB is None:
maxderiv = None
elif maxA is None:
maxderiv = maxB
elif maxB is None:
maxderiv = maxA
else:
maxderiv = min(maxA,maxB)
return maxderiv |
def format_percentage(rid, adata, bdata):
"""Method to calculate percentage of bdata out of adata."""
try:
fdata = ['0'] * len(bdata)
for i, itm in enumerate(bdata):
d_data = float(int(adata[i]))
n_data = float(int(bdata[i]))
p_data = (n_data / d_data) * 100 if d_data > 0 else 0
print('Newton', d_data, n_data, p_data)
fdata[i] = str(round(p_data, 1))
except Exception:
return bdata
else:
return fdata |
def validMountainArray(A):
    """
    :type A: List[int]
    :rtype: bool

    A valid mountain strictly increases to a peak (not at either end) and
    then strictly decreases to the last element.
    """
    n = len(A)
    idx = 0
    # Climb the strictly increasing slope.
    while idx + 1 < n and A[idx] < A[idx + 1]:
        idx += 1
    # The peak cannot be the first or last element.
    if idx == 0 or idx == n - 1:
        return False
    # Descend the strictly decreasing slope.
    while idx + 1 < n and A[idx] > A[idx + 1]:
        idx += 1
    return idx == n - 1
def seconds(seconds=0, minutes=0, hours=0, days=0, weeks=0):
    """Return the total seconds for the given minutes, hours, days, weeks."""
    total = seconds
    total += minutes * 60
    total += hours * 3600
    total += days * 86400
    total += weeks * 604800
    return total
def calculate_fantasy_points(player) -> float:
    """
    Calculate the fantasy points this player earned from the formula
    Kill = 0.3, Death = -0.3, Assist = 0.15, Last Hit = 0.003,
    Gold per minute = 0.002, EXP per minute = 0.002,
    Seconds of enemy stuns = 0.07, Every 1000 allied healing done = 0.4,
    Tower Kill = 1, Roshan Kill = 1, First blood = 3
    https://dota2.gamepedia.com/Fantasy_Dota

    Parameters
    ----------
    player: Summary - a player summary

    Returns
    -------
    The fantasy points scored by this player
    """
    # Accumulate left-to-right in the same order as the original expression
    # so float rounding is bit-identical.
    points = player["kills"] * 0.3 - player["deaths"] * 0.3
    points += player["assists"] * 0.15
    points += player["last_hits"] * 0.003
    points += player["gpm"] * 0.002
    points += player["xpm"] * 0.002
    points += player["enemy_stun_time"] * 0.07
    points += (player["healing"] / 1000) * 0.4
    points += player["towers_killed"]
    points += player["rosh_kills"]
    points += 3 if player["first_blood"] else 0
    return points
def _webwallet_support(coin, support):
"""Check the "webwallet" support property.
If set, check that at least one of the backends run on trezor.io.
If yes, assume we support the coin in our wallet.
Otherwise it's probably working with a custom backend, which means don't
link to our wallet.
"""
if not support.get("webwallet"):
return False
return any(".trezor.io" in url for url in coin["blockbook"] + coin["bitcore"]) |
def target_tensor(len, labels, scores):
    """Build a dense score vector from labels and scores.

    Note: the first parameter shadows the builtin `len`; its name is kept
    for interface compatibility.
    """
    target = [0 for _ in range(len)]
    for idx, label in enumerate(labels):
        target[label] = scores[idx]
    return target
def crear_cola(ncola):
    """Create a queue of `ncola` positions, spaced 72 px apart on y=400,
    starting at x=10."""
    positions = [[10, 400]]
    for _ in range(1, ncola):
        positions.append([positions[-1][0] + 72, 400])
    return positions
def without_tests(tests, without):
    """Return `tests` with every entry of `without` excluded."""
    return [test for test in tests if test not in without]
def is_anagram(word1, word2):
    """Return True when the two strings are anagrams (ignoring spaces).

    Two words are anagrams if you can rearrange the letters from one to spell
    the other. Fixes two bugs in the original: it built both character lists
    from `word1` (so it never looked at `word2`), and `list.remove(" ")`
    raised ValueError whenever a word contained no space (and removed only
    one space anyway).
    """
    def normalize(word):
        return sorted(word.replace(" ", ""))

    return normalize(word1) == normalize(word2)
def version_tuple_to_str(version_tuple, separator='.'):
    """
    Turns something like (X, Y, Z) into "X.Y.Z"
    :param version_tuple: the tuple identifying a software Semantic version
    :type version_tuple: tuple
    :param separator: the character to be used as separator
    :type separator: str, defaults to '.'
    :return: str
    """
    return separator.join(str(component) for component in version_tuple)
def is_valid(isbn):
    """Check whether the given string is a valid ISBN-10.

    Non-digit characters (e.g. hyphens) are ignored, except that a
    trailing 'X' counts as the check digit 10.

    Fixes a bug in the original: strings with MORE than ten digits were
    not rejected — the extra digits were summed unweighted, which could
    accept invalid input. An ISBN-10 must have exactly ten digits.

    :param isbn: candidate ISBN-10 string
    :return: True if valid, False otherwise
    """
    if not isbn:
        return False
    digits = [int(ch) for ch in isbn if ch.isdigit()]
    # Check digit of an ISBN-10 may be 'X' (representing 10).
    if isbn[-1] == 'X':
        digits.append(10)
    # Exactly ten digits are required.
    if len(digits) != 10:
        return False
    # Weighted checksum: 10*d1 + 9*d2 + ... + 1*d10; valid iff divisible by 11.
    checksum = sum(digit * weight
                   for digit, weight in zip(digits, range(10, 0, -1)))
    return checksum % 11 == 0
def is_overlapping(segment_time, previous_segments):
    """
    Checks if the time of a segment overlaps with the times of existing segments.

    Arguments:
    segment_time -- a tuple of (segment_start, segment_end) for the new segment
    previous_segments -- a list of tuples of (segment_start, segment_end) for the existing segments

    Returns:
    True if the time segment overlaps with any of the existing segments, False otherwise
    """
    start, end = segment_time
    # Two closed intervals overlap iff each starts before the other ends.
    return any(end >= prev_start and start <= prev_end
               for prev_start, prev_end in previous_segments)
def process_enclitics(word):
    """Return the portion of the word before the first hyphen.

    Full enclitic processing is not implemented yet; anything after a
    hyphen is simply dropped.
    """
    stem, _, _ = word.partition('-')
    return stem
def is_matching_set(cards):
    """Determine if the set of 3 `Card`s defines a valid matching set (that can be redeemed).

    A set is matching if the symbols on the 3 cards are either all the same or all different.
    (e.g. `[1, 1, 1]` matches, `[1, 0, 1]` does not, but `[0, 1, 2]` is a matching set.

    `cards` -- `[Card]` -- cards to check
    **`returns`** -- `bool` -- True if the cards match
    """
    if len(cards) != 3:
        return False
    distinct_symbols = {card.symbol for card in cards}
    # All identical (1 distinct symbol) or all different (3 distinct symbols).
    return len(distinct_symbols) in (1, 3)
def _smartquote(s, quoteit=True, qchar='"'):
""" smartquote a string so that internal quotes are distinguished from surrounding
quotes for SPSS and return that string with the surrounding quotes. qchar is the
character to use for surrounding quotes.
if quoteit is True, s is a string that needs quoting; otherwise it does not
"""
if quoteit:
return qchar + s.replace(qchar, qchar+qchar) + qchar
else:
return s |
def species_icon_url_for_dimension(species_icon_key, width, height):
    """ Return the icon URL for a specific species icon 'key' (the 'icon' field
    in species definitions), sized width x height. """
    url_template = "/static/img/species_icons/%s_%dx%d.png"
    return url_template % (species_icon_key, width, height)
def finder(parent, starts_with, matching_object):
    """Check whether any nested sub-object in `parent` matches `matching_object`.

    Flat keys of the form "<starts_with>.<unique-number>.<field>" are grouped
    by their unique-number segment (which is otherwise ignored); a group
    matches when it contains every key/value pair of `matching_object`.
    An empty `matching_object` never matches.
    """
    prefix_len = len(starts_with) + 1
    grouped = {}
    for key in parent:
        if not key.startswith(starts_with):
            continue
        parts = key[prefix_len:].split('.', 1)
        # Skip keys that have no field segment after the unique number.
        if len(parts) < 2:
            continue
        group, field = parts
        grouped.setdefault(group, {})[field] = parent[key]
    for candidate in grouped.values():
        if matching_object and all(
            field in candidate and candidate[field] == value
            for field, value in matching_object.items()
        ):
            return True
    return False
def partition_linkedlist_around_value(linked_list, x):
    """Partition a linked list around a given value ``x``.

    Traverses the linked list once; every node whose ``data >= x`` is
    unlinked and re-appended after the original tail, so all nodes with
    ``data < x`` end up before the ">= x" nodes (the "< x" nodes keep
    their original relative order).

    Args:
        linked_list: An instance object of class LinkedList.
        x: value of type LinkedList.data

    Returns:
        Reference to modified linked_list. Changes the input linked_list
        argument in place.
    """
    if (not linked_list or not linked_list.head or
            not linked_list.head.next_node):
        # Empty or single-node list: nothing to partition.
        return linked_list
    # Find the tail node; nodes appended past it later are already placed.
    current = linked_list.head
    while current.next_node:
        current = current.next_node
    original_tail = current
    # Setup pointers: `new_tail` tracks the end of the growing ">= x" run
    # appended after the original tail; `previous` trails `current`.
    new_tail = original_tail
    previous = None
    current = linked_list.head
    # Partition. Walk only up to (not including) the original tail:
    # everything after it was appended by this loop and must not be
    # re-examined, or the loop would never terminate.
    while current != original_tail:
        if current.data >= x:
            if previous:
                # Unlink `current` from the middle of the list and append
                # it after `new_tail`; resume from the node that followed.
                previous.next_node = current.next_node
                new_tail.next_node = current
                current.next_node = None
                new_tail = current
                current = previous.next_node
            else:
                # `current` is the head: advance the head pointer first,
                # then append the old head at the end.
                linked_list.head = current.next_node
                new_tail.next_node = current
                current.next_node = None
                new_tail = current
                current = linked_list.head
        else:
            # "< x" node stays in place; just advance both pointers.
            previous = current
            current = current.next_node
    return linked_list
def scrubSuffixes(name):
    """
    Removes commonly seen suffixes.

    Strips at most one trailing generational/professional suffix (Jr., III,
    MD, ...) from a whitespace-split name and rejoins with single spaces.
    Safe on empty or whitespace-only input (the original raised IndexError).
    """
    # Set for O(1) membership; also drops the redundant `names[0:]` copy.
    suffixes = {"I", "II", "III", "IV", "Jr.", "Sr.", "Jr", "Sr",
                "MA", "MD", "1st", "2nd", "3rd"}
    names = name.split()
    if names and names[-1] in suffixes:
        names = names[:-1]
    return ' '.join(names)
def map_type(ansible_type: str) -> str:
    """Return JSON data type for a given Ansible type.

    Raises NotImplementedError for types with no JSON Schema equivalent.
    """
    # https://json-schema.org/understanding-json-schema/reference/type.html
    # raw is used for file mode by ansible
    type_map = {
        'str': 'string',
        'filename': 'string',
        'path': 'string',
        'raw': 'string',
        'sid': 'string',
        'list': 'array',
        'bool': 'boolean',
        'int': 'integer',
        'dict': 'object',
        'jsonarg': 'object',
        'json': 'object',
        'float': 'number',
    }
    if ansible_type not in type_map:
        raise NotImplementedError(
            f"Unable to map ansible type {ansible_type} to JSON Schema type."
        )
    return type_map[ansible_type]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.