content stringlengths 42 6.51k |
|---|
def compute_min_value(ad_bits):
    """Look up the minimum value for the codebook whose codewords are
    ``ad_bits`` bits long.

    :param ad_bits: codeword length
    :type ad_bits: int
    :return: min value (``None`` for the unsupported length 1)
    :rtype: int
    """
    # Pre-computed table indexed directly by codeword length.
    min_values = (0, None, 1, 6, 18, 83, 376, 1264, 5263, 17580, 72910)
    return min_values[ad_bits]
def solution1(A):
    """
    Assumes A is an unsorted list and this implementation makes no attempt at pre-sorting numbers.

    :param A: List - An unsorted list of ints
    :returns: int - the smallest positive integer (greater than 0) that does not occur in A
    """
    # A set gives O(1) membership tests; the original scanned the list
    # (O(n) per `in A` test, O(n^2) overall) and hard-capped values at
    # 100000.  Counting up from 1 terminates after at most len(A)+1 steps.
    present = set(A)
    candidate = 1
    while candidate in present:
        candidate += 1
    return candidate
def rotate_matrix(matrix):
    """Rotate the square matrix 90 degrees clockwise, in place.

    Works layer by layer from the outside in, cycling four elements at a
    time (left -> top, bottom -> left, right -> bottom, saved top -> right).

    :param matrix: square list-of-lists; modified in place
    :return: the same matrix object, for convenience
    """
    n = len(matrix)
    # Each iteration handles one concentric ring of the matrix.
    for layer in range(n // 2):
        first, last = layer, n - layer - 1
        for i in range(first, last):
            # save top
            top = matrix[layer][i]
            # left -> top
            matrix[layer][i] = matrix[-i - 1][layer]
            # bottom -> left
            matrix[-i - 1][layer] = matrix[-layer - 1][-i - 1]
            # right -> bottom
            matrix[-layer - 1][-i - 1] = matrix[i][-layer - 1]
            # top -> right
            matrix[i][-layer - 1] = top
    return matrix
def get_codelist_values(elements: list) -> list:
    """
    Returns list of code list values as strings for all elements (except the ones with no value)
    The value can be in the element attribute or text node.
    :param elements : The elements to check
    """
    def _value_of(node):
        # The codeListValue attribute wins; fall back to the text node.
        attr = node.get('codeListValue')
        return attr if attr is not None else node.text

    return [v for v in (_value_of(node) for node in elements) if v is not None]
def elite_selection(population, elite_num):
    """
    Uses elite selection to pick elite_num units from population
    :param population: population to be picked from
    :param elite_num: number of units to be selected
    :return: array of selected units
    """
    # Elitism: simply keep the leading elite_num units.
    elite = population[slice(elite_num)]
    return elite
def keep_file(filepath):
    """Decide if we keep the filepath, solely by exclusion of end of path.
    This is primarily to avoid keeping .pyc files.

    >>> keep_file('/foo.pyc')
    False
    """
    # str.endswith accepts a tuple of endings, replacing the explicit loop.
    ignored_path_endings = ('.pyc',)
    return not filepath.endswith(ignored_path_endings)
def process_results(results):
    """Construct the request response into a
    slightly more intuitive structure.

    :param results: raw OData-style payload (expects a ``d`` key and
        optionally ``error`` and ``d.__count``)
    :return: dict with ``count``, ``results`` and possibly ``error`` keys
    """
    response = {}
    # __count may be absent, or results['d'] may be a list; a bare except
    # would also hide unrelated bugs, so catch only the expected failures.
    try:
        response['count'] = int(results['d']['__count'])
    except (KeyError, TypeError, ValueError):
        response['count'] = None
    if 'error' in results:
        response['error'] = results['error']
        response['results'] = None
    elif isinstance(results['d'], list):
        response['results'] = results['d']
    elif 'results' in results['d']:
        response['results'] = results['d']['results']
    else:
        response['results'] = results['d']
    return response
def fibonacci(num):
    """
    Use fibonacci as test case for int variable names.

    Returns the ``num``-th Fibonacci number (F(0)=0, F(1)=1), or ``None``
    for negative input.
    """
    if num < 0:
        return None
    # Iterative computation avoids the exponential blow-up of the naive
    # double recursion (and drops the unused ``count`` variable) while
    # producing identical results.
    prev, curr = 0, 1
    for _ in range(num):
        prev, curr = curr, prev + curr
    return prev
def tolist(data):
    """
    Normalise ``data`` to a list: a plain string becomes ``[data]``,
    a list is returned unchanged.
    input:
        data : str or list of str
    output:
        data : list of str
    """
    return [data] if isinstance(data, str) else data
def force_str_2_bool(bool_str: str, raise_if_unknown: bool = False) -> bool:
    """Convert 'True'/'False'-style strings to bool (used on query params)."""
    if isinstance(bool_str, bool):
        return bool_str
    truthy = ("True", "true", "1")
    falsy = ("False", "false", "0")
    if bool_str in truthy:
        return True
    if bool_str in falsy:
        return False
    if raise_if_unknown:
        raise ValueError("str should be 'True/true' or 'False/false' ")
    # Any other string is treated as False.
    return False
def rotate_grid(grid):
    """
    Rotates the input 90 degrees clockwise.
    :param list grid: input with nested lists to format
    :return: list of tuples (the rotated rows)
    """
    # Reversing the rows then transposing is a clockwise quarter turn.
    bottom_up = reversed(grid)
    return list(zip(*bottom_up))
def is_cython_function(fn):
    """Checks if a function is compiled w/Cython."""
    # Unwrap class/static methods to the underlying function object.
    target = getattr(fn, "__func__", fn)
    compiled_type_names = (
        "method_descriptor",
        "cython_function_or_method",
        "builtin_function_or_method",
    )
    return type(target).__name__ in compiled_type_names
def _normalize_managed_link(managed_link) -> dict:
"""
Update Api-doc version
Args:
file_path': 'str',
version_info: 'str'
Returns:
"""
n_normalized = {}
for key, value in managed_link.items():
n_key = key.replace('_', ' ')
n_value = value
n_normalized[n_key] = n_value
return n_normalized |
def color_brightness(color):
    """Calculate the brightness of an RGB color as a value between 0 and 1.

    ``color`` is a mapping with "r", "g", "b" channel entries and an
    optional "cs" (color space) entry.  Channel values are assumed to be
    already normalised to [0, 1] -- TODO confirm against callers.
    """
    r = float(color["r"])
    g = float(color["g"])
    b = float(color["b"])
    if "cs" in color and color["cs"].lower() != "srgb":
        # Generic fallback. https://www.w3.org/TR/AERT/#color-contrast
        return 0.299 * r + 0.587 * g + 0.114 * b
    # Calculate relative luminance for the sRGB color space.
    # https://www.w3.org/TR/WCAG20/#relativeluminancedef
    def f(x):
        # Linearise one gamma-encoded sRGB channel (WCAG 2.0 piecewise formula).
        if x <= 0.03928:
            return x / 12.92
        return ((x + 0.055) / 1.055) ** 2.4
    return 0.2126 * f(r) + 0.7152 * f(g) + 0.0722 * f(b)
def midpoint(pt1, pt2):
    """Computes the midpoint between two points (coordinates truncated to int)."""
    half_dx = int((pt1[0] - pt2[0]) / 2)
    half_dy = int((pt1[1] - pt2[1]) / 2)
    return (pt2[0] + half_dx, pt2[1] + half_dy)
def filter_results(analysis_results, name):
    """Filter list of analysis results by result name.

    Returns the first result whose ``name`` matches, or ``None``.
    """
    matches = (result for result in analysis_results if result.name == name)
    return next(matches, None)
def retry_http(response):
    """Retry on specific HTTP errors:
        * 429: Rate limited to 50 reqs/minute.
    Args:
        response (dict): Dynatrace API response.
    Returns:
        bool: True to retry, False otherwise.
    """
    # Default to 200 (success) when no error code is present.
    status = int(response.get('error', {}).get('code', 200))
    return status == 429
def filter_below(threshold, results):
    """Filters items below a certain threshold out."""
    return [entry for entry in results if entry['validation_acc'] > threshold]
def get_version_from_tag(tag):
    """Handles 1.5.0 or v1.5.0"""
    if tag.startswith('v'):
        return tag[1:]
    return tag
def int_or_tuple(value):
    """Converts `value` (int or tuple) to height, width.
    This functions normalizes the input value by always returning a tuple.
    Args:
        value: A list of 2 ints, 4 ints, a single int or a tf.TensorShape.
    Returns:
        A list with 4 values.
    Raises:
        ValueError: If `value` it not well formed.
        TypeError: if the `value` type is not supported
    """
    if isinstance(value, int):
        return [1, value, value, 1]
    if isinstance(value, (tuple, list)):
        if len(value) == 2:
            height, width = value
            return [1, height, width, 1]
        if len(value) == 4:
            return list(value)
        raise ValueError('This operation does not support {} values list.'.format(len(value)))
    raise TypeError('Expected an int, a list with 2/4 ints or a TensorShape of length 2, '
                    'instead received {}'.format(value))
def sanitize_dictlist(dict_list):
    """Do some cleanup. String 'True' and 'False' must be
    interpreted as bool values. Empty string should evaluate
    to None.

    :param dict_list: list of dicts; each dict is modified in place
    :return: the same list, for convenience
    """
    for element in dict_list:
        for key, value in element.items():
            # elif makes the branches explicitly exclusive; the original's
            # trailing ``else: element[key] = element[key]`` was a no-op.
            if value == 'False':
                element[key] = False
            elif value == 'True':
                element[key] = True
            elif value == '':
                element[key] = None
    return dict_list
def kahan_sum(list_of_floating_point_numbers):
    """
    Computes the sum of a list of floating point numbers, correcting for
    precision loss (Kahan compensated summation).
    Parameters:
    ----------
    list_of_floating_point_numbers: ndarray
    Returns:
    -------
    Sum of the elements.
    """
    suma = 0.0
    # c accumulates the running compensation for lost low-order bits.
    c = 0.0
    for i in range(0, len(list_of_floating_point_numbers)):
        # Subtract the previously lost low-order bits from the next term.
        y = list_of_floating_point_numbers[i] - c
        t = suma + y
        # (t - suma) recovers the high part of y that made it into t;
        # subtracting y leaves the negated lost low part.  The statement
        # order is essential -- do not "simplify" algebraically.
        c = (t - suma) - y
        suma = t
    return suma
def file_comparison(files0, files1):
    """Compares two dictionaries of files returning their difference.
    {'created_files': [<files in files1 and not in files0>],
     'deleted_files': [<files in files0 and not in files1>],
     'modified_files': [<files in both files0 and files1 but different>]}
    """
    comparison = {'created_files': [],
                  'deleted_files': [],
                  'modified_files': []}
    for path, new_sha1 in files1.items():
        if path not in files0:
            comparison['created_files'].append({'path': path,
                                                'sha1': new_sha1})
        elif files0[path] != new_sha1:
            comparison['modified_files'].append({'path': path,
                                                 'original_sha1': files0[path],
                                                 'sha1': new_sha1})
    for path in files0:
        if path not in files1:
            comparison['deleted_files'].append({'path': path,
                                                'original_sha1': files0[path]})
    return comparison
def sort(s, reverse=False):
    """
    Sort given string by ascending order.
    If reverse is True, sorting given string by descending order.
    """
    ordered_chars = sorted(s, reverse=reverse)
    return ''.join(ordered_chars)
def numeric_cast(v):
    """Try to cast a string value to int, then float.

    :param v: value to cast
    :return: int or float when ``v`` is a parseable string, otherwise ``v``
        unchanged
    """
    # isinstance is the idiomatic type check (and handles str subclasses).
    if isinstance(v, str):
        try:
            return int(v)
        except ValueError:
            try:
                return float(v)
            except ValueError:
                pass
    return v
def save_str_as_file(str, filepath):
    """Save a string to a file and return the file path.

    Keyword arguments:
    str - the string that you want to save as in a file
    filepath - the path to the file that you want to save the string to
    """
    # NOTE: the parameter name shadows the builtin ``str``; kept as-is
    # for backward compatibility with keyword-argument callers.
    with open(filepath, mode="w", encoding="utf-8") as handle:
        handle.write(str)
    return filepath
def clean_nginx_git_tag(tag):
    """
    Return a cleaned ``version`` string from an nginx git tag.
    Nginx tags git release as in `release-1.2.3`
    This removes the `release-` prefix.
    For example:
    >>> clean_nginx_git_tag("release-1.2.3") == "1.2.3"
    True
    >>> clean_nginx_git_tag("1.2.3") == "1.2.3"
    True
    """
    prefix = "release-"
    if tag.startswith(prefix):
        return tag[len(prefix):]
    return tag
def bytes_to_readable(bytes_value):
    """
    Convert bytes to a readable form
    :param bytes_value: int, bytes
    :return: string, readable value, like 1GB
    """
    from math import ceil
    gib = 1024 * 1024 * 1024
    mib = 1024 * 1024
    kib = 1024
    # The 1.1 factor deliberately over-estimates before rounding up.
    padded = bytes_value * 1.1
    if bytes_value > gib:
        return '{}GB'.format(int(ceil(padded / gib)))
    if bytes_value > mib:
        return '{}MB'.format(int(ceil(padded / mib)))
    return '{}KB'.format(int(ceil(padded / kib)))
def is_tracked_zone(cname, zones):
    """
    Is the root domain for the provided cname one of the known domains?
    """
    return any(cname == zone or cname.endswith("." + zone) for zone in zones)
def part_exists(partitions, attribute, number):
    """
    Looks if a partition that has a specific value for a specific attribute
    actually exists.
    """
    for part in partitions:
        # Falsy attribute values (None, 0, '') never count as a match.
        if part[attribute] and part[attribute] == number:
            return True
    return False
def _resolve_name(name, package, level):
"""Return the absolute name of the module to be imported."""
level -= 1
try:
if package.count('.') < level:
raise ValueError("attempted relative import beyond top-level "
"package")
except AttributeError:
raise ValueError("'package' not set to a string")
try:
# rpartition is more "correct" and rfind is just as easy to use, but
# neither are in Python 2.3.
dot_rindex = package.rindex('.', level)[0]
base = package[:dot_rindex]
except ValueError:
base = package
if name:
return "%s.%s" % (base, name)
else:
return base |
def str_to_date(_data: str) -> str:
    """
    Reformat a DDMMYYYY date string as YYYY-MM-DD so it can be inserted
    into the database as a date.
    :param _data: str
    :return: str
    """
    text = str(_data)
    year, month, day = text[-4:], text[-6:-4], text[:-6]
    return '-'.join((year, month, day))
def set_async_call_stack_depth(maxDepth: int) -> dict:
    """Enables or disables async call stacks tracking.
    Parameters
    ----------
    maxDepth: int
        Maximum depth of async call stacks. Setting to `0` will effectively disable collecting async
        call stacks (default).
    """
    params = {"maxDepth": maxDepth}
    return {"method": "Debugger.setAsyncCallStackDepth", "params": params}
def db2mag(x):
    """Convert a value in dB to a magnitude ratio.
    Parameters
    ----------
    x - Input in dB
    Returns
    -------
    m - magnitude ratio
    """
    return 10.0 ** (x / 20.0)
def sign(number):
    """Returns 1 if number is positive, -1 if number is negative and 0 if number is 0"""
    if number == 0:
        return 0
    return -1 if number < 0 else 1
def _convert(text, mapping):
    """ Convert the text using the mapping given.

    ``mapping`` maps a search string either to a single replacement string,
    or to an iterable of replacements applied one occurrence at a time,
    cycling through the iterable until no occurrence remains.

    NOTE(review): if a replacement in the iterable itself contains the key,
    the ``while`` loop never terminates -- confirm inputs exclude that case.
    """
    for key, value in mapping.items():
        if isinstance(value, str):
            text = text.replace(key, value)
        else:
            # Replace occurrences one at a time, cycling the values.
            while key in text:
                for actualValue in value:
                    text = text.replace(key, actualValue, 1)
    return text
def pluralize(apitools_collection_guess):
    """Pluralize krm_kind and handle common atypical pluralization cases.

    :param apitools_collection_guess: singular collection name
    :return: pluralized collection name
    """
    ending_plurals = [('Policy', 'Policies'), ('Proxy', 'Proxies'),
                      ('Repository', 'Repositories'), ('Index', 'Indexes'),
                      ('Address', 'Addresses')]
    for singular, replacement_plural in ending_plurals:
        if apitools_collection_guess.endswith(singular):
            # Replace only the suffix: str.replace would also rewrite any
            # earlier occurrence of the singular inside the name
            # (e.g. 'IndexIndex' -> 'IndexesIndexes').
            return apitools_collection_guess[:-len(singular)] + replacement_plural
    return apitools_collection_guess + 's'
def parse_hashtag_string(hashtags: str) -> list:
    """Parses string of hashtags returns list (deduplicated, order unspecified)."""
    # Note: the emptiness filter runs BEFORE stripping, so a whitespace-only
    # token survives as '' -- preserved from the original behaviour.
    tokens = [tok for tok in hashtags.split("#") if tok != ""]
    return list({tok.strip() for tok in tokens})
def vec_2_str(vec):
    """
    Convert vector of integers to string.
    :param vec: [int, int, ...]
    :return: string
    """
    return ''.join(map(chr, vec))
def get_scenario_start_index(base_times, scenario_start_time):
    """
    Returns the index of the closest time step that is at, or before the scenario start time.
    """
    later_indices = [i for i, t in enumerate(base_times) if t > scenario_start_time]
    if not later_indices:
        raise ValueError(
            f"Scenario start time {scenario_start_time} is set after the baseline time range."
        )
    # Step one back from the first time strictly after the start, floored at 0.
    return max(0, min(later_indices) - 1)
def ujoin(*args):
    """Join strings with the url seperator (/).
    Note that will add a / where it's missing (as in between 'https://pypi.org' and 'project/'),
    and only use one if two consecutive tokens use respectively end and start with a /
    (as in 'project/' and '/pipoke/').
    >>> ujoin('https://pypi.org', 'project/', '/pipoke/')
    'https://pypi.org/project/pipoke/'
    Extremal cases
    >>> ujoin('https://pypi.org')
    'https://pypi.org'
    >>> ujoin('https://pypi.org/')
    'https://pypi.org/'
    >>> ujoin('')
    ''
    >>> ujoin()
    ''
    """
    if len(args) == 0 or len(args[0]) == 0:
        return ''
    # Each token is stripped of at most one leading and one trailing slash
    # (the boolean comparisons index as 0/1), the pieces are re-joined with
    # single slashes, and the outermost slashes of the first/last token are
    # restored.  NOTE(review): an empty token in the MIDDLE would raise
    # IndexError on x[0] -- assumed not to occur.
    return ((args[0][0] == '/') * '/'  # prepend slash if first arg starts with it
            + '/'.join(x[(x[0] == '/'):(len(x) - (x[-1] == '/'))] for x in args)
            + (args[-1][-1] == '/') * '/')
def foo2(value):
    """Return ``value`` when truthy, otherwise ``None`` (the bare return)."""
    return value if value else None
def is_float(string):
    """
    Check whether ``string`` can be converted to float.

    Also returns False for inputs such as ``None`` or lists, which raise
    TypeError (not ValueError) and previously escaped the handler.

    See also
    --------
    http://stackoverflow.com/questions/736043/checking-if-a-string-can-be-converted-to-float-in-python
    """
    try:
        float(string)
        return True
    except (ValueError, TypeError):
        return False
def get_objects_name(train_files):
    """
    returns: object name, unique str in case of shapenet
             given names in case of pallet
    """
    # Basename ('/'-separated paths) without the 4-character extension
    # (e.g. '.obj', '.png').
    return [path.split('/')[-1][:-4] for path in train_files]
def max_sub_array(nums):
    """ Returns the max subarray sum of the given list of numbers.
    Returns 0 if nums is None or an empty list.
    Time Complexity: O(n)
    Space Complexity: O(1)
    """
    if not nums:
        return 0
    # Canonical Kadane's algorithm: track the best sum ending at the
    # current element and the best sum seen overall.  Handles all-negative
    # inputs without the original's ad-hoc two-element seeding.
    best = current = nums[0]
    for value in nums[1:]:
        current = max(value, current + value)
        best = max(best, current)
    return best
def js_lang_fallback(lang_name, js_name=None):
    """
    Return the fallback lang name for js files.
    :param lang_name: a :class:`str:`
    :param js_name: a :class:`str:`, optional.
    :return: a :class:`str:`
    """
    if js_name != "fullcalendar":
        return lang_name
    # fullcalendar ships zh-cn/zh-tw instead of the generic Han variants.
    special_cases = {"zh-hans": "zh-cn", "zh-hant": "zh-tw"}
    return special_cases.get(lang_name.lower(), lang_name).lower()
def mag_zeropoint(filter_name):
    """
    Given a filter name, return the number of photons per square centimeter per second
    which a star of zero'th magnitude would produce above the atmosphere. We assume that the star
    has a spectrum like Vega's.
    The numbers are pre-calculated and we just pick the appropriate one for the given filter.
    :param filter_name: Filter name string
    :return: Number of photons per sq.cm. per sec from zero'th mag star
    :raises ValueError: for an unknown filter name
    >>> mag_zeropoint("none")
    4320000.0
    >>> mag_zeropoint("B")
    391000.0
    >>> mag_zeropoint("other")
    Traceback (most recent call last):
    ...
    ValueError: Bad filter name: other
    """
    photons_per_sq_cm = {"none": 4.32e+06, "U": 5.50e+05, "B": 3.91e+05, "V": 8.66e+05,
                         "R": 1.10e+06, "I": 6.75e+05}
    try:
        return photons_per_sq_cm[filter_name]
    except KeyError:
        raise ValueError("Bad filter name: {filter_name}".format(filter_name=filter_name))
def make_virtual_offset(block_start_offset, within_block_offset):
    """Compute a BGZF virtual offset from block start and within block offsets.
    The BAM indexing scheme records read positions using a 64 bit
    'virtual offset', comprising in C terms:
    block_start_offset << 16 | within_block_offset
    Here block_start_offset is the file offset of the BGZF block
    start (unsigned integer using up to 64-16 = 48 bits), and
    within_block_offset within the (decompressed) block (unsigned
    16 bit integer).
    >>> make_virtual_offset(0, 0)
    0
    >>> make_virtual_offset(0, 1)
    1
    >>> make_virtual_offset(0, 2**16 - 1)
    65535
    >>> make_virtual_offset(0, 2**16)
    Traceback (most recent call last):
    ...
    ValueError: Require 0 <= within_block_offset < 2**16, got 65536
    """
    if not 0 <= within_block_offset < 65536:
        raise ValueError("Require 0 <= within_block_offset < 2**16, got %i" %
                         within_block_offset)
    if not 0 <= block_start_offset < 281474976710656:
        raise ValueError("Require 0 <= block_start_offset < 2**48, got %i" %
                         block_start_offset)
    return (block_start_offset << 16) | within_block_offset
def data_consistency(k, k0, mask, noise_lvl=None):
    """Enforce k-space data consistency.

    k - input in k-space
    k0 - initially sampled elements in k-space
    mask - corresponding nonzero location
    noise_lvl - optional noise weighting.  NOTE(review): a value of 0 (or
        any falsy value) is treated the same as None, i.e. noiseless.
    """
    v = noise_lvl
    if v:  # noisy case
        # At sampled locations, blend the prediction with the measurement,
        # weighted by the noise level.
        out = (1 - mask) * k + mask * (k + v * k0) / (1 + v)
    else:  # noiseless case
        # Sampled locations are replaced by the measured data outright.
        # NOTE(review): assumes k0 is zero outside the mask -- confirm.
        out = (1 - mask) * k + k0
    return out
def groupby_type(estimators):
    """
    finds the number of estimators for each estimator class
    :param estimators: list of estimators (not necessarily trained)
    :return: a dictionary of estimator class as key and frequency as value
    """
    unique_classes = {}
    for estimator in estimators:
        clf_name = estimator.__class__.__name__
        # dict.get replaces the needless ``not in list(d.keys())`` scan.
        unique_classes[clf_name] = unique_classes.get(clf_name, 0) + 1
    return unique_classes
def split(string, separator=None, max_splits=-1):
    """:yaql:split
    Returns a list of tokens in the string, using separator as the
    delimiter.
    :signature: string.split(separator => null, maxSplits => -1)
    :receiverArg string: value to be splitted
    :argType string: string
    :arg separator: delimiter for splitting. null by default, which means
        splitting with whitespace characters
    :argType separator: string
    :arg maxSplits: maximum number of splittings. -1 by default, which means
        all possible splits are done
    :argType maxSplits: integer
    :returnType: list
    .. code::
        yaql> "abc de f".split()
        ["abc", "de", "f"]
        yaql> "abc de f".split(maxSplits => 1)
        ["abc", "de f"]
        yaql> "abcde".split("c")
        ["ab", "de"]
    """
    return string.split(sep=separator, maxsplit=max_splits)
def _get_exclude_files(old_baseline):
"""
Older versions of detect-secrets always had an `exclude_regex` key,
this was replaced by the `files` key under an `exclude` key in v0.12.0
:rtype: str|None
"""
if old_baseline.get('exclude'):
return old_baseline['exclude']['files']
if old_baseline.get('exclude_regex'):
return old_baseline['exclude_regex'] |
def PssmValidator(pssm):
    """validate each PSSM matrix format, no head.
    pssm = [[], [], ... , []]

    Each row must have exactly 4 entries, all parseable as floats.
    """
    def _is_number(token):
        try:
            float(token)
        except ValueError:
            return False
        return True

    return all(
        len(row) == 4 and all(_is_number(cell) for cell in row)
        for row in pssm
    )
def linesegmentsintersect(p1, p2, q1, q2):
    """Checks if two line segments intersect.
    Input:
     p1 : The start vertex of the first line segment.
     p2 : The end vertex of the first line segment.
     q1 : The start vertex of the second line segment.
     q2 : The end vertex of the second line segment.
    Output:
     True if the two line segments intersect
    """
    # Direction vectors of the two segments.
    dx = p2[0] - p1[0]
    dy = p2[1] - p1[1]
    da = q2[0] - q1[0]
    db = q2[1] - q1[1]
    # segments are parallel (cross product of the directions is zero);
    # note collinear overlapping segments are reported as NOT intersecting.
    if (da*dy - db*dx) == 0:
        return False
    # Solve for the intersection parameters along each segment.
    s = (dx * (q1[1] - p1[1]) + dy * (p1[0] - q1[0])) / (da * dy - db * dx)
    t = (da * (p1[1] - q1[1]) + db * (q1[0] - p1[0])) / (db * dx - da * dy)
    # The intersection lies within both segments iff both parameters are in [0, 1].
    return s >= 0 and s <= 1 and t >= 0 and t <= 1
def has_str_prefix(value: str) -> bool:
    """
    Inspect a token value of type string to check if it has a
    string/bytes prefix (r, u, f, b and their combinations).

    Args:
        value (str): the string value of this token
    Returns:
        Boolean defining whether value has a prefix
    """
    # Every legal prefix combination ('fr', 'Rb', ...) starts with one of
    # these characters, so checking the first character suffices; slicing
    # (rather than indexing) keeps the empty string from raising IndexError.
    return value[:1] in ("r", "u", "R", "U", "f", "F", "b", "B")
def flatten_verifications(old_verifications):
    """ Convert verifications from v1 (dict keyed by id) to v2 (list of dicts).

    Each returned dict is a shallow copy of the original entry with the
    mapping key added under ``'key'`` -- the input dicts are no longer
    mutated as a side effect.
    """
    return [
        dict(verification, key=key)
        for key, verification in old_verifications.items()
    ]
def get_iptc_seg(segments):
    """Returns iptc from JPEG meta data list
    """
    # IPTC data lives in the APP13 segment, which starts with 0xFFED.
    return next((seg for seg in segments if seg[:2] == b"\xff\xed"), None)
def get_processing_instructions(body):
    """ Extract the processing instructions / acl / etc. at the beginning of a page's body.
    Hint: if you have a Page object p, you already have the result of this function in
    p.meta and (even better) parsed/processed stuff in p.pi.
    Returns a list of (pi, restofline) tuples and a string with the rest of the body.
    """
    pi = []
    # PI lines all start with '#'; stop at the first non-PI line.
    while body.startswith('#'):
        try:
            line, body = body.split('\n', 1) # extract first line
        except ValueError:
            # Last line of the page (no trailing newline left to split on).
            line = body
            body = ''
        # end parsing on empty (invalid) PI
        if line == "#":
            body = line + '\n' + body
            break
        if line[1] == '#':# two hash marks are a comment
            comment = line[2:]
            if not comment.startswith(' '):
                # we don't require a blank after the ##, so we put one there
                comment = ' ' + comment
            line = '##%s' % comment
        verb, args = (line[1:] + ' ').split(' ', 1) # split at the first blank
        pi.append((verb.lower(), args.strip()))
    return pi, body
def is_char_field(model_field):
    """
    Checks if a model field is a char field (by class name, so the ORM
    class itself need not be imported).
    """
    return type(model_field).__name__ == 'CharField'
def _list_d2s(int_list):
"""Converts a list of ints to strings."""
return ["{:d}".format(x) for x in int_list] |
def cssid(input):
    """Custom filter: prefix the given id with '#'."""
    return "#" + format(input)
def cast_uint(value):
    """
    Cast value to unsigned 32bit integer
    Usage:
        cast_uint(1 << 31) == 2147483648
    """
    # Mask off everything above the low 32 bits.
    value = value & 0xFFFFFFFF
    return value
def make_great(magicians):
    """Move every name out of ``magicians`` into a new list with
    ' the Great' appended.

    Note: empties the input list and returns the names in REVERSE order
    (pop() takes from the end) -- matching the original behaviour.
    """
    great_magicians = [name + " the Great" for name in reversed(magicians)]
    magicians.clear()
    return great_magicians
def head_of_all(x, l):
    """List of lists from l where x is the head of all the lists."""
    return [[x, *tail] for tail in l]
def _check_sorting_runs(candidates, id_char):
    """helper to ensure correct run-parsing and mapping

    For each candidate filename, locates ``id_char`` and asserts it is
    preceded by exactly one digit (the run number); returns the parsed run
    numbers alongside the candidates.

    NOTE(review): only single-digit run numbers are supported -- just the
    one digit immediately before ``id_char`` is taken as the run id.
    """
    # Index of the id character within every candidate.
    run_idx = [f.find(id_char) for f in candidates]
    for config, idx in zip(candidates, run_idx):
        assert config[idx - 1].isdigit()
        assert not config[idx - 2].isdigit()
    runs = [int(f[idx - 1]) for f, idx in zip(candidates, run_idx)]
    return runs, candidates
def read_float(field: str) -> float:
    """Read a float, mapping the empty string to NaN."""
    if field == "":
        return float('nan')
    return float(field)
def isValidArgument(s):
    """Returns whether s is strictly a valid argument for an IRC message."""
    forbidden = ('\r', '\n', '\x00')
    return all(ch not in s for ch in forbidden)
def _IsReadableStream(obj):
"""Checks whether obj is a file-like readable stream.
:rtype:
boolean
"""
if (hasattr(obj, 'read') and callable(getattr(obj, 'read'))):
return True
return False |
def format_team(team_id, name, div_id):
    """
    Helper for team row formatting
    """
    return {"ID": team_id, "Name": name, "DivisionID": div_id}
def calc_delta(r, aa):
    """
    Calculate ubiquitous function on Kerr spacetimes.
    Parameters:
        r (float): radius
        aa (float): spin parameter (0, 1)
    Returns:
        delta (float)
    """
    # Factored form of r^2 - 2r + aa^2; expression kept bit-identical
    # to preserve floating-point rounding.
    spin_squared = aa * aa
    return r * (r - 2) + spin_squared
def priority_offset(priority):
    """Map a priority label to its offset.

    Args:
        priority: 'low', 'medium' or 'high'; anything else gets the default.
    Returns:
        float offset
    """
    offsets = {'low': .7, 'medium': .5, 'high': .3}
    return offsets.get(priority, .1)
def tree_binary(t):
    """Returns true just in case `t` is locally binary branching."""
    branch_count = len(t)
    return branch_count == 2
def generate_splits_name(y_size, z_size, x_size, Y_size, Z_size, X_size,
                         out_dir, filename_prefix, extension):
    """
    generate all the splits' name based on the number of splits the user set
    """
    split_names = []
    # Iterate x slowest, y fastest, to keep the original ordering.
    for x in range(0, int(X_size), int(x_size)):
        for z in range(0, int(Z_size), int(z_size)):
            for y in range(0, int(Y_size), int(y_size)):
                split_names.append(
                    f"{out_dir}/{filename_prefix}_{y}_{z}_{x}.{extension}")
    return split_names
def epsilon(dtype):
    """A simple way to determine (at runtime) the precision of a given type
    real number.
    Precision is defined such that (1.0 + epsilon(dtype) > 1.0).
    Below this number, the addition will not yield a different result.
    """
    one = dtype(1.0)
    # Halve the candidate until adding it to one no longer changes one;
    # the last candidate that still made a difference is the epsilon.
    candidate = dtype(1.0)
    while one + dtype(candidate / 2) > one:
        candidate = dtype(candidate / 2)
    return candidate
def list_check(lst):
    """Are all items in lst a list?
    >>> list_check([[1], [2, 3]])
    True
    >>> list_check([[1], "nope"])
    False
    """
    return all(isinstance(item, list) for item in lst)
def _clean_string(value):
"""
Return `str(value)` if it's a string or int, otherwise "".
"""
if isinstance(value, (int,) + (str,)):
return str(value)
return "" |
def canConstruct_v2(ransomNote: str, magazine: str) -> bool:
    """The LeetCode solution runner judges this as the fastest solution of all four."""
    return all(
        magazine.count(letter) >= ransomNote.count(letter)
        for letter in set(ransomNote)
    )
def add_protocol(x):
    """Add https protocol to link"""
    if x.startswith("//"):
        return "https:" + x
    return x
def get_cancer_types(file_paths_by_cancer):
    """
    Maps cancer type to an index value
    Parameters:
        file_paths_by_cancer: (dict) cancer : list of data files
    Returns:
        cancer_dict: (dict) cancer : integer identifier
    """
    # enumerate replaces the hand-rolled counter loop.
    return {cancer: index for index, cancer in enumerate(file_paths_by_cancer)}
def dt_calc(etime):
    """Returns an interval of time that increased as the ellapsed time etime increases"""
    # (elapsed-time ceiling, interval) pairs, checked in ascending order.
    thresholds = ((60, 5), (300, 10), (600, 30), (3600, 60))
    for limit, interval in thresholds:
        if etime <= limit:
            return interval
    return 300
def pyopenssl_callback(conn, cert, errno, depth, ok):
    """Callback method for _get_cert_alternate.

    Rejects only leaf-certificate (depth 0) errors 9 and 10; everything
    else is accepted.
    """
    return not (depth == 0 and errno in (9, 10))
def to_tuple(lst):
    """Recursively convert nested lists to nested tuples."""
    converted = []
    for item in lst:
        converted.append(to_tuple(item) if isinstance(item, list) else item)
    return tuple(converted)
def UFP_(kexo,Sexo,kendo,kin,kout,Aspine):
    """Returns the fixed point of the mobile receptor pool.
    Parameters
    ----------
    kexo : float
        Rate of exocytosis events occuring at the spine.
    Sexo : float
        Exocytosis event size.
    kendo : float
        Rate at which receptors are endocytosed at the spine.
    kin : float
        Rate at which receptors hop from the dendritic membrane compartment onto the spine membrane compartment.
    kout : float
        Rate at which receptors hop from the spine membrane compartment onto the dendritic membrane compartment.
    Aspine : float
        Spine surface area.
    Returns
    -------
    float
        Fixed point of the mobile receptor pool.
    """
    influx = kexo * Sexo + kin
    outflux = kendo + kout
    return Aspine * influx / outflux
def resolve_conflicts(inputs, outputs):
    """
    Checks for duplicate inputs and if there are any,
    remove one and set the output to the element-wise max of the outputs
    Args:
        inputs (list<list<float>>): Array of input vectors
        outputs (list<list<float>>): Array of output vectors
    Returns:
        tuple<inputs, outputs>: The modified inputs and outputs
    """
    # Group outputs by (hashable) input vector; insertion order preserved.
    grouped = {}
    for inp, out in zip(inputs, outputs):
        grouped.setdefault(tuple(inp), []).append(out)
    merged_inputs, merged_outputs = [], []
    for inp, outs in grouped.items():
        merged_inputs.append(list(inp))
        # Element-wise maximum across all duplicate outputs
        # (zip replaces the original index loop).
        merged_outputs.append([max(column) for column in zip(*outs)])
    return merged_inputs, merged_outputs
def abs_value_equal(x, y):
    """Return whether or not the absolute value of both numbers is the same.
    Please refrain from using libraries (abs)
    >>> abs_value_equal(-2, -2)
    True
    >>> abs_value_equal(-3, 3)
    True
    >>> abs_value_equal(1, 2)
    False
    >>> abs_value_equal(3, 3)
    True
    >>> abs_value_equal(-6, -6)
    True
    >>> abs_value_equal(-1, -5)
    False
    >>> abs_value_equal(5, -6)
    False
    """
    # Equal magnitudes means equal values or negated values.
    return x == y or x == -y
def norm_label(y, left_min, left_max, right_min, right_max):
    """
    Linearly map ``y`` from [left_min, left_max] onto [right_min, right_max].
    :param y: original value
    :param left_min: original min
    :param left_max: original max
    :param right_min: desired min
    :param right_max: desired max
    :return:
    normalised steering angle
    """
    fraction = (y - left_min) / (left_max - left_min)
    return right_min + fraction * (right_max - right_min)
def json_patch(from_obj, to_obj, ignore_keys=None, only_keys=None, no_remove=False):
    """
    Creates a JSON patch diff between two objects.
    Arguments:
        from_obj (dict): from object, usually the existing object returned by API
        to_obj (dict): to object, usually the new object to return
    Keyword arguments:
        ignore_keys (list of str, optional): whether to ignore any keys (e.g. not patch them)
        only_keys (list of str, optional): only patch keys from this whitelist
        no_remove (bool, optional): don't remove any keys when patching (in case you forgot
            to specify certain keys in the object)
    """
    def _skipped(key):
        # Shared ignore/whitelist filter (was duplicated in both loops).
        if ignore_keys is not None and key in ignore_keys:
            return True
        return only_keys is not None and key not in only_keys

    ops = []
    for key in to_obj:
        if _skipped(key):
            continue
        # when key is not in from but in to, use add op
        if key not in from_obj or from_obj[key] is None:
            ops.append({
                'op': 'add',
                'path': '/%s' % key,
                'value': to_obj[key]
            })
        elif from_obj[key] != to_obj[key]:
            ops.append({
                'op': 'replace',
                'path': '/%s' % key,
                'value': to_obj[key]
            })
    # Strict identity check kept: only the literal False enables removals.
    if no_remove is False:
        for key in from_obj:
            if _skipped(key):
                continue
            # when key is in from and not in to, use remove op
            if key not in to_obj or to_obj[key] is None:
                ops.append({
                    'op': 'remove',
                    'path': '/%s' % key
                })
    return ops
def full_dict(ldict, keys):
    """Return Comparison Dictionaries
    from list dict on keys
    keys: a list of keys that when
    combined make the row in the list unique
    """
    # Accept a single key name as well as a list of keys; isinstance
    # replaces the ``type(keys) == str`` check and the no-op else branch.
    if isinstance(keys, str):
        keys = [keys]
    cmp_dict = {}
    for line in ldict:
        index = '-'.join(str(line.get(key, '')) for key in keys)
        cmp_dict[index] = line
    return cmp_dict
def quick_sort_out_of_place(array):
    """Recursive QuickSort Implementation:
    - O(nlog(n)) expected time
    - O(n) space (out of place)
    - unstable
    - pivot = midrange of the values (best on normal, numerical distributions)
    """
    # Base case: zero or one element is already sorted.
    if len(array) < 2:
        return array
    # Pivot on the (floored) midrange of the values.
    pivot = (min(array) + max(array)) // 2
    below, equal, above = [], [], []
    for value in array:
        if value < pivot:
            below.append(value)
        elif value > pivot:
            above.append(value)
        else:
            equal.append(value)
    return quick_sort_out_of_place(below) + equal + quick_sort_out_of_place(above)
def prefixed_with_varlong(buf, mlen=10):
    """
    Returns whether the data is prefixed with what is probably a valid varint
    """
    # A varint terminates at the first byte with the continuation bit clear;
    # if no such byte appears in the first mlen bytes (or the buffer runs
    # out first), the prefix is not a valid varint.
    for byte in buf[:mlen]:
        if byte & 0x80 == 0:
            return True
    return False
def remove_quotes(value, unused):
    """Remove double-quote characters helper (second argument is ignored)."""
    return "".join(ch for ch in value if ch != '"')
def interpolateLinear(y1, y2, x):
    """
    simple linear interpolation between two variables
    @param y1
    @param y2
    @param x weighting [0..1]: 0 would be 100 % y1, 1 would be 100 % y2
    @return the interpolated value
    """
    y1_weight = 1.0 - x
    return y1 * y1_weight + y2 * x
def remove_spaces(input_text, main_rnd_generator=None, settings=None):
    """Removes spaces.
    main_rnd_generator argument is listed only for compatibility purposes.
    >>> remove_spaces("I love carrots")
    'Ilovecarrots'
    """
    return "".join(input_text.split(" "))
def guess_keys(data):
    """Guess keys should be uniform from first item
    """
    first_item = data[0]
    return [*first_item]
def lr_schedule(epoch):
    """Learning Rate Schedule
    Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs.
    Called automatically every epoch as part of callbacks during training.
    # Arguments
        epoch (int): The number of epochs
    # Returns
        lr (float32): learning rate
    """
    base_lr = 1e-3
    # (epoch threshold, multiplier) pairs, largest threshold first.
    schedule = ((100, 0.5e-3), (80, 1e-3), (60, 1e-2), (40, 1e-1))
    factor = 1.0
    for threshold, multiplier in schedule:
        if epoch > threshold:
            factor = multiplier
            break
    lr = base_lr * factor
    print('Learning rate: ', lr)
    return lr
def drop_role(role):
    """Helper method to construct SQL: drop role."""
    return "DROP ROLE IF EXISTS {};".format(role)
def get_positions(start_idx, end_idx, length):
    """ Get subj/obj position sequence. """
    # Negative offsets before the span, zeros inside, positive after.
    before = list(range(-start_idx, 0))
    inside = [0] * (end_idx - start_idx + 1)
    after = list(range(1, length - end_idx))
    return before + inside + after
def _get_random_seeds(i):
    """
    Return the i-th seed (1-based) from a fixed list of 10 random seeds.
    """
    seed_list = [42, 103, 13, 31, 17, 23, 46, 57, 83, 93]
    return seed_list[i - 1]
def splitFullFileName(fileName):
    """
    split a full file name into path, fileName and suffix
    @param fileName
    @return a tuple containing the path (with a trailing slash added), the
            file name (without the suffix) and the file suffix (without the
            preceding dot)
    """
    directory, _, basename = fileName.rpartition('/')
    path = directory + '/'
    # Everything after the last dot is the suffix; with no dot at all the
    # stem is empty and the whole basename becomes the suffix.
    stem, _, suffix = basename.rpartition('.')
    return path, stem, suffix
def score_by_source_ips(event, attributes):
    """ Score based on number of source IPs implicated """
    # 3 points per ip-src attribute in the Network activity category.
    return 3 * sum(
        1 for attr in attributes
        if attr["category"] == "Network activity" and attr["type"] == "ip-src"
    )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.