content stringlengths 42 6.51k |
|---|
def try_except(var, d, text1, text2):
    """Coerce ``var`` to a 0-based task index in ``[0, d)``, re-prompting on bad input.

    :param var: string that should convert to an integer
    :param d: exclusive upper bound of the valid (0-based) range
    :param text1: prompt shown when the value is not an integer
    :param text2: prompt shown when the value is out of range
    :return: integer value of var minus one
    """
    while True:
        try:
            candidate = int(var) - 1
        except ValueError:
            var = input(text1)
            continue
        if 0 <= candidate < d:
            return candidate
        var = input(text2)
def make_shell_logfile_data_url(
    host: str,
    shell_port: int,
    instance_id: int,
    offset: int,
    length: int,
) -> str:
    """
    Build the heron-shell URL serving a slice of an instance's log file,
    from the info stored in stmgr.
    """
    base = f"http://{host}:{shell_port}/filedata/log-files/{instance_id}.log.0"
    query = f"offset={offset}&length={length}"
    return f"{base}?{query}"
def limitslist(limits):
    """
    Pair each element of a one-dimensional vector with its successor,
    returning a list of (lower, upper) tuples.
    """
    return list(zip(limits[:-1], limits[1:]))
def catch_function_errors(fn, *args, **kwargs):
    """Call ``fn`` and return its result, or None if it raised.

    KeyboardInterrupt is re-raised so it can still stop the server;
    any other exception is logged and reported to stdout.
    """
    try:
        return fn(*args, **kwargs)
    # Keyboard interrupts should stop server
    except KeyboardInterrupt:
        raise
    # Notify user that error occurred
    except Exception as err:
        stack = "".join(traceback.format_stack()[:-1])
        logging.error(f'{err}\n{stack}')
        print(f'Function {fn.__name__}: {err.__class__} - {err}')
        return None
def cauchy(xs, h, y0, f, **derivatives):
    """Integrate y' = f(x, y) over the grid ``xs`` with the midpoint (Cauchy) method.

    :param xs: grid of x values
    :param h: step size
    :param y0: initial value at xs[0]
    :param f: right-hand side f(x, y)
    :return: list of y values, one per grid point
    """
    ys = [y0]
    for i, x in enumerate(xs[:-1]):
        midpoint_y = ys[i] + f(x, ys[i]) * h / 2
        ys.append(ys[i] + f(x + h / 2, midpoint_y) * h)
    return ys
def fuse_events(events, fuse_duration=0.5, target_duration=5.0):
    """From list of (start, stop) times, create list with them merged

    Events are processed in sorted order; nearby/overlapping events are
    merged into the previous entry, with special handling once the last
    merged event already exceeds ``target_duration``.
    """
    sorted_events = sorted(events)
    result = []
    for event in sorted_events:
        if len(result) == 0:
            # First event seeds the result list.
            result.append(list(event))
        elif result[-1][1] - result[-1][0] > target_duration:
            # Last merged event is already long enough: start a new entry
            # when the gap is at least fuse_duration...
            if event[0] >= result[-1][1] + fuse_duration:
                result.append(list(event))
            # ...otherwise extend the last entry.  NOTE(review): when the
            # append above ran, result[-1] is now the event itself, so this
            # second test compares the event against itself and the max()
            # is a no-op — confirm whether an elif was intended.
            if event[0] < result[-1][1] + fuse_duration:
                result[-1][1] = max(event[1], result[-1][1])
        elif event[0] < result[-1][1] and event[1] < result[-1][1]:
            # Event fully contained in the previous one: nothing to do.
            pass
        elif event[0] < result[-1][1] and event[1] - result[-1][1] < fuse_duration:
            # Overlapping and ending shortly after: extend the previous event.
            result[-1][1] = event[1]
        elif event[0] < result[-1][1] and event[1] - result[-1][1] >= fuse_duration:
            # Overlapping but reaching far beyond: start a new event where
            # the previous one stopped.
            result.append([result[-1][1], event[1]])
        elif event[1] < result[-1][1]:
            pass
        elif event[0] > result[-1][1] + fuse_duration:
            # Distant event: begin a new entry.
            result.append(list(event))
        elif event[1] > result[-1][1]:
            # Close event reaching further: extend the previous entry.
            result[-1][1] = event[1]
    return result
def rotation_cs(X, Y, c, s):
    """Rotate coordinates (X, Y) by the angle whose cosine is ``c`` and sine is ``s``.

    Works element-wise for numpy arrays as well as for scalars; returns
    the rotated (Xrot, Yrot) pair.
    """
    return X * c - Y * s, Y * c + X * s
def resolve_alias(term: str) -> str:
    """
    Resolve search-term aliases (e.g. 'loc' for 'locations');
    unknown terms are returned unchanged.
    """
    aliases = {
        "loc": "locations",
        "location": "locations",
        "kw": "keywords",
        "setting": "setting",
        "character": "characters",
    }
    return aliases.get(term, term)
def iou(box_a, box_b):
    """
    Calculate the IOU between two (width, height) boxes that share a centre.
    For example:
    >>> iou([0.5, 0.5], [1, 1])
    0.25
    :param box_a: (width, height) of the first box
    :param box_b: (width, height) of the second box
    :return: intersection area divided by union area
    """
    w, h = box_a
    c_w, c_h = box_b
    if c_w >= w:
        if c_h >= h:
            intersection, union = w * h, c_w * c_h
        else:
            intersection, union = w * c_h, w * h + (c_w - w) * c_h
    else:
        if c_h >= h:
            intersection, union = c_w * h, w * h + c_w * (c_h - h)
        else:
            intersection, union = c_w * c_h, w * h
    return intersection / union
def b128_encode(n):
    """MSB base-128 encode ``n`` (Bitcoin Core varint style).

    Port of the Bitcoin Core serializer used for Satoshi amounts and block
    heights in the LevelDB chainstate (values are usually compressed with
    txout_compress first).  Every byte stores 7 payload bits; the MSB of
    every byte except the last is set to flag a continuation, and one is
    subtracted from all but the last group to keep the encoding one-to-one.
    Examples:
      - 255        -> '807f'
      - 2**32      -> '8efefeff00' style grouping (see Bitcoin serialize.h)
    :param n: value to be encoded
    :type n: int
    :return: the base-128 encoded value
    :rtype: hex str
    """
    # Lowest 7 bits form the final byte (no continuation flag).
    groups = [n & 0x7F]
    while n > 0x7F:
        # Subtract one while shifting to keep the mapping bijective.
        n = (n >> 7) - 1
        groups.append((n & 0x7F) | 0x80)
    return "".join(format(g, '02x') for g in reversed(groups))
def Euler003(n):
    """Return the largest prime factor of ``n`` (Project Euler problem 3).

    Repeatedly divides out the smallest remaining factor; the last divisor
    used is necessarily the largest prime factor.

    :param n: integer greater than 1
    :return: largest prime factor of n
    """
    pm = 2
    while n != 1:
        if n % pm == 0:
            # Use floor division: ``/`` produced a float in Python 3 and
            # loses precision for large inputs such as 600851475143.
            n //= pm
        else:
            pm += 1
    return pm
def fitness_function(binary_string):
    """OneMax fitness: the number of '1' characters in the string."""
    return sum(1 for ch in binary_string if ch == '1')
def count_coll_com(s):
    """Count how many commenters share at least one name with the authors.

    Returns None when there are no usable commenter entries.
    """
    authors = {a for a in s["auth"] if a}
    commenter_sets = [
        set(c.strip("-").split("-"))
        for c in s["coms"]
        if c and c not in ("?", "nan")
    ]
    if not commenter_sets:
        return None
    return sum(1 for names in commenter_sets if names & authors)
def _get_difference_locator(expected_image_locator, actual_image_locator):
"""Returns the locator string used to look up the diffs between expected_image
and actual_image.
Args:
expected_image_locator: locator string pointing at expected image
actual_image_locator: locator string pointing at actual image
Returns: locator where the diffs between expected and actual images can be
found
"""
return "%s-vs-%s" % (expected_image_locator, actual_image_locator) |
def clean_key(key):
    """Normalise a keyword: drop commas, map newlines/tabs to spaces,
    collapse double spaces (single pass), and strip the ends."""
    for old, new in ((',', ''), ('\n', ' '), ('\t', ' '), ('  ', ' ')):
        key = key.replace(old, new)
    return key.strip()
def median(data_list):
    """
    Return the median of a list of numbers (values are coerced to float).
    :type data_list: list
    """
    values = sorted(map(float, data_list))
    count = len(values)
    mid = count // 2
    if count % 2:
        # Odd length: the middle element is the median.
        return values[mid]
    # Even length: average the two central elements.
    return (values[mid - 1] + values[mid]) / 2
def to_std_dicts(value):
    """Recursively convert nested (ordered) dicts into plain dicts so
    structures compare cleanly."""
    if isinstance(value, list):
        return [to_std_dicts(item) for item in value]
    if isinstance(value, dict):
        return {key: to_std_dicts(item) for key, item in value.items()}
    return value
def parse_cpu_info_tag_value(line):
    """Parse one /proc/cpuinfo line into a (tag, value) pair.

    Splits on the first ':' only, so values that themselves contain a
    colon are preserved intact (the original split on every colon and
    rejected such lines).

    :param line: a single line from /proc/cpuinfo
    :return: (tag, value) with surrounding whitespace stripped, or
        (None, None) when the line has no ':' separator.
    """
    elems = line.split(":", 1)
    if len(elems) == 2:
        return (elems[0].strip(), elems[1].strip())
    return (None, None)
def is_lower_camel(text):
    """
    Check if a string is in lowerCamelCase format.
    :param text: String to check
    :return: True when it starts lowercase, has no spaces or underscores,
        and contains at least one uppercase letter.
    """
    if " " in text or "_" in text:
        return False
    return text[0].islower() and not text.islower()
def compare_values(value1, value2, relative, absolute):
    """
    Compare two values with respect to a relative and an absolute deviation.
    :param value1: First value
    :param value2: Second value
    :param relative: Relative deviation (0..1)
    :param absolute: Absolute deviation (e.g. 1, -5.7, 100)
    :return: True if value1 is within the deviation of value2, else False
    """
    lo = min(value1, value2)
    hi = max(value1, value2)
    return lo > (hi * (1 - relative)) - absolute
def _dictionary_to_column_paths(dictionary, prefix=tuple()):
"""Convert a dictionary to the column paths within this dictionary
For example, if the argument is
{
1 : {
'a' : True,
'b' : False
},
(10, 'blah') : SomeObject()
}
The result would be
[
(1, 'a'),
(1, 'b'),
((10, 'blah'))
]
"""
paths = set()
for key, val in dictionary.items():
if isinstance(val, dict):
paths.update(_dictionary_to_column_paths(val, prefix + (key,)))
else:
paths.add(prefix + (key,))
return paths |
def hex_to_rbg(hex_color: str) -> tuple:
    """
    Convert a '#RRGGBB' hex colour to rgb,
    example: #FFFFFF to (255, 255, 255)
    :param hex_color: colour string with a leading '#'
    :return: (r, g, b) integers in 0..255
    """
    r, g, b = (int(hex_color[i:i + 2], 16) for i in (1, 3, 5))
    return r, g, b
def normal_round(num, ndigits=0):
    """
    Round ``num`` half-up to ``ndigits`` decimal places.

    Uses math.floor(x + 0.5) instead of int(x + 0.5): int() truncates
    toward zero, which made negative inputs round the wrong way
    (e.g. -2.4 came out as -1 instead of -2).  Positive inputs are
    unaffected by the change.

    num: the value to round
    ndigits: the number of digits to round to
    """
    from math import floor
    if ndigits == 0:
        return floor(num + 0.5)
    digit_value = 10 ** ndigits
    return floor(num * digit_value + 0.5) / digit_value
def BuildInitialList(LastNum):
    """
    Build a 0/1 flag list sized LastNum // 3: a leading 0 followed by
    ones (always at least the leading 0).
    """
    result = [0]
    result.extend([1] * (int(LastNum / 3) - 1))
    return result
def sparse_search(arr, s):
    """ 10.5 Sparse Search: Given a sorted array of strings interspersed
    with empty strings, find the index of a given string.
    EXAMPLE:
    Input: find "ball" in {"at", "", "", "" , "ball", "", "", "car", "" , "" , "dad", ""}
    Output: 4
    Returns None when the string is absent.

    Fixes over the previous version: integer (//) midpoint instead of a
    float index, correct binary-search direction, and ``spread`` now
    checks each side's bound independently so matches near the edges of
    the range are not missed.
    """
    def spread(arr, middle, left, right):
        # Walk outward from ``middle`` until a non-empty entry is found;
        # returning ``middle`` itself signals the whole range is empty.
        k = 1
        while middle - k >= left or middle + k <= right:
            if middle - k >= left and arr[middle - k] != "":
                return middle - k
            if middle + k <= right and arr[middle + k] != "":
                return middle + k
            k += 1
        return middle

    def rec_sparse_search(arr, s, left, right):
        if left > right:
            return None
        middle = (left + right) // 2  # was '/': a float cannot index a list
        if arr[middle] == "":
            new_middle = spread(arr, middle, left, right)
            if new_middle == middle:
                return None
            middle = new_middle
        if arr[middle] == s:
            return middle
        if arr[middle] > s:
            # Probe sorts after the target: search the left half.
            return rec_sparse_search(arr, s, left, middle - 1)
        return rec_sparse_search(arr, s, middle + 1, right)

    return rec_sparse_search(arr, s, 0, len(arr) - 1)
def hardlim(n):
    """
    Hard limit (Heaviside step) transfer function.
    :param int n: net input of the neuron
    :return: 0 for negative input, 1 otherwise
    :rtype: int
    """
    return 0 if n < 0 else 1
def f2measure(precision, recall):
    """Return the F2-score (recall weighted four times as much as precision)."""
    beta_sq = 2 ** 2
    return (1.0 + beta_sq) * ((precision * recall) /
                              ((beta_sq * precision) + recall))
def check_novelty_objects(smiles, training_smiles, verbose=False):
    """Fraction of the given (valid) SMILES that are absent from the
    training set; 0 when no molecules were generated."""
    total = len(smiles)
    if total == 0:
        return 0
    known = set(training_smiles)
    if verbose:
        print("Num distinct molecules in training data: " + str(len(known)))
    duplicates = sum(1 for sm in smiles if sm in known)
    if verbose:
        print("Num generated molecules that were already in training data: " + str(duplicates))
    return 1 - duplicates / float(total)
def dToB(n, numDigits):
    """requires: n is a natural number less than 2**numDigits
    returns a zero-padded binary string of length numDigits representing n."""
    assert type(n)==int and type(numDigits)==int and n >=0 and n < 2**numDigits
    return format(n, 'b').zfill(numDigits)
def _extended_euclidean(a, b):
    """Helper function that runs the extended Euclidean algorithm, which
    finds x and y st a * x + b * y = gcd(a, b). Used for gcd and modular
    division. Returns x, y, gcd(a, b)"""
    # Normalise so a >= b; remember whether we swapped so the
    # coefficients can be swapped back before returning.
    flip = b > a
    if flip:
        a, b = b, a
    if b == 0:
        # Base case: gcd(a, 0) = a with coefficients (1, 0).
        return 1, 0, a
    x, y, gcd = _extended_euclidean(b, a % b)
    # From b*x + (a % b)*y = gcd it follows that
    # a*y + b*(x - (a // b)*y) = gcd, giving the new coefficients.
    ny = y
    nx = x - (a // b) * y
    assert(a * ny + b * nx == gcd)
    if flip:
        ny, nx = nx, ny
    return ny, nx, gcd
def verify_education(education_str: str) -> bool:
    """
    Check whether a single-letter education code is valid.

    Parameters
    ----------
    education_str: str
        The single letter education string to be verified.
        Valid inputs are: 'b', 'm', 'd' (case-insensitive).

    Returns
    ----------
    bool: True when the code is a valid education string.
    """
    return education_str.lower() in ("b", "m", "d")
def get_buffer_base_name(noise, size, data_type, ending):
    """Derive the replay-buffer file name from its parameters.

    Two versions exist (for now): '.p' for the pickled data and '.txt'
    for logged data reporting behavioural-policy performance.
    """
    known_prefixes = ('constant_', 'discrete_', 'uniform_', 'constanteps_',
                      'uniformeps_', 'randact_', 'nonaddunif_')
    assert any(prefix in noise for prefix in known_prefixes), noise
    # Some noise kinds encode extra parameters; validate the part count.
    expected_parts = {'uniform_': 3, 'uniformeps_': 5, 'randact_': 3,
                      'nonaddunif_': 4}
    for prefix, count in expected_parts.items():
        if prefix in noise:
            assert len(noise.split('_')) == count, noise.split('_')
            break
    assert size > 0, size
    assert data_type in ['train', 'valid', 'neither'], data_type
    assert ending in ['.txt', '.p'], ending
    return f'rollout-maxsize-{size}-steps-{size}-noise-{noise}-dtype-{data_type}{ending}'
def get_clusters_size(n_samples, n_clusters):
    """Ceiling of n_samples / n_clusters: members per cluster for
    equal-groups k-means."""
    return -(-n_samples // n_clusters)
def sum_n_numbers(target, nums, n):
    """Find target sum from n numbers in nums.

    Returns the list of n addends (each number from ``nums`` used at most
    once) whose sum equals ``target``, or None when no combination of
    exactly n addends reaching the target was recorded.
    """
    # Map: partial sum -> list of addends producing it, seeded with the
    # single-number sums.
    sums = {num: [num] for num in nums}
    for iteration in range(n - 1):
        for num in nums:
            updates = {}
            for cur_num, sum_from in sums.items():
                # print(cur_num, sum_from)
                if num in sum_from:
                    # Each number may appear only once per combination.
                    continue
                if iteration + 1 != len(sum_from):
                    # Only extend combinations built in the previous round,
                    # so every entry grows by exactly one addend per pass.
                    continue
                cur_sum = num + sum(sum_from)
                if cur_sum <= target:
                    # Prune partial sums that already overshoot the target.
                    cur_from = sum_from[:]
                    cur_from.append(num)
                    updates[cur_sum] = cur_from
            sums.update(updates)
    return sums.get(target)
def parse(input_list : list) -> dict:
    """Parse the flags of a written line into dictionary representing the flags

    The outputted dict is in the form of {FLAG:value, ...}, where value is
    Union[str, int, list]. Note that the preceeding '-' is removed from flag dict key.
    The default arguments (with no flag, connecting directly to the command) are stored under
    the flag 'default'
    """
    flags = {}
    if len(input_list) > 1:
        # Tokens after the command (input_list[0]) and before the first
        # '-<letter>' marker are the positional ("default") arguments.
        # NOTE(review): input_list[i][1] assumes flag-like tokens have at
        # least two characters — a bare '-' would raise IndexError.
        i = 1
        while i < len(input_list) and not (input_list[i][0] == '-' and input_list[i][1].isalpha()):
            flags.setdefault("default", [])
            flags["default"].append(input_list[i])
            i += 1
        # Remaining tokens alternate between '-flag' markers and the
        # values collected under the most recent flag.
        flag = ''
        for value in input_list[i:]:
            if value[0] == '-':
                flag = value[1:]
                flags[flag] = []
            else:
                flags[flag].append(value)
    # Collapse value lists: no values -> None, single value -> the scalar.
    for flag, args in flags.items():
        if len(args) == 0:
            flags[flag] = None
        elif len(args) == 1:
            flags[flag] = args[0]
    return flags
def interpolate(val, min, max):
    """
    Map ``val`` from [0, 1] onto [min, max]; mostly used to scale
    random.random() output into a specified range.
    """
    span = max - min
    return val * span + min
def matching_ngram_completions(comparison_seq, hypothesis, n):
    """
    Return the words (as ints) that, if appended to ``hypothesis``, would
    complete an n-gram already present in ``comparison_seq``.
    Inputs:
        comparison_seq: list of integers
        hypothesis: list of integers or None
        n: integer
    Output:
        list of integers ("bad words")
    """
    if hypothesis is None or len(hypothesis) < n - 1 or len(comparison_seq) < n:
        return []
    # Cast everything to plain ints so comparisons are well-defined.
    context = [int(tok) for tok in hypothesis][-(n - 1):]
    tokens = [int(tok) for tok in comparison_seq]
    return [
        tokens[i]
        for i in range(n - 1, len(tokens))
        if tokens[i - (n - 1):i] == context
    ]
def contains(vector, space):
    """
    :param vector: [0.26, 0.19]
    :param space: [ [0.5, 1.0], [0.5, 1.0] ]
    :return: True when every coordinate lies inside its interval
    """
    return all(
        space[i][0] <= coord <= space[i][1]
        for i, coord in enumerate(vector)
    )
def uniquify(l):
    """
    Return a copy of ``l`` with duplicates skipped (first occurrence wins).
    Uses list membership, so items need not be hashable.
    """
    unique = []
    for item in l:
        if item in unique:
            continue
        unique.append(item)
    return unique
def rfc3339strFromBeamBQStr(s):
    """Convert a BEAM_BQ_TIMESTAMP_FORMAT string to RFC3339_TIMESTAMP_FORMAT.

    Drops the last four characters, swaps the date/time separator for
    'T', and appends a UTC offset.
    """
    trimmed = s[:-4].replace(' ', 'T')
    return trimmed + '+00:00'
def extract_meta_instance_id(form):
    """Pull the instance id from form json (as returned by xml2json).

    Handles both the bhoma / commcare-0.9 shape ('Meta' block, 'uid' key)
    and the commcare-1.0 shape ('meta' block, 'instanceID' key);
    returns None when neither is present.
    """
    meta = form.get('Meta') or form.get('meta')
    if not meta:
        return None
    return meta.get('uid') or meta.get('instanceID') or None
def knot_insertion_alpha(u, knotvector, span, idx, leg):
    """ Computes the :math:`\\alpha` coefficient for the knot insertion algorithm.

    :param u: knot
    :type u: float
    :param knotvector: knot vector
    :type knotvector: tuple
    :param span: knot span
    :type span: int
    :param idx: index value (degree-dependent)
    :type idx: int
    :param leg: i-th leg of the control points polygon
    :type leg: int
    :return: coefficient value
    :rtype: float
    """
    lower = knotvector[leg + idx]
    upper = knotvector[idx + span + 1]
    return (u - lower) / (upper - lower)
def _remove_long_seq(maxlen, seq, label):
"""Removes sequences that exceed the maximum length.
# Arguments
maxlen: Int, maximum length of the output sequences.
seq: List of lists, where each sublist is a sequence.
label: List where each element is an integer.
# Returns
new_seq, new_label: shortened lists for `seq` and `label`.
"""
new_seq, new_label = [], []
for x, y in zip(seq, label):
if len(x) < maxlen:
new_seq.append(x)
new_label.append(y)
return new_seq, new_label |
def piglatin(word):
    """Pig-latin a word: move the first letter to the end, then append
    'way' for vowel-initial words and 'ay' otherwise."""
    suffix = "way" if word[0] in "aeiou" else "ay"
    return word[1:] + word[0] + suffix
def sum_numbers_from_lists(list1, list2,):
    """Element-wise sum of two integer lists, wrapping sums above 26
    back by 26 (letter arithmetic)."""
    totals = []
    for idx, a in enumerate(list1):
        value = a + list2[idx]
        if value > 26:
            value -= 26
        totals.append(value)
    return totals
def make_shell_logfile_data_url(host, shell_port, instance_id, offset, length):
    """
    Make the url for log-file data in heron-shell
    from the info stored in stmgr.
    """
    path = "/filedata/log-files/%s.log.0" % instance_id
    query = "offset=%s&length=%s" % (offset, length)
    return "http://%s:%d%s?%s" % (host, shell_port, path, query)
def create_url_by_date(file_date):
    """
    Return the url of the gkg zip file to download from the Gdelt project.
    @Param file_date: Date of the file to download in the YYYYMMDDHHMMSS format (UTC).
    @Return url to the gkg zip file.
    """
    return "http://data.gdeltproject.org/gdeltv2/" + file_date + ".gkg.csv.zip"
def difference(array_a: list, array_b: list) -> list:
    """
    Element-wise a - b for two equal-length lists.
    :param array_a: list[int | float]
    :param array_b: list[int | float]
    :return: list of differences; empty when the lengths differ
    """
    if len(array_a) != len(array_b):
        return []
    return [a - b for a, b in zip(array_a, array_b)]
def _en_to_enth(energy, concs, A, B, C):
"""Converts an energy to an enthalpy.
Converts energy to enthalpy using the following formula:
Enthalpy = energy - (energy contribution from A) - (energy contribution from B) -
(energy contribution from C)
An absolute value is taken afterward for convenience.
Parameters
----------
energy : float
The energy of the structure
concs : list of floats
The concentrations of each element
A : float
The energy of pure A
B : float
The energy of pure B
C : float
The energy of pure C
Returns
-------
enth : float
The enthalpy of formation.
"""
enth = abs(energy - concs[0]*A - concs[1] * B - concs[2] * C)
return enth |
def config_contains_tar(pipeline_config: dict, tags=None) -> bool:
    """
    Check whether any entry in the config's fetched-file list carries a
    `.tar`-archive tag (for tar archives, no samplesSheet is attempted).
    """
    if tags is None:
        tags = ["archive", "inside_archive"]
    fetch_files = pipeline_config.get("params", {}).get("fetch_files", [])
    return any(
        tag in ff.get("tags", [])
        for ff in fetch_files
        for tag in tags
    )
def closest_divisor(a, b):
    """
    Return the largest divisor of ``a`` that is <= ``b``.

    Parameters
    ----------
    a, b : int

    Returns
    -------
    int
        ``a`` itself when b >= a or b == 0 (b == 0 is a special case),
        otherwise the divisor of ``a`` closest to ``b`` from below
        (1 always divides a, so the search cannot fail).
    """
    if b >= a or b == 0:
        return a
    return next(d for d in range(b, 0, -1) if a % d == 0)
def extract(s, start):
    """Given a string and the index of an opening '(', return the substring
    of the full balanced group up to and including its matching ')'."""
    depth = 1
    idx = start
    while idx + 1 < len(s):
        idx += 1
        ch = s[idx]
        if ch == '(':
            depth += 1
        elif ch == ')':
            depth -= 1
            if depth == 0:
                return s[start:idx + 1]
    # Ran off the end without closing the group.
    raise ValueError('ill-formed string: {}'.format(s))
def switch_encoding(phasing):
    """
    Encode each adjacent character pair as '0' if equal, '1' otherwise.
    >>> switch_encoding('0001011')
    '001110'
    """
    assert isinstance(phasing, str)
    pairs = zip(phasing, phasing[1:])
    return ''.join('0' if a == b else '1' for a, b in pairs)
def partition(iterable, pivot, key):
    """Split ``iterable`` into three lists (smaller, equal, larger) relative
    to ``pivot``, comparing key(x) against key(pivot)."""
    pivot_key = key(pivot)
    buckets = ([], [], [])
    for item in iterable:
        item_key = key(item)
        if item_key < pivot_key:
            buckets[0].append(item)
        elif item_key == pivot_key:
            buckets[1].append(item)
        else:
            buckets[2].append(item)
    return buckets
def guess_n_eigs(n_electron, n_eigs=None):
    """
    Guess the number of eigenvalues (energies) to compute so that the
    smearing iteration converges; passing ``n_eigs`` overrides the guess.
    """
    if n_eigs is not None:
        return n_eigs
    if n_electron <= 2:
        return n_electron
    return int(1.2 * ((0.5 * n_electron) + 5))
def find_mimetype(filename):
    """Guess a mimetype from the file extension.

    In production you don't need this: static files should be served by
    a web server, e.g. Nginx.
    """
    # Fixed typo: the extension is '.jpeg', not '.jpep'.
    if filename.endswith(('.jpg', '.jpeg')):
        return 'image/jpeg'
    if filename.endswith('.png'):
        return 'image/png'
    if filename.endswith('.gif'):
        return 'image/gif'
    return 'application/octet-stream'
def get_device_status_sensortext(sensor_status):
    """ Get Text Information from Sensor Status int using Binary logic.

    Each bit of the status int flags a failed subsystem; the matching
    messages are concatenated after the numeric status.  Statuses 0 and 4
    (lightning disturber) are reported as simply "OK".
    """
    sensor_int = int(sensor_status)
    # Start with the raw number; a trailing ", " is stripped at the end.
    sensor_text = str(sensor_status) + ", "
    if sensor_int == 0:
        sensor_text = "OK, "
    elif sensor_int == 4: # "Lightning Disturber"
        sensor_text = "OK, "
        pass # Don't fail a sensor because of lightning
    else:
        if sensor_int & 1:
            sensor_text += "Lightning failed, "
        if sensor_int & 2:
            sensor_text += "Lightning noise, "
        # NOTE(review): unreachable — sensor_int == 4 is already handled
        # by the elif above, so this branch can never run.
        if sensor_int == 4:
            # sensor_text += "Lightning Disturber, "
            pass # Don't fail a sensor because of lightning
        if sensor_int & 8:
            sensor_text += "Pressure Failed, "
        if sensor_int & 16:
            sensor_text += "Temperature Failed, "
        if sensor_int & 32:
            sensor_text += "Humidity Failed, "
        if sensor_int & 64:
            sensor_text += "Wind Failed, "
        if sensor_int & 128:
            sensor_text += "Precip failed, "
        if sensor_int & 256:
            sensor_text += "UV Failed, "
        if sensor_int & 512:
            sensor_text += "bit 10, " # Considered 'Internal' Weatherflow
        if sensor_int & 1024:
            sensor_text += "bit 11, " # Considered 'Internal' Weatherflow
        if sensor_int & 2048:
            sensor_text += "?Batt Mode 1, " # Considered 'Internal' Weatherflow
        if sensor_int & 4096:
            sensor_text += "?Batt Mode 2, " # Considered 'Internal' Weatherflow
        if sensor_int & 8192:
            sensor_text += "?Batt Mode 3, " # Considered 'Internal' Weatherflow
        if sensor_int & 16384:
            sensor_text += "bit 15, " # Considered 'Internal' Weatherflow
        if sensor_int & 32768:
            sensor_text += "Power Booster Depleted, " # 0x8000
        if sensor_int & 65536:
            sensor_text += "Power Booster Shore Power, " # 0x10000
    # Drop the trailing ", " separator.
    sensor_text = sensor_text[:-2]
    return sensor_text
def GetContentForTemplate(api_query):
    """Prepares and returns the template value for an API Query response.

    Args:
      api_query: The API Query for which to prepare the response content
          template value.

    Returns:
      A dict with a 'response_content' key when a response exists,
      otherwise an empty dict.
    """
    content = {}
    if not api_query:
        return content
    response = api_query.api_query_responses.get()
    if response:
        content['response_content'] = response.content
    return content
def sequences_add_start_id(sequences, start_id=0, remove_last=False):
    """Prepend a special start token (id) to every sequence.

    Parameters
    ------------
    sequences : list of list of int
        All sequences where each row is a sequence.
    start_id : int
        The start ID.
    remove_last : boolean
        Drop the last value of each sequence (usually the end ID).

    Returns
    ----------
    list of list of int
        The processed sequences.

    Examples
    ---------
    >>> sequences_add_start_id([[4,3,5], [5,3]], start_id=2)
    [[2, 4, 3, 5], [2, 5, 3]]
    >>> sequences_add_start_id([[4,3,5], [5,3]], start_id=2, remove_last=True)
    [[2, 4, 3], [2, 5]]
    """
    if remove_last:
        return [[start_id] + seq[:-1] for seq in sequences]
    return [[start_id] + seq for seq in sequences]
def list2str(args):
    """
    Render a list/tuple as the repr of a list of strings.
    For example: ['x', 'y'] -> "['x', 'y']"; None -> '[]'.
    """
    if args is None:
        return '[]'
    assert isinstance(args, (list, tuple))
    return repr([str(arg) for arg in args])
def format_weighted_edges(retweeted):
    """Flatten a {(source, destiny): weight} dict into a list of
    (source, destiny, weight) tuples."""
    return [
        (source, destiny, weight)
        for (source, destiny), weight in retweeted.items()
    ]
def get_default_semantics(n_dim):
    """
    Generate default semantics (names, symbols, units) for ``n_dim``
    model variables.

    Parameters
    ----------
    n_dim : int
        Number of dimensions, i.e. the number of variables of the model.

    Returns
    -------
    semantics : dict
        Generated model description.
    """
    indices = range(1, n_dim + 1)
    return {
        "names": [f"Variable {i}" for i in indices],
        "symbols": [f"X_{i}" for i in indices],
        "units": ["arb. unit"] * n_dim,
    }
def get_public_scoreboard():
    """Point the front end at the archived static scoreboard page.

    Kind of a hack: instead of sending a 2000+ length array for the front
    end to parse, this tells it to fetch a static page.
    """
    scoreboard = {}
    scoreboard['path'] = '/staticscoreboard.html'
    scoreboard['group'] = 'Public'
    return scoreboard
def dec2bit(value, bits=8):
    """ bits=8: 42 -> (False, True, False, True, False, True, False, False)

    Least-significant bit first; values needing more than ``bits`` bits
    recurse to append further groups.
    """
    low = value % 2 ** bits
    seq = tuple(bool((low >> i) & 1) for i in range(bits))
    if value - low > 0:
        seq = seq + dec2bit(value // 2 ** bits)
    return seq
def CalDistPointToFragment(x, fragment):#{{{
    """Distance from position ``x`` to a (begin, end) fragment.

    Left of (or at) ``begin``: begin - x.  At or beyond ``end``:
    x - end + 1.  Inside: distance to the nearer boundary, with the
    right boundary treated as end - 1.
    """
    begin, end = fragment[0], fragment[1]
    if x <= begin:
        return begin - x
    if x >= end:
        return x - end + 1
    return min(x - begin, (end - 1) - x)
def convert_value(var):
    """Convert a metric value from its string form into a python type.

    Unknown types are reported on stdout and yield None.
    """
    kind = var["type"]
    if kind == "number":
        return float(var["value"])
    if kind == "boolean":
        return var["value"] == "true"
    if kind == "string":
        return var["value"]
    print("can't convert unknown type {} for var {}".format(kind, var["name"]))
    return None
def lcm(a: int, b: int) -> int:
    """
    https://en.wikipedia.org/wiki/Least_common_multiple
    >>> lcm(4, 6)
    12
    >>> lcm(21, 6)
    42
    """
    from math import gcd
    divisor = gcd(a, b)
    return (a // divisor) * b
def sentence2id(sent, word2id):
    """Map each token of ``sent`` to an id, normalising digit tokens to
    '<NUM>', ASCII-letter tokens to '<ENG>', and anything not in the
    vocabulary to '<UNK>'.

    :param sent: iterable of tokens
    :param word2id: token-to-id mapping containing the special tokens
    :return: list of ids
    """
    ids = []
    for token in sent:
        if token.isdigit():
            token = '<NUM>'
        elif ('\u0041' <= token <= '\u005a') or ('\u0061' <= token <= '\u007a'):
            token = '<ENG>'
        if token not in word2id:
            token = '<UNK>'
        ids.append(word2id[token])
    return ids
def eh_tabuleiro(tabuleiro):
    """Return True when the argument has the tabuleiro shape: a tuple of
    three tuples sized (3, 3, 2) whose entries are all ints in {-1, 0, 1}.
    """
    if not isinstance(tabuleiro, tuple) or len(tabuleiro) != 3:
        return False
    if any(not isinstance(row, tuple) for row in tabuleiro):
        return False
    if len(tabuleiro[0]) != 3 or len(tabuleiro[1]) != 3 or len(tabuleiro[2]) != 2:
        return False
    return all(
        isinstance(cell, int) and cell in (-1, 0, 1)
        for row in tabuleiro for cell in row
    )
def reverse_digits(n: int) -> int:
    """Returns: the integer with all digits reversed, sign preserved.
    Example: 4297341 would return 1437924
    """
    sign = -1 if n < 0 else 1
    magnitude = abs(n)
    reversed_value = 0
    while magnitude:
        reversed_value = reversed_value * 10 + magnitude % 10
        magnitude //= 10
    return sign * reversed_value
def _validate_text(text):
"""If text is not str or unicode, then try to convert it to str."""
if isinstance(text, str):
return text.encode()
else:
return str(text).encode() |
def _list_geojson(list_stops):
""" Creates a list of stop data in GeoJSON format.
:param list_stops: List of StopPoint objects.
:returns: JSON-serializable dict.
"""
geojson = {
"type": "FeatureCollection",
"features": [s.to_geojson() for s in list_stops]
}
return geojson |
def import_obj(obj_path, hard=False):
    """
    Import an object by uri, example::
        >>> import_obj("module:main")
        <function main at x>
    :param obj_path: a string representing the object uri "module:attr".
    :param hard: when True, re-raise failures instead of returning None.
    """
    try:
        # str() guards against non-native string types (historical Py2 concern).
        module_name, attr_name = str(obj_path).rsplit(':', 1)
        module = __import__(module_name, globals(), locals(), [attr_name], 0)
        return getattr(module, attr_name)
    except (ValueError, AttributeError, ImportError):
        if hard:
            raise
def isGSM19(filename):
    """
    Check whether ``filename`` is in GSM19 format: the first non-blank
    line must start with 'Gem Systems GSM-19' (optionally prefixed with
    '/').  Returns False on any open/read error.
    """
    try:
        # The previous version never closed the handle; use a context
        # manager so the file is always released.
        with open(filename, 'rt') as handle:  # encoding='utf-8', errors='ignore'
            line = handle.readline()
            while line.isspace():
                line = handle.readline()
    except Exception:
        # Best-effort probe: any failure (missing file, decode error, ...)
        # simply means "not a GSM19 file".
        return False
    return line.startswith('Gem Systems GSM-19') or line.startswith('/Gem Systems GSM-19')
def insert_sort(lst):
    """Sort ``lst`` in place with insertion sort and return it."""
    for i in range(1, len(lst)):
        current = lst[i]
        pos = i
        # Shift larger elements right until the slot for ``current`` opens.
        while pos > 0 and current < lst[pos - 1]:
            lst[pos] = lst[pos - 1]
            pos -= 1
        lst[pos] = current
    return lst
def column_fill(keyword):
    """``column-fill`` property validation: only 'auto' and 'balance'
    are accepted."""
    return keyword == 'auto' or keyword == 'balance'
def oneHotEncode_3_evtypes(x, r_vals=None):
    """
    One-hot encode an event-type id into the three classes
    cascade / track / starting track.  (``r_vals`` is unused; kept for
    interface compatibility.)
    """
    cascade = [1., 0., 0.]
    track = [0., 1., 0.]
    s_track = [0., 0., 1.]
    class_by_id = {0: cascade, 1: cascade, 2: track, 3: s_track, 4: track,
                   5: cascade, 6: cascade, 7: cascade, 8: track, 9: cascade}
    return class_by_id[int(x)]
def responseBuilder(responseDict):
    """
    Assemble a raw HTTP response (bytes) from its dictionary description.
    ``responseBody`` must already be bytes.
    """
    status = responseDict['statusLine']
    # Status-Line = HTTP-Version SP Status-Code SP Reason-Phrase CRLF
    status_line = "%s %s %s" % (status['httpVersion'], status['statusCode'],
                                status['reasonPhrase'])
    header_lines = ["%s: %s" % (key, value)
                    for key, value in responseDict['responseHeaders'].items()]
    head = "\r\n".join([status_line] + header_lines) + "\r\n\r\n"
    return head.encode() + responseDict['responseBody']
def get_photo_image_name_from_img_src(img_src, exclude = '_display'):
    """Parses img_src gotten from photologue's .get_display_url() (or thumbnail)
    (E.g. "to_del_photo_/media/photologue/photos/cache/ImageName_rndnumbers_display.jpg")
    and returns a substring of photo's image's name. (E.g. "ImageName_rndnumbers.jpg)
    Excludes '_display' substring by default.
    """
    # Drop every occurrence of the marker, then keep only the last
    # path component.
    without_marker = str(img_src).replace(str(exclude), '')
    return without_marker.rsplit('/', 1)[-1]
def _headers(skey):
"""Returns the auth header for Splunk."""
return {
'Authorization': 'Splunk {0}'.format(skey),
'Content-Type': 'application/json',
'Accept': 'application/json',
} |
def smooth(list_, half_window=1):
    """Use box averaging smoothing.

    Each output point is the mean of the input values inside the
    window [i - half_window, i + half_window], clipped to the list.
    """
    n = len(list_)
    smoothed = []
    for center in range(n):
        lo = max(0, center - half_window)
        hi = min(n, center + half_window + 1)
        window = list_[lo:hi]
        smoothed.append(sum(window) / len(window))
    return smoothed
def merge_and_count_inversions(left_tuple, right_tuple):
    """
    Merge the sorted sublists of the left and right subproblems while
    counting split inversions.

    left_tuple -- (sorted sublist, inversion count) of the left subproblem
    right_tuple -- (sorted sublist, inversion count) of the right subproblem

    A split inversion is a pair whose larger item sits in the left
    subsequence and smaller item in the right. The returned count is
    count_left + count_right + count_split.

    Returns a tuple (merged sorted list, total inversion count).
    """
    left, count_left = left_tuple
    right, count_right = right_tuple
    merged = []
    splits = 0
    # Index-based merge keeps this O(L); slicing inside the loop would
    # degrade the overall sort past O(L*log(L)).
    i = j = 0
    n_left, n_right = len(left), len(right)
    while i < n_left and j < n_right:
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            # right[j] is smaller than every remaining left item, so
            # each of those pairs is one split inversion.
            merged.append(right[j])
            j += 1
            splits += n_left - i
    # Exactly one side may have leftovers; extending with an empty
    # slice is a harmless no-op.
    merged.extend(left[i:] if i < n_left else right[j:])
    return (merged, count_left + count_right + splits)
def time_unit(val):
    """Time unit constraint finds values of the form: 10ns
    """
    import re
    pattern = re.compile(r'^\d+(s|ms|us|ns|ps|fs)$')
    return bool(pattern.search(val))
def add_sys_path(new_path):
    """ Adds a directory to Python's sys.path
    Does not add the directory if it does not exist or if it's already on
    sys.path. Returns 1 if OK, -1 if new_path does not exist, 0 if it was
    already on sys.path.
    Based on: https://www.oreilly.com/library/view/python-cookbook/0596001673/ch04s23.html
    Challenge: in order to use this function, we need to import the dse_do_utils package
    and thus we need to add it's location it to sys.path!
    This will work better once we can do a pip install dse-do_utils.
    """
    import os
    import sys

    # Nonexistent paths are rejected outright.
    if not os.path.exists(new_path):
        return -1

    def canonical(path):
        # Windows is case-insensitive, so lowercase for definiteness.
        path = os.path.abspath(path)
        return path.lower() if sys.platform == 'win32' else path

    new_path = canonical(new_path)
    for existing in sys.path:
        existing = canonical(existing)
        if new_path in (existing, existing + os.sep):
            return 0
    sys.path.append(new_path)
    return 1
def binary_search_iterative(lst, key, start=0, end=None):
    """
    Performs binary search for the given key in a sorted iterable.
    Parameters
    ----------
    lst : sorted python sequence in which you want to search key
    key : value you want to search
    start : starting index (inclusive)
    end : ending index (inclusive); defaults to len(lst) - 1
    Returns
    -------
    index (int): key's index if found else -1
    """
    # Fix: the original `if not end: end = len(lst)` both clobbered an
    # explicit end=0 and defaulted to an out-of-range index, raising
    # IndexError whenever key is greater than every element.
    if end is None:
        end = len(lst) - 1
    while start <= end:
        mid = (start + end) // 2
        if lst[mid] == key:
            return mid
        elif lst[mid] < key:
            start = mid + 1
        else:
            end = mid - 1
    return -1
def remove_overlap(ranges):
    """ Simplify a list of ranges; I got it from https://codereview.stackexchange.com/questions/21307/consolidate-list-of-ranges-that-overlap

    Sorts the (start, stop) pairs and merges every pair that overlaps
    or touches the previous merged segment.
    """
    result = []
    # Fix: the original used -1 sentinels, which broke on ranges with
    # negative coordinates (first segment starting at or below -1 fell
    # into the merge branch and crashed on result[-1]). Use None.
    current_start = None
    current_stop = None
    for start, stop in sorted(ranges):
        if current_stop is None or start > current_stop:
            # this segment starts after the last segment stops
            # just add a new segment
            result.append((start, stop))
            current_start, current_stop = start, stop
        else:
            # current_start already guaranteed to be lower
            current_stop = max(current_stop, stop)
            # segments overlap, replace
            result[-1] = (current_start, current_stop)  # SLAV: I modified this to update the stop too.
    return result
def sub(proto, *args):
    """
    Format string prototype I{proto} with I{args}. This really should
    be a built-in function.

    :raises ValueError: if the prototype cannot be applied to the args.
    """
    try:
        return proto.format(*args)
    except Exception:
        # Fix: the original passed the message template and its values
        # as separate ValueError args, so the message was never
        # interpolated. Format it explicitly.
        raise ValueError(
            "Proto '{}' couldn't apply args {}".format(proto, args))
def keep_faces(faces, indices):
    """
    Remove surface mesh faces whose three vertices are not all in "indices".
    Parameters
    ----------
    faces : list of lists of three integers
        the integers for each face are indices to vertices, starting from zero
    indices : list of integers
        indices to vertices of the surface mesh that are to be retained
    Returns
    -------
    faces : list of lists of three integers
        reduced number of faces
    Examples
    --------
    >>> faces = [[1,2,3], [2,3,7], [4,7,8], [3,2,5]]
    >>> keep_faces(faces, [0,1,2,3,4,5])
    [[1, 2, 3], [3, 2, 5]]
    """
    import numpy as np
    allowed = frozenset(indices)
    # Keep only faces whose three vertices are all retained.
    kept = [face for face in faces if len(allowed.intersection(face)) == 3]
    # Normalize to an (n, 3) list-of-lists regardless of input nesting.
    return np.reshape(np.ravel(kept), (-1, 3)).tolist()
def is_power_of_two(n):
    """
    Return True if n is a power of 2
    """
    if n == 0:
        return False
    # A power of two has a single set bit, so n & (n - 1) clears it.
    return n & (n - 1) == 0
def extract_name_from_file(filename):
    """
    Return *filename* with a trailing ".py" extension stripped.

    Fix: the original used str.replace, which removed every ".py"
    occurrence anywhere in the name (e.g. "my.python.py" -> "mython"),
    not just the extension.

    :param filename: file name, with or without a ".py" extension
    :return: the name without its ".py" extension
    """
    if filename.endswith(".py"):
        return filename[:-3]
    return filename
def part2(input_data):
    """
    Return the product of three entries summing to 2020, or None.

    >>> part2([1721,979,366,299,675,1456])
    241861950
    """
    # O(n^2) instead of the original O(n^3): for each pair, look up the
    # required third value in a set. Like the original triple loop, an
    # entry may be reused within a triple, so results are identical.
    values = set(input_data)
    for entry1 in input_data:
        for entry2 in input_data:
            entry3 = 2020 - entry1 - entry2
            if entry3 in values:
                return entry1 * entry2 * entry3
    return None
def _create_eip712_market_join(chainId: int, verifyingContract: bytes, member: bytes, joined: int,
marketId: bytes, actorType: int, meta: str) -> dict:
"""
:param chainId:
:param verifyingContract:
:param member:
:param joined:
:param marketId:
:param actorType:
:param meta:
:return:
"""
assert type(chainId) == int
assert type(verifyingContract) == bytes and len(verifyingContract) == 20
assert type(member) == bytes and len(member) == 20
assert type(joined) == int
assert type(marketId) == bytes and len(marketId) == 16
assert type(actorType) == int
assert meta is None or type(meta) == str
data = {
'types': {
'EIP712Domain': [
{
'name': 'name',
'type': 'string'
},
{
'name': 'version',
'type': 'string'
},
],
'EIP712MarketJoin': [
{
'name': 'chainId',
'type': 'uint256'
},
{
'name': 'verifyingContract',
'type': 'address'
},
{
'name': 'member',
'type': 'address'
},
{
'name': 'joined',
'type': 'uint256'
},
{
'name': 'marketId',
'type': 'bytes16'
},
{
'name': 'actorType',
'type': 'uint8'
},
{
'name': 'meta',
'type': 'string',
},
]
},
'primaryType': 'EIP712MarketJoin',
'domain': {
'name': 'XBR',
'version': '1',
},
'message': {
'chainId': chainId,
'verifyingContract': verifyingContract,
'member': member,
'joined': joined,
'marketId': marketId,
'actorType': actorType,
'meta': meta or '',
}
}
return data |
def run(location, option=None):
    """run function, needs definition

    Prints a narration line (blank-padded) and echoes back *location*.
    """
    narration = 'He is following you, about 30 feet back.'
    print()
    print(narration)
    print()
    return location
def recursive_unzip(dictionary, item_idx):
    """ "Unzip" dictionary

    Replace every non-dict leaf value with its element at *item_idx*,
    recursing into nested dicts.
    """
    return {
        key: (recursive_unzip(value, item_idx)
              if isinstance(value, dict)
              else value[item_idx])
        for key, value in dictionary.items()
    }
def split_year(title):
    """Returns (title base, year)
    Some categories have a year at the end. This detects that
    and returns a split.
    Example:
    >>> split_year('Foo')
    ('Foo', None)
    >>> split_year('PyCon 2013')
    ('PyCon', 2013)
    """
    # Strip first: on failure the (stripped) title is returned as-is.
    title = title.strip()
    try:
        year = int(title[-4:])
    except (IndexError, ValueError):
        return title, None
    return title[:-4].strip(), year
def is_stringlike(item):
    """Is item string like?

    Duck-typed check: anything that concatenates with a str qualifies.
    """
    try:
        item + 'string'
    except TypeError:
        return False
    return True
def _chainCompareRecurse(a, b, k):
"""Compare implicit slices a[k:] and b[k:]"""
if len(a) == k:
return len(b) > k
elif len(b) == k:
return False
elif a[k][0] < b[k][0]:
return True
elif b[k][0] < a[k][0]:
return False
elif a[k][1] == b[k][1]:
return _chainCompareRecurse(a, b, k + 1)
else:
# arbitrary tie-breaker for our model of chains with multiple inheritance
return str(a[k][1]) < str(b[k][1]) |
def euclidan_distance_sqr(point1, point2):
    """
    Squared Euclidean distance between two 2-D points.

    >>> euclidan_distance_sqr([1, 2], [2, 4])
    5
    """
    dx = point1[0] - point2[0]
    dy = point1[1] - point2[1]
    return dx * dx + dy * dy
def getStr(bytes_arr):
    """Create string from ASCIIZ string buffer

    Decodes character codes up to (but excluding) the first 0
    terminator; the whole buffer is used if no terminator is present.

    :param bytes_arr: iterable of integer character codes
    :return: decoded string
    """
    chars = []
    for value in bytes_arr:
        if value == 0:
            break
        chars.append(chr(value))
    # join is O(n); the original built the string with repeated
    # concatenation, which is quadratic.
    return "".join(chars)
def is_tableau_code(tabcode):
    """
    Return True iff two character string is a valid tableau code.

    Valid codes combine a first character from {L,R,P,O} with a second
    from {E,D,S,T}; 'HH' and 'KK' are accepted as special cases.
    """
    if tabcode in ('HH', 'KK'):
        return True
    return (tabcode[0] in ('L', 'R', 'P', 'O')
            and tabcode[1] in ('E', 'D', 'S', 'T'))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.