content stringlengths 42 6.51k |
|---|
def find_largest_keys(root, k):
    """
    Question 15.4: Find k largest keys in bst

    Iterative reverse in-order traversal (right subtree first), so nodes are
    visited from largest to smallest; each visited key is prepended, yielding
    the k largest keys in ascending order.

    :param root: BST root node exposing ``val``/``left``/``right`` attributes
    :param k: number of keys to collect
    :return: list of the k largest keys (ascending); fewer if the tree has
        fewer than k nodes
    """
    largest = []
    stack = []
    done = False
    while not done and len(largest) < k:
        if root:
            # Descend right first to reach the larger keys before smaller ones.
            stack.append(root)
            root = root.right
        else:
            if len(stack):
                root = stack.pop()
                # Prepend so the collected keys end up in ascending order.
                largest.insert(0, root.val)
                root = root.left
            else:
                # Traversal exhausted before k keys were collected.
                done = True
    return largest
def GetFullPartitionSize(partition, metadata):
    """Return the partition size in bytes, including metadata/reserved space.

    Only raw NAND devices (erase_block_size > 0) need extra space. Formula:
    - Add UBI per-block metadata (2 pages per erase block) if partition is UBI
    - Round up to a whole number of erase blocks
    - Add UBI per-partition metadata (4 blocks) if partition is UBI
    - Add reserved erase blocks
    """
    erase_block_size = metadata.get('erase_block_size', 0)
    size = partition['bytes']
    if not erase_block_size:
        return size
    # Overhead formulas: "Flash space overhead" at
    # http://www.linux-mtd.infradead.org/doc/ubi.html
    is_ubi = partition.get('format') == 'ubi'
    page_size = metadata.get('page_size', 0)
    if is_ubi:
        # Each erase block loses 2 pages to UBI headers.
        usable_block = erase_block_size - 2 * page_size
        blocks_needed = -(-size // usable_block)  # ceiling division
        size += blocks_needed * 2 * page_size
    # Round up to a whole number of erase blocks.
    size = -(-size // erase_block_size) * erase_block_size
    if is_ubi:
        size += 4 * erase_block_size
    size += partition.get('reserved_erase_blocks', 0) * erase_block_size
    return size
def calculate_iteration_count(total_recs, max_rec):
    """
    Return the number of chunks of size ``max_rec`` needed to cover
    ``total_recs`` records, i.e. ceil(total_recs / max_rec).

    @param total_recs The total number of records
    @param max_rec The maximum number of records allowed in a granule
    """
    # Bug fix: ``total_recs / max_rec`` is float division under Python 3,
    # so the function returned values like 4.33 instead of 4.
    cnt = total_recs // max_rec
    if total_recs % max_rec > 0:
        cnt += 1
    return cnt
def check_name_in_string(name, string):
    """
    Check whether the (lower-cased) name is a substring of another string
    (e.g. a wikipedia title); returns 1/0 rather than True/False.
    """
    found = name.lower() in string
    return int(found)
def trapezoid_area(base_minor, base_major, height):
    """Return the area of a trapezoid: (base_major + base_minor) / 2 * height.

    :param base_minor: length of the shorter parallel side
    :param base_major: length of the longer parallel side
    :param height: distance between the parallel sides
    """
    # Stale "code here" scaffold comments removed; the formula is complete.
    return ((base_major + base_minor) / 2) * height
def calculate_receptive_field(kernel_size, n_layers, stride, dilations=None):
    """Compute the receptive field of a stack of convolutional layers.

    Args:
        kernel_size (int): kernel size of each layer.
        n_layers (int): number of layers in the stack.
        stride (int): stride of each layer.
        dilations (list, optional): per-layer dilation factors; defaults to
            1 for every layer.

    Returns:
        int: the receptive field size.
    """
    # ``None`` replaces the mutable default ``dilations=[]``; passing an
    # empty list still behaves exactly as before.
    if not dilations:
        dilations = [1] * n_layers
    receptive_field = kernel_size
    for n in range(n_layers):
        receptive_field += (kernel_size - 1) * stride * dilations[n]
    return receptive_field
def _nullable_list_intersection(list_1, list_2):
"""
Returns the intersection of 2 nullable lists.
Parameters
----------
list_1 : `None` or `list` of ``DiscordEntity``
First list.
list_2 : `None` or `list` of ``DiscordEntity``
First list.
Returns
-------
intersection : `None` or `list` of ``DiscordEntity``
A list with the two list's intersection.
"""
if list_1 is None:
return None
if list_2 is None:
return None
intersection = set(list_1) & set(list_2)
if not intersection:
return None
return list(intersection) |
def insert_underscores(num: str) -> str:
    """Given a number, insert underscore every 3 digit after decimal dot

    Groups the integer part in threes from the right (e.g. '1234' ->
    '1_234') and the fractional part in threes from the left (e.g.
    '0.123456' -> '0.123_456'), leaving any exponent suffix ('e...')
    untouched.
    """
    # add underscore before .
    if "." in num:
        idx = num.index(".")-3
    else:
        idx = len(num) - 3
    # Walk leftwards in steps of 3, inserting separators.
    while idx > 0:
        num = num[:idx] + "_" +num[idx:]
        idx -= 3
    # add underscores after .
    if not "." in num:
        return num
    # First group boundary: 3 digits past the dot (+1 for the dot itself).
    idx = num.index('.') + 4
    if "e" in num:
        # Distance of the exponent marker from the end; grouping stops there.
        e_idx = len(num) - num.index('e')
    else:
        e_idx = 0
    while idx < len(num) - e_idx:
        num = num[:idx] + "_" +num[idx:]
        # Advance past 3 digits plus the underscore just inserted.
        idx += 3+1
    return num
def label2string(labels):
    """
    Join a list of labels into a comma-separated string.

    ``None`` passes through unchanged; labels containing a comma are
    rejected because the comma is the value separator.
    """
    if labels is None:
        return labels
    for label in labels:
        if "," in label:
            raise ValueError(
                "The labels must not contain a comma as it is used "
                "as the separator for the different values.")
    return u",".join(label.strip() for label in labels)
def sanity_check(vars, cons):
    """ Check all variables participate in some constraint """
    # Collect every variable mentioned in any constraint scope.
    constrained = set()
    for con in cons.values():
        constrained.update(con['scope'])
    return all(v in constrained for v in vars)
def find_first_slice_value(slices, key):
    """For a list of slices, get the first value for a certain key."""
    for candidate in slices:
        # Skip slices missing the key or holding an explicit None.
        value = candidate[key] if key in candidate else None
        if value is not None:
            return value
    return None
def onBoard(top, left=0):
    """Return True when (top, left) lies inside the 10x10 board."""
    if top < 0 or top > 9:
        return False
    return 0 <= left <= 9
def rax_clb_node_to_dict(obj):
    """Function to convert a CLB Node object to a dict"""
    # A falsy/None node converts to an empty dict.
    if not obj:
        return {}
    node = obj.to_dict()
    # to_dict() apparently omits these attributes, so copy them explicitly.
    # NOTE(review): assumes ``obj`` exposes ``id``/``weight`` attributes —
    # confirm against the CLB Node API.
    node['id'] = obj.id
    node['weight'] = obj.weight
    return node
def make_airline_link(carrier_names):
    """Build a website link for each airline name.

    Only the first two words of each carrier name are used; they are
    concatenated, suffixed with '.com' and lower-cased, e.g.
    ['Alaska', 'Airlines', 'Inc.'] -> 'alaskaairlines.com'.
    """
    links = []
    for name_words in carrier_names:
        base = "".join(name_words[:2])
        links.append((base + ".com").lower())
    return links
def flatten(l):
    """Flattens list l by one level.
    :type l: list
    :arg l: list of lists to flatten
    :rtype: list
    :return: flattened list
    """
    flattened = []
    for sublist in l:
        flattened.extend(sublist)
    return flattened
def polarity(num):
    """
    Return the polarity (sign) of the given number: 1, -1 or 0.
    """
    if num > 0:
        return 1
    elif num < 0:
        return -1
    else:
        return 0
def seconds_to_datetime(seconds):
    """
    Convert a second count into zero-padded ("hh", "mm", "ss") strings.

    Negative inputs are clamped to ("00", "00", "00").
    :param seconds: number of seconds
    :return: tuple of three strings
    """
    minutes, secs = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    hours, minutes, secs = int(hours), int(minutes), int(secs)
    if hours < 0:
        hours = minutes = secs = 0
    return "%02d" % hours, "%02d" % minutes, "%02d" % secs
def get_localized_name(name):
    """Return the localized name matching the preferred locale, or ''."""
    preferred = name["preferredLocale"]
    locale_key = "%s_%s" % (preferred["language"], preferred["country"])
    return name['localized'].get(locale_key, '')
def l1_distance(x1, y1, x2, y2):
    """Return the l1 (manhattan) distance between two points."""
    dx = abs(x1 - x2)
    dy = abs(y1 - y2)
    return dx + dy
def sign(x):
    """
    Return the sign of x; zero counts as positive.
    x >= 0 --> +1
    x <  0 --> -1
    """
    if x < 0:
        return -1
    if x >= 0:
        return +1
def parse_unknown_options(args):
    """Parse ``--key=value`` command-line arguments into a dict.

    Args:
        args: iterable of strings of the form ``--identifier=value``.

    Returns:
        dict mapping each identifier to its (string) value.

    Raises:
        AssertionError: when an argument is malformed or duplicated.
    """
    warning_message = 'Please provide args with a proper ' \
                      'identifier as the key and the following structure: ' \
                      '--custom_argument="value"'
    assert all(a.startswith('--') for a in args), warning_message
    assert all(len(a.split('=')) == 2 for a in args), warning_message
    # Bug fix: str.lstrip('--') strips *characters*, not a prefix, so it
    # also removed any extra leading dashes; slice off the '--' marker.
    p_args = [a[2:].split('=') for a in args]
    assert all(k.isidentifier() for k, _ in p_args), warning_message
    r_args = {k: v for k, v in p_args}
    assert len(p_args) == len(r_args), 'Replicated arguments!'
    return r_args
def removeInvertedPaths(mpDict):
    """
    Collect the metapath names that are not mirror-images of another path.

    :param mpDict: dict {str: [int, bool]},
        key, str - name of the metapath
        value, [int, bool] - which matrix file to use, and
        whether to use the transpose (inverse path)
    :return: mpList, str list, ordered names of paths available,
        less paths that are mirror-images of another
    """
    # Keep only entries whose inverse flag is False, then sort by name.
    kept = [name for name, info in mpDict.items() if info[1] == False]
    return sorted(kept)
def compose_remove_description(document: dict) -> str:
    """
    Compose a change description for removing an OTU.
    :param document: the OTU document that is being removed
    :return: a change description
    """
    abbreviation = document.get("abbreviation")
    if abbreviation:
        # Include the abbreviation in parentheses when one exists.
        return f"Removed {document['name']} ({abbreviation})"
    return f"Removed {document['name']}"
def is_valid_method_param(met):
    """ Checks if method is compatible with GROMACS """
    return met in {'linkage', 'jarvis-patrick', 'monte-carlo', 'diagonalization', 'gromos'}
def get_size_and_path(line):
    """From a 'ls -l' line, return columns 4 (size) and 8 (path)."""
    fields = line.split()
    return int(fields[4]), fields[8]
def parse_coordinate(coordinate):
    """
    Parse a pair of genomic regions into chromosome + corner coordinates.

    Args:
        coordinate (str):
    Example:
        >>> parse_coordinate('chr1:153500000-153501000, chr1:153540000-153542000')
        ['chr1', 153500000, 153501000, 153540000, 153542000]
    Return:
        A list [chromosome] + [coordinate of four corners]

    Raises:
        ValueError: on malformed input, interchromosomal pairs, or spans
            longer than 200 kb.
    """
    try:
        pos1, pos2 = [elm.strip() for elm in coordinate.split(',')]
        pos1 = pos1.replace(':', '-')
        c1, p11, p12 = [elm.strip() for elm in pos1.split('-')]
        pos2 = pos2.replace(':', '-')
        c2, p21, p22 = [elm.strip() for elm in pos2.split('-')]
        p11, p12, p21, p22 = int(p11), int(p12), int(p21), int(p22)
    except (ValueError, AttributeError) as err:
        # Narrowed from a bare ``except`` so system exceptions propagate.
        raise ValueError('Invalid coordinate string!') from err
    if c1 != c2:
        raise ValueError('Intrachromosomal contacts only!')
    if p22 - p11 > 200000:
        raise ValueError('Short-distance contacts (within 200 kb) only!')
    return [c1, p11, p12, p21, p22]
def _expand_prefix(all_names, prefix):
"""Expand the given prefix into real entity names.
Args:
all_names: A list of all entity names.
prefix: A prefix string.
Returns:
A list of entity names that the pattern expands to.
"""
return [name for name in all_names if name.startswith(prefix)] |
def miller_rabin(n):
    """ Deterministic Miller-Rabin primality test.
    if n < 3,825,123,056,546,413,051, it is enough to test
    a = 2, 3, 5, 7, 11, 13, 17, 19, and 23.
    Complexity: O(log^3 n)
    """
    if n == 2: return True
    if n <= 1 or not n&1: return False
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23]
    # Write n - 1 as d * 2**s with d odd.
    d = n - 1
    s = 0
    while not d&1:
        d >>= 1
        s += 1
    for prime in primes:
        if prime >= n: continue
        x = pow(prime, d, n)
        # Bug fix: x == 1 means only that THIS base passes; the previous
        # ``break`` skipped all remaining bases and wrongly reported strong
        # pseudoprimes such as 2047 as prime. Move on to the next base.
        if x == 1: continue
        for r in range(s):
            if x == n - 1: break
            if r + 1 == s: return False
            x = x * x % n
    return True
def parse_batch_parse_statement_forwarder_id_url(json):
    """
    Extract the statement forwarder ids from a batch of statement forwarders.
    :param json: JSON text to parse.
    :type json: list(dict(str, dict(str, str)))
    :return: List of ids from statement forwarders.
    :rtype: list(dict(str, str))
    """
    return [
        {'url': forwarder['configuration']['url'], 'id': forwarder['_id']}
        for forwarder in json
    ]
def checkPatch_index_inImages_naive(Index_check_patch,Num_patches_perImage,num_col,num_row):
    """Map a flat patch index to (image, row, col) indices.

    ``num_row`` is accepted for interface compatibility but is not needed
    by the computation.
    """
    image_idx, within_image = divmod(Index_check_patch, Num_patches_perImage)
    row_idx, col_idx = divmod(within_image, num_col)
    return image_idx, row_idx, col_idx
def searching(value, liste):
    """Return the items containing the search term (case-insensitive)."""
    needle = value.lower()
    return [item for item in liste if needle in item.lower()]
def str_int_dpid(str_dpid):
    """Return stringified int version, of int or hex DPID from YAML."""
    text = str(str_dpid)
    base = 16 if text.startswith('0x') else 10
    return str(int(text, base))
def underscore_join(strlis):
    """Join the non-empty, non-None entries of strlis with underscores."""
    nonempty = (s for s in strlis if s is not None and len(s) > 0)
    return '_'.join(nonempty)
def return_calibration_type(model_name):
    """
    Returns metadata for the calibration dataset for specified model

    Parameters
    ----------
    model_name: string
        String of model name.

    Returns
    -------
    dict or None
        Metadata on the dataset: a dict with CO2, H2O and Mixed set to True
        or False. True indicates that experiments were performed using that
        fluid type. For example, a model may be able to calculate mixed
        H2O-CO2 solubilities, but its dataset may be made up only of
        pure-CO2 and pure-H2O data; such a model's metadata would be
        {'CO2': True, 'H2O': True, 'Mixed': False}. In many cases pure-CO2
        experiments cannot be distinguished from mixed H2O-CO2 experiments,
        in which case they are classified as 'Mixed'. Unknown model names
        return None (as the original if-chain did by falling through).
    """
    # Lookup table replaces the 14-branch if-chain.
    calibration_types = {
        'AllisonCarbon':        {'CO2': False, 'H2O': False, 'Mixed': True},
        'Dixon':                {'CO2': True,  'H2O': True,  'Mixed': False},
        'DixonCarbon':          {'CO2': True,  'H2O': False, 'Mixed': False},
        'DixonWater':           {'CO2': False, 'H2O': True,  'Mixed': False},
        'IaconoMarziano':       {'CO2': False, 'H2O': True,  'Mixed': True},
        'IaconoMarzianoCarbon': {'CO2': False, 'H2O': False, 'Mixed': True},
        'IaconoMarzianoWater':  {'CO2': False, 'H2O': True,  'Mixed': False},
        'Liu':                  {'CO2': False, 'H2O': True,  'Mixed': True},
        'LiuCarbon':            {'CO2': False, 'H2O': False, 'Mixed': True},
        'LiuWater':             {'CO2': False, 'H2O': True,  'Mixed': False},
        'MagmaSat':             {'CO2': True,  'H2O': True,  'Mixed': True},
        'MooreWater':           {'CO2': False, 'H2O': False, 'Mixed': True},
        'Shishkina':            {'CO2': False, 'H2O': True,  'Mixed': True},
        'ShishkinaCarbon':      {'CO2': False, 'H2O': False, 'Mixed': True},
        'ShishkinaWater':       {'CO2': False, 'H2O': True,  'Mixed': False},
    }
    return calibration_types.get(model_name)
def get_filter_constant(interval_distance, track_size):
    """Given a track size and distance, determine the minimum allowable
    split duration.
    """
    # Impossible-time constants keyed by upper distance bound (metres),
    # based on a 400m track.
    bounds = ((200, 20), (300, 30), (400, 50), (800, 52), (1200, 55), (1600, 58))
    constant = 59
    for upper, value in bounds:
        if interval_distance <= upper:
            constant = value
            break
    # Scale for different-sized tracks like 300m or 200m.
    # (Don't modify for 200s on a 200m track.)
    if interval_distance > 200:
        constant = constant * (track_size / 400.0)
    return constant
def IsValidTimezone(timezone):
    """
    Checks the validity of a timezone string value:
    - checks whether the timezone is in the pytz common_timezones list
    - assumes the timezone to be valid if the pytz module is not available
    """
    try:
        # Imported lazily so the check degrades gracefully when pytz is
        # not installed.
        import pytz
        return timezone in pytz.common_timezones
    except ImportError:  # no pytz
        # Best-effort: accept any value when validation is impossible.
        print("Timezone not checked " "(install pytz package for timezone validation)")
        return True
def type_error_message(func_name: str, expected: str, got: str) -> str:
    """Return an error message for function func_name returning type got,
    where the correct return type is expected."""
    return f'{func_name} should return a {expected}, but returned {got}.'
def excstr(e):
    """Return a string for the exception.

    Formats the way Python does in interactive shells:
        <ExceptionName>: <message>
        AttributeError: 'object' object has no attribute 'bar'
    Neither str(e) nor repr(e) produce that form. None passes through.
    """
    if e is None:
        return None
    return '{}: {}'.format(type(e).__name__, e)
def _check_geo_type_suffix(x):
""" Checks if `geo_type` suffix contains an `int` """
try:
return int(x)
except:
raise ValueError(f"`geo_type` suffix: '{x}' cannot be parsed as `int`.") |
def build_jrn(**kwargs):
    """
    Composes a journal file of journal blocks.
    :param kwargs: journal blocks
    :return: the block texts joined by newlines
    """
    blocks = kwargs.values()
    return "\n".join(blocks)
def point_in_rectangle(point, rect_top_left, rect_sides):
    """
    Check whether a point lies strictly inside a rectangle.

    Parameters
    ----------
    point : (float, float)
        (x, y) coordinates of the point
    rect_top_left : (float, float)
        (x, y) coordinates of the rectangle's top left corner
    rect_sides : (float, float)
        (x, y) lengths of the rectangle's sides

    Returns
    -------
    bool
        True if the point is inside the rectangle, otherwise False.
    """
    px, py = point[0], point[1]
    left, top = rect_top_left[0], rect_top_left[1]
    inside_x = left < px < left + rect_sides[0]
    inside_y = top < py < top + rect_sides[1]
    return inside_x and inside_y
def f(x):
    """
    Example function: x^3 - x - 2.
    """
    cubed = x ** 3.
    return cubed - x - 2
def add(vec_1, vec_2):
    """
    Element-wise vector addition; components are coerced to float.

    :param vec_1: a subscriptable collection of length 3
    :param vec_2: a subscriptable collection of length 3
    :return vec_3: a list of 3 floats
    """
    # Only the first 3 components are used, matching explicit indexing.
    return [float(vec_1[i]) + float(vec_2[i]) for i in range(3)]
def _format_result(result):
"""Format result into string for templating."""
# do not include decimal if it's 100
if result == 100:
return "100"
return "{:.1f}".format(result) |
def convert(list):
    """Convert rows to a nested categories mapping.

    Each row has the shape ``[[key_seq, ...], subkey, [value, ...]]``; the
    result maps ``tuple(row[0][0]) -> {row[1]: [row[2][0], ...]}``.

    Note: the parameter shadows the ``list`` builtin; the name is kept for
    interface compatibility.
    """
    categories = dict()
    for dict_row in list:
        key = tuple(dict_row[0][0])
        # setdefault replaces the original nested try/except (which used a
        # bare ``except`` at its innermost level).
        categories.setdefault(key, {}).setdefault(dict_row[1], []).append(dict_row[2][0])
    return categories
def fibonacci_comprehension(limit):
    """Return the ``limit``-th Fibonacci number (fib(1) == 1, fib(5) == 5).

    The original used a list comprehension purely for its ``append`` side
    effect (an anti-pattern); a plain loop expresses the same computation.
    """
    sequence = [0, 1]
    for i in range(1, limit):
        sequence.append(sequence[i] + sequence[i - 1])
    return sequence[-1]
def constant_increment_growth_rule(increment, level):
    """
    Number of samples in a 1D quadrature rule whose point count grows by a
    fixed constant at each level.

    Parameters
    ----------
    increment : integer
        The constant growth per level
    level : integer
        The level of the quadrature rule

    Return
    ------
    num_samples_1d : integer
        The number of samples in the quadrature rule
    """
    # Level 1 is special-cased to 3 points.
    if level == 1:
        return 3
    return level * increment + 1
def unwrap_hash( signals ):
    """
    unwrap all signals in a hash array and return a copy

    :param signals: dict mapping keys to signal objects
    :return: a shallow copy of the dict with each value replaced by its
        ``unwrap`` attribute
    """
    # make a shallow copy so the caller's dict is not mutated
    list_of_unwrapped_signals = signals.copy()
    # create a new list of signals
    # NOTE(review): ``.unwrap`` is accessed without parentheses — assumes it
    # is a property (or that storing the bound method is intended); confirm.
    for key, signal in list_of_unwrapped_signals.items():
        list_of_unwrapped_signals[key] = signal.unwrap
    return list_of_unwrapped_signals
def prep_msg(msg):
    """Append a NUL terminator and encode the message as UTF-8 bytes."""
    terminated = msg + '\0'
    return terminated.encode('utf-8')
def binarySearch(arr, val):
    """Return the index of ``val`` in ``arr``, or -1 if absent.

    array values must be sorted (ascending).
    """
    left = 0
    right = len(arr) - 1
    # Bug fix: the original loop only stopped on an exact match, so it spun
    # forever when ``val`` was absent (and crashed on an empty list).
    while left <= right:
        half = (left + right) // 2
        if arr[half] == val:
            return half
        if val < arr[half]:
            right = half - 1
        else:
            left = half + 1
    return -1
def cles(lessers, greaters):
    """
    # code from https://github.com/ajschumacher/cles
    # explanation from https://janhove.github.io/reporting/2016/11/16/common-language-effect-sizes
    Common-Language Effect Size: the probability that a random draw from
    `greaters` is in fact greater than a random draw from `lessers`.

    Args:
        lessers, greaters: Iterables of comparables.

    Raises:
        ValueError: when both arguments are empty.
    """
    if len(lessers) == 0 and len(greaters) == 0:
        raise ValueError('At least one argument must be non-empty')
    # These values are a bit arbitrary, but make some sense.
    # (It might be appropriate to warn for these cases.)
    if len(lessers) == 0:
        return 1
    if len(greaters) == 0:
        return 0
    numerator = 0
    # Bug fix: the merge-style scan below requires BOTH inputs sorted, but
    # this line had been commented out, giving wrong results for unsorted
    # input (lesser_index only ever advances).
    lessers, greaters = sorted(lessers), sorted(greaters)
    lesser_index = 0
    for greater in greaters:
        while lesser_index < len(lessers) and lessers[lesser_index] < greater:
            lesser_index += 1
        numerator += lesser_index  # the count less than the greater
    denominator = len(lessers) * len(greaters)
    return float(numerator) / denominator
def length(tree):
    """
    Count the total number of the tree nodes.
    :param tree: a tree node (with ``left``/``right`` children) or None
    :return: the total of nodes in the subtree
    """
    if tree:
        # Recursively count both subtrees, then add this node.
        n_nodes = length(tree.left)
        n_nodes += length(tree.right)
        n_nodes += 1
        return n_nodes
    # An empty subtree (None) contributes no nodes.
    return 0
def usi32_to_si32(val):
    """
    Reinterpret an unsigned 32-bit value as a signed 32-bit integer.
    (Image loads all ints from bytes as unsigned ints.)
    :param val: unsigned 32-bit integer
    :return: the signed equivalent
    """
    sign_bit = 1 << 31
    if val & sign_bit:
        # Sign bit set: subtract 2**32 to recover the negative value.
        return val - (1 << 32)
    return val
def boundary_check(string, start_idx, end_idx):
    """
    Check that given character indexes into given string
    align with token boundaries.
    """
    # Start is a boundary at position 0 or right after a space/comma.
    starts_ok = start_idx == 0 or string[start_idx - 1] in (" ", ",")
    # End is a boundary at the string's end or before space/comma/period.
    ends_ok = end_idx == len(string) or string[end_idx] in (" ", ",", ".")
    return starts_ok and ends_ok
def triangular2(cycle):
    """
    triangular2 schedule for cyclic LR. https://arxiv.org/abs/1506.01186
    """
    # Halve the scale every cycle: 1, 1/2, 1/4, ...
    return 2.0 ** (1 - cycle)
def build_search_query(query, page, per_page) -> dict:
    """
    Build the multi-search query for Elasticsearch.
    :param str query: free-text query matched against all fields
    :param int page: 1-based page number
    :param int per_page: results per page
    """
    offset = (page - 1) * per_page
    return {
        "query": {"multi_match": {"query": query, "fields": ["*"]}},
        "from": offset,
        "size": per_page,
    }
def encoding_description(encoding):
    """
    Build a meaningful description to add as a variable attribute in the
    netcdf file.

    Input:
    ------
    encoding: dict
        Dictionary with the encoding specifications for each variable.
    """
    labels = {
        32001: "BLOSC",
        32013: "ZFP",
        32017: "SZ",
    }
    descriptions = {}
    for variable, var_encoding in encoding.items():
        try:
            filter_id = var_encoding["compression"]
            filter_name = labels[filter_id]
        except KeyError:
            # No "compression" entry, or an unknown filter id.
            descriptions[variable] = "Non compressed"
            continue
        compression_type = "Lossless" if filter_name == "BLOSC" else "Lossy"
        descriptions[variable] = "Compressed using %s (id:%i) - %s" % (
            filter_name, filter_id, compression_type)
    return descriptions
def split_first_level(dependency):
    """
    Internal function to split nested dependencies on top-level commas
    (commas inside ``{}`` are preserved).
    :param dependency: string to split
    :return: list of trimmed top-level pieces
    """
    pieces = []
    start = 0
    depth = 0
    for idx, ch in enumerate(dependency):
        if ch == '{':
            depth += 1
        elif ch == '}':
            depth -= 1
        elif ch == ',' and depth == 0:
            pieces.append(dependency[start:idx].strip())
            start = idx + 1
    pieces.append(dependency[start:].strip())
    return pieces
def handle_status_update(loan_id, new_status):
    """Handles a status update for an order.

    Stub implementation: intended to look up the order by ``loan_id`` and
    set its status to ``new_status``; currently it only returns an empty
    response body.

    :param loan_id: identifier of the loan/order to update
    :param new_status: status value to apply
    :return: empty string (empty HTTP response body)
    """
    # lookup order in your system using loan_id
    # set order status to new_status
    return ''
def sort_and_cut_by_cluster(row, N, fractions):
    """Pick the top N items from a cluster.
    This function returns the top N * fractions[cluster] items
    Args:
        row (rdd object): row has the form
            ((user, cluster), iterator_over((user, cluster, item, rating)))
        N (int): number of items desired in to be recommended in total
        fractions (dict): map of cluster to fraction of the total dataset that
            is represented by that cluster.
    Returns:
        list of tuples: The tuples have the form (user, rating, item)
    """
    cluster = row[0][1]
    # Per-cluster quota. NOTE(review): round() uses banker's rounding in
    # Python 3 — confirm that is the intended behaviour.
    to_take = round(N * fractions[cluster])
    # Reorder fields to (user, rating, item) so sorting ranks by rating
    # within each user.
    content = ((user, rating, item) for (user, _, item, rating) in row[1])
    output = []
    i = 0
    # Take the highest-sorted tuples until the quota is filled.
    for tup in sorted(content, reverse=True):
        if i == to_take:
            return output
        output.append(tup)
        i += 1
    return output
def add_auth_response(appreq=None, auth_obj=None):
    """Called after init_actingweb() if add_response was set to False, and now responses should be added.

    :param appreq: request wrapper exposing ``response`` and ``redirect``
    :param auth_obj: auth result carrying ``response`` (code/text/headers)
        and ``redirect``
    :return: True when a response was written, False when either argument
        is missing
    """
    if not appreq or not auth_obj:
        return False
    appreq.response.set_status(auth_obj.response['code'], auth_obj.response['text'])
    # 302 redirects carry a Location; 401 responses get a plain body.
    if auth_obj.response['code'] == 302:
        appreq.redirect(auth_obj.redirect)
    elif auth_obj.response['code'] == 401:
        appreq.response.out.write("Authentication required")
    # Copy any extra headers onto the outgoing response.
    for h in auth_obj.response['headers']:
        appreq.response.headers[h["header"]] = h["value"]
    return True
def FWHMeff2FWHMgeom(FWHMeff):
    """
    Convert FWHMeff to FWHMgeom.
    This conversion was calculated by Bo Xin and Zeljko Ivezic
    (and will be in an update on the LSE-40 and overview papers).

    Parameters
    ----------
    FWHMeff: float
        the single-gaussian equivalent FWHM value, appropriate for calcNeff, in arcseconds

    Returns
    -------
    float
        FWHM geom, the geometric FWHM value as measured from a typical PSF profile in arcseconds.
    """
    return 0.822 * FWHMeff + 0.052
def is_truthy(data):
    """
    Returns True if the data is a truthy value, False otherwise.

    Accepted truthy spellings (case-insensitive): 'true', '1', 't'.
    """
    # Bug fix: the accepted list previously contained 'f' (an abbreviation
    # of *false*) instead of 't', so is_truthy('f') was True.
    return str(data).lower() in ('true', '1', 't')
def n_tasks(dec_num):
    """
    Count the ones in the binary representation of ``dec_num``.

    This translates to the number of tasks being done by an organism with a
    phenotype represented as a decimal number. Accepts either a
    ``bin()``-style string (e.g. '0b101') or a number.
    """
    try:
        # Already a '0b...' string: drop the two-character prefix.
        bitstring = dec_num[2:]
    except TypeError:
        # Not subscriptable, so treat it as a number. (Narrowed from a
        # bare ``except``.)
        bitstring = bin(int(dec_num))[2:]  # cut off 0b
    return bitstring.count("1")
def check_if_elements_is_empty(json_object):
    """
    Checks to see that there are elements in the json object
    :param json_object: mapping expected to contain an 'elements' key
    :return: bool false if not empty, true if empty (a missing 'elements'
        key counts as empty)
    """
    try:
        if len(json_object) > 0:
            is_empty = len(json_object['elements']) == 0
        else:
            is_empty = True
    except KeyError as err:
        # Bug fix: the old handler printed "TypeError" (the class object)
        # for what is actually a KeyError.
        print("KeyError [" + str(err) + " ]")
        return True
    return is_empty
def _configure_title_plotly(title, font_size, color="black"):
"""Helper function for plot_surf with plotly engine.
This function configures the title if provided.
"""
if title is None:
return dict()
return {"text": title,
"font": {"size": font_size,
"color": color,
},
"y": 0.96,
"x": 0.5,
"xanchor": "center",
"yanchor": "top"} |
def choices_to_list(choices):
    """Transforms choices dict to an ordered string list.
    Arguments:
    choices -- the dict containing available choices
    """
    # Sort keys numerically; non-int keys all map to -1, so they sort ahead
    # of non-negative int keys while keeping their relative order (sorted()
    # is stable). NOTE(review): keys below -1 would still sort before the
    # non-int keys — confirm that mixed key types are intended here.
    return list(map(str, sorted(choices.keys(), key=lambda v: v if type(v) == int else -1)))
def allocz(size):
    """Return a list of ``size`` zeros."""
    # Sequence repetition is clearer and faster than a comprehension of
    # constant values.
    return [0] * size
def _build_underline(data: str, md: bool = False, emoji: bool = False) -> str:
"""An internal function to return a rough estimate of an underline"""
text_len = len(data)
add = 3
if md and not emoji:
add = 0
elif emoji and not md:
add = 7
elif emoji and md:
add = 1
return "\n" + "-" * (len(data) + add) |
def complexns_core(strin):
    """
    Improved conversion of string representation of complex numbers to float

    Tolerates spaces, '*' multiplication markers, 'i'/'I' notation, and a
    'j' that is not at the end of the imaginary term.
    """
    try:
        return complex(strin)
    except ValueError:
        pass
    # Do the simple fixes: strip spaces/'*', normalise 'i' to 'j'.
    tmp = strin.replace(' ', '').replace('*', '')
    tmp = tmp.lower().replace('i', 'j')
    try:
        # Bug fix: this used to retry complex(strin) — the unmodified
        # input — making the fixes above pointless.
        return complex(tmp)
    except ValueError:
        pass
    # Try to move a mid-string 'j' to the end, e.g. '3+j4' -> '3+4j'.
    loc = tmp.find('j')
    if loc != -1:
        # Bug fix: the old ``if loc:`` skipped loc == 0 and mangled the
        # string when find() returned -1 (not found).
        tmp = tmp[:loc] + tmp[(loc + 1):] + 'j'
    return complex(tmp)
def gray_code(n):
    """Return the Gray code of n."""
    shifted = n >> 1
    return n ^ shifted
def update_fluent_cached_urls(item, dry_run=False):
    """
    Regenerate the cached URLs for an item's translations. This is a fiddly
    business: we use "hidden" methods instead of the public ones to avoid
    unnecessary and unwanted slug changes to ensure uniqueness, the logic for
    which doesn't work with our publishing.

    :param item: a page-like item with ``translations`` and draft/published
        children (NOTE(review): exact model type inferred — confirm)
    :param dry_run: when True, compute and report changes without saving
    :return: list of (translation, field_name, old_url, new_url) tuples for
        this item's translations. NOTE(review): reports from the recursive
        child calls are discarded — confirm that is intended.
    """
    change_report = []
    if hasattr(item, 'translations'):
        for translation in item.translations.all():
            old_url = translation._cached_url
            # "Hidden" method: bypasses the public API's slug-uniqueness
            # logic (see docstring).
            item._update_cached_url(translation)
            change_report.append(
                (translation, '_cached_url', old_url, translation._cached_url))
            if not dry_run:
                translation.save()
        if not dry_run:
            item._expire_url_caches()
    # Also process all the item's children, in case changes to this item
    # affect the URL that should be cached for the children. We process
    # only draft-or-published children, according to the item's status.
    if item.is_draft:
        children = [child for child in item.children.all()
                    if child.is_draft]
    else:
        children = [child for child in item.get_draft().children.all()
                    if child.is_published]
    for child in children:
        update_fluent_cached_urls(child, dry_run=dry_run)
    return change_report
def factorial(n):
    """
    factorial: calculate factorial n!
    :param n: input number
    :return: n!
    """
    product = 1
    # Starting at 2 skips the redundant multiplication by 1.
    for factor in range(2, n + 1):
        product *= factor
    return product
def split(formula):
    """
    Split a formula into two halves that share the middle character.

    >>> split('CNNCB')
    ('CNN', 'NCB')
    >>> split('NNCB')
    ('NNC', 'CB')
    >>> split('NCB')
    ('NC', 'CB')
    """
    mid = len(formula) // 2 + 1
    first = formula[:mid]
    second = formula[mid - 1:]
    return first, second
def load_mapeval_runtimes(map_times_path):
    """
    read the runtimes out of map_times.txt, assuming first line is a header

    :param map_times_path: path to a tab-separated file; column 0 is the
        name, column 1 the runtime (kept as a raw string, trailing newline
        included)
    :return: dict of name -> runtime string, or None when the file cannot
        be read or parsed
    """
    try:
        map_times_dict = {}
        with open(map_times_path) as map_times_file:
            lines = [line for line in map_times_file]
            # Skip the header line.
            for line in lines[1:]:
                toks = line.split('\t')
                map_times_dict[toks[0]] = toks[1]
        return map_times_dict
    except:
        # Deliberate best-effort: any failure falls through to None.
        pass
    return None
def delimiter_check(line):
    """
    Determine which delimiter is used in a given text-file line.

    Parameters:
    -line: str
        line from the text file
    Returns:
    -delim: str
        delimiter used in the text file
    Notes:
        Recognizes only ",", "\t", and ":" delimiters, checked in that
        order of priority; raises ValueError otherwise.
    """
    for candidate in (',', '\t', ':'):
        if candidate in line:
            return candidate
    raise ValueError('Unrecognized delimiter, use ",", "\t", or ":" instead.')
def closestMatch(value, _set):
    """Return the element of ``_set`` that is closest to ``value``.

    Ties keep the earliest element, matching the original scan order.
    ``_set`` must be non-empty (the original also failed on empty input).
    """
    # min() with a squared-distance key replaces the manual index loop.
    return min(_set, key=lambda element: (element - value) * (element - value))
def p_value(y, l2):
    """
    returns p-value for each response based on l2
    :param y: The value for which the p-value is to be computed
    :param l2: The list of values on which the p-value calculation is based
    :return: (count of l2 values >= y, plus 1) / (len(l2) + 1)
    """
    count_ge = sum(1 for x in l2 if x >= y)
    return (count_ge + 1) / (len(l2) + 1)
def draw_progressbar(value, total, width=40):
    """Visualize progress with a progress bar.

    :param value:
        The current progress as a fraction of total.
    :type value:
        int
    :param total:
        The maximum value that 'value' may obtain.
    :type total:
        int
    :param width:
        The character width of the drawn progress bar.
    :type width:
        int
    """
    # (A stray duplicate docstring-string statement was removed.)
    assert value >= 0 and total > 0
    n = int(value / total * width)
    # String repetition replaces the join-of-list construction.
    return '|' + '#' * n + '-' * (width - n) + '|'
def safe_compare(val1, val2):
    """
    Compare two strings in time independent of the matching prefix length.

    :param val1: First string for comparison.
    :type val1: :class:`str`
    :param val2: Second string for comparison.
    :type val2: :class:`str`
    :returns: :class:`bool` -- True if the two strings are equal, False
        otherwise. Unequal lengths return early; only the content
        comparison is constant-time.
    """
    if len(val1) != len(val2):
        return False
    # Accumulate character differences so every position is inspected.
    mismatch = 0
    for ch1, ch2 in zip(val1, val2):
        mismatch |= ord(ch1) ^ ord(ch2)
    return mismatch == 0
def color_match_threshold(pxl, ink, thresh):
    """Return True when two colors match: the sum of absolute differences
    of their first three (RGB) channels is at most ``thresh``."""
    total_diff = sum(abs(pxl[channel] - ink[channel]) for channel in range(3))
    return total_diff <= thresh
def stringListToFloat(stringList):
    """Converts a list with strings into a list with floats."""
    return list(map(float, stringList))
def parse_struct_stat(stats):
    """Parse the structure of stats values in influencer data in order
    to return a pandas-compatible object.

    :param stats: mapping of stat name -> list of items, each item holding
        a 'term' and a 'counts' mapping of metric name -> value.
        NOTE(review): schema inferred from the access pattern below —
        confirm against the influencer API.
    :return: dict of metric -> {(stat, term): value}, suitable for building
        a pandas structure keyed by (stat, term) pairs.
    """
    data = dict()
    for stat in stats:
        for item in stats[stat]:
            for metric in item['counts']:
                # Group values by metric, keyed by the (stat, term) pair.
                data.setdefault(metric, dict())
                data[metric].update({
                    (stat, item['term']): item['counts'][metric]
                })
    return data
def get_probes(probes):
    """Normalise probe identifiers to their 'th'-prefixed form.

    'a'..'e' become 'tha'..'the'; already-prefixed names pass through;
    '*' selects all five probes; anything else is dropped. A single
    string is treated as a one-element list.
    """
    short_names = ['a', 'b', 'c', 'd', 'e']
    full_names = ['tha', 'thb', 'thc', 'thd', 'the']
    if not isinstance(probes, (list, tuple)):
        probes = [probes]
    result = []
    for probe in probes:
        probe = probe.lower()
        if probe == '*':
            result = full_names
            break
        if probe in short_names:
            result.append('th' + probe)
        elif probe in full_names:
            result.append(probe)
    return result
def lib_ext(shared):
    """Returns the appropriate library extension based on the shared flag"""
    return '.so' if shared else '.a'
def observation(s, breaks, matrix, memo):
    """
    Returns an observation using memoization, based on the parameter s.

    Locates the piecewise segment containing ``s`` (bounded by ``breaks``)
    and evaluates the cached linear form for that segment.
    """
    num_breaks = len(breaks)
    if num_breaks == 0 or s <= breaks[0]:
        # Below the first break (or no breaks at all): plain linear term.
        return s * matrix[0]
    # Advance to the first segment whose upper break is >= s; the last
    # segment (index num_breaks) is unbounded above.
    segment = 1
    while segment < num_breaks and s > breaks[segment]:
        segment += 1
    base = (segment - 1) * 2
    return memo[base] + s * matrix[segment] - memo[base + 1]
def rgb_to_hex(r: float, g: float, b: float) -> int:
    """Convert the color from RGB coordinates (each in [0, 1]) to hexadecimal."""
    red = int(r * 255)
    green = int(g * 255)
    blue = int(b * 255)
    return (red << 16) | (green << 8) | blue
def power_law_vband_norm(x, slope):
    """Power law normalised at 0.55 microns (V band)."""
    # At x == 0.55 the ratio is 1, so the result is 1 for any slope.
    ratio = x / 0.55
    return ratio ** slope
def format_float(value: float, precision: int = 4) -> str:
    """
    Render a float as a fixed-point string with the given precision.

    :param value: The float value to format.
    :param precision: The number of decimal places to use.
    :return: A string containing the formatted float.
    """
    return format(value, f'.{precision}f')
def conv_if_neg(x):
    """Return (abs(x), was_negative) for an angle x.

    Useful because sin(-x) = -sin(x): the caller can negate the result later.
    """
    is_negative = x < 0
    return (-x if is_negative else x), is_negative
def assemble_pair(label, value):
    """Assemble a 'pair' Celery Script node (for use as a body item)."""
    args = {'label': label, 'value': value}
    return {'kind': 'pair', 'args': args}
def remove_unused_samples(music):
    """
    Remove samples that are never used.
    Depends: to_mod_format

    Deletes every entry of ``music['mod']['samples']`` whose index is not
    referenced by any channel, and renumbers channel references so they
    keep pointing at the same samples. Mutates ``music`` in place and
    returns it.
    """
    # Collect every sample index actually referenced by any channel.
    # A set gives O(1) membership tests below (was a linear list scan).
    referenced_samples = set()
    for chan in music['mod']['channels']:
        referenced_samples.update(chan)
    # Walk samples from highest index to lowest so a deletion never shifts
    # an index we have yet to examine.
    for sample_idx in range(len(music['mod']['samples']) - 1, -1, -1):
        if sample_idx in referenced_samples:
            continue
        # Drop the unused sample itself.
        del music['mod']['samples'][sample_idx]
        # Renumber channel references above the removed index.
        for chan in music['mod']['channels']:
            for chan_sample_num in range(len(chan)):
                assert chan[chan_sample_num] != sample_idx, 'found use of an unused sample'
                if chan[chan_sample_num] > sample_idx:
                    chan[chan_sample_num] -= 1
        # Note: referenced_samples is deliberately not updated; iterating in
        # reverse means its now-stale higher indexes are never consulted again.
    return music
def list_results(query_results):
    """
    Split a SPARQL-style result document into (keys, results).

    keys :: [key]
        = all possible keys. (Not all results will contain all keys).
    results :: [dict]
        = list of results
    """
    head = query_results['head']
    bindings = query_results['results']['bindings']
    return head['vars'], bindings
def get_qualified_name(names):
    """
    ``get_qualified_name`` gets a qualified name for the provided name list.
    :param names: name list to qualify
    :type names: list(str)
    :return: a qualified name
    :rtype: str
    :Example:
    >>> type, name = demangle_ms(Architecture["x86_64"], "?testf@Foobar@@SA?AW4foo@1@W421@@Z")
    >>> get_qualified_name(name)
    'Foobar::testf'
    >>>
    """
    scope_separator = "::"
    return scope_separator.join(names)
def levenshtein_distance(s1, s2):
    """
    The minimum amount of edits (insertions, deletions, substitutions)
    needed to turn one string into the other.
    Args:
        s1: string
        s2: string
    Returns:
        int
    """
    # Keep the shorter string on the row dimension to minimise memory.
    if len(s1) > len(s2):
        s1, s2 = s2, s1
    previous_row = list(range(len(s1) + 1))
    for row_num, c2 in enumerate(s2, start=1):
        current_row = [row_num]
        for col, c1 in enumerate(s1):
            if c1 == c2:
                # Characters match: no extra edit over the diagonal cell.
                current_row.append(previous_row[col])
            else:
                # 1 + best of substitute / delete / insert.
                current_row.append(1 + min(previous_row[col],
                                           previous_row[col + 1],
                                           current_row[-1]))
        previous_row = current_row
    return previous_row[-1]
def filter_credit_score(credit_score, bank_list):
    """Filters the bank list by the minimum allowed credit score set by the bank.
    Args:
        credit_score (int): The applicant's credit score.
        bank_list (list of lists): The available bank loans.
    Returns:
        A list of qualifying bank loans.
    """
    # Column 4 of each bank row holds the bank's minimum credit score.
    return [bank for bank in bank_list if credit_score >= int(bank[4])]
def deserialize_datetime(string):
"""Deserializes string to datetime.
The string should be in iso8601 datetime format.
:param string: str.
:type string: str
:return: datetime.
:rtype: datetime
"""
try:
from dateutil.parser import parse
return parse(string)
except ImportError:
return string |
def format_vault_encrypted_secret(vault_encrypted_secret):
    """Return a prettier vault secret.

    Collapses any run of spaces immediately before the ``$ANSIBLE_VAULT``
    header into a single space. The previous implementation called
    ``str.replace`` with identical search and replacement strings, which
    was a no-op.

    :param vault_encrypted_secret: text containing an ansible-vault block
    :return: the text with spacing before ``$ANSIBLE_VAULT`` normalised
    """
    import re
    return re.sub(r' +\$ANSIBLE_VAULT', ' $ANSIBLE_VAULT',
                  vault_encrypted_secret)
def char_collect_all(s, c):
    """
    Find all appearances of char in string
    :param s: haystack
    :param c: needle
    :return: list of indices (non-overlapping matches, left to right)
    """
    indices = []
    step = len(c)
    pos = s.find(c)
    while pos != -1:
        indices.append(pos)
        # Resume the search past this match so matches never overlap.
        pos = s.find(c, pos + step)
    return indices
def task9(a: int) -> int:
    """
    Compute a + aa + aaa + aaaa, where each term is the digit ``a``
    repeated 1, 2, 3 and 4 times respectively.

    :param a: single digit (0-9) used to build each term
    :return: the sum of the four repeated-digit numbers
    """
    digit = str(a)
    # int('5' * 3) == 555, etc.; summing the four repetition lengths.
    return sum(int(digit * repeats) for repeats in range(1, 5))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.