content stringlengths 42 6.51k |
|---|
def phiRb_constant_translation(gamma_max,
                               nu_max,
                               cpc_Kd,
                               Kd_cpc,
                               phi_O):
    """Compute the ribosomal allocation that maintains a high translation rate.

    Parameters
    ----------
    gamma_max : positive float
        The maximum translational efficiency in units of inverse time.
    nu_max : positive float
        The maximum nutritional capacity in units of inverse time.
    cpc_Kd : positive float
        Precursor concentration expressed relative to ``Kd_cpc``.
    Kd_cpc : positive float
        Dissociation constant scaling the precursor concentration.
    phi_O : float [0, 1]
        Allocation towards other proteins.

    Returns
    -------
    positive float [0, 1]
        The ribosomal allocation for constant translation.
    """
    # Recover the absolute precursor concentration from its Kd-relative value.
    c_pc = cpc_Kd * Kd_cpc
    numerator = (1 - phi_O) * nu_max * (c_pc + Kd_cpc)
    denominator = nu_max * (c_pc + Kd_cpc) + gamma_max * c_pc * (c_pc + 1)
    return numerator / denominator
def _checkNconvertStr(texts):
"""
Checks whether input is a string or if it can be casted into one
:param texts: A string or a type which can be converted into one
:return: Text as a string if successful otherwise nothing (exception)
"""
concatTextLst = []
for text in texts:
# Test texts elements
tempText = str(text)
if type(tempText) is not str:
raise TypeError("Input type must be a string or convertible into a string")
concatTextLst.append(str(text))
return " ".join(concatTextLst) # Preserve whitespaces when using multiple text elements |
def _normalize_vendor(vendor):
"""Return a canonical name for a type of database."""
if not vendor:
return "db" # should this ever happen?
if "sqlite" in vendor:
return "sqlite"
if "postgres" in vendor or vendor == "psycopg2":
return "postgres"
return vendor |
def to_pascal_case(value):
    """
    Convert *value* to PascalCase.

    :param value: The value that needs to be converted.
    :type value: str
    :return: The value in PascalCase.
    :rtype: str
    """
    # Title-case first, then drop every whitespace character.
    titled = value.title()
    return "".join(ch for ch in titled if not ch.isspace())
def create_adv_inputs_single(states):
    """
    Build the ADV-net input vector: cab positions followed by the passenger position.
    """
    # For testing this only works for 2 agents and 2 passengers.
    assert len(states) == 2
    assert len(states[0]) == 9
    first, second = states
    # The passenger coordinates are replicated in every cab's state vector.
    passenger = [first[7], first[8]]
    cab_one = [first[5], first[6]]
    cab_two = [second[5], second[6]]
    return cab_one + cab_two + passenger
def _write_record(data):
"""Return a list with a single string."""
return ["{:80}\n".format("".join(data))] |
def _majorana_terms_commute(term_a, term_b):
"""Whether two Majorana terms commute.
Args:
term_a (Tuple[int]): The indices of a Majorana operator term
term_b (Tuple[int]): The indices of a Majorana operator term
Returns:
bool. Whether The terms commute.
"""
intersection = 0
i, j = 0, 0
while i < len(term_a) and j < len(term_b):
if term_a[i] < term_b[j]:
i += 1
elif term_a[i] > term_b[j]:
j += 1
else:
intersection += 1
i += 1
j += 1
parity = (len(term_a)*len(term_b) - intersection) % 2
return not parity |
def _stripReverse(s):
"""Returns the string s, with reverse-video removed."""
return s.replace('\x16', '') |
def make_col_names(d):
    """Make column names (i.e. x1, x2, ..., xd) for data dimension d."""
    return ['x{}'.format(index) for index in range(1, d + 1)]
def get_reverse(sequence):
    """Return `sequence` upper-cased and in reverse order.

    If `sequence` is empty, an empty string is returned.
    """
    return sequence.upper()[::-1]
def subdict(d, keys):
    """Return a newly allocated shallow copy of a mapping `d` restricted to keys in `keys`.

    Args:
        d: Any mapping with an ``items()`` view.
        keys: Iterable of keys to keep; keys absent from `d` are ignored.

    Returns:
        dict: A new dict containing only the requested keys.
    """
    # Build the key set once for O(1) membership tests.  The former
    # ``d = dict(d)`` pre-copy was redundant: the comprehension below already
    # allocates a fresh dict, so the whole mapping was copied twice.
    keys = set(keys)
    return {k: v for k, v in d.items() if k in keys}
def calc_ac(A):
    """
    Calculate a good guess of the neutron channel radius.

    Parameters
    ----------
    A : int
        The mass number of the nucleus e.g. Ta has
        one isotope of mass number 181 (essentially)

    Returns
    -------
    a_c : float
        The channel radius [fm] or [Fermi]

    Notes
    -----
    .. math:: a_c = 1.23A^{1/3} + 0.8

    F.H. Frohner, JEFF Report 18, p. 52
    """
    cube_root = A ** (1 / 3.)
    return 1.23 * cube_root + 0.8
def repeat(text: str, n: int) -> str:
    """Repeat each character in *text* n times."""
    pieces = []
    for ch in text:
        pieces.append(ch * n)
    return "".join(pieces)
def shlok_lengths(shlokasSyllables):
    """Group shloka indices by their syllable counts.

    Input
        shlokasSyllables : list of lists of syllables in every shloka
    Outputs
        shlokLengths : dictionary mapping syllable-count -> list of shloka
            indices having that count
    """
    shlokLengths = {}
    for i, shlokSyllables in enumerate(shlokasSyllables):
        # setdefault replaces the explicit `in ... .keys()` membership test
        # and the duplicated insert/append branches.
        shlokLengths.setdefault(len(shlokSyllables), []).append(i)
    return shlokLengths
def include_for_structures(kinds):
    """Return the import line the generated structures need for the given kinds."""
    return '\nfrom enum import Enum' if 'ENUM' in kinds else ''
def __to_unsigned(val):
    """Convert signed (2 complement) value to unsigned.

    NOTE(review): for Python's arbitrary-precision ints this is an identity
    transform: with ``y = -val - 1`` we have ``~y == -y - 1 == val``, so the
    function returns ``val`` unchanged for every input.  A true two's-complement
    reinterpretation requires a fixed bit width (e.g. ``val & 0xFFFFFFFF``) —
    confirm the intended width before relying on this helper.
    """
    if val < 0:
        # ~(-val - 1) evaluates back to val for unbounded Python ints.
        val = ~(-val - 1)
    return val
def _count(*args):
"""Execute 'count' operation unsupported by core JsonLogic."""
return sum(1 if a else 0 for a in args) |
def peak_1d_binary_search_iter(nums):
    """Find a peak index by iterative binary search.

    Time complexity: O(logn).
    Space complexity: O(1).
    """
    lo, hi = 0, len(nums) - 1
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if nums[mid] < nums[mid + 1]:
            # Ascending toward the right: a peak must lie on the right side.
            lo = mid + 1
        elif nums[mid] < nums[mid - 1]:
            # Ascending toward the left: a peak must lie on the left side.
            hi = mid - 1
        else:
            # Neither neighbour is larger: mid is a peak.
            return mid
    # lo == hi: the search has collapsed onto the peak.
    return lo
def elicit_slot(session_attributes, intent_name, slots, slot_to_elicit, message):
    """
    Build an elicit-slot type dialog response.
    """
    dialog_action = {
        "type": "ElicitSlot",
        "intentName": intent_name,
        "slots": slots,
        "slotToElicit": slot_to_elicit,
        "message": message,
    }
    return {
        "sessionAttributes": session_attributes,
        "dialogAction": dialog_action,
    }
def filter_not_running_tasks(tasks):
    """ Filters those tasks where it's state is *not* TASK_RUNNING.

    :param tasks: a list of mesos.cli.Task
    :return filtered: a list of tasks *not* running
    """
    not_running = []
    for task in tasks:
        if task['state'] != 'TASK_RUNNING':
            not_running.append(task)
    return not_running
def cvsecs(*args):
    """
    Converts a time to seconds. Either cvsecs(secs), cvsecs(min,secs) or
    cvsecs(hours,mins,secs).

    >>> cvsecs(5.5) # -> 5.5 seconds
    >>> cvsecs(10, 4.5) # -> 604.5 seconds
    >>> cvsecs(1, 0, 5) # -> 3605 seconds

    Raises:
        TypeError: if called with zero or more than three arguments — the
            old implementation silently fell through and returned None.
    """
    if len(args) == 1:
        return args[0]
    elif len(args) == 2:
        return 60 * args[0] + args[1]
    elif len(args) == 3:
        return 3600 * args[0] + 60 * args[1] + args[2]
    raise TypeError("cvsecs expects 1, 2 or 3 arguments, got %d" % len(args))
def is_unique_chars_v3(str):
    """
    Check character uniqueness without any additional data structure by
    comparing every character of the string against every other character.

    Runs in O(n ** 2) time and O(1) space.
    """
    for outer in str:
        matches = 0
        for inner in str:
            if outer == inner:
                matches += 1
        if matches > 1:
            return False
    return True
def new_value_part_2(seat: str, visible_count: int) -> str:
    """
    Return the next state for one seat (part-2 visibility rules).
    """
    if seat == "L":
        # An empty seat fills only when no occupied seat is visible.
        return "#" if visible_count == 0 else seat
    if seat == "#":
        # An occupied seat empties when five or more occupied seats are visible.
        return "L" if visible_count >= 5 else seat
    return seat
def get_standard_filename(year, month, day):
    """
    Get the "standardized" name of a daily precipitation file, i.e. the name under the
    scheme used for pre-2006 files.
    """
    date_part = '{:04d}{:02d}{:02d}'.format(year, month, day)
    return 'PRCP_CU_GAUGE_V1.0GLB_0.50deg.lnx.' + date_part + '.gz'
def apply(rom, patches):
    """Apply a list of patch dictionaries to ROM data, in place.

    Arguments:
        rom {list} -- A list of bytes depicting the ROM data to be patched.
        patches {list} -- Dictionaries, each mapping a single offset to the
            byte values to write there.

    Returns:
        list -- the patched ROM bytes (same list object as *rom*)
    """
    for patch in patches:
        # Each patch dict holds exactly one offset -> values entry.
        offset_key = list(patch.keys())[0]
        values = list(patch.values())[0]
        base = int(offset_key)
        for shift, byte in enumerate(values):
            rom[base + shift] = byte
    return rom
def KGtoLB(mkg):
    """
    Convert a mass from kilograms to pounds.

    Note: 1 kg = 2.20462 lb

    :param mkg: mass [kg]
    :return: mass [lb]
    """
    return mkg * 2.20462
def istablaset(a):
    """Return True/False if *a* appears to fulfil the TablaSet duck-typed interface."""
    required = ('_tablarrays', '_ts', 'keys', 'bcast')
    return all(hasattr(a, attr) for attr in required)
def is_none(obj):
    """Return True when *obj* is the None singleton.

    Args:
        obj : The object.

    Returns:
        bool: Whether *obj* is None.
    """
    # `is None` already evaluates to a bool; the former bool(...) wrapper
    # was redundant.
    return obj is None
def get_dbot_score(verdict):
    """
    Evaluate the dbot (Demisto) score as per verdict from SlashNext cloud API

    :param verdict: SlashNext verdict on a certain IoC
    :return: Dbot score
    """
    # Unknown verdicts score 0 (the lookup default).
    scores = {
        'Malicious': 3,
        'Suspicious': 2,
        'Benign': 1,
        'Redirector': 1,
    }
    return scores.get(verdict, 0)
def lin_portfolio(
    q1: float, q2: float, c1: float = 2, c2: float = 1, *args: float
) -> float:
    """Linear function with analytic EE solution for the next test."""
    term_one = c1 * q1
    term_two = c2 * q2
    return term_one + term_two
def _fibonacci_impl(n: int) -> int:
"""Recursive implementation of the Fibonacci sequence."""
if n <= 1:
return n
else:
return _fibonacci_impl(n - 1) + _fibonacci_impl(n - 2) |
def observable_product(*observables):
    """
    Merge the input observables into their product-observable.

    If the observable conditions are contradicting, returns None. For the
    format of the observables, see the docstring of `probability_table`.
    """
    merged = {}
    for observable in observables:
        for key, value in observable.items():
            # setdefault inserts on first sight and returns the stored value;
            # a mismatch with an earlier value means a contradiction.
            if merged.setdefault(key, value) != value:
                return None
    return merged
def _rel_window_to_abs_window(el_min, el_max, w_0, w_sz=None):
"""Convert min/max eigenvalues and relative window to absolute values.
Parameters
----------
el_min : float
Smallest eigenvalue.
el_max : float
Largest eigenvalue.
w_0 : float [0.0 - 1.0]
Relative window centre.
w_sz : float, optional
Relative window width.
Returns
-------
l_0[, l_min, l_max]:
Absolute value of centre of window, lower and upper intervals if a
window size is specified.
"""
el_range = el_max - el_min
el_w_0 = el_min + w_0 * el_range
if w_sz is not None:
el_w_min = el_w_0 - w_sz * el_range / 2
el_w_max = el_w_0 + w_sz * el_range / 2
return el_w_0, el_w_min, el_w_max
return el_w_0 |
def flatten_dict_data(data, fun="{}/{}".format):
    """Flatten nested dict/list/tuple data into a flat dict.

    Result keys are produced by ``fun(outer_key, inner_key)``.

    Args:
        data: arbitrarily nested dict/list/tuple structure (or a scalar).
        fun: two-argument callable building a combined key string.

    Returns:
        A flat dict when *data* is a container, otherwise *data* unchanged.
    """
    def dict_gen(dat):
        return dat.items()
    def list_gen(dat):
        return enumerate(dat)
    if isinstance(data, (dict, list, tuple)):
        ret = {}
        gen_1 = dict_gen if isinstance(data, dict) else list_gen
        for i, data_i in gen_1(data):
            # Bug fix: propagate `fun` into the recursion so a custom key
            # builder applies at every nesting level, not just the outermost.
            tmp = flatten_dict_data(data_i, fun)
            if isinstance(tmp, (dict, list, tuple)):
                gen_2 = dict_gen if isinstance(tmp, dict) else list_gen
                for j, tmp_j in gen_2(tmp):
                    ret[fun(i, j)] = tmp_j
            else:
                ret[i] = tmp
        return ret
    return data
def is_same_class(obj, a_class):
    """
    Tell whether *obj* is exactly an instance of *a_class*.

    Args:
        obj (object any type): The object to analyze.
        a_class (object any type): The reference class.

    Returns:
        True only on an exact type match — subclasses do not count;
        otherwise False.
    """
    return a_class is type(obj)
def strToBool(string):
    """
    _strToBool_

    Try to convert a string to boolean, i.e. "True" to python True.

    Booleans pass through unchanged (note: 0 and 1 also satisfy the
    pass-through check via ``==``, exactly as before).  String matching is
    now fully case-insensitive, generalizing the previous fixed list of
    "True"/"true"/"TRUE" (and the false equivalents).

    Raises:
        ValueError: for any value that cannot be interpreted.
    """
    if string in [False, True]:
        return string
    if isinstance(string, str):
        lowered = string.lower()
        if lowered == "true":
            return True
        if lowered == "false":
            return False
    raise ValueError("Can't convert to bool: %s" % string)
def smallest_first(L):
    """
    Rotates a list in place so that its smallest element appears first.

    Arguments:
        `L`: A list of integers, no two of them equal
    Returns:
        The same list, rotated so that its smallest element comes first.

    Example: smallest_first([46,41,28]) returns [28,46,41]
    Example: smallest_first([4,2,1]) returns [1,4,2]
    Example: smallest_first([9,8,7,6,5,4,3,2,1]) returns [1,9,8,7,6,5,4,3,2]
    """
    if not L:
        # Empty input: nothing to rotate (the old version raised IndexError).
        return L
    # Find the minimum once and rotate with a single O(n) slice assignment;
    # the old append/remove loop recomputed min(L) every step, O(n**2) total.
    pivot = L.index(min(L))
    L[:] = L[pivot:] + L[:pivot]
    return L
def consolidate_outputs(bundles, prefix):
    """
    Consolidates values of multiple outputs into one array for the new
    output.
    """
    consolidated = {}
    for bundle in bundles:
        for output in bundle.outputs:
            name = output['name']
            # e.g. prefix='my', name='result' -> 'myResults'
            merged_name = '{}{}{}s'.format(prefix, name[0].upper(), name[1:])
            entry = consolidated.setdefault(merged_name,
                                            {'name': merged_name, 'value': []})
            entry['value'].append(output['value'])
    # We sort the output by key to guarantee deterministic results. This makes
    # DM's Python3 compatibility checker less grumpy.
    return [entry for _, entry in sorted(consolidated.items())]
def _get_all_imports(d):
"""
Return list of all the imports
"""
# 1. No local variables
# 2.`from Module import Something`, puts Module in
# the namespace. We do not want that
import types
lst = [name for name, obj in d.items()
if not (name.startswith('_') or
isinstance(obj, types.ModuleType))]
return lst |
def transform_bbox(x, y, w, h, nw, nh, ow, oh):
    """
    Rescale an (x, y, w, h) bounding box from an nw*nh frame into an ow*oh one.

    :return (int,int,int,int)
    """
    # Each coordinate solves x / nw = xp / ow, truncated to int.
    scaled_x = int(ow * x / nw)
    scaled_y = int(oh * y / nh)
    scaled_w = int(ow * w / nw)
    scaled_h = int(oh * h / nh)
    return (scaled_x, scaled_y, scaled_w, scaled_h)
def get_ranges(i, j, n):
    """
    (int, int, int) -> list of ranges

    Return n interleaved ranges that together cover i..j-1: the k-th range
    starts at i+k and steps by n.
    """
    return [range(i + offset, j, n) for offset in range(n)]
def count_failures(trajectory):
    """Count the failed frames in a tracking trajectory.

    A frame counts as failed when its result is the single-element bbox [2.].

    Args:
        trajectory (list[ndarray]): list of tracking results.

    Returns:
        int: the number of failed frames in the trajectory.
    """
    return sum(1 for bbox in trajectory if len(bbox) == 1 and bbox[0] == 2.)
def sort_links(links):
    """Sorts endpoint links alphabetically by their href"""
    def href_of(link):
        return link["href"]
    return sorted(links, key=href_of)
def merge_sets(*zs):
    """Merge one or more sets into one without modifying any argument set.

    >>> merge_sets(set([1,2,3]), set([3,4,5]))
    {1, 2, 3, 4, 5}
    """
    # union() on a fresh empty set never mutates its arguments.
    return set().union(*zs)
def lorentz_sd(omega, omega_0, gamma):
    r"""
    Lorentz function for the odd spectral density \tilde{C}''(\omega).

    Parameters
    ----------
    omega : array-like, floats
        Angular frequencies
    omega_0 : float
        Vibrational oscillation frequency
    gamma : float
        Dissipation gamma

    Notes
    -----
    From V. Butkus, L. Valkunas, and D. Abramavicius, J. Chem. Phys. 137, 8231
    (2012).
    """
    # Numerator and Lorentzian denominator, kept in the original operation
    # order so floating-point results match exactly.
    numer = omega * gamma
    shifted = omega ** 2 - omega_0 ** 2 - gamma ** 2
    denom = shifted * shifted + numer * numer * 4
    return numer / denom * omega_0 ** 3 * 4
def parse_data_config(path):
    """
    Function:
        Parses the data configuration file of ``key=value`` lines.
        Blank lines and lines starting with '#' are ignored; defaults for
        'gpus' and 'num_workers' are pre-populated and may be overridden.
    Arguments:
        path -- path of data configuration file
    Returns:
        options -- dictionary of data configurations options
    """
    options = dict()
    options['gpus'] = '0,1,2,3'
    options['num_workers'] = '10'
    with open(path, 'r') as fp:
        for line in fp:
            line = line.strip()
            if line == '' or line.startswith('#'):
                continue
            # Split on the first '=' only: values may themselves contain '='
            # (URLs, base64, ...) — the old unbounded split raised ValueError
            # on such lines.
            key, value = line.split('=', 1)
            options[key.strip()] = value.strip()
    return options
def create_sets_of_20_from_x_for_pairwise_comparisions(X):
    """
    Create sets of 20 to denote each timestep for all schedules.

    :return: list(range(0, len(X), 20))
    """
    return [start for start in range(0, len(X), 20)]
def parse_binsizes(map, filetype):
    """Return the binsizes needed by *filetype* from the preprocessing map."""
    collected = set()
    for windowsize, bins in map.items():
        # "variable" windows carry no fixed binsizes.
        if windowsize == "variable":
            continue
        collected.update(bins[filetype])
    return list(collected)
def cleanup_amendment(json_data):
    """Delete empty amendment add/remove arrays.

    Args:
        json_data: the amendment payload dict.

    Returns:
        The same dict with every empty add*/delete* collection key removed.
    """
    # One loop over the four collection kinds and two prefixes replaces the
    # eight near-identical if-statements.
    for kind in ('VehicleCollateral', 'GeneralCollateral',
                 'SecuredParties', 'Debtors'):
        for prefix in ('add', 'delete'):
            key = prefix + kind
            if key in json_data and not json_data[key]:
                del json_data[key]
    return json_data
def weighted_percentile(a, percentile=None, weights=None):
    """
    O(nlgn) implementation for weighted_percentile.

    Args:
        a: array-like of sample values.
        percentile: array-like of requested percentiles in [0, 100].
        weights: optional array-like of per-sample weights; defaults to
            uniform weights of 1 (plain percentiles).

    Returns:
        numpy array with one weighted-percentile value per entry of
        ``percentile``.
    """
    import numpy as np
    a = np.array(a)
    # Convert percentiles from [0, 100] to fractions of the weight mass.
    percentile = np.array(percentile) / 100.0
    if weights is None:
        weights = np.ones(len(a))
    else:
        weights = np.array(weights)
    # Sort the samples and carry their weights along.
    a_indsort = np.argsort(a)
    a_sort = a[a_indsort]
    weights_sort = weights[a_indsort]
    # Empirical CDF of the weighted samples.
    ecdf = np.cumsum(weights_sort)
    percentile_index_positions = percentile * (weights.sum() - 1) + 1
    # need the 1 offset at the end due to ecdf not starting at 0
    locations = np.searchsorted(ecdf, percentile_index_positions)
    out_percentiles = np.zeros(len(percentile_index_positions))
    for i, empiricalLocation in enumerate(locations):
        # iterate across the requested percentiles
        if ecdf[empiricalLocation - 1] == np.floor(percentile_index_positions[i]):
            # i.e. is the percentile in between 2 separate values
            # Linear interpolation between the two straddling samples.
            uppWeight = percentile_index_positions[i] - ecdf[empiricalLocation - 1]
            lowWeight = 1 - uppWeight
            out_percentiles[i] = a_sort[empiricalLocation - 1] * lowWeight + a_sort[empiricalLocation] * uppWeight
        else:
            # i.e. the percentile is entirely in one bin
            out_percentiles[i] = a_sort[empiricalLocation]
    return out_percentiles
def caselessSort(alist):
    """Return a sorted copy of a list. If there are only strings
    in the list, it will not consider case (falling back to a plain sort
    when the case-insensitive comparison raises TypeError).
    """
    def caseless_key(item):
        # Secondary key keeps a deterministic order among case-variants.
        return (item.lower(), item)
    try:
        return sorted(alist, key=caseless_key)
    except TypeError:
        return sorted(alist)
def elite_selection(population, fitness, n_elites):
    """
    Elite selection from population.

    Args:
        population: sequence of individuals.
        fitness: sequence of fitness values, parallel to *population*.
        n_elites: number of individuals to keep.

    Returns:
        list of the top-n_elites individuals, fittest first.
    """
    # Sort on fitness only: the old bare tuple sort also compared the
    # individuals themselves on fitness ties, which raised TypeError for
    # non-comparable genomes (dicts, numpy arrays, ...).
    ranked = sorted(zip(fitness, population), key=lambda pair: pair[0],
                    reverse=True)
    return [individual for _, individual in ranked[:n_elites]]
def check_meeting_url(sig_info, errors):
    """
    Check meeting_url

    :param sig_info: content of sig-info.yaml
    :param errors: errors count
    :return: errors
    """
    if 'meeting_url' in sig_info:
        print('Check meeting_url: PASS')
    else:
        print('ERROR! meeting_url is a required field')
        errors += 1
    return errors
def get_fuel_for_mass(mass):
    """
    Fuel required to launch a given module is based on its mass.

    Specifically, to find the fuel required for a module, take its mass,
    divide by three, round down, and subtract 2 (never going below zero).

    >>> get_fuel_for_mass(12)
    2
    >>> get_fuel_for_mass(14)
    2
    >>> get_fuel_for_mass(1969)
    654
    >>> get_fuel_for_mass(100756)
    33583
    >>> get_fuel_for_mass(2)
    0
    >>> get_fuel_for_mass(5)
    0
    """
    fuel = int(mass / 3) - 2
    return fuel if fuel > 0 else 0
def selection_sort(lst):
    """Sort *lst* in place with selection sort and return it."""
    size = len(lst)
    for fill in range(size - 1):
        # Find the smallest remaining element and swap it into position.
        best = fill
        for candidate in range(fill + 1, size):
            if lst[candidate] < lst[best]:
                best = candidate
        lst[fill], lst[best] = lst[best], lst[fill]
    return lst
def split_up_key_and_value(key_value_string):
    """Split up 'key=value' into 'key' and 'value'.

    Splits on the first '=' only, so values that themselves contain '=' are
    kept intact — the old unbounded split silently dropped everything after
    a second '='.  Returns (string, None) when there is no '=' at all.
    """
    key, sep, value = key_value_string.partition('=')
    return (key, value if sep else None)
def find_pair_in_hits(
    hits,
    pair,
    max_separation = None,
    separation = None
):
    """Finds the locations where a pair of TFs bind in a sequence of hits

    hits: The hits (each with .binder and .location attributes)
    pair: A tuple ( binder1, binder2, orientation1, orientation2 )
    max_separation: If specified determines maximum separation
    separation: If specified determines exact separation (overrides max_separation)

    returns a sequence of pairs of hits that satisfy the criteria
    """
    ( binder1, binder2, orientation1, orientation2 ) = pair
    result = [ ]
    # Examine every ordered (h1, h2) combination whose binders match the pair.
    for h1 in hits:
        if binder1 != h1.binder: continue
        for h2 in hits:
            if binder2 != h2.binder: continue
            # Overlapping hits can never form a valid pair.
            if h1.location.overlap( h2.location ): continue
            distance = h1.location.separation( h2.location )
            if None != separation and separation != distance: continue
            if None != max_separation and max_separation < distance: continue
            if h1.location.position < h2.location.position:
                # h1 upstream of h2: strands must match the requested
                # orientations directly.
                if (
                    h1.location.positive_strand != orientation1
                    or
                    h2.location.positive_strand != orientation2
                ): continue
            else:
                # h2 upstream of h1: the orientation requirements are
                # mirrored (note the == comparisons below).
                if (
                    h1.location.positive_strand == orientation1
                    or
                    h2.location.positive_strand == orientation2
                ): continue
            result.append( ( h1, h2 ) )
    return result
def mtx_zip_url(expression_accession):
    """Build the URL for the zipped mtx expression data of an EBI accession id."""
    base = "https://www.ebi.ac.uk/gxa/sc/experiment/%s/download/zip?fileType=quantification-filtered&accessKey="
    return base % expression_accession
def difference(test_stat, ctrl_stat):
    """Calculates difference change. A good default.

    Args:
        test_stat: numpy array of test statistics
        ctrl_stat: numpy array of control statistics

    Returns:
        test_stat - ctrl_stat
    """
    delta = test_stat - ctrl_stat
    return delta
def dist(tuple1, tuple2):
    """Manhattan distance between two 2-D points."""
    dx = tuple1[0] - tuple2[0]
    dy = tuple1[1] - tuple2[1]
    return abs(dx) + abs(dy)
def lcs(strA, strB):
    """Determine the length of the Longest Common Subsequence of 2 strings.

    Uses bottom-up dynamic programming in O(len(strA)*len(strB)) time and
    O(len(strB)) space, replacing the naive double recursion, which was
    exponential in the input length.
    """
    if len(strA) == 0 or len(strB) == 0:
        return 0
    # prev[j] holds the LCS length of strA-prefix vs strB[:j].
    prev = [0] * (len(strB) + 1)
    for a_char in strA:
        curr = [0]
        for j, b_char in enumerate(strB, start=1):
            if a_char == b_char:
                curr.append(prev[j - 1] + 1)
            else:
                curr.append(max(prev[j], curr[-1]))
        prev = curr
    return prev[-1]
def is_module(node):
    """Returns whether node is a module node (its class is named "Module")."""
    node_kind = node.__class__.__name__
    return node_kind == "Module"
def shortened_cmd(cmd: str, length: int) -> str:
    """Return *cmd* wrapped in backticks, truncated with '...' to fit *length*."""
    fits = len(cmd) + 2 <= length
    if fits:
        return f'`{cmd}`'
    # Reserve 5 characters for the two backticks and the ellipsis.
    return f'`{cmd[:length - 5]}...`'
def rectify(X):
    """Rectified linear activation function to provide non-linearity for NNs.

    Faster implementation using abs() suggested by Lasagne.
    """
    positive_part = X + abs(X)
    return positive_part / 2
def get_permutation(rng, payload_size, image_size):
    """Return the first *payload_size* entries of a Fisher-Yates shuffle.

    Keyword arguments:
    rng -- a random number generator seeded with a password
    payload_size -- the number of elements of the permutation needed
    image_size -- the range of indices to permute over

    Shuffles the numbers 0..image_size inclusive and returns the first
    payload_size elements; returns 0 when payload_size exceeds that range.
    """
    permutation = list(range(image_size + 1))
    for idx in range(payload_size):
        try:
            swap_with = rng.randrange(idx, image_size + 1)
        except ValueError:
            # randrange raises once idx > image_size, i.e. the payload is
            # larger than the available index range.
            return 0
        permutation[idx], permutation[swap_with] = \
            (permutation[swap_with], permutation[idx])
    # Need only the first payload_size elements.
    return permutation[:payload_size]
def as_bool(value):
    """Interpret a string as a boolean.

    :param str value: value to convert to a bool
    :return bool: the bool representation
    :raises ValueError: when the string matches neither form
    """
    lowered = value.lower()
    if lowered in ('yes', 'on', 'true', '1'):
        return True
    if lowered in ('no', 'off', 'false', '0'):
        return False
    raise ValueError("'{0}' is not a boolean representation".format(value))
def make_successive(xs):
    """
    Return a list of successive combinations

    Parameters
    ----------
    xs : list
        List of elements, e.g. [X, Y, Z]

    Returns
    -------
    list
        List of combinations where each combination include all the preceding elements

    Examples
    --------
    >>> make_successive(['W', 'X', 'Y', 'Z'])
    [['W'], ['W', 'X'], ['W', 'X', 'Y'], ['W', 'X', 'Y', 'Z']]
    """
    prefixes = []
    for end in range(1, len(xs) + 1):
        prefixes.append(xs[:end])
    return prefixes
def Clipped(value, l, u):
    """Return a clipped value between lower bound `l` and upper bound `u` (closed).

    Args:
        value: A value.
        l: The lower bound.
        u: The upper bound.

    Returns:
        Any: The clipped value.
    """
    # Apply the lower bound first, then the upper bound (so u wins when l > u,
    # exactly like min(max(value, l), u)).
    lower_bounded = max(value, l)
    return min(lower_bounded, u)
def get_raw_of_filtered_dataset(
        dat: str,
        datasets_filt_map: dict) -> str:
    """Map a filtered dataset name back to its raw dataset name.

    Parameters
    ----------
    dat : str
        Dataset name
    datasets_filt_map : dict
        Mapping filtered dataset name -> raw dataset name

    Returns
    -------
    str
        Raw dataset name, or *dat* itself when it is not a filtered name.
    """
    # dict.get with a default replaces the explicit membership test + branch.
    return datasets_filt_map.get(dat, dat)
def generate_invite(client_id, permissions_bits=None):
    """
    Generates an invite URL.

    :param client_id: The client ID to use. This is a snowflake.
    :param permissions_bits: optional. This should be a bitfield of permissions to require, if specified.
        See https://discordapp.com/developers/docs/topics/permissions#permissions-bitwise-permission-flags for
        info on these bitfields.
    """
    base = f'https://discordapp.com/oauth2/authorize?&client_id={client_id}&scope=bot'
    if permissions_bits is None:
        return base
    return base + f'&permissions={permissions_bits}'
def max_len(iterable, minimum=0):
    """Return the len() of the longest item in ``iterable`` or ``minimum``.

    >>> max_len(['spam', 'ham'])
    4
    >>> max_len([])
    0
    >>> max_len(['ham'], 4)
    4
    """
    # Folding `minimum` into the candidates covers both the empty-iterable
    # case and the "all items shorter than minimum" case in one max() call.
    lengths = [len(item) for item in iterable]
    return max(lengths + [minimum])
def first(xs):
    """
    Return the first element of *xs*, or None when it is empty (or falsy).
    """
    return xs[0] if xs else None
def _long_hash(istream):
    """
    Produces a hash-like value which is longer than the usual hash value.

    As currently implemented, returns a 4-tuple of regular width hashes.
    These are used internally to have an acceptably low chance of collisions.
    It does not make any promise of cryptographic security,
    only protection against failure in regular use.

    :param istream: an iterable of hashable values, absorbed in order.
    :return: a 4-tuple of ints.
    """
    # Six lanes of internal state; only the first four survive into the result.
    state = [0]*6
    key = 0
    def _mix(state, value, key):
        """
        Internal mixing function.
        Absorbs a new value, then does a mixing round.
        Designed to be inexpensive but provide sufficient mixing.
        """
        # XOR the new value into lane 0, cascade hashes down the lanes,
        # then fold the last lane back into lane 0.
        state[0] ^= hash(value)
        for n in range(5):
            state[n+1] ^= hash((state[n], key, n))
        state[0] ^= hash((state[5], key, 5))
    # absorb all values; `key` acts as a position counter so reordered
    # inputs produce different digests.
    for value in istream:
        _mix(state, value, key)
        key += 1
    # pad/finalize (key=-2 distinguishes the final round from data rounds)
    _mix(state, 0, -2)
    # truncate result
    return tuple(state)[0:4]
def _get_usd_row_cells(trs):
    """
    Get the table cells from the webpage we need for the exchange rate data.

    :param trs: browser.get_current_page().find_all("tr")
    :return: list(): a list of cells
    """
    cells = []
    for tr in trs:
        # Skip rows without a leading <td>.
        if tr.td:
            if 'USD' in tr.td.text:
                # The siblings of the 'USD' label cell hold the rate values.
                usd_row = tr.td.next_siblings
                if usd_row:
                    for cell in usd_row:
                        # NOTE(review): siblings containing '\n' are presumably
                        # whitespace/formatting nodes, not data cells — confirm
                        # against the scraped page structure.
                        if '\n' not in cell:
                            cells.append(cell)
    return cells
def is_mtu_valid(mtu):
    """Determine whether a mtu is valid (an int-convertible value in [576, 9216])."""
    try:
        numeric = int(mtu)
    except (ValueError, TypeError):
        return False
    return 576 <= numeric <= 9216
def parse_changes(change_list):
    """
    Parse "old -> new" changelog entries into structured modification pairs.

    Takes in a section and item parsed dictionary and parses on > forming a
    list. It works as the following:
    To move an item to another section or item name
        section/item -> new_section/new_item
        section/item -> Removed

    Args:
        change_list: List of modifcations that occured to the config

    Returns:
        result: a list of a list length 2 containing of list length 4
            representing section item property value which are all "any"
            by default.

    Raises:
        ValueError: when an entry does not contain "->".
    """
    results = []
    for i, s in enumerate(change_list):
        if "->" in s:
            # Normalise both sides: lower-case and strip whitespace.
            changes = [c.lower().strip() for c in s.split("->")]
        else:
            raise ValueError("Invalid changelog syntax at line "
                             " {}. A changelog entry must follow"
                             " <section>/<item>/<property>/<value> ->"
                             " <section>/<item>/<property>/<value> or REMOVED"
                             "".format(i))
        # build out an any list and populate it
        final_changes = []
        for c in changes:
            # Default every slot to the wildcard "any"; explicit components
            # overwrite from the left.
            mods = ["any" for i in range(4)]
            mod_lst = []
            if "/" in c:
                mod_lst = c.split("/")
            # Catch singular sections referenced
            else:
                mod_lst.append(c)
            mods[0:len(mod_lst)] = mod_lst
            final_changes.append(mods)
        results.append(final_changes)
    return results
def multiply_regular(data):
    """
    Multiplies all elements in passed sequence.

    You may assume that all elements in passed argument is numeric.

    >>> multiply_regular([3, 6, 9])
    162
    >>> multiply_regular(range(10))
    0
    >>> multiply_regular(range(1, 10))
    362880
    >>> multiply_regular((8.2, 6.3, 13.1))
    676.746
    >>> multiply_regular(tuple())
    1

    Args:
        data: iterable or sequence with numbers
    Returns:
        Multiplication of all elements in passed args, as single number
    """
    # Accumulate left-to-right starting from the multiplicative identity,
    # so an empty input yields 1.
    product = 1
    for value in data:
        product *= value
    return product
def y_eq_func(ylag,pilag,v,s,slag,alpha,h,b,phi,gamma):
    """ equilibrium value for output

    Args:
        ylag (float): lagged output
        pilag (float): lagged inflation
        v (float): demand disturbance
        s (float): supply disturbance
        slag (float): lagged supply disturbance
        alpha (float): sensitivity of demand to real interest rate
        h (float): coefficient on inflation in Taylor rule
        b (float): coefficient on output in Taylor rule
        phi (float): degree of stickiness in inflation expectations
        gamma (float): effect of output on inflation in SRAS

    Returns:
        (float): equilibrium value for output
    """
    # Closed-form solution of the AD/SRAS system for current output.
    # NOTE(review): presumably derived by substituting the SRAS curve into the
    # AD curve and solving for y — confirm against the accompanying model notes.
    return 1/(alpha*b+alpha*gamma*h+1)*(-pilag*alpha*h+alpha*gamma*h*phi*ylag+alpha*h*phi*slag-alpha*h*s+v)
def convertDate(dateData):
    """ Convert time data for plotting:
    seconds to hours
    """
    # Two successive divisions kept (seconds -> minutes -> hours) so the
    # floating-point result matches the original exactly.
    minutes = dateData / 60
    return minutes / 60
def normalize_international_phone_number(number: str) -> str:
    """Clean phone number and make sure it's + prefixed.

    :param number: Hand typed international phone number like +1 (555) 123-1234
    :return: Raw phone number like +15551231234
    :raises TypeError: if *number* is not a string
    """
    # Explicit raise instead of assert: asserts are stripped under `python -O`,
    # which would let non-str input fail later with a confusing error.
    if not isinstance(number, str):
        raise TypeError("number must be a str, got %r" % type(number))
    digits = "+0123456789"
    number = "".join([digit for digit in number if digit in digits])
    # International 00 prefix
    # NOTE(review): replacing "00" with "+1" injects a US country code even
    # though 00-prefixed numbers already carry their own country code;
    # confirm whether a bare "+" was intended.  Behavior preserved as-is.
    if number.startswith("00"):
        number = "+1" + number[2:]
    # Assume country code without + prefix
    if not number.startswith("+"):
        number = "+" + number
    return number
def last(seq):
    """
    Description
    ----------
    Return the last value in the sequence.

    Parameters
    ----------
    seq : (list or tuple or string) - sequence to return last value of

    Returns
    ----------
    any - the last value in the sequence, or None when it is empty

    Raises
    ----------
    TypeError - when *seq* is not a list, tuple, or string

    Example
    ----------
    >>> lst = [1, 2, 3, 4, 5]
    >>> last(lst)
    -> 5
    """
    if not isinstance(seq, (list, tuple, str)):
        raise TypeError("param 'seq' must be a list, tuple, or string")
    return seq[-1] if len(seq) else None
def strip_spaces(value, sep=None, join=True):
    """Cleans trailing whitespaces and replaces also multiple whitespaces with a single space."""
    pieces = [piece.strip() for piece in value.strip().split(sep)]
    if not join:
        return pieces
    # With no explicit separator, rejoin with a single space.
    return (sep or " ").join(pieces)
def removeInvalidParentheses(s):
    """
    Return every string obtainable by deleting the minimum number of
    parentheses so that *s* becomes balanced.

    :type s: str
    :rtype: List[str]

    The forward pass removes surplus ')' characters; the mirrored backward
    pass then removes surplus '(' characters.  The stray debug ``print``
    in the original has been removed: library code must not write to stdout.
    """
    removed = 0
    results = {s}
    count = {"(": 0, ")": 0}
    # Forward pass: when a ')' would unbalance the prefix, branch on every
    # candidate ')' that could be deleted up to this point.
    for i, c in enumerate(s):
        if c == ")" and count["("] == count[")"]:
            new_results = set()
            while results:
                result = results.pop()
                for j in range(i - removed + 1):
                    if result[j] == ")":
                        new_results.add(result[:j] + result[j + 1:])
            results = new_results
            removed += 1
        else:
            if c in count:
                count[c] += 1
    # Backward pass: symmetric treatment for unmatched '(' scanning
    # right-to-left over the already-shortened candidates.
    count = {"(": 0, ")": 0}
    i = len(s)
    ll = len(s) - removed
    for ii in range(ll - 1, -1, -1):
        i -= 1
        c = s[i]
        if c == "(" and count["("] == count[")"]:
            new_results = set()
            while results:
                result = results.pop()
                for j in range(ii, ll):
                    if result[j] == "(":
                        new_results.add(result[:j] + result[j + 1:])
            results = new_results
            ll -= 1
        else:
            if c in count:
                count[c] += 1
    return list(results)
def to_camel_case(text, split=" "):
    """
    Converts text to camel case, e.g. ("the camel is huge" => "theCamelIsHuge")

    Args:
        text (string): Text to be camel-cased
        split (char): Char to split text into

    An empty *text* (or one beginning with *split*) no longer raises
    IndexError — empty tokens simply contribute nothing.
    """
    tokens = text.split(split)
    if not tokens:
        return text
    # [:1] instead of [0] tolerates an empty first token.
    camel_case_text = tokens[0][:1].lower() + tokens[0][1:]
    for token in tokens[1:]:
        camel_case_text += token.capitalize()
    return camel_case_text
def getObjectTuple(obj1, obj2, obj3, obj4):
    """
    @function: getObjectTuple
    @description: bundles the four parameters into a tuple
    @param obj1 - first object to be returned
    @param obj2 - second object to be returned
    @param obj3 - third object to be returned
    @param obj4 - fourth object to be returned
    @return - a 4-tuple of the object parameters, in order
    """
    return (obj1, obj2, obj3, obj4)
def get_location_id(from_dict):
    """Extract site_id and/or aggregate_id from *from_dict* into a new dict."""
    location_dict = {}
    for key in ('site_id', 'aggregate_id'):
        if key in from_dict:
            location_dict[key] = from_dict.get(key)
    return location_dict
def decode_blind(sourcebraille):
    """Decode a '0'-separated string of Braille dot numbers into text.

    Each chunk between '0' separators lists the raised dot positions of one
    Braille cell (e.g. '1' -> 'a', '12' -> 'b').  Chunks that do not match
    a known cell are skipped.

    :param sourcebraille: string of dot-number groups separated by '0'
    :return: the decoded text, or 'NR' ("no result") if nothing decoded
    """
    blind = {
        '1': 'a',
        '12': 'b',
        '14': 'c',
        '145': 'd',
        '15': 'e',
        '124': 'f',
        '1245': 'g',
        '125': 'h',
        '24': 'i',
        '245': 'j',
        '13': 'k',
        '123': 'l',
        '134': 'm',
        '1345': 'n',
        '135': 'o',
        '1234': 'p',
        '12345': 'q',
        '1235': 'r',
        '234': 's',
        '2345': 't',
        '136': 'u',
        '1236': 'v',
        '2456': 'w',
        '1346': 'x',
        '13456': 'y',
        '1356': 'z',
        '': ' '
    }
    # Split on every '0' separator.  (The previous version capped the split
    # at 50 separators, silently leaving the tail of longer messages as one
    # undecodable chunk.)
    output = ''
    for chunk in sourcebraille.split('0'):
        # NOTE(review): the '' -> ' ' table entry is unreachable because
        # empty chunks are skipped here; confirm whether a double '0' was
        # meant to produce a space.
        if chunk != '' and chunk in blind:
            output += blind[chunk]
    # If nothing was decoded, signal "no result".
    if not output:
        output = 'NR'
    return output
def range_min_max(a, b):
    """Build a range running from the smaller to the larger of *a* and *b*."""
    lo = min(a, b)
    hi = max(a, b)
    return range(int(lo), int(hi))
def _normalize_string_values(field_values):
"""Normalize string values for passed parameters.
Normalizes parameters to ensure that they are consistent across queries where
factors such as case are should not change the output, and therefore not
require additional Telescope queries.
Args:
field_values: (list) A list of string parameters.
Returns:
list: A list of normalized parameters for building selectors from.
"""
return [field_value.lower() for field_value in field_values] |
def solution(blue_discs: int, total_discs: int) -> int:
    """
    Finds the number of blue discs in the first arrangement with over one
    trillion total discs, starting the recurrence from the given counts.

    The pair (blue, total) is advanced with the recurrence
    (3b + 2n - 2, 4b + 3n - 3), which generates successive exact solutions.

    :param blue_discs: starting number of blue discs
    :param total_discs: starting total number of discs
    :return: number of blue discs once the total exceeds 10**12

    >>> solution(15, 21)
    756872327473
    """
    target = 1000000000000
    while total_discs < target:
        # Advance both counts atomically with a tuple assignment.
        blue_discs, total_discs = (
            3 * blue_discs + 2 * total_discs - 2,
            4 * blue_discs + 3 * total_discs - 3,
        )
    return blue_discs
def _permutation_making_qubits_adjacent(qubit_indices, num_qubits):
"""Given an iterable of qubit indices construct a permutation such that they are
next to each other."""
return list(qubit_indices) + [
i for i in range(num_qubits) if i not in qubit_indices
] |
def generate_final_vocabulary(reserved_tokens, char_tokens, curr_tokens):
    """Build the final vocabulary from reserved, single-char, and counted tokens.

    Order: reserved tokens first, then single-character tokens
    (alphabetical), then remaining tokens by descending count with
    alphabetical tie-breaking.  Duplicates keep their first position.

    Args:
        reserved_tokens: list of strings (tokens) that must be included in vocab
        char_tokens: set of single-character strings
        curr_tokens: string to int dict mapping token to count

    Returns:
        list of strings representing final vocabulary
    """
    candidates = list(reserved_tokens)
    candidates.extend(sorted(char_tokens))
    # Descending count, alphabetical within equal counts: sort by token
    # first, then rely on sort stability when re-sorting by count.
    by_count = sorted(sorted(curr_tokens.items()),
                      key=lambda kv: kv[1], reverse=True)
    candidates.extend(token for token, _ in by_count)
    # De-duplicate while preserving first occurrence.
    seen = set()
    final_vocab = []
    for token in candidates:
        if token not in seen:
            seen.add(token)
            final_vocab.append(token)
    return final_vocab
def is_broadcastable_and_smaller(shp1, shp2):
    """
    Test if shape 1 can be broadcast to shape 2, not allowing the case
    where shape 2 has a dimension length 1.

    Shapes are compared from the trailing dimension backwards; each dim of
    shp1 must either equal the corresponding dim of shp2 or be 1.
    """
    return all(a == 1 or a == b for a, b in zip(shp1[::-1], shp2[::-1]))
def single_pulse_SCPI(pulsewidth, updown, high_voltage, low_voltage, channel = '1', *args, **kwargs):
    """
    Returns SCPI string that can be written to the pulse generator to put it in the correct state to apply a single pulse.

    args:
        pulsewidth (str): Pulsewidth. i.e. '10ns' allowed units {ns, us, ms, s}
        updown (str): Specify polarity. 'up' or 'down'.
        high_voltage (str): High voltage of pulse. i.e. '1000mv' allowed units {V, mv}
        low_voltage (str): Low voltage of pulse. i.e. '-1000mv' allowed units {V, mv}
        channel (str): Specify the output channel. '1' or '2'

    raises:
        ValueError: if any argument fails the unit/choice validation above.
    """
    # --- validation -------------------------------------------------------
    if pulsewidth[-2:] not in {'ns', 'us', 'ms'} and pulsewidth[-1] != 's':
        raise ValueError(f'pulsewidth {pulsewidth} not supported')
    if updown not in {'up', 'down'}:
        raise ValueError(f'updown {updown} not supported')
    if high_voltage[-2:].lower() != 'mv' and high_voltage[-1].lower() != 'v':
        raise ValueError(f'high_voltage {high_voltage} not supported')
    if low_voltage[-2:].lower() != 'mv' and low_voltage[-1].lower() != 'v':
        raise ValueError(f'low_voltage {low_voltage} not supported')
    if channel not in {'1', '2'}:
        raise ValueError(f'channel {channel} not supported')
    # --- command assembly -------------------------------------------------
    src = ':sour' + channel
    commands = ['outp' + channel + ':puls:mode sin']
    if updown == 'up':
        # high is programmed before low for an upward pulse
        commands += [src + ':inv off',
                     src + ':volt:lev:imm:high ' + high_voltage,
                     src + ':volt:lev:imm:low ' + low_voltage]
    else:
        # inverted output; low is programmed before high
        commands += [src + ':inv on',
                     src + ':volt:lev:imm:low ' + low_voltage,
                     src + ':volt:lev:imm:high ' + high_voltage]
    # 'puls1' addresses the first pulse because single-pulse mode is active
    commands.append(src + ':puls1:wid ' + pulsewidth)
    return ';'.join(commands) + ';'
def float_to_text(value, sig):
    """
    Convert float to text string for computing hash.
    Preserve up to N significant digits given by sig.

    :param value: the float value to convert
    :param sig: number of significant digits to keep
    :return: string representation of the value
    """
    if value == 0:
        # Collapse -0. to 0. so both hash to the same text.
        value = 0.
    return format(value, f'.{sig}g')
def x_www_form_urlencoded(post_data):
    """ convert origin dict to x-www-form-urlencoded
    @param post_data
        {"a": 1, "b":2}
    @return (str)
        a=1&b=2

    Non-dict input is passed through unchanged.

    NOTE(review): keys/values are joined verbatim, with no percent-encoding
    — confirm callers never pass values containing '&' or '='.
    """
    if not isinstance(post_data, dict):
        return post_data
    pairs = [f"{key}={value}" for key, value in post_data.items()]
    return "&".join(pairs)
def Merge(source, destination):
    """
    Recursively merge *source* into *destination*, mutating and returning
    *destination*.  Existing keys in *destination* always win; nested dicts
    are merged key-by-key, while lists and scalars are only copied over
    when the key is absent from *destination*.

    >>> a = { 'first' : { 'all_rows' : { 'pass' : 'dog', 'number' : '1' } } }
    >>> b = { 'first' : { 'all_rows' : { 'fail' : 'cat', 'number' : '5' } } }
    >>> Merge(b, a) == { 'first' : { 'all_rows' : { 'pass' : 'dog', 'fail' : 'cat', 'number' : '1' } } }
    True
    """
    for key, value in source.items():
        if isinstance(value, dict):
            # get node or create one, then merge recursively
            node = destination.setdefault(key, {})
            Merge(value, node)
        else:
            # setdefault is a no-op when the key already exists, which
            # covers both the list branch and the redundant
            # `if key in destination: pass` check of the original.
            destination.setdefault(key, value)
    return destination
def _make_number(ctx, param, value):
"""converts a number to float/int"""
if value:
try:
value = int(value)
except ValueError:
value = float(value)
return value |
def normalize_filename(name, ftype="markdown"):
    """Guess the filename based on a link target.

    Strips a single trailing '/' and appends '.md' for markdown targets,
    '.html' otherwise.  This function only deals with regular files.
    """
    base = name[:-1] if name.endswith("/") else name
    suffix = ".md" if ftype == "markdown" else ".html"
    return base + suffix
def rk4(rhs, initial, t_initial, t_final, dt):
    """Integrate an ODE with the classical fourth-order Runge-Kutta scheme.

    Inputs:
        - rhs: a callable that takes arguments (t, y)
        - initial: initial value
        - t_initial: initial time
        - t_final: final time
        - dt: step size (the last step is shortened to land on t_final)

    Returns:
        The solution computed at the final time.
    """
    t = t_initial
    y = initial
    while t < t_final:
        h = min(dt, t_final - t)  # clamp the final step to hit t_final
        k1 = rhs(t, y)
        k2 = rhs(t + h/2, y + h/2 * k1)
        k3 = rhs(t + h/2, y + h/2 * k2)
        k4 = rhs(t + h, y + h * k3)
        y = y + h / 6 * (k1 + 2 * k2 + 2 * k3 + k4)
        t += h
    return y
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.