content stringlengths 42 6.51k |
|---|
def _sort_cluster_dict(dictionary):
    """Order a three-entry dict of numeric sequences by natural order:
    minimum values < middle values < maximum values.

    :param dictionary: mapping of exactly three names to numeric sequences.
    :return: dict keyed 0 (min), 1 (middle), 2 (max) with the same sequences.
    """
    # +/- inf sentinels replace the old magic numbers (1000000 / 0), which
    # silently failed for values above 1e6 or for all-negative clusters.
    values = {
        "min": float("inf"),
        "min_name": None,
        "max": float("-inf"),
        "max_name": None,
        "middle_name": None
    }
    for name, seq in dictionary.items():
        lo = min(seq)
        hi = max(seq)
        if lo < values["min"]:
            values["min"] = lo
            values["min_name"] = name
        if hi > values["max"]:
            values["max"] = hi
            values["max_name"] = name
    # Whichever key is neither min nor max is the middle cluster.
    for name in dictionary:
        if name not in (values["min_name"], values["max_name"]):
            values["middle_name"] = name
    # Compose the output in order
    return {
        0: dictionary[values["min_name"]],
        1: dictionary[values["middle_name"]],
        2: dictionary[values["max_name"]],
    }
def test_equals(x):
"""
>>> x = testobj()
>>> result = test_equals(x)
>>> isinstance(result, plop)
True
>>> test_equals('hihi')
False
>>> test_equals('coucou')
True
"""
eq = x == 'coucou' # not every str equals returns a bool ...
return eq |
def strToList(sampleNonPartitionStr):
    """Split a comma-separated string (format "means,sds,concentration")
    into a list of its fields.

    :param sampleNonPartitionStr: str to split.
    :return: list of substrings.
    """
    return sampleNonPartitionStr.split(",")
def normalize_mac_address(mac_address=None):
    """
    Normalize a MAC address: upper-case the letters and insert colons
    every two characters when they are missing.

    Parameters
    ----------
    mac_address: str
        MAC address to normalize (None is passed through).

    Returns
    -------
    str: the normalized address, or None for None input.
    """
    if mac_address is None:
        return None
    mac = mac_address.upper()
    if ":" in mac:
        return mac
    # rebuild the address as colon-separated byte pairs
    pairs = (mac[pos:pos + 2] for pos in range(0, len(mac), 2))
    return ":".join(pairs)
def hs_parab_opti_step(x_n, x, u, u_n, F, dt, params, u_c):
    """
    Hermite–Simpson collocation residual; must equal zero for the implicit
    scheme to be fulfilled.

    Returns
    -------
    res : Numpy array or Casadi array
        Residue to minimize
    """
    f_start = F(x, u, params)
    f_end = F(x_n, u_n, params)
    # interpolated midpoint state and its dynamics
    x_mid = (x + x_n) / 2 + dt / 8 * (f_start - f_end)
    f_mid = F(x_mid, u_c, params)
    # Simpson quadrature of the dynamics minus the actual state step
    return x + dt / 6 * (f_start + 4 * f_mid + f_end) - x_n
def create_fitnesses(params_string):
    """Return a list of 1-tuples (0,), (1,), ... with one entry per
    ';'-separated parameter in `params_string`.

    :type params_string: str
    """
    count = len(params_string.split(";"))
    return [(index,) for index in range(count)]
def newline_list_of_strings(result_key, data, option_value=None):
    """Join a list of strings into one newline-separated string
    (result_key and option_value are unused, kept for signature parity)."""
    separator = "\n"
    return separator.join(data)
def _do_eval(match, exp):
    """Used internally to evaluate an expression.

    Evaluates `exp` with eval(), exposing `match` as the local name `m`
    and this module's globals as the global namespace.

    SECURITY NOTE: eval() executes arbitrary code — `exp` must never come
    from untrusted input.

    :param match: value bound to `m` during evaluation (presumably a
        re.Match object — TODO confirm with callers).
    :param exp: expression string to evaluate.
    :return: whatever the evaluated expression produces.
    """
    return eval(exp, globals(), {'m': match})
def dict_search_value_as_key(d, v):
    """smp_base.common.dict_search_value_as_key

    Search for value `v` in dict `d` and return its key `k` if found,
    otherwise None.
    """
    # iterate the items view directly; materializing it in a list was a
    # needless O(n) allocation
    for k_, v_ in d.items():
        if v == v_:
            return k_
    return None
def invert_dictionary(dictionary):
    """Invert a dictionary (values become keys, keys become values).

    NOTE: when values are not unique, only one of the mappings survives.

    Args:
        dictionary (dict): a dictionary.

    Returns:
        dict: the inverted dictionary.
    """
    inverted = {}
    for key, value in dictionary.items():
        inverted[value] = key
    return inverted
def dropdown_options(radio_value):
    """
    Build the dropdown option list for the given radio selection.

    :param radio_value: 'bar' or 'scatter'.
    :return: (options, default_value) — 'bar' offers Price/Area/Length/Width
        with 'Price' as default; anything else drops 'Price' and defaults
        to 'Area'.
    """
    if radio_value == 'bar':
        labels = ['Price', 'Area', 'Length', 'Width']
    else:
        labels = ['Area', 'Length', 'Width']
    options = [{'label': name, 'value': name} for name in labels]
    # the first label doubles as the default value
    return options, labels[0]
def get_map_dicts(entity_list):
    """
    Map identifiers to indices and vice versa.

    Args:
        - entity_list (list): entities (atoms, residues, etc.) to index.

    Returns:
        - Tuple (entity->index dict, index->entity dict).
    """
    ent2idx_dict = {}
    for position, entity in enumerate(entity_list):
        ent2idx_dict[entity] = position
    # derive the reverse map from the forward map so duplicate entities
    # keep only their surviving (last) index in both directions
    idx2ent_dict = {position: entity for entity, position in ent2idx_dict.items()}
    return (ent2idx_dict, idx2ent_dict)
def write_text_to_file(text, filename):
    """Write a string to a file, overwriting an existing file.

    :param text: string to write.
    :param filename: absolute path of the destination file.
    :return: True if successful, False otherwise.
    """
    try:
        # context manager guarantees the handle is closed even on error
        with open(filename, 'w') as f:
            f.write(text)
        return True
    except (OSError, TypeError):
        # narrowed from a bare `except`: I/O failures and non-string input
        # return False; anything else propagates
        return False
def factorial(n):
    """Return the factorial of a non-negative integer.

    :param n: non-negative integer.
    :return: n! (1 for n == 0 or n == 1).
    :raises ValueError: for negative input (previously recursed forever).
    """
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    if n <= 1:
        # covers the previously unhandled n == 0 case as well as n == 1
        return 1
    return n * factorial(n - 1)
def energy_to_channel(energy, offset=0., gain=10.):
    """
    Convert an energy to a channel number.

    :param energy: energy in eV.
    :param offset: energy of channel 0, in eV.
    :param gain: energy increment per channel, in eV.
    :return: channel number (floor division result).
    """
    shifted = energy - offset
    return shifted // gain
def reward2avg(rewards, lengths):
    """
    for episodic environment, change overall rewards to the average reward for each episode
    :param rewards: the vector of rewards (NOTE: mutated in place, see below)
    :param lengths: the vector of lengths for each episode
    :return: the average undiscounted return
    """
    #rewards = np.array(rewards)
    #lengths = np.array(lengths)
    # Walk the episode boundaries: after adding `length + 1`, total_length
    # points one element past the episode's last reward.
    total_length = -1
    for length in lengths:
        total_length += length + 1
        # Zeroes the element just past each episode boundary — presumably a
        # per-episode bootstrap/terminal entry appended to `rewards`.
        # NOTE(review): this mutates the caller's list and assumes `rewards`
        # holds length+1 entries per episode; confirm the layout with callers.
        rewards[total_length] = 0
    # Sum of the (partially zeroed) rewards divided by the episode count.
    return sum(rewards) * (1./len(lengths))
def is_natural_number(number, include_zero: bool = False) -> bool:
    """
    Return True when `number` is a natural number.

    Args:
        number: integer to check (non-int inputs yield False).
        include_zero (bool, optional): whether 0 counts as natural.
            Defaults to False.

    Returns:
        bool: True if `number` is a natural number.
    """
    if not isinstance(number, int):
        return False
    return number >= (0 if include_zero else 1)
def is_line_continuation(line):
    """Determine whether *line* ends with a line continuation marker.

    .properties values continue on the next line when terminated by a
    backslash (\\). Continuation is only valid for an odd number of
    trailing backslashes (an even number is N/2 escaped slashes).

    :param line: a properties line
    :type line: str
    :return: whether *line* ends with a continuation
    :rtype: Boolean
    """
    # rstrip removes exactly the trailing backslashes; the length delta
    # is their count (0 for the empty string)
    trailing_slashes = len(line) - len(line.rstrip("\\"))
    return trailing_slashes % 2 == 1
def subfinder_bool(mylist, pattern):
    """Return True when `pattern` occurs as a contiguous sublist of `mylist`.

    :param mylist: list to scan.
    :param pattern: non-empty list to search for.
    :return: bool
    """
    # the previous revision kept an unused `matches` accumulator; dropped
    plen = len(pattern)
    for start in range(len(mylist)):
        # cheap first-element check before the slice comparison
        if mylist[start] == pattern[0] and mylist[start:start + plen] == pattern:
            return True
    return False
def ts_candle_from_ts(ts, timeframe_int):
    """Floor a timestamp down to the start of its candle for the given
    timeframe (integer seconds)."""
    remainder = ts % timeframe_int
    return ts - remainder
def split_csv(value):
    """Split a comma-separated option string into stripped, non-empty chunks."""
    result = []
    for chunk in value.split(','):
        chunk = chunk.strip()
        if chunk:
            result.append(chunk)
    return result
def Comp(sequence):
    """Complement a DNA sequence, preserving case.

    :param sequence: string over ACGTN (either case).
    :return: the complemented string.
    :raises KeyError: on characters outside the complement table
        (same as the previous revision).
    """
    complement = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C',
                  'a': 't', 't': 'a', 'c': 'g', 'g': 'c',
                  'N': 'N', 'n': 'n'}
    # ''.join is O(n); the previous `cSeq += ...` loop was quadratic
    return ''.join(complement[base] for base in sequence)
def validFloat(someString):
    """
    Convert input to float.

    :return: the float value, or the sentinel -1.1 when conversion fails.
    """
    try:
        return float(someString)
    except (TypeError, ValueError):
        # narrowed from a bare `except`: only conversion failures return
        # the sentinel; unrelated errors propagate
        return -1.1
def scale_resolution(old_res, new_res):
    """Resize a resolution, preserving aspect ratio; the returned (w, h)
    fits inside new_res.

    See https://stackoverflow.com/a/6565988
    """
    old_width, old_height = old_res
    new_width, new_height = new_res
    if new_width / new_height > old_width / old_height:
        # target box is relatively wider: height limits the scaling
        return (old_width * new_height // old_height, new_height)
    # otherwise width is the limiting dimension
    return new_width, old_height * new_width // old_width
def inrange(value: int, down_limit: int, top_limit: int) -> bool:
    """
    Check if `value` lies in [down_limit, top_limit] (both inclusive).

    :return: bool
    """
    # `in range(...)` already evaluates to a bool; the previous
    # if/else returning True/False was redundant
    return value in range(down_limit, top_limit + 1)
def get_ydot(x, p_y):
    """Speed in the y direction from coordinate x and momentum p_y."""
    return p_y - x
def insertionSort(array):
    """Sort `array` in place using insertion sort and return it."""
    for position in range(1, len(array)):
        key = array[position]
        slot = position
        # shift larger elements one step right until key's slot is found
        while slot > 0 and array[slot - 1] > key:
            array[slot] = array[slot - 1]
            slot -= 1
        array[slot] = key
    return array
def flatten(input_container):
    """
    :param input_container: collection of lists and tuples with arbitrary nesting levels
    :return: a 1-D list of the input's contents, in depth-first order
    """
    flat = []
    # explicit iterator stack instead of recursion; order is preserved
    pending = [iter(input_container)]
    while pending:
        try:
            item = next(pending[-1])
        except StopIteration:
            pending.pop()
            continue
        if isinstance(item, (list, tuple)):
            pending.append(iter(item))
        else:
            flat.append(item)
    return flat
def pixels_between_points(v1, v2):
    """Return the list of interpolated integer points between two points,
    as if drawing the line on screen (midpoint / Bresenham-style).

    Based on code from John Carter, lecturer for comp3004 Graphics.

    :param v1: (x, y) start point
    :param v2: (x, y) end point
    :return: list of (x, y) tuples along the line

    Fixes over the previous revision:
    - the endpoint swap assigned `v2 = v1` instead of exchanging the points
    - the x-major branch set `YInc` but stepped with `Yinc` (always 0)
    - the x-major diagonal step used the never-computed `incrNE` (0)
      instead of the computed `incrEN`
    - the y-major step direction compared `v2[0] > v2[0]` (always False)
    """
    points = []
    dx = v2[0] - v1[0]
    dy = v2[1] - v1[1]
    if abs(dx) > abs(dy):
        # x-major: step x by 1 every iteration, y occasionally
        if dx < 0:
            v1, v2 = v2, v1  # draw left-to-right
        y_inc = 1 if v2[1] > v1[1] else -1
        dx = abs(dx)
        dy = abs(dy)
        d = 2 * dy - dx          # decision variable
        incr_e = 2 * dy          # east step
        incr_ne = 2 * (dy - dx)  # diagonal step
        x, y = v1[0], v1[1]
        x_end = v2[0]
        points.append((x, y))
        while x < x_end:
            if d <= 0:
                d += incr_e
            else:
                d += incr_ne
                y += y_inc
            x += 1
            points.append((x, y))
    else:
        # y-major: step y by 1 every iteration, x occasionally
        if dy < 0:
            v1, v2 = v2, v1  # draw bottom-to-top
        x_inc = 1 if v2[0] > v1[0] else -1
        dx = abs(dx)
        dy = abs(dy)
        d = 2 * dx - dy
        incr_n = 2 * dx
        incr_ne = 2 * (dx - dy)
        x, y = v1[0], v1[1]
        y_end = v2[1]
        points.append((x, y))
        while y < y_end:
            if d <= 0:
                d += incr_n
            else:
                d += incr_ne
                x += x_inc
            y += 1
            points.append((x, y))
    return points
def compute_max_h(chip_w, inst_x, inst_y):
    """Calculates maximum chip height.

    Parameters
    ----------
    chip_w : int
        Chip width.
    inst_x : list
        Circuits' widths.
    inst_y : list
        Circuits' heights.

    Returns
    -------
    max_h : int
        Maximum chip height (0 when there are no circuits).

    Raises
    ------
    ValueError
        If some circuit is wider than the chip (previously an infinite
        loop / max([]) crash).
    """
    max_h = 0
    # Materialize the pairs: a bare zip object is always truthy, so the
    # previous `while inst:` never recognized an empty input as empty.
    inst = list(zip(inst_x, inst_y))
    while inst:
        # Sort by width, then (stable) by height, both descending
        inst.sort(key=lambda tup: tup[0], reverse=True)
        inst.sort(key=lambda tup: tup[1], reverse=True)
        # Width left in the current shelf
        chip_cumulative = chip_w
        k = 0
        heights = []
        # Greedily pack circuits into the current shelf
        while k < len(inst):
            if inst[k][0] <= chip_cumulative:
                chip_cumulative -= inst[k][0]
                heights.append(inst[k][1])
                del inst[k]
            else:
                k += 1
        if not heights:
            raise ValueError("A circuit is wider than the chip width")
        # The shelf's height is its tallest circuit
        max_h += max(heights)
    return max_h
def aggregate_by_player_id(statistics, playerid, fields):
"""
Inputs:
statistics - List of batting statistics dictionaries
playerid - Player ID field name
fields - List of fields to aggregate
Output:
Returns a nested dictionary whose keys are player IDs and whose values
are dictionaries of aggregated stats. Only the fields from the fields
input will be aggregated in the aggregated stats dictionaries.
"""
dictt = {}
for dit in statistics:
dictt[dit[playerid]] = {}
for field in fields:
dictt[dit[playerid]][field] = 0
dictt[dit[playerid]][playerid] = dit[playerid]
# Everything is set up, only addition of fields is due:
#print(d)
for key, val in dictt.items():
for dit in statistics:
if key == dit[playerid]:
# print(True)
for stat in fields:
dictt[key][stat] += int(dit[stat])
return dictt |
def parse_input_args(input_str: str):
    """
    Parse a 'key1=value1, key2=value2' string into a dict; digit-only
    values are converted to int.

    :raises ValueError: on empty input or malformed key/value pairs.
    """
    if not input_str:
        raise ValueError("Empty input string: {}".format(input_str))
    output_dict = {}
    pairs = [segment.strip() for segment in input_str.split(",")]
    if not pairs:
        raise ValueError("Incorrect format: {}".format(input_str))
    for pair in pairs:
        try:
            key, value = pair.split("=")
        except ValueError as value_error:
            raise ValueError("Expected input format "
                             "'key1=value1, key2=value2' "
                             "but received {}".format(input_str)) \
                from value_error
        output_dict[key] = int(value) if value.isdigit() else value
    return output_dict
def _GetWinLinkRuleNameSuffix(embed_manifest):
    """Return the linking-rule suffix: "_embed" when manifest embedding is
    enabled, otherwise the empty string."""
    if embed_manifest:
        return "_embed"
    return ""
def offset_to_next_multiple(num, mult):
    """Amount needed to round `num` up to the next multiple of `mult`.

    Returns 0 (not `mult`) when `num` is already divisible, and 0 when
    `mult` is 0.
    """
    if mult == 0:
        return 0
    # (-num) % mult is the distance to the next multiple, 0 when aligned
    return (-num) % mult
def parse_gpg_version(gpg):
    """Extract gpg's version number (the third whitespace-separated token).

    Keyword arguments:
    gpg - the result of querying the version from gpg.
    """
    tokens = gpg.split()
    return tokens[2]
def timeEstimateDays(periodType, timeToElapse):
    """
    Convert (periodType, timeToElapse) to days: 'days' passes through,
    'weeks' multiplies by 7, anything else is treated as months (30 days).
    """
    period = periodType.lower()
    if period == "days":
        return timeToElapse
    if period == "weeks":
        return 7 * timeToElapse
    return 30 * timeToElapse
def contarPares(n):
    """Return the even numbers in [0, n) as a list.
    (Originally: 'Funcion que cuenta pares hasta n'.)"""
    return [value for value in range(n) if value % 2 == 0]
def firstStringInList(literalEntities, prefLanguage="en"):
    """
    From a list of literals, return the one in `prefLanguage`;
    if no language specification is available, return the first element.

    :param literalEntities: list of literal objects (may expose `.language`).
    :param prefLanguage: preferred language tag.
    :return: the matching literal, the first element, or "" for an empty list.
    """
    match = ""
    if len(literalEntities) == 1:
        match = literalEntities[0]
    elif len(literalEntities) > 1:
        for candidate in literalEntities:
            # getattr default avoids the AttributeError the old code raised
            # on entities without a `.language` attribute
            lang = getattr(candidate, 'language', None)
            if lang and lang == prefLanguage:
                match = candidate
        if not match:  # don't bother about language
            match = literalEntities[0]
    return match
def bin_coef_dp(n: int, k: int) -> int:
    """
    Binomial coefficient C(n, k) via dynamic programming.
    Recurrence: C(n, k) = C(n-1, k-1) + C(n-1, k)
    Time Complexity: O(n*k); Space Complexity: O(n*k)
    """
    table = [[0] * (k + 1) for _ in range(n + 1)]
    for row in range(n + 1):
        # C(row, 0) is always 1
        table[row][0] = 1
        for col in range(1, min(row, k) + 1):
            table[row][col] = table[row - 1][col - 1] + table[row - 1][col]
    return table[n][k]
def _next_power_of_two(x):
    """Smallest enclosing power of two for a positive number.

    Args:
        x: Positive float or integer (truncated to int before rounding).

    Returns:
        The next power-of-two integer >= int(x); 1 when x == 0.
    """
    if x == 0:
        return 1
    return 1 << (int(x) - 1).bit_length()
def conductivity_to_tan_delta(freq: float, conductivity: float,
                              real_permittivity: float) -> float:
    """Convert a conductivity value (S/m) to a loss tangent at a frequency.

    Args:
        freq: frequency in GHz at which to do the conversion.
        conductivity: conductivity in S/m.
        real_permittivity: real part of the complex relative permittivity.

    Returns:
        The loss tangent as a float.

    Raises:
        ZeroDivisionError: if the frequency (or permittivity) is zero.
    """
    denominator = real_permittivity * freq
    try:
        return 17.97591 * conductivity / denominator
    except ZeroDivisionError as error:
        raise ZeroDivisionError('Real part and frequency must be > 0'). \
            with_traceback(error.__traceback__)
def explode(lc_all):
    """Helper function for lattice_from_covers.

    Args:
        lc_all: list where entry `i` is an iterable of the indices covered
            by element `i` (its immediate lower covers).

    Returns:
        A list of sets: entry `i` holds all elements below element `i`
        (the transitive closure of the cover relation).

    NOTE(review): the algorithm assumes covers refer to lower indices so
    that by the time `i` is processed its covers' closures are complete —
    presumably guaranteed by lattice_from_covers; confirm before reuse.
    """
    n = len(lc_all)
    # start from the immediate covers; each set grows into the full down-set
    result = [set(i) for i in lc_all]
    # exploded[j] is True once result[j] already holds j's full down-set
    exploded = [False for _ in range(n)]
    for i in range(n):
        exploded[i] = True
        covers = result[i].copy()
        while covers:
            cover = covers.pop()
            if not exploded[cover]:
                # expand unexplored covers transitively
                covers.update(result[cover])
                exploded[cover] = True
            result[i].update(result[cover])
    return result
def capitalized(text: str) -> str:
    """Return `text` with its first character upper-cased; the rest of the
    string is left untouched (unlike str.capitalize)."""
    return text[:1].upper() + text[1:] if text else text
def _union(d1: dict, d2: dict) -> dict:
    """Merged copy of d1 and d2, d2 winning on key clashes
    (equivalent to d1 | d2 in py >= 3.9)."""
    merged = {**d1, **d2}
    return merged
def validate_type(value, type_, name=None):
    """Validate that `value` is an instance of `type_`.

    Args:
        value: the value to validate.
        type_: the required type.
        name: argument name, used only to format the error message.

    Returns:
        `value` unchanged when it has the requested type.

    Raises:
        TypeError: if `value` is not an instance of `type_`.
    """
    if isinstance(value, type_):
        return value
    raise TypeError(
        f"Argument `{name}` must be of type {type_}, "
        f"but received type: {type(value)}")
def for_factorial(num):
    """Iterative factorial (returns 1 for num < 2)."""
    product = 1
    for factor in range(num, 1, -1):
        product *= factor
    return product
def binary_tree_search(values_list, value, left=None, right=None):
    """
    Searching the value in sorted list with binary tree algorithm

    @param right: right index of sublist (defaults to len - 1)
    @param left: left index of sublist (defaults to 0)
    @param values_list: list, where we searching the value
    @param value: searching value
    @return: Index ef list element with value or -1 if not found

    NOTE(review): the hit branch returns `middle + 1`, which reads as a
    1-based position rather than the 0-based index the docstring implies —
    confirm the intended convention with callers. The left-half recursion
    also passes `middle` (not `middle - 1`) as the new right bound; verify
    the boundary convention before changing anything here.
    """
    left = 0 if left is None else left
    right = len(values_list)-1 if right is None else right
    middle = (right+left) // 2
    if right <= left:
        return -1
    # hit when value falls between the previous element and this one
    if middle == 0 or values_list[middle-1] < value <= values_list[middle]:
        return middle + 1
    if values_list[middle] >= value:
        # recurse into the left half
        return binary_tree_search(
            values_list, value, left, middle
        )
    elif values_list[middle] < value:
        # recurse into the right half
        return binary_tree_search(
            values_list, value, middle+1, right
        )
def normalize_test_name(test_name: str) -> str:
    """Normalize a test name so Windows and Unix runs compare equal
    (backslashes become forward slashes)."""
    normalized = test_name.replace("\\", "/")
    return normalized
def UnQuote(quoted_data):
    """
    Strip one layer of quoting: "foo" or 'foo' -> foo.

    Args:
        quoted_data: string whose first character is the quote character.
    Returns:
        The text between the quotes, or None when no matching closing
        quote is found.
    """
    quote_char = quoted_data[0]
    remainder = quoted_data[1:]
    closing = remainder.find(quote_char)
    if closing == -1:
        return None
    return remainder[:closing]
def index_items(col, values, all=False):
    """
    Build a correctly formed list of (col, value) tuples for matching an
    index; when `all` is truthy, prepend the (col, 'All') entry.
    """
    items = [(col, str(value)) for value in values]
    if all:
        items.insert(0, (col, 'All'))
    return items
def sec_to_time(sec: int) -> str:
    """Convert seconds to a human readable HH:MM:SS string."""
    hours, remainder = divmod(sec, 3600)
    minutes, seconds = divmod(remainder, 60)
    return '{:02}:{:02}:{:02}'.format(hours, minutes, seconds)
def get_tax_rate(tax_rates: dict, rate_name=None):
    """
    Get the tax rate from a set of tax rates.

    WARNING: rate_name is currently unused — a consequence of how TaxJar's
    API works; the average rate is always returned.
    """
    if tax_rates is None:
        return None
    return tax_rates['average_rate']['rate']
def strip_short_path(path, strip_paths):
    """Strip the first matching prefix in `strip_paths` from `path`.

    Returns the remainder with no leading slash; the original path is
    returned when nothing matches or `strip_paths` is empty.
    """
    if not strip_paths:
        return path
    for strip_path in strip_paths:
        if path.startswith(strip_path):
            # slice off the prefix; the old str.replace() also deleted any
            # later occurrences of strip_path inside the remaining path
            return path[len(strip_path):].lstrip("/")
    return path
def get_option_value(option):
    """
    Value to persist for a Checkboxes / CheckboxTree option dict:
    its 'value' when truthy, otherwise its 'label'.

    :param option: dict from a Question's list of options
    :return: string value to be persisted
    """
    value = option.get('value')
    if value:
        return value
    return option['label']
def subListInSexp(sexp, xys):
    """Substitute x -> y throughout `sexp` for each (x, y) pair in `xys`,
    i.e. return sexp[y1/x1, ..., yn/xn].

    Lists are rewritten recursively; other atoms are looked up directly.
    """
    if type(sexp) == list:
        return [subListInSexp(child, xys) for child in sexp]
    replacements = dict(xys)
    return replacements.get(sexp, sexp)
def count_consonants(string: str) -> int:
    """
    Count the consonant characters in `string`, case-insensitively.

    >>> count_consonants("Hello World")
    7
    """
    consonants = set('bcdfghjklmnpqrstvwxyz')
    return sum(1 for char in string if char.lower() in consonants)
def kernal_mus(n_kernels):
    """
    Return the mu (bin centre) for each Gaussian kernel.

    :param n_kernels: number of kernels (including exact match); the first
        entry is the exact-match kernel at mu = 1.
    :return: list of mu values.
    """
    mus = [1]
    if n_kernels == 1:
        return mus
    bin_size = 2.0 / (n_kernels - 1)  # scores span [-1, 1]
    mu = 1 - bin_size / 2             # centre of the first soft bin
    mus.append(mu)
    # each subsequent centre is one bin lower than the previous
    for _ in range(n_kernels - 2):
        mu = mu - bin_size
        mus.append(mu)
    return mus
def unique(sequence):
    """
    Return only the unique elements of `sequence`, preserving first-seen order.

    :param Sequence[T] sequence: the sequence to unique-ify
    :rtype: List[T]
    """
    seen = set()
    deduped = []
    for element in sequence:
        if element not in seen:
            seen.add(element)
            deduped.append(element)
    return deduped
def remove_duplicates(values, compare):
    """
    Remove duplicates while keeping the original element order, using a
    custom comparison predicate (a fast `in` pre-check short-circuits
    exact-equality duplicates).

    :param values: input list
    :param compare: binary predicate; True means "these two are duplicates"
    :return: values without duplicates
    """
    kept = []
    for candidate in values:
        if candidate in kept:
            continue
        is_duplicate = any(compare(candidate, existing) for existing in kept)
        if not is_duplicate:
            kept.append(candidate)
    return kept
def count_first_word(str_list):
    """Count the first hyphen-separated field of each string in the list.

    Args:
        str_list: list of strings, split on "-".

    Returns:
        {"word": count, ...}
    """
    counts = {}
    for phrase in str_list:
        first = phrase.split("-")[0]
        counts[first] = counts.get(first, 0) + 1
    return counts
def get_dependencies(component_dict):
    # type: (dict) -> tuple
    """
    Returns tuple of two lists - dependencies for including in the source file and dependencies for adding to manifest

    WARNING: the `.pop('include', ...)` below MUTATES `component_dict` —
    every truthy 'include' key is removed from the caller's dict.
    """
    if 'dependencies' not in component_dict.keys():
        return [], []
    dependencies = component_dict['dependencies']
    # Collect (and remove) the 'include' entry of every dependency that has
    # a truthy one. In a list comprehension the `if` filter runs before the
    # expression, so the pop only fires for dependencies that pass it.
    include_list = [
        dependencies[library].pop('include', None) for library in dependencies.keys()
        if dependencies[library].get('include', None)
    ]
    # A dependency goes into the manifest when it is fetched via git or
    # pinned to a version.
    libraries_for_manifest = [
        library for library in dependencies.keys()
        if 'git' in dependencies[library].keys() or 'version' in dependencies[library].keys()
    ]
    return include_list, libraries_for_manifest
def modular_pow(base, exponent, modulus):
    """Raise an integer to a power over a modulus without computing the
    full power (square-and-multiply modular exponentiation).

    Keyword arguments:
    base -- integer to be raised to the desired power, modulo `modulus`.
    exponent -- integer power (values <= 0 return 1, as before).
    modulus -- integer modulus applied to every intermediate product.
    """
    if exponent <= 0:
        # matches the previous revision, whose empty loop left c == 1
        return 1
    result = 1
    b = base % modulus
    # square-and-multiply: O(log exponent) instead of the old O(exponent)
    while exponent > 0:
        if exponent & 1:
            result = (result * b) % modulus
        b = (b * b) % modulus
        exponent >>= 1
    return result
def remove_suffix(sset_name):
    """Drop the last hyphen-separated field when the name contains more
    than one hyphen; otherwise return the name unchanged."""
    if sset_name.count('-') >= 2:
        return sset_name.rsplit('-', 1)[0]
    return sset_name
def parse_request(url):
    """Return a pair (route, payload) parsed from a URL.

    Parameters
    ----------
    url : str
        the URL to be parsed.

    Returns
    -------
    tuple
        (route, payload-dict) when a query string is present,
        (route, None) when there is none,
        (None, None) when the url is malformed (also prints a message).
    """
    try:
        if '?' not in url:
            return url, None
        route, query = url.split('?')
        payload = {}
        for pair in query.split('&'):
            key, value = pair.split('=')
            payload[key] = value
        return route, payload
    except ValueError:
        print('Invalid route!')
        return None, None
def update_array(old_array, indices, val):
    """
    Add an item to a Python list (1-D or 2-D), growing the list as needed.

    :param old_array: list to update; any non-list input is replaced by a
        fresh empty list first.
    :param indices: sequence of 1 or 2 index values (each coerced with
        int()); its length selects 1-D vs 2-D behaviour. Any other length
        leaves the array unchanged.
    :param val: the value to store at the indexed position.
    :return: the updated list (a list input is also mutated in place).
    """
    # Sanity check.
    if (not isinstance(old_array, list)):
        old_array = []
    # 1-d array?
    if (len(indices) == 1):
        # Do we need to extend the length of the list to include the indices?
        index = int(indices[0])
        if (index >= len(old_array)):
            # Pad the gap with zeroes up to and including `index`.
            old_array.extend([0] * (index - len(old_array) + 1))
        old_array[index] = val
    # 2-d array?
    elif (len(indices) == 2):
        # Do we need to extend the length of the list to include the indices?
        index = int(indices[0])
        index1 = int(indices[1])
        if (index >= len(old_array)):
            # NOTE: Don't do 'old_array.extend([[]] * (index - len(old_array) + 1))' here.
            # The [] added with extend refers to the same list so any modification
            # to 1 sublist shows up in all of them.
            for i in range(0, (index - len(old_array) + 1)):
                old_array.append([])
        if (index1 >= len(old_array[index])):
            # Pad the selected row with zeroes up to and including `index1`.
            old_array[index].extend([0] * (index1 - len(old_array[index]) + 1))
        old_array[index][index1] = val
    # Done.
    return old_array
def _D0N2_Deg1_full_linear(k0, k2):
    """From 1d knots, return int_0^2 x b**2(x) dx, i.e. (k2 - k0) / 3."""
    span = k2 - k0
    return span / 3.
def to_helm_values_list(values):
    """
    Flatten a dict into alternating ["--set", "key=value", ...] arguments.

    The sh lib doesn't allow repeating the same kwarg
    (https://github.com/amoffat/sh/issues/529), so the best option is to
    concatenate everything into one list.
    """
    args = []
    for key, val in values.items():
        args.append("--set")
        args.append(f"{key}={val}")
    return args
def __find_all_plugins(paths):
    """
    Find all Bakefile plugin module names in the given directories and
    return them as a list (Python files, excluding __init__).
    """
    from os import walk
    from os.path import splitext
    found = []
    for dirname in paths:
        for _root, _dirs, files in walk(dirname):
            for filename in files:
                stem, ext = splitext(filename)
                # only real plugin modules: *.py, skipping package markers
                if ext == ".py" and stem != "__init__":
                    found.append(stem)
    return found
def get_max_time(canvas_size, t_offset):
    """Maximum number of reasonable time steps for a ToySquares instance,
    before many objects fall off the canvas.

    Parameters
    ----------
    canvas_size : int
        size of the canvas on which the toy squares fall, in pixels
    t_offset : int
        number of time steps between X and Y

    Raises
    ------
    ValueError
        when t_offset is too high for the canvas size.
    """
    max_time = int(canvas_size / 2)
    # round down to the nearest even number, then add 2
    max_time = max_time - (max_time % 2) + 2
    if (max_time == 1) or (max_time < t_offset):
        raise ValueError("Value supplied for t_offset is too high compared to the canvas size.")
    return max_time
def sorted_items(params):
    """Return the dict's (key, value) pairs as a list sorted by key."""
    return [(key, params[key]) for key in sorted(params)]
def part1_count_increases(measurements):
    """Count how many measurements are greater than their predecessor."""
    return sum(
        1 for prev, cur in zip(measurements, measurements[1:]) if cur > prev
    )
def get_closure(fn):
    """
    Return a dict of the variables visible to `fn`: its module globals
    merged with its closed-over (free) variables.
    """
    captures = dict(fn.__globals__)
    closure_cells = fn.__closure__ or ()
    for name, cell in zip(fn.__code__.co_freevars, closure_cells):
        captures[name] = cell.cell_contents
    return captures
def argmax(seq, fn):
    """Return the element of `seq` with the highest fn(element) score;
    ties go to the earliest such element.
    """
    # built-in max with a key is the idiomatic, single-pass form of the
    # previous hand-rolled scan, and it also keeps the first element on ties
    return max(seq, key=fn)
def rervi(b5, b8):
    """
    Red-edge Ratio Vegetation Index (Cao et al., 2013).

    .. math:: RERVI = b8/b5

    :param b5: Red-edge 1.
    :type b5: numpy.ndarray or float
    :param b8: NIR.
    :type b8: numpy.ndarray or float
    :returns RERVI: Index value

    .. Tip::
        Cao, Q.; Miao, Y.; Wang, H.; Huang, S.; Cheng, S.; Khosla, R.; \
        Jiang, R. 2013. Non-destructive estimation of rice plant nitrogen \
        status with Crop Circle multispectral active canopy sensor. Field \
        Crops Research 154, 133-144. doi:10.1016/j.fcr.2013.08.005.
    """
    ratio = b8 / b5
    return ratio
def matching_dissim(a, b, **_):
    """Simple matching dissimilarity: the number of positions at which
    `a` and `b` differ (extra keyword arguments are ignored)."""
    return sum(1 for left, right in zip(a, b) if left != right)
def __fade(t):
    """Perlin fade curve 6t^5 - 15t^4 + 10t^3 (used internally by noise())."""
    t_cubed = t * t * t
    return t_cubed * (t * (t * 6 - 15) + 10)
def hex_to_int(hex_str: str) -> int:
    """Parse a hexadecimal string as an integer."""
    value = int(hex_str, 16)
    return value
def sign(x):
    """Return 1 for positive x and -1 otherwise (zero maps to -1)."""
    return -1 if x <= 0 else 1
def wait3(options=0):
    """Stub of os.wait3: the real call returns a 3-element tuple of child
    pid, exit status indication, and resource usage information (see
    resource.getrusage); `options` mirrors waitpid/wait4. This placeholder
    always returns (0, 0, 0)."""
    return 0, 0, 0
def format_timedelta(seconds):
    """
    Format a duration as a compact human readable string such as '1h20m5s'
    or just '5s' when it is short.

    (The 'HH:MM:SS' form is avoided deliberately: output like
    'cost time 00:05:30s' used to make vim's QuickFix mode create a
    bogus file named 'Blade(info): cost time 00'.)
    """
    total_mins = int(seconds // 60)
    seconds %= 60
    hours, mins = divmod(total_mins, 60)
    result = '%.3gs' % seconds
    if hours > 0 or mins > 0:
        result = '%sm' % mins + result
    if hours > 0:
        result = '%sh' % hours + result
    return result
def remove_audio_streams(config, dest_path):
    """Prune audio-based features from the config and dest_path name
    (needed for datasets like MSVD which have no sound). When the audio
    feature was the experiment's control variable (tag followed by a '.'),
    the whole ablation is dropped by returning False for dest_path.
    """
    for tag in ("audio", "speech"):
        if f"-{tag}." in dest_path:
            # the audio feature was the control variable: drop the ablation
            return config, False
        dest_path = dest_path.replace(f"-{tag}", "")
        if "experts" in config and "modalities" in config["experts"]:
            if tag in config["experts"]["modalities"]:
                config["experts"]["modalities"].remove(tag)
    return config, dest_path
def get_downsampled_voxel_dims(num_hierarchy_levels, isotropic_level, hierarchy_method,
                               x_voxel_size, y_voxel_size, z_voxel_size,
                               iso=False):
    """Map resolution levels to voxel dimensions.

    Args:
        num_hierarchy_levels(int): number of levels to compute
        isotropic_level(int): resolution level closest to isotropic
        hierarchy_method(str): downsampling method (anisotropic | isotropic)
        x_voxel_size(int): voxel size in x dimension
        y_voxel_size(int): voxel size in y dimension
        z_voxel_size(int): voxel size in z dimension
        iso(bool): request isotropic dimensions for anisotropic channels

    Returns:
        (list): element i is the [x, y, z] voxel size at resolution level i
    """
    voxel_dims = [[x_voxel_size, y_voxel_size, z_voxel_size]]
    for res in range(1, num_hierarchy_levels):
        prev_x, prev_y, prev_z = voxel_dims[res - 1]
        # z is doubled only when downsampling isotropically, or when an
        # anisotropic channel is past the isotropic level and the isotropic
        # variant was explicitly requested
        double_z = (hierarchy_method == "isotropic"
                    or (res > isotropic_level and iso is True))
        new_z = prev_z * 2 if double_z else prev_z
        voxel_dims.append([prev_x * 2, prev_y * 2, new_z])
    return voxel_dims
def jaccard_dependency(exp1, exp2):
    """
    Direction of dependence between two experiments: 0 when exp1 is the
    parent of exp2, otherwise 1 (based on the intersection size relative
    to each word set's length).
    """
    words1, _ = exp1
    words2, _ = exp2
    overlap = len(set(words1).intersection(set(words2)))
    parent_score = overlap / float(len(words1))
    child_score = overlap / float(len(words2))
    return 0 if parent_score >= child_score else 1
def RPL_LINKS(sender, receipient, message):
    """Reply Code 364 (`receipient` is unused; kept for signature parity)."""
    prefix = "<" + sender + ">"
    return prefix + ": " + message
def sumMerge(dict1, dict2):
    """Element-wise add dict2's value lists into dict1, merging in place.

    Missing keys in dict1 start from [0, 0, 0, 0]. Returns dict1.
    """
    for key, values in dict2.items():
        base = dict1.get(key, [0, 0, 0, 0])
        # Pairwise sum; like map(), zip() stops at the shorter list.
        dict1[key] = [left + right for left, right in zip(base, values)]
    return dict1
def cote_boostee(cote, boost_selon_cote=True, freebet=True, boost=1):
    """
    Calcul de cote boostee pour promotion Betclic

    The boosted winnings (cote - 1) are scaled by 0.8 when paid as a
    freebet, and by a tier factor that depends on the odds range.
    """
    multiplier = 1 * (not freebet) + 0.8 * freebet
    gain = cote - 1
    if not boost_selon_cote:
        return cote + gain * boost * multiplier
    # Tiered boost by odds range: none below 2, then 25%, 50%, 100%.
    if cote < 2:
        return cote
    if cote < 2.51:
        return cote + gain * 0.25 * multiplier
    if cote < 3.51:
        return cote + gain * 0.5 * multiplier
    return cote + gain * multiplier
def get_y_coordinate(y, board):
    """Convert 1-based user input to a 0-based row index, or -1 if invalid."""
    try:
        index = int(y) - 1
    except ValueError:
        # Non-numeric input.
        return -1
    if 0 <= index < len(board):
        return index
    return -1
def listXor(b, c):
    """Return the elements of lists b and c that the two lists don't share.

    Order (all of b's entries, then c's) and duplicates are preserved,
    matching the original list-membership semantics.

    Args:
        b (list): first list (elements must be hashable)
        c (list): second list (elements must be hashable)

    Returns:
        list: elements that appear in only one of the two lists
    """
    # Build membership sets once: O(len(b) + len(c)) lookups instead of the
    # quadratic cost of scanning the lists with `in` for every element.
    in_b = set(b)
    in_c = set(c)
    return [a for a in b + c if a not in in_b or a not in in_c]
def recallstate(bench, action):
    """action=['Set', '<reg 0-99>,<seq 0-9>']

    Build the SCPI command tuple for recalling a stored instrument state.
    Returns (bench, SCPI command root, action) for the caller to dispatch.
    """
    SCPIcore = '*RCL' # recall machine state (*RCL restores a saved register; *SAV is the one that saves)
    return bench, SCPIcore, action
def new_orig(coords):
    """Set the origin coordinates to (0, 0, 0), replacing each negative
    coordinate with its absolute value. Returns a tuple."""
    # Non-negative values map to 0; negative values map to their magnitude.
    return tuple(max(-value, 0) for value in coords)
def find_the_duplicate(nums):
    """Find duplicate number in nums.

    Given a list of nums with, at most, one duplicate, return the duplicate.
    If there is no duplicate, return None

    >>> find_the_duplicate([1, 2, 1, 4, 3, 12])
    1
    >>> find_the_duplicate([6, 1, 9, 5, 3, 4, 9])
    9
    >>> find_the_duplicate([2, 1, 3, 4]) is None
    True
    """
    from collections import Counter

    # Count actual values rather than substring hits inside str(nums): the
    # old str(nums).count(str(num)) approach reported false duplicates when
    # one number was a substring of another (e.g. [12, 2] "found" 12 twice)
    # and rebuilt the string per element (O(n^2)).
    counts = Counter(nums)
    for num in nums:
        if counts[num] > 1:
            return num
    return None
def decode_sint24(bb):
    """
    Decode 3 bytes as a signed 24 bit integer
    Specs:
    * **sint24 len**: 3 bytes
    * **Format string**: 'K'
    """
    # Little-endian, two's-complement interpretation.
    value = int.from_bytes(bb, 'little', signed=True)
    return value
def powmod(b, e, n):
    """powmod(b,e,n) computes the eth power of b mod n.
    (Actually, this is not needed, as pow(b,e,n) does the same thing for positive integers.
    This will be useful in future for non-integers or inverses."""
    # Square-and-multiply: consume the exponent one bit at a time,
    # squaring the base each round.
    result = 1
    square = b
    remaining = e
    while remaining > 0:
        if remaining & 1:
            result = (result * square) % n
        square = (square * square) % n
        remaining >>= 1
    return result
def expand_padding(from_shape, to_shape, padding):
    """ Expands padding with new axes added by reshape. Will fail if the reshape
    is doing more than just inserting 1s

    Args:
        from_shape: shape before the reshape (aligned with `padding`)
        to_shape: shape after the reshape
        padding: per-axis (before, after) padding pairs for `from_shape`

    Returns:
        A new list of (before, after) pairs aligned with `to_shape`, or
        None when the reshape does more than insert/remove size-1 axes.
    """
    from_shape = tuple(from_shape)
    to_shape = tuple(to_shape)
    if from_shape == to_shape:
        return padding
    # Walk both shapes in lockstep with two cursors, matching equal axes
    # and skipping over inserted/removed size-1 axes.
    from_idx = 0
    to_idx = 0
    new_padding = []
    while to_idx < len(to_shape) or from_idx < len(from_shape):
        if from_idx < len(from_shape) and to_idx < len(to_shape) and from_shape[from_idx] == to_shape[to_idx]:
            # Axis unchanged by the reshape: carry its padding through.
            new_padding.append(padding[from_idx])
            from_idx += 1
            to_idx += 1
        elif to_idx < len(to_shape) and to_shape[to_idx] == 1:
            # Size-1 axis inserted by the reshape: it gets zero padding.
            new_padding.append((0, 0))
            to_idx += 1
        elif from_idx < len(from_shape) and from_shape[from_idx] == 1 and sum(padding[from_idx]) == 0:
            # Unpadded size-1 axis removed by the reshape: drop it.
            # (A padded size-1 axis cannot be dropped without losing data.)
            from_idx += 1
        else:
            # Anything else means the reshape merged or split axes: give up.
            return None
    return new_padding if from_idx == len(from_shape) and to_idx == len(to_shape) else None
def generate_single_libraries_list_code(name, libraries, platform):
    """
    Generate single list of libraries to be queries.
    """
    paths = libraries["paths"].get(platform) or []
    header = "static const char* {}_paths[] = {{".format(name)
    # Continuation lines are aligned one column left of the opening brace,
    # i.e. under the first character after it.
    indent = " " * (len(header) - 1)
    # Each path is quoted; the array is always NULL-terminated.
    entries = ['"{}"'.format(path) for path in paths] + ["NULL};"]
    return header + (",\n" + indent).join(entries)
def format_barcode(barcode_string):
    """
    Removes the dashes from sample strings.
    :param barcode_string: barcode text, possibly containing '-' separators
    :return: the barcode with all dashes removed
    """
    # The previous `barcode_string.format(...)` call only substituted the
    # cleaned value into `{}` placeholders, so ordinary barcodes with no
    # placeholders were returned unchanged, dashes and all.
    return barcode_string.replace('-', '')
def _swiftmodule_for_cpu(swiftmodule_files, cpu):
"""Select the cpu specific swiftmodule."""
# The paths will be of the following format:
# ABC.framework/Modules/ABC.swiftmodule/<arch>.swiftmodule
# Where <arch> will be a common arch like x86_64, arm64, etc.
named_files = {f.basename: f for f in swiftmodule_files}
module = named_files.get("{}.swiftmodule".format(cpu))
if not module and cpu == "armv7":
module = named_files.get("arm.swiftmodule")
return module |
def comment_strip(text, starts_with='#', new_line='\n'):
    """
    Strip comments from a text block.
    Parameters
    -----------
    text : str
      Text to remove comments from
    starts_with : str
      Character or substring that starts a comment
    new_line : str
      Character or substring that ends a comment; also used to re-join
      the surviving chunks of text
    Returns
    -----------
    stripped : str
      Text with comments stripped, leading/trailing whitespace removed
    """
    # if not contained exit immediately
    if starts_with not in text:
        return text
    # start by splitting into chunks by the comment indicator
    # (appending new_line guarantees a trailing comment is terminated)
    split = (text + new_line).split(starts_with)
    # special case files that start with a comment:
    # split[0] is then empty and must not contribute a leading chunk
    if text.startswith(starts_with):
        lead = ''
    else:
        lead = split[0]
    # take each comment up until the newline; i[0] is the comment body,
    # i[1] (if present) is the text that followed the comment's newline
    removed = [i.split(new_line, 1) for i in split]
    # add the leading string back on, re-joining survivors with new_line
    result = lead + new_line + new_line.join(
        i[1] for i in removed
        if len(i) > 1 and len(i[1]) > 0)
    # strip leading and trailing whitespace
    result = result.strip()
    return result
def _SanitizeDoc(doc, leader):
"""Cleanup the doc string in several ways:
* Convert None to empty string
* Replace new line chars with doxygen comments
* Strip leading white space per line
"""
if doc is None:
return ''
return leader.join([line.lstrip() for line in doc.split('\n')]) |
def isfile(filepath: str) -> bool:
    """A function to replace os.path.isfile in micropython.

    Args:
        filepath: path to test.

    Returns:
        True if the path can be opened for reading, False otherwise.
    """
    try:
        # Close the probe handle instead of leaking it: the previous bare
        # open(filepath) relied on garbage collection to release the fd.
        with open(filepath):
            return True
    except OSError:
        return False
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.