def backupise_name(file_name, counter):
"""
Return the backupised name.
We ignore `.*.bak` in `.gitignore`.
The leading dot makes it hidden on Linux.
Bad luck if you use Windows.
"""
return f'.{file_name}-{counter}.bak'
|
def ia(ta, r, a, b, c):
"""Chicago design storm equation - intensity after peak. Helper for i function.
Args:
ta: time after peak in minutes (measured from peak towards end)
r: time to peak ratio (peak time divided by total duration)
a: IDF A parameter - can be calculated from getABC
b: IDF B parameter - can be calculated from getABC
c: IDF C parameter - can be calculated from getABC
Returns:
Returns intensity in mm/hr.
"""
return a*((1-c)*ta/(1-r)+b)/((ta/(1-r))+b)**(c+1)
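# A minimal usage sketch: the IDF parameters a, b, c, the time-to-peak ratio r
# and the time ta below are hypothetical placeholder values chosen only to show
# the call, not real design-storm constants.
intensity_after_peak = ia(ta=20.0, r=0.4, a=1000.0, b=10.0, c=0.8)  # mm/hr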
|
def flatten(lst):
"""Shallow flatten *lst*"""
return [a for b in lst for a in b]
|
def trait_label(trait, sep=' '):
"""Generate a label for the trait."""
label = [trait['part']]
if 'subpart' in trait:
label.append(trait['subpart'])
label.append(trait['trait'])
label = sep.join(label)
label = label.replace('-', '')
label = label.replace('indumentum' + sep + 'surface', 'indumentum')
return label
|
def dropZeros(dictionary):
"""Drops zero valued items from dictionary"""
sansZeros = {key:value for key,value in dictionary.items() if value != 0}
if len(sansZeros) != 0:
dictionary = sansZeros
return dictionary
|
def is_valid_test_file(test_file):
"""
Checks if file is a .pyc or from __pycache__
:param test_file: str
:return: str
"""
return '.pyc' not in test_file and '__pycache__' not in test_file
|
def _make_lock_uri(cloud_tmp_dir, cluster_id, step_num):
"""Generate the URI to lock the cluster ``cluster_id``"""
return cloud_tmp_dir + 'locks/' + cluster_id + '/' + str(step_num)
|
def int_set(subset):
"""
This function transforms subset into corresponding integer representation.
Parameters
----------
subset : set
The subset you want to transform.
Returns
-------
representation : int
The integer representation of a given subset.
"""
representation = 0
for i in subset:
representation += (1 << i)
return representation
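# Quick illustration of the bitmask mapping: the subset {0, 2, 5} becomes
# 0b100101 = 1 + 4 + 32 = 37.
assert int_set({0, 2, 5}) == 37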
|
def convertRegionDisplayNameToRegionName(regionDisplayName):
"""Converts given region display name to region name. For ex; 'HDInsight.EastUS' to 'eastus'"""
return str(regionDisplayName.split(".")[1]).lower()
|
def cc(key):
"""
Changes python key into Camel case equivalent. For example, 'compute_environment_name' becomes
'computeEnvironmentName'.
    :param key: snake_case key string
    :return: camelCase equivalent of `key`
"""
components = key.split('_')
return components[0] + "".join([token.capitalize() for token in components[1:]])
|
def normalize_data_format(value):
"""Normalize the keras data format."""
if value is None:
value = 'channels_last'
data_format = value.lower()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError(
            'Expected <data_format> as one of '
'"channels_first", '
'"channels_last".'
' Received: ' + str(value))
return data_format
|
def rivers_with_station(stations):
"""This function creates a set containing all the river names which have a station on it.
It is set up so each river only occurs onece """
rivers = set()
for station in stations:
rivers.add(station.river)
rivers = sorted(rivers)
return rivers
|
def readBits(value, bitmasks):
"""
Extract bits from the integer value using a list of bit masks.
bitmasks is a list of tuple (mask, text).
>>> bitmask = (
... (1, "exec"),
... (2, "write"),
... (4, "read"))
...
>>> readBits(5, bitmask)
['exec', 'read']
>>> readBits(12, bitmask)
['read', '8']
"""
bitset = []
for mask, item in bitmasks:
if not value & mask:
continue
bitset.append(item)
value = value & ~mask
if value:
bitset.append(str(value))
return bitset
|
def call_fn_with_state_keys(jit_fn, state, other_inputs, keys):
"""Executes `jit_fn`, filtering out all keys except some subset."""
state = state.copy()
extra_state = {}
for k in list(state.keys()):
if k not in keys:
extra_state[k] = state.pop(k)
return jit_fn(state, *other_inputs), extra_state
|
def drone_res(constants):
"""Dummy drone response for tests"""
drone_res = {
"@context": f"/{constants['api_name']}/contexts/Drone.jsonld",
"@id": f"/{constants['api_name']}/Drone/1",
"@type": "Drone",
"DroneState": {
"@id": f"/{constants['api_name']}/State/1",
"@type": "State",
"Battery": "C1WE92",
"Direction": "Q9VV88",
"DroneID": "6EBGT5",
"Position": "A",
"SensorStatus": "335Y8B",
"Speed": "IZPSTE",
},
"MaxSpeed": "A3GZ37",
"Sensor": "E7JD5Q",
"model": "HB14CX",
"name": "Smart Drone",
}
return drone_res
|
def to_upper(text):
"""Convert word to uppercase."""
return text.upper().strip()
|
def source_link_type(url):
"""
    Take a URL and return its type so the correct text can be displayed in the wiki view.
    :param url: Ruleset source's URL
    :type url: str
    :return: A string of URL type (github, patreon or unknown)
    """
    # Checking the bare domain also covers the "www." variant.
    if "github.com" in url:
        result = "github"
    elif "patreon.com" in url:
        result = "patreon"
else:
result = "unknown"
return result
|
def nameval(in_string):
"""
converts given string to key, value and separator triplets
:param in_string: key/value pair
:type in_string: str (unicode)
:return: key, value and separator triplet
:rtype: tuple
"""
idx = in_string.find("=")
separator = '='
if idx >= 0:
name = in_string[0:idx]
value = in_string[idx + 1:]
else:
idx = in_string.find("~")
separator = '~'
if idx >= 0:
name = in_string[0:idx]
value = in_string[idx + 1:]
else:
name = in_string
value = None
return name, value, separator
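# Small examples (arbitrary strings) covering the three outcomes: an '=' pair,
# a '~' pair, and a bare name with no separator found.
assert nameval("color=blue") == ("color", "blue", "=")
assert nameval("size~large") == ("size", "large", "~")
assert nameval("flag") == ("flag", None, "~")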
|
def account_from_role_arn(role_arn):
"""
    Extracts an account number from a role arn, raises a ValueError if the arn does not match a valid arn format
:param role_arn: The arn
:return: The extracted account number
"""
role_elements = role_arn.split(":")
if len(role_elements) < 5:
raise ValueError("Role \"%s\" is not a valid role arn", role_arn)
return role_elements[4]
|
def doesnt_raise(function, message=""):
"""
The inverse of raises().
Use doesnt_raise(function) to test that function() doesn't raise any
exceptions. Returns the result of calling function.
"""
if not callable(function):
raise ValueError("doesnt_raise should take a lambda")
try:
return function()
except Exception as e:
if message:
raise AssertionError(f"Unexpected exception {e!r}: {message}")
raise AssertionError(f"Unexpected exception {e!r}")
|
def balanced_paranthesis(exp):
"""Return True if exp has balanced parantheses, else False."""
pair = {')': '(', ']': '[', '}': '{'}
stack = []
for i in exp:
if i in pair.values():
stack.append(i)
elif i in pair.keys():
if stack.pop() != pair[i]:
return False
if len(stack) == 0:
return True
else:
return False
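# Quick sanity checks with arbitrary inputs, including a closing bracket that
# arrives while the stack is empty.
assert balanced_paranthesis("({[]})")
assert not balanced_paranthesis("(]")
assert not balanced_paranthesis(")(")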
|
def get_nse_or_ntt_dtype(info, ext):
"""
For NSE and NTT the dtype depend on the header.
"""
dtype = [('timestamp', 'uint64'), ('channel_id', 'uint32'), ('unit_id', 'uint32')]
# count feature
nb_feature = 0
for k in info.keys():
if k.startswith('Feature '):
nb_feature += 1
dtype += [('features', 'int32', (nb_feature,))]
# count sample
if ext == 'nse':
nb_sample = info['WaveformLength']
dtype += [('samples', 'int16', (nb_sample,))]
elif ext == 'ntt':
nb_sample = info['WaveformLength']
nb_chan = 4 # check this if not tetrode
dtype += [('samples', 'int16', (nb_sample, nb_chan))]
return dtype
|
def _resolution_to_timedelta(res_text: str) -> str:
"""
Convert an Entsoe resolution to something that pandas can understand
"""
resolutions = {
'PT60M': '60min',
'P1Y': '12M',
'PT15M': '15min',
'PT30M': '30min',
'P1D': '1D',
'P7D': '7D',
'P1M': '1M',
}
delta = resolutions.get(res_text)
if delta is None:
raise NotImplementedError("Sorry, I don't know what to do with the "
"resolution '{}', because there was no "
"documentation to be found of this format. "
"Everything is hard coded. Please open an "
"issue.".format(res_text))
return delta
|
def reverse( sequence ):
"""Return the reverse of any sequence
"""
return sequence[::-1]
|
def encode_to_7bit(value):
"""Encode to 7 bit"""
if value > 0x7f:
res = []
res.insert(0, value & 0x7f)
while value > 0x7f:
value >>= 7
res.insert(0, (value & 0x7f) | 0x80)
return res
return [value]
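# Worked example: 300 splits into the 7-bit groups [2, 44]; the leading group
# carries the 0x80 continuation bit, so the encoding is [0x82, 0x2C].
assert encode_to_7bit(300) == [0x82, 0x2C]
assert encode_to_7bit(0x3F) == [0x3F]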
|
def check_if_bst(node, mini=float('-inf'), maxi=float('+inf')):
"""
Check if the given tree is Binary Search Tree (BST)
Args:
node: root node of the Tree. `node` arg must have `.left`, `.right` and `.data` variables
mini: min value - should be omitted
maxi: max value - should be omitted
Returns:
bool - True if it's BST and False if not
Examples:
Precondition:
>>> class Node:
... def __init__(self, data):
... self.data = data
... self.left = None
... self.right = None
>>> root = Node(4)
>>> root.left = Node(2)
>>> root.right = Node(6)
>>> root.left.left = Node(1)
>>> root.left.right = Node(3)
>>> root.right.left = Node(5)
>>> root.right.right = Node(7)
Example itself:
>>> check_if_bst(root)
True
"""
if node is None:
return True
if node.data < mini or node.data > maxi:
return False
return (check_if_bst(node.left, mini, node.data - 1) and
check_if_bst(node.right, node.data + 1, maxi))
|
def make_relative(det, box):
"""Make detections relative to the box.
Used for the parameters of test_combine_slice_detections.
"""
x0, y0, x1, y1, p = det
bx0, by0, *_ = box
return x0 - bx0, y0 - by0, x1 - bx0, y1 - by0, p
|
def filter_size(detail):
"""Web app, feed template, additional info: (file) size"""
info = detail['info']
size = '{} {}'.format(info['size'], info['unit'])
return size
|
def _convert_concatenate(arg_list):
"""
Handler for the "concatenate" meta-function.
@param IN arg_list List of arguments
@return DB function call string
"""
return " || ".join(arg_list)
|
def get_triangle_bottom_midpoint(point_list):
"""Returns the midpoint of the top of a triangle regardless of the orientation."""
y = int(max([x[1] for x in point_list]))
x = int((min([x[0] for x in point_list]) + max([x[0] for x in point_list])) / 2)
return x, y
|
def _get_sort_and_permutation(lst: list):
"""
    Sorts a list, returning the sorted list along with a permutation-index list which can be used for
cursored access to data which was indexed by the unsorted list. Nominally for chunking of CSR
matrices into TileDB which needs sorted string dimension-values for efficient fragmentation.
"""
# Example input: x=['E','A','C','D','B']
# e.g. [('E', 0), ('A', 1), ('C', 2), ('D', 3), ('B', 4)]
lst_and_indices = [(e, i) for i, e in enumerate(lst)]
# e.g. [('A', 1), ('B', 4), ('C', 2), ('D', 3), ('E', 0)]
lst_and_indices.sort(key=lambda pair: pair[0])
# e.g. ['A' 'B' 'C' 'D' 'E']
# and [1, 4, 2, 3, 0]
lst_sorted = [e for e, i in lst_and_indices]
permutation = [i for e, i in lst_and_indices]
return (lst_sorted, permutation)
|
def build_output_file_pattern(out_fn):
"""Builds the string pattern "{OUTFN}-{PAGE_RANGE}.pdf". This is necessary
if one wants to extract multiple page ranges and does not join the single
results.
Arguments:
out_fn -- Output filename
Returns:
String value which represents the filename pattern.
"""
if len(out_fn.split(".")) >= 2:
tokens = out_fn.split(".")
outfile = "".join(tokens[0:len(tokens) - 1])
return outfile + "-%s.pdf"
else:
return out_fn + "-%s.pdf"
|
def monomial_min(*monoms):
"""
Returns minimal degree for each variable in a set of monomials.
Examples
========
Consider monomials `x**3*y**4*z**5`, `y**5*z` and `x**6*y**3*z**9`.
We wish to find out what is the minimal degree for each of `x`, `y`
and `z` variables::
>>> from sympy.polys.monomials import monomial_min
>>> monomial_min((3,4,5), (0,5,1), (6,3,9))
(0, 3, 1)
"""
M = list(monoms[0])
for N in monoms[1:]:
for i, n in enumerate(N):
M[i] = min(M[i], n)
return tuple(M)
|
def _vars_dir_for_saved_model(
saved_model_path # type: str
):
# type: (str) -> str
"""
Args:
saved_model_path: Root directory of a SavedModel on disk
Returns the location of the directory where the indicated SavedModel will
store its variables checkpoint.
"""
return saved_model_path + "/variables"
|
def sort_nesting(list1, list2):
"""Takes a list of start points and end points and sorts the second list according to nesting"""
temp_list = []
while list2 != temp_list:
temp_list = list2[:] # Make a copy of list2 instead of reference
for i in range(1, len(list1)):
if list2[i] > list2[i-1] and list1[i] < list2[i-1]:
list2[i-1], list2[i] = list2[i], list2[i-1]
return list2
|
def _count_lines(file_path):
"""Count lines in a file.
A line counter. Counts the lines in given file with counter count counts.
Args:
file_path: `str` path to file where to count the lines.
"""
count = 0
with open(file_path, "r") as fobj:
for line in fobj:
count += 1
return count
|
def get_templates(conf):
"""Return an array of names of existing templates."""
try:
return conf['templates'].keys()
except KeyError:
return []
|
def content_convert(source_wrapper):
"""Convert content from AppetiteApp to dict
Converting the class to dict is needed for json logging and distribution
"""
new_source_wrapper = source_wrapper.copy()
new_source_wrapper['content'] = [content.to_dict for content in source_wrapper['content']]
return new_source_wrapper
|
def mtime(filename):
"""Modication timestamp of a file, in seconds since 1 Jan 1970 12:00 AM GMT
"""
from os.path import getmtime
try: return getmtime(filename)
    except OSError: return 0
|
def lstrip_keep(text):
"""
Like lstrip, but also returns the whitespace that was stripped off
"""
text_length = len(text)
new_text = text.lstrip()
prefix = text[0 : (text_length - len(new_text))]
return new_text, prefix
|
def get_mismatch_cts(pileup):
"""Returns pileup[0] with the pileup[1]-th element removed.
e.g. if pileup[0] = [99, 0, 30, 14] and pileup[1] = 2,
this'll return [99, 0, 14].
This corresponds to filtering the list of [A, C, G, T] aligned to a
position to remove whichever of the four nucleotides corresponds to
the reference nucleotide at this position.
"""
ref_idx = pileup[1]
return pileup[0][:ref_idx] + pileup[0][ref_idx + 1:]
|
def make_params(**kwargs):
"""
Helper to create a params dict, skipping undefined entries.
:returns: (dict) A params dict to pass to `request`.
"""
return {k: v for k, v in kwargs.items() if v is not None}
|
def matchingByName (theDictionary, firstLetter):
"""Identifies students a name starting with firstLetter.
Assumes student names are capitalized.
:param dict[str, str] theDictionary:
key: locker number / value: student name or "open"
:param str firstLetter:
The target letter by which to identify students. Currently does
not check for only a single letter.
:return:
The students with name starting with firstLetter
:rtype: list[str]
"""
studentsByName = []
firstLetter = firstLetter.upper()
for key in theDictionary:
if theDictionary[key][0] == firstLetter:
studentsByName.append(theDictionary[key])
return studentsByName
|
def num_diffs(state):
"""
Takes a state and returns the number of differences between
adjacent entries.
num_diffs(str) -> int
"""
differences = 0
for i in range(0, len(state) - 1):
if state[i] != state[i+1]:
differences += 1
return differences
|
def first_true(pred, iterable, default=None):
"""Returns the first true value in the iterable.
If no true value is found, returns *default*
source: https://docs.python.org/3/library/itertools.html
"""
    # first_true(None, [a,b,c], x) --> a or b or c or x
    # first_true(f, [a,b], x) --> a if f(a) else b if f(b) else x
return next(filter(pred, iterable), default)
|
def f_dir(obj):
"""Format public attributes of an object.
    Args:
        obj: object (Object)
    Returns:
        dicts of uncallables and callables (list)
    """
    # Use getattr so we record the docstring of the attribute itself, not of the name string.
    both = {i: getattr(obj, i).__doc__ for i in dir(obj) if not i.startswith("_")}
uncallables = {i: j for i, j in both.items() if not callable(getattr(obj, i))}
callables = {i: j for i, j in both.items() if callable(getattr(obj, i))}
return [uncallables, callables]
|
def min_scalar_prod(x, y):
"""Permute vector to minimize scalar product
:param x:
:param y: x, y are vectors of same size
:returns: min sum x[i] * y[sigma[i]] over all permutations sigma
:complexity: O(n log n)
"""
x1 = sorted(x) # make copies to preserve
y1 = sorted(y) # the input arguments
return sum(x1[i] * y1[-i - 1] for i in range(len(x1)))
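# Worked example with arbitrary vectors: sorting x ascending and pairing with y
# descending gives (-5)*4 + 1*1 + 3*(-2) = -25, the minimum scalar product.
assert min_scalar_prod([1, 3, -5], [-2, 4, 1]) == -25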
|
def _null_cast(dic, key):
"""
Allows the user to load value, given key from the dictionary. If the key
is not found, return "null".
Args:
        dic (dict): Dictionary to look in for the (key, value) pair
        key (str): Key to search for in the dictionary
Returns:
type: str
Either "null" string or previous data stored in the field.
"""
if key not in dic:
return "null"
return dic[key]
|
def equally_space_times(num_ids, step=0):
"""
    takes a list of times and returns an equally spaced list spanning the same range
"""
assert len(num_ids) > 1, 'not enough snapshots to equally space them in time'
if not step:
step = (num_ids[-1] - num_ids[0]) / float(len(num_ids) - 1)
ids_eq = []
t = num_ids[0]
#print t, num_ids[0], num_ids[-1], step, '<<<<<<<<<<<<<'
while t < num_ids[-1]:
#print t
ids_eq.append(t)
t += step
if len(ids_eq) < len(num_ids):
ids_eq.append(num_ids[-1])
ids_eq[-1] = num_ids[-1]
#print len(num_ids), 'num_ids'
#print len(ids_eq), 'ids_eq'
#print ids_eq[-2], num_ids[-2]
assert len(num_ids) == len(ids_eq), 'BUG! equally spaced times do not have the same length'
return ids_eq
|
def get_int_from_roman_number(input):
"""
From
http://code.activestate.com/recipes/81611-roman-numerals/
Convert a roman numeral to an integer.
>>> r = range(1, 4000)
>>> nums = [int_to_roman(i) for i in r]
>>> ints = [roman_to_int(n) for n in nums]
>>> print r == ints
1
>>> roman_to_int('VVVIV')
Traceback (most recent call last):
...
ValueError: input is not a valid roman numeral: VVVIV
>>> roman_to_int(1)
Traceback (most recent call last):
...
TypeError: expected string, got <type 'int'>
>>> roman_to_int('a')
Traceback (most recent call last):
...
ValueError: input is not a valid roman numeral: A
>>> roman_to_int('IL')
Traceback (most recent call last):
...
ValueError: input is not a valid roman numeral: IL
"""
if not isinstance(input, str):
return None
input = input.upper()
nums = ['M', 'D', 'C', 'L', 'X', 'V', 'I']
ints = [1000, 500, 100, 50, 10, 5, 1]
places = []
for c in input:
if c not in nums:
# raise ValueError, "input is not a valid roman num: %s" % input
return None
for i in range(len(input)):
c = input[i]
value = ints[nums.index(c)]
# If the next place holds a larger number, this value is negative.
try:
nextvalue = ints[nums.index(input[i + 1])]
if nextvalue > value:
value *= -1
except IndexError:
# there is no next place.
pass
places.append(value)
    total = 0
    for n in places:
        total += n
    return total
|
def twelve_digit_serial_no(id):
""" The function create a 12 digit serial number from any number with less than 11 digits"""
f = str(10**(11 - len(str(id))))
twelve_digit_id = f + str(id)
return int(twelve_digit_id)
|
def calc_regularization_loss(filtering_fn,
reg_pl_names_dict,
reg_model_name,
feed_dict,
sess,
all_scopes=None):
"""Calculate regularization loss
Args:
filtering_fn:
callable(reg_param_name) --> boolean
whether to add regularization loss on this param
if False, then reg_placeholder will be filled
            with same param effectively making regularization loss 0
reg_pl_names_dict:
dictionary mapping placeholder_name to param_name
reg_model_name:
name of the model to be regularized w.r.t
feed_dict:
feed_dict to be used in sess.run
all_scopes:
all parameter scopes, used to check whether the
new parameter name is valid
"""
if not callable(filtering_fn):
raise TypeError("`filtering_fn` should be callable, found ",
type(filtering_fn).__name__)
for reg_pl_name, reg_param_name in reg_pl_names_dict.items():
# decide whether a parameter is to be softly shared
# for those not softly shared at this timestep,
# but still have a placeholder for parameters to be shared
        # we just let the parameter be regularized by itself,
# or setting Loss = || param_i - param_i ||
# which effectively means Loss = 0
# there are obviously better ways to approach this
# e.g. using conditional graph, but this is a bit tricky
# to implement and not much speed gain anyway
if filtering_fn(reg_param_name):
changed_reg_param_name = "_".join(
[reg_model_name] + reg_param_name.split("_")[1:])
else:
# this will make reg_loss == 0
changed_reg_param_name = reg_param_name
# just to make sure the new name is within scopes
if (all_scopes and changed_reg_param_name.split("/")[0]
not in all_scopes):
raise ValueError("%s not in all scopes"
% changed_reg_param_name.split("/")[0])
# add regularization terms into feed_dict
feed_dict[reg_pl_name] = sess.run(changed_reg_param_name)
return feed_dict
|
def find_section(section, y):
"""Find the section closest to y"""
best_i=-1
dist=1e5
for i in range(len(section)):
d=min(abs(section[i][0]-y), abs(section[i][1]-y))
if d < dist:
best_i=i
dist=d
return best_i
|
def index_from_weekday(weekday):
"""
Returns a numeric index for day of week based on name of day
:param weekday: Name of day (e.g. 'Sunday', 'Monday', etc.)
:return: numeric index
"""
weekday_map = {
'Sunday': 0,
'Monday': 1,
'Tuesday': 2,
'Wednesday': 3,
'Thursday': 4,
'Friday': 5,
'Saturday': 6
}
return weekday_map.get(weekday)
|
def find_free_place(places_available, desired_place):
"""
Find place in sequence that is available and nearby.
This function checks if desired_place is in the list of places that are still available and returns
desired_place if so. If not, it returns the closest, higher place that is. If there are no higher ones,
it returns the lowest place in places_available.
:param places_available: sorted list of places in the sequence that are still available
:param desired_place: place to check
:return chosen place
"""
if desired_place in places_available:
places_available.remove(desired_place)
return desired_place
for k in places_available:
if k > desired_place: # assuming places_available is sorted
places_available.remove(k)
return k
chosen_place = min(places_available)
places_available.remove(min(places_available))
return chosen_place
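# Illustration with an arbitrary availability list; note the list is mutated as
# places are handed out.
available = [2, 5, 7]
assert find_free_place(available, 4) == 5   # 4 is taken, next higher free place
assert find_free_place(available, 8) == 2   # nothing higher, wraps to the lowest
assert available == [7]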
|
def makesingle(text):
""" Make multiple lines into a single line joined by a space. """
return " ".join(
line.strip() for line in text.splitlines() if len(line.strip())
)
|
def slice_indices(depths, breaks):
"""Return a list of tuples"""
d, b = depths, breaks
lst = [[n for n, j in enumerate(d) if j >= i[0] and j < i[1]] for i in b]
return(lst)
|
def zip_with(fn, xs, ys):
"""
Standard python zip with function. User can define custom zipping function instead of the standard tuple.
"""
return [fn(a, b) for (a, b) in zip(xs, ys)]
|
def get_bytes(s):
"""Returns the byte representation of a hex- or byte-string."""
if isinstance(s, bytes):
b = s
elif isinstance(s, str):
b = bytes.fromhex(s)
else:
raise TypeError("s must be either 'bytes' or 'str'!")
return b
|
def greatest_common_divisor(num_a: int, num_b: int) -> int:
"""
A method to compute the greatest common divisor.
Args:
num_a (int): The first number.
num_b (int): Second number
Returns:
The greatest common divisor.
"""
if num_b == 0:
return num_a
print(f">>> Value of num_b: {num_b}")
return greatest_common_divisor(num_b, num_a % num_b)
|
def get_vert_order_from_connected_edges(edge_vertices):
"""
.. note:: Should probably be moved to mampy.
"""
idx = 0
next_ = None
sorted_ = []
while edge_vertices:
edge = edge_vertices.pop(idx)
# next_ is a vert on the edge index.
if next_ is None:
next_ = edge[-1]
sorted_.append(next_)
for i in edge_vertices:
if next_ in i:
idx = edge_vertices.index(i)
next_ = i[-1] if next_ == i[0] else i[0]
break
return sorted_
|
def find_bandgaps(composition):
""" Give as list like ['GaAs'] or ['Si', 'GaPN']
outputs bandgap in eVs in the form that input_ouput_management.SampledBandgaps needs: [[Bandgap1, Bandgap1], [Bandgap2, Bandgap2], etc]
i.e. a list of lists.
Used by the main run function."""
    # this number isn't actually critical because we just use the absorption coefficient data anyway.
bandgap_dict = {'GaAs': 1.423, 'Si': 1.125, 'CIGS': 1.115, 'CIS': 1.016, 'CdTe': 1.488, 'CdTe monocrystalline': 1.514, 'GaNP': 1.96, 'perovskite triple cation': 1.59, 'perovskite MaPI': 1.58} # Double check the GaPN
# Common bandgaps: Si 1.125 so use sampling_range = [[1.125, 1.125]]. CdTe 1.514, CIS 1.016, CIGS 1.115, https://aip-scitation-org.ezproxy1.lib.asu.edu/doi/pdf/10.1063/1.4767120?class=pdf
# GaAs 1.423 Temperature dependence of semiconductor band gaps by O'Donnell
sampling_range = [[bandgap_dict[comp], bandgap_dict[comp]] for comp in composition]
return(sampling_range)
|
def has_variable(formula, variable):
"""
Function that detects if a formula contains an ID. It traverses the
recursive structure checking for the field "id" in the dictionaries.
:param formula: node element at the top of the formula
:param variable: ID to search for
:return: Boolean encoding if formula has id.
"""
if 'condition' in formula:
# Node is a condition, get the values of the sub classes and take a
# disjunction of the results.
return any([has_variable(x, variable) for x in formula['rules']])
return formula['id'] == variable
|
def initialize_dict_values(keys, default_value, dictionary=dict()):
"""Adds keys to the dictionary with a default value.
If no dictionary is provided, will create a new one.
:param keys: List of strings containing the dictionary keys
:param default_value: default value to be associated with the keys
:param dictionary: dictionary that will receive the keys
:return: updated dictionary
"""
    return {**dictionary, **dict.fromkeys(keys, default_value)}
|
def canon_name_file(param):
"""Convert name+filename to canonical form.
param a string of form "name,path_to_exe".
Return a tuple (name, path_to_exe).
Returns None if something went wrong.
"""
name_file = param.split(',')
    if len(name_file) == 2:
        return tuple(name_file)
return None
|
def transcribe(seq: str) -> str:
"""
transcribes DNA to RNA by replacing
all `T` to `U`
"""
#.replace() function iterates through letters of seq & replaces "T" with "U" (effectively transcribing the DNA sequence)
transcript = seq.replace("T", "U")
return transcript
|
def convert_modules_to_external_resources(buck_modules, modules_with_resources):
""" Converts modules to a map with resources to keep them outside of module jars """
result = {}
for module in modules_with_resources:
result["buck-modules-resources/{}".format(module)] = "{}_resources".format(buck_modules[module])
return result
|
def lensort(lst):
"""
>>> lensort(['python', 'perl', 'java', 'c', 'haskell', 'ruby'])
['c', 'perl', 'java', 'ruby', 'python', 'haskell']
"""
return sorted(lst, key=lambda x: len(x))
|
def unescape_latex_entities(text: str) -> str:
"""
Unescape certain latex characters.
:param text:
"""
# Limit ourselves as this is only used for maths stuff.
out = text
out = out.replace("\\&", '&')
return out
|
def find_triangles(edges):
"""
    :param edges: list of (src, dst, prob) edge tuples
    :return: list of the triangles found for each mapper
"""
mean_prob = sum([edge[2] for edge in edges]) / len(edges)
result = []
done = set()
# for each edge we have: src = edge[0], dst = edge[1], prob = edge[2]
for n in edges:
done.add(n)
nbrdone = set()
        # skip edges whose probability is less than mean_prob
if (n[2] < mean_prob):
continue
nbrs = tuple([edge for edge in edges if edge[0] == n[0] or edge[1] == n[0]])
if len(nbrs) < 2:
continue
for nbr in nbrs:
if nbr in done:
continue
nbrdone.add(nbr)
            # skip edges whose probability is less than mean_prob
if (nbr[2] < mean_prob):
continue
# third_node have as src or dst the node n or the node nbr. So we need to check for both of them
# first we check nbr[1] == third_node[0] and n[1] == third_node[1]
third_node = None
for edge in edges:
if nbr[1] == edge[0] and n[1] == edge[1]:
third_node = edge
break
if third_node is not None and (third_node not in done and third_node not in nbrdone):
result.append((n, nbr, third_node))
else:
                # otherwise look for an edge with edge[0] == n[1] and edge[1] == nbr[1]
for edge in edges:
if edge[0] == n[1] and edge[1] == nbr[1]:
third_node = edge
break
if third_node is not None and (third_node not in done and third_node not in nbrdone):
result.append((n, nbr, third_node))
return result
|
def sort_key(attrs, node):
"""
Sort key for sorting lists of nodes.
"""
acc = 0
for i in range(len(attrs)):
if attrs[i] in node.attrs:
acc += 10**i
return acc
|
def clean_str(str1: str) -> str:
"""
    Remove ')', ';' and tab characters from the string and strip surrounding single quotes.
    :param str1: the string to clean
    :return: the cleaned string
"""
str1 = str1.replace(")", "")
str1 = str1.replace(";", "")
str1 = str1.replace("\t", "")
str1 = str1.strip("'")
return str1
|
def pxci_to_bi(nstencil, N):
"""
    Generates a translation list converting x-center indices (starting at 0
    and including padding bins) into bin indices.
Parameters
----------
nstencil: integer
Number of stencil points used
N: integer
Number of bins
Returns
-------
list of bin indices.
"""
nsidep = (nstencil-1)//2
return list(range(nsidep)[::-1]) + list(range(N)) + list(
range(N-1, N-nsidep-1, -1))
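# Example: a 3-point stencil over 5 bins adds one padding bin on each side, and
# the padded x-center indices map back onto bin indices as follows.
assert pxci_to_bi(3, 5) == [0, 0, 1, 2, 3, 4, 4]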
|
def exist_files(filenames):
"""filenames: list of pathnames"""
from os import listdir
from os.path import exists,dirname,basename
directories = {}
exist_files = []
for f in filenames:
if not dirname(f) in directories:
try: files = listdir(dirname(f) if dirname(f) else ".")
except OSError: files = []
directories[dirname(f)] = files
exist_files += [basename(f) in directories[dirname(f)]]
return exist_files
|
def filter(dictionaries, filters=[]):
"""filter a list of dictionaries. return a filtered list."""
if not filters:
return dictionaries
if filters == ['Search']:
return dictionaries
filtered = []
for d in dictionaries:
tests = []
for f in filters:
test = 0
for k in d:
if f.lower() in d.get(k).lower():
test = 1
tests.append(test)
if not 0 in tests:
filtered.append(d)
return filtered
|
def toggle_popover_tab1(n, is_open):
"""
:return: Open pop-over callback for how to use button for tab 1.
"""
if n:
return not is_open
return is_open
|
def get_reverse_bits(bytes_array):
"""
Reverse all bits in arbitrary-length bytes array
"""
num_bytes = len(bytes_array)
formatstring = "{0:0%db}" % (num_bytes * 8)
bit_str = formatstring.format(int.from_bytes(bytes_array, byteorder='big'))
return int(bit_str[::-1], 2).to_bytes(num_bytes, byteorder='big')
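# Examples: reversing the 8 bits of 0x01 gives 0x80, and reversing the 16 bits
# of 0x0F00 gives 0x00F0.
assert get_reverse_bits(b'\x01') == b'\x80'
assert get_reverse_bits(b'\x0f\x00') == b'\x00\xf0'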
|
def file_extension(filename: str, file_type: str = "photo") -> str:
"""
Get the file extension of `filename`.
Args:
filename (str): Any filename, including its path.
file_type (str): "photo", "book", or "any"
Returns:
        Image (png, jp(e)g, ti(f)f), book (md, pdf, json) or *any* extension, or an empty string if there is no match.
"""
if "." in filename:
ext = filename.rsplit(".", 1)[1].lower()
if file_type == "photo" and ext in {"png", "jpg", "jpeg", "tiff", "tif"}:
return ext
if file_type == "book" and ext in {"md", "pdf", "json"}:
return ext
if file_type == "any":
return ext
return ""
|
def column(matrix, col):
"""Returns a column from a matrix given the (0-indexed) column number."""
res = []
for r in range(len(matrix)):
res.append(matrix[r][col])
return res
|
def _parse_format(format):
"""Return names, format."""
assert format is not None
if format.find(":") < 0:
assert " " not in format
return None, format
names, fmt = [], ""
for x in format.split():
name, type_ = x.split(":")
assert len(type_) == 1, "Invalid type: %s" % type_
names.append(name)
fmt += type_
assert len(names) == len(fmt)
return names, fmt
|
def calc_heuristic(neighbor_coord, target):
""" Returns hueristic cost. Chebyshev distance used here. """
x1 = neighbor_coord[0]
x2 = target[0]
y1 = neighbor_coord[1]
y2 = target[1]
return max(abs(y2 - y1), abs(x2 - x1))
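# Worked example with arbitrary coordinates: from (2, 3) to (7, 1) the
# Chebyshev distance is max(|7 - 2|, |1 - 3|) = 5.
assert calc_heuristic((2, 3), (7, 1)) == 5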
|
def remove_trailing_delimiters(url: str, trailing_delimiters: str) -> str:
"""Removes any and all chars in trailing_delimiters from end of url.
"""
if not trailing_delimiters:
return url
while url:
if url[-1] in trailing_delimiters:
url = url[:-1]
else:
break
return url
|
def countTotalCoresCondorStatus(status_dict):
"""
Counts the cores in the status dictionary
The status is redundant in part but necessary to handle
correctly partitionable slots which are
1 glidein but may have some running cores and some idle cores
@param status_dict: a dictionary with the Machines to count
    @type status_dict: dict
"""
count = 0
# The loop will skip elements where Cpus or TotalSlotCpus are not defined
for collector_name in status_dict:
for glidein_name, glidein_details in status_dict[collector_name].fetchStored().items():
# TotalSlotCpus should always be the correct number but
# is not defined pre partitionable slots
if glidein_details.get("PartitionableSlot", False):
count += glidein_details.get("TotalSlotCpus", 0)
else:
count += glidein_details.get("Cpus", 0)
return count
|
def format_labels(labels):
""" Convert a dictionary of labels into a comma separated string """
if labels:
return ",".join(["{}={}".format(k, v) for k, v in labels.items()])
else:
return ""
|
def _is_correct(ground_truth_label):
"""
Returns true if label is > 0, false otherwise
:param int ground_truth_label: label
:return: true or false
:rtype: bool
"""
if ground_truth_label > 0:
return True
return False
|
def _bcd2char(cBCD):
"""
Taken from the Nortek System Integrator
Manual "Example Program" Chapter.
"""
cBCD = min(cBCD, 153)
c = (cBCD & 15)
c += 10 * (cBCD >> 4)
return c
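# Example: the packed-BCD byte 0x25 decodes to 25 (low nibble 5 plus ten times
# the high nibble 2).
assert _bcd2char(0x25) == 25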
|
def get_insertion_losses_from_prefix(expressions, tx_prefix, rx_prefix):
"""Get the list of all the Insertion Losses from prefix.
Parameters
----------
expressions :
        list of net expressions; driver and receiver nets are selected from it
        by prefix, and the number of drivers must equal the number of receivers
tx_prefix :
prefix for TX (eg. "DIE")
rx_prefix :
prefix for RX (eg. "BGA")
Returns
-------
type
list of string representing Insertion Losses of excitations
"""
spar = []
trlist = [i for i in expressions if tx_prefix in i]
reclist = [i for i in expressions if rx_prefix in i]
if len(trlist)!= len(reclist):
print("TX and RX should be same length lists")
return False
for i, j in zip(trlist, reclist):
spar.append("S({},{})".format(i, j))
return spar
|
def _reg2int(reg):
"""Converts 32-bit register value to signed integer in Python.
Parameters
----------
reg: int
A 32-bit register value read from the mailbox.
Returns
-------
int
A signed integer translated from the register value.
"""
result = -(reg >> 31 & 0x1) * (1 << 31)
for i in range(31):
result += (reg >> i & 0x1) * (1 << i)
return result
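# Examples: 0xFFFFFFFF is the 32-bit two's-complement encoding of -1, while
# small positive register values pass through unchanged.
assert _reg2int(0xFFFFFFFF) == -1
assert _reg2int(5) == 5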
|
def get_single_key_value_pair(d):
"""
Get the key and value of a length one dictionary.
Parameters
----------
d : dict
Single element dictionary to split into key and value.
Returns
-------
tuple
of length 2, containing the key and value
Examples
--------
>>> d = dict(key='value')
>>> get_single_key_value_pair(d)
('key', 'value')
"""
assert isinstance(d, dict), f'{d}'
assert len(d) == 1, f'{d}'
return list(d.items())[0]
|
def get_vcf(config, var_caller, sample):
"""
input: BALSAMIC config file
output: retrieve list of vcf files
"""
vcf = []
for v in var_caller:
for s in sample:
vcf.append(
config["vcf"][v]["type"]
+ "."
+ config["vcf"][v]["mutation"]
+ "."
+ s
+ "."
+ v
)
return vcf
|
def check_if_won(board):
"""All combinations of winning boards"""
if board[0] == board[1] == board[2] != " ":
return True
if board[3] == board[4] == board[5] != " ":
return True
if board[6] == board[7] == board[8] != " ":
return True
if board[0] == board[3] == board[6] != " ":
return True
if board[1] == board[4] == board[7] != " ":
return True
if board[2] == board[5] == board[8] != " ":
return True
if board[0] == board[4] == board[8] != " ":
return True
if board[2] == board[4] == board[6] != " ":
return True
return False
|
def merge_dictionaries(d1, d2):
"""Merge dictionaries of string to set.
Return a dictionary that:
- Keys are the union of the keys in both dictionaries
- Values are the union of the sets of values in each dictionary
"""
for k, s in d2.items():
if k not in d1:
d1[k] = set()
d1[k].update(s)
return d1
|
def percentage(percent, whole):
"""
Returns percentage value.
"""
return (percent * whole) / 100.0
|
def apply_to_collection(data, fn):
"""
Applies a given function recursively to a tuple, list or dictionary of
things with arbitrary nesting (including none). Returns the new collection.
"""
if isinstance(data, (tuple, list)):
return type(data)(apply_to_collection(item, fn) for item in data)
elif isinstance(data, dict):
return type(data)((k, apply_to_collection(v, fn))
for k, v in data.items())
else:
return fn(data)
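# Example with an arbitrary nested collection: the function is applied to every
# leaf while the container types (dict, list, tuple) are preserved.
assert apply_to_collection({'a': [1, 2], 'b': (3, 4)}, lambda x: x * 10) == {'a': [10, 20], 'b': (30, 40)}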
|
def list_b_in_list_a(list_a, list_b):
""" list_b_in_list_a(list_a, list_b)
Whether list_b is subset of list_a
Parameters:
list_a: list
list_b: list
Return:
flag: bool
"""
return set(list_b) <= set(list_a)
|
def make_xml_name(attr_name):
"""
Replaces _ with -
"""
return attr_name.replace('_', '-')
|
def to_str(l, rdic, char=False):
"""Given a sequence of indices, returns the corresponding string
Arguments:
l {list} -- List of indices
rdic {dict} -- Dictionary mapping indices to strings
Keyword Arguments:
char {bool} -- Controls whether to add a whitespace between each token (default: {False})
Returns:
str -- Corresponding string
"""
if char:
sent = ''.join([rdic[i] for i in l])
else:
sent = ' '.join([rdic[i] for i in l])
if sent[-4:] == '</s>':
sent = sent[:-4]
return sent
|
def concatOverlapPep(peptide, j, prefixPeptide):
"""
Called by self.createOverlap(), this function takes two peptides which have an identical prefix/suffix sequence
and combines them around this like sequence. Eg: ABCDE + DEFGH = ABCDEFGH
:param peptide: the peptide with matching suffix sequence
:param j: the length of the matching suffix
    :param prefixPeptide: the peptide with matching prefix sequence.
:return concatPep: the peptide resulting from concatenation around the matching prefix/suffix.
"""
concatPep = peptide[0:j] + prefixPeptide
return concatPep
|
def get_operator_priority(char: str) -> int:
"""
Used to get operator priority, or to check if char is correct operator
:param char: Operator
:return: Operator priority (positive), or -1 if char is not correct operator
"""
if char == '!' or char == '#':
# '#' - unary minus
return 12
elif char == '*' or char == '/':
return 10
elif char == '+' or char == '-':
return 8
elif char == '>' or char == '<' \
or char == '>=' or char == '<=' \
or char == '==' or char == '!=':
return 6
elif char == '&' or char == '|':
return 5
elif char == '=':
return 4
elif char == '(' or char == ')':
return 2
else:
return -1
|
def EncodeAName(s):
"""
Handle * characters in MSI atom names
"""
if s.find('auto') == 0:
s = s[4:]
# If the atom name begins with *, then it is a wildcard
if s[:1] == '*': # special case: deal with strings like *7
return 'X' # These have special meaning. Throw away the integer.
# (and replace the * with an X)
# If the * character occurs later on in the atom name, then it is actually
# part of the atom's name. (MSI force fields use many strange characters in
# atom names.) Here we change the * to \* to prevent the atom name from
# being interpreted as a wild card in the rules for generating bonds,
# angles, dihedrals, and impropers.
return s.replace('*','star').replace('\'','prime').replace('"','dblpr')
# '*' is reserved for wildcards in moltemplate
# 'star' is a string that is unused in any
# of the force fields I have seen so far.
# Similarly quote characters (' and ") confuse
# moltemplate, so we replace them with something else.
# The following approach doesn't work (mistakenly thinks '\*' = wildcard)
#return s.replace('*','\\*') # this prevents ttree_lex.MatchesAll()
# # from interpreting the '*' as a wildcard
|