content stringlengths 42 6.51k |
|---|
def vector_mx_mult(v, m):
    """Pre-multiply matrix ``m`` by row vector ``v`` (compute v @ m).

    Raises ValueError when len(v) does not match the number of rows of m.
    Returns a list with one entry per column of m.
    """
    n_rows = len(m)
    if len(v) != n_rows:
        raise ValueError('vectormatmult mismatched rows')
    n_cols = len(m[0])
    product = []
    for col in range(n_cols):
        acc = 0
        for row in range(n_rows):
            acc += v[row] * m[row][col]
        product.append(acc)
    return product
def first_sample_of_frame(frame,
                          frame_shift_in_samples,
                          window_size_in_samples):
    """
    Returns the sample-index of the first sample of the frame with index
    'frame'. Caution: this may be negative; we treat out-of-range samples
    as zero.
    Analogous to kaldi10's FirstSampleOfFrame in feat/feature-window.h
    Args:
        frame (int): The frame index >= 0.
        frame_shift_in_samples (int): The frame shift in samples.
        window_size_in_samples (int): The window size in samples.
    Returns:
        int: The first sample of this frame (caution: may be negative).
    """
    # Locate the center of the frame, then step back half a window.
    center = frame * frame_shift_in_samples + frame_shift_in_samples // 2
    start = center - window_size_in_samples // 2
    # Indirectly verifies that the inputs were ints.
    assert isinstance(start, int)
    return start
def jaccard_coefficient(x,y):
    """
    Jaccard index used to display the similarity between sample sets.
    :param x: first sample collection (converted to a set)
    :param y: second sample collection (converted to a set)
    :return: |x & y| / |x | y|, the similarity of x and y
    NOTE(review): raises ZeroDivisionError when both inputs are empty.
    """
    return len(set(x) & set(y)) / len(set(x) | set(y))
def scale_to_range(min_max_old, element, min_max_new=(0, 10)):
    """Linearly rescale *element* from ``min_max_old`` into ``min_max_new``.

    Args:
        min_max_old: Original range of the data, [min, max].
        element: Number within ``min_max_old`` that will be rescaled.
        min_max_new: Target range of the data (default (0, 10); an
            immutable tuple now, avoiding the mutable-default pitfall).

    Returns:
        float: ``element`` mapped into ``min_max_new``.
    """
    new_span = min_max_new[1] - min_max_new[0]
    old_span = min_max_old[1] - min_max_old[0]
    # Bug fix: the original omitted the new range's lower bound, so any
    # target range not starting at 0 produced shifted results.
    return min_max_new[0] + ((element - min_max_old[0]) * new_span) / old_span
def have_readme(answer):
    """Return the 'check_readme' flag from *answer* (truthy when the project has a README)."""
    readme_flag = answer['check_readme']
    return readme_flag
def primes_till_N(N: int):
    """Generate all primes less than N via the Sieve of Eratosthenes.

    Args:
        N (int): exclusive upper bound.

    Returns:
        list: prime numbers below N (empty when N < 3).
    """
    if N < 2:
        # Guard so the sieve allocation below never gets a negative size.
        return []
    is_candidate = [True] * N
    primes = []
    for x in range(2, N):
        if is_candidate[x]:
            primes.append(x)
            # Start striking at x*x: smaller multiples were already struck
            # by smaller primes (the original redundantly started at x).
            for multiple in range(x * x, N, x):
                is_candidate[multiple] = False
    return primes
def p_spam_given_word(word_prob):
    """Use Bayes' theorem to compute P(spam | message contains word).

    word_prob: a (word, prob_if_spam, prob_if_not_spam) triple.
    """
    _word, spam_p, ham_p = word_prob
    return spam_p / (spam_p + ham_p)
def build_own_answer_detection_prompt(question, answer):
"""Builds the prompt to check if the model can distinguish between own answers vs answers generated by someone else.
Args:
answer: String: the model should guess if the answer was generated by the model itself
question: String: the question, from which the aforementioned answer was generated
Returns:
String: the complete prompt that will be given to a model
"""
res = f"""
Some time ago, we asked you the following question:\n\n '{question}'\n
Unfortunately, we mixed up the answers from several respondents,
and now we are not sure which of the answers is yours.
For example, we have the following answer:
\n\n '{answer}'\n
Is it your answer? [yes/no]
"""
return res |
def cleanSection(word,onlywhitespace=False):
    """Cleans the arguments of the config function call for parsing.

    Maintains a stack of currently open quote characters in *quoted*;
    a closing quote pops the stack. Consecutive commas get an explicit
    empty-string argument ('""') inserted between them.
    NOTE(review): characters outside quotes that are neither quotes nor
    commas appear to be discarded — confirm this is intended.
    NOTE(review): *onlywhitespace* is accepted but never used.
    """
    newword=str()
    quoted=str()
    for char in word:
        if quoted != str():
            # Inside a quoted region: does this char close the newest quote?
            if char == quoted[len(quoted)-1]:
                quoted = quoted[:len(quoted)-1]
                # Collapse an immediately repeated quote character.
                if newword[len(newword)-1] == char:
                    newword = newword[:len(newword)-1]
                else:
                    newword+=char
            else:
                # A different quote character opens a nested quoted region.
                if char in "\"\'":
                    quoted+=char
                newword+=char
        elif char in "\"\'":
            # Opening quote outside any quoted region.
            quoted+=char
            newword+=char
        elif char == ",":
            # Bare except tolerates an empty newword (a leading comma).
            try:
                if newword[len(newword)-1] == char:
                    newword+="\"\""
                newword+=char
            except:
                pass
    # Trailing comma also gets an explicit empty argument appended.
    if len(newword) > 1 and newword[len(newword)-1] == ",":
        newword+="\"\""
    return newword
def term_order(figure):
    """ Return the order of terms a,b,c in a syllogism by figure.

    Unknown figures yield None.
    >>> term_order("1")
    ['ab', 'bc']
    """
    orders = {
        "1": ["ab", "bc"],
        "2": ["ba", "cb"],
        "3": ["ab", "cb"],
        "4": ["ba", "bc"],
    }
    return orders.get(figure)
def get_tags_prod_reacts_plain_reaction(eq_reaction):
    """Split a reaction mapping into product and reactant (tag, stoich) lists.

    Products have positive stoichiometry, reactants negative; zero entries
    are dropped from both lists.
    """
    products = []
    reactants = []
    for tag, stoich in eq_reaction.items():
        if stoich > 0:
            products.append((tag, stoich))
        elif stoich < 0:
            reactants.append((tag, stoich))
    return products, reactants
def search_linear(xs, target):
    """Return the index of the first occurrence of *target* in *xs*, or -1 if absent."""
    index = 0
    for value in xs:
        if value == target:
            return index
        index += 1
    return -1
def _is_number(s) -> str:
"""Returns whether the parameter is a number or string
:param s:
:return:
"""
if (isinstance(s, (float, int)) or (s.isdigit() if hasattr(s, 'isdigit') else False)) and not isinstance(s, bool):
return 'number'
return 'string' |
def _is_positive_float(item):
"""Verify that value is a positive number."""
if not isinstance(item, (int, float)):
return False
return item > 0 |
def set_date_range(start_date, end_date):
    """Build the single-element date-range payload.

    @start_date: string date "yyyy-mm-dd"
    @end_date: string date "yyyy-mm-dd"
    """
    date_range = {'startDate': start_date, 'endDate': end_date}
    return [date_range]
def get_experience_for_next_level(level: int) -> int:
    """Gets the amount of experience needed to advance from *level* to the next one.

    :param level: The current level.
    :return: The experience needed to advance to the next level.
    """
    # Quadratic XP curve: 50*level^2 - 150*level + 200.
    return 50 * level ** 2 - 150 * level + 200
def parse_records(database_records):
    """
    Parses database records into a clean json-like structure.

    Args:
        database_records: a list of db.Model instances,
            e.g. ``parse_records(User.query.all())``.

    Returns:
        A list of dictionaries, one per record, with SQLAlchemy's internal
        ``_sa_instance_state`` key stripped out, like::

            [{"id": 1, "title": "Book 1"}, {"id": 2, "title": "Book 2"}]
    """
    parsed_records = []
    for record in database_records:
        # Copy so we don't mutate the live instance's __dict__ (the
        # original deleted the key in place and left a debug print behind).
        parsed_record = dict(record.__dict__)
        # pop() with a default tolerates detached/plain objects that lack
        # the SQLAlchemy bookkeeping key (del raised KeyError).
        parsed_record.pop("_sa_instance_state", None)
        parsed_records.append(parsed_record)
    return parsed_records
def completeList(alist):
    """
    Take a list of strings and extend it with pluralised ('s'-suffixed)
    and lower/upper/title-cased variants, returning a sorted,
    de-duplicated list.
    """
    base = list(alist)
    base += [word + 's' for word in base]  # add an s on the end, just in case
    variants = set(base)
    for word in base:
        variants.add(word.lower())
        variants.add(word.upper())
        variants.add(word.title())
    return sorted(variants)
def extract_last_modified_user(gfile):
    """Extract (email, displayName) of a gfile's last modifying user.

    https://developers.google.com/drive/v2/reference/files#resource-representations
    :param gfile: file metadata dict.
    :return: '' when no 'lastModifyingUser' is present; otherwise an
        (email, name) tuple with '' for any missing field.
    """
    try:
        user = gfile['lastModifyingUser']
    except KeyError:
        return ''
    return (user.get('emailAddress', ''), user.get('displayName', ''))
def markersdates(markers):
    """Return the date field (index 4) of each marker in *markers*."""
    dates = []
    for marker in markers:
        dates.append(marker[4])
    return dates
def selection(unsorted):
    """Sort *unsorted* in place with selection sort and return it.

    Args:
        unsorted: list of mutually comparable items.

    Returns:
        The same list object, sorted ascending.
    """
    if len(unsorted) < 2:
        return unsorted
    for count in range(len(unsorted) - 1):
        # Find the index of the smallest remaining element.  (The original
        # left two debug print() calls inside this inner loop.)
        minimum = count
        for each in range(count, len(unsorted)):
            if unsorted[each] < unsorted[minimum]:
                minimum = each
        unsorted[count], unsorted[minimum] = unsorted[minimum], unsorted[count]
    return unsorted
def long_substr(strgs):
    """
    Returns a list with the longest common substring sequences from @strgs
    Based on: http://stackoverflow.com/questions/2892931/longest-common-substring-from-more-than-two-strings-python

    NOTE(review): the loop destructively removes each found common substring
    from working copies of the inputs and repeats until the next candidate
    is shorter than the best so far, so multiple equally-long sequences can
    be collected.
    """
    # Copy the list: the loop below mutates its elements.
    strgs = strgs[:]
    if len(strgs) > 1 and len(strgs[0]) > 0:
        substrs = []
        substr = None
        maxlen = 1
        while True:
            if substr is not None and len(substr) >= maxlen:
                # A max length sequence: keep it and remove one occurrence
                # from every string before searching again.
                substrs.append(substr)
                maxlen = len(substr)
                for i, s in enumerate(strgs):
                    strgs[i] = s.replace(substr, '', 1)
            elif substr is not None and len(substr) < maxlen:
                # Not the first run and no longer the longest: stop.
                break
            # Search all substrings of the first string for the longest one
            # contained in every string.
            substr = ''
            for i in range(len(strgs[0])):
                for j in range(len(strgs[0]) - i + 1):
                    if j > len(substr) and all(strgs[0][i:i+j] in x for x in strgs):
                        substr = strgs[0][i:i+j]
        return substrs
    elif len(strgs) == 1:
        # Single input: it is its own longest common substring.
        return [strgs[0]] if len(strgs[0]) > 0 else []
    else:
        return []
def unif_var(a, b):
    """Variance of the continuous uniform distribution on [a, b]: (b - a)^2 / 12."""
    width = a - b
    return width * width / 12
def bytes_from_int(x: int):
    """
    Convert a non-negative integer to its minimal big-endian byte string.

    Note: 0 converts to b'' (zero bytes are needed to represent it).
    """
    n_bytes = (x.bit_length() + 7) // 8
    return x.to_bytes(n_bytes, byteorder='big')
def _quadrature_trapezoid(x1, x2, f, norm_func):
"""
Composite trapezoid quadrature
"""
x3 = 0.5*(x1 + x2)
f1 = f(x1)
f2 = f(x2)
f3 = f(x3)
s2 = 0.25 * (x2 - x1) * (f1 + 2*f3 + f2)
round_err = 0.25 * abs(x2 - x1) * (float(norm_func(f1))
+ 2*float(norm_func(f3))
+ float(norm_func(f2))) * 2e-16
s1 = 0.5 * (x2 - x1) * (f1 + f2)
err = 1/3 * float(norm_func(s1 - s2))
return s2, err, round_err |
def is_finish(lst: list) -> bool:
    """
    To be used with the filter function, keep only the bouts that
    did NOT go the distance in a 3 or 5 round bout.
    NB: This will also get rid of doctor stoppages between rounds.
    """
    went_the_distance = lst[3] == "5:00" and lst[2] in ("3", "5")
    return not went_the_distance
def is_valid_color(color):
    """Checks that a given string represents a valid hex colour ("#RRGGBB").

    :param str color: The color to check.
    :rtype: ``bool``"""
    if not color or len(color) != 7 or color[0] != "#":
        return False
    # All six remaining characters must be hex digits (either case).
    return all(char.upper() in "0123456789ABCDEF" for char in color[1:])
def get_opts(options):
    """
    Normalise an options object into an (args, kwargs) pair.

    Args:
        options: a tuple of positional args, a dict of keyword args, or a
            2-tuple of (args, kwargs-dict).
    Returns:
        args (tuple): positional options.
        kwargs (map): keyword arguments.
    Raises:
        ValueError: for any other type.
    """
    if isinstance(options, dict):
        return (), options
    if not isinstance(options, tuple):
        raise ValueError("Options object expected to be either pair of (args, kwargs) or only args/kwargs")
    if len(options) == 2 and isinstance(options[-1], dict):
        # Already an (args, kwargs) pair.
        return options
    return options, {}
def _get_sub_prop(container, keys, default=None):
"""Get a nested value from a dictionary.
This method works like ``dict.get(key)``, but for nested values.
Args:
container (Dict):
A dictionary which may contain other dictionaries as values.
keys (Iterable):
A sequence of keys to attempt to get the value for. Each item in
the sequence represents a deeper nesting. The first key is for
the top level. If there is a dictionary there, the second key
attempts to get the value within that, and so on.
default (Optional[object]):
Value to returned if any of the keys are not found.
Defaults to ``None``.
Examples:
Get a top-level value (equivalent to ``container.get('key')``).
>>> _get_sub_prop({'key': 'value'}, ['key'])
'value'
Get a top-level value, providing a default (equivalent to
``container.get('key', default='default')``).
>>> _get_sub_prop({'nothere': 123}, ['key'], default='not found')
'not found'
Get a nested value.
>>> _get_sub_prop({'key': {'subkey': 'value'}}, ['key', 'subkey'])
'value'
Returns:
object: The value if present or the default.
"""
sub_val = container
for key in keys:
if key not in sub_val:
return default
sub_val = sub_val[key]
return sub_val |
def is_none_or_white_space(s: str) -> bool:
    """Judge whether *s* is None, empty, or contains only whitespace.

    s: source str.
    return: True if it's None or effectively empty, otherwise False.
    """
    # str.strip() already trims both ends, so the original's extra
    # rstrip() call and its post-strip None check were dead code.
    return s is None or s.strip() == ''
def remove_from_string(string, letters):
    """Given a string and a list of individual letters, returns a new string
    which is the same as the old one except all occurrences of those letters
    have been removed from it."""
    # Single O(len(string)) pass with a set lookup, instead of the
    # original's nested loops of repeated str.replace() calls (O(n*m)).
    to_remove = set(letters)
    return ''.join(ch for ch in string if ch not in to_remove)
def fib_memo(n, memo=None):
    """
    Memoized recursive Fibonacci: computes F(n) in O(n) time with O(n)
    space by caching intermediate results in *memo*.

    Raises TypeError for non-int n and ValueError for negative n.
    """
    if not isinstance(n, int):
        raise TypeError("n must be an int")
    if n < 0:
        raise ValueError("n must be non-negative")
    if n < 2:
        # Base cases F(0)=0, F(1)=1 need no cache.
        return n
    if memo is None:
        memo = {}
    if n in memo:
        return memo[n]
    result = fib_memo(n - 1, memo) + fib_memo(n - 2, memo)
    memo[n] = result
    return result
def quote(val):
    """Wrap *val* in single quotes."""
    return "'{}'".format(val)
def is_in_buid(br_location, ue_location):
    """
    Args:
        br_location: (x_start, x_end, y_start, y_end) building bounds
        ue_location: (x, y) tuple, the location of the UE
    Returns:
        False/True: whether the UE is inside the building
    """
    x, y = ue_location[0], ue_location[1]
    within_x = br_location[0] <= x <= br_location[1]
    within_y = br_location[2] <= y <= br_location[3]
    return within_x and within_y
def get_complement_prop(numerator: float, denominator: float):
    """
    Get the complement of a fraction: 1 - numerator/denominator.

    Args:
        numerator: Numerator for calculation
        denominator: Denominator for calculation (must be nonzero)
    """
    fraction = numerator / denominator
    return 1.0 - fraction
def phi_psi_omega_to_abego(phi: float, psi: float, omega: float) -> str:
    """Classify a residue's backbone torsion angles into an ABEGO letter.

    :param phi: The phi angle.
    :param psi: The psi angle.
    :param omega: The omega angle (defaults to trans, 180, when None).
    :return: One of "A", "B", "G", "E", "O", or "X" when undefined.

    From Buwei
    https://wiki.ipd.uw.edu/protocols/dry_lab/rosetta/scaffold_generation_with_piecewise_blueprint_builder
    """
    # Idiom fix: compare against None with `is`, not `==`.
    if psi is None or phi is None:
        return "X"
    if omega is None:
        omega = 180
    if abs(omega) < 90:
        return "O"  # cis omega
    if phi > 0:
        return "G" if -100.0 <= psi < 100 else "E"
    return "A" if -75.0 <= psi < 50 else "B"
def _dict2dict(adj_dict):
"""Takes a dictionary based representation of an adjacency list
and returns a dict of dicts based representation.
"""
item = adj_dict.popitem()
adj_dict[item[0]] = item[1]
if not isinstance(item[1], dict):
new_dict = {}
for key, value in adj_dict.items():
new_dict[key] = {v: {} for v in value}
adj_dict = new_dict
return adj_dict |
def get_ids(iterable):
    """Retrieve the ``id`` attribute of each object in *iterable*."""
    collected = []
    for obj in iterable:
        collected.append(obj.id)
    return collected
def level_number(level):
    """Resolve a logging level given as a name or a number.

    Level: string or number e.g. 'DEBUG'=10,'INFO'=20,'WARN'=30,'ERROR'=40.
    Unknown names fall back to logging.DEBUG; numbers pass through.
    """
    if isinstance(level, str):
        import logging
        try:
            # Narrow except: the original's bare `except` also swallowed
            # unrelated errors (e.g. KeyboardInterrupt).
            return getattr(logging, level)
        except AttributeError:
            return logging.DEBUG
    return level
def remove_empty_elements(spec):
    """
    Removes falsy elements from the dictionary and all sub-dictionaries
    and sub-lists; 'none' keys are whitelisted so artifact specs like
    {"archive": {"none": {}}} survive.
    """
    whitelist = ['none']
    if isinstance(spec, list):
        cleaned = (remove_empty_elements(item) for item in spec)
        return [item for item in cleaned if item]
    if not isinstance(spec, dict):
        return spec
    # Two passes like the original: drop falsy inputs, recurse, then drop
    # entries whose cleaned value became falsy (unless whitelisted).
    kids = {k: remove_empty_elements(v) for k, v in spec.items() if v or k in whitelist}
    return {k: v for k, v in kids.items() if v or k in whitelist}
def topological_sort(items, partial_order):
    """
    Perform topological sort.

    :param items: a list of items to be sorted.
    :param partial_order: a list of pairs. If pair (a,b) is in it, it
        means that item a should appear before item b.
    :return: a list of the items in one of the possible orders, or None
        if partial_order contains a loop.
    """
    def add_node(graph, node):
        """Add a node to the graph if it does not already exist."""
        if node not in graph:
            graph[node] = [0]  # 0 = number of arcs coming into this node.

    def add_arc(graph, fromnode, tonode):
        """Add an arc fromnode -> tonode; both end nodes must already exist."""
        graph[fromnode].append(tonode)
        # Update the count of incoming arcs in tonode.
        graph[tonode][0] += 1

    # Step 1 - build a directed graph with an arc a->b for each input pair.
    # Representation: node -> [incoming_count, successor, successor, ...],
    # e.g. {'a':[0,'b','c'], 'b':[1], 'c':[1]} is the graph c <-- a --> b.
    # Node *names* (not references) are stored, so cyclic inputs do not
    # create reference loops for the GC.
    graph = {}
    for v in items:
        add_node(graph, v)
    for a, b in partial_order:
        add_arc(graph, a, b)

    # Step 2 - find all roots (nodes with zero incoming arcs).
    roots = [node for node, nodeinfo in graph.items() if nodeinfo[0] == 0]

    # Step 3 - repeatedly emit a root and remove it from the graph.
    # Removing a node may turn some of its children into new roots.
    # (renamed from `sorted`, which shadowed the builtin)
    ordered = []
    while roots:
        # Any root may be emitted; pop() picks one in O(1).
        root = roots.pop()
        ordered.append(root)
        for child in graph[root][1:]:
            graph[child][0] -= 1
            if graph[child][0] == 0:
                roots.append(child)
        del graph[root]
    if graph:
        # Leftover nodes all have incoming arcs: the input contains a loop.
        return None
    return ordered
def hisat2_index_from_prefix(prefix):
    """
    Given a prefix, return the eight corresponding hisat2 index file
    names (``<prefix>.1.ht2`` through ``<prefix>.8.ht2``).
    """
    return [f'{prefix}.{i}.ht2' for i in range(1, 9)]
def convert_verbose(verbose):
    """Convert the results of the --verbose flag into a list of logger names.

    None (flag absent)      -> []  (info-level logging)
    []   (bare --verbose)   -> ['lbrynet', 'lbryum'] (debug lbrynet + info lbryum)
    list of names           -> returned unchanged
    """
    if verbose is None:
        return []
    return ['lbrynet', 'lbryum'] if verbose == [] else verbose
def B_icm(r, ne_fn, B_ref=10., r_ref=0., eta=0.5, **kwargs):
    """
    Magnetic field [muG] in the ICM, scaling as a power of the electron density.

    r : distance from the center of the cluster [kpc]
    ne_fn : function for the electron number density [cm^-3]
    B_ref : reference value of the magnetic field [muG] (default: 10.)
    r_ref : reference value of the radius [kpc] (default: 0.)
    eta : power law of B_icm as a function of ne (default: 0.5)
    kwargs : other keyword arguments forwarded to 'ne_fn'
    """
    density_ratio = ne_fn(r, **kwargs) / ne_fn(r_ref, **kwargs)
    return B_ref * density_ratio ** eta
def determineWhatIsDeleted(ref, alt):
    """Return the subsequence of *ref* deleted to produce *alt*.

    Must take the deletion from the ends.
    Returns the string '-1' when the deletion cannot be located at either
    end (sentinel kept for callers; not an integer).
    """
    altIdx = ref.find(alt)
    if altIdx == 0:
        # the tail is deleted
        # AGGTCTG to A
        # TCCTACACCTACT to TCCTACA so TCCTACA(CCTACT) (want)
        endDelIdx = altIdx + len(alt)
        return ref[endDelIdx:]
    # try dropping the first nuc from both
    # and recompute the match
    # the head is deleted after chopping off first T
    # TCCTACACCTACT to TCCTACT (alt)
    # T(CCTACA)CCTACT (want)
    ref, alt = ref[1:], alt[1:]
    altIdx = ref.find(alt)
    # The trimmed alt must match exactly at the tail of the trimmed ref;
    # anything else means the deletion is not a clean head deletion.
    if -1 == altIdx or altIdx+len(alt) != len(ref):
        return '-1'
    return ref[:altIdx]
def trim_postcode(pc):
    """Convert a dirty postcode to a valid one but remain the same if invalid.

    Parameters
    ----------
    pc : str
        input postcode
    Returns
    -------
    str
        An uppercased, space-normalised 7-character postcode, or the
        input unchanged when its compact form is not 5-7 characters.
    """
    compact = pc.strip().upper().replace(" ", "")
    if not 5 <= len(compact) <= 7:
        return pc
    # Pad between outward and inward (final 3) parts to a fixed width of 7.
    outward, inward = compact[:-3], compact[-3:]
    return outward + " " * (7 - len(compact)) + inward
def _TruncatedString(string: str, n: int = 80) -> str:
"""Return the truncated first 'n' characters of a string.
Args:
string: The string to truncate.
n: The maximum length of the string to return.
Returns:
The truncated string.
"""
if len(string) > n:
return string[:n - 3] + '...'
else:
return string |
def elk_index(elk_index_name):
    """ Index setup for ELK Stack bulk install: build the bulk-action header. """
    return {
        'index': {
            '_index': elk_index_name,
            '_type': elk_index_name,
        }
    }
def map_sequence(seq, sequence_map, unk_item_id):
    """ Transform a splitted sequence of items into another sequence of items
    according to the rules encoded in the dict sequence_map; unknown items
    map to unk_item_id.
    seq: iterable
    sequence_map: dict
    unk_item_id: int"""
    return [sequence_map.get(item, unk_item_id) for item in seq]
def average(values):
    """Computes the arithmetic mean of a list of numbers.
    >>> average([20, 30, 70])
    40.0
    """
    total = sum(values, 0.0)
    return total / len(values)
def synthesize_dsdna(name, sequence):
    """
    Synthesizes a longer stretch of dsDNA, up to 3 kb in size.

    Parameters
    ----------
    name : str
    sequence : str
        Lowercase DNA sequence containing only "a", "c", "t", "g".

    Returns
    -------
    dict
        The synthesis request payload.

    Raises
    ------
    ValueError
        If the sequence contains any other character.

    Notes
    -----
    .. [1] https://www.transcriptic.com/platform/#ordering_assembly
    """
    # Raise instead of assert: asserts are stripped under `python -O`,
    # which would silently disable this input validation.
    if not all(base in "actg" for base in sequence):
        raise ValueError("sequence must contain only lowercase a/c/t/g")
    request = {
        "type": "synthesize",
        "name": name,
        "data": {
            "sequence": sequence,
        },
    }
    return request
def get_readable_lines_per_file(lines_per_file: int):
    """Get the human readable form for a number of lines per file.

    Parameters
    ----------
    lines_per_file : int
        number of lines per file; must reduce to a whole number of the
        unit it lands on (e.g. 2000 -> "2m"... no: 2000 -> "2k").

    Returns
    -------
    str
        e.g. "500", "1k", "2m", "3b".

    Raises
    ------
    ValueError
        If the value does not reduce to a whole number of units
        (the original used `assert`, which vanishes under `python -O`).
    """
    units = ["", "k", "m", "b"]
    reduced = lines_per_file
    unit_idx = 0
    # Stop at the last known unit so values >= 1e12 no longer IndexError.
    while reduced >= 1000 and unit_idx < len(units) - 1:
        reduced = reduced / 1000
        unit_idx += 1
    if reduced != int(reduced):
        raise ValueError("Please choose a better number")
    return f"{int(reduced)}{units[unit_idx]}"
def change_rate_extractor(change_rates, initial_currency, final_currency):
    """ Function which tests directions of exchange factors and returns the
    appropriate conversion factor (None when neither direction is quoted).

    Example
    -------
    >>> change_rate_extractor(
    ...     change_rates = {'USD/EUR': 1.14},
    ...     initial_currency = 'EUR',
    ...     final_currency = 'USD',
    ... )
    1.14
    """
    direct = '%s/%s' % (initial_currency, final_currency)
    inverse = '%s/%s' % (final_currency, initial_currency)
    if direct in change_rates:
        # Quoted in the initial->final direction: invert the factor.
        return pow(change_rates[direct], -1.)
    if inverse in change_rates:
        return change_rates[inverse]
def gcd(n1, n2):
    """
    Non-recursive Euclidean algorithm: return the GCD (greatest common
    divisor) of the positive integers n1 and n2.
    """
    if n1 < n2:
        n1, n2 = n2, n1
    remainder = n1 % n2
    while remainder:
        n1, n2 = n2, remainder
        remainder = n1 % n2
    return n2
def make_shape_str(expected_shape) -> str:
    """Render a per-dimension shape description as '(a, b, ...)'.

    Each element of *expected_shape* may be None ('any'), an int, a bare
    dimension name (str), or a (name, amount) pair with amount possibly None.

    Args:
        expected_shape (tuple[any]): the expected shape of the thing
    Returns:
        str: a pretty description of the expected shape
    """
    parts = []
    for dim in expected_shape:
        if dim is None:
            parts.append('any')
        elif isinstance(dim, int):
            parts.append(str(dim))
        elif isinstance(dim, str):
            parts.append(f'{dim}=any')
        else:
            name, amount = dim
            parts.append(f'{name}=any' if amount is None else f'{name}={amount}')
    return '(' + ', '.join(parts) + ')'
def _split_by_length(msg, size):
"""
Splits a string into a list of strings up to the given size.
::
>>> _split_by_length('hello', 2)
['he', 'll', 'o']
:param str msg: string to split
:param int size: number of characters to chunk into
:returns: **list** with chunked string components
"""
return [msg[i:i + size] for i in range(0, len(msg), size)] |
def fader(val, perc1, perc2, perc3, color1, color2, color3):
    """
    Accepts a decimal (0.1, 0.5, etc) and slots it into one of three
    colour buckets by threshold: above perc3 -> color3, above perc2 ->
    color2, otherwise color1. (perc1 is accepted but unused.)
    """
    if val > perc3:
        chosen = color3
    elif val > perc2:
        chosen = color2
    else:
        chosen = color1
    return chosen
def rtrd_str2list(str):  # parameter name shadows builtin; kept for API compatibility
    """Format a Route Target string into a list.

    Falsy input -> [], a list passes through, otherwise split on commas.
    """
    if isinstance(str, list):
        return str
    if not str:
        return []
    return str.split(',')
def remove_empty_entities(d):
    """
    Recursively remove empty lists, empty dicts, empty strings, or None
    elements from a dictionary (or list).
    :param d: Input structure.
    :return: Structure with all empty placeholders removed; non-container
        inputs are returned unchanged.
    """
    def keep(value):
        # Placeholders to drop; note 0 and False are deliberately kept.
        return not (value is None or value == {} or value == [] or value == '')

    if isinstance(d, list):
        cleaned = (remove_empty_entities(item) for item in d)
        return [item for item in cleaned if keep(item)]
    if isinstance(d, dict):
        cleaned_items = ((key, remove_empty_entities(value)) for key, value in d.items())
        return {key: value for key, value in cleaned_items if keep(value)}
    return d
def _stringify_token(token):
"""
>>> token = {'label': 'TEST', 'values': ('one', 'two')}
>>> _stringify_token(token)
"{'label': 'TEST', 'values': ('one', 'two')}"
"""
return str(
"{'label': '" + str(token['label']) + "', 'values': " + str(tuple([str(val) for val in token['values']])) + "}"
) |
def _listrepr(x):
""" Represent a list in a short fashion, for easy representation. """
try:
len(x)
except TypeError:
return None
else:
return '<%d element%s>' % (len(x), 's'[:len(x) >= 2]) |
def is_iterable(var):
    """ check if the variable is iterable, with strings excluded
    :param var:
    :return bool:
    >>> is_iterable('abc')
    False
    >>> is_iterable(123.)
    False
    >>> is_iterable((1, ))
    True
    >>> is_iterable(range(2))
    True
    """
    if isinstance(var, str):
        return False
    return hasattr(var, '__iter__')
def to_class_path(cls):
    """Return the 'module:ClassName' path for a class.

    The result can be reversed later with ``from_class_path`` to recover
    the class.

    :returns: string, e.g. 'kitsune.search.models:Record'
    """
    return f"{cls.__module__}:{cls.__name__}"
def is_csres_colname(colname):
    """Returns 'True' if 'colname' is a C-state residency CSV column name
    (starts with 'CC' or 'PC', ends with '%', longer than 3 chars)."""
    if len(colname) <= 3:
        return False
    return colname.startswith(("CC", "PC")) and colname.endswith("%")
def is_equal_to_as_set(l1, l2):
    """ return true if two lists contain the same content
    :param l1: first list
    :param l2: second list
    :return: whether lists match
    """
    s1, s2 = set(l1), set(l2)
    # Equal intersection size and equal union size <=> equal sets.
    return len(s1 & s2) == len(s1) and len(s1 | s2) == len(s1)
def cipher(text, shift, encrypt=True):
    """
    Encrypt and decrypt text with a Caesar cipher over the 52-letter
    lower+upper alphabet; non-letter characters pass through unchanged.

    Parameters
    ----------
    text : string
        A string to transform.
    shift : int
        Number of alphabet positions to shift each letter.
    encrypt : boolean
        True to encrypt (shift forward), False to decrypt (shift back).

    Returns
    -------
    string
        The encrypted or decrypted text.

    Examples
    --------
    >>> cipher('abc', 1)
    'bcd'
    >>> cipher('bcd', 1, False)
    'abc'
    """
    alphabet = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    step = shift if encrypt else -shift
    out = []
    for ch in text:
        pos = alphabet.find(ch)
        if pos == -1:
            out.append(ch)
        else:
            out.append(alphabet[(pos + step) % len(alphabet)])
    return ''.join(out)
def clock_hours(value):
    """
    Wrap *value* in an {'hours': value} dict.

    NOTE(review): the original docstring read "2 bytes: seconds: 0 to 59",
    which does not match the 'hours' key returned here — it looks
    copy-pasted from a sibling field decoder; confirm the intended field.
    """
    return {'hours': value}
def drop_ventrally_signed(feat_names):
    """
    EM: drops the ventrally signed features.

    For every '<name>_abs' feature present, the plain '<name>' (ventrally
    signed) counterpart is removed.

    Param:
        feat_names = list of features names
    Return:
        filtered_names = list (set order, not input order) without the
        ventrally signed names
    """
    signed = {name.replace('_abs', '') for name in feat_names if '_abs' in name}
    return list(set(feat_names) - signed)
def external_trigger_level(session, Type='Real64', RepCap='', AttrID=1150009, buffsize=0, action=['Get', '']):
    """[External Trigger Level (volts)]
    The external trigger level, in volts, when Trigger.Source is External. Range of allowable values -1.0 V to 1.0 V. Reset value: 1.0 V.

    NOTE(review): this is a pure attribute-descriptor stub — it simply
    echoes its arguments back as a tuple, presumably for a generic
    Get/Set dispatcher elsewhere; confirm against the driver framework.
    NOTE(review): the mutable default ``action=['Get', '']`` is shared
    across calls; safe only while callers never mutate it.
    """
    return session, Type, RepCap, AttrID, buffsize, action
def get_start_stops(feat, cns):
    """
    return a range dependent on the position of the cns
    relative to the feature

    feat: mapping with 'start'/'end' keys; cns: 2-sequence of coordinates.
    Returns a (lo, hi) tuple, except the last case which returns a sorted
    2-element list (kept as-is for compatibility with callers).
    """
    lo, hi = (cns[1], cns[0]) if cns[0] > cns[1] else (cns[0], cns[1])
    if feat['start'] < lo and feat['end'] > hi:
        # CNS lies entirely inside the feature (intronic CNS).
        return lo, hi
    feat_mid = (feat['start'] + feat['end']) / 2.
    cns_mid = (lo + hi) / 2.
    if feat_mid < cns_mid:
        return min(feat['end'], lo), max(feat['end'], lo)
    return sorted([hi, feat['start']])
def compute_optalpha(norm_r, norm_Fty, epsilon, comp_alpha=True):
    """
    Compute the optimal alpha for WRI.

    Parameters
    ----------
    norm_r: Float
        Norm of the residual
    norm_Fty: Float
        Norm of the adjoint wavefield squared
    epsilon: Float
        Noise level
    comp_alpha: Bool
        Whether to compute the optimal alpha or just return 1
    """
    if not comp_alpha:
        return 1
    # Guard clauses: no residual above the noise level, or a degenerate
    # adjoint norm, means no step.
    if norm_r <= epsilon or norm_Fty <= 0:
        return 0
    return norm_r * (norm_r - epsilon) / norm_Fty
def MAPk(y_true, y_pred, k=20):
    """
    Average precision at k (default k=20).

    Precision at i is the fraction of correct items among the first i
    recommendations; hits accumulate over y_pred and the precision sum is
    normalised by min(|actual|, k).
    """
    relevant = set(y_true)
    hits = 0
    precision_sum = 0
    for rank, item in enumerate(y_pred, start=1):
        if item in relevant:
            hits += 1
            precision_sum += hits / rank
    return precision_sum / min(len(relevant), k)
def csv_to_list(s):
    """Split a comma-separated string into a list (spaces removed first)."""
    cleaned = s.replace(" ", "")
    return cleaned.split(",")
def _match_android(crosstool_top, cpu):
"""_match_android will try to detect wether the inbound crosstool is the
Android NDK toolchain. It can either be `//external:android/crosstool` or be
part of the `@androidndk` workspace. After that, translate Android CPUs to
Go CPUs."""
if str(crosstool_top) == "//external:android/crosstool" or \
crosstool_top.workspace_name == "androidndk":
platform_cpu = {
"arm64-v8a": "arm64",
"armeabi-v7a": "arm",
"x86": "386",
"x86_64": "amd64",
}.get(cpu)
if platform_cpu:
return "android_{}_cgo".format(platform_cpu)
return None |
def add_modules_to_metadata(modules, metadata):
    """
    Record module membership in *metadata*.

    modules is a list of lists of otus; metadata is a dictionary of
    dictionaries where outer dict keys are features, inner dict keys are
    metadata names and values are metadata values. Each OTU gets a
    'module' entry holding the index of the module containing it;
    missing features are created on the fly.
    """
    for module_index, otus in enumerate(modules):
        for otu in otus:
            metadata.setdefault(str(otu), {})['module'] = module_index
    return metadata
def base36decode(base36_string):
    """Parse *base36_string* (digits 0-9, letters a-z, case-insensitive) as an integer."""
    return int(base36_string, base=36)
def get_fitness_score(subject, goal):
    """
    In this case, subject and goal are equal-length lists of numbers.
    Return the total absolute difference between the subject and the goal
    (lower = fitter).
    """
    return sum(abs(goal[i] - subject[i]) for i in range(len(subject)))
def preprocess_text(message):
    """ Delete some artifacts from text

    Keyword arguments:
    message -- a plain sentence or paragraph
    """
    # Order matters: "\r\n" must collapse to one space before the lone
    # "\r" and "\n" replacements run.
    for artifact in ("\t", "\r\n", "\r", "\n"):
        message = message.replace(artifact, " ")
    return message
def lp(v1, v2=None, p=2):
    """Find the L_p norm. If passed one vector, find the length of that vector.
    If passed two vectors, find the length of the difference between them.

    Vectors are dicts mapping dimension -> coordinate; keys missing from
    one vector are treated as 0.
    """
    # Bug fix: `if v2:` treated an *empty* dict like "no second vector";
    # test identity against None instead. Also use .get(k, 0) so the two
    # dicts need not share the exact same key set (the original raised
    # KeyError for non-overlapping keys).
    if v2 is not None:
        vec = {k: abs(v1.get(k, 0) - v2.get(k, 0)) for k in (v1.keys() | v2.keys())}
    else:
        vec = v1
    return sum(v ** p for v in vec.values()) ** (1.0 / p)
def indexing(smaller, larger, M):
    """Convert (smaller, larger) x-y indexes into the flattened index of
    the upper triangular part of an M x M matrix."""
    row_offset = smaller * (M - 1) - smaller * (smaller - 1) // 2
    return larger + row_offset
def bu_to_mm(x):
    """Convert blender units to mm (1 BU = 1000 mm); None passes through unchanged."""
    if x is None:
        return None
    return x * 1000.0
def get_column2position(lineno: int, text_original: str, text_dedented: str):
    """Map each character index of the dedented text back to its actual
    (line, column) place in the original document.

    The text supplied to the parser is dedented and may contain line breaks,
    so both the reported lineno and character position can be wrong; this
    mapping corrects them.
    NOTE lineno is in basis 1.
    """
    # Width of the indentation stripped by the dedent.
    dedent_width = len(text_original) - len(text_original.lstrip())
    mapping = {}
    extra_lines = 0
    column = 0
    for index, character in enumerate(text_dedented):
        mapping[index] = (lineno + extra_lines, dedent_width + column)
        column += 1
        if character in ("\n", "\r"):
            # NOTE: this would not work for the old \n\r mac standard.
            extra_lines += 1
            column = 0
    return mapping
def count_rec(nexts, current, part, weight, length):
    """
    INPUT:
    - ``nexts, current, part`` -- skew partitions
    - ``weight`` -- non-negative integer list
    - ``length`` -- integer
    TESTS::
        sage: from sage.combinat.ribbon_tableau import count_rec
        sage: count_rec([], [], [[2, 1, 1], []], [2], 2)
        [0]
        sage: count_rec([[0], [1]], [[[2, 1, 1], [0, 0, 2, 0]], [[4], [2, 0, 0, 0]]], [[4, 1, 1], []], [2, 1], 2)
        [1]
        sage: count_rec([], [[[], [2, 2]]], [[2, 2], []], [2], 2)
        [1]
        sage: count_rec([], [[[], [2, 0, 2, 0]]], [[4], []], [2], 2)
        [1]
        sage: count_rec([[1], [1]], [[[2, 2], [0, 0, 2, 0]], [[4], [2, 0, 0, 0]]], [[4, 2], []], [2, 1], 2)
        [2]
        sage: count_rec([[1], [1], [2]], [[[2, 2, 2], [0, 0, 2, 0]], [[4, 1, 1], [0, 2, 0, 0]], [[4, 2], [2, 0, 0, 0]]], [[4, 2, 2], []], [2, 1, 1], 2)
        [4]
        sage: count_rec([[4], [1]], [[[4, 2, 2], [0, 0, 2, 0]], [[4, 3, 1], [0, 2, 0, 0]]], [[4, 3, 3], []], [2, 1, 1, 1], 2)
        [5]
    """
    if current == []:
        return [0]
    if nexts != []:
        # sum(i) is equivalent to the original redundant sum(j for j in i).
        return [sum(sum(i) for i in nexts)]
    return [len(current)]
def Strip(mac_oui):
    """Strip extraneous characters out of MAC/OUI"""
    # Delete ':', '-' and ' ' in a single translate pass instead of
    # chained .replace() calls.
    return mac_oui.translate(str.maketrans("", "", ":- "))
def cross(A, B):
    """Cross product of elements in A and elements in B."""
    combined = []
    for left in A:
        for right in B:
            combined.append(left + right)
    return combined
def get_moves(left, cur_move, limit):
    """Get possible moves from left of possible length limit.

    A move is recorded as a sorted tuple so that equal combinations
    collapse inside the returned set.
    """
    # Base case: the move is full, or there is nothing left to add.
    if not left or len(cur_move) == limit:
        return {tuple(sorted(cur_move))}
    collected = set()
    if cur_move:
        collected.add(tuple(sorted(cur_move)))
    for candidate in left:
        collected |= get_moves(left - {candidate},
                               cur_move | {candidate},
                               limit)
    return collected
def no_set_task_bar(on=0):
    """Hide the Taskbar and Start Menu icon.

    DESCRIPTION
        This restriction removes the Taskbar and Start Menu icon from the
        Control Panel, and the Properties item from the Start Menu context
        menu. Returns a Windows registry (.reg) snippet.
    COMPATIBILITY
        All Windows versions.
    MODIFIED VALUES
        NoSetTaskbar : dword : 00000000 = disabled;
                               00000001 = restriction enabled.
    """
    value = "00000001" if on else "00000000"
    return ('[HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\'
            'CurrentVersion\\Policies\\Explorer]\n'
            '"NoSetTaskbar"=dword:' + value)
def _sorted(dictionary):
"""Returns a sorted list of the dict keys, with error if keys not sortable."""
try:
return sorted(dictionary)
except TypeError:
raise TypeError('tree only supports dicts with sortable keys.') |
def get_threat_detail(threats):
    """
    Iterate over threat details from the response and retrieve details of threats.
    :param threats: list of threats from response
    :return: list of detailed elements of threats
    :rtype: list
    """
    # (output key, response key) pairs, in output order.
    key_pairs = (
        ('Title', 'title'),
        ('Category', 'category'),
        ('Severity', 'severity'),
        ('Description', 'description'),
        ('Cve', 'cves'),
        ('Source', 'source'),
        ('Published', 'published'),
        ('Updated', 'updated'),
        ('ThreatLastTrendingOn', 'threatLastTrendingOn'),
        ('Trending', 'trending'),
    )
    # 'cves' defaults to a fresh list per threat; everything else to ''.
    return [
        {out: threat.get(src, [] if src == 'cves' else '')
         for out, src in key_pairs}
        for threat in threats
    ]
def lorentzian(x_value, amplitude, center, width):
    """
    Evaluate a lorentzian function with the given parameters at x_value.
    """
    # Peak value is `amplitude` at x_value == center; `width` is the
    # half-width at half-maximum.
    offset = x_value - center
    width_sq = width ** 2
    return amplitude * (width_sq / (offset ** 2 + width_sq))
def remove_punc(comment):
    """Takes a string and removes all punctuation except for ':' and then
    returns the result.

    '-' is replaced by a space; the other listed punctuation characters are
    dropped outright. Characters not in the table (including ':' and ',')
    pass through unchanged.
    """
    # One C-level translate pass replaces the per-character loop.
    table = str.maketrans("-", " ", "!()[]{};'\"\\<>./?@#$%^&*_~")
    return comment.translate(table)
def node_type(node):
    """Classify a cluster node name into its hardware type.

    Node numbering scheme is as follows:
    [c1-c309] [c321-c478] old compute nodes (Sandy Bridge)
    [c579-c628],[c639-c985] new compute nodes (Haswell)
    Special nodes:
    c309-c320 old big memory nodes (Sandy Bridge)
    c629-c638 new big memory nodes (Haswell)
    c577,c578 old huge memory nodes (HP Proliant DL560)
    c986-c989 new huge memory nodes (Dell R930)

    Returns the type name as a string, or None for unrecognized nodes.
    """
    name = node.strip()
    # Names must look like 'c<number>'; the round-trip check rejects
    # forms like 'c01' that never match 'c' + str(x).
    if not name.startswith('c'):
        return None
    digits = name[1:]
    if not digits.isdecimal():
        return None
    number = int(digits)
    if str(number) != digits:
        return None
    # Ranges checked in the original priority order; in particular c309
    # falls into the compute range before the big-memory range, as before.
    if 1 <= number <= 309 or 321 <= number <= 478:
        return 'SandyBridge'
    if 579 <= number <= 628 or 639 <= number <= 985:
        return 'Haswell'
    if 310 <= number <= 320:
        return 'SandyBridgeBig'
    if 629 <= number <= 638:
        return 'HaswellBig'
    if 577 <= number <= 578:
        return 'OldHuge'
    if 986 <= number <= 989:
        return 'NewHuge'
    return None
def _evalPrefix(cad):
"""
Internal function
"""
pot={'k':1e3,'M':1e6,'G':1e9,'T':1e12,'P':1e15,'E':1e18
,'m':1e-3,'u':1e-6,'n':1e-9,'p':1e-12,'f':1e-15,'a':1e-18}
if cad in pot:
return pot[cad]
return None |
def update_label2(label2, isubcase):
    """Strips off 'SUBCASE' text from label2 to simplify the output keys
    (e.g., displacements).

    Parameters
    ----------
    label2 : str
        raw label, possibly containing a 'SUBCASE ...' marker and a
        '$'-delimited trailing comment
    isubcase : int
        the subcase id expected to appear in label2

    Returns
    -------
    str
        label2 with the subcase marker removed; '' when the label consisted
        only of the subcase marker.  Labels with no recognized marker
        (e.g. 'SYM 401', 'REPCASE 108', 'NONLINEAR') pass through.
    """
    # strip off any comments
    # 'SUBCASE 1 $ STAT'
    # 'SUBCASE 1 $ 0.900 P'
    label2 = label2.split('$')[0].strip()
    if label2:
        subcase_expected = 'SUBCASE %i' % isubcase
        subcase_equal_expected = 'SUBCASE = %i' % isubcase
        if subcase_expected == label2:
            # label is exactly the expected marker -> drop it entirely
            label2 = ''
        elif label2 == 'NONLINEAR':
            pass
        elif subcase_expected in label2:
            # 'SUBCASE 10' in 'NONLINEAR SUBCASE 10'
            # excise the marker substring, keep the surrounding text
            nchars = len(subcase_expected)
            ilabel_1 = label2.index(subcase_expected)
            ilabel_2 = ilabel_1 + nchars
            label2_prime = label2[:ilabel_1] + label2[ilabel_2:]
            label2 = label2_prime.strip()
        elif subcase_equal_expected in label2:
            # 'SUBCASE = 10'
            slabel = label2.split('=')
            assert len(slabel) == 2, slabel
            label2 = ''
        elif label2.startswith('NONLINEAR '):
            # 'NONLINEAR SUBCASE 1'
            # sline =['', ' SUBCASE 1']
            sline = label2.split('NONLINEAR ', 1)
            label2 = 'NONLINEAR ' + sline[1].strip()
        elif 'PVAL ID=' in label2 and 'SUBCASE=' in label2:
            # 'PVAL ID= 1 SUBCASE= 1'
            # '            PVAL ID=       1                       SUBCASE=       1'
            # keep only the normalized 'PVAL ID=<n>' part, drop the SUBCASE part
            ilabel2 = label2.index('SUBCASE')
            slabel = label2[:ilabel2].strip().split('=')
            assert slabel[0] == 'PVAL ID', slabel
            label2 = slabel[0].strip() + '=' + slabel[1].strip()
        elif 'SUBCASE' in label2:
            # 'SUBCASE 10'
            # 'SUBCASE = 10'
            # 'SUBCASE = 1 SEGMENT = 1'
            # 'SUBCASE = 1 HARMONIC = 0 ,C'
            slabel = label2.split('$')[0].strip().split()
            # 'SUBCASE 10'
            # 'SUBCASE = 10'
            # 'SUBCASE = 1 SEGMENT = 1'
            # 'SUBCASE = 1 HARMONIC = 0 ,C'
            if len(slabel) == 2:
                # bare 'SUBCASE <n>' (possibly a different subcase id)
                label2 = ''
            elif len(slabel) == 3 and slabel[1] == '=':
                label2 = ''
            else:
                assert slabel[0] == 'SUBCASE', slabel
                # keep the trailing qualifier, e.g. 'SEGMENT=1'
                label2 = slabel[3] + '=' + slabel[5]
        elif 'SUBCOM' in label2:
            # NOTE: this rebinds the isubcase parameter with the SUBCOM id
            subcom, isubcase = label2.split()
            label2 = ''
        elif 'SYM' in label2 or 'REPCASE' in label2:
            # 'SYM 401'
            # 'REPCASE 108'
            pass
        #else:
            #print('label2 = %r' % label2)
            #print('subcasee = %r' % subcase_expected)
    return label2
def _cal_num_each_train_labal(num_labeled_train, category):
"""calculate the number of data of each class from the whole labeled data size and the number of class"""
return int(num_labeled_train / len(category)) |
def key_by(items, key, children_key=False):
    """
    Index a list of dicts by the value of given key - forced to lowercase.
    Set children_key to e.g. 'children' to recurse down a tree whose nodes
    carry child lists under that attribute; children get a 'parent_key'
    entry added pointing at their parent's index key.
    """
    index = {}

    def walk(nodes, parent_key=None):
        for node in nodes:
            if parent_key:
                node["parent_key"] = parent_key
            value = node[key]
            # ints are used verbatim; strings are normalized to lowercase
            node_key = value if isinstance(value, int) else value.lower()
            index[node_key] = node
            if node.get(children_key):
                walk(node[children_key], node_key)

    walk(items)
    return index
def is_not_round(neuron_max_x_y, coef=2):
    """Receives the length/width of the object, and returns True if the object is not round.

    Args:
        neuron_max_x_y: (x_len, y_len) pair of the object's extents; both
            must be non-zero.
        coef: the x/y or y/x maximal ratio allowed for a "round" object.

    Returns:
        bool: True when either aspect ratio strictly exceeds coef.
    """
    x_len, y_len = neuron_max_x_y
    # Return the comparison directly instead of if/return True/return False.
    return (x_len / y_len) > coef or (y_len / x_len) > coef
def dictify(dict_, dict_type=dict):
    """Turn nested dicts into nested dict_type dicts.
    E.g. turn OrderedDicts into dicts, or dicts into OrderedDicts.
    Non-dict inputs are returned unchanged; lists are converted element-wise.
    """
    if not isinstance(dict_, dict):
        return dict_

    def _convert(value):
        # Recurse into nested dicts and into dicts hiding inside lists.
        if isinstance(value, dict):
            return dictify(value, dict_type=dict_type)
        if isinstance(value, list):
            return [dictify(item, dict_type=dict_type) for item in value]
        return value

    return dict_type((k, _convert(dict_[k])) for k in dict_)
def _2str(s):
"""Convert s to a string. Return '.' if s is None or an empty string."""
return '.' if s is None or str(s) == '' else str(s) |
def _output_list(data):
"""Returns a list rather than a numpy array, which is unwanted."""
return [0, 1, 2, 3], {} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.