content stringlengths 42 6.51k |
|---|
def check_b4_adding(word, gpat, ypat, unused):
    """
    Decide whether a candidate word violates the known Wordle-style clues.

    Returns True (reject the word) when any of these hold:
    - a green-pattern position has a letter and the word differs there
    - a yellow-pattern letter matches the word at that same position,
      or is absent from the word entirely
    - the word contains a letter known to be unused
    @param word String word to check
    @param gpat String Green pattern
    @param ypat String yellow pattern
    @param unused String unused letters
    @return True/False
    """
    # Green clues: a known letter must appear at exactly that position.
    for pos, green in enumerate(gpat):
        if green != '' and green != word[pos]:
            return True
    # Yellow clues: the letter is in the word but NOT at this position.
    for pos, yellows in enumerate(ypat):
        for ltr in yellows:
            if ltr == word[pos] or ltr not in word:
                return True
    # Eliminated letters must not appear anywhere in the word.
    return any(letter in unused for letter in word)
def flatten(d: dict, parent_key: str = '', sep: str = '_'):
    """Flatten a nested dict into a single level, joining keys with *sep*.

    Args:
        d: the (possibly nested) dict to flatten
        parent_key: prefix carried down from enclosing levels
        sep: separator placed between joined key segments
    Returns:
        A flat dict whose keys are the sep-joined paths of the input.
    """
    flat = {}
    for key, value in d.items():
        compound_key = f"{parent_key}{sep}{key}" if parent_key else key
        if isinstance(value, dict):
            # Recurse into sub-dicts, merging their flattened entries.
            flat.update(flatten(value, compound_key, sep=sep))
        else:
            flat[compound_key] = value
    return flat
def ual_grouper_base(basename: str) -> str:
    """
    Build the Grouper ``ismemberof`` stem string that UA Libraries use for
    patron management in LDAP queries.

    Note that this only returns a string, it is not RFC 4512
    compatible. See :func:`requiam.ldap_query.ual_ldap_query`

    Usage:
        grouper_base = ldap_query.ual_grouper_base('ual-faculty')
        > "ismemberof=arizona.edu:dept:LBRY:pgrps:ual-faculty"

    :param basename: Grouper group name basename.
        Options are: ual-dcc, ual-faculty, ual-hsl, ual-staff,
        ual-students, ual-grads, ual-ugrads
    :return: ``ismemberof`` attribute
    """
    stem = "arizona.edu:dept:LBRY:pgrps"
    return "ismemberof=" + stem + ":" + basename
def rotate_left(num: int, num_size: int, shift_bits: int) -> int:
    """
    Rotate (no-carry) a num_size-bit number left by shift_bits bits. See
    https://en.wikipedia.org/wiki/Bitwise_operation#Rotate_no_carry

    :param num: the number to rotate
    :param num_size: the size of the number in bits
    :param shift_bits: the number of bits the number is rotated by
    :return: the rotated number
    """
    width_mask = (1 << num_size) - 1
    # The top shift_bits bits wrap around to the bottom.
    wrapped = (num >> (num_size - shift_bits)) & ((1 << shift_bits) - 1)
    return ((num << shift_bits) & width_mask) | wrapped
def get_table_data(soup, text, default=None):
    # type (BeautifulSoup.tag, str) -> str
    """Attempts to retrieve text data from a table based on the header value.

    Looks up a <th> whose text equals *text* and returns the text of the
    following <td>; falls back to *default* (or "") on any missing piece.
    """
    fallback = "" if default is None else default
    if soup is None or text is None:
        return fallback
    header = soup.find("th", text=text)
    if header is None:
        return fallback
    cell_text = header.findNext("td").text
    # Empty/None cell text also falls back to the default.
    return cell_text if cell_text else fallback
def to_yellow(string):
    """Wrap *string* in ANSI escape codes so terminals render it yellow (8-bit).

    Returns:
        str: the string in yellow color
    """
    yellow_code = "\u001b[33m"
    reset_code = "\u001b[0m"
    return f"{yellow_code}{string}{reset_code}"
def sphinx_encode(string: str) -> str:
    """Replace tilde, hyphen and single quotes with their LaTeX commands."""
    # None of the replacement texts contain another target character,
    # so the substitution order does not matter.
    latex_commands = {
        "~": "\\textasciitilde{}",
        "-": "\\sphinxhyphen{}",
        "'": "\\textquotesingle{}",
    }
    for char, command in latex_commands.items():
        string = string.replace(char, command)
    return string
def make_url_absolute(url):
    """
    Make sure a URL starts with '/'.
    """
    return url if url.startswith('/') else '/' + url
def is_bool(x):
    """Tests if something is a boolean (bool cannot be subclassed, so an
    identity check against the two singletons is equivalent to isinstance)."""
    return x is True or x is False
def _format_addresses(addresses):
"""Return a formatted string of networks and IP addresses
:param addresses: a dict of network IP addresses
:rtype: a string of semi-colon separated networks and IP addresses
"""
if not addresses:
return ""
output = []
for (network, addr_list) in addresses.items():
if not addr_list:
continue
addresses_csv = ', '.join(
[addr['addr'] for addr in addr_list]
)
group = "%s=%s" % (network, addresses_csv)
output.append(group)
return '; '.join(sorted(output)) |
def _edges_to_adjacency_list(edges):
"""Convert a list of edges into an adjacency list."""
adj = {}
for i, j in edges:
if i in adj: # pragma: no cover
ni = adj[i]
else:
ni = adj[i] = set()
if j in adj:
nj = adj[j]
else:
nj = adj[j] = set()
ni.add(j)
nj.add(i)
return adj |
def median(data):
    """Return the median of a non-empty list of numbers.

    Sorts a copy of *data* (the input is not mutated). For an odd count the
    middle element is returned as-is; for an even count the mean of the two
    middle values is returned as a float.

    Fixes over the original: removed the debug print() side effects, removed
    the redundant single-element branch, and used the sorted copy's length
    consistently (the original mixed len(data) and len(new_data)).
    """
    ordered = sorted(data)
    mid = len(ordered) // 2
    if len(ordered) % 2 == 1:
        return ordered[mid]
    return (ordered[mid] + ordered[mid - 1]) / 2.0
def unpack(a_dict, states):
    """
    For implementational simplicity, V, RPE, etc, values for all RL models
    implemented here are stored in a dict, keyed on each state. This
    function uses states to unpack that dict into a list whose order
    matches states.

    Note: Null (0) states are silently dropped (a 0.0 placeholder is
    emitted for them instead of consuming a dict entry).
    """
    from copy import deepcopy
    # Work on a copy: the .pop(0) below must not destroy the caller's dict.
    remaining = deepcopy(a_dict)
    values = []
    for state in states:
        if state in (0, '0'):
            values.append(0.)
        else:
            values.append(remaining[state].pop(0))
    return values
def unsigned_to_varint(num: int) -> bytes:
    """
    Serialize an unsigned int (0 .. 2**64-1) as a Bitcoin-style varint:
    a one-byte value up to 0xfc, otherwise a 0xfd/0xfe/0xff marker followed
    by a 2-, 4- or 8-byte little-endian integer.
    """
    if num < 0 or num > 0xffffffffffffffff:
        raise OverflowError(f"can't convert {num} to varint")
    # (marker, inclusive upper bound, payload width in bytes)
    encodings = (
        (b'', 0xfc, 1),
        (b'\xfd', 0xffff, 2),
        (b'\xfe', 0xffffffff, 4),
        (b'\xff', 0xffffffffffffffff, 8),
    )
    for marker, upper, width in encodings:
        if num <= upper:
            return marker + num.to_bytes(width, 'little')
def propeller_efficiency(thrust, shaft_power, velocity):
    """Efficiency of converting shaft power to thrust power
    (thrust power = thrust * velocity)."""
    thrust_power = thrust * velocity
    return thrust_power / shaft_power
def diffuse_ratio(DIFF_data, ghi_data):
    """Calculate the diffuse ratio K = diffuse / global horizontal irradiation.

    Parameters
    ----------
    DIFF_data : array_like
        Diffuse horizontal irradiation data
    ghi_data : array_like
        global horizontal irradiation data array

    Returns
    -------
    K : float
        diffuse_ratio

    References
    ----------
    .. [1] S. Younes, R.Claywell and el al,"Quality control of solar radiation
       data: present status and proposed new approaches", energy 30 (2005),
       pp 1533 - 1549.
    """
    return DIFF_data / ghi_data
def env2human(action):
    """Translate a Gym env action code into its human-readable form.

    The env only accepts 0, 1, 2 as states, which is confusing for humans:
        -1 == sell == 0 in env
         0 == hold == 1 in env
         1 == buy  == 2 in env

    Parameters
    ----------
    action : int
        Action that the environment accepts

    Returns
    -------
    int
        Human readable action
    """
    shifted = action - 1
    return int(shifted)
def generate_config_expected_result(config) -> dict:
    """Generate expected result for config.

    Derives visibility/editability/save/alert flags from the config's
    'ui_options', 'read_only', 'required' and 'default' fields.
    """
    invisible = config['ui_options']['invisible']
    read_only = config['read_only']
    # A required field with an empty default cannot be saved and must alert.
    missing_required = config['required'] and not config['default']
    expected = {
        'visible': not invisible,
        'editable': not read_only,
        'content': config['default'],
        'save': not missing_required,
        'alerts': bool(missing_required),
    }
    # Read-only or invisible fields can never be saved, regardless of above.
    if read_only or invisible:
        expected['save'] = False
    expected['visible_advanced'] = bool(
        config['ui_options']['advanced'] and not invisible
    )
    return expected
def wang_ryzin_reg(h, Xi, x):
    """
    A version of the Wang-Ryzin kernel for nonparametric regression:
    h raised to the absolute distance between Xi and x.
    Suggested by Li and Racine in [1] ch.4
    """
    distance = abs(Xi - x)
    return h ** distance
def are_configurations_equal(configuration1, configuration2, keys):
    """
    Compare two configurations. They are considered equal if they hold the
    same values for all keys.

    :param configuration1: the first configuration in the comparison
    :param configuration2: the second configuration in the comparison
    :param keys: the keys to use for comparison
    :return: boolean indicating if configurations are equal or not
    """
    return all(configuration1[key] == configuration2[key] for key in keys)
def format_training_confs(config):
    """
    Set up configs for training/validation.

    'num_per_class' index 0 holds the training count, index 1 the
    validation count; all other fields are shared.
    """
    def _split_config(split_index):
        # split_index: 0 -> training, 1 -> validation
        return {
            'batch_size': config['train']['batch_size'],
            'num_per_class': config['train']['num_per_class'][split_index],
            'num_classes': config['train']['num_classes'],
            'size': config['size'],
        }
    return (_split_config(0), _split_config(1))
def x_win_condition(board_list):
    """Condition for 'X' to win: any row, column or diagonal of the
    1-indexed 3x3 board is fully occupied by 'X'."""
    WINNING_LINES = (
        (1, 2, 3), (1, 4, 7), (1, 5, 9), (2, 5, 8),
        (3, 6, 9), (3, 5, 7), (4, 5, 6), (7, 8, 9),
    )
    return any(
        all(board_list[cell] == "X" for cell in line)
        for line in WINNING_LINES
    )
def character_correction(sequences_list, min_length, null_character='N'):
    """
    Some perturbation based interpretability methods (e.g. lime)
    might introduce null characters which are not viable input.
    These are by default replaced with 'N' (for any character).
    The sequence is padded to min_length characters.
    """
    corrected = []
    for sequence in sequences_list:
        cleaned = sequence.replace('\x00', null_character)
        corrected.append(cleaned.ljust(min_length, null_character))
    return corrected
def zero_separation(n):
    """Zero separation in normalized r based on radial order n (1/n^2)."""
    return 1.0 / (n * n)
def get_last_revision_in_list(revision_list):
    """Gets the last revision in list."""
    return revision_list[len(revision_list) - 1]
def get_rect_size_from_quad(quad):
    """
    Find the (width, height) of a quad image, averaging the two opposite
    edge lengths along each axis.

    :param quad: sequence of four (x, y) points
    :return: (sz_x, sz_y) tuple of floats
    """
    p0, p1, p2, p3 = quad[0], quad[1], quad[2], quad[3]
    # Average the two horizontal spans and the two vertical spans.
    sz_x = (abs(p1[0] - p0[0]) + abs(p3[0] - p2[0])) / 2.
    sz_y = (abs(p2[1] - p0[1]) + abs(p3[1] - p1[1])) / 2.
    return sz_x, sz_y
def is_true(arg) -> bool:
    """Return whether *arg* represents a True value.

    Strings are truthy when (case-insensitively) one of yes/true/on/1;
    bools pass through; ints are truthy when non-zero. Anything else raises.
    """
    if isinstance(arg, str):
        return arg.lower() in {'yes', 'true', 'on', '1'}
    if isinstance(arg, bool):
        return arg
    # Checked after bool, since bool is an int subclass.
    if isinstance(arg, int):
        return bool(arg)
    raise Exception(f'Invalid boolean arg: {arg}')
def count_receive(link_counters):
    """Find the receive count in the counters returned by a CommunicationLink.

    Returns the first 'receive' value found among the per-processor counter
    dicts, or None (implicitly) when no processor reports one.
    """
    for counters in link_counters.values():
        if 'receive' in counters:
            return counters['receive']
    return None
def newton_sqrt(x):
    """Return the square root of *x* using Newton's method.

    :param x: non-negative number
    :return: the square root (within ~1e-9 absolute convergence)
    :raises ValueError: for negative input (the iteration cannot converge
        to a real root there; the original looped forever / oscillated)

    Bug fix: the original raised ZeroDivisionError for x == 0 because the
    update step divides by the current estimate.
    """
    if x < 0:
        raise ValueError("newton_sqrt() requires a non-negative argument")
    if x == 0:
        return 0.0  # guard: the update step below divides by val
    val = x
    while True:
        last = val
        val = (val + x / val) * 0.5
        if abs(val - last) < 1e-9:
            return val
def apply_overlay(base, overlay, append=False):
    """
    Apply an overlay dict onto a base dict of identical structure, walking
    at most four levels of nesting.

    :param base: Dict of yaml to apply changes to. Gets mutated
    :param overlay: Dict of changes. Identical structure to base
    :param append: True to append, false to replace values
    :return: base dict with changes applied. Mutation of base input dict
    :raises NotImplementedError: if the overlay nests dicts deeper than
        four levels
    :raises KeyError: in append mode, when a key present in overlay is
        missing from base (the `+=` reads the existing value first)
    """
    # Each nesting level repeats the same pattern: a non-dict value is a
    # leaf and is applied (replaced or +='d); a dict value descends one
    # level further, down to the hard-coded depth limit of four.
    for k1, v1 in overlay.items():
        if not isinstance(v1, dict):
            if append:
                base[k1] += v1
            else:
                base[k1] = v1
        else:
            for k2, v2 in v1.items():
                if not isinstance(v2, dict):
                    if append:
                        base[k1][k2] += v2
                    else:
                        base[k1][k2] = v2
                else:
                    for k3, v3 in v2.items():
                        if not isinstance(v3, dict):
                            if append:
                                base[k1][k2][k3] += v3
                            else:
                                base[k1][k2][k3] = v3
                        else:
                            for k4, v4 in v3.items():
                                if not isinstance(v4, dict):
                                    if append:
                                        base[k1][k2][k3][k4] += v4
                                    else:
                                        base[k1][k2][k3][k4] = v4
                                else:
                                    raise NotImplementedError("Exceeds current yaml max depth")
    return base
def count_neighbours(grid, row, col):
    """
    Count chips among the 8 Moore neighbours of grid[row][col].

    Input: Three arguments. A grid as a tuple of tuples with integers (1/0),
    a row number and column number for a cell as integers.
    Output: How many neighbouring cells have chips as an integer.

    Bug fix: the original built its boundary set with ``range(len(grid))``
    for BOTH axes, so column bounds were wrong on non-square grids (and the
    membership list cost O(rows*cols) per call).
    """
    NEIGHBORS = ((-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1))
    total = 0
    for dr, dc in NEIGHBORS:
        r, c = row + dr, col + dc
        # Bounds-check each axis against its own length.
        if 0 <= r < len(grid) and 0 <= c < len(grid[r]):
            total += grid[r][c]
    return total
def get_rtol(rtol=None):
    """Get default numerical threshold for regression test."""
    if rtol is None:
        return 1e-3
    return rtol
def copy_dict(source_dict, diffs):
    """Returns a shallow copy of source_dict, updated with the new
    key-value pairs in diffs (diffs wins on conflicting keys)."""
    merged = dict(source_dict)
    merged.update(diffs)
    return merged
def isEmulator(title):
    """Returns True if 'emulator' is written in stream title, False otherwise."""
    keyword = 'emulator'
    return keyword in title
def remove_comments(css):
    """Remove all CSS comment blocks.

    Comments of the form ``/*!...*/`` are preserved, and the IE5/Mac
    comment-backslash hack (``/*\\*/ ... /**/``) is left intact: a comment
    ending in a backslash marks the start of the hack, and the comment that
    follows it closes the hack, so both are kept.
    """
    # iemac: True while we are between the two halves of an IE Mac hack.
    iemac = False
    preserve = False
    comment_start = css.find("/*")
    while comment_start >= 0:
        # Preserve comments that look like `/*!...*/`.
        # Slicing is used to make sure we don"t get an IndexError.
        preserve = css[comment_start + 2:comment_start + 3] == "!"
        comment_end = css.find("*/", comment_start + 2)
        if comment_end < 0:
            # Unterminated comment: drop everything from its start (unless
            # it is a preserved `/*!` comment).
            if not preserve:
                css = css[:comment_start]
            break
        elif comment_end >= (comment_start + 2):
            if css[comment_end - 1] == "\\":
                # This is an IE Mac-specific comment; leave this one and the
                # following one alone.
                comment_start = comment_end + 2
                iemac = True
            elif iemac:
                # Second half of the IE Mac hack: keep it and reset state.
                comment_start = comment_end + 2
                iemac = False
            elif not preserve:
                # Ordinary comment: splice it out of the string.
                css = css[:comment_start] + css[comment_end + 2:]
            else:
                # Preserved `/*!` comment: skip past it.
                comment_start = comment_end + 2
        comment_start = css.find("/*", comment_start)
    return css
def _int2coord(x, y, dim):
"""Convert x, y values in dim x dim-grid coordinate system into lng, lat values.
Parameters:
x: int x value of point [0, dim); corresponds to longitude
y: int y value of point [0, dim); corresponds to latitude
dim: int Number of coding points each x, y value can take.
Corresponds to 2^level of the hilbert curve.
Returns:
Tuple[float, float]: (lng, lat)
lng longitude value of coordinate [-180.0, 180.0]; corresponds to X axis
lat latitude value of coordinate [-90.0, 90.0]; corresponds to Y axis
"""
assert dim >= 1
assert x < dim
assert y < dim
lng = x / dim * 360 - 180
lat = y / dim * 180 - 90
return lng, lat |
def find_sample_name_mismatch(rows):
    """
    if we have eg. paired sample file names file1_1.fastq.gz and file1_2.fastq.gz
    then both of these files must have the same sample name (probably file1)
    This function returns any mismatches

    :param rows: mapping of (index, subindex) -> row dict; each row dict may
        carry a "sample_name" entry
    :return: list of (index, sample_name_of_row, sample_name_of_mate) tuples
        for every non-first mate whose sample name differs from mate "1"

    NOTE(review): the guard compares ``subindex != 1`` (int) but the mate
    lookup key uses the string ``"1"`` — this only works if subindexes are
    consistently strings (so the int comparison is always True) or the
    mapping mixes types; confirm the key type against the caller.
    """
    ret = list()
    for (index, subindex), row in rows.items():
        if subindex != 1:
            sam_name_1 = row.get("sample_name")
            # Missing mate "1" falls back to an empty dict, yielding None.
            sam_name_2 = rows.get((index, "1"), dict()).get("sample_name")
            if sam_name_1 != sam_name_2:
                ret.append((index, sam_name_1, sam_name_2))
    return ret
def _get_elem_neighbors(p, nelx, nely, nelz):
"""
Gets the elements which surround the element in point p1 (no diagonals).
"""
eset = {}
if nelz == 0:
e1 = (max(p[0]-1, 0), p[1])
e2 = (min(p[0]+1, nely-1), p[1])
e3 = (p[0], max(p[1]-1, 0))
e4 = (p[0], min(p[1]+1, nelx-1))
eset = {e1, e2, e3, e4}
else:
e1 = (max(p[0]-1, 0), p[1], p[2])
e2 = (min(p[0]+1, nelz-1), p[1], p[2])
e3 = (p[0], max(p[1]-1, 0), p[2])
e4 = (p[0], min(p[1]+1, nely-1), p[2])
e5 = (p[0], p[1], max(p[2]-1, 0))
e6 = (p[0], p[1], min(p[2]+1, nelx-1))
eset = {e1, e2, e3, e4, e5, e6}
return list(eset) |
def RemoveIntervalsContained( intervals ):
    """
    remove intervals that are fully contained in another.

    [(10, 100), (20, 50), (70, 120), (130, 200), (10, 50), (140, 210), (150, 200)]
    results:
    [(10, 100), (70, 120), (130, 200), (140, 210)]

    :param intervals: list of (from, to) tuples; NOTE: the list is sorted
        in place, so the caller's ordering is mutated
    :return: new list of the intervals that are not contained in any other
    """
    if not intervals:
        return []
    new_intervals = []
    # Sorting by (from, to) guarantees a containing interval is seen before
    # (or merged with) anything it contains.
    intervals.sort()
    last_from, last_to = intervals[0]
    for this_from, this_to in intervals[1:]:
        # this is larger:
        if this_from <= last_from and this_to >= last_to:
            # Current interval swallows the held one; keep the larger.
            last_from,last_to = this_from,this_to
            continue
        # last is larger
        if last_from <= this_from and last_to >= this_to:
            continue
        # no complete overlap
        new_intervals.append( (last_from, last_to ) )
        last_from,last_to = this_from,this_to
    # Flush the final held interval.
    new_intervals.append( ( last_from, last_to ))
    return new_intervals
def _acct(v):
"""Convert to accounting (float) for excel, but default to original value."""
try:
if v is None or v == '':
return ''
return float(v)
except ValueError:
return v |
def ToUpperCamelCase(s):
    """Converts a string to upper camel case.

    Examples:
        foo => Foo
        foo_bar => FooBar
        foo__bar => FooBar
        foo_bar_baz => FooBarBaz

    Args:
        s: The string to convert to upper camel case.

    Returns:
        The upper camel cased string.
    """
    fragments = s.split('_')
    return ''.join(map(str.capitalize, fragments))
def gTabIni(a, n):
    """Generate a list of n elements, each being (a reference to) a."""
    return [a for _ in range(n)]
def getType(obj):
    """Return the bare class name of *obj*, without module qualification.

    Fix: the original parsed ``str(obj.__class__)`` with two splits — a
    fragile round-trip through the repr. ``__class__.__name__`` is the
    canonical, decoration-free class name and yields the same result.
    """
    return obj.__class__.__name__
def plural_helper(count, word_stem):
    """returns "<count> <word_stem>" with an 's' appended unless count == 1"""
    base = str(count) + ' ' + word_stem
    return base if count == 1 else base + 's'
def get_inline_views_from_fieldsets(fieldsets):
    """Recursively collect 'inline_view' names from an admin fieldsets
    structure (a sequence of (label, options) pairs, possibly nested via a
    'fieldsets' option)."""
    views = []
    for _, opts in fieldsets or ():
        if 'fieldsets' in opts:
            # Nested fieldsets take precedence over a sibling inline_view.
            views.extend(get_inline_views_from_fieldsets(opts.get('fieldsets')))
        elif 'inline_view' in opts:
            views.append(opts.get('inline_view'))
    return views
def filterResultList(phoneName, resultsList):
    """ Takes in a phone name, and a list of PriceAPI search result dictionaries.
    Returns a filtered list with blacklisted sites removed (ebay, phone carriers)
    - only correct phones
    - unlocked phones
    - Price in USD
    - has a set price
    - returns None if the final list is empty
    """
    blacklistNames = """ ebay ebay - marketplace sprint at&t verizon straight talk """
    phoneName = phoneName.lower()
    # Pay no mind to this hideous list of lambdas
    filters = [
        lambda d : "name" in d and phoneName in d["name"].lower(),
        lambda d : "name" in d and "unlocked" in d["name"].lower(),
        lambda d : "price" in d or ("min_price" in d and ("max_price" in d and (d["min_price"] == d["max_price"]))),
        lambda d : "currency" in d and d["currency"] == "USD",
        lambda d : ("price" in d and ( d["price"] != None and int(d["price"].replace(".","")) != 0)) or ("min_price" in d and d["min_price"] != None and (int(d["min_price"].replace(".","")) != 0)),
        lambda d : not "shop_name" in d or not d["shop_name"].lower() in blacklistNames
    ]
    # Filter out products which don't even have the phone name
    # apply remaining filters
    filtered = resultsList
    for f in filters:
        filtered = filter(f, filtered)
    # Filter out "plus" models only when searching for a non-plus model.
    # BUG FIX: the original condition used `or`, which applied this filter
    # even when the query itself named a "plus"/"+" model — removing every
    # matching result. The name check is now also case-insensitive, for
    # consistency with the name filters above.
    if "+" not in phoneName and "plus" not in phoneName:
        filtered = filter(
            lambda d: "+" not in d["name"].lower() and "plus" not in d["name"].lower(),
            filtered)
    # If the filtered list isn't empty, then return it. Return None otherwise
    filtered = list(filtered)
    return filtered if filtered else None
def fromstringtolist(strlist, map_function=int):
    """
    Parse a list serialized as a string, e.g. "[1,2,3]" -> [1, 2, 3].

    map_function converts each element string to the suited element type.
    Returns None for "None" or None input.

    Fix: the original crashed on an empty list "[]" because splitting the
    empty interior yields [""] and map_function("") raises; this now
    returns [].
    """
    if strlist in ("None", None):
        return None
    inner = strlist[1:-1]
    if not inner.strip():
        return []
    return [map_function(elem) for elem in inner.split(",")]
def generate_color_brew(n):
    """
    Generates an evenly distributed range of hue values in the HSV colour
    scale, normalized to [0, 1).

    Arguments:
        n -- an int with the number of hue values you want to obtain

    Returns:
        a python list with those hue values
    """
    step = 360 / float(n)
    hues = []
    for index in range(n):
        hues.append(index * step / 360.0)
    return hues
def find_annotations(list_filenames, all_annotations):
    """
    Build the list of annotations matching the given filenames, in the
    same order as list_filenames.

    Input:
        list_filenames: List of filenames to look up in all_annotations
        all_annotations: List of dicts, each carrying a "filename" key
    Raises StopIteration when a filename has no annotation (as the
    original's bare next() did).
    """
    found_annotations = []
    for filename in list_filenames:
        for entry in all_annotations:
            if entry["filename"] == filename:
                found_annotations.append(entry)
                break
        else:
            raise StopIteration
    return found_annotations
def union_probability(p_a, p_b, p_intersection):
    """Compute P(A or B) by inclusion-exclusion: P(A) + P(B) - P(A and B)."""
    return (p_a + p_b) - p_intersection
def my_specificity_score(labels, predictions):
    """
    specificity scoring function: TN / (TN + FP), computed over the
    negative (label == 0) samples only.

    :param labels: actual class labels (0/1)
    :param predictions: predicted class labels (0/1)
    :return: specificity score
    """
    pairs = list(zip(labels, predictions))
    tn = sum(1 for lab, pred in pairs if lab == 0 and pred == lab)
    fp = sum(1 for lab, pred in pairs if lab == 0 and pred != lab)
    return tn / (tn + fp)
def _is_weblog_entry(pagename):
"""Return True if the page is a weblog entry."""
if not pagename:
return
parts = pagename.split("/")
if not parts[0] == "articles":
return
if not len(parts) == 5:
return
if parts[-1] == "index":
return
return True |
def owner_ids_to_string_array(owner_ids):
    """
    Converts snowflake owner ids to an array of strings.

    Parameters
    ----------
    owner_ids : `iterable` of `int`
        Snowflakes.

    Returns
    -------
    owner_id_array : `list` of `str`
    """
    return list(map(str, owner_ids))
def sortie(laby):
    """
    Locate the maze exit: return the (row, column) indices of the first
    cell in the list-of-lists *laby* containing a 3.
    If there is none, print an error message (in French, as in the
    original UI) and return None.
    """
    for row_idx, row in enumerate(laby):
        for col_idx, cell in enumerate(row):
            if cell == 3:
                # No need to keep scanning once the exit is found.
                return row_idx, col_idx
    print("Erreur: pas de 3 dans la liste de listes laby. \n laby = \n ", laby)
    return None
def fix_spans_due_to_empty_words(action_dict, words):
    """Return modified (action_dict, words).

    Deletes empty-string entries from *words* (trailing ones first, then
    in-place ones) and, for each in-place deletion at index i, decrements
    every span endpoint >= i found anywhere in the (nested) action_dict.
    Both arguments are mutated in place.
    """
    def reduce_span_vals_gte(d, i):
        # Walk the dict tree; any 2-element value is treated as a span.
        for k, v in d.items():
            if type(v) == dict:
                reduce_span_vals_gte(v, i)
                continue
            try:
                a, b = v
                if a >= i:
                    a -= 1
                if b >= i:
                    b -= 1
                # NOTE(review): this writes the span back wrapped in an
                # extra list ([[a, b]]) even though it was unpacked from a
                # flat pair — confirm downstream consumers expect that.
                d[k] = [[a, b]]
            except ValueError:
                # Value wasn't a 2-element sequence; leave it untouched.
                pass
            except TypeError:
                # Value wasn't unpackable at all; leave it untouched.
                pass
    # remove trailing empty strings
    while words[-1] == "":
        del words[-1]
    # fix span
    i = 0
    while i < len(words):
        if words[i] == "":
            reduce_span_vals_gte(action_dict, i)
            del words[i]
        else:
            # Only advance when nothing was deleted, so the next word
            # (shifted into slot i) is still examined.
            i += 1
    return action_dict, words
def IsPsuedoClass(cmd, line):
    """Check the line to see if its a link pseudo class
    @param cmd: current command
    @param line: line of the command
    @return: bool
    """
    if not cmd.endswith(u':'):
        return False
    # Examine the selector part of the line's last token.
    selector = line.split()[-1].split(u":")[0]
    return selector == 'a' or selector.startswith('a.')
def osc(last, type):
    """
    Pick the next concrete base for a degenerate IUPAC character, cycling
    away from *last*:
    R=[AG], Y=[CT], K=[GT], M=[AC], S=[GC], W=[AT], and the four-fold
    degenerate character N=[ATCG]

    Unknown type codes are returned unchanged.
    """
    # For each code: (mapping from previous base to next base, fallback).
    CYCLES = {
        "r": ({"a": "g", "g": "a"}, "a"),
        "y": ({"c": "t", "t": "c"}, "c"),
        "k": ({"g": "t", "t": "g"}, "g"),
        "m": ({"a": "c", "c": "a"}, "a"),
        "s": ({"g": "c", "c": "g"}, "g"),
        "w": ({"a": "t", "t": "a"}, "a"),
        "n": ({"a": "c", "c": "g", "g": "t", "t": "a"}, "a"),
    }
    if type in CYCLES:
        mapping, fallback = CYCLES[type]
        return mapping.get(last, fallback)
    return type
def _get_hash_key(f, args, kwargs):
"""
Return a hash for a given function invocation.
Args:
f: the function
args: positional arguments (list)
kwargs: keyword arguments (dict)
Returns:
a hashed key (int), or None if any argument was unhashable
"""
try:
return hash((f.__module__, f.__name__,
tuple(args),
tuple(sorted(kwargs.items()))))
except TypeError:
return None |
def short_type_constraint(value):
    """Test short_type constraint: value fits in a signed 16-bit range."""
    return -32768 <= value <= 32767
def calculate_bootstrap(bootstrap_size, length):
    """
    Calculate the bootstrap size for the data of given length.

    Parameters
    ----------
    bootstrap_size : int, float, default=None
        Bootstrap size for training. Must be one of:
        - int : Use `bootstrap_size`.
        - float : Use `bootstrap_size * n_samples`.
        - None : Use `n_samples`.

    length : int
        Length of the data to be bootstrapped.

    Returns
    -------
    bootstrap : int
        Actual bootstrap size.
    """
    if bootstrap_size is None:
        return length
    if isinstance(bootstrap_size, int) and bootstrap_size > 0:
        return bootstrap_size
    if isinstance(bootstrap_size, float) and 0 < bootstrap_size <= 1:
        return int(bootstrap_size * length)
    raise ValueError("Bootstrap Size must be None, a positive int or float in (0,1]")
def get_json_value(json_input, keys_path):
    """Return the value at dot-notation *keys_path* in *json_input*, or ""
    if the key doesn't exist.

    Fix: the original only caught KeyError, so a non-dict intermediate
    value (e.g. path "a.b" into {"a": 1}) raised TypeError instead of
    returning "" as documented. TypeError is now caught as well.
    """
    if not isinstance(json_input, dict):
        return ""
    value = json_input
    try:
        for key in keys_path.split("."):
            value = value[key]
    except (KeyError, TypeError):
        return ""
    return value
def flip_square(square, direction):
    """Flip square in left (mirror each row) or up (reverse row order)
    direction; returns None for any other direction."""
    if direction == 'left':
        flipped = []
        for row in square:
            flipped.append(row[::-1])
        return flipped
    if direction == 'up':
        flipped = []
        for row in square[::-1]:
            flipped.append(row[:])
        return flipped
    return None
def _unfold_continuations(code_string):
"""Removes any backslash line continuations from the code."""
return code_string.replace('\\\n', '') |
def _merge_config(new, base):
"""
Internal function that merges config sections into a base config.
Returns the new config data structure.
"""
# Thanks, StackOverflow! http://stackoverflow.com/questions/823196/yaml-merge-in-python
# If we've got two dicts, merge them.
if isinstance(new, dict) and isinstance(base, dict):
for k, v in base.items():
if k not in new:
new[k] = v
else:
new[k] = _merge_config(new[k], v)
# If we didn't merge our dicts, this still returns the item we want to add.
return new |
def timeout_check(value):
    """Check Timeout Argument.

    Validates *value* as a positive number of seconds for a request
    timeout. Raises argparse.ArgumentTypeError when the value is not a
    number or is not strictly positive.

    Return Value:
        Floating point number representing the time (in seconds) that
        should be used for the timeout.
    """
    from argparse import ArgumentTypeError
    try:
        seconds = float(value)
    except ValueError:
        raise ArgumentTypeError(f"Timeout '{value}' must be a number.")
    if seconds <= 0:
        raise ArgumentTypeError(f"Timeout '{value}' must be greater than 0.0s.")
    return seconds
def remove_cations(SMILES):
    """
    Removes periodic table group 1 and 7 counterions from the SMILES
    strings.

    Args:
    -----
    SMILES (str) -- the SMILES string representation of the molecule.

    Returns:
    --------
    SMILES (str) -- the string representation of the molecule with
    the counterions omitted.
    """
    # Assertions
    assert isinstance(SMILES, str), 'the SMILES must be a string'
    # Functionality: drop any dot-separated fragment that is a bare counterion.
    COUNTERIONS = {'[Li+]', '[Na+]', '[K+]', '[Rb+]', '[Cs+]', '[Fr+]', '[F-]',
                   '[Cl-]', '[Br-]', '[I-]', '[At-]'}
    kept = [frag for frag in SMILES.split(".") if frag not in COUNTERIONS]
    return '.'.join(kept)
def avg_real(scores):
    """Average of *scores* with None entries excluded.

    Args:
        scores: list of accuracy or std errors (numbers or None)

    Returns:
        The mean of the non-None values, or None if none remain.

    Idiom fixes: ``x != None`` replaced with ``x is not None`` (identity
    comparison against the None singleton), and the empty check uses
    truthiness instead of ``len(...) == 0``.
    """
    real_scores = [x for x in scores if x is not None]
    if not real_scores:
        return None
    return sum(real_scores) / len(real_scores)
def deepupdate(original, update):
    """
    Recursively update a dict: keys missing from *update* are copied over
    from *original*; sub-dicts present in both are merged the same way
    rather than overwritten. Mutates and returns *update*.
    """
    for key, original_value in original.items():
        if key not in update:
            update[key] = original_value
        elif isinstance(original_value, dict):
            deepupdate(original_value, update[key])
    return update
def content_lines(lines):
    """Return the non-empty lines whose first character is '#'."""
    return list(filter(lambda ln: ln and ln[0] == "#", lines))
def find_instances(string, character):
    """
    Return a list of all indices at which *character* occurs in *string*.
    """
    positions = []
    for idx, letter in enumerate(string):
        if letter == character:
            positions.append(idx)
    return positions
def get_hyperion_device_id(server_id: str, instance: int) -> str:
    """Get an id for a Hyperion device/instance (server_id and instance
    joined with an underscore)."""
    return "{}_{}".format(server_id, instance)
def isstring(token):
    """
    Determine if the token is a string literal (its first and last
    characters are double quotes).

    :param token: The token.
    :return: Boolean representing whether the token is a string.
    """
    first, last = token[0], token[-1]
    return first == '"' and last == '"'
def round_channels(num_channels: int, width_coeff: float=1,
                   divisor: int=8, min_depth=None) -> int:
    """Round number of filters based on width coefficient.

    Scales num_channels by width_coeff, rounds to the nearest multiple of
    divisor (never below min_depth, which defaults to divisor), and bumps
    the result up one divisor step if rounding went below 90% of the
    original channel count.
    """
    if width_coeff == 1:
        return num_channels
    floor_depth = min_depth or divisor
    scaled = int(num_channels * width_coeff + divisor / 2) // divisor * divisor
    rounded = max(floor_depth, scaled)
    # make sure the round down does not go down by more than 10%
    if rounded < 0.9 * num_channels:
        rounded += divisor
    return int(rounded)
def getContour(semitones):
    """Encode a melody's Parsons-code contour from a list of signed
    interval sizes in semitones: u=up, d=down, r=repeat."""
    symbols = []
    for interval in semitones:
        if interval == 0:
            symbols.append('r')  # repeated
        elif interval > 0:
            symbols.append('u')  # up
        elif interval < 0:
            symbols.append('d')  # down
    return ''.join(symbols)
def candidate_matches(o, sm_current, list_matches):
    """
    checks if an article shares authors with all other articles in a list

    :param o: examined article
    :param sm_current: list of articles that share authors
    :param list_matches: list of all combinations of two articles that share authors
    :return: True iff o shares articles with all articles in sm_current
    """
    return all(
        [elem, o] in list_matches or [o, elem] in list_matches
        for elem in sm_current
    )
def inline(text):
    """
    Convert all newline characters to HTML entities:
    This can be used to prevent Hypertag from indenting lines of `text` when
    rendering parent nodes, and to safely insert `text` inside <pre>,
    <textarea>, or similar elements.
    """
    NEWLINE_ENTITY = ' '  # numeric character reference for '\n'
    return text.replace('\n', NEWLINE_ENTITY)
def uniform_but_one_dataset_no_weight(n, p):
    """Generates an unweighted dataset according to the AMS lower bound.

    Args:
        n: The size of the dataset
        p: The frequency moment (used as a parameter in the lower bound)

    Returns:
        A list of keys, where all keys appear once, except for key 1 which
        receives extra copies so it accounts for half of the p-th
        frequency moment.
    """
    heavy_extra_copies = int(n ** (1.0 / p)) - 1
    return list(range(n)) + [1] * heavy_extra_copies
def repeat(x, num_times, assert_at_least_one_rep=False):
    """
    Repeat x num_times times.

    Parameters
    ----------
    x : tuple or Circuit
        the operation sequence to repeat

    num_times : int
        the number of times to repeat x

    assert_at_least_one_rep : bool, optional
        if True, assert that num_times > 0. This can be useful when used
        within a create_circuits inner loop to build a operation sequence
        lists where a string must be repeated at least once to be added
        to the list.

    Returns
    -------
    tuple or Circuit (whichever x was)
    """
    if assert_at_least_one_rep:
        assert(num_times > 0)
    return x * num_times
def coordinates_within_board(n: int, x: int, y: int) -> bool:
    """Are the given coordinates inside the n x n board?"""
    return 0 <= x < n and 0 <= y < n
def command_result_processor_parameter_unexpected(parameter_value):
    """
    Command result message processor if a received parameter is unexpected.

    Parameters
    ----------
    parameter_value : `str`
        Received parameter.

    Returns
    -------
    message : `str`
    """
    return f'Unexpected parameter: {parameter_value!r}.\n'
def _SecondsToMicroseconds(secs):
"""Converts seconds to microseconds."""
return int(secs * 1000000) |
def compare_tags(tag_list, other_tag_list):
    """
    Compare two tag lists that share the tag format:
    (`tag_name`, `start_offset`, `end_offset`, `value`)

    The lists are compared after sorting by `start_offset`; the caller's
    lists are left unmodified.  A diagnostic message is printed for the
    first difference found.

    Returns True when both lists contain exactly the same tags, else False.
    """
    if len(tag_list) != len(other_tag_list):
        print(
            "mismatch of amount of tags %d vs %d"
            % (len(tag_list), len(other_tag_list))
        )
        return False
    # sorted() returns new lists, so callers' inputs are not mutated
    # (the previous version sorted them in place as a side effect).
    ordered = sorted(tag_list, key=lambda tag: tag[1])
    other_ordered = sorted(other_tag_list, key=lambda tag: tag[1])
    for tag, other_tag in zip(ordered, other_ordered):
        if len(tag) != len(other_tag):
            print("mismatch of tags format")
            return False
        for field, other_field in zip(tag, other_tag):
            if field != other_field:
                print("mismatch of tags %s vs %s" % (field, other_field))
                return False
    return True
def is_inplace(argument: str) -> bool:
    """Check that argument is in-place value.

    :param str argument: argument string
    :return: True if argument is a digit of decimal else False
    :rtype: bool
    """
    # True when either string predicate matches (isdecimal is a subset of
    # isdigit for most inputs, but both are checked as before).
    return argument.isdigit() or argument.isdecimal()
def is_number(s):
    """
    Validate that `s` can be parsed as a number by float().

    Returns False instead of raising for values float() rejects with a
    TypeError (e.g. None or a list); the previous version only caught
    ValueError and crashed on such inputs.

    Note: float() accepts strings like 'nan' and 'inf', so those report True.
    """
    try:
        float(s)
        return True
    except (ValueError, TypeError):
        return False
def sexpr_print_sexpr(sexpr):
    """Render a python S-expression (nested tuples of str/int) as a string.

    Strings pass through unchanged, ints are stringified, and tuples become
    space-separated parenthesized lists; anything else raises ValueError.
    """
    if isinstance(sexpr, str):
        return sexpr
    if isinstance(sexpr, int):
        return str(sexpr)
    if isinstance(sexpr, tuple):
        # One-element tuples are considered malformed.
        assert len(sexpr) > 1, sexpr
        rendered = [sexpr_print_sexpr(part) for part in sexpr]
        return '({})'.format(' '.join(rendered))
    raise ValueError(sexpr)
def newman_conway(num):
    """Return the first `num` Newman-Conway numbers as a space-separated string.

    The sequence is P(1) = P(2) = 1 and, for n > 2,
    P(n) = P(P(n - 1)) + P(n - P(n - 1)).

    Time Complexity: O(n)
    Space Complexity: O(n)

    Raises
    ------
    ValueError
        If num < 1 (the previous version only rejected 0 and silently
        produced wrong output for negative input).
    """
    if num < 1:
        raise ValueError("num must be a positive integer")
    # memo[0] is an unused placeholder so that memo[i] == P(i).
    memo = [0, 1, 1]
    for i in range(3, num + 1):
        memo.append(memo[memo[i - 1]] + memo[i - memo[i - 1]])
    return " ".join(str(term) for term in memo[1:num + 1])
def merge_sort(collection):
    """Sort *collection* in ascending order with recursive merge sort.

    The merged result is written back into *collection* in place, and the
    same object is returned.

    :param collection: some mutable ordered collection with heterogeneous
    comparable items inside
    :return: the same collection ordered by ascending

    Examples:
    >>> merge_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]

    >>> merge_sort([])
    []

    >>> merge_sort([-2, -5, -45])
    [-45, -5, -2]
    """
    if len(collection) <= 1:
        return collection

    mid = len(collection) // 2
    left = merge_sort(collection[:mid])
    right = merge_sort(collection[mid:])

    # Merge the two sorted halves back into `collection` in place.
    li = ri = wi = 0
    while li < len(left) and ri < len(right):
        if left[li] < right[ri]:
            collection[wi] = left[li]
            li += 1
        else:
            # Ties take the right element, matching the original order.
            collection[wi] = right[ri]
            ri += 1
        wi += 1
    for leftover in left[li:]:
        collection[wi] = leftover
        wi += 1
    for leftover in right[ri:]:
        collection[wi] = leftover
        wi += 1
    return collection
def _powerlaw(x0, y0, slope, x):
"""
From point (x0, y0) and parameter slope, returns y = f(x) such that:
> f(x) = a * (x ** slope)
> f(x0) = y0
Parameters
----------
x0, y0, slope, x : float
Returns
-------
y = f(x) : float
"""
return y0 * ((x/x0) ** slope) |
def expected_count(af, effective_mutation_rate):
    """Calculate the expected number of somatic variants
    greater than a given allele frequency given an effective mutation
    rate, according to the model of Williams et al. Nature
    Genetics 2016"""
    inverse_af = 1.0 / af
    return effective_mutation_rate * (inverse_af - 1.0)
def _bool_encode(d):
"""
Converts boolean values to lowercase strings
"""
for k, v in d.items():
if isinstance(v, bool):
d[k] = str(v).lower()
return d |
def c2str(rgb):
    """ Represent a color tuple as a string

    Args:
        rgb (:obj:`tuple` of int): Color representation as (R, G, B), where
            each color is an integer between 0-255.

    Returns:
        string: Comma separated string with values for each channel
    """
    red, green, blue = rgb
    return '%d,%d,%d' % (red, green, blue)
def scale_text(figsize, text_size):
    """Pick a text size scaled to `figsize`.

    An explicit `text_size` always wins (as does a missing `figsize`);
    otherwise narrow figures (width <= 11) get size 12 and wider figures
    use their own width as the size.
    """
    if text_size is not None or figsize is None:
        return text_size
    width = figsize[0]
    return 12 if width <= 11 else width
def penn2morphy(penntag):
    """Convert a Penn Treebank POS tag to a WordNet (morphy) tag.

    Only the first two characters of the tag are inspected; unknown tags
    default to 'n' (noun).  The previous bare `except:` (which also swallowed
    KeyboardInterrupt/SystemExit) is replaced by a plain dict lookup with a
    default.
    """
    morphy_tag = {'NN': 'n', 'JJ': 'a',
                  'VB': 'v', 'RB': 'r'}
    return morphy_tag.get(penntag[:2], 'n')
def get_first(queryset, default=None):
    """Return the first element of *queryset*, or *default* when it is empty
    (or otherwise falsy)."""
    return queryset[0] if queryset else default
def find_second_maximum(lst):
    """
    Return the second-largest value seen in `lst` in a single pass.

    Values equal to the current maximum are NOT counted as a second
    occurrence (duplicates of the maximum are skipped), so for e.g.
    [5, 5] the result stays at -inf.
    assumption: all numbers are positive
    """
    largest = float('-inf')
    runner_up = float('-inf')
    for value in lst:
        if value <= runner_up:
            continue
        if value > largest:
            # New maximum: the old maximum becomes the runner-up.
            runner_up = largest
            largest = value
        elif value < largest:
            runner_up = value
        # value == largest: deliberately ignored (duplicate of the max).
    return runner_up
def inclusion_check(n_timepoints, mean_fd, max_fd, n_spikes, fd_th):
    """
    Checking if participant is recommended to be excluded from analysis
    based on motion parameters and spikes regressors.

    Inputs
    -------
    n_timepoints: number of timepoints
    mean_fd: mean framewise_displacement (FD)
    max_fd: maximum FD
    n_spikes: number of spikes
    fd_th: threshold for mean FD

    Outputs
    -------
    returns 0 if subject should be excluded due to head motion
    or 1 if there is no reason to exclude subject based on submitted threshold.
    """
    # Exclude on any of: mean FD over threshold, max FD over 5 mm, or more
    # than 20% of volumes flagged as spikes (short-circuit keeps evaluation
    # order identical to the original if/elif chain).
    excessive_motion = (
        mean_fd > fd_th
        or max_fd > 5
        or n_spikes / n_timepoints > 0.20
    )
    return 0 if excessive_motion else 1
def to_product(product_tuple):
    """Parse a tuple into a valid 'Product' dictionary.

    Args:
        product_tuple: Tuple containing StockCode, Description, Quantity and
            UnitPrice (in that order).

    Returns:
        Product (dictionary) with keys 'id', 'description', 'quantity',
        'unitPrice'.
    """
    raw_id = product_tuple[0]
    raw_description = product_tuple[1]
    raw_quantity = product_tuple[2]
    raw_price = product_tuple[3]
    return {
        'id': str(raw_id),
        'description': str(raw_description),
        'quantity': int(raw_quantity),
        'unitPrice': float(raw_price),
    }
def extract_episode_details(season, episode_response):
    """Clean and extract episode details from an episode-details response.

    Pulls out the fields needed to construct an Episode object.

    Args:
        season: The season number
        episode_response: An episode_details response (dict with at least
            'Title', 'Episode' and 'imdbRating' keys)

    Returns:
        Dictionary with the relevant episode information.
    """
    raw_rating = episode_response['imdbRating']
    try:
        imdb_rating = float(raw_rating)
    except ValueError:
        # Rating may come through as 'N/A' if the episode has not aired.
        imdb_rating = None
    return {
        'title': episode_response['Title'],
        'episode': int(episode_response['Episode']),
        'season': season,
        'ratings': {'imdb': imdb_rating},
    }
def pre_hash(s):
    """
    Return *s* prefixed with its length and a colon.

    EXAMPLES::

        sage: from sage.doctest.parsing import pre_hash
        sage: pre_hash("abc")
        '3:abc'
    """
    return f"{len(s)}:{s}"
def mapset(fn, the_set):
    """Map `fn` across `the_set` as though it were a list and return the
    resulting set.

    The intermediate list/map/print steps of the previous version were
    leftover debugging (one of them printed a useless `<map object ...>`
    repr); a set comprehension does the same work directly.
    """
    return {fn(element) for element in the_set}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.