content stringlengths 42 6.51k |
|---|
def factorial(x):
    """Recursively compute x! for a non-negative integer x.

    Args:
        x (int): value whose factorial is wanted; must be >= 0.

    Returns:
        int: x factorial (0! == 1).

    Raises:
        ValueError: if x is negative (the original recursed forever).
    """
    if x < 0:
        raise ValueError("factorial() not defined for negative values")
    if x <= 1:
        # Covers 0! == 1 as well as the original base case 1! == 1.
        return 1
    return x * factorial(x - 1)
def chunked(l, chunk_size):
    """Split list *l* into consecutive chunks of at most ``chunk_size`` items.

    The final chunk may be shorter when ``len(l)`` is not a multiple of
    ``chunk_size``.
    """
    chunks = []
    for start in range(0, len(l), chunk_size):
        chunks.append(l[start:start + chunk_size])
    return chunks
def collapse(s):
    """Return *s* with every run of whitespace collapsed to a single space.

    Leading and trailing whitespace is removed entirely.
    """
    words = s.split()
    return ' '.join(words).strip()
def _duration_to_nb_windows(
duration, analysis_window, round_fn=round, epsilon=0
):
"""
Converts a given duration into a positive integer of analysis windows.
if `duration / analysis_window` is not an integer, the result will be
rounded to the closest bigger integer. If `duration == 0`, returns `0`.
If `duration < analysis_window`, returns 1.
`duration` and `analysis_window` can be in seconds or milliseconds but
must be in the same unit.
:Parameters:
duration: float
a given duration in seconds or ms.
analysis_window: float
size of analysis window, in the same unit as `duration`.
round_fn: callable
function called to round the result. Default: `round`.
epsilon: float
small value to add to the division result before rounding.
E.g., `0.3 / 0.1 = 2.9999999999999996`, when called with
`round_fn=math.floor` returns `2` instead of `3`. Adding a small value
to `0.3 / 0.1` avoids this error.
Returns:
--------
nb_windows: int
minimum number of `analysis_window`'s to cover `durartion`. That means
that `analysis_window * nb_windows >= duration`.
"""
if duration < 0 or analysis_window <= 0:
err_msg = "'duration' ({}) must be >= 0 and 'analysis_window' ({}) > 0"
raise ValueError(err_msg.format(duration, analysis_window))
if duration == 0:
return 0
return int(round_fn(duration / analysis_window + epsilon)) |
def search_non_residue(p):
    """Find a quadratic non-residue of the prime *p*.

    Scans candidates 2..p-1 using Euler's criterion: z is a non-residue
    exactly when z**((p-1)/2) == p-1 (mod p).

    Args:
        p: a prime number.

    Returns:
        int: the smallest non-residue found, or -1 when none exists.
    """
    exponent = (p - 1) // 2
    for candidate in range(2, p):
        if pow(candidate, exponent, p) == p - 1:
            return candidate
    return -1
def catch_parameter(opt):
    """Map a short command-line flag to its long parameter name.

    Returns None for unknown flags.
    """
    flag_names = {
        '-h': 'help',
        '-a': 'activity',
        '-c': 'folder',
        '-b': 'model_file',
        '-v': 'variant',
        '-r': 'rep',
    }
    return flag_names.get(opt)
def shorten(body, keep_header=0, keep_trailer=0):
    """Smartly shorten a given string.

    Args:
        body: the text to shorten (bytes are decoded as UTF-8 when possible).
        keep_header (int): number of leading characters to keep.
        keep_trailer (int): number of trailing characters to keep.

    Returns:
        str: the shortened text, with '..' marking the removed part; the
        original body when it already fits.
    """
    # Normalize byte-like objects
    try:
        body = body.decode('utf-8')
    except (UnicodeDecodeError, AttributeError):
        pass
    # Keep only the header; elision marker follows the kept text.
    # (Bug fix: the original emitted '..' BEFORE the kept header.)
    if (keep_header
            and not keep_trailer
            and len(body) > keep_header):
        return '%s..' % body[:keep_header]
    # Keep only the trailer.
    # (Bug fix: the original sliced body[-keep_header:], i.e. body[-0:],
    # which returned the whole body unshortened.)
    if (keep_trailer
            and not keep_header
            and len(body) > keep_trailer):
        return '..%s' % body[-keep_trailer:]
    # Keep both ends.
    if (keep_header
            and keep_trailer
            and len(body) > keep_header + keep_trailer):
        return '%s .. %s' % (body[:keep_header], body[-keep_trailer:])
    return body
def locator_to_latlong(locator):
    """Convert a Maidenhead locator to WGS84 coordinates.

    Args:
        locator (string): Locator, either 4 or 6 characters.

    Returns:
        tuple (float, float): Latitude, Longitude (center of the square or
        subsquare).

    Raises:
        ValueError: When called with a wrong or invalid locator.
        TypeError: When the argument is not a string.

    Example:
        >>> latitude, longitude = locator_to_latlong("JN48QM")
        >>> print(latitude, longitude)
        48.52083333333333 9.375

    Note:
        Longitude (negative = West, positive = East)
        Latitude (negative = South, positive = North)
    """
    # Bug fix: a non-string used to raise AttributeError on .upper(),
    # contradicting the documented TypeError.
    if not isinstance(locator, str):
        raise TypeError
    locator = locator.upper()
    if len(locator) == 5 or len(locator) < 4:
        raise ValueError
    # Field letters A-R, square digits 0-9.
    if not ('A' <= locator[0] <= 'R'):
        raise ValueError
    if not ('A' <= locator[1] <= 'R'):
        raise ValueError
    if not ('0' <= locator[2] <= '9'):
        raise ValueError
    if not ('0' <= locator[3] <= '9'):
        raise ValueError
    if len(locator) == 6:
        # Subsquare letters A-X.
        if not ('A' <= locator[4] <= 'X'):
            raise ValueError
        if not ('A' <= locator[5] <= 'X'):
            raise ValueError
    # Field: 20 deg of longitude, 10 deg of latitude per letter.
    longitude = (ord(locator[0]) - ord('A')) * 20 - 180
    latitude = (ord(locator[1]) - ord('A')) * 10 - 90
    # Square: 2 deg / 1 deg per digit.
    longitude += (ord(locator[2]) - ord('0')) * 2
    latitude += ord(locator[3]) - ord('0')
    if len(locator) == 6:
        # Subsquare: 5 minutes / 2.5 minutes per letter.
        longitude += (ord(locator[4]) - ord('A')) * (2 / 24)
        latitude += (ord(locator[5]) - ord('A')) * (1 / 24)
        # move to center of subsquare
        longitude += 1 / 24
        latitude += 0.5 / 24
    else:
        # move to center of square
        longitude += 1
        latitude += 0.5
    return latitude, longitude
def format_su_cmd(cmd, user):
    """Build an ``su -lc`` invocation running *cmd* as *user*.

    Args:
        cmd (str): command to be wrapped.
        user (str): account that should execute the command.

    Returns:
        str: the formatted ``su`` command line.
    """
    # NOTE(review): cmd is interpolated unquoted/unescaped into a shell
    # string; callers must not pass untrusted input here.
    return f'su -lc "{cmd}" {user}'
def catchable_exceptions(exceptions):
    """Return True when *exceptions* is usable in an ``except`` clause.

    That is the case for a BaseException subclass, or a non-empty tuple
    whose members are all BaseException subclasses.
    """
    if isinstance(exceptions, type):
        return issubclass(exceptions, BaseException)
    if not isinstance(exceptions, tuple) or not exceptions:
        return False
    return all(issubclass(entry, BaseException) for entry in exceptions)
def dup_inflate(f, m, K):
    """
    Map ``y`` to ``x**m`` in a polynomial in ``K[x]``.

    Examples
    ========
    >>> from sympy.polys.domains import ZZ
    >>> dup_inflate(ZZ.map([1, 1, 1]), 3, ZZ)
    [1, 0, 0, 1, 0, 0, 1]
    """
    if m <= 0:
        raise IndexError("'m' must be positive, got %s" % m)
    if m == 1 or not f:
        return f
    # Interleave m-1 zeros between consecutive coefficients.
    inflated = [f[0]]
    padding = [K.zero] * (m - 1)
    for coeff in f[1:]:
        inflated += padding
        inflated.append(coeff)
    return inflated
def T(name, content=None, **props):
    """Build a component dict with name, text content, and extra props."""
    component = {'_name': name, 'text': content, '_props': props}
    return component
def _get_extent(gt, cols, rows):
""" Return the corner coordinates from a geotransform
:param gt: geotransform
:type gt: (float, float, float, float, float, float)
:param cols: number of columns in the dataset
:type cols: int
:param rows: number of rows in the dataset
:type rows: int
:rtype: list of (list of float)
:return: List of four corner coords: ul, ll, lr, ur
>>> gt = (144.0, 0.00025, 0.0, -36.0, 0.0, -0.00025)
>>> cols = 4000
>>> rows = 4000
>>> _get_extent(gt, cols, rows)
[[144.0, -36.0], [144.0, -37.0], [145.0, -37.0], [145.0, -36.0]]
"""
ext = []
xarr = [0, cols]
yarr = [0, rows]
for px in xarr:
for py in yarr:
x = gt[0] + (px * gt[1]) + (py * gt[2])
y = gt[3] + (px * gt[4]) + (py * gt[5])
ext.append([x, y])
yarr.reverse()
return ext |
def isnumeric(string):
    """Return True when every character of *string* is numeric."""
    return str.isnumeric(string)
def hex_to_rgb(hexcode):
    """Convert '#RRGGBB' into a [red, green, blue] list of ints."""
    channels = []
    # Channel hex pairs start at offsets 1, 3, 5 (skipping the '#').
    for start in (1, 3, 5):
        channels.append(int(hexcode[start:start + 2], 16))
    return channels
def _simpleprint_styles(_styles):
"""
A helper function for the _Style class. Given the dictionary of
{stylename: styleclass}, return a string rep of the list of keys.
Used to update the documentation.
"""
return "[{}]".format("|".join(map(" '{}' ".format, sorted(_styles)))) |
def get_class_value(x):
    """Return the ordinal value for a passenger class label.

    :param x: one of 'crew', 'first', 'second', or 'third'
    :return: 3 for 'crew', 2 for 'first', 1 for 'second', 0 otherwise.
    """
    ordinal_by_class = {'crew': 3, 'first': 2, 'second': 1}
    return ordinal_by_class.get(x, 0)
def get_movie_and_zmw_from_name(name):
    """Given a PacBio zmw name or read name, return (movie, zmw).

    Args:
        name (str): e.g. 'movie/4500/0_100' or a full read name.

    Returns:
        tuple: (movie str, zmw int).

    Raises:
        ValueError: when the name is not a PacBio read name.
    """
    try:
        fs = name.strip().split(' ')[0].split('/')
        movie, zmw = fs[0], fs[1]
        return movie, int(zmw)
    # Bug fix: a name without '/' raised an uncaught IndexError instead of
    # the documented ValueError.
    except (ValueError, IndexError):
        raise ValueError("Read %r is not a PacBio read." % name)
def beta_est_method_to_params(beta_estimate_method):
    """Translate a beta-estimation method name into its keyword arguments.

    Accepted names: 'max', 'max-post', 'ols', 'ols-post', 'quantile'.
    The '-post' variants estimate after taking logs.

    Raises:
        ValueError: for an unknown method name.
    """
    beta_est_kwargs = {
        'max': {'beta_estimate_method': 'max', 'est_after_log': False},
        'max-post': {'beta_estimate_method': 'max', 'est_after_log': True},
        'ols': {'beta_estimate_method': 'ols', 'est_after_log': False},
        'ols-post': {'beta_estimate_method': 'ols', 'est_after_log': True},
        'quantile': {'beta_estimate_method': 'quantile',
                     'est_after_log': False},
    }
    try:
        return beta_est_kwargs[beta_estimate_method]
    except KeyError:
        raise ValueError("Unrecognized beta_estimate_method")
def compute_train_batch_metrics(output, target, metrics):
    """Compute each metric on one batch.

    Args:
        output (:obj:`torch.Tensor`): The model output.
        target (:obj:`torch.Tensor`): The labels for the current batch.
        metrics (list): metric callables to evaluate.

    Returns:
        dict: maps each metric object to its scalar value (via ``.item()``).
    """
    return {metric: metric(output, target).item() for metric in metrics}
def generate_entry_type(docs_flat_dict):
    """For each flat dict, list its (key, value-type) pairs.

    Args:
        docs_flat_dict: iterable of dicts.

    Returns:
        list: one list of (key, type) tuples per input dict, in key order.
    """
    return [
        [(key, type(value)) for key, value in flat_dict.items()]
        for flat_dict in docs_flat_dict
    ]
def _multi_pattern(*patterns):
"""
combine multiple rgular expression
>>> _multi_pattern("(A+)", "(B+)")
'(?:(A+)|(B+))'
>>> re.findall(_, "AAABBBAAA")
[('AAA', ''), ('', 'BBB'), ('AAA', '')]
"""
return "(?:%s)" % "|".join(patterns) |
def merge(left, right):
    """
    Merge two sorted arrays into one sorted array.

    Args:
        left: sorted array
        right: sorted array

    Returns:
        merged sorted array (stable: ties take the left element first)
    """
    merged = []
    li = ri = 0
    while li < len(left) and ri < len(right):
        if left[li] <= right[ri]:
            merged.append(left[li])
            li += 1
        else:
            merged.append(right[ri])
            ri += 1
    # One of these is already empty; append the remainder of the other.
    merged.extend(left[li:])
    merged.extend(right[ri:])
    return merged
def generate_census_tract_dcids(ctfips):
    """Build the Data Commons dcid for a census tract FIPS code.

    Args:
        ctfips: a census tract FIPS code (int or str).

    Returns:
        str: the matching dcid, zero-padded to 11 digits.
    """
    padded = str(ctfips).zfill(11)
    return f"dcid:geoId/{padded}"
def arbg_int_to_rgba(argb_int):
    """Convert a packed ARGB integer to an [R, G, B, A] list.

    :param argb_int: ARGB integer (alpha in the top byte)
    :type argb_int: int
    :return: RGBA array
    :rtype: list[int]
    """
    return [
        (argb_int >> 16) & 0xFF,  # red
        (argb_int >> 8) & 0xFF,   # green
        argb_int & 0xFF,          # blue
        (argb_int >> 24) & 0xFF,  # alpha
    ]
def _combine_params(left, right):
"""Combine to lists of name,value pairs."""
d = {}
for p in left:
d[p["name"]] = p
for p in right:
d[p["name"]] = p
result = []
for _, v in d.items():
result.append(v)
return result |
def _reconstruct(x, y, r1, r2, ll, gamma, rho, sigma):
"""Reconstruct solution velocity vectors.
"""
V_r1 = gamma * ((ll * y - x) - rho * (ll * y + x)) / r1
V_r2 = -gamma * ((ll * y - x) + rho * (ll * y + x)) / r2
V_t1 = gamma * sigma * (y + ll * x) / r1
V_t2 = gamma * sigma * (y + ll * x) / r2
return [V_r1, V_r2, V_t1, V_t2] |
def sanitize_axis(shape, axis):
    """
    Check conformity of an axis with respect to a given shape; negative
    axes are converted to their positive equivalent and bounds-checked.

    Parameters
    ----------
    shape : tuple of ints
        shape of an array
    axis : int or tuple of ints or None
        the axis to be sanitized

    Returns
    -------
    sane_axis : int or tuple of ints or None
        the sane axis

    Raises
    ------
    ValueError
        if the axis is out of bounds.
    TypeError
        if the axis is not integral (nor a tuple / None).

    Examples
    --------
    >>> sanitize_axis((5, 4, 4), 1)
    1
    >>> sanitize_axis((5, 4, 4), -1)
    2
    >>> sanitize_axis((5, 4), (1,))
    (1,)
    """
    # scalars are handled like unsplit matrices
    if len(shape) == 0:
        return None
    if axis is None:
        return None
    if not isinstance(axis, (int, tuple)):
        raise TypeError("axis must be None or int or tuple, but was {}".format(type(axis)))
    ndim = len(shape)
    if isinstance(axis, tuple):
        # Normalize every member, then bounds-check the normalized tuple.
        sane = tuple(dim if dim >= 0 else dim + ndim for dim in axis)
        for dim in sane:
            if dim < 0 or dim >= ndim:
                raise ValueError("axis {} is out of bounds for shape {}".format(sane, shape))
        return sane
    if axis < 0:
        axis += ndim
    if axis < 0 or axis >= ndim:
        raise ValueError("axis {} is out of bounds for shape {}".format(axis, shape))
    return axis
def _most_derived_metaclass(meta, bases):
"""Selects the most derived metaclass of all the given metaclasses.
This will be the same metaclass that is selected by
.. code-block:: python
class temporary_class(*bases, metaclass=meta): pass
or equivalently by
.. code-block:: python
types.prepare_class('temporary_class', bases, metaclass=meta)
"Most derived" means the item in {meta, type(bases[0]), type(bases[1]), ...}
which is a non-strict subclass of every item in that set.
If no such item exists, then :exc:`TypeError` is raised.
:type meta: `type`
:type bases: :class:`Iterable` of `type`
"""
most_derived_metaclass = meta
for base_type in map(type, bases):
if issubclass(base_type, most_derived_metaclass):
most_derived_metaclass = base_type
elif not issubclass(most_derived_metaclass, base_type):
# Raises TypeError('metaclass conflict: ...')
return type.__new__(meta, str('temporary_class'), bases, {})
return most_derived_metaclass |
def _guessFileFormat(file, filename):
"""Guess whether a file is PDB or PDBx/mmCIF based on its filename and contents."""
filename = filename.lower()
if '.pdbx' in filename or '.cif' in filename:
return 'pdbx'
if '.pdb' in filename:
return 'pdb'
for line in file:
if line.startswith('data_') or line.startswith('loop_'):
file.seek(0)
return 'pdbx'
if line.startswith('HEADER') or line.startswith('REMARK') or line.startswith('TITLE '):
file.seek(0)
return 'pdb'
# It's certainly not a valid PDBx/mmCIF. Guess that it's a PDB.
file.seek(0)
return 'pdb' |
def str2bytes(x):
    """Convert the argument to bytes.

    bytes pass through; str is converted code-point-by-code-point (so only
    code points < 256 are accepted); anything else goes through str() first.
    """
    # Exact-type checks are deliberate: subclasses fall through to str().
    if type(x) is bytes:
        return x
    if type(x) is str:
        return bytes(map(ord, x))
    return str2bytes(str(x))
def karvonen(intensity, rest, maximum):
    """
    Karvonen method for target heart rate: exercise heart rate at a given
    fraction of the heart-rate reserve.

    args:
        intensity (float): training intensity as a decimal between 0 and 1
        rest (float): resting heart rate, beats/minute
        maximum (float): maximum heart rate, beats/minute

    Returns:
        float: target heart rate in beats/minute
    """
    reserve = maximum - rest
    return intensity * reserve + rest
def most_frequent(data):
    """
    Return the most frequently occurring item in the sequence.

    Ties are broken in favor of the item seen first.
    """
    counts = {}
    for symbol in data:
        counts[symbol] = counts.get(symbol, 0) + 1
    return max(counts, key=counts.get)
def default_join(row, join, *elements):
    """
    Join a set of objects as strings.

    :param row: The row being transformed (not used)
    :param join: The string used to join the elements
    :param elements: The elements to join
    :return: The joined string
    """
    separator = str(join)
    return separator.join(str(element) for element in elements)
def disj(J1, J2):
    """Return True when sets J1 and J2 are disjoint (share no element).

    (Doc fix: the old docstring claimed this tested whether J1 was
    contained in J2, which is not what the code computes.)
    """
    # isdisjoint accepts any iterable, so J2 needs no explicit conversion.
    return set(J1).isdisjoint(J2)
def group_by_commas(n):
    """Format *n* with a comma as the thousands separator."""
    return format(n, ',')
def CGy(N2, Omega, k, l, m, v, f):
    """
    Horizontal group speed in the y-direction in a flow.

    Note: K2 here is the full squared wavenumber k^2 + l^2 + m^2.
    """
    wavenumber_sq = k ** 2 + l ** 2 + m ** 2
    return (l * m ** 2 * (N2 - f ** 2)) / (wavenumber_sq ** 2 * Omega) + v
def infer_relationship(coeff: float, ibs0: float, ibs2: float) -> str:
    """
    Infer a relationship label from the kinship coefficient and the
    ibs0/ibs2 values.

    A NaN coefficient fails every comparison and yields 'nan'.
    """
    if coeff < 0.2:
        return 'unrelated'
    if coeff < 0.38:
        return 'below_first_degree'
    if coeff <= 0.62:
        # First-degree zone: the ibs0/ibs2 ratio separates parent-child
        # from siblings.
        ratio = ibs0 / ibs2
        if ratio < 0.005:
            return 'parent-child'
        if 0.015 < ratio < 0.052:
            return 'siblings'
        return 'first_degree'
    if coeff < 0.8:
        return 'first_degree_or_duplicate_or_twins'
    if coeff >= 0.8:
        return 'duplicate_or_twins'
    return 'nan'
def bubble_sort(L):
    """(list) -> list
    Reorder the items in L in place from smallest to largest and return L.

    >>> bubble_sort([6, 5, 4, 3, 7, 1, 2])
    [1, 2, 3, 4, 5, 6, 7]
    """
    n = len(L)
    for pass_num in range(n):
        swapped = False
        # After each pass the largest remaining item has bubbled to the
        # end, so the inner range shrinks by pass_num.
        for i in range(n - 1 - pass_num):
            if L[i] > L[i + 1]:
                L[i], L[i + 1] = L[i + 1], L[i]
                swapped = True
        # A pass without swaps means the list is already sorted.
        if not swapped:
            break
    return L
def rle_get_at(rle, pos):
    """
    Return the attribute at offset *pos* in a run-length-encoded sequence.

    *rle* is an iterable of (attribute, run_length) pairs. Returns None for
    a negative position or one past the encoded length.
    """
    if pos < 0:
        return None
    offset = 0
    for attribute, run_length in rle:
        offset += run_length
        if offset > pos:
            return attribute
    return None
def reverse_table(tableList):
    """
    brief: Reverse the values of a list in place.

    Args:
        @param tableList : the list to reverse
    Raises:
        ValueError: when the parameter is not a list
    Return: the same list object, reversed

    (Bug fix: the old implementation located elements with list.index(),
    which finds the FIRST occurrence of a value — lists containing
    duplicates, e.g. [5, 1, 1], were reversed incorrectly.)
    """
    if not isinstance(tableList, list):
        raise ValueError('The parameter given is not type of list')
    tableList.reverse()
    return tableList
def remove_trailing_zeros(input_list: list):
    """
    Remove trailing "zero" entries (',', '0' or '') from a cluster list.

    :param input_list: list of clusters
    :return: a new list ending at the last non-trivial element; empty when
        every element is trivial.

    (Bug fix: the old slice ``input_list[0:index]`` excluded the last kept
    element, and an all-trivial list kept everything but its final item.)
    """
    trivial = (',', '0', '')
    for i in range(len(input_list) - 1, -1, -1):
        if input_list[i] not in trivial:
            return input_list[:i + 1]
    return []
def get_path(environ):
    """
    Get the request path from a WSGI environ.

    Uses REQUEST_URI/RAW_URI when the server provides them; otherwise
    reconstructs the URI with wsgiref and strips the scheme://host prefix.
    """
    from wsgiref import util
    request_uri = environ.get('REQUEST_URI', environ.get('RAW_URI', ''))
    if request_uri == '':
        full_uri = util.request_uri(environ)
        host = environ.get('HTTP_HOST', '')
        prefix = "{scheme}://{host}".format(
            scheme=util.guess_scheme(environ), host=host)
        request_uri = full_uri.replace(prefix, '')
    return request_uri
def try_int(n):
    """
    Return *n* as an int when it has no fractional part, otherwise return
    *n* unchanged.
    """
    as_int = int(n)
    if n == as_int:
        return as_int
    return n
def port_class_def(ip_port):
    """
    Classify a TCP/UDP port number.

    no port / out of range => 0
    well known port [0,1023] => 1
    registered port [1024,49151] => 2
    dynamic port [49152,65535] => 3

    bytes input is interpreted as a big-endian integer.
    """
    if isinstance(ip_port, bytes):
        ip_port = int.from_bytes(ip_port, byteorder='big')
    if 0 <= ip_port <= 1023:
        return 1
    if 1024 <= ip_port <= 49151:
        return 2
    if 49152 <= ip_port <= 65535:
        return 3
    return 0
def compute_edit_distance(s: str, t: str) -> int:
    """Compute the Levenshtein distance between *s* and *t*.

    Uses the classic two-row dynamic program: O(len(s)) memory.

    >>> compute_edit_distance("", "abc")
    3
    >>> compute_edit_distance("kitten", "sitting")
    3
    """
    # Row for t[:0]: distance is just the number of deletions.
    current = list(range(len(s) + 1))
    previous = [0] * (len(s) + 1)
    for j, t_char in enumerate(t, 1):
        previous, current = current, previous
        current[0] = j
        for i, s_char in enumerate(s, 1):
            substitution_cost = 0 if s_char == t_char else 1
            current[i] = min(
                current[i - 1] + 1,               # deletion
                previous[i] + 1,                  # insertion
                previous[i - 1] + substitution_cost,
            )
    return current[len(s)]
def fmt_addr(addr):
    """Format a Bluetooth hardware address as a hex string.

    Args:
        addr (bytes): raw Bluetooth address in dongle (little-endian) format.

    Returns:
        str. Address in xx:xx:xx:xx:xx:xx format.
    """
    octets = ['{:02x}'.format(value) for value in bytearray(addr)]
    octets.reverse()
    return ':'.join(octets)
def _monotone_end_condition(inner_slope, chord_slope):
"""
Return the "outer" (i.e. first or last) slope given the "inner"
(i.e. second or penultimate) slope and the slope of the
corresponding chord.
"""
# NB: This is a very ad-hoc algorithm meant to minimize the change in slope
# within the first/last curve segment. Especially, this should avoid a
# change from negative to positive acceleration (and vice versa).
# There might be a better method available!?!
if chord_slope < 0:
return -_monotone_end_condition(-inner_slope, -chord_slope)
assert 0 <= inner_slope <= 3 * chord_slope
if inner_slope <= chord_slope:
return 3 * chord_slope - 2 * inner_slope
else:
return (3 * chord_slope - inner_slope) / 2 |
def eratosthenes(n):
    """Sieve of Eratosthenes.

    eratosthenes(n) -> list

    Return the list of prime numbers strictly less than *n*.

    Uses a boolean sieve: every composite below n is struck out by marking
    multiples of each prime up to sqrt(n). This replaces the original
    O(n^2) implementation built on repeated ``list.remove`` calls.
    """
    if n <= 2:
        return []
    is_prime = [True] * n
    is_prime[0] = is_prime[1] = False
    for p in range(2, int(n ** 0.5) + 1):
        if is_prime[p]:
            # Multiples below p*p were already struck by smaller primes.
            for multiple in range(p * p, n, p):
                is_prime[multiple] = False
    return [i for i in range(2, n) if is_prime[i]]
def decode_dataset_id(dataset_id):
    """Decode a dataset ID encoded using `encode_dataset_id()`.

    '__' decodes to a literal underscore; '_hh' (two hex digits) decodes to
    the character with that code point.
    """
    chars = []
    i = 0
    while i < len(dataset_id):
        ch = dataset_id[i]
        if ch != '_':
            chars.append(ch)
            i += 1
        elif dataset_id[i + 1] == '_':
            # Escaped underscore.
            chars.append('_')
            i += 2
        else:
            chars.append(chr(int(dataset_id[i + 1:i + 3], 16)))
            i += 3
    return ''.join(chars)
def calc_auc(raw_arr):
    """Compute ROC AUC from (score, label) records via the trapezoid rule.

    Labels within 1e-6 of 1.0 count as positives; everything else is a
    negative. Records are ranked by descending score.
    """
    ranked = sorted(raw_arr, key=lambda rec: rec[0], reverse=True)
    # Totals for normalizing the ROC coordinates.
    pos = neg = 0.
    for rec in ranked:
        if abs(rec[1] - 1.) < 0.000001:
            pos += 1
        else:
            neg += 1
    # Walk the ranking, accumulating (FPR, TPR) points.
    fp = tp = 0.
    roc_points = []
    for rec in ranked:
        if abs(rec[1] - 1.) < 0.000001:
            tp += 1
        else:
            fp += 1
        roc_points.append([fp / neg, tp / pos])
    auc = 0.
    prev_x = prev_y = 0.
    for x, y in roc_points:
        if x != prev_x:
            auc += (x - prev_x) * (y + prev_y) / 2.
        prev_x, prev_y = x, y
    return auc
def shapestr(v):
    """Return a printable shape string for a numeric variable.

    Objects without a ``.shape`` attribute are reported as "scalar".
    """
    try:
        dims = v.shape
    except AttributeError:
        return "scalar"
    return "array " + "x".join(str(d) for d in dims)
def get_price(item):
    """Find the price entry that has no locationGroupId (the default group).

    When several such entries exist, the LAST one wins (matches the
    original scan order). Returns "No Default Pricing" when none match.
    """
    the_price = "No Default Pricing"
    for price_entry in item.get('prices', []):
        if price_entry.get('locationGroupId'):
            continue
        the_price = "%0.4f" % float(price_entry['hourlyRecurringFee'])
    return the_price
def _castep_find_final_structure(flines):
""" Search for info on final structure in .castep file.
Parameters:
flines (list): list of lines in file.
Returns:
int: line number in file where total energy of final structure is printed.
"""
optimised = False
finish_line = 0
success_string = 'Geometry optimization completed successfully'
failure_string = 'Geometry optimization failed to converge after'
annoying_string = 'WARNING - there is nothing to optimise - skipping relaxation'
# look for final "success/failure" string in file for geometry optimisation
for line_no, line in enumerate(reversed(flines)):
if success_string in line:
finish_line = len(flines) - line_no
optimised = True
break
if annoying_string in line:
finish_line = len(flines) - line_no
optimised = True
break
if failure_string in line:
finish_line = len(flines) - line_no
optimised = False
break
# now wind back to get final total energies and non-symmetrised forces
for count, line in enumerate(reversed(flines[:finish_line])):
if 'Final energy, E' in line or 'Final energy =' in line:
finish_line -= count + 2
break
return finish_line, optimised |
def _is_named_tuple(x):
"""Check if we're dealing with a NamedTuple."""
t = type(x)
b = t.__bases__
if len(b) != 1 or b[0] != tuple:
return False
f = getattr(t, "_fields", None)
if not isinstance(f, tuple):
return False
return all(isinstance(n, str) for n in f) |
def compile_report_dict(TP, FP, FN, precision, recall, F1):
    """
    Generate a dictionary of all the metrics, to be used to generate a report.
    """
    # NOTE(review): the remark text below is emitted verbatim in the report,
    # so its exact wording/whitespace is part of the output contract.
    remark = """
Your result was not a perfect match. Therefore your score is calculated as (F1*0.9).
If you had a perfect F1 score, this means that you returned all tuples perfectly,
but forgot to order them.
"""
    # Bundle every metric plus the explanatory remark under fixed keys.
    res = {'TP': TP,
           'FP': FP,
           'FN': FN,
           'precision': precision,
           'recall': recall,
           'F1': F1,
           'Remark': remark}
    return res
def time_to_num(time):
    """
    Convert an "HH:MM" string into a number of hours.

    e.g. time_to_num("9:00") -> 9
         time_to_num("21:00") -> 21
         time_to_num("12:30") -> 12.5
    """
    parts = time.split(":")
    hours = int(parts[0])
    minutes = int(parts[1])
    if minutes == 0:
        return hours
    return hours + minutes / 60
def resolve_bases(bases):
    """Resolve MRO entries dynamically as specified by PEP 560."""
    # Mutable working copy; the original tuple is returned unchanged when
    # no substitution happens.
    new_bases = list(bases)
    updated = False
    # Tracks how far insertion points have drifted because earlier entries
    # were replaced by tuples of a different length.
    shift = 0
    for i, base in enumerate(bases):
        # Genuine classes stay as-is; only non-type objects that implement
        # __mro_entries__ (e.g. parameterized generics) get substituted.
        if isinstance(base, type):
            continue
        if not hasattr(base, "__mro_entries__"):
            continue
        # PEP 560: __mro_entries__ receives the ORIGINAL bases tuple.
        new_base = base.__mro_entries__(bases)
        updated = True
        if not isinstance(new_base, tuple):
            raise TypeError("__mro_entries__ must return a tuple")
        else:
            # Splice the replacement tuple in place of the original entry,
            # compensating for displacement caused by earlier splices.
            new_bases[i+shift:i+shift+1] = new_base
            shift += len(new_base) - 1
    if not updated:
        return bases
    return tuple(new_bases)
def my_handler(*args, **kwargs):
    """This docstring will be used in the generated function by default"""
    # NOTE: the docstring above is consumed by code generation, so its text
    # is left untouched.
    received = (args, kwargs)
    print("my_handler called !")
    return received
def const_unicode_to_singlebyte(codec, u2m, uc):
    """Emit a C constant mapping the Unicode code point *uc* to its
    single-byte value in *codec*, looked up in the *u2m* mapping."""
    return (
        f"const static uint8_t unicode_to_{codec}_u{uc:x} = 0x{u2m[uc]:02x};"
    )
def head(seq):
    """
    Return the first value of a sequence, or None when it is empty.

    Parameters
    ----------
    seq : (list or tuple or string) - sequence to get first value of

    Returns
    ----------
    any - first value of the sequence, or None for an empty sequence

    Raises
    ----------
    TypeError - when *seq* is not a list, tuple, or string

    Example
    ----------
    >>> head([1, 2, 3, 4, 5])
    1
    """
    if not isinstance(seq, (list, tuple, str)):
        raise TypeError("param 'seq' must be a list, tuple, or string")
    return seq[0] if seq else None
def split_o_flags_args(args):
    """
    Split clumped MSVC /O optimization flags into individual flags.

    Does not resolve flags overriding previous ones; non-/O arguments are
    skipped.

    ['/Ox', '/Ob1'] returns ['/Ox', '/Ob1']
    ['/Oxj', '/MP'] returns ['/Ox', '/Oj']
    """
    o_flags = []
    for arg in args:
        if not arg.startswith('/O'):
            continue
        flag_chars = arg[2:]
        if 'b' in flag_chars:
            # /Ob carries its own numeric argument, so it can't be split
            # apart from its neighbors; keep the argument intact.
            o_flags.append(arg)
        else:
            o_flags.extend('/O' + flag for flag in flag_chars)
    return o_flags
def flip(tpl):
    """
    Swap the first two items of a pair.

    >>> flip((1, 2))
    (2, 1)
    """
    first, second = tpl[0], tpl[1]
    return (second, first)
def product_permutations(p1, p2):
    """
    Compose two permutations: (p1 p2)(i) = p1(p2(i)).
    """
    composed = []
    for i in range(len(p1)):
        composed.append(p1[p2[i]])
    return composed
def maxVal(toConsider, avail):
    """Solve the 0/1 knapsack problem by exhaustive branching.

    Assumes toConsider is a list of items exposing getCost()/getValue(),
    and avail is the remaining weight budget.

    Returns a tuple (total value of the best solution, tuple of its items).
    """
    if toConsider == [] or avail == 0:
        return (0, ())
    first = toConsider[0]
    if first.getCost() > avail:
        # Item doesn't fit: explore the right (skip) branch only.
        return maxVal(toConsider[1:], avail)
    # Left branch: take the item.
    take_val, take_items = maxVal(toConsider[1:], avail - first.getCost())
    take_val += first.getValue()
    # Right branch: skip the item.
    skip_val, skip_items = maxVal(toConsider[1:], avail)
    # Keep whichever branch is strictly better (ties favor skipping).
    if take_val > skip_val:
        return (take_val, take_items + (first,))
    return (skip_val, skip_items)
def _compress_for_consolidate(max_vol, plan, **kwargs):
    """
    Combines as many aspirates as can fit within the maximum volume
    """
    # `target` is the dispense location currently being accumulated into;
    # `d_vol` is the running volume queued for that single dispense.
    target = None
    new_target = None
    d_vol = 0
    temp_aspirates = []
    new_transfer_plan = []
    def _append_aspirates():
        # Flush the queued aspirates, then emit one combined dispense of
        # their total volume into the current target.
        nonlocal d_vol, temp_aspirates, new_transfer_plan, target
        if not temp_aspirates:
            return
        for a in temp_aspirates:
            new_transfer_plan.append({
                'aspirate': {
                    'location': a['location'], 'volume': a['volume']
                }
            })
        new_transfer_plan.append({
            'dispense': {
                'location': target, 'volume': d_vol
            }
        })
        d_vol = 0
        temp_aspirates = []
    for i, p in enumerate(plan):
        this_vol = p['aspirate']['volume']
        new_target = p['dispense']['location']
        # NOTE(review): identity comparison (`is not`) is deliberate —
        # equal-but-distinct location objects start a new group.
        # A group also closes when adding this aspirate would overflow
        # max_vol.
        if (new_target is not target) or (this_vol + d_vol > max_vol):
            _append_aspirates()
            target = new_target
        d_vol += this_vol
        temp_aspirates.append(p['aspirate'])
    # Flush the final group.
    _append_aspirates()
    return new_transfer_plan
def lazy_value_or_error(value):
    """
    Evaluate *value* (calling it when callable); return _error_sentinel on
    any exception instead of raising.
    """
    try:
        result = value() if callable(value) else value
    except Exception:
        result = _error_sentinel
    return result
def duration_str(s):
    """
    Render a number of seconds as a short human-readable string using the
    two largest relevant units.

    Example: 100 => "1m40s", 10000 => "2h46m". Returns None for None.
    """
    if s is None:
        return None
    minutes = int(s // 60)
    if minutes == 0:
        return "%.1fs" % s
    seconds = s - minutes * 60
    hours = int(minutes // 60)
    if hours == 0:
        return "%dm%ds" % (minutes, seconds)
    minutes -= hours * 60
    days = int(hours // 24)
    if days == 0:
        return "%dh%dm" % (hours, minutes)
    hours -= days * 24
    years = int(days // 365)
    if years == 0:
        return "%dd%dh" % (days, hours)
    days -= years * 365
    return "%dy%dd" % (years, days)
def format_octes(size, highest_label="Go", formated=True):
    """
    Scale a size in Ko up to a higher octet unit, stopping at highest_label.

    Args:
        size (float): data size in Ko
        highest_label (str): one of ["Ko", "Mo", "Go", "To"]
        formated (bool): return "value unit" string, else (value, unit)
    """
    power = 1024
    power_labels = ["Ko", "Mo", "Go", "To"]
    exp = 0
    while size >= power:
        size /= power
        exp += 1
        # Stop scaling once the requested ceiling unit is reached.
        if highest_label == power_labels[exp]:
            break
    if formated:
        return f"{round(size, 2)} {power_labels[exp]}"
    return round(size, 2), power_labels[exp]
def write_file(filename="", text=""):
    """Write *text* to the file *filename*; return the character count."""
    with open(filename, 'w') as handle:
        return handle.write(text)
def bash_wrap(cmd_str):
    """Wrap a shell command string in ``bash -c '<string>'``.

    Single quotes are escaped with the standard close-escape-reopen
    sequence; that is sufficient because a single quote is the only
    character needing escaping inside a single-quoted shell string.
    """
    escaped = cmd_str.replace("'", "'\\''")
    return "bash -c '" + escaped + "'"
def sizeof_fmt(num, suffix='B'):
    """Return a human-readable version of an in-memory size.

    Based on Fred Cirera's Stack Overflow answer:
    https://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
    """
    magnitude = num
    for prefix in ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi'):
        if abs(magnitude) < 1024.0:
            return "%3.1f%s%s" % (magnitude, prefix, suffix)
        magnitude /= 1024.0
    return "%.1f%s%s" % (magnitude, 'Yi', suffix)
def solve(a0, a1, a2, b0, b1, b2):
    """
    Compare triplets A(a0, a1, a2) and B(b0, b1, b2) element-wise.

    A scores a point where a[i] > b[i], B where a[i] < b[i]; equal
    elements score nothing.

    Returns:
        tuple: (A's points, B's points)
    """
    points_a = 0
    points_b = 0
    for a, b in zip((a0, a1, a2), (b0, b1, b2)):
        if a > b:
            points_a += 1
        elif b > a:
            points_b += 1
    return (points_a, points_b)
def measure_counts_nondeterministic(shots, hex_counts=True):
    """Reference counts for the measurement test circuits.

    Represents measuring the |++> state: each of the four outcomes occurs
    with probability 1/4.
    """
    quarter = shots / 4
    if hex_counts:
        outcome_keys = ('0x0', '0x1', '0x2', '0x3')
    else:
        outcome_keys = ('00', '01', '10', '11')
    return [{key: quarter for key in outcome_keys}]
def time_to_secs(hhmmss):
    """Convert an "hh:mm:ss" string into a total number of seconds.

    Returns 0 for malformed input instead of raising.

    (Fix: the bare ``except:`` also swallowed SystemExit and
    KeyboardInterrupt; only the actual failure modes are caught now.)
    """
    try:
        parts = hhmmss.split(":")
        secs = int(parts[0]) * 3600 + int(parts[1]) * 60 + int(parts[2])
    except (AttributeError, IndexError, ValueError):
        # non-string input, too few fields, or non-numeric fields
        secs = 0
    return secs
def tex_coord(x, y, n=4):
    """ Return the bounding vertices of the texture square.
    Parameters
    ----------
    x, y - 2D position coordinates of texture file texture.png
    n = 4 - hard coded size of texture in file
    Returns
    -------
    8 floats, the bounding coordinates of the texture square, as corner
    pairs in order bottom-left, bottom-right, top-right, top-left.
    """
    # Each texture cell occupies 1/n of the atlas in each dimension
    # (0.25 with the default n=4).
    step = 1.0 / n
    left = x * step
    bottom = y * step
    right = left + step
    top = bottom + step
    return left, bottom, right, bottom, right, top, left, top
def is_symbol_line(line):
    """Return True when a symbol-table line defines a symbol name."""
    prefix = 'Symbol.'
    return line[:len(prefix)] == prefix
def normalize_letters(one_letter_code) :
    """Convert RAF one-letter amino acid codes into IUPAC standard codes.

    Letters are uppercased; "." (RAF's "Unknown") becomes "X".
    """
    return 'X' if one_letter_code == '.' else one_letter_code.upper()
def nbai(b6, b11):
    """
    Normalized Difference Bareness Index (Zhao and Chen, 2005).
    .. math:: NDBaI = (b6 - b11) / (b6 + b11)
    :param b6: Red-edge 2.
    :type b6: numpy.ndarray or float
    :param b11: SWIR 1.
    :type b11: numpy.ndarray or float
    :returns NDBaI: Index value
    .. Tip::
        Zhao, H., Chen, X., 2005. Use of normalized difference bareness \
        index in quickly mapping bare areas from TM/ETM+. in: Proceedings \
        of the 2005 IEEE International Geoscience and Remote Sensing \
        Symposium 3, pp. 1666. doi:10.1109/IGARSS.2005.1526319.
    """
    difference = b6 - b11
    total = b6 + b11
    return difference / total
def background_colormap(val):
    """
    Map a 1-to-5 score to a red-to-green background color, for nice
    dataframe printing or excel export purposes.
    Example:
        df.style.applymap(background_colormap)
    """
    # Index 0 (val == 1) is red, index 4 (val == 5) is green.
    palette = ('#FB676D', '#FBA977', '#FEE987', '#B1D584', '#62C073')
    return 'background-color: {}'.format(palette[val - 1])
def find_source_profile(profile, aws_profiles):
    """Retrieve the source profile for a profile.

    Looks up ``profile`` by name in ``aws_profiles`` and returns the value
    of its first ``source_profile_*`` tag with the prefix stripped.

    Returns None (after printing an error) when the profile is not found,
    and None when the profile has no ``source_profile_`` tag — the original
    code raised an uncaught IndexError in that case. The error message also
    contained a stray ``$`` ("${profile}") left over from shell-style
    templating; it now prints the profile name cleanly.
    """
    matches = [p for p in aws_profiles if p.get('Name') == profile]
    if not matches:
        print(f"Error, profile {profile} not found in aws config")
        return None
    source_profiles = [
        tag.replace('source_profile_', '')
        for tag in matches[0]['Tags']
        if tag is not None and tag.startswith('source_profile_')
    ]
    if not source_profiles:
        return None
    return source_profiles[0]
def _low_bits(k, n):
"""Return lowest k bits of n."""
return (((1 << k) - 1) & n) |
def evaluate_type(value):
    """Evaluates the type in relation to its value.

    Args:
        value : value to evaluate
    Returns:
        One of "array", "integer", "number" or "string".

    Notes: removes the original's unreachable inner branch, which re-tested
    the same string-round-trip condition a second time. The outer catch now
    also handles TypeError, so non-castable values like None return
    "string" instead of raising.
    """
    if isinstance(value, list):
        return "array"
    if isinstance(value, dict):
        # Dicts are deliberately reported as "string" (original behaviour).
        return "string"
    try:
        float(value)
    except (ValueError, TypeError):
        # Not numeric at all.
        return "string"
    try:
        # "integer" only when the textual form round-trips exactly, so
        # "967.0" stays a "number" rather than an "integer".
        if str(int(float(value))) == str(value):
            return "integer"
        return "number"
    except ValueError:
        # int() of NaN raises ValueError even though float() succeeded.
        return "string"
def changeArray(x, y):
    """
    x: list of len x
    y: list of len y, where y > x
    Walks two sorted lists and inserts the string 'blank' into the result
    wherever the longer list has an item the shorter list is missing.
    Returns a list the same length as the longer input.

    Fixes two defects in the original: x[p1] raised IndexError once the
    shorter list was exhausted (e.g. changeArray([1], [1, 2])), and the
    loop hung when the shorter list held an item absent from the longer.
    """
    # Ensure that y is always the longer list.
    if len(x) > len(y):
        x, y = y, x
    # Arrange lists so a single forward pointer suffices.
    x.sort()
    y.sort()
    p1 = 0
    merged = []
    for item in y:
        # Guard p1 so we never index past the end of x.
        if p1 < len(x) and x[p1] == item:
            merged.append(x[p1])
            p1 += 1
        else:
            merged.append('blank')
    return merged
def find_uncertain_cell(sudoku_possible_values, k):
    """
    Finds a cell where there is still more than one possibility.
    Inputs:
    - sudoku_possible_values: all the possible values from the current step of the game.
    - k: the size of the grid.
    Output:
    - the (row, col) of the first such cell in row-major order, or None
      when every cell is already decided.
    """
    size = k * k
    return next(
        ((row, col)
         for row in range(size)
         for col in range(size)
         if len(sudoku_possible_values[row][col]) > 1),
        None,
    )
def get_realizations(n_realizations, batch_size, slurm_idx):
    """Generate the list of realization indices for the current node.

    The final batch is capped at n_realizations: the original version
    padded it to a full batch_size and so emitted realization ids that do
    not exist (e.g. (10, 4, 2) returned [8, 9, 10, 11] instead of [8, 9]).

    Raises:
        IndexError: when slurm_idx does not address an existing batch.
    """
    batches = range(0, n_realizations, batch_size)
    try:
        start = batches[slurm_idx]
    except IndexError:
        msg = f'No realization batch exists for SLURM_ARRAY_TASK_ID = {slurm_idx}.'
        raise IndexError(msg) from None
    return list(range(start, min(start + batch_size, n_realizations)))
def translate_alpha_to_x(alpha, x_input, x_baseline):
    """Translates alpha to the point coordinates within straight-line interval.
    Args:
      alpha: the relative location of the point between x_baseline and x_input.
      x_input: the end point of the straight-line path.
      x_baseline: the start point of the straight-line path.
    Returns:
      The coordinates of the point within [x_baseline, x_input] interval
      that correspond to the given value of alpha.
    """
    assert 0 <= alpha <= 1.0
    delta = x_input - x_baseline
    return x_baseline + delta * alpha
def find_time_period_per_segment(prod_data):
    """
    Finds, for every segment id, the earliest and latest time it was handled.
    :param prod_data: Mapped production data
    :type prod_data: list
    :return: dict keyed by str(segment) with "earliest_time"/"latest_time"
    :rtype: dict
    """
    segment_times = {}
    for entry in prod_data:
        key = str(entry["segment"])
        when = entry["time"]
        if key not in segment_times:
            # First sighting: this time is both earliest and latest so far.
            segment_times[key] = {"earliest_time": when, "latest_time": when}
            continue
        window = segment_times[key]
        if when > window["latest_time"]:
            window["latest_time"] = when
        elif when < window["earliest_time"]:
            window["earliest_time"] = when
    return segment_times
def pg2dtypes(pgtype):
    """Returns equivalent dtype for input `pgtype`.

    Unknown PostgreSQL type names fall back to 'object'.
    """
    dtype_groups = {
        'int16': ('smallint', 'int2'),
        'int32': ('integer', 'int4', 'int'),
        'int64': ('bigint', 'int8'),
        'float32': ('real', 'float4'),
        'float64': ('double precision', 'float8', 'numeric', 'decimal'),
        'object': ('text', 'USER-DEFINED'),
        'bool': ('boolean', 'bool'),
        'datetime64[D]': ('date',),
        'datetime64[ns]': ('timestamp', 'timestamp without time zone',
                           'timestamptz', 'timestamp with time zone'),
    }
    lookup = {pg: dtype for dtype, pgs in dtype_groups.items() for pg in pgs}
    return lookup.get(str(pgtype), 'object')
def mapFromTo(x, a, b, c, d):
    """Linearly remap x from interval [a, b] onto [c, d] (JavaScript's map())."""
    x, a, b, c, d = (float(v) for v in (x, a, b, c, d))
    ratio = (x - a) / (b - a)
    return ratio * (d - c) + c
def find_water_volume_blocked_in_towers(towers):
    """
    Compute the volume of rain water trapped between towers of unit width.

    Replaces the original forward/backward scan, which only closed a basin
    on a *strictly* taller wall and therefore missed water held between two
    walls of equal height (e.g. [3, 1, 3] returned 0 instead of 2). This
    version uses the standard prefix-maximum formulation: the water above
    each tower is bounded by the shorter of the tallest walls to its left
    and right.

    :param towers: list of tower heights
    :return: unit water occupied
    """
    n = len(towers)
    if n < 3:
        # Fewer than three towers cannot enclose any water.
        return 0
    # Tallest tower at or left of each position.
    left_max = [0] * n
    left_max[0] = towers[0]
    for i in range(1, n):
        left_max[i] = max(left_max[i - 1], towers[i])
    # Tallest tower at or right of each position.
    right_max = [0] * n
    right_max[n - 1] = towers[n - 1]
    for i in range(n - 2, -1, -1):
        right_max[i] = max(right_max[i + 1], towers[i])
    # Water column at i is bounded by the shorter of the two walls.
    return sum(min(left_max[i], right_max[i]) - towers[i] for i in range(n))
def get_sample_type(sample, sample_mapping):
    """Return the sample type whose name list best matches the start of sample.

    If several sample types list a name base that prefixes ``sample``, the
    most specific one wins — i.e. the type whose matching name base is
    longest. Raises ValueError when nothing matches.
    """
    matching_prefixes = {}
    for sample_type, name_bases in sample_mapping.items():
        for base in name_bases:
            if sample.startswith(base):
                matching_prefixes[base] = sample_type
    longest = max(matching_prefixes, key=len)
    return matching_prefixes[longest]
def _nativevlan(port_data):
"""Return vlan for specific ifIndex.
Args:
port_data: Data dict related to the port
ifindex: ifindex in question
Returns:
vlan: VLAN number
"""
# Initialize key variables
vlan = None
# Determine native VLAN tag number for Cisco devices
if 'vlanTrunkPortNativeVlan' in port_data:
vlan = int(port_data['vlanTrunkPortNativeVlan'])
# Determine native VLAN tag number for Juniper devices
if 'dot1qPvid' in port_data:
vlan = port_data['dot1qPvid']
# Return
return vlan |
def sum_three_2020(entries):
    """
    Return the product of three distinct entries summing to 2020, or -1.

    The original inner slice started at the second pick's own index, so a
    single list element could be counted twice; the slice now starts one
    element later.

    >>> sum_three_2020([1721, 979, 366, 299, 675, 1456])
    241861950
    """
    for i, entry1 in enumerate(entries, start=1):
        for j, entry2 in enumerate(entries[i:], start=i + 1):
            # entries[j:] now begins strictly after entry2's position.
            for entry3 in entries[j:]:
                if entry1 + entry2 + entry3 == 2020:
                    return entry1 * entry2 * entry3
    return -1
def changeExt(path, ext=None):
    """
    Add or change the extension of a filepath.

    Uses os.path.splitext, so a dot inside a directory name (e.g.
    "dir.v1/file") is no longer mistaken for an extension separator —
    the original str.rindex-based stripping truncated such paths.

    @param path: filepath to modify
    @type path: string
    @param ext: extension to add to filepath (without the leading dot)
    @type ext: string
    @return: filepath with old extension removed and any new extension added
    @rtype: string
    """
    import os.path

    root, _ = os.path.splitext(path)
    if ext is not None:
        root = ".".join([root, ext])
    return root
def doi_formatting(input_doi):
    """Reformat loosely structured DOIs.

    Uppercases, strips spaces, and removes common http/label prefixes so
    the result is in the bare ``10.NNNN/*`` form rather than a URL.

    Parameters
    ----------
    input_doi: str

    Notes
    ----------
    This focuses on known potential issues. It currently returns no
    errors; a potential improvement for future updates.
    """
    doi = input_doi.upper().replace(" ", "")
    # All bare DOI prefixes begin with '10' — pass those through untouched.
    if doi.startswith("10"):
        return doi
    # Known prefixes, ordered so the 'DOI:'-suffixed URL variants are
    # stripped before their shorter plain-URL counterparts.
    known_prefixes = (
        "DOI:",
        "HTTPS://DOI.ORG/DOI:",
        "HTTPS://DX.DOI.ORG/DOI:",
        "HTTP://DOI.ORG/DOI:",
        "HTTP://DX.DOI.ORG/DOI:",
        "HTTPS://DOI.ORG/",
        "HTTPS://DX.DOI.ORG/",
        "HTTP://DOI.ORG/",
        "HTTP://DX.DOI.ORG/",
    )
    for prefix in known_prefixes:
        if doi.startswith(prefix):
            return doi[len(prefix):]
    return doi
def dicttocsv_ml(d):
    """Convert the bday dict to a CSV string, one line per occupation."""
    template = u"{0:d},{1:d},{2:d},{3},{4},{5:d},{6}\n"
    lines = [
        template.format(d['year'], d['month'], d['day'], d['fullname'],
                        d['nationality'], d['double_nationality'], occ)
        for occ in d['occupation']
    ]
    return u"".join(lines)
def check_vid_id_has_dash(vid_id):
    """Return vid_id with a dash inserted before its first ASCII digit,
    unless the id already contains a dash (or has no digits)."""
    if '-' in vid_id:
        return vid_id
    for pos, ch in enumerate(vid_id):
        if ch in '0123456789':
            return vid_id[:pos] + '-' + vid_id[pos:]
    return vid_id
def toInt(value):
    """
    Coerce value to int, returning None when the conversion fails.
    """
    try:
        return int(value)
    except (ValueError, TypeError):
        return None
def cacheDirName(workflowID):
    """
    :return: Name of the cache directory for the given workflow ID.
    """
    return 'cache-{}'.format(workflowID)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.