content
stringlengths 42
6.51k
|
|---|
def is_close(a,
             b,
             rel_tol=1e-9,
             abs_tol=0.0,
             method='weak'):
    """
    Return True when a is close in value to b, False otherwise.

    :param a: one of the values to be tested
    :param b: the other value to be tested
    :param rel_tol: relative tolerance -- allowed error relative to the
                    magnitude of the inputs (default 1e-9)
    :param abs_tol: minimum absolute tolerance -- useful for comparisons
                    near zero (default 0.0)
    :param method: how the relative tolerance is scaled:
        "asymmetric" : scaled by b only
        "strong"     : scaled by the smaller of the two values
        "weak"       : scaled by the larger of the two values
        "average"    : scaled by the average of the two values

    NaN is never close to anything (including itself); inf/-inf are only
    close to themselves, mirroring IEEE 754.  Complex values compare by
    absolute value, and Decimal works when the tolerances are Decimals.
    See PEP 485 for the full rationale.
    """
    if method not in ("asymmetric", "strong", "weak", "average"):
        raise ValueError('method must be one of: "asymmetric",'
                         ' "strong", "weak", "average"')
    if rel_tol < 0.0 or abs_tol < 0.0:
        raise ValueError('error tolerances must be non-negative')
    # Exact equality covers identical values and the two-infinities case.
    if a == b:
        return True
    delta = abs(b - a)
    # The absolute floor applies identically under every method.
    if delta <= abs_tol:
        return True
    scaled_a = abs(rel_tol * a)
    scaled_b = abs(rel_tol * b)
    if method == "asymmetric":
        return delta <= scaled_b
    if method == "strong":
        return delta <= scaled_b and delta <= scaled_a
    if method == "weak":
        return delta <= scaled_b or delta <= scaled_a
    # method == "average"
    return delta <= abs(rel_tol * (a + b) * 0.5)
|
def get_property_content(content, prefix):
    """Prefix each object in content and join them with commas.

    Args:
        content: list of property objects
        prefix: prefix placed before each dcid, e.g. "dcs:"
    Returns:
        Comma-joined string such as "dcs:bio/CWH41_YEAST,dcs:bio/RPN3_YEAST",
        or None when content is empty/None.
    """
    if not content:
        return None
    return ','.join(prefix + obj for obj in content)
|
def updateFile(original, header, startIdx, endIdx):
    """Build a new file (list of lines) with lines startIdx..endIdx of the
    original replaced by the desired header; everything else is kept."""
    updated = list(original[:startIdx])   # segment before the start index
    updated.extend(header)                # the replacement header
    updated.extend(original[endIdx + 1:])  # remainder of the original file
    return updated
|
def compare_configs(c1, c2):
    """Compare two configuration lists element-wise.

    Returns True only when both have the same length and all corresponding
    elements are equal.  (The previous implementation indexed c2 by c1's
    indices: it raised IndexError when c2 was shorter and silently ignored
    extra trailing elements when c2 was longer.)
    """
    if len(c1) != len(c2):
        return False
    return all(x == y for x, y in zip(c1, c2))
|
def secientificNotation(num):
    """Format a number with an engineering-style suffix, rounded to three
    decimal places.

    :param num: input number
    :return: string such as '1.5K' or '2.0T'; numbers <= 0 get no suffix.

    NOTE: bucket boundaries use strict '>' (e.g. exactly 1e3 falls into the
    lower bucket, suffixed 'B'), preserved from the original behavior.
    """
    buckets = ((1e12, 'T'), (1e9, 'G'), (1e6, 'M'), (1e3, 'K'), (0, 'B'))
    for threshold, unit in buckets:
        if num > threshold:
            scaled = num / threshold if threshold else num
            break
    else:
        scaled, unit = num, ''
    return str(round(scaled, 3)) + unit
|
def get_version(inc_version, exc_version):
    """Choose between the including and excluding version strings.

    :param inc_version: including version
    :param exc_version: excluding version
    :return: inc_version when it is non-empty and not "*", exc_version otherwise
    """
    usable = inc_version and inc_version != "*"
    return inc_version if usable else exc_version
|
def gcd(a, b):
    """Return the greatest common divisor of integers a and b using
    Euclid's algorithm."""
    while b != 0:
        a, b = b, a % b
    return a
|
def link_ids(id1, id2):
    """Return XML linking id1 and id2 when both are truthy; otherwise
    return id1 unchanged."""
    if not (id1 and id2):
        return id1
    return '<link>' + id1 + id2 + '</link>'
|
def f_gw(x, alpha):
    """Calculate f_gw as given by eqs. (17) and (20)."""
    xpow = x**(3.-alpha)
    numerator = (1. + xpow)**3
    denominator = 4.*x**3 * (1. + (4.-alpha) * xpow)
    return numerator / denominator
|
def equal_near(item_1: float, item_2: float, thresold: float = 0.1) -> bool:
    """Check whether two values are approximately equal.

    Args:
        item_1 (float): First item.
        item_2 (float): Second item.
        thresold (float, optional): Relative-difference threshold.
            Defaults to 0.1.
    Returns:
        bool: True when |1 - item_1/item_2| is below the threshold.

    NOTE: raises ZeroDivisionError when item_2 is 0 (preserved behavior).
    """
    ratio = item_1 / item_2
    return abs(1 - ratio) < thresold
|
def compare_sets(s1, s2):
    """Compare the sets.

    NOTE(review): despite the name, for non-empty s1 this does NOT return
    a boolean -- it returns the set difference ``s1 - s2`` (truthy when s1
    has elements missing from s2, an empty/falsy set when s1 <= s2).  The
    commented-out equality check suggests the behavior was changed on
    purpose; confirm callers rely on the difference before "fixing".
    """
    if len(s1) != 0:
        # return s1 == s2
        return s1 - s2
    # Empty s1 always yields False (not an empty set).
    return False
|
def bisect(a, x, lo=0, hi=None):
    """Return the index where x should be inserted in sorted list a.

    The result i satisfies: every e in a[:i] has e <= x and every e in
    a[i:] has e > x -- i.e. insertion lands just after the rightmost
    existing x.  Optional lo (default 0) and hi (default len(a)) bound the
    searched slice.
    """
    if lo < 0:
        raise ValueError('lo must be non-negative')
    if hi is None:
        hi = len(a)
    while lo < hi:
        middle = (lo + hi) // 2
        # Only __lt__ is used, matching list.sort() and heapq.
        if not x < a[middle]:
            lo = middle + 1
        else:
            hi = middle
    return lo
|
def quickSort(ll=None):
    """Out-of-place quicksort.

    Returns a new sorted list; the input is never mutated.  Calling with
    no argument returns [].  (The previous signature used the mutable
    default ``ll=[]`` -- harmless here since it was never mutated, but a
    classic Python foot-gun; replaced by a None sentinel with identical
    observable behavior.)
    """
    if ll is None:
        ll = []
    if len(ll) <= 1:
        return ll
    # Last element as pivot; three-way partition handles duplicates.
    pivot = ll[-1]
    smaller = [x for x in ll if x < pivot]
    equal = [x for x in ll if x == pivot]
    larger = [x for x in ll if x > pivot]
    return quickSort(smaller) + equal + quickSort(larger)
|
def convertToURL(cPathname):
    """Convert a Windows or Linux pathname into an OOo file:// URL."""
    if len(cPathname) > 1:
        if cPathname[1:2] == ":":
            # Windows drive letter: "C:\x" -> "/C|\x"
            cPathname = "/%s|%s" % (cPathname[0], cPathname[2:])
    cPathname = "file://" + cPathname.replace("\\", "/")
    return cPathname
|
def bytes_needed(i):
    """
    Compute the number of bytes needed to hold arbitrary-length integer i
    """
    # Consumes i one byte (8 bits) per iteration: first shift off 7 value
    # bits, inspect what remains, then shift off the 8th bit.
    # NOTE(review): for negative i Python's arithmetic right shift never
    # reaches 0 (e.g. -1 >> 7 == -1), so this loops forever -- presumably
    # callers only pass non-negative integers; confirm.
    bn = 1
    while True:
        i >>= 7
        if i == 1:
            # negative sign bit
            # Exactly one bit is left in the current byte's top position;
            # an extra byte is needed so it is not misread as a sign bit
            # -- TODO confirm intended encoding (varint/two's-complement
            # style length rule).
            return bn + 1
        elif i == 0:
            return bn
        i >>= 1
        bn += 1
|
def factorial(number):
    """Return the factorial of a number (1 for number <= 0)."""
    product = 1
    remaining = number
    while remaining > 1:
        product *= remaining
        remaining -= 1
    return product
|
def enforce_not_None(e):
    """Pass e through unchanged, raising when it is None.

    Used for typechecking and runtime safety."""
    if e is not None:
        return e
    raise Exception("Input is None.")
|
def is_youtube(raw_song):
    """Heuristically check whether raw_song is a YouTube link or video ID.

    A bare video ID is 11 characters, contains no spaces and has at least
    one uppercase letter; otherwise the string must contain a watch URL.
    """
    looks_like_id = (len(raw_song) == 11
                     and raw_song.replace(" ", "%20") == raw_song
                     and raw_song.lower() != raw_song)
    return looks_like_id or 'youtube.com/watch?v=' in raw_song
|
def get_thresholdtable_from_fpr(scores, labels, fpr_list):
    """Calculate the threshold score list from the FPR list.

    Args:
        scores: iterable of scores
        labels: iterable of labels (0 marks a live/negative sample)
        fpr_list: desired false-positive rates
    Returns:
        threshold_list: one threshold score per requested FPR.

    NOTE: raises IndexError when no label-0 samples exist (preserved).
    """
    live_scores = sorted(
        (float(score) for score, label in zip(scores, labels) if label == 0),
        reverse=True)
    live_nums = len(live_scores)
    threshold_list = []
    for fpr in fpr_list:
        # At least one sample is always taken.
        i_sample = max(1, int(fpr * live_nums))
        threshold_list.append(live_scores[i_sample - 1])
    return threshold_list
|
def _get_table(infobox_line):
    """Converts the infobox into a one row table.

    The line is tab-separated cells of the form ``<column>_<index>:<value>``;
    cells containing '<none>' are dropped.  Returns a list of
    {'column_header', 'row_number' (always 1), 'content'} dicts, where
    content joins the column's values in index order.
    """
    cells = infobox_line.split('\t')
    # remove empty cells
    cells = list(filter(lambda x: x.find('<none>') == -1, cells))
    # Column name = text before the last '_' of the part preceding ':'.
    columns = {cell[0:cell.split(':')[0].rfind('_')] for cell in cells}
    table = {col: dict() for col in columns}
    for cell in cells:
        # Split "<column>_<index>:<value>" at the first ':'.
        delimiter_position_value = cell.find(':')
        column_index = cell[0:delimiter_position_value]
        value = cell[delimiter_position_value + 1:]
        delimiter_column_index = column_index.rfind('_')
        column = column_index[0:delimiter_column_index]
        index = column_index[delimiter_column_index + 1:]
        table[column][index] = value
    infobox_line_as_table = []
    for column in table:
        # NOTE(review): indices are sorted lexicographically as strings,
        # so '10' sorts before '2' -- confirm indices stay single-digit.
        row_value = ' '.join(
            table[column][index] for index in sorted(table[column].keys()))
        infobox_line_as_table.append({
            'column_header': column,
            'row_number': 1,
            'content': row_value,
        })
    return infobox_line_as_table
|
def _resolve_text(unicode_bytes):
"""Resolves Facebook text data, which is often dumped as Unicode bytes, to actual text."""
return unicode_bytes.encode("charmap").decode("utf8")
|
def is_not_empty(test_object):
    """Return True when test_object is neither None nor empty.

    Strings additionally count as empty when they are only whitespace.
    """
    if not test_object:
        return False
    if isinstance(test_object, str):
        # Whitespace-only strings are treated as empty.
        return bool(test_object.strip())
    return True
|
def controls(t):
    """Return the list of M control tuples for time t (t is currently
    unused; a single constant control is emitted)."""
    # Other candidates: [(1., 0.), (0., 0.), (-1., 0.), (0., 1.), (0., -1.)]
    constant_control = (0., 0.3)
    return [constant_control]
|
def is_checkpoint(block_number: int, epoch_length: int) -> bool:
    """Return True when block_number falls on an epoch boundary (i.e. is a
    checkpoint), otherwise False."""
    remainder = block_number % epoch_length
    return remainder == 0
|
def eq_div(N, i):
    """Equally divide N examples among i buckets, e.g. eq_div(12, 3) == [4, 4, 4].

    The first N % i buckets get one extra example; i <= 0 yields [].
    """
    if i <= 0:
        return []
    base, extra = divmod(N, i)
    return [base + 1] * extra + [base] * (i - extra)
|
def initials(string, sep = ".", sepAtEnd = True):
"""Returns the initials of a string"""
splitString = string.split(" ")
theInitialList = [i[:1].capitalize() for i in splitString]
return sep.join(theInitialList)+sep*sepAtEnd
|
def append_zeros_to_filtname(filtname):
    """Insert zeros at position 6 to pad the filter name to 15 characters.

    Parameters:
        filtname (str) - name of the filter file
    Returns:
        filtname (str) - name, possibly with '0's inserted after index 6
    """
    missing = 15 - len(filtname)
    if missing > 0:
        filtname = filtname[:6] + '0' * missing + filtname[6:]
    return filtname
|
def coords_api_to_json_tilt(ang_up):
    """Convert an API tilt angle to robot coordinates (sign flip)."""
    return -ang_up
|
def checksave(save_all, pix_list, save_ratio, save_count_annotated, save_count_blank):
    """Decide whether an image chip should be saved.

    :param save_all: (bool) save every chip when True
    :param pix_list: pixel values of the image mask
    :param save_ratio: target ratio of annotated to blank chips
    :param save_count_annotated: annotated chips saved so far
    :param save_count_blank: blank chips saved so far
    :return: bool

    NOTE: divides by save_count_blank when save_all is not True, so a
    zero blank count raises ZeroDivisionError (preserved behavior).
    """
    if save_all is True:
        return True
    if save_count_annotated / float(save_count_blank) > save_ratio:
        # Annotated chips are over-represented; save blanks too.
        return True
    # Save only when the mask contains at least one annotated pixel.
    return any(p > 0 for p in pix_list)
|
def get_shipping_costs(request, shipping_method):
    """Return the shipping price and tax for the given request and method.

    The result dict has the keys "price_net", "price_gross" and "tax";
    all values are 0.0 when shipping_method is None.
    """
    if shipping_method is None:
        return {"price_net": 0.0, "price_gross": 0.0, "tax": 0.0}
    # Preserve the original call order: gross, then net, then tax.
    gross = shipping_method.get_price_gross(request)
    net = shipping_method.get_price_net(request)
    tax = shipping_method.get_tax(request)
    return {"price_net": net, "price_gross": gross, "tax": tax}
|
def lower_keys(x):
    """Lowercase every key of the payload dict to match the schema.

    Args:
        x: payload dict
    Returns:
        A new dict with lowercased keys, or the sentinel string
        "msg_fromat_incorrect" (sic -- preserved verbatim) for non-dicts.
    """
    if not isinstance(x, dict):
        return "msg_fromat_incorrect"
    return {key.lower(): value for key, value in x.items()}
|
def reverseComplement(seq):
    """Reverse-complement a DNA sequence (expects capital letters).

    Uppercase bases are complemented (A<->T, C<->G); stray lowercase bases
    are merely uppercased without complementing, matching the original
    replace-chain behavior.  Other characters pass through unchanged.
    """
    mapping = str.maketrans('ATCGatcg', 'TAGCATCG')
    return seq.translate(mapping)[::-1]
|
def buildKmers(sequence, ksize):
    """Return every k-mer of length ksize in sequence, in order."""
    last_start = len(sequence) - ksize + 1
    return [sequence[start:start + ksize] for start in range(last_start)]
|
def td_path_join(*argv):
    """Construct a TD path by joining the stringified arguments with '/'."""
    assert len(argv) >= 2, "Requires at least 2 tdpath arguments"
    return "/".join(map(str, argv))
|
def getint(data, offset, intsize):
    """Read a big-endian integer of intsize bytes from data at offset.

    Returns (value, new_offset)."""
    value = 0
    remaining = intsize
    while remaining > 0:
        value = (value << 8) + data[offset]
        offset += 1
        remaining -= 1
    return value, offset
|
def merge(a, b, path=None):
    """Recursively merge dict b into dict a and return a.

    Nested dicts are merged; on a leaf conflict a's value wins (conflicts
    are silently ignored).  Adapted from https://stackoverflow.com/a/7205107
    """
    if path is None:
        path = []
    for key, b_value in b.items():
        if key not in a:
            a[key] = b_value
        elif isinstance(a[key], dict) and isinstance(b_value, dict):
            merge(a[key], b_value, path + [str(key)])
        # else: keep a's value -- left dict wins on conflicts
    return a
|
def get_possible_celeb(matrix):
    """Return a celebrity candidate from an n x n 'knows' matrix.

    matrix[i][j] is True iff i knows j.  A single elimination pass keeps
    the only person who could be a celebrity (knows nobody, known by all);
    the caller must still verify the candidate.
    """
    candidate = 0
    for person in range(1, len(matrix)):
        disqualified = (matrix[candidate][person]
                        or not matrix[person][candidate])
        if disqualified:
            candidate = person
    return candidate
|
def get_bigquery_raster_schema(band_column='value', band_type='INT64'):
    """Generate the BigQuery table schema for a raster: one band column
    plus a GEOGRAPHY 'geom' column."""
    band_field = {'name': band_column, 'type': band_type}
    geom_field = {'name': 'geom', 'type': 'GEOGRAPHY'}
    return [band_field, geom_field]
|
def med_leftpad(s, n):
    """Left-pad s with spaces to total length n (n is guaranteed to be at
    least len(s))."""
    return s.rjust(n)
|
def find_replace_tuple(t, aliasDict):
    """
    Replace elements of t according to rules in `aliasDict`.

    Parameters
    ----------
    t : tuple or list
        The object to perform replacements upon.
    aliasDict : dictionary
        Maps potential elements of `t` to the sub-sequence that should
        replace them.  None means no replacement.

    Returns
    -------
    tuple
    """
    result = tuple(t)
    if aliasDict is None:
        return result
    for label, replacement in aliasDict.items():
        # Replace every occurrence, including ones exposed by earlier
        # replacements.
        while label in result:
            pos = result.index(label)
            result = result[:pos] + tuple(replacement) + result[pos + 1:]
    return result
|
def _is_dunder(name):
"""Returns True if a __dunder__ name, False otherwise."""
return (len(name) > 4 and
name[:2] == name[-2:] == '__' and
name[2] != '_' and
name[-3] != '_')
|
def trans_delay(val):
    """
    Decode an ESM transfer-delay value into milliseconds.

    Piecewise per 10.5.6.5, TS 24.008: steps of 10 ms up to 150, 50 ms up
    to 950, 100 ms up to 4000; reserved values return None.

    :param val: the value encoded in the ESM NAS message
    """
    if val <= 15:
        return 10 * val
    if val <= 31:
        return 200 + 50 * (val - 16)
    if val <= 62:
        return 1000 + 100 * (val - 32)
    return None
|
def hide_index_html_from(path):
    """Strip a trailing 'index.html' suffix, plus any slashes left before
    it; paths without the suffix are returned unchanged."""
    suffix = 'index.html'
    if path.endswith(suffix):
        return path[:-len(suffix)].rstrip('/')
    return path
|
def join_nums_and_pairs(verses, joiner=', '):
    """
    Join ints and int pairs into one string of verse numbers and ranges:
    [3, 5, (7, 10)] --> "3, 5, 7-10".
    """
    pieces = []
    for verse in verses:
        if isinstance(verse, int):
            pieces.append(str(verse))
        else:
            pieces.append('%d-%d' % verse)
    return joiner.join(pieces)
|
def factor_of_all_upto(num, limit):
    """Return True when num is divisible by every integer from 2 up to and
    including limit (1 is skipped -- everything divides by 1)."""
    return all(num % factor == 0 for factor in range(2, limit + 1))
|
def log10(x):
    """
    Count how many times x can be divided by 10 before dropping below 10
    -- i.e. floor(log10(x)) for x >= 1; returns 0 for anything below 10.
    """
    count = 0
    value = x
    while value >= 10:
        value /= 10
        count += 1
    return count
|
def to_camel_case(snake_case_string):
    """
    Convert a snake_case string to camelCase, e.g. "some_var" -> "someVar".

    Leading underscores are stripped before conversion.

    :param snake_case_string: snake-cased string to convert.
    :returns: camel-cased version of snake_case_string.
    """
    head, *tail = snake_case_string.lstrip('_').split('_')
    return head + ''.join(word.title() for word in tail)
|
def get_precision(TP, FP):
    """Precision (positive predictive value): TP / (TP + FP).

    NOTE: raises ZeroDivisionError when TP + FP == 0 (preserved)."""
    return TP / (TP + FP)
|
def beat_period_to_tempo(beat, Fs):
    """Convert beat period (samples) to tempo (BPM) [FMP, Section 6.3.2].

    Notebook: C6/C6S3_BeatTracking.ipynb

    Args:
        beat (int): Beat period (samples)
        Fs (scalar): Sample rate
    Returns:
        tempo (float): Tempo (BPM)
    """
    seconds_per_beat = beat / Fs
    return 60 / seconds_per_beat
|
def generate_shifts(key):
    """Generate the Vigenere shift for each key character: 'z' -> 1,
    'y' -> 2, ..., 'a' -> 26."""
    return [ord('z') - ord(ch) + 1 for ch in key]
|
def one_step_integrator(state, ders, ps, dt):
    """Advance the state by one RK4 integration step of size dt.

    :param state: current state of the variables
    :param ders: list of derivative functions, one per variable
    :param ps: perturbations [p1, p2, ...] passed to every derivative
    :param dt: time step
    :return: state after one integration step
    """
    D = len(state)

    def slopes(s):
        # Evaluate every derivative function at state s.
        return [ders[i](s, ps) for i in range(D)]

    # Classic RK4 stages (arithmetic kept in the original order).
    k1 = slopes(state)
    k2 = slopes([state[i] + k1[i] * dt / 2.0 for i in range(D)])
    k3 = slopes([state[i] + k2[i] * dt / 2.0 for i in range(D)])
    k4 = slopes([state[i] + k3[i] * dt for i in range(D)])
    return [state[i] + (k1[i] + 2 * k2[i] + 2 * k3[i] + k4[i]) / 6.0 * dt
            for i in range(D)]
|
def rstrip(s, chars=None):
    """rstrip(s [,chars]) -> string

    Copy of s with trailing whitespace removed, or with trailing
    characters from chars removed when chars is given and not None.
    """
    if chars is None:
        return s.rstrip()
    return s.rstrip(chars)
|
def digits_to_num(L, reverse=False):
    """Combine a list of digits into a number.

    Digits run from the lowest power of 10 to the highest, or the other
    way around when `reverse` is True."""
    ordered = reversed(L) if reverse else L
    return sum(d * 10 ** i for i, d in enumerate(ordered))
|
def _prev_char(s: str, idx: int):
"""Returns the character from *s* at the position before *idx*
or None, if *idx* is zero.
"""
if idx <= 0:
return None
else:
return s[idx - 1]
|
def dm_fibonancci(n):
    """
    dm-Fibonacci sequence with seeds 1, 1, 2 for n = 0, 1, 2 and the
    recurrence dm_fibonancci(n) = dm_fibonancci(n-1)
                                  + 2*dm_fibonancci(n-2)
                                  - dm_fibonancci(n-3).

    :param n: index starting from 0; negatives clamp to 0, non-integers
              are floored.
    :return: the sequence value
    """
    n = n // 1 if n >= 1 else 0  # clamp negatives to 0, floor non-integers
    if n > 2:
        return (dm_fibonancci(n - 1)
                + 2 * dm_fibonancci(n - 2)
                - dm_fibonancci(n - 3))
    if n == 2:
        return 2
    # n is 0 or 1: both seeds are 1.
    return 1
|
def edist(xloc, yloc, hw):
    """Euclidean distance from (xloc, yloc) to the point (hw, hw)."""
    dx = xloc - hw
    dy = yloc - hw
    return (dx ** 2 + dy ** 2) ** 0.5
|
def desc_coord(texts):
    """
    Extract OCR descriptions and bounding-box coordinates.

    Skips texts[0] (the whole-picture bounding box) and stops at the
    first token equal (case-insensitively) to 'total', 'tip' or 'guide',
    dropping everything from there on.

    Return (descriptions, list of vertex (x, y) coordinate lists).
    """
    desc_res = []
    vertices_res = []
    stop_words = ('total', 'tip', 'guide')
    for text in texts[1:]:
        desc = text.description.encode('utf-8', 'ignore').decode('utf-8')
        if desc.lower() in stop_words:
            break
        desc_res.append(desc)
        vertices_res.append([(vertex.x, vertex.y)
                             for vertex in text.bounding_poly.vertices])
    return desc_res, vertices_res
|
def iswithin(angle, X, dx):
    """Is angle within dx of X?

    Non-numeric angle values (missing data in a numerical column) yield
    False instead of raising."""
    try:
        value = float(angle)
    except ValueError:
        return False
    return abs(value - X) <= dx
|
def fade(begin, end, factor):
    """Linear interpolation between begin and end at factor (0.0..1.0).

    Lists/tuples are interpolated element-wise (recursively); an int begin
    yields a rounded int; everything else yields the raw float.
    """
    # Exact type() checks are intentional (e.g. bool is NOT treated as int),
    # matching the original behavior.
    if type(begin) in (list, tuple):
        return [fade(begin[i], end[i], factor) for i in range(len(begin))]
    mixed = begin + (end - begin) * factor
    if type(begin) is int:
        return int(round(mixed))
    return mixed
|
def primeSieve(k):
    """Return a list of length k + 1 where list[i] == 1 marks i as prime,
    list[i] == 0 marks i as composite and list[i] == -1 means undefined
    (indices 0 and 1).

    Implemented as a true Sieve of Eratosthenes -- O(n log log n) -- in
    place of the previous per-number trial division (O(n * sqrt(n)));
    the output is identical.
    """
    result = [-1] * (k + 1)
    # Assume everything from 2 upward is prime, then knock out composites.
    for i in range(2, k + 1):
        result[i] = 1
    for i in range(2, int(k ** 0.5) + 1):
        if result[i] == 1:
            # Multiples below i*i were already marked by smaller primes.
            for j in range(i * i, k + 1, i):
                result[j] = 0
    return result
|
def isclose(a, b, atol=1e-01):
    """Return True when a and b differ by strictly less than the absolute
    tolerance atol."""
    return abs(a - b) < atol
|
def mycomp(strin, pattern, exact=False):
    """Check pattern against strin: equality when exact is True,
    substring containment otherwise."""
    if exact:
        return bool(strin == pattern)
    return pattern in strin
|
def get_error_directory_does_not_exists(dir_kind):
    """Build the missing-directory error prefix; dir_kind is one of
    'dir', 'file', 'url'."""
    return "Error: Directory with {} does not exist:".format(dir_kind)
|
def is_point_in_corner_circular_boundary(distance, circular_radius, boundary_range):
    """
    Judge whether a point lies inside the boundary area of one of the four
    corner circles: True when its depth inside the circle
    (circular_radius - distance) exceeds boundary_range.
    """
    depth_inside = circular_radius - distance
    return depth_inside > boundary_range
|
def do_match(ctg_e, ref_e, cigar_val, ctg_seq, ctg_clv):
    """
    Walk one match block of length cigar_val backwards from the current
    contig/reference end indices.

    :param ctg_e: contig end index
    :param ref_e: reference end index
    :param cigar_val: length of the matched block
    :param ctg_seq: contig sequence
    :param ctg_clv: cleavage site in contig coordinates
    :return: (next contig end, next reference end, sequence to add)
    """
    ctg_b = ctg_e - cigar_val
    ref_b = ref_e - cigar_val
    if ctg_e <= ctg_clv:
        # Whole block lies at or before the cleavage site: keep all of it.
        seq_to_add = ctg_seq[ctg_b:ctg_e]
    elif ctg_b <= ctg_clv:
        # Block straddles the cleavage site: keep up to and including it.
        seq_to_add = ctg_seq[ctg_b:ctg_clv + 1]
    else:
        # Block is entirely past the cleavage site: nothing to keep.
        seq_to_add = ''
    return ctg_b, ref_b, seq_to_add
|
def bubblesort(x):
    """
    In-place bubble sort (adapted from Cormen's pseudocode).

    input: a list or numpy array
    output: [sorted x, number of true comparisons, number of assignments]
    (assignments count 3 per swap, matching the tuple-swap bookkeeping)
    """
    true_comparisons = 0
    assignments = 0
    length = len(x)
    for i in range(length):
        # Bubble the smallest remaining element down toward position i.
        for j in range(length - 1, i, -1):
            if x[j] < x[j - 1]:
                true_comparisons += 1
                x[j], x[j - 1] = x[j - 1], x[j]
                assignments += 3
    return [x, true_comparisons, assignments]
|
def calc_rp(R, Rp):
    """
    Calculate the resistor Rx such that R == Rp // Rx (parallel
    combination); returns None when impossible (R >= Rp).
    """
    if R >= Rp:
        return None
    return R * Rp / (Rp - R)
|
def var_series(get_var_series, n) -> list:
    """
    Build a variation series: draw n values and return them sorted.

    :param get_var_series: zero-argument callable returning a random value
    :param n: number of elements
    :return: sorted list of the drawn values
    """
    return sorted(get_var_series() for _ in range(n))
|
# Preferred extension per media type; built once at import time instead of
# rebuilding the literal on every guess_extension() call.
_EXTENSION_BY_TYPE = {
    'application/javascript': '.js',
    'application/msword': '.doc',
    'application/octet-stream': '.bin',
    'application/oda': '.oda',
    'application/pdf': '.pdf',
    'application/pkcs7-mime': '.p7c',
    'application/postscript': '.ps',
    'application/vnd.apple.mpegurl': '.m3u',
    'application/vnd.ms-excel': '.xls',
    'application/vnd.ms-powerpoint': '.ppt',
    'application/x-bcpio': '.bcpio',
    'application/x-cpio': '.cpio',
    'application/x-csh': '.csh',
    'application/x-dvi': '.dvi',
    'application/x-gtar': '.gtar',
    'application/x-hdf': '.hdf',
    'application/x-latex': '.latex',
    'application/x-mif': '.mif',
    'application/x-netcdf': '.nc',
    'application/x-pkcs12': '.p12',
    'application/x-pn-realaudio': '.ram',
    'application/x-python-code': '.pyc',
    'application/x-sh': '.sh',
    'application/x-shar': '.shar',
    'application/x-shockwave-flash': '.swf',
    'application/x-sv4cpio': '.sv4cpio',
    'application/x-sv4crc': '.sv4crc',
    'application/x-tar': '.tar',
    'application/x-tcl': '.tcl',
    'application/x-tex': '.tex',
    'application/x-texinfo': '.texinfo',
    'application/x-troff': '.tr',
    'application/x-troff-man': '.man',
    'application/x-troff-me': '.me',
    'application/x-troff-ms': '.ms',
    'application/x-ustar': '.ustar',
    'application/x-wais-source': '.src',
    'application/xml': '.xml',
    'application/zip': '.zip',
    'audio/basic': '.au',
    'audio/mpeg': '.mp3',
    'audio/x-aiff': '.aif',
    'audio/x-pn-realaudio': '.ra',
    'audio/x-wav': '.wav',
    'image/gif': '.gif',
    'image/ief': '.ief',
    'image/jpeg': '.jpe',
    'image/png': '.png',
    'image/svg+xml': '.svg',
    'image/tiff': '.tiff',
    'image/vnd.microsoft.icon': '.ico',
    'image/x-cmu-raster': '.ras',
    'image/x-ms-bmp': '.bmp',
    'image/x-portable-anymap': '.pnm',
    'image/x-portable-bitmap': '.pbm',
    'image/x-portable-graymap': '.pgm',
    'image/x-portable-pixmap': '.ppm',
    'image/x-rgb': '.rgb',
    'image/x-xbitmap': '.xbm',
    'image/x-xpixmap': '.xpm',
    'image/x-xwindowdump': '.xwd',
    'message/rfc822': '.eml',
    'text/css': '.css',
    'text/csv': '.csv',
    'text/html': '.html',
    'text/plain': '.txt',
    'text/richtext': '.rtx',
    'text/tab-separated-values': '.tsv',
    'text/x-python': '.py',
    'text/x-setext': '.etx',
    'text/x-sgml': '.sgml',
    'text/x-vcard': '.vcf',
    'text/xml': '.xml',
    'video/mp4': '.mp4',
    'video/mpeg': '.mpeg',
    'video/quicktime': '.mov',
    'video/webm': '.webm',
    'video/x-msvideo': '.avi',
    'video/x-sgi-movie': '.movie',
}


def guess_extension(content_type):
    """Map a media type to a reasonable preferred file extension.

    Python's `mimetypes.guess_extension` is no use because it simply
    returns the first of an unordered set; this table takes a deliberate
    preference.  Unknown content types yield ''.
    """
    return _EXTENSION_BY_TYPE.get(content_type, '')
|
def content_parser(content,identifiers):
    """
    Content is a list of words and identifiers is a dict of lists.

    Scans each word against every identifier value (stringified) and
    collects (word, index) pairs for words containing an identifier.
    Returns the collected list at the first word position that produced
    matches; returns None implicitly when nothing ever matches.
    """
    info_list = []
    index = 0
    for word in content:
        for i in identifiers.keys():
            # Coerce the word to str; fall back to its first token.
            # NOTE(review): bare excepts silently swallow all errors here.
            try:
                word = str(word)
            except:
                try:
                    word = word.split()[0]
                except:
                    #some random instances of unicode weird things.
                    pass
            e = [str(z) for z in identifiers[i]]
            for t in e:
                if t in word:
                    info_list.append((word,index))
                    break
        # index tracks the word's position in content.
        index += 1
    if info_list:
        return info_list
|
def solution(A):
    """Return the start index of the slice of A with the minimal average.

    Idea (DINAKAR): the minimal-average slice always has length 2 or 3 --
    any longer slice splits into 2/3-length sub-slices, one of which has
    an average no larger.  A prefix-sum array makes each slice average
    O(1), so the whole scan is O(n).

    (Debug print() calls from the original have been removed; the
    algorithm and return value are unchanged.)

    :param A: list of integers
    :return: start index of the minimal-average slice
    """
    len_ar = len(A)
    prefix_sum = [0] * (len_ar + 1)
    for i in range(1, len_ar + 1):
        prefix_sum[i] = prefix_sum[i - 1] + A[i - 1]
    # Upper bound on any average given the problem constraints
    # (|A[i]| <= 10000, so no 2/3-slice average exceeds this).
    max_average = 100000 / 3
    min_idx_start = 0
    for i in range(len_ar):
        # Only slices of length 2 and 3 starting at i need checking.
        for j in range(i + 1, min(i + 3, len_ar)):
            average = (prefix_sum[j + 1] - prefix_sum[i]) / (j - i + 1)
            if average < max_average:
                min_idx_start = i
                max_average = average
    return min_idx_start
|
def is_sorted(array):
    """Return True when the list is sorted in ascending order, False
    otherwise.  Elements are assumed comparable with <, > etc."""
    return all(left <= right for left, right in zip(array, array[1:]))
|
def easy_iscat(a):
    """Return 'meow' when a is exactly the string 'cat', otherwise 'woof'."""
    if a == 'cat':
        return 'meow'
    return 'woof'
|
def _get_non_None_elements(iter_of_vals):
""" Returns non None values. """
return [x for x in iter_of_vals if x is not None]
|
def _leading_count_outside(values, kept):
    """Count leading elements of values that are not in kept (stops at the
    first element that is)."""
    count = 0
    for value in values:
        if value in kept:
            break
        count += 1
    return count


def createCutDownLists(
        inputSlope, lowElasticModulus, inputxList, inputyList, strainValue,
        highElasticModCuttingRange):
    """Take only the relevant section of the input curve so finding the
    intersection point is faster.

    Returns (cutDownxList, cutDownyList); the inputs are returned
    unchanged when inputSlope does not exceed lowElasticModulus.

    (The two nearly identical 15-line branches of the original differed
    only in the x-filter applied; deduplicated via _leading_count_outside
    with identical behavior.)
    """
    if inputSlope <= lowElasticModulus:
        return inputxList, inputyList
    lower = strainValue - highElasticModCuttingRange
    if strainValue >= (max(inputxList) - highElasticModCuttingRange - 1):
        # Near the end of the curve only a lower bound is applied: runs of
        # identical strain values there would otherwise break
        # .index()-style lookups (which take the lowest index).
        cutDownxList = [x for x in inputxList if x > lower]
    else:
        upper = strainValue + highElasticModCuttingRange
        cutDownxList = [x for x in inputxList if lower < x < upper]
    # Align the y-list with the kept x-window.
    start = _leading_count_outside(inputxList, cutDownxList)
    cutDownyList = inputyList[start: start + len(cutDownxList)]
    return cutDownxList, cutDownyList
|
def _get_aligned_offsets(hd_list, height, align="baseline"):
"""
Geiven a list of (height, descent) of each boxes, align the boxes
with *align* and calculate the y-offsets of each boxes.
total width and the offset positions of each items according to
*mode*. xdescent is analagous to the usual descent, but along the
x-direction. xdescent values are currently ignored.
*hd_list* : list of (width, xdescent) of boxes to be aligned.
*sep* : spacing between boxes
*height* : Intended total length. None if not used.
*align* : align mode. 'baseline', 'top', 'bottom', or 'center'.
"""
if height is None:
height = max([h for h, d in hd_list])
if align == "baseline":
height_descent = max([h-d for h, d in hd_list])
descent = max([d for h, d in hd_list])
height = height_descent + descent
offsets = [0. for h, d in hd_list]
elif align in ["left","top"]:
descent=0.
offsets = [d for h, d in hd_list]
elif align in ["right","bottom"]:
descent=0.
offsets = [height-h+d for h, d in hd_list]
elif align == "center":
descent=0.
offsets = [(height-h)*.5+d for h, d in hd_list]
else:
raise ValueError("Unknown Align mode : %s" % (align,))
return height, descent, offsets
|
def is_basestring(s):
    """Return True for any string type (str/unicode on Py2 semantics;
    str/bytes on Py3)."""
    return isinstance(s, str) or isinstance(s, bytes)
|
def cmp_zorder(lhs, rhs):
    """Compare two points by Z-order (Morton order).

    Returns a cmp()-style negative/zero/positive value.  Assumes
    3-dimensional index tuples (msd starts at 2 and the loop covers
    dimensions 1 and 0).
    Code taken from https://en.wikipedia.org/wiki/Z-order_curve
    """
    def less_msb(x: int, y: int):
        # True when x's most significant set bit is strictly lower than
        # y's (classic trick: x < y and x < x XOR y).
        return x < y and x < (x ^ y)
    # Assume lhs and rhs array-like objects of indices.
    assert len(lhs) == len(rhs)
    # Will contain the most significant dimension.
    msd = 2
    # Loop over the other dimensions.
    for dim in [1, 0]:
        # Check if the current dimension is more significant
        # by comparing the most significant bits of the per-dimension XORs.
        if less_msb(lhs[msd] ^ rhs[msd], lhs[dim] ^ rhs[dim]):
            msd = dim
    return lhs[msd] - rhs[msd]
|
def convert_to_orca_zmatrix(lines):
    """Convert a proper z-matrix into ORCA's fixed 7-column layout.

    Every output row is [element, atom1, atom2, atom3, distance, angle,
    dihedral], with '0' placeholders for references a row does not use.
    """
    # Row 1: element only.
    head = lines[0].split()[0]
    orca_zmatrix = [[head, '0', '0', '0', '0', '0', '0']]
    # Row 2: element, one reference atom and a distance.
    element, atom1, distance = lines[1].split()[:3]
    orca_zmatrix.append([element, atom1, '0', '0', distance, '0', '0'])
    # Row 3 (if present): adds a second reference atom and an angle.
    if len(lines) > 2:
        element, atom1, distance, atom2, angle = lines[2].split()[:5]
        orca_zmatrix.append([element, atom1, atom2, '0', distance, angle, '0'])
    # Remaining rows: full element/3-references/distance/angle/dihedral.
    for line in lines[3:]:
        element, atom1, distance, atom2, angle, atom3, dihedral = line.split()[:7]
        orca_zmatrix.append(
            [element, atom1, atom2, atom3, distance, angle, dihedral])
    return orca_zmatrix
|
def make_features_dicts(all_features: dict, feat_list: list) -> dict:
    """Filter the dict of all features down to the requested names.

    ----------
    all_features
        dictionary of all available features and their possible values
    feat_list
        list of feature names to keep

    Returns
    -------
    dict
        filtered features dictionary (KeyError for unknown names)
    """
    filtered = {}
    for name in feat_list:
        filtered[name] = all_features[name]
    return filtered
|
def _get_segmentation_strategy(segmentation):
"""Get the baci string for a geometry pair strategy."""
if segmentation:
return 'segmentation'
else:
return 'gauss_point_projection_without_boundary_segmentation'
|
def get_partition_lengths(partitions):
    """
    Take an array of partitions and return an array of their lengths.

    Parameters
    ----------
    partitions: array
        Array containing the partitions.

    Returns
    -------
    array
        Length of each partition, in the same order.
    """
    return list(map(len, partitions))
|
def _format_coredump_stdout(cmd_ret):
"""
Helper function to format the stdout from the get_coredump_network_config function.
cmd_ret
The return dictionary that comes from a cmd.run_all call.
"""
ret_dict = {}
for line in cmd_ret["stdout"].splitlines():
line = line.strip().lower()
if line.startswith("enabled:"):
enabled = line.split(":")
if "true" in enabled[1]:
ret_dict["enabled"] = True
else:
ret_dict["enabled"] = False
break
if line.startswith("host vnic:"):
host_vnic = line.split(":")
ret_dict["host_vnic"] = host_vnic[1].strip()
if line.startswith("network server ip:"):
ip = line.split(":")
ret_dict["ip"] = ip[1].strip()
if line.startswith("network server port:"):
ip_port = line.split(":")
ret_dict["port"] = ip_port[1].strip()
return ret_dict
|
def update_dict(dict_a, dict_b):
    """Update *dict_a* with *dict_b* in place and return it.

    Exists because ``dict.update(d)`` returns ``None`` rather than the
    mutated dict, which prevents chaining.

    >>> a = {'a': 1, 'b': 2}
    >>> update_dict(a, {'b': 3, 'c': 3})
    {'a': 1, 'b': 3, 'c': 3}
    """
    for key, value in dict_b.items():
        dict_a[key] = value
    return dict_a
|
def get_series_number(nrrd_file):
    """
    (str) -> str
    Return the series number encoded in the given nrrd_file name: the
    part after the last underscore, with the ".nrrd" suffix stripped.
    >>>nrrd_file = '/paulsen/MRx/PHD_032/0454/31774/ANONRAW/0454_31774_DWI-31_7.nrrd'
    >>>get_scan_type(nrrd_file)
    7
    """
    tail = nrrd_file.rsplit("_", 1)[-1]
    return tail.replace(".nrrd", "")
|
def _build_index_vcf_command_str(bgzipped_vcf):
"""Generate command string to index vcf file."""
command = " ".join([
'tabix -p vcf', bgzipped_vcf
])
return command
|
def _create_extension_feed_item_mutate_operations(
client, extension_feed_item_resource_names
):
"""Creates MutateOperations for the sitelink extension feed items that will
be removed.
Args:
client: an initialized GoogleAdsClient instance.
extension_feed_item_resource_names: the extension feed item resource
names.
Returns:
An array of MutateOperations for the extension feed items.
"""
mutate_operations = []
# Create a MutateOperation for each extension feed item to remove.
for resource_name in extension_feed_item_resource_names:
mutate_operation = client.get_type("MutateOperation")
mutate_operation.extension_feed_item_operation.remove = resource_name
mutate_operations.append(mutate_operation)
return mutate_operations
|
def avoid_already_replied_id(db_data, reply_id):
    """
    Return True (and log a message) when *reply_id* was already replied to.

    db_data
        Stored reply records; db_data[0] is skipped (presumably a
        header row — TODO confirm against the writer) and the rest are
        tweet ids.
    reply_id
        The tweet id being considered for a reply.
    """
    # Membership test instead of a flag-setting loop; this also prints
    # the notice exactly once even if the id appears multiple times.
    db_ids = db_data[1:]
    if reply_id in db_ids:
        print("Tweet ID: ", reply_id, " already replied" )
        return True
    return False
|
def _check_cors_origin(origin, allowed_origins):
"""
Check if an origin match cors allowed origins
"""
if isinstance(allowed_origins, list):
if origin in allowed_origins:
return origin
elif allowed_origins == '*':
return allowed_origins
elif allowed_origins == origin:
# Cors origin is either * or specific origin
return allowed_origins
|
def departure_filter(row):
    """Fill a missing departure time from the arrival time, in place."""
    arrival = row["arrival_time"]
    if arrival and not row["departure_time"]:
        row["departure_time"] = arrival
    return row
|
def standardise1(x,l1,l2):
    """Standardise data ``x`` from the interval [l1, l2] to [-1, 1].

    NOTE: the augmented assignments below mutate mutable inputs (e.g.
    numpy arrays) in place as well as returning them — pass a copy if
    the original values must be preserved.

    Parameters
    ----------
    x: original data (scalar or array supporting arithmetic operators)
    l1: lower bound of the original interval
    l2: upper bound of the original interval

    Returns
    -------
    standardised data in [-1, 1]
    """
    # Shift so the interval starts at 0, scale it to [0, 2], then
    # recentre onto [-1, 1].
    x-=l1
    x*=2/float(l2-l1)
    x-=1
    return x
|
def is_pii_table(table_id):
    """
    Return True if the specified table is a pii table.

    :param table_id: identifies the table
    :return: True when the id starts with 'pii' or 'participant',
        False otherwise
    """
    return table_id.startswith(('pii', 'participant'))
|
def get_ltm_cells(cells):
    """Map matrix index pairs onto the lower triangle.

    cells: list of (i, j) indices into a 2D integer-indexable object
        (typically a list of lists or array of arrays)

    Diagonal entries are dropped, each pair is flipped so the row index
    is the larger one, and the de-duplicated result is returned sorted.
    """
    below_diagonal = {
        (max(row, col), min(row, col))
        for row, col in cells
        if row != col
    }
    return sorted(below_diagonal)
|
def rev_comp(seq):
    """Return the reverse complement of an uppercase DNA sequence.

    Raises KeyError for characters other than A, T, C, G.
    """
    complement = {'A': 'T',
                  'T': 'A',
                  'C': 'G',
                  'G': 'C'}
    return ''.join(complement[base] for base in reversed(seq))
|
def _gather_input_features(extracted_results):
"""Gather input features.
Args:
extracted_results (List[List[Dict]]):
Multi-frame feature extraction results
Returns:
List[List[dict]]: Multi-frame feature extraction results
stored in a nested list. Each element of the outer list is the
feature extraction results of a single frame, and each element of
the inner list is the extracted results of one person,
which contains:
features (ndarray): extracted features
track_id (int): unique id of each person, required when
``with_track_id==True```
"""
sequence_inputs = []
for frame in extracted_results:
frame_inputs = []
for res in frame:
inputs = dict()
if 'features' in res:
inputs['features'] = res['features']
if 'track_id' in res:
inputs['track_id'] = res['track_id']
frame_inputs.append(inputs)
sequence_inputs.append(frame_inputs)
return sequence_inputs
|
def quoteattr(data):
    """Quote an attribute value for XML output.

    Only double quotes are escaped (to ``&quot;``); unlike
    ``xml.saxutils.quoteattr`` this leaves e.g. newlines untouched.
    """
    escaped = data.replace('"', '&quot;')
    return '"{}"'.format(escaped)
|
def last(*args):
    """Return the last value from any object type - list, tuple, int, string.

    With several arguments the last argument is returned; with a single
    int argument the int itself; with a single sequence its last item.
    """
    if len(args) != 1:
        return args[-1]
    sole = args[0]
    if isinstance(sole, int):
        return int(''.join(map(str, args)))
    return sole[-1]
|
def FindShortestPath(graph, start, end, path=None):
    """Recursively find a shortest path (by hop count) from start to end.

    graph
        Mapping of node -> iterable of neighbour nodes; every edge
        counts as weight 1.
    path
        The path already traversed, used internally to avoid cycles;
        callers normally omit it. (The original default was a mutable
        ``[]`` — replaced with None to avoid the shared-default
        pitfall; behaviour is unchanged since the list was never
        mutated.)

    Returns the list of nodes along a shortest path, or None when no
    path exists.

    NOTE(review): this is exhaustive depth-first search, not Dijkstra —
    exponential in the worst case, so only suitable for small graphs.
    """
    path = (path or []) + [start]
    if start == end:
        return path
    if start not in graph:
        return None
    shortest = None
    for node in graph[start]:
        if node not in path:
            newpath = FindShortestPath(graph, node, end, path)
            # Keep the first strictly-shorter path found (same
            # tie-breaking as the original).
            if newpath and (shortest is None or len(newpath) < len(shortest)):
                shortest = newpath
    return shortest
|
def ensure_trailing_slash(url: str) -> str:
    """When comparing URLs, e.g. "http://example.com/" and "http://example.com", use this method to normalise the comparison.

    URLs containing a query string are returned unchanged, since the
    trailing slash is not meaningful there. An empty string becomes "/"
    (the original indexed ``url[-1]`` and raised IndexError on "").
    """
    if "?" in url:  # If the URL contains a parameter (e.g. https://example.com/search?page=1), ignore trailing slash.
        return url
    return url if url.endswith("/") else f"{url}/"
|
def indent(data: str, spaces: int) -> str:
    """
    Return *data* with every line (including blank ones) prefixed by
    *spaces* space characters.
    """
    prefix = " " * spaces
    return "\n".join(prefix + line for line in data.splitlines())
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.