def stringify(lst):
"""
Joins list elements to one string.
:param lst: list. List of strings.
:return: string. Joined list of strings to one string.
"""
return ''.join(str(x) for x in lst)
|
def fix_line_ending(content):
"""Fix line ending of `content` by changing it to \n.
:param bytes content: content of the subtitle.
:return: the content with fixed line endings.
:rtype: bytes
"""
return content.replace(b'\r\n', b'\n').replace(b'\r', b'\n')
|
def _encoded_string(s):
"""Encode the string-like argument as bytes if suitable"""
return s.encode('ascii') if hasattr(s, 'encode') else s
|
def tupleToList(tupleTo):
"""
tupleToList(...) method of tupleo.tuple instance
T.tupleToList(tupleTo) -> None -- convert tuple to List to Full Depth Level.
"""
if type(tupleTo)==tuple:
tupleTo = list(tupleTo)
for i in range(len(tupleTo)):
if type(tupleTo[i])==tuple:
tupleTo[i]=list(tupleTo[i])
tupleToList(tupleTo[i])
elif type(tupleTo[i])==list:
tupleToList(tupleTo[i])
else:
pass
return tupleTo
|
def parentLevel(parentOp, childOp):
"""
    Determines if parentOp is a parent of childOp at any depth. Returns the
    depth of parenthood, i.e. childOp.parent(returnValue) will yield parentOp.
    Returns None if parentOp is not a parent of childOp; in that case
    childOp.parent(returnValue) will error.
"""
if parentOp == childOp:
return 0
parentLev = 1
while True:
parent = childOp.parent(parentLev)
if parent is None:
return None
elif parent is parentOp:
return parentLev
parentLev += 1
|
def coord_list_to_svg_path(coord_list):
"""
Turn a list of points into an SVG path
"""
    path = ''
last_action_type = ''
for action in coord_list:
if action['type'] == 'move':
if last_action_type != 'M':
path += 'M '
path += '%s,%s ' % (action['coord'].x, -action['coord'].y)
last_action_type = 'M'
if action['type'] == 'draw':
if last_action_type != 'L':
path += 'L '
path += '%s,%s ' % (action['coord'].x, -action['coord'].y)
last_action_type = 'L'
return path
|
def fasta_to_sequence(fasta):
""" Convert a multiline fasta sequence to one line sequence"""
f = fasta.strip().split("\n")
if len(f) > 0:
return "".join(f[1:])
else:
return ""
|
def pg_info(link):
"""Populate page test info
:param link: url for page being tested as String
:return: object containing information for page being tested
"""
page_info = {
'tag': 'response',
'url': link,
'title': 'Not tested',
'http status': '---',
'front end response time (ms)': 'Not tested',
'back end response time (ms)': 'Not tested'
}
return page_info
|
def format_row(row):
"""
Transforms [1, 2, 3] -> 1 2 3
"""
return " ".join(str(i) for i in row)
|
def centroid(points):
"""
Compute the centroid (average lat and lon) from a set of points
((lat,lon) pairs).
"""
lat_avg = sum([p[0] for p in points]) / len(points)
lon_avg = sum([p[1] for p in points]) / len(points)
return (lat_avg, lon_avg)
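# Usage sketch (added for illustration; the coordinates below are made up):
print(centroid([(40.0, -74.0), (42.0, -76.0)]))  # -> (41.0, -75.0)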
|
def find_non_sep_position(text, norm_position, norm_sep="|", morph_sep="-"):
"""
Finds the actual position of character n in a string, such that n is the
number of characters traversed excluding separator characters
:param text: the text to search for a position in
:param norm_position: position ignoring separators
:param norm_sep: clean separator to ignore when counting, default '|'
    :param morph_sep: morph separator to ignore when counting, default '-'
:return: position in string not ignoring separators
"""
cursor = -1
position = -1
for c in text:
position +=1
if c != norm_sep and c != morph_sep:
cursor +=1
if cursor == norm_position:
break
return position
|
def find_span_linear(degree, knot_vector, num_ctrlpts, knot, **kwargs):
""" Finds the span of a single knot over the knot vector using linear search.
Alternative implementation for the Algorithm A2.1 from The NURBS Book by Piegl & Tiller.
:param degree: degree, :math:`p`
:type degree: int
:param knot_vector: knot vector, :math:`U`
:type knot_vector: list, tuple
:param num_ctrlpts: number of control points, :math:`n + 1`
:type num_ctrlpts: int
:param knot: knot or parameter, :math:`u`
:type knot: float
:return: knot span
:rtype: int
"""
span = degree + 1 # Knot span index starts from zero
while span < num_ctrlpts and knot_vector[span] <= knot:
span += 1
return span - 1
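# Usage sketch (added for illustration): with degree 2, the knot vector below,
# and 8 control points, the parameter u = 2.5 lies in knot span 4.
print(find_span_linear(2, [0, 0, 0, 1, 2, 3, 4, 4, 5, 5, 5], 8, 2.5))  # -> 4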
|
def CalculateForecastStats(matched, available, possible=None):
"""Calculate forecast percentage stats.
Args:
matched: The number of matched impressions.
available: The number of available impressions.
possible: The optional number of possible impressions.
Returns:
    The available and possible impression percentages, as a tuple.
"""
if matched > 0:
available_percent = (float(available) / matched) * 100.
else:
available_percent = 0
if possible is not None:
if matched > 0:
possible_percent = (possible/float(matched)) * 100.
else:
possible_percent = 0
else:
possible_percent = None
return available_percent, possible_percent
|
def is_balanced_int(root):
"""Check if balanced."""
if root is None:
return 0
left = is_balanced_int(root.left)
right = is_balanced_int(root.right)
if left < 0 or right < 0 or abs(left - right) > 1:
return -1
return max((left, right)) + 1
|
def levenshtein(a, b):
"""Calculates the Levenshtein distance between a and b."""
n, m = len(a), len(b)
if n > m:
# Make sure n <= m, to use O(min(n,m)) space
a, b = b, a
n, m = m, n
current = range(n + 1)
for i in range(1, m + 1):
previous, current = current, [i] + [0] * n
for j in range(1, n + 1):
add, delete = previous[j] + 1, current[j - 1] + 1
change = previous[j - 1]
if a[j - 1] != b[i - 1]:
change = change + 1
current[j] = min(add, delete, change)
return current[n]
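# Usage sketch (added for illustration):
print(levenshtein("kitten", "sitting"))  # -> 3
print(levenshtein("flaw", "lawn"))       # -> 2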
|
def piece_not(piece: str) -> str:
"""
helper function to return the other game piece that is not the current game piece
Preconditions:
- piece in {'x', 'o'}
>>> piece_not('x')
'o'
>>> piece_not('o')
'x'
"""
return 'x' if piece == 'o' else 'o'
|
def generate_org_data(org_name, first_name, last_name, email, capabilities):
"""
generate_org_data(org_name, first_name, last_name, email, capabilities)
Generates org create data from inputs and returns org data object.
"""
capability_name_list = []
for capability in capabilities:
capability_name_list.append({"Name": capability})
org_data = {
"name": org_name,
"owners": [{"firstName": first_name, "lastName": last_name, "email": email}],
"capabilities": capability_name_list
}
return org_data
|
def fib(n):
"""
Finding the Fibonacci sequence with seeds of 0 and 1
The sequence is 0,1,1,2,3,5,8,13,..., where
the recursive relation is fib(n) = fib(n-1) + fib(n-2)
:param n: the index, starting from 0
    :return: the n-th Fibonacci number
"""
    n = int(n) if n >= 1 else 0  # ensure n is a non-negative integer
if n>1:
return fib(n-1) + fib(n-2)
# n should be <= 1 here. Anything greater than 0, assume it's 1
return 1 if (n==1) else 0
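# Usage sketch (added for illustration, matching the sequence in the docstring):
print([fib(i) for i in range(8)])  # -> [0, 1, 1, 2, 3, 5, 8, 13]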
|
def array_unique(l):
"""
Removes all duplicates from `l`.
"""
return list(set(l))
|
def parse_arb_id(arb_id: int):
"""Return IDH and IDL."""
return divmod(arb_id, 0x100)
|
def is_sphinx_markup(docstring):
"""Returns whether a string contains Sphinx-style ReST markup."""
# this could be made much more clever
return ("`" in docstring or "::" in docstring)
|
def government_furloughing(t, states, param, t_start_compensation, t_end_compensation, b_s):
"""
A function to simulate reimbursement of a fraction b of the income loss by policymakers (f.i. as social benefits, or "tijdelijke werkloosheid")
Parameters
----------
t : pd.timestamp
current date
param: float
initialised value of b
t_start_compensation : pd.timestamp
startdate of compensation
    t_end_compensation : pd.timestamp
enddate of compensation
b_s: float
fraction of lost labor income furloughed to consumers under 'shock'
Returns
-------
b: float
fraction of lost labor income compensated
"""
if t < t_start_compensation:
return param
elif ((t >= t_start_compensation) & (t < t_end_compensation)):
return b_s
else:
return param
|
def iupac_converter(iupac_code):
"""
Return a list of all possible bases corresponding to a given iupac
nucleotide code.
"""
iupac_dict = {"A": "A", "C": "C", "G": "G", "T": "T", "R": "AG", "Y": "CT",
"S": "GC", "W": "AT", "K": "GT", "M": "AC", "B": "CGT",
"D": "AGT", "H": "ACT", "V": "ACG", "N": "ACGT"}
try:
return list(iupac_dict[iupac_code.upper()])
except KeyError:
print(("Non-IUPAC nucleotide code {}. Code must be one of {}").format(
iupac_code, "".join(list(iupac_dict.keys()))
))
return []
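# Usage sketch (added for illustration):
print(iupac_converter("R"))  # -> ['A', 'G']
print(iupac_converter("n"))  # -> ['A', 'C', 'G', 'T']  (lower case is accepted)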
|
def file_sorting_key(filename):
"""
Extract a key for a filename sort, putting JPEGs before everything else.
"""
extension = filename.lower()[filename.rfind(".") + 1 :]
key = 0 if extension in ("jpg", "jpeg") else 1
return (key, filename)
|
def _force_float(v):
""" Converts given argument to float. On fail logs warning and returns 0.0.
Args:
v (any): value to convert to float
Returns:
float: converted v or 0.0 if conversion failed.
"""
    try:
        return float(v)
    except Exception as exc:
        logger.warning('Failed to convert {} to float with {} error. Using 0 instead.'.format(v, exc))
        return 0.0
|
def mean(values):
"""Compute the mean of a sequence of numbers."""
return sum(values) / len(values)
|
def lucas(n, elem):
"""A simple function to print lucas series of n-numbers"""
# check if n is correct
# we can only allow n >=3 and n as an integer number
# since we always expect user to provide two elements
# these two elements will be the starting number of the series
try:
n = int(n)
except ValueError:
raise TypeError("lucas series is only available for n-digits, where n is an integer, and n >= 3")
if n < 3:
raise ValueError("lucas series is only available for n-digits, where n is an integer, and n >= 1")
# evaluate user-input elements
try:
elem = eval(elem)
except SyntaxError:
raise TypeError("unable to understand type of first two elements of the series")
if type(elem) == tuple:
# convert `elem` to list
# so that we can append new values into list
elem = list(elem)
# when we are assured that the value of n is correct,
# we can now calculate the lucas series upto n-elements
    # and finally return it as a string separated by space
for _ in range(n - 2):
elem.append(sum(elem[-2:]))
return " ".join(map(str, elem))
|
def get_current_role(line: str, column: int, default: str = "any"):
"""
Parse current line with cursor position to get current role.
Valid roles:
- :role:`target`
- :role:`Text <target>`
- :domain:role:`target`
- :domain:one:two:`target`
Default role:
- `Foo`
- `Foo <bar>`
"""
if column >= len(line):
return None
# Find where the role name ends
for j in range(column, -1, -1):
if line[j] == "`":
if j == 0 or line[j - 1].isspace():
return default
if line[j - 1] == ":":
break
return None
else:
return None
# Find where the role starts
i = j
while i >= 0:
if line[i].isspace():
break
i -= 1
i += 1
if line[i] != ":" or i >= j:
return None
return line[i + 1 : j - 1]
|
def fds_crc(data, checksum=0x8000):
"""
Do not include any existing checksum, not even the blank checksums 00 00 or FF FF.
The formula will automatically count 2 0x00 bytes without the programmer adding them manually.
Also, do not include the gap terminator (0x80) in the data.
If you wish to do so, change sum to 0x0000.
"""
size = len(data)
for i in range(size + 2):
if i < size:
byte = data[i]
else:
byte = 0x00
for bit_index in range(8):
bit = (byte >> bit_index) & 0x1
carry = checksum & 0x1
checksum = (checksum >> 1) | (bit << 15)
if carry:
checksum ^= 0x8408
return checksum.to_bytes(2, "little")
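# Illustrative call (added); the payload below is made up and the resulting
# CRC bytes are not independently verified here:
payload = bytes([0x01, 0x2A, 0x4E, 0x49])
print(fds_crc(payload).hex())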
|
def dict_to_arg_flags_str(flags_dict):
"""
Converts a dictionary to a commandline arguments string
in the format '--<key0>=value0 --<key1>=value1 ...'
"""
return ' '.join(
['--{}={}'.format(k, flags_dict[k]) for k in flags_dict.keys()])
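# Usage sketch (added for illustration; relies on insertion-ordered dicts, Python 3.7+):
print(dict_to_arg_flags_str({'epochs': 10, 'lr': 0.001}))  # -> --epochs=10 --lr=0.001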
|
def debian_number(major='0', minor='0', patch='0', build='0'):
"""Generate a Debian package version number from components."""
return "{}.{}.{}-{}".format(major, minor, patch, build)
|
def _validate_tag(string):
""" Extracts a single tag in key[=value] format """
result = {}
if string:
comps = string.split('=', 1)
result = {comps[0]: comps[1]} if len(comps) > 1 else {string: ''}
return result
|
def postorder(root):
"""Postorder depth-first traverse a binary tree."""
ans = []
node, stack = root, []
prev = None
while node or stack:
if node:
stack.append(node)
node = node.left
else:
node = stack[-1]
if node.right and node.right != prev: node = node.right
else:
ans.append(node.val)
stack.pop()
prev = node
node = None
return ans
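# Usage sketch (added for illustration); the minimal Node class below is an
# assumption, not part of the original entry:
class Node:
    def __init__(self, val, left=None, right=None):
        self.val, self.left, self.right = val, left, right

root = Node(1, Node(2), Node(3))
print(postorder(root))  # -> [2, 3, 1]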
|
def fix_resource(first_resource, resource):
"""Use the first resource to fill in other resources.
"""
for property_ in first_resource.keys():
if not resource.get(property_):
resource[property_] = first_resource[property_]
return resource
|
def toggle_modal_tab(n1, n2, is_open):
"""
:return: Open modal callback if user clicks tab 2 description button on tab 2.
"""
if n1 or n2:
return not is_open
return is_open
|
def atf_fc_uri(article_uri):
"""URI of feature collection"""
return article_uri+"/featurecollection"
|
def unpickle(factory, args, kwargs):
"""Unpickle something by calling a factory"""
return factory(*args, **kwargs)
|
def _calc_input_panel_rect(panel_size, input_width):
"""
Calc rectangle of the panel image for inputting to neural network model.
Because panel image isn't square but neural network model postulate square
:param panel_size: size of source panel image [width, height]
:param input_width: width of input image for neural network model
:return: rectangle of panel image [left, top, right, bottom]
"""
w, h = panel_size
scale = min(float(input_width) / w, float(input_width) / h)
w, h = (round(w * scale), round(h * scale))
x, y = (input_width - w) // 2, (input_width - h) // 2
return [x, y, x + w, y + h]
|
def get_aton(mmsi: str) -> bool:
"""
Gets the AIS Aids to Navigation (AtoN) status of a given MMSI.
AIS Aids to Navigation (AtoN):
    AIS used as an aid to navigation uses the format 99MIDXXXX
where the digits 3, 4 and 5 represent the MID and X is any figure
from 0 to 9. In the United States, these MMSIs are reserved for the
federal government.
Src: https://www.navcen.uscg.gov/?pageName=mtmmsi
:param mmsi: str MMSI as decoded from AIS data.
:return: bool True if MMSI belongs to an AtoN, otherwise False.
"""
return str(mmsi)[:2] == "99"
|
def int_string(string_list):
"""
    Converts a list of strings to ints. This absolutely assumes everything is int-parseable.
"""
return [int(x) for x in string_list]
|
def get_excluded_classes(std_class_names, config_dict, cazy_dict):
"""Define the CAZy classes that will not be scraped.
    This includes classes for which no Families have been specified for scraping.
:param std_class_names: list of standardised CAZy class names
:param config_dict: configuration dict defining classes and families to be scraped
:param cazy_dict: dict, accepted CAZy classes synonyms
Return list of CAZy classes not to be scraped.
"""
# retrieve list of CAZy classes from which all families are to be scraped
cazy_classes = config_dict["classes"]
# retrieve the names of classes for which specific families to be scraped have been named
for key in config_dict:
if (key != "classes") and (key not in cazy_classes) and (len(config_dict[key]) != 0):
# add the class of families to be scraped to the list of CAZy classes to be scraped
cazy_classes.append(key)
# create list of CAZy classes not to be scraped
excluded_classes = std_class_names
excluded_classes = list(set(excluded_classes).difference(cazy_classes))
if len(excluded_classes) != 0:
# change names of classes into format for excluding classes during scrape
index = 0
for index in range(len(excluded_classes)):
excluded_classes[index] = f"<strong>{excluded_classes[index]}</strong>"
else:
excluded_classes = None
return excluded_classes
|
def raises_keyerr(k, m):
"""
Determine whether a mapping is missing a particular key.
This helper is useful for explicitly routing execution through __getitem__
rather than using the __contains__ implementation.
:param object k: the key to check for status as missing
:param Mapping m: the key-value collection to query
:return bool: whether the requested key is missing from the given mapping,
with "missing" determined by KeyError encounter during __getitem__
"""
try:
m[k]
except KeyError:
return True
else:
return False
|
def check_for_null(array):
"""
check for null string values in array
"""
while "" in array:
array.remove("") if "" in array else array
return array
|
def bytes_to_int(b: bytes) -> int:
"""
Convert bytes to an int, with hardcoded Endianness.
"""
return int.from_bytes(b, "little")
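# Usage sketch (added for illustration):
print(bytes_to_int(b"\x01\x00"))  # -> 1   (little-endian)
print(bytes_to_int(b"\x00\x01"))  # -> 256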
|
def allowed_transitions(states):
"""
this function takes a set of states and uses it to compute the allowed transitions
it assumes the model is acyclic ie. individuals can only transition towards states
to the right of it in the list
Parameters
----------
states : list
a list with the set of states in the model
Returns
----------
list
all of the transitions possible as pairs (two-element lists)
"""
lst = []
for i in range(0, len(states)):
for j in range(i+1, len(states)):
lst.append([i, j])
return lst
|
def coerce_value(val):
"""
Coerce config variables to proper types
"""
def isnumeric(val):
try:
float(val)
return True
except ValueError:
return False
if isnumeric(val):
try:
return int(val)
except ValueError:
return float(val)
lower_val = str(val.lower())
if lower_val in ('true', 'false'):
if 'f' in lower_val:
return False
else:
return True
if ',' in val:
return [coerce_value(v.strip()) for v in val.split(',')]
return val
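# Usage sketch (added for illustration):
print(coerce_value("42"))       # -> 42 (int)
print(coerce_value("3.5"))      # -> 3.5 (float)
print(coerce_value("True"))     # -> True (bool)
print(coerce_value("a, b, 2"))  # -> ['a', 'b', 2]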
|
def strip_data(data):
""" retire les blancs et les retour chariot"""
b=data.replace('\n',' ').replace('\t',' ').strip().replace(' ',' ')
c=b.replace(' ',' ')
while (b != c):
b=c
c=b.replace(' ',' ')
pass
return b
|
def recursive_string_interpolation(string, obj, max_depth=5):
"""Recursively perform string interpolation."""
for iteration in range(max_depth):
previous_string = string
string = string % obj
if string == previous_string:
break
return string
|
def _nih_segmented(h, grouping=6):
"""Segment hex-hash with dashes in nih style RFC6920_
>>> _nih_segmented("0123456789abcdef")
"012345-6789ab-cdef"
.. _RFC6920: https://www.ietf.org/rfc/rfc6920
"""
segmented = []
while h:
segmented.append(h[:grouping])
h = h[grouping:]
return "-".join(segmented)
|
def promotion_from_piecetype(piecetype):
"""
get piece type char from numeric piecetype
"""
piecetypes = {1: 'n', 2: 'b', 3: 'r', 4: 'q'}
return piecetypes[piecetype]
|
def chiSquare(observed, expected):
"""takes in a list of numbers, the observed and expected values and outputs the chi-square value"""
total = 0
for i in range(len(observed)):
total += (((observed[i] - expected[i])**2)/expected[i])
return total
|
def FindNextMultiLineCommentStart(lines, lineix):
"""Find the beginning marker for a multiline comment."""
while lineix < len(lines):
if lines[lineix].strip().startswith('/*'):
# Only return this marker if the comment goes beyond this line
if lines[lineix].strip().find('*/', 2) < 0:
return lineix
lineix += 1
return len(lines)
|
def col_map_from_header(header_str):
""" create column map that maps column names to column numbers
from a csv string
"""
col_map = {}
    header_fields = header_str.rstrip().split(",")
for i,field in enumerate(header_fields):
col_map[field] = i
return col_map
|
def parse_derivative_info(dstring):
"""
Input:
string: string of the form *,*x,*y,*xx, *xy, *yx, *yy, where *
stands for any letter.
Output:
tuple, encoding derivative information
"""
# Return tuples
if type(dstring) is tuple:
return dstring
s = list(dstring)
if len(s) == 1:
#
# No derivatives
#
return (0,)
elif len(s) == 2:
#
# First order derivative
#
if s[1] == 'x':
# wrt x
return (1,0)
elif s[1] == 'y':
# wrt y
return (1,1)
else:
raise Exception('Only two variables allowed.')
elif len(s) == 3:
#
# Second derivative
#
if s[1]=='x' and s[2]=='x':
# f_xx
return (2,0,0)
elif s[1]=='x' and s[2]=='y':
# f_xy
return (2,0,1)
elif s[1]=='y' and s[2]=='x':
# f_yx
return (2,1,0)
elif s[1]=='y' and s[2]=='y':
# f_yy
return (2,1,1)
else:
raise Exception('Use *xx,*xy,*yx, or *yy. * is any letter.')
else:
raise Exception('Higher order derivatives not supported.')
|
def increment_list(a_list, index):
"""Increment the last integer in a list, then and appends it. Adds index+1 if list list is empty"""
if len(a_list) == 0:
a_list.append(index + 1)
else:
last_index = a_list[-1]
a_list.append(last_index + 1)
return a_list
|
def get_sma(data, length):
""" Returns the simple moving average over a period of LENGTH """
if len(data) < length:
return "Not enough Data"
subset = data[-length:]
return sum(subset) / len(subset)
|
def same_first_last(L: list) -> bool:
"""Precondition: len(L) >= 2
Return True if and only if first item of the list is the same as the
last.
>>> same_first_last([3, 4, 2, 8, 3])
True
>>> same_first_last(['apple', 'banana', 'pear'])
False
>>> same_first_last([4.0, 4.5])
False
"""
if (L[0] == L[len(L)-1]):
return True
else:
return False
|
def twos_comp(val, num_bits):
"""compute the 2's compliment of int value val"""
if( (val&(1<<(num_bits-1))) != 0 ):
val = val - (1<<num_bits)
return val
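# Usage sketch (added for illustration):
print(twos_comp(0xFF, 8))  # -> -1
print(twos_comp(0x7F, 8))  # -> 127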
|
def maximize_tbreaks(tbreaks):
"""Remove tbreaks that are non-maximal.
i.e. remove tbreaks that are subsets of other tbreaks."""
subsets = set()
for tb1 in tbreaks:
for tb2 in tbreaks:
            if tb1 < tb2:
                subsets.add(tb1)
                break
return tbreaks - subsets
|
def split_indices(l,lookup):
"""Return two lists, within and without. within contains the
indices of all elements of l that are in lookup, while without
contains the remaining elements of l."""
within,without = [],[]
for (i,v) in enumerate(l):
try:
ind = lookup.index(v)
within.append((i,ind))
except ValueError: # v not found in lookup
without.append((i,v))
return within,without
|
def class_ioerror(log):
"""An error classifier.
log the console log text
Return None if not recognized, else the error type string.
"""
if ('ext3_abort called' in log and
'Read-only file system' not in log and
'Remounting filesystem read-only' not in log):
return 'IOERROR'
return None
|
def bracketIP6(ip):
"""Put brackets around an IPv6 address, just as tor does."""
return "[%s]" % ip
|
def breaklines(text, char_limit=60):
"""
    Break text into lines so none is longer than char_limit.
"""
settext = []
for par in text.split("\n"):
chunk = []
chunk_len = 0
for word in par.split(" "):
if len(word) + chunk_len > char_limit:
settext.append(" ".join(chunk))
chunk = []
chunk_len = 0
chunk.append(word)
chunk_len += len(word)
splitpar = " ".join(chunk)
if splitpar:
settext.append(splitpar)
return "\n".join(settext)
|
def bytesto(bytes, to: str, bsize: int = 1024) -> float:
"""Takes bytes and returns value convereted to `to`"""
a = {"k": 1, "m": 2, "g": 3, "t": 4, "p": 5, "e": 6}
r = float(bytes)
return bytes / (bsize ** a[to])
|
def compare_solutions_list(dict_1, dict_2, keys, info={}):
"""Compare solutions returning a list of dictionaries for each key"""
return [{**info, **{'key': k, 'model': dict_1[k], 'actual': float(dict_2[k])}}
for k in keys if k in dict_1.keys()]
|
def convert_to_bytes(mem_str):
"""Convert a memory specification, potentially with M or G, into bytes.
"""
if str(mem_str)[-1].upper().endswith("G"):
return int(round(float(mem_str[:-1]) * 1024 * 1024))
elif str(mem_str)[-1].upper().endswith("M"):
return int(round(float(mem_str[:-1]) * 1024))
else:
return int(round(float(mem_str)))
|
def build_success_api_response(returnEntity):
"""Return a formatted API response."""
# type: (dict[str, str], str) -> dict[str, dict[str, str]]
return { "apiResponse" : returnEntity }
|
def leap_year(year):
"""
Given a year, report if it is a leap year.
:param year int - The year to check
:return bool - Leap year or not.
On every year that is evenly divisible by 4
except every year that is evenly divisible by 100
unless the year is also evenly divisible by 400
For example, 1997 is not a leap year, but 1996 is. 1900 is not a leap year, but 2000 is.
"""
is_a_leap_year = False
if year % 4 == 0:
if year % 100 == 0 and year % 400 == 0:
is_a_leap_year = True
elif year % 100 == 0:
is_a_leap_year = False
else:
is_a_leap_year = True
return is_a_leap_year
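# Usage sketch (added for illustration, matching the examples in the docstring):
print(leap_year(1996))  # -> True
print(leap_year(1900))  # -> False
print(leap_year(2000))  # -> True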
|
def JavaDataTypeToC(java_type):
"""Returns a C datatype for the given java type."""
java_pod_type_map = {
'int': 'jint',
'byte': 'jbyte',
'char': 'jchar',
'short': 'jshort',
'boolean': 'jboolean',
'long': 'jlong',
'double': 'jdouble',
'float': 'jfloat',
}
java_type_map = {
'void': 'void',
'String': 'jstring',
'Throwable': 'jthrowable',
'java/lang/String': 'jstring',
'java/lang/Class': 'jclass',
'java/lang/Throwable': 'jthrowable',
}
if java_type in java_pod_type_map:
return java_pod_type_map[java_type]
elif java_type in java_type_map:
return java_type_map[java_type]
elif java_type.endswith('[]'):
if java_type[:-2] in java_pod_type_map:
return java_pod_type_map[java_type[:-2]] + 'Array'
return 'jobjectArray'
elif java_type.startswith('Class'):
# Checking just the start of the name, rather than a direct comparison,
# in order to handle generics.
return 'jclass'
else:
return 'jobject'
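# Usage sketch (added for illustration; the object type below is made up):
print(JavaDataTypeToC('int'))                 # -> jint
print(JavaDataTypeToC('int[]'))               # -> jintArray
print(JavaDataTypeToC('java/lang/String'))    # -> jstring
print(JavaDataTypeToC('com/example/Widget'))  # -> jobject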
|
def closestNode(coord,points):
"""Return closest point to coord from points"""
dists = [(pow(point[0] - coord[0], 2) + pow(point[1] - coord[1], 2), point) for point in points]
# list of (dist, point) tuples
nearest = min(dists)
return nearest[1]
|
def is_valid_matrix2D(lst):
"""
Checks if there is at least one positive value in the given 2 dimensional list.
:param lst: list of elements
:return: True if at least one positive value exist otherwise false.
"""
    for i in range(len(lst)):
        for j in range(len(lst[i])):
if lst[i][j] > 0.0:
return True
return False
|
def b_edge_reversed(s, initial, initial_reversed):
"""
If a b-edge can be validly reversed, return the result of the reversal.
Otherwise, return None.
"""
# precondition for validity:
if s.startswith('B') and (s.endswith('B') or s.endswith(initial) or s.endswith(initial_reversed)):
s2 = s[-1:0:-1]
# postcondition for validity:
if initial in s2 or initial_reversed in s2:
return s2
|
def relativedatapath(datapath):
"""
relative data path
:param datapath:
:return:
"""
return 'data/' + datapath
|
def average(input):
"""
Returns a list with a running average of input
The running average is a list with the same size
as the input. Each element at position n is the
average of all of the elements in input[:n+1]
Example: average([1, 3, 5, 7]) returns
[1.0, 2.0, 3.0, 4.0]
Parameter input: The data to process
Precondition: input an iterable, each element a number
"""
result = [] # Accumulator
sum = 0 # Accumulator helper
count = 0 # Accumulator helper
for x in input:
sum += x
count += 1
result.append(sum/count)
return result
|
def as_range(positions):
"""
Given a list of positions, merge them into intervals if possible
"""
l = list(positions)
if len(l) > 1:
return '{0}-{1}'.format(l[0], l[-1])
else:
return '{0}'.format(l[0])
|
def fn_k2_TE(h_11,c_11,beta_11):
"""EM coupling factor k^2 for thickness expander mode, as function of:
-- piezoelectric tensor component h_11
-- elastic stiffness tensor component c_11
-- impermittivity tensor component beta_11.
"""
return h_11**2 / (c_11 * beta_11)
|
def smartJoin(words):
"""
Joins list of words with spaces, but is smart about not adding spaces
before commas.
"""
input = " ".join(words)
# replace " , " with ", "
input = input.replace(" , ", ", ")
# replace " ( " with " ("
input = input.replace("( ", "(")
# replace " ) " with ") "
input = input.replace(" )", ")")
return input
|
def linearized_best_response(y):
"""A linearization of the best-response of the weights to some hyperparameter at some point.
:param y: The hyperparameter to evaluate the linearization at.
:return: The linearized best-response.
"""
return -1.0*y + 0.0
|
def check_stat_type(stat_list: list) -> bool:
"""check that all the statistics are numerical"""
stat_input_error = False
for stat in stat_list:
try:
# Tries to convert the input into float
# (string type input containing numeric character does not cause errors)
float(stat)
        except ValueError:  # Caught if float(stat) raises an error (non-numerical statistics)
print('ERROR: One or more of the stats is invalid. Insert only numbers')
stat_input_error = True # Notification of exception (used to cause the new request for statistics)
break # Exits the for loop since a non-numeric statistic is not acceptable
return stat_input_error
|
def get_val(dic, key, default=None):
"""
Traverse through the levels of a dictionary to obtain the
specified key, safely handling any missing levels. For example, if the
key is "app.error", it will find the "error" entry in the "app"
dictionary; if either the app dictionary or the error entry is missing,
then the default will be returned.
"""
if not key:
return default
try:
current = dic
for attribute in key.split("."):
current = current[attribute]
return current
except KeyError:
return default
|
def get_range(value, max_num=None):
"""
Returns the range over a given value.
Usage::
{% load libs_tags %}
{% for item in object_list.count|get_range %}
{{ item }} // render real items here
{% endfor %}
{% for item in object_list.count|get_range:5 %}
// render placeholder items here
{% endfor %}
:param value: The number to pass to the range function
:param max_num: Optional. Use this if you want to get a range over the
difference between the actual number and a maximum amount. This can
be useful to display placeholder items in a situation where the
space must always be filled up with 5 items but your actual list
might only have 2 items.
"""
if max_num:
value = max_num - value
return range(value)
|
def str_sized(string: str, lenth: int, end: str = ' ') -> str:
"""Cuts off a long string to conform with `lenth`, replace the end of the string with `end` if provided.
Args:
string (str): the string to format
lenth (int): the max lenth of the string
end (str): replace the end of the cut string with `end`
Returns:
str: the formatted string
"""
lenth -= len(end)
if lenth <= 0 or len(string) <= lenth:
return string
else:
return string[:lenth] + end
|
def get_shared_content_cache_key_ptr(site_id, slug, language_code):
"""
Get the rendering cache key for a sharedcontent block.
This key is an indirection for the actual cache key,
which is based on the object ID and parent ID.
"""
return "sharedcontent_key.{0}.{1}.{2}".format(site_id, slug, language_code)
|
def strip_byte_order_prefix(string, prefix_chars='<>|='):
"""
>>> from comma.numpy import strip_byte_order_prefix
>>> strip_byte_order_prefix('<f8')
'f8'
"""
return string[1:] if string.startswith(tuple(prefix_chars)) else string
|
def regular_polygon_area(perimeter, apothem):
"""Returns the area of a regular polygon"""
perimeter = float(perimeter)
apothem = float(apothem)
if (perimeter < 0.0 or apothem < 0.0):
raise ValueError('Negative numbers are not allowed')
return perimeter * apothem / 2
|
def num_states(spin_str_element):
"""
    This function evaluates the spin number string, formatted as s=a/b, and
returns the number of states 2*s + 1.
In the table we have three type of strings:
1. spin numbers integers formatted with 1 or 2 characters, e.g s=1, and s=10.
2. spin numbers formatted with 3 characters. e.g. s=3/2.
3. spin numbers formatted with 4 characters. e.g. s=11/2
Parameters
----------
:var spin_str_element: This string class element contains the information
in [88:102], about spin, parity, and
isospin charge.
Returns:
--------
:var states: this integer variable contains the number of states associated
to the `spin_str_element` string
"""
if len(spin_str_element) == 1 or len(spin_str_element) == 2:
states = 2*int(spin_str_element) + 1
return states
elif len(spin_str_element) == 3:
num = int(spin_str_element[0])
den = int(spin_str_element[2])
states = 2*num//den + 1
return states
elif len(spin_str_element) == 4:
num = int(spin_str_element[0:2])
den = int(spin_str_element[3])
states = 2*num//den + 1
return states
else:
return None
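# Usage sketch (added for illustration): 2*s + 1 states for s=1, s=3/2 and s=11/2.
print(num_states("1"))     # -> 3
print(num_states("3/2"))   # -> 4
print(num_states("11/2"))  # -> 12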
|
def foo(arg):
"""The World is Yours"""
return arg.format(123)
|
def html_attrs_tuple_to_string(attrs):
"""Converts a set of HTML attributes tuple to an HTML string.
Converts all HTML attributes returned by
:py:meth:`html.parser.HTMLParser.handle_starttag` ``attrs`` value into
their original HTML representation.
Args:
attrs (list): List of attributes, each item being a tuple with two
values, the attribute name as the first and the value as the
second.
Returns:
str: HTML attributes string ready to be used inside a HTML tag.
"""
response = ''
for i, (name, value) in enumerate(attrs):
response += '%s' % name
if value is not None:
response += '="%s"' % value
if i < len(attrs) - 1:
response += ' '
return response
|
def parsenext(string,delimiter):
""" Returns all text from the start of the input string up to the
    occurrence of the delimiter."""
delimlen=len(delimiter)
i=0
strlen=len(string)
while(i<strlen):
if string[i:i+delimlen]==delimiter:
break
i+=1
return string[0:i]
|
def _pyval_field_major_to_node_major(keys, values, depth):
"""Regroup each field (k, v) from dict-of-list to list-of-dict.
Given a "field-major" encoding of the StructuredTensor (which maps each key to
a single nested list containing the values for all structs), return a
corresponding "node-major" encoding, consisting of a nested list of dicts.
Args:
keys: The field names (list of string). Must not be empty.
values: The field values (list of python values). Must have the same length
as `keys`.
depth: The list depth at which dictionaries should be created.
Returns:
A nested list of dict, with depth `depth`.
"""
assert keys
if depth == 0:
return dict(zip(keys, values))
nvals = len(values[0])
assert all(nvals == len(values[i]) for i in range(1, len(values)))
return [
_pyval_field_major_to_node_major(keys, value_slice, depth - 1)
for value_slice in zip(*values)
]
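# Usage sketch (added for illustration): regroup two fields of two structs
# from field-major to node-major at depth 1.
print(_pyval_field_major_to_node_major(["x", "y"], [[1, 2], [10, 20]], 1))
# -> [{'x': 1, 'y': 10}, {'x': 2, 'y': 20}]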
|
def hours_to_minutes( hours: str ) -> int:
"""Converts hours to minutes."""
return int(hours)*60
|
def to_list(raw: str) -> list:
"""If the `raw` string is formatted like a list, it is converted to a list,
otherwise returns a list with `raw` as the single item.
"""
raw = raw.strip()
retval = []
if raw[0] == '[' and raw[-1] == ']':
for item in raw[1:-1].split(','):
retval += [item.strip().strip("'\"")]
else:
retval += [raw]
return retval
|
def getsteps(num_of_steps, limit):
"""
Helper function for display_percent_done
"""
steps = []
current = 0.0
for i in range(0, num_of_steps):
if i == num_of_steps-1:
steps.append(int(round(limit)))
else:
steps.append(int(round(current)))
current += float(limit)/float(num_of_steps-1)
return steps
|
def humanize_megabytes(mb):
"""
Express a number of megabytes in the most suitable unit (e.g. gigabytes or terabytes).
"""
if not mb:
return ''
if mb >= 1048576:
return f'{int(mb / 1048576)} TB'
if mb >= 1024:
return f'{int(mb / 1024)} GB'
return f'{mb} MB'
|
def aoi_from_latlon(ylat, xlong):
"""
Example:
lat = 38.459702
long = -122.438332
aoi = aoi_from_latlon(lat, long)
"""
# Intent is for an AOI of a single pixel
# Approx 12 cm, ie <1m
dx = 0.000001
dy = 0.000001
aoi = {
"type": "Polygon",
"coordinates": [
[
[xlong, ylat],
[xlong, ylat+dy],
[xlong+dx, ylat+dy],
[xlong+dx, ylat],
[xlong, ylat],
]
],
}
return aoi
|
def lineTextAnalyzis(line):
"""
:rtype: object
"""
values = line.split("\t")
if len(values) == 4:
vocabulary = values[0]
meaning = values[1]
types = values[2]
tags = values[3]
return vocabulary, meaning, types, tags
else:
return None
|
def _check_valid_type_dict(payload):
"""_check_valid_type_dict checks whether a dict is a correct serialization of a type
Args: payload(dict)
"""
if not isinstance(payload, dict) or len(payload) != 1:
return False
for type_name in payload:
if not isinstance(payload[type_name], dict):
return False
property_types = (int, str, float, bool)
property_value_types = (int, str, float, bool, dict)
for property_name in payload[type_name]:
if not isinstance(property_name, property_types) or not isinstance(
payload[type_name][property_name], property_value_types):
return False
return True
|
def alpha_to_index(char):
"""Takes a single character and converts it to a number where A=0"""
translator = {
"A": 0, "B": 1, "C": 2, "D": 3, "E": 4, "F": 5, "G": 6, "H": 7,
"I": 8, "J": 9, "K": 10, "L": 11, "M": 12, "N": 13, "O": 14,
"P": 15, "Q": 16, "R": 17, "S": 18, "T": 19, "U": 20, "V": 21,
"W": 22, "X": 23, "Y": 24, "Z": 25,
}
return translator[char.upper()]
|
def git_pattern_handle_blanks(git_pattern: str) -> str:
"""
Trailing spaces are ignored unless
they are quoted with backslash ("\").
in fact all spaces in gitignore CAN be escaped
it is not clear if they NEED to be escaped,
but it seems like !
see: https://stackoverflow.com/questions/10213653
wcmatch.glob.globmatch supports both forms
>>> assert git_pattern_handle_blanks(r'something \\ \\ ') == 'something\\ \\ '
>>> assert git_pattern_handle_blanks(r'something \\ \\ ') == 'something\\ \\ '
>>> assert git_pattern_handle_blanks(r'some\\ thing \\ ') == 'some\\ thing\\ '
>>> assert git_pattern_handle_blanks(r'some thing \\ ') == 'some thing\\ '
"""
parts = [part.strip() for part in git_pattern.split("\\ ")]
return "\\ ".join(parts)
|
def heron(a):
"""Calculates the square root of a"""
eps = 0.0000001
old = 1
new = 1
while True:
old,new = new, (new + a/new) / 2.0
print(old, new)
if abs(new - old) < eps:
break
return new
|