def payload2tuple(payload, output_type = float):
"""
    Extract a tuple from a payload message.
    Useful for messages that return an array [a, b, c, d, ...].
payload: str
the payload to be parsed
output_type: a type
the type to use to parse the payload (Default float)
"""
splitted_payload = payload.split(',')
return tuple((output_type(x) for x in splitted_payload))
|
def str2bool(v):
""" Convert string to bool
Args:
v: String indicating True or False
Returns:
        True if found in the list, otherwise False
"""
return v.lower() in ("yes", "true", "t", "1")
|
def resolve_flags(bfr, flags):
"""
Helper to resolve flag values and names.
Arguments:
#bfr:integer
The buffer containing flags.
#flags:dict
The dictionary of flag names and values.
"""
return {k: v & bfr != 0 for k, v in flags.items()}
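# Illustrative usage with made-up flag names and values (not from the source):
# each flag is True exactly when its bit is set in the buffer.
example_flags = {'READ': 0b001, 'WRITE': 0b010, 'EXEC': 0b100}
assert resolve_flags(0b101, example_flags) == {'READ': True, 'WRITE': False, 'EXEC': True}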
|
def parseTitle(value):
"""
"""
if not value: return None
return value.strip().lower()
|
def red_sequence_ri_zevol_sigmoid_params(r):
"""
"""
c0_ymin, c1_ymin = 0.2646, -0.0063
ymin = c0_ymin + c1_ymin*r
c0_ymax, c1_ymax = 0.0419, -0.00695
ymax = c0_ymax + c1_ymax*r
return ymin, ymax
|
def get_commands_to_add_members(proposed, existing):
"""Gets commands required to add members to an existing port-channel
Args:
existing (dict): existing config as defined in nxos_portchannel
proposed (dict): proposed config as defined in nxos_portchannel
Returns:
list: ordered list of commands to be sent to device to add members to
a port-channel
Note:
Specific for Ansible module(s). Not to be called otherwise.
"""
proposed_members = proposed['members']
existing_members = existing['members']
members_to_add = set(proposed_members).difference(existing_members)
members_to_add_list = list(members_to_add)
commands = []
if members_to_add_list:
for member in members_to_add_list:
commands.append('interface ' + member)
commands.append('channel-group {0} mode {1}'.format(
existing['group'], proposed['mode']))
return commands
|
def DensHumid(tempc, pres, e):
"""Density of moist air.
This is a bit more explicit and less confusing than the method below.
INPUTS:
tempc: Temperature (C)
pres: static pressure (hPa)
e: water vapor partial pressure (hPa)
OUTPUTS:
rho_air (kg/m^3)
SOURCE: http://en.wikipedia.org/wiki/Density_of_air
"""
tempk = tempc + 273.15
prespa = pres * 100.0
epa = e * 100.0
Rs_v = 461.52 # Specific gas const for water vapour, J kg^{-1} K^{-1}
Rs_da = 287.05 # Specific gas const for dry air, J kg^{-1} K^{-1}
pres_da = prespa - epa
rho_da = pres_da / (Rs_da * tempk)
rho_wv = epa/(Rs_v * tempk)
return rho_da + rho_wv
|
def solve_knapsack_2d_array(profits, weights, capacity):
"""
capacity
0 1 2 3 4 5
0 0 0 0 0 0 0
1 0 1 1 1 1 1
2 0 1 2 3 3 3
3 0 1 2 3 4 5
5 0 1 2 3 4 5
0 1 2 3 4 5
0 1 2 3 4 5
profits -> [1, 2, 3, 4],
weights -> [1, 2, 3, 5]
Time O(c*w)
Space O(c*w)
"""
n = len(weights)
dp = [[0 for _ in range(capacity + 1)] for _ in range(n + 1)]
for r in range(1, n + 1):
for c in range(1, capacity + 1):
w = weights[r - 1]
if w > c:
dp[r][c] = dp[r - 1][c]
else:
dp[r][c] = max(dp[r - 1][c], dp[r - 1][c - w] + profits[r - 1])
return dp[-1][-1]
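# Illustrative check against the example in the docstring (assumed inputs):
assert solve_knapsack_2d_array([1, 2, 3, 4], [1, 2, 3, 5], 5) == 5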
|
def l(s):
"""Split a byte string to ord's of chars."""
    return list(s)
|
def reverse_functional(value):
"""Reverse string in a functional way using the "reduce" function."""
from functools import reduce
return reduce((lambda result, char: char + result), value, "")
|
def parse_sample_info(sample_dat, sparse=True, format_list=None):
"""
Parse genotype information for each sample
Note, it requires the format for each variants to
be the same.
"""
if sample_dat == [] or sample_dat is None:
return None
# require the same format for all variants
format_all = [x[0].split(":") for x in sample_dat]
if format_list is None:
format_list = format_all[0]
RV = {}
for _key in format_list:
RV[_key] = []
if sparse:
## sparse matrix requires all keys
format_set_all = [set(x) for x in format_all]
if format_set_all.count(set(format_list)) != len(format_all):
print("Error: require the same format for all variants.")
exit()
RV['indices'] = []
RV['indptr'] = [0]
RV['shape'] = (len(sample_dat[0][1:]), len(sample_dat))
missing_val = ":".join(["."] * len(format_list))
cnt = 0
for j in range(len(sample_dat)): #variant j
_line = sample_dat[j]
key_idx = [format_all[j].index(_key) for _key in format_list]
for i in range(len(_line[1:])): #cell i
if _line[i+1] == missing_val or _line[i+1] == ".":
continue
_line_key = _line[i+1].split(":")
for k in range(len(format_list)):
RV[format_list[k]].append(_line_key[key_idx[k]])
cnt += 1
RV['indices'].append(i)
RV['indptr'].append(cnt)
else:
for j in range(len(sample_dat)): #variant j
_line = sample_dat[j]
_line_split = [x.split(":") for x in _line[1:]]
for _key in format_list:
if _key in format_all[j]:
k = format_all[j].index(_key)
_line_key = [x[k] for x in _line_split]
RV[_key].append(_line_key)
else:
RV[_key].append(".")
return RV
|
def clear_sentences(data):
"""
Cleaning sentences, removing special characters and articles
"""
sentences = list()
for record in data:
sentence = record['reviewText']
sentence = sentence.lower()
for char in "?.!/;:,":
sentence = sentence.replace(char, '')
sentence = sentence.split(sep=' ')
sentence = [word for word in sentence if len(word) > 1]
sentences.append(sentence)
return sentences
|
def quote_sheetname(sheetname):
"""
Add quotes around sheetnames if they contain spaces.
"""
if "'" in sheetname:
sheetname = sheetname.replace("'", "''")
if (" " in sheetname
or "-" in sheetname
or "'" in sheetname):
sheetname = u"'{0}'".format(sheetname)
return sheetname
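# Illustrative usage (hypothetical sheet names):
assert quote_sheetname("Sheet1") == "Sheet1"
assert quote_sheetname("Q1 Results") == "'Q1 Results'"
assert quote_sheetname("Bob's Data") == "'Bob''s Data'"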
|
def _accesslen(data) -> int:
"""This was inspired by the `default_collate` function.
https://github.com/pytorch/pytorch/blob/master/torch/utils/data/_utils/
"""
if isinstance(data, (tuple, list)):
item = data[0]
if not isinstance(item, (float, int, str)):
return len(item)
return len(data)
|
def postman_headers_to_conf_headers(postman_headers, skip_authorization_header: bool = False):
"""
postman_headers = [
{
"key": "Content-Type",
"value": "application/json",
"type": "text"
},
{
"key": "Accept",
"value": "application/json",
"type": "text"
}
]
to =>
[{'Content-Type': 'application/json'}, {'Accept': 'application/json'}]
"""
if not postman_headers:
return None
headers = []
for ph in postman_headers:
if skip_authorization_header and ph['key'] == 'Authorization':
continue
headers.append({
ph['key']: ph['value']
})
return headers
|
def type_is_subclass(type_, type_or_tuple):
"""Check sub-class.
Return false if ``type_`` is a recursive type.
"""
return isinstance(type_, type) and issubclass(type_, type_or_tuple)
|
def pms_to_addrportsq(poolmembers):
""" Converts PoolMembers into a list of address, port dictionaries """
return [{'address': p._node.name, 'port': p._port} for p in poolmembers]
|
def get_lineprotocol_bool(param):
"""lineprotocol expect boolean values to be string like true or false, not capitalize"""
if param:
return "true"
else:
return "false"
|
def merge_config_dictionaries(*dicts):
"""
    Merges n dictionaries of configuration data.
    :param dicts: dictionaries to merge; later ones override earlier keys
    :return dict: the merged dictionary
    """
    # *dicts arrives as a tuple, so iterate over it directly
    res_dict = {}
    if len(dicts) == 1 and isinstance(dicts[0], dict):
        return dicts[0]
    for dictionary in dicts:
        if isinstance(dictionary, dict):
            res_dict.update(dictionary)
    return res_dict
|
def read_data_string(text, delimiter=',', newline='\n', has_headerline=True):
"""Reads a delimited string into a list of dictionaries. Functions very similar to :meth:`numpy.genfromtxt`, but for strings instead of text files.
Parameters
----------
text : str
String of row/column data with delimiters and new line indicators given in `delimiter` and `newline`.
delimiter : str, optional
Delimiter used in `text`, by default ','
newline : str, optional
New line indicator used in `text`, by default '\n'
has_headerline : bool, optional
If True, treats the first line of `text` as headers. If False, treats the first line of `text` as data and makes generic headers, by default True
Returns
-------
:obj:`list` of :obj:`dict`
A list of dictionaries containing the data from `text`
"""
lines = text.split(newline)
# Generate headers
if has_headerline:
# If the text has headerlines, get them
headers = lines.pop(0).split(delimiter)
else:
# If the text doesn't have headerlines, make generic ones
headers = [str(i+1) for i in range(len(lines[0].split(delimiter)))]
data = []
for line in lines:
# For each line, check if data is missing
if len(line.split(delimiter)) == len(headers):
# If there is no missing data on this line, initialize a dictionary for the line data
line_data = {}
for header, value in zip(headers, line.split(delimiter)):
# For each column in the line, add to the line_data dict (header as key and value as value)
line_data[header] = value
# Append the line_data dict to the data list
data.append(line_data)
return data
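# Illustrative usage with a small made-up CSV-style string (not from the source):
rows = read_data_string("name,age\nada,36\ngrace,45")
assert rows == [{'name': 'ada', 'age': '36'}, {'name': 'grace', 'age': '45'}]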
|
def split_metrics(line):
"""split a line of metric deltas into a dict, for example:
'get +1, get_hit +1' -> {'get':1, 'get_hit':1}
'request_free -1, request_parse +1' -> {'request_free':-1, 'request_parse':1}
"""
metrics = line.split(',')
d = {}
for m in metrics:
name,delta = m.strip().split(' ')
d[name.strip()] = int(delta)
return d
|
def secondsToHms(seconds):
"""Convert seconds to hours, mins, secs
seconds - seconds to be converted, >= 0
return hours, mins, secs
"""
assert seconds >= 0, 'seconds validation failed'
hours = int(seconds / 3600)
mins = int((seconds - hours * 3600) / 60)
secs = seconds - hours * 3600 - mins * 60
return hours, mins, secs
|
def db2pow(xdb):
"""Convert decibels (dB) to power
.. doctest::
>>> from spectrum import db2pow
>>> p = db2pow(-10)
>>> p
0.1
.. seealso:: :func:`pow2db`
"""
return 10.**(xdb/10.)
|
def trapezint(f, a, b, n):
"""
Uses trapezoid rule to find the integral of a function
"""
sum = 0.0
h = (b - a) / float(n)
for counter in range(int(n)):
        sum += (1 / 2.0) * h * (f(a + counter * h) + f(a + (counter + 1) * h))
return sum
|
def fusion_processes_to_rates(process_list):
"""
Define fusion processes between compartments.
Parameters
==========
process_list : :obj:`list` of :obj:`tuple`
        A list of tuples that contains fusion rates in the following format:
.. code:: python
[
(coupling_compartment_0, coupling_compartment_1, rate, target_compartment_0 ),
...
]
Example
-------
Fusion of reactants "A", and "B" to form "C".
.. code:: python
fusion_processes_to_rates([
("A", "B", reaction_rate, "C" ),
])
"""
quad_rates = []
for source0, source1, rate, target in process_list:
# target compartment gains one entity
quad_rates.append((source0, source1, target, rate))
# source compartments lose one entity each
quad_rates.append((source0, source1, source0, -rate))
quad_rates.append((source0, source1, source1, -rate))
return quad_rates
|
def eia_mer_url_helper(build_url, config, args):
"""Build URL's for EIA_MER dataset. Critical parameter is 'tbl', representing a table from the dataset."""
urls = []
for tbl in config['tbls']:
url = build_url.replace("__tbl__", tbl)
urls.append(url)
return urls
|
def get_empty_marker_error(marker_type):
"""
Generate error message for empty marker.
"""
msg = (
"no configuration file is specified in "
"``@pytest.mark.gdeploy_config_{0}`` decorator "
"of this test case, please add at least one "
"configuration file file name as a parameter into the marker, eg. "
"``@pytest.mark.gdeploy_config_{0}('config.conf')``")
return msg.format(marker_type)
|
def get_all_index_in_list(L, item):
"""
    get all the indices of matching items in the list
    :param L: list
    :param item: item to be found
    :return: the indices of all matching items in the list
"""
return [index for (index, value) in enumerate(L) if value == item]
|
def isnum(value):
"""
Check if a value is a type of number (decimal or integer).
:type value: object
:param value: The value to check.
"""
try:
return bool(isinstance(value, (float, int)))
except RuntimeError:
return False
|
def comma_join(fields):
"""
Converts everything in the list to strings and then joins
them with commas.
"""
return ",".join(map(str, fields))
|
def cond_formula(criteria, colorformat):
"""helper function for returning xlsxwriter cond. formatting dicts
    for formula conditions, i.e. conditional format dependent on another cell
"""
formDict = {'type': 'formula',
'criteria': criteria,
'format': colorformat}
return formDict
|
def filter_fields(fieldmap, top=(), include=(), exclude=()):
"""
Returns fieldmap filtered by include and exclude patterns.
@param fieldmap {field name: field type name}
@param top parent path as (rootattr, ..)
@param include [((nested, path), re.Pattern())] to require in parent path
@param exclude [((nested, path), re.Pattern())] to reject in parent path
"""
result = type(fieldmap)() if include or exclude else fieldmap
for k, v in fieldmap.items() if not result else ():
trail, trailstr = top + (k, ), ".".join(top + (k, ))
for is_exclude, patterns in enumerate((include, exclude)):
matches = any(p[:len(trail)] == trail[:len(p)] or r.match(trailstr)
for p, r in patterns) # Match by beginning or wildcard pattern
if patterns and (not matches if is_exclude else matches):
result[k] = v
elif patterns and is_exclude and matches:
result.pop(k, None)
if include and exclude and k not in result: # Failing to include takes precedence
break # for is_exclude
return result
|
def max_in_col(data, col_id):
"""If data is a list of tuples, return the maximum """
return max(int(row[col_id]) for row in data)
|
def get_hosts_cpu_frequency(ceilo, hosts):
"""Get cpu frequency for each host in hosts.
:param ceilo: A Ceilometer client.
:type ceilo: *
:param hosts: A set of hosts
:type hosts: list(str)
:return: A dictionary of (host, cpu_frequency)
:rtype: dict(str: *)
"""
hosts_cpu_total = dict() #dict of (host, cpu_max_frequency)
for host in hosts:
host_id = "_".join([host, host])
cpu_frequency_list = ceilo.samples.list(meter_name='compute.node.cpu.frequency',
limit=1, q=[{'field':'resource_id','op':'eq','value':host_id}])
if cpu_frequency_list:
hosts_cpu_total[host] = cpu_frequency_list[0].counter_volume
return hosts_cpu_total
|
def notas(*num, situacao=False):
"""
    Receives several grades and returns the report card with the number of
    grades, the highest and lowest, the average and, if requested, the status.
    :param num: one, several or no grades.
    :param situacao: include the status in the report (optional).
    :return: the report card (dict).
"""
boletim = {
'quantidade': len(num),
'maior': max(num),
'menor': min(num),
'media': sum(num) / len(num)
}
if situacao:
if boletim['media'] >= 7:
boletim['situacao'] = 'Boa'
elif boletim['media'] >= 5:
boletim['situacao'] = 'Razoavel'
else:
boletim['situacao'] = 'Ruim'
print('~' * 40)
return boletim
|
def jaccard_similarity_set(a, b):
"""jaccard_similarity_set
Calculates the Jaccard similarity between two sets.
Parameters
----------
a : :obj:`set`
b : :obj:`set`
Returns
-------
:obj:`float`
The Jaccard similarity between `a` and `b`.
"""
a = set(a)
b = set(b)
intersection = len(a.intersection(b))
union = len(a.union(b))
return intersection / union
|
def twos_comp_dec(val, bits):
"""returns the signed int value from the 2's complement val with n bits
- https://stackoverflow.com/questions/1604464/twos-complement-in-python"""
if (val & (1 << (bits - 1))) != 0: # if sign bit is set e.g., 8bit: 128-255
val = val - (1 << bits) # compute negative value
return val
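# Illustrative usage (assumed 8-bit values):
assert twos_comp_dec(0xFF, 8) == -1
assert twos_comp_dec(0x7F, 8) == 127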
|
def CalculateOSNames(os_name, os_variants):
"""Calculates all the names an OS can be called, according to its variants.
@type os_name: string
@param os_name: base name of the os
@type os_variants: list or None
@param os_variants: list of supported variants
@rtype: list
@return: list of valid names
"""
if os_variants:
return ["%s+%s" % (os_name, v) for v in os_variants]
else:
return [os_name]
|
def domain(x, y, s_w, s_h):
"""
The function estimates the rectangle that includes the given point.
Arguments:
x and y coordinates of the point and width and height of the screen
Return:
integer 0 <= n <= 5
"""
    # use an elif chain so boundary values (x == s_w / 3, x == s_w * 2 / 3) are covered
    if x < s_w / 3:
        return 0 if y < s_h / 2 else 3
    elif x < s_w * 2 / 3:
        return 1 if y < s_h / 2 else 4
    else:
        return 2 if y < s_h / 2 else 5
|
def return_period_from_string(arg):
"""
Takes a string such as "days=1,seconds=30" and strips the quotes
and returns a dictionary with the key/value pairs
"""
period = {}
if arg[0] == '"' and arg[-1] == '"':
opt = arg[1:-1] # remove quotes
else:
opt = arg
for o in opt.split(","):
key, value = o.split("=")
period[str(key)] = int(value)
return period
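# Illustrative usage (made-up argument string):
assert return_period_from_string('"days=1,seconds=30"') == {'days': 1, 'seconds': 30}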
|
def __subfields(t, d):
""" Unpack subfield data into tuples. """
    # Remove extra trailing subfield delimiters, if necessary, before
# splitting into subfields
subf = d.rstrip(b"\x1f").split(b"\x1f")
# No subfields means it's a control field, with no indicators and
# no subfield code
if len(subf) == 1:
return (t, None, None, (None, d.decode()))
return (t, chr(subf[0][0]), chr(subf[0][1])) + \
tuple([(chr(s[0]), s[1:].decode()) for s in subf[1:]])
|
def filterize(d):
"""
Return dictionary 'd' as a boto3 "filters" object by unfolding it to a list of
dict with 'Name' and 'Values' entries.
"""
return [{"Name": k, "Values": [v]} for k, v in d.items()]
|
def cpm2usv(cpm_value):
"""
Using chart at:
http://nukeprofessional.blogspot.jp/2012/04/geiger-counter-interpretation.html
"""
usv_per_click = 0.1/12
return cpm_value * usv_per_click
|
def applyModel(model, p):
"""
get p' = model(p)
in this case,
model=(tx,ty), p=(x,y), p'=(x',y'),
and
x'=x+tx, y'=y+ty
"""
tx = model[0]
ty = model[1]
x = p[0]
y = p[1]
xprime = x + tx
yprime = y + ty
pprime = [xprime, yprime]
return pprime
|
def remove_duplicates(transfers_in, transfers_out):
"""
If we are replacing lots of players (e.g. new team), need to make sure there
are no duplicates - can't add a player if we already have them.
"""
t_in = [t["element_in"] for t in transfers_in]
t_out = [t["element_out"] for t in transfers_out]
dupes = list(set(t_in) & set(t_out))
transfers_in = [t for t in transfers_in if not t["element_in"] in dupes]
transfers_out = [t for t in transfers_out if not t["element_out"] in dupes]
return transfers_in, transfers_out
|
def mult(p1, p2):
"""Multiply two polynomials in GF(2^4)/x^4 + x + 1"""
p = 0
while p2:
if p2 & 0b1:
p ^= p1
p1 <<= 1
if p1 & 0b10000:
p1 ^= 0b11
p2 >>= 1
return p & 0b1111
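# Illustrative check: (x^2 + x + 1) * (x^2 + 1) = x^3 in GF(2^4) mod x^4 + x + 1
assert mult(0b0111, 0b0101) == 0b1000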
|
def is_stanza(sentences: str):
"""
Check if input is a stanza or not
param sentences: sentences to check
return: is stanza or not
"""
return len(sentences.split("\n\n")) == 1
|
def ordinal(number):
"""
Return the suffix that should be added to a number to denote the position
in an ordered sequence such as 1st, 2nd, 3rd, 4th.
Examples::
>>> ordinal(1)
"st"
>>> ordinal(2)
"nd"
>>> ordinal(1002)
"nd"
>>> ordinal(1003)
"rd"
>>> ordinal(-11)
"th"
>>> ordinal(-1021)
"st"
"""
number = abs(int(number))
if number % 100 in (11, 12, 13):
return "th"
else:
return {1: "st", 2: "nd", 3: "rd"}.get(number % 10, "th")
|
def et_to_str(node):
"""Get the text value of an Element, returning None if not found."""
try:
return node.text
except AttributeError:
return None
|
def add(a: int, b, c=5, d=7., e=None):
"""Some cool addition.
It's super complicated.
You know, adding and stuff.
Parameters
----------
b : int
This is the second complicated parameter
super complicated
e : int, optional
"""
if e is None:
e = 0
return a + b + c + d + e
|
def convert_list_to_xyz_str(atoms: list):
"""Convert nested list of atom and coordinates list into xyz-string.
Args:
atoms (list): Atom list of type `[['H', 0.0, 0.0, 0.0], ['C', 1.0, 1.0, 1.0], ...]`.
Returns:
str: Information in xyz-string format.
"""
xyz_str = str(int(len(atoms))) + "\n"
for a_iter in atoms:
xyz_str = xyz_str + "\n"
_line_str = "{:} {:.10f} {:.10f} {:.10f}".format(*a_iter)
xyz_str = xyz_str + _line_str
return xyz_str
|
def _GetMetdataValue(metadata, key):
"""Finds a value corresponding to a given metadata key.
Args:
      metadata: metadata object, i.e. a dict containing 'items'
- a list of key-value pairs.
key: name of the key.
Returns:
Corresponding value or None if it was not found.
"""
for item in metadata['items']:
if item['key'] == key:
return item['value']
return None
|
def get_all_tags(data, **kws):
"""Get all tags from PV data.
Parameters
----------
data : list(dict)
List of dict, each dict element is of the format:
``{'name': PV name (str), 'owner': str, 'properties': PV properties (list[dict]), 'tags': PV tags (list[dict])}``.
Keyword Arguments
-----------------
name_only : True or False
        If True, only return the list of tag names.
Returns
-------
ret : list of dict
        dict: {'name': tag_name, 'owner': owner}
"""
t_list = []
for r in data:
new_t = [
{'name': p['name'], 'owner': p['owner']}
for p in r['tags']
]
        for i in new_t:
            if i not in t_list:
                t_list.append(i)
if kws.get('name_only', False):
return sorted([t['name'] for t in t_list])
else:
return t_list
|
def fill_not_enough_to_code(record: dict):
"""
Function to add "not enough to code" label when comments are blank.
Parameters
----------
record : dict
Input record.
Returns
-------
    dict
Record with `prov_measure` and `prov_category` values altered conditionally.
"""
if record['comments'] == '' and record['prov_category'] != 'school_closed':
record['prov_measure'] = 'not_enough_to_code'
record['prov_category'] = 'not_enough_to_code'
    return record
|
def _match(doc, query):
"""Decide whether doc matches query."""
for k, v in query.items():
if doc.get(k, object()) != v:
return False
return True
|
def escape_id(s):
"""Return an escaped string suitable for node names, menu entries,
and xrefs anchors."""
bad_chars = ',:.()@{}'
for bc in bad_chars:
s = s.replace(bc, ' ')
s = ' '.join(s.split()).strip()
return s
|
def get_port(url, default="80"):
"""
Extracts the port, or returns value of default param (defaults to 80)
"""
if url.find("//") != -1:
url = url[url.find("//")+2:url.find("/",url.find("//")+2)]
parts = url.split(":")
    if len(parts) == 2:
        port = parts[1]
        extracted_port = ""
        for ch in port:
            # compare against digit characters (not integers) and stop at the
            # first non-digit, e.g. a trailing path or query fragment
            if ch in "0123456789":
                extracted_port += ch
            else:
                break
        return extracted_port if extracted_port != "" else default
elif len(parts) == 1:
return default
else:
raise Exception("More than one : was found in the URL, or the URL is empty: " + url)
|
def snake_to_camel(snake_str):
"""
:param snake_str: string
:return: string converted from a snake_case to a CamelCase
"""
components = snake_str.split("_")
return "".join(x.title() for x in components)
|
def _strip_flags_for_testing(flags):
"""Accepts the default configure/build flags and strips out those
incompatible with the SQLite tests.
When configuring SQLite to run tests this script uses a configuration
as close to what Chromium ships as possible. Some flags need to be
omitted for the tests to link and run correct. See comments below.
"""
test_flags = []
for flag in flags:
# Omitting features can cause tests to hang/crash/fail because the
# SQLite tests don't seem to detect feature omission. Keep them enabled.
if flag.startswith('SQLITE_OMIT_'):
continue
# Some tests compile with specific SQLITE_DEFAULT_PAGE_SIZE so do
# not hard-code.
if flag.startswith('SQLITE_DEFAULT_PAGE_SIZE='):
continue
# Some tests compile with specific SQLITE_DEFAULT_MEMSTATUS so do
# not hard-code.
if flag.startswith('SQLITE_DEFAULT_MEMSTATUS='):
continue
# If enabled then get undefined reference to `uregex_open_63' and
# other *_64 functions.
if flag == 'SQLITE_ENABLE_ICU':
continue
# If defined then the fts4umlaut tests fail with the following error:
#
# Error: unknown tokenizer: unicode61
if flag == 'SQLITE_DISABLE_FTS3_UNICODE':
continue
test_flags.append(flag)
return test_flags
|
def extractLeafs(nt, leafDict):
"""Given a newick tree object, it returns a dict of
leaf objects. Operates recursively.
"""
if nt is None:
return None
nt.distance=0
if nt.right is None and nt.left is None:
leafDict[nt.iD] = True
else:
extractLeafs(nt.right, leafDict=leafDict)
extractLeafs(nt.left , leafDict=leafDict)
|
def tie_account_to_order(AccountKey, order):
"""tie_account_to_order - inject the AccountKey in the orderbody.
An order specification is 'anonymous'. To apply it to an account it needs
the AccountKey of the account.
Parameters
----------
AccountKey: string (required)
the accountkey
order: dict representing an orderbody or <...>Order instance
the details of the order.
"""
_r = order.copy() if isinstance(order, dict) else order.data.copy()
# add the key to the orderbody, but ONLY if this is not a positionclose
# body
if "PositionId" not in _r:
_r.update({'AccountKey': AccountKey})
# and add it to related orders in Orders (if any)
if 'Orders' in _r:
for o in _r['Orders']:
o.update({'AccountKey': AccountKey})
return _r
|
def get_value_line(data, label:str, steps:int):
""" """
new_data = [0,0,0, None,None,None]
for x in range(steps):
try:
new_data[x] = data[label][x]
except IndexError:
continue
#
    if new_data[3] is None:
        new_data[3] = new_data[0]
    if new_data[4] is None:
        new_data[4] = new_data[1]
    if new_data[5] is None:
        new_data[5] = new_data[2]
#
return new_data
|
def inpoly(xv, yv, xt, yt):
"""
Originally in C by Bob Stein and Craig Yap
http://www.visibone.com/inpoly/
Converted to python by Bryan Miller
2013.04.18
Inputs:
xv - x vertices of polygon (does not have to be 'closed', ie. last = first)
yv - y vertices of polygon
xt - x of test point(s)
yt - y of test point(s)
# 2016.06.25 - generalize to handle input arrays
"""
nvert = len(xv)
if nvert != len(yv) or nvert < 3:
return -1
l_xt = xt
l_yt = yt
try:
npoints = len(l_xt)
except Exception:
l_xt = [l_xt]
npoints = len(l_xt)
try:
npointsy = len(l_yt)
except Exception:
l_yt = [l_yt]
npointsy = len(l_yt)
if npoints != npointsy:
return -1
inside = [False for ii in range(npoints)]
for jj in range(npoints):
xold = xv[nvert-1]
yold = yv[nvert-1]
for i in range(nvert):
xnew = xv[i]
ynew = yv[i]
if xnew > xold:
x1 = xold
x2 = xnew
y1 = yold
y2 = ynew
else:
x1 = xnew
x2 = xold
y1 = ynew
y2 = yold
# /* edge "open" at one end */
if (xnew < l_xt[jj]) == (l_xt[jj] <= xold) and (l_yt[jj]-y1)*(x2-x1) < ((y2-y1)*(l_xt[jj]-x1)):
inside[jj] = not inside[jj]
xold = xnew
yold = ynew
if npoints == 1:
inside = inside[0]
return inside
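# Illustrative usage with a hypothetical 2x2 square (vertices listed counter-clockwise):
assert inpoly([0, 2, 2, 0], [0, 0, 2, 2], 1, 1)
assert not inpoly([0, 2, 2, 0], [0, 0, 2, 2], 3, 1)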
|
def _separate_server_port_string(sql_server_and_port):
"""separates a server:port string
Parameters
----------
sql_server_and_port : str
the server:port string
Returns
-------
tuple
server (str) and port number
Raises
------
ValueError
if the string isn't properly formatted
"""
split_tuple = str(sql_server_and_port).split(":")
if len(split_tuple) == 1:
server = split_tuple[0]
port = 1433
elif len(split_tuple) == 2:
server, port = split_tuple
else:
raise ValueError(
"Only one : should appear in server name,"
" and it should be used to divide hostname from port number"
)
return server, port
|
def nth_row_pascal(n):
"""
Runtime compl O(n^2)
Space compl O(2n)
:param: - n - index (0 based)
return - list() representing nth row of Pascal's triangle
"""
curr_row = [1]
for i in range(1, n + 1):
prev_row = curr_row
curr_row = [1]
for j in range(i - 1):
curr_row.append(prev_row[j] + prev_row[j + 1])
curr_row.append(1)
return curr_row
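# Illustrative usage (0-based row index):
assert nth_row_pascal(0) == [1]
assert nth_row_pascal(4) == [1, 4, 6, 4, 1]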
|
def SFVec(point, format="%f"):
"""
    Format point, a (3,)-shaped object, as a space-separated string.
"""
return " ".join([format % x for x in point])
|
def quick_sort(ARRAY):
"""Pure implementation of quick sort algorithm in Python
:param collection: some mutable ordered collection with heterogeneous
comparable items inside
:return: the same collection ordered by ascending
Examples:
>>> quick_sort([0, 5, 3, 2, 2])
[0, 2, 2, 3, 5]
>>> quick_sort([])
[]
>>> quick_sort([-2, -5, -45])
[-45, -5, -2]
"""
ARRAY_LENGTH = len(ARRAY)
if( ARRAY_LENGTH <= 1):
return ARRAY
else:
PIVOT = ARRAY[0]
GREATER = [ element for element in ARRAY[1:] if element > PIVOT ]
LESSER = [ element for element in ARRAY[1:] if element <= PIVOT ]
return quick_sort(LESSER) + [PIVOT] + quick_sort(GREATER)
|
def convert_weight_pounds(weight) -> float:
"""
Converts Kilograms into Pounds
"""
pounds = round((weight * 2.20462), 5)
return pounds
|
def _make_set(value):
"""
Converts range/set specification to a concrete set of numbers
'[1-3]' => {1, 2, 3}
'{1,2,3}' => {1, 2, 3}
'{[1-3]} => {1, 2, 3}
'{[1-3],[5-7]} => {1, 2, 3, 5, 6, 7}
"""
result = set()
for vrange in value.strip('{} ').split(','):
if '[' not in vrange:
try:
result.add(int(vrange))
except ValueError:
pass
else:
try:
start, end = vrange.strip('[] ').split('-')
result.update(range(int(start.strip()), int(end.strip()) + 1))
except ValueError:
pass
return result
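# Illustrative usage (assumed range/set specification strings):
assert _make_set('{1,2,3}') == {1, 2, 3}
assert _make_set('{[1-3],[5-7]}') == {1, 2, 3, 5, 6, 7}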
|
def fibonacci(n):
"""
Def: In mathematics, the Fibonacci numbers are the numbers in the following
integer sequence, called the Fibonacci sequence, and characterized by the
fact that every number after the first two is the sum of the two preceding
ones: `0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, ...`
"""
fibonacci_sequence = [0, 1]
for i in range(0, n, 1):
next = fibonacci_sequence[i] + fibonacci_sequence[i + 1]
fibonacci_sequence.append(next)
return fibonacci_sequence[:-2]
|
def _proteinTagPresent(fastaHeader, tag):
"""Checks wheter a tag string is present in the fastaHeader.
:param fastaHeader: str, protein entry header from a fasta file
:returns: bool, True if tag is present in fastaHeader
"""
return (tag in fastaHeader)
|
def truncate(st, size=80):
"""
Truncate string to the maximum given size.
"""
if size is None:
return st
if len(st) > size:
return st[:size - 3] + '...'
return st
|
def days_in_month_360(month=0, year=0):
"""Days of the month (360 days calendar).
Parameters
----------
month : int, optional
(dummy value).
year : int, optional
(dummy value).
Returns
-------
out : list of int
days of the month.
Notes
-----
Appropriate for use as 'days_in_cycle' function in :class:`Calendar`.
This module has a built-in 360 days calendar with months:
:data:`Cal360`.
"""
return list(range(1, 31))
|
def humanize_name(name):
""" Return a canonical representation of a name in First Last format."""
if not isinstance(name, str):
return name
elif name.upper() == name:
return " ".join([part.strip().title() for part in name.split(",")][::-1])
else:
return " ".join([part.strip() for part in name.split(",")][::-1])
|
def get_err(result, stats):
"""
Return an 'error measure' suitable for informing the user
about the spread of the measurement results.
"""
a, b = stats['q_25'], stats['q_75']
return (b - a) / 2
|
def isValidMac(mac):
"""Checks whether MAC address is valid."""
assert mac is not None
bytes = mac.split(":")
if len(bytes) != 6:
return False
    for byte in bytes:
        try:
            val = int(byte, 16)
        except ValueError:
            # non-hex characters make the address invalid
            return False
        if val < 0 or val > 255:
            return False
return True
|
def _ranges(points):
""" walks the list of points and finds the
max/min x & y values in the set """
minX = points[0][0]; minY = points[0][1]
maxX = minX; maxY = minY
for x,y in points:
minX = min(x, minX)
minY = min(y, minY)
maxX = max(x, maxX)
maxY = max(y, maxY)
return ((minX, minY), (maxX, maxY))
|
def group_indexes_by(data, group):
"""
groups the indexes of an array.
data: Array of which the indexes should be sorted
group: Array of functions that should return if an item belongs to a group.
"""
# create an array of groups
groups = [[] for g in group]
# iterate through the data
for j, d in enumerate(data):
# and through the groups
for i, g in enumerate(group):
if g(d):
groups[i].append(j)
return groups
|
def code_type(m) -> str:
"""Returns a macro name"""
return ''.join(m)
|
def wrap(content,tag,lf=False):
"""embeds content string with html tags"""
if lf is False:
return "<"+tag+">"+content+"</"+tag+">"
else:
return "<"+tag+">\n"+content+"\n"+"</"+tag+">"
|
def get_instance_from_preempted_operation(operation, base_target_link) -> str:
"""Returns the instance name from a preempted |operation|."""
return operation['targetLink'][len(base_target_link):]
|
def say_hello(who):
"""Sample function."""
return "Hello %s!" % who
|
def _lcs(x, y):
"""
Finds longest common subsequence
Code adopted from https://en.wikibooks.org/wiki/Algorithm_Implementation/
Strings/Longest_common_subsequence#Python
"""
m = len(x)
n = len(y)
# An (m+1) times (n+1) matrix
c = [[0] * (n + 1) for _ in range(m + 1)]
for i in range(1, m + 1):
for j in range(1, n + 1):
if x[i - 1] == y[j - 1]:
c[i][j] = c[i - 1][j - 1] + 1
else:
c[i][j] = max(c[i][j - 1], c[i - 1][j])
def back_track(i, j):
if i == 0 or j == 0:
return ""
elif x[i - 1] == y[j - 1]:
return back_track(i - 1, j - 1) + x[i - 1]
else:
if c[i][j - 1] > c[i - 1][j]:
return back_track(i, j - 1)
else:
return back_track(i - 1, j)
return len(back_track(m, n))
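# Illustrative check with a classic pair of strings: LCS("ABCBDAB", "BDCABA") is "BCBA", length 4
assert _lcs("ABCBDAB", "BDCABA") == 4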
|
def ensure_unique_index(index, indices, i=1): # Indexed to 1 so +1 == 2nd, 3rd, 4th, etc. game
"""Check if index is in indices, modify index until it's unique, and return the unique index
If the index is unique, it's returned as is. Otherwise, the function calls itself and increments i. The recursion
stops when the index and numerical suffix (i) are not in indices. Used to create unique identifiers for multiple
matchups between the same teams.
Args:
index: A string index to check for in indices
indices: A list of indices to check the index against
i: A numerical suffix used to modify index until it does not exist in indices
Returns:
index, or a modified form of index, that does not exist in indices
"""
if index in indices:
i = i+1
test_index = "{}{}".format(index, i)
if test_index in indices:
return ensure_unique_index(index, indices, i)
else:
return test_index
else:
return index
|
def bboxes_overlap(
min_lon_1, min_lat_1, max_lon_1, max_lat_1, min_lon_2, min_lat_2, max_lon_2, max_lat_2
):
"""
Returns a boolean representing whether the given two bboxes overlap at any point.
"""
# If one bbox is on left side of other
if min_lon_1 >= max_lon_2 or min_lon_2 >= max_lon_1:
return False
# If one bbox is above other
if min_lat_1 >= max_lat_2 or min_lat_2 >= max_lat_1:
return False
return True
|
def get_json_path(data_dir: str,
data_type: str,
split: str = '1.0') -> str:
"""
Call as
get_json_path(data_dir=data_dir, data_type=data_type)
:param data_dir:
:param data_type:
:param split:
:return:
"""
json_path = f"{data_dir}/visdial_{split}_{data_type}.json"
return json_path
|
def serialize_notification(notification):
"""Serialize notification from db."""
if notification is None:
return {}
notification.triggered_on = notification.triggered_on.isoformat()
notification = notification.__dict__
del notification["_sa_instance_state"]
return notification
|
def check(N):
"""
Test if N is prime.
Inputs:
- N: an integer
Output:
- A boolean
"""
    if N < 2:
        # 0, 1 and negative numbers are not prime
        return False
    potential_factor = 1
while potential_factor < N / 2:
potential_factor = potential_factor + 1
if (N % potential_factor == 0):
return False
return True
|
def unlistify(x):
"""Convert a list of one element into that element.
No-op on list of multiple elements."""
if not isinstance(x, (list, tuple)):
raise TypeError("unlistify expects a list or tuple")
if len(x) == 1:
return x[0]
else:
return x
|
def blocks_aligned(code, block_len, max_rand):
"""Check if code contains repeating blocks.
Code contains max_rand number of random bytes as prefix; check if the
    prefix happens to be divisible by the block length, which can be observed by
    repeating blocks immediately following the prefix. Return the first index
    following the repeating blocks if found, else None.
"""
start1, start2, start3 = 0, 0 + block_len, 0 + (block_len * 2)
aligned = False
while start1 < max_rand + block_len:
fst = code[start1: start2]
snd = code[start2: start3]
third = code[start3: start3 + block_len]
# check for collision against randomly generated prefix
if fst == snd and snd != third:
aligned = True
break
else:
start1, start2, start3 = start2, start3, start3 + block_len
return start3 if aligned else None
|
def Hbits(probs):
"""Entropy of discrete distribution, measured in bits."""
from math import log
return sum(-x*log(x, 2) for x in probs if x !=0)
|
def convert_overlap(raw):
"""
Convert the short overlap strings from infernal into more readable names.
"""
if raw == "*" or raw == "unique":
return u"unique"
if raw == "^" or raw == "best":
return u"best"
if raw == "=" or raw == "secondary":
return u"secondary"
raise Exception("Unknown overlap symbol %s" % raw)
|
def get_public_metadata(container_output):
"""
Return a string containing any public metadata from stdout from the logs of this container.
"""
PREFIX=b'PUBLIC: '
to_return = b""
for line in container_output.splitlines():
if line.startswith(PREFIX):
to_return += line[len(PREFIX):]
to_return += b"\n"
if to_return == b"":
return None
else:
return to_return.strip()
|
def hex_to_binary(hex_string: str) -> str:
"""Convert hexadecimal string to binary
Args:
hex_string (str): Hexadecimal string
Returns:
str: Binary string
"""
return f"{int(hex_string, 16):0{len(hex_string * 4)}b}"
|
def episode_finished(stats):
"""
Function called after each episode of the agent (after designing an entire candidate solution).
Args:
stats: Statistics to be printed.
Returns:
True, meaning to continue running.
"""
print(stats)
return True
|
def get_middle(s):
"""
You are going to be given a word. Your job is to return the middle character of the word. If the word's length is
odd, return the middle character. If the word's length is even, return the middle 2 characters.
:param s: A word (string) of length 0 < str < 1000 (In javascript you may get slightly more than 1000
in some test cases due to an error in the test cases). You do not need to test for this. This
is only here to tell you that you do not need to worry about your solution timing out.
:return: The middle character(s) of the word represented as a string.
"""
return s[int((len(s)-1)/2):int(len(s)/2+1)]
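# Illustrative usage (even and odd length words):
assert get_middle("test") == "es"
assert get_middle("testing") == "t"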
|
def get_nrows(nrows: str):
"""
Convert a nrows string either to integer or None.
Parameters
----------
nrows : str
String with number of rows or 'None'.
Returns
-------
nrows : Union[int, None]
Number of rows as int or None if conversion fails.
"""
try:
return int(nrows)
except ValueError:
return None
|
def lists_to_dicts(obj):
"""Convert lists in a JSON-style object to dicts recursively
Examples:
>>> lists_to_dicts([3, 4])
{"0": 3, "1": 4}
>>> lists_to_dicts([3, [4, 5]])
{"0": 3, "1": {"0": 4, "1": 5}}
>>> lists_to_dicts({"a": [3, 4], "b": []})
{"a": {"0": 3, "1": 4}, "b": {}}
"""
if isinstance(obj, dict):
return {key: lists_to_dicts(value) for key, value in obj.items()}
if isinstance(obj, list):
return {str(idx): lists_to_dicts(value) for idx, value in enumerate(obj)}
return obj
|
def median(x):
"""
Median of X.
:param list or tuple x: array to calculate median.
:return: median.
:rtype: int or float
:raise ValueError: when len of x == 0.
"""
if x:
sorted_x = sorted(x)
n = len(x)
mid = n // 2
if n % 2 == 1:
return sorted_x[mid]
else:
return (sorted_x[mid] + sorted_x[mid-1]) / 2
else:
raise ValueError('len of x == 0')
|
def get_middle_four_bit(bits6):
"""Return first and last bit from a binary string"""
fourbits = bits6[1:5]
return fourbits
|