content
stringlengths 42
6.51k
|
|---|
def crossProduct(p1, p2, p3):
    """Return the z-component of the cross product (P2 - P1) x (P3 - P2).

    :param p1: Point #1 (indexable as (x, y))
    :param p2: Point #2
    :param p3: Point #3
    :return: Scalar cross product (positive for a left turn p1->p2->p3)
    """
    ax, ay = p2[0] - p1[0], p2[1] - p1[1]
    bx, by = p3[0] - p2[0], p3[1] - p2[1]
    return ax * by - ay * bx
|
def gatekeeper_add_to_list_display(serial=False):
    """Return the extra fields for the Admin changelist ``list_display``.

    Serial models expose live-state columns; others expose public availability.
    """
    fields = ['show_publish_status']
    if serial:
        return fields + ['is_live', 'default_live']
    return fields + ['available_to_public']
|
def lcg(x, length=16):
    """Generate `length` keystream bytes from seed `x` with a linear
    congruential generator; a zero seed yields all-zero bytes."""
    if x == 0:
        return bytes(length)
    stream = bytearray(length)
    for pos in range(length):
        # Advance the LCG state, keeping the low 31 bits.
        x = (214013 * x + 2531011) & 2147483647
        stream[pos] = (x >> 16) & 255
    return bytes(stream)
|
def has_a_double_not_in_larger_group(s):
    """Return True when some digit appears in a run of exactly two.

    >>> has_a_double_not_in_larger_group('1234')
    False
    >>> has_a_double_not_in_larger_group('111123')
    False
    >>> has_a_double_not_in_larger_group('135679')
    False
    >>> has_a_double_not_in_larger_group('223450')
    True
    >>> has_a_double_not_in_larger_group('111111')
    False
    >>> has_a_double_not_in_larger_group('123789')
    False
    >>> has_a_double_not_in_larger_group('112233')
    True
    >>> has_a_double_not_in_larger_group('123444')
    False
    >>> has_a_double_not_in_larger_group('111122')
    True
    """
    digits = [int(ch) for ch in s]
    n = len(digits)
    start = 0
    while start < n:
        # Advance `end` to the first position past the current run.
        end = start
        while end < n and digits[end] == digits[start]:
            end += 1
        if end - start == 2:
            return True
        start = end
    return False
|
def make_key(pattoo_agent_program, key):
    """Namespace `key` with the agent program name for uniqueness.

    Args:
        pattoo_agent_program: Program name
        key: Key to namespace
    Returns:
        str: '<program>_<key>'
    """
    return '{}_{}'.format(pattoo_agent_program, key)
|
def planetary_temp(S, A, L=1.0):
    """Return the planetary equilibrium temperature from S*L*(1-A) = sigma*T**4.

    Arguments
    ---------
    S : float
        Incident solar energy.
    A : float
        Planetary albedo.
    Keyword Arguments
    -----------------
    L = 1.0 : float
        Normalised stellar luminosity.
    """
    SIGMA = 5.67032e-8  # Stefan-Boltzmann constant [W m^-2 K^-4]
    absorbed = S * L * (1 - A)
    return (absorbed / SIGMA) ** 0.25
|
def __neighcom(node, graph, status, weight_key):
"""
Compute the communities in the neighborhood of node in the graph given
with the decomposition node2com
"""
weights = {}
for neighbor, datas in graph[node].items():
if neighbor != node:
edge_weight = datas.get(weight_key, 1)
neighborcom = status.node2com[neighbor]
weights[neighborcom] = weights.get(neighborcom, 0) + edge_weight
return weights
|
def validate_int(arg):
    """Convert a sized value (e.g. a string) to int, mapping empty input to 0
    instead of letting int('') raise ValueError."""
    if not len(arg):
        return 0
    return int(arg)
|
def _exp_format(val, prec):
""" [Docstring]
"""
# Convert val using string formatting: Always a leading space;
# positive values with another leading space; negatives with the negative
# sign; one digit in front of the decimal, 'dec' digits after.
# Capital 'E' for the exponent.
out = " {{: #1.{0}E}}".format(prec).format(val)
# Return the results
return out
|
def file_type(filename, stream=False):
    """Detect a compressed file by its magic bytes.

    :param filename: path to a file, or (when stream=True) the file content
        itself as bytes (str is accepted and encoded latin-1 for
        backward compatibility).
    :param stream: when True, `filename` is the content, not a path.
    :return: 'gz', 'bz2' or 'zip' when a signature matches, else None.
    """
    # The magic signatures are byte sequences; the old str-keyed dict could
    # never match in Python 3, and opening a binary file in text mode could
    # raise UnicodeDecodeError before the comparison even ran.
    magic_dict = {b"\x1f\x8b\x08": "gz", b"\x42\x5a\x68": "bz2", b"\x50\x4b\x03\x04": "zip"}
    max_len = max(len(magic) for magic in magic_dict)
    if stream:
        data = filename
        if isinstance(data, str):
            # Latin-1 maps code points 0-255 straight to bytes.
            data = data.encode("latin-1", errors="replace")
        file_start = data[:max_len]
    else:
        with open(filename, "rb") as f:
            file_start = f.read(max_len)
    for magic, filetype in magic_dict.items():
        if file_start.startswith(magic):
            return filetype
    return None
|
def cipher(map_from, map_to, code):
    """Build a substitution cipher and decode `code` with it.

    map_from, map_to: strings of N unique lowercase letters; letter i of
    map_from maps to letter i of map_to.
    code: string containing only letters present in map_from.
    Returns (key_code, decoded): the mapping dict and the decoded string.
    """
    key_code = dict(zip(map_from, map_to))
    decoded = ''.join(key_code[ch] for ch in code)
    return (key_code, decoded)
|
def accuracy(y_true, y_pred):
    """Accuracy score: fraction of positions where the labels agree.

    Example:
        >>> from reason.metrics import accuracy
        >>> accuracy(y_true, y_pred)
        0.9358
    Args:
        y_true (list): Real labels.
        y_pred (list): Predicted labels returned by classifier.
    Returns:
        float: Accuracy score rounded to 4 decimal places.
    """
    total = len(y_true)
    correct = sum(1 for i in range(total) if y_true[i] == y_pred[i])
    # Round via string formatting to keep exactly four decimals.
    return float('{:.4f}'.format(correct / total))
|
def _split(text, plan):
    """Recursive function to split the *text* into an n-deep list,
    according to the :py:class:`hl7._ParsePlan`.
    """
    # Base condition, if we have used up all the plans
    if not plan:
        return text
    # When this level's separator does not apply, wrap the raw text once.
    if not plan.applies(text):
        return plan.container([text])
    # Parsing of the first segment is awkward because it contains
    # the separator characters in a field
    if plan.containers[0] == plan.factory.create_segment and text[:3] in ['MSH', 'FHS']:
        # Header segment: character 4 is the field separator and the run up
        # to the next field separator holds the encoding characters.
        seg = text[:3]
        sep0 = text[3]
        sep_end_off = text.find(sep0, 4)
        seps = text[4:sep_end_off]
        text = text[sep_end_off + 1:]
        # The separator characters are re-emitted as leading fields so the
        # message can round-trip.
        # NOTE(review): assumes a well-formed header (find() succeeds);
        # confirm against the hl7 library's parse-plan contract.
        data = [plan.factory.create_field('', [seg]), plan.factory.create_field('', [sep0]), plan.factory.create_field(sep0, [seps])]
    else:
        data = []
    if text:
        data = data + [_split(x, plan.next()) for x in text.split(plan.separator)]
    # Return the instance of the current message part according
    # to the plan
    return plan.container(data)
|
def _strip_tweet_hashtags(status_text: str) -> str:
"""Strip out words from tweet that are hashtags (ie. begin with a #)."""
text_split = [word for word in status_text.split() if not word.startswith("#")]
text = " ".join(text_split)
return text
|
def check_config_ex_len(model, config_ex):
    """Check that ``config_ex`` has the expected length for ``model``.

    Returns True when the model id is known and the length matches,
    False otherwise.
    """
    # Expected config_ex length per model id; a lookup table replaces the
    # repetitive if/elif chain.
    expected = {
        "0001": 6,
        "0002": 3,
        "0100": 8,
        "0102": 8,
        "0103": 2,
        "0104": 2,
        "0105": 2,
        "0107": 5,
    }
    # Short-circuit so len() is never evaluated for unknown models.
    return model in expected and len(config_ex) == expected[model]
|
def _list(values):
"""
>>> assert _list([1,2,[3,4,5,[6,7]],dict(a =[8,9], b=[10,[11,12]])]) == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
>>> assert _list(1) == [1]
>>> assert _list(dict(a=1, b=2)) == [1,2]
"""
if isinstance(values, list):
return sum([_list(df) for df in values], [])
elif isinstance(values, dict):
return _list(list(values.values()))
else:
return [values]
|
def make_initiator_target_all2all_map(initiator_wwpns, target_wwpns):
    """Build a simplistic all-to-all mapping.

    Each initiator WWPN (keyed as str) maps to its own list containing every
    target WWPN.
    """
    i_t_map = {}
    for i_wwpn in initiator_wwpns:
        # Key consistently via str(): the original stored str(i_wwpn) but
        # appended through i_t_map[i_wwpn], raising KeyError whenever an
        # initiator WWPN was not already a string.
        i_t_map[str(i_wwpn)] = list(target_wwpns)
    return i_t_map
|
def period_at_end(token):
    """
    Args:
        token (str): word being evaluated
    Returns:
        bool: True if the last character is a period, False otherwise
        (including for an empty token).
    """
    # str.endswith replaces `list(token).pop() is "."`, which compared by
    # identity (only worked via CPython string interning) and raised
    # IndexError on an empty token.
    return token.endswith(".")
|
def linear_service_fee(principal, fee=0.0):
    """Calculate a service fee proportional to the principal.

    With principal :math:`S` and fee aliquot :math:`g`, the fee is
    :math:`gS`.
    """
    amount = principal * fee
    return float(amount)
|
def divide_grupos(alunos, grupos):
    """Split `alunos` students into `grupos` groups as evenly as possible.

    For an exact division, returns the common group size (int). Otherwise
    returns a list [count of groups of size A, size A,
                    count of groups of size B, size B].

    >>> divide_grupos(40, 5)
    8
    >>> divide_grupos(20, 4)
    5
    >>> divide_grupos(21, 4)
    [3, 5, 1, 6]
    >>> divide_grupos(38, 8)
    [6, 5, 2, 4]
    """
    resto = alunos % grupos
    base = alunos // grupos
    if resto == 0:
        return base
    if resto < base:
        # Small remainder: grupos-1 groups of `base`; one group absorbs the rest.
        cheios = grupos - 1
        sobra = alunos - cheios * base
        return [cheios, base, grupos - cheios, sobra]
    # Large remainder: bump the common size by one, then rebalance when the
    # leftover group would be two or more students smaller than the others.
    tamanho = base + 1
    cheios = grupos - 1
    outros = grupos - cheios
    sobra = alunos - cheios * tamanho
    if tamanho - sobra >= 2:
        cheios -= 1
        outros += 1
        sobra += 1
    return [cheios, tamanho, outros, sobra]
if __name__ == '__main__':
    # Run this module's doctests (e.g. the divide_grupos examples) when the
    # file is executed directly.
    import doctest
    doctest.testmod()
|
def value_type(value, types):
    """
    Check that the ``value`` type is one of ``types``.
    Parameters
    ----------
    value: Any
        Variable to check its type.
    types: type or tuple or list
        Acceptable types. Could be one type, or a tuple or list of types.
    Raises
    ------
    ValueError
        Raised when ``value`` is not any of the specified ``types``.
    Returns
    -------
    success: bool
        Return True.
    """
    # isinstance() rejects a list of types with TypeError, despite the
    # docstring promising "tuple or array" -- normalize to a tuple first.
    if isinstance(types, (tuple, list)):
        type_tuple = tuple(types)
        string_types = ' or '.join(t.__name__ for t in type_tuple)
    else:
        type_tuple = types
        string_types = types.__name__
    if not isinstance(value, type_tuple):
        raise ValueError(
            'Value {value} is {value_type}, but should be {types}!'
            .format(value=value, value_type=type(value).__name__, types=string_types)
        )
    return True
|
def extend_list_series(nestlist):
    """Concatenate the nested iterables of `nestlist` into a single list."""
    flattened = []
    for chunk in nestlist:
        flattened += chunk
    return flattened
|
def round_down(x, n):
    # type: (int, int) -> int
    """Round `x` down to the nearest multiple of `n`."""
    multiples = x // n
    return multiples * n
|
def calculate_grid_points(size, buffer, bars_per_line, lines_per_page):
    """
    Calculate bar-line x-coordinates and centre-staff-line y-coordinates.

    Parameters
    ----------
    size : 2-tuple of ints
        Pixel size of the output image (X,Y).
    buffer : int
        Size of white space on all sides of the output image, in pixels.
    bars_per_line : int
    lines_per_page : int

    Returns
    -------
    (x_list, y_list) : two lists of coordinates.
    """
    usable_x = size[0] - 2 * buffer
    usable_y = size[1] - 2 * buffer
    # bars_per_line + 1 vertical bar lines, evenly spaced across the usable width.
    x_list = [buffer + i * usable_x / bars_per_line
              for i in range(bars_per_line + 1)]
    # Each staff line sits at the vertical centre of its band.
    y_list = [buffer
              + (usable_y / lines_per_page) / 2
              + i * usable_y / lines_per_page
              for i in range(lines_per_page)]
    return x_list, y_list
|
def pretty_ssh_key_hash(pubkey_fingerprint):
    """Parse a raw ssh-keygen fingerprint line into a dict.

    Expected input shape: "KEY_BITS KEY_HASH [comment] (AUTH_TYPE)".
    Returns {'bits', 'hash', 'auth_type', 'rate'} with a strength rating.
    """
    parts = pubkey_fingerprint.split(' ')
    try:
        key_bits = int(parts[0])
    except (ValueError, IndexError):
        key_bits = 0
    try:
        key_hash = parts[1]
    except IndexError:
        # No second field -- fall back to the whole line.
        key_hash = pubkey_fingerprint
    try:
        auth_type = pubkey_fingerprint.split('(')[-1].split(')')[0]
    except IndexError:
        auth_type = 'Unknown'
    # Rate the key type/size; first matching rule wins.
    rate = 'UNKNOWN'
    if auth_type == 'DSA':
        rate = 'VERY LOW'
    elif (auth_type == 'RSA' and key_bits >= 4096) or (auth_type == 'ECDSA' and key_bits >= 256):
        rate = 'HIGH'
    elif auth_type == 'RSA' and key_bits >= 2048:
        rate = 'MEDIUM'
    elif auth_type == 'RSA' and key_bits < 2048:
        rate = 'LOW'
    elif auth_type == 'ED25519' and key_bits >= 256:
        rate = 'VERY HIGH'
    return {'bits': key_bits, 'hash': key_hash, 'auth_type': auth_type, 'rate': rate}
|
def find_prime(n):
    """
    Return the first prime strictly greater than n.
    """
    # The original looped `while primes[-1] < n`, so find_prime(7) returned 7
    # itself -- not a prime *greater* than n as documented. It also returned 3
    # for n < 2 (first prime greater than 1 is 2) and used `continue` where
    # `break` was intended, wasting divisions after a factor was found.
    if n < 2:
        return 2
    primes = [2, 3]
    candidate = 5
    while primes[-1] <= n:
        # Trial-divide by all primes found so far (candidates are odd).
        if all(candidate % p for p in primes):
            primes.append(candidate)
        candidate += 2
    return primes[-1]
|
def lines_into_traces(lines):
    """Convert a list of split ASCII text lines into traces (a list of lists of floats).

    The input rows are transposed into columns; cells that are missing or not
    parseable as numbers are skipped, so traces may differ in length.
    """
    # One trace per column of the first row.
    num_of_traces = len(lines[0])
    traces = [[] for _ in range(num_of_traces)]
    for line in lines:
        for i in range(num_of_traces):
            try:
                traces[i].append(float(line[i]))
            except (ValueError, TypeError, IndexError):
                # Cell is empty, short, or not a number -- skip it rather
                # than abort. (Replaces a bare `except:` that also swallowed
                # KeyboardInterrupt/SystemExit.)
                continue
    return traces
|
def ind_to_sub(n, ix):
    """Convert index from flattened upper triangular matrix to pair subindex.

    Parameters
    ----------
    n : int
        Dimension size of square array.
    ix : int
        Index to convert.
    Returns
    -------
    subix : tuple
        (i,j), or None when ix is out of range.
    """
    # Enumerate upper-triangle pairs (i < j) in row-major order until the
    # running offset reaches ix.
    pairs = ((i, j) for i in range(n - 1) for j in range(i + 1, n))
    for offset, pair in enumerate(pairs):
        if offset == ix:
            return pair
|
def _rk4_(xy, f, t, dt, **kwargs):
"""Integrate one time step with RK4"""
k1 = dt * f(t, xy, **kwargs)
k2 = dt * f(t + 0.5*dt, xy + 0.5*k1, **kwargs)
k3 = dt * f(t + 0.5*dt, xy + 0.5*k2, **kwargs)
k4 = dt * f(t + 0.5*dt, xy + 0.5*k3, **kwargs)
return xy + (k1 + k2 + k3 + k4)/6.
|
def tf(value):
    """
    Wrap the value with Terraform interpolation syntax.
    Usage: "{{ 'module.example.arn' | tf }}"
    Output: "${module.example.arn}"
    """
    return "".join(('${', value, '}'))
|
def get_matching_points(requested_file_names, all_file_names, object_points, image_points):
    """
    Select the object/image points belonging to a requested set of files.
    :param requested_file_names: files to look through
    :param all_file_names: the list of file names
    :param object_points: object points aligned with all_file_names
    :param image_points: image points aligned with all_file_names
    :return: (requested object points, requested image points)
    """
    wanted = set(requested_file_names)  # O(1) membership tests
    obj_pts = []
    img_pts = []
    for idx, fname in enumerate(all_file_names):
        if fname not in wanted:
            continue
        obj_pts.append(object_points[idx])
        img_pts.append(image_points[idx])
    return obj_pts, img_pts
|
def is_watched_asn(parameters, asn):
    """Is this process responsible for the given AS ?"""
    watched = parameters["ases"]
    if watched is not None:
        # An explicit AS list takes precedence.
        return asn in watched
    # Otherwise ASes are sharded across processes by job id modulo num_jobs.
    return asn % parameters["num_jobs"] == parameters["job_id"]
|
def update_config(config, update, merge=True):
    """
    Overlay ``update`` onto ``config`` and return the result as a new dict.
    Keys only in one of the two dicts are copied through. For keys in both,
    when ``merge`` is True dict values are merged recursively and list
    values are concatenated; all other values are replaced by ``update``'s.
    """
    merged = {}
    for key, value in config.items():
        if key not in update:
            merged[key] = value
        elif merge and isinstance(value, dict):
            merged[key] = update_config(value, update[key])
        elif merge and isinstance(value, list):
            merged[key] = value + update[key]
        else:
            merged[key] = update[key]
    # Bring over keys that exist only in `update`.
    for key, value in update.items():
        if key not in config:
            merged[key] = value
    return merged
|
def build_cgi_environ(wsgi_environ, git_project_root, user=None):
    """Build a CGI environ from a WSGI environment.

    Copies only string-valued WSGI keys (CONTENT_TYPE, PATH_INFO,
    QUERY_STRING, REMOTE_ADDR, REQUEST_METHOD, ...), then sets
    GIT_HTTP_EXPORT_ALL and GIT_PROJECT_ROOT. The git bare repo is located at
    GIT_PROJECT_ROOT + PATH_INFO (or at PATH_TRANSLATED). If REMOTE_USER is
    already present in wsgi_environ, normally leave `user` alone; REMOTE_USER
    defaults to 'unknown' when absent.
    """
    # Keep only str values: WSGI carries non-string objects (wsgi.input etc.)
    # that a CGI child process cannot receive.
    cgi_environ = {key: value for key, value in wsgi_environ.items()
                   if isinstance(value, str)}
    cgi_environ['GIT_HTTP_EXPORT_ALL'] = '1'
    cgi_environ['GIT_PROJECT_ROOT'] = git_project_root
    if user:
        cgi_environ['REMOTE_USER'] = user
    cgi_environ.setdefault('REMOTE_USER', 'unknown')
    return cgi_environ
|
def factorial(n):
    """Return factorial.
    Args:
        n (int): Argument (non-negative)
    Returns:
        int: Factorial of n
    Raises:
        ValueError: if n is not a non-negative int
    """
    # Explicit raise replaces `assert`, which is stripped under `python -O`.
    if type(n) != int or n < 0:
        raise ValueError('n must be a non-negative integer')
    # Iterative product avoids RecursionError for large n.
    result = 1
    for k in range(2, n + 1):
        result *= k
    return result
|
def str_to_microsec(str_1, str_2):
    """
    Convert a (value, unit) pair into microseconds.
    Example inputs:
        "100", "us"
        "2.76", "ms"
        "3", "s"
    Raises ValueError for an unrecognized unit.
    """
    scale = {"us": 1., "ms": 1.e3, "s": 1.e6}
    if str_2 not in scale:
        raise ValueError('Unrecognized time format: {:}.'.format(str_2))
    return float(str_1) * scale[str_2]
|
def pad(coll, size, padding, left=True):
    """
    Pad `coll` with copies of `padding` until it reaches `size` items.
    Returns `coll` unchanged when already long enough; otherwise returns a
    new list, padded at the beginning (left) by default.
    """
    missing = size - len(coll)
    if missing <= 0:
        return coll
    filler = [padding] * missing
    items = list(coll)
    if left:
        return filler + items
    return items + filler
|
def find_lcs(s1, s2):
    """Return (longest common substring of s1 and s2, its length)."""
    # dp[i+1][j+1] = length of the common suffix of s1[:i+1] and s2[:j+1].
    rows, cols = len(s1), len(s2)
    dp = [[0] * (cols + 1) for _ in range(rows + 1)]
    best_len = 0
    best_end = 0  # exclusive end position of the best match in s1
    for i, ch1 in enumerate(s1):
        for j, ch2 in enumerate(s2):
            if ch1 == ch2:
                dp[i + 1][j + 1] = dp[i][j] + 1
                if dp[i + 1][j + 1] > best_len:
                    best_len = dp[i + 1][j + 1]
                    best_end = i + 1
    return s1[best_end - best_len:best_end], best_len
|
def curate_list(input_list, words_list):
    """
    Keep the stripped tokens of `input_list` that appear in `words_list`.
    :param input_list: tokens to filter
    :param words_list: allowed words
    :return: list of stripped tokens present in words_list
    """
    cleaned = []
    for raw in input_list:
        token = raw.strip()
        # Skip whitespace-only tokens, then filter by the word list.
        if token and token in words_list:
            cleaned.append(token)
    return cleaned
|
def float_or_na(s):
    """
    Convert a string to float, mapping the missing-data marker "NA" to NaN.
    Parameters:
        s - string representation of a number, or "NA"
    Return value:
        float value of s, or float NaN for "NA"
    """
    if s == "NA":
        return float("NaN")
    return float(s)
|
def list_duplicates_of(seq, item):
    """
    Return every index at which `item` occurs in `seq`.
    """
    positions = []
    search_from = 0
    while True:
        try:
            found = seq.index(item, search_from)
        except ValueError:
            # No further occurrence.
            return positions
        positions.append(found)
        search_from = found + 1
|
def get_average_score(tscores):
    """
    Gets the average score for models cross validated using the cross_validate function.
    :param tscores: List. The scores to be averaged. Should be the result of a cross validation using the cross_validate function.
    """
    # sum()/len() replaces the old index loop, which accumulated
    # tscores[i-1] for i in range(0, len(tscores)) -- the same total, but
    # summed in a confusing rotated order starting at tscores[-1].
    return sum(tscores) / len(tscores)
|
def listAppend(list: list, appendObj):
    """Syntactic sugar: append `appendObj` to `list` and return it.

    Falsy objects (None, 0, '', ...) are not appended and None is returned.
    """
    if appendObj:
        list.append(appendObj)
        return appendObj
|
def transfer_user_click(user_click):
    """
    Invert the click mapping.
    :param user_click: dict, key userid, value: [itemid1, itemid2]
    :return: dict, key itemid, value: [userid1, userid2]
    """
    item_to_users = {}
    for user, items in user_click.items():
        for itemid in items:
            item_to_users.setdefault(itemid, []).append(user)
    return item_to_users
|
def convertJsonToDictionary(data):
    """
    :param data: this is a caching efficiency dictionary in a string format
    :return: the python dictionary format of the data string
    """
    # this is the format of the output
    output = {"#Payloads": [], "#queries": [],
              "#uniqueQueries": [], "PayloadSize (MB)": [], "folders": []}
    # Ad-hoc parse: strip the outer braces, then split on ',"' which starts
    # each new key. NOTE(review): assumes the four numeric keys always come
    # first and "folders" last, and that values contain no '}' -- confirm
    # against the producer of this string; json.loads would be safer if the
    # input were valid JSON.
    data = data.strip('{}')
    componenets = data.split(',"')
    folders_value = ''
    # Everything from the 5th component onward belongs to "folders";
    # re-join the pieces the split broke apart.
    for comp in componenets[4:]:
        if folders_value:
            folders_value += ',"'
        folders_value += comp
    componenets = componenets[:4] + [folders_value]
    # add the right values to the corresponding key
    for comp in componenets:
        elements = comp.split(':')
        key, value = elements[0].strip('"'), elements[1].strip('[]')
        if key == "folders":
            # Folder names are quoted strings separated by '","'.
            value = value.split('","')
            for val in value:
                val = val.strip('"')
                output["folders"].append(val)
        else:
            # Numeric lists: floats for payload size, ints otherwise.
            value = value.split(',')
            if key == "PayloadSize (MB)":
                output[key] = [float(val) for val in value]
            else:
                output[key] = [int(val) for val in value]
    return output
|
def _build_archive_name(software, version, extension):
"""
Builds the name of an archive file for a software release.
:param software: software to build archive file name for
:type software: str
:param version: release of software to build archive file name for
:type version: str
:param extension: extension of the archive file
:type extension: str
:rtype: str
"""
return "{}-{}{}".format(software, version, extension)
|
def mean(arr):
    """Arithmetic mean of `arr`, or None for an empty/falsy input."""
    if not arr:
        return None
    return sum(arr) / len(arr)
|
def number(string):
    """Helper function to convert strings to int or floats.
    Input: string
    Output: Int, Float or String if non convertable.
    """
    # The old version caught ValueError from int() by returning the string
    # immediately, so "1.5" was never converted to float as documented.
    # Try int first, then float, then give the input back unchanged.
    try:
        return int(string)
    except (TypeError, ValueError):
        pass
    try:
        return float(string)
    except (TypeError, ValueError):
        return string
|
def rstrip_tuple(t: tuple):
    """Remove trailing zeroes in `t`.

    Fix: the previous fast path tested truthiness (`t[-1]`) while the loop
    tested `== 0`, so a trailing falsy element that is not zero (e.g. None
    or "") was dropped unconditionally. Now only elements equal to 0 are
    stripped.
    """
    end = len(t)
    while end > 0 and t[end - 1] == 0:
        end -= 1
    return t[:end]
|
def selective_title(str):
    """
    Convert a string to Title Case, leaving key initialisms untouched.
    Splits on spaces, Title-Cases every word that is not in the initialism
    list (NHS, PCN, CCG, BNF, std, STP, "(STP)"), and re-joins with spaces.
    Parameters
    ----------
    str : str
        string to be selectively converted to Title Case
    Returns
    -------
    str
        Selectively title-cased string
    """
    ALLCAPS = ["NHS", "PCN", "CCG", "BNF", "std", "STP", "(STP)", "NHS"]
    words = str.split(" ")
    converted = [word if word in ALLCAPS else word.title() for word in words]
    return " ".join(converted)
|
def prob9(total=1000):
    """
    A Pythagorean triplet is a set of three natural numbers, a < b < c, for
    which, a**2 + b**2 = c**2
    For example, 3**2 + 4**2 = 9 + 16 = 25 = 5**2.
    There exists exactly one Pythagorean triplet for which a + b + c = 1000.
    Find the product abc.
    """
    a = 0
    while True:
        a += 1
        # For each a, scan every b; c is forced by the sum constraint.
        for b in range(2, total):
            c = total - a - b
            if a * a + b * b == c * c:
                return a * b * c
|
def traverse_config_set(target, *args):
    """Set a value at a path inside a nested {"arg": ..., "config": {...}} tree.

    All positional args except the last form the path; the last is the value.
    Missing levels are created as {"arg": None, "config": {}} and the value is
    stored under the final level's "arg" key. Returns the mutated `target`.

    NOTE(review): pre-existing levels are assumed to already follow the
    {"arg": ..., "config": ...} shape -- a plain nested dict like the old
    doctest example ({'level': {'one': 1}}) would raise KeyError on "config";
    confirm the intended input shape with callers.
    """
    # Separate the path down from the value to set
    path, value = args[:-1], args[-1]
    current = target
    last = target
    for level in path:
        if not level in current:
            # Create the level with an empty payload and child config.
            current[level] = {"arg": None, "config": {}}
        last = current[level]
        current = last["config"]
    last["arg"] = value
    return target
|
def first(targets, cat, kwargs):
    """A target chooser that simply picks the first from the given list
    This is the default, particularly for the case of only one element in
    the list
    """
    targ = targets[0]
    if cat:
        # Look the chosen target up in the catalog; when per-target kwargs
        # exist, re-instantiate with those overrides.
        # NOTE(review): `kwargs.configure_new(...)` is invoked on the kwargs
        # mapping itself rather than on the catalog entry `s` -- looks
        # suspicious (possibly meant s.configure_new); confirm against the
        # catalog API before relying on this path.
        s = cat[targ]
        if kwargs and targ in kwargs:
            s = kwargs.configure_new(**kwargs[targ])
        return s
    else:  # pragma: no cover
        # for testing only
        return targ
|
def recup_centre(tuile_choisi, tuiles_zone_centre, tuile_permier_joueur):
    """Take every centre-zone tile of the same colour as the chosen tile.

    Removes those tiles from `tuiles_zone_centre` in place. When the
    first-player marker is still in the centre, it is taken along with the
    picked tiles and the flag is cleared.
    Returns (taken tiles, remaining centre tiles, first-player flag).
    """
    count = tuiles_zone_centre.count(tuile_choisi)
    # Remove every tile of the chosen colour from the centre zone.
    for _ in range(count):
        tuiles_zone_centre.remove(tuile_choisi)
    taken = [tuile_choisi] * count
    if tuile_permier_joueur:
        taken.append("premier_joueur")
        tuile_permier_joueur = False
    return taken, tuiles_zone_centre, tuile_permier_joueur
|
def bit_length(input):
    """
    Return the number of binary digits of input.
    EX: 7 (0b111) has length 3
    EX: 8 (0b1000) has length 4
    """
    # '{:b}' renders without the '0b' prefix (the sign, if any, is counted,
    # matching the original len(bin(x)) - 2 behavior).
    return len("{:b}".format(input))
|
def create_c1(dataset):
    """
    Create a list of unique items in transaction data.
    Represent each item as a frozenset of length 1, sorted by item.
    """
    # A set comprehension replaces the old O(n^2) `[item] in c` membership
    # scan; output (sorted single-item frozensets) is unchanged.
    unique_items = {item for transaction in dataset for item in transaction}
    return [frozenset((item,)) for item in sorted(unique_items)]
|
def bool_prop(name, node):
    """Look up the boolean property `name` on `node`, or None when absent."""
    try:
        value = node.get(name)
    except KeyError:
        # Some node types raise instead of returning a default.
        return None
    return value
|
def _init_weight(Dataset, d, method ="zero"):
"""initialization of weights
Parameters :
Dataset(Iterable):- data points with 1 at dth dimension and label at d+1th dimension
d(int):- dimension of data points
method(string):- method of initialisation with "zero" by default, "zero" giving zero initial weights while "first" giving the weights the same as the first data point
Return ;
w(Iterable):- initial weights
t(int):- number of updating rounds
"""
if(method == 'zero'):
return([0 for i in range(d)],0)
if(method == 'first' and len(Dataset[0])== d+1):
return(Dataset[0][:d],0)
|
def inherit_fom_basemodel(model: dict):
    """Change the schema to inherit from _OpenAPIGenBaseModel."""
    wrapped = {
        'allOf': [
            {'$ref': '#/components/schemas/_OpenAPIGenBaseModel'},
            {'type': 'object', 'properties': {}},
        ]
    }
    # 'title' and 'description' stay at the top level; every other key moves
    # into the object half of the allOf.
    for key, value in model.items():
        if key in ('title', 'description'):
            wrapped[key] = value
        else:
            wrapped['allOf'][1][key] = value
    return wrapped
|
def real_letter(character, key):
    """Decode one Caesar-shifted character; non-letters pass through."""
    if not character.isalpha():
        return character
    shifted = ord(character) - key
    if shifted < ord('a'):
        # Wrap shifts that fall below 'a' back to the end of the alphabet.
        shifted = ord('z') - abs(ord('a') - shifted) + 1
    return chr(shifted)
|
def marriage_tag(context, source):
    """
    Reformat your_marriage step
    Also show/hide optional questions

    Builds HTML table rows (question name | answer) from the answered
    questions in `source`. Marriage-specific rows (date and place of
    marriage) are only shown when the prequalification answers say the
    couple is legally married.
    """
    show_all = False
    tags = []
    # HTML fragments for a two-column table row.
    first_column = '<tr><td width="75%" style="padding-right: 5%">'
    second_column = '</td><td width="25%">'
    end_tag = '</td></tr>'
    marriage_location = ""
    married_date = ""
    married_date_q = ""
    common_law_date = ""
    common_law_date_q = ""
    marital_status_you = ""
    marital_status_you_q = ""
    marital_status_spouse = ""
    marital_status_spouse_q = ""
    # get married_marriage_like value to check if legally married or not
    for question in context.get('prequalification', ''):
        if question['question_id'] == 'married_marriage_like' and question['value'] == 'Legally married':
            show_all = True
            break
        elif question['question_id'] == 'married_marriage_like':
            break
    # Collect each relevant answer (question text + value) from the source.
    for item in source:
        q_id = item['question_id']
        value = item['value']
        q_name = item['question__name']
        if q_id == 'when_were_you_married':
            married_date_q = q_name
            married_date = value
        elif q_id == 'when_were_you_live_married_like':
            common_law_date_q = q_name
            common_law_date = value
        elif q_id.startswith('where_were_you_married'):
            # 'Other' is a placeholder choice, not a real location.
            if value == 'Other':
                continue
            marriage_location += value + '<br />'
        elif q_id == 'marital_status_before_you':
            marital_status_you_q = q_name
            marital_status_you = value
        elif q_id == 'marital_status_before_spouse':
            marital_status_spouse_q = q_name
            marital_status_spouse = value
    # Emit rows; marriage date and location only for legally married couples.
    if show_all and married_date != "":
        tags.append(first_column + married_date_q + second_column + married_date + end_tag)
    if common_law_date != "":
        tags.append(first_column + common_law_date_q + second_column + common_law_date + end_tag)
    if show_all and marriage_location != "":
        tags.append(first_column + "Where were you married" + second_column + marriage_location + end_tag)
    if marital_status_you != "":
        tags.append(first_column + marital_status_you_q + second_column + marital_status_you + end_tag)
    if marital_status_spouse != "":
        tags.append(first_column + marital_status_spouse_q + second_column + marital_status_spouse + end_tag)
    return ''.join(tags)
|
def flatten(l):
    """Flatten, like in ruby: collapse arbitrarily nested lists into one
    flat list; a non-list becomes a single-element list.

    Fix: the previous one-liner indexed l[0] on any list, so flatten([])
    (or any empty list nested inside the input) raised IndexError; an empty
    list now flattens to [].
    """
    if not isinstance(l, list):
        return [l]
    result = []
    for element in l:
        result.extend(flatten(element))
    return result
|
def t(a, b):
    """ @MG:reduce-on """
    # Sum the operands, then add the constant offset of 3.
    total = a + b
    return total + 3
|
def mean_across_arrays(arrays):
    """
    Computes elementwise mean across arrays.
    E.g. for input [[1, 2, 4], [5, 3, 6]] returns [3, 2.5, 5]
    :param arrays: list of arrays of the same length
    :return: elementwise average across arrays
    """
    count = len(arrays)
    width = len(arrays[0])
    # Average position i across every array (indexing keeps the original
    # IndexError behavior for ragged inputs).
    return [sum(arr[i] for arr in arrays) / count for i in range(width)]
|
def check_message_id_format(message_id):
    """Return the message id wrapped in '<' and '>' as required by the
    exchangelib filter format (existing brackets are kept)."""
    message_id = message_id.strip()
    prefix = "" if message_id.startswith("<") else "<"
    suffix = "" if message_id.endswith(">") else ">"
    return prefix + message_id + suffix
|
def ObjToString(obj, extra=' '):
    """Recursively render an object's attributes, one per indented line.

    :param obj: object to render (None renders as 'None')
    :param extra: indentation prefix, grows by one space per nesting level
        (Default value = ' ')
    """
    if obj is None:
        return 'None'
    lines = []
    for attr_name in sorted(obj.__dict__):
        attr = obj.__dict__[attr_name]
        if hasattr(attr, '__dict__'):
            # Nested object: recurse with deeper indentation.
            rendered = ObjToString(attr, extra + ' ')
        else:
            rendered = str(attr)
        lines.append(extra + (str(attr_name) + ' = ' + rendered))
    return str(obj.__class__) + '\n' + '\n'.join(lines)
|
def primary_function(x1, y1, x2, y2):
    """Return [a, b] for the line y = a*x + b through (x1, y1) and (x2, y2).

    a = (y2 - y1) / (x2 - x1)
    b = y1 - a*x1
    ----------
    a: float
    b: float
    """
    slope = (y2 - y1) / (x2 - x1)
    intercept = y1 - slope * x1
    return [slope, intercept]
|
def _median3(comparables, lo, mid, hi):
"""Sort the three elements of an array in ascending order in place and
return the middle index
Arguments:
comparables -- an array of which the elements can be compared
lo -- index 1 (inclusive)
mid -- index 2 (inclusive)
hi -- index 3 (inclusive)
"""
if comparables[lo] > comparables[mid]:
comparables[lo], comparables[mid] = comparables[mid], comparables[lo]
if comparables[mid] > comparables[hi]:
comparables[mid], comparables[hi] = comparables[hi], comparables[mid]
if comparables[lo] > comparables[mid]:
comparables[lo], comparables[mid] = comparables[mid], comparables[lo]
return mid
|
def convert_R_to_numpy_params(mu, theta):
    """
    Convert mean/dispersion parameterization of a negative binomial to the ones numpy supports
    See https://en.wikipedia.org/wiki/Negative_binomial_distribution#Alternative_formulations
    From https://stackoverflow.com/a/47406400/2966723
    """
    size = theta
    variance = mu + 1 / size * mu ** 2
    failure_prob = (variance - mu) / variance
    # numpy wants (n, p) with p the success probability.
    return size, 1 - failure_prob
|
def stringToInt(message):
    """
    Interpret the UTF-8 encoding of `message` as a big-endian unsigned int.
    """
    raw = message.encode('utf8')
    return int.from_bytes(raw, byteorder='big', signed=False)
|
def group(pairs):
    """Given (key,value) pairs, return a table mapping each key to a
    list of all its values."""
    table = {}
    for key, value in pairs:
        if key not in table:
            table[key] = []
        table[key].append(value)
    return table
|
def cer(e):
    """
    Canonicalize the representation of an undirected edge by returning its
    endpoints as a sorted tuple.
    """
    ordered = sorted(e)
    return tuple(ordered)
|
def get_E2K_subdict(the_dict, main_key, sub_key):
    """Return the_dict[main_key][sub_key], or {} when either level is
    missing or falsy. For use in the post-processing functions."""
    main = the_dict.get(main_key)
    if not main:
        return {}
    return main.get(sub_key, {})
|
def linear_search_recursive(array, item, index=0):
    """Recursively scan `array` from `index` for `item`; O(n).

    Returns the index of the first match at or after `index`, or len(array)
    when the item is absent.
    """
    if index >= len(array):
        return index
    if array[index] == item:
        return index
    return linear_search_recursive(array, item, index + 1)
|
def get_extra_couchdbs(config, couch_database_url, extra_db_names=()):
    """
    Create a mapping from database postfix to database url.
    :param config: list of database strings or (prefix, postfix) tuples
    :param couch_database_url: main database url
    :param extra_db_names: additional postfixes to include
    """
    suffixes = []
    for entry in config:
        if not isinstance(entry, tuple):
            continue
        _, suffix = entry
        if suffix:
            suffixes.append(suffix)
    suffixes.extend(extra_db_names)
    return {sfx: '%s__%s' % (couch_database_url, sfx) for sfx in suffixes}
|
def right_zero_pad(val, length=8):
    """Right-zero-pad short-form angle strings with zeros.

    Trailing whitespace is stripped first, then the string is padded on the
    right with '0' up to `length` (longer strings are returned unchanged).
    This reduces error checking, makes decimal conversions consistent, and
    makes the IOD/UK/RDE angle lengths uniform.
    """
    trimmed = val.rstrip()
    return trimmed.ljust(length, "0")
|
def get_non_lib(functions):
    """
    Get all non-library functions
    @param functions: List of db_DataTypes.dbFunction objects
    @return: a subset list of db_DataTypes.dbFunction objects that are not library functions.
    """
    non_lib = []
    for func in functions:
        if not func.is_lib_func:
            non_lib.append(func)
    return non_lib
|
def antiderE(V0, B0, B0pr, V):
    """
    Antiderivative of the Birch-Murnaghan E(V), split into named terms
    (evaluation order matches the original single expression).
    """
    ratio = V0 / V
    term1 = -((-6 + B0pr) * V)
    term2 = -((-4 + B0pr) * V0**2) / V
    term3 = 3 * (-14 + 3 * B0pr) * V0 * ratio**(1/3)
    term4 = 3 * (-16 + 3 * B0pr) * V * ratio**(2/3)
    return (9 * B0 * V0 * (term1 + term2 + term3 + term4)) / 16
|
def Get_IOState_upstream(topo, begin_TM):#{{{
    """Return the inside/outside state of the loop before a TM helix.

    Walks `topo` backwards starting from begin_TM (the sequence position
    at the beginning of the TM helix) and returns the first 'i' or 'o'
    encountered.  Returns "" when only gaps precede the helix.
    """
    for pos in range(begin_TM, -1, -1):
        state = topo[pos]
        if state in ('i', 'o'):
            return state
    return ''
|
def _get_sensitive_attibutes(known_sensitive_features, features):
"""
Return sensitive attributes in appropriate format
"""
# Extract new names of sensitive attributes
_sensitive_attributes = {} # it is a map because each entry contains all one-hot encoded variables
for _column in features:
if("_" in _column and _column.split("_")[0] in known_sensitive_features):
if(_column.split("_")[0] not in _sensitive_attributes):
_sensitive_attributes[_column.split("_")[0]] = [_column]
else:
_sensitive_attributes[_column.split("_")[0]].append(_column)
elif(_column in known_sensitive_features):
if(_column not in _sensitive_attributes):
_sensitive_attributes[_column] = [_column]
else:
_sensitive_attributes[_column].append(_column)
# Finally make a 2d list
sensitive_attributes = []
for key in _sensitive_attributes:
sensitive_attributes.append(_sensitive_attributes[key])
return sensitive_attributes
|
def _is_base_font(name):
"""
Used to filter out some special variants that we don't need
"""
MODIFIERS = ["Display", "Mono", "Slanted"]
for m in MODIFIERS:
if name.endswith(m):
return False
return True
|
def timestamp_to_day_timestamp(the_timestamp):
    """Round a timestamp down to the start of its day.
    Args:
        the_timestamp (int): the timestamp in sec
    Returns:
        int: the timestamp of the day boundary (multiple of 86400)
    """
    SECONDS_PER_DAY = 86400
    # t - (t mod day) == floor(t / day) * day for Python ints.
    return the_timestamp - the_timestamp % SECONDS_PER_DAY
|
def fib1(a, b, n):
    """Calculate the nth element of the generalized Fibonacci sequence
    seeded with a and b: f(1) = a, f(2) = b, f(n) = f(n-1) + f(n-2).

    Args:
        a: first seed value
        b: second seed value
        n (int): 1-based index of the element to return
    Returns:
        The nth sequence element.
    Raises:
        ValueError: if n < 1 (the previous double recursion never
            terminated for such inputs).
    """
    if n < 1:
        raise ValueError("n must be a positive index, got %r" % (n,))
    # Iterative pairwise update: O(n) time instead of the exponential
    # cost of the naive double recursion.
    for _ in range(n - 1):
        a, b = b, a + b
    return a
|
def split_host_port(host_port):
    """Return a tuple containing (host, port) of a string possibly
    containing both. If there is no port in host_port, the port
    will be None.
    Supports the following:
    - hostnames
    - ipv4 addresses
    - ipv6 addresses
    with or without ports. There is no validation of either the
    host or port.

    Fix: the one-colon branch used to return the raw `str.split` result
    (a list), contradicting the documented tuple return; every branch now
    returns a tuple.
    """
    colon_count = host_port.count(':')
    if colon_count == 0:
        # hostname or ipv4 address without port
        return host_port, None
    if colon_count == 1:
        # hostname or ipv4 address with port
        host, port = host_port.split(':', 1)
        return host, port
    # Two or more colons: ipv6.  A port requires the bracketed form
    # [ADDR]:PORT.
    if ']:' in host_port:
        host, port = host_port.split(']:', 1)
        if host.startswith('['):
            # for valid addresses, always true
            host = host[1:]
        return host, port
    # no port; may still be bracketed
    host = host_port
    if host.startswith('['):
        host = host[1:]
    if host.endswith(']'):
        host = host[:-1]
    return host, None
|
def is_sorted(items):
    """Return True if `items` is in non-decreasing order.
    Running time: O(n) worst case (short-circuits on the first inversion)
    Memory usage: O(1) extra"""
    return all(items[i] <= items[i + 1] for i in range(len(items) - 1))
|
def checke_do_reset(board_info):
    """Return True when the board's 'upload.auto_reset' entry is the
    exact string 'true'; missing or different values yield False."""
    auto_reset = board_info.get('upload.auto_reset', '')
    return auto_reset == 'true'
|
def params_to_lists(params):
    """Flatten a list of per-peak parameter dicts into the flat list
    leastsq expects: [yoffset, ymax, halfwidth, x0, ymax, halfwidth, x0, ...].

    yoffset is shared by all functions, so it is taken from the first
    entry only.

    :param params: list of dicts, each with keys 'yoffset', 'ymax',
        'halfwidth' and 'x0'
    :raises TypeError: if params is not a list
    :raises ValueError: if params is empty (previously this surfaced as a
        bare IndexError from params[0])
    """
    # isinstance (rather than type(...) !=) also accepts list subclasses.
    if not isinstance(params, list):
        raise TypeError('Incorrect data type: function params_to_list needs a list of dictionaries.')
    if not params:
        raise ValueError('params_to_lists needs at least one parameter dictionary.')
    flat = [params[0]['yoffset']]
    for peak in params:
        flat.extend((peak['ymax'], peak['halfwidth'], peak['x0']))
    return flat
|
def calc_channel_current(E, sigma, A):
    """
    Calculate the channel current as the product E * sigma * A.

    (Presumably field strength, conductivity, and area — confirm with
    callers; this function only multiplies its three arguments.)
    """
    return E * sigma * A
|
def match_nested_lists(l1, l2):
    """Match nested lists term for term, order-sensitively.
    :param l1: first list
    :param l2: second list
    :return: True or False
    Unlike a set comparison, order matters.  The lists may contain other
    lists or any objects for which == is a valid comparison; a list is
    never considered equal to a non-list.
    """
    if not (isinstance(l1, list) and isinstance(l2, list)):
        return False
    if len(l1) != len(l2):
        return False
    for left, right in zip(l1, l2):
        left_is_list = isinstance(left, list)
        right_is_list = isinstance(right, list)
        if left_is_list != right_is_list:
            # one side is a list, the other is not -> mismatch
            return False
        if left_is_list:
            if not match_nested_lists(left, right):
                return False
        elif left != right:
            return False
    return True
|
def sanitize_headers(headers):
    """Return a copy of `headers` safe for logging.

    The Authorization header is redacted rather than removed so that
    users of Basic Auth can still see that credentials were sent while
    debugging.  The input mapping is not modified.
    """
    sanitized = dict(headers)
    auth = sanitized.get('Authorization')
    if auth:
        sanitized['Authorization'] = '***redacted***'
    return sanitized
|
def get_reversed_dictionary(dictionary, keys):
    """Return a value->key mapping for the given keys.

    Keys absent from `dictionary`, or mapping to a falsy value (None, 0,
    ''), are skipped.
    """
    reversed_map = {}
    for key in keys:
        value = dictionary.get(key)
        if value:
            reversed_map[value] = key
    return reversed_map
|
def string_compare_rule(mob_param_attributes, hmi_param_attributes):
    """Check that an omitted "minlength" in MobileAPI corresponds to
    "minlength"="1" in HMI_API.  Should only be checked for
    "type"="String" parameters.

    Returns a (mobile_diff, hmi_diff) pair: both empty dicts when the
    attributes agree.  Side effect: back-fills
    mob_param_attributes["minlength"] = "1" when HMI carries the default.
    """
    attr = "minlength"
    if attr in mob_param_attributes:
        # MobileAPI defines it explicitly -> nothing to reconcile here.
        return {}, {}
    if attr not in hmi_param_attributes:
        return {attr: None}, {attr: None}
    hmi_value = hmi_param_attributes[attr]
    if hmi_value != "1":
        return {attr: None}, {attr: hmi_value}
    mob_param_attributes[attr] = "1"
    return {}, {}
|
def get_seq_middle(seq_length):
    """Returns relative index for the middle frame in sequence.

    For even lengths the later of the two central frames is chosen.
    """
    last = seq_length - 1
    # int(x / 2) truncates toward zero, matching the original behavior.
    half_offset = int(last / 2)
    return last - half_offset
|
def get_lgnd_labels(handles, labels, key):
    """Return the (handle, label) pairs whose label contains `key`."""
    matched = []
    for handle, label in zip(handles, labels):
        if key in label:
            matched.append((handle, label))
    return matched
|
def sum_node_list(node_list):
    """Sum node_list pairwise with `+`.

    Avoids Python's built-in sum(), whose implicit 0 start value would
    create a redundant node when summing node objects.
    """
    import operator
    from functools import reduce
    return reduce(operator.add, node_list)
|
def scale(pot, scale_factor):
    """ Multiply every value of the potential by a constant factor.
    :param pot: potential along a coordinate
    :type pot: dict[tuple(float)] = float
    :param scale_factor: multiplicative scaling factor
    :type scale_factor: float
    :rtype: dict
    """
    return {coord: energy * scale_factor for coord, energy in pot.items()}
|
def no_disp_cpl(on=0):
    """Deny access to display settings (Windows registry snippet).

    Disables the Display icon in Control Panel, denying users access to
    any display settings.

    Compatibility: all Windows versions.
    Modified value:
        NoDispCPL : dword : 00000000 = disabled; 00000001 = enabled.

    :param on: truthy to enable the policy, falsy (default) to disable it
    :return: a .reg-format string setting the NoDispCPL value
    """
    key = ('[HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\'
           'CurrentVersion\\Policies\\System]')
    value = '00000001' if on else '00000000'
    return key + '\n"NoDispCPL"=dword:' + value
|
def fib(n):
    """Fibonacci example function
    Args:
        n (int): 1-based index, must be >= 1
    Returns:
        int: n-th Fibonacci number (fib(1) == fib(2) == 1)
    Raises:
        ValueError: if n < 1.  (Previously an `assert` guarded this,
            which is silently stripped under `python -O`.)
    """
    if n <= 0:
        raise ValueError("n must be a positive integer, got %r" % (n,))
    a, b = 1, 1
    for _ in range(n - 1):
        a, b = b, a + b
    return a
|
def get_method_type(method):
    """
    Classify a method by its name.

    Args:
        method (callable): The actual method to analyze.
    Returns:
        Union[str,None]: "graph_fn" when the method's name starts with
        "_graph_fn", "unknown" for any other callable, None if `method`
        is not callable.  (The docstring previously promised "api"/"other",
        which the code never returned.)
    """
    # Not a callable: Return None.
    if not callable(method):
        return None
    # graph_fns are recognized purely by their naming convention.
    if method.__name__.startswith("_graph_fn"):
        return "graph_fn"
    return "unknown"
|
def is_equivalent(seq1, seq2):
    """
    Checks for existence of a bijection between input sequences
    seq1 and seq2.
    """
    if len(seq1) != len(seq2):
        return False
    pairings = set(zip(seq1, seq2))
    # A bijection exists iff every symbol pairs with exactly one symbol
    # on the other side: the number of distinct pairings must equal the
    # size of both alphabets.
    return len(set(seq1)) == len(pairings) == len(set(seq2))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.