def matmul2(Ma, Mb):
"""
@brief Implements boolean -> integer matrix mult .
"""
assert len(Ma[0]) == len(Mb), \
"Ma and Mb sizes aren't compatible"
size = len(Mb)
Mres = [[0 for i in range(size)] for j in range(size)]
for i in range(size):
for j in range(size):
for k in range(size):
Mres[i][j] ^= Ma[i][k]*255 and Mb[k][j]
return Mres
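# Illustrative usage (not part of the original source): GF(2) product of two
# small 0/1 matrices.
assert matmul2([[1, 0], [1, 1]], [[1, 1], [0, 1]]) == [[1, 1], [1, 0]]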
|
def getUShort(data, index):
"""Returns two bytes from data as an unsigned 16-bit value"""
return (data[index+1] << 8) + data[index]
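# Illustrative usage (not part of the original source): the byte at `index`
# is the low byte (little-endian).
assert getUShort(b"\x34\x12", 0) == 0x1234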
|
def binary_to_hex(binary_data, byte_offset, length):
"""Converts binary data chunk to hex"""
hex_data = binary_data[byte_offset:byte_offset + length].hex()
return ' '.join(a+b for a,b in zip(hex_data[::2], hex_data[1::2]))
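# Illustrative usage (not part of the original source): a 3-byte chunk dumped
# as space-separated hex pairs.
assert binary_to_hex(b"\x01\xab\xff", 0, 3) == "01 ab ff"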
|
def metadataset2dataset_key(metadataset_name):
"""Return the dataset name corresponding to a metadataset name
Metadatasets are not ever stored in the HDF5 and instead are only used to
store data needed to correctly calculate dataset values. This function
maps a metadataset name to its corresponding dataset name.
Args:
metadataset_name (str): Name of a metadataset
Returns:
        str: Name of the corresponding dataset, or None if `metadataset_name`
            does not appear to be a metadataset name.
"""
if '/_num_' not in metadataset_name:
return None
else:
return metadataset_name.replace('/_num_', '/', 1)
|
def evaluate_accuracy(predictions, targets):
"""Evaluate accuracy
Args
predictions: (list of string)
targets: (list of list os string)
"""
correct = 0
total = len(targets)
for prediction, target in zip(predictions, targets):
if prediction in target:
correct += 1
accuracy = round((correct / total) * 100, 4)
return accuracy
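# Illustrative usage (not part of the original source): one of the two
# predictions appears in its target list.
assert evaluate_accuracy(["a", "b"], [["a", "c"], ["x"]]) == 50.0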
|
def convert_coordinates(q, conversion, axisorder):
"""
    Convert a 3-tuple in data coordinates into simplex data
    coordinates for plotting.
Parameters
----------
q: 3-tuple
the point to be plotted in data coordinates
conversion: dict
keys = ['b','l','r']
values = lambda function giving the conversion
axisorder: String giving the order of the axes for the coordinate tuple
e.g. 'blr' for bottom, left, right coordinates.
Returns
-------
p: 3-tuple
The point converted to simplex coordinates.
"""
p = []
for k in range(3):
p.append(conversion[axisorder[k]](q[k]))
return tuple(p)
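# Illustrative usage (not part of the original source), assuming a conversion
# dict that simply rescales percentages to fractions.
_conv = {axis: (lambda v: v / 100.0) for axis in "blr"}
assert convert_coordinates((20, 30, 50), _conv, "blr") == (0.2, 0.3, 0.5)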
|
def average_price(quantity_1, price_1, quantity_2, price_2):
"""Calculates the average price between two asset states."""
return (quantity_1 * price_1 + quantity_2 * price_2) / \
(quantity_1 + quantity_2)
|
def x_encode(X):
"""
Onehot encoder of sequences in X list
"""
X_encoding_dict = {'*': 0}
X_enc = []
for vec in X:
enc_vec = []
for aa in vec:
if aa not in X_encoding_dict:
max_val = max(X_encoding_dict.values())
X_encoding_dict.update({aa: max_val+1})
enc_vec.append(X_encoding_dict[aa])
X_enc.append(enc_vec)
return X_enc, X_encoding_dict
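# Illustrative usage (not part of the original source): symbols receive codes
# in order of first appearance, with '*' reserved as 0.
assert x_encode(["AB", "BA"]) == ([[1, 2], [2, 1]], {"*": 0, "A": 1, "B": 2})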
|
def get_fid2fidx_mappings(ch_info_dic):
"""
Go through channel features, looking for numerical features in need
of standard deviation calculation. Map feature ID to feature channel
index. Return both-way mappings.
>>> ch_info_dic = {'fa': ['C', [0], ['embed'], 'embed'], 'pc.con': ['N', [3], ['phastcons_score'], 'prob']}
>>> get_fid2fidx_mappings(ch_info_dic)
({'pc.con': 3}, {3: 'pc.con'})
"""
stdev_fid2fidx_dic = {}
stdev_fidx2fid_dic = {}
for fid in ch_info_dic:
feat_type = ch_info_dic[fid][0] # C or N.
feat_idxs = ch_info_dic[fid][1] # channel numbers occupied by feature.
l_idxs = len(feat_idxs)
if feat_type == "N" and l_idxs == 1:
stdev_fid2fidx_dic[fid] = feat_idxs[0]
stdev_fidx2fid_dic[feat_idxs[0]] = fid
return stdev_fid2fidx_dic, stdev_fidx2fid_dic
|
def einsteinA(S: float, frequency: float):
"""
Calculate the Einstein A coefficient for a transition with
specified transition frequency and intrinsic linestrength.
Parameters
----------
S : float
Intrinsic linestrength; unitless
frequency : float
Transition frequency in MHz
Returns
-------
float
Einstein A coefficient in units of per second
"""
# Prefactor is given in the PGopher Intensity formulae
# http://pgopher.chm.bris.ac.uk/Help/intensityformulae.htm
# Units of the prefactor are s^-1 MHz^-3 D^-2
# Units of Einstein A coefficient should be in s^-1
prefactor = 1.163965505e-20
return prefactor * frequency ** 3.0 * S
|
def _get_more_static_shape(shape0, shape1):
"""Compare two shapes with the same rank,
and return the one with fewer symbolic dimension.
"""
assert len(shape0) == len(shape1)
num_sym_dim0 = 0
num_sym_dim1 = 0
for dim0, dim1 in zip(list(shape0), list(shape1)):
if not isinstance(dim0, int):
num_sym_dim0 += 1
if not isinstance(dim1, int):
num_sym_dim1 += 1
if num_sym_dim0 < num_sym_dim1:
return shape0
return shape1
|
def _is_char(c):
"""Returns true iff c is in CHAR as specified in HTTP RFC."""
return ord(c) <= 127
|
def myround(x, base=5):
"""
Round a number to nearest '5'.
"""
return int(base * round(float(x)/base))
|
def generate_kinesis_event(region, partition, sequence, data):
"""
Generates a Kinesis Event
:param str region: AWS Region
:param str partition: PartitionKey in Kinesis
:param str sequence: Sequence Number as a string
:param str data: Data for the stream
:return dict: Dictionary representing the Kinesis Event
"""
return {
"Records": [{
"eventID": "shardId-000000000000:{}".format(sequence),
"eventVersion": "1.0",
"kinesis": {
"approximateArrivalTimestamp": 1428537600,
"partitionKey": partition,
"data": data,
"kinesisSchemaVersion": "1.0",
"sequenceNumber": sequence
},
"invokeIdentityArn": "arn:aws:iam::EXAMPLE",
"eventName": "aws:kinesis:record",
"eventSourceARN": "arn:aws:kinesis:EXAMPLE",
"eventSource": "aws:kinesis",
"awsRegion": region
}]
}
|
def clean_dict(dictionary):
"""Removes any `None` entires from the dictionary"""
return {k: v for k, v in dictionary.items() if v}
|
def _adjust_n_months(other_day, n, reference_day):
"""Adjust the number of times a monthly offset is applied based
on the day of a given date, and the reference day provided.
"""
if n > 0 and other_day < reference_day:
n = n - 1
elif n <= 0 and other_day > reference_day:
n = n + 1
return n
|
def extract_tag(tags, prefix):
"""Extracts a tag and its value.
Consumes a tag prefix, and extracts the value corresponding to it.
Args:
tags (list): the list of tags to check
prefix (str): the tag prefix
Returns:
A tuple of tag and tag value.
"""
for tag in tags:
if tag.startswith(prefix):
suffix = tag[len(prefix):]
if len(suffix) <= 1 or not suffix[0] == '=':
raise ValueError('Invalid tag: \'%s\'' % tag)
return prefix, suffix[1:]
return None, None
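# Illustrative usage (not part of the original source): tags are expected in
# 'prefix=value' form.
assert extract_tag(["env=prod", "ver=1"], "env") == ("env", "prod")
assert extract_tag(["ver=1"], "env") == (None, None)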
|
def get_nearest_value(iterable, value):
"""
    Return the element of `iterable` whose integer prefix (the part before the
    first '.') is closest to `value`.
"""
return min(iterable, key=lambda x: abs(int(x.split(".")[0]) - value))
|
def num_trees(n):
"""Returns the number of unique full binary trees with exactly n leaves. E.g.,
    1   2        3       3      ...
    *   *        *       *
       / \      / \     / \
      *   *    *   *   *   *
              / \         / \
             *   *       *   *
>>> num_trees(1)
1
>>> num_trees(2)
1
>>> num_trees(3)
2
>>> num_trees(8)
429
"""
"*** YOUR CODE HERE ***"
if n==1 or n==2:
return 1
return sum(num_trees(k)*num_trees(n-k) for k in range(1,n))
|
def parse_print_dur(print_dur):
"""
Parse formatted string containing print duration to total seconds.
>>> parse_print_dur(" 56m 47s")
3407
"""
h_index = print_dur.find("h")
hours = int(print_dur[h_index - 2 : h_index]) if h_index != -1 else 0
m_index = print_dur.find("m")
minutes = int(print_dur[m_index - 2 : m_index]) if m_index != -1 else 0
s_index = print_dur.find("s")
seconds = int(print_dur[s_index - 2 : s_index]) if s_index != -1 else 0
return hours * 60 * 60 + minutes * 60 + seconds
|
def get_status_message(status):
"""Returns the message of the given Mesos task status, possibly None
:param status: The task status
:type status: :class:`mesos_pb2.TaskStatus`
:returns: The task status message
:rtype: string
"""
if hasattr(status, 'message'):
return status.message
return None
|
def mean(x):
""" Calculate Arithmetic Mean without using numpy """
return sum([float(a) for a in x]) / float(len(x))
|
def cleanfield(value):
"""
remove spaces
"""
if not value:
return None
value = str(value)
value = value.strip()
return value
|
def pre_process_station_name(x):
"""
Standarized the station names. This step is necesary to merge different data sets later
"""
x = x.lower()
x = x.split()
return x[0]
|
def require_any(json, keys):
""" Require that the given dict-from-json-object contains at least one of the given keys. """
for k in keys:
if k in json:
return True
return False
|
def quandl_apikey_set(apikey, filename=None):
"""Store the Quandl Token in $HOME/.updoon_quandl
Parameters:
-----------
apikey : str
The API Key from the Quandl Website.
See https://www.quandl.com/account/api
filename : str
Absolute path to the text where the
Quandl API Key is stored (Optional)
"""
# set default path
if filename is None:
import pathlib
filename = str(pathlib.Path.home()) + "/.updoon_quandl"
    # write string to file
    with open(filename, 'w') as fileptr:
        fileptr.write(apikey)
    return None
|
def summing_it(data):
"""
Calculates sum of database dumps
:param data: accepts multi-dimensional iterable data type
:return: returns total amount in a FLOAT
"""
if data is None:
print("Something's gone horribly wrong.")
return 0
total = 0
for entry in data:
total += entry[0]
return round(total, 2)
|
def binary_tree_to_dll(node, head, tail):
"""This function converts a binary tree to a doubly linked list with recursive approach
input: tree root or node, head and tail pointer of DLL
returns : the head and tail of the the linked lists
"""
if node == None :
return head, tail
head, tail = binary_tree_to_dll(node.left, head, tail) # converting the left subtree
# updating the tail of the list to point towards current node
if head == None:
head = node
else :
tail.right = node
node.left = tail
tail = node # shifting the tail to the latest node
head, tail = binary_tree_to_dll(node.right, head, tail) # converting the right subtree
return head, tail
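# Illustrative usage (not part of the original source), assuming a minimal
# node type with `left`/`right` attributes (a guess at the expected tree shape).
class _Node:
    def __init__(self, val, left=None, right=None):
        self.val, self.left, self.right = val, left, right

_head, _tail = binary_tree_to_dll(_Node(2, _Node(1), _Node(3)), None, None)
_values, _node = [], _head
while _node is not None:
    _values.append(_node.val)
    _node = _node.right
assert _values == [1, 2, 3]  # nodes appear in in-order sequence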
|
def combine_variables(os_environ, args_variables):
"""Utility function to update environment with user-specified variables
.. note: When there is a key clash the user-specified args take precedence
:param os_environ: os.environ dict
:param args_variables: variables parsed from command line
:returns: merged dict
"""
variables = dict(os_environ)
if args_variables is not None:
variables.update(dict(args_variables))
return variables
|
def _extract_hostname(url: str) -> str:
"""Get the hostname part of the url."""
if "@" in url:
hn = url.split("@")[-1]
else:
hn = url.split("//")[-1]
return hn
|
def to_lutron_level(level):
"""Convert the given HASS light level (0-255) to Lutron (0.0-100.0)."""
return float((level * 100) / 255)
|
def use_c(c_val: str) -> str:
"""
Prints a string
"""
print(c_val)
return c_val
|
def insertion_sort(arr):
"""Refresher implementation of inserstion sort - in-place & stable.
:param arr: List to be sorted.
:return: Sorted list.
"""
for i in range(1, len(arr)):
tmp = arr[i]
j = i
# find the position for insertion
for j in range(i, len(arr)):
# the position is found if the prev element is smaller than current
if arr[j - 1] < tmp:
break
# shift to the right
arr[j] = arr[j - 1]
arr[j] = tmp
return arr
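# Illustrative usage (not part of the original source).
assert insertion_sort([3, 1, 2, 2]) == [1, 2, 2, 3]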
|
def nearest_square(y):
"""
Takes an integer argument limit, and returns the largest square number
that is less than limit. A square number is the product of an
integer multiplied by itself, for example 36 is a square number because
it equals 6*6.
"""
x = 0
while( x ** 2 <= y ): x += 1
if x >= 1: return (x - 1) ** 2
else: return 0
|
def _q(alpha, d, e, M):
"""
Solve Eq. 9
"""
return 2. * alpha * d * (1. - e) - M**2
|
def json_to_one_level(obj, parent=None):
"""
    Flatten a nested dict so that all paths are on one level, using
    dot-separated keys.
    Arguments:
        obj (dict): The dict to process.
        parent (str): The parent key. Used only with recursion.
    Return:
        dict: The flattened dict.
"""
output = {}
for key, value in obj.items():
if isinstance(value, dict):
if parent is None:
output.update(json_to_one_level(value, key))
else:
output.update(
json_to_one_level(value, u".".join([parent, key]))
)
elif isinstance(value, list):
for index, item in enumerate(value):
item = {
str(index): item
}
if parent is None:
output.update(json_to_one_level(item, u".".join([key])))
else:
output.update(
json_to_one_level(item, u".".join([parent, key]))
)
else:
if parent is not None:
output[u".".join([parent, key])] = value
else:
output[key] = value
return output
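# Illustrative usage (not part of the original source): nested dicts and
# lists are flattened into dot-separated keys.
assert json_to_one_level({"a": {"b": 1}, "c": [10, 20]}) == {
    "a.b": 1,
    "c.0": 10,
    "c.1": 20,
}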
|
def remove(condition, all, names):
"""
Remove experiments that fulfill a boolean condition.
Example::
        all = remove('(w < 1.0 and p == 1.2) or (q in (1,2,3) and f < 0.1)', all, names)
    (names of the parameters must be used)
"""
import copy
for ex in copy.deepcopy(all): # iterate over a copy of all!
c = condition
for n in names: # replace names by actual values
#print 'replace "%s" by "%s"' % (n, repr(ex[names.index(n)]))
c = c.replace(n, repr(ex[names.index(n)]))
# note the use of repr: strings must be quoted
#print 'remove ',remove
if eval(c): # if condition
all.remove(ex)
return all
|
def removePrefixes(word, prefixes):
"""
Attempts to remove the given prefixes from the given word.
Args:
word (string): Word to remove prefixes from.
prefixes (collections.Iterable or string): Prefixes to remove from given word.
Returns:
(string): Word with prefixes removed.
"""
if isinstance(prefixes, str):
return word.split(prefixes)[-1]
for prefix in prefixes:
word = word.split(prefix)[-1]
return word
|
def loc_sum(vals):
"""in case 'sum' does not exist, such as on old machines"""
try: tot = sum(vals)
except:
tot = 0
for val in vals: tot += val
return tot
|
def header_token(token):
"""
Set token in header
:param token:
:return Dict:
"""
return {'Authorization': '{0} {1}'.format('JWT', token)}
|
def get_samples_from_datalist(datalist):
"""
return a samples object from a list of data dicts
"""
return [[x] for x in datalist]
|
def paragraphs(linelist):
"""
Break a list of lines at blank lines into a list of line-lists.
"""
plist = []
newlinelist = []
for line in linelist:
line = line.strip()
if line:
newlinelist.append(line)
elif newlinelist:
plist.append(newlinelist)
newlinelist = []
if newlinelist:
plist.append(newlinelist)
return plist
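# Illustrative usage (not part of the original source): blank lines split the
# input into paragraphs.
assert paragraphs(["first", "line", "", "second"]) == [["first", "line"], ["second"]]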
|
def view_schedules(description, bus_stop_code, bus_selected, time):
"""
Message that will be sent when user wants to view their scheduled messages
"""
return '<b>Bus Stop: </b>{}\n<b>Bus Stop Code: </b>/{}\n<b>Buses: </b>{}<b>\nTime:</b> {}H\n' \
'<b>Frequency:</b> Daily'.format(description, bus_stop_code, bus_selected, time)
|
def select_choice(current, choices):
""" For the radiolist, get us a list with the current value selected """
return [(tag, text, (current == tag and "ON") or "OFF")
for tag, text in choices]
|
def _flatten(obj_to_vars):
"""
Object section is prefixed to variable names except the `general` section
[general]
warning = red
>> translates to: warning = red
[panel]
title = white
>> translates to: panel_title = white
"""
all_vars = dict()
for obj, vars_ in obj_to_vars.items():
if obj == 'general':
all_vars.update(vars_)
else:
all_vars.update({f"{obj}_{k}": v for k, v in vars_.items()})
return all_vars
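# Illustrative usage (not part of the original source): only the 'general'
# section keeps unprefixed variable names.
assert _flatten({"general": {"warning": "red"}, "panel": {"title": "white"}}) == {
    "warning": "red",
    "panel_title": "white",
}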
|
def leap_check(year) -> str:
""" Check if a year is a leap year """
if ((year % 4 == 0) and (year % 100 != 0) or (year % 400 == 0)):
return f"{year} is a leap year!"
else:
return f"{year} is not a leap year!"
|
def string_to_array(s):
"""Convert pipe separated string to array."""
import math
if isinstance(s, str):
out = s.split("|")
elif math.isnan(s):
out = []
else:
        raise ValueError("Value must be either a string or NaN")
return out
|
def get_datatype(value):
""" Determine most appropriate SQLite datatype for storing value.
SQLite has only four underlying storage classes: integer, real,
text, and blob.
For compatibility with other flavors of SQL, it's possible to
define columns with more specific datatypes (e.g. 'char',
'date'), but since these are still mapped to the underlying
storage classes there's not much point in using them when
generating native SQLite SQL.
Therefore, this function takes an incoming value and attempts
to guess the best SQLite datatype for storing it. This can
then be used to help decide the schema of the column where the
value will be stored.
It defaults to the text datatype, not the super-general blob
datatype, because read_csv reads each row in as text rather
than binary data.
Unlike in other SQL flavors, misdefining a column's datatype
affinity is not fatal, because in the end any type of value
can be stored in any column. In the end, the datatype
returned by this function is just a suggestion for SQLite.
See:
* https://www.sqlite.org/datatype3.html
* http://ericsink.com/mssql_mobile/data_types.html
* http://www.techonthenet.com/sqlite/datatypes.php
"""
try:
int(value)
return 'INTEGER'
except ValueError:
try:
float(value)
return 'REAL'
except ValueError:
return 'TEXT'
|
def RPL_YOUREOPER(sender, receipient, message):
""" Reply Code 381 """
return "<" + sender + ">: " + message
|
def idfn(fixture_value):
"""
This function creates a string from a dictionary.
We use it to obtain a readable name for the config fixture.
"""
return str(
"-".join(["{}:{}".format(k, v) for k, v in fixture_value.items()])
)
|
def call(f, *args, **kwargs):
"""Call f with args and kwargs"""
return f(*args, **kwargs)
|
def add_annotation(x, y, z):
""" Create plotly annotation dict. """
return {
"x": x,
"y": y,
"z": z,
"font": {"color": "black"},
"bgcolor": "white",
"borderpad": 5,
"bordercolor": "black",
"borderwidth": 1,
"captureevents": True,
"ay": -100,
"arrowcolor": "white",
"arrowwidth": 2,
"arrowhead": 0,
"text": "Click here to annotate<br>(Click point to remove)",
}
|
def get_permission(code):
"""Get permission."""
permission = {
0: "deny",
2: "ro",
3: "rw"
}
return permission.get(code, None)
|
def is_clean_packet(packet): # pragma: no cover
"""
Returns whether or not the parsed packet is valid
or not. Checks that both the src and dest
ports are integers. Checks that src and dest IPs
are valid address formats. Checks that packet data
is hex. Returns True if all tests pass, False otherwise.
"""
if not packet['src_port'].isdigit(): return False
if not packet['dest_port'].isdigit(): return False
if packet['src_ip'].isalpha(): return False
if packet['dest_ip'].isalpha(): return False
return True
|
def pad_sentences(sentences, sentence_length, padding_word="<PAD/>"):
"""
Pads all sentences to the same length. The length is defined by the longest sentence.
Returns padded sentences.
"""
padded_sentences = []
for i in range(len(sentences)):
sentence = sentences[i]
num_padding = sentence_length - len(sentence)
new_sentence = sentence + [padding_word] * num_padding
padded_sentences.append(new_sentence)
return padded_sentences
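# Illustrative usage (not part of the original source): shorter sentences are
# padded up to the requested length.
assert pad_sentences([["hi"], ["a", "b", "c"]], 3) == [
    ["hi", "<PAD/>", "<PAD/>"],
    ["a", "b", "c"],
]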
|
def kelvin_to_rankine(kelvin: float, ndigits: int = 2) -> float:
"""
Convert a given value from Kelvin to Rankine and round it to 2 decimal places.
Wikipedia reference: https://en.wikipedia.org/wiki/Kelvin
Wikipedia reference: https://en.wikipedia.org/wiki/Rankine_scale
"""
return round(kelvin * 9 / 5, ndigits)
|
def _find_replica_groups(global_x, inner_x, inner_y, outer_group_size):
"""Find replica groups for SPMD."""
groups_x = global_x // inner_x
inner_replica_groups = []
for group_id in range(outer_group_size):
sub_group_ids = []
row_group_id = group_id // groups_x
col_group_id = group_id % groups_x
starting_id = row_group_id * inner_y * global_x + col_group_id * inner_x
for y in range(inner_y):
inner_group_ids = list(range(starting_id, starting_id + inner_x))
if y % 2 == 1:
inner_group_ids.reverse()
sub_group_ids.extend(inner_group_ids)
starting_id += global_x
inner_replica_groups.append(sub_group_ids)
return inner_replica_groups
|
def yesify(val):
"""Because booleans where not invented 150 years ago."""
return "yes" if val else "no"
|
def buildResultString(data):
"""
Creates a line for the algorithm result text file.
Parameters
----------
    data : dict
        The dictionary which contains the data for a repetition of an algorithm.
Returns
----------
A string which represents one line of the algorithm result text file.
"""
content = "RESULT\t"
for key, value in data.items():
content += "{}={}\t".format(key, value)
content += "\n"
return content
|
def hgvs_str(gene_symbols, hgvs_p, hgvs_c):
""" """
if hgvs_p[0] != "None":
return hgvs_p[0]
if hgvs_c[0] != "None":
return hgvs_c[0]
return "-"
|
def sort_protein_group(pgroup, sortfunctions, sortfunc_index):
"""Recursive function that sorts protein group by a number of sorting
functions."""
pgroup_out = []
subgroups = sortfunctions[sortfunc_index](pgroup)
sortfunc_index += 1
for subgroup in subgroups:
if len(subgroup) > 1 and sortfunc_index < len(sortfunctions):
pgroup_out.extend(sort_protein_group(subgroup,
sortfunctions,
sortfunc_index))
else:
pgroup_out.extend(subgroup)
return pgroup_out
|
def build_url_params(params):
"""
Tag that takes a dict in a template and turns it into a url
params string, including the ?. Only accepts things that
can be converted into a string
Arugments
obj: a dictionary
"""
data = []
# put our params into our extra data
for item in params.items():
key = str(item[0])
val = str(item[1])
data.append( '%s=%s' % (key,val) )
if len(data):
return '?%s' % '&'.join(data)
else:
return ''
|
def sizeof_fmt(num) -> str:
"""Print size of a byte number in human-readable format.
Args:
num: File size in bytes.
Return:
str: File size in human-readable format.
"""
for unit in ["B", "K", "M", "G", "T", "P", "E", "Z"]:
if abs(num) < 1024.0:
if abs(num) < 100:
return "%3.1f%s" % (num, unit)
return "%3.0f%s" % (num, unit)
num /= 1024.0
return "%.1f%s" % (num, "Y")
|
def kernel_sigmas(n_kernels):
"""
    Get sigmas for each Gaussian kernel.
    :param n_kernels: number of kernels (including the exact-match kernel)
    :return: l_sigma, a list of sigmas
    """
    l_sigma = [0.001]  # for exact match. small variance -> exact match
    if n_kernels == 1:
        return l_sigma
    l_sigma += [0.1] * (n_kernels - 1)
return l_sigma
|
def titleize(text):
"""Capitalizes all the words and replaces some characters in the string
to create a nicer looking title.
"""
if len(text) == 0: # if empty string, return it
return text
else:
text = text.lower() # lower all char
# delete redundant empty space
chunks = [chunk[0].upper() + chunk[1:] for chunk in text.split(" ") if len(chunk) >= 1]
return " ".join(chunks)
|
def identity(*args):
"""
Return whatever is passed in
Examples
--------
>>> x = 1
>>> y = 2
>>> identity(x)
1
>>> identity(x, y)
(1, 2)
>>> identity(*(x, y))
(1, 2)
"""
return args if len(args) > 1 else args[0]
|
def compile_playable_podcast(playable_podcast):
"""
@para: list containing dict of key/values pairs for playable podcasts
"""
items = []
for podcast in playable_podcast:
items.append({
'label': podcast['title'],
'thumbnail': podcast['thumbnail'],
'path': podcast['url'],
# 'info': podcast['desc'],
'is_playable': True,
})
return items
|
def delimit(delimiters, content):
"""
Surround `content` with the first and last characters of `delimiters`.
>>> delimit('[]', "foo") # doctest: +SKIP
'[foo]'
>>> delimit('""', "foo") # doctest: +SKIP
'"foo"'
"""
if len(delimiters) != 2:
raise ValueError(
"`delimiters` must be of length 2. Got %r" % delimiters
)
return ''.join([delimiters[0], content, delimiters[1]])
|
def make_sampling_histogram(unique_words):
"""Helper function for test_stochastic_sample (below).
Given a list of words, return a dictionary representing a histogram.
All values begin at zero.
Param: unique_words(list): every distinct type of word, will be a key
Return: histogram_empty(dict)
"""
histogram_empty = dict()
for word in unique_words:
histogram_empty[word] = 0
return histogram_empty
|
def pay_minimums(principal, payment):
"""Make the minimum payments first."""
if principal - payment <= 0:
payment = principal
return principal - payment, payment
|
def _make_url(term):
"""
    Return a KEGG REST API URL for the given term.
"""
return f"http://rest.kegg.jp/get/{term}"
|
def update_jstruct(jstruct, elem_struct, val, keep):
"""
Update json structure (recursive function)
:param jstruct: jstruct to update
:param elem_struct: nested field represented as list
:param val: value of the nested field
:param keep: if true write None values instead of skipping them
:return: json structure updated
"""
if len(elem_struct) == 1:
        try:
            if val == '':
                val = None
            if val is None and not keep:
                del jstruct[elem_struct[0]]
            else:
                jstruct[elem_struct[0]] = val
        except Exception:
            print(" [ERR] Cannot associate value", val, "to field", elem_struct[0])
            jstruct[elem_struct[0]] = None
else:
elem = elem_struct.pop(0)
jstruct[elem] = update_jstruct(jstruct[elem], elem_struct, val, keep)
return jstruct
|
def coord_map(dimensions, coordinate, mode):
"""
Wrap a coordinate, according to a given mode.
    :param dimensions: int, size of the axis (valid coordinates are 0..dimensions-1).
:param coordinate: int, coordinate provided by user. May be < 0 or > dimensions
:param mode: {'W', 'S', 'R', 'E'}, Whether to wrap, symmetric reflect, reflect or use the nearest
coordinate if `coord` falls outside [0, dim).
    :return: int, the mapped coordinate.
"""
max_coordinate = dimensions - 1
if mode == "S":
if coordinate < 0:
coordinate = -coordinate - 1
if coordinate > max_coordinate:
if (coordinate // dimensions) % 2 != 0: # ?
return max_coordinate - (coordinate % dimensions)
else:
return coordinate % dimensions
elif mode == "W":
if coordinate < 0:
return max_coordinate - (-coordinate - 1) % dimensions
if coordinate > max_coordinate:
return coordinate % dimensions
elif mode == "E":
if coordinate < 0:
return 0
elif coordinate > max_coordinate:
return max_coordinate
elif mode == "R":
if dimensions == 1:
return 0
elif coordinate < 0:
if (-coordinate // max_coordinate) % 2 != 0:
return max_coordinate - (-coordinate % max_coordinate)
else:
return -coordinate % max_coordinate
elif coordinate > max_coordinate:
if (coordinate // max_coordinate) % 2 != 0:
return max_coordinate - (coordinate % max_coordinate)
else:
return coordinate % max_coordinate
return coordinate
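# Illustrative usage (not part of the original source): wrap vs. clamp-to-edge
# behaviour on an axis of size 5 (valid coordinates 0..4).
assert coord_map(5, -1, "W") == 4   # wrap around
assert coord_map(5, 7, "E") == 4    # clamp to the nearest edge
assert coord_map(5, 2, "W") == 2    # in-range coordinates pass through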
|
def insertionSort(myList: list):
"""
Sorts the list by inserting the numbers in a specific index.
"""
for index in range(1, len(myList)):
actualValue = myList[index]
actualPosition = index
while actualPosition > 0 and myList[actualPosition - 1] > actualValue:
myList.insert(actualPosition, myList.pop(actualPosition-1))
actualPosition -= 1
myList[actualPosition] = actualValue
return myList
|
def jk_from_i(i, olist):
"""
Given an organized list (Wyckoff positions or orientations), determine the
two indices which correspond to a single index for an unorganized list.
Used mainly for organized Wyckoff position lists, but can be used for other
lists organized in a similar way
Args:
i: a single index corresponding to the item's location in the
unorganized list
olist: the organized list
Returns:
[j, k]: two indices corresponding to the item's location in the
organized list
"""
num = -1
found = False
for j , a in enumerate(olist):
for k , b in enumerate(a):
num += 1
if num == i:
return [j, k]
print("Error: Incorrect Wyckoff position list or index passed to jk_from_i")
return None
|
def human_readable_time(time):
"""
Convert global time in the system to the human-readable time
:return: human-readable time as a string
"""
hours = str(time // 3600 % 24)
minutes = str((time // 60) % 60)
seconds = str(time % 60)
if len(minutes) != 2:
minutes = "0" + minutes
if len(seconds) != 2:
seconds = "0" + seconds
return hours + ":" + minutes + ":" + seconds
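# Illustrative usage (not part of the original source), assuming the input is
# a time in seconds.
assert human_readable_time(3661) == "1:01:01"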
|
def dehexify(data):
"""
A URL and Hexadecimal Decoding Method.
Credit: Larry Dewey.
    In the case of the SIEM API, this is used only when dealing with the private API calls.
"""
hexen = {
"\x1c": ",", # Replacing Device Control 1 with a comma.
"\x11": "\n", # Replacing Device Control 2 with a new line.
"\x12": " ", # Space
"\x22": '"', # Double Quotes
"\x23": "#", # Number Symbol
"\x27": "'", # Single Quote
"\x28": "(", # Open Parenthesis
"\x29": ")", # Close Parenthesis
"\x2b": "+", # Plus Symbol
"\x2d": "-", # Hyphen Symbol
"\x2e": ".", # Period, dot, or full stop.
"\x2f": "/", # Forward Slash or divide symbol.
"\x7c": "|", # Vertical bar or pipe.
}
uri = {
"%11": ",", # Replacing Device Control 1 with a comma.
"%12": "\n", # Replacing Device Control 2 with a new line.
"%20": " ", # Space
"%22": '"', # Double Quotes
"%23": "#", # Number Symbol
"%27": "'", # Single Quote
"%28": "(", # Open Parenthesis
"%29": ")", # Close Parenthesis
"%2B": "+", # Plus Symbol
"%2D": "-", # Hyphen Symbol
"%2E": ".", # Period, dot, or full stop.
"%2F": "/", # Forward Slash or divide symbol.
"%3A": ":", # Colon
"%7C": "|", # Vertical bar or pipe.
}
for (enc, dec) in hexen.items():
data = data.replace(enc, dec)
for (enc, dec) in uri.items():
data = data.replace(enc, dec)
return data
|
def bioconductor_experiment_data_url(package, pkg_version, bioc_version):
"""
Constructs a url for an experiment data package tarball
Parameters
----------
package : str
Case-sensitive Bioconductor package name
pkg_version : str
Bioconductor package version
bioc_version : str
Bioconductor release version
"""
return (
'https://bioconductor.org/packages/{bioc_version}'
'/data/experiment/src/contrib/{package}_{pkg_version}.tar.gz'.format(**locals())
)
|
def str_to_bool(string):
"""
Parses string into boolean
"""
string = string.lower()
return True if string == "true" or string == "yes" else False
|
def valid(circuit, param):
""" checks validity of parameters
Parameters
----------
circuit : string
string defining the circuit
param : list
list of parameter values
Returns
-------
valid : boolean
Notes
-----
All parameters are considered valid if they are greater than zero --
except for E2 (the exponent of CPE) which also must be less than one.
"""
p_string = [x for x in circuit if x not in 'ps(),-/']
for i, (a, b) in enumerate(zip(p_string[::2], p_string[1::2])):
if str(a + b) == "E2":
if param[i] <= 0 or param[i] >= 1:
return False
else:
if param[i] <= 0:
return False
return True
|
def _left_decorator(item):
""" Removed packages """
return u'-' + item
|
def calculate(nth_number):
"""Returns the difference between the sum of the squares and the
square of the sum of the specified number of the first natural numbers"""
sums = sum(number for number in range(1, nth_number + 1))
sum_of_squares = 0
for number in range(1, nth_number + 1):
sum_of_squares += number ** 2
answer = sums ** 2 - sum_of_squares
return answer
|
def other_identifiers_to_metax(identifiers_list):
"""Convert other identifiers to comply with Metax schema.
Arguments:
identifiers_list (list): List of other identifiers from frontend.
Returns:
list: List of other identifiers that comply to Metax schema.
"""
other_identifiers = []
for identifier in identifiers_list:
id_dict = {}
id_dict["notation"] = identifier
other_identifiers.append(id_dict)
return other_identifiers
|
def RawLintWhitespace(data):
"""Make sure whitespace is sane."""
ret = []
if not data.endswith('\n'):
ret.append('missing newline at end of file')
return ret
|
def dict_if_none(arg):
"""Return an empty dict if arg is None."""
return arg if arg is not None else {}
|
def str_as_bool(val):
""" Interpret the string input as a boolean
"""
if val.lower() in ("false", "none", "no", "0"):
return False
return True
|
def is_hdfs_path(path):
"""
Check if a given path is HDFS uri
Args:
path (str): input path
Returns:
bool: True if input is a HDFS path, False otherwise
    >>> is_hdfs_path("/tdk")
    False
    >>> is_hdfs_path("hdfs://aa:123/bb/cc")
    True
"""
return path.startswith("hdfs://")
|
def imply(pred1,pred2):
"""
Returns True if pred1 implies pred2 , i.e. not pred1 or pred2.
pred1, pred2 are bool vars
"""
return (not pred1) or pred2
|
def hex2rgb(color):
"""Turns a "#RRGGBB" hexadecimal color representation into a (R, G, B)
tuple.
Arguments:
color -- str
Return: tuple
"""
code = color[1:]
if not (len(color) == 7 and color[0] == "#" and code.isalnum()):
raise ValueError('"%s" is not a valid color' % color)
red = int(code[:2], 16)
green = int(code[2:4], 16)
blue = int(code[4:6], 16)
return (red, green, blue)
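# Illustrative usage (not part of the original source).
assert hex2rgb("#ff8000") == (255, 128, 0)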
|
def convert_string_to_bool(string):
"""Converts string to bool.
Args:
string: str, string to convert.
Returns:
Boolean conversion of string.
"""
return False if string.lower() == "false" else True
|
def find_duplicates(items):
"""
Takes a list and returns a list of any duplicate items.
If there are no duplicates, return an empty list.
Code taken from:
https://stackoverflow.com/questions/9835762/how-do-i-find-the-duplicates-in-a-list-and-create-another-list-with-them
"""
seen = {}
duplicates = []
for item in items:
if item not in seen:
seen[item] = 1
else:
if seen[item] == 1:
duplicates.append(item)
seen[item] += 1
return duplicates
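# Illustrative usage (not part of the original source): each duplicated value
# is reported once, no matter how often it repeats.
assert find_duplicates([1, 2, 2, 3, 2]) == [2]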
|
def in_answer(a):
"""
Manipulation rule for answers 'inside' in QA pairs.
"""
entailment = f'the image is {a}.'
contradiction = f'the image is not {a}.'
return (entailment, contradiction), None
|
def ellipsis(data, length=20):
"""
Add a "..." if a string y greater than a specific length
:param data:
:param length: length taking into account to cut the string
:return:
"""
data = str(data)
return (data[:length] + '..') if len(data) > length else data
|
def fibonacci(n):
"""returns a list of the first n fibonacci values"""
n0 = 0
n1 = 1
fib_list = []
if type(n) != type(0) or n<=0:
raise Exception("'%s' is not a positive int" % str(n))
for i in range(n):
fib_list.append(n1)
(n0, n1) = (n1, n0+n1)
return fib_list
|
def applescript_escape(string):
"""Escape backlsahes and double quotes for applescript"""
return string.replace('\\', '\\\\').replace('"', '\\"')
|
def merge_dicts(*dicts):
"""
    Merge dictionaries into a new dict (values from later dicts overwrite earlier ones).
Args:
dicts (tuple[dict]): Dictionaries to be merged together.
Returns:
        merged (dict): The merged dict (values from later dicts overwrite earlier ones).
Examples:
>>> d1 = {1: 2, 3: 4, 5: 6}
>>> d2 = {2: 1, 4: 3, 6: 5}
>>> d3 = {1: 1, 3: 3, 6: 5}
>>> dd = merge_dicts(d1, d2)
>>> print(tuple(sorted(dd.items())))
((1, 2), (2, 1), (3, 4), (4, 3), (5, 6), (6, 5))
>>> dd = merge_dicts(d1, d3)
>>> print(tuple(sorted(dd.items())))
((1, 1), (3, 3), (5, 6), (6, 5))
"""
merged = {}
for item in dicts:
merged.update(item)
return merged
|
def fetch_param(ctx, name):
""" Try to fetch a click.Parameter from a click.Context (or its parents)
"""
# Try to raise error
parent = ctx
while parent is not None:
params = {p.name: p for p in parent.command.params}
if name in params:
return parent, params[name]
else:
parent = getattr(parent, 'parent', None)
return None, None
|
def answer(maze):
"""
I can largely re-use my code from foobar2-1.
We'll traverse the maze like a graph. I debated converting it to one, but
determined it won't do much to make the algorithm more intuitive.
To solve this question, I perform a depth-first search and then look up the distance to the exit.
:param maze: The maze to solve.
:return: The distance to the southeastern corner of the maze.
"""
class grid:
grid = []
width = len(maze)
height = len(maze[0])
costs = []
ourmaze = grid()
ourmaze.grid = maze
ourmaze.costs = [[10000 for col in range(ourmaze.width)] for row in range(ourmaze.height)]
def dfs(x, y, currcost, passedwall):
"""
On each pass of this algorithm, we consider moving in every
cardinal direction. If we've passed a wall already, we no
longer try to pass any more.
:param x: x co-ordinate of this recursive iteration
:param y: y co-ordinate of this recursive iteration
:param currcost: The current cost of our maze
:return: nothing
"""
# This isn't java, so we're not protected from out-of-bounds accesses.
if x not in range(ourmaze.width) or y not in range(ourmaze.height):
return
# If a cost has been found that is lower than ours, this work is pointless.
if currcost > ourmaze.costs[ourmaze.width-1][ourmaze.height-1]:
return
# As well, if we've already reach this node with a less expensive path, we exit.
# But if we haven't, we update the cost.
if currcost > ourmaze.costs[x][y]:
return
elif currcost < ourmaze.costs[x][y]:
ourmaze.costs[x][y] = currcost
# If we're on a 1, we have to do one of two things:
if ourmaze.grid[x][y] == 1:
# either exit
if passedwall:
return
# or update passedwall
else:
passedwall = True
dfs(x, y+1, currcost+1, passedwall)
dfs(x+1, y, currcost+1, passedwall)
dfs(x, y-1, currcost+1, passedwall)
dfs(x-1, y, currcost+1, passedwall)
dfs(0, 0, 1, False)
return ourmaze.costs[ourmaze.width-1][ourmaze.height-1]
|
def _ifconfig_cmd(netif, params=[]):
"""Construct commands to manipulate ifconfig."""
cmd = ['ifconfig', netif]
cmd.extend(params)
return cmd
|
def bytes_(s, encoding='latin-1', errors='strict'):
""" This function is a copy of ``pyramid.compat.bytes_``
If ``s`` is an instance of ``text_type``, return
``s.encode(encoding, errors)``, otherwise return ``s``"""
if isinstance(s, str): # pragma: no cover
return s.encode(encoding, errors)
return s
|