content
stringlengths 42
6.51k
|
|---|
def calc_user_ira(assets):
    """Return the user's IRA contribution based on assets.

    The IRS caps IRA contributions at $5,500 per year; the user
    contributes all assets up to that limit.
    """
    IRA_LIMIT = 5500
    return assets if assets <= IRA_LIMIT else IRA_LIMIT
|
def find_first(items, predicate):
    """Return the first element of *items* matching *predicate*, else None."""
    for candidate in items:
        if predicate(candidate):
            return candidate
    return None
|
def _has_version(name):
"""Check whether a package identifier has a version component."""
return name.rpartition("-")[2].replace(".", "").isdigit()
|
def modname_python(fragments):
    """Join name fragments into a dotted, Python-style module name."""
    separator = '.'
    return separator.join(fragments)
|
def convert_14_to_tuple(datetime_no_delims):
    """Convert a 14-digit datetime (e.g. 20160130235959) to a
    (year, month, day, hour, minute, second) tuple, e.g. (2016, 1, 30, 23, 59, 59).
    """
    text = str(datetime_no_delims)
    bounds = ((0, 4), (4, 6), (6, 8), (8, 10), (10, 12), (12, 14))
    return tuple(int(text[lo:hi]) for lo, hi in bounds)
|
def removeSmartQuote(txt):
    """
    removeSmartQuote(txt):
    Changes typographic quotation marks back to straight ones.

    Maps U+201C/U+201D to '"' and U+2018/U+2019 to "'".
    """
    # str.translate performs all four substitutions in a single pass
    # instead of four chained .replace() scans over the string.
    smart_to_straight = {0x201c: '"', 0x201d: '"', 0x2018: "'", 0x2019: "'"}
    return txt.translate(smart_to_straight)
|
def convertArrayInTupleList(array):
    """
    Convert an array (or a list) of elements into a list of tuples where each
    tuple holds two sequential elements of the original array/list.

    Parameters
    ----------
    array : numpy array/list

    Returns
    -------
    tuple_list : list of tuple
        Given the input array = [a, b, c, d ...] the tuple_list will be
        [(a, b), (b, c), (c, d) ...]
    """
    # zip over the sequence and its one-step offset replaces the manual
    # index loop; an empty or single-element input yields [].
    return list(zip(array, array[1:]))
|
def extract_columns(data):
    """Extract the column names to use in `DictWriter()`.

    Returns the keys of the first row dict, in their insertion order.
    """
    # list() over the first row replaces the manual key-append loop.
    return list(data[0])
|
def _get_transaction_urlencode(body):
    """Get the transaction for URL encoded transaction data to `POST <auth>`.

    Expects *body* to be UTF-8 bytes of the exact form ``transaction=<AAA...>``.
    Returns (1, envelope_xdr) on success, (0, error_message) on failure.
    """
    body = body.decode("utf-8")
    # Confirm there is only one URL parameter.
    body_arr = body.split("&")
    if len(body_arr) != 1:
        return 0, "multiple query params provided"
    # The format of the transaction parameter key-value pair should be
    # `transaction=<AAA...>`.
    transaction_param = body_arr[0]
    # NOTE(review): this unpacking raises ValueError when the body contains
    # zero or more than one '<' -- presumably callers guarantee the format;
    # confirm upstream validation.
    [key, value] = transaction_param.split("<")
    key = key[:-1]  # Remove trailing `=`.
    if key != "transaction":
        return 0, "no transaction provided"
    envelope_xdr = value[:-1]  # Remove trailing `>`.
    return 1, envelope_xdr
|
def turn_radius_helper(v):
    """Helper function for turn_radius.

    Evaluates a piecewise-linear expression over [0, 2500); returns 0.0
    for any value outside that range.
    """
    segments = (
        (0.0, 500.0, 0.006900, 5.84e-6),
        (500.0, 1000.0, 0.005610, 3.26e-6),
        (1000.0, 1500.0, 0.004300, 1.95e-6),
        (1500.0, 1750.0, 0.003025, 1.10e-6),
        (1750.0, 2500.0, 0.001800, 0.40e-6),
    )
    for lower, upper, intercept, slope in segments:
        if lower <= v < upper:
            return intercept - slope * v
    return 0.0
|
def euler_problem_28(n=1001):
    """Sum of both diagonals of an n-by-n clockwise number spiral
    (Project Euler problem 28).

    For the 5x5 spiral
        21 22 23 24 25
        20  7  8  9 10
        19  6  1  2 11
        18  5  4  3 12
        17 16 15 14 13
    the diagonal sum is 101.

    Insight: for each odd ring of side length s, the average of its four
    corners equals s^2 * (5/8) + (s-2)^2 * (3/8), so the ring contributes
    four times that value.
    """
    def corner_sum(side):
        assert side % 2 == 1
        if side == 1:
            return 1
        return 4 * int(side ** 2 * (5 / 8) + (side - 2) ** 2 * (3 / 8))

    return sum(corner_sum(side) for side in range(1, n + 1, 2))
|
def rem_time(seconds, abbreviate=False, spaced=None):
    """
    Convert seconds into remaining time (up to years).

    Args:
        seconds: number of seconds to break down. A "year" here is
            exactly 52 weeks (364 days).
        abbreviate: when True, use only the first letter of each unit
            (e.g. "5m, 3s").
        spaced: when True put a space between number and unit; None acts
            like True in the non-abbreviated form.

    Returns:
        str: comma-separated non-zero units from largest to smallest;
        unit names are singularized when the value is exactly 1
        (non-abbreviated form only).
    """
    units = ['years', 'weeks', 'days', 'hours', 'minutes', 'seconds']
    # dicts preserve insertion order, so output runs years -> seconds.
    data = {item: 0 for item in units}
    # Successive divmods peel off each unit, smallest divisor first.
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    weeks, days = divmod(days, 7)
    years, weeks = divmod(weeks, 52)
    data.update(seconds=seconds, minutes=minutes, hours=hours, days=days, weeks=weeks, years=years)
    if abbreviate is True:
        if spaced is True:
            return ', '.join('%d %s' % (v, k[0]) for k,v in data.items() if v != 0)
        return ', '.join('%d%s' % (v, k[0]) for k,v in data.items() if v != 0)
    result = []
    for k,v in data.items():
        if v > 1:
            # Plural: keep the unit name as-is.
            if spaced is True or spaced is None:
                result.append('%d %s' % (v, k))
            elif spaced is False:
                result.append('%d%s' % (v, k))
        elif v == 1:
            # Singular: drop the trailing 's' from the unit name.
            if spaced is True or spaced is None:
                result.append('%d %s' % (v, k[:-1]))
            elif spaced is False:
                result.append('%d%s' % (v, k[:-1]))
    return ', '.join(result)
|
def _name_from_tags(tags):
"""Get name from tags."""
for tag in tags:
if tag['Key'] == 'Name':
return tag['Value']
return None
|
def remove_readonly_fields(patron):
    """Strip Koha read-only fields from a patron dict (mutates and returns it).

    Patron records must run through this before being PUT back to Koha.
    """
    for readonly in ("anonymized", "restricted", "updated_on"):
        patron.pop(readonly, None)
    return patron
|
def us(qty):
    """
    Convert qty to truncated string with unit suffixes.
    eg turn 12345678 into 12.3M
    """
    if qty < 1000:
        return str(qty)
    for suf in ['K', 'M', 'G', 'T', 'P', 'E']:
        qty /= 1000
        if qty < 1000:
            return "%3.1f%s" % (qty, suf)
    # Bug fix: quantities >= 1000 exa previously fell off the loop and
    # returned None; clamp them to the largest suffix instead.
    return "%3.1f%s" % (qty, 'E')
|
def get_unique_words(sentences):
    """
    Input: An array of sentences (array of words) obtained after preprocessing.
    Output: A dictionary of unique words, each with an assigned index defining
    first-seen order.
    """
    # The original kept a separate "seen" dict plus an index counter; the
    # result dict itself already tracks both (membership and len()).
    unique_words = {}
    for sentence in sentences:
        for word in sentence:
            if word not in unique_words:
                unique_words[word] = len(unique_words)
    return unique_words
|
def parseHeaders(headers):
    """
    Parse a raw header blob into a dict; the status line is stored under
    key "0" and each "Name: value" line becomes a name -> value entry.
    @param (str) headers
    @return (dict)
    """
    lines = headers.split("\r\n")
    parsed = {}
    if lines:
        parsed["0"] = lines.pop(0)  # status line
    for line in lines:
        name, sep, value = line.partition(":")
        if sep:
            parsed[name.strip()] = value.strip()
    return parsed
|
def page_not_found(e):
    """Return a custom 404 error response tuple (body, status code)."""
    message = 'Sorry, Nothing at this URL.'
    return message, 404
|
def stfreq(f, length, srate):
    """
    [int] = stfreq(f, length, srate)
    Convert frequency f in Hz into a row of the Stockwell transform,
    given sampling rate srate and length of the original array.
    note: length * (1.0/srate)
    # in C this would be: return floor(f * len / srate + .5);
    """
    row = f * length / srate
    return int(round(row))
|
def unicode_to_ascii(s):
    """
    Converts s to an ASCII-only string.

    Non-ASCII characters are replaced with "?".
    s: unicode string
    """
    # Bug fix: on Python 3, str(ch) on a str never raises
    # UnicodeEncodeError, so the old try/except loop silently passed
    # non-ASCII characters through unchanged. Test the code point
    # explicitly instead, as the docstring promises.
    return "".join(ch if ord(ch) < 128 else "?" for ch in s)
|
def expand_markings(granular_markings):
    """Expand granular markings list.

    A granular marking with more than one selector is expanded into one
    marking per selector, repeating the same marking_ref (or lang).

    Example:
    >>> expand_markings([
    ...     {
    ...         "selectors": ["description", "name"],
    ...         "marking_ref": "marking-definition--613f2e26-407d-48c7-9eca-b8e91df99dc9"
    ...     }
    ... ])
    [
        {
            "selectors": ["description"],
            "marking_ref": "marking-definition--613f2e26-407d-48c7-9eca-b8e91df99dc9"
        },
        {
            "selectors": ["name"],
            "marking_ref": "marking-definition--613f2e26-407d-48c7-9eca-b8e91df99dc9"
        }
    ]

    Args:
        granular_markings: The granular markings list property present in a
            SDO or SRO.

    Returns:
        list: A list with all markings expanded.
    """
    expanded = []
    for marking in granular_markings:
        selectors = marking.get('selectors')
        # marking_ref entries are emitted before lang entries, matching
        # the original ordering.
        for field in ('marking_ref', 'lang'):
            ref = marking.get(field)
            if ref:
                for selector in selectors:
                    expanded.append({field: ref, 'selectors': [selector]})
    return expanded
|
def get_yes_no(value):
    """Coerce a truthy/falsy value to the string 'yes' or 'no'."""
    if value:
        return 'yes'
    return 'no'
|
def scoreBehaviors(behaviorTags):
    """
    Scores the identified behaviors.

    Args:
        behaviorTags: ["Downloader", "Crypto"]
    Returns:
        score: 3.5 -- sum of per-behavior weights, clamped at >= 0.0
        verdict: "Likely Benign" -- risk bucket derived from the score
        behaviorData: ["Downloader - 1.5", "Crypto - 2.0"] -- one
            "behavior - weight" string per input tag
    """
    scoreValues = {
        # Negative
        # Behaviors which are generally only seen in Malware.
        "Code Injection": 10.0,
        "Key Logging": 3.0,
        "Screen Scraping": 2.0,
        "AppLocker Bypass": 2.0,
        "AMSI Bypass": 2.0,
        "Clear Logs": 2.0,
        "Coin Miner": 6.0,
        "Embedded File": 4.0,
        "Abnormal Size": 2.0,
        "Ransomware": 10.0,
        "DNS C2": 2.0,
        "Disabled Protections": 4.0,
        "Negative Context": 10.0,
        "Malicious Behavior Combo": 6.0,
        "Known Malware": 10.0,
        # Neutral
        # Behaviors which require more context to infer intent.
        "Downloader": 1.5,
        "Starts Process": 1.5,
        "Script Execution": 1.5,
        "Compression": 1.5,
        "Hidden Window": 0.5,
        "Custom Web Fields": 1.0,
        "Persistence": 1.0,
        "Sleeps": 0.5,
        "Uninstalls Apps": 0.5,
        "Obfuscation": 1.0,
        "Crypto": 2.0,
        "Enumeration": 0.5,
        "Registry": 0.5,
        "Sends Data": 1.0,
        "Byte Usage": 1.0,
        "SysInternals": 1.5,
        "One Liner": 2.0,
        "Variable Extension": 2.0,
        # Benign
        # Behaviors which are generally only seen in Benign scripts - subtracts from score.
        "Script Logging": -1.0,
        "License": -2.0,
        "Function Body": -2.0,
        "Positive Context": -3.0,
    }
    score = 0.0
    behaviorData = list()
    for behavior in behaviorTags:
        # "Known Malware:<detail>" and "Obfuscation:<detail>" tags carry a
        # detail suffix; record it, then strip it so the base name indexes
        # scoreValues.
        if "Known Malware:" in behavior:
            behaviorData.append("%s: %s - %s" % (behavior.split(":")[0], behavior.split(":")[1], scoreValues[behavior.split(":")[0]]))
            behavior = behavior.split(":")[0]
        elif "Obfuscation:" in behavior:
            behaviorData.append("%s: %s - %s" % (behavior.split(":")[0], behavior.split(":")[1], scoreValues[behavior.split(":")[0]]))
            behavior = behavior.split(":")[0]
        else:
            behaviorData.append("%s - %s" % (behavior, scoreValues[behavior]))
        score += scoreValues[behavior]
    # Benign weights can push the total below zero; clamp to 0.0.
    if score < 0.0:
        score = 0.0
    # These verdicts are arbitrary and can be adjusted as necessary.
    if score == 0 and behaviorTags == []:
        verdict = "Unknown"
    elif score < 4:
        verdict = "Low Risk"
    elif 6 > score >= 4:
        verdict = "Mild Risk"
    elif 6 <= score <= 10:
        verdict = "Moderate Risk"
    elif 10 < score <= 20:
        verdict = "Elevated Risk"
    else:
        verdict = "Severe Risk"
    #verdict = "Unknown"
    return score, verdict, behaviorData
|
def _delta(i, j):
"""The Kronecker delta. Returns 1 if i == j, 0 otherwise."""
return int(i == j)
|
def _make_CSV_line(username, language):
"""Return a WikiMetrics compatible CSV line."""
return "%s, %swiki" % (username, language)
|
def check_model_structure(model, inner_key="model"):
    """Checks the model structure to see if it contains all the
    main expected keys.

    Requires a dict with a non-None 'resource' entry, plus *inner_key*
    either inside model['object'] or at the top level.
    """
    if not isinstance(model, dict):
        return False
    if 'resource' not in model or model['resource'] is None:
        return False
    if 'object' in model and inner_key in model['object']:
        return True
    return inner_key in model
|
def histogram(s):
    """Count occurrences of each character in *s*."""
    counts = {}
    for ch in s:
        counts[ch] = counts.get(ch, 0) + 1
    return counts
|
def _splituser(host):
"""splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'."""
user, delim, host = host.rpartition('@')
return (user if delim else None), host
|
def parse_line(line):
    """Takes a line of space separated values, returns the values in a list.

    Note: splits on single spaces, so consecutive spaces produce empty
    strings in the result.
    """
    stripped = line.strip()
    return stripped.split(' ')
|
def filter_results_table(search_str: str, result_table: str) -> str:
    """
    Keep only the lines of the results table containing search_str
    (case-insensitive).
    Expected format of results table: Result1\nResult2\n
    :param search_str: str: text to be present
    :param result_table: str: results extracted from table
    :return: str. Tab concatenated lines that matched
    """
    needle = search_str.lower()
    matches = [line for line in result_table.split("\n") if needle in line.lower()]
    return "____\t".join(matches)
|
def xgcd(a, b):
    """return (g, x, y) such that a*x + b*y = g = gcd(a, b)"""
    # Standard iterative extended Euclidean algorithm: each round divides
    # b by a (q is the quotient) and updates the Bezout coefficients in
    # lockstep; when a reaches 0, b holds the gcd and (x0, y0) are the
    # coefficients for the original inputs.
    x0, x1, y0, y1 = 0, 1, 1, 0
    while a != 0:
        q, b, a = b // a, a, b % a
        y0, y1 = y1, y0 - q * y1
        x0, x1 = x1, x0 - q * x1
    return b, x0, y0
|
def cmake_cache_string(name, string, comment=""):
    """Generate a string for a cmake cache variable"""
    template = 'set(%s "%s" CACHE STRING "%s")\n\n'
    return template % (name, string, comment)
|
def walk(d, path):
    """Walk dict d using path as sequential list of keys, return last value."""
    current = d
    for key in path:
        current = current[key]
    return current
|
def FIND(find_text, within_text, start_num=1):
    """
    Returns the position at which a string is first found within text.
    Find is case-sensitive. The returned position is 1 if within_text starts with find_text.
    Start_num specifies the character at which to start the search, defaulting to 1 (the first
    character of within_text).
    If find_text is not found, or start_num is invalid, raises ValueError.
    >>> FIND("M", "Miriam McGovern")
    1
    >>> FIND("m", "Miriam McGovern")
    6
    >>> FIND("M", "Miriam McGovern", 3)
    8
    >>> FIND(" #", "Hello world # Test")
    12
    >>> FIND("gle", "Google", 1)
    4
    >>> FIND("GLE", "Google", 1)
    Traceback (most recent call last):
    ...
    ValueError: substring not found
    >>> FIND("page", "homepage")
    5
    >>> FIND("page", "homepage", 6)
    Traceback (most recent call last):
    ...
    ValueError: substring not found
    """
    # str.index raises ValueError on a miss; shift from 0-based to 1-based.
    zero_based = within_text.index(find_text, start_num - 1)
    return zero_based + 1
|
def restframe_wl(x, z):
    """
    Transform a given spectrum x into the restframe, given the redshift.
    Input:
        x: observed wavelengths of a spectrum, in Angstrom or nm for example
        z: redshift
    Return:
        restframe spectrum
    """
    scale = 1.0 + z
    return x / scale
|
def get_pg_uri(user, password, host, port, dbname) -> str:
    """Returns PostgreSQL URI-formatted string."""
    return 'postgresql://{}:{}@{}:{}/{}'.format(user, password, host, port, dbname)
|
def get_seat_id(seat_str: str) -> int:
    """Parse the seat specification string, and return the seat ID.

    The first seven characters binary-partition the row ('B' selects the
    upper half); the rest partition the column ('R' selects the right half).

    :param seat_str: Seat specification string
    :return: Seat ID
    """
    row = sum((char == 'B') * 2 ** (6 - i) for i, char in enumerate(seat_str[:7]))
    col = sum((char == 'R') * 2 ** (2 - i) for i, char in enumerate(seat_str[7:]))
    return row * 8 + col
|
def parse_mlil(context, mlil_list):
    """ Helps the GUI go from lists of instruction data to a cleanly formatted string

    Args:
        context: GUI helper object providing escape() and a `newline` attribute.
        mlil_list: list of MLIL instruction objects (None entries allowed).
    Returns:
        str: one "index: tokens" line per instruction ('None' for None
        entries), or the literal string 'None' for an empty list.
    """
    newText = ""
    for mlil in mlil_list:
        if mlil is not None:
            # Prefer dereferenced tokens when the instruction provides them.
            tokens = mlil.deref_tokens if hasattr(mlil, 'deref_tokens') else mlil.tokens
            newText += "{}: ".format(mlil.instr_index)
            newText += (''.join(context.escape(str(token)) for token in tokens))
        else:
            newText += ('None')
        newText += context.newline
    if(len(mlil_list) > 0):
        # Drop the trailing newline added by the last iteration.
        return newText.strip(context.newline)
    else:
        return 'None'
|
def normalize_windows(window_data):
    """Normalize each window relative to its first element: p -> p/p0 - 1."""
    return [[(float(value) / float(window[0])) - 1 for value in window]
            for window in window_data]
|
def _method_with_pos_reference_1(fake_value, other_value):
"""
:type other_value: [list, dict, str]
"""
return other_value.lower()
|
def flatten(lis):
    """Given a list, possibly nested to any level, return it flattened."""
    flat = []
    for element in lis:
        if type(element) is list:
            flat += flatten(element)
        else:
            flat.append(element)
    return flat
|
def get_ppn_and_nodes(num_procs, procs_per_node):
    """Return the number of processors per node and the node count to use."""
    if num_procs < procs_per_node:
        # Everything fits on a single node.
        return num_procs, 1
    return procs_per_node, num_procs // procs_per_node
|
def convert_hyperparameters(dict_hyperparams):
    """Convert the hyperparameters to the format accepted by the library.

    Args:
        dict_hyperparams: dict mapping hyperparameter name to a spec dict
            with a 'type' key ('int', 'float', 'bool' or 'str') and, except
            for 'bool', a 'range' or 'values' key.
    Returns:
        list of dicts with 'name', 'type' and 'bounds' keys.
    Raises:
        TypeError: if dict_hyperparams is not a dictionary.
    """
    if not isinstance(dict_hyperparams, dict):
        raise TypeError('Hyperparams must be a dictionary.')
    hyperparams = []
    # The 'int' and 'float' branches were identical except for the cast;
    # a small cast table removes the duplication.
    casts = {'int': int, 'float': float}
    for name, hyperparam in dict_hyperparams.items():
        hp_type = hyperparam['type']
        if hp_type in casts:
            cast = casts[hp_type]
            hp_range = hyperparam.get('range') or hyperparam.get('values')
            hyperparams.append({
                'name': name,
                'type': 'range',
                'bounds': [cast(min(hp_range)), cast(max(hp_range))],
            })
        elif hp_type == 'bool':
            hyperparams.append({
                'name': name,
                'type': 'choice',
                'bounds': [True, False],
            })
        elif hp_type == 'str':
            hp_range = hyperparam.get('range') or hyperparam.get('values')
            hyperparams.append({
                'name': name,
                'type': 'choice',
                'bounds': hp_range,
            })
    return hyperparams
|
def next_bigger(n):
    """Finds the next bigger number with the same digits.

    Returns -1 when no larger permutation exists (digits are already
    non-increasing from the most significant end).
    """
    # Convert n to a string and a list in reverse for ease.
    n_string = str(n)[::-1]
    n_list = [int(x) for x in n_string]
    # Go through each digit (least significant first, since the list is
    # reversed) and identify when there is a lower digit.
    number_previous = n_list[0]
    for position, number in enumerate(n_list):
        if number_previous > number:
            # Create slice of numbers that need to be changed.
            n_list_piece = n_list[:position+1]
            # Set the starting digit which will be the next higher than the "number"
            first_set_list = list(set(n_list_piece))
            first_set_list.sort()
            number_position = first_set_list.index(number)
            first = first_set_list[number_position+1]
            # Numbers after the position will always be in sorted order.
            n_list_piece.sort()
            n_list_piece.remove(first)
            n_list_piece = [first] + n_list_piece
            n_string_piece = ""
            for z in n_list_piece:
                n_string_piece += str(z)
            # Reassemble: the untouched high-order digits (still reversed,
            # so re-reverse them) followed by the rearranged tail.
            if n_list[position+1:]:
                n_string = n_string[position+1:]
                solution = n_string[::-1] + n_string_piece
            else:
                solution = n_string_piece
            return int(solution)
        else:
            number_previous = number
    return -1
|
def create_result_4_array(list):
    """
    Serialize the specified array to a JSON string with "result" as the key.

    Each element must provide a get_json() method returning its JSON
    representation as a string.
    """
    # NOTE: the parameter name shadows the builtin `list`; kept unchanged
    # for backward compatibility with keyword callers.
    # Fixes: removed the stray debug print() of every partial string, and
    # replaced the quadratic string concatenation (plus trailing-comma
    # trimming) with a single join.
    body = ",".join(data.get_json() for data in list)
    return '{"result":[' + body + ']}'
|
def params_deepTools_plotHeatmap_endLabel(wildcards):
    """
    Created:
        2017-08-02 13:15:45
    Aim:
        Build the --endLabel argument; the label values are pre-quoted
        because arguments with spaces interact badly with paths.
    """
    label_key = str(wildcards['deepTools_plotHeatmap_endLabel_id'])
    quoted_labels = {
        'end': "'end'",
        '0': "'0'"
    }
    return "--endLabel " + quoted_labels[label_key]
|
def squares_in_rectangle(length, width):
    """Solution to the Codewars "Rectangle into Squares" kata:
    https://www.codewars.com/kata/55466989aeecab5aac00003e/train/python

    Repeatedly cut the largest possible square off the rectangle and
    record its side length; returns None when the input is already square.
    """
    if length == width:
        return None
    sides = []
    while length > 0 and width > 0:
        if width < length:
            side = width
            length -= width
        else:
            side = length
            width -= length
        sides.append(side)
    return sides
|
def getTwosComplement(raw_val, length):
    """Interpret `raw_val` as a two's-complement signed integer.

    Args:
        raw_val (int): Raw value
        length (int): Max bit length
    Returns:
        int: Two's complement (signed) value
    """
    sign_bit = 1 << (length - 1)
    if raw_val & sign_bit:
        return raw_val - (1 << length)
    return raw_val
|
def _findLine(comp,fileLines):
""" Find a line number in the file"""
# Line counter
c = 0
# List of indices for found lines
found = []
# Loop through all the lines
for line in fileLines:
if comp in line:
# Append if found
found.append(c)
# Increse the counter
c += 1
# Return the found indices
return found
|
def e_leste(arg):
    """
    e_leste: direction --> boolean

    e_leste(arg) is True if arg is the element 'E' (east) and False
    otherwise. (Translated from Portuguese; the original docstring said
    "e_sul", a copy-paste typo for e_leste.)
    """
    return arg == 'E'
|
def is_valid_host(parser, host_str):
    """
    Check the validity of the host string arguments.

    When host_str has exactly four colon-separated fields, returns them as
    a list with the second field converted to int; otherwise reports an
    error through *parser*.
    """
    fields = host_str.split(':')
    if len(fields) != 4:
        parser.error("The host string %s is not valid!" % host_str)
        return
    fields[1] = int(fields[1])
    return fields
|
def subtract_matrices(a, b):
    """
    Subtracts matrix b from matrix a aka (a-b)
    Args:
        a: matrix to subtract from
        b: matrix to subtract away from a
    Returns:
        m: resulting matrix from a-b
    """
    # Bug fix: the inner loop previously enumerated the ROWS of b to
    # produce the column indices, which is only correct when b is square
    # and matches a's row length. Pair rows and elements directly so
    # rectangular matrices are handled too.
    m = []
    for row_a, row_b in zip(a, b):
        m.append([x - y for x, y in zip(row_a, row_b)])
    return m
|
def union_set_from_dict(smu_info_dict):
    """
    Union all value sets of smu_info_dict.

    The smu_info_dict has the following format: smu name -> set().
    """
    return set().union(*smu_info_dict.values())
|
def get_sort_code(options, unused_value):
    """
    Gets the sort code of a given set of options and value.

    Parameters
    ----------
    options : List[int/str]
        the options for a parameter
    unused_value : int; str
        the value of the parameter
    """
    flag_bits = (('COMPLEX', 1), ('SORT2', 2), ('RANDOM', 4))
    return sum(bit for flag, bit in flag_bits if flag in options)
|
def k1(f, t, y, paso):
    """
    Return paso * f(t, y). (Translated from Spanish.)

    f    : function to integrate; returns an np.ndarray
    t    : time at which to evaluate the function f
    y    : state at which to evaluate the function f
    paso : step size to use

    NOTE(review): presumably the k1 increment of a Runge-Kutta step --
    confirm against the caller.
    """
    output = paso * f(t, y)
    return output
|
def read_translated_file(filename, data):
    """Read a file inserting data.

    Args:
        filename (str): file to read
        data (dict): dictionary with data to insert into file
    Returns:
        list of lines with {placeholders} substituted, or None when
        filename is falsy.
    """
    if filename:
        with open(filename) as f:
            # Collapse doubled braces so only the intended single-brace
            # {name} fields remain for str.format below.
            text = f.read().replace('{{', '{').replace('}}', '}')
            return text.format(**data or {}).rstrip('\n').split('\n')
    else:
        return None
|
def getTJstr(text, glyphs, simple, ordering):
    """ Return a PDF string enclosed in [] brackets, suitable for the PDF TJ
    operator.
    Notes:
        The input string is converted to either 2 or 4 hex digits per character.
    Args:
        simple: no glyphs: 2-chars, use char codes as the glyph
                glyphs: 2-chars, use glyphs instead of char codes (Symbol,
                ZapfDingbats)
        not simple: ordering < 0: 4-chars, use glyphs not char codes
                    ordering >=0: a CJK font! 4 chars, use char codes as glyphs
    """
    if text.startswith("[<") and text.endswith(">]"):  # already done
        return text
    if not bool(text):
        return "[<>]"
    if simple:
        # Simple fonts: 2 hex digits per char; code points >= 256 cannot be
        # represented and are replaced by "b7" (middle dot).
        if glyphs is None:  # simple and not Symbol / ZapfDingbats
            otxt = "".join([hex(ord(c))[2:].rjust(2, "0") if ord(c)<256 else "b7" for c in text])
        else:  # Symbol or ZapfDingbats
            otxt = "".join([hex(glyphs[ord(c)][0])[2:].rjust(2, "0") if ord(c)<256 else "b7" for c in text])
        return "[<" + otxt + ">]"
    # Composite fonts: 4 hex digits per char.
    if ordering < 0:  # not a CJK font: use the glyphs
        otxt = "".join([hex(glyphs[ord(c)][0])[2:].rjust(4, "0") for c in text])
    else:  # CJK: use char codes, no glyphs
        otxt = "".join([hex(ord(c))[2:].rjust(4, "0") for c in text])
    return "[<" + otxt + ">]"
|
def get_grid(size):
    """Create a size-by-size grid where every cell is its own empty list."""
    grid = []
    for _ in range(size):
        grid.append([[] for _ in range(size)])
    return grid
|
def nodot(item):
    """Can be used to ignore hidden files, starting with the . character."""
    first_char = item[0]
    return first_char != '.'
|
def format_len(x):
    """
    >>> format_len('abc')
    3
    >>> format_len(('(', ('(', 'def', ')'), 'yz', ')'))
    11
    """
    if not isinstance(x, (list, tuple)):
        return len(x)
    # Containers with more than three parts pay two separator chars per
    # extra part.
    sep_len = 2 * (len(x) - 3) if len(x) > 3 else 0
    return sum(format_len(part) for part in x) + sep_len
|
def getletters(mcode):
    """Split a space-separated string of morse code into a list of codes."""
    return mcode.split(" ")
|
def chisquare(ddl):
    """
    Returns the 5% critical value of the chi-square distribution
    with *ddl* degrees of liberty (maximum: 100).

    NOTE(review): the final table entry (ddl=100) is 129.561, which breaks
    the otherwise-smooth progression of the preceding values -- verify it
    against a published chi-square table.
    """
    # Pre-computed 95th-percentile values, indexed by ddl-1.
    table=( 3.841, 5.991, 7.815, 9.488, 11.07, 12.592, 14.067, 15.507, 16.919, 18.307,
    19.675, 21.026, 22.362, 23.685, 24.996, 26.296, 27.587, 28.869, 30.144, 31.41,
    32.671, 33.924, 35.172, 36.415, 37.652, 38.885, 40.113, 41.337, 42.557, 43.773,
    44.985, 46.194, 47.4, 48.602, 49.802, 50.998, 52.192, 53.384, 54.572, 55.758,
    56.942, 58.124, 59.304, 60.481, 61.656, 62.83, 64.001, 65.171, 66.339, 67.505,
    68.669, 69.832, 70.993, 72.153, 73.311, 74.468, 75.624, 76.778, 77.931, 79.082,
    80.232, 81.381, 82.529, 83.675, 84.821, 85.965, 87.108, 88.25, 89.391, 90.531,
    91.67, 92.808, 93.945, 95.081, 96.217, 97.351, 98.484, 99.617, 100.749, 101.879,
    103.01, 104.139, 105.267, 106.395, 107.522, 108.648, 109.773, 110.898, 112.022, 113.145,
    114.268, 115.39, 116.511, 117.632, 118.752, 119.871, 120.99, 122.108, 123.225, 129.561)
    return table[ddl-1]
|
def convert_to_letter(grade):
    """Convert a decimal number to letter grade"""
    rounded = round(grade, 1)
    thresholds = ((82.5, 'A'), (65, 'B'), (55, 'C'), (50, 'D'))
    for cutoff, letter in thresholds:
        if rounded >= cutoff:
            return letter
    return 'F'
|
def logM2L(color, a, b):
    """
    Calculate the logarithmic mass-to-light ratio from a color:
    logM2L = a + (b * color)

    Parameters
    ----------
    color : float or array like
        The color of the galaxy.
    a : float
        The normalization (intercept) of the relation.
    b : float
        The slope of the relation.

    Returns
    -------
    logm2l : float or array like
        The logarithmic mass-to-light ratio.
    """
    return a + (b * color)
|
def _apply_engine_option(engine, value):
    """Apply a single "key=value" option string to an engine dict in place."""
    if 'cmd=' in value:
        engine['cmd'] = value.split('=')[1]
    elif 'option.' in value:
        # Todo: support float value
        # option.QueenValueOpening=1000
        optn = value.split('option.')[1].split('=')[0]
        optv = int(value.split('option.')[1].split('=')[1])
        engine['opt'][optn] = optv
    elif 'tc=' in value:
        engine['tc'] = value.split('=')[1]
    elif 'name=' in value:
        engine['name'] = value.split('=')[1]
    elif 'depth=' in value:
        engine['depth'] = int(value.split('=')[1])

def define_engine(engine_option_value):
    """
    Define engine files, name and options.

    Parses up to two groups of "key=value" strings (test engine first,
    base engine second) and returns the two engine description dicts.
    """
    # The original duplicated the whole option-parsing chain for each of
    # the two engines; a shared helper removes the duplication.
    engines = (
        {'proc': None, 'cmd': None, 'name': 'test', 'opt': {}, 'tc': '', 'depth': 0},
        {'proc': None, 'cmd': None, 'name': 'base', 'opt': {}, 'tc': '', 'depth': 0},
    )
    for i, eng_opt_val in enumerate(engine_option_value):
        if i > 1:
            break  # only the first two engines are defined
        for value in eng_opt_val:
            _apply_engine_option(engines[i], value)
    return engines[0], engines[1]
|
def labelFromString(s):
    """Work out if this operand is a label or not.

    Returns (number, None) for a numeric operand, (None, s) for a label.
    """
    # Is it numeric?
    try:
        return int(s), None  # just a normal number
    except (ValueError, TypeError):
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit. Must be a label.
        return None, s
|
def no_error(val_foo, give_val=False):
    """Run val_foo with no inputs, test whether it raises an error.

    Args:
        val_foo: zero-argument callable to probe.
        give_val: when True, also return the produced value (None on error).
    Returns:
        bool success, or (success, value) when give_val is True.
    """
    try:
        val = val_foo()
        success = True
    except Exception:
        # `except Exception` instead of the previous bare `except:` so
        # KeyboardInterrupt/SystemExit still propagate.
        val = None
        success = False
    if give_val:
        return success, val
    return success
|
def encode_tick_rule_array(tick_rule_array: list) -> str:
    """
    Encode array of tick signs (-1, 1, 0)
    :param tick_rule_array: (list) of tick rules
    :return: (str) encoded message
    """
    mapping = ((1, 'a'), (-1, 'b'), (0, 'c'))
    encoded = []
    for element in tick_rule_array:
        for value, code in mapping:
            if element == value:
                encoded.append(code)
                break
        else:
            raise ValueError('Unknown value for tick rule: {}'.format(element))
    return ''.join(encoded)
|
def calculate_complexity_factor(n_circuit_planes: int) -> float:
    """Calculate the complexity factor (piC).

    :param n_circuit_planes: the number of planes in the PCB/PWA.
    :return: _pi_c; the calculated value of the complexity factor.
    :rtype: float
    """
    if n_circuit_planes > 2:
        return 0.65 * n_circuit_planes ** 0.63
    return 1.0
|
def getSurvivalBoolData(ICounts, TCounts, IThreshold, TThreshold):
    """
    Threshold the paired counts into booleans.
    I stands for init, as in initialCounts; T stands for Transfered, as in
    transfered Counts. Pairs beyond the shorter list are ignored.
    """
    paired = list(zip(ICounts, TCounts))
    IAtoms = [i_point > IThreshold for i_point, _ in paired]
    TAtoms = [t_point > TThreshold for _, t_point in paired]
    return IAtoms, TAtoms
|
def process_origin(data, template):
    """
    Replace {$origin} in template with a serialized $ORIGIN record
    (or with the empty string when data is None).
    """
    record = "" if data is None else "$ORIGIN %s" % data
    return template.replace("{$origin}", record)
|
def is_valid_namespace(namespace):
    """Returns true if the given namespace is valid.

    A valid namespace is non-empty, does not start with a digit, and
    contains neither '-' nor ' '.

    :param `namespace`: String to test
    :returns: Validation result
    :rtype: bool
    """
    # Bug fix: an empty string previously raised IndexError on
    # namespace[0]; treat it as invalid instead.
    if not namespace:
        return False
    if namespace[0] in '0123456789':
        return False
    return '-' not in namespace and ' ' not in namespace
|
def _ggm_prob_wait_whitt_z(ca2, cs2):
"""
Equation 3.8 on p139 of Whitt (1993). Used in approximation for P(Wq > 0) in GI/G/c/inf queue.
See Whitt, Ward. "Approximations for the GI/G/m queue"
Production and Operations Management 2, 2 (Spring 1993): 114-161.
Parameters
----------
ca2 : float
squared coefficient of variation for inter-arrival time distribution
cs2 : float
squared coefficient of variation for service time distribution
Returns
-------
float
approximation for intermediate term z (see Eq 3.6)
"""
z = (ca2 + cs2) / (1.0 + cs2)
return z
|
def dot(u1, u2):
    """Dot product of two 3-component vectors."""
    return sum(u1[i] * u2[i] for i in range(3))
|
def wulffmaker_gamma(energy):
    """
    Returns the string to be used for the Wulffmaker default gamma values.

    Arguments
    ---------
    energy: iterable
        Any iterable that holds the surface energies
    Returns
    -------
    str
        String to be copied to wulffmaker for the surface energies.
    """
    pieces = ["pickGamma[i_] :=\n", "Which[\n"]
    # Wulffmaker indices are 1-based.
    for position, value in enumerate(energy, start=1):
        pieces.append("i=={},\n".format(position))
        pieces.append("{:.4f},\n".format(value))
    pieces.append("True,\n")
    pieces.append("1]")
    return "".join(pieces)
|
def maxLen(n, arr):
    """
    Length of the longest zero-sum subarray of arr[:n].

    The idea is to iterate through the array and for every element arr[i],
    calculate sum of elements from 0 to i (this can simply be done as sum += arr[i]).
    If the current sum has been seen before, then there is a zero sum array.
    Hashing is used to store the sum values, so that we can quickly store sum
    and find out whether the current sum is seen before or not.
    """
    maxLn = 0
    # h maps a prefix sum to the first index where it occurred.
    h = {}
    sth = 0
    for i in range(n):
        sth += arr[i]
        if arr[i] == 0:
            maxLn = max(1, maxLn)
            # If the element itself is 0, length is 1
        if sth == 0:
            maxLn = i+1
            # If the sum till current index is 0, then the entire length becomes
            # the max length
        if sth not in h:
            h[sth] = i
        else:
            # Same prefix sum seen before: the elements in between sum to 0.
            ln = i - h[sth]
            maxLn = max(maxLn, ln)
    return maxLn
|
def path_similarity(path_1, path_2, feature_names, min_max_feature_values):
    """path_similarity function computes the similarity of two paths (rules)

    Args:
        path_1: the first path
        path_2: the second path
        feature_names: the list of features
        min_max_feature_values: the min and max possible values of each feature
    Return:
        similarity: the similarity of the paths -- average per-feature
        interval overlap (intersection/union), counting 1 for features
        absent from both paths.
    """
    similarity = 0
    for i in feature_names:
        if i in path_1 and i in path_2:
            # Derive the [l, u] interval each path constrains feature i to;
            # a single condition is open on one side and is closed with the
            # feature's global min or max.
            if len(path_1[i]) == 2:
                l1 = path_1[i][1][1]
                u1 = path_1[i][0][1]
            else:
                if path_1[i][0][0] == '<=':
                    u1 = path_1[i][0][1]
                    l1 = min_max_feature_values[i][0]
                else:
                    l1 = path_1[i][0][1]
                    u1 = min_max_feature_values[i][1]
            if len(path_2[i]) == 2:
                l2 = path_2[i][1][1]
                u2 = path_2[i][0][1]
            else:
                if path_2[i][0][0] == '<=':
                    u2 = path_2[i][0][1]
                    l2 = min_max_feature_values[i][0]
                else:
                    l2 = path_2[i][0][1]
                    u2 = min_max_feature_values[i][1]
            # NOTE(review): the disjointness test `u1 <= l2 or u2 <= l2`
            # looks suspicious -- symmetry suggests the second term was
            # meant to be `u2 <= l1`. Confirm before changing.
            if u1 <= l2 or u2 <= l2:
                similarity = similarity
            else:
                inter = min(u1, u2) - max(l1, l2)
                union = max(u1, u2) - min(l1, l2)
                if union != 0:
                    similarity = similarity + inter / union
        elif i not in path_1 and i not in path_2:
            # Neither path constrains this feature: full agreement.
            similarity = similarity + 1
    similarity = similarity / len(feature_names)
    return similarity
|
def get_mod_deps(mod_name):
    """Get known module dependencies.

    .. note:: This does not need to be accurate in order for the client to
        run. This simply keeps things clean if the user decides to revert
        changes.
    .. warning:: If all deps are not included, it may cause incorrect parsing
        behavior, due to enable_mod's shortcut for updating the parser's
        currently defined modules (`.ApacheParser.add_mod`)
        This would only present a major problem in extremely atypical
        configs that use ifmod for the missing deps.
    """
    known_deps = {"ssl": ["setenvif", "mime"]}
    return known_deps.get(mod_name, [])
|
def d_key(phi, e):
    """The Decryption Key Generator.

    Returns the smallest d in [1, phi) with d*e == 1 (mod phi), or None
    when no such d exists.
    """
    candidates = (d for d in range(1, phi) if (d * e) % phi == 1)
    return next(candidates, None)
|
def deal_successive_space(text):
    """
    Collapse tokenisation spaces in text.

    A run of three spaces encodes one real space; any remaining single
    space is a split token and is dropped.
    :param text:
    :return:
    """
    placeholder = '<space>'
    # Protect genuine spaces (encoded as a triple space) behind a
    # placeholder, strip the split-token spaces, then restore them.
    protected = text.replace('   ', placeholder)
    collapsed = protected.replace(' ', '')
    return collapsed.replace(placeholder, ' ')
|
def get_comment_for_domain(domain):
    """Describe a domain name to produce a comment.

    Expects fully-qualified domain names with a trailing dot
    (e.g. ``'img.akamaized.net.'``). Returns a short provider
    description, or None when the domain matches no known provider.
    """
    if domain.endswith((
            '.akamaiedge.net.',
            # Bug fix: pattern previously lacked the trailing dot, so
            # FQDNs (which all end in '.') could never match it.
            '.akamaized.net.',
            '.edgekey.net.',
            '.static.akamaitechnologies.com.')):
        return 'Akamai CDN'
    if domain.endswith('.amazonaws.com.'):
        return 'Amazon AWS'
    if domain.endswith('.cdn.cloudflare.net.'):
        return 'Cloudflare CDN'
    if domain.endswith('.mail.gandi.net.') or domain == 'webmail.gandi.net.':
        return 'Gandi mail hosting'
    if domain == 'webredir.vip.gandi.net.':
        return 'Gandi web forwarding hosting'
    if domain == 'dkim.mcsv.net.':
        return 'Mailchimp mail sender'
    if domain.endswith('.azurewebsites.net.'):
        return 'Microsoft Azure hosting'
    if domain.endswith('.lync.com.'):
        return 'Microsoft Lync'
    if domain == 'clientconfig.microsoftonline-p.net.':
        # https://docs.microsoft.com/en-gb/office365/enterprise/external-domain-name-system-records
        return 'Microsoft Office 365 tenant'
    if domain.endswith(('.office.com.', '.office365.com.')):
        return 'Microsoft Office 365'
    if domain.endswith('.outlook.com.'):
        return 'Microsoft Outlook mail'
    if domain in ('redirect.ovh.net.', 'ssl0.ovh.net.'):
        return 'OVH mail provider'
    if domain.endswith('.hosting.ovh.net.'):
        return 'OVH shared web hosting'
    if domain.endswith('.rev.sfr.net.'):
        return 'SFR provider'
    return None
|
def suite_in_func(x):
    """
    >>> suite_in_func(True)
    (42, 88)
    >>> suite_in_func(False)
    (0, 0)
    """
    if x:
        return 42, 88
    return 0, 0
|
def hammingDistance(this: list, that: list):
    """
    Count the cells at which two 3x3 grids differ.
    """
    size = 3
    # Summing booleans yields the number of mismatched positions.
    return sum(
        this[row][col] != that[row][col]
        for row in range(size)
        for col in range(size)
    )
|
def interpolate(x1, y1, x2, y2, current_step, total_steps):
    """Interpolates between two 2d points.
    Args:
        x1, y1, x2, y2: coords representing the two 2d points
        current_step, total_steps: ints representing the current progress (example 2, 10 represents 20%)
    Returns:
        2-float tuple representing a 2d point
    """
    # Per-step deltas; the same divide-then-multiply order keeps float
    # results bit-identical to the original.
    step_x = (x2 - x1) / total_steps
    step_y = (y2 - y1) / total_steps
    return x1 + step_x * current_step, y1 + step_y * current_step
|
def count_chars(text: str, include_spaces=False) -> int:
    """
    Count number of characters in a text.
    Arguments:
    ---------
    text: str
        Text whose characters are to be counted.
    include_spaces: bool, default=False
        Count spaces as characters.
    Returns:
    -------
    Number of Characters: int
        Length of text.
    Raises:
    ------
    TypeError
        If text is not a string.
    """
    if not isinstance(text, str):
        raise TypeError("Text must be in string format not {}".format(type(text)))
    if include_spaces:
        return len(text)
    # Spaces removed before counting when they should not contribute.
    return len(text.replace(" ", ""))
|
def compare_repository_dependencies( ancestor_repository_dependencies, current_repository_dependencies ):
    """Determine whether the ancestor repository dependencies are equal to,
    a subset of, or unrelated to the current repository dependencies.

    Each dependency looks like
    ["http://localhost:9009", "emboss_datatypes", "test", "ab03a2a5f407"]
    (tool shed, repository name, owner, changeset revision).
    """
    # More ancestor entries than current entries can be neither equal nor subset.
    if len( ancestor_repository_dependencies ) > len( current_repository_dependencies ):
        return 'not equal and not subset'
    for ancestor_tup in ancestor_repository_dependencies:
        a_shed, a_name, a_owner, a_revision = ancestor_tup
        # The ancestor entry must appear (field by field) somewhere in current.
        matched = any(
            c_shed == a_shed and c_name == a_name and c_owner == a_owner and c_revision == a_revision
            for c_shed, c_name, c_owner, c_revision in current_repository_dependencies
        )
        if not matched:
            return 'not equal and not subset'
    if len( ancestor_repository_dependencies ) == len( current_repository_dependencies ):
        return 'equal'
    return 'subset'
|
def filter_rule_ids(all_keys, queries):
    """
    From a set of queries (a comma separated list of queries, where a query is
    either a rule id or a substring thereof), return the set of matching keys
    from all_keys. When queries is the literal string "all", return all keys.
    """
    if not queries:
        return set()
    if queries == 'all':
        return set(all_keys)
    query_parts = queries.split(',')
    # Iterate the (presumably much longer) key collection once, testing each
    # comma-separated query as a substring of the key. Returning a set both
    # deduplicates and deliberately discards query ordering.
    return {
        key
        for key in all_keys
        for query in query_parts
        if query in key
    }
|
def match_candidates_by_order(images, max_neighbors):
    """Find candidate matching pairs by sequence order.

    Args:
        images: ordered sequence of image identifiers.
        max_neighbors: total neighborhood size; each image is paired with up
            to (max_neighbors + 1) // 2 neighbors on each side.

    Returns:
        set of sorted (image_a, image_b) tuples; empty when
        max_neighbors <= 0.
    """
    # Removed leftover debug print('3333...') statement.
    if max_neighbors <= 0:
        return set()
    # Half-window considered on each side of the current image.
    n = (max_neighbors + 1) // 2
    pairs = set()
    for i in range(len(images)):
        a = max(0, i - n)
        b = min(len(images), i + n)
        for j in range(a, b):
            if i != j:
                # Sorting makes the pair orientation-independent.
                pairs.add(tuple(sorted((images[i], images[j]))))
    return pairs
|
def parse_req(req):
    """Extract the package name from a pinned requirement line.

    WARNING: This depends on each line being structured like ``A==1``;
    a line without exactly one ``==`` raises ValueError.

    :param req: requirement line, e.g. ``A==1`` (surrounding whitespace OK)
    :return: the package name (``A``)
    """
    # Tuple unpacking keeps the strict "exactly one ==" contract; the
    # version part is intentionally discarded (was a typo'd unused local).
    package, _version = req.strip().split('==')
    return package
|
def dirquery(obj, pattern=None, case_sensitive=False):
    """
    Search dir(obj) for attribute names matching a regex pattern.
    Parameters
    ----------
    pattern : string
    obj : python object
    case_sensitive : bool
    """
    attributes = dir(obj)
    if pattern is None:
        return attributes
    import re
    flags = 0 if case_sensitive else re.IGNORECASE
    # Keep every attribute whose name the pattern matches anywhere.
    return [name for name in attributes if re.search(pattern, name, flags) is not None]
|
def check_args(names, *inputs):
    """Checks a series of inputs, and renames them and packages them, if they are not None.
    Parameters
    ----------
    names : list of str
        List of names to apply to the given inputs.
    *inputs
        Any input variables to check.
    Returns
    -------
    dict
        A dictionary with the new names and values, for all non None inputs.
    """
    # Bug fix: filter on `is not None` (not truthiness) so valid falsy
    # values such as 0, False or "" are kept, matching the documented
    # "not None" contract.
    return {label: value for label, value in zip(names, inputs) if value is not None}
|
def minDepth(root):
    """Return the length of the shortest root-to-leaf path.

    :type root: TreeNode
    :rtype: int
    """
    if not root:
        return 0
    left, right = root.left, root.right
    if left and right:
        # Both subtrees exist: take the shallower one.
        return 1 + min(minDepth(left), minDepth(right))
    # At most one child: recurse into whichever subtree exists (or None).
    return 1 + minDepth(left or right)
|
def convertToGenomicCoordinate( transcriptomic_coordinate, exon_list_genomic, transcript_id ):
    """Map a 1-based transcriptomic coordinate back to a genomic coordinate.

    Args:
        transcriptomic_coordinate: 1-based position along the spliced transcript.
        exon_list_genomic: list of [start, end] genomic intervals (inclusive),
            in transcript order.
        transcript_id: transcript identifier; unused here, kept for the
            caller-facing interface.

    Returns:
        The genomic coordinate, or None when the coordinate falls outside
        every exon.
    """
    # Build each exon's 1-based inclusive [start, end] span in transcript
    # space by accumulating exon lengths. (Dead commented-out debug string
    # statements removed.)
    exon_list_transcriptomic = []
    cursor = 0
    for exon_start, exon_end in exon_list_genomic:
        exon_length = exon_end - exon_start + 1
        exon_list_transcriptomic.append([cursor + 1, cursor + exon_length])
        cursor += exon_length
    for exon_num, (t_start, t_end) in enumerate(exon_list_transcriptomic):
        if t_start <= transcriptomic_coordinate <= t_end:
            # The offset inside the exon translates directly to genomic space.
            return exon_list_genomic[exon_num][0] + transcriptomic_coordinate - t_start
    return None
|
def create_grid(width, height):
    """ Create a two-dimensional grid of specified size, filled with zeros. """
    # Each row is a fresh list so mutating one cell never affects another row.
    return [[0] * width for _ in range(height)]
|
def total_duration(final_note_list):
    """
    To compute total duration from the SCAMP note list supplied
    @param final_note_list: SCAMP notes list [ "Carntic Note", ["Instrument", "Volume", "Duration"], ...]
    @return: total duration
    """
    # Duration is the third field of each note's detail list; the 0.0 start
    # value keeps the result a float even for an empty list.
    return sum((entry[1][2] for entry in final_note_list), 0.0)
|
def leadingZeros(value, desired_digits):
    """
    Given an integer, returns a string representation, padded with [desired_digits] zeros.
    http://www.djangosnippets.org/snippets/543/
    """
    text = str(value)
    # Zeros are prepended only when the value is shorter than requested.
    pad = int(desired_digits) - len(text)
    return "0" * max(pad, 0) + text
|
def _get_field_names_from_dict_list(dict_list):
"""_get_field_names_from_dict_list."""
return list(dict_list[0].keys())
|
def byte_kv_to_utf8(kv):
    """
    :param kv: a dict of byte keys:values.
    :return: a dict of utf-8 keys:values.
    """
    decoded = {}
    for key, values in kv.items():
        # Keys are single byte strings; values are lists of byte strings.
        decoded[key.decode('utf-8')] = [item.decode('utf-8') for item in values]
    return decoded
|
def groupby(function, sequence):
    """Group the elements of *sequence* by the value of *function* applied to each.

    Example:
        >>> groupby(len, ['Alice', 'Bob', 'Charlie', 'Dan', 'Edith', 'Frank'])
        {3: ['Bob', 'Dan'], 5: ['Alice', 'Edith', 'Frank'], 7: ['Charlie']}

    Args:
        function: key function applied to each element.
        sequence: iterable of elements to group.

    Returns:
        dict mapping each key to the list of elements that produced it,
        in encounter order.
    """
    output = {}
    for x in sequence:
        # setdefault replaces the fragile truthiness-based get/append
        # branching; behavior is identical since groups are never empty.
        output.setdefault(function(x), []).append(x)
    return output
|
def coalesce_volume_dates(in_volumes, in_dates, indexes):
    """Sums volumes between the indexes and outputs dates at the indexes.
    in_volumes : original volume list
    in_dates : original dates list
    indexes : list of indexes
    Returns
    -------
    volumes: new volume array
    dates: new dates array
    """
    dates = [in_dates[idx] for idx in indexes]
    # Each bucket spans from its index up to (not including) the next index;
    # the final bucket runs to the end of the volume list.
    boundaries = list(indexes) + [len(in_volumes)]
    volumes = [
        sum(in_volumes[start:end])
        for start, end in zip(boundaries, boundaries[1:])
    ]
    return volumes, dates
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.