content stringlengths 42 6.51k |
|---|
def PIL_box(rectangle):
    """Convert an (x, y, width, height) rectangle to PIL's box format.

    PIL boxes are (left, upper, right, lower) tuples; see
    https://pillow.readthedocs.io/en/stable/handbook/concepts.html#coordinate-system
    """
    left, top, w, h = rectangle
    return (left, top, left + w, top + h)
def remove_none_entries(obj):
    """Recursively drop dict entries whose value is ``None``.

    Nested dicts, lists, tuples and sets are processed as well; any
    other value is returned unchanged.
    """
    if isinstance(obj, dict):
        cleaned = {}
        for key, value in obj.items():
            if value is not None:
                cleaned[key] = remove_none_entries(value)
        return cleaned
    if isinstance(obj, (list, tuple, set)):
        return type(obj)(remove_none_entries(item) for item in obj)
    return obj
def events_to_map(shifts):
    """Group a list of shift dicts by their ``'start_dt'`` value.

    :param shifts: iterable of dicts, each carrying a ``'start_dt'`` key
    :return: dict mapping each start date to the list of shifts starting then
    """
    shift_map = {}
    for shift in shifts:
        # setdefault replaces the original membership test + two branches
        shift_map.setdefault(shift['start_dt'], []).append(shift)
    return shift_map
def flatten(l):
    """Concatenate the sub-lists of ``l`` into one flat list.

    :param l: iterable of iterables
    :return: flat list of all items, in order
    """
    flat = []
    for sub in l:
        flat.extend(sub)
    return flat
def validate_ipaddress_version(ipaddress_version):
    """Validate the IPAddress version for an IPSet.

    Property: IPSet.IPAddressVersion

    :raises ValueError: if the version is not "IPV4" or "IPV6"
    :return: the validated version string, unchanged
    """
    valid_versions = ("IPV4", "IPV6")
    if ipaddress_version in valid_versions:
        return ipaddress_version
    raise ValueError(
        "IPSet IPAddressVersion must be one of: %s" % ", ".join(valid_versions)
    )
def _error_traceback_html(exc_info, traceback):
    """
    Generates error traceback HTML.

    :param tuple exc_info:
        Output of :func:`sys.exc_info` function.
    :param traceback:
        Output of :func:`traceback.format_exc` function.
    :returns:
        HTML page (str) showing the exception value and formatted traceback.
    """
    # {error} is filled with the exception instance (exc_info[1]);
    # {traceback} with the pre-formatted traceback text.
    html = """
    <html>
        <head>
            <title>ERROR: {error}</title>
        </head>
        <body style="font-family: sans-serif">
            <h4>The Authomatic library encountered an error!</h4>
            <h1>{error}</h1>
            <pre>{traceback}</pre>
        </body>
    </html>
    """
    return html.format(error=exc_info[1], traceback=traceback)
def remove_deleted_datasets_from_results(result):
    """Filter removed datasets out of a results payload, in place.

    Only datasets whose ``"removed"`` field is exactly ``False`` are
    kept (entries missing the field are dropped as well).

    Arguments:
        result (dict): results with all datasets under the "results" key.
    Returns:
        dict: the same dict with removed datasets filtered out.
    """
    kept = []
    for dataset in result.get("results"):
        if dataset.get("removed") is False:
            kept.append(dataset)
    result["results"] = kept
    return result
def equal(lst):
    """Return True if all elements of ``lst`` are equal.

    An empty list is considered trivially equal; the original raised
    IndexError on empty input.
    """
    if not lst:
        return True
    return lst.count(lst[0]) == len(lst)
def get_size_format(b, factor=1024, suffix="B"):
    """Format a byte count using binary unit prefixes.

    e.g. 1253656 => '1.20MB', 1253656678 => '1.17GB'

    :param b: number of bytes
    :param factor: scaling factor between successive units
    :param suffix: unit suffix appended after the prefix
    """
    for unit in ("", "K", "M", "G", "T", "P", "E", "Z"):
        if b < factor:
            return f"{b:.2f}{unit}{suffix}"
        b /= factor
    # anything past zetta falls through to yotta
    return f"{b:.2f}Y{suffix}"
def zoom_fit(screen, bounds, balanced=True):
    """Compute the affine transform that zoom-fits items into a screen.

    screen: (w, h) of the viewing region
    bounds: (x, y, w, h) of the items to fit
    balanced: if True, use one uniform scale for both axes
    returns: [translate x, translate y, scale x, scale y]
    """
    screen_w, screen_h = screen
    x, y, w, h = bounds
    sx = screen_w / w
    sy = screen_h / h
    if balanced:
        # uniform scale: pick the tighter axis so everything fits
        sx = sy = min(sx, sy)
    return [-x * sx, -y * sy, sx, sy]
def get_symbol_freq_in_senteces(symbol, sentences):
    """Count characters equal to ``symbol`` across all words of all sentences.

    :param symbol: the character whose frequency is computed
    :param sentences: list of sentences (each a sequence of words)
    :return: total number of occurrences of ``symbol`` in the search space
    """
    return sum(
        1
        for sentence in sentences
        for word in sentence
        for char in word
        if char == symbol
    )
def trial_division(n):
    """[Trial Division](https://en.wikipedia.org/wiki/Trial_division)

    Arguments:
        n (Integer): Number to factor
    Returns:
        Array: prime factors of n (with multiplicity), all ints.

    Uses floor division; the original ``/=`` turned n into a float, so
    the final residual factor came back as e.g. ``3.0`` instead of ``3``.
    """
    factors = []
    while n % 2 == 0:
        factors.append(2)
        n //= 2
    f = 3
    while f * f <= n:
        if n % f == 0:
            factors.append(f)
            n //= f
        else:
            # only odd candidates are possible once the 2s are stripped
            f += 2
    if n > 1:
        # leftover n is itself prime
        factors.append(n)
    return factors
def decode_conditions(conditions: str):
    """
    Decode the conditions from a JCN mnemonic to a decimal value.

    Each condition letter maps to one bit: I=8, A=4, C=2, T=1; the
    result is the sum of the bits for the letters present.

    Parameters
    ----------
    conditions: str, mandatory
        List of a maximum of 4 conditions

    Returns
    -------
    int_conditions: int
        Integer value of the conditions
    """
    bit_values = {'I': 8, 'A': 4, 'C': 2, 'T': 1}
    return sum(value for letter, value in bit_values.items()
               if letter in conditions)
def dirname(p):
    """Returns the directory component of a pathname."""
    cut = p.rfind('/') + 1
    head = p[:cut]
    # keep a root made purely of slashes (e.g. '/', '//') as-is,
    # otherwise strip the trailing separator(s)
    if head and set(head) != {'/'}:
        head = head.rstrip('/')
    return head
def calculate_distance(a, b):
    """Calculate the Euclidean (straight-line) distance between two 3-D points.

    NOTE(review): the original docstring said "manhattan", but the code
    computes the Euclidean distance.

    :type a: tuple
    :param a: the first point on the board (3 coordinates)
    :type b: tuple
    :param b: the second point on the board
    :rtype: float
    """
    squared = sum((a[axis] - b[axis]) ** 2 for axis in range(3))
    return squared ** 0.5
def parse_tags(targs):
    """Parse tag arguments in the form ``key:value`` or bare ``value``.

    Each entry becomes a ``'tag:<key>'`` dict key; a bare value maps to
    an empty string. Only the first two colon-separated fields are used.
    """
    tags = {}
    for raw in targs:
        parts = raw.split(':')
        value = parts[1] if len(parts) > 1 else ''
        tags['tag:' + parts[0]] = value
    return tags
def _to_11_bit(data):
"""
Convert a bytearray to an list of 11-bit numbers.
Args:
data (bytes): bytearray to convert to 11-bit numbers
Returns:
int[]: list of 11-bit numbers
"""
buffer = 0
num_of_bits = 0
output = []
for i in range(len(data)):
buffer |= data[i] << num_of_bits
num_of_bits += 8
if num_of_bits >= 11:
output.append(buffer & 2047)
buffer = buffer >> 11
num_of_bits -= 11
if num_of_bits != 0:
output.append(buffer & 2047)
return output |
def translate_es_name(name):
    """Map an Elasticsearch field name to its PostgreSQL counterpart.

    '@timestamp' becomes 'timestamp'; dashes become underscores.
    """
    return 'timestamp' if name == '@timestamp' else name.replace('-', '_')
def search_disabled(request):
    """Facility for disabling search functionality.

    Currently always reports search as enabled; kept as a hook so
    search can later be switched off automatically if the search
    backend goes down.
    """
    return {'SEARCH_DISABLED': False}
def grubler_f(m, n, j, *f_i):
    """Grubler's formula (2.4): degrees of freedom (DoF) of a mechanism.

    :param m: DoF of a link. Planar mechanisms: m = 3; spatial: m = 6.
    :param n: Number of links.
    :param j: Number of joints.
    :param f_i: DoF contributed by each joint, one value per joint.
    :return: DoF of the whole mechanism.
    """
    return m * (n - 1 - j) + sum(f_i)
def itoa(num, base=10):
    """Convert an integer to its string representation in another base.

    Essentially the inverse of int(num, base). Handles negatives and —
    unlike the original, which returned '' — zero.

    :param num: integer to convert
    :param base: target base (2..36)
    :return: the representation of num in the given base
    """
    if num == 0:
        return '0'
    alphabet = '0123456789abcdefghijklmnopqrstuvwxyz'
    negative = num < 0
    if negative:
        num = -num
    out = []
    while num > 0:
        num, rem = divmod(num, base)
        out.append(alphabet[rem])
    if negative:
        out.append('-')
    return ''.join(reversed(out))
def factorial_tail_rec(n):
    """Compute n! in tail-recursive style.

    Python does not perform tail-call optimization, so this form offers
    no efficiency benefit over a loop; recursion is generally considered
    "non-pythonic" and avoided. Kept for illustration.
    """
    def go(k, acc):
        # acc carries the product accumulated so far
        return acc if k < 2 else go(k - 1, k * acc)
    return go(n, 1)
def delve(o, *k):
    """Fetch a value from nested dicts by following the key path ``k``."""
    if len(k) > 1:
        # descend one level and recurse on the remaining path
        return delve(o[k[0]], *k[1:])
    return o[k[0]]
def genSubstr(string, n):
    """Generate every substring of ``string`` of length 1..n."""
    out = []
    last = len(string)
    for start in range(last):
        # substrings may not run past the string or exceed length n
        stop_max = min(start + n, last)
        for stop in range(start + 1, stop_max + 1):
            out.append(string[start:stop])
    return out
def wrap_seq_for_applescript(seq):
    """Wrap a Python sequence in braces and quotes for use in AppleScript."""
    return '{' + ', '.join(f'"{item}"' for item in seq) + '}'
def _s_for(seq):
"""Returns a string of comma-separated %s markers for each
element of the sequence; i.e. returns '%s, %s, %s, %s, %s'
for a 5-element sequence. For SQL queries.
"""
return ', '.join(['%s'] * len(seq)) |
def replace_value_with_choice(data, replacements):
    """Substitute attribute values in ``data`` using choice mappings.

    For each (attribute, choices) pair, any item containing that
    attribute has its value mapped through the choices. Values absent
    from the mapping are left unchanged, except ``None`` which becomes
    the string 'None'. Items are mutated in place.

    :param data: list of dictionaries
    :param replacements: dict of {attribute: choices} to replace
    :return: the (mutated) list of dictionaries
    """
    for attribute, choices in replacements.items():
        mapping = dict(choices)
        for item in data:
            if attribute not in item:
                continue
            current = item[attribute]
            fallback = 'None' if current is None else current
            item[attribute] = mapping.get(current, fallback)
    return data
def rectify(sid):
    """Make the station id match our nomenclature.

    Drops the leading 'K' from 4-character K-prefixed ids.
    """
    if sid.startswith("K") and len(sid) == 4:
        return sid[1:]
    return sid
def cent_from_year(year):
    """Return the century containing the given year (year 0 maps to 1)."""
    if year == 0:
        return 1
    if year > 0:
        return (year - 1) // 100 + 1
    # negative years: symmetric ceiling-style division
    return -(year - 1) // -100
def Root_body(cont_Object_f):
    """Join the non-None "f" values of contained Object objects.

    :param cont_Object_f: The sequence of "f" values of Object objects
        contained in this Root
    :type cont_Object_f: Array
    :return: newline-joined string of the non-None entries
    """
    present = (value for value in cont_Object_f if value is not None)
    return '\n'.join(present)
def div_round_up(x, n):
    """Divide integer ``x`` by integer ``n``, rounding the result up.

    Parameters
    ----------
    x: int
        Numerator
    n: int
        Denominator

    Returns
    -------
    result: int
        Ceiling of x / n.

    Example
    -------
    >>> div_round_up(3, 2)
    2
    """
    # adding n-1 before floor division implements the ceiling
    result = (x + n - 1) // n
    return result
def press_JSME_data_book(t_k):
    """Lithium vapor pressure from an Antoine-equation fit (JSME data book).

    Parameters
    ----------
    t_k : temperature, K

    Returns
    -------
    Vapor pressure, Pa

    References
    ----------
    Antoine equation fit found in: The Japan Society of Mechanical
    Engineers (JSME), JSME Data Book: Heat Transfer, fifth ed.,
    Maruzen, Tokyo, 2009 (in Japanese; not directly available).
    Cited by: Kanemura et al., "Analytical and experimental study of
    the evaporation and deposition rates from a high-speed liquid
    lithium jet", Fusion Engineering and Design 122:176-185, 2017.
    """
    # Antoine coefficients for the fit
    coef_a = 9.94079
    coef_b = -8001.8
    coef_c = 6.676
    exponent = coef_a + coef_b / (t_k + coef_c)
    return 10 ** exponent
def args(**kwargs):
    """Build a list of `growlnotify` CLI arguments from keyword args.

    Single-letter keys become short flags (-s), longer keys long flags
    (--sticky). Boolean values act as on/off switches; any other value
    is appended as the flag's argument.
    """
    cli = []
    for key, value in kwargs.items():
        flag = ("-%s" if len(key) == 1 else "--%s") % key
        if isinstance(value, bool):
            # on/off flag, e.g. -s, --sticky: emit only when True
            if value:
                cli.append(flag)
        else:
            # valued option, e.g. -t "title text", --title "title text"
            cli.extend([flag, str(value)])
    return cli
def generate_partial_word(word, correct_guess_list):
    """Build the hangman display word: guessed letters shown, others '_'.

    Fixes the original's duplicate-append bug: a letter occurring more
    than once in ``correct_guess_list`` was appended once per
    occurrence, corrupting the output.

    :param word: the target word
    :param correct_guess_list: letters guessed correctly so far
    :return: the word with unguessed letters replaced by underscores
    """
    guessed = set(correct_guess_list)
    return ''.join(ch if ch in guessed else '_' for ch in word)
def get_file_id(doc, file_name):
    """
    Helper function to access data when MongoObserver is used.

    Scan ``doc['artifacts']`` for the entry named ``file_name`` and
    return its 'file_id'. Exactly one matching artifact is expected.
    """
    matches = [a for a in doc['artifacts'] if a['name'] == file_name]
    assert len(matches) == 1
    return matches[0]['file_id']
def format_command(command):
    """Format a command (list of arguments) for displaying on screen.

    Arguments containing spaces are wrapped in double quotes.
    """
    quoted = ['"%s"' % arg if ' ' in arg else arg for arg in command]
    return ' '.join(quoted)
def _find_swift_version(args):
"""Returns the value of the `-swift-version` argument, if found.
Args:
args: The command-line arguments to be scanned.
Returns:
The value of the `-swift-version` argument, or None if it was not found in
the argument list.
"""
# Note that the argument can occur multiple times, and the last one wins.
last_swift_version = None
count = len(args)
for i in range(count):
arg = args[i]
if arg == "-swift-version" and i + 1 < count:
last_swift_version = args[i + 1]
return last_swift_version |
def linear(x1, y1, x2, y2):
    """Return slope m and intercept c of the line y = mx + c through two points."""
    slope = (y2 - y1) / (x2 - x1)
    intercept = y1 - slope * x1
    return slope, intercept
def get_mave_csv_schematype_from_exception(exception):
    """Extract the MAVE CSV schema type from an exception message.

    E.g. get 'score' from:
    Error [400]: [...] No such file [...]: '/Users/[...]/9ff4...-score.csv'
    The schema type is the last hyphen-separated token before the final
    '.csv' in the embedded filepath.

    Args:
        exception (str): exception message containing a MAVE CSV filepath.
    Returns:
        (str): the schema type, or "" when no '.csv' segment is present.
    """
    try:
        before_csv = exception.lower().split(".csv")[-2]
        return before_csv.split("-")[-1]
    except IndexError:
        return ""
def _ensure_unicode(text):
"""Return a unicode object for the given text.
Args:
text (bytes or unicode):
The text to decode.
Returns:
unicode: The decoded text.
"""
if isinstance(text, bytes):
text = text.decode('utf-8')
return text |
def _unescape_new_lines(commit_message):
"""
:type commit_message: str
"""
return commit_message.replace('$$', '\n') |
def sumfeat(obj_list, feat):
    """Sum a named attribute over a list of objects.

    Uses ``getattr`` instead of the original ``eval('obj.' + feat)``,
    which executed arbitrary code whenever ``feat`` was untrusted.

    Args:
        obj_list: a list of objects
        feat: a string containing the name of the target attribute
    Returns:
        the sum of the target attribute over the given list of objects
    """
    total = 0
    for obj in obj_list:
        total += getattr(obj, feat)
    return total
def to_tuple(set_sentences):
    """Convert each sentence string to a tuple of its words.

    INPUT: set_sentences, a set of sentence strings
    OUTPUT: the set of corresponding word-tuples
    """
    return {tuple(sentence.split()) for sentence in set_sentences}
def split_board(board_args):
    """Split a concatenated board string into two-character cards.

    The first three cards (flop) are always emitted; the turn and river
    cards are appended only when present. Uses equality instead of the
    original's ``is not ""`` identity comparison, which only worked by
    virtue of CPython's string interning.

    :param board_args: string of 2-char cards, e.g. 'AhKd2c3s5d'
    :return: list of card strings
    """
    board = [board_args[0:2], board_args[2:4], board_args[4:6]]
    turn = board_args[6:8]
    if turn != "":
        board.append(turn)
    river = board_args[8:10]
    if river != "":
        board.append(river)
    return board
def uniform_cdf(x):
    """CDF of the uniform(0, 1) distribution: P(X <= x)."""
    if x < 0:
        return 0
    return x if x < 1 else 1
def parse_command(line):
    """Split given line in command to execute on the remote system and
    commands to pipe/redirect the output to on the host.

    Pipe:
        "ll | grep foo" -> ('ll', 'grep foo')
    Redirect:
        "cat /root/conf > conf" -> ('cat /root/conf', 'cat > conf')
    Pipe found first, redirect part of host command:
        "ll | grep foo > log" -> ('ll', 'grep foo > log')
    """
    before_pipe, _, after_pipe = line.partition('|')
    before_redir, _, after_redir = line.partition('>')
    before_pipe, after_pipe = before_pipe.strip(), after_pipe.strip()
    before_redir, after_redir = before_redir.strip(), after_redir.strip()
    # whichever separator appears first leaves the shorter remote part
    if len(before_pipe) <= len(before_redir):
        return (before_pipe, after_pipe)
    # redirect came first: feed the remote output to `cat > file` locally
    return (before_redir, f'cat > {after_redir}')
def reformat_ssh_key_to_pem_bytes(ssh_key_str: str) -> bytes:
    """
    Reformat a bare RSA private key string into PEM-formatted bytes for
    the github client.

    :param ssh_key_str: utf-8 string without header and footer for the
        github app rsa private key
    :return: pem formatted private key in bytes with header and footer
    """
    # PEM bodies wrap at 64 characters per line
    lines = [ssh_key_str[pos:pos + 64] for pos in range(0, len(ssh_key_str), 64)]
    chunked = '\n'.join(lines)
    pem = f"-----BEGIN RSA PRIVATE KEY-----\n{chunked}\n-----END RSA PRIVATE KEY-----\n"
    return pem.encode("utf-8")
def _get_warmup_factor_at_iter(
method: str, iter: int, warmup_iters: int, warmup_factor: float
) -> float:
"""
Return the learning rate warmup factor at a specific iteration.
See :paper:`ImageNet in 1h` for more details.
Args:
method (str): warmup method; either "constant" or "linear".
iter (int): iteration at which to calculate the warmup factor.
warmup_iters (int): the number of warmup iterations.
warmup_factor (float): the base warmup factor (the meaning changes according
to the method used).
Returns:
float: the effective warmup factor at the given iteration.
"""
if iter >= warmup_iters:
return 1.0
if method == "constant":
return warmup_factor
elif method == "linear":
alpha = iter / warmup_iters
return warmup_factor * (1 - alpha) + alpha
else:
raise ValueError("Unknown warmup method: {}".format(method)) |
def mapValues(key, dict_obj):
    """Apply ``key`` to every value of ``dict_obj``, mutating it in place.

    args:
        key: one-argument function applied to each value
        dict_obj: dict whose values are transformed
    return:
        the same dict, with transformed values

    Example: key = lambda x: -x, obj = {"a": 0, "b": 1}
    -> {"a": 0, "b": -1}
    """
    for name in dict_obj:
        dict_obj[name] = key(dict_obj[name])
    return dict_obj
def check_type(lst, t):
    """Checks if all elements of list ``lst`` are of one of the types in ``t``.

    Note: exact type match (``type(...)``), so subclasses do not count.

    :param lst: list to check
    :param t: list of types the elements may have
    :return: Bool
    """
    return all(type(el) in t for el in lst)
def _err():
"""Connection mock error"""
return 'error', [] |
def enc(x, codec='ascii'):
    """Encode a string for safe inclusion in SGML/XML/HTML.

    The original chained no-op replaces (e.g. ``replace('&', '&')``),
    apparently mangled from entity-escaping code; this restores the
    standard escapes. Bytes input yields '' as before.

    :param x: text to escape (bytes input returns '')
    :param codec: if truthy, encode the escaped result with this codec
        using 'xmlcharrefreplace' (returns bytes); if falsy, return str.
    """
    if isinstance(x, bytes):
        return ''
    # '&' must be escaped first so later entities are not double-escaped
    x = (x.replace('&', '&amp;')
          .replace('>', '&gt;')
          .replace('<', '&lt;')
          .replace('"', '&quot;'))
    if codec:
        x = x.encode(codec, 'xmlcharrefreplace')
    return x
def is_unique_corr(x):
    """Check that ``x`` has no duplicate elements.

    Args:
        x (list): elements to be compared.
    Returns:
        bool: True if all elements of ``x`` are unique, False when
        there are duplicates. (The original docstring stated the
        inverted return value.)
    """
    return len(set(x)) == len(x)
def _GenerateGstorageLink(c, p, b):
"""Generate Google storage link given channel, platform, and build."""
return 'gs://chromeos-releases/%s-channel/%s/%s/' % (c, p, b) |
def valid_band_combo(request):
    """Return True if the requested band combo is valid, False if not.

    Reads band1/band2/band3 from ``request.params``; all three must be
    distinct members of the allowed band set. The original used a bare
    ``except:``; this catches only the failures that a missing (None)
    or non-numeric parameter can raise from int().
    """
    valid = (1, 2, 3, 4, 5, 6, 7, 9)
    try:
        bands = [int(request.params.get('band1')),
                 int(request.params.get('band2')),
                 int(request.params.get('band3'))]
    except (TypeError, ValueError):
        # missing parameter (None) or non-integer text
        return False
    # all three bands must be unique AND individually valid
    unique = len(set(bands)) == 3
    return unique and all(band in valid for band in bands)
def parse_quotes(cmd, quotes=True, string=True):
    """Split a command string into arguments, honoring quoted sections.

    Fixes the original's ``is``/``is not`` comparisons against string
    literals (identity, not equality — they only worked because CPython
    interns short strings) and drops the dead
    ``partition(...)[0] is None`` check (partition never returns None).
    Parsing behaviour is otherwise unchanged: the first quote character
    (single, then double) present in ``cmd`` is used for grouping.

    :param cmd: the raw command string
    :param quotes: when True, treat ' and " as grouping quotes
    :param string: when True, coerce every argument to str
    :raises ValueError: if the quote character appears an odd number of times
    """
    string_literals = ['\'', '\"']
    args = []
    words = cmd
    open_q = False
    if quotes:
        for quote in string_literals:
            if quote in cmd:
                if cmd.count(quote) % 2 != 0:
                    raise ValueError("Invalid input: all quotes need accompanied closing quote")
                while quote in words:
                    head = words.partition(quote)[0]
                    if open_q:
                        # inside a quoted run: keep the chunk verbatim
                        args.append(head)
                        open_q = False
                    else:
                        # outside quotes: split the chunk on whitespace
                        args.extend(head.split())
                        open_q = True
                    words = words.partition(quote)[2]
                if words:
                    args.extend(words.split())
                break
        else:
            # no quote character present at all
            args.extend(words.split())
    else:
        args = words.split()
    if string:
        return [str(arg) for arg in args]
    return args
def char_to_string(ll):
    """Convert a 2-D list of chars (str or bytes) to a 1-D list of strings.

    Each inner sequence is joined into one string — bytes elements are
    decoded first — and surrounding whitespace is stripped.
    https://stackoverflow.com/questions/23618218/numpy-bytes-to-plain-string
    """
    result = []
    for row in ll:
        pieces = [c if type(c) == str else c.decode() for c in row]
        result.append(''.join(pieces).strip())
    return result
def terminals(tree):
    """Return a list of the terminal strings in ``tree``.

    A tree node is a list whose first element is a label and whose
    remaining elements are children; anything that is not a list is a
    terminal. Terminals are returned in left-to-right order.
    """
    found = []
    stack = [tree]
    while stack:
        node = stack.pop()
        if isinstance(node, list):
            # skip the label; reversed keeps left-to-right output order
            stack.extend(reversed(node[1:]))
        else:
            found.append(node)
    return found
def merge_sort(intervals):
    """Sort intervals by lower bound, then merge the overlapping ones.

    An alternative to merge_gaps() that sorts the list before merging;
    should be faster, but the two have not been benchmarked against
    each other yet.
    """
    merged = []
    for current in sorted(intervals, key=lambda pair: pair[0]):
        # sorting guarantees merged[-1][0] <= current[0]
        if merged and current[0] <= merged[-1][1]:
            last = merged[-1]
            # overlap: extend the previous interval in place
            merged[-1] = (last[0], max(last[1], current[1]))
        else:
            merged.append(current)
    return merged
def spacify_number(number):
    """Format a number with a space every three digits (thousands groups).

    The original inserted the separator before the first digit of every
    group, which left a stray trailing space in the result (e.g.
    '1 234 '); this version emits clean groups and keeps a leading
    minus sign attached.
    """
    text = str(number)
    sign = '-' if text.startswith('-') else ''
    digits = text.lstrip('-')
    # group from the right in chunks of three, then restore the order
    reversed_digits = digits[::-1]
    groups = [reversed_digits[i:i + 3] for i in range(0, len(reversed_digits), 3)]
    return sign + ' '.join(groups)[::-1]
def return_name_html(info):
    """Extract the player name from a PBP position/name string.

    In the PBP html the name is in a format like 'Center - MIKE RICHARDS'.
    Some players have a hyphen in their last name, so only the FIRST
    hyphen separates position from name.

    :param info: position and name
    :return: name
    """
    first_hyphen = info.index('-')
    return info[first_hyphen + 1:].strip(' ')
def second_order_derivative(w, x, order=2):
    """Return the value of d2y/dx2 at point x.

    The result is the constant 2*w[0] — presumably for a quadratic
    y = w[0]*x**2 + ... (TODO confirm against callers); ``x`` and
    ``order`` are accepted but unused.
    """
    return w[0] * 2
def sigmoid_backward(dA, cache):
    """
    Partial derivative of a single SIGMOID unit.

    Arguments:
        dA -- post-activation gradient
        cache -- (Z, A), the pre/post-activation matrix
    Returns:
        dZ -- gradient of the cost with respect to Z
    """
    _, A = cache
    # sigmoid'(Z) = A * (1 - A) where A = sigmoid(Z)
    return dA * A * (1 - A)
def tell_domain_tls_name(d):
    """Turn a domain into a TLS secret name (dots -> dashes, wildcard dropped).

    >>> tell_domain_tls_name('*.example.com')
    'example-com'
    >>> tell_domain_tls_name('prometheus.example.com')
    'prometheus-example-com'
    """
    labels = d.split('.')
    if labels[0] == '*':
        del labels[0]
    return '-'.join(labels)
def make_proposals(preferences):
    """ Takes in a dictionary with key equal to a participant and
    value equal to a list of that participant's preferences.
    Function iterates over each participant in order to find and return
    a record of the proposals in the form of a dictionary.
    Each participant proposes in turn using its preference list and
    receives a proposal, accepting only the highest preferred
    proposal using its preference list and rejecting all others.
    Function returns when there are no longer any participants in the
    priority queue (participants left needing to propose) or there are
    no more participants to propose to.

    NOTE(review): ``preferences`` is MUTATED in place — rejected
    pairings are removed from both participants' preference lists.
    Each proposal_record value is a two-slot list:
    [proposed-to, accepted-proposal-from].

    For example:
    Inputs of
    preferences = {
        'A': ['B', 'D', 'C'],
        'B': ['D', 'C', 'A'],
        'C': ['A', 'D', 'B'],
        'D': ['A', 'B', 'C'],
    }
    Outputs =>
    proposal_record = {
        'A': ['D', 'D'],
        'B': ['C', 'C'],
        'C': ['B', 'B'],
        'D': ['A', 'A'],
    }
    """
    proposal_record = {}
    proposers = []  # using non-optimal list here to represent priority queue
    # Create list of proposers and empty proposal_record for each
    for participant in preferences:
        proposers.append(participant)
        proposal_record[participant] = ["", ""]
    while proposers:
        current_proposer = proposers.pop(0)
        # Get current proposer's preference list of proposees
        # (copied, because the underlying list may be mutated below)
        current_proposer_prefs = preferences[current_proposer][:]
        # Propose to each proposee in order until accepted
        for proposee in current_proposer_prefs:
            proposee_prefs = preferences[proposee]
            # lower index == more preferred by the proposee
            current_proposer_ranking = proposee_prefs.index(current_proposer)
            # get proposal_record for proposee and proposer
            proposee_proposal_to, proposee_proposal_from = proposal_record[proposee]
            proposer_proposal_to, proposer_proposal_from = proposal_record[current_proposer]
            # if proposee has not accepted a proposal yet
            if not proposee_proposal_from:
                proposal_record[proposee][1] = current_proposer
                proposal_record[current_proposer][0] = proposee
                break
            # if current proposal is better than accepted proposal
            elif proposee_prefs.index(proposee_proposal_from) > current_proposer_ranking:
                proposal_record[proposee][1] = current_proposer
                proposal_record[current_proposer][0] = proposee
                # Reject previously accepted proposal symmetrically
                # Step 1: reset rejected participant's proposal record
                proposal_record[proposee_proposal_from][0] = ""
                # Step 2: put rejected participant at front of priority queue
                proposers.insert(0, proposee_proposal_from)
                # Step 3: remove rejected pairing symmetrically from the preference list
                preferences[proposee].remove(proposee_proposal_from)
                preferences[proposee_proposal_from].remove(proposee)
                break
            # Otherwise, proposee prefers previously accepted proposal
            else:
                # Update preference lists for rejected proposal
                preferences[proposee].remove(current_proposer)
                preferences[current_proposer].remove(proposee)
    return proposal_record
def from_hex(hex_data: str, delimiter: str = " ") -> bytes:
    """Convert a (possibly delimited) hexadecimal byte string into bytes."""
    if delimiter:
        pairs = hex_data.split(delimiter)
    else:
        # no delimiter: consume the string two hex digits at a time
        pairs = [hex_data[pos:pos + 2] for pos in range(0, len(hex_data), 2)]
    return bytes(int(pair, 16) for pair in pairs)
def symbol_transform(symbol):
    """Transform a 'code.exchange' symbol into XueQiu's joined, reversed form."""
    parts = symbol.split('.')
    parts.reverse()
    return ''.join(parts)
def cleanup_code(content):
    """Automatically removes Markdown code-block formatting from a string.

    A surrounding ```lang ... ``` fence is stripped by dropping its
    first and last lines; otherwise stray backticks/whitespace are
    trimmed from both ends.
    """
    if content.startswith('```') and content.endswith('```'):
        lines = content.split('\n')
        return '\n'.join(lines[1:-1])
    return content.strip('` \n')
def _calculate_positives_negatives(target_details):
"""
Takes expected and actual target values, generating true and false positives and negatives,
including the actual correct # of positive and negative values.
"""
true_positive = 0
true_negative = 0
false_negative = 0
false_positive = 0
actual_positive = 0
actual_negative = 0
for idx in range(len(target_details)):
predicted_target = target_details[idx]["predicted_target"]
expected_target = target_details[idx]["expected_target"]
if expected_target == 1:
actual_positive = actual_positive + 1
else:
actual_negative = actual_negative + 1
if predicted_target == 1 and expected_target == 1:
true_positive = true_positive + 1
elif predicted_target == 0 and expected_target == 0:
true_negative = true_negative + 1
elif predicted_target == 1 and expected_target == 0:
false_positive = false_positive + 1
elif predicted_target == 0 and expected_target == 1:
false_negative = false_negative + 1
return {
"true_positive": float(true_positive),
"false_positive": float(false_positive),
"actual_positive": float(actual_positive),
"true_negative": float(true_negative),
"false_negative": float(false_negative),
"actual_negative": float(actual_negative),
} |
def rgba2hex(rgba):
    """Convert an RGBA tuple (channels in 0..1) to a '#rrggbb' hex string.

    The alpha channel is dropped; each channel is scaled to 0-255 and
    clamped to that range.
    """
    channels = []
    for value in rgba[:3]:
        scaled = int(255 * value)
        channels.append(min(max(scaled, 0), 255))
    return "#%02x%02x%02x" % tuple(channels)
def flatten(arr):
    """
    Flatten the given 2D array into a single list.

    NOTE(review): this re-defines the `flatten` helper declared earlier
    in this file; the later definition wins at import time.
    """
    flat = []
    for inner in arr:
        flat += inner
    return flat
def get_training_or_validation_split(samples, labels, validation_split, subset):
    """Potentially restrict samples & labels to a training or validation split.

    # Arguments
        samples: List of elements.
        labels: List of corresponding labels.
        validation_split: Float, fraction of data to reserve for validation.
        subset: Subset of the data to return.
            Either "training", "validation", or None.
            If None, we return all of the data.

    # Returns
        tuple (samples, labels), potentially restricted to the specified subset.
    """
    if not validation_split:
        return samples, labels
    num_val_samples = int(validation_split * len(samples))
    if subset == "training":
        print("Using %d files for training." % (len(samples) - num_val_samples,))
        # Guard num_val_samples == 0: samples[:-0] would return an
        # EMPTY list instead of the full training set (original bug).
        if num_val_samples:
            samples = samples[:-num_val_samples]
            labels = labels[:-num_val_samples]
    elif subset == "validation":
        print("Using %d files for validation." % (num_val_samples,))
        # Symmetric guard: samples[-0:] would return EVERYTHING.
        if num_val_samples:
            samples = samples[-num_val_samples:]
            labels = labels[-num_val_samples:]
        else:
            samples, labels = samples[:0], labels[:0]
    elif subset is None:
        # Documented contract: subset=None returns all of the data
        # (the original raised ValueError here despite its docstring).
        return samples, labels
    else:
        raise ValueError(
            '`subset` must be either "training" '
            'or "validation", received: %s' % (subset,)
        )
    return samples, labels
def get_link_color(link_loss: int) -> str:
    """
    Return the failure color code for a packet-loss percentage.

    Exact multiples of 10 map directly onto the palette; values in
    between round up to the next (redder) shade. Values outside the
    covered range yield "".
    """
    palette = (
        "#ffffff", "#ffeeee", "#ffdddd", "#ffcccc", "#ffbbbb", "#ffaaaa",
        "#ff8888", "#ff6666", "#ff4444", "#ff2222", "#ff0000",
    )
    result = ""
    for step, shade in enumerate(palette):
        lower = float(step * 10)
        if link_loss == lower:
            result = shade
        elif lower < link_loss < float((step + 1) * 10):
            # in-between losses take the next, redder shade
            result = palette[step + 1]
    return result
def proteins_from_rf(aa_seq):
    """Collect every protein readable from an amino-acid sequence.

    A new candidate protein opens at each 'M' (START); all open
    candidates grow with every residue until a '_' (STOP) commits them.
    Candidates never terminated by a STOP are discarded.
    """
    open_candidates = []
    finished = []
    for aa in aa_seq:
        if aa == "_":
            # STOP: commit everything currently open
            finished.extend(open_candidates)
            open_candidates = []
            continue
        if aa == "M":
            # START: open a fresh candidate (it receives this 'M' below)
            open_candidates.append("")
        open_candidates = [partial + aa for partial in open_candidates]
    return finished
def flatten_one_level(iterable, is_nested, get_nested):
    """
    Returns [get_nested(e) if is_nested(e) else e for e in iterable]
    """
    out = []
    for element in iterable:
        out.append(get_nested(element) if is_nested(element) else element)
    return out
def calc_dDdc_fn(c, dc, D_fn):
    """Estimate dD/dc at concentration ``c`` via a forward difference.

    :param c: concentration [kg/m^3]
    :param dc: finite-difference step size
    :param D_fn: callable estimating diffusivity D(c) [m^2/s]
    :return: dD/dc [m^2/s / kg/m^3]
    """
    # forward difference: (D(c + dc) - D(c)) / dc
    return (D_fn(c + dc) - D_fn(c)) / dc
def update_full_dict_projections(nodes, full_dict_projections, t_dict_proj):
    """
    Refresh the full embedding dict with this snapshot's embeddings.

    ``full_dict_projections`` maps each node to its embedding from the
    latest time stamp it has appeared in; every node in ``nodes`` gets
    its entry overwritten with (or first set to) the embedding in
    ``t_dict_proj``.

    :param nodes: Nodes of current snapshot
    :param full_dict_projections: The current full_dict_projections
    :param t_dict_proj: embedding dict of time t
    :return: Updated full_dict_projections
    """
    for node in nodes:
        # insert-or-overwrite: the original's two branches did the same thing
        full_dict_projections[node] = t_dict_proj[node]
    return full_dict_projections
def _get_domain(domains, domain_spec):
"""A helper routine to find materials/cells for load_from_hdf5(...)"""
# If domain_spec is an integer, it must be a domain ID
if isinstance(domain_spec, int) and domain_spec in domains:
return domains[domain_spec]
# If domain_spec is a string, it must be a domain name
elif isinstance(domain_spec, str):
for domain_id, domain in domains.items():
if domain_spec == domain.getName():
return domain
# If domain could not be found
return None |
def word_sentiment_feature(word):
    """Return a one-entry feature dict holding the word's first letter."""
    return {"first letter": word[0]}
def find_overlap(ls_one, ls_two):
    """
    Question 8.5: Find overlap (first shared node) of two linked lists
    without cycles.

    Bug fix: the original advanced BOTH lists by the length difference,
    which leaves the relative offset unchanged and raises
    AttributeError (None.next) when lists of unequal length never
    converge. Only the longer list must be advanced.

    Returns the first common node, or None when the lists are disjoint.
    """
    def _length(node):
        # walk to the end, counting nodes
        n = 0
        while node:
            node = node.next
            n += 1
        return n

    len_one = _length(ls_one)
    len_two = _length(ls_two)
    # advance the head of the LONGER list by the length difference
    for _ in range(abs(len_one - len_two)):
        if len_one > len_two:
            ls_one = ls_one.next
        else:
            ls_two = ls_two.next
    # walk in lockstep until the nodes coincide (possibly both None)
    while ls_one is not ls_two:
        ls_one = ls_one.next
        ls_two = ls_two.next
    return ls_one
def _ListToDict(env_vars_list):
"""Converts [{'key': key, 'value': value}, ...] list to dict."""
return {item.key: item.value for item in env_vars_list} |
def _sugar(s):
"""Shorten strings that are too long for decency."""
# s = s.replace("{", "{{").replace("}", "}}")
if len(s) > 30:
return s[:10] + " ... " + s[-10:]
else:
return s |
def json_lookup(json_data, key):
    """
    Given a key "a.b.c", look up json_data['a']['b']['c'].

    Returns None if any key along the path is missing or the current
    value is not subscriptable. (Narrows the original bare ``except:``
    to the lookup failures actually expected here.)
    """
    current = json_data
    for part in key.split("."):
        try:
            current = current[part]
        except (KeyError, IndexError, TypeError):
            return None
    return current
def node_is_in_list(node, list_of_nodes):
    """Return True when ``node`` itself (compared by identity) is in the list."""
    return any(node is candidate for candidate in list_of_nodes)
def unique(listCheck):
    """Check that all elements of the list are unique.

    Elements are converted to tuples before comparison, so they may be
    arbitrary iterables (e.g. lists). https://stackoverflow.com/a/5281641
    """
    seen = set()
    for element in listCheck:
        key = tuple(element)
        if key in seen:
            return False
        seen.add(key)
    return True
def get_text(obj):
    """Return the ``.text`` of a Beautiful Soup object, or None for None input."""
    if obj is None:
        return None
    return obj.text
def is_blank(value: str) -> bool:
    """Return True when `value` is None, empty, or whitespace-only.

    Arguments: string tested
    """
    if not value:
        # Covers None and the empty string.
        return True
    return value.strip() == ""
def print_alignment(match, test_instance, gold_instance, flip=False):
    """Render a node-to-node alignment as a space-separated string.

    Args:
        match: current match, a list where match[i] is the index into
            gold_instance aligned with test_instance[i], or -1 for no
            alignment.
        test_instance: instance triples of AMR 1.
        gold_instance: instance triples of AMR 2.
        flip: when True, print the gold side before the test side.

    Returns:
        "name(value)-name(value)" pairs joined by spaces; an unaligned
        test node is paired with the literal "Null".
    """
    def _fmt(triple):
        # A triple is (_, name, value); render as "name(value)".
        return triple[1] + "(" + triple[2] + ")"

    result = []
    for i, m in enumerate(match):
        # Collapse the original's four duplicated branches into one
        # left/right pairing plus an optional flip.
        left = _fmt(test_instance[i])
        right = "Null" if m == -1 else _fmt(gold_instance[m])
        if flip:
            result.append(right + "-" + left)
        else:
            result.append(left + "-" + right)
    return " ".join(result)
def _valid_spatial(validation_method, value):
"""
Executes a given validation method and returns whether or not any validation
messages were generated.
:param function validation_method: The validation method to execute
:param Any value: The value to validate
:rtype: bool
"""
errors = {'value': []}
validation_method('value', {'value': value}, errors, {})
return len(errors['value']) == 0 |
def SplitProcessor_FindCleanBreak( sw, vertical, pos, list_panes, bx, by, bw, bh ):
    """
    Finds a split on an axis within the specified bounds, if found returns True, otherwise False.
    This shares an edge case with tmux that is an inherent limitation in the way that tmux works.
    For more information on this edge case, look over the example file "session_unsupported".
    Important note about the clean break algorithm used.  The caller scans all qualifying panes,
    then it uses each qualifying side as a base from which it calls this function.  Here we scan
    all qualifying panes to complete a match (see scanline).  If the result is a clean break,
    this function returns True, and the caller has the location of the break.  While there's room
    for optimization (probably best ported to C++, where the scanline technique will be really
    fast), it probably isn't needed since it's likely to be adequate even on embedded systems.

    Args (inferred from usage in this function only — confirm against callers):
        sw: settings dict; this function reads sw['scanline'], sw['verbose']
            and calls sw['print'] for diagnostics.
        vertical: truthy for a vertical split (which produces a horizontal
            scanline), falsy for a horizontal split.
        pos: axis position of the candidate break.
        list_panes: pane dicts with 'x', 'y', 'w', 'h', 'n' keys; panes that
            were already processed carry an 's' key and are skipped.
        bx, by, bw, bh: x, y, width, height bounds of the region being split.

    Returns:
        True when the shared edges of qualifying panes fully cover the
        scanline (a clean break exists at `pos`), otherwise False.
    """
    #-----------------------------------------------------------------------------------------------
    #
    # Outline: Clean Break Algorithm (1.0.1)
    # ~ Establish pointers
    # ~ Initialize scanline, used for detecting a clean break spanning multiple panes
    # ~ For each qualifying pane that has a shared edge
    # ~ If shared edge overlaps, add it to the scanline
    # ~ If scanline has no spaces, then a clean break has been found, return True
    # ~ Nothing found, return False
    #
    #-----------------------------------------------------------------------------------------------
    # Notify user
    if sw['scanline'] and sw['verbose'] >= 3:
        sw['print']("(3) Scanline: Find clean " + [ "horizontal", "vertical" ][vertical] + \
            " break at position " + str(pos))
    # ~ Establish pointers
    if vertical: sl_bgn, sl_siz = bx, bw # Vertical split is a horizontal line
    else: sl_bgn, sl_siz = by, bh # Horizontal split is a vertical line
    # ~ Initialize scanline, used for detecting a clean break spanning multiple panes
    scanline = list(' ' * sl_siz) # Sets the scanline to spaces (used as a disqualifier)
    # ~ For each qualifying pane that has a shared edge
    for pane in list_panes:
        # Disqualifiers
        if 's' in pane: continue # Processed panes are out of bounds, all its edges are taken
        if pane['y'] >= by+bh or pane['y']+pane['h'] <= by: continue # Fully out of bounds
        if pane['x'] >= bx+bw or pane['x']+pane['w'] <= bx: continue # Fully out of bounds
        if vertical and pane['y'] != pos and pane['y']+pane['h'] != pos: continue # No alignment
        if not vertical and pane['x'] != pos and pane['x']+pane['w'] != pos: continue # No alignment
        # ~ If shared edge found, add it to the scanline
        if vertical: sl_pos, sl_len = pane['x'], pane['w'] # Vertical split is a horizontal line
        else: sl_pos, sl_len = pane['y'], pane['h'] # Horizontal split is a vertical line
        if sl_pos < sl_bgn: sl_len -= sl_bgn - sl_pos ; sl_pos = sl_bgn # Clip before
        if sl_pos + sl_len > sl_bgn + sl_siz: sl_len = sl_bgn + sl_siz - sl_pos # Clip after
        for n in range( sl_pos - sl_bgn, sl_pos - sl_bgn + sl_len ): scanline[n] = 'X'
        # Show the scanline in action
        if sw['scanline'] and sw['verbose'] >= 3:
            sw['print']("(3) Scanline: [" + "".join(scanline) + "]: modified by pane " + pane['n'])
    # ~ If scanline has no spaces, then a clean break has been found, return True
    if not ' ' in scanline: return True
    # ~ Nothing found, return False
    return False
def get_rain_svg(risk_of_rain: float) -> str:
    """
    Get SVG path to risk of rain icon
    :param risk_of_rain: Risk of rain between 0 and 1
    :return: path to SVG resource
    """
    # Below the 0.33 threshold we show sunglasses, otherwise an umbrella.
    return 'Icons/Shades.svg' if risk_of_rain < 0.33 else 'Icons/Umbrella.svg'
def _merge(left_list, right_list):
"""Merge two lists with the result being sorted using __lt__
operator / method
"""
final = [] # k3
# index1, index2 for left and right lists respectively
i1, i2 = 0, 0 # k4
for _ in range(len(left_list) + len(right_list)): # n
# get value from left, if none avail, extend final w/ remaining values
try:
left = left_list[i1] # k5
except IndexError:
final.extend(right_list[i2:]) # k6
break
# get value from right, if none avail, extend final w/ remaining values
try:
right = right_list[i2] # k7
except IndexError:
final.extend(left_list[i1:]) # k8
break
# compare the values to have the smaller value come first
if left < right: # k9
final.append(left) # k10
i1 += 1 # k11
else:
final.append(right) # k12
i2 += 1 # k13
return final # k14 |
def flatten(data):
    """
    Flattens the given data structure.

    Lists are flattened element-wise, dicts are flattened over their
    values, and anything else is treated as a leaf.

    Args:
        data: a list, a dict, or a leaf value.

    Returns:
        list[str]
    """
    flat = []
    # Use plain loops here: the original ran list comprehensions purely
    # for their .extend() side effect, building throwaway lists of None.
    if isinstance(data, list):
        for item in data:
            flat.extend(flatten(item))
    elif isinstance(data, dict):
        for value in data.values():
            flat.extend(flatten(value))
    else:
        flat.append(data)
    return flat
def update(event, context):
    """
    Place your code to handle Update events here
    To return a failure to CloudFormation simply raise an exception, the exception message will be sent to CloudFormation Events.
    """
    # Echo back the existing physical resource id with a stub payload.
    return event['PhysicalResourceId'], {'blah': 'update'}
def Likelihood(evidence, hypo):
    """Computes the likelihood of the evidence assuming the hypothesis is true.
    Args:
        evidence: a tuple of (number of heads, number of tails)
        hypo: float probability of heads
    Returns:
        probability of tossing the given number of heads and tails with a
        coin that has p probability of heads
    """
    heads, tails = evidence
    # Binomial likelihood kernel: p^heads * (1-p)^tails.
    return hypo ** heads * (1 - hypo) ** tails
def nested_counter(nested_list, inner_index, outer_index):
    """
    Return the flat counter of a position inside a nested list.

    Usage in template:
    {% load list_to_columns %}
    {% for outer_loop in nested_list %}
    {% for inner_loop in outer_loop %}
    {{ nested_counter nested_list forloop.counter0
    forloop.parentloop.counter0 }}
    {% endfor %}
    {% endfor %}
    Args:
        nested_list: A nested list item.
        inner_index: The current index of the inner loop.
        outer_index: The current index of the outer loop.
    Returns:
        int. The counter.
    """
    total = 0
    for position, sublist in enumerate(nested_list):
        if position == outer_index:
            # Reached the current outer row: add the inner offset and stop.
            total += inner_index
            break
        total += len(sublist)
    return total
def forceIterable(x):
    """Forces a non iterable object into a list, otherwise returns itself
    :param list x: the object
    :return: object as a list
    :rtype: list"""
    # An object is treated as iterable when it exposes __iter__.
    return x if getattr(x, '__iter__', False) else [x]
def match_ot(gold_ote_sequence, pred_ote_sequence):
    """
    calculate the number of correctly predicted opinion target
    :param gold_ote_sequence: gold standard opinion target sequence
    :param pred_ote_sequence: predicted opinion target sequence
    :return: matched number
    """
    # List membership (not a set) is kept so duplicate predictions each
    # count and unhashable targets still work, as in the original.
    return sum(1 for target in pred_ote_sequence if target in gold_ote_sequence)
def check_is_faang(item):
    """
    Function checks that record belongs to FAANG project
    :param item: item to check
    :return: True if item has FAANG project label and False otherwise
    """
    # Guard clause: without a characteristics/project entry there is
    # nothing to inspect.
    if 'characteristics' not in item or 'project' not in item['characteristics']:
        return False
    return any(
        'text' in project and project['text'].lower() == 'faang'
        for project in item['characteristics']['project']
    )
def scale_between(minval, maxval, numStops):
    """Scale a min and max value to an equal-interval domain with
    numStops discrete values.

    Args:
        minval: lower bound of the domain.
        maxval: upper bound of the domain.
        numStops: number of stops to generate.

    Returns:
        A list of numStops values rounded to 2 decimals, starting at
        minval and spaced (maxval - minval) / numStops apart — note that
        maxval itself is NOT included.  With fewer than 2 stops the pair
        [minval, maxval] is returned unchanged.

    Raises:
        ValueError: if maxval is less than minval.
    """
    if numStops < 2:
        return [minval, maxval]
    if maxval < minval:
        # Give callers an actionable message instead of a bare ValueError().
        raise ValueError(
            "maxval (%s) must not be less than minval (%s)" % (maxval, minval))
    interval = float(maxval - minval) / float(numStops)
    return [round(minval + interval * i, 2) for i in range(numStops)]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.