content stringlengths 42 6.51k |
|---|
def sort_by_latest_letter(list_of_strings):
    """Sort strings by their reversed form (last letter is most significant).

    >>> sort_by_latest_letter(["abc", "cab", "bca"])
    ['bca', 'cab', 'abc']
    """
    # Sorting by the reversed string is equivalent to reversing every
    # element, sorting, and reversing each element back.
    return sorted(list_of_strings, key=lambda word: word[::-1])
def normalize(title):
    """
    Normalizes a page title to the database format: spaces become
    underscores and the first character is upper-cased.

    :Parameters:
        title : str
            A page title (may be None or empty)
    :Returns:
        The normalized title; None passes through, "" stays "".

    :Example:
        >>> from mw.lib import title
        >>>
        >>> title.normalize("foo bar")
        'Foo_bar'
    """
    if title is None:
        return title
    if not title:
        return ""
    capitalized = title[0].upper() + title[1:]
    return capitalized.replace(" ", "_")
def emptyIndex(index):
    """
    Determine whether an index is empty.
    An index is a two-level mapping {fieldName: {fieldValue: records}};
    it is empty when it contains no records at all.
    @param index: An index.
    @type index: L{dict}
    @return: true if C{index} is empty, otherwise false.
    """
    if not index:
        return True
    # Bug fixed: dict.iteritems() does not exist in Python 3 (the rest of
    # this file uses Python 3 features such as f-strings), so this always
    # raised AttributeError. Use .values() since the keys were ignored.
    for fieldIndex in index.values():
        for records in fieldIndex.values():
            for _ignore_record in records:
                # At least one record exists -> not empty.
                return False
    return True
def _format_local(local_path, local_is_path):
"""Format a path for log output"""
if local_is_path:
return local_path
else:
# This allows users to set a name attr on their StringIO objects
# just like an open file object would have
return getattr(local_path, 'name', '<file obj>') |
def tensor2scalar(x):
    """Convert a torch.Tensor (or a plain float) to a Python scalar.

    Args:
        x: a torch.Tensor holding a single value, or an already-converted float.
    Returns:
        A Python scalar.
    """
    # Floats pass through untouched; tensors are moved to CPU, detached
    # from the autograd graph, and unwrapped.
    return x if isinstance(x, float) else x.cpu().detach().item()
def float_to_32(value):
    """Convert a float into a 16.16 fixed-point pair.

    Returns (int_part, frac_part): the integer part (16 bit) and the
    fraction part (16 bit) of `value` after scaling by 2**16.
    """
    fixed = int(round(value * 0x10000, 0))
    int_part = (fixed >> 16) & 0xffff
    frac_part = fixed & 0xffff
    return (int_part, frac_part)
def _allows_downcast_fallback(super_class):
"""Get the whether downcast can fallback to a dict or not"""
return getattr(super_class, "__deserialize_downcast_allow_fallback__", False) |
def urn_to_url(urn):
    """Turn a URN into a URL path by replacing ':' separators with '/'.

    None passes through unchanged.
    """
    return None if urn is None else urn.replace(":", "/")
def compress(word):
    """
    Run-length encode a string: each character is followed by its count,
    and adjacent duplicate characters are collapsed into one entry.

    E.g. "aaabb" -> "a3b2".  The empty string returns "".
    """
    result = ""
    if len(word) == 0:
        return result
    count = 1
    for i in range(1, len(word)):
        # Bug fixed: the original compared characters with `is` (identity),
        # which only happens to work for interned 1-char strings in CPython.
        # Equality is the correct comparison.
        if word[i] == word[i - 1]:
            count += 1
        else:
            result = result + word[i - 1] + str(count)
            count = 1
    return result + word[len(word) - 1] + str(count)
def rel_err_rel_var(O1, O2, x1, x2):
    """Estimate of relative error `abs(x2/(x2-x1)*(O1-O2)/O2)`."""
    scale = x2 / (x2 - x1)
    rel_change = (O1 - O2) / O2
    return abs(scale * rel_change)
def binning(experiments, wells=4, prefix='test'):
    """Split a set of input experiments into equal groups.

    Parameters
    ----------
    experiments : list of str
        List of experiment names.
    wells : int
        Number of groups to divide experiments into.
    prefix : str
        Prefix used to build the group names ('{prefix}_W{i}').

    Returns
    -------
    slices : int
        Number of experiments per group.
    bins : dict of list of str
        Group name -> list of experiments in that group.
    bin_names : list of str
        List of group names.
    """
    per_group = int(len(experiments) / wells)
    bins = {}
    bin_names = []
    for idx in range(wells):
        label = '{}_W{}'.format(prefix, idx)
        bins[label] = experiments[idx * per_group:(idx + 1) * per_group]
        bin_names.append(label)
    return per_group, bins, bin_names
def rc4(buffer, key):
    """
    Encrypt / decrypt `buffer` using the RC4 stream cipher (the operation
    is its own inverse).

    Parameters
    ----------
    buffer : bytes
        The byte sequence to encrypt or decrypt.
    key : bytes
        The key used for the cryptographic operation.

    Returns
    -------
    A bytes sequence representing the transformed input.

    More information
    ----------------
    Adapted from http://cypherpunks.venona.com/archive/1994/09/msg00304.html
    """
    # Key-scheduling algorithm (KSA): permute the 256-entry state table.
    state = list(range(256))
    j = 0
    for i in range(256):
        j = (j + state[i] + key[i % len(key)]) % 256
        state[i], state[j] = state[j], state[i]
    # Pseudo-random generation (PRGA) XORed against the input bytes.
    out = bytearray()
    x = y = 0
    for byte in buffer:
        x = (x + 1) % 256
        y = (y + state[x]) % 256
        state[x], state[y] = state[y], state[x]
        keystream = state[(state[x] + state[y]) % 256]
        out.append(byte ^ keystream)
    return bytes(out)
def split_string(instance_string):
    """
    Split a string like 'app_label.model_name-instance_pk' into
    ('app_label.model_name', 'instance_pk').

    Splits on the FIRST '-', so additional dashes inside the pk are kept.
    Raises ValueError when there is no '-' or when it is the first char.
    """
    dash_at = instance_string.index('-')
    if not dash_at:
        # '-' at position 0 would mean an empty content-type part.
        raise ValueError
    content_type_string = instance_string[:dash_at]
    id_string = instance_string[dash_at + 1:]
    return content_type_string, id_string
def positionIf(pred, seq):
    """Return the index of the first element satisfying `pred`, or -1.

    >>> positionIf(lambda x: x > 3, range(10))
    4
    """
    return next((idx for idx, elem in enumerate(seq) if pred(elem)), -1)
def get_split(partition_rank, training=0.7, dev=0.2, test=0.1):
    """
    Map a partition rank in [0, 1] onto a dataset-split label.

    Ranks below `training` go to the training set (label 3); the next
    `dev`-sized band goes to dev (label 4); the remaining `test`-sized
    band goes to test (label 5).
    """
    if partition_rank < training:
        return 3
    remainder = partition_rank - training
    if remainder < dev:
        return 4
    remainder -= dev
    # Whatever is left must fit into the test band.
    assert remainder <= test
    return 5
def print_train_time(start, end, device=None):
    """Print and return the elapsed time between `start` and `end`.

    Args:
        start (float): Start time of computation (preferred in timeit format).
        end (float): End time of computation.
        device: optional device name included in the printed message.
    Returns:
        float: seconds between start and end, rounded to 3 decimals.
    """
    elapsed = end - start
    suffix = f" on {device}" if device else ""
    print(f"\nTrain time{suffix}: {elapsed:.3f} seconds")
    return round(elapsed, 3)
def n_jobs_cap(n_jobs):
    """
    Cap the number of jobs for sklearn tasks on Windows, where more than
    60 jobs trigger a bug:
    https://github.com/scikit-learn/scikit-learn/issues/13354

    Args:
        n_jobs: int or None
    Returns:
        n_jobs, possibly capped to 60 (Windows only).
    """
    unbounded = n_jobs is None or n_jobs < 0
    if unbounded or n_jobs > 60:
        import platform
        if platform.system() == 'Windows':
            if unbounded:
                # Resolve "all cores" to a concrete count, leaving headroom.
                import multiprocessing
                n_jobs = max(multiprocessing.cpu_count() - 2, 1)
            n_jobs = min(n_jobs, 60)
    return n_jobs
def strip_begin_end_public_key(key):
    """
    Strip newline chars and the PEM BEGIN/END PUBLIC KEY armor lines.
    """
    for fragment in ("\n", "-----BEGIN PUBLIC KEY-----", "-----END PUBLIC KEY-----"):
        key = key.replace(fragment, "")
    return key
def parse_problems(lines):
    """Parse a list of lines into problems.

    Each problem starts with a header line "P G" followed by G grudge
    lines; returns a list of (P, [(a, b), ...]) tuples.
    """
    problems = []
    cursor = 0
    while cursor < len(lines):
        P, G = map(int, lines[cursor].split())
        grudges = [
            tuple(map(int, lines[cursor + 1 + k].split()))
            for k in range(G)
        ]
        problems.append((P, grudges))
        cursor += G + 1
    return problems
def reject_info_to_report(starid, reject_info):
    """
    For a given agasc_id, collect all related "reject" log lines.
    Entries whose 'id' does not match `starid` are skipped.
    """
    return [
        f"Not selected stage {entry['stage']}: {entry['text']}"
        for entry in reject_info
        if entry['id'] == starid
    ]
def limit_sub_bbox(bbox, sub_bbox):
    """Clamp `sub_bbox` to lie within `bbox` (both are (minx, miny, maxx, maxy)).

    >>> limit_sub_bbox((0, 1, 10, 11), (-1, -1, 9, 8))
    (0, 1, 9, 8)
    >>> limit_sub_bbox((0, 0, 10, 10), (5, 2, 18, 18))
    (5, 2, 10, 10)
    """
    return (
        max(bbox[0], sub_bbox[0]),
        max(bbox[1], sub_bbox[1]),
        min(bbox[2], sub_bbox[2]),
        min(bbox[3], sub_bbox[3]),
    )
def unicode_double_escape(s: str) -> str:
    """Resolve doubly-escaped unicode sequences (e.g. '\\\\u00e9') in a string.

    Applies the 'unicode-escape' decoding twice; `s` must be ASCII.
    """
    once = s.encode("ascii").decode("unicode-escape")
    return once.encode("ascii").decode("unicode_escape")
def fixedGradient(q, k, dx, U1):
    """Neumann boundary condition: ghost value for a fixed boundary gradient.

    Assumes the resulting gradient at the boundary is fixed; see any
    numerical-analysis textbook for details.

    Return: float
    """
    gradient_term = (q / k) * 2 * dx
    return gradient_term + U1
def missing_formula(*groups, group_labels=None):
    """
    Docstring for function pyKrev.missing_formula
    ====================
    Compare n lists of molecular formula and return, per group, the
    formula that are common to ALL the other groups but absent from it.

    Use
    ----
    missing_formula(list_1,..,list_n)

    Returns a dictionary in which each key corresponds to an input list
    (labelled "group 1...n" by default) and the value is a list of the
    missing formula in that group.

    Parameters
    ----------
    *groups: n lists of molecular formula. Each item should be a formula string.
    group_labels: optional list of label strings, len == len(groups).

    Fixes vs. original: mutable default argument removed; labels are now
    generated for any number of groups (not just 9); a single group no
    longer raises IndexError (it simply has nothing missing).
    """
    if not group_labels:
        group_labels = [f'group {n}' for n in range(1, len(groups) + 1)]
    # Map each label to the set of formula observed in that group.
    formulas = {label: set(group) for label, group in zip(group_labels, groups)}
    missing_molecular_formula = {}
    for label in group_labels:
        other_sets = [s for other, s in formulas.items() if other != label]
        # Formula shared by *all* the other groups.
        common = set.intersection(*other_sets) if other_sets else set()
        missing_molecular_formula[label] = [
            formula for formula in common if formula not in formulas[label]
        ]
    return missing_molecular_formula
def _remove_punctuation(text):
"""
Remove punctuation from a text.
:param text: the text input
:return: the text with punctuation removed
"""
if not hasattr(_remove_punctuation, 'translator'):
import string
_remove_punctuation.translator = str.maketrans('', '', string.punctuation)
return text.translate(_remove_punctuation.translator) |
def validate_status(status):
    """
    Validate status
    :param status: The Status of CloudWatchLogs or S3Logs
    :return: The provided value if valid
    :raises ValueError: when status is not ENABLED or DISABLED
    Property: CloudWatchLogs.Status
    Property: S3Logs.Status
    """
    valid_statuses = ["ENABLED", "DISABLED"]
    if status in valid_statuses:
        return status
    raise ValueError("Status: must be one of %s" % ",".join(valid_statuses))
def make_question(text, tag, webhook, responses=None, conclude_on=None):
    """Build a question dict to ask.

    Args:
        text (str): Question to ask.
        tag (str): Question tag for retrieving results.
        webhook (str): Webhook to listen for results on.
        responses (:obj:`list`, optional): Responses to provide. Defaults to None.
        conclude_on (:obj:`str`, optional): Response the question should conclude on.
    Returns:
        dict: Question to be asked; 'conclude_on' and 'responses' keys are
        only present when the corresponding argument is truthy.
    Examples:
        >>> question = pytill.make_question(
            "This is my cool question?", "cool-q-1", "sweet-web-hook-bro",
            responses=["yes", "no"], conclude_on="Thanks for answering."
            )
    """
    question = {"text": text, "tag": tag, "webhook": webhook}
    optional = (("conclude_on", conclude_on), ("responses", responses))
    for field, value in optional:
        if value:
            question[field] = value
    return question
def null_hurst_measure(measure):
    """Identity Hurst estimator: the slope IS the Hurst parameter.

    Parameters
    ----------
    measure: float
        the slope of the fit using some method.

    Returns
    -------
    H: float
        the Hurst parameter (the measure, coerced to float).
    """
    return float(measure)
def extract_input(event):
    """
    Return event["body"]["data"].

    NOTE(review): assumes event always has a "body" mapping — a missing
    "body" raises AttributeError on the chained .get; confirm with callers.
    """
    body = event.get("body")
    return body.get("data")
def array_plus_array(arr1, arr2):
    """
    Sum of the elements of two arrays.

    :param arr1: an array of integers.
    :param arr2: an array of integers.
    :return: the sum of the elements of both arrays.
    """
    combined = list(arr1) + list(arr2)
    return sum(combined)
def decoded(qscore):
    """Return the Phred ASCII encoding type ('33' or '64') of a FastQ quality string.

    Older FastQ files may use Phred 64 encoding.  The decision is made on
    the first character that is unambiguous for one encoding; '' is
    returned when no character discriminates.
    """
    # Characters unique to each encoding:
    #   chr(33)..chr(63)  ('!'..'?')  only occur in Phred+33 scores;
    #   chr(75)..chr(105) ('K'..'i')  only occur in Phred+64 scores.
    encodings = {chr(c): '33' for c in range(33, 64)}
    encodings.update({chr(c): '64' for c in range(75, 106)})
    for char in qscore:
        if char in encodings:
            return encodings[char]
    return ''
def format_bytes(size, type="speed"):
    """
    Convert a byte count to a human readable string (B/KB/MB/GB/TB),
    with a "/s" suffix for type="speed".  Returns None for other types.
    """
    power = 2 ** 10  # 1024
    labels = {0: 'B', 1: 'KB', 2: 'MB', 3: 'GB', 4: 'TB'}
    magnitude = 0
    while size > power:
        size /= power
        magnitude += 1
    formatted = " ".join((str(round(size, 2)), labels[magnitude]))
    if type == "speed":
        return formatted + "/s"
    if type == "size":
        return formatted
def pad_vocab_to_eight(vocab):
    """Pad a vocabulary so its size is divisible by 8.

    Args:
        vocab (dict): vocabulary in the form token->id
    Returns:
        dict: the same vocab, with filler tokens ('<$0$>', '<$1$>', ...)
        appended if necessary so that len(vocab) % 8 == 0.
    """
    size = len(vocab)
    remainder = size % 8
    if remainder:
        for offset in range(8 - remainder):
            vocab['<$' + str(offset) + '$>'] = size + offset
    return vocab
def format_percent(num):
    """
    Format a percentage: round to the nearest whole number and return an int.
    """
    rounded = round(num)
    return int(rounded)
def input_to_dictionary(input):
    """Convert a Graphene input object into a plain dictionary.

    Iterates the input's keys and copies each value verbatim.
    """
    return {key: input[key] for key in input}
def get_commit_link(repo_name: str, commit_sha: str) -> str:
    """
    Build a GitHub commit URL for manual browser access.

    :param repo_name: full repository name (i.e. `{username}/{reponame}`)
    :param commit_sha: 40 byte SHA1 for a commit
    :return: A commit URL
    """
    return f"https://github.com/{repo_name}/commit/{commit_sha}"
def get_actions_from_policy(data):
    """Given a policy dictionary, create a list of the actions.

    Walks the IAM-style policy's 'Statement' entry (a single statement dict
    OR a list of statement dicts), skips statements with Effect == 'Deny',
    and collects every 'Action' value (string or list of strings).  The
    collected actions are lower-cased and sorted before being returned.

    NOTE(review): on malformed input this calls exit() instead of raising —
    the function terminates the whole process; confirm this is intended.
    """
    actions_list = []
    # Multiple statements are in the 'Statement' list
    # pylint: disable=too-many-nested-blocks
    # NOTE(review): when 'Statement' is a single dict, this loop iterates
    # once per top-level key of that dict and re-processes the same
    # statement each time (duplicating appends) — verify against callers.
    for i in range(len(data['Statement'])):
        try:
            # Statement must be a dict if it's a single statement. Otherwise it will be a list of statements
            if isinstance(data['Statement'], dict):
                # We only want to evaluate policies that have Effect = "Allow"
                # pylint: disable=no-else-continue
                if data['Statement']['Effect'] == 'Deny':
                    continue
                else:
                    try:
                        # Action = "s3:GetObject"
                        if isinstance(data['Statement']['Action'], str):
                            actions_list.append(
                                data['Statement']['Action'])
                        # Action = ["s3:GetObject", "s3:ListBuckets"]
                        elif isinstance(data['Statement']['Action'], list):
                            actions_list.extend(
                                data['Statement']['Action'])
                        elif 'Action' not in data['Statement']:
                            print('Action is not a key in the statement')
                        else:
                            print(
                                "Unknown error: The 'Action' is neither a list nor a string")
                    except KeyError as k_e:
                        print(
                            f"KeyError at get_actions_from_policy {k_e}")
                        exit()
            # Otherwise it will be a list of Sids
            elif isinstance(data['Statement'], list):
                # We only want to evaluate policies that have Effect = "Allow"
                try:
                    if data['Statement'][i]['Effect'] == 'Deny':
                        continue
                    else:
                        if 'Action' in data['Statement'][i]:
                            if isinstance(data['Statement'][i]['Action'], str):
                                actions_list.append(
                                    data['Statement'][i]['Action'])
                            elif isinstance(data['Statement'][i]['Action'], list):
                                actions_list.extend(
                                    data['Statement'][i]['Action'])
                            elif data['Statement'][i]['NotAction'] and not data['Statement'][i]['Action']:
                                print('Skipping due to NotAction')
                            else:
                                print(
                                    "Unknown error: The 'Action' is neither a list nor a string")
                                exit()
                        else:
                            continue
                except KeyError as k_e:
                    print(
                        f"KeyError at get_actions_from_policy {k_e}")
                    exit()
            else:
                print(
                    "Unknown error: The 'Action' is neither a list nor a string")
                # exit()
        except TypeError as t_e:
            print(
                f"TypeError at get_actions_from_policy {t_e}")
            exit()
    # Normalize: lower-case every action (strings only) and sort.
    try:
        actions_list = [x.lower() for x in actions_list]
    except AttributeError as a_e:
        print(actions_list)
        print(f"AttributeError: {a_e}")
    actions_list.sort()
    return actions_list
def recvall(conn, length):
    """Receive exactly `length` bytes from `conn`.

    Returns the accumulated bytes, or the (empty) last read if the peer
    closes the connection before `length` bytes arrive.
    """
    chunks = []
    remaining = length
    while remaining > 0:
        data = conn.recv(remaining)
        if not data:
            # Peer closed early: mirror the empty read back to the caller.
            return data
        chunks.append(data)
        remaining -= len(data)
    return b''.join(chunks)
def manhattan_distance_between(a, b):
    """
    Distance between two 2-D points.

    NOTE(review): despite the name, this returns the Chebyshev distance
    (max of the per-axis absolute differences), not the Manhattan (sum)
    distance — confirm with callers before renaming or changing.
    """
    dx = abs(a[0] - b[0])
    dy = abs(a[1] - b[1])
    return dx if dx > dy else dy
def gt_types_to_binary_comparison(calls):
    """From an array of calls, check if a variant position qualifies as a variant.

    0,1,2,3 == HOM_REF, HET, UNKNOWN, HOM_ALT.
    HET (1) and HOM_ALT (3) map to '1'; everything else to '0'.
    Returns the string of 1s and 0s representing the position.
    """
    return ''.join('1' if call in (1, 3) else '0' for call in calls)
def regroup(x, n):
    """
    Turn a flat list into a list of lists of sublength n
    (the last sublist may be shorter).

    Args:
        x: flat list
        n: sublist len
    Returns: list of lists
    """
    return [x[start:start + n] for start in range(0, len(x), n)]
def has_math(lines):
    """Test if math appears anywhere in the post.

    Any '$' (which also covers '$$' display math) counts as math.
    """
    return any('$' in line for line in lines)
def is_hangul(string):
    """Check whether any character falls in the Hangul syllables block
    (code points 44032-55215). MUST BE IN UNICODE."""
    return any(44032 <= ord(char) <= 55215 for char in string)
def gen_all_permutations(outcomes, length):
    """
    Enumerate the set of all permutations of `outcomes` of the given length.

    Built iteratively; an outcome value is never repeated within one
    sequence (comparison is by value, so duplicate inputs collapse).
    """
    sequences = {()}
    for _ in range(length):
        sequences = {
            partial + (item,)
            for partial in sequences
            for item in outcomes
            if item not in partial
        }
    return sequences
def _bubbled_up_groups_from_units(group_access_from_units):
    """
    Return {user_partition_id: [group_ids]} to bubble up from Units to Sequence.
    This is to handle a special case: If *all* of the Units in a sequence have
    the exact same group for a given user partition, bubble that value up to the
    Sequence as a whole. For example, say that every Unit in a Sequence has a
    group_access that looks like: { ENROLLMENT: [MASTERS] } (where both
    constants are ints). In this case, an Audit user has nothing to see in the
    Sequence at all, and it's not useful to give them an empty shell. So we'll
    act as if the Sequence as a whole had that group setting. Note that there is
    currently no way to set the group_access setting at the sequence level in
    Studio, so course teams can only manipulate it for individual Units.
    """
    # If there are no Units, there's nothing to bubble up.
    if not group_access_from_units:
        return {}
    def _normalize_group_access_dict(group_access):
        # Canonical form so two dicts can be compared with ==:
        # drop partitions whose group list is empty and sort group ids.
        return {
            user_partition_id: sorted(group_ids)  # sorted for easier comparison
            for user_partition_id, group_ids in group_access.items()
            if group_ids  # Ignore empty groups
        }
    normalized_group_access_dicts = [
        _normalize_group_access_dict(group_access) for group_access in group_access_from_units
    ]
    first_unit_group_access = normalized_group_access_dicts[0]
    rest_of_seq_group_access_list = normalized_group_access_dicts[1:]
    # If there's only a single Unit, bubble up its group_access.
    if not rest_of_seq_group_access_list:
        return first_unit_group_access
    # Otherwise, go through the user partitions and groups in our first unit
    # and compare them to all the other group_access dicts from the units in the
    # rest of the sequence. Only keep the ones that match exactly and do not
    # have empty groups.
    common_group_access = {
        user_partition_id: group_ids
        for user_partition_id, group_ids in first_unit_group_access.items()
        if group_ids and all(
            group_ids == group_access.get(user_partition_id)
            for group_access in rest_of_seq_group_access_list
        )
    }
    return common_group_access
def find_x_intercept(gradient: int, y_intercept: int, height: int):
    """
    Find where a line crosses the bottom row of an image.

    :param gradient: gradient of line
    :param y_intercept: y intercept of line
    :param height: height of the image
    :return: x intercept of the line with the bottom of the image,
             or -1 for a horizontal line (gradient == 0)
    """
    if gradient == 0:
        return -1
    bottom_y = height - 1
    return (bottom_y - y_intercept) / gradient
def index_to_slice(idx) -> slice:
    """Convert an integer index into the equivalent one-element slice.

    Args:
        idx: int
            The index.
    Returns:
        slice
            slice(idx, idx + 1) — selects exactly the element at `idx`.
    """
    return slice(idx, idx + 1, None)
def extension_to_ignore(file, ignore_extensions):
    """Check whether a file should be ignored by extension.

    Args:
        file (str): file path or file name
        ignore_extensions (list): extensions to ignore (compared
            case-insensitively against the lowered file name)
    Returns:
        bool: True to ignore, False otherwise.
    """
    if not ignore_extensions:
        return False
    return file.lower().endswith(tuple(ignore_extensions))
def cascaded(*args):
    """ (args:any) -> arg : not None

    Return the first argument that is not None (None if all are, or if
    no arguments were given).
    """
    return next((arg for arg in args if arg is not None), None)
def parse_cmd(script, *args):
    """Return a one-line cmd.exe invocation for a multi-line bat script.

    Lines are joined with '&&' so each statement runs only if the
    previous one succeeded.
    See: http://www.microsoft.com/resources/documentation/windows/xp/all/proddocs/en-us/cmd.mspx?mfr=true
    """
    if args:
        raise Exception('Args for cmd not implemented')
    joined = '&&'.join(script.split('\n'))
    return 'cmd.exe /c "%s"' % joined
def default_eval_func(data):
    """
    Evaluate the graph for accuracy from the current data iteration.

    The default "accuracy" is expected to be the first entry in the list
    provided to eval_names; extra entries are ignored (with a warning).

    :param data: list of (name, value) pairs
    :return: the accuracy value (data[0][1])
    """
    if len(data) > 1:
        print('default evaluation function only expected 1 output, accuracy. Using first datum')
    first_entry = data[0]
    return first_entry[1]
def within_percent(num1, num2, percent: int):
    """Compare two numeric values by percentage difference.

    Parameters
    ----------
    num1 : int or float
    num2 : int or float
    percent : int
        Percentage difference between the two. Mutual difference!

    Returns
    -------
    bool
        True if num1 and num2 are mutually within `percent` of each other.
    """
    lower_val, higher_val = sorted((float(num1), float(num2)))
    factor = float(percent) / 100
    # Same test as lower*(1+f) > higher*(1-f), written the other way round.
    return higher_val * (1 - factor) < lower_val * (1 + factor)
def choose(n, r):
    """
    Return the binomial coefficient nCr.

    Bug fixed: the original returned 1 (an empty product) when r > n or
    r < 0; those cases now correctly return 0.
    """
    if r < 0 or r > n:
        return 0
    # Use the symmetry nCr == nC(n-r) to minimize the number of factors.
    r = min(r, n - r)
    numerator = 1
    denominator = 1
    for i in range(1, r + 1):
        numerator *= n - r + i
        denominator *= i
    return numerator // denominator
def account_info(info):
    """Extract user information from an IdP response.

    Expects 'User.email' and 'User.FirstName' keys whose values are lists;
    only the first element of each is used.
    """
    email = info['User.email'][0]
    first_name = info['User.FirstName'][0]
    return {
        'user': {
            'email': email,
            'profile': {'username': first_name, 'full_name': first_name},
        },
        'external_id': email,
        'external_method': 'onelogin',
        'active': True,
    }
def pure_python_hello(name: str):
    """Return a greeting for `name` (pure-python test helper).

    Args:
        name (str): Just a simple name
    """
    return f"Hello {name}"
def check_de(current_de, list_of_de):
    """Return True when any string in ``list_of_de`` is a substring of
    ``current_de``."""
    return any(de in current_de for de in list_of_de)
def sandhi(inwords):
    """Apply sandhi-style replacements to a string, in order:
    - ' + ' (plus with surrounding spaces) -> ''
    - 'aa' -> 'A'
    - 'ii' -> 'I'
    - 'uu' -> 'U'
    - 'Ru' -> 'R'
    """
    # Order matters: 'uu' must be collapsed before the 'Ru' rule runs.
    replacements = ((' + ', ''), ('aa', 'A'), ('ii', 'I'), ('uu', 'U'), ('Ru', 'R'))
    for old, new in replacements:
        inwords = inwords.replace(old, new)
    return inwords
def check_target(i):
    """
    Check whether a software description supports the given host/target OS.

    Input: {
              dict            - dictionary with info about supported host and target OS
              host_os_uoa     - host OS UOA (already resolved)
              host_os_dict    - host OS dict (already resolved)
              target_os_uoa   - target OS UOA (already resolved)
              target_os_dict  - target OS dict (already resolved)
            }

    Output: {
              return  - return code = 0, if successful
                                    > 0, if error
              (error) - error text if return > 0
            }

    Refactored: the four copy-pasted restriction checks are collapsed into
    one table-driven loop; returned dicts and error strings are unchanged.
    """
    def _has_any_tag(required_tags, os_tags):
        # True when at least one required tag appears in the OS tag list.
        return any(tag in os_tags for tag in required_tags)

    cus = i['dict']
    hosx = i['host_os_uoa']
    hosd = i['host_os_dict']
    tosx = i['target_os_uoa']
    tosd = i['target_os_dict']

    # Exact-UOA restrictions.
    if cus.get('only_for_host_os', []) and hosx not in cus['only_for_host_os']:
        return {'return': 1, 'error': 'host OS is not supported by this software'}
    # Tag (OS family) restrictions.
    only_host_tags = cus.get('only_for_host_os_tags', [])
    if only_host_tags and not _has_any_tag(only_host_tags, hosd.get('tags', [])):
        return {'return': 1, 'error': 'host OS family is not supported by this software'}

    if cus.get('only_for_target_os', []) and tosx not in cus['only_for_target_os']:
        return {'return': 1, 'error': 'target OS is not supported by this software'}
    only_target_tags = cus.get('only_for_target_os_tags', [])
    if only_target_tags and not _has_any_tag(only_target_tags, tosd.get('tags', [])):
        return {'return': 1, 'error': 'target OS family is not supported by this software'}

    return {'return': 0}
def address_group_name(address):
    """
    Return the name of the dataset group from an hdf address
    /entry/[group]/name (backslashes are treated as separators too).

    :param address: str hdf address
    :return: str group name (second-to-last path component)
    """
    parts = address.replace('\\', '/').split('/')
    return parts[-2]
def get_frequency(text: str) -> dict:
    """Return a character -> count frequency map for `text`.

    Doc fixed: the original docstring claimed *word* frequency, but the
    loop iterates over individual characters of `text`.
    """
    freq_dict = {}
    for char in text:
        freq_dict[char] = freq_dict.get(char, 0) + 1
    return freq_dict
def currencyformat(value):
    """Return `value` as a comma-grouped currency string; 0 for non-numbers.

    Bug fixed: the original only *tested* float(value) but then called
    round() on the unconverted input, so a numeric string such as "3.7"
    raised TypeError. The value is now actually converted, and the bare
    `except:` is narrowed to the conversion errors float() can raise.
    """
    try:
        value = float(value)
    except (TypeError, ValueError):
        value = 0
    return '{:0,}'.format(int(round(value)))
def _is_decoy_suffix(pg, suffix='_DECOY'):
"""Determine if a protein group should be considered decoy.
This function checks that all protein names in a group end with `suffix`.
You may need to provide your own function for correct filtering and FDR estimation.
Parameters
----------
pg : dict
A protein group dict produced by the :py:class:`ProtXML` parser.
suffix : str, optional
A suffix used to mark decoy proteins. Default is `'_DECOY'`.
Returns
-------
out : bool
"""
return all(p['protein_name'].endswith(suffix) for p in pg['protein']) |
def multiply_and_round(num: float, factor: float = 100, precision: int = 2) -> float:
    """
    Multiply a value (presumably in [0, 1]) by `factor` and round it.

    :param num: number to multiply and round
    :param factor: multiplying factor (default = 100, to create percentages)
    :param precision: rounding precision
    :return: product rounded with precision
    """
    scaled = num * factor
    return round(scaled, precision)
def noveltyprimes(n):
    """
    Try to factor n by "primes" of the form 31337, 313337, ...
    (see ekoparty 2015 "rsa 2070").
    *** not all numbers in this form are prime but some are (25 digit is prime) ***

    Returns (p, q) on success, None if no candidate up to 25 digits divides n.
    """
    max_digits = 25  # max number of digits in the final integer
    for extra_threes in range(max_digits - 4):
        candidate = int('3133' + '3' * extra_threes + '7')
        if n % candidate == 0:
            return candidate, n // candidate
def init_dict_brackets(first_level_keys):
    """Initialise a one-level dictionary of empty dicts.

    Arguments
    ----------
    first_level_keys : list
        First level data

    Returns
    -------
    one_level_dict : dict
        {key: {}} for every key (each value is a distinct empty dict)
    """
    return {key: {} for key in first_level_keys}
def anagram_checker(str1, str2):
    """
    Check if two strings are anagrams, ignoring spaces and case.

    Args:
        str1(string), str2(string): Strings to be checked
    Returns:
        bool: whether the cleaned strings are anagrams of each other

    Bug fixed: the original only ran the comparison when the *raw* string
    lengths differed, so equal-length anagrams ("listen"/"silent") always
    returned False. The strings are now always cleaned and compared.
    """
    clean_str_1 = str1.replace(" ", "").lower()
    clean_str_2 = str2.replace(" ", "").lower()
    return sorted(clean_str_1) == sorted(clean_str_2)
def decide_flow_name(desc):
    """
    Determine the FlowName from a row description.

    The first matching keyword wins, in this order:
    Production, Consumed, Sales, Losses; otherwise 'None'.

    :param desc: str, row description
    :return: str, flowname for row
    """
    for flow in ('Production', 'Consumed', 'Sales', 'Losses'):
        if flow in desc:
            return flow
    return 'None'
def GetDet3(x, y, z):
    """
    Determinant of the 3x3 matrix whose rows are the vectors x, y, z.
    """
    positive = x[0] * y[1] * z[2] + x[1] * y[2] * z[0] + x[2] * y[0] * z[1]
    negative = x[0] * y[2] * z[1] + x[1] * y[0] * z[2] + x[2] * y[1] * z[0]
    return positive - negative
def replace_if_present_else_append(
        objlist,
        obj,
        cmp=lambda a, b: a == b,
        rename=None):
    """
    Add `obj` to `objlist` if no existing entry matches it; otherwise
    replace the matching entry (`cmp(A, B) == True` means B replaces A).

    Input
    =====
    :objlist, list: list of objects.
    :obj, object: object to add.

    Options
    =======
    :cmp, (bool) cmp(A, B): compares A to B. If True, the objects are
        the same and B should replace A; if False, B is appended.
    :param rename: If not None, a unary function applied to the matched
        entry A; the renamed A is then re-inserted via a recursive call
        (so it, in turn, replaces-or-appends). Raises ValueError when
        renaming does not make A distinct from `obj`.

    Output
    ======
    The list is modified in place; a reference to it is returned.

    Fix vs. original: removed a leftover debug `print(type(objlist))`
    and a stray trailing comment.
    """
    for i in range(len(objlist)):
        # Was a matching object found in the list?
        if cmp(objlist[i], obj):
            if rename is not None:
                newA = rename(objlist[i])
                # The renamed object must be distinct from `obj`,
                # otherwise the recursion below would never terminate.
                if cmp(newA, obj):
                    msg = '`rename` does not make {} unique.'.format(
                        str(objlist[i])[:32])
                    raise ValueError(msg)
                # Replace the original entry with `obj`, then re-insert
                # the renamed original through the same logic.
                objlist[i] = obj
                replace_if_present_else_append(
                    objlist, newA, cmp=cmp, rename=rename)
            else:
                # No renaming requested: simply replace.
                objlist[i] = obj
            # Short circuit: match handled, we're done.
            return objlist
    # No match found: append.
    objlist.append(obj)
    return objlist
def _status_decode(status):
"""Decode a 1 byte status into logical and physical statuses."""
logical_status = (status & 0b00001100) >> 2
physical_status = status & 0b00000011
return (logical_status, physical_status) |
def _format_rotator_mode(value):
"""Format rotator mode, and rais appropriate error if it can't be formatted."""
modes = set(['pa', 'vertical', 'stationary'])
if value.lower() not in modes:
raise ValueError("Rotator mode must be in {!r}".format(modes))
return value.lower() |
def fix_user_permissions(permissions):
    """Expand numeric per-user permission masks into permission dicts.

    Bit 2 (0b100) -> member, bit 1 (0b010) -> spectator,
    bit 0 (0b001) -> manager.
    """
    fixed_permissions = {}
    for user, raw_mode in permissions.items():
        mode = int(raw_mode)
        fixed_permissions[user] = {
            "member": bool(mode & 0b100),
            "spectator": bool(mode & 0b010),
            "manager": bool(mode & 0b001),
        }
    return fixed_permissions
def table_row(k, v, html):
    """Output a key-value pair as a row in a table (HTML or plain text)."""
    if not html:
        return k + '\n' + v + '\n\n'
    return f'<tr><td class="e">{k}</td><td class="v">{v}</td></tr>'
def remove_headers(headers, name):
    """Remove all headers with name *name* (case-insensitive).

    The list is modified in-place and the updated list is returned.
    """
    target = name.lower()
    # Slice assignment keeps the same list object while replacing its
    # contents, matching the original in-place compaction.
    headers[:] = [header for header in headers if header[0].lower() != target]
    return headers
def compute_iou(rec1, rec2):
    """
    Compute the intersection-over-union of two axis-aligned rectangles.

    :param rec1: (y0, x0, y1, x1), which reflects (top, left, bottom, right)
    :param rec2: (y0, x0, y1, x1)
    :return: scalar IoU value; 0 when the rectangles do not overlap
    """
    top1, left1, bottom1, right1 = rec1
    top2, left2, bottom2, right2 = rec2
    # Areas of the individual rectangles.
    area1 = (bottom1 - top1) * (right1 - left1)
    area2 = (bottom2 - top2) * (right2 - left2)
    # Edges of the intersection rectangle.
    inter_left = max(left1, left2)
    inter_right = min(right1, right2)
    inter_top = max(top1, top2)
    inter_bottom = min(bottom1, bottom2)
    # Degenerate or empty intersection -> no overlap.
    if inter_left >= inter_right or inter_top >= inter_bottom:
        return 0
    overlap = (inter_right - inter_left) * (inter_bottom - inter_top)
    return overlap / (area1 + area2 - overlap)
def check_words(text: str, words: list):
    """Check whether *text* contains every word in *words*.

    Each word is tested as a plain substring of *text* (not as a
    whole-word match).  The original summary line ("only contains words
    from the list") described the opposite check and was wrong.

    Args:
    - text: The text to check.
    - words: The words to check for.
    Returns:
    - True if the text contains all the words, False otherwise.
    """
    return all(word in text for word in words)
def beautify_url(url):
    """
    Remove the URL protocol and, if what remains is only a hostname,
    also drop the final '/'.
    """
    # Drop everything up to and including the first '://', if present.
    _scheme, sep, rest = url.partition('://')
    if sep:
        url = rest
    # Strip a trailing slash only when it is the sole slash left.
    if url.endswith('/') and '/' not in url[:-1]:
        url = url[:-1]
    return url
def calculate_factor_values(levels):
    """Calculate values of trial factors.

    Each factor value is 10 raised to its exponent level.

    Parameters
    ----------
    levels : dictionary
        The factor levels of the trial.

    Returns
    -------
    dictionary
        Calculated factor values.
    """
    # 'U', 'K' and 'D' read their exponent from the '<name>_exp' level;
    # 'base_level_fall' uses its own key directly.
    values = {name: 10 ** levels[name + '_exp'] for name in ('U', 'K', 'D')}
    values['base_level_fall'] = 10 ** levels['base_level_fall']
    return values
def isqrt(n):
    """Integer square root of *n* via Newton's method.

    Slightly more efficient than iroot for the special case of r=2.

    Parameters
    ----------
    n : int
        Non-negative integer.

    Returns
    -------
    int
        floor(sqrt(n)).

    Raises
    ------
    ValueError
        If *n* is negative.  (The original silently returned a
        meaningless value, e.g. ``isqrt(-1) == -1``.)
    """
    if n < 0:
        raise ValueError("isqrt() argument must be non-negative")
    x = n
    y = (x + 1) // 2
    while y < x:
        x = y
        y = (x + n // x) // 2
    return x
def get_intersection_union_jaccard(exonmap1, exonmap2):
    """Compute the positional Jaccard index between two exon maps.

    Each exon map is an iterable of (start, end) pairs; every integer
    position in the inclusive range [start, end] is counted.  For maps
    covering 1.3 and 2.4: jaccard = 2/4 = 0.5.

    Returns
    -------
    tuple
        (intersection_size, union_size, jaccard)

    Raises
    ------
    ZeroDivisionError
        If both maps cover no positions (same as the original).

    Note: the original built per-position count dicts, but only
    membership mattered; plain sets give the same results faster.
    """
    def _positions(exonmap):
        # All integer positions covered by any (start, end) exon.
        covered = set()
        for start, end in exonmap:
            covered.update(range(int(start), int(end) + 1))
        return covered

    pos1 = _positions(exonmap1)
    pos2 = _positions(exonmap2)
    union_sum = len(pos1 | pos2)
    intersection_sum = len(pos1 & pos2)
    return (intersection_sum, union_sum, intersection_sum / union_sum)
def parent_pk_kwarg_name(value):
    """More meaningful parent path variable name
    and compatible with drf-spectacular."""
    return "{}_id".format(value)
def sort_result_artifact_filenames(list_of_artifact_filenames):
    """Sort the result artifact filenames.

    Sorts the given list of result filenames by parameter index, assumed
    to be the digits at the beginning of the filename preceding an
    underscore (e.g. ``00004_*.qza``).

    Parameters
    ----------
    list_of_artifact_filenames : List
        A list of artifact filenames returned by the ``get_results`` method

    Returns
    -------
    sorted_artifact_filenames : List
        Sorted list of the found artifact filenames

    See Also
    --------
    q2_mlab.doctor.get_results
    """
    def _param_index(filename):
        # Numeric prefix before the first underscore is the sort key.
        return int(filename.split("_")[0])

    return sorted(list_of_artifact_filenames, key=_param_index)
def _restore_case(s, memory):
"""Restore a lowercase string's characters to their original case."""
cased_s = []
for i, c in enumerate(s):
if i + 1 > len(memory):
break
cased_s.append(c if memory[i] else c.upper())
return ''.join(cased_s) |
def all_coin_types_to_string(coin_dict):
    """
    Converts all coin elements into a string, no matter their value.

    Keyword Arguments:
    coin_dict (dict): A dictionary consisting of all 4 coin types
        ('plat', 'gold', 'silver', 'copper').

    Returns:
    (string): The resulting string, e.g. ``'1p 2g 3s 4c'``.
    """
    return "{plat}p {gold}g {silver}s {copper}c".format(**coin_dict)
def decode_govee_temp(packet_value: int) -> float:
    """Decode potential negative temperatures."""
    # See https://github.com/Thrilleratplay/GoveeWatcher/issues/2
    # Bit 23 flags a negative temperature; the last 3 decimal digits
    # encode the humidity, so "// 1000" masks them out.
    negative = bool(packet_value & 0x800000)
    if negative:
        packet_value ^= 0x800000
    temperature = (packet_value // 1000) / 10.0
    return -temperature if negative else temperature
def normalize_value(value):
    """
    Normalize the given value and return it.

    Affine map x -> x/2 + 0.5 (presumably [-1, 1] -> [0, 1] -- confirm
    the intended input range with callers).
    """
    return 0.5 * value + 0.5
def outChangeNamespaces(old_namespaces, new_namespaces):
    """If old_namespaces != new_namespaces, close old namespace and open new one.

    Closing braces are emitted innermost-first; opening lines are emitted
    outermost-first.  The returned text always ends with a newline; when
    the namespace lists are equal it is just "\\n".

    Fixes: the original shadowed the builtin ``str`` with its accumulator
    and destructively consumed its arguments with while-loops; this
    version joins a parts list instead (same output, no builtin shadowing).
    """
    parts = []
    if old_namespaces != new_namespaces:
        if old_namespaces:
            parts.append("\n")
        # Close the innermost old namespace first.
        for ns in reversed(old_namespaces):
            parts.append("} //namespace " + ns + "\n")
        if new_namespaces:
            parts.append("//////////////////////////////////////////////////////////////////////////////\n\n")
        # Open the outermost new namespace first.
        for ns in new_namespaces:
            parts.append("namespace " + ns + " {\n")
    parts.append("\n")
    return "".join(parts)
def scsilun_to_int(lun):
    """
    There are two style lun number, one's decimal value is <256 and the other
    is full as 16 hex digit. According to T10 SAM, the full 16 hex digit
    should be swapped and converted into decimal.
    For example, SC got zlinux lun number from DS8K API, '40294018'. And it
    should be swapped to '40184029' and converted into decimal, 1075331113.
    When the lun number is '0c' and its decimal value is <256, it should be
    converted directly into decimal, 12.
    https://github.com/kubernetes/kubernetes/issues/45024
    """
    value = int(lun, 16)
    if value < 256:
        # Small LUNs are used directly.
        return value
    # Swap the two 16-bit halves of the low 32 bits.
    low = value & 0xFFFF
    high = (value >> 16) & 0xFFFF
    return (low << 16) | high
def peel(event):
    """
    Remove an event's top-level skin (where its flavor is determined), and
    return the core content -- i.e. the first value in the mapping.
    """
    # Materialize the values view; an empty event raises IndexError,
    # exactly as the original did.
    core = [*event.values()][0]
    return core
def get_uid_gid(uid, gid=None):
    """Try to change UID and GID to the provided values.
    UID and GID are given as names like 'nobody' not integer.
    Src: http://mail.mems-exchange.org/durusmail/quixote-users/4940/1/
    """
    import pwd, grp
    # Look up the user; its primary group is the fallback GID.
    passwd_entry = pwd.getpwnam(uid)
    uid = passwd_entry.pw_uid
    default_grp = passwd_entry.pw_gid
    if gid is None:
        gid = default_grp
    else:
        try:
            gid = grp.getgrnam(gid).gr_gid
        except KeyError:
            # Unknown group name: fall back to the user's primary group.
            gid = default_grp
    return (uid, gid)
def GiB(val):
    """Return ``val * 2**30``.

    Used to set the workspace size for the TensorRT engine builder
    (presumably a byte count -- the original docstring said "Gibibit in
    bits", which looks inaccurate; confirm against the builder API).

    The original ``val * 1 << 30`` relied on operator precedence and
    raised TypeError for float *val*; the parenthesized form is clearer
    and also accepts fractional sizes (backward compatible for ints).
    """
    return val * (1 << 30)
def validate_acronym(acronym, submission):
""" Validate a submission against the acronym. """
chunks = submission.split(' ')
acronym_len = len(acronym)
# needs to use the right number of words
if len(chunks) != acronym_len:
return False
# first letter of each word needs to match the acronym
for i in range(acronym_len):
if chunks[i][0].upper() != acronym[i]:
return False
return True |
def three_shouts(word1, word2, word3):
    """Returns a tuple of the three words, each with '!!!' appended."""
    return tuple(word + '!!!' for word in (word1, word2, word3))
def discount_opex(opex, global_parameters, country_parameters):
    """
    Discount opex based on return period.

    Parameters
    ----------
    opex : float
        Financial operating cost per period.
    global_parameters : dict
        All global model parameters ('return_period', 'discount_rate').
    country_parameters : dict
        All country specific parameters (['financials']['wacc']).

    Returns
    -------
    discounted_cost : float
        The discounted cost over the desired time period, uplifted by the
        weighted average cost of capital (wacc).
    """
    return_period = global_parameters['return_period']
    discount_rate = global_parameters['discount_rate'] / 100
    wacc = country_parameters['financials']['wacc']
    # Sum the discounted opex for each year of the return period.
    discounted_cost = round(sum(
        opex / (1 + discount_rate) ** year
        for year in range(return_period)
    ))
    # Apply the weighted average cost of capital uplift.
    return discounted_cost * (1 + (wacc / 100))
def join_namespace(namespace, ident):
    """
    Joins a namespace and a bare identifier into a full identifier.

    >>> join_namespace('a', 'b')
    'a:b'
    >>> join_namespace('', 'b')
    ':b'
    """
    return namespace + ':' + ident
def check_option(val, home):
    """Check whether main menu option is valid.

    Returns a (value, home) pair.  On a valid choice (1-3) *home* becomes
    False; on an out-of-range integer a message is printed and *home* is
    returned unchanged; on a non-integer a message is printed and *home*
    is forced to True.
    """
    try:
        choice = int(val)
    except ValueError:
        # Option is not an integer.
        print('Please enter an integer 1-3.\n\n\n')
        return val, True
    if not 1 <= choice <= 3:
        # Option is out of range.
        print('Not an option, please try again.\n\n\n')
        return choice, home
    return choice, False
def gen_cols():
    """Columns to keep from Redfin listings."""
    return [
        'ADDRESS',
        'CITY',
        'STATE OR PROVINCE',
        'ZIP OR POSTAL CODE',
        'PRICE',
    ]
def to_range(x, start, limit):
    """Wrap *x* into the range [start, limit) of width limit - start."""
    width = limit - start
    return start + (x - start) % width
def get_callbacks(callback_list):
    """
    Returns a list of callbacks given a list of callback specifications.

    A callback specification is a tuple (callback_name, callback_params);
    each name is resolved in this module's global namespace and called
    with the given keyword params.

    Parameters
    ----------
    callback_list: a list of tuples (callback_name, callback_params)

    Returns
    -------
    A list of callbacks
    """
    # globals() resolves to this module's namespace, same as the original.
    return [globals()[name](**params) for name, params in callback_list]
def hexlify(code):
    """Convert code to hex form, zero-padded to at least 4 digits."""
    digits = hex(code)[2:].upper()
    return '0x' + digits.zfill(4)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.