content stringlengths 42 6.51k |
|---|
def get_files_from_response(response):
    """Extract the list of files from a Slack API response.

    :param response: Slack API JSON response (dict)
    :return: list of files; an empty list when the 'files' key is absent
    """
    try:
        return response['files']
    except KeyError:
        return []
def get_info(tag, label, how):
    """Get information from a tag.

    Parameters
    ----------
    tag : bs4.element.Tag
        The data object to find the information from.
    label : str
        The name of the tag to get information from.
    how : {'raw', 'str', 'all', 'all-str', 'all-list'}
        Method to use to get the information.
            raw      - get an embedded tag
            str      - get text of an embedded tag
            all      - get all embedded tags
            all-str  - get all embedded tags, joined into one string
            all-list - get all embedded tags, collected into a list

    Returns
    -------
    {bs4.element.Tag, bs4.element.ResultSet, str, list, None}
        Requested data from the tag. Returns None if the requested tag is
        unavailable (missing tag raises AttributeError internally).
    """
    if how not in ['raw', 'str', 'all', 'all-str', 'all-list']:
        raise ValueError('Value for how is not understood.')
    # Use try to be robust to missing tag
    try:
        if how == 'raw':
            return tag.find(label)
        elif how == 'str':
            return tag.find(label).text
        elif how == 'all':
            return tag.find_all(label)
        elif how == 'all-str':
            return ' '.join([part.text for part in tag.find_all(label)])
        elif how == 'all-list':
            return [part.text for part in tag.find_all(label)]
    except AttributeError:
        return None
def find_perimeter(height: int, width: int) -> int:
    """Return the perimeter of a rectangle with the given side lengths."""
    return 2 * (height + width)
def longest_match(sequence, subsequence):
    """Return the length of the longest run of back-to-back copies of
    ``subsequence`` inside ``sequence``."""
    sub_len = len(subsequence)
    best = 0
    for start in range(len(sequence)):
        # Count consecutive copies of the subsequence beginning at `start`.
        runs = 0
        pos = start
        while sequence[pos:pos + sub_len] == subsequence:
            runs += 1
            pos += sub_len
        best = max(best, runs)
    return best
def str2list(data):
    """
    Create a list of values from whitespace- and newline-delimited text
    (keys are ignored).

    For example, this:
        ip 1.2.3.4
        ip 1.2.3.5
    becomes:
        ['1.2.3.4', '1.2.3.5']

    :param data: multi-line "key value" text
    :return: list of the second token from each non-empty line
    """
    list_data = []
    for line in data.split('\n'):
        line = line.strip()
        if not line:
            continue
        # split() handles runs of spaces and tabs; the old split(' ') missed
        # tab-delimited lines and produced '' values on double spaces.
        parts = line.split()
        if len(parts) < 2:
            # No value after the key; skip the line.
            continue
        list_data.append(parts[1])
    return list_data
def validate_param(value, invalid_list=None):
    """
    Ensure a param is valid by checking it is not in a list of invalid values.

    :param value: param value to be tested
    :type value: string, list, or object
    :param invalid_list: (optional) values considered invalid; defaults to
        common "empty" values (None, '', ' ', 0, '0', [], [''], {}, {''})
    :type invalid_list: list
    :return: the value if valid, else None
    :rtype: string, list, or object
    """
    # A falsy (e.g. empty) invalid_list also falls back to the default set.
    invalid_list = invalid_list or [None, '', ' ', 0, '0', [], [''], {}, {''}]
    return None if value in invalid_list else value
def get_regions(chr_pos):
    """
    Build trace_df()-style regions from "Reference-name_position" strings.

    Unlike the original nanoRMS get_regions, reference names containing
    underscores are preserved intact (only the final "_position" is split off).

    Parameters
    ----------
    chr_pos : list
        Positions in the format "Reference-name_position".

    Returns
    -------
    list of tuples
        One (reference_name, position, "_") tuple per input entry.
    """
    regions = []
    for cp in chr_pos:
        *name_parts, position = cp.split("_")
        regions.append(("_".join(name_parts), int(position), "_"))
    return regions
def dict_add_source_prefix(obj, source_str, shodan_protocol_str=''):
    """Return dict where any non-nested key (except 'ip', 'asn' and 'ip_int')
    is prefixed by the OSINT source name.

    :param obj: dict whose keys are rewritten in place
    :param source_str: OSINT source name used as the prefix
    :param shodan_protocol_str: Shodan module/protocol name; when non-empty,
        protocol-specific keys additionally get this prefix
    :return: the same dict, with renamed keys
    """
    keys_not_source_prefixed = ['ip', 'asn', 'ip_int']
    # These Shodan keys still get the source prefix, just not the protocol one.
    shodan_keys_not_protocol_prefixed = ['asn', 'ip', 'ipv6 port', 'hostnames', 'domains', 'location',
                                         'location.area_code', 'location.city', 'location.country_code',
                                         'location.country_code3', 'location.country_name', 'location.dma_code',
                                         'location.latitude', 'location.longitude', 'location.postal_code',
                                         'location.region_code', 'opts', 'org', 'isp', 'os', 'transport', 'protocols']
    for key in list(obj):
        # Skip already-nested keys and the identity keys.
        if '.' in key or key in keys_not_source_prefixed:
            continue
        # `==` instead of `is`: identity comparison of string literals is an
        # implementation detail and raises SyntaxWarning on modern Pythons.
        if shodan_protocol_str == '' or key in shodan_keys_not_protocol_prefixed:
            new_key = source_str + "." + key
        else:
            new_key = source_str + "." + shodan_protocol_str + '.' + key
        if new_key != key:
            obj[new_key] = obj.pop(key)
    return obj
def cool_number(value, num_decimals=2):
    """
    Django template filter to convert regular numbers to a
    cool format (ie: 2K, 434.4K, 33M...)

    :param value: number
    :param num_decimals: number of decimal digits kept before trimming
    :return: compact string representation
    """
    int_value = int(value)
    formatted_number = '{{:.{}f}}'.format(num_decimals)
    if int_value < 1000:
        return str(int_value)
    if int_value < 1000000:
        scaled = formatted_number.format(int_value / 1000.0)
        suffix = 'K'
    else:
        scaled = formatted_number.format(int_value / 1000000.0)
        suffix = 'M'
    # rstrip('0') then rstrip('.') removes trailing zeros and a dangling dot.
    # The old rstrip('0.') treated '0' and '.' as one character set and
    # collapsed e.g. '100.00' into '1' (so 100000 rendered as '1K').
    return scaled.rstrip('0').rstrip('.') + suffix
def return_list_without_duplicates(lst):
    """
    Build a new list with duplicates removed, keeping first-occurrence order.

    :param lst: list
    :return: new list without duplicates
    """
    deduped = []
    for element in lst:
        if element in deduped:
            continue
        deduped.append(element)
    return deduped
def part1(data):
    """
    >>> part1([199, 200, 208, 210, 200, 207, 240, 269, 260, 263])
    7
    >>> part1(read_input())
    1581
    """
    # Count how many consecutive pairs are strictly increasing.
    increases = 0
    for previous, current in zip(data, data[1:]):
        if current > previous:
            increases += 1
    return increases
def my_lcs(string, sub):
    """
    Compute the length of the longest common subsequence of two token lists.

    :param string: list of str tokens (a string split on whitespace)
    :param sub: list of str tokens (the shorter string, same splitting)
    :returns: int -- length of the LCS only, not the subsequence itself
    """
    # Ensure `string` is the longer of the two.
    if len(string) < len(sub):
        string, sub = sub, string
    # table[row][col] = LCS length of string[:row] and sub[:col].
    table = [[0] * (len(sub) + 1) for _ in range(len(string) + 1)]
    for col in range(1, len(sub) + 1):
        for row in range(1, len(string) + 1):
            if string[row - 1] == sub[col - 1]:
                table[row][col] = table[row - 1][col - 1] + 1
            else:
                table[row][col] = max(table[row - 1][col], table[row][col - 1])
    return table[-1][-1]
def none_value(value):
    """ Render value as a string, mapping empty values to "".

    Note: despite the name, ANY falsy value (None, 0, "", [], False) is
    converted to the empty string, not just None.

    Returns:
        * value -- string form of value, or "" when value is falsy
    """
    if not value: return ""
    return "%s" % value
def list_to_string(list):
    """
    Join a list of strings into a single space-separated string.

    :param list: list of strings
    :return: joined string
    """
    separator = ' '
    return separator.join(list)
def index_name_if_in_list(name, name_list, suffix='', prefix=''):
    """
    Make a name unique within a list by appending an incrementing index.

    Parameters:
        name (str): Name.
        name_list (iterable): Names the result must differ from.
        suffix (str): Appended after the index.
        prefix (str): Placed in front of the index.

    Returns:
        str: A name not present in name_list.
    """
    candidate = '{}'.format(name)
    counter = 1
    while candidate in name_list:
        candidate = '{}_{}{}{}'.format(name, prefix, counter, suffix)
        counter += 1
    return candidate
def unpackPlayerArg(packedArg):
    """Split a player argument string into (module name, remaining args).

    The first space-separated token is the expected player module name; the
    rest (if any) is returned as a single string for treatment as args to
    the instantiated class.

    Raises:
        ValueError: if packedArg is empty or whitespace-only.
    """
    # str.split always yields at least one element, so the old
    # `if not unpacked` guard could never fire; validate the input instead.
    if not packedArg or not packedArg.strip():
        raise ValueError("Unexpected agent arg provided: %s" % packedArg)
    unpacked = packedArg.split(" ", 1)
    if len(unpacked) == 1:
        # no additional args
        return unpacked[0], None
    return unpacked[0], unpacked[1]
def heaviside(x, bias=0) -> int:
    """
    Heaviside step function Theta(x - bias).

    :param x: input value
    :param bias: shift of the step location
    :return: 1 if x >= bias else 0
    """
    if x >= bias:
        return 1
    return 0
def nextnumber(s):
    """Given a number as a string, return the next number of the
    "Look and Say" sequence (https://oeis.org/A005150), also as a string.

    Example: 21 -> 1211
    """
    # Collect runs of identical digits, then describe each run as
    # "<length><digit>".
    runs = []
    current = ""
    for ch in s:
        if not current or ch == current[-1]:
            current += ch
        else:
            runs.append(current)
            current = ch
    if current:
        runs.append(current)
    return "".join(str(len(run)) + run[0] for run in runs)
def convert_to_single_atom_aromatic_string(aromatic_atom_labels):
    """
    Pick a single representative label for an aromatic ring.

    Parameters
    ----------
    aromatic_atom_labels: list of str
        Triple of atom labels, e.g. "C:PHE:49:CG".

    Returns
    -------
    str: the label whose atom-name field (4th ':'-separated part) sorts
    lowest, used as the single-atom representative of the ring.
    """
    def atom_name(label):
        return label.split(":")[3]
    return min(aromatic_atom_labels, key=atom_name)
def _await(coroutine):
"""
Return a generator
"""
if hasattr(coroutine, '__await__'):
return coroutine.__await__()
else:
return coroutine |
def _to_map(value, delimiter1=":", delimiter2="="):
"""
Converts raw string to map
Ex. 'ns=bar:roster=null:pending_roster=A,B,C:observed_nodes=null'
Returns {'ns': 'bar', 'roster': 'null', 'pending_roster': 'A,B,C', 'observed_nodes': 'null'}
"""
vmap = {}
if not value:
return vmap
try:
data_list = value.split(delimiter1)
except Exception:
return vmap
for kv in data_list:
try:
k, v = kv.split(delimiter2)
vmap[k] = v
except Exception:
continue
return vmap |
def evaluate_poly(poly, x):
    """Evaluate a polynomial at the value x and return the result.

    ``poly`` holds the coefficients in increasing-power order:
    poly[i] is the coefficient of x**i.

    Example:
        poly = (0.0, 0.0, 5.0, 9.3, 7.0)   # f(x) = 7x^4 + 9.3x^3 + 5x^2
        evaluate_poly(poly, -13)           # 180339.9

    poly: tuple of numbers, length > 0
    x: number
    returns: float
    """
    x = float(x)
    total = 0.0
    for power, coefficient in enumerate(poly):
        total += coefficient * x ** power
    return total
def ip_to_long(ip):
    """
    Convert an IP address string to a network byte order 32-bit integer.

    Shorthand forms are zero-padded: a single octet becomes the high byte;
    with 2-3 octets the final octet stays as the low byte and zeros fill
    the middle.
    """
    octets = ip.split('.')
    if len(octets) == 1:
        octets = octets + [0, 0, 0]
    elif len(octets) < 4:
        last = octets[-1:]
        octets = octets[:-1] + [0] * (4 - len(octets)) + last
    result = 0
    for octet in octets:
        result = (result << 8) | int(octet)
    return result
def get_outliers(month: int, metric: str, samples: list, minimum: float, maximum: float) -> list:
    """Build scatter-plot points for samples outside [minimum, maximum] in a
    given month. ``metric`` selects the value shown on the y axis."""
    return [
        {"x": month, "y": round(sample[metric], 2), "name": sample["id"]}
        for sample in samples
        if sample[metric] < minimum or sample[metric] > maximum
    ]
def float_with_error(x):
    """
    Parse a CIF numeric value that may carry an uncertainty, e.g. "1.234(5)".

    The parenthesized error is dropped; unknown values containing "?"
    return 0.
    """
    if "?" in x:
        return 0
    number, _, _error = x.partition("(")
    return float(number)
def get_items(tpls, key):
    """Extract second elements of tuples whose first element equals ``key``."""
    return [tpl[1] for tpl in tpls if tpl[0] == key]
def compile_progress(persons, persons_progress, badge_parts):
"""Return [part][person] boolean matrix."""
parts_progress = [] # [part][person] boolean matrix
for idx, part in enumerate(badge_parts):
person_done = []
for progress in persons_progress:
for part_done in progress:
if part_done.idx == idx:
person_done.append(True)
break
else: # No break
person_done.append(False)
parts_progress.append(person_done)
return parts_progress |
def validate_and_normalize_unique_id(unique_id: str) -> str:
    """
    Normalize the accepted unique_id formats into a bare username.

    For example, "@tiktoklive" -> "tiktoklive"

    :param unique_id: username, @-handle, or TikTok live URL
    :return: Normalized version of the unique_id
    :raises TypeError: if unique_id is not a string
    """
    if not isinstance(unique_id, str):
        # TypeError is more precise than the bare Exception raised before,
        # and (as an Exception subclass) stays catchable by existing callers.
        raise TypeError("Missing or invalid value for 'uniqueId'. Please provide the username from TikTok URL.")
    return (
        unique_id
        .replace("https://www.tiktok.com/", "")
        .replace("/live", "")
        .replace("@", "")
        .strip()
    )
def truncate_value(limit, value):
    """
    Truncate ``value`` to at most ``limit`` characters of its first line,
    appending an ellipsis when anything was cut off.
    """
    first_line, newline, _rest = value.partition(u'\n')
    if newline or len(first_line) > limit:
        return u'{}\u2026'.format(first_line[:limit])
    return first_line
def q_ntu(epsilon, c_min, temp_hot_in, temp_cold_in):
    """Compute the heat removal q for the NTU method.

    Args:
        epsilon (int, float): Effectiveness of the heat exchanger.
        c_min (int, float): Minimum C value for NTU calculations.
        temp_hot_in (int, float): Hot side inlet temperature.
        temp_cold_in (int, float): Cold side inlet temperature.

    Returns:
        int, float: Heat removal value from the NTU method.
    """
    delta_t = temp_hot_in - temp_cold_in
    return epsilon * c_min * delta_t
def get_wl_band(radar_frequency):
    """Map a radar frequency to an integer band identifier.

    Args:
        radar_frequency (float): Radar frequency (GHz).

    Returns:
        int: 0 for a 35 GHz radar (strictly between 30 and 40 GHz),
        1 for a 94 GHz radar (everything else).
    """
    if 30 < radar_frequency < 40:
        return 0
    return 1
def detect_loader_type(my_data, loader_type):
    """Detect the loader type declared in the configuration file.

    Specific detection for each kind of loader should be implemented here.

    Args:
        my_data (str): path of file or chunk file set
        loader_type (str): loader description in yaml file
    """
    if loader_type == "auto_detect":
        # Here should go the implementation for the rest of loaders
        raise ValueError("Unknown format: {}".format(loader_type))
    return loader_type
def turning_radius(speed):
    """Minimum turning radius given speed.

    Quartic polynomial fit; coefficients are empirical (origin of the fit
    is not recorded here -- presumably vehicle test data, TODO confirm).
    At speed 0 the model yields the constant term, 157.
    """
    return -6.901E-11 * speed**4 + 2.1815E-07 * speed**3 - 5.4437E-06 * speed**2 + 0.12496671 * speed + 157
def get_index(l, key, value):
    """Find the index of the first dict in ``l`` whose ``key`` equals ``value``.

    Return: index or -1
    """
    for position, entry in enumerate(l):
        if entry[key] == value:
            return position
    return -1
def _preprocess_padding(padding):
"""Convert keras' padding to tensorflow's padding.
# Arguments
padding: string, `"same"` or `"valid"`.
# Returns
a string, `"SAME"` or `"VALID"`.
# Raises
ValueError: if `padding` is invalid.
"""
if padding == 'same':
padding = 'SAME'
elif padding == 'valid':
padding = 'VALID'
else:
raise ValueError('Invalid padding: ' + str(padding))
return padding |
def project_deleted(project):
    """Before-analysis hook: skip projects flagged as deleted.

    :param project: project that is being analyzed
    :return: True when either the 'deleted' or 'delete' flag is set
    """
    if project.get('deleted', False):
        return True
    return project.get('delete', False)
def _parse_package_name(package):
"""
Splits npm package name into [@scope, name]
"""
return package.split("/") if package.startswith("@") else [None, package] |
def height(t):
    """
    Return 1 + length of the longest path of t.

    @param BinaryTree t: binary tree to find the height of
    @rtype: int

    >>> t = BinaryTree(13)
    >>> height(t)
    1
    >>> height(BinaryTree(5, BinaryTree(3), BinaryTree(8, BinaryTree(7))))
    3
    """
    # An empty subtree contributes no height.
    if t is None:
        return 0
    return 1 + max(height(t.left), height(t.right))
def get_segment_signature(vid, fstart, fend):
    """
    Generate a video clip signature string: "<vid>-<start:04d>-<end:04d>".
    """
    start_str = format(fstart, '04d')
    end_str = format(fend, '04d')
    return '-'.join([str(vid), start_str, end_str])
def normalize_module_name(layer_name):
    """
    Drop the first 'module' component from a dotted layer name -- the
    component that nn.DataParallel wrapping inserts into nn.Module names.
    """
    parts = layer_name.split('.')
    if 'module' not in parts:
        return layer_name
    parts.remove('module')
    return '.'.join(parts)
def get_relative_word_frequency(word_frequency):
    """Return dictionary with words as keys and frequencies relative to the
    most frequent word as values."""
    max_frequency = max(word_frequency.values())
    return {word: count / max_frequency for word, count in word_frequency.items()}
def parse_request(message):  # pragma: no cover
    """Validate that the request is well-formed; if so, return the URI.

    Raises ValueError(405) for non-GET methods, ValueError(400) for a
    missing HTTP marker, ValueError(505) for a non-1.1 version.
    """
    parts = message.split()
    if parts[0] != 'GET':
        raise ValueError(405)
    if 'HTTP/' not in parts[2]:
        raise ValueError(400)
    if '1.1' not in parts[2]:
        raise ValueError(505)
    return parts[1]
def rm_first_line(text):
    """Remove the first line of `text`, returning the remainder."""
    _first, _sep, remainder = text.partition('\n')
    return remainder
def isDict(input):
    """
    Check whether ``input`` is a dict instance.

    :param input: object of unknown type
    :return: True for a dict (or dict subclass), False otherwise
    """
    return isinstance(input, dict)
def n_terminal_asparagine(amino_acids):
    """
    Check for asparagine ("N") at the N-terminus of a peptide; it is hard
    to synthesize there, though not as bad as {Gln, Glu, Cys}.
    """
    first_residue = amino_acids[0]
    return first_residue == "N"
def format_basic(value, format_spec=""):
    """Thin wrapper around built-in format() for use in functools.partial."""
    return format(value, format_spec)
def filter_digits(string):
    """Return all digit characters of ``string`` concatenated, as a string."""
    return ''.join(ch for ch in string if ch.isdigit())
def get_paralogs_data(paral_file):
    """Read paralogous projections from a file into a set, one per line.

    Returns an empty set when no file is given.
    """
    if paral_file is None:
        return set()
    with open(paral_file, "r") as handle:
        return {line.rstrip() for line in handle}
def rgb_to_tk(rgb):
    """
    Convert an (r, g, b) tuple of ints into a tkinter color code string.

    :param rgb: tuple of 3 ints in 0-255
    :return: "#rrggbb" hex color string
    """
    red, green, blue = rgb
    return "#{:02x}{:02x}{:02x}".format(red, green, blue)
def get_dataset_language(ods_dataset_metas):
    """Get the ISO 639-1 language code of the dataset; default is 'eng'."""
    metas = ods_dataset_metas.get('metas', {})
    if 'language' in metas:
        return metas['language']
    return 'eng'
def _tx_resource_for_name(name):
""" Return the Transifex resource name """
return "taiga-back.{}".format(name) |
def get_handler_name(handler):
    """Name (including the owner's name if available) of a handler function.

    Args:
        handler (function): Function to be named

    Returns:
        string: handler name as string
    """
    owner = getattr(handler, '__self__', None)
    if owner is not None and hasattr(owner, 'name'):
        return owner.name + '.' + handler.__name__
    return handler.__name__
def braf_v600e_vd(braf_v600e_variation):
    """Build the BRAF V600E variation-descriptor test fixture.

    :param braf_v600e_variation: variation object embedded in the descriptor
    :return: dict in VariationDescriptor form
    """
    return {
        "id": "normalize.variation:braf%20v600e",
        "type": "VariationDescriptor",
        "variation_id": "ga4gh:VA.8JkgnqIgYqufNl-OV_hpRG_aWF9UFQCE",
        "variation": braf_v600e_variation,
        "molecule_context": "protein",
        "structural_type": "SO:0001606",
        "vrs_ref_allele_seq": "V",
    }
def _follow_inheritance(cls, attr):
    """Find the class in ``cls``'s MRO where ``attr`` originally comes from.

    Recursively walks up the MRO: if any base also exposes ``attr``, the
    search continues from that base; when no base has it, ``cls`` itself is
    the origin.

    NOTE(review): because hasattr is used, an attribute redefined on a
    subclass is still attributed to the oldest ancestor exposing the name.
    """
    for base in cls.mro()[1:]:
        if hasattr(base, attr):
            return _follow_inheritance(base, attr)
    # Not found in any bases, must be defined here
    return cls
def count(iterable):
    """
    Return the number of items in `iterable`, consuming it in the process.
    """
    total = 0
    for _ in iterable:
        total += 1
    return total
def validateEmail(string, localhost=False, exclude='', allowempty=0):
    """Validate that a string looks like an email address.

    :param string: value to validate (coerced to str first)
    :param localhost: when True, accept dot-less domains such as user@localhost
    :param exclude: unused in this body -- presumably a placeholder value to
        reject (e.g. 'user@domain.edu'); TODO confirm intended behavior
    :param allowempty: when truthy, an empty/whitespace-only string passes
    :return: True if the string passes the checks, False otherwise
    """
    string = str(string)
    if string.strip() == "":
        if allowempty:
            return True
        return False
    if "@" in string:
        try:
            # Exactly one '@' expected; more than one raises ValueError,
            # which the bare except below turns into False.
            name,domain = string.split("@")
            if (not localhost) and (not "." in string):
                return False
            else:
                # Every dot-separated component (of the whole string, not
                # just the domain) must be non-empty.
                splitted = string.split('.')
                for s in splitted:
                    if not s.strip():
                        return False
                return True
        except:
            return False
    return False
def _vcross3d(v, w):
"""Find the cross product of two 3D vectors."""
return (
v[1] * w[2] - v[2] * w[1],
v[2] * w[0] - v[0] * w[2],
v[0] * w[1] - v[1] * w[0],
) |
def elapsedTime(seconds, suffixes = ['y', 'w', 'd', 'h', 'm', 's'], add_s = False, separator = ' '):
    """
    Turn an amount of seconds into a human-readable amount of time,
    e.g. 90 -> '1m 30s'.

    :param seconds: number of seconds to format ('0s' is returned for 0)
    :param suffixes: unit labels for years, weeks, days, hours, minutes, seconds
    :param add_s: pluralize a unit label when its value is greater than 1
    :param separator: string placed between the time pieces
    :return: formatted time string
    """
    if seconds == 0:
        return '0s'
    pieces = []
    # (suffix, unit length in seconds), largest unit first; a year is 52 weeks.
    parts = [(suffixes[0], 60 * 60 * 24 * 7 * 52),
             (suffixes[1], 60 * 60 * 24 * 7),
             (suffixes[2], 60 * 60 * 24),
             (suffixes[3], 60 * 60),
             (suffixes[4], 60),
             (suffixes[5], 1)]
    for suffix, length in parts:
        # Floor division: the old `/` is true division under Python 3 and
        # produced fractional unit counts such as '2.86e-06y' for any input.
        value = seconds // length
        if value > 0:
            seconds = seconds % length
            label = suffix + 's' if add_s and value > 1 else suffix
            pieces.append('%s%s' % (str(value), label))
            if seconds < 1:
                break
    return separator.join(pieces)
def ytv_id(url):
    """Return the YouTube video ID from a regular or shortened URL.

    Example:
        >>> ytv_id('https://www.youtube.com/watch?v=jNQXAC9IVRw')
        'jNQXAC9IVRw'
    """
    last_segment = url.split('/')[-1]
    return last_segment.split('watch?v=')[-1]
def parse_stash(line):
    """
    Parse a line of `git_stash_list` output.
    >>> parse_stash('refs/stash@{0} 29453bf380ff2e3aabf932a08287a162bc12d218')
    (0, 'refs/stash@{0}', '29453bf380ff2e3aabf932a08287a162bc12d218')
    """
    reflog_selector, commit_hash = line.split()
    # The stash index sits between the braces of 'refs/stash@{N}'.
    num = int(reflog_selector[reflog_selector.index('{') + 1:-1])
    return (num, reflog_selector, commit_hash)
def build_resp_headers(json_resp_body):
    """Return the headers for the CloudFormation response.

    The empty content-type is required for the pre-signed URL: requests may
    otherwise add a default, unsigned content-type.
    """
    headers = {'content-type': ''}
    headers['content-length'] = str(len(json_resp_body))
    return headers
def batting_average(at_bats, hits):
    """Calculate the batting average (hits / at bats) to 3 decimal places.

    Returns 0.0 when there are no at bats, keeping the return type float in
    every branch (the old `round(0, 3)` returned the int 0).
    """
    try:
        return round(hits / at_bats, 3)
    except ZeroDivisionError:
        return 0.0
def index_schema(schema, path):
    """Index a JSON schema with a path-like string such as 'a/b/c'.

    Raises ValueError when a non-object node is traversed or a path
    component is missing from the schema's properties.
    """
    node = schema
    for section in path.split('/'):
        if node['type'] != 'object':
            raise ValueError('Only object types are supported in the schema structure, '
                             'but saw type %s' % node['type'])
        children = node['properties']
        if section not in children:
            raise ValueError('Invalid path %s in user options' % path)
        node = children[section]
    return node
def sum_of_diffs(vals):
    """Sum the differences between consecutive values of ``vals``."""
    total = 0
    for previous, current in zip(vals, vals[1:]):
        total += current - previous
    return total
def generate_random_string(length):
    """
    Generate a random string of uppercase letters and digits.

    arguments
        length : int
            length of random string
    returns
        str
            random string
    """
    import random
    import string
    alphabet = string.ascii_uppercase + string.digits
    return ''.join(random.choices(alphabet, k=length))
def _p(pre, name):
"""
Make prefix-appended name
:param pre:
:param name:
:return the string prefix_name:
"""
return '{}_{}'.format(pre, name) |
def beats(one, two):
    """Determine whether the player's move beats the opponent's move.

    Rock beats scissors, scissors beat paper, and paper beats rock.

    Args:
        one (str): The player's move
        two (str): The opponent's move

    Returns:
        bool: True if the player's move beats the opponent's move,
        False otherwise
    """
    wins_against = {"rock": "scissors", "scissors": "paper", "paper": "rock"}
    try:
        return wins_against[one] == two
    except KeyError:
        return False
def subOneThenMult(value, arg):
    """Subtract one from ``arg``, then multiply the result by ``value``."""
    decremented = arg - 1
    return value * decremented
def reg8_delta(a, b):
    """Signed 8-bit difference b - a, allowing wrap-around."""
    if b > a:
        delta = b - a
    else:
        delta = 256 + b - a
    # Map the unsigned delta into the signed range [-128, 127].
    if delta > 127:
        delta -= 256
    return delta
def format_release(release: dict) -> dict:
    """
    Format a raw GitHub release record into a release usable by Virtool.

    :param release: the GitHub release record
    :return: a release for use within Virtool
    """
    # Only the first asset is relevant to Virtool.
    primary_asset = release["assets"][0]
    formatted = {
        "id": release["id"],
        "name": release["name"],
        "body": release["body"],
        "etag": release["etag"],
        "filename": primary_asset["name"],
        "size": primary_asset["size"],
        "html_url": release["html_url"],
        "download_url": primary_asset["browser_download_url"],
        "published_at": release["published_at"],
        "content_type": primary_asset["content_type"],
    }
    return formatted
def trapezoid_area(base_minor, base_major, height):
    """Return the area of a trapezoid, rounded to one decimal place."""
    mean_base = (base_minor + base_major) / 2
    return round(mean_base * height, 1)
def _resource_id_from_record_tuple(record):
"""Extract resource_id from HBase tuple record
"""
return record[1]['f:resource_id'] |
def projectPartners(n):
    """Number of unordered pairs among n people (n choose 2).

    project_partners == PEP8 (mixedCase forced by CodeWars).
    """
    return n * (n - 1) / 2
def format_with_bold(s_format, data=None):
    """
    Return ``s_format`` with '_b{...}b_' spans replaced by ANSI bold
    escapes, combined with ``data`` via str.format.

    :param: s_format: a string format;
            every '_b{}b_' span gets wrapped in bold escape codes.
    :param: data: a value, tuple/list, or dict matched against the
            placeholders (required despite the None default).

    Note 1: '... _b{}; something {}b_ ...' is a valid format.
    Note 2: IndexError is raised when the number of data items does not
            equal the number of '{}' placeholders.

    Raises:
        TypeError: when data is None or s_format has no placeholders.
        LookupError: when the '_b'/'b_' markers are unpaired or misordered.
        IndexError: when placeholder count != data item count.
    """
    if data is None:
        raise TypeError('Missing data (is None).')
    if '{' not in s_format:
        raise TypeError('Missing format placeholders.')
    # Check for paired markers: every opening '_b' needs a closing 'b_'.
    if s_format.count('_b') != s_format.count('b_'):
        err_msg1 = "Bold indicators not paired. Expected '_b with b_'."
        raise LookupError(err_msg1)
    # Check for start bold marker:
    b1 = '_b'
    i = s_format.find(b1 + '{')
    # Check marker order: '_b' must appear before 'b_':
    if i > s_format.find('}' + 'b_'):
        err_msg2 = "Starting bold indicator not found. Expected '_b before b_'."
        raise LookupError(err_msg2)
    # Replace each marker pair with ANSI bold-on/bold-off escapes.
    while i != -1:
        # Check for trailing bold marker:
        b2 = 'b_'
        j = s_format.find('}' + b2)
        if j != -1:
            s_format = s_format.replace(b1, '\033[1m')
            s_format = s_format.replace(b2, '\033[0m')
        else:
            err_msg3 = "Trailing bold indicator not found. Expected '_b with b_'."
            raise LookupError(err_msg3)
        i = s_format.find(b1 + '{')
    # Now combine string with data:
    mismatch_err = 'Format placeholders != data items'
    if isinstance(data, (tuple, list)):
        if s_format.count('{}') != len(data):
            raise IndexError(mismatch_err)
        return s_format.format(*data)
    elif isinstance(data, dict):
        if '{}' not in s_format:
            # eg, assume keys given as indices: 'a is {a}, b is {b}'
            return s_format.format_map(data)
        else:
            if s_format.count('{}') != len(data.values()):
                raise IndexError(mismatch_err)
            return s_format.format(*data.values())
    else:
        # Scalar data: exactly one positional placeholder expected.
        if s_format.count('{}') != 1:
            raise IndexError(mismatch_err)
        return s_format.format(data)
def _number_width(n):
"""Calculate the width in characters required to print a number
For example, -1024 takes 5 characters. -0.034 takes 6 characters.
"""
return len(str(n)) |
def checkSegment(c1,c2):
    """
    Determine whether a segment needs further processing, given that its
    endpoints have already been processed.

    Parameters
    ----------
    c1 : component
        The component of B closest to the first vertex of a segment of A.
    c2 : component
        The component of B closest to the second vertex of a segment of A.

    Returns
    ----------
    bool
        False when the segment can be skipped (endpoints map to the same
        component, or to consecutive vertices), True otherwise.
        (Previous docstring claimed a (component, float) return; the code
        returns a bool.)
    """
    # No need to check a segment if either:
    #   c1 and c2 are the same component, or
    #   c1 and c2 are consecutive vertices
    if c1==c2:
        return False
    elif c1[0] == False and c2[0] == False and abs(c1[1]-c2[1])==1:
        return False
    else:
        return True
def format_container_output(output: bytes) -> str:
    """
    Format Docker container output for printing or logging.

    Args:
        output: Raw bytes output by the container.

    Returns:
        The UTF-8 decoded string with trailing newlines removed.
    """
    decoded = output.decode("utf-8")
    return decoded.rstrip("\n")
def parse_execution_context(execution_context):
    """
    Resolve an execution context to its execution id.

    Arguments
    ---------
    execution_context
        An execution context (execution_id, Composition)

    :return: the ``default_execution_id`` of **execution_context** when
        present, otherwise the context itself
    """
    if hasattr(execution_context, 'default_execution_id'):
        return execution_context.default_execution_id
    return execution_context
def _mapButtonTitles(titles):
"""
Convert key
"""
buttonTitles = []
for buttonTitle in titles:
if isinstance(buttonTitle, tuple):
title, returnCode = buttonTitle
buttonTitle = dict(title=title, returnCode=returnCode)
buttonTitles.append(buttonTitle)
return buttonTitles |
def cum_sum(seq):
    """
    Cumulative sums of ``seq``, starting with 0.
    """
    sums = [0]
    running = 0
    for value in seq:
        running = running + value
        sums.append(running)
    return sums
def evaluate_accuracy(tp: int, tn: int, fp: int, fn: int) -> float:
    r"""Accuracy (ACC).

    $ACC=\dfrac{TP + TN}{TP + TN + FP + FN}$

    Returns 0.0 when all four counts are zero.
    (Raw docstring: the original non-raw string made ``\d`` an invalid
    escape sequence.)

    Args:
        tp: True Positives
        tn: True Negatives
        fp: False Positives
        fn: False Negatives
    """
    try:
        return (tp + tn) / (tp + tn + fp + fn)
    except ZeroDivisionError:
        return 0.0
def percentile(x, ys):
    """
    Percentile score of a user with karma ``x`` among users whose karmas
    are listed in ``ys``.

    :param x: karma of the user whose percentile score is desired
    :param ys: karmas of all users interacting with the platform
    :return: percentile score; -1 when ys is empty, 0.0 for a single user
    """
    population = len(ys)
    if population == 0:
        return -1
    if population == 1:
        return 0.
    below = sum(1 for karma in ys if karma < x)
    return below / float(population - 1) * 100
def get_min_max(ints):
    """
    Return a (min, max) tuple for a list of unsorted integers.

    Args:
        ints(list): list of integers; empty input yields (None, None)
    """
    if not ints:
        return None, None
    smallest = largest = ints[0]
    for value in ints[1:]:
        if value < smallest:
            smallest = value
        elif value > largest:
            largest = value
    return smallest, largest
def _to_binary(val):
"""Convert to binary."""
if isinstance(val, str):
return val.encode('utf-8')
assert isinstance(val, bytes)
return val |
def languages(language_list):
    """
    Turn a '; '-delimited language string into per-language boolean columns.

    Returns {} for non-string input (e.g. NaN / None).
    """
    try:
        names = language_list.split('; ')
    except AttributeError:
        return {}
    return {name: True for name in names}
def to_python_type(py_type: str) -> type:
    """Transform an OpenAPI-like type name into the Python type.

    https://swagger.io/docs/specification/data-models/data-types
    """
    mapping = {
        'string': str,
        'number': float,
        'integer': int,
        'boolean': bool,
        'array': list,
        'object': dict,
    }
    return mapping[py_type]
def _axis_to_dim(axis):
"""Translate Tensorflow 'axis' to corresponding PyTorch 'dim'"""
return {0: 0, 1: 2, 2: 3, 3: 1}.get(axis) |
def obj_to_dict(obj):
    """Recursively convert an object instance to a dictionary.

    Dicts and lists are walked recursively; anything exposing ``__dict__``
    is expanded via ``vars()``; everything else is returned as-is.
    Note: exact ``type`` checks are used, so dict/list *subclasses* fall
    through to the ``__dict__`` branch -- presumably intentional, TODO
    confirm.
    """
    # Internal/bookkeeping attributes that must not leak into the output.
    SKIP_KEYS = ["_requester", "_radar", "raw_json"]
    if type(obj) is dict:
        res = {}
        for k, v in obj.items():
            if k in SKIP_KEYS:
                continue
            res[k] = obj_to_dict(v)
        return res
    elif type(obj) is list:
        return [obj_to_dict(item) for item in obj]
    elif hasattr(obj, "__dict__"):
        return obj_to_dict(vars(obj))
    else:
        return obj
def get_note_key(timestamp):
    """Generate the redis key name for a note."""
    return "note_{}".format(timestamp)
def get_field(d, field_list):
    """Get the sub-field of ``d`` addressed by a list of elastic fields.

    E.g. ['file', 'filename'] yields d['file']['filename']; a plain string
    key indexes directly.

    :param d: (nested) dict-like document
    :param field_list: a single key or a list of keys to follow
    :return: the addressed value
    """
    if isinstance(field_list, str):
        return d[field_list]
    # Walk the nesting directly; the old shallow d.copy() was pointless,
    # since indexing never mutates the document.
    node = d
    for field in field_list:
        node = node[field]
    return node
def format_message(message, color=None):
    """
    Normalize and optionally colorize a message for terminal output.

    Newlines are removed and surrounding whitespace stripped. When color
    is 'red' or 'green' the text is wrapped in the matching ANSI escape
    codes; any other color returns the plain text. Falsy messages become ''.
    """
    text = str(message or '').replace('\n', '').strip()
    templates = {
        'red': "\033[91m{}\033[00m",
        'green': "\033[92m{}\033[00m",
    }
    template = templates.get(color)
    return template.format(text) if template else text
def concat_2(a):
    """Concatenate the string "2" to the end of an existing string object.

    Parameters
    ----------
    a : string
        The string to be concatenated to.

    Returns
    -------
    string
        The resulting string, ``a`` followed by ``"2"``.

    Notes
    -----
    Uses ``str.join`` instead of the ``+`` operator; non-string input
    still raises ``TypeError``.
    """
    return "".join((a, "2"))
def cmp_sets(a, b):
    """Compare two iterables as sets.

    Returns a tuple of sorted lists in the format
    (a-only, common, b-only).
    """
    left, right = set(a), set(b)
    return (
        sorted(left - right),
        sorted(left & right),
        sorted(right - left),
    )
def noam_decay(step, warmup_steps, model_size):
    """
    Learning rate schedule described in
    https://arxiv.org/pdf/1706.03762.pdf.

    Rises linearly for the first warmup_steps, then decays as the inverse
    square root of the step number, all scaled by model_size ** -0.5.
    """
    scale = model_size ** (-0.5)
    warmup_term = step * warmup_steps ** (-1.5)
    decay_term = step ** (-0.5)
    return scale * min(decay_term, warmup_term)
def _merge_block(internal_transactions, transactions, whitelist):
"""
Merge responses with trace and chain transactions. Remove non-whitelisted fields
Parameters
----------
internal_transactions : list
List of trace transactions
transactions : list
List of chain transactions
whitelist : list
List of allowed fields
Returns
-------
list
List of trace transactions extended with whitelisted fields from related chain transactions
"""
transactions_by_id = {
(transaction["hash"], transaction["blockHash"]): transaction
for transaction in transactions
}
for transaction in internal_transactions:
hash = transaction["transactionHash"]
block = transaction["blockHash"]
if (hash, block) in transactions_by_id:
whitelisted_fields = {
key: value
for key, value in transactions_by_id[(hash, block)].items()
if key in whitelist
}
transaction.update(whitelisted_fields)
del transactions_by_id[(hash, block)]
return internal_transactions |
def _d_print(inputs, name: str = 'Foo'):
"""Print shape of inputs, which is tensor or list of tensors."""
is_list = isinstance(inputs, (list, tuple))
print('{}: ({})'.format(name, 'List' if is_list else 'Single'))
if not is_list:
print(' ', inputs.dtype, inputs.shape, inputs.name, flush=True)
else:
for ten in inputs:
print(' ', ten.dtype, ten.shape, ten.name, flush=True)
print('', flush=True)
return inputs |
def shorten_dewey(class_mark: str, digits_after_period: int = 4) -> str:
    """
    Shortens Dewey classification number to maximum 4 digits after period.

    BPL materials: default 4 digits - 505.4167
    NYPl adult/young adult: default 4 digits
    NYPL juvenile materials: 2 digits - 618.54

    Args:
        class_mark: Dewey classification
        digits_after_period: number of allowed digits after period

    Returns:
        shortened class_mark
    """
    truncated = class_mark[: 4 + digits_after_period]
    # Keep the 3-character stem intact; only the remainder may lose
    # trailing zeros and a dangling period.
    stem, tail = truncated[:3], truncated[3:]
    return stem + tail.rstrip("0.")
def create_potential_intervals(distances, threshold):
    """Select intervals whose consecutive distance gaps stay within threshold.

    Args:
        distances (list): List of sublists of distances between points in
            the optimal path; each element's first item is the sequence
            of distances that is examined.
        threshold (float): maximum allowed difference between adjacent
            distances.

    Returns:
        list: the elements of *distances* whose adjacent-pair differences
        never exceed the threshold (empty/singleton sequences always pass).
    """
    def within_threshold(seq):
        # Adjacent pairs via zip; all() on an empty iterator is True,
        # so empty and single-element sequences qualify.
        return all(abs(cur - nxt) <= threshold for cur, nxt in zip(seq, seq[1:]))

    return [distance for distance in distances if within_threshold(distance[0])]
def _validate_float(value):
    """Return True if value is exactly an int or a float.

    Uses an exact ``__class__`` membership test, so subclasses — notably
    ``bool``, which subclasses ``int`` — do NOT pass.
    """
    return value.__class__ in (int, float)
def inRange(a, interval):
    """
    Check whether a number lies in the half-open interval [lo, hi).
    """
    lo, hi = interval
    return lo <= a < hi
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.