content stringlengths 42 6.51k |
|---|
def pad_to(data, alignment, pad_character=b'\xFF'):
    """Pad `data` with `pad_character` up to the next multiple of `alignment`.

    Data already on an alignment boundary is returned unchanged.
    """
    remainder = len(data) % alignment
    if remainder:
        # += keeps in-place extension semantics for mutable buffers (bytearray).
        data += pad_character * (alignment - remainder)
    return data
def pretty_tree(x, kids, show):
    """Return a pseudo-graphic tree representation of the object `x` similar to the
    `tree` command in Unix.
    Type: `(T, Callable[[T], List[T]], Callable[[T], str]) -> str`
    It applies the parameter `show` (which is a function of type `(T) -> str`) to get a
    textual representation of the objects to show.
    It applies the parameter `kids` (which is a function of type `(T) -> List[T]`) to
    list the children of the object to show.
    Examples:
    ```pycon
    >>> print(pretty_tree(
    ...     ["foo", ["bar", "baz"], "quux"],
    ...     lambda obj: obj if isinstance(obj, list) else [],
    ...     lambda obj: "[]" if isinstance(obj, list) else str(obj),
    ...     ))
    []
    |-- foo
    |-- []
    |   |-- bar
    |   `-- baz
    `-- quux
    ```
    """
    # Drawing glyphs: MID = non-last child branch, END = last child branch,
    # CONT = vertical continuation under a MID, LAST = blank filler under an END,
    # ROOT = empty prefix for the tree root.
    # NOTE(review): CONT/LAST appear as "| " and " " here, but the docstring example
    # suggests 4-char-wide glyphs ("|   " / "    "); whitespace may have been
    # collapsed in this copy — confirm against the original file.
    (MID, END, CONT, LAST, ROOT) = ("|-- ", "`-- ", "| ", " ", "")

    def rec(obj, indent, sym):
        # One line for this node: accumulated indent + branch glyph + its label.
        line = indent + sym + show(obj)
        obj_kids = kids(obj)
        if len(obj_kids) == 0:
            return line
        else:
            # Extend the indent for this node's children based on how this
            # node itself was drawn.
            if sym == MID:
                next_indent = indent + CONT
            elif sym == ROOT:
                next_indent = indent + ROOT
            else:
                next_indent = indent + LAST
            # All children but the last get MID; the last gets END.
            chars = [MID] * (len(obj_kids) - 1) + [END]
            lines = [rec(kid, next_indent, sym) for kid, sym in zip(obj_kids, chars)]
            return "\n".join([line] + lines)

    return rec(x, "", ROOT)
def get_pad_tuple(padding, kernel):
    """Common code to get the pad option

    Parameters
    ----------
    padding : tuple/list of 2 ints
        (pad_h, pad_w) half-padding per spatial dimension.
        NOTE(review): the original docstring said "int or str ['VALID', 'SAME']",
        but the code below indexes padding[0]/padding[1], so only a 2-sequence
        actually works — confirm intended contract with callers.
    kernel : tuple of int
        Conv kernel size
        NOTE(review): `kernel` is unused in this implementation — confirm.

    Returns
    -------
    pad_top : int
        Padding size on top
    pad_left : int
        Padding size on left
    pad_down : int
        Padding size on down.
    pad_right : int
        Padding size on right.
    """
    # Total padding in each dimension is twice the per-side request;
    # the (total + 1) // 2 split puts the extra row/col (if odd) on bottom/right.
    # pad_h = pad_w = padding * 2
    pad_h = padding[0] * 2
    pad_w = padding[1] * 2
    pad_top = (pad_h + 1) // 2
    pad_left = (pad_w + 1) // 2
    return pad_top, pad_left, pad_h - pad_top, pad_w - pad_left
def parse_prefix(prefix, default_length=128):
    """Split an IP prefix such as "10.0.0.0/8" into (network, prefix_length).

    A bare address (no "/") is given the `default_length`.

    :type prefix: string
    :param prefix: An IP mask.
    :type default_length: long
    :param default_length: The default ip prefix length.
    :rtype: string, int
    :return: A tuple containing the IP address and prefix length.
    """
    if '/' not in prefix:
        return prefix, int(default_length)
    network, _, length = prefix.partition('/')
    return network, int(length)
def electricity_cost_sek(dist, sekpkwh=.85, kmpkwh=100):
    """Cost of an e-bike commute in Swedish Krona (SEK).

    Inputs:
        dist: distance in kilometers (numeric)
        sekpkwh: SEK per kilowatt-hour (kWh), e.g. from electricity_price().
        kmpkwh: kilometers travelled per kWh (e-bikes: roughly 80-100 km/kWh,
            https://www.ebikejourney.com/ebike/).
    """
    energy_used_kwh = dist / kmpkwh
    return sekpkwh * energy_used_kwh
def _tpu_zone(index):
    """Chooses a GCP TPU zone based on the index."""
    # Ascending (exclusive upper bound, zone) pairs; first match wins.
    zone_table = (
        (70, 'europe-west4-a'),
        (80, 'us-central1-b'),
        (90, 'us-central1-c'),
        (100, 'asia-east1-c'),
        (110, 'us-central1-a'),
        (120, 'us-central1-b'),
        (130, 'europe-west4-a'),
    )
    for upper_bound, zone in zone_table:
        if index < upper_bound:
            return zone
    raise ValueError('Unhandled zone index')
def apply_shear(c_ellip, c_gamma):
    """Compute the complex ellipticity after applying complex shear `c_gamma`."""
    numerator = c_ellip + c_gamma
    denominator = 1.0 + c_gamma.conjugate() * c_ellip
    return numerator / denominator
def is_non_ascii(value):
    """Return True if `value` contains any character outside the ASCII range."""
    return any(ord(ch) > 127 for ch in value)
def mag2mom(mw):
    """Convert moment magnitude `mw` to seismic moment (dyne-cm)."""
    exponent = 1.5 * (mw + 10.7)
    return 10 ** exponent
def sdf_supported_property(property_name):
    """
    Check whether this property is supported natively in SDF (without modification).

    :param property_name: name to check
    :return: boolean true/false
    """
    # Assumes the input models are valid JSON; no additional verification of
    # linking properties to valid types is done here.
    # description, readOnly, enum, $ref are handled in a special function
    # that renames / reorders properties, so they are deliberately absent.
    supported = {
        # basic JSON properties
        "type", "minimum", "maximum", "uniqueItems", "format",
        # extended JSON properties
        "minItems", "maxItems", "default", "exclusiveMinimum", "exclusiveMaximum",
        # string modifiers
        "maxLength", "minLength",
    }
    return property_name in supported
def _copyPoints(points):
    """Return a shallow copy of each point in `points` (via point.copy())."""
    copied = []
    for original in points:
        copied.append(original.copy())
    return copied
def _make_equal_size(a: str, b: str):
    """Right-pad the shorter of `a`/`b` with spaces and upper-case both."""
    target = max(len(a), len(b))
    return a.ljust(target).upper(), b.ljust(target).upper()
def find_folder(folder_list, folder_name):
    """Return the id of the folder in `folder_list` whose title equals
    `folder_name`, or False when no such folder exists."""
    matches = (entry['id'] for entry in folder_list if entry['title'] == folder_name)
    return next(matches, False)
def mode(data=None):
    """Return (modal_values, count) for the values in `data`.

    modal_values is the list of values tied for the highest frequency;
    count is that frequency. An empty (or omitted) input yields ([], -1),
    matching the original sentinel behavior.

    Fixes: the original used a mutable default argument (`data=[]`) and
    recomputed `data.count(d)` per distinct value (O(n^2)); counting is
    now a single O(n) pass.
    """
    if data is None:
        data = []
    counts = {}
    for value in data:
        counts[value] = counts.get(value, 0) + 1
    if not counts:
        return [], -1
    top = max(counts.values())
    modal_values = [value for value, count in counts.items() if count == top]
    return modal_values, top
def prependash(arr):
    """Prepend a '--' element to a non-empty tuple; pass empties through as-is."""
    return ('--',) + arr if arr else arr
def stripdesc(desc):
    """Strip trailing whitespace from each line and drop leading/trailing empty lines."""
    cleaned = []
    for line in desc.splitlines():
        cleaned.append(line.rstrip())
    return b'\n'.join(cleaned).strip(b'\n')
def stop_when_true(test_expr, result_expr, seq):
    """
    Scan `seq` until `test_expr` returns True for an element, then return
    `result_expr` applied to that element; return None if no element matches.

    :param test_expr: callable of 1 arg that returns True/False
    :param result_expr: callable of 1 arg; maps the found element to the return value
    :param seq: iterable of elements passed to test_expr
    :return: result_expr(element) for the first match, or None
    """
    matches = (result_expr(element) for element in seq if test_expr(element))
    return next(matches, None)
def unbind(port: int) -> dict:
    """Build the DevTools message requesting browser port unbinding.

    Parameters
    ----------
    port: int
        Port number to unbind.
    """
    params = {"port": port}
    return {"method": "Tethering.unbind", "params": params}
def true_l2s(value: float) -> float:
    """Convert a linear color component to its sRGB gamma-corrected value."""
    # Below the sRGB knee the transfer curve is linear.
    if value <= 0.0031308:
        return value * 12.92
    # Above the knee: power-law segment of the sRGB transfer function.
    return 1.055 * (value ** (1.0 / 2.4)) - 0.055
def merge(left, right):
    """Merge step of merge sort: combine two lists already sorted by element[0]
    in descending order into one descending-sorted list."""
    i = j = 0
    merged = []
    # Two-pointer walk: take the larger head (by min radius) each step.
    while i < len(left) and j < len(right):
        if left[i][0] > right[j][0]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    # At most one of these slices is non-empty; append whatever remains.
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
def format_bytes(bytes):
    """Pretty-print a byte count: >1e6 as '<x>.<y>m', >1e3 as '<x>.<y>k',
    otherwise the plain number."""
    count = bytes
    if count > 1e6:
        return '%.1fm' % (count / 1.0e6)
    if count > 1e3:
        return '%.1fk' % (count / 1.0e3)
    return str(count)
def fold(val, min, max):
    """
    Normalize each value in `val` from the range [min, max] into [0, 1].

    (The original docstring claimed the inverse — "back to their regular
    range" — but the code computes (x - min) / (max - min), i.e. it
    normalizes; the docstring is corrected here.)

    Parameters
    ----------
    val : iterable of float
        Values to be normalized.
    min : float
        Lower bound of the value range.
    max : float
        Upper bound of the value range.

    Returns
    -------
    list of float
        Normalized values.
    """
    # NOTE: parameter names shadow builtins min/max; kept for interface
    # compatibility with existing callers.
    return [(i - min) / (max - min) for i in val]
def count_sliding_increases(depths: list):
    """
    Count how many times the 3-element sliding-window sum of depths increases.

    :param depths: list of depth values (string or int convertible)
    :return: number of times the window sum increased
    :rtype: int
    """
    values = [int(depth) for depth in depths]
    increases = 0
    for idx in range(3, len(values)):
        window_new = values[idx] + values[idx - 1] + values[idx - 2]
        window_old = values[idx - 1] + values[idx - 2] + values[idx - 3]
        if window_new > window_old:
            increases += 1
    return increases
def nested_haskey(x, keys):
    """
    Check that the full key path `keys` exists in nested dictionary `x`.

    Fixes: the original raised IndexError for an empty `keys` list
    (it indexed keys[0] after the len==1 check failed); an empty path
    now trivially returns True.

    Parameters
    ----------
    x : dict
        (Possibly nested) dictionary to probe.
    keys : sequence
        Key path, outermost key first.

    Returns
    -------
    bool
        True when every key in the path exists at its level.
    """
    if not keys:
        # Empty path: vacuously present.
        return True
    if keys[0] not in x:
        return False
    if len(keys) == 1:
        return True
    # Recurse one level down with the remaining path.
    return nested_haskey(x[keys[0]], keys[1:])
def by_index(fg_color_index, bg_color_index=0, attribute=0):
    """
    Return the ANSI escape sequence selecting 256-color text colors.

    fg_color_index: color index 0-255 applied to the text
    bg_color_index: color index 0-255 applied to the background
    attribute: use Attribute class variables
    """
    return "\033[{};38;5;{};48;5;{}m".format(attribute, fg_color_index, bg_color_index)
def extract_explanation(exp_string):
    """
    Parse a raw explanation string into parallel uid and role lists.

    E.g. "uid1|role1 uid2|role2" -> (["uid1", "uid2"], ["role1", "role2"]).
    Non-string input yields ([], []).

    Improvement: uses isinstance() instead of the `type(...) != str`
    anti-idiom (also accepts str subclasses now, which is backward
    compatible).
    """
    if not isinstance(exp_string, str):
        return [], []
    uids = []
    roles = []
    # Whitespace-separated tokens, each "uid|role".
    for uid_and_role in exp_string.split():
        uid, role = uid_and_role.split("|")
        uids.append(uid)
        roles.append(role)
    return uids, roles
def merge_sort(array):
    """
    Sort `array` ascending with merge sort.

    Input : list of values
    Note :
    It divides the input array in two halves, calls itself for the two halves
    and then merges the two sorted halves. The sort happens IN PLACE via slice
    assignment, and the (same) list object is also returned.
    Returns : sorted list of values
    """
    def join_sorted_arrays(array1, array2):
        """
        Input : 2 sorted arrays.
        Returns : New sorted array
        """
        new_array = []  # this array will contain values from both input arrays.
        j = 0  # Index to keep track where we have reached in second array
        n2 = len(array2)
        for i, element in enumerate(array1):
            # We will compare current element in array1 to current element in array2; if the element
            # in array2 is smaller, append it to the new array and look at the next element in array2.
            # Keep doing this until either array2 is exhausted or an element of array2 greater than
            # the current element of array1 is found.
            while j < n2 and element > array2[j]:
                new_array.append(array2[j])
                j += 1
            new_array.append(element)
        # If there are any remaining values in array2 (all bigger than the last element of array1),
        # append those to the new array.
        for i in range(j, n2):
            new_array.append(array2[i])
        return new_array

    n = len(array)
    # Base case: a single-element list is already sorted.
    # NOTE(review): an empty list recurses forever ([:0] of [] is []) — callers
    # appear to assume non-empty input; confirm before relying on it.
    if n == 1:
        return array
    else:
        # Sort each half in place, then merge the halves back into `array`.
        array[:int(n/2)] = merge_sort(array[:int(n/2)])
        array[int(n/2):] = merge_sort(array[int(n/2):])
        array[:] = join_sorted_arrays(array[:int(n/2)], array[int(n/2):])
        return array
def __assert_sorted(collection):
    """Check if collection is ascending sorted, if not - raises :py:class:`ValueError`
    :param collection: collection
    :return: True if collection is ascending sorted
    :raise: :py:class:`ValueError` if collection is not ascending sorted
    Examples:
    >>> __assert_sorted([0, 1, 2, 4])
    True
    >>> __assert_sorted([10, -1, 5])
    Traceback (most recent call last):
    ...
    ValueError: Collection must be ascending sorted
    """
    # Compare against a sorted copy; equality means already ascending.
    if collection == sorted(collection):
        return True
    raise ValueError("Collection must be ascending sorted")
def encodeCipherString(enctype, iv, ct, mac):
    """Build a bitwarden cipherstring: "<type>.<iv>|<ct>[|<mac>]" with the
    byte fields decoded as UTF-8; the mac segment is appended only if truthy."""
    parts = [iv.decode('utf-8'), ct.decode('utf-8')]
    if mac:
        parts.append(mac.decode('utf-8'))
    return "{}.{}".format(enctype, '|'.join(parts))
def exists_in_prev_dialog_states(slot_value, converted_turns):
    """Whether slot value exists in the previous dialogue states.

    Scans every USER turn (even indices of `converted_turns`) and every frame's
    state slot_values; each stored value may itself be a '|'-separated list of
    aliases. Returns (service, slot, values_list) for the first frame whose
    expanded values contain `slot_value`, or (None, None, None) if absent.

    NOTE(review): assumes even-indexed turns are USER turns (enforced via
    assert, which is stripped under -O) — confirm against the caller's
    turn-conversion format.
    """
    for user_turn in converted_turns[::2]:
        assert user_turn['speaker'] == 'USER'
        for frame in user_turn['frames']:
            if 'state' in frame and 'slot_values' in frame['state']:
                slot_values_dict = frame['state']['slot_values']
                for slot, values_list in slot_values_dict.items():
                    # Expand '|'-separated alias strings before matching.
                    new_list = []
                    for value in values_list:
                        new_list.extend(value.split('|'))
                    if slot_value in new_list:
                        return frame['service'], slot, values_list
    return None, None, None
def convertStringToBool(nodeText):
    """
    Convert string to bool
    @ In, nodeText, str, string from xml node text
    @ Out, val, bool, True or False
    """
    truthy_words = ('yes', 'y', 'true', 't', 'on')
    return nodeText.lower() in truthy_words
def bytes(bits=None, kilobytes=None, megabytes=None, gigabytes=None, terabytes=None, petabytes=None):
    """Usage: Convert to bytes. Example: bytes(gigabytes=2)

    Exactly one keyword should be supplied; the first non-None one (in
    declaration order) wins. Raises ValueError when none is given.

    Improvement: raises the specific ValueError instead of a bare generic
    Exception (backward compatible — ValueError is an Exception subclass).
    NOTE: this function shadows the builtin `bytes`; name kept for callers.
    """
    if bits is not None:
        return bits / 8
    elif kilobytes is not None:
        return kilobytes * 1000
    elif megabytes is not None:
        return megabytes * 1e+6
    elif gigabytes is not None:
        return gigabytes * 1e+9
    elif terabytes is not None:
        return terabytes * 1e+12
    elif petabytes is not None:
        return petabytes * 1e+15
    else:
        raise ValueError("You must specify one value. Example: bits, kilobytes, megabytes, gigabytes, terabytes, petabytes")
def _check_one_to_one(case_control_map: dict) -> bool:
    """Return True when every case maps to exactly one control."""
    return all(len(controls) == 1 for controls in case_control_map.values())
def isclose(a, b, rel_tol=1e-4, abs_tol=1e-4):
    """
    Return True when a and b are within the absolute or relative tolerance.

    :param a: float
    :param b: float
    :param rel_tol: float
    :param abs_tol: float
    :return: bool
    """
    # Effective tolerance: the larger of the scaled relative and absolute ones.
    relative_bound = rel_tol * max(abs(a), abs(b))
    tolerance = max(relative_bound, abs_tol)
    return abs(a - b) <= tolerance
def clamp(n, maxabs):
    """Limit `n` to the closed interval [-maxabs, maxabs]."""
    upper_limited = min(n, maxabs)
    return max(-maxabs, upper_limited)
def solve(n, ar):
    """
    Return the absolute difference of the diagonal sums of the n x n matrix `ar`.
    """
    primary = 0    # top-left -> bottom-right diagonal
    secondary = 0  # top-right -> bottom-left diagonal
    for row_index, row in enumerate(ar):
        primary += row[row_index]
        secondary += row[n - row_index - 1]
    return abs(primary - secondary)
def flatten_documents_to_sentence_strings(docs):
    """Flatten nested documents so each sentence becomes one space-joined string
    (useful for sentence-wise cooccurrence measuring).

    Input shape:
        [ [ ['w1','w2'], ['w3'] ],      # document 1: sentences of tokens
          [ ['w4'] ] ]                  # document 2
    Output shape:
        [ 'w1 w2', 'w3', 'w4' ]        # one flat string per sentence
    """
    flattened = []
    for document in docs:
        for sentence in document:
            flattened.append(' '.join(sentence))
    return flattened
def clean_cut(phases, cut):
    """Advance `cut` until the validation split no longer starts with a
    permutation of the last training vector, so the validation set doesn't
    contain permutations of the training vectors."""
    from itertools import permutations

    def token_permutations(phrase):
        # Every ordering of the phrase's whitespace-split tokens, as lists.
        return [list(p) for p in permutations(phrase.split())]

    print("Searching the clean validation cut...")
    while phases[cut].split() in token_permutations(phases[cut - 1]):
        cut += 1
    return cut
def try_key(d, key, val):
    """
    Look up `key` in dict `d`, falling back to `val` when absent.

    d: dict
    key: str
    val: object
        the default value if the key does not exist in d
    """
    return d.get(key, val)
def coroutine_frames(all_frames):
    """Drop coroutine boilerplate frames from a frame list for cleaner
    stack/traceback printing of coroutines.

    Filters by filename + function name (conservative; filename alone might
    suffice).
    """
    kept = []
    for frame in all_frames:
        filename, func = frame[0], frame[2]
        boilerplate = (
            (filename == '<string>' and func == 'raise_exc_info')
            or (filename.endswith('tornado/gen.py') and func in {'run', 'wrapper', '__init__'})
            or (filename.endswith('tornado/concurrent.py') and func == 'result')
        )
        if not boilerplate:
            kept.append(frame)
    return kept
def istype(obj, check):
    """Strict isinstance(obj, check): matches exact types only, never subclasses."""
    if isinstance(check, tuple):
        return any(type(obj) is cls for cls in check)
    return type(obj) is check
def get_relationships_by_type(rels, rel_type):
    """
    Finds relationships by a relationship type

    Example::
        # Import
        from cloudify import ctx
        from cloudify_ansible_tower import utils
        # Find specific relationships
        rels = utils.get_relationships_by_type(
            ctx.instance.relationships,
            'cloudify.ansible_tower.relationships.a_custom_relationship')

    :param list<`cloudify.context.RelationshipContext`> rels: \
        List of Cloudify instance relationships
    :param string rel_type: Relationship type
    :returns: List of relationship objects
    :rtype: list of :class:`cloudify.context.RelationshipContext`
    """
    # Anything other than a list (e.g. None) yields no matches.
    if not isinstance(rels, list):
        return []
    return [rel for rel in rels if rel_type in rel.type_hierarchy]
def preprocess_keylist(keylist, **options):
    """Convert a list of keys to a comma-separated string; non-lists pass through."""
    if not isinstance(keylist, list):
        return keylist
    return ", ".join(str(key) for key in keylist)
def binary_search(li, x):
    """
    Given a sorted list li, return an index at which x appears using binary
    search, i.e. in O(log(N)) time where N is the length of li.
    If x is not in the list, return -1.

    Improvement: the original recursed on list slices, costing O(N) copying
    per level; this iterative version keeps lo/hi bounds instead and does
    no copying.

    :param li: list of sorted elements
    :param x: element to search for
    :return: index of x in li, or -1
    """
    lo, hi = 0, len(li) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if li[mid] == x:
            return mid
        if li[mid] < x:
            lo = mid + 1
        else:
            hi = mid - 1
    return -1
def scale_convert(x):
    """Map a value on the 1..9 scale onto the -1..1 scale."""
    normalized = (x - 1) / 8          # 0..1
    return (normalized - 0.5) * 2     # -1..1
def _check_monit_services_status(check_result, monit_services_status):
    """
    @summary: Verify each Monit-monitored service is in its expected status.
              Services in "Not monitored" status are skipped, since they were
              temporarily excluded from Monit monitoring.
    @return: The (mutated) check_result dict, with per-service statuses under
             "services_status" and "failed" set True on any bad status.
    """
    # Expected status per service type; anything else marks the check failed.
    expected_by_type = {
        "Filesystem": "Accessible",
        "Process": "Running",
        "Program": "Status ok",
    }
    check_result["services_status"] = {}
    for service_name, service_info in monit_services_status.items():
        status = service_info["service_status"]
        check_result["services_status"][service_name] = status
        if status == "Not monitored":
            continue
        service_type = service_info["service_type"]
        if service_type in expected_by_type and status != expected_by_type[service_type]:
            check_result["failed"] = True
    return check_result
def markdownify(span):
    """
    Given a "span" element, returns a segment of markdown text
    Supports __bold__, _italic_, and [link](url) elements.
    Bold and italic elements are trimmed to avoid markdown parse errors.
    """
    raw_text = span.get("text", "")
    modifiers = span.get("modifiers", {})
    if not raw_text and not modifiers:
        return ""
    if modifiers.get("image"):
        img_url = modifiers.get("image")
        # NOTE(review): this branch returns an empty f-string and never uses
        # img_url — the image markdown (presumably something like
        # f"![...]({img_url})") appears to have been lost; restore from the
        # original source before relying on image spans.
        return f""
    bold = "__" if modifiers.get("bold") else ""
    italic = "_" if modifiers.get("italic") else ""
    target = ""
    islink = ("", "")
    postfix = ""
    if bold or italic:
        # we need to trim the text to account for markdown, so
        # fix up dangling spaces...
        # NOTE(review): old_len is computed but never used — dead variable.
        old_len = len(raw_text)
        raw_text = raw_text.strip()
        postfix = " "
    if modifiers.get("link"):
        # Wrap text in [..](url) link syntax.
        islink = "[", "]"
        target = f"({modifiers['link']['url']})"
    text = (
        f"{bold}{italic}{islink[0]}{raw_text}{islink[1]}{target}{italic}{bold}{postfix}"
    )
    return text
def lr_schedule0(epoch):
    """Learning Rate Schedule

    Learning rate starts at 0.1 and is scaled down after 10, 120, 160 and 180
    epochs (x0.1, x0.01, x0.001 and x0.0005 respectively).
    (The original docstring said "after 10, 20, 30, 40 epochs", which does not
    match the thresholds actually used below.)
    Called automatically every epoch as part of callbacks during training.

    # Arguments
        epoch (int): The number of epochs
    # Returns
        lr (float32): learning rate
    """
    lr = 1e-1
    # Highest threshold wins; checks are ordered descending.
    if epoch > 180:
        lr *= 0.5e-3
    elif epoch > 160:
        lr *= 1e-3
    elif epoch > 120:
        lr *= 1e-2
    elif epoch > 10:
        lr *= 1e-1
    print('Learning rate: ', lr)
    return lr
def iface_definition(iface, ssid, psk):
    """Returns the corresponding iface definition as a string,
    formatted for inclusion in /etc/network/interfaces.d/

    NOTE(review): the template below is emitted verbatim, including the
    literal indentation of its continuation lines; the original file's
    whitespace may have been altered in this copy — confirm the emitted
    layout against a known-good interfaces file before changing it.
    """
    return \
        """iface {} inet dhcp
wpa-ssid "{}"
wpa-psk {}
""".format(iface, ssid, psk)
def count_fq(zz):
    """Given a sorted list `zz` (possibly with repeats), return a list of
    [count, value] pairs where count is how many times value occurs
    consecutively.

    Fixes: the original appended the magic sentinel -100000 to detect the end
    of the list, which miscounts whenever -100000 legitimately appears in the
    data; itertools.groupby needs no sentinel.
    """
    from itertools import groupby
    # groupby groups consecutive equal elements, matching the original's
    # run-length behavior on sorted input.
    return [[len(list(group)), value] for value, group in groupby(zz)]
def is_cell(keyword):
    """Check if cell keyword

    Args:
        keyword: Profiles keyword
    Returns:
        True if cell keyword
    """
    # Skip a leading 'L' prefix, then test whether the significant character is 'B'.
    significant = keyword[1] if keyword[0] == 'L' else keyword[0]
    return significant == 'B'
def get_cyclic_label(cycle_nr):
    """
    Return the SMILES ring-closure label for cycle number `cycle_nr`.

    Single-digit numbers are used as-is; multi-digit ones need the SMILES
    '%' prefix.

    Input
    ----------
    cycle_nr: int, number of the cycle to be closed
    Output
    -------
    str, string to be inserted in the SMILES string to indicate a cycle
    """
    label = str(cycle_nr)
    return label if cycle_nr <= 9 else '%' + label
def tag(tag_name: str, data: str):
    """
    Wrap `data` in an XML/HTML element named `tag_name`.
    """
    return ''.join(['<', tag_name, '>', data, '</', tag_name, '>'])
def boolean(value: str) -> bool:
    """Convert a "human" boolean (Y, Yes, True, ...) to a Python boolean,
    raising a ValueError if it can't be converted.
    """
    normalized = value.lower()
    if normalized in {"y", "yes", "true"}:
        return True
    if normalized in {"n", "no", "false"}:
        return False
    raise ValueError('Please enter either "y" or "n".')
def make_car(manufacturer, model, **user_info):
    """Build a dictionary containing everything we know about a car."""
    # Required fields first, then any extra user-supplied details.
    return {'manufacturer': manufacturer, 'model': model, **user_info}
def get_agency_url(operator_code):
    """Return the website URL for a transport operator code, or "NA" if unknown."""
    operator_urls = {
        'OId_LUL': "https://tfl.gov.uk/maps/track/tube",
        'OId_DLR': "https://tfl.gov.uk/modes/dlr/",
        'OId_TRS': "https://www.thamesriverservices.co.uk/",
        'OId_CCR': "https://www.citycruises.com/",
        'OId_CV': "https://www.thamesclippers.com/",
        'OId_WFF': "https://tfl.gov.uk/modes/river/woolwich-ferry",
        'OId_TCL': "https://tfl.gov.uk/modes/trams/",
        'OId_EAL': "https://www.emiratesairline.co.uk/",
        #'OId_CRC': "https://www.crownrivercruise.co.uk/",
    }
    return operator_urls.get(operator_code, "NA")
def weighted_average(numbers):
    """
    Return the average of `numbers` weighted by each value's frequency.

    :param numbers: List[int]
    :return: float
    """
    # Tally how often each distinct value occurs.
    frequency = {}
    for value in numbers:
        frequency[value] = frequency.get(value, 0) + 1
    # Accumulate value * frequency over the distinct values.
    total = 0
    for value, count in frequency.items():
        total += value * count
    return total / len(numbers)
def features(runenvs):
    """
    Information about the capabilities of the cluster: group the flat
    runenv/feature records into one entry per runenv, listing its available
    "features" and additional computational nodes.
    """
    # Group feature names under their runenv, preserving first-seen order.
    grouped = {}
    for record in runenvs:
        grouped.setdefault(record['runenv'], []).append(record['feature'])
    return [
        {
            "os": "Linux",
            "extension_node": None,
            "runenv": name,
            "features": feature_list,
        }
        for name, feature_list in grouped.items()
    ]
def build_model_data(model, name, is_public, tlp, tags, intelligence, description):
    """
    Build the data dictionary used in Threat Model creation/update requests.

    Only truthy scalar fields are included; 'tipreport' models store their
    description under 'body'. tags/intelligence accept either a list or a
    comma-separated string.
    """
    description_key = 'body' if model == 'tipreport' else 'description'
    candidate_fields = (
        ('name', name),
        ('is_public', is_public),
        ('tlp', tlp),
        (description_key, description),
    )
    data = {field: value for field, value in candidate_fields if value}
    if tags:
        data['tags'] = tags if isinstance(tags, list) else [t.strip() for t in tags.split(',')]
    if intelligence:
        data['intelligence'] = (intelligence if isinstance(intelligence, list)
                                else [i.strip() for i in intelligence.split(',')])
    return data
def UnixifyPath(file_path):
    """Convert backslash path separators to forward slashes."""
    return "/".join(file_path.split("\\"))
def trim_docstring(docstring):
    """
    Uniformly trim leading/trailing whitespace from docstrings.

    Based on https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation
    """
    if not docstring or not docstring.strip():
        return ''
    # Convert tabs to spaces and split into lines.
    lines = docstring.expandtabs().splitlines()
    # Common indentation over all non-blank lines.
    margins = [len(line) - len(line.lstrip()) for line in lines if line.lstrip()]
    margin = min(margins)
    # First line keeps no indent; the rest lose the common margin.
    body = [line[margin:].rstrip() for line in lines[1:]]
    return "\n".join([lines[0].lstrip()] + body).strip()
def igInt(i, length=7):
    """
    Return integer `i` formatted as a string of width `length`.

    Uses the '%*i' dynamic-width specifier: positive widths right-justify
    (space-padded); a negative width would left-justify.
    """
    return '%*i' % (length, i)
def lower_obj(x):
    """
    Lower-case an object recursively (dict keys and values, list items, strings).

    Values stored under the ignored path-like keys keep their original case,
    although the keys themselves are still lower-cased.
    Adapted from: https://stackoverflow.com/a/4223871/5965685

    :param x: input object
    :return: output object lower-cased
    """
    ignored_fields = ('path', 'g-function-path', 'g_b-function-path')
    if isinstance(x, list):
        return [lower_obj(item) for item in x]
    if isinstance(x, dict):
        lowered = {}
        for key, value in x.items():
            lower_key = key.lower()
            lowered[lower_key] = value if lower_key in ignored_fields else lower_obj(value)
        return lowered
    if isinstance(x, str):
        return x.lower()
    return x
def FindBuildRecordFromLog(description, build_info):
    """Find the right build record in the build logs.

    Returns the first build record whose 'reason' field contains
    `description` ('description' is a special tag created at buildbot
    launch time so the record can be found here), or {} when none matches.
    """
    matches = (record for record in build_info if description in record['reason'])
    return next(matches, {})
def prunejoin(dict_, list_, sep=", "):
    """Look up each key of `list_` in `dict_`, drop missing/falsy values,
    and join the rest with `sep`."""
    values = [dict_.get(key) for key in list_]
    return sep.join(value for value in values if value)
def hash_sdbm(name):
    """Calculate the 32-bit SDBM hash of a string, returned as a hex string."""
    value = 0
    for ch in name:
        # Classic SDBM recurrence, masked to 32 bits.
        value = (value * 65599 + ord(ch)) & 0xFFFFFFFF
    return hex(value)
def _build_merge_vcf_command_str(raw_vcf_path_list):
    """Build the bcftools command string merging the given VCF files into a
    single multi-sample VCF."""
    return "bcftools merge " + " ".join(raw_vcf_path_list)
def _next_smooth_int(n):
    """Find the next even integer with prime factors no larger than 5.

    Args:
        n: An `int`.
    Returns:
        The smallest `int` that is larger than or equal to `n`, even and with
        no prime factors larger than 5.

    Fix: the factor-stripping loops used float division (`/=`), which loses
    precision for integers beyond 2**53; integer floor division (`//=`) keeps
    the computation exact for arbitrarily large n.
    """
    if n <= 2:
        return 2
    if n % 2 == 1:
        n += 1  # Even.
    n -= 2  # Cancel out the +2 at the beginning of the loop.
    ndiv = 2  # Dummy value that is > 1 so the loop runs at least once.
    while ndiv > 1:
        n += 2
        # Strip out all factors of 2, 3 and 5; anything left means a larger
        # prime factor exists and we must keep searching.
        ndiv = n
        while ndiv % 2 == 0:
            ndiv //= 2
        while ndiv % 3 == 0:
            ndiv //= 3
        while ndiv % 5 == 0:
            ndiv //= 5
    return n
def check_instance_tag(tag_key, tag_value, app):
    """
    Check if instance tag given matches the application configuration

    :param tag_key:
    :param tag_value:
    :param app:
    :return: bool

    >>> my_app = {'_id': '123456789', 'name': 'webapp', 'env': 'dev', 'role': 'webfront'}
    >>> check_instance_tag('app', 'nope', my_app)
    False
    >>> check_instance_tag('env', 'prod', my_app)
    False
    >>> check_instance_tag('app', 'webapp', my_app)
    True
    >>> check_instance_tag('app_id', '123456789', my_app)
    True
    >>> check_instance_tag('color', 'green', my_app)
    False
    >>> check_instance_tag('Name', 'ec2.test.front.webapp', my_app)
    False
    """
    # Well-known tag keys map directly onto app fields (lazily evaluated so a
    # missing required field still raises KeyError, as before).
    known_keys = {
        'app_id': lambda: app['_id'],
        'app': lambda: app['name'],
        'env': lambda: app['env'],
        'role': lambda: app['role'],
        'color': lambda: app.get('blue_green', {}).get('color'),
    }
    if tag_key in known_keys:
        return tag_value == known_keys[tag_key]()
    # AWS-managed tags are always accepted.
    if tag_key.startswith('aws:'):
        return True
    # Otherwise compare against the app's custom instance tags.
    instance_tags = {t['tag_name']: t['tag_value']
                     for t in app.get('environment_infos', {}).get('instance_tags', [])}
    return tag_value == instance_tags.get(tag_key)
def format_int(value):
    """Cast the given value to an integer and return its string form."""
    return str(int(value))
def proposal(proposal):
    """Specific mocked proposal for reparameterisation tests: disables the
    default reparameterisations on the given object and returns it."""
    setattr(proposal, 'use_default_reparameterisations', False)
    return proposal
def is_int(value):
    """Return True if `value` can be cast to an int, else False.

    Fixes: the original only caught ValueError, so inputs like None or a
    list raised an uncaught TypeError instead of returning False; docstring
    typos ("Tre"/"Falsee") corrected.
    """
    try:
        int(value)
    except (ValueError, TypeError):
        return False
    return True
def fit_linear(x, a, b):
    """Linear model used for curve fitting: y = a*x + b.

    Args:
        x (float): the independent variable
        a (float): slope (directional factor)
        b (float): intercept (free element)
    Returns:
        float: value of the linear fit at x
    """
    return b + a * x
def cascaded_cmp_with_partial_constants_and_false_end(a, b, c):
    """
    Expanded form of the chained comparison 1 < 2 < a < 4 < 5 < b < 7 < 7 < c;
    the constant 7 < 7 link guarantees a False result whenever evaluation
    reaches it.

    >>> cascaded_cmp_with_partial_constants_and_false_end(3, 6, 8)
    False
    >>> cascaded_cmp_with_partial_constants_and_false_end(1, 6, 8)
    False
    >>> cascaded_cmp_with_partial_constants_and_false_end(4, 6, 8)
    False
    >>> cascaded_cmp_with_partial_constants_and_false_end(3, 7, 8)
    False
    """
    result = ((1 < 2) and (2 < a) and (a < 4) and (4 < 5)
              and (5 < b) and (b < 7) and (7 < 7) and (7 < c))
    return result
def extendedEuclidean(a, b):
    """Calculate the GCD and linear combination of two integers using
    the Extended Euclidean algorithm.

    Arguments should be integers.
    Returns (r, s, t) such that r = a*s + b*t.

    Based on pseudocode from
    https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm
    """
    prev_r, r = int(a), int(b)
    prev_s, s = 1, 0
    prev_t, t = 0, 1
    while r != 0:
        # divmod gives the floor quotient and remainder in one step.
        quotient, remainder = divmod(prev_r, r)
        prev_r, r = r, remainder
        prev_s, s = s, prev_s - quotient * s
        prev_t, t = t, prev_t - quotient * t
    return (prev_r, prev_s, prev_t)
def ensure_replicates_kwarg_validity(replicate_kwarg):
    """
    Validate that `replicate_kwarg` is 'bootstrap' or 'jackknife'; raise a
    helpful ValueError otherwise. Returns None on success.
    """
    if replicate_kwarg in ('bootstrap', 'jackknife'):
        return None
    raise ValueError("`replicates` MUST be either 'bootstrap' or 'jackknife'.")
def check_uniqueness_in_rows(board: list):
    """
    Check buildings of unique height in each row.
    Return True if buildings in a row have unique length, False otherwise.

    >>> check_uniqueness_in_rows(['***21**', '412453*',\
 '423145*', '*543215', '*35214*', '*41532*', '*2*1***'])
    True
    >>> check_uniqueness_in_rows(['***21**', '452453*',\
 '423145*', '*543215', '*35214*', '*41532*', '*2*1***'])
    False
    >>> check_uniqueness_in_rows(['***21**', '412453*',\
 '423145*', '*553215', '*35214*', '*41532*', '*2*1***'])
    False
    """
    # Hint rows (first/last) and hint columns (first/last char) are excluded.
    for row in board[1:-1]:
        digits = [int(ch) for ch in row[1:-1] if ch != '*']
        if len(digits) != len(set(digits)):
            return False
    return True
def ensure_three_decimal_points_for_milliseconds_and_replace_z(
    datetimestring: str,
) -> str:
    """
    Normalize a SciHub Datetime string into ISO format for `datetime` parsing.

    SciHub Datetimes can have fewer than 3 millisecond digits (or none), so we
    right-pad the fractional part with zeros to 3 digits, and replace the
    trailing Z with +00:00.

    :param datetimestring: Str representing a SciHub Datetime
    :returns: Str representing a correctly padded SciHub Datetime
    """
    stripped = datetimestring.replace("Z", "")
    parts = stripped.split(".")
    if len(parts) > 1:
        fractional = parts[1]
        if len(fractional) < 3:
            stripped = stripped + "0" * (3 - len(fractional))
    else:
        # No fractional seconds at all: add a zero-millisecond component.
        stripped = stripped + ".000"
    return stripped + "+00:00"
def Total(data):
    """Return the float value of a number or the sum of a list.

    Exact-type checks are intentional (e.g. bool is rejected); any other
    type raises TypeError.
    """
    kind = type(data)
    if kind is float:
        return data
    if kind is int:
        return float(data)
    if kind is list:
        return float(sum(data))
    raise TypeError
def score_to(ans, keys):
    """
    Return the score `ans` gets against the ground truth `keys`.

    Ans and keys are key-value dicts:
        key - expert id
        values - [homepage, gender, position, pic, email, location]

    Each of the 6 fields contributes up to 1 point per expert: exact match for
    all fields except position (index 2), which is scored as the Jaccard
    similarity of its ';'-separated value sets. The total is normalized by
    (number of experts * 6), giving a score in [0, 1].

    NOTE(review): assumes every expert id in `ans` is present in `keys` and
    that each value list has exactly 6 entries — confirm with callers.
    """
    num = len(ans)
    goals = 0
    for i in ans:
        goal = 0
        x = ans[i]   # predicted fields for expert i
        y = keys[i]  # ground-truth fields for expert i
        for j in range(len(x)):
            if j != 2:
                # Exact match scores a full point for this field.
                if x[j] == y[j]:
                    goal += 1
            else:
                # Position field: Jaccard similarity over ';'-separated items.
                pos_x = set(x[j].split(';'))
                pos_y = set(y[j].split(';'))
                goal += len(pos_x & pos_y) / len(pos_x | pos_y)
        goals += goal
    # Normalize by the maximum attainable score (6 points per expert).
    goals = goals / (num * 6)
    return goals
def _to_string(val):
"""Convert to text."""
if isinstance(val, bytes):
return val.decode('utf-8')
assert isinstance(val, str)
return val |
def getOverlap(a, b):
    """
    Get the number of ms overlapped between 2 time windows.

    Each window is a (start, end) pair; disjoint windows yield 0.
    """
    latest_start = a[0] if a[0] > b[0] else b[0]
    earliest_end = a[1] if a[1] < b[1] else b[1]
    overlap = earliest_end - latest_start
    return overlap if overlap > 0 else 0
def make_box(poly):
    """Generate a bounding box (min_x, min_y, max_x, max_y) from a polygon.

    `poly` is an iterable of rings, each ring an iterable of (x, y) points.
    """
    xs = [point[0] for ring in poly for point in ring]
    ys = [point[1] for ring in poly for point in ring]
    return (min(xs), min(ys), max(xs), max(ys))
def format_imports(import_statements):
    """
    -----
    examples:
    @need
    from fastest.constants import TestBodies
    @end
    @let
    import_input = TestBodies.TEST_STACK_IMPORTS_INPUT
    output = TestBodies.TEST_STACK_IMPORTS_OUTPUT
    @end
    1) format_imports(import_input) -> output
    -----
    :param import_statements: list
    :return: list
    """
    formatted = []
    for statement in import_statements:
        if len(statement) > 0:
            # Normalise surrounding whitespace and terminate with a newline.
            formatted.append(f"{statement.strip()}\n")
    return formatted
def _get_external_id(account_info):
"""Get external id from account info."""
if all(k in account_info for k in ('external_id', 'external_method')):
return dict(id=account_info['external_id'],
method=account_info['external_method'])
return None |
def bio2_to_bioes(tags):
    """
    Convert the BIO2 tag sequence into a BIOES sequence.
    Args:
        tags: a list of tags in BIO2 format
    Returns:
        new_tags: a list of tags in BIOES format
    """
    converted = []
    last = len(tags) - 1
    for i, tag in enumerate(tags):
        if tag == 'O':
            converted.append(tag)
            continue
        if len(tag) < 2:
            raise Exception(f"Invalid BIO2 tag found: {tag}")
        prefix = tag[:2]
        # Does the next tag continue this span with an I-?
        followed_by_inside = i < last and tags[i + 1][:2] == 'I-'
        if prefix == 'I-':
            # An I- not followed by another I- ends its span: E-.
            converted.append(tag if followed_by_inside else 'E-' + tag[2:])
        elif prefix == 'B-':
            # A B- not followed by an I- is a single-token span: S-.
            converted.append(tag if followed_by_inside else 'S-' + tag[2:])
        else:
            raise Exception(f"Invalid IOB tag found: {tag}")
    return converted
def hub_quantile_prediction_dict_validator(target_group_dict, prediction_dict):
    """
    Does hub prediction_dict validation as documented in `json_io_dict_from_quantile_csv_file()`
    """
    valid_quantiles = target_group_dict['quantiles']
    prediction_quantiles = prediction_dict['prediction']['quantile']
    error_messages = []  # return value. filled next
    # Compare as sets: order and duplicates do not matter.
    if set(prediction_quantiles) != set(valid_quantiles):
        message = (f"prediction_dict quantiles != valid_quantiles. valid_quantiles={valid_quantiles}, "
                   f"prediction_quantiles={prediction_quantiles}")
        error_messages.append(message)
    return error_messages
def clean_note_content(content):
    """Removes unwanted characters from note content."""
    trimmed = content.strip()
    # Double quotes are replaced with single quotes.
    return trimmed.replace('"', "'")
def join(*args):
    """
    join url with paths and url queries
    :param args:
    :return:
    """
    segments = []
    for arg in args:
        # Skip empty pieces and drop trailing slashes before joining.
        if len(arg):
            segments.append(arg.rstrip('/'))
    return '/'.join(segments)
def _join(s, tail):
"""
Return the concatenation of s + ' ' + tail if s is a truthy string, or tail
only otherwise.
"""
return " ".join((s, tail)) if s else tail |
def isSolvable(state):
    """
    Check whether the given sliding-puzzle configuration is solvable.

    A configuration is solvable iff its number of inversions is even; an
    inversion is a pair of non-blank tiles appearing in the wrong relative
    order. The blank tile is encoded as '0' and excluded from the count via
    the `int(...)` truthiness tests.

    :param state: flat sequence of single-character tile labels, e.g.
        '123456780' — NOTE(review): assumes single-digit labels so that
        string comparison matches numeric order; confirm against callers.
    :returns: True if the configuration is solvable, False otherwise
    """
    invCount = 0
    size = len(state)
    for i in range(0, size - 1):
        for j in range(i + 1, size):
            # Count only pairs where neither tile is the blank ('0').
            if (int(state[j]) and int(state[i]) and state[i] > state[j]):
                invCount += 1
    # Bug fix: the inversion count was computed but discarded and the
    # function unconditionally returned 1 (the real check was commented
    # out). Return the actual parity test instead.
    return invCount % 2 == 0
def seconds_to_duration(seconds):
    """Return a string representation of the duration in seconds"""
    hours, remainder = divmod(seconds, 3600)
    minutes, secs = divmod(remainder, 60)
    # Omit leading units that are zero: "Hh:MMm:SSs", "Mm:SSs", or "Ss".
    if hours > 0:
        return f"{hours:.0f}h:{minutes:02.0f}m:{secs:02.0f}s"
    if minutes > 0:
        return f"{minutes:.0f}m:{secs:02.0f}s"
    return f"{secs:.0f}s"
def replace_opts(rep_doc, opts):
    """Replace flags with parameter names.

    This is a simple textual substitution: every command-line flag found
    in the docstring is replaced with its attribute name.

    Parameters
    ----------
    rep_doc : string
        Documentation string
    opts : dict
        Dictionary mapping flags to attribute names. Use reverse_opt_map
        to reverse flags and attrs from opt_map class attribute.

    Returns
    -------
    rep_doc : string
        New docstring with flags replaced with attribute names.

    Examples
    --------
    doc = grab_doc('bet')
    opts = reverse_opt_map(fsl.Bet.opt_map)
    rep_doc = replace_opts(doc, opts)
    """
    for flag, attr_name in opts.items():
        rep_doc = rep_doc.replace(flag, attr_name)
    return rep_doc
def get_key_of_item(d: dict, i: str) -> str:
    """Return the first key whose value equals `i` in dict `d`, or "" if no value matches."""
    matches = (key for key, value in d.items() if value == i)
    return next(matches, "")
def remove_empty_leading_trailing(lines):
    """
    Removes leading and trailing empty lines.

    A list of strings is passed as argument, some of which may be empty.
    This function removes from the start and end of the list a contiguous
    sequence of empty lines and returns the result. Embedded sequences of
    empty lines are not touched.

    Parameters:
        lines   List of strings to be modified.
    Return:
        Input list of strings with leading/trailing blank line sequences
        removed.
    """
    first = 0
    last = len(lines)
    # Advance past the leading run of empty lines.
    while first < last and len(lines[first]) == 0:
        first += 1
    # Entire list (or an empty list) is blank: nothing to keep.
    if first == last:
        return []
    # Retreat past the trailing run of empty lines; a non-empty line is
    # guaranteed to exist at this point, so the loop terminates.
    while len(lines[last - 1]) == 0:
        last -= 1
    return lines[first:last]
def is_crud(sql):
    """Check that given sql is insert , update, delete or select
    :param sql: Sql string to check for is_crud
    :return: Boolean result
    """
    if not isinstance(sql, str):
        raise TypeError('`sql` argument is not valid. `sql` must be str.')
    if sql == '':
        raise ValueError('Sql statement is empty')
    # Only the first whitespace-delimited word decides the statement kind.
    keyword = sql.split(' ')[0].lower()
    return keyword in ('insert', 'update', 'delete', 'select')
def escape_newline(s: str) -> str:
    """Replaces each new line character (\\n) in the input with \\\\n"""
    # Split on real newlines and rejoin with the two-character escape.
    return '\\n'.join(s.split('\n'))
def gather_ec_by_fc(toplist, ec_blast, counts):
    """[format finnal ec by function counts]
    Args:
        toplist ([list]): [top 20 predicted EC]
        ec_blast ([string]): [blast results]
        counts ([int]): [function counts]
    Returns:
        [string]: [comma sepreated ec string]
    """
    if counts == 0:
        return '-'
    blast_text = str(ec_blast)
    # A usable blast result (anything but NaN) wins over predictions.
    if blast_text != 'nan':
        return blast_text
    return ','.join(toplist[:counts])
def json_serialization(ytd_json_op):
    """
    helping func for json serialize (filtering only required data)
    """
    serialized = []
    for record in ytd_json_op:
        snippet = record['snippet']
        serialized.append({
            'link': 'https://www.youtube.com/watch?v=' + record['id']['videoId'],
            'title': snippet['title'],
            'image': snippet['thumbnails']['medium']['url'],
            'description': snippet['description'],
            'channel_link': 'https://www.youtube.com/channel/' + snippet['channelId'],
            'published_at': snippet['publishedAt'],
        })
    return serialized
def booleanise(b):
    """Normalise a 'stringified' Boolean to a proper Python Boolean.

    ElasticSearch has a habit of returning "true" and "false" in its
    JSON responses when it should be returning `true` and `false`. If
    `b` looks like a stringified Boolean true, return True. If `b`
    looks like a stringified Boolean false, return False.

    Raise ValueError if we don't know what `b` is supposed to represent.
    """
    lowered = str(b).lower()
    if lowered == "true":
        return True
    if lowered == "false":
        return False
    raise ValueError("I don't know how to coerce %r to a bool" % b)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.