content stringlengths 42 6.51k |
|---|
def check_if_comp_is_completely_unconnected(conn1, conn2, ingoing, outgoing):
    """
    Checks if two connected components are completely unconnected from each other.

    Parameters
    -------------
    conn1
        First connected component (iterable of activities)
    conn2
        Second connected component (iterable of activities)
    ingoing
        Ingoing dictionary (activity -> set of activities with an edge INTO it)
    outgoing
        Outgoing dictionary (activity -> set of activities it has an edge TO)

    Returns
    -------------
    boolean
        True only when no edge exists between the two components in either direction.
    """
    for act1 in conn1:
        for act2 in conn2:
            # BUGFIX: the original required an edge in BOTH directions ("and")
            # before declaring the components connected; a single edge in either
            # direction already connects them, so "or" is the correct operator.
            if ((act1 in outgoing and act2 in outgoing[act1]) or (
                    act1 in ingoing and act2 in ingoing[act1])):
                return False
    return True
def load_properties(multiline, separator='=', comment_char='#', keys=None):
    """
    Read a multiline string of properties (key/value pairs separated by *separator*) into a dict.

    :param multiline: input string of properties
    :param separator: separator between key and value
    :param comment_char: lines starting with this char are considered comments, not key/value pairs
    :param keys: optional list to append the parsed keys to, in file order
    :return: dict mapping each key to its (stripped, unquoted) value
    """
    props = {}
    for line in multiline.splitlines():
        stripped_line = line.strip()
        if stripped_line and not stripped_line.startswith(comment_char):
            # partition splits on the FIRST separator only, so values may
            # themselves contain the separator (e.g. "url=http://x?a=b").
            key, _, raw_value = stripped_line.partition(separator)
            key = key.strip()
            props[key] = raw_value.strip().strip('"')
            # `is not None` (not `!= None`): an empty list is a valid sink.
            if keys is not None:
                keys.append(key)
    return props
def test_name(f):
"""Takes 'tests/demo.html' -> 'demo'"""
return f.replace('tests/', '').replace('.html', '') |
def add_scheme_if_missing(url):
    """Prefix *url* with 'http://' unless it already carries a scheme marker.

    >>> add_scheme_if_missing("example.org")
    'http://example.org'
    >>> add_scheme_if_missing("https://example.org")
    'https://example.org'
    """
    has_scheme = "//" in url
    return url if has_scheme else "http://%s" % url
def make_key_mapping(keys, start_note):
    """Return a dict of (note, velocity) tuples keyed by keyboard key code.

    Notes ascend from *start_note* in the order the key codes appear;
    velocity is fixed at 127 (maximum).
    """
    return {key: (start_note + offset, 127) for offset, key in enumerate(keys)}
def get_next_epoch(savedmodel_path):
    """
    Derive the next training epoch from a saved-model path.

    The epoch number is the text after the last '-' in the path
    (e.g. 'model-5' -> 6). An empty path means training starts at epoch 1.

    Input: string specifying path
    Returns: int specifying the next epoch
    """
    if not savedmodel_path:
        return 1
    last_field = savedmodel_path.rsplit("-", 1)[-1]
    return int(last_field) + 1
def addv3(a, b):
    """Component-wise sum of two 3-vectors, returned as a tuple."""
    ax, ay, az = a[0], a[1], a[2]
    bx, by, bz = b[0], b[1], b[2]
    return (ax + bx, ay + by, az + bz)
def set_timezone_override(timezoneId: str) -> dict:
    """Build the CDP command that overrides the host system timezone.

    Parameters
    ----------
    timezoneId: str
        The timezone identifier. If empty, disables the override and
        restores the default host system timezone.
    **Experimental**
    """
    params = {"timezoneId": timezoneId}
    return {"method": "Emulation.setTimezoneOverride", "params": params}
def get_iou(bb1, bb2):
    """Calculate the Intersection over Union (IoU) of two bounding boxes.
    Args:
        bb1: tuple (x1, y1, x2, y2). The (x1, y1) position is at the top left
            corner, the (x2, y2) position is at the bottom right corner.
        bb2: tuple (x1, y1, x2, y2), same convention as bb1.
    Returns:
        float in [0, 1]
    """
    # determine the coordinates of the intersection rectangle
    (bb1_x1, bb1_y1, bb1_x2, bb1_y2) = bb1
    (bb2_x1, bb2_y1, bb2_x2, bb2_y2) = bb2
    x_left = max(bb1_x1, bb2_x1)
    y_top = max(bb1_y1, bb2_y1)
    x_right = min(bb1_x2, bb2_x2)
    y_bottom = min(bb1_y2, bb2_y2)
    # no overlap at all -> IoU is zero
    if x_right < x_left or y_bottom < y_top:
        return 0.0
    # The intersection of two axis-aligned bounding boxes is always an
    # axis-aligned bounding box
    intersection_area = (x_right - x_left) * (y_bottom - y_top)
    # compute the area of both AABBs
    bb1_area = (bb1_x2 - bb1_x1) * (bb1_y2 - bb1_y1)
    bb2_area = (bb2_x2 - bb2_x1) * (bb2_y2 - bb2_y1)
    # compute the intersection over union by taking the intersection
    # area and dividing it by the sum of prediction + ground-truth
    # areas - the intersection area
    iou = intersection_area / float(bb1_area + bb2_area - intersection_area)
    assert iou >= 0.0
    assert iou <= 1.0
    return iou
def breadcrumbs_list(links):
    """Return HTML <li> breadcrumb items to render inside a <ul> element.

    ``links`` should be a sequence of (URL, text) pairs; the last entry is
    rendered as the "active" (current-page) crumb without a link.
    Returns "" for an empty ``links`` (previously raised IndexError).
    """
    # BUGFIX: guard against empty input — links[-1] below would raise.
    if not links:
        return ""
    crumbs = ""
    li_str = '<li class="breadcrumb-item"><a href="{}">{}</a></li>'
    li_str_active = '<li class="breadcrumb-item active"><span>{}</span></li>'
    # All items except the last are plain links.
    for link in links[:-1]:
        crumbs += li_str.format(link[0], link[1])
    # Add the final "active" item.
    crumbs += li_str_active.format(links[-1][1])
    return crumbs
def remove_prefix(text, prefixes):
    """Strip the first matching prefix from *text*; return *text* unchanged if none match."""
    matched = next((p for p in prefixes if text.startswith(p)), None)
    if matched is None:
        return text
    return text[len(matched):]
def app_launcher(filepath, filename, CMD, ARGS, timeout=None):
    """
    Launch a downloading script as a subprocess.

    Args:
        filepath : (str) path to modis downloading script
        filename : (str) downloading script
        CMD      : (list) command e.g. [nohup, bash]
        ARGS     : (list) arguments of downloading script e.g. [--dir=./ , ]
        timeout  : (float|None) seconds to wait before giving up; None waits forever

    Returns:
        0 on normal completion, -1 if the timeout expired.
    """
    import os
    from subprocess import run, TimeoutExpired
    flag = 0
    try:
        # BUGFIX: the TimeoutExpired handler was unreachable because run()
        # was never given a timeout; expose it as a backward-compatible
        # keyword (default None preserves the old wait-forever behavior).
        run(CMD + [os.path.join(filepath, filename)] + ARGS, timeout=timeout)
    except TimeoutExpired:
        flag = -1
    return flag
def numerical_diff(f, x):
    """
    Approximate the derivative of f at x via the central difference quotient.
    """
    h = 1e-4  # rule of thumb
    upper = f(x + h)
    lower = f(x - h)
    return (upper - lower) / (2 * h)
def max(x, y):
    """(int, int) -> int
    Determine the maximum of <x> and <y>.
    NOTE: shadows the builtin ``max`` in this module.
    """
    if x > y:
        return x
    return y
def find_smalest_headers(content):
    """
    Find the smallest (top-most) markdown header level in *content*.

    Lines inside backtick-fenced regions are ignored. Returns 1 when no
    header is found.

    :param content: markdown text to scan
    :type content: str
    :return: length of the shortest '#' run that starts a header line
    :rtype: int
    """
    ignore = False
    lines = content.splitlines()
    headers = []
    for line in lines:
        if line.startswith("`"):
            ignore = not ignore
            # BUGFIX: was `break`, which aborted the whole scan at the first
            # code fence; `continue` correctly skips just the fence line.
            continue
        if line.startswith("#") and not ignore:
            headers.append(line.split(" ")[0])
    headers = set(headers)
    level = sorted(headers, key=len)
    if len(level) == 0:
        level = 1
    else:
        level = len(level[0])
    return level
def aspect_ratio(a):
    """
    Ratio of width to height of rectangle *a* = (x, y, w, h); 0 if a is None.
    """
    if a is None:
        return 0
    width, height = a[2], a[3]
    return float(width) / height
def insertion_sort(items):
    """Sort a list of items in ascending order, in place.

    Classic insertion sort: each element is shifted left past any larger
    predecessors. Returns the same (mutated) list for convenience.
    """
    # Start at index 1: a single-element prefix is already sorted.
    for k in range(1, len(items)):
        val = items[k]
        i = k - 1
        # Shift larger elements one slot right to open a gap for val.
        while i >= 0 and val < items[i]:
            items[i + 1] = items[i]
            i -= 1
        items[i + 1] = val
    return items
def filter(arg_list, nsitemin, nsitemax):
    """Keep only materials whose "nsites" lies within [nsitemin, nsitemax].

    Parameters:
        arg_list (list): list of materials (dicts with an "nsites" key).
        nsitemin (int): minimum allowed nsites (inclusive).
        nsitemax (int): maximum allowed nsites (inclusive).
    Returns:
        list: filtered list.

    NOTE: shadows the builtin ``filter`` in this module.
    """
    return [el for el in arg_list if nsitemin <= el["nsites"] <= nsitemax]
def create_project(p_dict):
    """Populate *p_dict* with fixture project fields and return it.

    Existing "users" and "uri" entries are kept; missing ones get defaults.
    """
    default_users = {
        "mrsj": {"member": True, "spectator": True, "manager": True},
        "tschuy": {"member": True, "spectator": False, "manager": False},
    }
    p_dict.setdefault("users", default_users)
    p_dict["uuid"] = "309eae69-21dc-4538-9fdc-e6892a9c4dd4"
    p_dict["revision"] = 1
    p_dict["created_at"] = "2015-05-23"
    p_dict["updated_at"] = None
    p_dict["deleted_at"] = None
    p_dict.setdefault("uri", None)
    return p_dict
def get_event_subject(event_type: str) -> str:
    """Return the subject, i.e. the text before the first '.' of event_type.

    e.g. 'experiment.deleted' -> 'experiment'
    """
    return event_type.partition(".")[0]
def exlude_replication_pools(pools_data):
    """
    Return shallow copies of the pool entries that are not replication-only.
    """
    return [entry.copy()
            for entry in pools_data
            if entry['SANPoolsUsage'] != "replication"]
def Dictionary_to_ListList(python_dictionary):
    """Return [[keys], [values]] of the dictionary, in iteration order."""
    return [list(python_dictionary.keys()), list(python_dictionary.values())]
def checking_features(columns: list, lst_features: list) -> bool:
    """
    Check that every required feature appears in the dataframe columns.

    Parameters:
        * columns [list]: list of columns in the dataframe.
        * lst_features [list]: list of required features.
    Return:
        True/False [bool]: whether all required features are present.
    """
    # Idiom fix: return the boolean directly instead of `if X: return True`.
    return all(feat in columns for feat in lst_features)
def to_tuple(obj):
    """Tuple Converter.

    Converts any object to a tuple:
    - a tuple is returned unchanged,
    - a list is converted element-wise,
    - None becomes the empty tuple,
    - anything else becomes a 1-tuple wrapping the object.

    Args:
        obj (any object): the object to be converted
    Returns:
        A tuple containing the given object
    """
    if obj is None:
        return ()
    if isinstance(obj, tuple):
        return obj
    if isinstance(obj, list):
        return tuple(obj)
    return (obj,)
def str2bool(txt):
    """Convert a query-parameter-ish value to bool.

    Falsy input is False; otherwise the lowercased string form must be one
    of the accepted truthy spellings.
    """
    return bool(txt) and str(txt).lower() in ("1", "y", "yes", "t", "true")
def is_integer(testValue):
    """Returns True if testValue is an integer and False otherwise.

    Works on the stripped string form of the value and scans it character by
    character, accumulating rule violations. NOTE(review): the rules are
    idiosyncratic — e.g. a trailing ".0" appears to be accepted as integer
    (only a '.'/' ' followed by a final char outside ['.', '0', ' '] is
    rejected) — confirm this is intended before changing.
    """
    isInteger = True              # assume integer until a rule is violated
    charactersDone = 0            # count of characters examined so far
    currentCharacter = 0          # index of the character being examined
    positiveNegative = 0          # number of '+'/'-' signs seen
    decimal = 0                   # number of '.' characters seen
    testValueString = str(testValue)
    testValueString = testValueString.strip()
    totalCharacters = len(testValueString)
    # An empty (or all-whitespace) value is not an integer.
    if totalCharacters == 0:
        isInteger = False
    while charactersDone < totalCharacters:
        # Only sign characters, digits, '.', and spaces are allowed at all.
        if testValueString[currentCharacter] not in '-+0123456789. ':
            isInteger = False
        # A '.'/' ' is only tolerated when the final character is '.', '0' or ' '.
        if testValueString[currentCharacter] in ['.', ' '] and testValueString[totalCharacters - 1] not in ['.', '0',
                                                                                                           ' ']:
            isInteger = False
        # A lone sign, dot, or space is not an integer.
        if testValueString[currentCharacter] in [' ', '-', '+', '.'] and totalCharacters == 1:
            isInteger = False
        # Signs are only allowed in the leading position.
        if testValueString[currentCharacter] in ['-', '+'] and currentCharacter != 0:
            isInteger = False
        if testValueString[currentCharacter] in ['-', '+']:
            positiveNegative = positiveNegative + 1
        if testValueString[currentCharacter] in ['.']:
            decimal = decimal + 1
        # At most one sign and one decimal point overall.
        if positiveNegative > 1 or decimal > 1:
            isInteger = False
        currentCharacter = currentCharacter + 1
        charactersDone = charactersDone + 1
    return isInteger
def _merge(iterable):
"""Merge multiple items into a single one separating with a newline"""
return "\n".join(iterable) |
def format_str(template, *args):
    """Format each arg with *template*, returning one string per arg."""
    return list(map(template.format, args))
def sub_with_none(v1, v2):
    """Subtract v2 from v1, treating None as zero (both None -> None)."""
    if v1 is None and v2 is None:
        return None
    if v1 is None:
        return -v2
    if v2 is None:
        return v1
    return v1 - v2
def truncate(content, length=100, suffix="..."):
    """
    Truncate *content* to at most *length* characters at a word boundary,
    appending *suffix* when truncation happens.
    """
    if len(content) <= length:
        return content
    head = content[:length].rsplit(" ", 1)[0]
    return head + suffix
def elgamal_public_key(prk):
    """
    Derive the ElGamal public key from a private key.

    Parameters
    ==========
    prk : tuple (p, r, d) produced by ``elgamal_private_key``

    Returns
    =======
    (p, r, e) where e = r**d mod p.

    Example: elgamal_public_key((1031, 14, 636)) -> (1031, 14, 212)
    """
    p, r, d = prk
    return p, r, pow(r, d, p)
def chunk(input_list, size):
    """
    Split a list into a list of sub-lists of at most *size* elements.
    If size = 2, [1, 2, 3, 4, 5, 6, 7] becomes [[1,2], [3,4], [5,6], [7]]
    """
    pieces = []
    for start in range(0, len(input_list), size):
        pieces.append(input_list[start:start + size])
    return pieces
def py3round(number: float) -> int:
    """
    Round consistently across Python versions (used by albumentations):
    exact .5 halves are rounded to the nearest even integer.

    Parameters
    ----------
    number: float to round.

    Returns
    -------
    Rounded number as int.
    """
    nearest = round(number)
    if abs(nearest - number) == 0.5:
        # exact tie: round half to even
        return int(2.0 * round(number / 2.0))
    return int(nearest)
def is_db_connection_error(args):
    """Return True if the error string contains a DB-connection error code."""
    # NOTE(adam_g): This is currently MySQL specific and needs to be extended
    # to support Postgres and others.
    return any(code in args for code in ('2002', '2003', '2006'))
def _find_exclude_idx(ch_names, exclude):
"""Find the index of all channels to exclude.
If there are several channels called "A" and we want to exclude "A",
then add (the index of) all "A" channels to the exclusion list.
"""
return [idx for idx, ch in enumerate(ch_names) if ch in exclude] |
def _potes_to_unique_potes(potes):
""" Remove repetitions from potes (positional votes) """
unique_potes = []
n = []
for pote in potes:
flag_new = True
for i, p in enumerate(unique_potes):
if list(pote) == list(p):
n[i] += 1
flag_new = False
if flag_new:
unique_potes.append(pote)
n.append(1)
return unique_potes, n |
def clean_aliases(aliases):
    """
    Remove unwanted characters from alias names.

    Replaces '/' and '\\' with '_', and strips '#' (not allowed by the
    schema). Call before registering a new alias that may contain such
    characters, then update your payload with the cleaned alias.

    Args:
        aliases: `list`. One or more record alias names to submit to the Portal.
    Returns:
        `list`: The cleaned aliases.
    """
    return [alias.replace("/", "_").replace("\\", "_").replace("#", "")
            for alias in aliases]
def smcConvertMaskToInt(aRegMask):
    """Convert a hex bit-field mask string to the max value the field can hold.

    Assumes the mask is a contiguous run of set bits: trailing zeros are
    discarded, the contiguous run of ones is counted, and 2**n - 1 returned.
    """
    stripped = format(int(aRegMask, 16), 'b').rstrip('0')
    num_bits = len(stripped) - len(stripped.rstrip('1'))
    return (1 << num_bits) - 1
def range_data_override(main_data, added_data):
    """
    Overwrite clusters in *main_data* wholesale with those in *added_data*
    (every key in an overridden cluster is replaced, none are merged).
    """
    main_data.update(added_data)
    return main_data
def hmirror(val=None):
    """Set (when *val* is given) or get the module-wide horizontal mirror flag."""
    global _hmirror
    if val is None:
        return _hmirror
    _hmirror = val
    return _hmirror
def hsvToRGB(h, s, v):
    """Convert an HSV colour to an (r, g, b) tuple.

    @param h: Hue (degrees)
    @param s: Saturation
    @param v: Value
    return (r, g, b)
    """
    import math
    sector = math.floor(h / 60.0) % 6
    frac = (h / 60.0) - math.floor(h / 60.0)
    low = v * (1.0 - s)
    mid_down = v * (1.0 - (frac * s))
    mid_up = v * (1.0 - ((1.0 - frac) * s))
    # One (r, g, b) arrangement per 60-degree hue sector.
    table = [
        (v, mid_up, low),
        (mid_down, v, low),
        (low, v, mid_up),
        (low, mid_down, v),
        (mid_up, low, v),
        (v, low, mid_down),
    ]
    return table[sector]
def construct_modified_raw_text(token_dict):
    """Rebuild raw text from the "ORTH" tokens and "SPACY" trailing-space flags."""
    pieces = []
    for orth, has_space in zip(token_dict["ORTH"], token_dict["SPACY"]):
        pieces.append(orth + (" " if has_space else ""))
    return "".join(pieces)
def makeinstrumentstr(instrument):
    """Return the one-letter code for the instrument ('' when unknown)."""
    codes = {'SALTICAM': 'S', 'RSS': 'P'}
    return codes.get(instrument, '')
def get_padding(north, south, west, east, padding=10):
    """
    Calculate a reasonable amount of padding for the map.

    :param north: northern latitude bound
    :type north: float
    :param south: southern latitude bound
    :type south: float
    :param west: western longitude bound
    :type west: float
    :param east: eastern longitude bound
    :type east: float
    :param padding: padding as a percentage of the bounding-box extent
    :type padding: int
    :return: (latitude padding, longitude padding) to apply
    :rtype: tuple of int
    """
    # Convert percentage to a fraction (mutates the local only).
    padding /= 100
    dlat = abs(north - south)
    dlon = abs(east - west)
    return round(dlat * padding), round(dlon * padding)
def convert_time_24(time):
    """
    Convert an hour string to 24-hour form by adding 12, except "12" itself.
    """
    if time == "12":
        return time
    return str(int(time) + 12)
def isGColRequired(config, num):
    """Check whether the g1/g2 columns need to be read for catalog *num*.

    Inspects the config dict for output file names (gg_file_name, and
    ng/nm/kg only when num == 1) whose calculations require shear columns.
    If none are present, an invalid g1_col/g2_col need not raise an error —
    e.g. the first catalog of an NG correlation typically has no g1/g2.

    Parameters:
        config (dict): The configuration file to check.
        num (int):     Which number catalog we are working on.

    Returns:
        True if some output file requires this catalog to have valid g1/g2
        columns, False if not. (A falsy config is returned as-is, matching
        the original short-circuit behavior.)
    """
    if not config:
        return config
    return ('gg_file_name' in config
            or 'm2_file_name' in config
            or 'norm_file_name' in config
            or (num == 1 and 'ng_file_name' in config)
            or (num == 1 and 'nm_file_name' in config)
            or (num == 1 and 'kg_file_name' in config))
def gcd(a, b):
    """Calculate the Greatest Common Divisor of two integers.

    :param a: First integer
    :param b: Second integer
    :returns: Greatest Common Divisor (GCD) of *a* and *b*

    Uses Euclid's algorithm
    (http://en.wikipedia.org/wiki/Greatest_common_divisor).
    """
    # Fast paths before entering the Euclid loop.
    if a == b:
        return a
    if a == 0:
        return b
    if b == 0:
        return a
    if a == 1 or b == 1:
        return 1
    big, small = (a, b) if a > b else (b, a)
    rem = big % small
    while rem:
        big, small, rem = small, rem, small % rem
    return small
def eval_and_or(pred, gold):
    """Compare the AND/OR connectives of predicted vs gold WHERE clauses.

    Args:
        pred: dict with a 'where' list where the odd positions ([1::2]) hold
            the connectives (e.g. 'and'/'or') between conditions.
        gold: same structure as pred.
    Returns:
        (1, 1, 1) when the connective sets match exactly.
        NOTE(review): on mismatch a LIST [len(pred_ao), len(gold_ao), 0] is
        returned instead of a tuple — inconsistent return types; callers
        comparing with == against a literal may depend on one of them, so
        this is flagged rather than changed.
    """
    pred_ao = pred['where'][1::2]
    gold_ao = gold['where'][1::2]
    # Order and multiplicity are deliberately ignored via sets.
    pred_ao = set(pred_ao)
    gold_ao = set(gold_ao)
    if pred_ao == gold_ao:
        return 1, 1, 1
    return [len(pred_ao), len(gold_ao), 0]
def unquoteStr(astr, escChar='\\', quoteChars='"\''):
    """Remove surrounding quotes from a string and unescape contained
    escaped quotes and escape characters. Based on email.unquote.
    """
    if len(astr) <= 1:
        return astr
    for qc in quoteChars:
        if astr[0] == qc and astr[-1] == qc:
            inner = astr[1:-1]
            inner = inner.replace(escChar + escChar, escChar)
            return inner.replace(escChar + qc, qc)
    return astr
def is_owner_of_doc(doc):
    """
    Function for checking document ownership.

    SINCE WE ARE KEEPING ACCESS CONTROL SIMPLE, WE ARE DEFAULTING THIS TO TRUE.
    The commented-out code sketches the intended Elasticsearch lookup.

    :param str doc: A specific document ID
    :return bool: whether the document's owner matches the current owner
        (currently always True — deliberate stub, not a bug)
    """
    # owner = es.get(index=es_index, doc_type='attachment', id=doc, fields='owner')['fields']['owner'][0]
    # return is_owner(owner)
    return True
def safe_get(dct, *keys):
    """Extract a value from a nested dictionary, returning None on any miss.

    Args:
        dct (dict): dictionary to extract the value from
        *keys (string|int): successive keys/indices to follow
    Returns:
        The nested value, or None if any step fails.
    """
    for key in keys:
        try:
            dct = dct[key]
        # Narrowed from a bare except: a missing key/index or indexing a
        # non-subscriptable value means "not found"; anything else (e.g.
        # KeyboardInterrupt) should propagate.
        except (KeyError, IndexError, TypeError):
            return None
    return dct
def row_or_column_danger(attacking_row, attacking_column, row, column):
    """Check whether two pieces share a row or a column (rook-style threat).

    Arguments:
        attacking_row -- second piece's row number
        attacking_column -- second piece's column number
        row -- first piece's row number
        column -- first piece's column number

    Returns:
        True when the pieces share a row or column, else False.
    """
    # Idiom fix: return the boolean expression directly instead of
    # `True if X else False`.
    return attacking_row == row or attacking_column == column
def mdc(a, b):
    """
    mdc: int x int --> int
    mdc(a, b) returns the greatest common divisor of two integers
    (recursive Euclid's algorithm).
    """
    if b == 0:
        return a
    return mdc(b, a % b)
def int_or_float(x):
    """Return the int value of x, falling back to float.

    Args:
        x (str): Value to convert.
    Returns:
        int or float
    Raises:
        ValueError: when x parses as neither.
    """
    for caster in (int, float):
        try:
            return caster(x)
        except ValueError:
            pass
    raise ValueError("invalid literal for int_or_float(): '{}'".format(x))
def format_sig(name, args, retv):
    """
    Format a method signature in Javap's definition style.

    Arguments: method name, list of argument types, return type.

    >>> format_sig('getSomeValue', ['int', 'java.lang.String'], 'org.mydomain.myapp.SomeData[]')
    u'org.mydomain.myapp.SomeData[] getSomeValue(int, java.lang.String)'
    """
    arg_list = ', '.join(args)
    return f'{retv} {name}({arg_list})'
def safe_img_url(url, max_size=1024):
    """Given an image URL, strictly enforce size and validity; None on failure."""
    if not url or not isinstance(url, str):
        return None
    if len(url) >= max_size:
        return None
    if not url.startswith('http'):
        return None
    return url.strip()
def locate_pad(pshape, ushape):
    """Return the (min, max) indices of the centered range u within range p."""
    start = (pshape - ushape) // 2
    return start, start + ushape
def key_cleanup(params: dict) -> dict:
    """Clean up dictionary keys for use as constructor keyword arguments.

    Needed e.g. for dicts loaded from YAML: hyphens (which look nicer in
    config files) are replaced with underscores so the keys become valid
    Python identifiers.
    """
    return {key.replace("-", "_"): value for key, value in params.items()}
def task_builder(generator, n_train=1000, n_test=250):
    """
    Build a task dictionary of generated samples.

    :generator: zero-argument callable returning an (input, output) pair
    :n_train: number of samples to generate for training
    :n_test: number of samples to generate for testing
    :returns: {'train': [...], 'test': [...]} of {'input', 'output'} dicts
    """
    def _sample():
        # One generated example, wrapped in the task record format.
        inp, out = generator()
        return {'input': inp, 'output': out}

    return {
        'train': [_sample() for _ in range(n_train)],
        'test': [_sample() for _ in range(n_test)],
    }
def get_data_file_parameters(data_type):
    """Return (url, local file name) for the given Met Office climate data type."""
    data_url = f'http://www.metoffice.gov.uk/climate/uk/datasets/{data_type}/ranked/UK.txt'
    data_target = f'UK_{data_type}_data.txt'
    return data_url, data_target
def decode(s):
    """Decode a single length-prefixed string into a list of strings.

    Each item is encoded as "<length>:<payload>".

    :type s: str
    :rtype: List[str]
    """
    strs = []
    cursor = 0
    while cursor < len(s):
        colon = s.find(":", cursor)
        length = int(s[cursor:colon])
        start = colon + 1
        strs.append(s[start:start + length])
        cursor = start + length
    return strs
def align_options(options):
    """
    Indent flags and align help texts.

    Each option is an indexable pair (flag, help text); flags are padded to
    the width of the longest flag so the help column lines up.
    """
    width = max((len(opt[0]) for opt in options), default=0)
    return '\n'.join(' %s %s' % (opt[0].ljust(width), opt[1]) for opt in options)
def split_component_chars(address_parts):
    """
    Explode address parts into per-character labels.

    :param address_parts: list of (<address_part>, <address_part_label>) pairs
    returns [(<char>, <label of its part>), ...] for every character of every part
    """
    return [(ch, label) for part, label in address_parts for ch in part]
def mock_msg(cmd, body):
    """Wrap *body* in a dummy message envelope for command *cmd*."""
    head = {'seq': 0, 'cmd': cmd}
    return {'head': head, 'body': body, 'reply': None}
def get_param_status(i, param_ranges):
    """Find which parameter's range contains index *i*.

    Returns (parameter name, whether i is the first index of that range),
    or None implicitly when no range contains i.
    """
    for name, rng in param_ranges.items():
        if i not in rng:
            continue
        return name, i == rng[0]
def flatten_lists(lst):
    """Flatten one level of nesting: a list of lists becomes a single list."""
    flat = []
    for sub in lst:
        flat.extend(sub)
    return flat
def getVal(x, y, M):
    """
    Return the value stored for the unordered pair (x, y) in M.

    M is treated as symmetric: (x, y) is tried first, then (y, x);
    0 is returned when neither key is present.
    """
    # Idiom fix: nested dict.get replaces the if/elif chain and the
    # unreachable `pass` after the final return has been removed.
    return M.get((x, y), M.get((y, x), 0))
def list_to_str(id_list):
    """
    Convert a list of IDs (ints) to a comma-separated string.

    :param id_list: list of ints
    :return: list as comma separated string
    """
    # Performance/idiom fix: str.join replaces quadratic `+=` concatenation
    # (and the rstrip of the dangling trailing comma).
    return ','.join(str(item) for item in id_list)
def format_tup(f, s, sep: str, wrap: bool = False) -> str:
    """Stringify a 2-tuple; the second element is optional and, when falsy,
    only the first element is rendered (never wrapped)."""
    if s:
        joined = f"{f}{sep}{s}"
        return f"({joined})" if wrap else joined
    return f"{f}"
def init_guess_word(length):
    """
    Return the initial guess-word state: one underscore per letter of
    the target word.

    Args:
        length: the length of the target word
    Returns:
        A list of `length` underscore strings.
    """
    return ["_" for _ in range(length)]
def cuboid_to_bbox(cuboid):
    """Convert closed-interval cuboid bounds to half-open bounding-box bounds.

    Half-open intervals match python/numpy indexing, letting the numbers be
    treated as 3D bounding boxes.
    Example:
        on x=-20..26,y=-36..17,z=-47..7
    becomes
        on x=[-20, 27), y=[-36, 18), z=[-47, 8)
    """
    bbox = {"mode": cuboid["mode"]}
    for axis in ("x", "y", "z"):
        bounds = cuboid[axis]
        bbox[axis] = (bounds[0], bounds[1] + 1)
    return bbox
def _dump_point(obj, fmt):
"""
Dump a GeoJSON-like Point object to WKT.
:param dict obj:
A GeoJSON-like `dict` representing a Point.
:param str fmt:
Format string which indicates the number of digits to display after the
decimal point when formatting coordinates.
:returns:
WKT representation of the input GeoJSON Point ``obj``.
"""
coords = obj['coordinates']
pt = 'POINT (%s)' % ' '.join(fmt % c for c in coords)
return pt |
def capitalize(_, text):
    """Capitalise every word in a string, including words enclosed within
    brackets; apostrophes do not start a new capital. The text is lowercased
    first, then the first alphabetic character of each word is uppercased.
    """
    out = []
    for word in text.lower().split():
        for pos, ch in enumerate(word):
            if ch.isalpha():
                word = word[:pos] + ch.upper() + word[pos + 1:]
                break
        out.append(word)
    return " ".join(out)
def get_diff_of_keys(source, target):
    """
    Return keyphrases present in *source* but not (even partially) in *target*.

    Each keyphrase is lowercased and split into a set of terms; a source
    phrase is excluded as soon as it shares any term with any target phrase.
    """
    result = []
    for key_src in source:
        src_terms = set(key_src.lower().split())
        overlaps = any(src_terms & set(key_tar.lower().split())
                       for key_tar in target)
        if not overlaps:
            result.append(key_src)
    return result
def get_fullname(first_name, last_name):
    """Join first_name and last_name with a space; None/empty parts are dropped.

    NOTE(review): when first_name is set and last_name is empty the result
    keeps a trailing space (e.g. 'A ') — preserved from the original
    behavior; confirm whether callers rely on it before stripping.
    """
    # Idiom fix: explicit conditional replaces the error-prone
    # `cond and a or b` ternary hack.
    first = first_name or ''
    last = last_name or ''
    separator = " " if first_name else ''
    return first + separator + last
def boolean_to_str(src):
    """Encode a boolean using the canonical lexical representation.

    src
        Anything that can be resolved to a boolean except None, which
        raises ValueError.
    """
    if src is None:
        raise ValueError("Can't convert None to boolean")
    return "true" if src else "false"
def squares(start, end):
    """Return the squares of consecutive numbers from start to end inclusive.

    For example, squares(2, 3) returns [4, 9].
    """
    return [n ** 2 for n in range(start, end + 1)]
def merge(dict1, dict2):
    """Recursively fold dict1 into dict2 (mutating and returning dict2);
    dict1's values take precedence for non-dict leaves."""
    for key in dict1:
        value = dict1[key]
        if not isinstance(value, dict):
            dict2[key] = value
        else:
            # Descend into (or create) the matching sub-dict.
            merge(value, dict2.setdefault(key, {}))
    return dict2
def manhattan(pos1, pos2):
    """Manhattan (taxicab, L1) distance between two n-dimensional vectors:
    the sum of absolute differences along each coordinate dimension."""
    total = 0
    for c1, c2 in zip(pos1, pos2):
        total += abs(c1 - c2)
    return total
def unique(l):
    """Filter duplicates out of an iterable, preserving original order.

    Parameters
    ----------
    l : iterable
        Input iterable (elements must be hashable).

    Returns
    -------
    list
        Elements of `l` without duplicates, first occurrence kept.
    """
    # dict preserves insertion order (Python 3.7+), so fromkeys dedupes
    # while keeping the first occurrence of each element.
    return list(dict.fromkeys(l))
def float_div(num1, num2):
    """Function: float_div

    Description: Floating-point division of num1 by num2; returns 0 when
        the divisor is zero.

    Arguments:
        (input) num1 number -> Dividend.
        (input) num2 number -> Divisor.
        (output) Result of the division, or 0.
    """
    try:
        result = float(num1) / num2
    except ZeroDivisionError:
        return 0
    return result
def find_service_in_advertisement(adv_data, uuid):
    """ Find service with the given UUID in the advertising data.

    adv_data is scanned as a sequence of BLE AD structures
    (length byte, type byte, payload); uuid must be 2 bytes (16-bit) or
    16 bytes (128-bit). NOTE(review): assumes adv_data is an indexable
    sequence of ints (e.g. bytes) and uuid compares equal to its slices —
    confirm both are bytes-like at the call sites.
    """
    if len(uuid) != 2 and len(uuid) != 16:
        raise ValueError("Invalid UUID length.")
    # Incomplete List of 16 or 128-bit Service Class UUIDs.
    incomplete_list = 0x02 if len(uuid) == 2 else 0x06
    # Complete List of 16 or 128-bit Service Class UUIDs.
    complete_list = 0x03 if len(uuid) == 2 else 0x07
    # Parse advertisement packet.
    i = 0
    while i < len(adv_data):
        # Each AD structure: [length][type][length-1 bytes of data].
        ad_field_length = adv_data[i]
        ad_field_type = adv_data[i + 1]
        # Find AD types of interest.
        if ad_field_type in (incomplete_list, complete_list):
            # The payload is a packed array of UUIDs of uniform size.
            ad_uuid_count = int((ad_field_length - 1) / len(uuid))
            # Compare each UUID to the service UUID to be found.
            for j in range(ad_uuid_count):
                start_idx = i + 2 + j*len(uuid)
                # Get UUID from AD data.
                ad_uuid = adv_data[start_idx: start_idx + len(uuid)]
                if ad_uuid == uuid:
                    return True
        # Advance to the next AD structure.
        i += ad_field_length + 1
    # UUID not found.
    return False
def clean(token):
    """
    Strip surrounding whitespace from *token*; falsy tokens map to None.
    """
    if not token:
        return None
    return token.strip()
def categorize_transcript_recovery(info):
    """
    Classify how completely an assembled transcript recovers a reference.

    full --- means that every exon in the tID was covered!
    fused --- full, but assembled exon match start > 0, meaning
       likely fusion of overlapped transcripts
    5missX --- means that the assembled one is missing beginning X exons
    3missY --- means that the assembled one is missing ending Y exons
    skipped --- means that the assembled one is missing some intermediate exons!

    NOTE(review): info is expected to carry "matchedExons" (list of
    (ref_exon_idx, asm_offset) pairs, in order), "tID_num_exons" (int) and
    "strand" ('+'/'-') — inferred from the accesses below; confirm upstream.
    """
    # All reference exons matched: either a clean full match or a fusion
    # (assembly match does not begin at offset 0).
    if len(info["matchedExons"]) == info["tID_num_exons"]:
        if info["matchedExons"][0][1] == 0:
            return "full"
        else:
            return "fused"
    msg = ""
    # Missing exons at the start of the reference; which biological end
    # (5' vs 3') depends on strand.
    if info["matchedExons"][0][0] > 0:
        msg += "5miss" if info["strand"] == "+" else "3miss"
        msg += str(info["matchedExons"][0][0])
    # Missing exons at the end of the reference.
    if info["matchedExons"][-1][0] < info["tID_num_exons"] - 1:
        msg += ";" if msg != "" else ""
        msg += "3miss" if info["strand"] == "+" else "5miss"
        msg += str(info["tID_num_exons"] - 1 - info["matchedExons"][-1][0])
    if msg == "":  # must be missing some ground truth exons!
        return "skipped"
    return msg
def decode(encoded):
    """
    Decode a Valhalla/Google polyline-encoded string into coordinates.

    Parameters
    ----------
    encoded : str
        Polyline-encoded string (six digits of precision, Valhalla style).

    Returns
    -------
    list
        Has the structure
        [(lon1, lat1), (lon2, lat2), ..., (lonn, latn)]
    """
    # six degrees of precision in valhalla
    inv = 1.0 / 1e6;
    decoded = []
    # Running (lat, lon) offsets: each point is delta-encoded vs the previous.
    previous = [0, 0]
    i = 0
    # for each byte
    while i < len(encoded):
        # for each coord (lat, lon)
        ll = [0, 0]
        for j in [0, 1]:
            shift = 0
            byte = 0x20
            # keep decoding bytes until you have this coord:
            # bit 0x20 set means "more chunks follow" (varint continuation).
            while byte >= 0x20:
                byte = ord(encoded[i]) - 63
                i += 1
                # accumulate the low 5 bits of each chunk, LSB first
                ll[j] |= (byte & 0x1f) << shift
                shift += 5
            # get the final value adding the previous offset and remember it for the next:
            # zig-zag decode (LSB is the sign bit) then undo delta encoding.
            ll[j] = previous[j] + (~(ll[j] >> 1) if ll[j] & 1 else (ll[j] >> 1))
            previous[j] = ll[j]
        # scale by the precision and chop off long coords also flip the positions so
        # its the far more standard lon,lat instead of lat,lon
        decoded.append([float('%.6f' % (ll[1] * inv)), float('%.6f' % (ll[0] * inv))])
    # hand back the list of coordinates
    return decoded
def calc(x, y):
    """
    Test calc function (addition) with doctest.
    To execute: python -m doctest -v python_file.py

    >>> calc(1,2)
    3
    >>> calc(2,3)
    5
    >>> calc(1,3)
    4
    """
    # NOTE(review): the original doctest expected calc(2,3) == 1, which the
    # addition below cannot produce; corrected to 5.
    soma = x + y
    return soma
def fix_latex_command_regex(pattern, application='match'):
    """
    Given a pattern for a regular expression match or substitution,
    the function checks for problematic patterns commonly
    encountered when working with LaTeX texts, namely commands
    starting with a backslash.
    For a pattern to be matched or substituted, and extra backslash is
    always needed (either a special regex construction like \w leads
    to wrong match, or \c leads to wrong substitution since \ just
    escapes c so only the c is replaced, leaving an undesired
    backslash). For the replacement pattern in a substitutions, specified
    by the application='replacement' argument, a backslash
    before any of the characters abfgnrtv must be preceeded by an
    additional backslash.
    The application variable equals 'match' if pattern is used for
    a match and 'replacement' if pattern defines a replacement
    regex in a re.sub command.
    Caveats: let pattern just contain LaTeX commands, not combination
    of commands and other regular expressions (\s, \d, etc.) as the
    latter will end up with an extra undesired backslash.
    Here are examples on failures:
    >>> re.sub(r'\begin\{equation\}', r'\[', r'\begin{equation}')
    '\\begin{equation}'
    >>> # match of mbox, not \mbox, and wrong output:
    >>> re.sub(r'\mbox\{(.+?)\}', r'\fbox{\g<1>}', r'\mbox{not}')
    '\\\x0cbox{not}'
    Here are examples on using this function:
    >>> from scitools.misc import fix_latex_command_regex as fix
    >>> pattern = fix(r'\begin\{equation\}', application='match')
    >>> re.sub(pattern, r'\[', r'\begin{equation}')
    '\\['
    >>> pattern = fix(r'\mbox\{(.+?)\}', application='match')
    >>> replacement = fix(r'\fbox{\g<1>}', application='replacement')
    >>> re.sub(pattern, replacement, r'\mbox{not}')
    '\\fbox{not}'
    Avoid mixing LaTeX commands and ordinary regular expression
    commands, e.g.:
    >>> pattern = fix(r'\mbox\{(\d+)\}', application='match')
    >>> pattern
    '\\\\mbox\\{(\\\\d+)\\}'
    >>> re.sub(pattern, replacement, r'\mbox{987}')
    '\\mbox{987}' # no substitution, no match
    """
    import string
    # For a match, ANY \<letter> could collide with a regex escape; for a
    # replacement only \a \b \f \g \n \r \t \v are special to re.sub.
    problematic_letters = string.ascii_letters if application == 'match' \
        else 'abfgnrtv'
    for letter in problematic_letters:
        problematic_pattern = '\\' + letter
        if letter == 'g' and application == 'replacement':
            # no extra \ for \g<...> in pattern
            if r'\g<' in pattern:
                continue
        ok_pattern = '\\\\' + letter
        # Only escape occurrences that are not already double-escaped.
        if problematic_pattern in pattern and not ok_pattern in pattern:
            pattern = pattern.replace(problematic_pattern, ok_pattern)
    return pattern
def migrate_shipper(content):
    """
    Moves everything under the `shipper:` section to be top level.

    The ``shipper:`` header line is dropped; lines below it lose one
    level of indentation (two spaces or one tab).  Blank lines stay
    inside the section; the first other non-indented line ends it.
    The result always ends with a trailing newline.
    """
    migrated = []
    in_shipper = False
    for raw_line in content.splitlines():
        if not in_shipper:
            if raw_line.startswith("shipper:"):
                in_shipper = True  # eat the section header itself
            else:
                migrated.append(raw_line)
            continue
        # Currently inside the shipper: section.
        if raw_line.startswith(" "):
            migrated.append(raw_line[2:])   # strip two-space indent
        elif raw_line.startswith("\t"):
            migrated.append(raw_line[1:])   # strip one tab
        elif raw_line == "":
            migrated.append(raw_line)       # blank line: stay inside
        else:
            migrated.append(raw_line)       # section is over
            in_shipper = False
    return "\n".join(migrated) + "\n"
def is_ip(s):
    """
    Check whether *s* is a legal IPv4 address given as a bytes object.

    An optional ``:port`` suffix is tolerated and ignored, and the
    literal ``b'localhost'`` is accepted.  Any non-bytes input yields
    False rather than raising.

    :type s: bytes
    :param s: candidate address, e.g. ``b'127.0.0.1'`` or ``b'10.0.0.1:8080'``
    :return:
        **Boolean**
    """
    try:
        # Strip an optional ":port" suffix before validating.
        s = s.split(b':')[0]
        if s == b'localhost':
            return True
        octets = s.split(b'.')
        if len(octets) != 4:
            return False
        for octet in octets:
            if int(octet) < 0 or int(octet) > 255:
                return False
    except (ValueError, TypeError, AttributeError):
        # ValueError: non-numeric octet; TypeError/AttributeError: s is
        # not a bytes-like object.  Anything else should propagate
        # instead of being silently swallowed by a bare except.
        return False
    return True
def click(field, num_rows, num_cols, given_i, given_j):
    """click event in mine_sweeper game: flood-fill reveal.

    A cell containing 0 is marked revealed (-2) and the reveal
    propagates recursively to every neighboring 0 cell; clicking a
    non-zero cell changes nothing.  Mutates and returns *field*.
    """
    if field[given_i][given_j] != 0:
        return field
    field[given_i][given_j] = -2  # mark this empty cell as revealed
    # Clamp the 3x3 neighborhood to the board edges.
    row_range = range(max(given_i - 1, 0), min(given_i + 2, num_rows))
    col_range = range(max(given_j - 1, 0), min(given_j + 2, num_cols))
    for r in row_range:
        for c in col_range:
            if (r, c) != (given_i, given_j) and field[r][c] == 0:
                click(field, num_rows, num_cols, r, c)
    return field
def Delta(new, old, field):
    """Return difference new - old for field.

    If *old* lacks the field, the value from *new* is returned as-is.
    """
    return new[field] - old[field] if field in old else new[field]
def convert_arg(arg):
    """Convert a string argument to None, bool, float, or int when possible.

    Recognized case-insensitive literals: 'none', 'false', 'true'.
    Strings containing '.' are tried as float, all others as int;
    anything unparseable is returned unchanged.

    :param arg: string to convert
    :return: the converted value, or the original string
    """
    lowered = arg.lower()
    if lowered == 'none':
        return None
    if lowered == 'false':
        return False
    if lowered == 'true':
        return True
    try:
        # The '.' test keeps '42' an int rather than 42.0.
        return float(arg) if '.' in arg else int(arg)
    except ValueError:
        # Not numeric (e.g. '1.2.3', 'hello') -- keep the string.
        return arg
def largest_digit_helper(n, l_n):
    """
    The function returns the largest digit in the integer.
    :param n: int.
    :param l_n: int, the running maximum digit seen so far.
    """
    # Iterative form: peel off units digits until one digit remains.
    while n >= 10:
        units = n % 10      # current units digit
        if units > l_n:
            l_n = units
        n //= 10            # drop the units digit
    # n itself is now the final (leading) digit candidate.
    return n if n > l_n else l_n
def axis_helper(y_shape, x_shape):
    """
    check which axes the x has been broadcasted
    Args:
        y_shape: the shape of result
        x_shape: the shape of x
    Return:
        a tuple refering the axes
    """
    # Align x_shape to the trailing dimensions of y_shape; an axis of y
    # with no x counterpart, or with a differing size, was broadcast.
    offset = len(y_shape) - len(x_shape)
    return tuple(
        axis for axis in range(len(y_shape))
        if axis - offset < 0 or x_shape[axis - offset] != y_shape[axis]
    )
def isPerfect(number):
    """Return True when *number* equals the sum of its proper divisors."""
    divisor_total = 0
    for candidate in range(1, number):
        if number % candidate == 0:
            divisor_total += candidate
    return divisor_total == number
def my_reverse(L):
    """
    Accepts a list `L` and reverses its elements. Solves problems 1 & 2.
    Parameters
    ----------
    L : list
        The list to be reversed.
    Returns
    -------
    revL : list
        A new list with the elements of `L` in reverse order; `L`
        itself is left unmodified.
    """
    # A reversing slice builds the copy in O(n), unlike repeated
    # insert(0, ...) which shifts the whole list each time (O(n**2)).
    return L[::-1]
def _apply_epa_correction(pm, rh):
"""Applies the EPA calibration to Purple's PM2.5 data.
Version of formula matches the Purple Air site's info.
We floor it to 0 since the combination of very low pm2.5 concentration
and very high humidity can lead to negative numbers.
"""
return max(0, 0.534 * pm - 0.0844 * rh + 5.604) |
def _prepareTime(time):
"""Get time in the proper shape
ex : 174512 for 17h 45m 12s
ex : 094023 for 09h 40m 23s"""
time = str(time)
time = '000000'+time
time = time[len(time)-6:]
return time |
def get_wikipedia_link(links):
    """extract wikipedia links

    Returns the `url` of the first dict in *links* whose title is
    "wikipedia", or None when *links* is not a list or has no match.
    """
    if not isinstance(links, list):
        return None
    matches = (
        entry.get("url")
        for entry in links
        if isinstance(entry, dict) and entry.get("title") == "wikipedia"
    )
    return next(matches, None)
def hamming_dist(str_one, str_two):
    """ returns number of hamming_dist between two strings """
    # Equal length is required for the Hamming distance to be defined.
    if len(str_one) != len(str_two):
        raise ValueError("Strings have different lengths.")
    # Count positions where the paired characters differ.
    return sum(a != b for a, b in zip(str_one, str_two))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.