content stringlengths 42 6.51k |
|---|
def getProbability(value, distribution, values):
    """Return the total probability mass assigned to ``value`` under the
    discrete distribution given by the parallel lists (distribution, values).
    """
    return sum((prob for prob, val in zip(distribution, values) if val == value), 0.0)
def convert_words_to_index(words, dictionary):
    """Map each word to its dictionary index; unknown words map to 0."""
    return [dictionary.get(word, 0) for word in words]
def _sym_index(k1, k2):
    """Get the index of an entry in a folded symmetric array.

    Parameters
    ----------
    k1, k2 : int
        0-based indices into a symmetric matrix.

    Returns
    -------
    int
        Linear index of the (k1, k2) element once the lower-triangular
        part of the matrix has been stacked into a vector.
    """
    # Fold onto the lower triangle: row is the larger index.
    row, col = (k1, k2) if k2 <= k1 else (k2, k1)
    return int(col + row * (row + 1) / 2)
def flatten_treestring(tree):
    """Strip bracket/POS annotations from a PTB-style tree string so it
    reads like plain English (detokenizing punctuation and clitics)."""
    import re
    # Drop every "(TAG " opener, then all closing parens.
    text = re.sub(r'\(.*? ', '', tree).replace(')', '')
    fixes = (('$ ', '$'), ('`` ', '``'), (' ,', ','), (' .', '.'),
             ("'' ", "''"), (" n't", "n't"), (" 're", "'re"), (" 'm", "'m"),
             (" 's", "'s"), (" 'd", "'d"), (" 'll", "'ll"), (' ', ' '))
    for old, new in fixes:
        text = text.replace(old, new)
    return text
def transformArrYXToXYList(arrYX):
    """Swap each (y, x) position into [x, y] order.

    Parameters
    ----------
    arrYX : iterable of 2-element positions

    Returns
    -------
    list
        A list of [x, y] pairs with coordinates inverted.
    """
    return [[pos[1], pos[0]] for pos in arrYX]
def numeric_task_id(task_id):
    """Convert a task-id in either 'task-n' or 'n' form to the integer n.

    Returns None when task_id is None.  Exists to support the legacy
    "task-id" format in the "google" provider; removable once the
    google-v2 provider fully replaces it.
    """
    if task_id is None:
        return None
    prefix = 'task-'
    if task_id.startswith(prefix):
        task_id = task_id[len(prefix):]
    return int(task_id)
def power3(n):
    """Return the cube (third power) of the given number."""
    return n ** 3
def typecast(value):
    """Recursively typecast an input
    Parameters
    ----------
    value : int, float, str, tuple, list
    Notes
    -----
    Containers (dict/list) are typecast element by element, in place.
    The strings "None", "True" and "False" become the corresponding
    singletons; other values are tried as int, then float, and finally
    returned unchanged.
    """
    if isinstance(value, (dict, list)):
        # Walk the container in place: lists by index, dicts by key.
        iterator = range(len(value)) if isinstance(value, list) else value
        for i in iterator:
            value[i] = typecast(value[i])
    elif value == "None":
        return None
    elif value in ("False", "True"):
        return True if value == "True" else False
    else:
        # Try the narrowest type first; str() acts as the identity fallback.
        for type_fn in (int, float, str):
            try:
                return type_fn(value)
            except ValueError:
                pass
    return value |
def insertion_sort(l):
    """Sort the list in place with insertion sort (O(n^2)) and return it.

    A sorted sublist is maintained at the front of the list; each new
    item is shifted back into it so the sorted prefix grows by one.
    """
    for i in range(1, len(l)):
        key = l[i]
        j = i
        # Shift larger elements of the sorted prefix one slot right.
        while j > 0 and l[j - 1] > key:
            l[j] = l[j - 1]
            j -= 1
        l[j] = key
    return l
def slice_list(in_list, lens):
    """Slice a list into consecutive sublists of the given lengths.

    Args:
        in_list (list): The list to be sliced.
        lens (int or list): Expected length of each output sublist; an int
            means equal-sized chunks and must divide len(in_list) evenly.

    Returns:
        list: A list of sliced sublists.
    """
    if isinstance(lens, int):
        assert len(in_list) % lens == 0
        lens = [lens] * (len(in_list) // lens)
    if not isinstance(lens, list):
        raise TypeError('"indices" must be an integer or a list of integers')
    elif sum(lens) != len(in_list):
        raise ValueError('sum of lens and list length does not '
                         f'match: {sum(lens)} != {len(in_list)}')
    out_list = []
    start = 0
    for length in lens:
        out_list.append(in_list[start:start + length])
        start += length
    return out_list
def _getbundleitems(bundle, nametoindexmap, itemsdict):
    """Collect the items a bundle references via its 'descriptions' table.

    Descriptions are keyed by stringified position ("0", "1", ...); entries
    whose 'value' has no name-to-index mapping are skipped.
    """
    descriptions = bundle['descriptions']
    items = []
    for position in range(len(descriptions)):
        name = descriptions[str(position)]['value']
        if name in nametoindexmap:
            items.append(itemsdict[nametoindexmap[name]])
    return items
def _has_medwall(model):
    """Check whether a structure has a medial wall: true for surface
    models whose model count equals their number of vertices; always
    False for non-surface models.
    """
    is_surface = model['type'] == 'CIFTI_MODEL_TYPE_SURFACE'
    return is_surface and model['count'] == model['n_indices']
def get_confidence_outcome(tps, fps, fns, negative_ids):
    """Pair each prediction's confidence with whether it was correct.

    Parameters
    ----------
    tps, fps : list of tuple
        Predicted TPs (FPs) as (slice_id, Adhesion, confidence).
    fns : list of tuple
        Predicted FNs as (slice_id, confidence).
    negative_ids : list of str
        Ids of negative slices.

    Returns
    -------
    outcomes : list of (is_true, confidence) tuples for all predictions
        (FNs contribute (1, 0)).
    outcomes_negative : list of (0, confidence) tuples for FPs on
        negative slices only.
    """
    outcomes = [(1, confidence) for _, _, confidence in tps]
    outcomes_negative = []
    for slice_id, _, confidence in fps:
        outcomes.append((0, confidence))
        if slice_id in negative_ids:
            outcomes_negative.append((0, confidence))
    outcomes.extend((1, 0) for _ in fns)
    return outcomes, outcomes_negative
def get_majority_count(class_list):
    """Tally votes per class and return (class, count) pairs sorted by
    count then class, descending.

    Note: despite the name, the full sorted list is returned (the
    majority class is its first element), matching the original behavior.
    """
    counts = {}
    for vote in class_list:
        counts[vote] = counts.get(vote, 0) + 1
    return sorted(counts.items(), key=lambda kv: (kv[1], kv[0]), reverse=True)
def default_if_none(value, default):
    """Return `default` when `value` is None, else `value` itself."""
    return default if value is None else value
def _minor_release(version):
    """Drop the patch component of a version so only major and minor
    versions face strict matching; raises ValueError for malformed input."""
    major_minor = version.rsplit('.', 1)[0]
    if major_minor.count('.') != 1:
        raise ValueError("Version doesn't conform to `major.minor.patch` format.")
    return major_minor
def rem_encoding(code):
    """Strip PEP 263 coding comments from the first two lines of compiled
    code so it can be passed to exec."""
    lines = code.splitlines()
    kept = [line for line in lines[:2]
            if not (line.lstrip().startswith("#") and "coding" in line)]
    kept += lines[2:]
    return "\n".join(kept)
def parse_records(database_records):
    """
    Parses database records into a clean json-like structure.

    Param: database_records (a list of db.Model instances)
    Example: parse_records(User.query.all())
    Returns: a list of dictionaries, each corresponding to a record, like...
        [
            {"id": 1, "title": "Book 1"},
            {"id": 2, "title": "Book 2"},
        ]
    """
    parsed_records = []
    for record in database_records:
        # NOTE: reuses (and mutates) the record's own __dict__, as before.
        parsed_record = record.__dict__
        # Bug fixes: removed a leftover debug print(); pop() with a default
        # no longer raises KeyError when SQLAlchemy state is absent.
        parsed_record.pop("_sa_instance_state", None)
        parsed_records.append(parsed_record)
    return parsed_records
def toLowerCaseB(str):
    """Lower-case ASCII letters A-Z only (other characters untouched).

    :type str: str
    :rtype: str
    """
    chars = []
    for ch in str:
        code = ord(ch)
        # 65..90 is 'A'..'Z'; +32 shifts into 'a'..'z'.
        chars.append(chr(code + 32) if 65 <= code <= 90 else ch)
    return "".join(chars)
def interp(mapping, x):
    """Compute the piecewise linear interpolation given by mapping for input x.

    Out-of-range inputs are returned unchanged.
    >>> interp(((1, 1), (2, 4)), 1.5)
    2.5
    """
    points = sorted(mapping)
    if len(points) == 1:
        xa, ya = points[0]
        return ya if xa == x else x
    for (xa, ya), (xb, yb) in zip(points, points[1:]):
        if xa <= x <= xb:
            return ya + float(x - xa) / (xb - xa) * (yb - ya)
    return x
def flatten(lst):
    """Concatenate the sub-lists of `lst` into one flat list."""
    flat = []
    for sub in lst:
        flat.extend(sub)
    return flat
def clean_words(words, stop_words):
    """Remove stop words from words.

    Args:
        words: A list of words.
        stop_words: Comma-separated string of stop words to remove
            (matched case-insensitively against each word).

    Returns:
        A list of words with stop words removed.
    """
    # Perf fix: the original re-split stop_words on every iteration and
    # scanned a list; split once and use a set for O(1) membership tests.
    stop_set = set(stop_words.split(','))
    return [word for word in words if word.lower() not in stop_set]
def get_repository_access_for_role(role_name, repository_type):
    """Return a Bitbucket permission level for a *role* (not a team) to a
    particular repository, or None when the role/type is unknown."""
    access_for_roles = {
        "tlz_admin": {
            "baseline": "REPO_WRITE",
            "resource": "REPO_ADMIN"
        },
        "tlz_developer": {
            "baseline": "REPO_WRITE",
            "resource": "REPO_WRITE"
        },
        "tlz_developer_ro": {
            "baseline": "REPO_READ",
            "resource": "REPO_READ"
        }
    }
    role_access = access_for_roles.get(role_name)
    if role_access is None:
        return None
    return role_access.get(repository_type)
def sentencecase(var):
    """
    Sentence case convention: join elements with spaces and upper-case
    the first letter of the first element.

    :param var: Variable to transform
    :type var: :py:class:`list`
    :returns: **transformed**: (:py:class:`str`) - Input in ``Sentence case``.
    """
    pieces = []
    for idx, element in enumerate(var):
        if idx == 0:
            element = element[0].upper() + element[1:]
        pieces.append(element)
    return " ".join(pieces)
def rgb_2_hex(color: tuple) -> str:
    """
    Convert an RGB triple to a hex color code (eg. #ffffff).

    :param color: tuple of RGB values eg. (255, 255, 255)
    :returns: color hex code
    """
    red, green, blue = color
    return f"#{red:02x}{green:02x}{blue:02x}"
def new_tree_object(previous, obj):
    """
    Check whether obj starts a new project/subject/session.

    :param previous: previous loop info
    :param obj: object
    :return: True if any of project/subject/session changed, else False
    """
    unchanged = (obj['project_id'] == previous['project']
                 and obj['subject_label'] == previous['subject']
                 and obj['session_label'] == previous['session'])
    return not unchanged
def get_frame_size(data):
    """Get (frame_height, frame_width) from per-class tracking data.

    Scans every class and timestep until one carries a recorded size;
    returns None when no timestep has size information.
    """
    for cls, cls_data in data.items():
        for timestep, t_data in enumerate(cls_data):
            # Bug fix: was `len(t_data['im_hs'] > 0)`, which compares a
            # list with an int (TypeError) instead of testing emptiness.
            if len(t_data['im_hs']) > 0:
                im_h = t_data['im_hs'][0]
                im_w = t_data['im_ws'][0]
                return im_h, im_w
    return None
def flags(cmd, data):
    """
    Return the indices of the flag values in SVG path data.

    Only arc commands carry flags; for any other command the result is
    empty.
    """
    if cmd.lower() != 'a':
        return []
    # a: (rx ry x-axis-rotation large-arc-flag sweep-flag x y)+
    # -> positions 3 and 4 of each 7-value group are flags.
    return [i for i in range(len(data)) if i % 7 in (3, 4)]
def to_title(attribute, ignore_list=['uuid', 'id']):
    """Convert an attribute name into a humanized title, dropping any
    underscore-separated parts listed in ignore_list."""
    kept = (part for part in attribute.split('_') if part not in ignore_list)
    return " ".join(kept).capitalize()
def build_response_block(name: str, vote, removalreasons: list) -> dict:
    """Build blocks for moderator responses in archive message"""
    text = f"{name}: {vote}"
    if removalreasons:
        reasons = "".join(f" {reason}," for reason in removalreasons)
        # Strip the trailing comma of the last reason.
        text = (text + "\n\t" + reasons).rstrip(",")
    return {
        "type": "mrkdwn",
        "text": text
    }
def normalize_area_name(area_name: str) -> str:
    """Normalize an area name by case folding and dropping spaces."""
    folded = area_name.casefold()
    return folded.replace(" ", "")
def place_word(grid, coords, word):
    """
    Write `word` horizontally into `grid` starting at coords = (x, y);
    returns the mutated grid.
    """
    x0, y = coords
    for offset, letter in enumerate(word):
        grid[y][x0 + offset] = letter
    return grid
def rev_seq(seq):
    """Reverse the sequence and use the complement bases.
    This is for annotation on "-" strand.
    example:
    rev_seq("atgc")
    >>> 'gcat'
    rev_seq("akgc")
    >>> "k" is not valid base in:
    >>> dict_keys(['A', 'C', 'G', 'T', 'N', 'a', 'c', 'g', 't', 'n'])
    """
    complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'N': 'N',
                  'a': 't', 'c': 'g', 'g': 'c', 't': 'a', 'n': 'n'}
    complemented = []
    for base in seq:
        try:
            complemented.append(complement[base])
        except KeyError:
            print('"%s" is not valid base in:' % (base))
            print(complement.keys())
            raise
    return "".join(reversed(complemented))
def get_as_subtext_field(field, field_title=None) -> str:
    """Render a value (and optional title) as a subtext fragment, or an
    empty string when the value is falsy."""
    if not field:
        return ""
    fragment = f"{field} | "
    if field_title:
        fragment = f"{field_title} :" + fragment
    return fragment
def sign(x):
    """Returns (+-)1 with same sign as x (or 0 if x == 0)."""
    return 1 if x > 0 else (-1 if x < 0 else 0)
def almostEqual(a, b, rel_tol=1e-09, abs_tol=0.0):
    """
    Floating-point-tolerant comparison (lifted from the pre-3.5 isclose()
    implementation).

    :param a: [float] first number in comparison
    :param b: [float] second number in comparison
    :param rel_tol: [float] relative tolerance in comparison
    :param abs_tol: [float] absolute tolerance fallback
    :return: [bool] whether the args are (approximately) equal
    """
    difference = abs(a - b)
    tolerance = max(rel_tol * max(abs(a), abs(b)), abs_tol)
    return difference <= tolerance
def get_z_sample(xbar, mu, SE):
    """
    Return the z-score of a sample from a sampling distribution,
    given by (xbar - mu) / SE.

    Parameters
    ----------
    * xbar: mean of the current sample.
    * mu: mean of the population from where the sample is drawn.
    * SE: standard error of the sampling distribution.
    """
    deviation = xbar - mu
    return deviation / SE
def _get_stage2_folder(tmp_folder: str) -> str:
    """Get the temporary folder path used for the 2nd merging stage."""
    base = tmp_folder.rstrip("/")
    return f"{base}/stage-2/"
def log2(n):
    """
    Computes floor(log2(n)) for n > 0, i.e. one less than the bit length
    of n (the position of the highest set bit).
    Input:
        n An integer.
    Output:
        floor(log2(n)); for example 1 -> 0, 0b101010101 -> 8.
        Returns -1 when n <= 0.
    """
    log=-1
    while n>0:
        log+=1
        n=n>>1
    return log |
def getMinorClassNRU(processesClass):
    """
    Get the index of the class with the minor (smallest last) value.

    :param processesClass: a list created by NRU algorithm.
    :type processesClass: list[list[int]]
    :return: index of the minor value (ties keep the last occurrence).
    :rtype: int
    """
    best_index, best_value = 0, 999999
    for idx, cls in enumerate(processesClass):
        if cls[-1] <= best_value:
            best_index, best_value = idx, cls[-1]
    return best_index
def flatten(b, delim="__"):
    """
    Recursively flatten a nested dict, joining key paths with `delim`.
    # i.e. input = map( lambda x: JSON.flatten( x, "__" ), input )
    """
    flat = {}
    for key, value in b.items():
        if isinstance(value, dict):
            for subkey, subvalue in flatten(value, delim).items():
                flat[key + delim + subkey] = subvalue
        else:
            flat[key] = value
    return flat
def _filter_header(s):
    """Clean up 'L' in npz header ints.
    Cleans up the 'L' in strings representing integers. Needed to allow npz
    headers produced in Python2 to be read in Python3.
    Parameters
    ----------
    s : string
        Npy file header.
    Returns
    -------
    header : str
        Cleaned up header.
    """
    import tokenize
    from io import StringIO
    tokens = []
    last_token_was_number = False
    # Tokenize the header and drop any NAME token "L" that directly follows
    # a NUMBER token — that is the Python 2 long-integer suffix (e.g. 123L).
    for token in tokenize.generate_tokens(StringIO(s).readline):
        token_type = token[0]
        token_string = token[1]
        if (last_token_was_number and
            token_type == tokenize.NAME and
            token_string == "L"):
            continue
        else:
            tokens.append(token)
        last_token_was_number = (token_type == tokenize.NUMBER)
    return tokenize.untokenize(tokens) |
def shunt(infix):
    """Return infix regex as postfix (Dijkstra's shunting-yard algorithm).

    Recognised operators, by precedence: '*' (100), '.' (90),
    '|' / '/' / '\\' (80); '(' and ')' group subexpressions.  Any other
    character is treated as a literal and copied straight to the output.
    """
    #convert input to a stack list (reversed so pop() yields left-to-right)
    infix=list(infix)[::-1]
    #operator stack and output list as empty lists
    opers,postfix =[],[]
    #operator precedence (brackets carry low values so they never
    #force earlier operators off the stack)
    prec={'*':100,'.':90, '|':80, '/':80, '\\':80, ')':70, '(':60}
    #loop through input one character at a time
    while infix:
        #pop a character from the input
        c=infix.pop()
        #decide what to do based on character
        if c== '(':
            #push an open bracket to opers stack
            opers.append(c)
        elif c==')':
            #pop the operators stack until you find an open bracket
            while opers[-1]!='(':
                postfix.append(opers.pop())
            #get rid of '('
            opers.pop()
        elif c in prec:
            #push any operators on opers stack with higher prec to output
            while opers and prec[c] < prec[opers[-1]]:
                postfix.append(opers.pop())
            opers.append(c)
        else:
            #typically we just push the character to the output
            postfix.append(c)
    #pop all remaining operators to the output
    while opers:
        postfix.append(opers.pop())
    #convert output list to string
    return ''.join(postfix) |
def deep_merge_dicts(original, incoming):
    """
    Deep merge two dictionaries. Modifies and returns original.
    For key conflicts:
    a. if both values are dicts, recursively deep-merge them;
    b. otherwise (lists, scalars, or conflicting types) the incoming
       value overrides the original one.
    """
    for key in incoming:
        if key in original:
            if isinstance(original[key], dict) and isinstance(incoming[key], dict):
                deep_merge_dicts(original[key], incoming[key])
            else:
                original[key] = incoming[key]
        else:
            original[key] = incoming[key]
    return original |
def file_path(folder, filename):
    """Return the path to the file by concatenating folder and filename;
    an empty folder maps to the current directory ('./').

    :param folder: name of folder
    :param filename: name of file
    :return: relative path of file
    """
    if len(folder) == 0:
        return "./" + filename
    return folder + filename
def rotate_90(rectangle):
    """Rotate a rectangle's (x, y) coordinates by 90 degrees."""
    rotated = []
    for x, y in rectangle:
        rotated.append((-y, x))
    return rotated
def to_vantage_level(level):
    """Convert a HASS level (0-255) to a Vantage level (0.0-100.0)."""
    return float(level * 100 / 255)
def str2num(string, decimal_sep='.'):
    """
    A helper function that converts strings to numbers if possible
    and replaces the float decimal separator with the given value.
    eg. '1' => 1
        '1.2' => 1.2
        '1,2' => 1.2  (with decimal_sep=',')
        'True' => True
    Non-strings and unparseable strings are returned unchanged.
    """
    if not isinstance(string, str):
        return string
    if string.isdigit():
        return int(string)
    if string == 'True':
        return True
    if string == 'False':
        return False
    try:
        # Map the custom decimal separator to '.' (ordinal 46).
        string_x = string.translate({ord(decimal_sep): 46})
        # Necessary because of PEP-515: turn '_' into '#' so float() does
        # NOT silently accept it as a digit-group separator.
        string_x = string_x.translate({95: 35})
        return float(string_x)
    except ValueError:
        # Bug fix: was a bare `except:` that also swallowed unrelated
        # errors (e.g. KeyboardInterrupt); only parse failures belong here.
        return string
def flatten(alist):
    """Return a single list containing the items of a list of lists.

    :param alist: list of iterables to flatten
    :return: flat list of items
    """
    flattened = []
    for sub in alist:
        for item in sub:
            flattened.append(item)
    return flattened
def convert_single_linear_to_srgb(color_value):
    """
    Convert a single linear RGB channel value to sRGB color space.

    :param color_value: float, single channel value in the 0-1 range
    :return: float, the channel value converted to sRGB
    """
    offset = 0.055
    if color_value > 0.0031308:
        # Gamma segment of the sRGB transfer curve.
        return (1 + offset) * pow(color_value, 1 / 2.4) - offset
    # Linear segment near black.
    return color_value * 12.92
def make_image_key(video_id, timestamp):
    """Returns a unique identifier for a video id & zero-padded timestamp."""
    return f"{video_id},{int(timestamp):04d}"
def parsing_explore_lines(explore_list, loc_list):
    """
    Group a raw explore-file line list into nested join groups.

    :param explore_list: the list representing the raw explore file.
    :type explore_list: list
    :param loc_list: divider positions, each the index where a join starts.
    :type loc_list: list
    :return: a grouped, nested list representing the explore structure.
    """
    grouped = [explore_list[:loc_list[0]]]
    for start, stop in zip(loc_list, loc_list[1:]):
        grouped.append(explore_list[start:stop])
    grouped.append(explore_list[loc_list[-1]:])
    return grouped
def searchFile(filename, searchterm):
    """Search a file for text; return True if found, False otherwise."""
    # Bug fix: the original opened the file and never closed it (handle
    # leak); a context manager guarantees closure. `in` replaces the
    # find() < 0 dance with the idiomatic membership test.
    with open(filename, 'r') as fd:
        return searchterm in fd.read()
def _prepare_tags(tags):
    """
    Render a tag mapping as a list of 'key:value' strings.
    >>> _prepare_tags({'protocol': 'http'})
    ['protocol:http']
    """
    if tags:
        return [f'{key}:{value}' for key, value in tags.items()]
    return []
def domain_to_url(domain: str) -> str:
    """Convert a (partial) domain to a valid URL, prepending 'www' to
    domains that start with a dot."""
    prefix = "www" if domain.startswith(".") else ""
    return "http://" + prefix + domain
def trace_object_dict(nobj, traces, object=None, background=None, params=None, tracelist=None):
    """Append a per-slit object-trace dictionary to a trace list.

    Parameters
    ----------
    nobj : int
        Number of objects in this slit.
    traces : numpy ndarray
        The trace of each object in this slit.
    object : numpy ndarray (optional)
        Weight image for the object.
    background : numpy ndarray (optional)
        Weight image for the background.
    params : dict
        Important parameters used in the object tracing.
    tracelist : list of dict
        Existing list of per-slit trace dictionaries; a new list is
        created when None.  To save memory, the object/background images
        of several slits may live in a single pair of images set only on
        the zeroth slit (None elsewhere), laid out per slf._slitpix.

    Returns
    -------
    tracelist : list of dict
        The (possibly new) list with this slit's dictionary appended.
    """
    slit_trace = {
        'nobj': nobj,
        'traces': traces,
        'object': object,
        'params': params,
        'background': background,
    }
    if tracelist is None:
        tracelist = []
    tracelist.append(slit_trace)
    return tracelist
def __name_match(name):
    """ Convert a template name to a pattern matching all instances of the
    service.
    Args:
        name: A unit name without type suffix
    Returns:
        The name, with a trailing '*' appended for template names
        (those ending in '@') so every instance matches.
    """
    if name.endswith('@'):
        return '{0}*'.format(name)
    return name
def _duration(seconds: int):
    """
    :param seconds: duration to be converted
    :return: a duration string in '0h 0m 0s' format
    """
    hours, remainder = divmod(seconds, 3600)
    minutes, secs = divmod(remainder, 60)
    return f'{int(hours)}h {int(minutes)}m {int(secs)}s'
def get_ngrams(text, n):
    """Return every word n-gram present in the text.

    Inputs:
        text: string
        n: int
    Returns:
        list of strings (each a space-joined n-gram)
    """
    tokens = text.split()
    ngrams = []
    for start in range(len(tokens) - n + 1):
        ngrams.append(" ".join(tokens[start:start + n]))
    return ngrams
def clean_data(data):
    """
    Mutate (and return) a list of dicts, converting the string values of
    'points' to float and 'position' to int.

    Parameters:
        data (list): A list of dictionaries.
    Returns:
        (list): The same list with numerical values converted.
    """
    for record in data:
        for field in record:
            if field == 'points':
                record[field] = float(record[field])
            elif field == 'position':
                record[field] = int(record[field])
    return data
def flattenjson(j, null_array=False, delim='.'):
    """Flatten a dict representing a JSON structure into one table row:
    nested dicts become delimited field names and nested arrays become,
    optionally, '<redacted_list>' or the str() of the array.
    """
    output = {}

    def walk(prefix, node):
        if isinstance(node, dict):
            for key, child in node.items():
                walk(prefix + delim + key, child)
        elif isinstance(node, list):
            output[prefix] = "<redacted_list>" if null_array else str(node)
        else:  # terminal value
            output[prefix] = node

    for key, node in j.items():
        walk(key, node)
    return output
def manhattan_distance(p1, p2):
    """
    Precondition: p1 and p2 are the same length.

    :param p1: point 1 tuple coordinate
    :param p2: point 2 tuple coordinate
    :return: manhattan distance between p1 and p2
    """
    return sum(abs(c1 - c2) for c1, c2 in zip(p1, p2))
def get_file_name_from_url(url: str) -> str:
    """Return the part of the URL after its final '/'."""
    head_and_tail = url.rsplit('/', 1)
    return head_and_tail[1]
def parse_repo_from_ssh_command(cmd):
    """
    Get the repo from the incoming SSH command.
    This is needed as the original intended response to `git push' is
    overridden by the use of a 'forced command' (see install_authorized_key()).
    The forced command needs to know what repo to act on.
    Returns None for malformed commands or path-traversal attempts.
    """
    try:
        # cmd looks like: git-receive-pack '/repo.git'
        path = cmd.split()[1].strip('\'').strip('"').lstrip('/')
    except (AttributeError, IndexError):
        # Bug fix: was a bare `except:`; only a non-string cmd or a
        # missing argument are expected failure modes here.
        return None
    if '..' in path:
        # Reject path traversal.
        return None
    return path
def _domain_for_region(region):
    """Get the DNS suffix for the given region.

    Args:
        region (str): AWS region name
    Returns:
        str: the DNS suffix ('c2s.ic.gov' only for us-iso-east-1)
    """
    if region == "us-iso-east-1":
        return "c2s.ic.gov"
    return "amazonaws.com"
def get_text(words):
    """Return the (unnormalized) text composed from the given words."""
    fragments = [word.unnormalized_with_whitespaces for word in words]
    return "".join(fragments)
def cast_float(value):
    """
    Return the value as a float, or None when it cannot be converted.
    """
    try:
        result = float(value)
    except (ValueError, TypeError):
        result = None
    return result
def resolve_diff(diff):
    """Find the shortest BF code to change a cell by `diff`, making use
    of 8-bit overflow: small positive deltas use '+', everything else
    wraps around with '-'."""
    diff %= 256
    return "+" * diff if diff < 128 else "-" * (256 - diff)
def upper_keys(d):
    """Return a new dict with the keys of `d` upper-cased and the values
    left unchanged."""
    return {key.upper(): value for key, value in d.items()}
def flatten(groups):
    """Flatten one level of nesting: list elements are expanded in place,
    non-list elements are kept as-is."""
    result = []
    for entry in groups:
        if isinstance(entry, list):
            result.extend(entry)
        else:
            result.append(entry)
    return result
def ip(address, env=None):
    """Split the address from its port, since nginx passes them together.

    eg, 8.8.8.8:80 returns 8.8.8.8
    address should be the ip:port.
    env flags environment-specific rewrites of the third octet:
    'dev' -> 2 (eg 8.8.8.8 -> 8.8.2.8), 'prod' -> 1.
    """
    host = str(address.split(':')[0])
    third_octet = {'dev': '2', 'prod': '1'}.get(env)
    if third_octet is not None:
        # Hackiness for environment-specific silliness.
        octets = host.split('.')
        octets[2] = third_octet
        host = ".".join(octets)
    return host
def percent_pos_to_step_num(percent_pos, max_steps):
    """Calculate the step number from a percentage position (1.0 == fully up)."""
    fraction_down = 1 - percent_pos
    return round(fraction_down * max_steps)
def apply(*args, func=None):
    """Call `func` with the unpacked positional args; when `func` is
    None, the first positional arg is treated as the callable."""
    if func is not None:
        return func(*args)
    return args[0](*args[1:])
def _nus_uuid(short: int) -> str:
    """Expand a 16-bit ``short`` UUID into the 128-bit NUS base UUID.

    Args:
        short: The 16-bit UUID.
    Returns:
        The 128-bit UUID as a string.
    """
    return "6e40{:04x}-b5a3-f393-e0a9-e50e24dcca9e".format(short)
def getitem(array, i):
    """
    Get a value from a list or dict, returning '' on any lookup failure.
    Example:
        {% load vrot %}
        {{ my_dict|getitem:my_key }}
    """
    try:
        return array[i]
    except (KeyError, IndexError, TypeError):
        # Bug fix: was a bare `except:` that also swallowed unrelated
        # errors (e.g. KeyboardInterrupt); only lookup failures belong here.
        return ''
def flips(rows):
    """
    Return the grid plus its vertical and horizontal mirror images.
    >>> flips(('...', '###', '#..'))
    [('...', '###', '#..'), ('#..', '###', '...'), ('...', '###', '..#')]
    """
    identity = tuple("".join(row) for row in rows)
    vertical = tuple("".join(row) for row in reversed(rows))
    horizontal = tuple("".join(reversed(row)) for row in rows)
    return [identity, vertical, horizontal]
def account_state_icon(state):
    """Highlight the state of a user account.

    Args:
        state (str): The state of the user's account.
    Returns:
        str: A set of HTML classes (green circle for active, red otherwise).
    """
    if state.lower() == "active":
        return 'fa fa-circle text-success'
    return 'fa fa-circle text-danger'
def reformat_explicit_markup(text):
    """
    Fix docstring warnings about explicit markup not followed by a blank line.

    Ensures every ``.. versionchanged``/``.. versionadded``/``.. deprecated``/
    ``.. _`` directive is followed by a blank line, left-aligns ``.. note``/
    ``.. warning`` directives (dropping a directly following blank line),
    and blanks out ``.. [N]`` reference lines.

    :param text: Original text with warnings
    :return: Modified text that fixes warnings
    """
    lines = text.split('\n')
    new_text = ''
    while len(lines) > 0:
        line = lines[0]
        stripped = line.strip()
        if stripped.startswith(('.. versionchanged', '.. versionadded',
                                '.. deprecated', '.. _')):
            new_text += line + '\n'
            # Bug fix: the original tested `lines[1].strip != ''` (a bound
            # method, always truthy) and then popped the following line,
            # silently losing it; now the next line is only inspected.
            if len(lines) > 1 and lines[1].strip() != '':
                # No blank line after the explicit markup — insert one.
                new_text += '\n'
        elif stripped.startswith(('.. note', '.. warning')):
            new_text += stripped + '\n'
            if len(lines) > 1 and lines[1].strip() == '':
                # Drop the blank line directly after the directive.
                lines.pop(1)
        elif stripped.startswith('.. ['):
            new_text += '\n'  # Remove references
        else:
            new_text += line + '\n'
        lines.pop(0)
    return new_text
def can_reach_end(steps):
    """
    Question 6.4: Given an array of n integers, where steps[i] denotes the
    maximum number of steps that can advance from i, return whether it is
    possible to advance to the last index from the beginning of the array.
    """
    # Bug fix: the target is the last *index*, len(steps) - 1, not
    # len(steps); the original demanded one step past the end, so e.g.
    # [1, 0] was wrongly reported as unreachable.
    last_idx = len(steps) - 1
    furthest_reach_so_far = 0
    idx = 0
    while idx <= furthest_reach_so_far < last_idx:
        furthest_reach_so_far = max(
            furthest_reach_so_far,
            steps[idx] + idx
        )
        idx += 1
    return furthest_reach_so_far >= last_idx
def compare_locations(bq_dataset_location, gcs_bucket_location):
    """Compare the locations of a BQ dataset and a GCS bucket.

    Arguments:
        bq_dataset_location {str} -- BQ dataset location
        gcs_bucket_location {str} -- GCS bucket location
    Returns:
        bool -- True if they match, else False
    """
    return bq_dataset_location == gcs_bucket_location
def _append_domain(opts):
    """
    Append a domain to the existing id if it doesn't already end with it.
    A trailing dot means an already-terminated FQDN and is left alone.
    """
    current_id = opts["id"]
    if current_id.endswith(opts["append_domain"]) or current_id.endswith("."):
        return current_id
    return "{0[id]}.{0[append_domain]}".format(opts)
def button_to_var(button):
    """
    Build the environment variable name for a mouse button number.
    """
    return f"_mouse_{button}_command"
def fortfloat(text: str) -> float:
    """Convert a Fortran-style float literal (e.g. '1.5d3', '1.0-5') to float.

    Handles the Fortran 'd'/'D' exponent marker and the packed form where
    the 'e' is omitted before a negative exponent ('1.0-5' == 1.0e-5).

    Raises ValueError when the text is not parseable.
    """
    text = text.strip()
    if text.endswith(("d", "D")):
        # A bare trailing exponent marker carries no exponent digits.
        text = text[:-1]
    # Bug fix: the original called text.replace(...) and discarded the
    # result, so 'd' exponents were never actually converted.
    text = text.replace("d", "e").replace("D", "e")
    try:
        return float(text)
    except ValueError:
        if len(text) > 1 and "-" in text[1:]:
            # Packed exponent form: insert the missing 'e' before the sign.
            text = f"{text[0]}{text[1:].replace('-', 'e-')}"
            return float(text)
        else:
            raise
def get_bf_csv_file_name(
    date_stamp: str = "2021-08-03",
    file_prefix: str = "dfjt",
    file_suffix: str = "interfaceProperties",
) -> str:
    """
    Get the correct filename based on the CSV naming standard
    from the "Batfish Database".

    Args:
        date_stamp: Formatted date stamp of the file to retrieve,
            e.g. 2021-08-03 for August 3rd 2021.
        file_prefix: Prefix of the naming standard, before the date stamp.
        file_suffix: Suffix of the naming standard, after the date stamp.

    Returns:
        The formatted CSV file name, e.g. dfjt-2021-08-03-interfaceProperties.csv

    Raises:
        N/A
    """
    return "-".join((file_prefix, date_stamp, file_suffix)) + ".csv"
def partition(data):
    """Three-way partition of `data` around its first element; returns
    (less, equal, greater) lists.  (Quicksort helper.)"""
    pivot = data[0]
    less, equal, greater = [], [], []
    for item in data:
        if item < pivot:
            less.append(item)
        elif item > pivot:
            greater.append(item)
        else:
            equal.append(item)
    return less, equal, greater
def prefix_lines( text, prefix ):
    """
    Prefix each non-empty line in the given text with the given prefix.
    >>> prefix_lines('',' ')
    ''
    >>> prefix_lines('x',' ')
    ' x'
    >>> prefix_lines('x\\ny\\n', ' ' )
    ' x\\n y\\n'
    >>> prefix_lines('x\\ny', ' ' )
    ' x\\n y'
    """
    out = []
    for line in text.split( '\n' ):
        out.append( prefix + line if line else line )
    return '\n'.join( out )
def set_label(termtype, timeperiod):
    """Return the input-box label matching the selected term type and
    time period radio buttons."""
    labels = {
        ('Noun phrases', 'yearly'): 'Graph the following concepts (comma-separated, using yearly frequencies):',
        ('Noun phrases', 'monthly'): 'Graph the following comma-separated noun phrases (monthly frequencies):',
        ('Wikipedia entities', 'yearly'): 'Graph the following comma-separated entities (yearly frequencies):',
        ('Wikipedia entities', 'monthly'): 'Graph the following comma-separated entities (monthly frequencies):',
    }
    default = 'Enter a phrase and show its cluster together with its other concepts:'
    return labels.get((termtype, timeperiod), default)
def parse_key_format(value):
    """Handle string formats of key files: strip surrounding single
    quotes and turn literal '\\n' sequences into real newlines."""
    unquoted = value.strip("'")
    return unquoted.replace('\\n', '\n')
def flat_map(iterable, func):
    """Classic flatMap: apply `func` (which must return a sized iterable)
    to each element and concatenate the non-empty results."""
    flattened = []
    for element in iterable:
        mapped = func(element)
        if len(mapped) > 0:
            flattened.extend(mapped)
    return flattened
def is_multi_label(labels_list):
    """Whether the labels list is a multi-label dataset.

    Parameters
    ----------
    labels_list : list
        list of labels (ints) or multiple labels (lists of ints) or mixed

    Returns
    -------
    True when any entry is itself a list.
    """
    for labels in labels_list:
        if isinstance(labels, list):
            return True
    return False
def contains_sequence_or_url(video_path: str) -> bool:
    """Check whether the video path is an image sequence ('%' pattern)
    or a URL ('://' scheme separator)."""
    has_sequence = '%' in video_path
    has_scheme = '://' in video_path
    return has_sequence or has_scheme
def contains(interval_1, interval_2):
    """
    Whether either bounding box entirely contains the other.
    >>> contains([0, 4], [2, 3])
    True
    >>> contains([0, 1], [0.5, 3])
    False
    >>> contains([2, 3], [4, 0])
    True
    """
    sorted_1 = sorted(interval_1)
    sorted_2 = sorted(interval_2)
    two_in_one = all(sorted_1[0] <= point <= sorted_1[1] for point in interval_2)
    one_in_two = all(sorted_2[0] <= point <= sorted_2[1] for point in interval_1)
    return two_in_one or one_in_two
def adjacent(xy1, xy2):
    """
    Check whether two positions in 2D coordinates are adjacent: exactly
    one unit apart along exactly one axis.
    """
    (x1, y1), (x2, y2) = xy1, xy2
    dx = x1 - x2
    dy = y1 - y2
    return (dx == 0 and abs(dy) == 1) or (dy == 0 and abs(dx) == 1)
def calendar_to_fiscal(cal_year, cal_mo):
    """Convert a calendar year and month into a fiscal year and month
    (July-June fiscal year).  Returns a (fiscal_year, fiscal_month) tuple.
    """
    if cal_mo > 6:
        return cal_year + 1, cal_mo - 6
    return cal_year, cal_mo + 6
def _cvxsearch(f, x1, x2):
    """Optimises a convex, integer-domain function using binary search.

    Returns an (x, f(x)) pair minimising f over the closed range [x1, x2].
    NOTE(review): appears to assume f is convex on [x1, x2] and x1 < x2 —
    confirm at call sites; the ambiguous-midpoint case recurses on both
    halves, so worst-case cost exceeds plain binary search.
    """
    x = x1 + (x2-x1)//2 # midpoint.
    y1, y, y2 = f(x1), f(x), f(x2)
    if x2-x1 == 1:
        # Adjacent endpoints: pick the smaller of the two.
        return (x1, y1) if y1 <= y2 else (x2, y2)
    # Recurse on the half-region containing the local minimum.
    if y >= y1:
        return _cvxsearch(f, x1, x)
    elif y >= y2:
        return _cvxsearch(f, x, x2)
    else:
        # Midpoint lies below both endpoints: search both halves and
        # keep the better result.
        x1, y1 = _cvxsearch(f, x1, x)
        x2, y2 = _cvxsearch(f, x, x2)
        return (x1, y1) if y1 <= y2 else (x2, y2) |
def get_url(query, page):
    """
    Generate the lapor.go.id search URL from a query and page number.
    Multi-word queries are joined with '+' (e.g. 'luka parah' becomes
    `..q=luka+parah&..`); single-word queries are used verbatim.
    """
    tokens = query.split()
    if len(tokens) == 1:
        term = query
    else:
        term = '+'.join(tokens)
    return 'https://www.lapor.go.id/search?q={}&page={}'.format(term, page)
def service_messages_to_string(messages):
    """
    Join the unicode renderings of the messages with newlines.
    :type messages: list[ServiceMessage]
    """
    return u"\n".join(message.as_unicode() for message in messages)
def identifier(iden_name, iden_type, iden_kind, iden_index):
    """Create a symbol-table entry dict for one identifier."""
    return {
        "name": iden_name,
        "type": iden_type,
        "kind": iden_kind,
        "index": iden_index,
    }
def isarray(A):
    """
    Return True if A is array-like, i.e. exposes a `.shape` attribute
    (e.g. a numpy ndarray); False otherwise.
    """
    try:
        A.shape  # only array-likes carry a .shape attribute
        return True
    except AttributeError:
        # Bug fixes: the bare `except:` hid unrelated errors, and
        # truth-testing `A.shape` misclassified 0-d arrays, whose
        # shape () is falsy, as non-arrays.
        return False
def listify(arg):
    """
    Ensure the provided argument is a list: sets and tuples are
    converted, lists are returned as-is, and anything else is wrapped
    in a single-element list.

    :type arg: list
    :rtype: list
    """
    if isinstance(arg, list):
        return arg
    if isinstance(arg, (set, tuple)):
        # make a set or tuple into a list
        return list(arg)
    return [arg]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.