content stringlengths 42 6.51k |
|---|
def listGetArithmeticMean(listofnumbers):
    """Return the arithmetic mean of a list of numbers.

    Returns 0.0 for an empty list instead of raising ZeroDivisionError.
    """
    # max(..., 1) guards the empty-list case; Python 3 `/` already yields a
    # float, so the old `* 1.0` coercion is unnecessary.
    return sum(listofnumbers) / max(len(listofnumbers), 1)
def php_substr(_string, _start, _length=None):
    """
    Emulate PHP's ``substr``: return the slice of ``_string`` starting at
    ``_start`` (negative counts from the end) with optional ``_length``
    (negative trims from the end). As in PHP, failure yields False rather
    than raising or returning ''.

    >>> php_substr("abcdef", -1)
    'f'
    >>> php_substr("abcdef", -2)
    'ef'
    >>> php_substr("abcdef", -3, 1)
    'd'
    >>> php_substr("abcdef", 0, -1)
    'abcde'
    >>> php_substr("abcdef", 2, -1);
    'cde'
    >>> php_substr("abcdef", 4, -4);
    False
    >>> php_substr("abcdef", -3, -1);
    'de'
    >>> php_substr('a', 2)
    False
    """
    # Non-strings and empty strings cannot yield a substring: PHP-style False.
    if not isinstance(_string, str):
        return False
    if _string == "":
        return False
    if _length is None:
        # Start beyond the end of the string is a failure, not ''.
        if _start > len(_string):
            return False
        return _string[_start:]
    # With a negative start and a positive length, PHP counts the length
    # forward from the (negative) start, so turn it into an end index.
    if _start < 0 and _length > 0:
        _length = _start + _length
    r = _string[_start:_length]
    # An empty slice is reported as False, mirroring PHP.
    if r == "":
        return False
    return r
def two_fer(name='you'):
    """Return the "two for one" phrase for *name*.

    :param name: str - optional, defaults to 'you'
    :return: str - two for one
    """
    return 'One for {}, one for me.'.format(name)
def parabolic(x, a=0.0, b=0.0, c=0.0):
    """Evaluate the parabola a * x**2 + b * x + c at *x*."""
    quadratic_term = a * x**2
    linear_term = b * x
    return quadratic_term + linear_term + c
def line(loc, strg):
    """Return the full line of *strg* containing character offset *loc*,
    treating newlines as line separators."""
    start = strg.rfind("\n", 0, loc) + 1  # -1 (not found) + 1 == 0: line one
    end = strg.find("\n", loc)
    return strg[start:end] if end >= 0 else strg[start:]
def camelcase(value: str) -> str:
    """Lower-case the first letter and capitalize the letter after each
    ``_``-separated component, preserving all other character casing.

    Fixes two defects of the previous version: the final ``.lower()``
    clobbered the first component's interior capitals, and ``str.title``
    lower-cased the rest of each later component — both contradicting the
    stated goal of preserving already camelCased strings.
    """
    components = value.split("_")
    head = components[0]
    if head:
        head = head[0].lower() + head[1:]
    # Capitalize only the first character of later components so interior
    # capitals (e.g. "httpResponse") survive; empty components pass through.
    tail = "".join(c[0].upper() + c[1:] if c else c for c in components[1:])
    return head + tail
def connected_component(_n, _v, _edges):
    """
    Return the set of vertices in the connected component containing ``_v``.

    # https://www.geeksforgeeks.org/connected-components-in-an-undirected-graph/
    :param _n: number of vertices
    :param _v: vertex contained by target component
    :type _edges: set of edges, e.g. {(0, 1), (1, 2)}
    """
    visited = [False] * _n
    component = set()
    # Iterative depth-first search avoids Python's recursion limit.
    stack = [_v]
    while stack:
        vertex = stack.pop()
        if visited[vertex]:
            continue
        visited[vertex] = True
        component.add(vertex)
        for a, b in _edges:
            if a == vertex and not visited[b]:
                stack.append(b)
            elif b == vertex and not visited[a]:
                stack.append(a)
    return component
def _split3_ ( xlims , ylims , zlims ) :
    """Split a 3D-region, given as one (min, max) pair per axis, into its
    eight octants around the centre point.

    Example
    -------
    >>> region = (-1,1),( -2,5) , ( -3,3)
    >>> newregions = _split3_( *region )
    >>> for nr in newregions : print ( nr )
    """
    # Centre (c) and half-width (d) of each axis.
    xc = 0.5 * ( xlims[1] + xlims[0] )
    dx = 0.5 * ( xlims[1] - xlims[0] )
    yc = 0.5 * ( ylims[1] + ylims[0] )
    dy = 0.5 * ( ylims[1] - ylims[0] )
    zc = 0.5 * ( zlims[1] + zlims[0] )
    dz = 0.5 * ( zlims[1] - zlims[0] )
    # All eight (low/high x) x (low/high y) x (low/high z) octants.
    return ( ( ( xc-dx , xc ) , ( yc-dy , yc ) , ( zc-dz , zc ) ) ,
             ( ( xc-dx , xc ) , ( yc-dy , yc ) , ( zc , zc + dz ) ) ,
             ( ( xc-dx , xc ) , ( yc , yc+dy ) , ( zc-dz , zc ) ) ,
             ( ( xc-dx , xc ) , ( yc , yc+dy ) , ( zc , zc + dz ) ) ,
             ( ( xc , xc+dx ) , ( yc-dy , yc ) , ( zc-dz , zc ) ) ,
             ( ( xc , xc+dx ) , ( yc-dy , yc ) , ( zc , zc + dz ) ) ,
             ( ( xc , xc+dx ) , ( yc , yc+dy ) , ( zc-dz , zc ) ) ,
             ( ( xc , xc+dx ) , ( yc , yc+dy ) , ( zc , zc + dz ) ) )
def _is_new_tree(trees, this_tree):
"""
True if it's a new tree.
Parameters
----------
trees:
"""
# check for length
if trees == []:
return True
ll = [len(i) for i in trees]
# print(len(this_tree))
l_this = len(this_tree)
same_length=[]
for i, l in enumerate(ll):
if l == l_this:
same_length.append(i)
# same_length = np.where(ll == len(this_tree))[0]
# print(same_length, len(same_length))
if len(same_length) == 0:
return True
#same_length = [same_length]
import collections
for i_tree in same_length:
# print(i_tree)
# list[indices] does not work.
# So, there is no way iterate over a sub sample of a list
# except using a much simpler 'slicing'.
# Instead, iterate over the indices.
test_tree = trees[i_tree]
# print(i_tree)
if collections.Counter(test_tree) == collections.Counter(this_tree):
return False
return True |
def depth2intensity(depth, interval=300):
    """
    Convert rainfall depth (mm) accumulated over *interval* seconds to
    rainfall intensity (mm/h).

    Args:
        depth: float or array of float, rainfall depth (mm)
        interval: time interval (seconds) corresponding to the depth values
    Returns:
        rainfall intensity (mm/h), same shape as *depth*
    """
    seconds_per_hour = 3600
    return depth * seconds_per_hour / interval
def format_legacy_response(response, skills, category):
    """Convert timeseries skill records to legacy API rows.

    Each item of *response* becomes ``[timestamp, v1, v2, ...]`` where the
    values are ``item['skills'][skill][category]`` taken in *skills* order.
    """
    rows = []
    for item in response:
        row = [item["timestamp"]]
        for skill in skills:
            row.append(item["skills"][skill][category])
        rows.append(row)
    return rows
def sort_by_length(words):
    """Sort a list of words in reverse order by length.

    Stable: words of the same length keep their original relative order.
    (The old decorate-sort-undecorate version compared the words themselves
    as a tie-breaker, so equal-length words came out in reverse alphabetical
    order, contradicting its own docstring.)

    words: list of strings
    Returns: new list of strings
    """
    # sorted() is stable; reversing only the length key preserves input
    # order among equal-length words.
    return sorted(words, key=len, reverse=True)
def all_possible_state_helper( equipment ):
    """
    Build the list of tuples describing every possible equipment combination.

    *equipment* is a list of dicts each containing at least a 'number' key;
    each tuple position ranges from 0 up to and including that number.
    An empty input yields an empty list.
    """
    result = []
    for position, item in enumerate(equipment):
        counts = range(item['number'] + 1)  # include the maximal count
        if position == 0:
            result = [(count,) for count in counts]
        else:
            # Extend every partial state with each possible count.
            result = [state + (count,) for state in result for count in counts]
    return result
def recursive_bases_from_class(klass):
    """Return every direct and transitive base class of *klass*, pre-order
    (direct bases first, then each base's own bases recursively)."""
    collected = list(klass.__bases__)
    for parent in klass.__bases__:
        collected += recursive_bases_from_class(parent)
    return collected
def setPath(day, month, year, repoAddress="\\\\jilafile.colorado.edu\\scratch\\regal\\common\\LabData\\NewRb\\CryoData"):
    """
    Set the module-level ``dataAddress`` to the folder holding one day's data.

    Occasionally called more than once in a notebook if the user needs to
    work past midnight.
    :param day: day number, int or numeric string, e.g. '11'.
    :param month: month name, e.g. 'November' (must match file path capitalization).
    :param year: year, int or numeric string, e.g. '2017'.
    :return: the newly assigned data address.
    """
    global dataAddress
    if type(day) is int:
        day = str(day)
    if type(year) is int:
        year = str(year)
    parts = [repoAddress, year, month, month + " " + day, "Raw Data"]
    dataAddress = "\\".join(parts) + "\\"
    return dataAddress
def _join_lines(lines):
"""
Simple join, except we want empty lines to still provide a newline.
"""
result = []
for line in lines:
if not line:
if result and result[-1] != '\n':
result.append('\n')
else:
result.append(line + '\n')
return ''.join(result).strip() |
def name_test(item):
    """Build a readable pytest id of the form '<mlag>:<expected state>'."""
    mlag = item['params']['mlag']
    state = item['expected']['state']
    return '{}:{}'.format(mlag, state)
def enum(word_sentences, tag_sentences):
    """
    Enumerate words, chars and tags for constructing vocabularies.

    :param word_sentences: list of sentences, each a list of word strings
    :param tag_sentences: list of sentences, each a list of tag strings
    :return: (sorted unique words, sorted unique chars, sorted unique tags)
    """
    from itertools import chain
    # chain.from_iterable flattens in O(n); the old sum(..., []) idiom is
    # quadratic in the number of sentences.
    words = sorted(set(chain.from_iterable(word_sentences)))
    chars = sorted(set(chain.from_iterable(words)))
    tags = sorted(set(chain.from_iterable(tag_sentences)))
    return words, chars, tags
def test_reassign(obj):
    """
    Fixture exercising type inference across successive reassignments of
    *obj* (int -> float -> complex -> int -> str); the inferred type of
    ``obj`` should end up as the C string type, per the doctest below.

    >>> test_reassign(object())
    'hello'
    >>> sig, syms = infer(test_reassign.py_func, functype(None, [object_]))
    >>> sig
    const char * (*)(object_)
    >>> syms['obj'].type
    const char *
    """
    # Each assignment deliberately changes the inferred type of `obj`;
    # the statement order is the point of this test.
    obj = 1
    obj = 1.0
    obj = 1 + 4j
    obj = 2
    obj = "hello"
    return obj
def getResNumb(line):
    """
    Read the residue number (PDB columns 23-26, 0-based slice [22:26])
    from a PDB line.

    Returns 0 when *line* is None.
    """
    # `is None` replaces the old `== None` comparison (PEP 8 identity check).
    if line is None:
        return 0
    return int(line[22:26].strip())
def identity_matrix(dim):
    """Construct a *dim* x *dim* identity matrix.

    Parameters
    ----------
    dim : int
        The number of rows and columns of the matrix.

    Returns
    -------
    list of list
        Nested lists of floats with 1.0 on the diagonal, 0.0 elsewhere.

    Examples
    --------
    >>> identity_matrix(2)
    [[1.0, 0.0], [0.0, 1.0]]
    """
    rows = []
    for r in range(dim):
        rows.append([1.0 if c == r else 0.0 for c in range(dim)])
    return rows
def normalizeGlyphWidth(value):
    """
    Normalize a glyph width.

    * **value** must be a :ref:`type-int-float`; anything else raises
      TypeError.
    * The value is returned unchanged (same type as the input).
    """
    if isinstance(value, (int, float)):
        return value
    raise TypeError("Glyph width must be an :ref:`type-int-float`, not %s."
                    % type(value).__name__)
def _get_printable_columns(columns, row):
"""Return only the part of the row which should be printed.
"""
if not columns:
return row
# Extract the column values, in the order specified.
return tuple(row[c] for c in columns) |
def cast(current, new):
    """Tries to force a new value into the same type as the current.

    Booleans get special handling so that strings such as '1'/'0',
    'on'/'off', and anything starting with y/t (true) or n/f (false) work.
    On failure the current value is kept and a warning is printed.
    """
    typ = type(current)
    if typ == bool:
        try:
            # Numeric input ('0', '1', 2, ...) goes through int first.
            return bool(int(new))
        except (ValueError, TypeError):
            pass
        try:
            new = new.lower()
        except:
            pass
        # NOTE(review): new[0] raises IndexError for an empty string here —
        # confirm callers never pass '' for a bool parameter.
        if (new=='on') or (new[0] in ('y','t')):
            return True
        if (new=='off') or (new[0] in ('n','f')):
            return False
    else:
        try:
            # Non-bool targets rely on the type's own constructor.
            return typ(new)
        except:
            pass
    # Fall-through: conversion failed; keep the existing value.
    print ("Problem setting parameter (now %s) to %s; incorrect type?" % (current, new))
    return current
def standard_dict_group(data):
    """Group (key, value) pairs into a plain dict mapping key -> list of
    values, preserving encounter order within each group."""
    grouped = {}
    for key, value in data:
        if key not in grouped:
            grouped[key] = []
        grouped[key].append(value)
    return grouped
def sublist_at(list, index, length):
    """
    Return the sublist that starts at the given index and has the given
    length. If the index is out of bounds, return an empty list. If
    the length is too long, return only as many item as exist starting
    at `index`.

    NOTE(review): despite the Python-list examples below, this code walks
    cons-style pairs — a "list" here is either () or (head, tail) — and the
    result is built the same way. The examples (and the name `get_sublist`
    in them) do not match the implementation; confirm which representation
    callers actually use.

    For example:
    get_sublist([1, 2, 3, 4, 5], 2, 2) -> [3, 4]
    get_sublist([1, 2, 3, 4, 5], 4, 2) -> [5]
    get_sublist([1, 2, 3, 4, 5], 6, 2) -> []
    """
    if list == () or index < 0 or length <= 0:
        return ()
    else:
        head, tail = list
        if index == 0:
            # Within range: keep the head, recurse for the remaining length.
            return (head, sublist_at(tail, index, length - 1))
        else:
            # Skip cells until the start index is reached.
            return sublist_at(tail, index - 1, length)
def generate_ride_headers(token):
    """Build the HTTP header dict used for authenticated API requests."""
    auth_value = 'bearer %s' % token
    return {
        'Authorization': auth_value,
        'Content-Type': 'application/json',
    }
def size_convert(size: float = 0) -> str:
    """Convert a size in bytes to human readable format.

    Args:
        size: The length of a file in bytes
    Returns:
        A human readable string for judging size of a file
    Notes:
        Uses the IEC prefix, not SI
    """
    size_name = ("B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB")
    i = 0
    while size >= 1024:
        size /= 1024
        i += 1
    try:
        return str(round(size, 2)) + size_name[i]
    except IndexError:
        # BUG FIX: the fallback string was missing its f-prefix, so the
        # literal text "{i}" was emitted instead of the exponent.
        return str(round(size, 2)) + " x " + f"1024^{i} bytes"
def is_side(s):
    """Return whether *s* is a side: a 3-element list tagged 'side'."""
    if type(s) is not list:
        return False
    return len(s) == 3 and s[0] == 'side'
def decoded_to_json(decoded_message):
    """
    Wrap decoded OCR results in JSON-serializable dicts with 'text',
    'box' ([left, top, right, bottom]) and a constant 'score' of 1.
    """
    results = []
    for msg in decoded_message:
        rect = msg.rect
        box = [
            rect.left,
            rect.top,
            rect.left + rect.width,   # right edge
            rect.top + rect.height,   # bottom edge
        ]
        results.append({"text": msg.data, "box": box, "score": 1})
    return results
def find_base_url_path(url):
    """
    Normalize a URL to its base path: strip surrounding slashes, force
    https, drop the query string, and lower-case the result.
    """
    trimmed = url.strip("/")
    secure = trimmed.replace("http://", "https://")
    without_query = secure.split("?", 1)[0]
    return without_query.lower()
def format_ruleset(ruleset):
    """
    Expand an integer written with binary digits (e.g. 11100) into the list
    of its eight bits, zero-padded on the left.

    >>> format_ruleset(11100)
    [0, 0, 0, 1, 1, 1, 0, 0]
    >>> format_ruleset(0)
    [0, 0, 0, 0, 0, 0, 0, 0]
    """
    digits = "{:08}".format(ruleset)[:8]
    return [int(ch) for ch in digits]
def getMatchingIndices(func, seq):
    """Return the indices of *seq* whose elements satisfy *func*."""
    matches = []
    for index, value in enumerate(seq):
        if func(value):
            matches.append(index)
    return matches
def voltageDivision(v_in, r_list_ordered, showWork=False):
    """
    Divide *v_in* across series resistors in direct proportion to their
    resistances; the larger the resistance, the larger the voltage drop.
    """
    r_total = sum(r_list_ordered)
    voltages = []
    for resistance in r_list_ordered:
        voltages.append(resistance / r_total * v_in)
    if showWork:
        print("Resistor ordered voltage division: ", voltages)
        print("Adjust directions as necessary after getting result.")
    return voltages
def import_prov_es(info):
    """
    Create job json for importing of PROV-ES JSON.

    :param info: dict carrying the triggering rule hit; must provide
        'rule_hit' (an Elasticsearch hit with _index/_type/_source.urls),
        'objectid', and 'rule' (with rule_name and username).
    :return: job dict ready for submission.

    Example:
    job = {
        'type': 'import_prov_es',
        'name': 'action-import_prov_es',
        'tag': 'v0.3_import',
        'username': 'ops',
        'params': {
            'prod_url': '<dataset url>'
        },
        'localize_urls': []
    }
    """
    # build params
    params = {}
    rule_hit = info["rule_hit"]
    params["index"] = rule_hit["_index"]
    params["doctype"] = rule_hit["_type"]
    params["id"] = info["objectid"]
    urls = rule_hit["_source"]["urls"]
    if len(urls) > 0:
        # Default to the first URL, but prefer an s3 URL when available.
        params["prod_url"] = urls[0]
        for url in urls:
            if url.startswith("s3"):
                params["prod_url"] = url
                break
    else:
        params["prod_url"] = None
    params["rule_name"] = info["rule"]["rule_name"]
    params["username"] = info["rule"]["username"]
    job = {
        "type": "import_prov_es",
        "name": "action-import_prov_es",
        "tag": params["rule_name"],
        "username": params["username"],
        "params": params,
        "localize_urls": [],
    }
    # pprint(job, indent=2)
    return job
def _is_single_type(data):
""" check whether elements in data has same data-type or not
Arguments:
data {list} -- data you want to check elements' data-type
Returns:
{data-type or bool} -- if all elements has same data-type, return the data-type
if not, return False
"""
_type = {}
for d in data:
_type[type(d)] = None
is_single_type = True if len(_type) == 1 else False
if is_single_type:
return list(_type.keys())[0]
else:
False |
def uniqify(some_list):
    """
    Filter out duplicates from a given list, preserving first-seen order.

    The previous ``list(set(...))`` version returned the elements in
    arbitrary order, so its own doctest was only accidentally true;
    ``dict.fromkeys`` keeps insertion order (guaranteed since Python 3.7).

    :Example:
    >>> uniqify([1, 2, 2, 3])
    [1, 2, 3]
    """
    return list(dict.fromkeys(some_list))
def get_len(text):
    """
    Obtain the length of the input text, in number of characters.

    The previous implementation split on spaces and returned the *word*
    count, contradicting both its name and its documentation.

    Args:
        text: string containing text to find the length of
    Returns:
        The length of the input text represented as an int
    """
    return len(text)
def check_dir_slash(path):
    """Return *path* guaranteed to end with a trailing slash.

    Args:
        path(str): path to be checked for slash
    Returns:
        (str) path with slash at the end
    """
    return path if path.endswith('/') else path + '/'
def set_output_path(folder, fprefix, particip, output_file, sess):
    """Build the output .tsv pathway for one participant and session:
    <folder>/<fprefix>-<NN>/<output_file>_<fprefix>-<NN>_run<sess>.tsv"""
    participant_tag = '%02d' % particip
    subject_dir = '{}-{}'.format(fprefix, participant_tag)
    file_name = '{}_{}_run{}.tsv'.format(output_file, subject_dir, sess)
    return '{}/{}/{}'.format(folder, subject_dir, file_name)
def get_arguments(omit=None):
    """Get a calling function's arguments.

    Parameters
    ----------
    omit : list of str, optional
        Argument names to exclude from the result.

    Returns
    -------
    args : dict
        The calling function's named arguments and their current values.
    """
    from inspect import getargvalues, stack
    if omit is None:
        omit = []
    # stack()[1][0] is the caller's frame; read its argument bindings.
    _args, _, _, _vars = getargvalues(stack()[1][0])
    args = {}
    for name in _args:
        if name in omit: continue
        args[name] = _vars[name]
    return args
def parseDockerAppliance(appliance):
    """
    Takes string describing a docker image and returns the parsed
    registry, image reference, and tag for that image.
    Example: "quay.io/ucsc_cgl/toil:latest"
    Should return: "quay.io", "ucsc_cgl/toil", "latest"
    If a registry is not defined, the default is: "docker.io"
    If a tag is not defined, the default is: "latest"

    NOTE(review): digest references ("image@sha256:...") are not handled —
    the ':' split would treat the digest as a tag. Confirm callers never
    pass digests.

    :param appliance: The full url of the docker image originally
                      specified by the user (or the default).
                      e.g. "quay.io/ucsc_cgl/toil:latest"
    :return: registryName, imageName, tag
    """
    appliance = appliance.lower()
    # get the tag
    if ':' in appliance:
        tag = appliance.split(':')[-1]
        appliance = appliance[:-(len(':' + tag))] # remove only the tag
    else:
        # default to 'latest' if no tag is specified
        tag = 'latest'
    # get the registry and image
    registryName = 'docker.io'  # default if not specified
    imageName = appliance  # will be true if not specified
    # A first path component containing a dot is taken to be a registry host.
    if '/' in appliance and '.' in appliance.split('/')[0]:
        registryName = appliance.split('/')[0]
        imageName = appliance[len(registryName):]
        registryName = registryName.strip('/')
        imageName = imageName.strip('/')
    return registryName, imageName, tag
def crt(pairs):
    """
    Chinese Remainder Theorem: given (residue, modulus) pairs, return the
    unique solution modulo the product of the moduli.

    NOTE: the modular inverse is computed as pow(b, mx-2, mx) (Fermat's
    little theorem), so every modulus must be PRIME (true for the AoC
    day-13 bus ids); composite moduli would silently give wrong results.

    After 8 hours of pointless optimizing, I searched for a good solution
    and finally found this from "sophiebits". She made a nice and lightning fast
    implementation of the Chinese Remainder Theorem. Please, follow the link
    to her original solution devoutly:
    https://github.com/sophiebits/adventofcode/blob/main/2020/day13.py
    """
    # Product of all moduli.
    m = 1
    for x, mx in pairs:
        m *= mx
    total = 0
    for x, mx in pairs:
        # b = m / mx; pow(b, mx-2, mx) is b's inverse mod the prime mx.
        b = m // mx
        total += x * b * pow(b, mx-2, mx)
        total %= m
    return total
def parse_plugins_option(plugins_option):
    """
    Return the parsed -p command line option as a list of plugin names.

    An empty or missing option yields an empty list; the previous version
    returned [''] (a list containing one empty name) in that case.
    """
    if not plugins_option:
        return []
    return plugins_option.split(",")
def user_has_verified_email(user) -> bool:
    """Check whether the user has a verified email with the organization.

    Args:
        user: mapping carrying an "organizationVerifiedDomainEmails" entry
    Returns:
        bool: True if any verified domain emails are present
    """
    return bool(user["organizationVerifiedDomainEmails"])
def to_hass_level(level):
    """Convert a Vantage light level (0.0-100.0) to the HASS scale (0-255)."""
    scaled = (level * 255) / 100
    return int(scaled)
def erd_encode_signed_byte(value: int) -> str:
    """
    Encode a signed int (-128..127) as a two's-complement hex byte string,
    e.g. -1 -> 'ff'. (The original docstring described the inverse,
    hex-to-dec, operation.) Copied from GE's hextodec method.
    """
    value = int(value)
    if value < 0:
        # Two's complement wrap-around for a single byte.
        value = value + 256
    return value.to_bytes(1, "big").hex()
def _versiontuple(v):
"""
Converts a version string to a tuple.
Parameters
----------
v : :obj:`str`
Version number as a string. For example: '1.1.1'
Returns
-------
tuple
Three element tuple with the version number. For example: (1, 1, 1)
"""
return tuple(map(int, (v.split(".")))) |
def list_hosts(ip, end):
    """
    Build the host-IP list for one thread: keep the first three octets of
    *ip* fixed and vary the last octet from its current value up to (not
    including) *end*.
    :return: list of hosts
    """
    octets = ip.split(".")
    prefix = ".".join(octets[:3])
    start = int(octets[3])
    return [prefix + "." + str(last) for last in range(start, end)]
def file_size(num, suffix='B'):
    """
    Render a byte count as a human readable string (binary steps of 1024).
    :param num: Integer in bytes
    :param suffix: Appended to type of file size
    :return: String
    """
    value = num
    for prefix in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
        if abs(value) < 1024.0:
            return "%3.1f%s%s" % (value, prefix, suffix)
        value /= 1024.0
    # Anything surviving all divisions is in the yotta range.
    return "%.1f%s%s" % (value, 'Y', suffix)
def tensor_transpose(g):
    """
    Transpose a 2-D tensor.

    :param g: tensor (anything ``np.array`` accepts)
    :return: transposed numpy array

    The previous element-by-element loop used ``shape[0]`` for both axes,
    so it only worked for square tensors; ``.T`` handles any shape.
    """
    import numpy as np
    return np.array(g).T
def sum_of_odd_nums_upto_n(i):
    """Return the sum of all odd numbers from 1 up to and including *i*."""
    # range with step 2 visits exactly the odd numbers.
    return sum(range(1, i + 1, 2))
def get_coordinate_text(x, y):
    """
    Format mouse coordinates as a "(x,y)" string.
    :param x: The x-coordinate of the mouse.
    :param y: The y-coordinate of the mouse.
    :return: str, the string of coordinate.
    """
    return "({},{})".format(x, y)
def get_tool_name(url: str) -> str:
    """Get the tool name from the specified URL.

    The name is taken to start at the first underscore-separated fragment
    that begins with an upper-case letter (e.g. the class name at the end
    of a Java-package-style path); raises ValueError when no such fragment
    exists.
    """
    # Strip a trailing .json or .php extension before parsing.
    if url.endswith(".json"):
        url = url[:-len(".json")]
    if url.endswith(".php"):
        url = url[:-len(".php")]
    parts = url.split("_")
    for i, fragment in enumerate(parts):
        if fragment[0].isupper():
            name = "_".join(parts[i:])
            break
    else:
        raise ValueError(f"Could not infer tool name from URL: {url}")
    # Read filters sometimes have a $ in the URL, e.g.:
    # https://software.broadinstitute.org/gatk/documentation/tooldocs/4.0.7.0/org_broadinstitute_hellbender_engine_filters_ReadFilterLibrary$AllowAllReadsReadFilter.json
    if "$" in name:
        # Workaround for bizarre one-off GATK 3 tool name:
        # https://software.broadinstitute.org/gatk/documentation/tooldocs/3.8-0/org_broadinstitute_gatk_engine_filters_CountingFilteringIterator$CountingReadFilter.php
        if name == "CountingFilteringIterator$CountingReadFilter":
            return "CountingFilteringIterator.CountingReadFilter"
        # Otherwise keep only the part after the '$' (the inner class name).
        return name[name.index("$")+1:]
    return name
def count_user(data_list):
    """
    Count users by type.

    Args:
        data_list: iterable of rows; column 5 holds the user type.
    Returns:
        [customer_count, subscriber_count]
    """
    customer = 0
    subscriber = 0
    for row in data_list:
        user_type = row[5]
        if user_type == "Customer":
            customer += 1
        elif user_type == "Subscriber":
            subscriber += 1
    return [customer, subscriber]
def _generate_fortran_bool(pybool):
"""Generates a fortran bool as a string for the namelist generator
"""
if pybool:
fbool = '.true.'
else:
fbool = '.false.'
return fbool |
def ordinal(num):
    """Return *num* as an ordinal string: 1 -> '1st', 2 -> '2nd', 11 -> '11th'."""
    if 10 <= num % 100 <= 20:
        # The teens (and 10/20) all take 'th', breaking the last-digit rule.
        suffix = 'th'
    else:
        suffix = {1: 'st', 2: 'nd', 3: 'rd'}.get(num % 10, 'th')
    return str(num) + suffix
def get_select_indexes(selects, attributes):
    """Map each 'table.column' select to a [table, column_index] pair.

    Args:
        selects: select values
        attributes: dict of table name -> list of attribute names
    Returns:
        lookup indexes, or [selects] unchanged for a '*' select
    """
    if selects[0] == '*':
        return [selects]
    indexes = []
    for item in selects:
        parts = item.split('.')
        table, column = parts[0], parts[1]
        indexes.append([table, attributes[table].index(column)])
    return indexes
def round_float_str(s, precision=6):
    """
    Round every float-looking, whitespace-separated token of *s*.

    Non-numeric tokens pass through unchanged. Tokens are re-joined with
    single spaces and the result carries a trailing space (matching the
    original output format).

    Parameters
    ----------
    s : str
        String containing floats
    precision : int
        Number of decimals to round each float to
    """
    round_str = ''
    for token in s.split():
        try:
            # Only ValueError means "not a float"; the old bare except also
            # swallowed unrelated errors such as KeyboardInterrupt.
            round_str += str(round(float(token), precision))
        except ValueError:
            round_str += token
        round_str += ' '
    return round_str
def count_and_say(n):
    """
    Build the n-th term of the look-and-say sequence iteratively.
    :param n: term index (0 -> '', 1 -> '1', 2 -> '11', 3 -> '21', ...)
    :type n: int
    :return: said string
    :rtype: str
    """
    if n == 0:
        return ''
    seq = '1'
    for _ in range(n - 1):
        said = ''
        run = 1
        for pos, ch in enumerate(seq):
            if pos + 1 < len(seq) and ch == seq[pos + 1]:
                run += 1
            else:
                # Run ended: say "<count><digit>" and reset.
                said += str(run) + ch
                run = 1
        seq = said
    return seq
def conv_ext_to_proj(P):
    """
    Drop the extended coordinate: (X, Y, Z, T) -> (X, Y, Z).
    :param P: point in extended coordinates (indexable, length >= 3)
    :return: projective (X, Y, Z) tuple
    """
    x, y, z = P[0], P[1], P[2]
    return x, y, z
def google_drive_useable(link):
    """Rewrite a Google Drive '/open?' share link into a usable direct
    '/uc?' link."""
    share_marker, direct_marker = '/open?', '/uc?'
    return link.replace(share_marker, direct_marker)
def _common_shape(sshape, ashape):
    """Return broadcast shape common to both sshape and ashape.

    Follows numpy-style broadcasting: the shorter shape is left-padded with
    the other's leading dims, then size-1 dims stretch to match; any other
    mismatch raises ValueError.
    """
    # Just return both if they have the same shape
    if sshape == ashape:
        return sshape
    srank, arank = len(sshape), len(ashape)
    # do a special comparison of all dims with size>1
    if srank > arank:
        # Pad ashape on the left with sshape's leading dims.
        newrank = srank
        ashape = sshape[:newrank-arank] + ashape
    else:
        # Pad sshape on the left with ashape's leading dims.
        newrank = arank
        sshape = ashape[:newrank-srank] + sshape
    newshape = list(sshape)
    for i in range(newrank):
        if sshape[i] != ashape[i]:
            # A size-1 dim broadcasts to the other's size; otherwise clash.
            if sshape[i] == 1:
                newshape[i] = ashape[i]
            elif ashape[i] == 1:
                newshape[i] = sshape[i]
            else:
                raise ValueError("Arrays have incompatible shapes");
    return tuple(newshape)
def get_price_as_int(n):
    """Return the 8-decimal digits of *n* as an integer.

    *n* is formatted with exactly 8 decimal places and every occurrence of
    the substring '0.' is removed from that text before int() conversion.

    NOTE(review): this only behaves as "fractional price in base units"
    for 0 < n < 1; for n >= 1, or when the digits happen to contain '0.'
    elsewhere (e.g. '10.5'), the result is surprising — confirm the
    expected input range with callers.
    """
    return int(('{:.8f}'.format(n)).replace('0.', ''))
def is_py_script(item: str):
    """Check whether *item* (file or folder name) is a python script by its
    extension.

    Args:
        item (str): the name of the file or folder
    Returns:
        bool: whether or not the item ends with ".py"
    """
    # BUG FIX: the old substring test (".py" in item) also matched names
    # such as "foo.pyc" or "my.python.txt"; an extension check must be
    # anchored at the end of the name.
    return item.endswith(".py")
def make_object(obj, kwargs):
    """Instantiate *obj* with the given keyword arguments: ``obj(**kwargs)``."""
    instance = obj(**kwargs)
    return instance
def context_to_airflow_vars(context):
    """
    Given a context, this function provides a dictionary of values that can be used to
    externally reconstruct relations between dags, dag_runs, tasks and task_instances.

    Only entries that are present and populated in *context* are emitted.

    :param context: The context for the task_instance of interest
    :type context: dict
    """
    params = dict()
    # Each block guards against both a missing context key and a missing
    # attribute value before emitting its 'airflow.ctx.*' entry.
    dag = context.get('dag')
    if dag and dag.dag_id:
        params['airflow.ctx.dag.dag_id'] = dag.dag_id
    dag_run = context.get('dag_run')
    if dag_run and dag_run.execution_date:
        params['airflow.ctx.dag_run.execution_date'] = dag_run.execution_date.isoformat()
    task = context.get('task')
    if task and task.task_id:
        params['airflow.ctx.task.task_id'] = task.task_id
    task_instance = context.get('task_instance')
    if task_instance and task_instance.execution_date:
        params['airflow.ctx.task_instance.execution_date'] = \
            task_instance.execution_date.isoformat()
    return params
def clean_value(value):
    """Clean spreadsheet values of issues that will affect validation;
    unrecognized values pass through unchanged."""
    for raw, cleaned in (('n/a', 'N/A'),
                         ('Design was registered before field was added', '')):
        if value == raw:
            return cleaned
    return value
def get_keys(dict_info):
    """Return the dictionary's keys as a list, in insertion order."""
    # The old version printed every key while collecting them (debugging
    # output left in library code) and built the list by hand; iterating
    # the dict directly yields the keys.
    return list(dict_info)
def show_variable_description_pca(click_data):
    """
    Describe the variable whose point was clicked in the PCA graph.
    :param click_data: plotly click payload, or falsy when nothing clicked
    :return: description string
    """
    if not click_data:
        return "Click on a data point to get a short description of the variable"
    dot = click_data['points'][0]['customdata'][0]
    if dot:
        return str(dot)
    return "No matching data"
def hhmmss(t: float, dec=0):
    """Format a time in seconds as h:mm:ss[.frac].

    Arguments
    ---------
    t : float
        Timestamp in seconds
    dec : int (default 0)
        Number of decimal places for the seconds field; must be >= 0
    """
    if dec < 0:
        raise ValueError("Decimal places must be >= 0 for hhmmss formatting")
    hours = int(t / 3600)
    remainder = t - hours * 3600
    minutes = int(remainder / 60)
    seconds = remainder - minutes * 60
    # Field width: "SS" plus '.' and dec digits when dec > 0.
    width = dec + 3 if dec > 0 else dec + 2
    template = "{{}}:{{:02d}}:{{:0{}.0{}f}}".format(width, dec)
    return template.format(hours, minutes, seconds)
def make_edge(v1, v2):
    """
    Canonical edge key: the two vertex indices as an ordered tuple, so
    the edge v1->v2 and the edge v2->v1 compare equal.
    """
    if v2 < v1:
        v1, v2 = v2, v1
    return (v1, v2)
def get_x_prime(tau, z):
    """
    Equation [2](2.2): x' = z / tau.
    :param tau: time constant
    :param z: state value
    """
    inverse_tau = 1.0 / tau
    return inverse_tau * z
def offset_5p(cov, offsets_5p):
    """
    Linear offset for 5' transcript ends based on average transcript
    coverage: slope * cov + intercept, with (slope, intercept) from
    *offsets_5p*.
    """
    slope, intercept = offsets_5p[0], offsets_5p[1]
    return slope * cov + intercept
def pretty_duration(seconds):
    """Return a user-friendly representation of the provided duration in
    seconds. For example: 62.8 -> "1m2.8s", or 129837.8 -> "1d12h3m57.8s".
    None yields ''."""
    if seconds is None:
        return ''
    pieces = []
    for unit_size, unit_label in ((86400, 'd'), (3600, 'h'), (60, 'm')):
        if seconds >= unit_size:
            pieces.append('{:.0f}{}'.format(int(seconds / unit_size), unit_label))
            seconds = seconds % unit_size
    if seconds > 0:
        pieces.append('{:.1f}s'.format(seconds))
    return ''.join(pieces)
def repr_args(args, kwargs):
    """Stringify a set of arguments as you would write them in a call.

    Arguments:
        args: tuple of arguments as a function would see it.
        kwargs: dictionary of keyword arguments as a function would see it.
    Returns:
        String such as "1, 2, a=3".
    """
    pieces = [repr(a) for a in args]
    pieces += ['%s=%r' % (key, val) for key, val in kwargs.items()]
    return ', '.join(pieces)
def wsorted(ws):
    """Return *ws* with trailing whitespace removed and its letters sorted."""
    return ''.join(sorted(ws.rstrip()))
def convert_bytes_to(bytes: int, convert_to: str) -> int:
    """
    Convert a byte count to kb, mb, gb or tb (binary steps of 1024, rounded).

    >>> convert_bytes_to(1024, convert_to="kb")
    1
    >>> convert_bytes_to(123456789, convert_to="mb")
    118
    >>> convert_bytes_to(1073741824, convert_to="gb")
    1
    """
    steps = {"kb": 1, "mb": 2, "gb": 3, "tb": 4}
    value = float(bytes)
    for _ in range(steps[convert_to]):
        value /= 1024
    return round(value)
def edges2nodes(edges):
    """Gather the unique nodes appearing in *edges*, sorted by the string
    form of each node's first element."""
    # A dict deduplicates while preserving first-seen order.
    unique = {}
    for first, second in edges:
        unique[first] = None
        unique[second] = None
    return sorted(unique, key=lambda node: str(node[0]))
def query_result_to_dict(result):
    """
    Convert a SQLAlchemy row (tuple-like with a .keys() method) to a plain
    dict so it can be jsonified.
    :return: dict mapping column names to values
    """
    column_names = result.keys()
    return dict(zip(column_names, result))
def is_integer(value, cast=False):
    """Indicate whether the given value is an integer. Saves a little typing.

    :param value: The value to be checked.
    :param cast: Indicates whether the value (when given as a string) should be cast to an integer.
    :type cast: bool
    :rtype: bool

    .. code-block:: python

        from superpython.utils import is_integer

        print(is_integer(17))
        print(is_integer(17.5))
        print(is_integer("17"))
        print(is_integer("17", cast=True))
    """
    if isinstance(value, int):
        return True
    # Strings only count when casting is requested and the parse succeeds.
    if not (cast and isinstance(value, str)):
        return False
    try:
        int(value)
    except ValueError:
        return False
    return True
def decode_line(s, l, func=float):
    """
    Split string *s* on whitespace, convert each token with *func*,
    append the results to list *l*, and return *l*.
    """
    l.extend(func(token) for token in s.split())
    return l
def parse_word(guess, response):
    """
    Given a word and the game's response, generate a tuple of rules.

    Parameters:
        guess: str - the guessed word
        response: str - per-letter response; each char one of
            'g' (green), 'y' (yellow), 'b' (black)

    Returns: tuple of three rule structures:
        * cant_contain: list of "black" letters that can't be in the word
        * wrong_spot: {letter: [indexes]} for "yellow" letters that are in
          the word but not at those positions
        * right_spot: {index: letter} for "green" letters
    """
    cant_contain = []
    wrong_spot = {}
    right_spot = {}
    position = 0
    for letter, mark in zip(guess, response):
        if mark == 'b':
            cant_contain.append(letter)
        elif mark == 'y':
            wrong_spot.setdefault(letter, []).append(position)
        elif mark == 'g':
            right_spot[position] = letter
        position += 1
    return cant_contain, wrong_spot, right_spot
def _is_import_valid(documents):
"""
Validates the JSON file to be imported for schema correctness
:param documents: object loaded from JSON file
:return: True if schema seems valid, False otherwise
"""
return isinstance(documents, list) and \
all(isinstance(d, dict) for d in documents) and \
all(all(k in d for k in ('pk', 'model', 'fields')) for d in documents) and \
all(all(k in d['fields'] for k in ('uuid', 'owner')) for d in documents) |
def is_valid_matrix(mtx):
    """
    Check that *mtx* is non-empty and every row has the same number of
    columns.

    >>> is_valid_matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    True
    >>> is_valid_matrix([[1, 2, 3], [4, 5], [7, 8, 9]])
    False
    """
    if mtx == []:
        return False
    width = len(mtx[0])
    return all(len(row) == width for row in mtx)
def get_target(module, array):
    """Look up the volume named by module.params['target'] on *array*;
    return None when the lookup fails for any reason."""
    try:
        target_name = module.params['target']
        volume = array.get_volume(target_name)
    except Exception:
        return None
    return volume
def exists_in(x, a):
    """True if x is a member of the listoid a, False if not."""
    from numpy import size, where
    matches = where(a == x)
    # No matching positions means x is absent.
    return size(matches) != 0
def smallest_diff_key(A, B):
    """return the smallest key adiff in A such that A[adiff] != B[bdiff]"""
    # A key differs when it is absent from B (explicit check so that a
    # value of None in A is not confused with "missing") or maps to a
    # different value.
    differing = [key for key in A if key not in B or A.get(key) != B.get(key)]
    return min(differing) if differing else None
def is_callable(attribute, instance=None):
    """Check if value or attribute of instance are callable."""
    # When a (truthy) instance is supplied, test that instance's attribute;
    # otherwise test the value itself. A missing attribute is "not callable".
    try:
        target = getattr(instance, attribute) if instance else attribute
    except AttributeError:
        return False
    return callable(target)
def build_path(package_name: str) -> str:
    """Translate a dotted Java package name into a slash-separated path."""
    return "/".join(package_name.split("."))
def ra2deg(h, m, s):
    """
    Convert RA (hour, minute, second) to degree.
    """
    # 1 hour of right ascension = 15 degrees; same per-term arithmetic and
    # summation order as before, so float results are bit-identical.
    deg_from_hours = h * 15.0
    deg_from_minutes = m * 15.0 / 60.0
    deg_from_seconds = s * 15.0 / 3600.0
    return deg_from_hours + deg_from_minutes + deg_from_seconds
def in_region(region, point):
    """
    Detect if a point exists in a region.
    Region: (x, y, width, height)
    Point: (x, y)
    Returns True or False depending on if the point exists in the region.
    """
    left, top, width, height = region
    px, py = point
    # Strict inequalities: points exactly on the region border are outside,
    # matching the original behaviour.
    return left < px < left + width and top < py < top + height
def count_loc(lines):
    """
    Count logical lines of code, skipping blanks, '#' comments and
    docstrings.

    Taken from:
    https://app.assembla.com/spaces/tahar/subversion/source/HEAD/tahar.py
    linked to from:
    http://stackoverflow.com/questions/9076672/how-to-count-lines-of-code-in-python-excluding-comments-and-docstrings
    """
    nb_lines = 0          # running count of code lines
    docstring = False     # True while inside an open triple-quoted docstring
    for line in lines:
        line = line.strip()
        # Skip, in order: blank lines; '#' comments; interior lines of an
        # open docstring (any line that does not start with a triple quote);
        # and one-line docstrings ('''...''' / """...""" on a single line —
        # len > 3 so a bare opener like """ is not treated as complete).
        # Note the `docstring and not (...)` term binds tighter than the
        # surrounding `or`s, which is what keeps the toggle below reachable.
        if line == "" \
            or line.startswith("#") \
            or docstring and not (line.startswith('"""') or line.startswith("'''"))\
            or (line.startswith("'''") and line.endswith("'''") and len(line) >3) \
            or (line.startswith('"""') and line.endswith('"""') and len(line) >3) :
            continue
        # this is either a starting or ending docstring delimiter: flip the
        # in-docstring state and don't count the line
        elif line.startswith('"""') or line.startswith("'''"):
            docstring = not docstring
            continue
        else:
            nb_lines += 1
    return nb_lines |
def _decoded_str_len(l):
"""
Compute how long an encoded string of length *l* becomes.
"""
rem = l % 4
if rem == 3:
last_group_len = 2
elif rem == 2:
last_group_len = 1
else:
last_group_len = 0
return l // 4 * 3 + last_group_len |
def oct2bin(octvalue, init_length):
    """Convert octal value to binary"""
    # Parse the octal string, render as binary (strip the '0b' prefix),
    # then left-pad with zeros to the requested width.
    as_int = int(octvalue, 8)
    bits = bin(as_int)[2:]
    return bits.zfill(init_length)
def split_path(path, all_paths):
    """Split path into parent and current folder.

    Returns ('', path) when the path has no parent separator or when the
    computed parent is not a known path in *all_paths*; otherwise returns
    (parent, label) where parent keeps its trailing '/'.

    NOTE(review): the label slice drops the path's final character, which
    assumes folder paths carry a trailing '/' (e.g. 'a/b/' -> ('a/', 'b'))
    -- TODO confirm callers always pass trailing-slash paths.
    """
    # rfind over path[:-1] so the path's own trailing '/' is not matched
    # as the parent separator
    idx = path.rfind('/', 0, -1)
    if idx == -1:
        return '', path
    # parent includes the separator at idx; label excludes the trailing char
    parent, label = path[0:idx + 1], path[idx+1:-1]
    if parent not in all_paths:
        return '', path
    return parent, label |
def _validate_workflow_var_format(value: str) -> str:
"""Validate workflow vars
Arguments:
value {str} -- A '.' seperated string to be checked for workflow variable
formatting.
Returns:
str -- A string with validation error messages
"""
add_info = ''
parts = value.split('.')
if len(parts) == 2:
# workflow.id, workflow.name, item.field_name
typ, attr = parts
if attr not in ('name', 'id'):
add_info = 'The only valid workflow variables with two segments' \
' are workflow.name and workflow.id.'
elif len(parts) == 4:
# workflow.inputs.parameters.<NAME>
# workflow.inputs.artifacts.<NAME>
# workflow.operators.<OPERATORNAME>.image
typ, attr, prop, _ = parts
if attr in ('inputs', 'outputs'):
if prop not in ('parameters', 'artifacts'):
add_info = 'Workflow inputs and outputs variables must be ' \
'"parameters" or "artifacts".'
elif attr == 'operators':
if parts[-1] != 'image':
add_info = 'Workflow operator variable can only access ' \
'the image name.'
else:
add_info = 'Workflow variables must reference to "inputs", "outputs" ' \
'or "operators".'
else:
add_info = 'Workflow variables are either 2 or 4 segments.'
return add_info |
def set_bit(target, bit):
    """
    Returns target but with the given bit set to 1
    """
    mask = 1 << bit
    return target | mask
def user(default=None, request=None, **kwargs):
    """Returns the current logged in user"""
    # Explicit branches replacing the `and/or` idiom; like the original,
    # a falsy user value (e.g. '' or None) falls back to default.
    if request:
        found = request.context.get("user", None)
        if found:
            return found
    return default
def page_not_found(e):
    """
    Handling on page not found
    Args:
        e (): None
    Returns:
        HTML Page
    """
    # 404 handler: (body, status) tuple as expected by Flask-style routing
    body = "<h1>404</h1><p>The resource could not be found</p>"
    status = 404
    return body, status
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.