def isClosedPathStr(pathStr):
"""takes in SVF 'd=' path string and outputs true if closed (according to
SVG)"""
tmp = pathStr.rstrip()
return tmp[len(tmp) - 1] == 'z'
|
def _load_polygons(geojson):
"""
    Loads polygons from a parsed GeoJSON object; works for MultiPolygon, Polygon,
    and FeatureCollection geometries.
"""
polygons = []
if geojson['type'] == 'MultiPolygon':
for polygon in geojson['coordinates']:
polygons.append(polygon)
elif geojson['type'] == 'Polygon':
polygons = [geojson['coordinates']]
elif geojson['type'] == 'FeatureCollection':
for feature in geojson['features']:
geom = feature['geometry']
polygons.extend(_load_polygons(geom))
return polygons
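A minimal illustration (hypothetical coordinates): a FeatureCollection holding a single Polygon is flattened into a list with that one polygon (a list of rings).
fc = {"type": "FeatureCollection",
      "features": [{"type": "Feature",
                    "geometry": {"type": "Polygon",
                                 "coordinates": [[[0, 0], [1, 0], [1, 1], [0, 0]]]}}]}
print(_load_polygons(fc))  # [[[[0, 0], [1, 0], [1, 1], [0, 0]]]]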
|
def fizzBuzz(n):
"""
:type n: int
:rtype: List[str]
"""
    result = []
    for x in range(1, n + 1):
        if x % 5 == 0 and x % 3 == 0:
            result.append("FizzBuzz")
        elif x % 3 == 0:
            result.append("Fizz")
        elif x % 5 == 0:
            result.append("Buzz")
        else:
            result.append(str(x))
    return result
|
def do_step_right(pos: int, step: int, width: int) -> int:
"""Takes current position and do 3 steps to the
right. Be aware of overflow as the board limit
on the right is reached."""
new_pos = (pos + step) % width
return new_pos
|
def get_all_species_alive(species_list):
"""Get all names of species that are alive.
Parameters
----------
species_list : list of strings
List of all species.
Returns
-------
numb_total_population_alive : list of strings
List of all species excluding dead species.
"""
total_population_alive = [
x for x in species_list.keys() if ("dead" not in x)]
numb_total_population_alive = "+".join(total_population_alive)
return numb_total_population_alive
|
def _calculate_length(gene, gene_to_cds_length):
"""Calculates the average coding sequence length of the gene"""
try:
transcripts = gene_to_cds_length[gene]
except KeyError:
transcripts = []
lengths = []
for transcript in transcripts:
lengths.append(transcripts[transcript]["length"])
length = round(sum(lengths) / float(len(lengths)) if len(lengths) != 0 else 0)
return length
|
def service_namespace_type(service_namespace):
"""
Property: ScalingInstruction.ServiceNamespace
"""
valid_values = ["autoscaling", "ecs", "ec2", "rds", "dynamodb"]
if service_namespace not in valid_values:
raise ValueError(
'ServiceNamespace must be one of: "%s"' % (", ".join(valid_values))
)
return service_namespace
|
def tcpPortOpen(host, port):
"""
    Attempts to open a TCP connection to the remote socket at address (host, port) and closes it immediately, to test whether the port is open.
Parameter *host*:
Host address of the socket
Parameter *port*:
TCP port that will be opened
Return value:
This method returns a boolean which is true, if the TCP Port is open and false otherwise.
"""
from socket import socket, AF_INET, SOCK_STREAM
s = socket(AF_INET, SOCK_STREAM)
result = s.connect_ex((host, port))
s.close()
return not result
|
def add_namespace_to_cmd(cmd, namespace=None):
"""Add an optional namespace to the comand."""
return ['ip', 'netns', 'exec', namespace] + cmd if namespace else cmd
|
def _get_two_letter_language_code(language_code):
"""
Shortens language to only first two characters (e.g. es-419 becomes es)
This is needed because Catalog returns locale language which is not always a 2 letter code.
"""
if language_code is None:
return None
elif language_code == '':
return ''
else:
return language_code[:2]
|
def build_content_type(format, encoding='utf-8'):
"""
Appends character encoding to the provided format if not already present.
"""
if 'charset' in format:
return format
return "%s; charset=%s" % (format, encoding)
|
def to_list(x, allow_tuple=False):
"""Normalizes a list/tensor into a list.
If a tensor is passed, we return
a list of size 1 containing the tensor.
# Arguments
x: target object to be normalized.
allow_tuple: If False and x is a tuple,
it will be converted into a list
with a single element (the tuple).
Else converts the tuple to a list.
# Returns
A list.
"""
if isinstance(x, list):
return x
if allow_tuple and isinstance(x, tuple):
return list(x)
return [x]
|
def lists2dict(list1, list2):
"""Return a dictionary where list1 provides
the keys and list2 provides the values."""
# Zip lists: zipped_lists
zipped_lists = zip(list1, list2)
# Create a dictionary: rs_dict
rs_dict = dict(zipped_lists)
# Return the dictionary
return rs_dict
|
def migrated(file_path):
"""check if the jobs defined in file_path are migrated."""
    # we do not migrate the periodics in release repo
# due to https://github.com/openshift/release/pull/7178
if file_path.endswith('periodics.yaml') and 'openshift/release/' in file_path:
return False
if file_path.endswith('presubmits.yaml') and 'ci-operator/jobs/openshift-priv/' in file_path:
return True
return False
|
def duplicates(list, item):
"""Returns index locations of item in list"""
return [i for i, x in enumerate(list) if x == item]
|
def firstPositiveNumber(A):
"""
    A: list[int], sorted in ascending order
    return: int, the index of the first element greater than zero
            (len(A) if there is no positive element)
    """
    if len(A) > 0 and A[0] > 0:
        return 0
    if len(A) == 0 or A[-1] <= 0:
        return len(A)
    l = 0
    h = len(A) - 1
    while l <= h:
        mid = l + (h - l) // 2
        if A[mid] > 0 and A[mid - 1] <= 0:
            return mid
        elif A[mid] > 0:
            h = mid - 1
        else:
            l = mid + 1
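A few illustrative calls (made-up inputs; the list must be sorted in ascending order):
print(firstPositiveNumber([-3, -1, 0, 2, 5]))  # 3, the index of the first value > 0
print(firstPositiveNumber([1, 2, 3]))          # 0
print(firstPositiveNumber([-2, -1]))           # 2, i.e. len(A), since nothing is positive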
|
def get_available_update_count(repositories):
"""Returns the number of available plugin updates
Args:
repositories (dict): A list of repositories with their plugins.
print_info (bool): If set to True, information about the updates will
be printed
Returns:
A tuple containing the number of available updates and info text
"""
updateable_count = 0
for r in repositories:
plugins = r.get("plugins")
if not plugins:
continue
for p in plugins:
if p.get("updateAvailable", False):
updateable_count += 1
if updateable_count == 1:
out = (f"One update is available. "
"Use plugins.update() to install the update.")
elif updateable_count > 1:
out = (f"{updateable_count} updates are available. "
"Use plugins.update() to install the updates.")
else:
out = "No updates available."
return updateable_count, out
|
def data2str(data):
"""
Convert some data to a string.
An empty or None value is returned unchanged (helpful for testing), e.g.:
'57 75 6e 64 65 72 62 61 72 49 52' -> 'WunderbarIR'
'' -> ''
"""
if not data: return data
text = ''.join([chr(int(v, 16)) for v in data.split()])
return text
|
def text(label, name, **kwargs):
"""
Creates a Dialog "text element".
Other Parameters
----------------
placeholder : str
A string displayed as needed to help guide users in completing the
element. 150 character maximum.
subtype : str
['email', 'url', 'tel', 'number']
max_length : int
Maximum input length allowed for element. Up to 150 characters. Defaults
to 150.
min_length : int
Integer Minimum input length allowed for element. Up to 150
characters. Defaults to 0.
optional : bool
Provide true when the form element is not required. By default, form
elements are required.
hint : str
Helpful text provided to assist users in answering a question. Up to 150
characters.
value : str
A default value for this field. Up to 150 characters.
Returns
-------
dict
"""
return {'type': 'text', 'label': label, 'name': name,
**kwargs}
|
def is_multiple(n, divide):
"""
>>> is_multiple(0, 1)
False
>>> is_multiple(10, 1)
True
>>> is_multiple(10, 2)
True
>>> is_multiple(10, 3)
False
"""
return (0 != n) and (0 == (n % divide))
|
def image_type(file):
"""Returns str of file type. 'jpeg', 'png', 'gif'.
Returns None if unable to identify file type"""
    if isinstance(file, str):
        with open(file, 'rb') as f:
            binary_string = f.read(32)
    else:
        binary_string = file.read(32)
if binary_string[6:10] in (b'JFIF', b'Exif'):
return 'jpeg'
elif binary_string.startswith(b'\211PNG\r\n\032\n'):
return 'png'
elif binary_string[:6] in (b'GIF87a', b'GIF89a'):
return 'gif'
else:
return None
|
def check_uv(u, v):
"""
Returns weightings for frequencies u and v
for anisotropic surfaces
"""
if abs(u) + abs(v) == 0:
return 4.
elif u * v == 0:
return 2.
return 1.
|
def max_value_position(value_list):
""" Returns the position of the element with the highest value in value_list
:param value_list(list): the array to be searched through
:return: the index of the max value in value_list
"""
max_value = value_list[0]
max_pos = 0
for index in range(1, len(value_list)):
if value_list[index] > max_value:
max_value = value_list[index]
max_pos = index
return max_pos
|
def offset(requestContext, seriesList, factor):
"""
Takes one metric or a wildcard seriesList followed by a constant, and adds the constant to
each datapoint.
Example:
.. code-block:: none
&target=offset(Server.instance01.threads.busy,10)
"""
for series in seriesList:
series.name = "offset(%s,%g)" % (series.name,float(factor))
series.pathExpression = series.name
for i,value in enumerate(series):
if value is not None:
series[i] = value + factor
return seriesList
|
def is_match(prefix: str, value: str) -> bool:
"""Checks if value has the given case-insensitive prefix."""
assert prefix != '', 'Cannot match with empty prefix'
return value.lower().startswith(prefix.lower())
|
def get_coordinate_from_line(coordinate, line):
"""
Returns a value of a coordinate from a line
"""
for word in line.split(","):
if str(coordinate)+"=" in word:
if coordinate == "phi":
return float(word[word.index("=")+1:])
else:
return float(word[word.index("=")+1:-1])
|
def _transform_and_shift(affine_transform, col_indices, row_indices, cellxh, cellyh):
"""
Transforms indices to coordinates and applies a half pixel shift
Args:
affine_transform (object): The affine transform.
col_indices (1d array): The column indices.
row_indices (1d array): The row indices.
cellxh (float): The half cell width in the x direction.
cellyh (float): The half cell width in the y direction.
Returns:
``numpy.ndarray``, ``numpy.ndarray``
"""
x_coords, y_coords = affine_transform * (col_indices, row_indices)
x_coords += abs(cellxh)
y_coords -= abs(cellyh)
return x_coords, y_coords
|
def _to_bool(text):
"""Convert str value to bool.
Returns True if text is "True" or "1" and False if text is "False" or "0".
Args:
text: str value
Returns:
bool
"""
if text.title() in ("True", "1"):
result = True
elif text.title() in ("False", "0"):
result = False
else:
raise ValueError("Expected 'True', 'False', '1', '0'; got '%s'" % text)
return result
|
def parse_bsub(output):
"""Parse bsub output and return job id.
:param output: stdout of bsub command
:type output: str
:returns: job id
:rtype: str
"""
for line in output.split("\n"):
if line.startswith("Job"):
return line.split()[1][1:-1]
|
def create_set_global_settings_payload(plugin_context: str, payload):
"""Create and return "setGlobalSettings" dictionary to send to the Plugin Manager.
Args:
plugin_context (str): An opaque value identifying the plugin/Property Inspector.
Received during the plugin registration procedure.
payload (dict): Dictionary with info to add to global settings.
Returns:
dict: Dictionary with payload to save data for plugin context.
"""
return {
"event": "setGlobalSettings",
"context": plugin_context,
"payload": payload
}
|
def _validate_isofactor(isofactor, signed):
""" [Docstring]
"""
if isofactor[0] == 0.0:
return (False, "Error: 'isovalue' cannot be zero")
if isofactor[1] <= 1.0:
return (False, "Error: 'factor' must be greater than one")
if not signed and isofactor[0] < 0:
return (False, "Error: Negative 'isovalue' in absolute "
"thresholding mode")
return (True, "")
|
def myfunc(Xs):
""" returns a list of doubled values"""
Ys=[]
for x in Xs:
Ys.append(2*x)
return Ys
|
def is_palindrome(the_string):
"""
    Evaluates a given string and determines whether or not it is a palindrome.
    :param the_string: The string to evaluate.
:returns: True when the string is a palindrome, False otherwise.
"""
idx_a = 0
idx_b = len(the_string) - 1
while idx_a < idx_b:
# SKIP OVER ANY AND ALL SPACES
if the_string[idx_a] == " ":
idx_a += 1
continue
if the_string[idx_b] == " ":
idx_b -= 1
continue
# IF THEY EVER DON'T MATCH IT FAILS.
if the_string[idx_a].lower() != the_string[idx_b].lower():
return False
else:
idx_a += 1
idx_b -= 1
return True
|
def stringify_numbers(d: dict) -> dict:
"""Converts d values that are integers into strings."""
result = {}
for key, value in d.items():
if type(value) == int:
result[key] = str(value)
elif type(value) == dict:
result[key] = stringify_numbers(value)
return result
|
def parse_mp_id(mp_id):
""" Return MP ID string for valid MP ID
Modified from mp_phot package, util.py, .get_mp_and_an_string().
:param mp_id: raw MP identification, either number or other ID, e.g., 5802 (int), '5802', or
'1992 SG4'. [int or string]
:return: for numbered MP, give simply the string, e.g. '5802'.
        for other MP ID, give the string prepended with '~', e.g., '~1992 SG4'.
"""
if isinstance(mp_id, int):
if mp_id < 1:
raise ValueError('MP ID must be a positive integer.')
mp_id_string = str(mp_id) # e.g., '1108' for numbered MP ID 1108 (if passed in as int).
elif isinstance(mp_id, str):
if mp_id[0] not in '0123456789':
raise ValueError('MP ID does not appear valid.')
try:
_ = int(mp_id) # a test only
except ValueError:
            mp_id_string = '~' + mp_id  # e.g., '~1997 TX3' for unnumbered MP ID '1997 TX3'.
else:
mp_id_string = mp_id.strip() # e.g., '1108' for numbered MP ID 1108 (if passed in as string).
else:
raise TypeError('mp_id must be an integer or string representing a valid MP ID.')
return mp_id_string
|
def encode_to_xml(text: str) -> str:
"""
    Encodes ampersand, less-than and greater-than characters in a given string to XML entities.
    """
    text_str = str(text)
    text_str = text_str.replace('&', '&amp;')
    text_str = text_str.replace('<', '&lt;')
    text_str = text_str.replace('>', '&gt;')
return text_str
|
def find_reference_in_list(name, references):
"""
This finds the matching reference (file path) in a list of references.
Args:
name (str): The name of the file to look for.
references (list): The list of references to look through for a match.
Returns:
        reference (str): The matching reference from the list, or 0 if no match is found.
"""
return next((ref for ref in references if name == ref.split('/')[-1]), 0)
|
def classImplements(c, ms):
"""
c is a class, and ms is a set of method names.
    Returns True if c implements all the methods in ms.
Complains otherwise, and returns False
"""
result = True
for n in ms:
m = getattr(c, n, False)
if not (m and callable(m)):
print(c, "does not have method", n)
result = False
return result
|
def nptf(x):
"""
Negative Power To Fraction
For converting the second value in midi.TimesignatureEvent data from
a negative power to a fraction
"""
return round(1 // 2 ** -x)
|
def _format_headers(headers, line_prefix=None):
    """Create a human readable formatted string of headers.
    :type headers: dict
    :param headers: request headers
    :type line_prefix: string
    :param line_prefix: prefix for each printed line (defaults to no prefix)
    :rtype: string
    :return: newline-joined "key: value" lines, each prefixed with line_prefix
    """
    line_prefix = line_prefix or ''
    return '\n'.join('{}{}: {}'.format(line_prefix, k, v)
                     for k, v in headers.items())
|
def initial_variables(args):
"""
"""
return [
args['token'],
args['label'],
args['currencies'].split(',')
]
|
def run_length_encode(data):
""" Encodes the input data using the RLE method.
See: https://en.wikipedia.org/wiki/Run-length_encoding
Args:
data: list, corresponding to the input data.
Returns:
list, result of the compression.
"""
    if not data:
        return []
    val = data[0]
count = 1
compressed_data = []
for i in range(1, len(data)):
if data[i] == data[i-1]:
count += 1
else:
compressed_data.extend([count, val])
val = data[i]
count = 1
compressed_data.extend([count, val])
return compressed_data
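For illustration, a short run over a made-up sequence; each count precedes its value in the output:
print(run_length_encode(['a', 'a', 'a', 'b', 'b', 'c']))  # [3, 'a', 2, 'b', 1, 'c']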
|
def pollutants_from_summary(summary):
"""
Get the list of unique pollutants from the summary.
:param list[dict] summary: The E1a summary.
:return dict: The available pollutants, with name ("pl") as key
and pollutant number ("shortpl") as value.
"""
return {d["pl"]: d["shortpl"] for d in summary}
|
def eq7p10d1_R(pg, roof_slope, W):
"""Equation 7.10-1 rain on snow surcharge load, R:
if pg>0 and pg<=20 (psf) and roof_slope (deg) < W/50 (ft):
R = 5 (psf)
otherwise:
R = 0
"""
pg_le_20psf_and_nonzero = (pg <= 20) & (pg > 0)
low_slope = roof_slope < (W/50)
    return 5 * (pg_le_20psf_and_nonzero & low_slope)
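A small sanity check with assumed values (pg in psf, roof_slope in degrees, W in ft); the bitwise operators also work elementwise if numpy arrays are passed in:
print(eq7p10d1_R(15, 0.5, 30))  # 5: pg is in (0, 20] psf and slope 0.5 deg < 30/50 = 0.6
print(eq7p10d1_R(25, 0.5, 30))  # 0: pg exceeds 20 psf, so no rain-on-snow surcharge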
|
def get_allowed_tokens(config):
"""Return a list of allowed auth tokens from the application config"""
return [token for token in (config.get('AUTH_TOKENS') or '').split(':') if token]
|
def combine_games(games):
"""Combines games from multiple days into a single list."""
return [y for x in games for y in x]
|
def get_unique_list_of_cells(filtered, cells_at_point):
"""
Get unique list of cells
:param filtered: filtered cells
:param cells_at_point: new list of cells at a point for intersection
    :return: the cells in cells_at_point that also appear in filtered
"""
filtered = [cell for cell in cells_at_point if cell in filtered]
return filtered
|
def landmarks_json(name, infos="", normalized="NOT"):
"""Return a json object for the landmarks saving.
Param: name, string. Name of the dataset.
Param: infos, string. Some information on the set.
Param: normalized, string. Tell If the points were normalized & which way.
"""
return {
"name": name,
"type": "LANDMARKS",
"infos": infos,
"datas": [],
"normalized": normalized
}
|
def check(x):
"""
This function is to check if input x is a valid number
Parameters
------
x : string
Return
------
boolean : True if x is a valid number, else False
"""
if '-' in x:
x = x.lstrip('-')
if '.' in x:
x = x.replace('.','0',1)
return x.isnumeric()
|
def fahrenheit_to_celsius(temp_in_f):
"""
Actually does the conversion of temperature from F to C.
PARAMETERS
--------
temp_in_f: float
A temperature in degrees Fahrenheit.
RETURNS
-------
temp_in_c: float
The same temperature converted to degrees Celsius.
"""
return (temp_in_f-32)*5/9
|
def or_tag_filter(tags):
"""Return a "filter by any tags" of the element to create a json advance search.
:param List of :class:`str` tags: Desired filtering tags
:returns: json structure to call the asking tasks.
"""
if not isinstance(tags, list):
tags = [tags]
if len(tags) == 1:
return {
"operator": "Equal",
"field": "Tags",
"value": tags[0]
}
tag_selector = {
"operator": "Or",
"filters":
[
{
"operator": "Equal",
"field": "Tags",
"value": tag_value
} for tag_value in tags
]
}
return tag_selector
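Example output for a hypothetical pair of tags; a single tag collapses to a plain Equal filter:
print(or_tag_filter(["red", "blue"]))
# {'operator': 'Or', 'filters': [{'operator': 'Equal', 'field': 'Tags', 'value': 'red'},
#                                {'operator': 'Equal', 'field': 'Tags', 'value': 'blue'}]}
print(or_tag_filter("red"))
# {'operator': 'Equal', 'field': 'Tags', 'value': 'red'}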
|
def b2h(bseq, delim=' ', reverse=False, leading0x=False):
"""
(b)ytes(2)(h)ex
Convert a sequence of bytes to its hex-formatted string representation.
    @reverse determines whether the bytes will be swapped/reversed to handle endianness
    @leading0x sets whether a leading '0x' will be included in the formatted hex string
Notes:
- If an int (and not a string) is desired, try:
h2i(b2h(X))
- This routine used to be called 'bytes2hexstr'
"""
if reverse: # endianness
bseq = ''.join(f"{b:02X}" for b in bseq)
bseq = delim.join(reversed([bseq[i:i+2] for i in range(0, len(bseq), 2)]))
else:
bseq = delim.join(f"{b:02X}" for b in bseq)
bseq = bseq.lower()
return '0x' + bseq if leading0x else bseq
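A few illustrative calls on an arbitrary two-byte sequence:
print(b2h(b'\x12\xab'))                            # '12 ab'
print(b2h(b'\x12\xab', delim='', reverse=True))    # 'ab12' (byte order swapped)
print(b2h(b'\x12\xab', delim='', leading0x=True))  # '0x12ab'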
|
def _create_weather_key(lat, lng):
"""
Creates a database legal key for the given coordinate.
Args:
lat (string or float) -- latitude of the coordinate
lng (string or float) -- longitude of the coordinate
Returns:
string -- proper key for database
"""
tmp = "%s,%s" % (lat, lng)
return tmp.replace(".", "")
|
def bool_str(x):
"""Return T or F for a bool."""
return 'T' if x else 'F'
|
def prepare_conf_contours(pair):
"""Generates the output text for confidence contours.
"""
text = f"{pair[0][0]} {pair[0][1]} ssr\n"
for result in pair[1]:
text += " ".join(str(t) for t in result)
text += "\n"
return text
|
def znorm(angle):
""" Normalizes an angle between -180 and 180. """
angle = angle % 360
return angle if angle <= 180 else angle - 360
|
def split_array(array, elements):
"""
    Split the array into subarrays of the specified length.
    :param array: original array
    :param elements: length of each subarray
    :return: list of subarrays of the specified length (the last one may be shorter)
"""
length = len(array)
index = 0
end = index + elements
result = []
while end < length:
result.append(array[index:end])
index = end
end = index + elements
result.append(array[index:])
return result
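A quick example with an arbitrary list; the final subarray holds whatever is left over:
print(split_array([1, 2, 3, 4, 5], 2))  # [[1, 2], [3, 4], [5]]
print(split_array([1, 2, 3, 4], 2))     # [[1, 2], [3, 4]]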
|
def parse_test_files_option(opt):
"""
Parse option passed to --test-files into a key-value pair.
>>> parse_test_files_option('generators.py:10,13,19')
('generators.py', [10, 13, 19])
"""
opt = str(opt)
if ':' in opt:
(f_name, rest) = opt.split(':', 1)
return (f_name, list(map(int, rest.split(','))))
else:
return (opt, [])
|
def Odds(p):
"""Computes odds for a given probability.
Example: p=0.75 means 75 for and 25 against, or 3:1 odds in favor.
Note: when p=1, the formula for odds divides by zero, which is
normally undefined. But I think it is reasonable to define Odds(1)
to be infinity, so that's what this function does.
p: float 0-1
Returns: float odds
"""
if p == 1:
return float('inf')
return p / (1 - p)
|
def ordinal(value):
"""Perform ordinal conversion."""
return str(value) + (
list(["th", "st", "nd", "rd"] + ["th"] * 6)[(int(str(value)[-1])) % 10]
if int(str(value)[-2:]) % 100 not in range(11, 14)
else "th"
)
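A few spot checks of the suffix logic, including the 11-13 special case:
print(ordinal(1), ordinal(2), ordinal(3), ordinal(4))  # 1st 2nd 3rd 4th
print(ordinal(11), ordinal(12), ordinal(13))           # 11th 12th 13th
print(ordinal(23), ordinal(112))                       # 23rd 112th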
|
def find_string_between(string, sub_first, sub_last):
"""
Common function to find a substring surrounded by sub_first and sub_last.
sub_last can be set to None if for example you expect to isolate something
at the end of the string. In this case, the whole string after sub_first is
returned.
In case submitted data raises a ValueError, an empty string will be
returned instead.
"""
try:
start = string.index(sub_first) + len(sub_first)
if sub_last is not None:
end = string.index(sub_last, start)
return string[start:end]
else:
return string[start:]
except ValueError:
return ''
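A couple of made-up examples of each mode, including the swallowed ValueError case:
print(find_string_between("name=Alice;age=30", "name=", ";"))    # 'Alice'
print(find_string_between("prefix: payload", "prefix: ", None))  # 'payload'
print(find_string_between("no markers here", "[", "]"))          # '' (ValueError swallowed)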
|
def batch_array(array, max_batch_size):
"""Split an array according the maximum batch_size.
Args:
array: List or 2D Numpy array.
max_batch_size: Integer value with maximum batch_size or None.
Returns:
A list of lists or numpy arrays the concatenation of which is the input
list or array, and the first dimension of each array is less than or equal
to max_batch_size. If max_batch_size is None, then a singleton list with the
input list/array is returned.
"""
if max_batch_size is None:
return [array]
num_batches = (len(array) + max_batch_size - 1) // max_batch_size
assert num_batches > 0
return [
array[(i * max_batch_size):((i + 1) * max_batch_size)]
for i in range(num_batches)
]
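For instance, with a plain list and an assumed maximum batch size of 3:
print(batch_array(list(range(7)), 3))     # [[0, 1, 2], [3, 4, 5], [6]]
print(batch_array(list(range(7)), None))  # [[0, 1, 2, 3, 4, 5, 6]]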
|
def snake_to_camel(s):
"""Turn a snake-case string to a camel-case string.
Args:
s (str): The string to convert to camel-case.
"""
return ''.join([*map(str.title, s.split('_'))])
|
def numOfMismatches(s1, s2):
""" Returns number of character mismatches in two strings """
s1Letters = {k: s1.count(k) for k in s1}
s2Letters = {k: s2.count(k) for k in s2}
# Compare matches
s = {}
for k2 in s2Letters:
if k2 in s1Letters.keys():
s[k2] = abs(s1Letters[k2] - s2Letters[k2])
else:
s[k2] = s2Letters[k2]
    # Sum up the mismatches
mismatches = sum(s.values())
return mismatches
|
def inherits_from(obj, a_class):
"""
Function that determine if a class is an inherited class.
Args:
obj (object any type): The object to analyze.
a_class (object any type): The reference object.
Returns:
Returns True if the object is an instance of a class that
inherited (directly or indirectly) from the specified class ;
otherwise False.
"""
if type(obj) is not a_class:
return isinstance(obj, a_class)
return False
|
def split_numerical_value(numeric_value, splitVal, nextVal):
"""
split numeric value on splitVal
return sub ranges
"""
split_result = numeric_value.split(',')
if len(split_result) <= 1:
return split_result[0], split_result[0]
else:
low = split_result[0]
high = split_result[1]
# Fix 2,2 problem
if low == splitVal:
lvalue = low
else:
lvalue = low + ',' + splitVal
if high == nextVal:
rvalue = high
else:
rvalue = nextVal + ',' + high
return lvalue, rvalue
|
def f4(x):
"""Evaluate the estimate x**4+x**3+x**2+x."""
return x*(x*(x*x+x)+x)+x
|
def pair(x, y):
"""
Cantor pairing function
http://en.wikipedia.org/wiki/Pairing_function#Inverting_the_Cantor_pairing_function
"""
    return ((x + y) * (x + y + 1) // 2) + y
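A few known values of the Cantor pairing function as a quick check:
print(pair(0, 0), pair(1, 0), pair(0, 1), pair(2, 0), pair(1, 1))  # 0 1 2 3 4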
|
def dict_find_key(dd, value):
""" Find first suitable key in dict.
:param dd:
:param value:
:return:
"""
key = next(key for key, val in dd.items() if val == value)
return key
|
def unique(input_list):
"""
Return a list of unique items (similar to set functionality).
Parameters
----------
input_list : list
        A list containing some items that can occur more than once.
Returns
-------
list
        A list with only unique occurrences of each item.
"""
output = []
for item in input_list:
if item not in output:
output.append(item)
return output
|
def linearized_preproc(srcs):
"""
maps from a num-rows length list of lists of ntrain to an
ntrain-length list of concatenated rows
"""
lsrcs = []
for i in range(len(srcs[0])):
src_i = []
for j in range(len(srcs)):
src_i.extend(srcs[j][i][1:]) # b/c in lua we ignore first thing
lsrcs.append(src_i)
return lsrcs
|
def _line_to_array(agile_line):
"""convert the weird AGILEPack weights output to an array of floats"""
entries = agile_line.split(',')[1:]
return [float(x) for x in entries]
|
def is_payload_supported(maintype: str, subtype: str) -> bool:
"""
Papermerge supports pdf, tiff, jpeg and png formats.
Returns true if mimetype (maintype + subtype) is one of
supported types:
PDF => maintype=application, subtype=pdf
TIFF => maintype=image, subtype=tiff
Jpeg => maintype=image, subtype=jpeg
png => maintype=image, subtype=png
Also will return true in case of 'application/octet-stream'.
"""
if not maintype:
return False
if not subtype:
return False
duo = (maintype.lower(), subtype.lower())
supported = (
('application', 'octet-stream'),
('application', 'pdf'),
('image', 'png'),
('image', 'jpeg'),
('image', 'jpg'),
('image', 'tiff'),
)
if duo in supported:
return True
return False
|
def get_tf_node_names(tf_nodes, mode="inputs"):
"""
Inputs:
- tf_nodes: list[str]. Names of target placeholders or output variable.
    - mode: str. When mode == 'inputs', strip the tensor suffix from the input names,
      for instance 'placeholder:0' becomes 'placeholder'.
      When mode == 'outputs', keep the original suffix number, e.g.
      'bn:0' will still be 'bn:0'.
    Return a list of names from the given list of TensorFlow nodes. A tensor name's
    postfix is eliminated if there's no ambiguity; otherwise, the postfix is kept.
"""
if not isinstance(tf_nodes, list):
tf_nodes = [tf_nodes]
names = list()
for n in tf_nodes:
tensor_name = n if isinstance(n, str) else n.name
if mode == "outputs":
names.append(tensor_name)
continue
name = tensor_name.split(":")[0]
if name in names:
# keep postfix notation for multiple inputs/outputs
names[names.index(name)] = name + ":" + str(names.count(name) - 1)
names.append(tensor_name)
else:
names.append(name)
return names
|
def only_sig(row):
"""Returns only significant events"""
if row[-1] == 'yes':
return row
|
def build_in_out_edges(edges):
"""
Given a Dictionary of edge_id -> edge, builds two
dictionaries of node_id -> edge (incoming or outgoing edges from that node).
:param edges: A Dictionary of edge_id -> edge
:return: (incoming_edges, outgoing_edges), a tuple of Dictionaries of node_id to
list of incoming/outgoing edges to/from that node
"""
incoming_edges = {}
outgoing_edges = {}
# Build maps which store all incoming and outgoing edges for every node
    for edge_id in edges.keys():
        edge = edges[edge_id]
        if edge.end not in incoming_edges:
            incoming_edges[edge.end] = []
        incoming_edges[edge.end].append(edge)
        if edge.start not in outgoing_edges:
            outgoing_edges[edge.start] = []
        outgoing_edges[edge.start].append(edge)
    # Sort each node's edge list by edge id for a deterministic order
    for edge_dict in [incoming_edges, outgoing_edges]:
        for node_id in edge_dict:
            edge_dict[node_id] = sorted(edge_dict[node_id], key=lambda x: x.id)
return incoming_edges, outgoing_edges
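A sketch of how this might be called, assuming a simple edge record with id/start/end attributes (the namedtuple below is purely illustrative):
from collections import namedtuple
Edge = namedtuple('Edge', 'id start end')
edges = {1: Edge(1, 'a', 'b'), 2: Edge(2, 'a', 'c'), 3: Edge(3, 'b', 'c')}
incoming, outgoing = build_in_out_edges(edges)
print(outgoing['a'])  # [Edge(id=1, start='a', end='b'), Edge(id=2, start='a', end='c')]
print(incoming['c'])  # [Edge(id=2, start='a', end='c'), Edge(id=3, start='b', end='c')]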
|
def _idempotent(method):
"""Return whether *method* is idempotent."""
return method in ('GET', 'HEAD', 'PUT')
|
def deepenOnce(someList):
"""Deepens list by one level. Example:
[0, 1, 2, 3, 4, 5]
turns into
[[0], [1], [2], [3], [4], [5]]
Args:
someList (iterable): some iterable.
Returns:
list: deepened resultant list
"""
return [[a] for a in someList]
|
def is_name(text):
"""
Checks if text sounds like a name or abbreviation (title or upper case).
:param text: the text to check.
:return: True if it might be a name.
"""
return text.istitle() or text.isupper()
|
def find_grade(total):
    """
    Convert a total score (0-100) into a letter grade.
    :param total: numeric score, 0-100
    :return: str, the letter grade
    """
    # thresholds checked from highest to lowest; anything below 60 is an F
    grade_scale = [(93, 'A'), (90, 'A-'), (87, 'B+'), (83, 'B'), (80, 'B-'),
                   (77, 'C+'), (73, 'C'), (70, 'C-'), (60, 'D')]
    for cutoff, grade in grade_scale:
        if total >= cutoff:
            return grade
    return 'F'
|
def col_collection(records):
"""Takes the collection of records and returns the list of freeform tags."""
z = len(records[0]) - 1
a = records[0][z]
# rewrite!
for x in range(1, len(records)):
print(x)
z1 = len(records[x]) - 1
a = a + records[x][z1]
return list(set(a))
|
def get_padded_string(string: str,
in_string: str,
from_char_index: int) -> str:
""" Return a string that is appropriately padded/indented, given a starting position.
For example, if a starting index of 4 is given for a string " content\ngoes here",
the resulting string becomes " content\n goes here".
"""
pad_count = 0
index = from_char_index
while index >= 0:
# keep going backwards in the string
index -= 1
if index < 0 or in_string[index] == '\n':
# we found the previous line or beginning of string
break
pad_count += 1
if pad_count > 0:
# split content up into separate lines
lines = string.splitlines(keepends=True)
# then append padding between each line
string = (' ' * pad_count).join(lines)
# and get rid of any trailing newlines
string = string.rstrip()
return string
|
def ceil_div_offline(value, factor):
    """Function to get the ceiling of value divided by factor."""
    return (value + factor - 1) // factor
|
def title_invalidates_entry(title):
"""
Determines if the title contains phrases that indicate that the book is invalid
>>> from gender_novels.corpus_gen import title_invalidates_entry
>>> title_invalidates_entry("Index of the Project Gutenberg Works of Michael Cuthbert")
True
>>> title_invalidates_entry("Pride and Prejudice")
False
:param title: str
:return: boolean
"""
title = title.lower()
if title.find("index of the project gutenberg ") != -1:
# print("Was an index")
return True
if title.find("complete project gutenberg") != -1:
# print("Was a compilation thing")
return True
if title.find("translated by ") != -1:
# print("Was a translation")
return True
# if (title.find("vol. ") != -1):
# return True
# if re.match(r"volume \d+", title, flags= re.IGNORECASE):
# return True
return False
|
def convert_to_float(state):
"""Return float of state, catch errors."""
try:
return float(state)
except (ValueError, TypeError):
return None
|
def read_root():
"""Display status."""
return {"status": "up"}
|
def convert_data(data: list) -> list:
"""Convert fixture to new format"""
print(f"Found {len(data)} entries, updating ... ", end='')
for item in data:
item['model'] = 'exams.exam'
fields: dict = item['fields']
fields['minute_author'] = fields.pop('author')
fields['minute_file'] = fields.pop('file')
fields['submitted_on'] = fields.pop('submitted')
fields['is_archived'] = False
print('Done!')
return data
|
def get_heart_rate(message_fields):
""" get_heart_rate
return the heart rate as float from a message.as_dict()['fields'] object
Args:
message_fields: a message.as_dict()['fields'] object (with name 'record')
Returns:
heart rate in bpm or 50. if not found
"""
for message_field in message_fields:
if message_field['name'] == 'heart_rate':
return message_field['value']
return 50.
|
def vecdiff(a, b):
""" The function computes the vector difference in two dimensions """
return [a[0] - b[0], a[1] - b[1]]
|
def line(x1, y1, x2, y2):
"""Returns a list of all of the points in a line
between `x1`, `y1` and `x2`, `y2`. Uses the Bresenham line algorithm.
More info at https://en.wikipedia.org/wiki/Bresenham%27s_line_algorithm"""
points = []
isSteep = abs(y2-y1) > abs(x2-x1)
if isSteep:
x1, y1 = y1, x1
x2, y2 = y2, x2
isReversed = x1 > x2
if isReversed:
x1, x2 = x2, x1
y1, y2 = y2, y1
deltax = x2 - x1
deltay = abs(y2-y1)
error = int(deltax / 2)
y = y2
ystep = None
if y1 < y2:
ystep = 1
else:
ystep = -1
for x in range(x2, x1 - 1, -1):
if isSteep:
points.append((y, x))
else:
points.append((x, y))
error -= deltay
if error <= 0:
y -= ystep
error += deltax
else:
deltax = x2 - x1
deltay = abs(y2-y1)
error = int(deltax / 2)
y = y1
ystep = None
if y1 < y2:
ystep = 1
else:
ystep = -1
for x in range(x1, x2 + 1):
if isSteep:
points.append((y, x))
else:
points.append((x, y))
error -= deltay
if error < 0:
y += ystep
error += deltax
return points
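A quick example of a shallow line from (0, 0) to (3, 1):
print(line(0, 0, 3, 1))  # [(0, 0), (1, 0), (2, 1), (3, 1)]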
|
def sub(proto, *args):
"""
This really should be a built-in function.
"""
return proto.format(*args)
|
def get_tags_gff(tagline):
"""Extract tags from given tagline"""
tags = dict()
for t in tagline.split(';'):
        tt = t.split('=', 1)  # split only on the first '=' so values may contain '='
tags[tt[0]] = tt[1]
return tags
|
def NoTests(path, dent, is_dir):
"""Filter function that can be passed to FindCFiles in order to remove test
sources."""
if is_dir:
return dent != 'test'
return 'test.' not in dent
|
def get_median(values):
    """Calc the median from a list of numbers
    Args:
        values: The list from where the calc will be done
    Returns:
        Return the median from a list of numbers
    """
    values = sorted(values)
    median_index = (len(values) - 1) // 2
    if len(values) % 2 == 0:
        return (values[median_index] + values[median_index + 1]) / 2
    else:
        return values[median_index]
|
def assign_assumed_width_to_province_roads_from_file(asset_width, width_range_list):
"""Assign widths to Province roads assets in Vietnam
The widths are assigned based on our understanding of:
1. The reported width in the data which is not reliable
2. A design specification based understanding of the assumed width based on ranges of
values
Parameters
- asset_width - Numeric value for width of asset
- width_range_list - List of tuples containing (from_width, to_width, assumed_width)
Returns
    assumed_width - assigned width of the road asset based on design specifications
"""
assumed_width = asset_width
for width_vals in width_range_list:
if width_vals[0] <= assumed_width <= width_vals[1]:
assumed_width = width_vals[2]
break
return assumed_width
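An illustrative call with made-up design ranges (from_width, to_width, assumed_width), in metres:
width_ranges = [(0, 4.25, 3.5), (4.25, 6.0, 5.0)]
print(assign_assumed_width_to_province_roads_from_file(4.0, width_ranges))  # 3.5
print(assign_assumed_width_to_province_roads_from_file(7.5, width_ranges))  # 7.5 (no range matches)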
|
def buildbracemap(code):
"""Build jump map.
Args:
    code: List or string of BF chars.
Returns:
bracemap: dict mapping open and close brace positions in the code to their
destination jumps. Specifically, positions of matching open/close braces
if they exist.
correct_syntax: True if all braces match. False if there are unmatched
braces in the code. Even if there are unmatched braces, a bracemap will
be built, and unmatched braces will map to themselves.
"""
bracestack, bracemap = [], {}
correct_syntax = True
for position, command in enumerate(code):
if command == '[':
bracestack.append(position)
if command == ']':
if not bracestack: # Unmatched closing brace.
bracemap[position] = position # Don't jump to any position.
correct_syntax = False
continue
start = bracestack.pop()
bracemap[start] = position
bracemap[position] = start
if bracestack: # Unmatched opening braces.
for pos in bracestack:
bracemap[pos] = pos # Don't jump to any position.
correct_syntax = False
return bracemap, correct_syntax
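Two small examples: a balanced program and one with an unmatched opening brace:
print(buildbracemap('[[]]'))  # ({1: 2, 2: 1, 0: 3, 3: 0}, True)
print(buildbracemap('[['))    # ({0: 0, 1: 1}, False)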
|
def db_ping(ctx):
""" Ping the connection to mongodb database
:returns: bool
"""
try:
config = ctx.obj["config"]
c = ctx.obj["db"]
c.server_info()
return True
    except Exception:
return False
|
def get_many_cases(data):
"""
    Get the result case ids that are a subset of the given data/form node
    data = (u'Element Checks', None, [
        (u'ElementDim', 5, []),
        (u'Min Edge Length', 6, []),
        (u'Min Interior Angle', 7, []),
        (u'Max Interior Angle', 8, []),
    ])
    >>> get_many_cases(data)
    [5, 6, 7, 8]
    >>> get_many_cases((u'Max Interior Angle', 8, []))
    [8]
"""
name, case, rows = data
if case is None:
# remove many results
# (Geometry, None, [results...])
cases = []
for irow, row in enumerate(rows):
name, row_id, data2 = row
cases += get_many_cases(row)
else:
cases = [case]
return cases
|
def multiplicative_persistence(num: int) -> int:
"""
Return the persistence of a given number.
https://en.wikipedia.org/wiki/Persistence_of_a_number
>>> multiplicative_persistence(217)
2
>>> multiplicative_persistence(-1)
Traceback (most recent call last):
...
ValueError: multiplicative_persistence() does not accept negative values
>>> multiplicative_persistence("long number")
Traceback (most recent call last):
...
ValueError: multiplicative_persistence() only accepts integral values
"""
if not isinstance(num, int):
raise ValueError("multiplicative_persistence() only accepts integral values")
if num < 0:
raise ValueError("multiplicative_persistence() does not accept negative values")
steps = 0
num_string = str(num)
while len(num_string) != 1:
numbers = [int(i) for i in num_string]
total = 1
for i in range(0, len(numbers)):
total *= numbers[i]
num_string = str(total)
steps += 1
return steps
|
def to_center(title, space, object=str()):
"""
    This function is responsible for centering the title. It accepts at least two arguments.
    :param title: the title to center, string only.
    :param space: the total width, numbers only.
    :param object: the fill character, e.g.: - . = ~ < > _ | among others.
    :return: The centered text.
"""
try:
if object == '':
receive = f'{title:^{space}}'
else:
receive = f'{title:{object}^{space}}'
except ValueError as err:
        return f'Invalid value given, {err}'
except TypeError as err:
        return f'Invalid type given, {err}'
else:
return receive
|