content
stringlengths 42
6.51k
|
|---|
def _escape(s: str) -> str:
"""Escapes special chracters in inputrc strings"""
return s.replace("\\", "\\\\").replace('"', '\\"')
|
def aXbXa(v1, v2):
    """Return the (x, y) component of v1 X v2 X v1 (X = cross product).

    The 2D inputs (x, y) are treated as 3D vectors with z=0; the inner
    cross product only has a z component, so the final result lies back
    in the xy-plane.
    """
    ax, ay = v1[0], v1[1]
    ay_bx = ay * v2[0]
    ax_by = ax * v2[1]
    return (ay * (ay_bx - ax_by), ax * (ax_by - ay_bx))
|
def fetch_online(ticker='MSFT', days=1):
    """
    Fetch stock quotes for the most recent 'days'.

    NOTE(review): this is an unimplemented stub -- the loop body is a
    no-op, so an empty list is always returned regardless of arguments.

    :param ticker: name of the stock to fetch the data for
    :param days: how many most recent data to fetch. days = 1 fetches for today only.
    :return: list of quotes for the most recent 'days' (currently always empty)
    """
    data = []
    for day in range(days):
        pass  # TODO: fetch the quote for this day and append it to 'data'
    return data
|
def get_check_name(geojson):
    """
    Return the "generator" value from a FeatureCollection's properties.

    :param geojson: dict-like FeatureCollection with a "properties" mapping
    :return: the generator string, or None when "properties" or "generator"
        is missing (previously a missing "properties" key raised
        AttributeError on None)
    """
    return (geojson.get("properties") or {}).get("generator")
|
def get_common_count(list1, list2):
    """Return how many distinct elements the two lists share.

    :param list1: list
    :param list2: list
    :return: size of the set intersection
    """
    return len(set(list1) & set(list2))
|
def _pad_key(key: str):
"""pads key with / if it does not have it"""
if key.startswith("/"):
return key
return "/" + key
|
def convert_kg_to_target_units(data_kg, target_units, kg_to_kgC):
    """
    Converts a data array from kg to one of several types of target units.

    Args:
        data_kg: numpy ndarray (or scalar)
            Input data array, in units of kg.
        target_units: str
            Name of the units to convert to, e.g. 'Tg', 'Tg C', 'Mg',
            'Mg C', 'kg', 'kg C', 'g', 'g C'.
        kg_to_kgC: float
            Conversion factor from kg to kg carbon (used for the "... C"
            variants).
    Returns:
        data: numpy ndarray
            Output data array, converted to 'target_units'.
    Raises:
        ValueError: if 'target_units' is not one of the supported units.
    Remarks:
        At present, only those unit conversions corresponding to the
        GEOS-Chem benchmarks have been implemented.
        This is an internal routine, which is meant to be called
        directly from convert_units.
    """
    # Mass-prefix scale factors relative to kg; None means "no scaling"
    # so that plain "kg" returns the input object unchanged.
    scale_factors = {"Tg": 1e-9, "Gg": 1e-6, "Mg": 1e-3, "kg": None, "g": 1e3}
    base_units = target_units
    is_carbon = base_units.endswith(" C")
    if is_carbon:
        base_units = base_units[:-2]
    if base_units not in scale_factors:
        msg = "Target units {} are not yet supported!".format(target_units)
        raise ValueError(msg)
    data = data_kg * kg_to_kgC if is_carbon else data_kg
    factor = scale_factors[base_units]
    if factor is not None:
        data = data * factor
    # Return converted data
    return data
|
def weight_of(vertex1, vertex2, edges):
    """Return the weight of the edge joining the two vertices.

    Scans *edges* (tuples whose third element is the weight) for the first
    one containing both vertices; returns None for identical vertices or
    when no edge matches.
    """
    if vertex1 == vertex2:
        return None
    match = next(
        (edge for edge in edges if vertex1 in edge and vertex2 in edge),
        None,
    )
    return match[2] if match is not None else None
|
def remove_OOV(text, vocab):
    """
    Drop all out-of-vocabulary words from *text* and return the result.
    """
    in_vocab = (word for word in str(text).split() if word in vocab)
    return " ".join(in_vocab)
|
import torch

def inv_depths_normalize(inv_depths):
    """
    Normalize each inverse-depth map by its per-image spatial mean.

    Parameters
    ----------
    inv_depths : list of torch.Tensor [B,1,H,W]
        Inverse depth maps
    Returns
    -------
    norm_inv_depths : list of torch.Tensor [B,1,H,W]
        Normalized inverse depth maps
    """
    normalized = []
    for inv_depth in inv_depths:
        # Mean over H and W with keepdim so the division broadcasts.
        mean = inv_depth.mean(2, True).mean(3, True)
        normalized.append(inv_depth / mean.clamp(min=1e-6))
    return normalized
|
def _getbuf(data):
"""Converts data into ascii,
returns bytes of data.
:param str bytes bytearray data: Data to convert.
"""
if isinstance(data, str):
return data.encode("ascii")
return bytes(data)
|
def insert_file_paths(command, file_paths, start_delim='<FILE:', end_delim='>'):
    """
    Replace delimiter-marked tokens in *command* with file paths.

    Tokens of the form <FILE:key> are looked up in *file_paths*; all
    other tokens pass through unchanged.
    """
    def expand(token):
        if token.startswith(start_delim) and token.endswith(end_delim):
            key = token[len(start_delim):-len(end_delim)]
            return file_paths[key]
        return token

    return ' '.join(expand(token) for token in command.split())
|
def mix(x, y, a):
    """glsl `mix` function: linear blend of x and y by factor a."""
    return (1 - a) * x + a * y
|
def get_city_by_id(item_id):
    """Get City by Item ID.

    Given the item ID of a luxury good, return which city it needs to be
    brought to (None when no known keyword occurs in the ID).

    Args:
        item_id (str): Item ID
    Returns:
        str: City name the item should be brought to.
    """
    keyword_to_city = (
        ("RITUAL", "Caerleon"),
        ("KNOWLEDGE", "Martlock"),
        ("SILVERWARE", "Lymhurst"),
        ("DECORATIVE", "Fort Sterling"),
        ("TRIBAL", "Bridgewatch"),
        ("CEREMONIAL", "Thetford"),
    )
    for keyword, city in keyword_to_city:
        if keyword in item_id:
            return city
    return None
|
def passes_language_test(t008, t041s):
    """
    Checks if data in 008 and 041$a fulfills Recap language test.

    args:
        t008: str, value of 008 MARC tag
        t041s: list, list of language codes found in 041 $a (may be None)
    returns:
        Boolean: True if applicable for Recap, False if not
        (multilanguage materials including English are not sent to Recap)
    """
    codes = set(t041s) if t041s is not None else set()
    # 008 positions 35-37 hold the primary language code.
    codes.add(t008[35:38])
    return "eng" not in codes
|
def trimBytes(bs):
    """Truncate a byte string at its first NUL byte (unchanged if none)."""
    return bs.partition(b'\0')[0]
|
def is_numeric(x):
    """Return True when *x* can be converted to a float."""
    try:
        float(x)
    except (ValueError, TypeError):
        return False
    return True
|
def applyF_filterG(L, f, g):
    """
    Mutate L so it keeps only elements i for which g(f(i)) is True.

    Assumes L is a list of integers; f maps an int to an int and g maps
    an int to a bool.  Returns the largest surviving element, or -1 when
    the mutated list is empty.
    """
    survivors = [i for i in L if g(f(i))]
    L[:] = survivors
    return max(L) if L else -1
|
def _build_index_definition(index):
"""
Creates request object to Index to be deployed
:param index:
:return:
"""
index_def = {
"IndexName": index["name"],
"KeySchema": [
{
"AttributeName": index["index_key_name"],
"KeyType": "HASH"
}
],
"Projection": {
"ProjectionType": "ALL"
}
}
if index.get('index_sort_key_name'):
index_def['KeySchema'].append(
{
"AttributeName": index["index_sort_key_name"],
"KeyType": "RANGE"
})
return index_def
|
def test_case(panel):
"""Return a simple case"""
case_info = {
"case_id": "1",
"genome_build": 37,
"owner": "cust000",
"individuals": [
{"analysis_type": "wgs", "sex": 1, "phenotype": 2, "individual_id": "ind1"}
],
"status": "inactive",
"panels": [panel],
}
return case_info
|
def strip_leading_output_cell(lyx_string):
    """Return `string` with any leading output cell stripped off.

    Looks for the start of a LyxNotebookCell:Output inset within the
    first few lines; if found, drops every line of that inset (from its
    \\begin_inset marker through \\end_inset) and returns the remaining
    lines re-joined with newlines.  If no cell starts within the search
    window, the input is returned unchanged.
    """
    lyx_string_lines = lyx_string.splitlines()
    saved_lines = []
    max_search_lines = 3
    found_cell = False   # True once the Output inset's begin marker was seen
    inside_cell = False  # True while iterating lines inside the inset
    for count, line in enumerate(lyx_string_lines):
        if not found_cell and line.startswith("\\begin_inset Flex LyxNotebookCell:Output:"):
            found_cell = True
            inside_cell = True
        elif inside_cell and line.startswith(r"\end_inset"):
            # The \end_inset line itself is dropped; later lines are kept.
            inside_cell = False
        elif not inside_cell:
            saved_lines.append(line)
        else:
            pass # Do nothing; lines inside the Output cell are ignored.
        if count > max_search_lines and not found_cell:
            return lyx_string # No output cell found.
    return "\n".join(saved_lines)
|
def _positive_int(integer_string, strict=False, cutoff=None):
"""
Cast a string to a strictly positive integer.
"""
if integer_string:
ret = int(integer_string)
else:
return integer_string
if ret < 0 or (ret == 0 and strict):
raise ValueError()
if cutoff:
return min(ret, cutoff)
return ret
|
def star_sub_sections(body: str):
    r"""Star LaTeX sectioning commands to suppress numbering.

    Rewrites \subsection -> \subsection* and
    \subsubsection -> \subsubsection*.
    """
    for command in (r'\subsection', r'\subsubsection'):
        body = body.replace(command, command + '*')
    return body
|
def sort_dict_items_by_key(_dict):
    """Return the dict's (key, value) pairs as a list sorted by key."""
    def key_of(item):
        return item[0]

    return sorted(_dict.items(), key=key_of)
|
def reform_uid(uid):
    """
    Convert an underscore-flattened uid back to scheme://a/b/... form.

    The first three characters are the scheme, characters 3-5 (the
    flattened separator) become "://", and remaining underscores
    become "/".
    """
    scheme, flattened = uid[:3], uid[6:]
    return scheme + "://" + flattened.replace("_", "/")
|
def get_default_nncf_compression_config(h, w):
    """
    Return the default NNCF config for this repository (int8 quantization).

    :param h: input sample height
    :param w: input sample width
    """
    # Number of samples from the training dataset to consume as sample model
    # inputs for purposes of setting initial minimum and maximum quantization
    # ranges.
    range_init = {'num_init_samples': 8192}
    # Number of samples from the training dataset to pass through the model at
    # initialization in order to update batchnorm statistics of the original
    # model. The actual number of samples will be a closest multiple of the
    # batch size.
    bn_adaptation = {'num_bn_adaptation_samples': 8192}
    quantization = {
        'algorithm': 'quantization',
        'initializer': {
            'range': range_init,
            'batchnorm_adaptation': bn_adaptation,
        },
    }
    return {
        'input_info': {'sample_size': [1, 3, h, w]},
        'compression': [quantization],
        'log_dir': '.',
    }
|
def stations_by_river(stations):
    """Group MonitoringStation objects by the river they lie on.

    Given a list of MonitoringStation objects, returns a dictionary
    mapping each river name to the list of stations on that river.
    """
    rivers = {}
    for station in stations:
        rivers.setdefault(station.river, []).append(station)
    return rivers
|
def binary(i, width):
    """
    Return the lowest *width* binary digits of *i* as a list of ints.

    >>> binary(0, 5)
    [0, 0, 0, 0, 0]
    >>> binary(15, 4)
    [1, 1, 1, 1]
    >>> binary(14, 4)
    [1, 1, 1, 0]
    """
    padded = bin(i)[2:].rjust(width, "0")
    return [int(digit) for digit in padded[-width:]]
|
def merge_strings(*args, **kwds):
    """Join the non-empty strings in *args* with the 'sep' keyword.

    The default separator is an empty string.
    """
    separator = kwds.get('sep', '')
    non_empty = [piece for piece in args if piece]
    return separator.join(non_empty)
|
def get_powers_of_2(_sum):
    """Decompose *_sum* into the powers of 2 that sum to it.

    Walks the binary representation from the least significant bit,
    emitting 2**position for every set bit, in ascending order.

    :param _sum: a sum of all elements of the sequence to be returned
    :returns: a list of powers of two whose sum is given
    """
    powers = []
    for position, digit in enumerate(bin(_sum)[:1:-1]):
        if int(digit):
            powers.append(2 ** position)
    return powers
|
def hash_generator(token_to_id, tokens):
    """Generate hash for tokens in 'tokens' using 'token_to_id'.

    Treats the token IDs as digits of a base-len(token_to_id) number,
    most significant digit first.

    Args:
        token_to_id: dict. A dictionary which maps each token to a unique ID.
        tokens: list(str). A list of tokens.
    Returns:
        int. Hash value generated for tokens in 'tokens' using 'token_to_id'.
    """
    hash_val = 0
    base = len(token_to_id) ** (len(tokens) - 1)
    for token in tokens:
        hash_val += token_to_id[token] * base
        # Floor division keeps the hash an exact int; the original "/="
        # produced a float, losing precision for long token sequences and
        # contradicting the documented int return type.
        base //= len(token_to_id)
    return hash_val
|
def getvalue(row, name, mapping=None):
    """If name in mapping, return row[mapping[name]], else return row[name].

    *mapping* defaults to no remapping; the original mutable default
    argument ``mapping={}`` was replaced with None (shared-default
    pitfall), with identical behavior for all callers.
    """
    if mapping and name in mapping:
        return row[mapping[name]]
    return row[name]
|
def inject_post_param(request, injectionstring):
    """
    Generates a list of new requests with replaced/modified post parameters.

    NOTE(review): unimplemented stub -- always returns an empty list.
    (Also: the original docstring documented 'injection_string', which does
    not match the actual parameter name 'injectionstring'.)

    :param request: request instance
    :param injectionstring: list of strings to inject into the request
    :return: list of requests (currently always empty)
    """
    requests = []
    return requests
|
def _client_row_class(client: dict) -> str:
"""
Set the row class depending on what's in the client record.
"""
required_cols = ['trust_balance', 'refresh_trigger']
for col in required_cols:
if col not in client:
return 'dark'
try:
if client['trust_balance'] > client['refresh_trigger']:
return 'success'
except TypeError:
return 'dark'
return 'danger'
|
def populate_src_and_dst_dicts_with_single_offense(offense, src_ids, dst_ids):
    """
    helper function: copy the offense's source and local destination
    address ids into the given dictionaries as id -> id entries.
    Keys whose values are not lists are ignored.
    :return: None
    """
    for key, target in (
        ("source_address_ids", src_ids),
        ("local_destination_address_ids", dst_ids),
    ):
        values = offense.get(key)
        if isinstance(values, list):
            for address_id in values:
                target[address_id] = address_id
    return None
|
def greatest_common_divisor(number1: int, number2: int) -> int:
    """Return greatest common divisor of number1 and number2.

    Uses the Euclidean algorithm -- O(log min(a, b)) instead of the
    original linear countdown from min(a, b).  Also fixes gcd(n, 0),
    which previously returned 0 instead of n.
    """
    while number2:
        number1, number2 = number2, number1 % number2
    return abs(number1)
|
def get_class_source_from_source(source: str) -> str:
    """Strip any "@version" pin from *source* and return the class source.

    Args:
        source: source string, possibly pinned as "module.path@version"
            (need not be pinned at all).
    """
    class_source, _, _ = source.partition("@")
    return class_source
|
def name_class(classname):
    """Convert an AMQP class name to a Python class name (first letter
    upper-cased, remainder lower-cased)."""
    return classname.capitalize()
|
def get_next_moves(board):
    """Return the indexes of all empty (0) cells, i.e. the allowed moves."""
    moves = []
    for index, piece in enumerate(board):
        if piece == 0:
            moves.append(index)
    return moves
|
def wer(h, r):
    """
    Calculation of WER with Levenshtein distance.
    Works only for iterables up to 254 elements (uint8).
    O(nm) time and space complexity.
    Parameters
    ----------
    h : list
        hypothesis tokens
    r : list
        reference tokens
    Returns
    -------
    int
    Examples
    --------
    >>> wer("who is there".split(), "is there".split())
    1
    >>> wer("who is there".split(), "".split())
    3
    >>> wer("".split(), "who is there".split())
    3
    """
    # initialisation
    import numpy
    if len(h) == 0:
        return len(r)
    d = numpy.zeros((len(r) + 1, len(h) + 1), dtype=numpy.uint8)
    # Fill the first row/column in O(n + m); the original iterated the
    # full (n+1) x (m+1) grid just to set the border.
    d[0, :] = numpy.arange(len(h) + 1)
    d[:, 0] = numpy.arange(len(r) + 1)
    # computation
    for i in range(1, len(r) + 1):
        for j in range(1, len(h) + 1):
            if r[i - 1] == h[j - 1]:
                d[i][j] = d[i - 1][j - 1]
            else:
                substitution = d[i - 1][j - 1] + 1
                insertion = d[i][j - 1] + 1
                deletion = d[i - 1][j] + 1
                d[i][j] = min(substitution, insertion, deletion)
    return d[len(r)][len(h)]
|
def not_equal(quant1, quant2):
    """Binary inequality check, evaluated symmetrically in both directions."""
    forward = quant1 != quant2
    return forward and quant2 != quant1
|
def remove_prefix(text, prefix):
    """Return *text* with *prefix* stripped from the front, if present."""
    if not text.startswith(prefix):
        return text
    return text[len(prefix):]
|
def clean_filename(filename):
    """
    Heuristically replaces known extensions to create sensible output file name.

    :param filename: the input file name to strip extensions from
    """
    # Parenthesized: the original "A or B and C" grouped as "A or (B and C)",
    # so dotfiles ending in .conll10 were stripped despite the guard.
    if (filename.endswith(".conll10") or filename.endswith(".conllu")) and not filename.startswith("."):
        return filename.replace(".conll10", "").replace(".conllu", "")
    return filename
|
def as_text(bytes_or_text, encoding="utf-8"):
    """Returns the given argument as a unicode string.

    Args:
        bytes_or_text: A `bytes` or `str` object.
        encoding: A string indicating the charset for decoding unicode.
    Returns:
        A `str` (Python 3) object.
    Raises:
        TypeError: If `bytes_or_text` is not a binary or unicode string.
    """
    if isinstance(bytes_or_text, bytes):
        return bytes_or_text.decode(encoding)
    if isinstance(bytes_or_text, str):
        return bytes_or_text
    raise TypeError(
        "Expected binary or unicode string, got %r" % bytes_or_text
    )
|
def combin(n, r):
    """A fast way to calculate binomial coefficients by Andrew Dalke (contrib).

    Returns C(n, r), or 0 when r is outside [0, n].
    """
    if not 0 <= r <= n:
        return 0
    numerator = 1
    denominator = 1
    # Multiply over the shorter of r and n-r factors.
    for step in range(1, min(r, n - r) + 1):
        numerator *= n
        denominator *= step
        n -= 1
    return numerator // denominator
|
def simplified_collection(constants):
    """Dummy collection for tests"""
    base = f"/{constants['api_name']}"
    page = f"{base}/DroneCollection?page=1"
    return {
        "@context": f"{base}/contexts/DroneCollection.jsonld",
        "@id": f"{base}/DroneCollection/1",
        "@type": "DroneCollection",
        "members": [{"@id": f"{base}/Drone/1", "@type": "Drone"}],
        "search": {
            "@type": "hydra:IriTemplate",
            "hydra:mapping": [
                {
                    "@type": "hydra:IriTemplateMapping",
                    "hydra:property": "http://auto.schema.org/speed",
                    "hydra:required": False,
                    "hydra:variable": "DroneState[Speed]",
                }
            ],
            "hydra:template": f"{base}/Drone(DroneState[Speed])",
            "hydra:variableRepresentation": "hydra:BasicRepresentation",
        },
        "totalItems": 1,
        "view": {
            "@id": page,
            "@type": "PartialCollectionView",
            "first": page,
            "last": page,
            "next": page,
        },
    }
|
def format_patch(patch_parts):
    """Format the patch parts back into a patch string."""
    return f"{patch_parts['patch']}.{patch_parts['prerel']}{patch_parts['prerelversion']}"
|
def degrees_to_meters(degrees):
    """
    Convert geographic degrees to meters using 111195 m/degree
    (= Earth mean radius * PI / 180; 'maximum error using this method
    is ~ 0.1%').
    :see: https://stackoverflow.com/questions/12204834/get-distance-in-meters-instead-of-degrees-in-spatialite
    """
    return 111195 * degrees
|
def has_repeating_pair(string):
    """Check if the string contains a pair of letters that appears at
    least twice without overlapping."""
    for start in range(len(string) - 1):
        pair = string[start:start + 2]
        if pair in string[start + 2:]:
            return True
    return False
|
def _get_cpus(metrics):
"""Get a list of strings representing the CPUs available in ``metrics``.
:param list metrics: The metrics used to look for CPUs.
:rtype: :py:class:`list`
The returned strings will begin with the CPU metric name. The list is sorted in ascending order.
"""
cpus = list({m["name"].rpartition("cpu")[0] for m in metrics})
cpus.sort()
return cpus
|
def isprime(potential_prime_number):
    """Primality test: truncates to int, takes the absolute value, then
    trial-divides by odd candidates up to the square root."""
    n = abs(int(potential_prime_number))
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2  # 2 is the only even prime
    divisor = 3
    while divisor * divisor <= n:
        if n % divisor == 0:
            return False
        divisor += 2
    return True
|
def trim_comments(val):
    """
    Strip an in-line comment beginning with "#" and surrounding whitespace.
    """
    return val.partition("#")[0].strip()
|
def url_join(*parts):
    """Join parts of URL and handle missing/duplicate slashes."""
    trimmed = [str(part).strip("/") for part in parts]
    return "/".join(trimmed)
|
def plus_percent_str(x):
    """Format a percent string with explicit sign: one decimal place
    below 10, none at or above."""
    spec = '{:+.1f}%' if x < 10 else '{:+.0f}%'
    return spec.format(x)
|
def filter_by_channel(archives, allowed_channels):
    """
    Filters out archive groups whose channel multiset does not match any
    entry of allowed_channels (order-insensitive comparison).
    """
    import collections
    allowed_counters = [collections.Counter(channels) for channels in allowed_channels]
    allowed_lengths = {len(channels) for channels in allowed_channels}
    kept = []
    for group in archives:
        # Cheap length pre-filter before the Counter comparison.
        if len(group) not in allowed_lengths:
            continue
        group_channels = collections.Counter(entry[1] for entry in group)
        if any(group_channels == counter for counter in allowed_counters):
            kept.append(group)
    return kept
|
def cal_func(step, n, N, per):
    """
    Scale *step* by the ratio of a segment type's count to the retained
    count of the most frequent type.

    Parameters
    ----------
    step : int
        base moving-step length.
    n : int
        count of the segment type being balanced.
    N : int
        count of the most frequent of the five types.
    per : float (less than 1)
        fraction of the most frequent type retained in the final dataset.
    Returns
    -------
    moving_step : int
    """
    retained = N * per
    # moving_step-150 is good for NST
    return round(step * (n / retained))
|
def get_alignment_character(alignment: str) -> str:
    """Return the str.format alignment character for *alignment*.

    left/l -> "" (default), right/r -> ">", center/c -> "^"; anything
    else falls back to "".
    """
    mapping = {
        "left": "", "l": "",
        "right": ">", "r": ">",
        "center": "^", "c": "^",
    }
    return mapping.get(alignment, "")
|
def powerset(L):
    """
    Constructs the power set ('Potenzmenge') of a given list.
    return list: of all possible subsets (as tuples, smallest first)
    """
    import itertools
    subsets = itertools.chain.from_iterable(
        itertools.combinations(L, size) for size in range(len(L) + 1)
    )
    return list(subsets)
|
def wrap(width, text):
    """
    Greedily wrap *text* into lines of at most *width* characters
    (breaking at word boundaries only); returns the list of lines.
    """
    lines = []
    current = []
    used = 0
    for word in text.split():
        # +1 accounts for the separating space (also counted for the
        # first word, matching the original accounting).
        used += len(word) + 1
        if used <= width:
            current.append(word)
        else:
            lines.append(" ".join(current))
            current = [word]
            used = len(word)
    lines.append(" ".join(current))
    return lines
|
def _tuple_to_str(value: tuple) -> str:
""" returns a tuple as a string without parentheses """
return ','.join(map(str, value))
|
def optical_spectra(header_dict):
    """
    Build (and print) the SDSS DR12 detail-page URL for a spectrum.

    :param header_dict: mapping with 'MJD', 'FIBERID' and 'PLATEID' keys
    :return: the URL string
    """
    url = (
        "https://dr12.sdss.org/spectrumDetail?mjd={}&fiber={}&plateid={}"
        .format(header_dict['MJD'], header_dict['FIBERID'], header_dict['PLATEID'])
    )
    print("\nOptical Spectra:")
    print(url)
    return url
|
def get_url(js):
    """Pick the best URL from a manifest dict: 'homepage' first, then
    checkver 'url', then checkver 'github'; '' when none are present."""
    if 'homepage' in js:
        return js['homepage']
    if 'checkver' in js:
        checkver = js['checkver']
        if 'url' in checkver:
            return checkver['url']
        if 'github' in checkver:
            return checkver['github']
    return ''
|
def b(s, encoding="utf-8"):
    """Coerce a str/int/float to bytes; bytes pass through unchanged."""
    if isinstance(s, bytes):
        return s
    if not isinstance(s, (str, int, float)):
        raise TypeError("unsupported type %s of %r" % (s.__class__.__name__, s))
    return str(s).encode(encoding)
|
def invoke_lambda_demo(*args):
    """Demo of a stand-alone way of using lambda.

    Examples:
        >>> invoke_lambda_demo(10, 25, 'blah')\n
        Here is what is happening:
         (lambda x: x + x)(item)
        [20, 50, 'blahblah']
    Returns:
        list: double of each given argument.
    """
    print("Here is what is happening:")
    print(" (lambda x: x + x)(item)")
    print("")
    return [(lambda x: x + x)(item) for item in args]
|
def tsub(tup1, tup2):
    """Element-wise difference tup1 - tup2 for 2-tuples."""
    dx = tup1[0] - tup2[0]
    dy = tup1[1] - tup2[1]
    return (dx, dy)
|
def remove_nones(dictionary: dict) -> dict:
    """Return a copy of *dictionary* without the None-valued entries."""
    cleaned = {}
    for key, value in dictionary.items():
        if value is not None:
            cleaned[key] = value
    return cleaned
|
def prepare_input_parameters(input_layer_size, hidden_layer_size, number_of_labels,
                             lambda_value):
    """Prepare the network input parameters as a dictionary."""
    return {
        'input_layer_size': input_layer_size,
        'hidden_layer_size': hidden_layer_size,
        'number_of_labels': number_of_labels,
        'lambda_value': lambda_value,
    }
|
def get_nbits_to_copy(i: int, j: int, n: int) -> int:
    """Returns the number of bits to copy during a single byte process.

    :param i: the number of the total bits processed.
    :param j: the number of bits processed on current base type.
    :param n: the number of bits current base type occupy.
    """
    remaining_in_type = n - j
    to_type_byte_edge = 8 - (j % 8)
    to_total_byte_edge = 8 - (i % 8)
    return min(remaining_in_type, to_type_byte_edge, to_total_byte_edge)
|
def _validate_severity(parser, arg):
"""Check that the severity level provided is correct."""
valid_severities = {"info": 0, "warning": 1, "error": 2}
if arg.strip().lower() not in valid_severities:
parser.error("Invalid severity. Options are error, warning, or info")
else:
return valid_severities[arg.strip().lower()]
|
def adjust_data(code_list, noun=12, verb=2):
    """Set the computer to a desired state by writing the noun and verb
    into positions 1 and 2 of *code_list* (mutated in place and returned).

    Parameters
    ----------
    code_list : list
        opcode as provided by advent of code
    noun : int, optional
        the first parameter (in position 1), by default 12
    verb : int, optional
        the second parameter (in position 2), by default 2
    """
    code_list[1], code_list[2] = noun, verb
    return code_list
|
def norm_random(random_number):
    """Map a random integer to a float in [0, 1).

    Sums the masked values of the low 20 bits (i.e. the value of
    random_number's low 20 bits), reduces the sum modulo 20 and scales
    it into [0, 1).
    """
    precision = 20
    total = 0.
    for position in range(precision):
        total += random_number & (1 << position)
    total %= precision
    return total / precision
|
def obs_preprocessor_tm_act_in_obs(obs):
    """
    Identity preprocessor over the gym observation tuple.

    This takes the output of gym as input, so the memory's output must
    match gym's format.  The first four entries are indexed explicitly
    (observations shorter than four elements raise IndexError), then the
    remainder (>= 1 action) is reattached.
    """
    head = (obs[0], obs[1], obs[2], obs[3])
    return head + tuple(obs[4:])
|
def convertSnake(j):
    """Convert a dict's snake_case keys to camelCase (values unchanged)."""
    converted = {}
    for key, value in j.items():
        head, *tail = key.split('_')
        converted[head + ''.join(part.title() for part in tail)] = value
    return converted
|
def label_case(snake_case):
    """Replace underscores with spaces and capitalize the first letter,
    keeping the WMR and WMQ acronyms upper-cased.

    Args:
        snake_case (str): String written in snake case to be reformatted.
    Returns:
        str: The reformatted string.
    """
    spaced = snake_case.replace("_", ' ').capitalize()
    return spaced.replace('Wmr', 'WMR').replace('Wmq', 'WMQ')
|
def get_data(data_config, name):
    """
    for backwards compatibility of old configs
    :param data_config: config mapping; may contain "all", the given name,
        or already be the data itself
    :param name: dataset name to look up
    :return: the matching config entry (the whole config as fallback)
    """
    for key in ("all", name):
        if key in data_config:
            return data_config[key]
    return data_config
|
def compare(M, A):
    """
    :param M: Matrix with Names and DNA sequences (row 0 is the header)
    :param A: Array with DNA values
    :return: String representing a person's name, or 'No match'
    """
    for row in M[1:]:
        # Evaluate every column (like the original) before deciding.
        mismatches = [A[j - 1] != row[j] for j in range(1, len(row))]
        if not any(mismatches):
            return row[0]
    return 'No match'
|
def is_desired_workflow(run_json):
    """
    Checks if this run is for the "Presubmit Checks" workflow.
    """
    # Each workflow has a fixed ID; for "Presubmit Checks" it is:
    # https://api.github.com/repos/taichi-dev/taichi/actions/workflows/1291024
    PRESUBMIT_WORKFLOW_ID = 1291024
    return run_json['workflow_id'] == PRESUBMIT_WORKFLOW_ID
|
def simpleDijkstra(adjacencyList, source, target):
    """
    This method performs the Dijkstra algorithm on simple (unweighted)
    graphs.

    :param dict adjacencyList: maps each node to the list of nodes it has an
        unweighted edge to.  NOTE: despite the historical docstring, a dict is
        required -- any other container (or a dict with non-list values)
        yields None.
    :param int source: The source node from which we want to determine the path.
    :param int target: The target node to which we want to determine the path.
    :returns: The list of nodes we need to visit to reach target from source,
        or None when the input is malformed, the source has no edges, or the
        target is unreachable.
    :rtype list[int]:
    """
    # Validate input: must be a dict whose values are lists.
    if isinstance(adjacencyList, dict):
        for element in adjacencyList:
            if not isinstance(adjacencyList[element], list):
                return None
    else:
        return None
    if adjacencyList[source] == []:
        return None
    # distances maps node -> (distance from source, path) or None (unseen).
    distances = {}
    for node in adjacencyList:
        if node != source:
            distances.update({node: None})
        else:
            distances.update({node: (0, [source])})
    visitedNodes = [source]
    currentPath = [source]
    currentNode = source
    while currentNode != target:
        for reachable in adjacencyList[currentNode]:
            if distances[reachable] is None:
                # First time this node is seen: record distance and path.
                distances[reachable] = (
                    distances[currentNode][0] + 1,
                    currentPath + [reachable],
                )
            elif distances[currentNode][0] + 1 < distances[reachable][0]:
                # Relaxation.  BUG FIX: the original computed
                # "distances[currentNode] + 1" (tuple + int -> TypeError);
                # [0] extracts the distance component.
                distances[reachable] = (
                    distances[currentNode][0] + 1,
                    currentPath + [reachable],
                )
        # Pick the unvisited node with the smallest known distance.
        nextNode = None
        for node in distances:
            if distances[node] is not None and node not in visitedNodes:
                if nextNode is None or distances[node][0] < distances[nextNode][0]:
                    nextNode = node
        if nextNode is None:
            return None  # target unreachable
        currentPath = distances[nextNode][1]
        currentNode = nextNode
        visitedNodes.append(nextNode)
    return currentPath
|
def customsplit(s):
    """
    Split a float string into mantissa and exponent.

    Needed because for exponents of three digits the 'E' marking the
    exponent is dropped (e.g. "1.2345-123"), which Python's float() does
    not support.

    :param s: The string to split.
    :return: [mantissa, exponent], or [s] when no split was possible.
    """
    n = len(s)
    i = n - 1
    # Look for ['E'|'e']('+'|'-')D+ scanning from the right; the i > 4
    # guard keeps short strings / a leading mantissa sign from matching.
    while i > 4:
        if s[i] == '+' or s[i] == '-':
            # Only drop the preceding character when it really is the
            # exponent marker.  The original removed it unconditionally,
            # chopping a mantissa digit off exactly in the dropped-'E'
            # case this function exists to handle.
            mantissa_end = i - 1 if s[i - 1] in 'Ee' else i
            return [s[0:mantissa_end], s[i:n]]
        i -= 1
    return [s]
|
def utf8decode(value):
    """
    Decode the supplied 8-bit (bytes) string as UTF-8 and return a str.
    >>> utf8decode(b'foobar')
    'foobar'
    """
    return value.decode("utf-8")
|
def get_last_n_path_elements_as_str(fpath: str, n: int) -> str:
    """
    Args:
    - fpath: string representing file path
    - n: integer representing last number of filepath elements to keep
    Returns:
    - the last n "/"-separated elements of fpath, re-joined with "/"
    """
    parts = fpath.split("/")
    return "/".join(parts[-n:])
|
def render_bandwidth_speed(speed):
    """
    Render a speed given in Mbps as human-readable Mbps/Gbps/Tbps text
    ("" for falsy input).
    """
    if not speed:
        return ""
    if speed >= 1000000 and speed % 1000000 == 0:
        return f"{int(speed / 1000000)} Tbps"
    if speed >= 1000 and speed % 1000 == 0:
        return f"{int(speed / 1000)} Gbps"
    if speed >= 1000:
        return f"{float(speed) / 1000} Gbps"
    return f"{speed} Mbps"
|
def _is_decoy_prefix(pg, prefix='DECOY_'):
"""Determine if a protein group should be considered decoy.
This function checks that all protein names in a group start with `prefix`.
You may need to provide your own function for correct filtering and FDR estimation.
Parameters
----------
pg : dict
A protein group dict produced by the :py:class:`ProtXML` parser.
prefix : str, optional
A prefix used to mark decoy proteins. Default is `'DECOY_'`.
Returns
-------
out : bool
"""
return all(p['protein_name'].startswith(prefix) for p in pg['protein'])
|
def unique_colname(suggested, existing):
    """Given a suggested column name and a collection of existing names,
    return a name not present in existing, prepending "_" as needed."""
    candidate = suggested
    while candidate in existing:
        candidate = '_' + candidate
    return candidate
|
def generateExpoSystem(d, k):
    """ int x int -> list[int]
    Return the system of k capacities generated by d, i.e. the geometric
    sequence [1, d, d**2, ..., d**(k-1)] (always at least [1]).
    """
    capacities = [1]
    value = 1
    for _ in range(k - 1):
        value *= d
        capacities.append(value)
    return capacities
|
def convert(hours, minutes):
    """Convert hours and minutes to total seconds; prints and returns it."""
    total_seconds = hours * 60 * 60 + minutes * 60
    print(total_seconds)
    return total_seconds
|
def DetermineMinAndMax(MeetingPoints):
    """Choose the positions where the wave function is clamped to zero.

    Extends the interval between the two points where the potential meets
    the guessed energy by its own width on each side.

    Parameter:
    ----------
    MeetingPoints (tuple) : the minimum and maximum point where the potential
        meets the guessed energy
    Returns:
    --------
    Position_min (float) : the minimum value where psi=0
    Position_max (float) : the maximum value where psi=0
    """
    # The "/ 1" is kept from the original: it true-divides, coercing
    # integer inputs to float exactly as before.
    span = (MeetingPoints[1] - MeetingPoints[0]) / 1
    return MeetingPoints[0] - span, MeetingPoints[1] + span
|
def fatorial(numero, mostrar=False):
    """
    Return numero! (factorial).

    :param numero: non-negative integer
    :param mostrar: when True, print the expansion "n x n-1 x ... x 1 = "
        before returning.
    """
    from math import factorial
    if mostrar:
        for atual in range(numero, 0, -1):
            print(atual, end=" ")
            if atual != 1:
                print("x", end=" ")
        print("=", end=" ")
    return factorial(numero)
|
def name_equality_check(setup_deps, pipfile_deps):
    """Verify that setup.py and Pipfile declare the same dependency names.

    Args:
        setup_deps (dict<str, list<tuple<str, str>>>):
            Maps setup.py dependency names to lists of
            (comparison operator, version specification) tuples.
        pipfile_deps (dict<str, list<tuple<str, str>>>):
            Maps Pipfile dependency names to lists of
            (comparison operator, version specification) tuples.

    Returns:
        bool: True when both files agree on dependency names (the only
        way this function returns).

    Raises:
        ValueError: listing the names missing from either file.
    """
    setup_names = set(setup_deps.keys())
    pipfile_names = set(pipfile_deps.keys())
    only_in_setup = setup_names.difference(pipfile_names)
    only_in_pipfile = pipfile_names.difference(setup_names)
    if only_in_setup or only_in_pipfile:
        err_msg = "Dependency name mismatch!\n"
        if only_in_setup:
            err_msg += ("Dependencies in setup.py but not in Pipfile: " +
                        str(only_in_setup) + "\n")
        if only_in_pipfile:
            err_msg += ("Dependencies in Pipfile but not in setup.py: " +
                        str(only_in_pipfile) + "\n")
        raise ValueError(err_msg)
    return True
|
def sbool(mixed, default=None):
    """Safely cast *mixed* to bool.

    Returns ``bool(mixed)``; if the coercion raises (e.g. a ``__bool__``
    or ``__len__`` implementation that throws), returns *default* instead.

    Args:
        mixed: any value to coerce to a boolean.
        default: value returned when the coercion fails.

    Returns:
        bool, or *default* on failure.
    """
    try:
        return bool(mixed)
    # A bare ``except:`` would also swallow KeyboardInterrupt/SystemExit;
    # catching Exception keeps the "safe cast" intent without hiding those.
    except Exception:
        return default
|
def RK4(f, x, t1, t2, pf, stim=None):
    """Classic fourth-order, 4-stage Runge-Kutta step.

    Returns the step, i.e. the approximation to the integral of f over
    [t1, t2] starting from state x.

    If x is defined at time t1, then ``stim`` should be an array of
    stimulus values at times t1, (t1+t2)/2 and t2 (i.e. at both endpoints
    and the midpoint). Alternatively, ``stim`` may be a function pointer,
    in which case the function object itself is forwarded to f.
    """
    dt = t2 - t1
    tmid = (t1 + t2) / 2.0
    if stim is None:
        extra_start = extra_mid = extra_end = pf
    else:
        try:
            # Probe: if stim(t1) succeeds, treat stim as a function and
            # forward the function object itself at every stage.
            stim(t1)
            stim_start = stim_mid = stim_end = stim
        except TypeError:
            # Not callable with a time argument: assume a 3-element array.
            stim_start, stim_mid, stim_end = stim[0], stim[1], stim[2]
        extra_start = (pf, stim_start)
        extra_mid = (pf, stim_mid)
        extra_end = (pf, stim_end)
    k1 = f(t1, x, extra_start)
    k2 = f(tmid, x + dt * k1 / 2.0, extra_mid)
    k3 = f(tmid, x + dt * k2 / 2.0, extra_mid)
    k4 = f(t2, x + dt * k3, extra_end)
    return dt * (k1 / 2.0 + k2 + k3 + k4 / 2.0) / 3.0
|
def list2tuple(list_to_convert):
    """Recursively convert nested lists/tuples into nested tuples.

    [1, 2, ['A', 'B', ['alpha', 'beta', 'gamma'], 'C'], 3] -->
    --> (1, 2, ('A', 'B', ('alpha', 'beta', 'gamma'), 'C'), 3)

    Non-sequence values pass through unchanged.
    https://stackoverflow.com/questions/1014352/how-do-i-convert-a-nested-tuple-of-tuples-and-lists-to-lists-of-lists-in-python
    """
    if isinstance(list_to_convert, (list, tuple)):
        return tuple(list2tuple(element) for element in list_to_convert)
    return list_to_convert
|
def set_fba_name(source, year):
    """
    Generate name of FBA used when saving parquet
    :param source: str, source
    :param year: str, year; may be None
    :return: str, name of parquet ("<source>_<year>", or just "<source>"
        when year is None)
    """
    if year is None:
        return source
    return f'{source}_{year}'
|
def truncate(input_str, length):
    """Truncate a string to at most *length* characters.

    Strings that already fit are returned unchanged; longer strings are
    cut to (length - 3) characters and suffixed with '...' so the result
    is exactly *length* characters long.

    Args:
        input_str: String to truncate
        length: Maximum length of output string

    Returns:
        str: the (possibly truncated) string, never longer than *length*.
    """
    # Bug fix: the original compared len(input_str) < (length - 3), which
    # needlessly replaced the tail of strings of length (length-3)..length
    # with '...' even though they already fit within the limit.
    if len(input_str) <= length:
        return input_str
    return input_str[:length - 3] + '...'
|
def get_bit_label(drawer, register, index, qubit=True, layout=None, cregbundle=True):
    """Get the bit labels to display to the left of the wires.

    Args:
        drawer (str): which drawer is calling ("text", "mpl", or "latex")
        register (QuantumRegister or ClassicalRegister): get bit_label for this register
        index (int): index of bit in register
        qubit (bool): Optional. if set True, a Qubit or QuantumRegister. Default: ``True``
        layout (Layout): Optional. mapping of virtual to physical bits
        cregbundle (bool): Optional. if set True bundle classical registers.
            Default: ``True``.
    Returns:
        str: label to display for the register/index
    """
    # The "text" drawer takes plain strings; "mpl"/"latex" wrap the index
    # in braces for LaTeX-style rendering.
    index_str = f"{index}" if drawer == "text" else f"{{{index}}}"
    if register is None:
        # No register attached: the bare index is the whole label.
        bit_label = index_str
        return bit_label
    # Pre-build both the register-only and register_index label variants
    # in the drawer-appropriate (plain vs. braced) form.
    if drawer == "text":
        reg_name = f"{register.name}"
        reg_name_index = f"{register.name}_{index}"
    else:
        reg_name = f"{{{register.name}}}"
        reg_name_index = f"{{{register.name}}}_{{{index}}}"
    # Clbits
    if not qubit:
        if cregbundle:
            # Bundled classical register: one label for the whole register,
            # so no index suffix (and no braces even for mpl/latex).
            bit_label = f"{register.name}"
        elif register.size == 1:
            # Single-bit register: the index would be redundant.
            bit_label = reg_name
        else:
            bit_label = reg_name_index
        return bit_label
    # Qubits
    if register.size == 1:
        # Single-qubit register: omit the redundant index.
        bit_label = reg_name
    elif layout is None:
        bit_label = reg_name_index
    elif layout[index]:
        # A layout maps this physical index to a virtual bit; show the
        # virtual name mapped onto the physical index.
        virt_bit = layout[index]
        try:
            # Find the virtual register containing this bit so the label
            # can use that register's name and the bit's position in it.
            virt_reg = next(reg for reg in layout.get_registers() if virt_bit in reg)
            if drawer == "text":
                bit_label = f"{virt_reg.name}_{virt_reg[:].index(virt_bit)} -> {index}"
            else:
                bit_label = (
                    f"{{{virt_reg.name}}}_{{{virt_reg[:].index(virt_bit)}}} \\mapsto {{{index}}}"
                )
        except StopIteration:
            # Virtual bit not found in any register: fall back to showing
            # the bit object itself mapped to the physical index.
            if drawer == "text":
                bit_label = f"{virt_bit} -> {index}"
            else:
                bit_label = f"{{{virt_bit}}} \\mapsto {{{index}}}"
    else:
        # Layout present but no virtual bit at this index (falsy entry).
        bit_label = index_str
    return bit_label
|
def find_where_wires_cross(coords1, coords2):
    """Find and return the intersections of two wires given their coordinates.

    Duplicate coordinates within a wire are ignored (each wire's points
    are de-duplicated first); the result order follows set iteration.
    """
    # Hoist the membership set so each lookup is O(1).
    occupied = set(coords2)
    return [point for point in set(coords1) if point in occupied]
|
def _is_ipv4_like(s):
"""Find if a string superficially looks like an IPv4 address.
AWS documentation plays it fast and loose with this; in other
regions, it seems like even non-valid IPv4 addresses (in
particular, ones that possess decimal numbers out of range for
IPv4) are rejected.
"""
parts = s.split('.')
if len(parts) != 4:
return False
for part in parts:
try:
int(part)
except ValueError:
return False
return True
|
def maybe_hparams_to_dict(hparams):
    """Normalize *hparams* to a plain `dict`.

    If :attr:`hparams` is an instance of :class:`~texar.hyperparams.HParams`,
    converts it via ``todict()`` and returns. A `dict` (or `None`) is
    returned unchanged.
    """
    if hparams is None or isinstance(hparams, dict):
        return hparams
    return hparams.todict()
|
def json_set_hook(dct):
    """JSON object hook that decodes a ``{'__set__': [...]}`` payload back
    into a Python `set`.

    Lists inside the encoded set are converted to tuples so they are
    hashable; anything that is not such an encoded dict passes through
    untouched.
    """
    if isinstance(dct, dict) and '__set__' in dct:
        return {tuple(element) if isinstance(element, list) else element
                for element in dct['__set__']}
    return dct
|
def isbn_10_check_digit(nine_digits):
    """Compute the check digit for a 10-digit ISBN.

    Args:
        nine_digits (str): the first nine digits of the ISBN.

    Returns:
        str: the check digit ('0'-'9' or 'X'), or None when the input is
        not a string of nine digits.
    """
    if len(nine_digits) != 9:
        return None
    try:
        # Validate that the whole string is numeric.
        int(nine_digits)
    # Bug fix: the original used a bare ``except:``, which also swallowed
    # KeyboardInterrupt/SystemExit. TypeError covers non-string input that
    # still supports len() (e.g. a list of 9 items).
    except (TypeError, ValueError):
        return None
    # Weight the digits 10..2 from first to last (reversed order gives
    # enumerate indices 0..8, hence weight i + 2), per the ISBN-10 scheme.
    remainder = sum((i + 2) * int(x) for i, x in enumerate(reversed(nine_digits))) % 11
    if remainder == 0:
        return '0'
    tenth_digit = 11 - remainder
    # A computed value of 10 is represented by the letter 'X'.
    return 'X' if tenth_digit == 10 else str(tenth_digit)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.