content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def time_in_range(start, end, current):
    """Return True when ``current`` lies in the inclusive window [start, end]."""
    # Equivalent to ``start <= current <= end`` by De Morgan's law.
    return not (current < start or end < current)
|
0abba537e76c2ea8e56d03f85be3b139dd54e9fd
| 55,856
|
def divider_row(sizes):
    """Create a one line string of '-' characters of given length in each column.

    Args:
        sizes: iterable of column widths.

    Returns:
        str: one dash-run per column, each followed by a space,
        e.g. ``divider_row([3, 2])`` -> ``'--- -- '``.
    """
    # '-' * width handles zero-width columns correctly; the previous
    # ``'-'.ljust(width, '-')`` emitted one dash even when width == 0,
    # because ljust never shrinks the source string.
    return ''.join('-' * size + ' ' for size in sizes)
|
1a054e94179f3a5484bfe28b65ffaf2b30bb5df1
| 55,858
|
def get_padding_for_kernel_size(kernel_size):
    """Compute 'same' padding given an odd kernel size.

    Generalized from the original hard-coded table {7: (3, 3), 3: (1, 1)};
    results for kernel sizes 7 and 3 are unchanged.

    Args:
        kernel_size (int): convolution kernel size; must be a positive odd int.

    Returns:
        tuple: (pad, pad) where pad = (kernel_size - 1) // 2.

    Raises:
        ValueError: if kernel_size is not a positive odd integer.
    """
    if kernel_size > 0 and kernel_size % 2 == 1:
        pad = (kernel_size - 1) // 2
        return (pad, pad)
    raise ValueError('Padding for kernel size {} not known.'.format(
        kernel_size))
|
417f7a53caadc90207bb87ea1336e55487902abd
| 55,867
|
def delta16(v1, v2):
    """Return the delta (difference) between two increasing 16-bit counters,
    accounting for the wraparound from 65535 back to 0"""
    # Modular arithmetic folds the negative (wrapped) case back into
    # [0, 65535] in a single step.
    return (v2 - v1) % (1 << 16)
|
96b4418114189b1a1ac986ac3a544c98537684d6
| 55,874
|
def get_filename(url):
    """
    Parses filename from given url.

    Returns the substring after the last '/', or the whole string when no
    '/' is present.
    """
    # Bug fix: the old ``if url.find('/')`` guard was wrong — find() returns
    # -1 (truthy) when no slash exists (then rsplit(...)[1] raised IndexError)
    # and 0 (falsy) when the URL starts with '/', silently returning None.
    # rsplit with index -1 handles every case.
    return url.rsplit('/', 1)[-1]
|
7fc1868691f512dd49db76753bad1b160108add5
| 55,878
|
def ApplySConscript(self, sconscript_file):
    """Applies a SConscript to the current environment.

    Args:
        self: Environment to modify.
        sconscript_file: Name of SConscript file to apply.

    Returns:
        The return value from the call to SConscript().

    Use this when a shared SConscript sets up an environment and cannot be
    factored into a parent environment that child environments Clone() from
    (env.Clone() only provides single inheritance). It is NOT a replacement
    for the Tool() method: add builders/methods to environments as tools
    (with unit tests) instead.

    Equivalent to ``SConscript(sconscript_file, exports={'env': self})``.
    The called SConscript does ``Import('env')`` and receives a reference to
    the calling environment, so changes it makes are applied in place.
    If you need multiple exported or returned variables, call SConscript()
    directly.
    """
    exports = {'env': self}
    return self.SConscript(sconscript_file, exports=exports)
|
cbb44a59aafe661403ec3ec5003f3dfccde51b60
| 55,879
|
import pickle
def load_pickle(path):
    """
    Load a saved pickle file.

    Parameters
    ----------
    path: str
        path of the saved file

    Returns
    -------
    loaded pickle file
    """
    # ``with`` guarantees the handle is closed even if unpickling raises.
    with open(path, 'rb') as handle:
        return pickle.load(handle)
|
a7603050d95d5d3f48659ed837ba5fa157b05b7b
| 55,881
|
def bb_image(url):
    """BB Code image markup for `url`."""
    return '[img]' + url + '[/img]'
|
75af6e3f9555100f246f9624c1b25369e16fcf3f
| 55,884
|
def get_ratio_and_volume_list(volume_to_average_volume_ratio, volume_to_two_week_volume_ratio, returns):
    """
    returns tupled list of (return, volume_to_average_volume_ratio, volume_to_two_week_volume_ratio)
    for given stock. Inputs can be calculated using functions above.
    """
    # zip() pairs the three parallel lists directly instead of indexing by
    # position, removing the per-iteration list concatenation (O(n^2)) and
    # the IndexError risk when a ratio list is shorter than ``returns``.
    return list(zip(returns,
                    volume_to_average_volume_ratio,
                    volume_to_two_week_volume_ratio))
|
9bc7b164ef9b92ad7750e6e88ff6d10f9cc8dac0
| 55,885
|
import math
import random
def expran(alpha=1):
    """
    Returns a value greater than 0 from the exponential distribution
    (https://en.wikipedia.org/wiki/Exponential_distribution) with stretching factor alpha.
    Increasing alpha "prefers" smaller numbers. The distribution is unbounded but when
    alpha=1 then %99.9 of the time the value returned will be than 6.9077554, i.e. -log(.001).
    The distribution density is f(x)=(exp -x) with a mean of 1.0.
    """
    # Inverse-CDF sampling: 1 - random() is uniform on (0, 1], so the log
    # is always finite and the result strictly positive.
    u = 1.0 - random.random()
    return -math.log(u) / alpha
|
4f6261ac384e57f09fb21b9b3af395e217643367
| 55,887
|
import random
def gen_username(first_name: str, last_name: str = '') -> str:
"""Returns a hopefully unique username for Django."""
return first_name + last_name + str(random.random())[2:7]
|
a8644fa25e8b131aaf98295aecc5288313d1c901
| 55,889
|
def calculate_loan_to_value_ratio(loan_amount, home_value):
    """Calculates users loan to value ratio based on inputs.

    Args:
        loan_amount (int): The requested loan amount.
        home_value (int): The home value.

    Returns:
        The loan-to-value ratio.
    """
    # Coerce both inputs to int (they may arrive as numeric strings).
    return int(loan_amount) / int(home_value)
|
c5ea07d01a4898c1006e4d54f536b8e33379aa62
| 55,891
|
def diag_adjacent(p):
    """Return the adjacent positions to p including diagonal adjaceny"""
    neighbours = []
    # Offsets run -1..1 per axis in the same order as the original
    # comprehension (x outermost), so the result ordering is identical.
    for dx in (-1, 0, 1):
        for dy in (-1, 0, 1):
            for dz in (-1, 0, 1):
                candidate = (p[0] + dx, p[1] + dy, p[2] + dz)
                if candidate != p:
                    neighbours.append(candidate)
    return neighbours
|
49a3f29116a022c74c8f4f5316d656722819d466
| 55,893
|
def guess_filetype(parm):
    """Infer a filetype from the snippet parameter.

    If the `editorlang` key exists, try that first. Otherwise, try to
    guess based on the node's type. If neither match well, return an
    empty string.

    :param parm: Paramter containing the snippet
    :type parm: :class:`hou.Parm`
    :return: Suggested file extension
    :rtype: str
    """
    exts = {
        "python": ".py",
        "vex": ".h",
        "opencl": ".cl",
    }
    editorlang = parm.parmTemplate().tags().get("editorlang")
    if editorlang:
        # Bug fix: use .get() with a "" default so an unrecognized
        # editorlang returns an empty string (as the docstring promises)
        # instead of raising KeyError.
        return exts.get(editorlang.lower(), "")
    # Try by node type
    type_map = {
        "wrangle": exts["vex"],
        "python": exts["python"],
        "opencl": exts["opencl"]
    }
    node_type_name = parm.node().type().nameComponents()[2]
    for searchterm, ext in type_map.items():
        if searchterm in node_type_name:
            return ext
    return ""
|
6a0723a1a654f7040cfab5cacdc7b894a3b0c700
| 55,896
|
def _binary_apply(op, leftval, rightval, cast_type, infix=True):
"""
Applies the binary operator 'op' to 'leftval' and 'rightval'.
The operands are cast to the type 'cast_type' first.
Examples
--------
>>> _binary_apply("add", "a", "b", "i64", infix=False)
'add(i64(a), i64(b))'
>>> _binary_apply("+", "a", "b", "i64")
'(i64(a) + i64(b))'
"""
if infix:
return "({cast_type}({leftval}) {op} {cast_type}({rightval}))".format(
op=op, leftval=leftval, rightval=rightval, cast_type=cast_type)
else:
return "{op}({cast_type}({leftval}), {cast_type}({rightval}))".format(
op=op, leftval=leftval, rightval=rightval, cast_type=cast_type)
|
5a01bdbdb5823bed58dbc2d31ea06b19b010bd5e
| 55,898
|
def payoff_put(underlying, strike, gearing=1.0):
    """payoff_put
    Payoff of put option.

    :param float underlying:
    :param float strike:
    :param float gearing: Coefficient of this option. Default value is 1.
    """
    intrinsic = strike - underlying
    if intrinsic < 0:
        intrinsic = 0
    return gearing * intrinsic
|
42a9932ac176226f01d5bb0fa5f58893d391a64f
| 55,901
|
def extract_basis_nwchem(data, name):
    """Extract atomic orbital, charge density fitting, or exchange
    correlation functional basis data from a text region passed in as
    data. The charge density fitting and exchange correlation functional
    basis set data are employed for density functional calculations.

    @param data: text region containing basis set data
    @type data : str
    @param name: name of basis type: "ao basis", "cd basis", or "xc basis"
    @type name : str
    @return: per-element basis set chunks
    @rtype : list
    """
    begin_marker = """BASIS "{0}" PRINT""".format(name)
    end_marker = "END"
    # search for the basis set data begin marker
    # calling "upper" on data because original data has inconsistent
    # capitalization
    begin = data.upper().find(begin_marker.upper())
    end = data.upper().find(end_marker, begin)
    # No basis data found
    if begin == -1:
        return []
    # NOTE(review): ``end`` is the index where "END" *starts*, so subtracting
    # len(end_marker) trims an extra 3 characters before it (the trailing
    # strip() may mask this). Confirm the offset is intentional before changing.
    trimmed = data[begin + len(begin_marker): end - len(end_marker)].strip()
    chunks = []
    lines = []
    # group lines of data delimited by #BASIS SET... into per-element chunks
    for line in trimmed.split("\n"):
        if line.upper().startswith("#BASIS SET"):
            if lines:
                chunks.append(lines)
            lines = [line]
        else:
            lines.append(line)
    # handle trailing chunk that is not followed by another #BASIS SET...
    if lines and (not chunks or lines != chunks[-1]):
        chunks.append(lines)
    # join lines back into solid text blocks
    chunks = ["\n".join(c) for c in chunks]
    return chunks
|
e63889ffc2a50322158e89e30db7fa9a3c76b818
| 55,909
|
def format_security(value):
    """Given a system security status as a float, return a rounded string."""
    rounded = round(value, 1)
    return '{}'.format(rounded)
|
365c827735872e19fda72d974c292c77f9415fef
| 55,910
|
from operator import add
def addLists(lst1, lst2):
    """Return the elementwise sum of lst1 and lst2.

    >>> addLists([1, 2, 3],[4, 5, 6])
    [5, 7, 9]
    """
    assert len(lst1) == len(lst2), "The lists have to be the same length."
    return [a + b for a, b in zip(lst1, lst2)]
|
4e9a45eb380934585637c4ddc7abdcb685f17980
| 55,913
|
def make_collectitem(item):
    """Return JSON-serializable collection item."""
    json_item = {
        'nodeid': item.nodeid,
        'type': item.__class__.__name__,
    }
    # Sentinel-based getattr mirrors the original try/except AttributeError:
    # items without a ``location`` attribute simply omit 'lineno'.
    _missing = object()
    location = getattr(item, 'location', _missing)
    if location is not _missing:
        json_item['lineno'] = location[1]
    return json_item
|
c8aa794bee07261262f82ce9ed6c6d3f2731a862
| 55,914
|
def FindApprovalDefByID(approval_id, config):
    """Find the specified approval, or return None."""
    # next() on a generator returns the first match, or the None default.
    return next(
        (ad for ad in config.approval_defs if ad.approval_id == approval_id),
        None)
|
075d3135a58c6c7f9bc245220e2c9507126fabb4
| 55,915
|
def dict_match(a, b):
    """
    Check if all attribute/value pairs in a also appears in b

    :param a: A dictionary
    :param b: A dictionary
    :return: True/False
    """
    # Bug fix: the previous implementation swallowed KeyError, so a key
    # present in ``a`` but missing from ``b`` was silently ignored and the
    # function returned True — contradicting the docstring. A missing key
    # now correctly yields False.
    return all(k in b and b[k] == v for k, v in a.items())
|
7b775e87a61deb8414b2f950ba95845e995ccef2
| 55,918
|
import math
def discrete_contour(contour, Dl):
    """ Takes contour points to get a number of intermediate points

    Args:
        contour: `List` contour or list of points to get intermediate points
        Dl: `int` distance to get a point by segment

    Returns:
        new_contour: `List` new contour with intermediate points
    """
    # If contour has less of two points is not valid for operations
    if len(contour) < 2:
        print("Error: no valid segment")
        return contour
    # New contour variable
    new_contour = []
    # Iterate through all contour points
    for idx, cordinate in enumerate(contour):
        # Select next contour for operation
        if not idx == len(contour)-1:
            next_cordinate = contour[idx+1]
        else:
            # Last point wraps to the first: the contour is treated as closed.
            next_cordinate = contour[0]
        # Calculate length of segment
        segment_lenth = math.sqrt((next_cordinate[0] - cordinate[0])**2 +\
            (next_cordinate[1] - cordinate[1])**2)
        divitions = segment_lenth/Dl # Number of new point for current segment
        dy = next_cordinate[1] - cordinate[1] # Segment's height
        dx = next_cordinate[0] - cordinate[0] # Segment's width
        # NOTE(review): ``divitions`` is a float; it is falsy only when the two
        # points coincide (segment length exactly 0), which guards the
        # divisions below against ZeroDivisionError.
        if not divitions:
            ddy = 0 # Dy value to sum in Y axis
            ddx = 0 # Dx value to sum in X axis
        else:
            ddy = dy/divitions # Dy value to sum in Y axis
            ddx = dx/divitions # Dx value to sum in X axis
        # get new intermediate points in segments
        # NOTE(review): this inner ``idx`` shadows the enumerate variable;
        # harmless because enumerate re-binds it on the next outer pass.
        for idx in range(0, int(divitions)):
            new_contour.append((int(cordinate[0] + (ddx*idx)),
                int(cordinate[1] + (ddy*idx))))
    # Return new contour with intermediate points
    return new_contour
|
5a7ba6110313add959cf5c63a0cc827acab4deab
| 55,919
|
def exp(b, c):
    """Returns: b**c

    Precondition: b a float, c ≥ 0 an int"""
    assert type(b) == float, repr(b) + " is not a float "
    assert c >= 0 and type(c) == int, repr(c) + 'is the wrong format'
    # Iterative form of the original recursion; multiplying in the order
    # b * (b * (... * 1)) reproduces the recursive fold exactly, and
    # c == 0 still yields the int 1.
    result = 1
    for _ in range(c):
        result = b * result
    return result
|
f01039e4170a19f91b9c468fe8140b34d566fd9b
| 55,920
|
def get_temp_disk_for_node_agent(node_agent: str) -> str:
    """Get temp disk location for node agent

    :param node_agent: node agent sku id (e.g. 'batch.node.ubuntu 18.04')
    :return: temp disk location
    """
    # Bug fix: the prefix was misspelled 'batch.node.unbuntu', so Ubuntu
    # nodes never matched and incorrectly fell through to '/mnt/resource'.
    if node_agent.startswith('batch.node.ubuntu'):
        return '/mnt'
    elif node_agent.startswith('batch.node.windows'):
        return 'D:\\batch'
    else:
        return '/mnt/resource'
|
4caaa3995363f585740e7b3f2a1c811d7a916ab2
| 55,921
|
def resdiv_r1(Vin, Vout, R2):
    """
    Calculate the exact value of R1 with R2 given.
    """
    # Voltage-divider relation solved for R1: R1 = R2 * (Vin/Vout - 1).
    ratio = Vin / Vout
    return R2 * (ratio - 1)
|
285d989f7ae45c89c5a67be65cd73fe8a3fab232
| 55,925
|
def read_conll(path):
    """
    Returns list of sentences. Sentence is a list of pairs (word, NE-label)

    :param path: path to a CoNLL-format file; sentences are separated by
        blank lines and the first block (header/DOCSTART) is skipped.
    """
    # Bug fix: the original leaked the file handle; ``with`` closes it.
    with open(path) as conll_file:
        blocks = conll_file.read().split('\n\n')[1:]

    def to_pair(line):
        # first whitespace-separated column = word, last column = NE label
        tokens = line.split()
        return (tokens[0], tokens[-1])

    return [[to_pair(line) for line in block.splitlines()]
            for block in blocks]
|
f44431d81b322749afe741f12a440309474eb14b
| 55,929
|
def auth_and_chan(ctx):
    """Message check: same author and channel"""
    def chk(msg):
        same_author = msg.author == ctx.author
        same_channel = msg.channel == ctx.channel
        return same_author and same_channel
    return chk
|
03e41e41b8924f3bc0523d20e842426b50370664
| 55,931
|
def transform_eigenspace(eigvecs, eigvals, transform_function):
    """Return a function that multiplies a vector by a matrix with
    transformed eigenvalues.

    The columns of ``eigvecs`` are eigenvectors paired with the entries of
    ``eigvals``. The returned callable ``a_mult`` acts as the identity on
    every direction orthogonal to ``eigvecs`` and has eigenvalues
    ``transform_function(eigvals)`` on the span of ``eigvecs``.

    Parameters
    ------------
    eigvecs: `numpy.ndarray` (N, K)
        The eigenvectors.
    eigvals: `numpy.ndarray` (K,)
        The eigenvalues
    transform_function: callable
        Maps ``eigvals`` to a vector of the same length — the new eigenvalues.

    Returns
    -----------
    a_mult: callable
        A linear map with the eigendecomposition described above.
    """
    if eigvecs.ndim != 2:
        raise ValueError('``eigvecs`` must be 2d.')
    if eigvals.ndim != 1:
        raise ValueError('``eigvals`` must be 1d.')
    if eigvecs.shape[1] != len(eigvals):
        raise ValueError(
            'The columns of ``eigvecs`` and length of ``eigvals`` must match.')
    # Hoist the eigenvalue shift: v + V @ ((new - 1) * (V^T v)) equals the
    # transparent "project out, rescale, add back" formulation.
    shift = transform_function(eigvals) - 1

    def a_mult(vec):
        loadings = eigvecs.T @ vec
        return vec + eigvecs @ (shift * loadings)

    return a_mult
|
30c481da3d420d9a8c055bada75da4d9d9b96eda
| 55,934
|
def change_letter(token, index, letter):
    """
    Replaces a letter in a token.

    token: word to be used
    index: index of the letter
    letter: letter used to replace
    returns STRING
    """
    # Strings are immutable, so edit a char list and re-join; list indexing
    # also supports negative ``index`` values, matching the original.
    chars = list(token)
    chars[index] = letter
    return ''.join(chars)
|
5983be861818eafa2ce2efe05471f1a0507ab7c5
| 55,940
|
def parse_json(json, length='km'):
    """
    parse uber api result json

    Args:
        json (json) - result requested json
        length (str) - type of length values to return - miles, kilometers or meters (mi/km/m)

    Returns:
        price, distance_estimation, time_estimation
    """
    # Guard first: a missing payload yields the (-1, -1, -1) sentinel triple.
    if json is None:
        return -1, -1, -1
    length_dict = {'mi': 1, 'km': 1.60934, 'm': 1609.34}
    # Unknown unit strings fall back to a multiplier of 1 (miles).
    mult = length_dict.get(length, 1)
    distance_estimation = json['trip']['distance_estimate'] * mult
    time_estimation = json['trip']['duration_estimate']
    price = json['fare']['value']
    return price, distance_estimation, time_estimation
|
0a19a562ce4ad9f75e5fbecd4c3bd07036ab1ab5
| 55,941
|
def compute_l(i, m, j, n):
    """Computes the `l` index from the composition of shift operators, Q_{i, m} Q_{j, n} = Q_{k, l} in Proposition 2
    of the paper (regarding efficient multiplication of simple Jacobians)."""
    # Case analysis on the signs of the shifts i and j. Boundary values
    # (i == 0 or j == 0) satisfy more than one condition; the branch order
    # below resolves which formula applies.
    if i >= 0 and j >= 0:
        return max(m - j, n)
    elif i >= 0 and j <= 0:
        return max(m, n) + min(i, -j)
    elif i <= 0 and j >= 0 and i + j >= 0:
        return max(m - i - j, n)
    elif i <= 0 and j >= 0 and i + j <= 0:
        return max(n + i + j, m)
    else:
        # Remaining case: i < 0 and j < 0.
        return max(m, n + i)
|
8d0221d766d88cc22fdf93ebbf56e846129a7413
| 55,943
|
import codecs
def set2file(d, path, encoding='utf-8', func=None):
    """
    Dump a set to a file, one element per line.

    @param d: the set to be dumpped
    @param path: the output file path
    @param encoding: the output file encoding
    @param func: a function applied to the value
    @return: always return None
    """
    with codecs.open(path, 'wb', encoding=encoding) as out:
        for value in d:
            # Optionally transform each element before writing.
            item = value if func is None else func(value)
            out.write(u'{}\n'.format(item))
    return None
|
2c8a683ee632703fff5647d98978ef4f7dd30a27
| 55,947
|
import itertools
def get_deletion_effects(deletion_record, gff_db, regulatory_margin=2000):
    """
    Figure out the effects of a deletion using a gff.

    Args:
        deletion_record (vcf.Record): a vcf Record representing the
            deletion
        gff_db (gffutils.FeatureDB): a gffutils DB of a genome's
            annotations
        regulatory_margin (int): the amount of sequence on either side
            of the deletion to look in for genes to be classified as
            having their regulatory regions affected

    Returns:
        tuple: (affected_genes, intergenic, regulatory, intronic, coding)
        where affected_genes is a set of upper-cased gene names and the
        remaining four are booleans.
    """
    affected_genes = set()
    intergenic = True
    regulatory, intronic, coding = [False] * 3
    # first, go through all the features that overlap the deletion
    # and use them to set the above booleans and add any affected
    # genes to affected_genes
    # NOTE(review): assumes deletion_record.POS/sv_end use the same
    # coordinate convention as the gff_db.region() query — confirm
    # 0- vs 1-based handling against the callers.
    features_in_deletion = gff_db.region(
        seqid=deletion_record.CHROM,
        start=deletion_record.POS,
        end=deletion_record.sv_end,
    )
    for feature in features_in_deletion:
        if feature.featuretype == "gene":
            affected_genes.add(feature.attributes["Name"][0].upper())
            intergenic = False
            intronic = True
        elif feature.featuretype == "CDS":
            # A CDS overlap overrides the intronic classification set by
            # the enclosing gene feature.
            coding = True
            intronic = False
    # next, look for any genes *near* the deletion
    # (one window upstream of POS, one downstream of sv_end)
    features_near_deletion = itertools.chain(
        gff_db.region(
            seqid=deletion_record.CHROM,
            start=deletion_record.POS - regulatory_margin,
            end=deletion_record.POS,
        ),
        gff_db.region(
            seqid=deletion_record.CHROM,
            start=deletion_record.sv_end,
            end=deletion_record.sv_end + regulatory_margin,
        ),
    )
    for feature in features_near_deletion:
        if feature.featuretype == "gene":
            gene_name = feature.attributes["Name"][0].upper()
            # only consider this a deletion of a regulatory region if
            # this gene has not been otherwise affected
            if gene_name not in affected_genes:
                regulatory = True
                intergenic = False
                affected_genes.add(gene_name)
    return affected_genes, intergenic, regulatory, intronic, coding
|
c381bb2ad62d34c850429a391c7cef8affa66fe8
| 55,949
|
def find_lcs(first_sentence_tokens: tuple, second_sentence_tokens: tuple, lcs_matrix: list) -> tuple:
    """
    Finds the longest common subsequence itself using the Needleman–Wunsch algorithm
    :param first_sentence_tokens: a tuple of tokens
    :param second_sentence_tokens: a tuple of tokens
    :param lcs_matrix: a filled lcs matrix
    :return: the longest common subsequence
    """
    # Validation: both inputs must be tuples with no None entries, and the
    # matrix must be a non-empty list; any violation yields an empty tuple.
    checking_first_sentence = (not isinstance(first_sentence_tokens, tuple) or
                               None in first_sentence_tokens)
    checking_second_sentence = (not isinstance(second_sentence_tokens, tuple) or
                                None in second_sentence_tokens)
    checking_lcs_matrix = (not lcs_matrix or
                           not isinstance(lcs_matrix, list))
    if checking_lcs_matrix or checking_first_sentence or checking_second_sentence:
        return ()
    length_1 = len(first_sentence_tokens)
    length_2 = len(second_sentence_tokens)
    # Dimensions check (only the first row's length is inspected).
    if len(lcs_matrix) != length_1 or len(lcs_matrix[0]) != length_2:
        return ()
    # A valid LCS matrix has 0 or 1 in its top-left cell.
    if lcs_matrix[0][0] != 0 and lcs_matrix[0][0] != 1:
        return ()
    lcs_list = []
    # Backtrack from the bottom-right corner of the matrix.
    row = len(first_sentence_tokens) - 1
    column = len(second_sentence_tokens) - 1
    if row < 0:
        row = 0
    elif column < 0:
        column = 0
    while row >= 0 and column >= 0:
        if first_sentence_tokens[row] == second_sentence_tokens[column]:
            # Matching tokens belong to the LCS; step diagonally.
            lcs_list.append(second_sentence_tokens[column])
            row -= 1
            column -= 1
        else:
            # NOTE(review): when row == 0, ``lcs_matrix[row - 1]`` wraps to the
            # last row via negative indexing; the ``or column == 0`` clause
            # appears to compensate — verify against the matrix builder.
            if lcs_matrix[row - 1][column] > lcs_matrix[row][column - 1] or column == 0:
                row -= 1
            else:
                column -= 1
    # Tokens were collected tail-first; reverse into sentence order.
    return tuple(reversed(lcs_list))
|
0603617a31f07774956b8a8d62b72def03618405
| 55,950
|
def ord(space, w_val):
    """Return the integer ordinal of a character."""
    # Delegate to the object space's own ord implementation.
    result = space.ord(w_val)
    return result
|
f73a6c22c53f4c0770e952457d7f5e4cd1411d6e
| 55,952
|
from typing import Any
from typing import Optional
def get_node_anchor(node: Any) -> Optional[str]:
    """
    Returns a node's Anchor/Alias name or None wheh there isn't one.
    """
    # getattr(default=None) covers the missing-attribute case; the falsiness
    # test covers both ``value is None`` and an empty value.
    anchor = getattr(node, "anchor", None)
    if anchor is None or not anchor.value:
        return None
    return str(anchor.value)
|
2b68331e14176c4e947bef7667686f22a3efa01a
| 55,956
|
def clip(value, min_v, max_v):
    """
    Clip the given value to be within a range
    https://stackoverflow.com/questions/9775731/clamping-floating-numbers-in-python
    """
    # Same composition as the original, bound names for readability:
    # cap at max_v first, then floor at min_v.
    upper_bounded = min(value, max_v)
    return max(upper_bounded, min_v)
|
cd4a5977832acbdc8522fa0637aa79743e586fe9
| 55,958
|
import gzip
def is_gzip(file_path: str) -> bool:
    """Return whether a file is a gzip file or not

    :param file_path: the path to the file.
    :return: whether the file is a gzip file or not.
    """
    # gzip.open is lazy: the header is only validated on the first read,
    # which raises OSError (BadGzipFile) for non-gzip content. The open
    # itself stays outside the try so a missing file still propagates.
    result = False
    with gzip.open(file_path, "rb") as handle:
        try:
            handle.read(1)
            result = True
        except OSError:
            pass
    return result
|
15afaaf43c4ca66f802139857c79ff578227e31f
| 55,963
|
def parse_version(version_str):
    """
    Parse the bazel version string, returning a tuple of ints.

    Derived from https://github.com/tensorflow/tensorflow/blob/master/tensorflow/version_check.bzl

    Args:
        version_str: The version string to parse, e.g. "3.7.2-homebrew".

    Returns:
        The tuple of integer version elements, e.g. (3, 7, 2).
    """
    # Extract just the semver part: drop anything after a space or hyphen.
    semver_str = version_str.partition(" ")[0].partition("-")[0]
    # Bug fix: convert components to int so tuple comparison orders
    # numerically — the original returned strings, making ("10", ...)
    # compare as smaller than ("9", ...). The upstream bzl also uses int().
    return tuple(int(n) for n in semver_str.split("."))
|
35254f87707293ecd203d12f3eb36bb3160abe32
| 55,964
|
import json
def make_control_event(control_type, timestamp):
    """Make a control event."""
    # Build the innermost CONTROL payload first, then wrap it in the
    # channel-message envelope; key order matches the original literals.
    control_payload = json.dumps({
        'type': 'CONTROL',
        'controlType': control_type,
    })
    channel_payload = json.dumps({
        'id': 'TVUsxaabHs:0:0',
        'clientId': 'pri:MzM0ODI1MTkxMw==',
        'timestamp': timestamp,
        'encoding': 'json',
        'channel': '[?occupancy=metrics.publishers]control_pri',
        'data': control_payload,
    })
    return {'event': 'message', 'data': channel_payload}
|
845bf1b28a92ea37f5e3d4d1dfc25c739f1c27cf
| 55,970
|
def invert_dict(d):
    """ Utility for swapping keys and values in a dictionary.

    Parameters
    ----------
    d : Dict[Any, Any]

    Returns
    -------
    Dict[Any, Any]
    """
    # values() and keys() iterate in the same order, so zipping them
    # swaps each pair; duplicate values collapse with last-key-wins,
    # exactly like the comprehension form.
    return dict(zip(d.values(), d.keys()))
|
3d8b84289e8291a9539381769f0cf0f43db8807c
| 55,971
|
import struct
def rgb555_to_rgb888(data):
    """
    Converts RGB555 to (vnc-compatible) RGB888

    :param data: little-endian 16-bit-per-pixel RGB555 packed data
    :return: bytes with 4 bytes per input pixel — three 5-to-8-bit
        expanded channels followed by a zero pad byte
    """
    out = bytearray()
    # Fix: use integer division for the pixel count. The old ``len(data)/2``
    # produced a float and only worked because '%d' happens to truncate it.
    pixel_count = len(data) // 2
    for pixel in struct.unpack('<%dH' % pixel_count, data):
        out.append((pixel & 0b11111) << 3)           # bits 0-4,  expanded x8
        out.append(((pixel >> 5) & 0b11111) << 3)    # bits 5-9,  expanded x8
        out.append(((pixel >> 10) & 0b11111) << 3)   # bits 10-14, expanded x8
        out.append(0)                                # pad byte
    return bytes(out)
|
bb1cb2359472329fa781ac597bbec74c44900bce
| 55,978
|
def get_tags(f):
    """Breaks down a file name into its tags and returns them as a list."""
    # Keep only the final path segment.
    if "/" in f:
        f = f.split("/")[-1]
    # Drop the extension (everything after the first '.').
    if "." in f:
        f = f.split(".")[0]
    # Split the base name into tags on '_'.
    if "_" in f:
        f = f.split("_")
    # NOTE(review): if f was split on '_' above, f is now a list and this
    # membership test looks for a literal ":" element (not a substring), so
    # names mixing '_' and ':' are not split further; and when neither
    # separator is present the bare string — not a list — is returned.
    # Confirm callers handle both return shapes.
    if ":" in f:
        f = f.split(":")
    return f
|
262d4b855cf3309fe61bdbdfb97551b3ddadb1f6
| 55,983
|
def _movechars(s, src, dst):
"""Helper for einsum string munging, like moveaxis on identifier strings."""
chars = [c for i, c in enumerate(s) if i not in src]
for i, j in sorted(zip(dst, src)):
chars.insert(i, s[j])
return ''.join(chars)
|
54a7ddcaaaeb0795c0afdbefa2aee3b9718e60d5
| 55,987
|
def load_custom_overrides(_, __, value):
    """Load custom overrides from user

    Parameters
    ----------
    value : list
        list of str (override_name=override_value)

    Returns
    -------
    dict
        user overrides

    Raises
    ------
    ValueError
        if an entry contains no '=' separator
    """
    user_overrides = {}
    for override in value:
        # Bug fix: maxsplit=1 so override *values* may themselves contain
        # '=' (e.g. "path=a=b" previously raised "too many values to unpack").
        override_name, override_value = override.split('=', 1)
        user_overrides[override_name] = override_value
    return user_overrides
|
d22d1637c61b1588397c6de26993538554dbba34
| 55,988
|
from typing import List
def from_to_by(start: float, stop: float, step: float, digits: int = 8) -> List[float]:
    """Sequence between start and stop (inclusive) by step, rounded to digits."""
    # Number of points including both endpoints; rounding each value to
    # ``digits`` suppresses accumulated float noise.
    count = round((stop - start) / step) + 1
    values = []
    for i in range(count):
        values.append(round(start + step * i, digits))
    return values
|
ada3897d18376df575e2156ad2d888045b7f5bf7
| 55,992
|
def underscore_to_camel(name):
    """convert a snake case string to camel case"""
    parts = name.strip('_').split('_')
    # First fragment stays lowercase; the rest get Title-cased and joined.
    return parts[0].lower() + ''.join(part.title() for part in parts[1:])
|
f649febdc1f2711bad0b60006a2d6c673a949c7a
| 55,993
|
def load_classes(filename):
    """
    This function takes in a class_file filename and returns a dict of classes and ids (in string format)

    :param filename: the filename of the class_file to load and parse;
        each non-empty line looks like ``id: classname``
    :returns: a dict mapping class name -> id (both strings)
    """
    class_map = {}
    # ``with`` guarantees the handle is closed even if parsing raises
    # (the original only closed on the success path).
    with open(filename, 'r') as class_file:
        # split('\n')[:-1] preserves the original expectation of a
        # trailing newline after the last record.
        for line in class_file.read().split('\n')[:-1]:
            # maxsplit=1 so class names containing ':' stay intact.
            key, val = line.split(':', 1)
            class_map[val.strip()] = key.strip()
    return class_map
|
871ae10e384ab7add390d0bea44f4d7d9846ee87
| 55,998
|
def is_exchange_token_protected(f):
    """Is the given view function exchange token protected?"""
    # The decorator marks protected views with this attribute; absence
    # defaults to False.
    flag = getattr(f, 'is_exchange_token_protected', False)
    return flag
|
78b4980b4ba8a3128fb1c8968e5d8cf1b97ebcce
| 56,002
|
def wattersons_theta_seq(seg_sites, num_seqs):
    """
    Return Watterson's Theta (per sequence)
    """
    # Harmonic number a1 = sum of 1/i for i in 1..num_seqs-1.
    harmonic = sum(1.0 / k for k in range(1, num_seqs))
    return float(seg_sites) / harmonic
|
7ee4729cb71be232b768e8c0359c719f0a20cdc4
| 56,007
|
import itertools
def flatten_weights(weights):
    """Convert weight parameters to a 1 dimension array (more convenient for optimization algorithms)"""
    flat = []
    # Flatten each weight array and append its scalars in order.
    for weights_2d_array in weights:
        flat.extend(weights_2d_array.flatten().tolist())
    return flat
|
c99e3cb415a3923bdd087aa21324f661c28895b0
| 56,010
|
def bool_flag(value):
    """
    Handle the ability to pass a parameter that can
    have no value or a boolean value. No value gives
    True (like the switch is on). Absense of the value
    returns False (switch is off). You can also pass
    "TRUE", "True", "true", "1", "FALSE", "False",
    "false", "0" like: param=True

    Returns None for any unrecognized string (preserved from the
    original implicit fall-through).
    """
    if value is None:
        return False
    if value == '':
        return True
    # Normalize once and consolidate the duplicated comparisons.
    lowered = value.lower()
    if lowered in ('true', '1'):
        return True
    if lowered in ('false', '0'):
        return False
    # Explicit (was an implicit fall-through): unrecognized values -> None.
    return None
|
08e29747b35712796c38243db1f6fcc43a58a5a4
| 56,017
|
import json
def parse(content):
    """
    Get image url, description

    :param bs4.BeautifulSoup content: BeautifulSoup from html content
    :return: image and description
    :rtype: tuple
    """
    # The page embeds its metadata as JSON-LD in a <script> tag.
    script_tag = content.find('script', attrs={'type': 'application/ld+json'})
    info_dict = json.loads(script_tag.text)
    # .get() defaults to None for missing keys, matching the original
    # pre-initialized img/description variables.
    img = info_dict.get('image')
    description = info_dict.get('description')
    return img, description
|
4a17d41e81f9335531b70a12325ace5007d11234
| 56,018
|
import re
def is_valid_flc_policy_id(flc_policy_id):
    """
    Validates a file lifecycle policy ID, also known as the policy "lc_policy_name".

    A valid policy ID should look like: lc-policy-00000003 - It should
    always start with "lc-policy-" and end with 8 hexadecimal characters in lower
    case.

    :type flc_policy_id: str
    :param flc_policy_id: The file lifecycle policy ID to be validated.
    :rtype: bool
    :return: True or False depending on whether flc_policy_id passes validation.
    """
    if flc_policy_id is None:
        return False
    # bool() collapses the Match-or-None result into the documented True/False.
    return bool(re.match(r'^lc-policy-[0-9a-f]{8}$', flc_policy_id))
|
bfce92bc31714228aaabf16fb54300c8dd2905af
| 56,022
|
def prime_sieve_erathosthenes(n):
    """Sieve of Eratosthenes.

    Output the list of all primes p with 2 <= p <= n (the original's
    inclusive upper bound is preserved).

    Args:
        n (int): upper bound, inclusive.

    Returns:
        list: primes up to n; empty list when n < 2. (The original printed
        a message and returned the int 1 for n < 2, which broke callers
        expecting a list.)
    """
    if n < 2:
        return []
    # Boolean-array sieve: O(n log log n), replacing the original
    # O(n^2)-ish repeated membership scans over a growing list.
    is_prime = [True] * (n + 1)
    is_prime[0] = is_prime[1] = False
    for i in range(2, int(n ** 0.5) + 1):
        if is_prime[i]:
            # Start at i*i: smaller multiples were crossed off already.
            for j in range(i * i, n + 1, i):
                is_prime[j] = False
    return [i for i, flag in enumerate(is_prime) if flag]
|
65fdcc624d5691a9ef25d1874bd4a11080e25a47
| 56,026
|
def expScale(initVal, exp):
    """
    Applies an exponent exp to a value initVal and returns value.
    Will work whether initVal is positive or negative or zero.
    """
    if initVal > 0:
        return initVal ** exp
    if initVal < 0:
        # Mirror through zero so fractional exponents of negative inputs
        # stay real: apply the exponent to |initVal|, then restore the sign.
        return -((-initVal) ** exp)
    return initVal
|
5c897f394f28697c17121ebff71734b846f85bf0
| 56,029
|
import socket
def _create_socket_address(ip_address, port):
"""Convert a given IPv6 address (string) and port number into a socket address"""
# `socket.getaddrinfo()` returns a list of `(family, socktype, proto, canonname, sockaddr)` where `sockaddr`
# (at index 4) can be used as input in socket methods (like `sendto()`, `bind()`, etc.).
return socket.getaddrinfo(ip_address, port)[0][4]
|
5f79cdaa98885a97f5d119c6031c935013b3eeae
| 56,033
|
def dir_dicom_paths(directory):
    """Return list of paths to files within a directory at any level below, beginning with 'I'. This is the naming
    convention expected for the project.

    Args:
        directory (pathlib Path): A path

    Returns:
        (list of pathlib Paths) All paths to files beginning with 'I' within the given directory
    """
    # Recursive glob; materialize the generator into a list.
    matches = directory.glob("**/I*")
    return list(matches)
|
d30753fe36350a0067dc0b6fd6f6a5d4b02a56b1
| 56,037
|
def slice_size(slice_objects):
    """
    Returns the total number of elements in the combined slices
    Also works if given a single slice
    """
    # A bare slice is not iterable; iter() raises TypeError for it, matching
    # the original EAFP fallback to single-slice handling.
    try:
        slices = iter(slice_objects)
    except TypeError:
        slices = iter([slice_objects])
    total = 0
    for sl in slices:
        # Count of indices hit by the slice, assuming explicit start/stop/step.
        total += (sl.stop - (sl.start + 1)) // sl.step + 1
    return total
|
ef3ae504537390cbe2a1458298813f51a4ddb89f
| 56,040
|
def reverseByteOrder(data):
    """Reverses the byte order of an int (16-bit) or long (32-bit) value.

    Works for any non-negative int; the width used is the minimum number of
    bytes needed to represent the value (at least one).

    :param data: non-negative integer
    :return: integer with its bytes reversed
    :raises OverflowError: if data is negative (the original silently
        produced garbage for negative inputs)
    """
    # Courtesy Vishal Sapre
    # int.to_bytes/from_bytes replace the original fragile byte counting
    # via hex-string slicing (hex(data)[2:].replace('L','')[::2]).
    byte_count = max(1, (data.bit_length() + 7) // 8)
    return int.from_bytes(data.to_bytes(byte_count, 'big'), 'little')
|
17b82e885c8fd52dab54f428a75e739a8dd60102
| 56,042
|
import random
import math
def allowed_value(client, field_id):
    """Creates and returns a value that is allowed for the given field_id."""
    # Look up the field definition to learn its type and bounds.
    field = client.single_field(field_id)
    if field["field_type"] == "numeric":
        min_val = field["field_min"]
        max_val = field["field_max"]
        # Fall back to the 0..99 range when bounds are unset.
        if min_val is None:
            min_val = 0
        if max_val is None:
            max_val = 99
        # randint needs int bounds: round the minimum up and the maximum down.
        return random.randint(int(math.ceil(min_val)), int(max_val))
    # Canned sample values for every non-numeric field type.
    defaults = {
        "numeric": "1",
        "date": "11-11-2017",
        "string": "testing",
        "dropdown": "1",
        "radio": "1",
        "textarea": "testing",
        "slider": "5",
        "checkbox": "1",
        "calculation": "5",
        "year": "2005",
    }
    return defaults[field["field_type"]]
|
aa151e277f14fc4b32af6cb708f6ba31082bad16
| 56,045
|
def E(G):
    """
    Returns a set of edges on a graph

    Parameters
    ----------
    G = A networkx graph.

    Returns
    -------
    set of edges belonging to graph graph
    """
    edges = G.edges()
    return set(edges)
|
67166de146cbb437080f123204d398bde258fb0c
| 56,048
|
def compute_dl_target(location, lpp_source, nim_lpp_sources):
    """
    Compute suma DL target based on lpp source name.

    When the location is empty, set the location path to
    /usr/sys/inst.images

    Check if a lpp_source NIM resource already exist and check the path is
    the same

    When the location is not a path, check that a NIM lpp_source
    corresponding to the location value exists, and returns the
    location path of this NIM ressource.

    :param location: requested download location; either an absolute path,
        a NIM lpp_source name, or empty/blank for the default path
    :param lpp_source: name of the lpp_source NIM resource
    :param nim_lpp_sources: dict mapping known lpp_source names to paths

    return:
        return code : 0 - OK
                      1 - if error
        dl_target value or msg in case of error
    """
    # Blank/empty location falls back to the default install-images path.
    if not location or not location.strip():
        loc = "/usr/sys/inst.images"
    else:
        loc = location.rstrip('/')
    if loc[0] == '/':
        # Absolute path: the target is <path>/<lpp_source>; it must not
        # conflict with an existing NIM resource of the same name that
        # points somewhere else.
        dl_target = "{}/{}".format(loc, lpp_source)
        if lpp_source in nim_lpp_sources \
           and nim_lpp_sources[lpp_source] != dl_target:
            return 1, "SUMA Error: lpp source location mismatch. It already " \
                      "exists a lpp source '{}' with a location different as '{}'" \
                      .format(lpp_source, dl_target)
    else:
        # Not a path: treat ``loc`` as a NIM lpp_source name and resolve it
        # to that resource's registered location.
        if loc not in nim_lpp_sources:
            return 1, "SUMA Error: lpp_source: '{}' does not exist" \
                      .format(loc)
        dl_target = nim_lpp_sources[loc]
    return 0, dl_target
|
24ec97eb7763025b89fc87b0644228fe2411ce06
| 56,051
|
def fix_spaces_in_column_names(columns):
    """Returns the cleaned-up column names without spaces and in upper-case."""
    # Per column: trim, remove all internal whitespace, drop dots, upper-case.
    return ["".join(col.strip().split()).replace(".", "").upper()
            for col in columns]
|
774734f4b613027daddadcac5167950484673968
| 56,054
|
def fixup_columns(cols):
    """Replace integer index-location columns with `col`-prefixed names.

    Args:
        cols (list): List of original columns (names or integer positions).

    Returns:
        list: List of column names; integers become 'col<N>', everything
        else is passed through unchanged.
    """
    out_cols = []
    for col in cols:
        # isinstance is the idiomatic type check; bool is excluded because
        # it subclasses int and the original strict type(...) == int check
        # let booleans pass through unchanged.
        if isinstance(col, int) and not isinstance(col, bool):
            out_cols.append('col{:d}'.format(col))
        else:
            out_cols.append(col)
    return out_cols
|
74851613a72c9e9052b495509b6d40faf6d094df
| 56,058
|
def represent_option(options, default="-"):
    """
    Build a representation function for an option dict.

    Args:
        options: the options dict
        default: the value returned for unknown options

    Returns:
        function: a callable ``represent(value, row=None)`` mapping an
        option value to its label, falling back to *default*.
    """
    def represent(value, row=None):
        # EAFP lookup: unknown keys fall back to the default marker.
        try:
            return options[value]
        except KeyError:
            return default
    return represent
|
de8eb888c54c7245f812117bcf6a38b107b323b2
| 56,064
|
def find_index_of(string, character, occurrence=1):
    """Find the n-th index of a character in a string.

    Args:
        string: Text to search.
        character: Character to look for.
        occurrence: Which match to report, 1-based (default 1).

    Returns:
        The 0-based index of the requested occurrence, or -1 when the
        string contains fewer than *occurrence* matches.
    """
    count = 0
    # enumerate avoids the unidiomatic range(len(...)) index loop.
    for index, char in enumerate(string):
        if char == character:
            count += 1
            if count == occurrence:
                return index
    return -1
|
2993a82345f0ea337f31a5009904201df2692b4e
| 56,065
|
def snapshot_factory(request):
    """
    Snapshot factory. Calling this fixture creates a volume snapshot from the
    specified PVC.
    """
    # Every snapshot created by the factory is recorded here so the
    # finalizer can tear it down. The original code never appended to
    # this list, so the finalizer was a silent no-op and snapshots leaked.
    instances = []
    def factory(
        pvc_obj,
        wait=True,
        snapshot_name=None
    ):
        """
        Args:
            pvc_obj (PVC): PVC object from which snapshot has to be created
            wait (bool): True to wait for snapshot to be ready, False otherwise
            snapshot_name (str): Name to be provided for snapshot

        Returns:
            OCS: OCS instance of kind VolumeSnapshot
        """
        snap_obj = pvc_obj.create_snapshot(snapshot_name=snapshot_name, wait=wait)
        # Register for cleanup in the finalizer below.
        instances.append(snap_obj)
        return snap_obj
    def finalizer():
        """
        Delete the snapshots
        """
        for instance in instances:
            if not instance.is_deleted:
                instance.delete()
                instance.ocp.wait_for_delete(instance.name)
    request.addfinalizer(finalizer)
    return factory
|
302ebb5f611813faa843836d80a0f05da2848d8d
| 56,067
|
def get_creator_node_id(local_node_id, response):
    """Return the node ID of the node that submitted the given create request."""
    # A directionality flag of 1 marks a remotely-originated request.
    request_is_remote = response.directionality_flag == 1
    return response.remote_node_id if request_is_remote else local_node_id
|
0466b37c11162583fa0034ccc4247eacd8a4e59d
| 56,075
|
def calc_dg_frac_load_method(dg_profile, dg_requirement, regional_load, settings):
    """Calculate hourly distributed generation for one region where DG must
    supply a fixed fraction of total sales.

    Parameters
    ----------
    dg_profile : Series
        Hourly normalized generation profile
    dg_requirement : float
        The fraction of total sales that DG must constitute
    regional_load : Series
        Hourly load for a given region
    settings : dict
        User-defined parameters from a settings file (must contain
        "avg_distribution_loss")

    Returns
    -------
    Series
        8760 hourly generation values
    """
    loss_fraction = settings["avg_distribution_loss"]
    # Generation required to meet the sales fraction, net of distribution losses.
    required_generation = regional_load.sum() * dg_requirement * (1 - loss_fraction)
    # Size the capacity so the profile's average output meets the requirement
    # (division order kept identical to preserve floating-point results).
    capacity = required_generation / 8760 / dg_profile.mean()
    return dg_profile * capacity
|
d9d9b28f6b68f8040fd960e8b04cdbf74b9bd20d
| 56,077
|
import torch
import warnings
def check_min_max_valid(min_val: torch.Tensor, max_val: torch.Tensor) -> bool:
    """Return True when the observed min/max tensors form a usable range.

    The range is invalid (False is returned, with a warning) when either
    tensor is empty, or when scalar values are still at their +/-inf
    initialization. Raises AssertionError if any min exceeds its max.
    """
    warning_msg = (
        "must run observer before calling calculate_qparams. "
        "Returning default values."
    )
    # No observations recorded at all.
    if min_val.numel() == 0 or max_val.numel() == 0:
        warnings.warn(warning_msg)
        return False
    if min_val.dim() == 0 or max_val.dim() == 0:
        # Scalar case: +inf/-inf means the observer never saw data.
        if min_val == float("inf") and max_val == float("-inf"):
            warnings.warn(warning_msg)
            return False
        assert min_val <= max_val, "min {} should be less than max {}".format(
            min_val, max_val
        )
    else:
        assert torch.all(
            min_val <= max_val
        ), "min {} should be less than max {}".format(min_val, max_val)
    return True
|
3659bb7e18b4846fbeaf43d8302037e59024c058
| 56,080
|
from typing import Callable
def callable_name(func: Callable) -> str:
    """Return the qualified name (e.g. package.module.func) for the given callable."""
    module = func.__module__
    # Builtins are universally known, so their module prefix is omitted.
    if module == 'builtins':
        return func.__name__
    return f'{module}.{func.__qualname__}'
|
6eeeb86d333ee1090957d656c2545a4a609ff918
| 56,081
|
def remove_cations(SMILES):
    """
    Remove periodic table group 1 and group 17 counterions from a SMILES
    string.

    Args:
    -----
    SMILES (str) -- the SMILES string representation of the
    molecule.

    Returns:
    --------
    SMILES (str) -- the string representation of the molecule with
    the counterions omitted.
    """
    assert isinstance(SMILES, str), 'the SMILES must be a string'
    # Group 1 cations and group 17 anions that can appear as counterions.
    counterions = {'[Li+]', '[Na+]', '[K+]', '[Rb+]', '[Cs+]', '[Fr+]', '[F-]',
                   '[Cl-]', '[Br-]', '[I-]', '[At-]'}
    kept_fragments = (frag for frag in SMILES.split(".") if frag not in counterions)
    return '.'.join(kept_fragments)
|
49ad3cfda1fa2be267bf9bf070774079db4b0370
| 56,083
|
def find_first_month_and_year(num_months, end_month, end_year):
    """Find the month and year that is num_months prior to end_month, end_year.

    num_months includes both the end month and the first month.

    Args:
        num_months: Total number of months in the window (inclusive).
        end_month: Month (1-12) of the last month in the window.
        end_year: Year of the last month in the window.

    Returns:
        Tuple (first_month, first_year) of the earliest month in the window.
    """
    excess = num_months - end_month
    # divmod performs floor division: the original "/" produced a float
    # under Python 3, making the returned year a float. divmod also keeps
    # Python 2's floor semantics for negative excess (windows shorter than
    # the final partial year).
    full_years_prior, months_in_partial_years_prior = divmod(excess, 12)
    if months_in_partial_years_prior > 0:
        first_year = end_year - full_years_prior - 1
        first_month = 13 - months_in_partial_years_prior
    else:
        first_year = end_year - full_years_prior
        first_month = 1
    return first_month, first_year
|
e1985aa198a3f2ab2625bd699bb1621586172037
| 56,085
|
def is_timedef(keyword):
    """Return True when the keyword is a time-definition keyword.

    Args:
        keyword: Profiles keyword (fixed-width; includes trailing padding).

    Returns:
        True if the keyword defines the time axis.
    """
    # Keywords are fixed-width strings, so the trailing spaces are significant.
    time_keywords = {'TIME ', 'DAY ', 'MONTH ', 'YEAR ', 'YEARS '}
    return keyword in time_keywords
|
77e7db6d4ea3ea2a520ef8c79b18eea207131ddc
| 56,086
|
import random
def choose_true_edges(edges, K):
    """
    Randomly choose a fixed number of existing edges.

    :param edges: The graph's edges (a sequence)
    :param K: Fixed number of edges to choose
    :return: A list of K distinct true edges
    """
    # Sample over the full index range: the original range(1, len(edges))
    # silently excluded index 0, so the first edge could never be chosen.
    indexes = random.sample(range(len(edges)), K)
    return [edges[i] for i in indexes]
|
9ed573e5e5a2b45ec2327a63c5383e2006dbd496
| 56,087
|
import binascii
def hb(b):
    """
    Return the lowercase hex string for a bytes-like object.

    Example:
        hb(b'\x12\xde\x11\x22\x33\x44') == '12de11223344'
    """
    # bytes.hex() is the modern equivalent of binascii.hexlify(b).decode().
    return b.hex()
|
ff3787d611cbfb073fc6a92a74a89736ce7c8fed
| 56,088
|
def operating_roa(operating_income, average_assets):
    """Compute operating return on assets.

    Parameters
    ----------
    operating_income : int or float
        Operating income.
    average_assets : int or float
        Average total assets.

    Returns
    -------
    int or float
        Operating income divided by average total assets.
    """
    ratio = operating_income / average_assets
    return ratio
|
544955f388d67139122a86153b3336b69f3124b3
| 56,090
|
def process_style(line, style_re):
    """Process the line for the specified style compiled regular expression,
    returning the (line, style) pair. The style marker is stripped from the
    line; when no marker is present the style is the empty string."""
    match = style_re.search(line)
    # Guard clause: no style marker found.
    if match is None:
        return (line, '')
    return (style_re.sub('', line), match.group(1))
|
f3660b099a6b8ba4b266761da3fb724425e06afe
| 56,095
|
def define_orderers(orderer_names, orderer_hosts, domain=None):
    """Define orderers as connection objects.

    Args:
        orderer_names (Iterable): List of orderer names.
        orderer_hosts (Iterable): List of orderer hosts.
        domain (str): Domain used. Defaults to None.

    Returns:
        dict: A dictionary of orderer connections keyed by (optionally
        domain-qualified) name.
    """
    def qualified(name):
        # Append the domain suffix only when one was supplied.
        return "{name}.{domain}".format(name=name, domain=domain) if domain else name
    return {
        qualified(name): {"url": ("grpc://" + host + ":7050")}
        for name, host in zip(orderer_names, orderer_hosts)
    }
|
32ea2ed7b89afdf64356674e9a1fa0aea85792e3
| 56,096
|
def Eq(state, command, *column_values):
    """Equals filter

    Accepts one or more column-value pairs. Keep only rows where value in the
    column equals specified value. Values are additionally compared against
    their int and float conversions when those conversions succeed, so a
    textual "1" also matches numeric 1 and 1.0.

    Example: Eq~column1~1
    """
    df = state.df()
    for c, v in state.expand_column_values(column_values):
        state.log_info(f"Equals: {c} == {v}")
        index = df[c] == v
        # Best-effort numeric matching. The exceptions are narrowed from the
        # original bare "except:", which also swallowed KeyboardInterrupt
        # and SystemExit; int()/float() on a bad value raise ValueError or
        # TypeError.
        try:
            index = index | (df[c] == int(v))
        except (ValueError, TypeError):
            pass
        try:
            index = index | (df[c] == float(v))
        except (ValueError, TypeError):
            pass
        df = df.loc[index, :]
    return state.with_df(df)
|
df3297444d3c91d1b88a118593f429583e9b4e94
| 56,101
|
def overall_coverage(coverage):
    """Overall proof coverage.

    Extracts the percentage, hit and total fields from the coverage
    object's overall_coverage mapping into a plain dict.
    """
    summary = coverage.overall_coverage
    return {field: summary[field] for field in ('percentage', 'hit', 'total')}
|
690d4d9f5b7bf5bed0c4ebaffc6b6793b04bdcfa
| 56,111
|
def get_node_information(data_node):
    """
    Parse a node of type "data" from a GPML xml file.

    :param data_node: XML node of the type "data"
    :return: node id and structured information of the form:
        {
            "database_name": database_name,
            "database_id": database_id,
            "text": text,
            "type": type,
            "groupref": groupref
        }
    """
    # Cross-reference: database and its corresponding unique identifier.
    xref = data_node.find("xref")
    attrs = data_node.attrs
    # Locals renamed (node_id, node_type) to avoid shadowing the builtins
    # id() and type().
    node_id = attrs.get("graphid", None)
    node_type = attrs.get("type", "unknown")
    document = {
        "database_name": xref.attrs["database"],
        "database_id": xref.attrs["id"],
        "text": attrs["textlabel"],
        "type": node_type,
        "groupref": attrs.get("groupref", None),
    }
    return (node_id, document)
|
4798f9fc440082b4b9e82ffce05b4a63150dd94d
| 56,118
|
def validate_engine(engine):
    """
    Validate database Engine for DBInstance

    Property: DBInstance.Engine
    Property: DBCluster.Engine
    """
    VALID_DB_ENGINES = (
        "MySQL",
        "mysql",
        "oracle-se1",
        "oracle-se2",
        "oracle-se",
        "oracle-ee",
        "sqlserver-ee",
        "sqlserver-se",
        "sqlserver-ex",
        "sqlserver-web",
        "postgres",
        "aurora",
        "aurora-mysql",
        "aurora-postgresql",
        "mariadb",
    )
    # Guard clause: valid engines pass straight through.
    if engine in VALID_DB_ENGINES:
        return engine
    raise ValueError(
        "DBInstance Engine must be one of: %s" % ", ".join(VALID_DB_ENGINES)
    )
|
d008010c369eca2cd2d01b9928b388c062db6647
| 56,119
|
def insert_nulls(df, fill_val=-99999999.99):
    """Replace fill_val with null in all columns of df.

    :df: Dataframe
    :fill_val: fill value to be replaced with nulls.
               default=-99999999.99
    :returns: Dataframe with fill_val replaced with nulls (mutated in place
              and returned)
    """
    # .ix was deprecated and removed in pandas 1.0; .loc is the supported
    # label-based equivalent for this boolean-mask assignment.
    for col in df.columns:
        df.loc[df[col] == fill_val, col] = None
    return df
|
71fdf29a16916ee1f119b5267043960c9a6d5227
| 56,120
|
def download_reads_from_pangea(sample, module_name, field_name):
    """Return the local filepath for reads from the sample after downloading.

    Fetches the named field of the sample's analysis result and writes it
    to ``<sample.name>_reads.fq.gz`` in the working directory.
    """
    local_path = f'{sample.name}_reads.fq.gz'
    analysis_result = sample.analysis_result(module_name).get()
    analysis_result.field(field_name).get().download_file(filename=local_path)
    return local_path
|
03de7be0e42b3ad0fac5df0377978d6ca262a393
| 56,127
|
def format_for_output(metric_dict):
    """
    Convert a dictionary into a list of "key: value" lines for output logging,
    terminated by a blank line.

    # Parameters
    metric_dict: `Dict[str, Dict[str, str or float]]`

    # Returns
    `List[str]`
    """
    lines = [f"{key}: {value}\n" for key, value in metric_dict.items()]
    lines.append("\n")
    return lines
|
b3c5493bda17dc32c522d991010b0d6e7164517d
| 56,128
|
def _encode_text(text: str) -> list[int]:
    """
    Converts the text from literal characters to numerical values.
    Each character maps to its Unicode code point via ord(), so the
    conversion is A -> 65, B -> 66, ... (not an A -> 1, B -> 2 table).
    Args:
        text (str): Text to be encoded.
    Returns:
        list[int]: Encoded text as a list of ints.
                   Each position corresponds to a character
                   from input text.
    """
    return [ord(c) for c in text]
|
4a39aa74e7f289376c35e0214bf9736496182cfc
| 56,130
|
def convert_iterable_to_string_of_types(iterable_var):
    """Convert an iterable of values to a string of their types.

    Parameters
    ----------
    iterable_var : iterable
        An iterable of variables, e.g. a list of integers.

    Returns
    -------
    str
        String representation of the types in `iterable_var`, one per
        item, separated by commas.
    """
    return ", ".join(str(type(item)) for item in iterable_var)
|
f67d6ff4e96e272ecdd48b51a219054b6578ebc5
| 56,131
|
def shorten(name):
    """
    Return a shortened version of a name so it can be used in a file path.

    Keeps only the text before the first space, lower-cased.
    """
    first_token, _, _ = name.partition(" ")
    return first_token.lower()
|
ce350319b1db23131a901ec7ba580fb62cfcaed6
| 56,132
|
def numberOfPhotosInDB(dbCursor):
    """Returns the number of all photos in database pointed to by dbCursor.

    Uses SELECT COUNT(*) so the count is computed by the database instead
    of fetching every row into memory just to measure len() on the result.
    """
    dbCursor.execute('''SELECT COUNT(*) FROM eyesInPhotos''')
    return dbCursor.fetchone()[0]
|
95e2c5d8763d516bc833107ff61e8c1b499ee6ad
| 56,133
|
def coverage(reference: str, target: str) -> int:
    """
    Count alignment positions where neither sequence has a gap ("-").

    >>> coverage("---ATGGC", "GTTA-GGG")
    4
    """
    return sum(
        1
        for ref_char, tgt_char in zip(reference, target)
        if ref_char != "-" and tgt_char != "-"
    )
|
dddf35feaa5c05ef5d571d216df412c8302e94a9
| 56,136
|
def get_ffmpeg_dimens(input_size: tuple[int, int], max_res: int) -> str:
    """
    Build an ffmpeg scale filter string capping one side at max_res.

    :param input_size: Tuple of (width, height)
    :param max_res: Maximum resolution (as in 1080p, 720p, 360p, etc)
    :return: String that can be used in ffmpeg -vf
    """
    width, height = input_size
    if width < height:
        # Portrait: cap the width; -2 lets ffmpeg pick an even height.
        return f"scale={min(max_res, width)}:-2"
    # Landscape/square: cap the height; -2 lets ffmpeg pick an even width.
    return f"scale=-2:{min(max_res, height)}"
|
01d38ec7f60e0ff20844fca60e709da05291eab6
| 56,137
|
def calculate_ranks(true_entity_score, all_scores):
    """
    Calculate the rank of the true entity.

    :param true_entity_score: torch.Tensor - shape (batch, 1)
        The score of the true entity
    :param all_scores: torch.Tensor - shape (batch, num_entities)
        The scores of all entities
    :return: torch.Tensor - shape (batch,)
        The rank of the true entity (1 = best)
    """
    assert len(true_entity_score.shape) == 2
    assert len(all_scores.shape) == 2
    # Rank = 1 + number of entities scoring strictly higher than the truth;
    # ties do not worsen the rank.
    strictly_better = all_scores > true_entity_score
    return strictly_better.sum(dim=1) + 1
|
ed0d25524557f0e103d5697fc3c5060a17829251
| 56,139
|
def addRoundKey(state, roundKey):
    """XOR the 16-byte round key into the state, in place.

    Mutates *state* and returns it for convenience.
    """
    for index in range(16):
        state[index] = state[index] ^ roundKey[index]
    return state
|
b2ad011511cf131f8c1b72ff504b66e9958080e4
| 56,144
|
def ntToPosixSlashes(filepath):
    """
    Replaces all occurrences of NT slashes (\\) in provided
    filepath with Posix ones (/)

    >>> ntToPosixSlashes('C:\\Windows')
    'C:/Windows'
    """
    # Falsy inputs (None, empty string) are returned untouched.
    if not filepath:
        return filepath
    return filepath.replace('\\', '/')
|
528caf6ee8e1514fd73cd9b65006d797331eadd9
| 56,145
|
def listsplit(sourcelist, delimiter):
    """
    Splits a list at a given delimiter. Returns a list of lists.
    Consecutive delimiters produce no empty groups, and leading or
    trailing delimiters are dropped.

    Example:
    >>> l = ['a', '', 'b', 'c', '', '', 'd']
    >>> listsplit(l, '')
    [['a'], ['b', 'c'], ['d']]
    """
    result = []
    current = []
    for item in sourcelist:
        if item == delimiter:
            # Close the current group; empty groups are never emitted.
            if current:
                result.append(current)
                current = []
        else:
            current.append(item)
    if current:
        result.append(current)
    return result
|
6642122e93fed44603f9c5977766546b8fa5699e
| 56,149
|
def parse_phonemes(phonemes):
    """Parse mimic phoneme string into a list of phone, duration pairs.

    Arguments
        phonemes (bytes): phoneme output from mimic

    Returns:
        (list) list of phoneme duration pairs
    """
    pairs = []
    for token in phonemes.decode().split(' '):
        # Tokens without a ':' separator carry no duration and are skipped.
        if ':' in token:
            pairs.append(token.split(':'))
    return pairs
|
0d78d31fe369e193b18478119707e8b9de6159fd
| 56,151
|
def get_environment_variable_name(register_name: str) -> str:
    """
    Convert the name of the register to the name of the environment variable that assigns it.

    >>> get_environment_variable_name("m.motor.inductance_dq")
    'M__MOTOR__INDUCTANCE_DQ'
    """
    # Each dot becomes a double underscore; the whole name is upper-cased.
    return "__".join(register_name.split(".")).upper()
|
c19d5d3abea6f338acea7107bbfed7fca8ee6213
| 56,152
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.