| repository_name | func_path_in_repository | func_name | language | split_name | func_code_url |
|---|---|---|---|---|---|
| riga/tfdeploy | tfdeploy.py | Unpack | python | train | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1216-L1220 |

```python
def Unpack(a, num, axis):
    """
    Unpack op.
    """
    return tuple(np.squeeze(b, axis=axis) for b in np.split(a, num, axis=axis))
```
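The implementation splits along the axis and squeezes away the now-singleton dimension, mirroring TensorFlow's unstack semantics in plain numpy. A minimal check of what it returns, assuming `np` is numpy as in the source file:

```python
import numpy as np

def Unpack(a, num, axis):
    return tuple(np.squeeze(b, axis=axis) for b in np.split(a, num, axis=axis))

a = np.arange(6).reshape(2, 3)
parts = Unpack(a, 2, 0)  # two (3,) arrays, like tf.unstack(a, axis=0)
assert all(np.array_equal(p, a[i]) for i, p in enumerate(parts))
```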
| riga/tfdeploy | tfdeploy.py | ReverseSequence | python | train | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1224-L1243 |

```python
def ReverseSequence(a, seq_lengths, seq_dim, batch_dim):
    """
    Sequential reverse op.
    """
    r = np.copy(a)
    invidxs = (len(r.shape) - 1) * [slice(None)]
    if seq_dim < batch_dim:
        invidxs[seq_dim] = slice(None, None, -1)
    else:
        invidxs[seq_dim - 1] = slice(None, None, -1)
    _invidxs...
```
| riga/tfdeploy | tfdeploy.py | ReverseV2 | python | train | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1247-L1252 |

```python
def ReverseV2(a, axes):
    """
    Reverse op.
    """
    idxs = tuple(slice(None, None, 2 * int(i not in axes) - 1) for i in range(len(a.shape)))
    return np.copy(a[idxs]),
```
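ReverseV2 builds a per-dimension slice whose step is -1 for reversed axes and +1 otherwise; the expression `2 * int(i not in axes) - 1` maps membership to that sign. A quick equivalence check against `np.flip`, assuming numpy:

```python
import numpy as np

def ReverseV2(a, axes):
    idxs = tuple(slice(None, None, 2 * int(i not in axes) - 1) for i in range(len(a.shape)))
    return np.copy(a[idxs]),

a = np.arange(12).reshape(3, 4)
out, = ReverseV2(a, axes=[1])
assert np.array_equal(out, np.flip(a, axis=1))
```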
| riga/tfdeploy | tfdeploy.py | Betainc | python | train | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1588-L1592 |

```python
def Betainc(a, b, x):
    """
    Regularized incomplete beta op.
    """
    return sp.special.betainc(a, b, x),
```
| riga/tfdeploy | tfdeploy.py | Diag | python | train | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1600-L1607 |

```python
def Diag(a):
    """
    Diag op.
    """
    r = np.zeros(2 * a.shape, dtype=a.dtype)
    for idx, v in np.ndenumerate(a):
        r[2 * idx] = v
    return r,
```
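Since `a.shape` and `idx` are tuples, `2 * a.shape` and `2 * idx` concatenate rather than scale, so the output has shape `a.shape + a.shape` with `out[idx + idx] = a[idx]`; for 1-D input this coincides with `np.diag`. A small check, assuming numpy:

```python
import numpy as np

def Diag(a):
    r = np.zeros(2 * a.shape, dtype=a.dtype)  # 2 * tuple concatenates, not scales
    for idx, v in np.ndenumerate(a):
        r[2 * idx] = v
    return r,

a = np.array([1, 2, 3])
out, = Diag(a)
assert np.array_equal(out, np.diag(a))
```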
| riga/tfdeploy | tfdeploy.py | MatrixDiagPart | python | train | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1619-L1627 |

```python
def MatrixDiagPart(a):
    """
    Batched diag op that returns only the diagonal elements.
    """
    r = np.zeros(a.shape[:-2] + (min(a.shape[-2:]),))
    for coord in np.ndindex(a.shape[:-2]):
        pos = coord + (Ellipsis,)
        r[pos] = np.diagonal(a[pos])
    return r,
```
| riga/tfdeploy | tfdeploy.py | MatMul | python | train | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1631-L1636 |

```python
def MatMul(a, b, transpose_a, transpose_b):
    """
    Matrix multiplication op.
    """
    return np.dot(a if not transpose_a else np.transpose(a),
                  b if not transpose_b else np.transpose(b)),
```
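Note that `np.transpose` with no axes argument reverses *all* dimensions, so this matches TensorFlow's matmul semantics only for 2-D operands (`np.dot` also differs from batched matmul for higher ranks). A 2-D sanity check, assuming numpy:

```python
import numpy as np

def MatMul(a, b, transpose_a, transpose_b):
    return np.dot(a if not transpose_a else np.transpose(a),
                  b if not transpose_b else np.transpose(b)),

a = np.random.rand(2, 3)
b = np.random.rand(2, 4)
out, = MatMul(a, b, transpose_a=True, transpose_b=False)  # (3, 4)
assert np.allclose(out, a.T @ b)
```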
| riga/tfdeploy | tfdeploy.py | MatrixInverse | python | train | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1648-L1652 |

```python
def MatrixInverse(a, adj):
    """
    Matrix inversion op.
    """
    return np.linalg.inv(a if not adj else _adjoint(a)),
```

| riga/tfdeploy | tfdeploy.py | MatrixSolve | python | train | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1664-L1668 |

```python
def MatrixSolve(a, rhs, adj):
    """
    Matrix solve op.
    """
    return np.linalg.solve(a if not adj else _adjoint(a), rhs),
```
| riga/tfdeploy | tfdeploy.py | MatrixTriangularSolve | python | train | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1672-L1684 |

```python
def MatrixTriangularSolve(a, rhs, lower, adj):
    """
    Matrix triangular solve op.
    """
    trans = 0 if not adj else 2
    r = np.empty(rhs.shape).astype(a.dtype)
    for coord in np.ndindex(a.shape[:-2]):
        pos = coord + (Ellipsis,)
        r[pos] = sp.linalg.solve_triangular(a[pos] if not adj else np.c...
```

| riga/tfdeploy | tfdeploy.py | MatrixSolveLs | python | train | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1688-L1697 |

```python
def MatrixSolveLs(a, rhs, l2_reg):
    """
    Matrix least-squares solve op.
    """
    r = np.empty(rhs.shape).astype(a.dtype)
    for coord in np.ndindex(a.shape[:-2]):
        pos = coord + (Ellipsis,)
        r[pos] = np.linalg.lstsq(a[pos], rhs[pos])[0]
    return r,
```
| riga/tfdeploy | tfdeploy.py | SelfAdjointEig | python | train | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1701-L1707 |

```python
def SelfAdjointEig(a):
    """
    Eigen decomp op.
    """
    shape = list(a.shape)
    shape[-2] += 1
    return np.append(*np.linalg.eig(a)).reshape(*shape),
```
| riga/tfdeploy | tfdeploy.py | Svd | python | train | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1719-L1724 |

```python
def Svd(a, uv, full):
    """
    Singular value decomposition op.
    """
    if not uv:
        # with compute_uv=False, numpy returns only the singular values,
        # so the three-way unpack below would fail
        return np.linalg.svd(a, full_matrices=full, compute_uv=False),
    u, s, v = np.linalg.svd(a, full_matrices=full, compute_uv=True)
    return s, u, v
```
| riga/tfdeploy | tfdeploy.py | Sum | python | train | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1804-L1809 |

```python
def Sum(a, axis, keep_dims):
    """
    Sum reduction op.
    """
    return np.sum(a, axis=axis if not isinstance(axis, np.ndarray) else tuple(axis),
                  keepdims=keep_dims),
```

| riga/tfdeploy | tfdeploy.py | Prod | python | train | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1813-L1818 |

```python
def Prod(a, axis, keep_dims):
    """
    Prod reduction op.
    """
    return np.prod(a, axis=axis if not isinstance(axis, np.ndarray) else tuple(axis),
                   keepdims=keep_dims),
```

| riga/tfdeploy | tfdeploy.py | Min | python | train | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1822-L1827 |

```python
def Min(a, axis, keep_dims):
    """
    Min reduction op.
    """
    return np.amin(a, axis=axis if not isinstance(axis, np.ndarray) else tuple(axis),
                   keepdims=keep_dims),
```

| riga/tfdeploy | tfdeploy.py | Max | python | train | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1831-L1836 |

```python
def Max(a, axis, keep_dims):
    """
    Max reduction op.
    """
    return np.amax(a, axis=axis if not isinstance(axis, np.ndarray) else tuple(axis),
                   keepdims=keep_dims),
```

| riga/tfdeploy | tfdeploy.py | Mean | python | train | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1840-L1845 |

```python
def Mean(a, axis, keep_dims):
    """
    Mean reduction op.
    """
    return np.mean(a, axis=axis if not isinstance(axis, np.ndarray) else tuple(axis),
                   keepdims=keep_dims),
```

| riga/tfdeploy | tfdeploy.py | All | python | train | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1849-L1854 |

```python
def All(a, axis, keep_dims):
    """
    All reduction op.
    """
    return np.all(a, axis=axis if not isinstance(axis, np.ndarray) else tuple(axis),
                  keepdims=keep_dims),
```

| riga/tfdeploy | tfdeploy.py | Any | python | train | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1858-L1863 |

```python
def Any(a, axis, keep_dims):
    """
    Any reduction op.
    """
    return np.any(a, axis=axis if not isinstance(axis, np.ndarray) else tuple(axis),
                  keepdims=keep_dims),
```
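All seven reduction ops share one pattern: TensorFlow hands the reduction axes over as an int, None, or an ndarray, while numpy's `axis` argument wants an int, None, or tuple, so each op converts an ndarray axis to a tuple inline. The repetition could be factored into a tiny helper; a sketch (the helper name is ours, not tfdeploy's):

```python
import numpy as np

def _normalize_axis(axis):
    # numpy expects an int, None, or tuple of ints, never an ndarray
    return tuple(axis) if isinstance(axis, np.ndarray) else axis

def Sum(a, axis, keep_dims):
    return np.sum(a, axis=_normalize_axis(axis), keepdims=keep_dims),

a = np.ones((2, 3, 4))
out, = Sum(a, np.array([0, 2]), keep_dims=False)  # reduce axes 0 and 2
assert out.shape == (3,) and np.all(out == 8)
```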
| riga/tfdeploy | tfdeploy.py | SegmentSum | python | train | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1881-L1886 |

```python
def SegmentSum(a, ids, *args):
    """
    Segmented sum op.
    """
    func = lambda idxs: reduce(np.add, a[idxs])
    return seg_map(func, a, ids),
```

| riga/tfdeploy | tfdeploy.py | SegmentProd | python | train | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1890-L1895 |

```python
def SegmentProd(a, ids):
    """
    Segmented prod op.
    """
    func = lambda idxs: reduce(np.multiply, a[idxs])
    return seg_map(func, a, ids),
```

| riga/tfdeploy | tfdeploy.py | SegmentMin | python | train | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1899-L1904 |

```python
def SegmentMin(a, ids):
    """
    Segmented min op.
    """
    func = lambda idxs: np.amin(a[idxs], axis=0)
    return seg_map(func, a, ids),
```

| riga/tfdeploy | tfdeploy.py | SegmentMax | python | train | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1908-L1913 |

```python
def SegmentMax(a, ids):
    """
    Segmented max op.
    """
    func = lambda idxs: np.amax(a[idxs], axis=0)
    return seg_map(func, a, ids),
```

| riga/tfdeploy | tfdeploy.py | SegmentMean | python | train | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1917-L1922 |

```python
def SegmentMean(a, ids):
    """
    Segmented mean op.
    """
    func = lambda idxs: np.mean(a[idxs], axis=0)
    return seg_map(func, a, ids),
```
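The segment ops all defer to a `seg_map` helper that is referenced but not shown in this dump. A minimal stand-in with the behavior the snippets imply — apply `func` to the index set of each segment id and stack the results — is sketched below; the real tfdeploy helper may differ:

```python
import numpy as np
from functools import reduce

def seg_map(func, a, ids):
    # assumed behavior: one output row per distinct segment id, in id order
    ids = np.asarray(ids)
    return np.array([func(np.nonzero(ids == i)[0]) for i in np.unique(ids)])

def SegmentSum(a, ids, *args):
    func = lambda idxs: reduce(np.add, a[idxs])
    return seg_map(func, a, ids),

a = np.array([1.0, 2.0, 3.0, 4.0])
out, = SegmentSum(a, [0, 0, 1, 1])
assert np.array_equal(out, np.array([3.0, 7.0]))
```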
| riga/tfdeploy | tfdeploy.py | SparseSegmentSqrtN | python | train | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1942-L1947 |

```python
def SparseSegmentSqrtN(a, idxs, ids):
    """
    Sparse segmented sum / sqrt(n=len(idxs)) op.
    """
    func = lambda _idxs: np.divide(reduce(np.add, a[idxs][_idxs]), np.math.sqrt(len(_idxs)))
    return seg_map(func, a, ids),
```
| riga/tfdeploy | tfdeploy.py | ListDiff | python | train | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1971-L1976 |

```python
def ListDiff(a, b):
    """
    List diff op.
    """
    d = np.setdiff1d(a, b)
    return d, np.searchsorted(a, d).astype(np.int32)
```
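ListDiff uses `np.setdiff1d` (which returns a sorted array) and `np.searchsorted` to recover positions, so the returned indices are only meaningful when `a` itself is sorted — `searchsorted` assumes sorted input. A quick example on sorted input, assuming numpy:

```python
import numpy as np

def ListDiff(a, b):
    d = np.setdiff1d(a, b)
    return d, np.searchsorted(a, d).astype(np.int32)

a = np.array([1, 2, 3, 4, 5])
b = np.array([2, 4])
d, idx = ListDiff(a, b)
assert np.array_equal(d, [1, 3, 5]) and np.array_equal(idx, [0, 2, 4])
```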
| riga/tfdeploy | tfdeploy.py | Unique | python | train | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1988-L1993 |

```python
def Unique(a, t):
    """
    Unique op.
    """
    _, idxs, inv = np.unique(a, return_index=True, return_inverse=True)
    return np.copy(a)[np.sort(idxs)], idxs[inv].astype(dtype_map[t])
```
| riga/tfdeploy | tfdeploy.py | Elu | python | train | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L2037-L2041 |

```python
def Elu(a):
    """
    Elu op.
    """
    return np.where(a < 0, np.subtract(np.exp(a), 1), a),
```

| riga/tfdeploy | tfdeploy.py | Softsign | python | train | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L2053-L2057 |

```python
def Softsign(a):
    """
    Softsign op.
    """
    return np.divide(a, np.add(np.abs(a), 1)),
```
| riga/tfdeploy | tfdeploy.py | Softmax | python | train | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L2077-L2082 |

```python
def Softmax(a):
    """
    Softmax op.
    """
    e = np.exp(a)
    return np.divide(e, np.sum(e, axis=-1, keepdims=True)),
```
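This Softmax exponentiates the raw inputs, so large logits overflow `np.exp`. The usual remedy — subtracting the row-wise maximum first, which leaves the result unchanged because softmax is shift-invariant — is a one-line change; a hedged sketch, not the tfdeploy implementation:

```python
import numpy as np

def softmax_stable(a):
    # shift by the max so np.exp never sees large positive values
    e = np.exp(a - np.amax(a, axis=-1, keepdims=True))
    return np.divide(e, np.sum(e, axis=-1, keepdims=True))

a = np.array([[1000.0, 1001.0]])  # overflows the unshifted version
out = softmax_stable(a)
assert np.allclose(out.sum(axis=-1), 1.0)
```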
| riga/tfdeploy | tfdeploy.py | Conv1D | python | train | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L2125-L2138 |

```python
def Conv1D(a, f, strides, padding, data_format):
    """
    1D conv op.
    """
    if data_format.decode("ascii") == "NCHW":
        a = np.rollaxis(a, 1, -1)  # trailing comma removed: it wrapped `a` in a 1-tuple
    patches = _conv_patches(a, f, 3 * [strides], padding.decode("ascii"))
    conv = np.sum(patches, axis=tuple(range(-f.ndim, -1)))
    if data_format.deco...
```
| riga/tfdeploy | tfdeploy.py | Conv3D | python | train | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L2159-L2164 |

```python
def Conv3D(a, f, strides, padding):
    """
    3D conv op.
    """
    patches = _conv_patches(a, f, strides, padding.decode("ascii"))
    return np.sum(patches, axis=tuple(range(-f.ndim, -1))),
```
| riga/tfdeploy | tfdeploy.py | AvgPool | python | train | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L2190-L2203 |

```python
def AvgPool(a, k, strides, padding, data_format):
    """
    Average pooling op.
    """
    if data_format.decode("ascii") == "NCHW":
        a = np.rollaxis(a, 1, -1)  # trailing comma removed: it wrapped `a` in a 1-tuple
    patches = _pool_patches(a, k, strides, padding.decode("ascii"))
    pool = np.average(patches, axis=tuple(range(-len(k), 0)))
    if data_forma...
```

| riga/tfdeploy | tfdeploy.py | MaxPool | python | train | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L2207-L2220 |

```python
def MaxPool(a, k, strides, padding, data_format):
    """
    Maximum pooling op.
    """
    if data_format.decode("ascii") == "NCHW":
        a = np.rollaxis(a, 1, -1)  # trailing comma removed: it wrapped `a` in a 1-tuple
    patches = _pool_patches(a, k, strides, padding.decode("ascii"))
    pool = np.amax(patches, axis=tuple(range(-len(k), 0)))
    if data_format.d...
```
| riga/tfdeploy | tfdeploy.py | AvgPool3D | python | train | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L2224-L2229 |

```python
def AvgPool3D(a, k, strides, padding):
    """
    Average 3D pooling op.
    """
    patches = _pool_patches(a, k, strides, padding.decode("ascii"))
    return np.average(patches, axis=tuple(range(-len(k), 0))),
```

| riga/tfdeploy | tfdeploy.py | MaxPool3D | python | train | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L2233-L2238 |

```python
def MaxPool3D(a, k, strides, padding):
    """
    Maximum 3D pooling op.
    """
    patches = _pool_patches(a, k, strides, padding.decode("ascii"))
    return np.amax(patches, axis=tuple(range(-len(k), 0))),
```
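Every pooling op here follows the same two-step pattern: an internal `_pool_patches` helper (not shown in this dump) gathers the window elements into trailing axes, and a single numpy reduction collapses those axes. The idea in miniature, for a non-strided 2x2 max pool — the helper below is illustrative, not tfdeploy's:

```python
import numpy as np

def maxpool_2x2(a):
    # a: (H, W) with even H and W; expose each 2x2 window as trailing axes
    H, W = a.shape
    patches = a.reshape(H // 2, 2, W // 2, 2).transpose(0, 2, 1, 3)
    return np.amax(patches, axis=(-2, -1))  # reduce over the window axes

a = np.arange(16).reshape(4, 4)
assert np.array_equal(maxpool_2x2(a), [[5, 7], [13, 15]])
```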
| riga/tfdeploy | tfdeploy.py | Model.get | python | train | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L115-L123 |

```python
def get(self, *names, **kwargs):
    """ get(*names, key=None)
    Returns one or more :py:class:`Tensor` instances given by *names* using a deep lookup within
    the model. If *key* is not *None*, only the root tensor with that *key* is traversed. *None*
    is returned when no tensor was found. In case a tensor is passed, its name is used for ...
    """
    tensors = tuple(self._get(name, **kwargs) for name in names)
    return tensors[0]...
```
| riga/tfdeploy | tfdeploy.py | Model.add | python | train | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L145-L161 |

```python
def add(self, tensor, tf_sess=None, key=None, **kwargs):
    """
    Adds a new root *tensor* for a *key* which, if *None*, defaults to a consecutive number.
    When *tensor* is not an instance of :py:class:`Tensor` but an instance of
    ``tensorflow.Tensor``, it is converted first. In that case, *tf_sess* should be a valid
    tensorflow session and *kwargs* are forwarded...
    """
    if not isinstance(tensor, Tensor):
        tensor = Tensor(tensor, tf...
```
| riga/tfdeploy | tfdeploy.py | Model.load | python | train | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L163-L172 |

```python
def load(self, path):
    """
    Loads all tensors from a file defined by *path* and adds them to the root set.
    """
    path = os.path.expandvars(os.path.expanduser(path))
    with open(path, "rb") as f:
        roots = pickle.load(f)
    for key, tensor in roots.items():
        s...
```
| riga/tfdeploy | tfdeploy.py | Model.save | python | train | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L174-L180 |

```python
def save(self, path):
    """
    Saves all tensors of the root set to a file defined by *path*.
    """
    path = os.path.expandvars(os.path.expanduser(path))
    with open(path, "wb") as f:
        pickle.dump(self.roots, f)
```
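`load` and `save` round-trip the root tensors through pickle, and the `Ensemble.load` entry below shows that `Model(path)` restores from a file directly. A usage sketch, assuming a tensorflow session `sess` with an output tensor `y`, and treating the tensor names "input" and "output" as placeholders:

```python
import tfdeploy as td

model = td.Model()
model.add(y, tf_sess=sess)  # converts the tf.Tensor and adds it as a root
model.save("model.pkl")

restored = td.Model("model.pkl")  # restores the pickled roots
inp, outp = restored.get("input", "output")  # names are assumptions
```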
| riga/tfdeploy | tfdeploy.py | Ensemble.get | python | train | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L620-L639 |

```python
def get(self, *names, **kwargs):
    """ get(*names, key=None)
    Returns one or more :py:class:`TensorEnsemble` instances given by *names* using a deep
    lookup within all read models. Each returned tensor ensemble will have ``len(models)``
    tensors. If a model does not contain a specific tensor defined by a specific *name*, the
    ass...
    """
    # create empty tensor ensembles with our method
    tensor_ensembles = [TensorEnsemble([], self.method) for name in names...
```

| riga/tfdeploy | tfdeploy.py | Ensemble.load | python | train | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L641-L646 |

```python
def load(self, paths):
    """
    Loads models from a list of *paths*.
    """
    for path in paths:
        self.models.append(Model(path))
```
| riga/tfdeploy | tfdeploy.py | TensorEnsemble.eval | python | train | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L673-L700 |

```python
def eval(self, feed_dict=None):
    """
    Evaluates all contained tensors using a *feed_dict* and returns the ensemble value. The keys
    of *feed_dict* must be tensor ensembles. Its values can be batches, i.e., numpy arrays, or
    lists or tuples of batches. In the latter case, these lists or tuples must have the same
    length as the list of s...
    """
    # first, check that the length of all feed_dict keys match our own length
    for tensor_ensemble in feed_dict:
        if len(tensor_ensemble.tensors) != len(...
```
| riga/tfdeploy | tfdeploy.py | TensorEnsemble.func | python | train | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L705-L719 |

```python
def func(self, values):
    """
    The actual ensembling logic that combines multiple *values*. The method call is forwarded
    to the ensemble method-specific variant which is determined using *method*.
    """
    if self.method == METHOD_MEAN:
        return self.func_mean(values)
    elif self.method == METHOD_MAX:
        return self.func_max...
```
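`func` dispatches on `self.method` to variants such as `func_mean` and `func_max`, whose bodies are not included in this dump. Plausible stand-ins, assuming `values` is a sequence of equally shaped per-model numpy arrays — these are guesses, not the tfdeploy code:

```python
import numpy as np

def func_mean(values):
    return np.mean(np.stack(values), axis=0)

def func_max(values):
    return np.amax(np.stack(values), axis=0)

preds = [np.array([0.2, 0.8]), np.array([0.4, 0.6])]
assert np.allclose(func_mean(preds), [0.3, 0.7])
```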
| adafruit/Adafruit_CircuitPython_MatrixKeypad | adafruit_matrixkeypad.py | Matrix_Keypad.pressed_keys | python | train | https://github.com/adafruit/Adafruit_CircuitPython_MatrixKeypad/blob/f530b1a920a40ef09ec1394b7760f243a243045a/adafruit_matrixkeypad.py#L69-L91 |

```python
def pressed_keys(self):
    """An array containing all detected keys that are pressed from the initialized
    list-of-lists passed in during creation"""
    # make a list of all the keys that are detected
    pressed = []
    # set all pins to be inputs w/pullups
    for pin in self.row_pins + self.col_pins:
        pin.dire...
```
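Typical usage of `Matrix_Keypad` polls `pressed_keys` in a loop: the rows and cols are digitalio pins and `keys` is the list-of-lists mentioned in the docstring. The pin assignments below are placeholders for whatever the keypad is actually wired to:

```python
import board
import digitalio
import adafruit_matrixkeypad

# 3x4 phone-style keypad; swap in the real pins for your wiring
cols = [digitalio.DigitalInOut(p) for p in (board.D0, board.D1, board.D2)]
rows = [digitalio.DigitalInOut(p) for p in (board.D3, board.D4, board.D5, board.D6)]
keys = ((1, 2, 3),
        (4, 5, 6),
        (7, 8, 9),
        ("*", 0, "#"))

keypad = adafruit_matrixkeypad.Matrix_Keypad(rows, cols, keys)
while True:
    pressed = keypad.pressed_keys  # list of currently held keys
    if pressed:
        print("Pressed:", pressed)
```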
| mrname/haralyzer | haralyzer/multihar.py | MultiHarParser.get_load_times | python | train | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/multihar.py#L41-L53 |

```python
def get_load_times(self, asset_type):
    """
    Just a ``list`` of the load times of a certain asset type for each page
    :param asset_type: ``str`` of the asset type to return load times for
    """
    load_times = []
    search_str = '{0}_load_time'.format(asset_type)
    for har_page in self.pages:
        val = getattr(...
```
| mrname/haralyzer | haralyzer/multihar.py | MultiHarParser.get_stdev | python | train | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/multihar.py#L55-L79 |

```python
def get_stdev(self, asset_type):
    """
    Returns the standard deviation for a set of a certain asset type.
    :param asset_type: ``str`` of the asset type to calculate standard
    deviation for.
    :returns: A ``int`` or ``float`` of standard deviation, depending on
    the self.decimal_precision
    """
    load_times = []
    # Handle edge cases like TTFB
    if asset_type == 'ttfb':
        for page in self.pages:
            if page.time_to_first_byte is...
```
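A usage sketch for `MultiHarParser`, assuming it is constructed with a list of parsed HAR dicts via `har_data` (the file names here are placeholders):

```python
import json
from haralyzer import MultiHarParser

with open("run1.har") as f1, open("run2.har") as f2:
    har_data = [json.load(f1), json.load(f2)]

parser = MultiHarParser(har_data=har_data)
print(parser.page_load_time)        # mean page load time across runs
print(parser.get_stdev("page"))     # spread across runs
print(parser.get_load_times("js"))  # per-run JS load times
```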
| mrname/haralyzer | haralyzer/multihar.py | MultiHarParser.pages | python | train | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/multihar.py#L82-L95 |

```python
def pages(self):
    """
    The aggregate pages of all the parser objects.
    """
    pages = []
    for har_dict in self.har_data:
        har_parser = HarParser(har_data=har_dict)
        if self.page_id:
            for page in har_parser.pages:
                if page.page_id =...
```
| mrname/haralyzer | haralyzer/multihar.py | MultiHarParser.time_to_first_byte | python | train | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/multihar.py#L105-L113 |

```python
def time_to_first_byte(self):
    """
    The aggregate time to first byte for all pages.
    """
    ttfb = []
    for page in self.pages:
        if page.time_to_first_byte is not None:
            ttfb.append(page.time_to_first_byte)
    return round(mean(ttfb), self.decimal_precision)
```
| mrname/haralyzer | haralyzer/multihar.py | MultiHarParser.page_load_time | python | train | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/multihar.py#L116-L121 |

```python
def page_load_time(self):
    """
    The average total load time for all runs (not weighted).
    """
    load_times = self.get_load_times('page')
    return round(mean(load_times), self.decimal_precision)
```

| mrname/haralyzer | haralyzer/multihar.py | MultiHarParser.js_load_time | python | train | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/multihar.py#L124-L129 |

```python
def js_load_time(self):
    """
    Returns aggregate javascript load time.
    """
    load_times = self.get_load_times('js')
    return round(mean(load_times), self.decimal_precision)
```

| mrname/haralyzer | haralyzer/multihar.py | MultiHarParser.css_load_time | python | train | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/multihar.py#L132-L137 |

```python
def css_load_time(self):
    """
    Returns aggregate css load time for all pages.
    """
    load_times = self.get_load_times('css')
    return round(mean(load_times), self.decimal_precision)
```

| mrname/haralyzer | haralyzer/multihar.py | MultiHarParser.image_load_time | python | train | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/multihar.py#L140-L145 |

```python
def image_load_time(self):
    """
    Returns aggregate image load time for all pages.
    """
    load_times = self.get_load_times('image')
    return round(mean(load_times), self.decimal_precision)
```

| mrname/haralyzer | haralyzer/multihar.py | MultiHarParser.html_load_time | python | train | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/multihar.py#L148-L153 |

```python
def html_load_time(self):
    """
    Returns aggregate html load time for all pages.
    """
    load_times = self.get_load_times('html')
    return round(mean(load_times), self.decimal_precision)
```

| mrname/haralyzer | haralyzer/multihar.py | MultiHarParser.audio_load_time | python | train | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/multihar.py#L156-L161 |

```python
def audio_load_time(self):
    """
    Returns aggregate audio load time for all pages.
    """
    load_times = self.get_load_times('audio')
    return round(mean(load_times), self.decimal_precision)
```

| mrname/haralyzer | haralyzer/multihar.py | MultiHarParser.video_load_time | python | train | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/multihar.py#L164-L169 |

```python
def video_load_time(self):
    """
    Returns aggregate video load time for all pages.
    """
    load_times = self.get_load_times('video')
    return round(mean(load_times), self.decimal_precision)
```
| mrname/haralyzer | haralyzer/assets.py | HarParser.match_headers | python | train | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/assets.py#L40-L73 |

```python
def match_headers(self, entry, header_type, header, value, regex=True):
    """
    Function to match headers.
    Since the output of headers might use different case, like:
    'content-type' vs 'Content-Type'
    This function is case-insensitive
    :param entry: entry object
    :param header_type: ``str`` of header type. Valid values:
    * 'request'
    ...
    """
    if header_type not in entry:
        raise ValueError('Invalid header_type, should be either:\n\n'...
```
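A usage sketch for the matcher, assuming `entry` is a single entry dict taken from a parsed HAR page and `har_parser` is a `HarParser` — the header/value pairs are just examples:

```python
# case-insensitive, regex-based by default
if har_parser.match_headers(entry, "request", "Content-Type", "application/json"):
    print("JSON request")
if har_parser.match_headers(entry, "response", "content-type", "image/.*"):
    print("image response")
```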
| mrname/haralyzer | haralyzer/assets.py | HarParser.match_content_type | python | train | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/assets.py#L76-L92 |

```python
def match_content_type(entry, content_type, regex=True):
    """
    Matches the content type of a request using the mimeType metadata.
    :param entry: ``dict`` of a single entry from a HarPage
    :param content_type: ``str`` of regex to use for finding content type
    :param regex: ``bool`` indicating whether to use regex or exact match.
    """
    mimeType = entry['response']['content']['mimeType']
    if regex and re.search(content...
```
| mrname/haralyzer | haralyzer/assets.py | HarParser.match_request_type | python | train | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/assets.py#L94-L107 |

```python
def match_request_type(self, entry, request_type, regex=True):
    """
    Helper function that returns entries with a request type
    matching the given `request_type` argument.
    :param entry: entry object to analyze
    :param request_type: ``str`` of request type to match
    :param regex: ``bool`` indicating whether to use a regex or string match
    """
    if regex:
        return re.search(request_type, entry['request']['method'], f...
```
mrname/haralyzer | haralyzer/assets.py | HarParser.match_http_version | def match_http_version(entry, http_version, regex=True):
"""
Helper function that returns entries with an HTTP version
matching the given `http_version` argument.
:param entry: entry object to analyze
:param http_version: ``str`` of HTTP version to match
:param regex: ``b... | python | def match_http_version(entry, http_version, regex=True):
"""
Helper function that returns entries with an HTTP version
matching the given `http_version` argument.
:param entry: entry object to analyze
:param http_version: ``str`` of HTTP version to match
:param regex: ``b... | [
"def",
"match_http_version",
"(",
"entry",
",",
"http_version",
",",
"regex",
"=",
"True",
")",
":",
"response_version",
"=",
"entry",
"[",
"'response'",
"]",
"[",
"'httpVersion'",
"]",
"if",
"regex",
":",
"return",
"re",
".",
"search",
"(",
"http_version",
... | Helper function that returns entries with an HTTP version
matching the given `http_version` argument.
:param entry: entry object to analyze
:param http_version: ``str`` of HTTP version to match
:param regex: ``bool`` indicating whether to use a regex or string match | [
"Helper",
"function",
"that",
"returns",
"entries",
"with",
"a",
"request",
"type",
"matching",
"the",
"given",
"request_type",
"argument",
"."
] | train | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/assets.py#L110-L124 |
mrname/haralyzer | haralyzer/assets.py | HarParser.match_status_code | def match_status_code(self, entry, status_code, regex=True):
"""
Helper function that returns entries with a status code matching
the given `status_code` argument.
NOTE: This is doing a STRING comparison NOT NUMERICAL
:param entry: entry object to analyze
:param status... | python | def match_status_code(self, entry, status_code, regex=True):
"""
Helper function that returns entries with a status code matching
the given `status_code` argument.
NOTE: This is doing a STRING comparison NOT NUMERICAL
:param entry: entry object to analyze
:param status... | [
"def",
"match_status_code",
"(",
"self",
",",
"entry",
",",
"status_code",
",",
"regex",
"=",
"True",
")",
":",
"if",
"regex",
":",
"return",
"re",
".",
"search",
"(",
"status_code",
",",
"str",
"(",
"entry",
"[",
"'response'",
"]",
"[",
"'status'",
"]... | Helper function that returns entries with a status code matching
the given `status_code` argument.
NOTE: This is doing a STRING comparison NOT NUMERICAL
:param entry: entry object to analyze
:param status_code: ``str`` of status code to search for
:param request_type: ``regex`... | [
"Helper",
"function",
"that",
"returns",
"entries",
"with",
"a",
"status",
"code",
"matching",
"then",
"given",
"status_code",
"argument",
"."
] | train | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/assets.py#L126-L141 |
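A sketch of the string-based comparison called out in the NOTE above; the minimal one-page HAR dict passed to the constructor is an assumption made only to obtain an instance:

```python
from haralyzer import HarParser

parser = HarParser({'log': {'entries': [], 'pages': []}})
entry = {'response': {'status': 404}}

# str(404) is matched against the pattern, so '4.*' catches any 4xx
assert parser.match_status_code(entry, '4.*')
assert not parser.match_status_code(entry, '5.*')
```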
mrname/haralyzer | haralyzer/assets.py | HarParser.create_asset_timeline | def create_asset_timeline(self, asset_list):
"""
Returns a ``dict`` of the timeline for the requested assets. The key is
a datetime object (down to the millisecond) of ANY time where at least
one of the requested assets was loaded. The value is a ``list`` of ALL
assets that were ... | python | def create_asset_timeline(self, asset_list):
"""
Returns a ``dict`` of the timeline for the requested assets. The key is
a datetime object (down to the millisecond) of ANY time where at least
one of the requested assets was loaded. The value is a ``list`` of ALL
assets that were ... | [
"def",
"create_asset_timeline",
"(",
"self",
",",
"asset_list",
")",
":",
"results",
"=",
"dict",
"(",
")",
"for",
"asset",
"in",
"asset_list",
":",
"time_key",
"=",
"dateutil",
".",
"parser",
".",
"parse",
"(",
"asset",
"[",
"'startedDateTime'",
"]",
")",... | Returns a ``dict`` of the timeline for the requested assets. The key is
a datetime object (down to the millisecond) of ANY time where at least
one of the requested assets was loaded. The value is a ``list`` of ALL
assets that were loading at that time.
:param asset_list: ``list`` of the... | [
"Returns",
"a",
"dict",
"of",
"the",
"timeline",
"for",
"the",
"requested",
"assets",
".",
"The",
"key",
"is",
"a",
"datetime",
"object",
"(",
"down",
"to",
"the",
"millisecond",
")",
"of",
"ANY",
"time",
"where",
"at",
"least",
"one",
"of",
"the",
"re... | train | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/assets.py#L143-L171 |
mrname/haralyzer | haralyzer/assets.py | HarParser.pages | def pages(self):
"""
This is a list of HarPage objects, each of which represents a page
from the HAR file.
"""
# Start with a page object for unknown entries if the HAR data has
# any entries with no page ID
pages = []
if any('pageref' not in entry for ent... | python | def pages(self):
"""
This is a list of HarPage objects, each of which represents a page
from the HAR file.
"""
# Start with a page object for unknown entries if the HAR data has
# any entries with no page ID
pages = []
if any('pageref' not in entry for ent... | [
"def",
"pages",
"(",
"self",
")",
":",
"# Start with a page object for unknown entries if the HAR data has",
"# any entries with no page ID",
"pages",
"=",
"[",
"]",
"if",
"any",
"(",
"'pageref'",
"not",
"in",
"entry",
"for",
"entry",
"in",
"self",
".",
"har_data",
... | This is a list of HarPage objects, each of which represents a page
from the HAR file. | [
"This",
"is",
"a",
"list",
"of",
"HarPage",
"objects",
"each",
"of",
"which",
"represents",
"a",
"page",
"from",
"the",
"HAR",
"file",
"."
] | train | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/assets.py#L174-L188 |
mrname/haralyzer | haralyzer/assets.py | HarPage._get_asset_size_trans | def _get_asset_size_trans(self, asset_type):
"""
Helper function to dynamically create *_size properties.
"""
if asset_type == 'page':
assets = self.entries
else:
assets = getattr(self, '{0}_files'.format(asset_type), None)
return self.get_total_si... | python | def _get_asset_size_trans(self, asset_type):
"""
Helper function to dynamically create *_size properties.
"""
if asset_type == 'page':
assets = self.entries
else:
assets = getattr(self, '{0}_files'.format(asset_type), None)
return self.get_total_si... | [
"def",
"_get_asset_size_trans",
"(",
"self",
",",
"asset_type",
")",
":",
"if",
"asset_type",
"==",
"'page'",
":",
"assets",
"=",
"self",
".",
"entries",
"else",
":",
"assets",
"=",
"getattr",
"(",
"self",
",",
"'{0}_files'",
".",
"format",
"(",
"asset_typ... | Helper function to dynamically create *_size properties. | [
"Helper",
"function",
"to",
"dynamically",
"create",
"*",
"_size",
"properties",
"."
] | train | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/assets.py#L266-L274 |
mrname/haralyzer | haralyzer/assets.py | HarPage._get_asset_size | def _get_asset_size(self, asset_type):
"""
Helper function to dynamically create *_size properties.
"""
if asset_type == 'page':
assets = self.entries
else:
assets = getattr(self, '{0}_files'.format(asset_type), None)
return self.get_total_size(ass... | python | def _get_asset_size(self, asset_type):
"""
Helper function to dynamically create *_size properties.
"""
if asset_type == 'page':
assets = self.entries
else:
assets = getattr(self, '{0}_files'.format(asset_type), None)
return self.get_total_size(ass... | [
"def",
"_get_asset_size",
"(",
"self",
",",
"asset_type",
")",
":",
"if",
"asset_type",
"==",
"'page'",
":",
"assets",
"=",
"self",
".",
"entries",
"else",
":",
"assets",
"=",
"getattr",
"(",
"self",
",",
"'{0}_files'",
".",
"format",
"(",
"asset_type",
... | Helper function to dynamically create *_size properties. | [
"Helper",
"function",
"to",
"dynamically",
"create",
"*",
"_size",
"properties",
"."
] | train | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/assets.py#L276-L284 |
mrname/haralyzer | haralyzer/assets.py | HarPage._get_asset_load | def _get_asset_load(self, asset_type):
"""
Helper function to dynamically create *_load_time properties. Return
value is in ms.
"""
if asset_type == 'initial':
return self.actual_page['time']
elif asset_type == 'content':
return self.pageTimings['o... | python | def _get_asset_load(self, asset_type):
"""
Helper function to dynamically create *_load_time properties. Return
value is in ms.
"""
if asset_type == 'initial':
return self.actual_page['time']
elif asset_type == 'content':
return self.pageTimings['o... | [
"def",
"_get_asset_load",
"(",
"self",
",",
"asset_type",
")",
":",
"if",
"asset_type",
"==",
"'initial'",
":",
"return",
"self",
".",
"actual_page",
"[",
"'time'",
"]",
"elif",
"asset_type",
"==",
"'content'",
":",
"return",
"self",
".",
"pageTimings",
"[",... | Helper function to dynamically create *_load_time properties. Return
value is in ms. | [
"Helper",
"function",
"to",
"dynamically",
"create",
"*",
"_load_time",
"properties",
".",
"Return",
"value",
"is",
"in",
"ms",
"."
] | train | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/assets.py#L286-L306 |
mrname/haralyzer | haralyzer/assets.py | HarPage.filter_entries | def filter_entries(self, request_type=None, content_type=None,
status_code=None, http_version=None, regex=True):
"""
Returns a ``list`` of entry objects based on the filter criteria.
:param request_type: ``str`` of request type (i.e. - GET or POST)
:param content_... | python | def filter_entries(self, request_type=None, content_type=None,
status_code=None, http_version=None, regex=True):
"""
Returns a ``list`` of entry objects based on the filter criteria.
:param request_type: ``str`` of request type (i.e. - GET or POST)
:param content_... | [
"def",
"filter_entries",
"(",
"self",
",",
"request_type",
"=",
"None",
",",
"content_type",
"=",
"None",
",",
"status_code",
"=",
"None",
",",
"http_version",
"=",
"None",
",",
"regex",
"=",
"True",
")",
":",
"results",
"=",
"[",
"]",
"for",
"entry",
... | Returns a ``list`` of entry objects based on the filter criteria.
:param request_type: ``str`` of request type (i.e. - GET or POST)
:param content_type: ``str`` of regex to use for finding content type
:param status_code: ``int`` of the desired status code
:param http_version: ``str`` o... | [
"Returns",
"a",
"list",
"of",
"entry",
"objects",
"based",
"on",
"the",
"filter",
"criteria",
"."
] | train | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/assets.py#L308-L351 |
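Continuing with the same `page` object, a hedged example combining several filters (patterns are treated as regexes by default, per the signature above):

```python
# All GET requests for JavaScript that came back with a 2xx status
js_ok = page.filter_entries(request_type='GET',
                            content_type='javascript',
                            status_code='2.*')
print(len(js_ok), 'matching entries')
```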
mrname/haralyzer | haralyzer/assets.py | HarPage.get_load_time | def get_load_time(self, request_type=None, content_type=None,
status_code=None, asynchronous=True, **kwargs):
"""
This method can return the TOTAL load time for the assets or the ACTUAL
load time, the difference being that the actual load time takes
asynchronous tra... | python | def get_load_time(self, request_type=None, content_type=None,
status_code=None, asynchronous=True, **kwargs):
"""
This method can return the TOTAL load time for the assets or the ACTUAL
load time, the difference being that the actual load time takes
asynchronous tra... | [
"def",
"get_load_time",
"(",
"self",
",",
"request_type",
"=",
"None",
",",
"content_type",
"=",
"None",
",",
"status_code",
"=",
"None",
",",
"asynchronous",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"entries",
"=",
"self",
".",
"filter_entries",
... | This method can return the TOTAL load time for the assets or the ACTUAL
load time, the difference being that the actual load time takes
asynchronous transactions into account. So, if you want the total load
time, set asynchronous=False.
EXAMPLE:
I want to know the load time for... | [
"This",
"method",
"can",
"return",
"the",
"TOTAL",
"load",
"time",
"for",
"the",
"assets",
"or",
"the",
"ACTUAL",
"load",
"time",
"the",
"difference",
"being",
"that",
"the",
"actual",
"load",
"time",
"takes",
"asynchronous",
"transactions",
"into",
"account",... | train | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/assets.py#L353-L384 |
mrname/haralyzer | haralyzer/assets.py | HarPage.get_total_size | def get_total_size(self, entries):
"""
Returns the total size of a collection of entries.
:param entries: ``list`` of entries to calculate the total size of.
"""
size = 0
for entry in entries:
if entry['response']['bodySize'] > 0:
size += entr... | python | def get_total_size(self, entries):
"""
Returns the total size of a collection of entries.
:param entries: ``list`` of entries to calculate the total size of.
"""
size = 0
for entry in entries:
if entry['response']['bodySize'] > 0:
size += entr... | [
"def",
"get_total_size",
"(",
"self",
",",
"entries",
")",
":",
"size",
"=",
"0",
"for",
"entry",
"in",
"entries",
":",
"if",
"entry",
"[",
"'response'",
"]",
"[",
"'bodySize'",
"]",
">",
"0",
":",
"size",
"+=",
"entry",
"[",
"'response'",
"]",
"[",
... | Returns the total size of a collection of entries.
:param entries: ``list`` of entries to calculate the total size of. | [
"Returns",
"the",
"total",
"size",
"of",
"a",
"collection",
"of",
"entries",
"."
] | train | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/assets.py#L386-L396 |
mrname/haralyzer | haralyzer/assets.py | HarPage.get_total_size_trans | def get_total_size_trans(self, entries):
"""
Returns the total size of a collection of entries - transferred.
NOTE: use with har file generated with chrome-har-capturer
:param entries: ``list`` of entries to calculate the total size of.
"""
size = 0
for entry in... | python | def get_total_size_trans(self, entries):
"""
Returns the total size of a collection of entries - transferred.
NOTE: use with har file generated with chrome-har-capturer
:param entries: ``list`` of entries to calculate the total size of.
"""
size = 0
for entry in... | [
"def",
"get_total_size_trans",
"(",
"self",
",",
"entries",
")",
":",
"size",
"=",
"0",
"for",
"entry",
"in",
"entries",
":",
"if",
"entry",
"[",
"'response'",
"]",
"[",
"'_transferSize'",
"]",
">",
"0",
":",
"size",
"+=",
"entry",
"[",
"'response'",
"... | Returns the total size of a collection of entries - transferred.
NOTE: use with har file generated with chrome-har-capturer
:param entries: ``list`` of entries to calculate the total size of. | [
"Returns",
"the",
"total",
"size",
"of",
"a",
"collection",
"of",
"entries",
"-",
"transferred",
"."
] | train | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/assets.py#L398-L410 |
mrname/haralyzer | haralyzer/assets.py | HarPage.time_to_first_byte | def time_to_first_byte(self):
"""
Time to first byte of the page request in ms
"""
# The unknown page is just a placeholder for entries with no page ID.
# As such, it would not have a TTFB
if self.page_id == 'unknown':
return None
ttfb = 0
for ... | python | def time_to_first_byte(self):
"""
Time to first byte of the page request in ms
"""
# The unknown page is just a placeholder for entries with no page ID.
# As such, it would not have a TTFB
if self.page_id == 'unknown':
return None
ttfb = 0
for ... | [
"def",
"time_to_first_byte",
"(",
"self",
")",
":",
"# The unknown page is just a placeholder for entries with no page ID.",
"# As such, it would not have a TTFB",
"if",
"self",
".",
"page_id",
"==",
"'unknown'",
":",
"return",
"None",
"ttfb",
"=",
"0",
"for",
"entry",
"i... | Time to first byte of the page request in ms | [
"Time",
"to",
"first",
"byte",
"of",
"the",
"page",
"request",
"in",
"ms"
] | train | https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/assets.py#L449-L468 |
kako-nawao/django-group-by | django_group_by/queryset.py | GroupByQuerySetMixinBase.group_by | def group_by(self, *fields):
"""
Clone the queryset using GroupByQuerySet.
:param fields:
:return:
"""
fields = self._expand_group_by_fields(self.model, fields)
return self._clone(klass=GroupByQuerySet, setup=True, _fields=fields) | python | def group_by(self, *fields):
"""
Clone the queryset using GroupByQuerySet.
:param fields:
:return:
"""
fields = self._expand_group_by_fields(self.model, fields)
return self._clone(klass=GroupByQuerySet, setup=True, _fields=fields) | [
"def",
"group_by",
"(",
"self",
",",
"*",
"fields",
")",
":",
"fields",
"=",
"self",
".",
"_expand_group_by_fields",
"(",
"self",
".",
"model",
",",
"fields",
")",
"return",
"self",
".",
"_clone",
"(",
"klass",
"=",
"GroupByQuerySet",
",",
"setup",
"=",
... | Clone the queryset using GroupByQuerySet.
:param fields:
:return: | [
"Clone",
"the",
"queryset",
"using",
"GroupByQuerySet",
"."
] | train | https://github.com/kako-nawao/django-group-by/blob/0d901513620acebc736722b040cff83d6483789a/django_group_by/queryset.py#L34-L42 |
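A hypothetical end-to-end sketch of what this mixin family enables; `Book`, its `author` foreign key, and the `name`/`price` fields are invented for illustration, and the manager is assumed to be built on the group-by queryset above:

```python
from django.db.models import Avg, Count

# Book.objects is assumed to use the GroupBy-enabled queryset
books = (Book.objects
         .group_by('title', 'author')   # FK 'author' expands to its fields
         .annotate(avg_price=Avg('price'), copies=Count('id'))
         .order_by())

for group in books:
    # Each row is an AggregatedGroup; grouped FK values come back
    # re-hydrated as (unsaved) model instances
    print(group.title, group.author.name, group.avg_price, group.copies)
```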
kako-nawao/django-group-by | django_group_by/group.py | AggregatedGroup._data | def _data(self):
"""
Cached data built from instance raw _values as a dictionary.
"""
d = {}
# Iterate all keys and values
for k, v in self._row_values.items():
# Split related model fields
attrs = k.rsplit('__', 1)
# Set value depend... | python | def _data(self):
"""
Cached data built from instance raw _values as a dictionary.
"""
d = {}
# Iterate all keys and values
for k, v in self._row_values.items():
# Split related model fields
attrs = k.rsplit('__', 1)
# Set value depend... | [
"def",
"_data",
"(",
"self",
")",
":",
"d",
"=",
"{",
"}",
"# Iterate all keys and values",
"for",
"k",
",",
"v",
"in",
"self",
".",
"_row_values",
".",
"items",
"(",
")",
":",
"# Split related model fields",
"attrs",
"=",
"k",
".",
"rsplit",
"(",
"'__'"... | Cached data built from instance raw _values as a dictionary. | [
"Cached",
"data",
"built",
"from",
"instance",
"raw",
"_values",
"as",
"a",
"dictionary",
"."
] | train | https://github.com/kako-nawao/django-group-by/blob/0d901513620acebc736722b040cff83d6483789a/django_group_by/group.py#L19-L43 |
kako-nawao/django-group-by | django_group_by/group.py | AggregatedGroup._set_values | def _set_values(self):
"""
Populate instance with given.
"""
# Iterate all keys and values in data
for k, v in self._data.items():
# If it's a dict, process it (it's probably instance data)
if isinstance(v, dict):
try:
#... | python | def _set_values(self):
"""
Populate instance with given.
"""
# Iterate all keys and values in data
for k, v in self._data.items():
# If it's a dict, process it (it's probably instance data)
if isinstance(v, dict):
try:
#... | [
"def",
"_set_values",
"(",
"self",
")",
":",
"# Iterate all keys and values in data",
"for",
"k",
",",
"v",
"in",
"self",
".",
"_data",
".",
"items",
"(",
")",
":",
"# If it's a dict, process it (it's probably instance data)",
"if",
"isinstance",
"(",
"v",
",",
"d... | Populate instance with given. | [
"Populate",
"instance",
"with",
"given",
"."
] | train | https://github.com/kako-nawao/django-group-by/blob/0d901513620acebc736722b040cff83d6483789a/django_group_by/group.py#L45-L77 |
kako-nawao/django-group-by | django_group_by/iterable.py | GroupByIterableMixinBase.group_by | def group_by(self, *fields):
"""
Clone the queryset using GroupByQuerySet.
:param fields:
:return:
"""
fields = self._expand_group_by_fields(self.model, fields)
clone = self._values(*fields)
clone._iterable_class = GroupByIterable
return clone | python | def group_by(self, *fields):
"""
Clone the queryset using GroupByQuerySet.
:param fields:
:return:
"""
fields = self._expand_group_by_fields(self.model, fields)
clone = self._values(*fields)
clone._iterable_class = GroupByIterable
return clone | [
"def",
"group_by",
"(",
"self",
",",
"*",
"fields",
")",
":",
"fields",
"=",
"self",
".",
"_expand_group_by_fields",
"(",
"self",
".",
"model",
",",
"fields",
")",
"clone",
"=",
"self",
".",
"_values",
"(",
"*",
"fields",
")",
"clone",
".",
"_iterable_... | Clone the queryset using GroupByQuerySet.
:param fields:
:return: | [
"Clone",
"the",
"queryset",
"using",
"GroupByQuerySet",
"."
] | train | https://github.com/kako-nawao/django-group-by/blob/0d901513620acebc736722b040cff83d6483789a/django_group_by/iterable.py#L37-L47 |
kako-nawao/django-group-by | django_group_by/mixin.py | GroupByMixin._expand_group_by_fields | def _expand_group_by_fields(cls, model, fields):
"""
Expand FK fields into all related object's fields to avoid future
lookups.
:param fields: fields to "group by"
:return: expanded fields
"""
# Containers for resulting fields and related model fields
res... | python | def _expand_group_by_fields(cls, model, fields):
"""
Expand FK fields into all related object's fields to avoid future
lookups.
:param fields: fields to "group by"
:return: expanded fields
"""
# Containers for resulting fields and related model fields
res... | [
"def",
"_expand_group_by_fields",
"(",
"cls",
",",
"model",
",",
"fields",
")",
":",
"# Containers for resulting fields and related model fields",
"res",
"=",
"[",
"]",
"related",
"=",
"{",
"}",
"# Add own fields and populate related fields",
"for",
"field_name",
"in",
... | Expand FK fields into all related object's fields to avoid future
lookups.
:param fields: fields to "group by"
:return: expanded fields | [
"Expand",
"FK",
"fields",
"into",
"all",
"related",
"object",
"s",
"fields",
"to",
"avoid",
"future",
"lookups",
"."
] | train | https://github.com/kako-nawao/django-group-by/blob/0d901513620acebc736722b040cff83d6483789a/django_group_by/mixin.py#L23-L74 |
SiLab-Bonn/basil | basil/TL/Dummy.py | Dummy.write | def write(self, addr, data):
'''Write to dummy memory
Parameters
----------
addr : int
The register address.
data : list, tuple
Data (byte array) to be written.
Returns
-------
nothing
'''
logger.debug(
... | python | def write(self, addr, data):
'''Write to dummy memory
Parameters
----------
addr : int
The register address.
data : list, tuple
Data (byte array) to be written.
Returns
-------
nothing
'''
logger.debug(
... | [
"def",
"write",
"(",
"self",
",",
"addr",
",",
"data",
")",
":",
"logger",
".",
"debug",
"(",
"\"Dummy SiTransferLayer.write addr: %s data: %s\"",
"%",
"(",
"hex",
"(",
"addr",
")",
",",
"data",
")",
")",
"for",
"curr_addr",
",",
"d",
"in",
"enumerate",
... | Write to dummy memory
Parameters
----------
addr : int
The register address.
data : list, tuple
Data (byte array) to be written.
Returns
-------
nothing | [
"Write",
"to",
"dummy",
"memory"
] | train | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/TL/Dummy.py#L36-L53 |
SiLab-Bonn/basil | basil/TL/Dummy.py | Dummy.read | def read(self, addr, size):
'''
Parameters
----------
addr : int
The register address.
size : int
Length of data to be read (number of bytes).
Returns
-------
array : array
Data (byte array) read from memory. Returns 0 ... | python | def read(self, addr, size):
'''
Parameters
----------
addr : int
The register address.
size : int
Length of data to be read (number of bytes).
Returns
-------
array : array
Data (byte array) read from memory. Returns 0 ... | [
"def",
"read",
"(",
"self",
",",
"addr",
",",
"size",
")",
":",
"logger",
".",
"debug",
"(",
"\"Dummy SiTransferLayer.read addr: %s size: %s\"",
"%",
"(",
"hex",
"(",
"addr",
")",
",",
"size",
")",
")",
"return",
"array",
".",
"array",
"(",
"'B'",
",",
... | Parameters
----------
addr : int
The register address.
size : int
Length of data to be read (number of bytes).
Returns
-------
array : array
Data (byte array) read from memory. Returns 0 for each byte if it hasn't been written to. | [
"Parameters",
"----------",
"addr",
":",
"int",
"The",
"register",
"address",
".",
"size",
":",
"int",
"Length",
"of",
"data",
"to",
"be",
"read",
"(",
"number",
"of",
"bytes",
")",
"."
] | train | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/TL/Dummy.py#L55-L70 |
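A pure-Python model of the documented semantics, one byte per address with zero-fill on unwritten reads; this is a standalone illustration, not the basil class itself:

```python
mem = {}

def write(addr, data):
    # Each byte lands at its own address, mirroring the driver's behaviour
    for offset, byte in enumerate(data):
        mem[addr + offset] = byte

def read(addr, size):
    # Unwritten addresses read back as 0, as the docstring promises
    return [mem.get(addr + i, 0) for i in range(size)]

write(0x1000, [0xDE, 0xAD])
assert read(0x1000, 4) == [0xDE, 0xAD, 0x00, 0x00]
```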
SiLab-Bonn/basil | basil/HL/RegisterHardwareLayer.py | RegisterHardwareLayer.set_value | def set_value(self, value, addr, size, offset, **kwargs):
'''Writing a value of any arbitrary size (max. unsigned int 64) and offset to a register
Parameters
----------
value : int, str
The register value (int, long, bit string) to be written.
addr : int
... | python | def set_value(self, value, addr, size, offset, **kwargs):
'''Writing a value of any arbitrary size (max. unsigned int 64) and offset to a register
Parameters
----------
value : int, str
The register value (int, long, bit string) to be written.
addr : int
... | [
"def",
"set_value",
"(",
"self",
",",
"value",
",",
"addr",
",",
"size",
",",
"offset",
",",
"*",
"*",
"kwargs",
")",
":",
"div_offset",
",",
"mod_offset",
"=",
"divmod",
"(",
"offset",
",",
"8",
")",
"div_size",
",",
"mod_size",
"=",
"divmod",
"(",
... | Writing a value of any arbitrary size (max. unsigned int 64) and offset to a register
Parameters
----------
value : int, str
The register value (int, long, bit string) to be written.
addr : int
The register address.
size : int
Bit siz... | [
"Writing",
"a",
"value",
"of",
"any",
"arbitrary",
"size",
"(",
"max",
".",
"unsigned",
"int",
"64",
")",
"and",
"offset",
"to",
"a",
"register",
"Parameters",
"----------",
"value",
":",
"int",
"str",
"The",
"register",
"value",
"(",
"int",
"long",
"bit... | train | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/RegisterHardwareLayer.py#L81-L110 |
SiLab-Bonn/basil | basil/HL/RegisterHardwareLayer.py | RegisterHardwareLayer.get_value | def get_value(self, addr, size, offset, **kwargs):
'''Reading a value of any arbitrary size (max. unsigned int 64) and offset from a register
Parameters
----------
addr : int
The register address.
size : int
Bit size/length of the value.
... | python | def get_value(self, addr, size, offset, **kwargs):
'''Reading a value of any arbitrary size (max. unsigned int 64) and offset from a register
Parameters
----------
addr : int
The register address.
size : int
Bit size/length of the value.
... | [
"def",
"get_value",
"(",
"self",
",",
"addr",
",",
"size",
",",
"offset",
",",
"*",
"*",
"kwargs",
")",
":",
"div_offset",
",",
"mod_offset",
"=",
"divmod",
"(",
"offset",
",",
"8",
")",
"div_size",
",",
"mod_size",
"=",
"divmod",
"(",
"size",
"+",
... | Reading a value of any arbitrary size (max. unsigned int 64) and offset from a register
Parameters
----------
addr : int
The register address.
size : int
Bit size/length of the value.
offset : int
Offset of the value to be read from ... | [
"Reading",
"a",
"value",
"of",
"any",
"arbitrary",
"size",
"(",
"max",
".",
"unsigned",
"int",
"64",
")",
"and",
"offset",
"from",
"a",
"register",
"Parameters",
"----------",
"addr",
":",
"int",
"The",
"register",
"address",
".",
"size",
":",
"int",
"Bi... | train | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/RegisterHardwareLayer.py#L112-L136 |
SiLab-Bonn/basil | basil/HL/RegisterHardwareLayer.py | RegisterHardwareLayer.set_bytes | def set_bytes(self, data, addr, **kwargs):
'''Writing bytes of any arbitrary size
Parameters
----------
data : iterable
The data (byte array) to be written.
addr : int
The register address.
Returns
-------
nothing
... | python | def set_bytes(self, data, addr, **kwargs):
'''Writing bytes of any arbitrary size
Parameters
----------
data : iterable
The data (byte array) to be written.
addr : int
The register address.
Returns
-------
nothing
... | [
"def",
"set_bytes",
"(",
"self",
",",
"data",
",",
"addr",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_intf",
".",
"write",
"(",
"self",
".",
"_conf",
"[",
"'base_addr'",
"]",
"+",
"addr",
",",
"data",
")"
] | Writing bytes of any arbitrary size
Parameters
----------
data : iterable
The data (byte array) to be written.
addr : int
The register address.
Returns
-------
nothing | [
"Writing",
"bytes",
"of",
"any",
"arbitrary",
"size",
"Parameters",
"----------",
"data",
":",
"iterable",
"The",
"data",
"(",
"byte",
"array",
")",
"to",
"be",
"written",
".",
"addr",
":",
"int",
"The",
"register",
"address",
".",
"Returns",
"-------",
"n... | train | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/RegisterHardwareLayer.py#L138-L152 |
SiLab-Bonn/basil | basil/HL/RegisterHardwareLayer.py | RegisterHardwareLayer.get_bytes | def get_bytes(self, addr, size, **kwargs):
'''Reading bytes of any arbitrary size
Parameters
----------.
addr : int
The register address.
size : int
Byte length of the value.
Returns
-------
data : iterable
... | python | def get_bytes(self, addr, size, **kwargs):
'''Reading bytes of any arbitrary size
Parameters
----------.
addr : int
The register address.
size : int
Byte length of the value.
Returns
-------
data : iterable
... | [
"def",
"get_bytes",
"(",
"self",
",",
"addr",
",",
"size",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_intf",
".",
"read",
"(",
"self",
".",
"_conf",
"[",
"'base_addr'",
"]",
"+",
"addr",
",",
"size",
")"
] | Reading bytes of any arbitrary size
Parameters
----------.
addr : int
The register address.
size : int
Byte length of the value.
Returns
-------
data : iterable
Byte array. | [
"Reading",
"bytes",
"of",
"any",
"arbitrary",
"size",
"Parameters",
"----------",
".",
"addr",
":",
"int",
"The",
"register",
"address",
".",
"size",
":",
"int",
"Byte",
"length",
"of",
"the",
"value",
".",
"Returns",
"-------",
"data",
":",
"iterable",
"B... | train | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/RegisterHardwareLayer.py#L154-L169 |
SiLab-Bonn/basil | basil/RL/StdRegister.py | StdRegister.write | def write(self, size=None):
"""
to call start() automatically, set yaml file as follows:
registers:
- name : CCPD_PCB
type : StdRegister
hw_driver : CCPD_PCB_SPI
size : 32
auto_start : True <------ add this
... | python | def write(self, size=None):
"""
to call start() automatically, set yaml file as follows:
registers:
- name : CCPD_PCB
type : StdRegister
hw_driver : CCPD_PCB_SPI
size : 32
auto_start : True <------ add this
... | [
"def",
"write",
"(",
"self",
",",
"size",
"=",
"None",
")",
":",
"if",
"size",
"is",
"None",
":",
"self",
".",
"_drv",
".",
"set_data",
"(",
"self",
".",
"tobytes",
"(",
")",
")",
"else",
":",
"self",
".",
"_drv",
".",
"set_data",
"(",
"self",
... | to call start() automatically, set yaml file as follows:
registers:
- name : CCPD_PCB
type : StdRegister
hw_driver : CCPD_PCB_SPI
size : 32
auto_start : True <------ add this
fields: ...... | [
"to",
"call",
"start",
"()",
"automatically",
"set",
"yaml",
"file",
"as",
"follows",
":",
"registers",
":",
"-",
"name",
":",
"CCPD_PCB",
"type",
":",
"StdRegister",
"hw_driver",
":",
"CCPD_PCB_SPI",
"size",
":",
"32",
"auto_start",
":",
"True",
"<",
"---... | train | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/RL/StdRegister.py#L102-L120 |
SiLab-Bonn/basil | basil/utils/BitLogic.py | BitLogic.from_value | def from_value(cls, value, size=None, fmt='Q', **kwargs):
'''
Factory method
For format characters see: https://docs.python.org/2/library/struct.html
'''
bl = cls(**kwargs) # size is 0 by default
bl.fromvalue(value=value, size=size, fmt=fmt)
return bl | python | def from_value(cls, value, size=None, fmt='Q', **kwargs):
'''
Factory method
For format characters see: https://docs.python.org/2/library/struct.html
'''
bl = cls(**kwargs) # size is 0 by default
bl.fromvalue(value=value, size=size, fmt=fmt)
return bl | [
"def",
"from_value",
"(",
"cls",
",",
"value",
",",
"size",
"=",
"None",
",",
"fmt",
"=",
"'Q'",
",",
"*",
"*",
"kwargs",
")",
":",
"bl",
"=",
"cls",
"(",
"*",
"*",
"kwargs",
")",
"# size is 0 by default",
"bl",
".",
"fromvalue",
"(",
"value",
"=",... | Factory method
For format characters see: https://docs.python.org/2/library/struct.html | [
"Factory",
"method"
] | train | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/utils/BitLogic.py#L32-L40 |
SiLab-Bonn/basil | basil/utils/BitLogic.py | BitLogic.fromvalue | def fromvalue(self, value, size=None, fmt='Q'):
'''
Append from a int/long number.
'''
if size and value.bit_length() > size:
raise TypeError('Value is too big for given size')
self.frombytes(struct.pack(fmt, value))
if size:
if not isinstance(size... | python | def fromvalue(self, value, size=None, fmt='Q'):
'''
Append from a int/long number.
'''
if size and value.bit_length() > size:
raise TypeError('Value is too big for given size')
self.frombytes(struct.pack(fmt, value))
if size:
if not isinstance(size... | [
"def",
"fromvalue",
"(",
"self",
",",
"value",
",",
"size",
"=",
"None",
",",
"fmt",
"=",
"'Q'",
")",
":",
"if",
"size",
"and",
"value",
".",
"bit_length",
"(",
")",
">",
"size",
":",
"raise",
"TypeError",
"(",
"'Value is too big for given size'",
")",
... | Append from a int/long number. | [
"Append",
"from",
"a",
"int",
"/",
"long",
"number",
"."
] | train | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/utils/BitLogic.py#L42-L55 |
SiLab-Bonn/basil | basil/utils/BitLogic.py | BitLogic.tovalue | def tovalue(self, fmt='Q'):
'''
Convert bitstring to a int/long number.
'''
format_size = struct.calcsize(fmt)
if self.length() > format_size * 8:
raise TypeError('Cannot convert to number')
ba = self.copy()
ba.extend((format_size * 8 - self.length()) ... | python | def tovalue(self, fmt='Q'):
'''
Convert bitstring to a int/long number.
'''
format_size = struct.calcsize(fmt)
if self.length() > format_size * 8:
raise TypeError('Cannot convert to number')
ba = self.copy()
ba.extend((format_size * 8 - self.length()) ... | [
"def",
"tovalue",
"(",
"self",
",",
"fmt",
"=",
"'Q'",
")",
":",
"format_size",
"=",
"struct",
".",
"calcsize",
"(",
"fmt",
")",
"if",
"self",
".",
"length",
"(",
")",
">",
"format_size",
"*",
"8",
":",
"raise",
"TypeError",
"(",
"'Cannot convert to nu... | Convert bitstring to a int/long number. | [
"Convert",
"bitstring",
"to",
"a",
"int",
"/",
"long",
"number",
"."
] | train | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/utils/BitLogic.py#L57-L66 |
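A round-trip sketch with the two converters above (the default `fmt='Q'` packs 64 bits before the result is truncated to `size`):

```python
from basil.utils.BitLogic import BitLogic

bl = BitLogic.from_value(11, size=8)   # 0b00001011 in 8 bits
assert len(bl) == 8
assert bl.tovalue() == 11
```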
SiLab-Bonn/basil | basil/utils/BitLogic.py | BitLogic._swap_slice_indices | def _swap_slice_indices(self, slc, make_slice=False):
'''Swap slice indices
Change slice indices from Verilog slicing (e.g. IEEE 1800-2012) to Python slicing.
'''
try:
start = slc.start
stop = slc.stop
slc_step = slc.step
except AttributeError... | python | def _swap_slice_indices(self, slc, make_slice=False):
'''Swap slice indices
Change slice indices from Verilog slicing (e.g. IEEE 1800-2012) to Python slicing.
'''
try:
start = slc.start
stop = slc.stop
slc_step = slc.step
except AttributeError... | [
"def",
"_swap_slice_indices",
"(",
"self",
",",
"slc",
",",
"make_slice",
"=",
"False",
")",
":",
"try",
":",
"start",
"=",
"slc",
".",
"start",
"stop",
"=",
"slc",
".",
"stop",
"slc_step",
"=",
"slc",
".",
"step",
"except",
"AttributeError",
":",
"if"... | Swap slice indices
Change slice indices from Verilog slicing (e.g. IEEE 1800-2012) to Python slicing. | [
"Swap",
"slice",
"indices"
] | train | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/utils/BitLogic.py#L107-L136 |
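What the swap amounts to, shown as a hypothetical standalone function: Verilog's inclusive `[msb:lsb]` becomes Python's half-open `[lsb:msb + 1]`:

```python
def swap_slice(slc):
    # Hypothetical stand-in for the method above; step handling omitted
    return slice(slc.stop, slc.start + 1, slc.step)

bits = [0, 1, 0, 0, 1, 0, 1, 1]                       # index 0 is the LSB
assert bits[swap_slice(slice(3, 0))] == [0, 1, 0, 0]  # Verilog [3:0]
```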
SiLab-Bonn/basil | examples/lx9/host/lx9.py | Pixel._run_seq | def _run_seq(self, size):
"""
Send the contents of self['SEQ'] to the chip and wait until it finishes.
"""
# Write the sequence to the sequence generator (hw driver)
self['SEQ'].write(size) #write pattern to memory
self['SEQ'].set_size(size) # set size... | python | def _run_seq(self, size):
"""
Send the contents of self['SEQ'] to the chip and wait until it finishes.
"""
# Write the sequence to the sequence generator (hw driver)
self['SEQ'].write(size) #write pattern to memory
self['SEQ'].set_size(size) # set size... | [
"def",
"_run_seq",
"(",
"self",
",",
"size",
")",
":",
"# Write the sequence to the sequence generator (hw driver)",
"self",
"[",
"'SEQ'",
"]",
".",
"write",
"(",
"size",
")",
"#write pattern to memory",
"self",
"[",
"'SEQ'",
"]",
".",
"set_size",
"(",
"size",
"... | Send the contents of self['SEQ'] to the chip and wait until it finishes. | [
"Send",
"the",
"contents",
"of",
"self",
"[",
"SEQ",
"]",
"to",
"the",
"chip",
"and",
"wait",
"until",
"it",
"finishes",
"."
] | train | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/examples/lx9/host/lx9.py#L73-L89 |
SiLab-Bonn/basil | examples/lx9/host/lx9.py | Pixel._clear_strobes | def _clear_strobes(self):
"""
Resets the "enable" and "load" output streams to all 0.
"""
#reset some stuff
self['SEQ']['GLOBAL_SHIFT_EN'].setall(False)
self['SEQ']['GLOBAL_CTR_LD'].setall(False)
self['SEQ']['GLOBAL_DAC_LD'].setall(False)
self['SEQ']['PIX... | python | def _clear_strobes(self):
"""
Resets the "enable" and "load" output streams to all 0.
"""
#reset some stuff
self['SEQ']['GLOBAL_SHIFT_EN'].setall(False)
self['SEQ']['GLOBAL_CTR_LD'].setall(False)
self['SEQ']['GLOBAL_DAC_LD'].setall(False)
self['SEQ']['PIX... | [
"def",
"_clear_strobes",
"(",
"self",
")",
":",
"#reset some stuff",
"self",
"[",
"'SEQ'",
"]",
"[",
"'GLOBAL_SHIFT_EN'",
"]",
".",
"setall",
"(",
"False",
")",
"self",
"[",
"'SEQ'",
"]",
"[",
"'GLOBAL_CTR_LD'",
"]",
".",
"setall",
"(",
"False",
")",
"se... | Resets the "enable" and "load" output streams to all 0. | [
"Resets",
"the",
"enable",
"and",
"load",
"output",
"streams",
"to",
"all",
"0",
"."
] | train | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/examples/lx9/host/lx9.py#L91-L101 |
SiLab-Bonn/basil | basil/HL/spi.py | spi.set_data | def set_data(self, data, addr=0):
'''
Sets data for outgoing stream
'''
if self._mem_bytes < len(data):
raise ValueError('Size of data (%d bytes) is too big for memory (%d bytes)' % (len(data), self._mem_bytes))
self._intf.write(self._conf['base_addr'] + self._spi_mem... | python | def set_data(self, data, addr=0):
'''
Sets data for outgoing stream
'''
if self._mem_bytes < len(data):
raise ValueError('Size of data (%d bytes) is too big for memory (%d bytes)' % (len(data), self._mem_bytes))
self._intf.write(self._conf['base_addr'] + self._spi_mem... | [
"def",
"set_data",
"(",
"self",
",",
"data",
",",
"addr",
"=",
"0",
")",
":",
"if",
"self",
".",
"_mem_bytes",
"<",
"len",
"(",
"data",
")",
":",
"raise",
"ValueError",
"(",
"'Size of data (%d bytes) is too big for memory (%d bytes)'",
"%",
"(",
"len",
"(",
... | Sets data for outgoing stream | [
"Sets",
"data",
"for",
"outgoing",
"stream"
] | train | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/spi.py#L107-L113 |
SiLab-Bonn/basil | basil/HL/spi.py | spi.get_data | def get_data(self, size=None, addr=None):
'''
Gets data for incoming stream
'''
# readback memory offset
if addr is None:
addr = self._mem_bytes
if size and self._mem_bytes < size:
raise ValueError('Size is too big')
if size is None:
... | python | def get_data(self, size=None, addr=None):
'''
Gets data for incoming stream
'''
# readback memory offset
if addr is None:
addr = self._mem_bytes
if size and self._mem_bytes < size:
raise ValueError('Size is too big')
if size is None:
... | [
"def",
"get_data",
"(",
"self",
",",
"size",
"=",
"None",
",",
"addr",
"=",
"None",
")",
":",
"# readback memory offset",
"if",
"addr",
"is",
"None",
":",
"addr",
"=",
"self",
".",
"_mem_bytes",
"if",
"size",
"and",
"self",
".",
"_mem_bytes",
"<",
"siz... | Gets data for incoming stream | [
"Gets",
"data",
"for",
"incoming",
"stream"
] | train | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/spi.py#L116-L130 |
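Typical driver flow, assuming a `dut` wired from a basil YAML configuration with an `SPI` module whose memory fits the payload; `set_size`, `start` and `is_ready` are assumed to be the usual companions of this driver:

```python
dut['SPI'].set_data([0x0F, 0xA0])    # load the outgoing stream
dut['SPI'].set_size(16)              # shift 16 bits
dut['SPI'].start()
while not dut['SPI'].is_ready:       # wait for the transfer to finish
    pass
print(dut['SPI'].get_data(size=2))   # bytes clocked back in
```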
SiLab-Bonn/basil | basil/TL/Serial.py | Serial.init | def init(self):
'''
Initialize serial device.
Parameters of serial.Serial: http://pyserial.sourceforge.net/pyserial_api.html
Plus termination string parameter eol
'''
super(Serial, self).init()
self.read_termination = self._init.get('read_termination', None)
... | python | def init(self):
'''
Initialize serial device.
Parameters of serial.Serial: http://pyserial.sourceforge.net/pyserial_api.html
Plus termination string parameter eol
'''
super(Serial, self).init()
self.read_termination = self._init.get('read_termination', None)
... | [
"def",
"init",
"(",
"self",
")",
":",
"super",
"(",
"Serial",
",",
"self",
")",
".",
"init",
"(",
")",
"self",
".",
"read_termination",
"=",
"self",
".",
"_init",
".",
"get",
"(",
"'read_termination'",
",",
"None",
")",
"self",
".",
"write_termination"... | Initialize serial device.
Parameters of serial.Serial: http://pyserial.sourceforge.net/pyserial_api.html
Plus termination string parameter eol | [
"Initialize",
"serial",
"device",
".",
"Parameters",
"of",
"serial",
".",
"Serial",
":",
"http",
":",
"//",
"pyserial",
".",
"sourceforge",
".",
"net",
"/",
"pyserial_api",
".",
"html",
"Plus",
"termination",
"string",
"parameter",
"eol"
] | train | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/TL/Serial.py#L24-L40 |
SiLab-Bonn/basil | basil/HL/GPAC.py | AdcMax11644._setup_adc | def _setup_adc(self, flags):
'''Initialize ADC
'''
self._intf.write(self._base_addr + self.MAX11644_ADD, array('B', pack('B', flags | self.MAX11644_SETUP))) | python | def _setup_adc(self, flags):
'''Initialize ADC
'''
self._intf.write(self._base_addr + self.MAX11644_ADD, array('B', pack('B', flags | self.MAX11644_SETUP))) | [
"def",
"_setup_adc",
"(",
"self",
",",
"flags",
")",
":",
"self",
".",
"_intf",
".",
"write",
"(",
"self",
".",
"_base_addr",
"+",
"self",
".",
"MAX11644_ADD",
",",
"array",
"(",
"'B'",
",",
"pack",
"(",
"'B'",
",",
"flags",
"|",
"self",
".",
"MAX1... | Initialize ADC | [
"Initialize",
"ADC"
] | train | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/GPAC.py#L174-L177 |
SiLab-Bonn/basil | basil/HL/GPAC.py | GPAC.read_eeprom_calibration | def read_eeprom_calibration(self): # use default values for temperature, EEPROM values are usually not calibrated and random
'''Reading EEPROM calibration for sources and regulators
'''
header = self.get_format()
if header == self.HEADER_GPAC:
data = self._read_eeprom(self.C... | python | def read_eeprom_calibration(self): # use default values for temperature, EEPROM values are usually not calibrated and random
'''Reading EEPROM calibration for sources and regulators
'''
header = self.get_format()
if header == self.HEADER_GPAC:
data = self._read_eeprom(self.C... | [
"def",
"read_eeprom_calibration",
"(",
"self",
")",
":",
"# use default values for temperature, EEPROM values are usually not calibrated and random",
"header",
"=",
"self",
".",
"get_format",
"(",
")",
"if",
"header",
"==",
"self",
".",
"HEADER_GPAC",
":",
"data",
"=",
... | Reading EEPROM calibration for sources and regulators | [
"Reading",
"EEPROM",
"calibration",
"for",
"sources",
"and",
"regulators"
] | train | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/GPAC.py#L716-L737 |
SiLab-Bonn/basil | basil/HL/GPAC.py | GPAC.get_voltage | def get_voltage(self, channel, unit='V'):
'''Reading voltage
'''
adc_ch = self._ch_map[channel]['ADCV']['adc_ch']
address = self._ch_map[channel]['ADCV']['address']
raw = self._get_adc_value(address=address)[adc_ch]
dac_offset = self._ch_cal[channel]['ADCV']['offset']
... | python | def get_voltage(self, channel, unit='V'):
'''Reading voltage
'''
adc_ch = self._ch_map[channel]['ADCV']['adc_ch']
address = self._ch_map[channel]['ADCV']['address']
raw = self._get_adc_value(address=address)[adc_ch]
dac_offset = self._ch_cal[channel]['ADCV']['offset']
... | [
"def",
"get_voltage",
"(",
"self",
",",
"channel",
",",
"unit",
"=",
"'V'",
")",
":",
"adc_ch",
"=",
"self",
".",
"_ch_map",
"[",
"channel",
"]",
"[",
"'ADCV'",
"]",
"[",
"'adc_ch'",
"]",
"address",
"=",
"self",
".",
"_ch_map",
"[",
"channel",
"]",
... | Reading voltage | [
"Reading",
"voltage"
] | train | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/GPAC.py#L764-L783 |
SiLab-Bonn/basil | basil/HL/GPAC.py | GPAC.get_current | def get_current(self, channel, unit='A'):
'''Reading current
'''
values = self._get_adc_value(address=self._ch_map[channel]['ADCI']['address'])
raw = values[self._ch_map[channel]['ADCI']['adc_ch']]
dac_offset = self._ch_cal[channel]['ADCI']['offset']
dac_gain = self._ch_c... | python | def get_current(self, channel, unit='A'):
'''Reading current
'''
values = self._get_adc_value(address=self._ch_map[channel]['ADCI']['address'])
raw = values[self._ch_map[channel]['ADCI']['adc_ch']]
dac_offset = self._ch_cal[channel]['ADCI']['offset']
dac_gain = self._ch_c... | [
"def",
"get_current",
"(",
"self",
",",
"channel",
",",
"unit",
"=",
"'A'",
")",
":",
"values",
"=",
"self",
".",
"_get_adc_value",
"(",
"address",
"=",
"self",
".",
"_ch_map",
"[",
"channel",
"]",
"[",
"'ADCI'",
"]",
"[",
"'address'",
"]",
")",
"raw... | Reading current | [
"Reading",
"current"
] | train | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/GPAC.py#L785-L819 |
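A sketch of the offset/gain arithmetic visible above, with invented calibration constants; only the structure (raw ADC counts, offset subtraction, gain division) is taken from the code, and the mA base unit is an assumption:

```python
raw = 2048                            # hypothetical ADC reading
dac_offset, dac_gain = 12.0, 0.488    # hypothetical channel calibration

current_ma = (raw - dac_offset) / dac_gain   # channel current in mA
print(round(current_ma / 1000.0, 6), 'A')    # unit='A' conversion
```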
SiLab-Bonn/basil | basil/HL/GPAC.py | GPAC.set_enable | def set_enable(self, channel, value):
'''Enable/Disable output of power channel
'''
try:
bit = self._ch_map[channel]['GPIOEN']['bit']
except KeyError:
raise ValueError('set_enable() not supported for channel %s' % channel)
self._set_power_gpio_value(bit=bi... | python | def set_enable(self, channel, value):
'''Enable/Disable output of power channel
'''
try:
bit = self._ch_map[channel]['GPIOEN']['bit']
except KeyError:
raise ValueError('set_enable() not supported for channel %s' % channel)
self._set_power_gpio_value(bit=bi... | [
"def",
"set_enable",
"(",
"self",
",",
"channel",
",",
"value",
")",
":",
"try",
":",
"bit",
"=",
"self",
".",
"_ch_map",
"[",
"channel",
"]",
"[",
"'GPIOEN'",
"]",
"[",
"'bit'",
"]",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"'set_enable(... | Enable/Disable output of power channel | [
"Enable",
"/",
"Disable",
"output",
"of",
"power",
"channel"
] | train | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/GPAC.py#L821-L828 |
SiLab-Bonn/basil | basil/HL/GPAC.py | GPAC.get_over_current | def get_over_current(self, channel):
'''Reading over current status of power channel
'''
try:
bit = self._ch_map[channel]['GPIOOC']['bit']
except KeyError:
raise ValueError('get_over_current() not supported for channel %s' % channel)
return not self._get_p... | python | def get_over_current(self, channel):
'''Reading over current status of power channel
'''
try:
bit = self._ch_map[channel]['GPIOOC']['bit']
except KeyError:
raise ValueError('get_over_current() not supported for channel %s' % channel)
return not self._get_p... | [
"def",
"get_over_current",
"(",
"self",
",",
"channel",
")",
":",
"try",
":",
"bit",
"=",
"self",
".",
"_ch_map",
"[",
"channel",
"]",
"[",
"'GPIOOC'",
"]",
"[",
"'bit'",
"]",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"'get_over_current() not ... | Reading over current status of power channel | [
"Reading",
"over",
"current",
"status",
"of",
"power",
"channel"
] | train | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/GPAC.py#L830-L837 |
SiLab-Bonn/basil | basil/HL/GPAC.py | GPAC.set_current_limit | def set_current_limit(self, channel, value, unit='A'):
'''Setting current limit
Note: same limit for all channels.
'''
# TODO: add units / calibration
if unit == 'raw':
value = value
elif unit == 'A':
value = int(value * 1000 * self.CURRENT_LIMIT_... | python | def set_current_limit(self, channel, value, unit='A'):
'''Setting current limit
Note: same limit for all channels.
'''
# TODO: add units / calibration
if unit == 'raw':
value = value
elif unit == 'A':
value = int(value * 1000 * self.CURRENT_LIMIT_... | [
"def",
"set_current_limit",
"(",
"self",
",",
"channel",
",",
"value",
",",
"unit",
"=",
"'A'",
")",
":",
"# TODO: add units / calibration",
"if",
"unit",
"==",
"'raw'",
":",
"value",
"=",
"value",
"elif",
"unit",
"==",
"'A'",
":",
"value",
"=",
"int",
"... | Setting current limit
Note: same limit for all channels. | [
"Setting",
"current",
"limit"
] | train | https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/HL/GPAC.py#L839-L856 |