| id (int32, 0-252k) | repo (string, 7-55 chars) | path (string, 4-127 chars) | func_name (string, 1-88 chars) | code (string, 75-19.8k chars) | language (1 class: python) | docstring (string, 3-17.3k chars) | sha (40-char hash) | url (string, 87-242 chars) |
|---|---|---|---|---|---|---|---|---|
12,100
|
nschloe/optimesh
|
optimesh/cvt/lloyd.py
|
quasi_newton_uniform_lloyd
|
def quasi_newton_uniform_lloyd(points, cells, *args, omega=1.0, **kwargs):
"""Relaxed Lloyd's algorithm. omega=1 leads to Lloyd's algorithm, overrelaxation
omega=2 gives good results. Check out
Xiao Xiao,
Over-Relaxation Lloyd Method For Computing Centroidal Voronoi Tessellations,
Master's thesis,
<https://scholarcommons.sc.edu/etd/295/>.
Everything above omega=2 can lead to flickering, i.e., rapidly alternating updates
and bad meshes.
"""
def get_new_points(mesh):
x = (
mesh.node_coords
- omega / 2 * jac_uniform(mesh) / mesh.control_volumes[:, None]
)
# update boundary and ghosts
idx = mesh.is_boundary_node & ~ghosted_mesh.is_ghost_point
x[idx] = mesh.node_coords[idx]
x[ghosted_mesh.is_ghost_point] = ghosted_mesh.reflect_ghost(
x[ghosted_mesh.mirrors]
)
return x
ghosted_mesh = GhostedMesh(points, cells)
runner(
get_new_points,
ghosted_mesh,
*args,
**kwargs,
update_topology=lambda mesh: ghosted_mesh.update_topology(),
# get_stats_mesh=lambda mesh: ghosted_mesh.get_unghosted_mesh(),
)
mesh = ghosted_mesh.get_unghosted_mesh()
return mesh.node_coords, mesh.cells["nodes"]
|
python
|
Relaxed Lloyd's algorithm. omega=1 leads to Lloyd's algorithm; overrelaxation with
omega=2 gives good results. Check out
Xiao Xiao,
Over-Relaxation Lloyd Method For Computing Centroidal Voronoi Tessellations,
Master's thesis,
<https://scholarcommons.sc.edu/etd/295/>.
Everything above omega=2 can lead to flickering, i.e., rapidly alternating updates
and bad meshes.
|
b85f48d1559a51a01cc3df6214c61ca8ad5ed786
|
https://github.com/nschloe/optimesh/blob/b85f48d1559a51a01cc3df6214c61ca8ad5ed786/optimesh/cvt/lloyd.py#L8-L46
|
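A usage sketch for the row above. The docstring recommends omega=2 for overrelaxation; the import path follows the `path` column, and treating the forwarded `*args` as a tolerance and a maximum step count is an assumption, not something this row confirms.

```python
# Hypothetical usage of the relaxed Lloyd smoother above (not part of the dataset).
# Assumptions: the function is importable via the module named in the `path` column,
# and *args are forwarded to the runner as (tolerance, max_num_steps).
import numpy
from optimesh.cvt.lloyd import quasi_newton_uniform_lloyd

# Unit square with one interior point and four triangles around it.
points = numpy.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [0.4, 0.6]])
cells = numpy.array([[0, 1, 4], [1, 2, 4], [2, 3, 4], [3, 0, 4]])

# omega=2.0 is the overrelaxation value the docstring recommends.
new_points, new_cells = quasi_newton_uniform_lloyd(points, cells, 1.0e-3, 100, omega=2.0)
```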
12,101
|
nschloe/optimesh
|
optimesh/cpt.py
|
_energy_uniform_per_node
|
def _energy_uniform_per_node(X, cells):
"""The CPT mesh energy is defined as
sum_i E_i,
E_i = 1/(d+1) * sum int_{omega_i} ||x - x_i||^2 rho(x) dx,
see Chen-Holst. This method gives the E_i and assumes uniform density, rho(x) = 1.
"""
dim = 2
mesh = MeshTri(X, cells)
star_integrals = numpy.zeros(mesh.node_coords.shape[0])
# Python loop over the cells... slow!
for cell, cell_volume in zip(mesh.cells["nodes"], mesh.cell_volumes):
for idx in cell:
xi = mesh.node_coords[idx]
tri = mesh.node_coords[cell]
val = quadpy.triangle.integrate(
lambda x: numpy.einsum("ij,ij->i", x.T - xi, x.T - xi),
tri,
# Take any scheme with order 2
quadpy.triangle.Dunavant(2),
)
star_integrals[idx] += val
return star_integrals / (dim + 1)
|
python
|
The CPT mesh energy is defined as
sum_i E_i,
E_i = 1/(d+1) * sum int_{omega_i} ||x - x_i||^2 rho(x) dx,
see Chen-Holst. This method gives the E_i and assumes uniform density, rho(x) = 1.
|
b85f48d1559a51a01cc3df6214c61ca8ad5ed786
|
https://github.com/nschloe/optimesh/blob/b85f48d1559a51a01cc3df6214c61ca8ad5ed786/optimesh/cpt.py#L91-L116
|
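The per-node energy in the docstring above, rendered as LaTeX with the sum written explicitly over the triangles tau_j that make up the star omega_i (the original leaves the sum index implicit); rho is identically 1 in the uniform case this function handles.

```latex
E_i = \frac{1}{d+1} \sum_{\tau_j \subset \omega_i} \int_{\tau_j} \lVert x - x_i \rVert^2 \, \rho(x) \, \mathrm{d}x
```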
12,102
|
nschloe/optimesh
|
optimesh/cpt.py
|
jac_uniform
|
def jac_uniform(X, cells):
"""The approximated Jacobian is
partial_i E = 2/(d+1) (x_i int_{omega_i} rho(x) dx - int_{omega_i} x rho(x) dx)
= 2/(d+1) sum_{tau_j in omega_i} (x_i - b_{j, rho}) int_{tau_j} rho,
see Chen-Holst. This method here assumes uniform density, rho(x) = 1, such that
partial_i E = 2/(d+1) sum_{tau_j in omega_i} (x_i - b_j) |tau_j|
with b_j being the ordinary barycenter.
"""
dim = 2
mesh = MeshTri(X, cells)
jac = numpy.zeros(X.shape)
for k in range(mesh.cells["nodes"].shape[1]):
i = mesh.cells["nodes"][:, k]
fastfunc.add.at(
jac,
i,
((mesh.node_coords[i] - mesh.cell_barycenters).T * mesh.cell_volumes).T,
)
return 2 / (dim + 1) * jac
|
python
|
The approximated Jacobian is
partial_i E = 2/(d+1) (x_i int_{omega_i} rho(x) dx - int_{omega_i} x rho(x) dx)
= 2/(d+1) sum_{tau_j in omega_i} (x_i - b_{j, rho}) int_{tau_j} rho,
see Chen-Holst. This method here assumes uniform density, rho(x) = 1, such that
partial_i E = 2/(d+1) sum_{tau_j in omega_i} (x_i - b_j) |tau_j|
with b_j being the ordinary barycenter.
|
b85f48d1559a51a01cc3df6214c61ca8ad5ed786
|
https://github.com/nschloe/optimesh/blob/b85f48d1559a51a01cc3df6214c61ca8ad5ed786/optimesh/cpt.py#L123-L147
|
12,103
|
nschloe/optimesh
|
optimesh/cpt.py
|
solve_hessian_approx_uniform
|
def solve_hessian_approx_uniform(X, cells, rhs):
"""As discussed above, the approximated Jacobian is
partial_i E = 2/(d+1) sum_{tau_j in omega_i} (x_i - b_j) |tau_j|.
To get the Hessian, we have to form its derivative. As a simplification,
let us assume again that |tau_j| is independent of the node positions. Then we get
partial_ii E = 2/(d+1) |omega_i| - 2/(d+1)**2 |omega_i|,
partial_ij E = -2/(d+1)**2 |tau_j|.
The terms with (d+1)**2 are from the barycenter in `partial_i E`. It turns out from
numerical experiments that the negative term in `partial_ii E` is detrimental to the
convergence. Hence, this approximated Hessian solver only considers the off-diagonal
contributions from the barycentric terms.
"""
dim = 2
mesh = MeshTri(X, cells)
# Create matrix in IJV format
row_idx = []
col_idx = []
val = []
cells = mesh.cells["nodes"].T
n = X.shape[0]
# Main diagonal, 2/(d+1) |omega_i| x_i
a = mesh.cell_volumes * (2 / (dim + 1))
for i in [0, 1, 2]:
row_idx += [cells[i]]
col_idx += [cells[i]]
val += [a]
# terms corresponding to -2/(d+1) * b_j |tau_j|
a = mesh.cell_volumes * (2 / (dim + 1) ** 2)
for i in [[0, 1, 2], [1, 2, 0], [2, 0, 1]]:
edges = cells[i]
# Leads to funny oscillatory movements
# row_idx += [edges[0], edges[0], edges[0]]
# col_idx += [edges[0], edges[1], edges[2]]
# val += [-a, -a, -a]
# Best so far
row_idx += [edges[0], edges[0]]
col_idx += [edges[1], edges[2]]
val += [-a, -a]
row_idx = numpy.concatenate(row_idx)
col_idx = numpy.concatenate(col_idx)
val = numpy.concatenate(val)
# Set Dirichlet conditions on the boundary
matrix = scipy.sparse.coo_matrix((val, (row_idx, col_idx)), shape=(n, n))
# Transform to CSR format for efficiency
matrix = matrix.tocsr()
# Apply Dirichlet conditions.
# Set all Dirichlet rows to 0.
for i in numpy.where(mesh.is_boundary_node)[0]:
matrix.data[matrix.indptr[i] : matrix.indptr[i + 1]] = 0.0
# Set the diagonal and RHS.
d = matrix.diagonal()
d[mesh.is_boundary_node] = 1.0
matrix.setdiag(d)
rhs[mesh.is_boundary_node] = 0.0
out = scipy.sparse.linalg.spsolve(matrix, rhs)
# PyAMG fails on circleci.
# ml = pyamg.ruge_stuben_solver(matrix)
# # Keep an eye on multiple rhs-solves in pyamg,
# # <https://github.com/pyamg/pyamg/issues/215>.
# tol = 1.0e-10
# out = numpy.column_stack(
# [ml.solve(rhs[:, 0], tol=tol), ml.solve(rhs[:, 1], tol=tol)]
# )
return out
|
python
|
As discussed above, the approximated Jacobian is
partial_i E = 2/(d+1) sum_{tau_j in omega_i} (x_i - b_j) |tau_j|.
To get the Hessian, we have to form its derivative. As a simplification,
let us assume again that |tau_j| is independent of the node positions. Then we get
partial_ii E = 2/(d+1) |omega_i| - 2/(d+1)**2 |omega_i|,
partial_ij E = -2/(d+1)**2 |tau_j|.
The terms with (d+1)**2 are from the barycenter in `partial_i E`. It turns out from
numerical experiments that the negative term in `partial_ii E` is detrimental to the
convergence. Hence, this approximated Hessian solver only considers the off-diagonal
contributions from the barycentric terms.
|
b85f48d1559a51a01cc3df6214c61ca8ad5ed786
|
https://github.com/nschloe/optimesh/blob/b85f48d1559a51a01cc3df6214c61ca8ad5ed786/optimesh/cpt.py#L150-L227
|
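The boundary handling in the row above follows a standard scipy.sparse pattern: zero the CSR rows of constrained nodes via `indptr`, put 1.0 on their diagonal, and zero the matching right-hand-side entries. A self-contained toy sketch of just that pattern, using only numpy and scipy (unrelated to any particular mesh):

```python
# Toy illustration of the CSR "Dirichlet row" trick used above.
import numpy
import scipy.sparse
import scipy.sparse.linalg

rows = numpy.array([0, 0, 1, 1, 1, 2, 2])
cols = numpy.array([0, 1, 0, 1, 2, 1, 2])
vals = numpy.array([2.0, -1.0, -1.0, 2.0, -1.0, -1.0, 2.0])
matrix = scipy.sparse.coo_matrix((vals, (rows, cols)), shape=(3, 3)).tocsr()
rhs = numpy.array([1.0, 1.0, 1.0])
is_dirichlet = numpy.array([True, False, False])

# Zero out every stored entry in the constrained rows ...
for i in numpy.where(is_dirichlet)[0]:
    matrix.data[matrix.indptr[i]:matrix.indptr[i + 1]] = 0.0
# ... then a unit diagonal plus a zero right-hand side pins those unknowns to 0.
d = matrix.diagonal()
d[is_dirichlet] = 1.0
matrix.setdiag(d)
rhs[is_dirichlet] = 0.0

x = scipy.sparse.linalg.spsolve(matrix, rhs)  # x[0] == 0.0
```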
12,104
|
nschloe/optimesh
|
optimesh/cpt.py
|
quasi_newton_uniform
|
def quasi_newton_uniform(points, cells, *args, **kwargs):
"""Like linear_solve above, but assuming rho==1. Note that the energy gradient
\\partial E_i = 2/(d+1) sum_{tau_j in omega_i} (x_i - b_j) \\int_{tau_j} rho
becomes
\\partial E_i = 2/(d+1) sum_{tau_j in omega_i} (x_i - b_j) |tau_j|.
Because of the dependence of |tau_j| on the point coordinates, this is a nonlinear
problem.
This method makes the simplifying assumption that |tau_j| does in fact _not_ depend
on the point coordinates. With this, one still only needs to solve a linear system.
"""
def get_new_points(mesh):
# do one Newton step
# TODO need copy?
x = mesh.node_coords.copy()
cells = mesh.cells["nodes"]
jac_x = jac_uniform(x, cells)
x -= solve_hessian_approx_uniform(x, cells, jac_x)
return x
mesh = MeshTri(points, cells)
runner(get_new_points, mesh, *args, **kwargs)
return mesh.node_coords, mesh.cells["nodes"]
|
python
|
Like linear_solve above, but assuming rho==1. Note that the energy gradient
\\partial E_i = 2/(d+1) sum_{tau_j in omega_i} (x_i - b_j) \\int_{tau_j} rho
becomes
\\partial E_i = 2/(d+1) sum_{tau_j in omega_i} (x_i - b_j) |tau_j|.
Because of the dependence of |tau_j| on the point coordinates, this is a nonlinear
problem.
This method makes the simplifying assumption that |tau_j| does in fact _not_ depend
on the point coordinates. With this, one still only needs to solve a linear system.
|
b85f48d1559a51a01cc3df6214c61ca8ad5ed786
|
https://github.com/nschloe/optimesh/blob/b85f48d1559a51a01cc3df6214c61ca8ad5ed786/optimesh/cpt.py#L230-L257
|
12,105
|
nschloe/optimesh
|
optimesh/laplace.py
|
fixed_point
|
def fixed_point(points, cells, *args, **kwargs):
"""Perform k steps of Laplacian smoothing to the mesh, i.e., moving each
interior vertex to the arithmetic average of its neighboring points.
"""
def get_new_points(mesh):
# move interior points into average of their neighbors
num_neighbors = numpy.zeros(len(mesh.node_coords), dtype=int)
idx = mesh.edges["nodes"]
fastfunc.add.at(num_neighbors, idx, numpy.ones(idx.shape, dtype=int))
new_points = numpy.zeros(mesh.node_coords.shape)
fastfunc.add.at(new_points, idx[:, 0], mesh.node_coords[idx[:, 1]])
fastfunc.add.at(new_points, idx[:, 1], mesh.node_coords[idx[:, 0]])
new_points /= num_neighbors[:, None]
idx = mesh.is_boundary_node
new_points[idx] = mesh.node_coords[idx]
return new_points
mesh = MeshTri(points, cells)
runner(get_new_points, mesh, *args, **kwargs)
return mesh.node_coords, mesh.cells["nodes"]
|
python
|
Perform k steps of Laplacian smoothing to the mesh, i.e., moving each
interior vertex to the arithmetic average of its neighboring points.
|
b85f48d1559a51a01cc3df6214c61ca8ad5ed786
|
https://github.com/nschloe/optimesh/blob/b85f48d1559a51a01cc3df6214c61ca8ad5ed786/optimesh/laplace.py#L12-L34
|
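In the row above, `fastfunc.add.at` acts as a fast drop-in for `numpy.add.at`. A self-contained sketch of one smoothing step with plain numpy on a made-up five-point mesh (no optimesh or fastfunc dependency):

```python
# One Laplacian smoothing step with plain numpy; numpy.add.at plays the role
# of fastfunc.add.at in the fixed_point function above.
import numpy

# Made-up mesh: four boundary corners plus one interior point.
points = numpy.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [0.6, 0.4]])
edges = numpy.array([[0, 1], [1, 2], [2, 3], [3, 0], [0, 4], [1, 4], [2, 4], [3, 4]])
is_boundary = numpy.array([True, True, True, True, False])

num_neighbors = numpy.zeros(len(points), dtype=int)
numpy.add.at(num_neighbors, edges, numpy.ones(edges.shape, dtype=int))

new_points = numpy.zeros(points.shape)
numpy.add.at(new_points, edges[:, 0], points[edges[:, 1]])
numpy.add.at(new_points, edges[:, 1], points[edges[:, 0]])
new_points /= num_neighbors[:, None]

# Boundary points stay put, exactly as in fixed_point above; the interior
# point moves toward the average of its neighbors, (0.5, 0.5).
new_points[is_boundary] = points[is_boundary]
```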
12,106
|
nschloe/optimesh
|
optimesh/odt.py
|
energy
|
def energy(mesh, uniform_density=False):
"""The mesh energy is defined as
E = int_Omega |u_l(x) - u(x)| rho(x) dx
where u(x) = ||x||^2 and u_l is its piecewise linearization on the mesh.
"""
# E = 1/(d+1) sum_i ||x_i||^2 |omega_i| - int_Omega_i ||x||^2
dim = mesh.cells["nodes"].shape[1] - 1
star_volume = numpy.zeros(mesh.node_coords.shape[0])
for i in range(3):
idx = mesh.cells["nodes"][:, i]
if uniform_density:
# rho = 1,
# int_{star} phi_i * rho = 1/(d+1) sum_{triangles in star} |triangle|
fastfunc.add.at(star_volume, idx, mesh.cell_volumes)
else:
# rho = 1 / tau_j,
# int_{star} phi_i * rho = 1/(d+1) |num triangles in star|
fastfunc.add.at(star_volume, idx, numpy.ones(idx.shape, dtype=float))
x2 = numpy.einsum("ij,ij->i", mesh.node_coords, mesh.node_coords)
out = 1 / (dim + 1) * numpy.dot(star_volume, x2)
# could be cached
assert dim == 2
x = mesh.node_coords[:, :2]
triangles = numpy.moveaxis(x[mesh.cells["nodes"]], 0, 1)
val = quadpy.triangle.integrate(
lambda x: x[0] ** 2 + x[1] ** 2,
triangles,
# Take any scheme with order 2
quadpy.triangle.Dunavant(2),
)
if uniform_density:
val = numpy.sum(val)
else:
rho = 1.0 / mesh.cell_volumes
val = numpy.dot(val, rho)
assert out >= val
return out - val
|
python
|
The mesh energy is defined as
E = int_Omega |u_l(x) - u(x)| rho(x) dx
where u(x) = ||x||^2 and u_l is its piecewise linearization on the mesh.
|
b85f48d1559a51a01cc3df6214c61ca8ad5ed786
|
https://github.com/nschloe/optimesh/blob/b85f48d1559a51a01cc3df6214c61ca8ad5ed786/optimesh/odt.py#L28-L70
|
12,107
|
nschloe/optimesh
|
optimesh/cvt/block_diagonal.py
|
quasi_newton_uniform_blocks
|
def quasi_newton_uniform_blocks(points, cells, *args, **kwargs):
"""Lloyd's algorithm can be though of a diagonal-only Hessian; this method
incorporates the diagonal blocks, too.
"""
def get_new_points(mesh):
# TODO need copy?
x = mesh.node_coords.copy()
x += update(mesh)
# update ghosts
x[ghosted_mesh.is_ghost_point] = ghosted_mesh.reflect_ghost(
x[ghosted_mesh.mirrors]
)
return x
ghosted_mesh = GhostedMesh(points, cells)
runner(
get_new_points,
ghosted_mesh,
*args,
**kwargs,
update_topology=lambda mesh: ghosted_mesh.update_topology(),
# get_stats_mesh=lambda mesh: ghosted_mesh.get_unghosted_mesh(),
)
mesh = ghosted_mesh.get_unghosted_mesh()
return mesh.node_coords, mesh.cells["nodes"]
|
python
|
Lloyd's algorithm can be thought of as a diagonal-only Hessian; this method
incorporates the diagonal blocks, too.
|
b85f48d1559a51a01cc3df6214c61ca8ad5ed786
|
https://github.com/nschloe/optimesh/blob/b85f48d1559a51a01cc3df6214c61ca8ad5ed786/optimesh/cvt/block_diagonal.py#L12-L39
|
12,108
|
linnarsson-lab/loompy
|
loompy/loompy.py
|
new
|
def new(filename: str, *, file_attrs: Optional[Dict[str, str]] = None) -> LoomConnection:
"""
Create an empty Loom file, and return it as a context manager.
"""
if filename.startswith("~/"):
filename = os.path.expanduser(filename)
if file_attrs is None:
file_attrs = {}
# Create the file (empty).
# Yes, this might cause an exception, which we prefer to send to the caller
f = h5py.File(name=filename, mode='w')
f.create_group('/layers')
f.create_group('/row_attrs')
f.create_group('/col_attrs')
f.create_group('/row_graphs')
f.create_group('/col_graphs')
f.flush()
f.close()
ds = connect(filename, validate=False)
for vals in file_attrs:
ds.attrs[vals] = file_attrs[vals]
# store creation date
currentTime = time.localtime(time.time())
ds.attrs['CreationDate'] = timestamp()
ds.attrs["LOOM_SPEC_VERSION"] = loompy.loom_spec_version
return ds
|
python
|
Create an empty Loom file, and return it as a context manager.
|
62c8373a92b058753baa3a95331fb541f560f599
|
https://github.com/linnarsson-lab/loompy/blob/62c8373a92b058753baa3a95331fb541f560f599/loompy/loompy.py#L993-L1020
|
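A brief sketch of the context-manager usage the docstring above describes; it assumes `new` is re-exported at the package level as `loompy.new`, and the filename and the "title" attribute are made up for illustration.

```python
import loompy

# `new` returns a LoomConnection usable as a context manager (per its docstring).
# "empty.loom" and the "title" attribute are illustrative names only.
with loompy.new("empty.loom") as ds:
    ds.attrs["title"] = "an empty loom file"
    print(ds.attrs["LOOM_SPEC_VERSION"])  # set by new() itself
```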
12,109
|
linnarsson-lab/loompy
|
loompy/loompy.py
|
create
|
def create(filename: str, layers: Union[np.ndarray, Dict[str, np.ndarray], loompy.LayerManager], row_attrs: Union[loompy.AttributeManager, Dict[str, np.ndarray]], col_attrs: Union[loompy.AttributeManager, Dict[str, np.ndarray]], *, file_attrs: Dict[str, str] = None) -> None:
"""
Create a new Loom file from the given data.
Args:
filename (str): The filename (typically using a ``.loom`` file extension)
layers: One of the following:
* Two-dimensional (N-by-M) numpy ndarray of float values
* Sparse matrix (e.g. :class:`scipy.sparse.csr_matrix`)
* Dictionary of named layers, each an N-by-M ndarray or sparse matrix
* A :class:`.LayerManager`, with each layer an N-by-M ndarray
row_attrs (dict): Row attributes, where keys are attribute names and values
are numpy arrays (float or string) of length N
col_attrs (dict): Column attributes, where keys are attribute names and
values are numpy arrays (float or string) of length M
file_attrs (dict): Global attributes, where keys are attribute names and
values are strings
Returns:
Nothing
Remarks:
If the file exists, it will be overwritten.
"""
if isinstance(row_attrs, loompy.AttributeManager):
row_attrs = {k: v[:] for k, v in row_attrs.items()}
if isinstance(col_attrs, loompy.AttributeManager):
col_attrs = {k: v[:] for k, v in col_attrs.items()}
if isinstance(layers, np.ndarray) or scipy.sparse.issparse(layers):
layers = {"": layers}
elif isinstance(layers, loompy.LayerManager):
layers = {k: v[:, :] for k, v in layers.items()}
if "" not in layers:
raise ValueError("Data for default layer must be provided")
# Sanity checks
shape = layers[""].shape # type: ignore
if shape[0] == 0 or shape[1] == 0:
raise ValueError("Main matrix cannot be empty")
for name, layer in layers.items():
if layer.shape != shape: # type: ignore
raise ValueError(f"Layer '{name}' is not the same shape as the main matrix")
for name, ra in row_attrs.items():
if ra.shape[0] != shape[0]:
raise ValueError(f"Row attribute '{name}' is not the same length ({ra.shape[0]}) as number of rows in main matrix ({shape[0]})")
for name, ca in col_attrs.items():
if ca.shape[0] != shape[1]:
raise ValueError(f"Column attribute '{name}' is not the same length ({ca.shape[0]}) as number of columns in main matrix ({shape[1]})")
try:
with new(filename, file_attrs=file_attrs) as ds:
for key, vals in layers.items():
ds.layer[key] = vals
for key, vals in row_attrs.items():
ds.ra[key] = vals
for key, vals in col_attrs.items():
ds.ca[key] = vals
except ValueError as ve:
#ds.close(suppress_warning=True) # ds does not exist here
if os.path.exists(filename):
os.remove(filename)
raise ve
|
python
|
Create a new Loom file from the given data.
Args:
filename (str): The filename (typically using a ``.loom`` file extension)
layers: One of the following:
* Two-dimensional (N-by-M) numpy ndarray of float values
* Sparse matrix (e.g. :class:`scipy.sparse.csr_matrix`)
* Dictionary of named layers, each an N-by-M ndarray or sparse matrix
* A :class:`.LayerManager`, with each layer an N-by-M ndarray
row_attrs (dict): Row attributes, where keys are attribute names and values
are numpy arrays (float or string) of length N
col_attrs (dict): Column attributes, where keys are attribute names and
values are numpy arrays (float or string) of length M
file_attrs (dict): Global attributes, where keys are attribute names and
values are strings
Returns:
Nothing
Remarks:
If the file exists, it will be overwritten.
|
62c8373a92b058753baa3a95331fb541f560f599
|
https://github.com/linnarsson-lab/loompy/blob/62c8373a92b058753baa3a95331fb541f560f599/loompy/loompy.py#L1023-L1089
|
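A minimal call matching the Args section above; the shapes and attribute names are made up for illustration.

```python
import numpy as np
import loompy

# 100 rows (e.g. genes) by 10 columns (e.g. cells), with one row attribute of
# length 100 and one column attribute of length 10, as the docstring requires.
matrix = np.random.random((100, 10)).astype("float32")
row_attrs = {"Gene": np.array(["gene_%d" % i for i in range(100)])}
col_attrs = {"CellID": np.arange(10)}

loompy.create("example.loom", matrix, row_attrs, col_attrs)
```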
12,110
|
linnarsson-lab/loompy
|
loompy/loompy.py
|
connect
|
def connect(filename: str, mode: str = 'r+', *, validate: bool = True, spec_version: str = "2.0.1") -> LoomConnection:
"""
Establish a connection to a .loom file.
Args:
filename: Path to the Loom file to open
mode: Read/write mode, 'r+' (read/write) or 'r' (read-only), defaults to 'r+'
validate: Validate the file structure against the Loom file format specification
spec_version: The loom file spec version to validate against (e.g. "2.0.1" or "old")
Returns:
A LoomConnection instance.
Remarks:
This function should typically be used as a context manager (i.e. inside a ``with``-block):
.. highlight:: python
.. code-block:: python
import loompy
with loompy.connect("mydata.loom") as ds:
print(ds.ca.keys())
This ensures that the file will be closed automatically when the context block ends
Note: if validation is requested, an exception is raised if validation fails.
"""
return LoomConnection(filename, mode, validate=validate, spec_version=spec_version)
|
python
|
Establish a connection to a .loom file.
Args:
filename: Path to the Loom file to open
mode: Read/write mode, 'r+' (read/write) or 'r' (read-only), defaults to 'r+'
validate: Validate the file structure against the Loom file format specification
spec_version: The loom file spec version to validate against (e.g. "2.0.1" or "old")
Returns:
A LoomConnection instance.
Remarks:
This function should typically be used as a context manager (i.e. inside a ``with``-block):
.. highlight:: python
.. code-block:: python
import loompy
with loompy.connect("mydata.loom") as ds:
print(ds.ca.keys())
This ensures that the file will be closed automatically when the context block ends
Note: if validation is requested, an exception is raised if validation fails.
|
62c8373a92b058753baa3a95331fb541f560f599
|
https://github.com/linnarsson-lab/loompy/blob/62c8373a92b058753baa3a95331fb541f560f599/loompy/loompy.py#L1290-L1316
|
12,111
|
linnarsson-lab/loompy
|
loompy/loompy.py
|
LoomConnection.last_modified
|
def last_modified(self) -> str:
"""
Return an ISO8601 timestamp indicating when the file was last modified
Returns:
An ISO8601 timestamp indicating when the file was last modified
Remarks:
If the file has no timestamp, and mode is 'r+', a new timestamp is created and returned.
Otherwise, the current time in UTC is returned
"""
if "last_modified" in self.attrs:
return self.attrs["last_modified"]
elif self.mode == "r+":
# Make sure the file has modification timestamps
self.attrs["last_modified"] = timestamp()
return self.attrs["last_modified"]
return timestamp()
|
python
|
Return an ISO8601 timestamp indicating when the file was last modified
Returns:
An ISO8601 timestamp indicating when the file was last modified
Remarks:
If the file has no timestamp, and mode is 'r+', a new timestamp is created and returned.
Otherwise, the current time in UTC is returned
|
62c8373a92b058753baa3a95331fb541f560f599
|
https://github.com/linnarsson-lab/loompy/blob/62c8373a92b058753baa3a95331fb541f560f599/loompy/loompy.py#L115-L132
|
12,112
|
linnarsson-lab/loompy
|
loompy/loompy.py
|
LoomConnection.get_changes_since
|
def get_changes_since(self, timestamp: str) -> Dict[str, List]:
"""
Get a summary of the parts of the file that changed since the given time
Args:
timestamp: ISO8601 timestamp
Return:
dict: Dictionary like ``{"row_graphs": rg, "col_graphs": cg, "row_attrs": ra, "col_attrs": ca, "layers": layers}`` listing the names of objects that were modified since the given time
"""
rg = []
cg = []
ra = []
ca = []
layers = []
if self.last_modified() > timestamp:
if self.row_graphs.last_modified() > timestamp:
for name in self.row_graphs.keys():
if self.row_graphs.last_modified(name) > timestamp:
rg.append(name)
if self.col_graphs.last_modified() > timestamp:
for name in self.col_graphs.keys():
if self.col_graphs.last_modified(name) > timestamp:
cg.append(name)
if self.ra.last_modified() > timestamp:
for name in self.ra.keys():
if self.ra.last_modified(name) > timestamp:
ra.append(name)
if self.ca.last_modified() > timestamp:
for name in self.ca.keys():
if self.ca.last_modified(name) > timestamp:
ca.append(name)
if self.layers.last_modified() > timestamp:
for name in self.layers.keys():
if self.layers.last_modified(name) > timestamp:
layers.append(name)
return {"row_graphs": rg, "col_graphs": cg, "row_attrs": ra, "col_attrs": ca, "layers": layers}
|
python
|
Get a summary of the parts of the file that changed since the given time
Args:
timestamp: ISO8601 timestamp
Return:
dict: Dictionary like ``{"row_graphs": rg, "col_graphs": cg, "row_attrs": ra, "col_attrs": ca, "layers": layers}`` listing the names of objects that were modified since the given time
|
[
"Get",
"a",
"summary",
"of",
"the",
"parts",
"of",
"the",
"file",
"that",
"changed",
"since",
"the",
"given",
"time"
] |
62c8373a92b058753baa3a95331fb541f560f599
|
https://github.com/linnarsson-lab/loompy/blob/62c8373a92b058753baa3a95331fb541f560f599/loompy/loompy.py#L134-L171
|
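A minimal usage sketch for the get_changes_since record above, assuming a loompy connection opened as a context manager; the file name and timestamp are hypothetical placeholders, not values from the record.

# Sketch: list what changed in a .loom file since a given ISO8601 timestamp.
import loompy

with loompy.connect("example.loom") as ds:            # hypothetical file
    changed = ds.get_changes_since("2019-01-01T00:00:00")
    for group, names in changed.items():
        print(group, names)                            # e.g. layers ['spliced']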
12,113
|
linnarsson-lab/loompy
|
loompy/loompy.py
|
LoomConnection.sparse
|
def sparse(self, rows: np.ndarray = None, cols: np.ndarray = None, layer: str = None) -> scipy.sparse.coo_matrix:
"""
Return the main matrix or specified layer as a scipy.sparse.coo_matrix, without loading dense matrix in RAM
Args:
rows: Rows to include, or None to include all
cols: Columns to include, or None to include all
layer: Layer to return, or None to return the default layer
Returns:
Sparse matrix (:class:`scipy.sparse.coo_matrix`)
"""
if layer is None:
return self.layers[""].sparse(rows=rows, cols=cols)
else:
return self.layers[layer].sparse(rows=rows, cols=cols)
|
python
|
def sparse(self, rows: np.ndarray = None, cols: np.ndarray = None, layer: str = None) -> scipy.sparse.coo_matrix:
"""
Return the main matrix or specified layer as a scipy.sparse.coo_matrix, without loading dense matrix in RAM
Args:
rows: Rows to include, or None to include all
cols: Columns to include, or None to include all
layer: Layer to return, or None to return the default layer
Returns:
Sparse matrix (:class:`scipy.sparse.coo_matrix`)
"""
if layer is None:
return self.layers[""].sparse(rows=rows, cols=cols)
else:
return self.layers[layer].sparse(rows=rows, cols=cols)
|
[
"def",
"sparse",
"(",
"self",
",",
"rows",
":",
"np",
".",
"ndarray",
"=",
"None",
",",
"cols",
":",
"np",
".",
"ndarray",
"=",
"None",
",",
"layer",
":",
"str",
"=",
"None",
")",
"->",
"scipy",
".",
"sparse",
".",
"coo_matrix",
":",
"if",
"layer",
"is",
"None",
":",
"return",
"self",
".",
"layers",
"[",
"\"\"",
"]",
".",
"sparse",
"(",
"rows",
"=",
"rows",
",",
"cols",
"=",
"cols",
")",
"else",
":",
"return",
"self",
".",
"layers",
"[",
"layer",
"]",
".",
"sparse",
"(",
"rows",
"=",
"rows",
",",
"cols",
"=",
"cols",
")"
] |
Return the main matrix or specified layer as a scipy.sparse.coo_matrix, without loading dense matrix in RAM
Args:
rows: Rows to include, or None to include all
cols: Columns to include, or None to include all
layer: Layer to return, or None to return the default layer
Returns:
Sparse matrix (:class:`scipy.sparse.coo_matrix`)
|
[
"Return",
"the",
"main",
"matrix",
"or",
"specified",
"layer",
"as",
"a",
"scipy",
".",
"sparse",
".",
"coo_matrix",
"without",
"loading",
"dense",
"matrix",
"in",
"RAM"
] |
62c8373a92b058753baa3a95331fb541f560f599
|
https://github.com/linnarsson-lab/loompy/blob/62c8373a92b058753baa3a95331fb541f560f599/loompy/loompy.py#L229-L244
|
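A short sketch of reading data through the sparse accessor above without densifying the matrix; the file and layer names are hypothetical.

# Sketch: pull the main matrix and a named layer as scipy COO matrices.
import loompy

with loompy.connect("example.loom") as ds:             # hypothetical file
    coo = ds.sparse()                                  # default layer ""
    layer_coo = ds.sparse(layer="spliced")             # "spliced" is a placeholder layer name
    csr = coo.tocsr()                                  # convert if row slicing is needed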
12,114
|
linnarsson-lab/loompy
|
loompy/loompy.py
|
LoomConnection.close
|
def close(self, suppress_warning: bool = False) -> None:
"""
Close the connection. After this, the connection object becomes invalid. Warns user if called after closing.
Args:
suppress_warning: Suppresses warning message if True (defaults to false)
"""
if self._file is None:
if not suppress_warning:
# Warn user that they're being paranoid
# and should clean up their code
logging.warn("Connection to %s is already closed", self.filename)
else:
self._file.close()
self._file = None
self.layers = None # type: ignore
self.ra = None # type: ignore
self.row_attrs = None # type: ignore
self.ca = None # type: ignore
self.col_attrs = None # type: ignore
self.row_graphs = None # type: ignore
self.col_graphs = None # type: ignore
self.shape = (0, 0)
self._closed = True
|
python
|
def close(self, suppress_warning: bool = False) -> None:
"""
Close the connection. After this, the connection object becomes invalid. Warns user if called after closing.
Args:
suppress_warning: Suppresses warning message if True (defaults to false)
"""
if self._file is None:
if not suppress_warning:
# Warn user that they're being paranoid
# and should clean up their code
logging.warn("Connection to %s is already closed", self.filename)
else:
self._file.close()
self._file = None
self.layers = None # type: ignore
self.ra = None # type: ignore
self.row_attrs = None # type: ignore
self.ca = None # type: ignore
self.col_attrs = None # type: ignore
self.row_graphs = None # type: ignore
self.col_graphs = None # type: ignore
self.shape = (0, 0)
self._closed = True
|
[
"def",
"close",
"(",
"self",
",",
"suppress_warning",
":",
"bool",
"=",
"False",
")",
"->",
"None",
":",
"if",
"self",
".",
"_file",
"is",
"None",
":",
"if",
"not",
"suppress_warning",
":",
"# Warn user that they're being paranoid",
"# and should clean up their code",
"logging",
".",
"warn",
"(",
"\"Connection to %s is already closed\"",
",",
"self",
".",
"filename",
")",
"else",
":",
"self",
".",
"_file",
".",
"close",
"(",
")",
"self",
".",
"_file",
"=",
"None",
"self",
".",
"layers",
"=",
"None",
"# type: ignore",
"self",
".",
"ra",
"=",
"None",
"# type: ignore",
"self",
".",
"row_attrs",
"=",
"None",
"# type: ignore",
"self",
".",
"ca",
"=",
"None",
"# type: ignore",
"self",
".",
"col_attrs",
"=",
"None",
"# type: ignore",
"self",
".",
"row_graphs",
"=",
"None",
"# type: ignore",
"self",
".",
"col_graphs",
"=",
"None",
"# type: ignore",
"self",
".",
"shape",
"=",
"(",
"0",
",",
"0",
")",
"self",
".",
"_closed",
"=",
"True"
] |
Close the connection. After this, the connection object becomes invalid. Warns user if called after closing.
Args:
suppress_warning: Suppresses warning message if True (defaults to false)
|
[
"Close",
"the",
"connection",
".",
"After",
"this",
"the",
"connection",
"object",
"becomes",
"invalid",
".",
"Warns",
"user",
"if",
"called",
"after",
"closing",
"."
] |
62c8373a92b058753baa3a95331fb541f560f599
|
https://github.com/linnarsson-lab/loompy/blob/62c8373a92b058753baa3a95331fb541f560f599/loompy/loompy.py#L246-L269
|
12,115
|
linnarsson-lab/loompy
|
loompy/loompy.py
|
LoomConnection.permute
|
def permute(self, ordering: np.ndarray, axis: int) -> None:
"""
Permute the dataset along the indicated axis.
Args:
ordering (list of int): The desired order along the axis
axis (int): The axis along which to permute
Returns:
Nothing.
"""
if self._file.__contains__("tiles"):
del self._file['tiles']
ordering = list(np.array(ordering).flatten()) # Flatten the ordering, in case we got a column vector
self.layers._permute(ordering, axis=axis)
if axis == 0:
self.row_attrs._permute(ordering)
self.row_graphs._permute(ordering)
if axis == 1:
self.col_attrs._permute(ordering)
self.col_graphs._permute(ordering)
|
python
|
def permute(self, ordering: np.ndarray, axis: int) -> None:
"""
Permute the dataset along the indicated axis.
Args:
ordering (list of int): The desired order along the axis
axis (int): The axis along which to permute
Returns:
Nothing.
"""
if self._file.__contains__("tiles"):
del self._file['tiles']
ordering = list(np.array(ordering).flatten()) # Flatten the ordering, in case we got a column vector
self.layers._permute(ordering, axis=axis)
if axis == 0:
self.row_attrs._permute(ordering)
self.row_graphs._permute(ordering)
if axis == 1:
self.col_attrs._permute(ordering)
self.col_graphs._permute(ordering)
|
[
"def",
"permute",
"(",
"self",
",",
"ordering",
":",
"np",
".",
"ndarray",
",",
"axis",
":",
"int",
")",
"->",
"None",
":",
"if",
"self",
".",
"_file",
".",
"__contains__",
"(",
"\"tiles\"",
")",
":",
"del",
"self",
".",
"_file",
"[",
"'tiles'",
"]",
"ordering",
"=",
"list",
"(",
"np",
".",
"array",
"(",
"ordering",
")",
".",
"flatten",
"(",
")",
")",
"# Flatten the ordering, in case we got a column vector",
"self",
".",
"layers",
".",
"_permute",
"(",
"ordering",
",",
"axis",
"=",
"axis",
")",
"if",
"axis",
"==",
"0",
":",
"self",
".",
"row_attrs",
".",
"_permute",
"(",
"ordering",
")",
"self",
".",
"row_graphs",
".",
"_permute",
"(",
"ordering",
")",
"if",
"axis",
"==",
"1",
":",
"self",
".",
"col_attrs",
".",
"_permute",
"(",
"ordering",
")",
"self",
".",
"col_graphs",
".",
"_permute",
"(",
"ordering",
")"
] |
Permute the dataset along the indicated axis.
Args:
ordering (list of int): The desired order along the axis
axis (int): The axis along which to permute
Returns:
Nothing.
|
[
"Permute",
"the",
"dataset",
"along",
"the",
"indicated",
"axis",
"."
] |
62c8373a92b058753baa3a95331fb541f560f599
|
https://github.com/linnarsson-lab/loompy/blob/62c8373a92b058753baa3a95331fb541f560f599/loompy/loompy.py#L784-L806
|
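A sketch of a typical call to the permute method above: sorting columns by a label attribute so related cells end up adjacent. The file and attribute names are assumptions.

# Sketch: permute columns of a Loom file by a clustering label ("Clusters" is assumed to exist).
import numpy as np
import loompy

with loompy.connect("example.loom") as ds:
    ordering = np.argsort(ds.ca["Clusters"])
    ds.permute(ordering, axis=1)    # matrix, column attributes and column graphs move together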
12,116
|
linnarsson-lab/loompy
|
loompy/loompy.py
|
LoomConnection.aggregate
|
def aggregate(self, out_file: str = None, select: np.ndarray = None, group_by: Union[str, np.ndarray] = "Clusters", aggr_by: str = "mean", aggr_ca_by: Dict[str, str] = None) -> np.ndarray:
"""
Aggregate the Loom file by applying aggregation functions to the main matrix as well as to the column attributes
Args:
out_file The name of the output Loom file (will be appended to if it exists)
select Bool array giving the columns to include (or None, to include all)
group_by The column attribute to group by, or an np.ndarray of integer group labels
aggr_by The aggregation function for the main matrix
aggr_ca_by A dictionary of aggregation functions for the column attributes (or None to skip)
Returns:
m Aggregated main matrix
Remarks:
aggr_by gives the aggregation function for the main matrix
aggr_ca_by is a dictionary with column attributes as keys and aggregation functions as values
Aggregation functions can be any valid aggregation function from here: https://github.com/ml31415/numpy-groupies
In addition, you can specify:
"tally" to count the number of occurrences of each value of a categorical attribute
"""
ca = {} # type: Dict[str, np.ndarray]
if select is not None:
raise ValueError("The 'select' argument is deprecated")
if isinstance(group_by, np.ndarray):
labels = group_by
else:
labels = (self.ca[group_by]).astype('int')
_, zero_strt_sort_noholes_lbls = np.unique(labels, return_inverse=True)
n_groups = len(set(labels))
if aggr_ca_by is not None:
for key in self.ca.keys():
if key not in aggr_ca_by:
continue
func = aggr_ca_by[key]
if func == "tally":
for val in set(self.ca[key]):
if np.issubdtype(type(val), np.str_):
valnew = val.replace("/", "-") # Slashes are not allowed in attribute names
valnew = valnew.replace(".", "_") # Nor are periods
ca[key + "_" + str(valnew)] = npg.aggregate(zero_strt_sort_noholes_lbls, (self.ca[key] == val).astype('int'), func="sum", fill_value=0)
elif func == "mode":
def mode(x): # type: ignore
return scipy.stats.mode(x)[0][0]
ca[key] = npg.aggregate(zero_strt_sort_noholes_lbls, self.ca[key], func=mode, fill_value=0).astype('str')
elif func == "mean":
ca[key] = npg.aggregate(zero_strt_sort_noholes_lbls, self.ca[key], func=func, fill_value=0)
elif func == "first":
ca[key] = npg.aggregate(zero_strt_sort_noholes_lbls, self.ca[key], func=func, fill_value=self.ca[key][0])
m = np.empty((self.shape[0], n_groups))
for (_, selection, view) in self.scan(axis=0, layers=[""]):
vals_aggr = npg.aggregate(zero_strt_sort_noholes_lbls, view[:, :], func=aggr_by, axis=1, fill_value=0)
m[selection, :] = vals_aggr
if out_file is not None:
loompy.create(out_file, m, self.ra, ca)
return m
|
python
|
def aggregate(self, out_file: str = None, select: np.ndarray = None, group_by: Union[str, np.ndarray] = "Clusters", aggr_by: str = "mean", aggr_ca_by: Dict[str, str] = None) -> np.ndarray:
"""
Aggregate the Loom file by applying aggregation functions to the main matrix as well as to the column attributes
Args:
out_file The name of the output Loom file (will be appended to if it exists)
select Bool array giving the columns to include (or None, to include all)
group_by The column attribute to group by, or an np.ndarray of integer group labels
aggr_by The aggregation function for the main matrix
aggr_ca_by A dictionary of aggregation functions for the column attributes (or None to skip)
Returns:
m Aggregated main matrix
Remarks:
aggr_by gives the aggregation function for the main matrix
aggr_ca_by is a dictionary with column attributes as keys and aggregation functions as values
Aggregation functions can be any valid aggregation function from here: https://github.com/ml31415/numpy-groupies
In addition, you can specify:
"tally" to count the number of occurrences of each value of a categorical attribute
"""
ca = {} # type: Dict[str, np.ndarray]
if select is not None:
raise ValueError("The 'select' argument is deprecated")
if isinstance(group_by, np.ndarray):
labels = group_by
else:
labels = (self.ca[group_by]).astype('int')
_, zero_strt_sort_noholes_lbls = np.unique(labels, return_inverse=True)
n_groups = len(set(labels))
if aggr_ca_by is not None:
for key in self.ca.keys():
if key not in aggr_ca_by:
continue
func = aggr_ca_by[key]
if func == "tally":
for val in set(self.ca[key]):
if np.issubdtype(type(val), np.str_):
valnew = val.replace("/", "-") # Slashes are not allowed in attribute names
valnew = valnew.replace(".", "_") # Nor are periods
ca[key + "_" + str(valnew)] = npg.aggregate(zero_strt_sort_noholes_lbls, (self.ca[key] == val).astype('int'), func="sum", fill_value=0)
elif func == "mode":
def mode(x): # type: ignore
return scipy.stats.mode(x)[0][0]
ca[key] = npg.aggregate(zero_strt_sort_noholes_lbls, self.ca[key], func=mode, fill_value=0).astype('str')
elif func == "mean":
ca[key] = npg.aggregate(zero_strt_sort_noholes_lbls, self.ca[key], func=func, fill_value=0)
elif func == "first":
ca[key] = npg.aggregate(zero_strt_sort_noholes_lbls, self.ca[key], func=func, fill_value=self.ca[key][0])
m = np.empty((self.shape[0], n_groups))
for (_, selection, view) in self.scan(axis=0, layers=[""]):
vals_aggr = npg.aggregate(zero_strt_sort_noholes_lbls, view[:, :], func=aggr_by, axis=1, fill_value=0)
m[selection, :] = vals_aggr
if out_file is not None:
loompy.create(out_file, m, self.ra, ca)
return m
|
[
"def",
"aggregate",
"(",
"self",
",",
"out_file",
":",
"str",
"=",
"None",
",",
"select",
":",
"np",
".",
"ndarray",
"=",
"None",
",",
"group_by",
":",
"Union",
"[",
"str",
",",
"np",
".",
"ndarray",
"]",
"=",
"\"Clusters\"",
",",
"aggr_by",
":",
"str",
"=",
"\"mean\"",
",",
"aggr_ca_by",
":",
"Dict",
"[",
"str",
",",
"str",
"]",
"=",
"None",
")",
"->",
"np",
".",
"ndarray",
":",
"ca",
"=",
"{",
"}",
"# type: Dict[str, np.ndarray]",
"if",
"select",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"\"The 'select' argument is deprecated\"",
")",
"if",
"isinstance",
"(",
"group_by",
",",
"np",
".",
"ndarray",
")",
":",
"labels",
"=",
"group_by",
"else",
":",
"labels",
"=",
"(",
"self",
".",
"ca",
"[",
"group_by",
"]",
")",
".",
"astype",
"(",
"'int'",
")",
"_",
",",
"zero_strt_sort_noholes_lbls",
"=",
"np",
".",
"unique",
"(",
"labels",
",",
"return_inverse",
"=",
"True",
")",
"n_groups",
"=",
"len",
"(",
"set",
"(",
"labels",
")",
")",
"if",
"aggr_ca_by",
"is",
"not",
"None",
":",
"for",
"key",
"in",
"self",
".",
"ca",
".",
"keys",
"(",
")",
":",
"if",
"key",
"not",
"in",
"aggr_ca_by",
":",
"continue",
"func",
"=",
"aggr_ca_by",
"[",
"key",
"]",
"if",
"func",
"==",
"\"tally\"",
":",
"for",
"val",
"in",
"set",
"(",
"self",
".",
"ca",
"[",
"key",
"]",
")",
":",
"if",
"np",
".",
"issubdtype",
"(",
"type",
"(",
"val",
")",
",",
"np",
".",
"str_",
")",
":",
"valnew",
"=",
"val",
".",
"replace",
"(",
"\"/\"",
",",
"\"-\"",
")",
"# Slashes are not allowed in attribute names",
"valnew",
"=",
"valnew",
".",
"replace",
"(",
"\".\"",
",",
"\"_\"",
")",
"# Nor are periods",
"ca",
"[",
"key",
"+",
"\"_\"",
"+",
"str",
"(",
"valnew",
")",
"]",
"=",
"npg",
".",
"aggregate",
"(",
"zero_strt_sort_noholes_lbls",
",",
"(",
"self",
".",
"ca",
"[",
"key",
"]",
"==",
"val",
")",
".",
"astype",
"(",
"'int'",
")",
",",
"func",
"=",
"\"sum\"",
",",
"fill_value",
"=",
"0",
")",
"elif",
"func",
"==",
"\"mode\"",
":",
"def",
"mode",
"(",
"x",
")",
":",
"# type: ignore",
"return",
"scipy",
".",
"stats",
".",
"mode",
"(",
"x",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"ca",
"[",
"key",
"]",
"=",
"npg",
".",
"aggregate",
"(",
"zero_strt_sort_noholes_lbls",
",",
"self",
".",
"ca",
"[",
"key",
"]",
",",
"func",
"=",
"mode",
",",
"fill_value",
"=",
"0",
")",
".",
"astype",
"(",
"'str'",
")",
"elif",
"func",
"==",
"\"mean\"",
":",
"ca",
"[",
"key",
"]",
"=",
"npg",
".",
"aggregate",
"(",
"zero_strt_sort_noholes_lbls",
",",
"self",
".",
"ca",
"[",
"key",
"]",
",",
"func",
"=",
"func",
",",
"fill_value",
"=",
"0",
")",
"elif",
"func",
"==",
"\"first\"",
":",
"ca",
"[",
"key",
"]",
"=",
"npg",
".",
"aggregate",
"(",
"zero_strt_sort_noholes_lbls",
",",
"self",
".",
"ca",
"[",
"key",
"]",
",",
"func",
"=",
"func",
",",
"fill_value",
"=",
"self",
".",
"ca",
"[",
"key",
"]",
"[",
"0",
"]",
")",
"m",
"=",
"np",
".",
"empty",
"(",
"(",
"self",
".",
"shape",
"[",
"0",
"]",
",",
"n_groups",
")",
")",
"for",
"(",
"_",
",",
"selection",
",",
"view",
")",
"in",
"self",
".",
"scan",
"(",
"axis",
"=",
"0",
",",
"layers",
"=",
"[",
"\"\"",
"]",
")",
":",
"vals_aggr",
"=",
"npg",
".",
"aggregate",
"(",
"zero_strt_sort_noholes_lbls",
",",
"view",
"[",
":",
",",
":",
"]",
",",
"func",
"=",
"aggr_by",
",",
"axis",
"=",
"1",
",",
"fill_value",
"=",
"0",
")",
"m",
"[",
"selection",
",",
":",
"]",
"=",
"vals_aggr",
"if",
"out_file",
"is",
"not",
"None",
":",
"loompy",
".",
"create",
"(",
"out_file",
",",
"m",
",",
"self",
".",
"ra",
",",
"ca",
")",
"return",
"m"
] |
Aggregate the Loom file by applying aggregation functions to the main matrix as well as to the column attributes
Args:
out_file The name of the output Loom file (will be appended to if it exists)
select Bool array giving the columns to include (or None, to include all)
group_by The column attribute to group by, or an np.ndarray of integer group labels
aggr_by The aggregation function for the main matrix
aggr_ca_by A dictionary of aggregation functions for the column attributes (or None to skip)
Returns:
m Aggregated main matrix
Remarks:
aggr_by gives the aggregation function for the main matrix
aggr_ca_by is a dictionary with column attributes as keys and aggregation functions as values
Aggregation functions can be any valid aggregation function from here: https://github.com/ml31415/numpy-groupies
In addition, you can specify:
"tally" to count the number of occurrences of each value of a categorical attribute
|
[
"Aggregate",
"the",
"Loom",
"file",
"by",
"applying",
"aggregation",
"functions",
"to",
"the",
"main",
"matrix",
"as",
"well",
"as",
"to",
"the",
"column",
"attributes"
] |
62c8373a92b058753baa3a95331fb541f560f599
|
https://github.com/linnarsson-lab/loompy/blob/62c8373a92b058753baa3a95331fb541f560f599/loompy/loompy.py#L873-L934
|
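A hedged sketch of calling the aggregate method above to collapse cells into per-cluster profiles; the file name and the attribute names in aggr_ca_by are placeholders.

# Sketch: mean expression per cluster, with per-attribute aggregation choices.
import loompy

with loompy.connect("example.loom") as ds:             # hypothetical file
    m = ds.aggregate(
        out_file=None,                                 # keep the result in memory only
        group_by="Clusters",                           # column attribute holding integer labels
        aggr_by="mean",                                # aggregation for the main matrix
        aggr_ca_by={"Age": "mean", "Tissue": "mode", "CellID": "first"},  # hypothetical attributes
    )
    print(m.shape)                                     # (n_genes, n_clusters)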
12,117
|
linnarsson-lab/loompy
|
loompy/file_attribute_manager.py
|
FileAttributeManager.get
|
def get(self, name: str, default: Any = None) -> np.ndarray:
"""
Return the value for a named attribute if it exists, else default.
If default is not given, it defaults to None, so that this method never raises a KeyError.
"""
if name in self:
return self[name]
else:
return default
|
python
|
def get(self, name: str, default: Any = None) -> np.ndarray:
"""
Return the value for a named attribute if it exists, else default.
If default is not given, it defaults to None, so that this method never raises a KeyError.
"""
if name in self:
return self[name]
else:
return default
|
[
"def",
"get",
"(",
"self",
",",
"name",
":",
"str",
",",
"default",
":",
"Any",
"=",
"None",
")",
"->",
"np",
".",
"ndarray",
":",
"if",
"name",
"in",
"self",
":",
"return",
"self",
"[",
"name",
"]",
"else",
":",
"return",
"default"
] |
Return the value for a named attribute if it exists, else default.
If default is not given, it defaults to None, so that this method never raises a KeyError.
|
[
"Return",
"the",
"value",
"for",
"a",
"named",
"attribute",
"if",
"it",
"exists",
"else",
"default",
".",
"If",
"default",
"is",
"not",
"given",
"it",
"defaults",
"to",
"None",
"so",
"that",
"this",
"method",
"never",
"raises",
"a",
"KeyError",
"."
] |
62c8373a92b058753baa3a95331fb541f560f599
|
https://github.com/linnarsson-lab/loompy/blob/62c8373a92b058753baa3a95331fb541f560f599/loompy/file_attribute_manager.py#L78-L86
|
12,118
|
linnarsson-lab/loompy
|
loompy/color.py
|
cat_colors
|
def cat_colors(N: int = 1, *, hue: str = None, luminosity: str = None, bgvalue: int = None, loop: bool = False, seed: str = "cat") -> Union[List[Any], colors.LinearSegmentedColormap]:
"""
Return a colormap suitable for N categorical values, optimized to be both aesthetically pleasing and perceptually distinct.
Args:
N The number of colors requested.
hue Controls the hue of the generated color. You can pass a string representing a color name: "red", "orange", "yellow", "green", "blue", "purple", "pink" and "monochrome" are currently supported. If you pass a hexadecimal color string such as "#00FFFF", its hue value will be used to generate colors.
luminosity Controls the luminosity of the generated color: "bright", "light" or "dark".
bgvalue If not None, then the corresponding index color will be set to light gray
loop If True, loop the color alphabet instead of generating random colors
seed If not None, use as the random seed (default: "cat")
Returns:
A set of colors in the requested format, either a list of values or a matplotlib LinearSegmentedColormap (when format="cmap")
If N <= 25 and hue and luminosity are both None, a subset of the optimally perceptually distinct "color alphabet" is returned.
Else, a pleasing set of random colors is returned.
Colors are designed to be displayed on a white background.
"""
c: List[str] = []
if N <= 25 and hue is None and luminosity is None:
c = _color_alphabet[:N]
elif not loop:
c = RandomColor(seed=seed).generate(count=N, hue=hue, luminosity=luminosity, format_="hex")
else:
n = N
while n > 0:
c += _color_alphabet[:n]
n -= 25
if bgvalue is not None:
c[bgvalue] = "#aaaaaa"
return colors.LinearSegmentedColormap.from_list("", c, N)
|
python
|
def cat_colors(N: int = 1, *, hue: str = None, luminosity: str = None, bgvalue: int = None, loop: bool = False, seed: str = "cat") -> Union[List[Any], colors.LinearSegmentedColormap]:
"""
Return a colormap suitable for N categorical values, optimized to be both aesthetically pleasing and perceptually distinct.
Args:
N The number of colors requested.
hue Controls the hue of the generated color. You can pass a string representing a color name: "red", "orange", "yellow", "green", "blue", "purple", "pink" and "monochrome" are currently supported. If you pass a hexadecimal color string such as "#00FFFF", its hue value will be used to generate colors.
luminosity Controls the luminosity of the generated color: "bright", "light" or "dark".
bgvalue If not None, then the corresponding index color will be set to light gray
loop If True, loop the color alphabet instead of generating random colors
seed If not None, use as the random seed (default: "cat")
Returns:
A set of colors in the requested format, either a list of values or a matplotlib LinearSegmentedColormap (when format="cmap")
If N <= 25 and hue and luminosity are both None, a subset of the optimally perceptually distinct "color alphabet" is returned.
Else, a pleasing set of random colors is returned.
Colors are designed to be displayed on a white background.
"""
c: List[str] = []
if N <= 25 and hue is None and luminosity is None:
c = _color_alphabet[:N]
elif not loop:
c = RandomColor(seed=seed).generate(count=N, hue=hue, luminosity=luminosity, format_="hex")
else:
n = N
while n > 0:
c += _color_alphabet[:n]
n -= 25
if bgvalue is not None:
c[bgvalue] = "#aaaaaa"
return colors.LinearSegmentedColormap.from_list("", c, N)
|
[
"def",
"cat_colors",
"(",
"N",
":",
"int",
"=",
"1",
",",
"*",
",",
"hue",
":",
"str",
"=",
"None",
",",
"luminosity",
":",
"str",
"=",
"None",
",",
"bgvalue",
":",
"int",
"=",
"None",
",",
"loop",
":",
"bool",
"=",
"False",
",",
"seed",
":",
"str",
"=",
"\"cat\"",
")",
"->",
"Union",
"[",
"List",
"[",
"Any",
"]",
",",
"colors",
".",
"LinearSegmentedColormap",
"]",
":",
"c",
":",
"List",
"[",
"str",
"]",
"=",
"[",
"]",
"if",
"N",
"<=",
"25",
"and",
"hue",
"is",
"None",
"and",
"luminosity",
"is",
"None",
":",
"c",
"=",
"_color_alphabet",
"[",
":",
"N",
"]",
"elif",
"not",
"loop",
":",
"c",
"=",
"RandomColor",
"(",
"seed",
"=",
"seed",
")",
".",
"generate",
"(",
"count",
"=",
"N",
",",
"hue",
"=",
"hue",
",",
"luminosity",
"=",
"luminosity",
",",
"format_",
"=",
"\"hex\"",
")",
"else",
":",
"n",
"=",
"N",
"while",
"n",
">",
"0",
":",
"c",
"+=",
"_color_alphabet",
"[",
":",
"n",
"]",
"n",
"-=",
"25",
"if",
"bgvalue",
"is",
"not",
"None",
":",
"c",
"[",
"bgvalue",
"]",
"=",
"\"#aaaaaa\"",
"return",
"colors",
".",
"LinearSegmentedColormap",
".",
"from_list",
"(",
"\"\"",
",",
"c",
",",
"N",
")"
] |
Return a colormap suitable for N categorical values, optimized to be both aesthetically pleasing and perceptually distinct.
Args:
N The number of colors requested.
hue Controls the hue of the generated color. You can pass a string representing a color name: "red", "orange", "yellow", "green", "blue", "purple", "pink" and "monochrome" are currently supported. If you pass a hexadecimal color string such as "#00FFFF", its hue value will be used to generate colors.
luminosity Controls the luminosity of the generated color: "bright", "light" or "dark".
bgvalue If not None, then the corresponding index color will be set to light gray
loop If True, loop the color alphabet instead of generating random colors
seed If not None, use as the random seed (default: "cat")
Returns:
A set of colors in the requested format, either a list of values or a matplotlib LinearSegmentedColormap (when format="cmap")
If N <= 25 and hue and luminosity are both None, a subset of the optimally perceptually distinct "color alphabet" is returned.
Else, a pleasing set of random colors is returned.
Colors are designed to be displayed on a white background.
|
[
"Return",
"a",
"colormap",
"suitable",
"for",
"N",
"categorical",
"values",
"optimized",
"to",
"be",
"both",
"aesthetically",
"pleasing",
"and",
"perceptually",
"distinct",
"."
] |
62c8373a92b058753baa3a95331fb541f560f599
|
https://github.com/linnarsson-lab/loompy/blob/62c8373a92b058753baa3a95331fb541f560f599/loompy/color.py#L336-L367
|
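A small sketch of using cat_colors above as a matplotlib colormap for categorical labels; the import path follows the record's module (loompy/color.py) and the data are made up.

# Sketch: categorical colormap for 8 labels, with label 0 drawn in light gray.
import numpy as np
import matplotlib.pyplot as plt
from loompy.color import cat_colors     # assumed import path

labels = np.random.randint(0, 8, size=200)
cmap = cat_colors(8, bgvalue=0)
plt.scatter(np.random.rand(200), np.random.rand(200), c=labels, cmap=cmap)
plt.show()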
12,119
|
linnarsson-lab/loompy
|
loompy/graph_manager.py
|
_renumber
|
def _renumber(a: np.ndarray, keys: np.ndarray, values: np.ndarray) -> np.ndarray:
"""
Renumber 'a' by replacing any occurrence of 'keys' by the corresponding 'values'
"""
ordering = np.argsort(keys)
keys = keys[ordering]
values = values[ordering]
index = np.digitize(a.ravel(), keys, right=True)
return(values[index].reshape(a.shape))
|
python
|
def _renumber(a: np.ndarray, keys: np.ndarray, values: np.ndarray) -> np.ndarray:
"""
Renumber 'a' by replacing any occurrence of 'keys' by the corresponding 'values'
"""
ordering = np.argsort(keys)
keys = keys[ordering]
values = values[ordering]
index = np.digitize(a.ravel(), keys, right=True)
return(values[index].reshape(a.shape))
|
[
"def",
"_renumber",
"(",
"a",
":",
"np",
".",
"ndarray",
",",
"keys",
":",
"np",
".",
"ndarray",
",",
"values",
":",
"np",
".",
"ndarray",
")",
"->",
"np",
".",
"ndarray",
":",
"ordering",
"=",
"np",
".",
"argsort",
"(",
"keys",
")",
"keys",
"=",
"keys",
"[",
"ordering",
"]",
"values",
"=",
"keys",
"[",
"ordering",
"]",
"index",
"=",
"np",
".",
"digitize",
"(",
"a",
".",
"ravel",
"(",
")",
",",
"keys",
",",
"right",
"=",
"True",
")",
"return",
"(",
"values",
"[",
"index",
"]",
".",
"reshape",
"(",
"a",
".",
"shape",
")",
")"
] |
Renumber 'a' by replacing any occurrence of 'keys' by the corresponding 'values'
|
[
"Renumber",
"a",
"by",
"replacing",
"any",
"occurrence",
"of",
"keys",
"by",
"the",
"corresponding",
"values"
] |
62c8373a92b058753baa3a95331fb541f560f599
|
https://github.com/linnarsson-lab/loompy/blob/62c8373a92b058753baa3a95331fb541f560f599/loompy/graph_manager.py#L7-L15
|
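A standalone sketch of the renumbering technique used above: sort the keys, look each element up with np.digitize, then map through the values sorted the same way. It is written against plain NumPy arrays rather than the private helper.

# Sketch: replace every occurrence of keys[i] in 'a' by values[i].
import numpy as np

def renumber(a, keys, values):
    order = np.argsort(keys)
    keys_sorted = keys[order]
    values_sorted = values[order]
    idx = np.digitize(a.ravel(), keys_sorted, right=True)   # exact matches land on their own bin
    return values_sorted[idx].reshape(a.shape)

a = np.array([[10, 20], [30, 10]])
print(renumber(a, keys=np.array([10, 20, 30]), values=np.array([0, 1, 2])))
# [[0 1]
#  [2 0]]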
12,120
|
linnarsson-lab/loompy
|
loompy/loom_validator.py
|
LoomValidator.validate
|
def validate(self, path: str, strictness: str = "speconly") -> bool:
"""
Validate a file for conformance to the Loom specification
Args:
path: Full path to the file to be validated
strictness: "speconly" or "conventions"
Remarks:
In "speconly" mode, conformance is assessed relative to the file format specification
at http://linnarssonlab.org/loompy/format/. In "conventions" mode, conformance is additionally
assessed relative to attribute name and data type conventions given at http://linnarssonlab.org/loompy/conventions/.
"""
valid1 = True
with h5py.File(path, mode="r") as f:
valid1 = self.validate_spec(f)
if not valid1:
self.errors.append("For help, see http://linnarssonlab.org/loompy/format/")
valid2 = True
if strictness == "conventions":
with loompy.connect(path, mode="r") as ds:
valid2 = self.validate_conventions(ds)
if not valid2:
self.errors.append("For help, see http://linnarssonlab.org/loompy/conventions/")
return valid1 and valid2
|
python
|
def validate(self, path: str, strictness: str = "speconly") -> bool:
"""
Validate a file for conformance to the Loom specification
Args:
path: Full path to the file to be validated
strictness: "speconly" or "conventions"
Remarks:
In "speconly" mode, conformance is assessed relative to the file format specification
at http://linnarssonlab.org/loompy/format/. In "conventions" mode, conformance is additionally
assessed relative to attribute name and data type conventions given at http://linnarssonlab.org/loompy/conventions/.
"""
valid1 = True
with h5py.File(path, mode="r") as f:
valid1 = self.validate_spec(f)
if not valid1:
self.errors.append("For help, see http://linnarssonlab.org/loompy/format/")
valid2 = True
if strictness == "conventions":
with loompy.connect(path, mode="r") as ds:
valid2 = self.validate_conventions(ds)
if not valid2:
self.errors.append("For help, see http://linnarssonlab.org/loompy/conventions/")
return valid1 and valid2
|
[
"def",
"validate",
"(",
"self",
",",
"path",
":",
"str",
",",
"strictness",
":",
"str",
"=",
"\"speconly\"",
")",
"->",
"bool",
":",
"valid1",
"=",
"True",
"with",
"h5py",
".",
"File",
"(",
"path",
",",
"mode",
"=",
"\"r\"",
")",
"as",
"f",
":",
"valid1",
"=",
"self",
".",
"validate_spec",
"(",
"f",
")",
"if",
"not",
"valid1",
":",
"self",
".",
"errors",
".",
"append",
"(",
"\"For help, see http://linnarssonlab.org/loompy/format/\"",
")",
"valid2",
"=",
"True",
"if",
"strictness",
"==",
"\"conventions\"",
":",
"with",
"loompy",
".",
"connect",
"(",
"path",
",",
"mode",
"=",
"\"r\"",
")",
"as",
"ds",
":",
"valid2",
"=",
"self",
".",
"validate_conventions",
"(",
"ds",
")",
"if",
"not",
"valid2",
":",
"self",
".",
"errors",
".",
"append",
"(",
"\"For help, see http://linnarssonlab.org/loompy/conventions/\"",
")",
"return",
"valid1",
"and",
"valid2"
] |
Validate a file for conformance to the Loom specification
Args:
path: Full path to the file to be validated
strictness: "speconly" or "conventions"
Remarks:
In "speconly" mode, conformance is assessed relative to the file format specification
at http://linnarssonlab.org/loompy/format/. In "conventions" mode, conformance is additionally
assessed relative to attribute name and data type conventions given at http://linnarssonlab.org/loompy/conventions/.
|
[
"Validate",
"a",
"file",
"for",
"conformance",
"to",
"the",
"Loom",
"specification"
] |
62c8373a92b058753baa3a95331fb541f560f599
|
https://github.com/linnarsson-lab/loompy/blob/62c8373a92b058753baa3a95331fb541f560f599/loompy/loom_validator.py#L34-L60
|
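A usage sketch for the validator above; the import path follows the record's module, and the default constructor and file name are assumptions.

# Sketch: validate a file against the Loom spec plus the naming conventions.
from loompy.loom_validator import LoomValidator   # assumed import path

lv = LoomValidator()                              # default-constructed, assumed signature
ok = lv.validate("example.loom", strictness="conventions")
if not ok:
    for err in lv.errors:
        print(err)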
12,121
|
linnarsson-lab/loompy
|
loompy/attribute_manager.py
|
AttributeManager._permute
|
def _permute(self, ordering: np.ndarray) -> None:
"""
Permute all the attributes in the collection
Remarks:
This permutes the order of the values for each attribute in the file
"""
for key in self.keys():
self[key] = self[key][ordering]
|
python
|
def _permute(self, ordering: np.ndarray) -> None:
"""
Permute all the attributes in the collection
Remarks:
This permutes the order of the values for each attribute in the file
"""
for key in self.keys():
self[key] = self[key][ordering]
|
[
"def",
"_permute",
"(",
"self",
",",
"ordering",
":",
"np",
".",
"ndarray",
")",
"->",
"None",
":",
"for",
"key",
"in",
"self",
".",
"keys",
"(",
")",
":",
"self",
"[",
"key",
"]",
"=",
"self",
"[",
"key",
"]",
"[",
"ordering",
"]"
] |
Permute all the attributes in the collection
Remarks:
This permutes the order of the values for each attribute in the file
|
[
"Permute",
"all",
"the",
"attributes",
"in",
"the",
"collection"
] |
62c8373a92b058753baa3a95331fb541f560f599
|
https://github.com/linnarsson-lab/loompy/blob/62c8373a92b058753baa3a95331fb541f560f599/loompy/attribute_manager.py#L180-L188
|
12,122
|
linnarsson-lab/loompy
|
loompy/attribute_manager.py
|
AttributeManager.get
|
def get(self, name: str, default: np.ndarray) -> np.ndarray:
"""
Return the value for a named attribute if it exists, else default.
Default has to be a numpy array of correct size.
"""
if name in self:
return self[name]
else:
if not isinstance(default, np.ndarray):
raise ValueError(f"Default must be an np.ndarray with exactly {self.ds.shape[self.axis]} values")
if default.shape[0] != self.ds.shape[self.axis]:
raise ValueError(f"Default must be an np.ndarray with exactly {self.ds.shape[self.axis]} values but {len(default)} were given")
return default
|
python
|
def get(self, name: str, default: np.ndarray) -> np.ndarray:
"""
Return the value for a named attribute if it exists, else default.
Default has to be a numpy array of correct size.
"""
if name in self:
return self[name]
else:
if not isinstance(default, np.ndarray):
raise ValueError(f"Default must be an np.ndarray with exactly {self.ds.shape[self.axis]} values")
if default.shape[0] != self.ds.shape[self.axis]:
raise ValueError(f"Default must be an np.ndarray with exactly {self.ds.shape[self.axis]} values but {len(default)} were given")
return default
|
[
"def",
"get",
"(",
"self",
",",
"name",
":",
"str",
",",
"default",
":",
"np",
".",
"ndarray",
")",
"->",
"np",
".",
"ndarray",
":",
"if",
"name",
"in",
"self",
":",
"return",
"self",
"[",
"name",
"]",
"else",
":",
"if",
"not",
"isinstance",
"(",
"default",
",",
"np",
".",
"ndarray",
")",
":",
"raise",
"ValueError",
"(",
"f\"Default must be an np.ndarray with exactly {self.ds.shape[self.axis]} values\"",
")",
"if",
"default",
".",
"shape",
"[",
"0",
"]",
"!=",
"self",
".",
"ds",
".",
"shape",
"[",
"self",
".",
"axis",
"]",
":",
"raise",
"ValueError",
"(",
"f\"Default must be an np.ndarray with exactly {self.ds.shape[self.axis]} values but {len(default)} were given\"",
")",
"return",
"default"
] |
Return the value for a named attribute if it exists, else default.
Default has to be a numpy array of correct size.
|
[
"Return",
"the",
"value",
"for",
"a",
"named",
"attribute",
"if",
"it",
"exists",
"else",
"default",
".",
"Default",
"has",
"to",
"be",
"a",
"numpy",
"array",
"of",
"correct",
"size",
"."
] |
62c8373a92b058753baa3a95331fb541f560f599
|
https://github.com/linnarsson-lab/loompy/blob/62c8373a92b058753baa3a95331fb541f560f599/loompy/attribute_manager.py#L190-L205
|
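A sketch of the attribute get above: the default must be an ndarray whose length matches the axis, otherwise the method raises. The file and attribute names are placeholders.

# Sketch: read a column attribute with a correctly sized fallback.
import numpy as np
import loompy

with loompy.connect("example.loom") as ds:
    n_cols = ds.shape[1]
    clusters = ds.ca.get("Clusters", np.zeros(n_cols, dtype="int"))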
12,123
|
linnarsson-lab/loompy
|
loompy/normalize.py
|
normalize_attr_array
|
def normalize_attr_array(a: Any) -> np.ndarray:
"""
Take all kinds of array-like inputs and normalize to a one-dimensional np.ndarray
"""
if type(a) is np.ndarray:
return a
elif type(a) is np.matrix:
if a.shape[0] == 1:
return np.array(a)[0, :]
elif a.shape[1] == 1:
return np.array(a)[:, 0]
else:
raise ValueError("Attribute values must be 1-dimensional.")
elif type(a) is list or type(a) is tuple:
return np.array(a)
elif sparse.issparse(a):
return normalize_attr_array(a.todense())
else:
raise ValueError("Argument must be a list, tuple, numpy matrix, numpy ndarray or sparse matrix.")
|
python
|
def normalize_attr_array(a: Any) -> np.ndarray:
"""
Take all kinds of array-like inputs and normalize to a one-dimensional np.ndarray
"""
if type(a) is np.ndarray:
return a
elif type(a) is np.matrix:
if a.shape[0] == 1:
return np.array(a)[0, :]
elif a.shape[1] == 1:
return np.array(a)[:, 0]
else:
raise ValueError("Attribute values must be 1-dimensional.")
elif type(a) is list or type(a) is tuple:
return np.array(a)
elif sparse.issparse(a):
return normalize_attr_array(a.todense())
else:
raise ValueError("Argument must be a list, tuple, numpy matrix, numpy ndarray or sparse matrix.")
|
[
"def",
"normalize_attr_array",
"(",
"a",
":",
"Any",
")",
"->",
"np",
".",
"ndarray",
":",
"if",
"type",
"(",
"a",
")",
"is",
"np",
".",
"ndarray",
":",
"return",
"a",
"elif",
"type",
"(",
"a",
")",
"is",
"np",
".",
"matrix",
":",
"if",
"a",
".",
"shape",
"[",
"0",
"]",
"==",
"1",
":",
"return",
"np",
".",
"array",
"(",
"a",
")",
"[",
"0",
",",
":",
"]",
"elif",
"a",
".",
"shape",
"[",
"1",
"]",
"==",
"1",
":",
"return",
"np",
".",
"array",
"(",
"a",
")",
"[",
":",
",",
"0",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"\"Attribute values must be 1-dimensional.\"",
")",
"elif",
"type",
"(",
"a",
")",
"is",
"list",
"or",
"type",
"(",
"a",
")",
"is",
"tuple",
":",
"return",
"np",
".",
"array",
"(",
"a",
")",
"elif",
"sparse",
".",
"issparse",
"(",
"a",
")",
":",
"return",
"normalize_attr_array",
"(",
"a",
".",
"todense",
"(",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Argument must be a list, tuple, numpy matrix, numpy ndarray or sparse matrix.\"",
")"
] |
Take all kinds of array-like inputs and normalize to a one-dimensional np.ndarray
|
[
"Take",
"all",
"kinds",
"of",
"array",
"-",
"like",
"inputs",
"and",
"normalize",
"to",
"a",
"one",
"-",
"dimensional",
"np",
".",
"ndarray"
] |
62c8373a92b058753baa3a95331fb541f560f599
|
https://github.com/linnarsson-lab/loompy/blob/62c8373a92b058753baa3a95331fb541f560f599/loompy/normalize.py#L29-L47
|
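A small check of the normalization behaviour above on a few input shapes; the import path follows the record's module.

# Sketch: lists, 1xN / Nx1 matrices and sparse vectors all come back as 1-d ndarrays.
import numpy as np
import scipy.sparse as sparse
from loompy.normalize import normalize_attr_array   # assumed import path

print(normalize_attr_array([1, 2, 3]))
print(normalize_attr_array(np.matrix([[1, 2, 3]])))
print(normalize_attr_array(sparse.coo_matrix([[1], [2]])))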
12,124
|
linnarsson-lab/loompy
|
loompy/to_html.py
|
to_html
|
def to_html(ds: Any) -> str:
"""
Return an HTML representation of the loom file or view, showing the upper-left 10x10 corner.
"""
rm = min(10, ds.shape[0])
cm = min(10, ds.shape[1])
html = "<p>"
if ds.attrs.__contains__("title"):
html += "<strong>" + ds.attrs["title"] + "</strong> "
html += f"{ds.shape[0]} rows, {ds.shape[1]} columns, {len(ds.layers)} layer{'s' if len(ds.layers) > 1 else ''}<br/>(showing up to 10x10)<br/>"
html += ds.filename + "<br/>"
for (name, val) in ds.attrs.items():
html += f"name: <em>{val}</em><br/>"
html += "<table>"
# Emit column attributes
for ca in ds.col_attrs.keys():
html += "<tr>"
for ra in ds.row_attrs.keys():
html += "<td> </td>" # Space for row attrs
html += "<td><strong>" + ca + "</strong></td>" # Col attr name
for v in ds.col_attrs[ca][:cm]:
html += "<td>" + str(v) + "</td>"
if ds.shape[1] > cm:
html += "<td>...</td>"
html += "</tr>"
# Emit row attribute names
html += "<tr>"
for ra in ds.row_attrs.keys():
html += "<td><strong>" + ra + "</strong></td>" # Row attr name
html += "<td> </td>" # Space for col attrs
for v in range(cm):
html += "<td> </td>"
if ds.shape[1] > cm:
html += "<td>...</td>"
html += "</tr>"
# Emit row attr values and matrix values
for row in range(rm):
html += "<tr>"
for ra in ds.row_attrs.keys():
html += "<td>" + str(ds.row_attrs[ra][row]) + "</td>"
html += "<td> </td>" # Space for col attrs
for v in ds[row, :cm]:
html += "<td>" + str(v) + "</td>"
if ds.shape[1] > cm:
html += "<td>...</td>"
html += "</tr>"
# Emit ellipses
if ds.shape[0] > rm:
html += "<tr>"
for v in range(rm + 1 + len(ds.row_attrs.keys())):
html += "<td>...</td>"
if ds.shape[1] > cm:
html += "<td>...</td>"
html += "</tr>"
html += "</table>"
return html
|
python
|
def to_html(ds: Any) -> str:
"""
Return an HTML representation of the loom file or view, showing the upper-left 10x10 corner.
"""
rm = min(10, ds.shape[0])
cm = min(10, ds.shape[1])
html = "<p>"
if ds.attrs.__contains__("title"):
html += "<strong>" + ds.attrs["title"] + "</strong> "
html += f"{ds.shape[0]} rows, {ds.shape[1]} columns, {len(ds.layers)} layer{'s' if len(ds.layers) > 1 else ''}<br/>(showing up to 10x10)<br/>"
html += ds.filename + "<br/>"
for (name, val) in ds.attrs.items():
html += f"name: <em>{val}</em><br/>"
html += "<table>"
# Emit column attributes
for ca in ds.col_attrs.keys():
html += "<tr>"
for ra in ds.row_attrs.keys():
html += "<td> </td>" # Space for row attrs
html += "<td><strong>" + ca + "</strong></td>" # Col attr name
for v in ds.col_attrs[ca][:cm]:
html += "<td>" + str(v) + "</td>"
if ds.shape[1] > cm:
html += "<td>...</td>"
html += "</tr>"
# Emit row attribute names
html += "<tr>"
for ra in ds.row_attrs.keys():
html += "<td><strong>" + ra + "</strong></td>" # Row attr name
html += "<td> </td>" # Space for col attrs
for v in range(cm):
html += "<td> </td>"
if ds.shape[1] > cm:
html += "<td>...</td>"
html += "</tr>"
# Emit row attr values and matrix values
for row in range(rm):
html += "<tr>"
for ra in ds.row_attrs.keys():
html += "<td>" + str(ds.row_attrs[ra][row]) + "</td>"
html += "<td> </td>" # Space for col attrs
for v in ds[row, :cm]:
html += "<td>" + str(v) + "</td>"
if ds.shape[1] > cm:
html += "<td>...</td>"
html += "</tr>"
# Emit ellipses
if ds.shape[0] > rm:
html += "<tr>"
for v in range(rm + 1 + len(ds.row_attrs.keys())):
html += "<td>...</td>"
if ds.shape[1] > cm:
html += "<td>...</td>"
html += "</tr>"
html += "</table>"
return html
|
[
"def",
"to_html",
"(",
"ds",
":",
"Any",
")",
"->",
"str",
":",
"rm",
"=",
"min",
"(",
"10",
",",
"ds",
".",
"shape",
"[",
"0",
"]",
")",
"cm",
"=",
"min",
"(",
"10",
",",
"ds",
".",
"shape",
"[",
"1",
"]",
")",
"html",
"=",
"\"<p>\"",
"if",
"ds",
".",
"attrs",
".",
"__contains__",
"(",
"\"title\"",
")",
":",
"html",
"+=",
"\"<strong>\"",
"+",
"ds",
".",
"attrs",
"[",
"\"title\"",
"]",
"+",
"\"</strong> \"",
"html",
"+=",
"f\"{ds.shape[0]} rows, {ds.shape[1]} columns, {len(ds.layers)} layer{'s' if len(ds.layers) > 1 else ''}<br/>(showing up to 10x10)<br/>\"",
"html",
"+=",
"ds",
".",
"filename",
"+",
"\"<br/>\"",
"for",
"(",
"name",
",",
"val",
")",
"in",
"ds",
".",
"attrs",
".",
"items",
"(",
")",
":",
"html",
"+=",
"f\"name: <em>{val}</em><br/>\"",
"html",
"+=",
"\"<table>\"",
"# Emit column attributes",
"for",
"ca",
"in",
"ds",
".",
"col_attrs",
".",
"keys",
"(",
")",
":",
"html",
"+=",
"\"<tr>\"",
"for",
"ra",
"in",
"ds",
".",
"row_attrs",
".",
"keys",
"(",
")",
":",
"html",
"+=",
"\"<td> </td>\"",
"# Space for row attrs",
"html",
"+=",
"\"<td><strong>\"",
"+",
"ca",
"+",
"\"</strong></td>\"",
"# Col attr name",
"for",
"v",
"in",
"ds",
".",
"col_attrs",
"[",
"ca",
"]",
"[",
":",
"cm",
"]",
":",
"html",
"+=",
"\"<td>\"",
"+",
"str",
"(",
"v",
")",
"+",
"\"</td>\"",
"if",
"ds",
".",
"shape",
"[",
"1",
"]",
">",
"cm",
":",
"html",
"+=",
"\"<td>...</td>\"",
"html",
"+=",
"\"</tr>\"",
"# Emit row attribute names",
"html",
"+=",
"\"<tr>\"",
"for",
"ra",
"in",
"ds",
".",
"row_attrs",
".",
"keys",
"(",
")",
":",
"html",
"+=",
"\"<td><strong>\"",
"+",
"ra",
"+",
"\"</strong></td>\"",
"# Row attr name",
"html",
"+=",
"\"<td> </td>\"",
"# Space for col attrs",
"for",
"v",
"in",
"range",
"(",
"cm",
")",
":",
"html",
"+=",
"\"<td> </td>\"",
"if",
"ds",
".",
"shape",
"[",
"1",
"]",
">",
"cm",
":",
"html",
"+=",
"\"<td>...</td>\"",
"html",
"+=",
"\"</tr>\"",
"# Emit row attr values and matrix values",
"for",
"row",
"in",
"range",
"(",
"rm",
")",
":",
"html",
"+=",
"\"<tr>\"",
"for",
"ra",
"in",
"ds",
".",
"row_attrs",
".",
"keys",
"(",
")",
":",
"html",
"+=",
"\"<td>\"",
"+",
"str",
"(",
"ds",
".",
"row_attrs",
"[",
"ra",
"]",
"[",
"row",
"]",
")",
"+",
"\"</td>\"",
"html",
"+=",
"\"<td> </td>\"",
"# Space for col attrs",
"for",
"v",
"in",
"ds",
"[",
"row",
",",
":",
"cm",
"]",
":",
"html",
"+=",
"\"<td>\"",
"+",
"str",
"(",
"v",
")",
"+",
"\"</td>\"",
"if",
"ds",
".",
"shape",
"[",
"1",
"]",
">",
"cm",
":",
"html",
"+=",
"\"<td>...</td>\"",
"html",
"+=",
"\"</tr>\"",
"# Emit ellipses",
"if",
"ds",
".",
"shape",
"[",
"0",
"]",
">",
"rm",
":",
"html",
"+=",
"\"<tr>\"",
"for",
"v",
"in",
"range",
"(",
"rm",
"+",
"1",
"+",
"len",
"(",
"ds",
".",
"row_attrs",
".",
"keys",
"(",
")",
")",
")",
":",
"html",
"+=",
"\"<td>...</td>\"",
"if",
"ds",
".",
"shape",
"[",
"1",
"]",
">",
"cm",
":",
"html",
"+=",
"\"<td>...</td>\"",
"html",
"+=",
"\"</tr>\"",
"html",
"+=",
"\"</table>\"",
"return",
"html"
] |
Return an HTML representation of the loom file or view, showing the upper-left 10x10 corner.
|
[
"Return",
"an",
"HTML",
"representation",
"of",
"the",
"loom",
"file",
"or",
"view",
"showing",
"the",
"upper",
"-",
"left",
"10x10",
"corner",
"."
] |
62c8373a92b058753baa3a95331fb541f560f599
|
https://github.com/linnarsson-lab/loompy/blob/62c8373a92b058753baa3a95331fb541f560f599/loompy/to_html.py#L4-L62
|
12,125
|
linnarsson-lab/loompy
|
loompy/loom_view.py
|
LoomView.permute
|
def permute(self, ordering: np.ndarray, *, axis: int) -> None:
"""
Permute the view, by permuting its layers, attributes and graphs
Args:
ordering (np.ndarray): The desired ordering along the axis
axis (int): 0, permute rows; 1, permute columns
"""
if axis not in (0, 1):
raise ValueError("Axis must be 0 (rows) or 1 (columns)")
for layer in self.layers.values():
layer._permute(ordering, axis=axis)
if axis == 0:
if self.row_graphs is not None:
for g in self.row_graphs.values():
g._permute(ordering)
for a in self.row_attrs.values():
a._permute(ordering)
elif axis == 1:
if self.col_graphs is not None:
for g in self.col_graphs.values():
g._permute(ordering)
for a in self.col_attrs.values():
a._permute(ordering)
|
python
|
def permute(self, ordering: np.ndarray, *, axis: int) -> None:
"""
Permute the view, by permuting its layers, attributes and graphs
Args:
ordering (np.ndarray): The desired ordering along the axis
axis (int): 0, permute rows; 1, permute columns
"""
if axis not in (0, 1):
raise ValueError("Axis must be 0 (rows) or 1 (columns)")
for layer in self.layers.values():
layer._permute(ordering, axis=axis)
if axis == 0:
if self.row_graphs is not None:
for g in self.row_graphs.values():
g._permute(ordering)
for a in self.row_attrs.values():
a._permute(ordering)
elif axis == 1:
if self.col_graphs is not None:
for g in self.col_graphs.values():
g._permute(ordering)
for a in self.col_attrs.values():
a._permute(ordering)
|
[
"def",
"permute",
"(",
"self",
",",
"ordering",
":",
"np",
".",
"ndarray",
",",
"*",
",",
"axis",
":",
"int",
")",
"->",
"None",
":",
"if",
"axis",
"not",
"in",
"(",
"0",
",",
"1",
")",
":",
"raise",
"ValueError",
"(",
"\"Axis must be 0 (rows) or 1 (columns)\"",
")",
"for",
"layer",
"in",
"self",
".",
"layers",
".",
"values",
"(",
")",
":",
"layer",
".",
"_permute",
"(",
"ordering",
",",
"axis",
"=",
"axis",
")",
"if",
"axis",
"==",
"0",
":",
"if",
"self",
".",
"row_graphs",
"is",
"not",
"None",
":",
"for",
"g",
"in",
"self",
".",
"row_graphs",
".",
"values",
"(",
")",
":",
"g",
".",
"_permute",
"(",
"ordering",
")",
"for",
"a",
"in",
"self",
".",
"row_attrs",
".",
"values",
"(",
")",
":",
"a",
".",
"_permute",
"(",
"ordering",
")",
"elif",
"axis",
"==",
"1",
":",
"if",
"self",
".",
"col_graphs",
"is",
"not",
"None",
":",
"for",
"g",
"in",
"self",
".",
"col_graphs",
".",
"values",
"(",
")",
":",
"g",
".",
"_permute",
"(",
"ordering",
")",
"for",
"a",
"in",
"self",
".",
"col_attrs",
".",
"values",
"(",
")",
":",
"a",
".",
"_permute",
"(",
"ordering",
")"
] |
Permute the view, by permuting its layers, attributes and graphs
Args:
ordering (np.ndarray): The desired ordering along the axis
axis (int): 0, permute rows; 1, permute columns
|
[
"Permute",
"the",
"view",
"by",
"permuting",
"its",
"layers",
"attributes",
"and",
"graphs"
] |
62c8373a92b058753baa3a95331fb541f560f599
|
https://github.com/linnarsson-lab/loompy/blob/62c8373a92b058753baa3a95331fb541f560f599/loompy/loom_view.py#L45-L68
|
12,126
|
linnarsson-lab/loompy
|
loompy/loom_layer.py
|
MemoryLoomLayer.permute
|
def permute(self, ordering: np.ndarray, *, axis: int) -> None:
"""
Permute the layer along an axis
Args:
axis: The axis to permute (0, permute the rows; 1, permute the columns)
ordering: The permutation vector
"""
if axis == 0:
self.values = self.values[ordering, :]
elif axis == 1:
self.values = self.values[:, ordering]
else:
raise ValueError("axis must be 0 or 1")
|
python
|
def permute(self, ordering: np.ndarray, *, axis: int) -> None:
"""
Permute the layer along an axis
Args:
axis: The axis to permute (0, permute the rows; 1, permute the columns)
ordering: The permutation vector
"""
if axis == 0:
self.values = self.values[ordering, :]
elif axis == 1:
self.values = self.values[:, ordering]
else:
raise ValueError("axis must be 0 or 1")
|
[
"def",
"permute",
"(",
"self",
",",
"ordering",
":",
"np",
".",
"ndarray",
",",
"*",
",",
"axis",
":",
"int",
")",
"->",
"None",
":",
"if",
"axis",
"==",
"0",
":",
"self",
".",
"values",
"=",
"self",
".",
"values",
"[",
"ordering",
",",
":",
"]",
"elif",
"axis",
"==",
"1",
":",
"self",
".",
"values",
"=",
"self",
".",
"values",
"[",
":",
",",
"ordering",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"\"axis must be 0 or 1\"",
")"
] |
Permute the layer along an axis
Args:
axis: The axis to permute (0, permute the rows; 1, permute the columns)
ordering: The permutation vector
|
[
"Permute",
"the",
"layer",
"along",
"an",
"axis"
] |
62c8373a92b058753baa3a95331fb541f560f599
|
https://github.com/linnarsson-lab/loompy/blob/62c8373a92b058753baa3a95331fb541f560f599/loompy/loom_layer.py#L30-L43
|
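A standalone sketch of the permutation semantics in the two branches above: NumPy fancy indexing with a permutation vector along the chosen axis.

# Sketch: row and column permutation by fancy indexing.
import numpy as np

values = np.arange(12).reshape(3, 4)
row_order = np.array([2, 0, 1])                      # old row 2 first, then 0, then 1
rows_permuted = values[row_order, :]                 # axis == 0 branch
cols_permuted = values[:, np.array([3, 2, 1, 0])]    # axis == 1 branch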
12,127
|
linnarsson-lab/loompy
|
loompy/loom_layer.py
|
LoomLayer._resize
|
def _resize(self, size: Tuple[int, int], axis: int = None) -> None:
"""Resize the dataset, or the specified axis.
The dataset must be stored in chunked format; it can be resized up to the "maximum shape" (keyword maxshape) specified at creation time.
The rank of the dataset cannot be changed.
"Size" should be a shape tuple, or if an axis is specified, an integer.
BEWARE: This functions differently than the NumPy resize() method!
The data is not "reshuffled" to fit in the new shape; each axis is grown or shrunk independently.
The coordinates of existing data are fixed.
"""
if self.name == "":
self.ds._file['/matrix'].resize(size, axis)
else:
self.ds._file['/layers/' + self.name].resize(size, axis)
|
python
|
def _resize(self, size: Tuple[int, int], axis: int = None) -> None:
"""Resize the dataset, or the specified axis.
The dataset must be stored in chunked format; it can be resized up to the "maximum shape" (keyword maxshape) specified at creation time.
The rank of the dataset cannot be changed.
"Size" should be a shape tuple, or if an axis is specified, an integer.
BEWARE: This functions differently than the NumPy resize() method!
The data is not "reshuffled" to fit in the new shape; each axis is grown or shrunk independently.
The coordinates of existing data are fixed.
"""
if self.name == "":
self.ds._file['/matrix'].resize(size, axis)
else:
self.ds._file['/layers/' + self.name].resize(size, axis)
|
[
"def",
"_resize",
"(",
"self",
",",
"size",
":",
"Tuple",
"[",
"int",
",",
"int",
"]",
",",
"axis",
":",
"int",
"=",
"None",
")",
"->",
"None",
":",
"if",
"self",
".",
"name",
"==",
"\"\"",
":",
"self",
".",
"ds",
".",
"_file",
"[",
"'/matrix'",
"]",
".",
"resize",
"(",
"size",
",",
"axis",
")",
"else",
":",
"self",
".",
"ds",
".",
"_file",
"[",
"'/layers/'",
"+",
"self",
".",
"name",
"]",
".",
"resize",
"(",
"size",
",",
"axis",
")"
] |
Resize the dataset, or the specified axis.
The dataset must be stored in chunked format; it can be resized up to the "maximum shape" (keyword maxshape) specified at creation time.
The rank of the dataset cannot be changed.
"Size" should be a shape tuple, or if an axis is specified, an integer.
BEWARE: This functions differently than the NumPy resize() method!
The data is not "reshuffled" to fit in the new shape; each axis is grown or shrunk independently.
The coordinates of existing data are fixed.
|
[
"Resize",
"the",
"dataset",
"or",
"the",
"specified",
"axis",
"."
] |
62c8373a92b058753baa3a95331fb541f560f599
|
https://github.com/linnarsson-lab/loompy/blob/62c8373a92b058753baa3a95331fb541f560f599/loompy/loom_layer.py#L130-L144
|
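A standalone h5py sketch of the resizing behaviour the docstring above describes: the dataset must be chunked, can only grow up to its maxshape, and the coordinates of existing data stay fixed. The file name is a scratch placeholder.

# Sketch: resizable chunked dataset, grown along the column axis.
import h5py
import numpy as np

with h5py.File("example.h5", "w") as f:
    dset = f.create_dataset("matrix", shape=(100, 10), maxshape=(100, None), chunks=True)
    dset.resize(20, axis=1)                          # grow columns from 10 to 20
    dset[:, 10:] = np.zeros((100, 10))               # fill the new region; old data is untouched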
12,128
|
optimizely/python-sdk
|
optimizely/helpers/validator.py
|
is_datafile_valid
|
def is_datafile_valid(datafile):
""" Given a datafile determine if it is valid or not.
Args:
datafile: JSON string representing the project.
Returns:
Boolean depending upon whether datafile is valid or not.
"""
try:
datafile_json = json.loads(datafile)
except:
return False
try:
jsonschema.Draft4Validator(constants.JSON_SCHEMA).validate(datafile_json)
except:
return False
return True
|
python
|
def is_datafile_valid(datafile):
""" Given a datafile determine if it is valid or not.
Args:
datafile: JSON string representing the project.
Returns:
Boolean depending upon whether datafile is valid or not.
"""
try:
datafile_json = json.loads(datafile)
except:
return False
try:
jsonschema.Draft4Validator(constants.JSON_SCHEMA).validate(datafile_json)
except:
return False
return True
|
[
"def",
"is_datafile_valid",
"(",
"datafile",
")",
":",
"try",
":",
"datafile_json",
"=",
"json",
".",
"loads",
"(",
"datafile",
")",
"except",
":",
"return",
"False",
"try",
":",
"jsonschema",
".",
"Draft4Validator",
"(",
"constants",
".",
"JSON_SCHEMA",
")",
".",
"validate",
"(",
"datafile_json",
")",
"except",
":",
"return",
"False",
"return",
"True"
] |
Given a datafile determine if it is valid or not.
Args:
datafile: JSON string representing the project.
Returns:
Boolean depending upon whether datafile is valid or not.
|
[
"Given",
"a",
"datafile",
"determine",
"if",
"it",
"is",
"valid",
"or",
"not",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/helpers/validator.py#L24-L44
|
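A standalone sketch of the validation pattern above, with a toy schema standing in for the SDK's constants and the bare except clauses narrowed to the exceptions actually expected.

# Sketch: parse JSON, then validate it against a Draft 4 schema.
import json
import jsonschema

TOY_SCHEMA = {"type": "object", "required": ["version"]}   # placeholder for constants.JSON_SCHEMA

def is_valid(datafile):
    try:
        payload = json.loads(datafile)
        jsonschema.Draft4Validator(TOY_SCHEMA).validate(payload)
    except (ValueError, jsonschema.ValidationError):
        return False
    return True

print(is_valid('{"version": "4"}'))   # True
print(is_valid('not json'))           # False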
12,129
|
optimizely/python-sdk
|
optimizely/helpers/validator.py
|
is_user_profile_valid
|
def is_user_profile_valid(user_profile):
""" Determine if provided user profile is valid or not.
Args:
user_profile: User's profile which needs to be validated.
Returns:
Boolean depending upon whether profile is valid or not.
"""
if not user_profile:
return False
if not type(user_profile) is dict:
return False
if UserProfile.USER_ID_KEY not in user_profile:
return False
if UserProfile.EXPERIMENT_BUCKET_MAP_KEY not in user_profile:
return False
experiment_bucket_map = user_profile.get(UserProfile.EXPERIMENT_BUCKET_MAP_KEY)
if not type(experiment_bucket_map) is dict:
return False
for decision in experiment_bucket_map.values():
if type(decision) is not dict or UserProfile.VARIATION_ID_KEY not in decision:
return False
return True
|
python
|
def is_user_profile_valid(user_profile):
""" Determine if provided user profile is valid or not.
Args:
user_profile: User's profile which needs to be validated.
Returns:
Boolean depending upon whether profile is valid or not.
"""
if not user_profile:
return False
if not type(user_profile) is dict:
return False
if UserProfile.USER_ID_KEY not in user_profile:
return False
if UserProfile.EXPERIMENT_BUCKET_MAP_KEY not in user_profile:
return False
experiment_bucket_map = user_profile.get(UserProfile.EXPERIMENT_BUCKET_MAP_KEY)
if not type(experiment_bucket_map) is dict:
return False
for decision in experiment_bucket_map.values():
if type(decision) is not dict or UserProfile.VARIATION_ID_KEY not in decision:
return False
return True
|
[
"def",
"is_user_profile_valid",
"(",
"user_profile",
")",
":",
"if",
"not",
"user_profile",
":",
"return",
"False",
"if",
"not",
"type",
"(",
"user_profile",
")",
"is",
"dict",
":",
"return",
"False",
"if",
"UserProfile",
".",
"USER_ID_KEY",
"not",
"in",
"user_profile",
":",
"return",
"False",
"if",
"UserProfile",
".",
"EXPERIMENT_BUCKET_MAP_KEY",
"not",
"in",
"user_profile",
":",
"return",
"False",
"experiment_bucket_map",
"=",
"user_profile",
".",
"get",
"(",
"UserProfile",
".",
"EXPERIMENT_BUCKET_MAP_KEY",
")",
"if",
"not",
"type",
"(",
"experiment_bucket_map",
")",
"is",
"dict",
":",
"return",
"False",
"for",
"decision",
"in",
"experiment_bucket_map",
".",
"values",
"(",
")",
":",
"if",
"type",
"(",
"decision",
")",
"is",
"not",
"dict",
"or",
"UserProfile",
".",
"VARIATION_ID_KEY",
"not",
"in",
"decision",
":",
"return",
"False",
"return",
"True"
] |
Determine if provided user profile is valid or not.
Args:
user_profile: User's profile which needs to be validated.
Returns:
Boolean depending upon whether profile is valid or not.
|
[
"Determine",
"if",
"provided",
"user",
"profile",
"is",
"valid",
"or",
"not",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/helpers/validator.py#L126-L156
|
12,130
|
optimizely/python-sdk
|
optimizely/helpers/validator.py
|
is_attribute_valid
|
def is_attribute_valid(attribute_key, attribute_value):
""" Determine if given attribute is valid.
Args:
attribute_key: Variable which needs to be validated
attribute_value: Variable which needs to be validated
Returns:
False if attribute_key is not a string
False if attribute_value is not one of the supported attribute types
True otherwise
"""
if not isinstance(attribute_key, string_types):
return False
if isinstance(attribute_value, (string_types, bool)):
return True
if isinstance(attribute_value, (numbers.Integral, float)):
return is_finite_number(attribute_value)
return False
|
python
|
def is_attribute_valid(attribute_key, attribute_value):
""" Determine if given attribute is valid.
Args:
attribute_key: Variable which needs to be validated
attribute_value: Variable which needs to be validated
Returns:
False if attribute_key is not a string
False if attribute_value is not one of the supported attribute types
True otherwise
"""
if not isinstance(attribute_key, string_types):
return False
if isinstance(attribute_value, (string_types, bool)):
return True
if isinstance(attribute_value, (numbers.Integral, float)):
return is_finite_number(attribute_value)
return False
|
[
"def",
"is_attribute_valid",
"(",
"attribute_key",
",",
"attribute_value",
")",
":",
"if",
"not",
"isinstance",
"(",
"attribute_key",
",",
"string_types",
")",
":",
"return",
"False",
"if",
"isinstance",
"(",
"attribute_value",
",",
"(",
"string_types",
",",
"bool",
")",
")",
":",
"return",
"True",
"if",
"isinstance",
"(",
"attribute_value",
",",
"(",
"numbers",
".",
"Integral",
",",
"float",
")",
")",
":",
"return",
"is_finite_number",
"(",
"attribute_value",
")",
"return",
"False"
] |
Determine if given attribute is valid.
Args:
attribute_key: Variable which needs to be validated
attribute_value: Variable which needs to be validated
Returns:
False if attribute_key is not a string
False if attribute_value is not one of the supported attribute types
True otherwise
|
[
"Determine",
"if",
"given",
"attribute",
"is",
"valid",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/helpers/validator.py#L174-L196
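A quick sketch of is_attribute_valid against the supported attribute types, assuming the optimizely package is importable; the keys and values are illustrative.

from optimizely.helpers import validator

print(validator.is_attribute_valid('plan', 'premium'))      # True: string value
print(validator.is_attribute_valid('visits', 42))           # True: finite number
print(validator.is_attribute_valid('ratio', float('inf')))  # False: not a finite number
print(validator.is_attribute_valid(123, 'premium'))         # False: attribute key must be a string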
|
12,131
|
optimizely/python-sdk
|
optimizely/helpers/validator.py
|
is_finite_number
|
def is_finite_number(value):
""" Validates if the given value is a number, enforces
absolute limit of 2^53 and restricts NAN, INF, -INF.
Args:
value: Value to be validated.
Returns:
Boolean: True if value is a number and not NAN, INF, -INF or
greater than absolute limit of 2^53 else False.
"""
if not isinstance(value, (numbers.Integral, float)):
# numbers.Integral instead of int to accomodate long integer in python 2
return False
if isinstance(value, bool):
# bool is a subclass of int
return False
if isinstance(value, float):
if math.isnan(value) or math.isinf(value):
return False
if abs(value) > (2**53):
return False
return True
|
python
|
def is_finite_number(value):
""" Validates if the given value is a number, enforces
absolute limit of 2^53 and restricts NAN, INF, -INF.
Args:
value: Value to be validated.
Returns:
Boolean: True if value is a number and not NAN, INF, -INF or
greater than absolute limit of 2^53 else False.
"""
if not isinstance(value, (numbers.Integral, float)):
# numbers.Integral instead of int to accomodate long integer in python 2
return False
if isinstance(value, bool):
# bool is a subclass of int
return False
if isinstance(value, float):
if math.isnan(value) or math.isinf(value):
return False
if abs(value) > (2**53):
return False
return True
|
[
"def",
"is_finite_number",
"(",
"value",
")",
":",
"if",
"not",
"isinstance",
"(",
"value",
",",
"(",
"numbers",
".",
"Integral",
",",
"float",
")",
")",
":",
"# numbers.Integral instead of int to accomodate long integer in python 2",
"return",
"False",
"if",
"isinstance",
"(",
"value",
",",
"bool",
")",
":",
"# bool is a subclass of int",
"return",
"False",
"if",
"isinstance",
"(",
"value",
",",
"float",
")",
":",
"if",
"math",
".",
"isnan",
"(",
"value",
")",
"or",
"math",
".",
"isinf",
"(",
"value",
")",
":",
"return",
"False",
"if",
"abs",
"(",
"value",
")",
">",
"(",
"2",
"**",
"53",
")",
":",
"return",
"False",
"return",
"True"
] |
Validates if the given value is a number, enforces
absolute limit of 2^53 and restricts NAN, INF, -INF.
Args:
value: Value to be validated.
Returns:
Boolean: True if value is a number and not NAN, INF, -INF or
greater than absolute limit of 2^53 else False.
|
[
"Validates",
"if",
"the",
"given",
"value",
"is",
"a",
"number",
"enforces",
"absolute",
"limit",
"of",
"2^53",
"and",
"restricts",
"NAN",
"INF",
"-",
"INF",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/helpers/validator.py#L199-L225
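The absolute 2^53 limit and the bool special case are easy to miss, so here is a short sketch of is_finite_number, assuming the optimizely package is importable.

from optimizely.helpers import validator

print(validator.is_finite_number(10))            # True
print(validator.is_finite_number(2 ** 53))       # True: exactly at the limit is allowed
print(validator.is_finite_number(2 ** 53 + 1))   # False: beyond the 2^53 limit
print(validator.is_finite_number(True))          # False: bool is rejected explicitly
print(validator.is_finite_number(float('nan')))  # False: NaN is not finite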
|
12,132
|
optimizely/python-sdk
|
optimizely/helpers/validator.py
|
are_values_same_type
|
def are_values_same_type(first_val, second_val):
""" Method to verify that both values belong to same type. Float and integer are
considered as same type.
Args:
first_val: Value to validate.
second_Val: Value to validate.
Returns:
Boolean: True if both values belong to same type. Otherwise False.
"""
first_val_type = type(first_val)
second_val_type = type(second_val)
# use isinstance to accomodate Python 2 unicode and str types.
if isinstance(first_val, string_types) and isinstance(second_val, string_types):
return True
# Compare types if one of the values is bool because bool is a subclass on Integer.
if isinstance(first_val, bool) or isinstance(second_val, bool):
return first_val_type == second_val_type
# Treat ints and floats as same type.
if isinstance(first_val, (numbers.Integral, float)) and isinstance(second_val, (numbers.Integral, float)):
return True
return False
|
python
|
def are_values_same_type(first_val, second_val):
""" Method to verify that both values belong to same type. Float and integer are
considered as same type.
Args:
first_val: Value to validate.
second_Val: Value to validate.
Returns:
Boolean: True if both values belong to same type. Otherwise False.
"""
first_val_type = type(first_val)
second_val_type = type(second_val)
# use isinstance to accomodate Python 2 unicode and str types.
if isinstance(first_val, string_types) and isinstance(second_val, string_types):
return True
# Compare types if one of the values is bool because bool is a subclass on Integer.
if isinstance(first_val, bool) or isinstance(second_val, bool):
return first_val_type == second_val_type
# Treat ints and floats as same type.
if isinstance(first_val, (numbers.Integral, float)) and isinstance(second_val, (numbers.Integral, float)):
return True
return False
|
[
"def",
"are_values_same_type",
"(",
"first_val",
",",
"second_val",
")",
":",
"first_val_type",
"=",
"type",
"(",
"first_val",
")",
"second_val_type",
"=",
"type",
"(",
"second_val",
")",
"# use isinstance to accomodate Python 2 unicode and str types.",
"if",
"isinstance",
"(",
"first_val",
",",
"string_types",
")",
"and",
"isinstance",
"(",
"second_val",
",",
"string_types",
")",
":",
"return",
"True",
"# Compare types if one of the values is bool because bool is a subclass on Integer.",
"if",
"isinstance",
"(",
"first_val",
",",
"bool",
")",
"or",
"isinstance",
"(",
"second_val",
",",
"bool",
")",
":",
"return",
"first_val_type",
"==",
"second_val_type",
"# Treat ints and floats as same type.",
"if",
"isinstance",
"(",
"first_val",
",",
"(",
"numbers",
".",
"Integral",
",",
"float",
")",
")",
"and",
"isinstance",
"(",
"second_val",
",",
"(",
"numbers",
".",
"Integral",
",",
"float",
")",
")",
":",
"return",
"True",
"return",
"False"
] |
Method to verify that both values belong to same type. Float and integer are
considered as same type.
Args:
first_val: Value to validate.
second_Val: Value to validate.
Returns:
Boolean: True if both values belong to same type. Otherwise False.
|
[
"Method",
"to",
"verify",
"that",
"both",
"values",
"belong",
"to",
"same",
"type",
".",
"Float",
"and",
"integer",
"are",
"considered",
"as",
"same",
"type",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/helpers/validator.py#L228-L255
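A sketch of the type-comparison rules in are_values_same_type, assuming the optimizely package is importable; note the bool and numeric special cases in the code above.

from optimizely.helpers import validator

print(validator.are_values_same_type(1, 2.5))    # True: ints and floats count as the same type
print(validator.are_values_same_type('a', 'b'))  # True: both are string types
print(validator.are_values_same_type(True, 1))   # False: bool is compared by exact type
print(validator.are_values_same_type('1', 1))    # False: string versus number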
|
12,133
|
optimizely/python-sdk
|
optimizely/logger.py
|
reset_logger
|
def reset_logger(name, level=None, handler=None):
"""
Make a standard python logger object with default formatter, handler, etc.
Defaults are:
- level == logging.INFO
- handler == logging.StreamHandler()
Args:
name: a logger name.
level: an optional initial log level for this logger.
handler: an optional initial handler for this logger.
Returns: a standard python logger with a single handler.
"""
# Make the logger and set its level.
if level is None:
level = logging.INFO
logger = logging.getLogger(name)
logger.setLevel(level)
# Make the handler and attach it.
handler = handler or logging.StreamHandler()
handler.setFormatter(logging.Formatter(_DEFAULT_LOG_FORMAT))
# We don't use ``.addHandler``, since this logger may have already been
# instantiated elsewhere with a different handler. It should only ever
# have one, not many.
logger.handlers = [handler]
return logger
|
python
|
def reset_logger(name, level=None, handler=None):
"""
Make a standard python logger object with default formatter, handler, etc.
Defaults are:
- level == logging.INFO
- handler == logging.StreamHandler()
Args:
name: a logger name.
level: an optional initial log level for this logger.
handler: an optional initial handler for this logger.
Returns: a standard python logger with a single handler.
"""
# Make the logger and set its level.
if level is None:
level = logging.INFO
logger = logging.getLogger(name)
logger.setLevel(level)
# Make the handler and attach it.
handler = handler or logging.StreamHandler()
handler.setFormatter(logging.Formatter(_DEFAULT_LOG_FORMAT))
# We don't use ``.addHandler``, since this logger may have already been
# instantiated elsewhere with a different handler. It should only ever
# have one, not many.
logger.handlers = [handler]
return logger
|
[
"def",
"reset_logger",
"(",
"name",
",",
"level",
"=",
"None",
",",
"handler",
"=",
"None",
")",
":",
"# Make the logger and set its level.",
"if",
"level",
"is",
"None",
":",
"level",
"=",
"logging",
".",
"INFO",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"name",
")",
"logger",
".",
"setLevel",
"(",
"level",
")",
"# Make the handler and attach it.",
"handler",
"=",
"handler",
"or",
"logging",
".",
"StreamHandler",
"(",
")",
"handler",
".",
"setFormatter",
"(",
"logging",
".",
"Formatter",
"(",
"_DEFAULT_LOG_FORMAT",
")",
")",
"# We don't use ``.addHandler``, since this logger may have already been",
"# instantiated elsewhere with a different handler. It should only ever",
"# have one, not many.",
"logger",
".",
"handlers",
"=",
"[",
"handler",
"]",
"return",
"logger"
] |
Make a standard python logger object with default formatter, handler, etc.
Defaults are:
- level == logging.INFO
- handler == logging.StreamHandler()
Args:
name: a logger name.
level: an optional initial log level for this logger.
handler: an optional initial handler for this logger.
Returns: a standard python logger with a single handler.
|
[
"Make",
"a",
"standard",
"python",
"logger",
"object",
"with",
"default",
"formatter",
"handler",
"etc",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/logger.py#L22-L52
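A short sketch of reset_logger, assuming the optimizely package is importable; the logger name and level are illustrative.

import logging

from optimizely import logger as optimizely_logging

log = optimizely_logging.reset_logger('my_app', level=logging.DEBUG)
log.debug('Formatter, handler and level are now configured.')
print(len(log.handlers))  # 1: the handler list is replaced, not appended to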
|
12,134
|
optimizely/python-sdk
|
optimizely/logger.py
|
adapt_logger
|
def adapt_logger(logger):
"""
Adapt our custom logger.BaseLogger object into a standard logging.Logger object.
Adaptations are:
- NoOpLogger turns into a logger with a single NullHandler.
- SimpleLogger turns into a logger with a StreamHandler and level.
Args:
logger: Possibly a logger.BaseLogger, or a standard python logging.Logger.
Returns: a standard python logging.Logger.
"""
if isinstance(logger, logging.Logger):
return logger
# Use the standard python logger created by these classes.
if isinstance(logger, (SimpleLogger, NoOpLogger)):
return logger.logger
# Otherwise, return whatever we were given because we can't adapt.
return logger
|
python
|
def adapt_logger(logger):
"""
Adapt our custom logger.BaseLogger object into a standard logging.Logger object.
Adaptations are:
- NoOpLogger turns into a logger with a single NullHandler.
- SimpleLogger turns into a logger with a StreamHandler and level.
Args:
logger: Possibly a logger.BaseLogger, or a standard python logging.Logger.
Returns: a standard python logging.Logger.
"""
if isinstance(logger, logging.Logger):
return logger
# Use the standard python logger created by these classes.
if isinstance(logger, (SimpleLogger, NoOpLogger)):
return logger.logger
# Otherwise, return whatever we were given because we can't adapt.
return logger
|
[
"def",
"adapt_logger",
"(",
"logger",
")",
":",
"if",
"isinstance",
"(",
"logger",
",",
"logging",
".",
"Logger",
")",
":",
"return",
"logger",
"# Use the standard python logger created by these classes.",
"if",
"isinstance",
"(",
"logger",
",",
"(",
"SimpleLogger",
",",
"NoOpLogger",
")",
")",
":",
"return",
"logger",
".",
"logger",
"# Otherwise, return whatever we were given because we can't adapt.",
"return",
"logger"
] |
Adapt our custom logger.BaseLogger object into a standard logging.Logger object.
Adaptations are:
- NoOpLogger turns into a logger with a single NullHandler.
- SimpleLogger turns into a logger with a StreamHandler and level.
Args:
logger: Possibly a logger.BaseLogger, or a standard python logging.Logger.
Returns: a standard python logging.Logger.
|
[
"Adapt",
"our",
"custom",
"logger",
".",
"BaseLogger",
"object",
"into",
"a",
"standard",
"logging",
".",
"Logger",
"object",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/logger.py#L95-L117
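A sketch of adapt_logger's two paths, assuming the optimizely package is importable and that SimpleLogger can be constructed with its defaults.

import logging

from optimizely import logger as optimizely_logging

# A standard logging.Logger is returned unchanged.
std_logger = logging.getLogger('passthrough')
print(optimizely_logging.adapt_logger(std_logger) is std_logger)  # True

# A SimpleLogger is unwrapped to the standard logger it holds.
adapted = optimizely_logging.adapt_logger(optimizely_logging.SimpleLogger())
print(isinstance(adapted, logging.Logger))  # True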
|
12,135
|
optimizely/python-sdk
|
optimizely/user_profile.py
|
UserProfile.get_variation_for_experiment
|
def get_variation_for_experiment(self, experiment_id):
""" Helper method to retrieve variation ID for given experiment.
Args:
experiment_id: ID for experiment for which variation needs to be looked up for.
Returns:
Variation ID corresponding to the experiment. None if no decision available.
"""
return self.experiment_bucket_map.get(experiment_id, {self.VARIATION_ID_KEY: None}).get(self.VARIATION_ID_KEY)
|
python
|
def get_variation_for_experiment(self, experiment_id):
""" Helper method to retrieve variation ID for given experiment.
Args:
experiment_id: ID for experiment for which variation needs to be looked up for.
Returns:
Variation ID corresponding to the experiment. None if no decision available.
"""
return self.experiment_bucket_map.get(experiment_id, {self.VARIATION_ID_KEY: None}).get(self.VARIATION_ID_KEY)
|
[
"def",
"get_variation_for_experiment",
"(",
"self",
",",
"experiment_id",
")",
":",
"return",
"self",
".",
"experiment_bucket_map",
".",
"get",
"(",
"experiment_id",
",",
"{",
"self",
".",
"VARIATION_ID_KEY",
":",
"None",
"}",
")",
".",
"get",
"(",
"self",
".",
"VARIATION_ID_KEY",
")"
] |
Helper method to retrieve variation ID for given experiment.
Args:
experiment_id: ID for experiment for which variation needs to be looked up for.
Returns:
Variation ID corresponding to the experiment. None if no decision available.
|
[
"Helper",
"method",
"to",
"retrieve",
"variation",
"ID",
"for",
"given",
"experiment",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/user_profile.py#L34-L44
|
12,136
|
optimizely/python-sdk
|
optimizely/helpers/event_tag_utils.py
|
get_numeric_value
|
def get_numeric_value(event_tags, logger=None):
"""
A smart getter of the numeric value from the event tags.
Args:
event_tags: A dictionary of event tags.
logger: Optional logger.
Returns:
A float numeric metric value is returned when the provided numeric
metric value is in the following format:
- A string (properly formatted, e.g., no commas)
- An integer
- A float or double
None is returned when the provided numeric metric values is in
the following format:
- None
- A boolean
- inf, -inf, nan
- A string not properly formatted (e.g., '1,234')
- Any values that cannot be cast to a float (e.g., an array or dictionary)
"""
logger_message_debug = None
numeric_metric_value = None
if event_tags is None:
logger_message_debug = 'Event tags is undefined.'
elif not isinstance(event_tags, dict):
logger_message_debug = 'Event tags is not a dictionary.'
elif NUMERIC_METRIC_TYPE not in event_tags:
logger_message_debug = 'The numeric metric key is not in event tags.'
else:
numeric_metric_value = event_tags[NUMERIC_METRIC_TYPE]
try:
if isinstance(numeric_metric_value, (numbers.Integral, float, str)):
# Attempt to convert the numeric metric value to a float
# (if it isn't already a float).
cast_numeric_metric_value = float(numeric_metric_value)
# If not a float after casting, then make everything else a None.
# Other potential values are nan, inf, and -inf.
if not isinstance(cast_numeric_metric_value, float) \
or math.isnan(cast_numeric_metric_value) \
or math.isinf(cast_numeric_metric_value):
logger_message_debug = 'Provided numeric value {} is in an invalid format.'\
.format(numeric_metric_value)
numeric_metric_value = None
else:
# Handle booleans as a special case.
# They are treated like an integer in the cast, but we do not want to cast this.
if isinstance(numeric_metric_value, bool):
logger_message_debug = 'Provided numeric value is a boolean, which is an invalid format.'
numeric_metric_value = None
else:
numeric_metric_value = cast_numeric_metric_value
else:
logger_message_debug = 'Numeric metric value is not in integer, float, or string form.'
numeric_metric_value = None
except ValueError:
logger_message_debug = 'Value error while casting numeric metric value to a float.'
numeric_metric_value = None
# Log all potential debug messages while converting the numeric value to a float.
if logger and logger_message_debug:
logger.log(enums.LogLevels.DEBUG, logger_message_debug)
# Log the final numeric metric value
if numeric_metric_value is not None:
if logger:
logger.log(enums.LogLevels.INFO,
'The numeric metric value {} will be sent to results.'
.format(numeric_metric_value))
else:
if logger:
logger.log(enums.LogLevels.WARNING,
'The provided numeric metric value {} is in an invalid format and will not be sent to results.'
.format(numeric_metric_value))
return numeric_metric_value
|
python
|
def get_numeric_value(event_tags, logger=None):
"""
A smart getter of the numeric value from the event tags.
Args:
event_tags: A dictionary of event tags.
logger: Optional logger.
Returns:
A float numeric metric value is returned when the provided numeric
metric value is in the following format:
- A string (properly formatted, e.g., no commas)
- An integer
- A float or double
None is returned when the provided numeric metric values is in
the following format:
- None
- A boolean
- inf, -inf, nan
- A string not properly formatted (e.g., '1,234')
- Any values that cannot be cast to a float (e.g., an array or dictionary)
"""
logger_message_debug = None
numeric_metric_value = None
if event_tags is None:
logger_message_debug = 'Event tags is undefined.'
elif not isinstance(event_tags, dict):
logger_message_debug = 'Event tags is not a dictionary.'
elif NUMERIC_METRIC_TYPE not in event_tags:
logger_message_debug = 'The numeric metric key is not in event tags.'
else:
numeric_metric_value = event_tags[NUMERIC_METRIC_TYPE]
try:
if isinstance(numeric_metric_value, (numbers.Integral, float, str)):
# Attempt to convert the numeric metric value to a float
# (if it isn't already a float).
cast_numeric_metric_value = float(numeric_metric_value)
# If not a float after casting, then make everything else a None.
# Other potential values are nan, inf, and -inf.
if not isinstance(cast_numeric_metric_value, float) \
or math.isnan(cast_numeric_metric_value) \
or math.isinf(cast_numeric_metric_value):
logger_message_debug = 'Provided numeric value {} is in an invalid format.'\
.format(numeric_metric_value)
numeric_metric_value = None
else:
# Handle booleans as a special case.
# They are treated like an integer in the cast, but we do not want to cast this.
if isinstance(numeric_metric_value, bool):
logger_message_debug = 'Provided numeric value is a boolean, which is an invalid format.'
numeric_metric_value = None
else:
numeric_metric_value = cast_numeric_metric_value
else:
logger_message_debug = 'Numeric metric value is not in integer, float, or string form.'
numeric_metric_value = None
except ValueError:
logger_message_debug = 'Value error while casting numeric metric value to a float.'
numeric_metric_value = None
# Log all potential debug messages while converting the numeric value to a float.
if logger and logger_message_debug:
logger.log(enums.LogLevels.DEBUG, logger_message_debug)
# Log the final numeric metric value
if numeric_metric_value is not None:
if logger:
logger.log(enums.LogLevels.INFO,
'The numeric metric value {} will be sent to results.'
.format(numeric_metric_value))
else:
if logger:
logger.log(enums.LogLevels.WARNING,
'The provided numeric metric value {} is in an invalid format and will not be sent to results.'
.format(numeric_metric_value))
return numeric_metric_value
|
[
"def",
"get_numeric_value",
"(",
"event_tags",
",",
"logger",
"=",
"None",
")",
":",
"logger_message_debug",
"=",
"None",
"numeric_metric_value",
"=",
"None",
"if",
"event_tags",
"is",
"None",
":",
"logger_message_debug",
"=",
"'Event tags is undefined.'",
"elif",
"not",
"isinstance",
"(",
"event_tags",
",",
"dict",
")",
":",
"logger_message_debug",
"=",
"'Event tags is not a dictionary.'",
"elif",
"NUMERIC_METRIC_TYPE",
"not",
"in",
"event_tags",
":",
"logger_message_debug",
"=",
"'The numeric metric key is not in event tags.'",
"else",
":",
"numeric_metric_value",
"=",
"event_tags",
"[",
"NUMERIC_METRIC_TYPE",
"]",
"try",
":",
"if",
"isinstance",
"(",
"numeric_metric_value",
",",
"(",
"numbers",
".",
"Integral",
",",
"float",
",",
"str",
")",
")",
":",
"# Attempt to convert the numeric metric value to a float",
"# (if it isn't already a float).",
"cast_numeric_metric_value",
"=",
"float",
"(",
"numeric_metric_value",
")",
"# If not a float after casting, then make everything else a None.",
"# Other potential values are nan, inf, and -inf.",
"if",
"not",
"isinstance",
"(",
"cast_numeric_metric_value",
",",
"float",
")",
"or",
"math",
".",
"isnan",
"(",
"cast_numeric_metric_value",
")",
"or",
"math",
".",
"isinf",
"(",
"cast_numeric_metric_value",
")",
":",
"logger_message_debug",
"=",
"'Provided numeric value {} is in an invalid format.'",
".",
"format",
"(",
"numeric_metric_value",
")",
"numeric_metric_value",
"=",
"None",
"else",
":",
"# Handle booleans as a special case.",
"# They are treated like an integer in the cast, but we do not want to cast this.",
"if",
"isinstance",
"(",
"numeric_metric_value",
",",
"bool",
")",
":",
"logger_message_debug",
"=",
"'Provided numeric value is a boolean, which is an invalid format.'",
"numeric_metric_value",
"=",
"None",
"else",
":",
"numeric_metric_value",
"=",
"cast_numeric_metric_value",
"else",
":",
"logger_message_debug",
"=",
"'Numeric metric value is not in integer, float, or string form.'",
"numeric_metric_value",
"=",
"None",
"except",
"ValueError",
":",
"logger_message_debug",
"=",
"'Value error while casting numeric metric value to a float.'",
"numeric_metric_value",
"=",
"None",
"# Log all potential debug messages while converting the numeric value to a float.",
"if",
"logger",
"and",
"logger_message_debug",
":",
"logger",
".",
"log",
"(",
"enums",
".",
"LogLevels",
".",
"DEBUG",
",",
"logger_message_debug",
")",
"# Log the final numeric metric value",
"if",
"numeric_metric_value",
"is",
"not",
"None",
":",
"if",
"logger",
":",
"logger",
".",
"log",
"(",
"enums",
".",
"LogLevels",
".",
"INFO",
",",
"'The numeric metric value {} will be sent to results.'",
".",
"format",
"(",
"numeric_metric_value",
")",
")",
"else",
":",
"if",
"logger",
":",
"logger",
".",
"log",
"(",
"enums",
".",
"LogLevels",
".",
"WARNING",
",",
"'The provided numeric metric value {} is in an invalid format and will not be sent to results.'",
".",
"format",
"(",
"numeric_metric_value",
")",
")",
"return",
"numeric_metric_value"
] |
A smart getter of the numeric value from the event tags.
Args:
event_tags: A dictionary of event tags.
logger: Optional logger.
Returns:
A float numeric metric value is returned when the provided numeric
metric value is in the following format:
- A string (properly formatted, e.g., no commas)
- An integer
- A float or double
None is returned when the provided numeric metric values is in
the following format:
- None
- A boolean
- inf, -inf, nan
- A string not properly formatted (e.g., '1,234')
- Any values that cannot be cast to a float (e.g., an array or dictionary)
|
[
"A",
"smart",
"getter",
"of",
"the",
"numeric",
"value",
"from",
"the",
"event",
"tags",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/helpers/event_tag_utils.py#L43-L123
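A sketch of get_numeric_value, assuming the optimizely package is importable; the metric key is referenced through the module's NUMERIC_METRIC_TYPE name rather than assuming its literal value.

from optimizely.helpers import event_tag_utils

tags = {event_tag_utils.NUMERIC_METRIC_TYPE: '42.5'}
print(event_tag_utils.get_numeric_value(tags))          # 42.5: the string is cast to a float
print(event_tag_utils.get_numeric_value({'other': 1}))  # None: the numeric metric key is missing
print(event_tag_utils.get_numeric_value(None))          # None: event tags are undefined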
|
12,137
|
optimizely/python-sdk
|
optimizely/lib/pymmh3.py
|
hash
|
def hash( key, seed = 0x0 ):
''' Implements 32bit murmur3 hash. '''
key = bytearray( xencode(key) )
def fmix( h ):
h ^= h >> 16
h = ( h * 0x85ebca6b ) & 0xFFFFFFFF
h ^= h >> 13
h = ( h * 0xc2b2ae35 ) & 0xFFFFFFFF
h ^= h >> 16
return h
length = len( key )
nblocks = int( length / 4 )
h1 = seed
c1 = 0xcc9e2d51
c2 = 0x1b873593
# body
for block_start in xrange( 0, nblocks * 4, 4 ):
# ??? big endian?
k1 = key[ block_start + 3 ] << 24 | \
key[ block_start + 2 ] << 16 | \
key[ block_start + 1 ] << 8 | \
key[ block_start + 0 ]
k1 = ( c1 * k1 ) & 0xFFFFFFFF
k1 = ( k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF # inlined ROTL32
k1 = ( c2 * k1 ) & 0xFFFFFFFF
h1 ^= k1
h1 = ( h1 << 13 | h1 >> 19 ) & 0xFFFFFFFF # inlined ROTL32
h1 = ( h1 * 5 + 0xe6546b64 ) & 0xFFFFFFFF
# tail
tail_index = nblocks * 4
k1 = 0
tail_size = length & 3
if tail_size >= 3:
k1 ^= key[ tail_index + 2 ] << 16
if tail_size >= 2:
k1 ^= key[ tail_index + 1 ] << 8
if tail_size >= 1:
k1 ^= key[ tail_index + 0 ]
if tail_size > 0:
k1 = ( k1 * c1 ) & 0xFFFFFFFF
k1 = ( k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF # inlined ROTL32
k1 = ( k1 * c2 ) & 0xFFFFFFFF
h1 ^= k1
#finalization
unsigned_val = fmix( h1 ^ length )
if unsigned_val & 0x80000000 == 0:
return unsigned_val
else:
return -( (unsigned_val ^ 0xFFFFFFFF) + 1 )
|
python
|
def hash( key, seed = 0x0 ):
''' Implements 32bit murmur3 hash. '''
key = bytearray( xencode(key) )
def fmix( h ):
h ^= h >> 16
h = ( h * 0x85ebca6b ) & 0xFFFFFFFF
h ^= h >> 13
h = ( h * 0xc2b2ae35 ) & 0xFFFFFFFF
h ^= h >> 16
return h
length = len( key )
nblocks = int( length / 4 )
h1 = seed
c1 = 0xcc9e2d51
c2 = 0x1b873593
# body
for block_start in xrange( 0, nblocks * 4, 4 ):
# ??? big endian?
k1 = key[ block_start + 3 ] << 24 | \
key[ block_start + 2 ] << 16 | \
key[ block_start + 1 ] << 8 | \
key[ block_start + 0 ]
k1 = ( c1 * k1 ) & 0xFFFFFFFF
k1 = ( k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF # inlined ROTL32
k1 = ( c2 * k1 ) & 0xFFFFFFFF
h1 ^= k1
h1 = ( h1 << 13 | h1 >> 19 ) & 0xFFFFFFFF # inlined ROTL32
h1 = ( h1 * 5 + 0xe6546b64 ) & 0xFFFFFFFF
# tail
tail_index = nblocks * 4
k1 = 0
tail_size = length & 3
if tail_size >= 3:
k1 ^= key[ tail_index + 2 ] << 16
if tail_size >= 2:
k1 ^= key[ tail_index + 1 ] << 8
if tail_size >= 1:
k1 ^= key[ tail_index + 0 ]
if tail_size > 0:
k1 = ( k1 * c1 ) & 0xFFFFFFFF
k1 = ( k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF # inlined ROTL32
k1 = ( k1 * c2 ) & 0xFFFFFFFF
h1 ^= k1
#finalization
unsigned_val = fmix( h1 ^ length )
if unsigned_val & 0x80000000 == 0:
return unsigned_val
else:
return -( (unsigned_val ^ 0xFFFFFFFF) + 1 )
|
[
"def",
"hash",
"(",
"key",
",",
"seed",
"=",
"0x0",
")",
":",
"key",
"=",
"bytearray",
"(",
"xencode",
"(",
"key",
")",
")",
"def",
"fmix",
"(",
"h",
")",
":",
"h",
"^=",
"h",
">>",
"16",
"h",
"=",
"(",
"h",
"*",
"0x85ebca6b",
")",
"&",
"0xFFFFFFFF",
"h",
"^=",
"h",
">>",
"13",
"h",
"=",
"(",
"h",
"*",
"0xc2b2ae35",
")",
"&",
"0xFFFFFFFF",
"h",
"^=",
"h",
">>",
"16",
"return",
"h",
"length",
"=",
"len",
"(",
"key",
")",
"nblocks",
"=",
"int",
"(",
"length",
"/",
"4",
")",
"h1",
"=",
"seed",
"c1",
"=",
"0xcc9e2d51",
"c2",
"=",
"0x1b873593",
"# body",
"for",
"block_start",
"in",
"xrange",
"(",
"0",
",",
"nblocks",
"*",
"4",
",",
"4",
")",
":",
"# ??? big endian?",
"k1",
"=",
"key",
"[",
"block_start",
"+",
"3",
"]",
"<<",
"24",
"|",
"key",
"[",
"block_start",
"+",
"2",
"]",
"<<",
"16",
"|",
"key",
"[",
"block_start",
"+",
"1",
"]",
"<<",
"8",
"|",
"key",
"[",
"block_start",
"+",
"0",
"]",
"k1",
"=",
"(",
"c1",
"*",
"k1",
")",
"&",
"0xFFFFFFFF",
"k1",
"=",
"(",
"k1",
"<<",
"15",
"|",
"k1",
">>",
"17",
")",
"&",
"0xFFFFFFFF",
"# inlined ROTL32",
"k1",
"=",
"(",
"c2",
"*",
"k1",
")",
"&",
"0xFFFFFFFF",
"h1",
"^=",
"k1",
"h1",
"=",
"(",
"h1",
"<<",
"13",
"|",
"h1",
">>",
"19",
")",
"&",
"0xFFFFFFFF",
"# inlined ROTL32",
"h1",
"=",
"(",
"h1",
"*",
"5",
"+",
"0xe6546b64",
")",
"&",
"0xFFFFFFFF",
"# tail",
"tail_index",
"=",
"nblocks",
"*",
"4",
"k1",
"=",
"0",
"tail_size",
"=",
"length",
"&",
"3",
"if",
"tail_size",
">=",
"3",
":",
"k1",
"^=",
"key",
"[",
"tail_index",
"+",
"2",
"]",
"<<",
"16",
"if",
"tail_size",
">=",
"2",
":",
"k1",
"^=",
"key",
"[",
"tail_index",
"+",
"1",
"]",
"<<",
"8",
"if",
"tail_size",
">=",
"1",
":",
"k1",
"^=",
"key",
"[",
"tail_index",
"+",
"0",
"]",
"if",
"tail_size",
">",
"0",
":",
"k1",
"=",
"(",
"k1",
"*",
"c1",
")",
"&",
"0xFFFFFFFF",
"k1",
"=",
"(",
"k1",
"<<",
"15",
"|",
"k1",
">>",
"17",
")",
"&",
"0xFFFFFFFF",
"# inlined ROTL32",
"k1",
"=",
"(",
"k1",
"*",
"c2",
")",
"&",
"0xFFFFFFFF",
"h1",
"^=",
"k1",
"#finalization",
"unsigned_val",
"=",
"fmix",
"(",
"h1",
"^",
"length",
")",
"if",
"unsigned_val",
"&",
"0x80000000",
"==",
"0",
":",
"return",
"unsigned_val",
"else",
":",
"return",
"-",
"(",
"(",
"unsigned_val",
"^",
"0xFFFFFFFF",
")",
"+",
"1",
")"
] |
Implements 32bit murmur3 hash.
|
[
"Implements",
"32bit",
"murmur3",
"hash",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/lib/pymmh3.py#L34-L94
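A sketch of the 32-bit murmur3 helper, assuming the optimizely package is importable and that string keys are accepted (the module encodes them via xencode before hashing).

from optimizely.lib import pymmh3

# The same key and seed always map to the same signed 32-bit value.
print(pymmh3.hash('my_user_id'))
print(pymmh3.hash('my_user_id', seed=1))  # a different seed gives a different hash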
|
12,138
|
optimizely/python-sdk
|
optimizely/lib/pymmh3.py
|
hash64
|
def hash64( key, seed = 0x0, x64arch = True ):
''' Implements 64bit murmur3 hash. Returns a tuple. '''
hash_128 = hash128( key, seed, x64arch )
unsigned_val1 = hash_128 & 0xFFFFFFFFFFFFFFFF
if unsigned_val1 & 0x8000000000000000 == 0:
signed_val1 = unsigned_val1
else:
signed_val1 = -( (unsigned_val1 ^ 0xFFFFFFFFFFFFFFFF) + 1 )
unsigned_val2 = ( hash_128 >> 64 ) & 0xFFFFFFFFFFFFFFFF
if unsigned_val2 & 0x8000000000000000 == 0:
signed_val2 = unsigned_val2
else:
signed_val2 = -( (unsigned_val2 ^ 0xFFFFFFFFFFFFFFFF) + 1 )
return ( int( signed_val1 ), int( signed_val2 ) )
|
python
|
def hash64( key, seed = 0x0, x64arch = True ):
''' Implements 64bit murmur3 hash. Returns a tuple. '''
hash_128 = hash128( key, seed, x64arch )
unsigned_val1 = hash_128 & 0xFFFFFFFFFFFFFFFF
if unsigned_val1 & 0x8000000000000000 == 0:
signed_val1 = unsigned_val1
else:
signed_val1 = -( (unsigned_val1 ^ 0xFFFFFFFFFFFFFFFF) + 1 )
unsigned_val2 = ( hash_128 >> 64 ) & 0xFFFFFFFFFFFFFFFF
if unsigned_val2 & 0x8000000000000000 == 0:
signed_val2 = unsigned_val2
else:
signed_val2 = -( (unsigned_val2 ^ 0xFFFFFFFFFFFFFFFF) + 1 )
return ( int( signed_val1 ), int( signed_val2 ) )
|
[
"def",
"hash64",
"(",
"key",
",",
"seed",
"=",
"0x0",
",",
"x64arch",
"=",
"True",
")",
":",
"hash_128",
"=",
"hash128",
"(",
"key",
",",
"seed",
",",
"x64arch",
")",
"unsigned_val1",
"=",
"hash_128",
"&",
"0xFFFFFFFFFFFFFFFF",
"if",
"unsigned_val1",
"&",
"0x8000000000000000",
"==",
"0",
":",
"signed_val1",
"=",
"unsigned_val1",
"else",
":",
"signed_val1",
"=",
"-",
"(",
"(",
"unsigned_val1",
"^",
"0xFFFFFFFFFFFFFFFF",
")",
"+",
"1",
")",
"unsigned_val2",
"=",
"(",
"hash_128",
">>",
"64",
")",
"&",
"0xFFFFFFFFFFFFFFFF",
"if",
"unsigned_val2",
"&",
"0x8000000000000000",
"==",
"0",
":",
"signed_val2",
"=",
"unsigned_val2",
"else",
":",
"signed_val2",
"=",
"-",
"(",
"(",
"unsigned_val2",
"^",
"0xFFFFFFFFFFFFFFFF",
")",
"+",
"1",
")",
"return",
"(",
"int",
"(",
"signed_val1",
")",
",",
"int",
"(",
"signed_val2",
")",
")"
] |
Implements 64bit murmur3 hash. Returns a tuple.
|
[
"Implements",
"64bit",
"murmur3",
"hash",
".",
"Returns",
"a",
"tuple",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/lib/pymmh3.py#L406-L423
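A sketch of hash64, assuming the optimizely package is importable; the key is illustrative.

from optimizely.lib import pymmh3

low, high = pymmh3.hash64('my_user_id')
print(low, high)  # the two signed 64-bit halves of the 128-bit murmur3 hash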
|
12,139
|
optimizely/python-sdk
|
optimizely/lib/pymmh3.py
|
hash_bytes
|
def hash_bytes( key, seed = 0x0, x64arch = True ):
''' Implements 128bit murmur3 hash. Returns a byte string. '''
hash_128 = hash128( key, seed, x64arch )
bytestring = ''
for i in xrange(0, 16, 1):
lsbyte = hash_128 & 0xFF
bytestring = bytestring + str( chr( lsbyte ) )
hash_128 = hash_128 >> 8
return bytestring
|
python
|
def hash_bytes( key, seed = 0x0, x64arch = True ):
''' Implements 128bit murmur3 hash. Returns a byte string. '''
hash_128 = hash128( key, seed, x64arch )
bytestring = ''
for i in xrange(0, 16, 1):
lsbyte = hash_128 & 0xFF
bytestring = bytestring + str( chr( lsbyte ) )
hash_128 = hash_128 >> 8
return bytestring
|
[
"def",
"hash_bytes",
"(",
"key",
",",
"seed",
"=",
"0x0",
",",
"x64arch",
"=",
"True",
")",
":",
"hash_128",
"=",
"hash128",
"(",
"key",
",",
"seed",
",",
"x64arch",
")",
"bytestring",
"=",
"''",
"for",
"i",
"in",
"xrange",
"(",
"0",
",",
"16",
",",
"1",
")",
":",
"lsbyte",
"=",
"hash_128",
"&",
"0xFF",
"bytestring",
"=",
"bytestring",
"+",
"str",
"(",
"chr",
"(",
"lsbyte",
")",
")",
"hash_128",
"=",
"hash_128",
">>",
"8",
"return",
"bytestring"
] |
Implements 128bit murmur3 hash. Returns a byte string.
|
[
"Implements",
"128bit",
"murmur3",
"hash",
".",
"Returns",
"a",
"byte",
"string",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/lib/pymmh3.py#L426-L438
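A sketch of hash_bytes, assuming the optimizely package is importable; the key is illustrative.

from optimizely.lib import pymmh3

digest = pymmh3.hash_bytes('my_user_id')
print(len(digest))  # 16: one character per byte of the 128-bit hash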
|
12,140
|
optimizely/python-sdk
|
optimizely/bucketer.py
|
Bucketer._generate_bucket_value
|
def _generate_bucket_value(self, bucketing_id):
""" Helper function to generate bucket value in half-closed interval [0, MAX_TRAFFIC_VALUE).
Args:
bucketing_id: ID for bucketing.
Returns:
Bucket value corresponding to the provided bucketing ID.
"""
ratio = float(self._generate_unsigned_hash_code_32_bit(bucketing_id)) / MAX_HASH_VALUE
return math.floor(ratio * MAX_TRAFFIC_VALUE)
|
python
|
def _generate_bucket_value(self, bucketing_id):
""" Helper function to generate bucket value in half-closed interval [0, MAX_TRAFFIC_VALUE).
Args:
bucketing_id: ID for bucketing.
Returns:
Bucket value corresponding to the provided bucketing ID.
"""
ratio = float(self._generate_unsigned_hash_code_32_bit(bucketing_id)) / MAX_HASH_VALUE
return math.floor(ratio * MAX_TRAFFIC_VALUE)
|
[
"def",
"_generate_bucket_value",
"(",
"self",
",",
"bucketing_id",
")",
":",
"ratio",
"=",
"float",
"(",
"self",
".",
"_generate_unsigned_hash_code_32_bit",
"(",
"bucketing_id",
")",
")",
"/",
"MAX_HASH_VALUE",
"return",
"math",
".",
"floor",
"(",
"ratio",
"*",
"MAX_TRAFFIC_VALUE",
")"
] |
Helper function to generate bucket value in half-closed interval [0, MAX_TRAFFIC_VALUE).
Args:
bucketing_id: ID for bucketing.
Returns:
Bucket value corresponding to the provided bucketing ID.
|
[
"Helper",
"function",
"to",
"generate",
"bucket",
"value",
"in",
"half",
"-",
"closed",
"interval",
"[",
"0",
"MAX_TRAFFIC_VALUE",
")",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/bucketer.py#L55-L66
|
12,141
|
optimizely/python-sdk
|
optimizely/bucketer.py
|
Bucketer.find_bucket
|
def find_bucket(self, bucketing_id, parent_id, traffic_allocations):
""" Determine entity based on bucket value and traffic allocations.
Args:
bucketing_id: ID to be used for bucketing the user.
parent_id: ID representing group or experiment.
traffic_allocations: Traffic allocations representing traffic allotted to experiments or variations.
Returns:
Entity ID which may represent experiment or variation.
"""
bucketing_key = BUCKETING_ID_TEMPLATE.format(bucketing_id=bucketing_id, parent_id=parent_id)
bucketing_number = self._generate_bucket_value(bucketing_key)
self.config.logger.debug('Assigned bucket %s to user with bucketing ID "%s".' % (
bucketing_number,
bucketing_id
))
for traffic_allocation in traffic_allocations:
current_end_of_range = traffic_allocation.get('endOfRange')
if bucketing_number < current_end_of_range:
return traffic_allocation.get('entityId')
return None
|
python
|
def find_bucket(self, bucketing_id, parent_id, traffic_allocations):
""" Determine entity based on bucket value and traffic allocations.
Args:
bucketing_id: ID to be used for bucketing the user.
parent_id: ID representing group or experiment.
traffic_allocations: Traffic allocations representing traffic allotted to experiments or variations.
Returns:
Entity ID which may represent experiment or variation.
"""
bucketing_key = BUCKETING_ID_TEMPLATE.format(bucketing_id=bucketing_id, parent_id=parent_id)
bucketing_number = self._generate_bucket_value(bucketing_key)
self.config.logger.debug('Assigned bucket %s to user with bucketing ID "%s".' % (
bucketing_number,
bucketing_id
))
for traffic_allocation in traffic_allocations:
current_end_of_range = traffic_allocation.get('endOfRange')
if bucketing_number < current_end_of_range:
return traffic_allocation.get('entityId')
return None
|
[
"def",
"find_bucket",
"(",
"self",
",",
"bucketing_id",
",",
"parent_id",
",",
"traffic_allocations",
")",
":",
"bucketing_key",
"=",
"BUCKETING_ID_TEMPLATE",
".",
"format",
"(",
"bucketing_id",
"=",
"bucketing_id",
",",
"parent_id",
"=",
"parent_id",
")",
"bucketing_number",
"=",
"self",
".",
"_generate_bucket_value",
"(",
"bucketing_key",
")",
"self",
".",
"config",
".",
"logger",
".",
"debug",
"(",
"'Assigned bucket %s to user with bucketing ID \"%s\".'",
"%",
"(",
"bucketing_number",
",",
"bucketing_id",
")",
")",
"for",
"traffic_allocation",
"in",
"traffic_allocations",
":",
"current_end_of_range",
"=",
"traffic_allocation",
".",
"get",
"(",
"'endOfRange'",
")",
"if",
"bucketing_number",
"<",
"current_end_of_range",
":",
"return",
"traffic_allocation",
".",
"get",
"(",
"'entityId'",
")",
"return",
"None"
] |
Determine entity based on bucket value and traffic allocations.
Args:
bucketing_id: ID to be used for bucketing the user.
parent_id: ID representing group or experiment.
traffic_allocations: Traffic allocations representing traffic allotted to experiments or variations.
Returns:
Entity ID which may represent experiment or variation.
|
[
"Determine",
"entity",
"based",
"on",
"bucket",
"value",
"and",
"traffic",
"allocations",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/bucketer.py#L68-L92
|
12,142
|
optimizely/python-sdk
|
optimizely/bucketer.py
|
Bucketer.bucket
|
def bucket(self, experiment, user_id, bucketing_id):
""" For a given experiment and bucketing ID determines variation to be shown to user.
Args:
experiment: Object representing the experiment for which user is to be bucketed.
user_id: ID for user.
bucketing_id: ID to be used for bucketing the user.
Returns:
Variation in which user with ID user_id will be put in. None if no variation.
"""
if not experiment:
return None
# Determine if experiment is in a mutually exclusive group
if experiment.groupPolicy in GROUP_POLICIES:
group = self.config.get_group(experiment.groupId)
if not group:
return None
user_experiment_id = self.find_bucket(bucketing_id, experiment.groupId, group.trafficAllocation)
if not user_experiment_id:
self.config.logger.info('User "%s" is in no experiment.' % user_id)
return None
if user_experiment_id != experiment.id:
self.config.logger.info('User "%s" is not in experiment "%s" of group %s.' % (
user_id,
experiment.key,
experiment.groupId
))
return None
self.config.logger.info('User "%s" is in experiment %s of group %s.' % (
user_id,
experiment.key,
experiment.groupId
))
# Bucket user if not in white-list and in group (if any)
variation_id = self.find_bucket(bucketing_id, experiment.id, experiment.trafficAllocation)
if variation_id:
variation = self.config.get_variation_from_id(experiment.key, variation_id)
self.config.logger.info('User "%s" is in variation "%s" of experiment %s.' % (
user_id,
variation.key,
experiment.key
))
return variation
self.config.logger.info('User "%s" is in no variation.' % user_id)
return None
|
python
|
def bucket(self, experiment, user_id, bucketing_id):
""" For a given experiment and bucketing ID determines variation to be shown to user.
Args:
experiment: Object representing the experiment for which user is to be bucketed.
user_id: ID for user.
bucketing_id: ID to be used for bucketing the user.
Returns:
Variation in which user with ID user_id will be put in. None if no variation.
"""
if not experiment:
return None
# Determine if experiment is in a mutually exclusive group
if experiment.groupPolicy in GROUP_POLICIES:
group = self.config.get_group(experiment.groupId)
if not group:
return None
user_experiment_id = self.find_bucket(bucketing_id, experiment.groupId, group.trafficAllocation)
if not user_experiment_id:
self.config.logger.info('User "%s" is in no experiment.' % user_id)
return None
if user_experiment_id != experiment.id:
self.config.logger.info('User "%s" is not in experiment "%s" of group %s.' % (
user_id,
experiment.key,
experiment.groupId
))
return None
self.config.logger.info('User "%s" is in experiment %s of group %s.' % (
user_id,
experiment.key,
experiment.groupId
))
# Bucket user if not in white-list and in group (if any)
variation_id = self.find_bucket(bucketing_id, experiment.id, experiment.trafficAllocation)
if variation_id:
variation = self.config.get_variation_from_id(experiment.key, variation_id)
self.config.logger.info('User "%s" is in variation "%s" of experiment %s.' % (
user_id,
variation.key,
experiment.key
))
return variation
self.config.logger.info('User "%s" is in no variation.' % user_id)
return None
|
[
"def",
"bucket",
"(",
"self",
",",
"experiment",
",",
"user_id",
",",
"bucketing_id",
")",
":",
"if",
"not",
"experiment",
":",
"return",
"None",
"# Determine if experiment is in a mutually exclusive group",
"if",
"experiment",
".",
"groupPolicy",
"in",
"GROUP_POLICIES",
":",
"group",
"=",
"self",
".",
"config",
".",
"get_group",
"(",
"experiment",
".",
"groupId",
")",
"if",
"not",
"group",
":",
"return",
"None",
"user_experiment_id",
"=",
"self",
".",
"find_bucket",
"(",
"bucketing_id",
",",
"experiment",
".",
"groupId",
",",
"group",
".",
"trafficAllocation",
")",
"if",
"not",
"user_experiment_id",
":",
"self",
".",
"config",
".",
"logger",
".",
"info",
"(",
"'User \"%s\" is in no experiment.'",
"%",
"user_id",
")",
"return",
"None",
"if",
"user_experiment_id",
"!=",
"experiment",
".",
"id",
":",
"self",
".",
"config",
".",
"logger",
".",
"info",
"(",
"'User \"%s\" is not in experiment \"%s\" of group %s.'",
"%",
"(",
"user_id",
",",
"experiment",
".",
"key",
",",
"experiment",
".",
"groupId",
")",
")",
"return",
"None",
"self",
".",
"config",
".",
"logger",
".",
"info",
"(",
"'User \"%s\" is in experiment %s of group %s.'",
"%",
"(",
"user_id",
",",
"experiment",
".",
"key",
",",
"experiment",
".",
"groupId",
")",
")",
"# Bucket user if not in white-list and in group (if any)",
"variation_id",
"=",
"self",
".",
"find_bucket",
"(",
"bucketing_id",
",",
"experiment",
".",
"id",
",",
"experiment",
".",
"trafficAllocation",
")",
"if",
"variation_id",
":",
"variation",
"=",
"self",
".",
"config",
".",
"get_variation_from_id",
"(",
"experiment",
".",
"key",
",",
"variation_id",
")",
"self",
".",
"config",
".",
"logger",
".",
"info",
"(",
"'User \"%s\" is in variation \"%s\" of experiment %s.'",
"%",
"(",
"user_id",
",",
"variation",
".",
"key",
",",
"experiment",
".",
"key",
")",
")",
"return",
"variation",
"self",
".",
"config",
".",
"logger",
".",
"info",
"(",
"'User \"%s\" is in no variation.'",
"%",
"user_id",
")",
"return",
"None"
] |
For a given experiment and bucketing ID determines variation to be shown to user.
Args:
experiment: Object representing the experiment for which user is to be bucketed.
user_id: ID for user.
bucketing_id: ID to be used for bucketing the user.
Returns:
Variation in which user with ID user_id will be put in. None if no variation.
|
[
"For",
"a",
"given",
"experiment",
"and",
"bucketing",
"ID",
"determines",
"variation",
"to",
"be",
"shown",
"to",
"user",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/bucketer.py#L94-L147
|
12,143
|
optimizely/python-sdk
|
optimizely/project_config.py
|
ProjectConfig._generate_key_map
|
def _generate_key_map(entity_list, key, entity_class):
""" Helper method to generate map from key to entity object for given list of dicts.
Args:
entity_list: List consisting of dict.
key: Key in each dict which will be key in the map.
entity_class: Class representing the entity.
Returns:
Map mapping key to entity object.
"""
key_map = {}
for obj in entity_list:
key_map[obj[key]] = entity_class(**obj)
return key_map
|
python
|
def _generate_key_map(entity_list, key, entity_class):
""" Helper method to generate map from key to entity object for given list of dicts.
Args:
entity_list: List consisting of dict.
key: Key in each dict which will be key in the map.
entity_class: Class representing the entity.
Returns:
Map mapping key to entity object.
"""
key_map = {}
for obj in entity_list:
key_map[obj[key]] = entity_class(**obj)
return key_map
|
[
"def",
"_generate_key_map",
"(",
"entity_list",
",",
"key",
",",
"entity_class",
")",
":",
"key_map",
"=",
"{",
"}",
"for",
"obj",
"in",
"entity_list",
":",
"key_map",
"[",
"obj",
"[",
"key",
"]",
"]",
"=",
"entity_class",
"(",
"*",
"*",
"obj",
")",
"return",
"key_map"
] |
Helper method to generate map from key to entity object for given list of dicts.
Args:
entity_list: List consisting of dict.
key: Key in each dict which will be key in the map.
entity_class: Class representing the entity.
Returns:
Map mapping key to entity object.
|
[
"Helper",
"method",
"to",
"generate",
"map",
"from",
"key",
"to",
"entity",
"object",
"for",
"given",
"list",
"of",
"dicts",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/project_config.py#L134-L150
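Building a full ProjectConfig needs a datafile, so this is a standalone sketch of the key-map pattern that _generate_key_map implements; the Experiment namedtuple and the entity list are hypothetical stand-ins for the SDK's entity classes.

import collections

# Hypothetical entity class whose constructor accepts the dict fields as keyword arguments.
Experiment = collections.namedtuple('Experiment', ['key', 'id'])
entity_list = [{'key': 'exp_a', 'id': '111'}, {'key': 'exp_b', 'id': '222'}]

# Same pattern as _generate_key_map(entity_list, 'key', Experiment).
key_map = {obj['key']: Experiment(**obj) for obj in entity_list}
print(key_map['exp_a'].id)  # '111'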
|
12,144
|
optimizely/python-sdk
|
optimizely/project_config.py
|
ProjectConfig._deserialize_audience
|
def _deserialize_audience(audience_map):
""" Helper method to de-serialize and populate audience map with the condition list and structure.
Args:
audience_map: Dict mapping audience ID to audience object.
Returns:
Dict additionally consisting of condition list and structure on every audience object.
"""
for audience in audience_map.values():
condition_structure, condition_list = condition_helper.loads(audience.conditions)
audience.__dict__.update({
'conditionStructure': condition_structure,
'conditionList': condition_list
})
return audience_map
|
python
|
def _deserialize_audience(audience_map):
""" Helper method to de-serialize and populate audience map with the condition list and structure.
Args:
audience_map: Dict mapping audience ID to audience object.
Returns:
Dict additionally consisting of condition list and structure on every audience object.
"""
for audience in audience_map.values():
condition_structure, condition_list = condition_helper.loads(audience.conditions)
audience.__dict__.update({
'conditionStructure': condition_structure,
'conditionList': condition_list
})
return audience_map
|
[
"def",
"_deserialize_audience",
"(",
"audience_map",
")",
":",
"for",
"audience",
"in",
"audience_map",
".",
"values",
"(",
")",
":",
"condition_structure",
",",
"condition_list",
"=",
"condition_helper",
".",
"loads",
"(",
"audience",
".",
"conditions",
")",
"audience",
".",
"__dict__",
".",
"update",
"(",
"{",
"'conditionStructure'",
":",
"condition_structure",
",",
"'conditionList'",
":",
"condition_list",
"}",
")",
"return",
"audience_map"
] |
Helper method to de-serialize and populate audience map with the condition list and structure.
Args:
audience_map: Dict mapping audience ID to audience object.
Returns:
Dict additionally consisting of condition list and structure on every audience object.
|
[
"Helper",
"method",
"to",
"de",
"-",
"serialize",
"and",
"populate",
"audience",
"map",
"with",
"the",
"condition",
"list",
"and",
"structure",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/project_config.py#L153-L170
|
12,145
|
optimizely/python-sdk
|
optimizely/project_config.py
|
ProjectConfig.get_typecast_value
|
def get_typecast_value(self, value, type):
""" Helper method to determine actual value based on type of feature variable.
Args:
value: Value in string form as it was parsed from datafile.
type: Type denoting the feature flag type.
Return:
Value type-casted based on type of feature variable.
"""
if type == entities.Variable.Type.BOOLEAN:
return value == 'true'
elif type == entities.Variable.Type.INTEGER:
return int(value)
elif type == entities.Variable.Type.DOUBLE:
return float(value)
else:
return value
|
python
|
def get_typecast_value(self, value, type):
""" Helper method to determine actual value based on type of feature variable.
Args:
value: Value in string form as it was parsed from datafile.
type: Type denoting the feature flag type.
Return:
Value type-casted based on type of feature variable.
"""
if type == entities.Variable.Type.BOOLEAN:
return value == 'true'
elif type == entities.Variable.Type.INTEGER:
return int(value)
elif type == entities.Variable.Type.DOUBLE:
return float(value)
else:
return value
|
[
"def",
"get_typecast_value",
"(",
"self",
",",
"value",
",",
"type",
")",
":",
"if",
"type",
"==",
"entities",
".",
"Variable",
".",
"Type",
".",
"BOOLEAN",
":",
"return",
"value",
"==",
"'true'",
"elif",
"type",
"==",
"entities",
".",
"Variable",
".",
"Type",
".",
"INTEGER",
":",
"return",
"int",
"(",
"value",
")",
"elif",
"type",
"==",
"entities",
".",
"Variable",
".",
"Type",
".",
"DOUBLE",
":",
"return",
"float",
"(",
"value",
")",
"else",
":",
"return",
"value"
] |
Helper method to determine actual value based on type of feature variable.
Args:
value: Value in string form as it was parsed from datafile.
type: Type denoting the feature flag type.
Return:
Value type-casted based on type of feature variable.
|
[
"Helper",
"method",
"to",
"determine",
"actual",
"value",
"based",
"on",
"type",
"of",
"feature",
"variable",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/project_config.py#L172-L190
|
12,146
|
optimizely/python-sdk
|
optimizely/project_config.py
|
ProjectConfig.get_experiment_from_key
|
def get_experiment_from_key(self, experiment_key):
""" Get experiment for the provided experiment key.
Args:
experiment_key: Experiment key for which experiment is to be determined.
Returns:
Experiment corresponding to the provided experiment key.
"""
experiment = self.experiment_key_map.get(experiment_key)
if experiment:
return experiment
self.logger.error('Experiment key "%s" is not in datafile.' % experiment_key)
self.error_handler.handle_error(exceptions.InvalidExperimentException(enums.Errors.INVALID_EXPERIMENT_KEY_ERROR))
return None
|
python
|
def get_experiment_from_key(self, experiment_key):
""" Get experiment for the provided experiment key.
Args:
experiment_key: Experiment key for which experiment is to be determined.
Returns:
Experiment corresponding to the provided experiment key.
"""
experiment = self.experiment_key_map.get(experiment_key)
if experiment:
return experiment
self.logger.error('Experiment key "%s" is not in datafile.' % experiment_key)
self.error_handler.handle_error(exceptions.InvalidExperimentException(enums.Errors.INVALID_EXPERIMENT_KEY_ERROR))
return None
|
[
"def",
"get_experiment_from_key",
"(",
"self",
",",
"experiment_key",
")",
":",
"experiment",
"=",
"self",
".",
"experiment_key_map",
".",
"get",
"(",
"experiment_key",
")",
"if",
"experiment",
":",
"return",
"experiment",
"self",
".",
"logger",
".",
"error",
"(",
"'Experiment key \"%s\" is not in datafile.'",
"%",
"experiment_key",
")",
"self",
".",
"error_handler",
".",
"handle_error",
"(",
"exceptions",
".",
"InvalidExperimentException",
"(",
"enums",
".",
"Errors",
".",
"INVALID_EXPERIMENT_KEY_ERROR",
")",
")",
"return",
"None"
] |
Get experiment for the provided experiment key.
Args:
experiment_key: Experiment key for which experiment is to be determined.
Returns:
Experiment corresponding to the provided experiment key.
|
[
"Get",
"experiment",
"for",
"the",
"provided",
"experiment",
"key",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/project_config.py#L228-L245
|
12,147
|
optimizely/python-sdk
|
optimizely/project_config.py
|
ProjectConfig.get_experiment_from_id
|
def get_experiment_from_id(self, experiment_id):
""" Get experiment for the provided experiment ID.
Args:
experiment_id: Experiment ID for which experiment is to be determined.
Returns:
Experiment corresponding to the provided experiment ID.
"""
experiment = self.experiment_id_map.get(experiment_id)
if experiment:
return experiment
self.logger.error('Experiment ID "%s" is not in datafile.' % experiment_id)
self.error_handler.handle_error(exceptions.InvalidExperimentException(enums.Errors.INVALID_EXPERIMENT_KEY_ERROR))
return None
|
python
|
def get_experiment_from_id(self, experiment_id):
""" Get experiment for the provided experiment ID.
Args:
experiment_id: Experiment ID for which experiment is to be determined.
Returns:
Experiment corresponding to the provided experiment ID.
"""
experiment = self.experiment_id_map.get(experiment_id)
if experiment:
return experiment
self.logger.error('Experiment ID "%s" is not in datafile.' % experiment_id)
self.error_handler.handle_error(exceptions.InvalidExperimentException(enums.Errors.INVALID_EXPERIMENT_KEY_ERROR))
return None
|
[
"def",
"get_experiment_from_id",
"(",
"self",
",",
"experiment_id",
")",
":",
"experiment",
"=",
"self",
".",
"experiment_id_map",
".",
"get",
"(",
"experiment_id",
")",
"if",
"experiment",
":",
"return",
"experiment",
"self",
".",
"logger",
".",
"error",
"(",
"'Experiment ID \"%s\" is not in datafile.'",
"%",
"experiment_id",
")",
"self",
".",
"error_handler",
".",
"handle_error",
"(",
"exceptions",
".",
"InvalidExperimentException",
"(",
"enums",
".",
"Errors",
".",
"INVALID_EXPERIMENT_KEY_ERROR",
")",
")",
"return",
"None"
] |
Get experiment for the provided experiment ID.
Args:
experiment_id: Experiment ID for which experiment is to be determined.
Returns:
Experiment corresponding to the provided experiment ID.
|
[
"Get",
"experiment",
"for",
"the",
"provided",
"experiment",
"ID",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/project_config.py#L247-L264
|
12,148
|
optimizely/python-sdk
|
optimizely/project_config.py
|
ProjectConfig.get_group
|
def get_group(self, group_id):
""" Get group for the provided group ID.
Args:
group_id: Group ID for which group is to be determined.
Returns:
Group corresponding to the provided group ID.
"""
group = self.group_id_map.get(group_id)
if group:
return group
self.logger.error('Group ID "%s" is not in datafile.' % group_id)
self.error_handler.handle_error(exceptions.InvalidGroupException(enums.Errors.INVALID_GROUP_ID_ERROR))
return None
|
python
|
def get_group(self, group_id):
""" Get group for the provided group ID.
Args:
group_id: Group ID for which group is to be determined.
Returns:
Group corresponding to the provided group ID.
"""
group = self.group_id_map.get(group_id)
if group:
return group
self.logger.error('Group ID "%s" is not in datafile.' % group_id)
self.error_handler.handle_error(exceptions.InvalidGroupException(enums.Errors.INVALID_GROUP_ID_ERROR))
return None
|
[
"def",
"get_group",
"(",
"self",
",",
"group_id",
")",
":",
"group",
"=",
"self",
".",
"group_id_map",
".",
"get",
"(",
"group_id",
")",
"if",
"group",
":",
"return",
"group",
"self",
".",
"logger",
".",
"error",
"(",
"'Group ID \"%s\" is not in datafile.'",
"%",
"group_id",
")",
"self",
".",
"error_handler",
".",
"handle_error",
"(",
"exceptions",
".",
"InvalidGroupException",
"(",
"enums",
".",
"Errors",
".",
"INVALID_GROUP_ID_ERROR",
")",
")",
"return",
"None"
] |
Get group for the provided group ID.
Args:
group_id: Group ID for which group is to be determined.
Returns:
Group corresponding to the provided group ID.
|
[
"Get",
"group",
"for",
"the",
"provided",
"group",
"ID",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/project_config.py#L266-L283
|
12,149
|
optimizely/python-sdk
|
optimizely/project_config.py
|
ProjectConfig.get_audience
|
def get_audience(self, audience_id):
""" Get audience object for the provided audience ID.
Args:
audience_id: ID of the audience.
Returns:
Dict representing the audience.
"""
audience = self.audience_id_map.get(audience_id)
if audience:
return audience
self.logger.error('Audience ID "%s" is not in datafile.' % audience_id)
self.error_handler.handle_error(exceptions.InvalidAudienceException((enums.Errors.INVALID_AUDIENCE_ERROR)))
|
python
|
def get_audience(self, audience_id):
""" Get audience object for the provided audience ID.
Args:
audience_id: ID of the audience.
Returns:
Dict representing the audience.
"""
audience = self.audience_id_map.get(audience_id)
if audience:
return audience
self.logger.error('Audience ID "%s" is not in datafile.' % audience_id)
self.error_handler.handle_error(exceptions.InvalidAudienceException((enums.Errors.INVALID_AUDIENCE_ERROR)))
|
[
"def",
"get_audience",
"(",
"self",
",",
"audience_id",
")",
":",
"audience",
"=",
"self",
".",
"audience_id_map",
".",
"get",
"(",
"audience_id",
")",
"if",
"audience",
":",
"return",
"audience",
"self",
".",
"logger",
".",
"error",
"(",
"'Audience ID \"%s\" is not in datafile.'",
"%",
"audience_id",
")",
"self",
".",
"error_handler",
".",
"handle_error",
"(",
"exceptions",
".",
"InvalidAudienceException",
"(",
"(",
"enums",
".",
"Errors",
".",
"INVALID_AUDIENCE_ERROR",
")",
")",
")"
] |
Get audience object for the provided audience ID.
Args:
audience_id: ID of the audience.
Returns:
Dict representing the audience.
|
[
"Get",
"audience",
"object",
"for",
"the",
"provided",
"audience",
"ID",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/project_config.py#L285-L300
|
12,150
|
optimizely/python-sdk
|
optimizely/project_config.py
|
ProjectConfig.get_variation_from_key
|
def get_variation_from_key(self, experiment_key, variation_key):
""" Get variation given experiment and variation key.
Args:
      experiment_key: Key representing parent experiment of variation.
      variation_key: Key representing the variation.
    Returns:
Object representing the variation.
"""
variation_map = self.variation_key_map.get(experiment_key)
if variation_map:
variation = variation_map.get(variation_key)
if variation:
return variation
else:
self.logger.error('Variation key "%s" is not in datafile.' % variation_key)
self.error_handler.handle_error(exceptions.InvalidVariationException(enums.Errors.INVALID_VARIATION_ERROR))
return None
self.logger.error('Experiment key "%s" is not in datafile.' % experiment_key)
self.error_handler.handle_error(exceptions.InvalidExperimentException(enums.Errors.INVALID_EXPERIMENT_KEY_ERROR))
return None
|
python
|
def get_variation_from_key(self, experiment_key, variation_key):
""" Get variation given experiment and variation key.
Args:
      experiment_key: Key representing parent experiment of variation.
      variation_key: Key representing the variation.
    Returns:
Object representing the variation.
"""
variation_map = self.variation_key_map.get(experiment_key)
if variation_map:
variation = variation_map.get(variation_key)
if variation:
return variation
else:
self.logger.error('Variation key "%s" is not in datafile.' % variation_key)
self.error_handler.handle_error(exceptions.InvalidVariationException(enums.Errors.INVALID_VARIATION_ERROR))
return None
self.logger.error('Experiment key "%s" is not in datafile.' % experiment_key)
self.error_handler.handle_error(exceptions.InvalidExperimentException(enums.Errors.INVALID_EXPERIMENT_KEY_ERROR))
return None
|
[
"def",
"get_variation_from_key",
"(",
"self",
",",
"experiment_key",
",",
"variation_key",
")",
":",
"variation_map",
"=",
"self",
".",
"variation_key_map",
".",
"get",
"(",
"experiment_key",
")",
"if",
"variation_map",
":",
"variation",
"=",
"variation_map",
".",
"get",
"(",
"variation_key",
")",
"if",
"variation",
":",
"return",
"variation",
"else",
":",
"self",
".",
"logger",
".",
"error",
"(",
"'Variation key \"%s\" is not in datafile.'",
"%",
"variation_key",
")",
"self",
".",
"error_handler",
".",
"handle_error",
"(",
"exceptions",
".",
"InvalidVariationException",
"(",
"enums",
".",
"Errors",
".",
"INVALID_VARIATION_ERROR",
")",
")",
"return",
"None",
"self",
".",
"logger",
".",
"error",
"(",
"'Experiment key \"%s\" is not in datafile.'",
"%",
"experiment_key",
")",
"self",
".",
"error_handler",
".",
"handle_error",
"(",
"exceptions",
".",
"InvalidExperimentException",
"(",
"enums",
".",
"Errors",
".",
"INVALID_EXPERIMENT_KEY_ERROR",
")",
")",
"return",
"None"
] |
Get variation given experiment and variation key.
Args:
experiment_key: Key representing parent experiment of variation.
variation_key: Key representing the variation.
Returns:
Object representing the variation.
|
[
"Get",
"variation",
"given",
"experiment",
"and",
"variation",
"key",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/project_config.py#L302-L326
|
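A short sketch of the two-level lookup documented above; `config` is assumed to be an existing ProjectConfig instance and both keys are made up.

variation = config.get_variation_from_key('checkout_test', 'variation_a')  # hypothetical keys
if variation:
    # Both the experiment key and the variation key resolved against the datafile.
    print('Variation ID:', variation.id)
else:
    # One of the keys was missing; the method has already logged which one and
    # passed the corresponding exception to the error handler.
    print('Lookup failed')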
12,151
|
optimizely/python-sdk
|
optimizely/project_config.py
|
ProjectConfig.get_variation_from_id
|
def get_variation_from_id(self, experiment_key, variation_id):
""" Get variation given experiment and variation ID.
Args:
      experiment_key: Key representing parent experiment of variation.
      variation_id: ID representing the variation.
    Returns:
Object representing the variation.
"""
variation_map = self.variation_id_map.get(experiment_key)
if variation_map:
variation = variation_map.get(variation_id)
if variation:
return variation
else:
self.logger.error('Variation ID "%s" is not in datafile.' % variation_id)
self.error_handler.handle_error(exceptions.InvalidVariationException(enums.Errors.INVALID_VARIATION_ERROR))
return None
self.logger.error('Experiment key "%s" is not in datafile.' % experiment_key)
self.error_handler.handle_error(exceptions.InvalidExperimentException(enums.Errors.INVALID_EXPERIMENT_KEY_ERROR))
return None
|
python
|
def get_variation_from_id(self, experiment_key, variation_id):
""" Get variation given experiment and variation ID.
Args:
      experiment_key: Key representing parent experiment of variation.
      variation_id: ID representing the variation.
    Returns:
Object representing the variation.
"""
variation_map = self.variation_id_map.get(experiment_key)
if variation_map:
variation = variation_map.get(variation_id)
if variation:
return variation
else:
self.logger.error('Variation ID "%s" is not in datafile.' % variation_id)
self.error_handler.handle_error(exceptions.InvalidVariationException(enums.Errors.INVALID_VARIATION_ERROR))
return None
self.logger.error('Experiment key "%s" is not in datafile.' % experiment_key)
self.error_handler.handle_error(exceptions.InvalidExperimentException(enums.Errors.INVALID_EXPERIMENT_KEY_ERROR))
return None
|
[
"def",
"get_variation_from_id",
"(",
"self",
",",
"experiment_key",
",",
"variation_id",
")",
":",
"variation_map",
"=",
"self",
".",
"variation_id_map",
".",
"get",
"(",
"experiment_key",
")",
"if",
"variation_map",
":",
"variation",
"=",
"variation_map",
".",
"get",
"(",
"variation_id",
")",
"if",
"variation",
":",
"return",
"variation",
"else",
":",
"self",
".",
"logger",
".",
"error",
"(",
"'Variation ID \"%s\" is not in datafile.'",
"%",
"variation_id",
")",
"self",
".",
"error_handler",
".",
"handle_error",
"(",
"exceptions",
".",
"InvalidVariationException",
"(",
"enums",
".",
"Errors",
".",
"INVALID_VARIATION_ERROR",
")",
")",
"return",
"None",
"self",
".",
"logger",
".",
"error",
"(",
"'Experiment key \"%s\" is not in datafile.'",
"%",
"experiment_key",
")",
"self",
".",
"error_handler",
".",
"handle_error",
"(",
"exceptions",
".",
"InvalidExperimentException",
"(",
"enums",
".",
"Errors",
".",
"INVALID_EXPERIMENT_KEY_ERROR",
")",
")",
"return",
"None"
] |
Get variation given experiment and variation ID.
Args:
experiment_key: Key representing parent experiment of variation.
variation_id: ID representing the variation.
Returns:
Object representing the variation.
|
[
"Get",
"variation",
"given",
"experiment",
"and",
"variation",
"ID",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/project_config.py#L328-L352
|
12,152
|
optimizely/python-sdk
|
optimizely/project_config.py
|
ProjectConfig.get_event
|
def get_event(self, event_key):
""" Get event for the provided event key.
Args:
event_key: Event key for which event is to be determined.
Returns:
Event corresponding to the provided event key.
"""
event = self.event_key_map.get(event_key)
if event:
return event
self.logger.error('Event "%s" is not in datafile.' % event_key)
self.error_handler.handle_error(exceptions.InvalidEventException(enums.Errors.INVALID_EVENT_KEY_ERROR))
return None
|
python
|
def get_event(self, event_key):
""" Get event for the provided event key.
Args:
event_key: Event key for which event is to be determined.
Returns:
Event corresponding to the provided event key.
"""
event = self.event_key_map.get(event_key)
if event:
return event
self.logger.error('Event "%s" is not in datafile.' % event_key)
self.error_handler.handle_error(exceptions.InvalidEventException(enums.Errors.INVALID_EVENT_KEY_ERROR))
return None
|
[
"def",
"get_event",
"(",
"self",
",",
"event_key",
")",
":",
"event",
"=",
"self",
".",
"event_key_map",
".",
"get",
"(",
"event_key",
")",
"if",
"event",
":",
"return",
"event",
"self",
".",
"logger",
".",
"error",
"(",
"'Event \"%s\" is not in datafile.'",
"%",
"event_key",
")",
"self",
".",
"error_handler",
".",
"handle_error",
"(",
"exceptions",
".",
"InvalidEventException",
"(",
"enums",
".",
"Errors",
".",
"INVALID_EVENT_KEY_ERROR",
")",
")",
"return",
"None"
] |
Get event for the provided event key.
Args:
event_key: Event key for which event is to be determined.
Returns:
Event corresponding to the provided event key.
|
[
"Get",
"event",
"for",
"the",
"provided",
"event",
"key",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/project_config.py#L354-L371
|
12,153
|
optimizely/python-sdk
|
optimizely/project_config.py
|
ProjectConfig.get_attribute_id
|
def get_attribute_id(self, attribute_key):
""" Get attribute ID for the provided attribute key.
Args:
attribute_key: Attribute key for which attribute is to be fetched.
Returns:
Attribute ID corresponding to the provided attribute key.
"""
attribute = self.attribute_key_map.get(attribute_key)
has_reserved_prefix = attribute_key.startswith(RESERVED_ATTRIBUTE_PREFIX)
if attribute:
if has_reserved_prefix:
self.logger.warning(('Attribute %s unexpectedly has reserved prefix %s; using attribute ID '
'instead of reserved attribute name.' % (attribute_key, RESERVED_ATTRIBUTE_PREFIX)))
return attribute.id
if has_reserved_prefix:
return attribute_key
self.logger.error('Attribute "%s" is not in datafile.' % attribute_key)
self.error_handler.handle_error(exceptions.InvalidAttributeException(enums.Errors.INVALID_ATTRIBUTE_ERROR))
return None
|
python
|
def get_attribute_id(self, attribute_key):
""" Get attribute ID for the provided attribute key.
Args:
attribute_key: Attribute key for which attribute is to be fetched.
Returns:
Attribute ID corresponding to the provided attribute key.
"""
attribute = self.attribute_key_map.get(attribute_key)
has_reserved_prefix = attribute_key.startswith(RESERVED_ATTRIBUTE_PREFIX)
if attribute:
if has_reserved_prefix:
self.logger.warning(('Attribute %s unexpectedly has reserved prefix %s; using attribute ID '
'instead of reserved attribute name.' % (attribute_key, RESERVED_ATTRIBUTE_PREFIX)))
return attribute.id
if has_reserved_prefix:
return attribute_key
self.logger.error('Attribute "%s" is not in datafile.' % attribute_key)
self.error_handler.handle_error(exceptions.InvalidAttributeException(enums.Errors.INVALID_ATTRIBUTE_ERROR))
return None
|
[
"def",
"get_attribute_id",
"(",
"self",
",",
"attribute_key",
")",
":",
"attribute",
"=",
"self",
".",
"attribute_key_map",
".",
"get",
"(",
"attribute_key",
")",
"has_reserved_prefix",
"=",
"attribute_key",
".",
"startswith",
"(",
"RESERVED_ATTRIBUTE_PREFIX",
")",
"if",
"attribute",
":",
"if",
"has_reserved_prefix",
":",
"self",
".",
"logger",
".",
"warning",
"(",
"(",
"'Attribute %s unexpectedly has reserved prefix %s; using attribute ID '",
"'instead of reserved attribute name.'",
"%",
"(",
"attribute_key",
",",
"RESERVED_ATTRIBUTE_PREFIX",
")",
")",
")",
"return",
"attribute",
".",
"id",
"if",
"has_reserved_prefix",
":",
"return",
"attribute_key",
"self",
".",
"logger",
".",
"error",
"(",
"'Attribute \"%s\" is not in datafile.'",
"%",
"attribute_key",
")",
"self",
".",
"error_handler",
".",
"handle_error",
"(",
"exceptions",
".",
"InvalidAttributeException",
"(",
"enums",
".",
"Errors",
".",
"INVALID_ATTRIBUTE_ERROR",
")",
")",
"return",
"None"
] |
Get attribute ID for the provided attribute key.
Args:
attribute_key: Attribute key for which attribute is to be fetched.
Returns:
Attribute ID corresponding to the provided attribute key.
|
[
"Get",
"attribute",
"ID",
"for",
"the",
"provided",
"attribute",
"key",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/project_config.py#L373-L398
|
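The reserved-prefix branch in get_attribute_id is easiest to see side by side. A sketch under the assumption that `config` is an existing ProjectConfig and that RESERVED_ATTRIBUTE_PREFIX is '$opt_' at this SDK version; the attribute keys are hypothetical.

# Plain key: returns the attribute's ID from the datafile, or None (with an ERROR log)
# when the key is unknown.
plan_attr_id = config.get_attribute_id('subscription_plan')

# Reserved key: when it is not also declared in the datafile, the key itself is
# returned unchanged, so '$opt_bucketing_id' doubles as its own attribute ID.
bucketing_attr_id = config.get_attribute_id('$opt_bucketing_id')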
12,154
|
optimizely/python-sdk
|
optimizely/project_config.py
|
ProjectConfig.get_feature_from_key
|
def get_feature_from_key(self, feature_key):
""" Get feature for the provided feature key.
Args:
feature_key: Feature key for which feature is to be fetched.
Returns:
Feature corresponding to the provided feature key.
"""
feature = self.feature_key_map.get(feature_key)
if feature:
return feature
self.logger.error('Feature "%s" is not in datafile.' % feature_key)
return None
|
python
|
def get_feature_from_key(self, feature_key):
""" Get feature for the provided feature key.
Args:
feature_key: Feature key for which feature is to be fetched.
Returns:
Feature corresponding to the provided feature key.
"""
feature = self.feature_key_map.get(feature_key)
if feature:
return feature
self.logger.error('Feature "%s" is not in datafile.' % feature_key)
return None
|
[
"def",
"get_feature_from_key",
"(",
"self",
",",
"feature_key",
")",
":",
"feature",
"=",
"self",
".",
"feature_key_map",
".",
"get",
"(",
"feature_key",
")",
"if",
"feature",
":",
"return",
"feature",
"self",
".",
"logger",
".",
"error",
"(",
"'Feature \"%s\" is not in datafile.'",
"%",
"feature_key",
")",
"return",
"None"
] |
Get feature for the provided feature key.
Args:
feature_key: Feature key for which feature is to be fetched.
Returns:
Feature corresponding to the provided feature key.
|
[
"Get",
"feature",
"for",
"the",
"provided",
"feature",
"key",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/project_config.py#L400-L415
|
12,155
|
optimizely/python-sdk
|
optimizely/project_config.py
|
ProjectConfig.get_rollout_from_id
|
def get_rollout_from_id(self, rollout_id):
""" Get rollout for the provided ID.
Args:
rollout_id: ID of the rollout to be fetched.
Returns:
Rollout corresponding to the provided ID.
"""
layer = self.rollout_id_map.get(rollout_id)
if layer:
return layer
self.logger.error('Rollout with ID "%s" is not in datafile.' % rollout_id)
return None
|
python
|
def get_rollout_from_id(self, rollout_id):
""" Get rollout for the provided ID.
Args:
rollout_id: ID of the rollout to be fetched.
Returns:
Rollout corresponding to the provided ID.
"""
layer = self.rollout_id_map.get(rollout_id)
if layer:
return layer
self.logger.error('Rollout with ID "%s" is not in datafile.' % rollout_id)
return None
|
[
"def",
"get_rollout_from_id",
"(",
"self",
",",
"rollout_id",
")",
":",
"layer",
"=",
"self",
".",
"rollout_id_map",
".",
"get",
"(",
"rollout_id",
")",
"if",
"layer",
":",
"return",
"layer",
"self",
".",
"logger",
".",
"error",
"(",
"'Rollout with ID \"%s\" is not in datafile.'",
"%",
"rollout_id",
")",
"return",
"None"
] |
Get rollout for the provided ID.
Args:
rollout_id: ID of the rollout to be fetched.
Returns:
Rollout corresponding to the provided ID.
|
[
"Get",
"rollout",
"for",
"the",
"provided",
"ID",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/project_config.py#L417-L432
|
12,156
|
optimizely/python-sdk
|
optimizely/project_config.py
|
ProjectConfig.get_variable_value_for_variation
|
def get_variable_value_for_variation(self, variable, variation):
""" Get the variable value for the given variation.
Args:
variable: The Variable for which we are getting the value.
variation: The Variation for which we are getting the variable value.
Returns:
The variable value or None if any of the inputs are invalid.
"""
if not variable or not variation:
return None
if variation.id not in self.variation_variable_usage_map:
self.logger.error('Variation with ID "%s" is not in the datafile.' % variation.id)
return None
# Get all variable usages for the given variation
variable_usages = self.variation_variable_usage_map[variation.id]
# Find usage in given variation
variable_usage = None
if variable_usages:
variable_usage = variable_usages.get(variable.id)
if variable_usage:
variable_value = variable_usage.value
self.logger.info('Value for variable "%s" for variation "%s" is "%s".' % (
variable.key,
variation.key,
variable_value
))
else:
variable_value = variable.defaultValue
self.logger.info('Variable "%s" is not used in variation "%s". Assigning default value "%s".' % (
variable.key,
variation.key,
variable_value
))
return variable_value
|
python
|
def get_variable_value_for_variation(self, variable, variation):
""" Get the variable value for the given variation.
Args:
variable: The Variable for which we are getting the value.
variation: The Variation for which we are getting the variable value.
Returns:
The variable value or None if any of the inputs are invalid.
"""
if not variable or not variation:
return None
if variation.id not in self.variation_variable_usage_map:
self.logger.error('Variation with ID "%s" is not in the datafile.' % variation.id)
return None
# Get all variable usages for the given variation
variable_usages = self.variation_variable_usage_map[variation.id]
# Find usage in given variation
variable_usage = None
if variable_usages:
variable_usage = variable_usages.get(variable.id)
if variable_usage:
variable_value = variable_usage.value
self.logger.info('Value for variable "%s" for variation "%s" is "%s".' % (
variable.key,
variation.key,
variable_value
))
else:
variable_value = variable.defaultValue
self.logger.info('Variable "%s" is not used in variation "%s". Assigning default value "%s".' % (
variable.key,
variation.key,
variable_value
))
return variable_value
|
[
"def",
"get_variable_value_for_variation",
"(",
"self",
",",
"variable",
",",
"variation",
")",
":",
"if",
"not",
"variable",
"or",
"not",
"variation",
":",
"return",
"None",
"if",
"variation",
".",
"id",
"not",
"in",
"self",
".",
"variation_variable_usage_map",
":",
"self",
".",
"logger",
".",
"error",
"(",
"'Variation with ID \"%s\" is not in the datafile.'",
"%",
"variation",
".",
"id",
")",
"return",
"None",
"# Get all variable usages for the given variation",
"variable_usages",
"=",
"self",
".",
"variation_variable_usage_map",
"[",
"variation",
".",
"id",
"]",
"# Find usage in given variation",
"variable_usage",
"=",
"None",
"if",
"variable_usages",
":",
"variable_usage",
"=",
"variable_usages",
".",
"get",
"(",
"variable",
".",
"id",
")",
"if",
"variable_usage",
":",
"variable_value",
"=",
"variable_usage",
".",
"value",
"self",
".",
"logger",
".",
"info",
"(",
"'Value for variable \"%s\" for variation \"%s\" is \"%s\".'",
"%",
"(",
"variable",
".",
"key",
",",
"variation",
".",
"key",
",",
"variable_value",
")",
")",
"else",
":",
"variable_value",
"=",
"variable",
".",
"defaultValue",
"self",
".",
"logger",
".",
"info",
"(",
"'Variable \"%s\" is not used in variation \"%s\". Assigning default value \"%s\".'",
"%",
"(",
"variable",
".",
"key",
",",
"variation",
".",
"key",
",",
"variable_value",
")",
")",
"return",
"variable_value"
] |
Get the variable value for the given variation.
Args:
variable: The Variable for which we are getting the value.
variation: The Variation for which we are getting the variable value.
Returns:
The variable value or None if any of the inputs are invalid.
|
[
"Get",
"the",
"variable",
"value",
"for",
"the",
"given",
"variation",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/project_config.py#L434-L476
|
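A sketch of the fallback behaviour documented above, assuming `config` is an existing ProjectConfig and that `variable` and `variation` are entities already looked up elsewhere (for example via get_variable_for_feature in the next entry and the decision service).

value = config.get_variable_value_for_variation(variable, variation)
if value is not None:
    # Either the usage value recorded for this variation or variable.defaultValue when
    # the variation does not override the variable; both outcomes are logged at INFO.
    # Values arrive as strings here and are typecast later by the caller.
    print('Raw variable value:', value)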
12,157
|
optimizely/python-sdk
|
optimizely/project_config.py
|
ProjectConfig.get_variable_for_feature
|
def get_variable_for_feature(self, feature_key, variable_key):
""" Get the variable with the given variable key for the given feature.
Args:
feature_key: The key of the feature for which we are getting the variable.
variable_key: The key of the variable we are getting.
Returns:
Variable with the given key in the given variation.
"""
feature = self.feature_key_map.get(feature_key)
if not feature:
self.logger.error('Feature with key "%s" not found in the datafile.' % feature_key)
return None
if variable_key not in feature.variables:
self.logger.error('Variable with key "%s" not found in the datafile.' % variable_key)
return None
return feature.variables.get(variable_key)
|
python
|
def get_variable_for_feature(self, feature_key, variable_key):
""" Get the variable with the given variable key for the given feature.
Args:
feature_key: The key of the feature for which we are getting the variable.
variable_key: The key of the variable we are getting.
Returns:
Variable with the given key in the given variation.
"""
feature = self.feature_key_map.get(feature_key)
if not feature:
self.logger.error('Feature with key "%s" not found in the datafile.' % feature_key)
return None
if variable_key not in feature.variables:
self.logger.error('Variable with key "%s" not found in the datafile.' % variable_key)
return None
return feature.variables.get(variable_key)
|
[
"def",
"get_variable_for_feature",
"(",
"self",
",",
"feature_key",
",",
"variable_key",
")",
":",
"feature",
"=",
"self",
".",
"feature_key_map",
".",
"get",
"(",
"feature_key",
")",
"if",
"not",
"feature",
":",
"self",
".",
"logger",
".",
"error",
"(",
"'Feature with key \"%s\" not found in the datafile.'",
"%",
"feature_key",
")",
"return",
"None",
"if",
"variable_key",
"not",
"in",
"feature",
".",
"variables",
":",
"self",
".",
"logger",
".",
"error",
"(",
"'Variable with key \"%s\" not found in the datafile.'",
"%",
"variable_key",
")",
"return",
"None",
"return",
"feature",
".",
"variables",
".",
"get",
"(",
"variable_key",
")"
] |
Get the variable with the given variable key for the given feature.
Args:
feature_key: The key of the feature for which we are getting the variable.
variable_key: The key of the variable we are getting.
Returns:
Variable with the given key in the given variation.
|
[
"Get",
"the",
"variable",
"with",
"the",
"given",
"variable",
"key",
"for",
"the",
"given",
"feature",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/project_config.py#L478-L497
|
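Usage sketch for get_variable_for_feature; `config` is assumed to exist and the keys are illustrative.

variable = config.get_variable_for_feature('checkout_flow', 'button_color')
if variable is not None:
    # Variable entities carry key, type (boolean/double/integer/string) and defaultValue.
    print(variable.key, variable.type, variable.defaultValue)
else:
    # Either the feature key or the variable key was missing; the exact miss was
    # already logged at ERROR by the method above.
    print('Variable not found')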
12,158
|
optimizely/python-sdk
|
optimizely/project_config.py
|
ProjectConfig.set_forced_variation
|
def set_forced_variation(self, experiment_key, user_id, variation_key):
""" Sets users to a map of experiments to forced variations.
Args:
experiment_key: Key for experiment.
user_id: The user ID.
variation_key: Key for variation. If None, then clear the existing experiment-to-variation mapping.
Returns:
A boolean value that indicates if the set completed successfully.
"""
experiment = self.get_experiment_from_key(experiment_key)
if not experiment:
# The invalid experiment key will be logged inside this call.
return False
experiment_id = experiment.id
if variation_key is None:
if user_id in self.forced_variation_map:
experiment_to_variation_map = self.forced_variation_map.get(user_id)
if experiment_id in experiment_to_variation_map:
del(self.forced_variation_map[user_id][experiment_id])
self.logger.debug('Variation mapped to experiment "%s" has been removed for user "%s".' % (
experiment_key,
user_id
))
else:
self.logger.debug('Nothing to remove. Variation mapped to experiment "%s" for user "%s" does not exist.' % (
experiment_key,
user_id
))
else:
self.logger.debug('Nothing to remove. User "%s" does not exist in the forced variation map.' % user_id)
return True
if not validator.is_non_empty_string(variation_key):
self.logger.debug('Variation key is invalid.')
return False
forced_variation = self.get_variation_from_key(experiment_key, variation_key)
if not forced_variation:
# The invalid variation key will be logged inside this call.
return False
variation_id = forced_variation.id
if user_id not in self.forced_variation_map:
self.forced_variation_map[user_id] = {experiment_id: variation_id}
else:
self.forced_variation_map[user_id][experiment_id] = variation_id
self.logger.debug('Set variation "%s" for experiment "%s" and user "%s" in the forced variation map.' % (
variation_id,
experiment_id,
user_id
))
return True
|
python
|
def set_forced_variation(self, experiment_key, user_id, variation_key):
""" Sets users to a map of experiments to forced variations.
Args:
experiment_key: Key for experiment.
user_id: The user ID.
variation_key: Key for variation. If None, then clear the existing experiment-to-variation mapping.
Returns:
A boolean value that indicates if the set completed successfully.
"""
experiment = self.get_experiment_from_key(experiment_key)
if not experiment:
# The invalid experiment key will be logged inside this call.
return False
experiment_id = experiment.id
if variation_key is None:
if user_id in self.forced_variation_map:
experiment_to_variation_map = self.forced_variation_map.get(user_id)
if experiment_id in experiment_to_variation_map:
del(self.forced_variation_map[user_id][experiment_id])
self.logger.debug('Variation mapped to experiment "%s" has been removed for user "%s".' % (
experiment_key,
user_id
))
else:
self.logger.debug('Nothing to remove. Variation mapped to experiment "%s" for user "%s" does not exist.' % (
experiment_key,
user_id
))
else:
self.logger.debug('Nothing to remove. User "%s" does not exist in the forced variation map.' % user_id)
return True
if not validator.is_non_empty_string(variation_key):
self.logger.debug('Variation key is invalid.')
return False
forced_variation = self.get_variation_from_key(experiment_key, variation_key)
if not forced_variation:
# The invalid variation key will be logged inside this call.
return False
variation_id = forced_variation.id
if user_id not in self.forced_variation_map:
self.forced_variation_map[user_id] = {experiment_id: variation_id}
else:
self.forced_variation_map[user_id][experiment_id] = variation_id
self.logger.debug('Set variation "%s" for experiment "%s" and user "%s" in the forced variation map.' % (
variation_id,
experiment_id,
user_id
))
return True
|
[
"def",
"set_forced_variation",
"(",
"self",
",",
"experiment_key",
",",
"user_id",
",",
"variation_key",
")",
":",
"experiment",
"=",
"self",
".",
"get_experiment_from_key",
"(",
"experiment_key",
")",
"if",
"not",
"experiment",
":",
"# The invalid experiment key will be logged inside this call.",
"return",
"False",
"experiment_id",
"=",
"experiment",
".",
"id",
"if",
"variation_key",
"is",
"None",
":",
"if",
"user_id",
"in",
"self",
".",
"forced_variation_map",
":",
"experiment_to_variation_map",
"=",
"self",
".",
"forced_variation_map",
".",
"get",
"(",
"user_id",
")",
"if",
"experiment_id",
"in",
"experiment_to_variation_map",
":",
"del",
"(",
"self",
".",
"forced_variation_map",
"[",
"user_id",
"]",
"[",
"experiment_id",
"]",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"'Variation mapped to experiment \"%s\" has been removed for user \"%s\".'",
"%",
"(",
"experiment_key",
",",
"user_id",
")",
")",
"else",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"'Nothing to remove. Variation mapped to experiment \"%s\" for user \"%s\" does not exist.'",
"%",
"(",
"experiment_key",
",",
"user_id",
")",
")",
"else",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"'Nothing to remove. User \"%s\" does not exist in the forced variation map.'",
"%",
"user_id",
")",
"return",
"True",
"if",
"not",
"validator",
".",
"is_non_empty_string",
"(",
"variation_key",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"'Variation key is invalid.'",
")",
"return",
"False",
"forced_variation",
"=",
"self",
".",
"get_variation_from_key",
"(",
"experiment_key",
",",
"variation_key",
")",
"if",
"not",
"forced_variation",
":",
"# The invalid variation key will be logged inside this call.",
"return",
"False",
"variation_id",
"=",
"forced_variation",
".",
"id",
"if",
"user_id",
"not",
"in",
"self",
".",
"forced_variation_map",
":",
"self",
".",
"forced_variation_map",
"[",
"user_id",
"]",
"=",
"{",
"experiment_id",
":",
"variation_id",
"}",
"else",
":",
"self",
".",
"forced_variation_map",
"[",
"user_id",
"]",
"[",
"experiment_id",
"]",
"=",
"variation_id",
"self",
".",
"logger",
".",
"debug",
"(",
"'Set variation \"%s\" for experiment \"%s\" and user \"%s\" in the forced variation map.'",
"%",
"(",
"variation_id",
",",
"experiment_id",
",",
"user_id",
")",
")",
"return",
"True"
] |
Sets users to a map of experiments to forced variations.
Args:
experiment_key: Key for experiment.
user_id: The user ID.
variation_key: Key for variation. If None, then clear the existing experiment-to-variation mapping.
Returns:
A boolean value that indicates if the set completed successfully.
|
[
"Sets",
"users",
"to",
"a",
"map",
"of",
"experiments",
"to",
"forced",
"variations",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/project_config.py#L499-L555
|
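A sketch of setting and clearing a forced variation through the method above; `config`, the keys and the user ID are assumptions for illustration.

# Force a specific variation for one user.
if config.set_forced_variation('checkout_test', 'user_123', 'variation_b'):
    print('Forced variation recorded')
# A False return would mean the experiment key, the variation-key format, or the
# variation key itself was invalid (each case is logged before returning).

# Passing None clears the mapping for that experiment and user; this returns True
# even when there was nothing to remove.
config.set_forced_variation('checkout_test', 'user_123', None)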
12,159
|
optimizely/python-sdk
|
optimizely/project_config.py
|
ProjectConfig.get_forced_variation
|
def get_forced_variation(self, experiment_key, user_id):
""" Gets the forced variation key for the given user and experiment.
Args:
experiment_key: Key for experiment.
user_id: The user ID.
Returns:
The variation which the given user and experiment should be forced into.
"""
if user_id not in self.forced_variation_map:
self.logger.debug('User "%s" is not in the forced variation map.' % user_id)
return None
experiment = self.get_experiment_from_key(experiment_key)
if not experiment:
# The invalid experiment key will be logged inside this call.
return None
experiment_to_variation_map = self.forced_variation_map.get(user_id)
if not experiment_to_variation_map:
self.logger.debug('No experiment "%s" mapped to user "%s" in the forced variation map.' % (
experiment_key,
user_id
))
return None
variation_id = experiment_to_variation_map.get(experiment.id)
if variation_id is None:
self.logger.debug(
'No variation mapped to experiment "%s" in the forced variation map.' % experiment_key
)
return None
variation = self.get_variation_from_id(experiment_key, variation_id)
self.logger.debug('Variation "%s" is mapped to experiment "%s" and user "%s" in the forced variation map' % (
variation.key,
experiment_key,
user_id
))
return variation
|
python
|
def get_forced_variation(self, experiment_key, user_id):
""" Gets the forced variation key for the given user and experiment.
Args:
experiment_key: Key for experiment.
user_id: The user ID.
Returns:
The variation which the given user and experiment should be forced into.
"""
if user_id not in self.forced_variation_map:
self.logger.debug('User "%s" is not in the forced variation map.' % user_id)
return None
experiment = self.get_experiment_from_key(experiment_key)
if not experiment:
# The invalid experiment key will be logged inside this call.
return None
experiment_to_variation_map = self.forced_variation_map.get(user_id)
if not experiment_to_variation_map:
self.logger.debug('No experiment "%s" mapped to user "%s" in the forced variation map.' % (
experiment_key,
user_id
))
return None
variation_id = experiment_to_variation_map.get(experiment.id)
if variation_id is None:
self.logger.debug(
'No variation mapped to experiment "%s" in the forced variation map.' % experiment_key
)
return None
variation = self.get_variation_from_id(experiment_key, variation_id)
self.logger.debug('Variation "%s" is mapped to experiment "%s" and user "%s" in the forced variation map' % (
variation.key,
experiment_key,
user_id
))
return variation
|
[
"def",
"get_forced_variation",
"(",
"self",
",",
"experiment_key",
",",
"user_id",
")",
":",
"if",
"user_id",
"not",
"in",
"self",
".",
"forced_variation_map",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"'User \"%s\" is not in the forced variation map.'",
"%",
"user_id",
")",
"return",
"None",
"experiment",
"=",
"self",
".",
"get_experiment_from_key",
"(",
"experiment_key",
")",
"if",
"not",
"experiment",
":",
"# The invalid experiment key will be logged inside this call.",
"return",
"None",
"experiment_to_variation_map",
"=",
"self",
".",
"forced_variation_map",
".",
"get",
"(",
"user_id",
")",
"if",
"not",
"experiment_to_variation_map",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"'No experiment \"%s\" mapped to user \"%s\" in the forced variation map.'",
"%",
"(",
"experiment_key",
",",
"user_id",
")",
")",
"return",
"None",
"variation_id",
"=",
"experiment_to_variation_map",
".",
"get",
"(",
"experiment",
".",
"id",
")",
"if",
"variation_id",
"is",
"None",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"'No variation mapped to experiment \"%s\" in the forced variation map.'",
"%",
"experiment_key",
")",
"return",
"None",
"variation",
"=",
"self",
".",
"get_variation_from_id",
"(",
"experiment_key",
",",
"variation_id",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"'Variation \"%s\" is mapped to experiment \"%s\" and user \"%s\" in the forced variation map'",
"%",
"(",
"variation",
".",
"key",
",",
"experiment_key",
",",
"user_id",
")",
")",
"return",
"variation"
] |
Gets the forced variation key for the given user and experiment.
Args:
experiment_key: Key for experiment.
user_id: The user ID.
Returns:
The variation which the given user and experiment should be forced into.
|
[
"Gets",
"the",
"forced",
"variation",
"key",
"for",
"the",
"given",
"user",
"and",
"experiment",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/project_config.py#L557-L600
|
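Round trip against the setter from the previous entry; all identifiers are hypothetical and `config` is assumed to be an existing ProjectConfig.

config.set_forced_variation('checkout_test', 'user_123', 'variation_b')
forced = config.get_forced_variation('checkout_test', 'user_123')
if forced is not None:
    # Note: despite the wording "forced variation key" in the docstring, the method
    # returns the Variation entity itself.
    print(forced.key)  # 'variation_b'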
12,160
|
optimizely/python-sdk
|
optimizely/event_dispatcher.py
|
EventDispatcher.dispatch_event
|
def dispatch_event(event):
""" Dispatch the event being represented by the Event object.
Args:
event: Object holding information about the request to be dispatched to the Optimizely backend.
"""
try:
if event.http_verb == enums.HTTPVerbs.GET:
requests.get(event.url, params=event.params, timeout=REQUEST_TIMEOUT).raise_for_status()
elif event.http_verb == enums.HTTPVerbs.POST:
requests.post(
event.url, data=json.dumps(event.params), headers=event.headers, timeout=REQUEST_TIMEOUT
).raise_for_status()
except request_exception.RequestException as error:
logging.error('Dispatch event failed. Error: %s' % str(error))
|
python
|
def dispatch_event(event):
""" Dispatch the event being represented by the Event object.
Args:
event: Object holding information about the request to be dispatched to the Optimizely backend.
"""
try:
if event.http_verb == enums.HTTPVerbs.GET:
requests.get(event.url, params=event.params, timeout=REQUEST_TIMEOUT).raise_for_status()
elif event.http_verb == enums.HTTPVerbs.POST:
requests.post(
event.url, data=json.dumps(event.params), headers=event.headers, timeout=REQUEST_TIMEOUT
).raise_for_status()
except request_exception.RequestException as error:
logging.error('Dispatch event failed. Error: %s' % str(error))
|
[
"def",
"dispatch_event",
"(",
"event",
")",
":",
"try",
":",
"if",
"event",
".",
"http_verb",
"==",
"enums",
".",
"HTTPVerbs",
".",
"GET",
":",
"requests",
".",
"get",
"(",
"event",
".",
"url",
",",
"params",
"=",
"event",
".",
"params",
",",
"timeout",
"=",
"REQUEST_TIMEOUT",
")",
".",
"raise_for_status",
"(",
")",
"elif",
"event",
".",
"http_verb",
"==",
"enums",
".",
"HTTPVerbs",
".",
"POST",
":",
"requests",
".",
"post",
"(",
"event",
".",
"url",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"event",
".",
"params",
")",
",",
"headers",
"=",
"event",
".",
"headers",
",",
"timeout",
"=",
"REQUEST_TIMEOUT",
")",
".",
"raise_for_status",
"(",
")",
"except",
"request_exception",
".",
"RequestException",
"as",
"error",
":",
"logging",
".",
"error",
"(",
"'Dispatch event failed. Error: %s'",
"%",
"str",
"(",
"error",
")",
")"
] |
Dispatch the event being represented by the Event object.
Args:
event: Object holding information about the request to be dispatched to the Optimizely backend.
|
[
"Dispatch",
"the",
"event",
"being",
"represented",
"by",
"the",
"Event",
"object",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/event_dispatcher.py#L28-L44
|
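The dispatcher contract used above is small: anything exposing dispatch_event(event) and reading event.http_verb, event.url, event.params and event.headers can stand in for the default. A sketch of an illustrative logging dispatcher (the class and its behaviour are not part of the SDK):

import json
import logging

class LoggingEventDispatcher(object):
    """Illustrative stand-in for the requests-based EventDispatcher above."""

    @staticmethod
    def dispatch_event(event):
        # Reads the same Event fields the real dispatcher uses, but only logs them.
        logging.info('Would send %s %s with payload %s',
                     event.http_verb, event.url, json.dumps(event.params))

# Assumption about this SDK version: a custom dispatcher is supplied via the client
# constructor, e.g. optimizely.Optimizely(datafile, event_dispatcher=LoggingEventDispatcher()).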
12,161
|
optimizely/python-sdk
|
optimizely/optimizely.py
|
Optimizely._validate_instantiation_options
|
def _validate_instantiation_options(self, datafile, skip_json_validation):
""" Helper method to validate all instantiation parameters.
Args:
datafile: JSON string representing the project.
skip_json_validation: Boolean representing whether JSON schema validation needs to be skipped or not.
Raises:
      Exception if provided instantiation options are invalid.
"""
if not skip_json_validation and not validator.is_datafile_valid(datafile):
raise exceptions.InvalidInputException(enums.Errors.INVALID_INPUT_ERROR.format('datafile'))
if not validator.is_event_dispatcher_valid(self.event_dispatcher):
raise exceptions.InvalidInputException(enums.Errors.INVALID_INPUT_ERROR.format('event_dispatcher'))
if not validator.is_logger_valid(self.logger):
raise exceptions.InvalidInputException(enums.Errors.INVALID_INPUT_ERROR.format('logger'))
if not validator.is_error_handler_valid(self.error_handler):
raise exceptions.InvalidInputException(enums.Errors.INVALID_INPUT_ERROR.format('error_handler'))
|
python
|
def _validate_instantiation_options(self, datafile, skip_json_validation):
""" Helper method to validate all instantiation parameters.
Args:
datafile: JSON string representing the project.
skip_json_validation: Boolean representing whether JSON schema validation needs to be skipped or not.
Raises:
      Exception if provided instantiation options are invalid.
"""
if not skip_json_validation and not validator.is_datafile_valid(datafile):
raise exceptions.InvalidInputException(enums.Errors.INVALID_INPUT_ERROR.format('datafile'))
if not validator.is_event_dispatcher_valid(self.event_dispatcher):
raise exceptions.InvalidInputException(enums.Errors.INVALID_INPUT_ERROR.format('event_dispatcher'))
if not validator.is_logger_valid(self.logger):
raise exceptions.InvalidInputException(enums.Errors.INVALID_INPUT_ERROR.format('logger'))
if not validator.is_error_handler_valid(self.error_handler):
raise exceptions.InvalidInputException(enums.Errors.INVALID_INPUT_ERROR.format('error_handler'))
|
[
"def",
"_validate_instantiation_options",
"(",
"self",
",",
"datafile",
",",
"skip_json_validation",
")",
":",
"if",
"not",
"skip_json_validation",
"and",
"not",
"validator",
".",
"is_datafile_valid",
"(",
"datafile",
")",
":",
"raise",
"exceptions",
".",
"InvalidInputException",
"(",
"enums",
".",
"Errors",
".",
"INVALID_INPUT_ERROR",
".",
"format",
"(",
"'datafile'",
")",
")",
"if",
"not",
"validator",
".",
"is_event_dispatcher_valid",
"(",
"self",
".",
"event_dispatcher",
")",
":",
"raise",
"exceptions",
".",
"InvalidInputException",
"(",
"enums",
".",
"Errors",
".",
"INVALID_INPUT_ERROR",
".",
"format",
"(",
"'event_dispatcher'",
")",
")",
"if",
"not",
"validator",
".",
"is_logger_valid",
"(",
"self",
".",
"logger",
")",
":",
"raise",
"exceptions",
".",
"InvalidInputException",
"(",
"enums",
".",
"Errors",
".",
"INVALID_INPUT_ERROR",
".",
"format",
"(",
"'logger'",
")",
")",
"if",
"not",
"validator",
".",
"is_error_handler_valid",
"(",
"self",
".",
"error_handler",
")",
":",
"raise",
"exceptions",
".",
"InvalidInputException",
"(",
"enums",
".",
"Errors",
".",
"INVALID_INPUT_ERROR",
".",
"format",
"(",
"'error_handler'",
")",
")"
] |
Helper method to validate all instantiation parameters.
Args:
datafile: JSON string representing the project.
skip_json_validation: Boolean representing whether JSON schema validation needs to be skipped or not.
Raises:
Exception if provided instantiation options are invalid.
|
[
"Helper",
"method",
"to",
"validate",
"all",
"instantiation",
"parameters",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/optimizely.py#L89-L110
|
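A construction sketch that satisfies the four checks above; the keyword argument names and the component classes are assumed to match this SDK version.

from optimizely import optimizely
from optimizely.error_handler import RaiseExceptionErrorHandler
from optimizely.logger import SimpleLogger

# Assumption: `datafile` already holds the project's datafile JSON string.
client = optimizely.Optimizely(
    datafile,
    logger=SimpleLogger(),
    error_handler=RaiseExceptionErrorHandler(),
)
# With an invalid datafile the helper raises InvalidInputException; the constructor is
# expected to catch it and leave the client unusable (see the is_valid check in activate()).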
12,162
|
optimizely/python-sdk
|
optimizely/optimizely.py
|
Optimizely._validate_user_inputs
|
def _validate_user_inputs(self, attributes=None, event_tags=None):
""" Helper method to validate user inputs.
Args:
attributes: Dict representing user attributes.
event_tags: Dict representing metadata associated with an event.
Returns:
Boolean True if inputs are valid. False otherwise.
"""
if attributes and not validator.are_attributes_valid(attributes):
self.logger.error('Provided attributes are in an invalid format.')
self.error_handler.handle_error(exceptions.InvalidAttributeException(enums.Errors.INVALID_ATTRIBUTE_FORMAT))
return False
if event_tags and not validator.are_event_tags_valid(event_tags):
self.logger.error('Provided event tags are in an invalid format.')
self.error_handler.handle_error(exceptions.InvalidEventTagException(enums.Errors.INVALID_EVENT_TAG_FORMAT))
return False
return True
|
python
|
def _validate_user_inputs(self, attributes=None, event_tags=None):
""" Helper method to validate user inputs.
Args:
attributes: Dict representing user attributes.
event_tags: Dict representing metadata associated with an event.
Returns:
Boolean True if inputs are valid. False otherwise.
"""
if attributes and not validator.are_attributes_valid(attributes):
self.logger.error('Provided attributes are in an invalid format.')
self.error_handler.handle_error(exceptions.InvalidAttributeException(enums.Errors.INVALID_ATTRIBUTE_FORMAT))
return False
if event_tags and not validator.are_event_tags_valid(event_tags):
self.logger.error('Provided event tags are in an invalid format.')
self.error_handler.handle_error(exceptions.InvalidEventTagException(enums.Errors.INVALID_EVENT_TAG_FORMAT))
return False
return True
|
[
"def",
"_validate_user_inputs",
"(",
"self",
",",
"attributes",
"=",
"None",
",",
"event_tags",
"=",
"None",
")",
":",
"if",
"attributes",
"and",
"not",
"validator",
".",
"are_attributes_valid",
"(",
"attributes",
")",
":",
"self",
".",
"logger",
".",
"error",
"(",
"'Provided attributes are in an invalid format.'",
")",
"self",
".",
"error_handler",
".",
"handle_error",
"(",
"exceptions",
".",
"InvalidAttributeException",
"(",
"enums",
".",
"Errors",
".",
"INVALID_ATTRIBUTE_FORMAT",
")",
")",
"return",
"False",
"if",
"event_tags",
"and",
"not",
"validator",
".",
"are_event_tags_valid",
"(",
"event_tags",
")",
":",
"self",
".",
"logger",
".",
"error",
"(",
"'Provided event tags are in an invalid format.'",
")",
"self",
".",
"error_handler",
".",
"handle_error",
"(",
"exceptions",
".",
"InvalidEventTagException",
"(",
"enums",
".",
"Errors",
".",
"INVALID_EVENT_TAG_FORMAT",
")",
")",
"return",
"False",
"return",
"True"
] |
Helper method to validate user inputs.
Args:
attributes: Dict representing user attributes.
event_tags: Dict representing metadata associated with an event.
Returns:
Boolean True if inputs are valid. False otherwise.
|
[
"Helper",
"method",
"to",
"validate",
"user",
"inputs",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/optimizely.py#L112-L134
|
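What passes these checks, shown through a public call; the event key, user ID and tag names are hypothetical, and the track() signature is assumed from this SDK version.

# Attributes and event tags must be plain dicts; anything else fails the validation
# above, is logged at ERROR, and the call is dropped.
attributes = {'device': 'iphone', 'is_returning': True}
event_tags = {'revenue': 4200, 'checkout_step': 2}

client.track('purchase', 'user_123', attributes=attributes, event_tags=event_tags)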
12,163
|
optimizely/python-sdk
|
optimizely/optimizely.py
|
Optimizely._send_impression_event
|
def _send_impression_event(self, experiment, variation, user_id, attributes):
""" Helper method to send impression event.
Args:
experiment: Experiment for which impression event is being sent.
variation: Variation picked for user for the given experiment.
user_id: ID for user.
attributes: Dict representing user attributes and values which need to be recorded.
"""
impression_event = self.event_builder.create_impression_event(experiment,
variation.id,
user_id,
attributes)
self.logger.debug('Dispatching impression event to URL %s with params %s.' % (
impression_event.url,
impression_event.params
))
try:
self.event_dispatcher.dispatch_event(impression_event)
except:
self.logger.exception('Unable to dispatch impression event!')
self.notification_center.send_notifications(enums.NotificationTypes.ACTIVATE,
experiment, user_id, attributes, variation, impression_event)
|
python
|
def _send_impression_event(self, experiment, variation, user_id, attributes):
""" Helper method to send impression event.
Args:
experiment: Experiment for which impression event is being sent.
variation: Variation picked for user for the given experiment.
user_id: ID for user.
attributes: Dict representing user attributes and values which need to be recorded.
"""
impression_event = self.event_builder.create_impression_event(experiment,
variation.id,
user_id,
attributes)
self.logger.debug('Dispatching impression event to URL %s with params %s.' % (
impression_event.url,
impression_event.params
))
try:
self.event_dispatcher.dispatch_event(impression_event)
except:
self.logger.exception('Unable to dispatch impression event!')
self.notification_center.send_notifications(enums.NotificationTypes.ACTIVATE,
experiment, user_id, attributes, variation, impression_event)
|
[
"def",
"_send_impression_event",
"(",
"self",
",",
"experiment",
",",
"variation",
",",
"user_id",
",",
"attributes",
")",
":",
"impression_event",
"=",
"self",
".",
"event_builder",
".",
"create_impression_event",
"(",
"experiment",
",",
"variation",
".",
"id",
",",
"user_id",
",",
"attributes",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"'Dispatching impression event to URL %s with params %s.'",
"%",
"(",
"impression_event",
".",
"url",
",",
"impression_event",
".",
"params",
")",
")",
"try",
":",
"self",
".",
"event_dispatcher",
".",
"dispatch_event",
"(",
"impression_event",
")",
"except",
":",
"self",
".",
"logger",
".",
"exception",
"(",
"'Unable to dispatch impression event!'",
")",
"self",
".",
"notification_center",
".",
"send_notifications",
"(",
"enums",
".",
"NotificationTypes",
".",
"ACTIVATE",
",",
"experiment",
",",
"user_id",
",",
"attributes",
",",
"variation",
",",
"impression_event",
")"
] |
Helper method to send impression event.
Args:
experiment: Experiment for which impression event is being sent.
variation: Variation picked for user for the given experiment.
user_id: ID for user.
attributes: Dict representing user attributes and values which need to be recorded.
|
[
"Helper",
"method",
"to",
"send",
"impression",
"event",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/optimizely.py#L136-L162
|
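The ACTIVATE notification sent at the end of the helper above can be observed with a listener whose parameters mirror the send_notifications(...) call; the registration below assumes the NotificationCenter API at this SDK version and an existing `client` instance.

from optimizely.helpers import enums

def on_activate(experiment, user_id, attributes, variation, event):
    # Parameter order mirrors the send_notifications(...) call in the helper above.
    print('Activated %s -> %s for user %s' % (experiment.key, variation.key, user_id))

client.notification_center.add_notification_listener(
    enums.NotificationTypes.ACTIVATE, on_activate)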
12,164
|
optimizely/python-sdk
|
optimizely/optimizely.py
|
Optimizely._get_feature_variable_for_type
|
def _get_feature_variable_for_type(self, feature_key, variable_key, variable_type, user_id, attributes):
""" Helper method to determine value for a certain variable attached to a feature flag based on type of variable.
Args:
feature_key: Key of the feature whose variable's value is being accessed.
variable_key: Key of the variable whose value is to be accessed.
variable_type: Type of variable which could be one of boolean/double/integer/string.
user_id: ID for user.
attributes: Dict representing user attributes.
Returns:
Value of the variable. None if:
- Feature key is invalid.
- Variable key is invalid.
- Mismatch with type of variable.
"""
if not validator.is_non_empty_string(feature_key):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('feature_key'))
return None
if not validator.is_non_empty_string(variable_key):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('variable_key'))
return None
if not isinstance(user_id, string_types):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id'))
return None
if not self._validate_user_inputs(attributes):
return None
feature_flag = self.config.get_feature_from_key(feature_key)
if not feature_flag:
return None
variable = self.config.get_variable_for_feature(feature_key, variable_key)
if not variable:
return None
# Return None if type differs
if variable.type != variable_type:
self.logger.warning(
'Requested variable type "%s", but variable is of type "%s". '
'Use correct API to retrieve value. Returning None.' % (variable_type, variable.type)
)
return None
feature_enabled = False
source_info = {}
variable_value = variable.defaultValue
decision = self.decision_service.get_variation_for_feature(feature_flag, user_id, attributes)
if decision.variation:
feature_enabled = decision.variation.featureEnabled
if feature_enabled:
variable_value = self.config.get_variable_value_for_variation(variable, decision.variation)
self.logger.info(
'Got variable value "%s" for variable "%s" of feature flag "%s".' % (
variable_value, variable_key, feature_key
)
)
else:
self.logger.info(
'Feature "%s" for variation "%s" is not enabled. '
'Returning the default variable value "%s".' % (feature_key, decision.variation.key, variable_value)
)
else:
self.logger.info(
'User "%s" is not in any variation or rollout rule. '
'Returning default value for variable "%s" of feature flag "%s".' % (user_id, variable_key, feature_key)
)
if decision.source == enums.DecisionSources.FEATURE_TEST:
source_info = {
'experiment_key': decision.experiment.key,
'variation_key': decision.variation.key
}
try:
actual_value = self.config.get_typecast_value(variable_value, variable_type)
except:
self.logger.error('Unable to cast value. Returning None.')
actual_value = None
self.notification_center.send_notifications(
enums.NotificationTypes.DECISION,
enums.DecisionNotificationTypes.FEATURE_VARIABLE,
user_id,
attributes or {},
{
'feature_key': feature_key,
'feature_enabled': feature_enabled,
'source': decision.source,
'variable_key': variable_key,
'variable_value': actual_value,
'variable_type': variable_type,
'source_info': source_info
}
)
return actual_value
|
python
|
def _get_feature_variable_for_type(self, feature_key, variable_key, variable_type, user_id, attributes):
""" Helper method to determine value for a certain variable attached to a feature flag based on type of variable.
Args:
feature_key: Key of the feature whose variable's value is being accessed.
variable_key: Key of the variable whose value is to be accessed.
variable_type: Type of variable which could be one of boolean/double/integer/string.
user_id: ID for user.
attributes: Dict representing user attributes.
Returns:
Value of the variable. None if:
- Feature key is invalid.
- Variable key is invalid.
- Mismatch with type of variable.
"""
if not validator.is_non_empty_string(feature_key):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('feature_key'))
return None
if not validator.is_non_empty_string(variable_key):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('variable_key'))
return None
if not isinstance(user_id, string_types):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id'))
return None
if not self._validate_user_inputs(attributes):
return None
feature_flag = self.config.get_feature_from_key(feature_key)
if not feature_flag:
return None
variable = self.config.get_variable_for_feature(feature_key, variable_key)
if not variable:
return None
# Return None if type differs
if variable.type != variable_type:
self.logger.warning(
'Requested variable type "%s", but variable is of type "%s". '
'Use correct API to retrieve value. Returning None.' % (variable_type, variable.type)
)
return None
feature_enabled = False
source_info = {}
variable_value = variable.defaultValue
decision = self.decision_service.get_variation_for_feature(feature_flag, user_id, attributes)
if decision.variation:
feature_enabled = decision.variation.featureEnabled
if feature_enabled:
variable_value = self.config.get_variable_value_for_variation(variable, decision.variation)
self.logger.info(
'Got variable value "%s" for variable "%s" of feature flag "%s".' % (
variable_value, variable_key, feature_key
)
)
else:
self.logger.info(
'Feature "%s" for variation "%s" is not enabled. '
'Returning the default variable value "%s".' % (feature_key, decision.variation.key, variable_value)
)
else:
self.logger.info(
'User "%s" is not in any variation or rollout rule. '
'Returning default value for variable "%s" of feature flag "%s".' % (user_id, variable_key, feature_key)
)
if decision.source == enums.DecisionSources.FEATURE_TEST:
source_info = {
'experiment_key': decision.experiment.key,
'variation_key': decision.variation.key
}
try:
actual_value = self.config.get_typecast_value(variable_value, variable_type)
except:
self.logger.error('Unable to cast value. Returning None.')
actual_value = None
self.notification_center.send_notifications(
enums.NotificationTypes.DECISION,
enums.DecisionNotificationTypes.FEATURE_VARIABLE,
user_id,
attributes or {},
{
'feature_key': feature_key,
'feature_enabled': feature_enabled,
'source': decision.source,
'variable_key': variable_key,
'variable_value': actual_value,
'variable_type': variable_type,
'source_info': source_info
}
)
return actual_value
|
[
"def",
"_get_feature_variable_for_type",
"(",
"self",
",",
"feature_key",
",",
"variable_key",
",",
"variable_type",
",",
"user_id",
",",
"attributes",
")",
":",
"if",
"not",
"validator",
".",
"is_non_empty_string",
"(",
"feature_key",
")",
":",
"self",
".",
"logger",
".",
"error",
"(",
"enums",
".",
"Errors",
".",
"INVALID_INPUT_ERROR",
".",
"format",
"(",
"'feature_key'",
")",
")",
"return",
"None",
"if",
"not",
"validator",
".",
"is_non_empty_string",
"(",
"variable_key",
")",
":",
"self",
".",
"logger",
".",
"error",
"(",
"enums",
".",
"Errors",
".",
"INVALID_INPUT_ERROR",
".",
"format",
"(",
"'variable_key'",
")",
")",
"return",
"None",
"if",
"not",
"isinstance",
"(",
"user_id",
",",
"string_types",
")",
":",
"self",
".",
"logger",
".",
"error",
"(",
"enums",
".",
"Errors",
".",
"INVALID_INPUT_ERROR",
".",
"format",
"(",
"'user_id'",
")",
")",
"return",
"None",
"if",
"not",
"self",
".",
"_validate_user_inputs",
"(",
"attributes",
")",
":",
"return",
"None",
"feature_flag",
"=",
"self",
".",
"config",
".",
"get_feature_from_key",
"(",
"feature_key",
")",
"if",
"not",
"feature_flag",
":",
"return",
"None",
"variable",
"=",
"self",
".",
"config",
".",
"get_variable_for_feature",
"(",
"feature_key",
",",
"variable_key",
")",
"if",
"not",
"variable",
":",
"return",
"None",
"# Return None if type differs",
"if",
"variable",
".",
"type",
"!=",
"variable_type",
":",
"self",
".",
"logger",
".",
"warning",
"(",
"'Requested variable type \"%s\", but variable is of type \"%s\". '",
"'Use correct API to retrieve value. Returning None.'",
"%",
"(",
"variable_type",
",",
"variable",
".",
"type",
")",
")",
"return",
"None",
"feature_enabled",
"=",
"False",
"source_info",
"=",
"{",
"}",
"variable_value",
"=",
"variable",
".",
"defaultValue",
"decision",
"=",
"self",
".",
"decision_service",
".",
"get_variation_for_feature",
"(",
"feature_flag",
",",
"user_id",
",",
"attributes",
")",
"if",
"decision",
".",
"variation",
":",
"feature_enabled",
"=",
"decision",
".",
"variation",
".",
"featureEnabled",
"if",
"feature_enabled",
":",
"variable_value",
"=",
"self",
".",
"config",
".",
"get_variable_value_for_variation",
"(",
"variable",
",",
"decision",
".",
"variation",
")",
"self",
".",
"logger",
".",
"info",
"(",
"'Got variable value \"%s\" for variable \"%s\" of feature flag \"%s\".'",
"%",
"(",
"variable_value",
",",
"variable_key",
",",
"feature_key",
")",
")",
"else",
":",
"self",
".",
"logger",
".",
"info",
"(",
"'Feature \"%s\" for variation \"%s\" is not enabled. '",
"'Returning the default variable value \"%s\".'",
"%",
"(",
"feature_key",
",",
"decision",
".",
"variation",
".",
"key",
",",
"variable_value",
")",
")",
"else",
":",
"self",
".",
"logger",
".",
"info",
"(",
"'User \"%s\" is not in any variation or rollout rule. '",
"'Returning default value for variable \"%s\" of feature flag \"%s\".'",
"%",
"(",
"user_id",
",",
"variable_key",
",",
"feature_key",
")",
")",
"if",
"decision",
".",
"source",
"==",
"enums",
".",
"DecisionSources",
".",
"FEATURE_TEST",
":",
"source_info",
"=",
"{",
"'experiment_key'",
":",
"decision",
".",
"experiment",
".",
"key",
",",
"'variation_key'",
":",
"decision",
".",
"variation",
".",
"key",
"}",
"try",
":",
"actual_value",
"=",
"self",
".",
"config",
".",
"get_typecast_value",
"(",
"variable_value",
",",
"variable_type",
")",
"except",
":",
"self",
".",
"logger",
".",
"error",
"(",
"'Unable to cast value. Returning None.'",
")",
"actual_value",
"=",
"None",
"self",
".",
"notification_center",
".",
"send_notifications",
"(",
"enums",
".",
"NotificationTypes",
".",
"DECISION",
",",
"enums",
".",
"DecisionNotificationTypes",
".",
"FEATURE_VARIABLE",
",",
"user_id",
",",
"attributes",
"or",
"{",
"}",
",",
"{",
"'feature_key'",
":",
"feature_key",
",",
"'feature_enabled'",
":",
"feature_enabled",
",",
"'source'",
":",
"decision",
".",
"source",
",",
"'variable_key'",
":",
"variable_key",
",",
"'variable_value'",
":",
"actual_value",
",",
"'variable_type'",
":",
"variable_type",
",",
"'source_info'",
":",
"source_info",
"}",
")",
"return",
"actual_value"
] |
Helper method to determine value for a certain variable attached to a feature flag based on type of variable.
Args:
feature_key: Key of the feature whose variable's value is being accessed.
variable_key: Key of the variable whose value is to be accessed.
variable_type: Type of variable which could be one of boolean/double/integer/string.
user_id: ID for user.
attributes: Dict representing user attributes.
Returns:
Value of the variable. None if:
- Feature key is invalid.
- Variable key is invalid.
- Mismatch with type of variable.
|
[
"Helper",
"method",
"to",
"determine",
"value",
"for",
"a",
"certain",
"variable",
"attached",
"to",
"a",
"feature",
"flag",
"based",
"on",
"type",
"of",
"variable",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/optimizely.py#L164-L263
|
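Illustrative usage sketch for the typed feature-variable getters that delegate to the _get_feature_variable_for_type helper recorded above. The datafile path and the feature, variable, and user keys are hypothetical placeholders, and client construction from a datafile is an assumption rather than something shown in this record.
from optimizely import optimizely

with open('datafile.json') as f:                         # hypothetical datafile location
    client = optimizely.Optimizely(f.read())             # constructor usage assumed, not shown in this record

# Each public typed getter passes its Variable.Type constant down to _get_feature_variable_for_type.
text = client.get_feature_variable_string('welcome_banner', 'greeting_text', 'user_123')
if text is None:
    text = 'Hello!'   # invalid key, type mismatch, or failed cast all surface as None

# Asking for the same variable through the integer getter hits the "Return None if type differs"
# branch above: a warning is logged and None is returned.
count = client.get_feature_variable_integer('welcome_banner', 'greeting_text', 'user_123')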
12,165
|
optimizely/python-sdk
|
optimizely/optimizely.py
|
Optimizely.activate
|
def activate(self, experiment_key, user_id, attributes=None):
""" Buckets visitor and sends impression event to Optimizely.
Args:
experiment_key: Experiment which needs to be activated.
user_id: ID for user.
attributes: Dict representing user attributes and values which need to be recorded.
Returns:
Variation key representing the variation the user will be bucketed in.
None if user is not in experiment or if experiment is not Running.
"""
if not self.is_valid:
self.logger.error(enums.Errors.INVALID_DATAFILE.format('activate'))
return None
if not validator.is_non_empty_string(experiment_key):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('experiment_key'))
return None
if not isinstance(user_id, string_types):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id'))
return None
variation_key = self.get_variation(experiment_key, user_id, attributes)
if not variation_key:
self.logger.info('Not activating user "%s".' % user_id)
return None
experiment = self.config.get_experiment_from_key(experiment_key)
variation = self.config.get_variation_from_key(experiment_key, variation_key)
# Create and dispatch impression event
self.logger.info('Activating user "%s" in experiment "%s".' % (user_id, experiment.key))
self._send_impression_event(experiment, variation, user_id, attributes)
return variation.key
|
python
|
def activate(self, experiment_key, user_id, attributes=None):
""" Buckets visitor and sends impression event to Optimizely.
Args:
experiment_key: Experiment which needs to be activated.
user_id: ID for user.
attributes: Dict representing user attributes and values which need to be recorded.
Returns:
Variation key representing the variation the user will be bucketed in.
None if user is not in experiment or if experiment is not Running.
"""
if not self.is_valid:
self.logger.error(enums.Errors.INVALID_DATAFILE.format('activate'))
return None
if not validator.is_non_empty_string(experiment_key):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('experiment_key'))
return None
if not isinstance(user_id, string_types):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id'))
return None
variation_key = self.get_variation(experiment_key, user_id, attributes)
if not variation_key:
self.logger.info('Not activating user "%s".' % user_id)
return None
experiment = self.config.get_experiment_from_key(experiment_key)
variation = self.config.get_variation_from_key(experiment_key, variation_key)
# Create and dispatch impression event
self.logger.info('Activating user "%s" in experiment "%s".' % (user_id, experiment.key))
self._send_impression_event(experiment, variation, user_id, attributes)
return variation.key
|
[
"def",
"activate",
"(",
"self",
",",
"experiment_key",
",",
"user_id",
",",
"attributes",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"is_valid",
":",
"self",
".",
"logger",
".",
"error",
"(",
"enums",
".",
"Errors",
".",
"INVALID_DATAFILE",
".",
"format",
"(",
"'activate'",
")",
")",
"return",
"None",
"if",
"not",
"validator",
".",
"is_non_empty_string",
"(",
"experiment_key",
")",
":",
"self",
".",
"logger",
".",
"error",
"(",
"enums",
".",
"Errors",
".",
"INVALID_INPUT_ERROR",
".",
"format",
"(",
"'experiment_key'",
")",
")",
"return",
"None",
"if",
"not",
"isinstance",
"(",
"user_id",
",",
"string_types",
")",
":",
"self",
".",
"logger",
".",
"error",
"(",
"enums",
".",
"Errors",
".",
"INVALID_INPUT_ERROR",
".",
"format",
"(",
"'user_id'",
")",
")",
"return",
"None",
"variation_key",
"=",
"self",
".",
"get_variation",
"(",
"experiment_key",
",",
"user_id",
",",
"attributes",
")",
"if",
"not",
"variation_key",
":",
"self",
".",
"logger",
".",
"info",
"(",
"'Not activating user \"%s\".'",
"%",
"user_id",
")",
"return",
"None",
"experiment",
"=",
"self",
".",
"config",
".",
"get_experiment_from_key",
"(",
"experiment_key",
")",
"variation",
"=",
"self",
".",
"config",
".",
"get_variation_from_key",
"(",
"experiment_key",
",",
"variation_key",
")",
"# Create and dispatch impression event",
"self",
".",
"logger",
".",
"info",
"(",
"'Activating user \"%s\" in experiment \"%s\".'",
"%",
"(",
"user_id",
",",
"experiment",
".",
"key",
")",
")",
"self",
".",
"_send_impression_event",
"(",
"experiment",
",",
"variation",
",",
"user_id",
",",
"attributes",
")",
"return",
"variation",
".",
"key"
] |
Buckets visitor and sends impression event to Optimizely.
Args:
experiment_key: Experiment which needs to be activated.
user_id: ID for user.
attributes: Dict representing user attributes and values which need to be recorded.
Returns:
Variation key representing the variation the user will be bucketed in.
None if user is not in experiment or if experiment is not Running.
|
[
"Buckets",
"visitor",
"and",
"sends",
"impression",
"event",
"to",
"Optimizely",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/optimizely.py#L265-L303
|
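A minimal usage sketch for the activate API recorded above; it buckets the user and sends an impression event in one call. The datafile path, experiment key, user ID, and attributes are hypothetical placeholders.
from optimizely import optimizely

with open('datafile.json') as f:                         # hypothetical datafile location
    client = optimizely.Optimizely(f.read())             # constructor usage assumed, not shown in this record

variation_key = client.activate('checkout_redesign', 'user_123', {'device': 'mobile'})
if variation_key is None:
    pass  # not bucketed: invalid input, experiment not running, or audience conditions not met
elif variation_key == 'treatment':
    pass  # render the treatment experience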
12,166
|
optimizely/python-sdk
|
optimizely/optimizely.py
|
Optimizely.track
|
def track(self, event_key, user_id, attributes=None, event_tags=None):
""" Send conversion event to Optimizely.
Args:
event_key: Event key representing the event which needs to be recorded.
user_id: ID for user.
attributes: Dict representing visitor attributes and values which need to be recorded.
event_tags: Dict representing metadata associated with the event.
"""
if not self.is_valid:
self.logger.error(enums.Errors.INVALID_DATAFILE.format('track'))
return
if not validator.is_non_empty_string(event_key):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('event_key'))
return
if not isinstance(user_id, string_types):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id'))
return
if not self._validate_user_inputs(attributes, event_tags):
return
event = self.config.get_event(event_key)
if not event:
self.logger.info('Not tracking user "%s" for event "%s".' % (user_id, event_key))
return
conversion_event = self.event_builder.create_conversion_event(event_key, user_id, attributes, event_tags)
self.logger.info('Tracking event "%s" for user "%s".' % (event_key, user_id))
self.logger.debug('Dispatching conversion event to URL %s with params %s.' % (
conversion_event.url,
conversion_event.params
))
try:
self.event_dispatcher.dispatch_event(conversion_event)
except:
self.logger.exception('Unable to dispatch conversion event!')
self.notification_center.send_notifications(enums.NotificationTypes.TRACK, event_key, user_id,
attributes, event_tags, conversion_event)
|
python
|
def track(self, event_key, user_id, attributes=None, event_tags=None):
""" Send conversion event to Optimizely.
Args:
event_key: Event key representing the event which needs to be recorded.
user_id: ID for user.
attributes: Dict representing visitor attributes and values which need to be recorded.
event_tags: Dict representing metadata associated with the event.
"""
if not self.is_valid:
self.logger.error(enums.Errors.INVALID_DATAFILE.format('track'))
return
if not validator.is_non_empty_string(event_key):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('event_key'))
return
if not isinstance(user_id, string_types):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id'))
return
if not self._validate_user_inputs(attributes, event_tags):
return
event = self.config.get_event(event_key)
if not event:
self.logger.info('Not tracking user "%s" for event "%s".' % (user_id, event_key))
return
conversion_event = self.event_builder.create_conversion_event(event_key, user_id, attributes, event_tags)
self.logger.info('Tracking event "%s" for user "%s".' % (event_key, user_id))
self.logger.debug('Dispatching conversion event to URL %s with params %s.' % (
conversion_event.url,
conversion_event.params
))
try:
self.event_dispatcher.dispatch_event(conversion_event)
except:
self.logger.exception('Unable to dispatch conversion event!')
self.notification_center.send_notifications(enums.NotificationTypes.TRACK, event_key, user_id,
attributes, event_tags, conversion_event)
|
[
"def",
"track",
"(",
"self",
",",
"event_key",
",",
"user_id",
",",
"attributes",
"=",
"None",
",",
"event_tags",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"is_valid",
":",
"self",
".",
"logger",
".",
"error",
"(",
"enums",
".",
"Errors",
".",
"INVALID_DATAFILE",
".",
"format",
"(",
"'track'",
")",
")",
"return",
"if",
"not",
"validator",
".",
"is_non_empty_string",
"(",
"event_key",
")",
":",
"self",
".",
"logger",
".",
"error",
"(",
"enums",
".",
"Errors",
".",
"INVALID_INPUT_ERROR",
".",
"format",
"(",
"'event_key'",
")",
")",
"return",
"if",
"not",
"isinstance",
"(",
"user_id",
",",
"string_types",
")",
":",
"self",
".",
"logger",
".",
"error",
"(",
"enums",
".",
"Errors",
".",
"INVALID_INPUT_ERROR",
".",
"format",
"(",
"'user_id'",
")",
")",
"return",
"if",
"not",
"self",
".",
"_validate_user_inputs",
"(",
"attributes",
",",
"event_tags",
")",
":",
"return",
"event",
"=",
"self",
".",
"config",
".",
"get_event",
"(",
"event_key",
")",
"if",
"not",
"event",
":",
"self",
".",
"logger",
".",
"info",
"(",
"'Not tracking user \"%s\" for event \"%s\".'",
"%",
"(",
"user_id",
",",
"event_key",
")",
")",
"return",
"conversion_event",
"=",
"self",
".",
"event_builder",
".",
"create_conversion_event",
"(",
"event_key",
",",
"user_id",
",",
"attributes",
",",
"event_tags",
")",
"self",
".",
"logger",
".",
"info",
"(",
"'Tracking event \"%s\" for user \"%s\".'",
"%",
"(",
"event_key",
",",
"user_id",
")",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"'Dispatching conversion event to URL %s with params %s.'",
"%",
"(",
"conversion_event",
".",
"url",
",",
"conversion_event",
".",
"params",
")",
")",
"try",
":",
"self",
".",
"event_dispatcher",
".",
"dispatch_event",
"(",
"conversion_event",
")",
"except",
":",
"self",
".",
"logger",
".",
"exception",
"(",
"'Unable to dispatch conversion event!'",
")",
"self",
".",
"notification_center",
".",
"send_notifications",
"(",
"enums",
".",
"NotificationTypes",
".",
"TRACK",
",",
"event_key",
",",
"user_id",
",",
"attributes",
",",
"event_tags",
",",
"conversion_event",
")"
] |
Send conversion event to Optimizely.
Args:
event_key: Event key representing the event which needs to be recorded.
user_id: ID for user.
attributes: Dict representing visitor attributes and values which need to be recorded.
event_tags: Dict representing metadata associated with the event.
|
[
"Send",
"conversion",
"event",
"to",
"Optimizely",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/optimizely.py#L305-L346
|
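A usage sketch for the track API recorded above; event key, user ID, and tags are hypothetical, and client construction is assumed as in the earlier sketches.
from optimizely import optimizely

client = optimizely.Optimizely(open('datafile.json').read())   # hypothetical datafile path
client.track('purchase_completed', 'user_123',
             attributes={'device': 'mobile'},
             event_tags={'revenue': 4200})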
12,167
|
optimizely/python-sdk
|
optimizely/optimizely.py
|
Optimizely.get_variation
|
def get_variation(self, experiment_key, user_id, attributes=None):
""" Gets variation where user will be bucketed.
Args:
experiment_key: Experiment for which user variation needs to be determined.
user_id: ID for user.
attributes: Dict representing user attributes.
Returns:
Variation key representing the variation the user will be bucketed in.
None if user is not in experiment or if experiment is not Running.
"""
if not self.is_valid:
self.logger.error(enums.Errors.INVALID_DATAFILE.format('get_variation'))
return None
if not validator.is_non_empty_string(experiment_key):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('experiment_key'))
return None
if not isinstance(user_id, string_types):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id'))
return None
experiment = self.config.get_experiment_from_key(experiment_key)
variation_key = None
if not experiment:
self.logger.info('Experiment key "%s" is invalid. Not activating user "%s".' % (
experiment_key,
user_id
))
return None
if not self._validate_user_inputs(attributes):
return None
variation = self.decision_service.get_variation(experiment, user_id, attributes)
if variation:
variation_key = variation.key
if self.config.is_feature_experiment(experiment.id):
decision_notification_type = enums.DecisionNotificationTypes.FEATURE_TEST
else:
decision_notification_type = enums.DecisionNotificationTypes.AB_TEST
self.notification_center.send_notifications(
enums.NotificationTypes.DECISION,
decision_notification_type,
user_id,
attributes or {},
{
'experiment_key': experiment_key,
'variation_key': variation_key
}
)
return variation_key
|
python
|
def get_variation(self, experiment_key, user_id, attributes=None):
""" Gets variation where user will be bucketed.
Args:
experiment_key: Experiment for which user variation needs to be determined.
user_id: ID for user.
attributes: Dict representing user attributes.
Returns:
Variation key representing the variation the user will be bucketed in.
None if user is not in experiment or if experiment is not Running.
"""
if not self.is_valid:
self.logger.error(enums.Errors.INVALID_DATAFILE.format('get_variation'))
return None
if not validator.is_non_empty_string(experiment_key):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('experiment_key'))
return None
if not isinstance(user_id, string_types):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id'))
return None
experiment = self.config.get_experiment_from_key(experiment_key)
variation_key = None
if not experiment:
self.logger.info('Experiment key "%s" is invalid. Not activating user "%s".' % (
experiment_key,
user_id
))
return None
if not self._validate_user_inputs(attributes):
return None
variation = self.decision_service.get_variation(experiment, user_id, attributes)
if variation:
variation_key = variation.key
if self.config.is_feature_experiment(experiment.id):
decision_notification_type = enums.DecisionNotificationTypes.FEATURE_TEST
else:
decision_notification_type = enums.DecisionNotificationTypes.AB_TEST
self.notification_center.send_notifications(
enums.NotificationTypes.DECISION,
decision_notification_type,
user_id,
attributes or {},
{
'experiment_key': experiment_key,
'variation_key': variation_key
}
)
return variation_key
|
[
"def",
"get_variation",
"(",
"self",
",",
"experiment_key",
",",
"user_id",
",",
"attributes",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"is_valid",
":",
"self",
".",
"logger",
".",
"error",
"(",
"enums",
".",
"Errors",
".",
"INVALID_DATAFILE",
".",
"format",
"(",
"'get_variation'",
")",
")",
"return",
"None",
"if",
"not",
"validator",
".",
"is_non_empty_string",
"(",
"experiment_key",
")",
":",
"self",
".",
"logger",
".",
"error",
"(",
"enums",
".",
"Errors",
".",
"INVALID_INPUT_ERROR",
".",
"format",
"(",
"'experiment_key'",
")",
")",
"return",
"None",
"if",
"not",
"isinstance",
"(",
"user_id",
",",
"string_types",
")",
":",
"self",
".",
"logger",
".",
"error",
"(",
"enums",
".",
"Errors",
".",
"INVALID_INPUT_ERROR",
".",
"format",
"(",
"'user_id'",
")",
")",
"return",
"None",
"experiment",
"=",
"self",
".",
"config",
".",
"get_experiment_from_key",
"(",
"experiment_key",
")",
"variation_key",
"=",
"None",
"if",
"not",
"experiment",
":",
"self",
".",
"logger",
".",
"info",
"(",
"'Experiment key \"%s\" is invalid. Not activating user \"%s\".'",
"%",
"(",
"experiment_key",
",",
"user_id",
")",
")",
"return",
"None",
"if",
"not",
"self",
".",
"_validate_user_inputs",
"(",
"attributes",
")",
":",
"return",
"None",
"variation",
"=",
"self",
".",
"decision_service",
".",
"get_variation",
"(",
"experiment",
",",
"user_id",
",",
"attributes",
")",
"if",
"variation",
":",
"variation_key",
"=",
"variation",
".",
"key",
"if",
"self",
".",
"config",
".",
"is_feature_experiment",
"(",
"experiment",
".",
"id",
")",
":",
"decision_notification_type",
"=",
"enums",
".",
"DecisionNotificationTypes",
".",
"FEATURE_TEST",
"else",
":",
"decision_notification_type",
"=",
"enums",
".",
"DecisionNotificationTypes",
".",
"AB_TEST",
"self",
".",
"notification_center",
".",
"send_notifications",
"(",
"enums",
".",
"NotificationTypes",
".",
"DECISION",
",",
"decision_notification_type",
",",
"user_id",
",",
"attributes",
"or",
"{",
"}",
",",
"{",
"'experiment_key'",
":",
"experiment_key",
",",
"'variation_key'",
":",
"variation_key",
"}",
")",
"return",
"variation_key"
] |
Gets variation where user will be bucketed.
Args:
experiment_key: Experiment for which user variation needs to be determined.
user_id: ID for user.
attributes: Dict representing user attributes.
Returns:
Variation key representing the variation the user will be bucketed in.
None if user is not in experiment or if experiment is not Running.
|
[
"Gets",
"variation",
"where",
"user",
"will",
"be",
"bucketed",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/optimizely.py#L348-L406
|
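A usage sketch for get_variation as recorded above; unlike activate, it performs the bucketing decision without dispatching an impression event. Keys are hypothetical and client construction is assumed.
from optimizely import optimizely

client = optimizely.Optimizely(open('datafile.json').read())   # hypothetical datafile path
variation_key = client.get_variation('checkout_redesign', 'user_123', {'device': 'mobile'})
# variation_key is None when the experiment key is invalid, the experiment is not running,
# or the user fails the audience check; otherwise it names the bucketed variation.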
12,168
|
optimizely/python-sdk
|
optimizely/optimizely.py
|
Optimizely.is_feature_enabled
|
def is_feature_enabled(self, feature_key, user_id, attributes=None):
""" Returns true if the feature is enabled for the given user.
Args:
feature_key: The key of the feature for which we are determining if it is enabled or not for the given user.
user_id: ID for user.
attributes: Dict representing user attributes.
Returns:
True if the feature is enabled for the user. False otherwise.
"""
if not self.is_valid:
self.logger.error(enums.Errors.INVALID_DATAFILE.format('is_feature_enabled'))
return False
if not validator.is_non_empty_string(feature_key):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('feature_key'))
return False
if not isinstance(user_id, string_types):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id'))
return False
if not self._validate_user_inputs(attributes):
return False
feature = self.config.get_feature_from_key(feature_key)
if not feature:
return False
feature_enabled = False
source_info = {}
decision = self.decision_service.get_variation_for_feature(feature, user_id, attributes)
is_source_experiment = decision.source == enums.DecisionSources.FEATURE_TEST
if decision.variation:
if decision.variation.featureEnabled is True:
feature_enabled = True
# Send event if Decision came from an experiment.
if is_source_experiment:
source_info = {
'experiment_key': decision.experiment.key,
'variation_key': decision.variation.key
}
self._send_impression_event(decision.experiment,
decision.variation,
user_id,
attributes)
if feature_enabled:
self.logger.info('Feature "%s" is enabled for user "%s".' % (feature_key, user_id))
else:
self.logger.info('Feature "%s" is not enabled for user "%s".' % (feature_key, user_id))
self.notification_center.send_notifications(
enums.NotificationTypes.DECISION,
enums.DecisionNotificationTypes.FEATURE,
user_id,
attributes or {},
{
'feature_key': feature_key,
'feature_enabled': feature_enabled,
'source': decision.source,
'source_info': source_info
}
)
return feature_enabled
|
python
|
def is_feature_enabled(self, feature_key, user_id, attributes=None):
""" Returns true if the feature is enabled for the given user.
Args:
feature_key: The key of the feature for which we are determining if it is enabled or not for the given user.
user_id: ID for user.
attributes: Dict representing user attributes.
Returns:
True if the feature is enabled for the user. False otherwise.
"""
if not self.is_valid:
self.logger.error(enums.Errors.INVALID_DATAFILE.format('is_feature_enabled'))
return False
if not validator.is_non_empty_string(feature_key):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('feature_key'))
return False
if not isinstance(user_id, string_types):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id'))
return False
if not self._validate_user_inputs(attributes):
return False
feature = self.config.get_feature_from_key(feature_key)
if not feature:
return False
feature_enabled = False
source_info = {}
decision = self.decision_service.get_variation_for_feature(feature, user_id, attributes)
is_source_experiment = decision.source == enums.DecisionSources.FEATURE_TEST
if decision.variation:
if decision.variation.featureEnabled is True:
feature_enabled = True
# Send event if Decision came from an experiment.
if is_source_experiment:
source_info = {
'experiment_key': decision.experiment.key,
'variation_key': decision.variation.key
}
self._send_impression_event(decision.experiment,
decision.variation,
user_id,
attributes)
if feature_enabled:
self.logger.info('Feature "%s" is enabled for user "%s".' % (feature_key, user_id))
else:
self.logger.info('Feature "%s" is not enabled for user "%s".' % (feature_key, user_id))
self.notification_center.send_notifications(
enums.NotificationTypes.DECISION,
enums.DecisionNotificationTypes.FEATURE,
user_id,
attributes or {},
{
'feature_key': feature_key,
'feature_enabled': feature_enabled,
'source': decision.source,
'source_info': source_info
}
)
return feature_enabled
|
[
"def",
"is_feature_enabled",
"(",
"self",
",",
"feature_key",
",",
"user_id",
",",
"attributes",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"is_valid",
":",
"self",
".",
"logger",
".",
"error",
"(",
"enums",
".",
"Errors",
".",
"INVALID_DATAFILE",
".",
"format",
"(",
"'is_feature_enabled'",
")",
")",
"return",
"False",
"if",
"not",
"validator",
".",
"is_non_empty_string",
"(",
"feature_key",
")",
":",
"self",
".",
"logger",
".",
"error",
"(",
"enums",
".",
"Errors",
".",
"INVALID_INPUT_ERROR",
".",
"format",
"(",
"'feature_key'",
")",
")",
"return",
"False",
"if",
"not",
"isinstance",
"(",
"user_id",
",",
"string_types",
")",
":",
"self",
".",
"logger",
".",
"error",
"(",
"enums",
".",
"Errors",
".",
"INVALID_INPUT_ERROR",
".",
"format",
"(",
"'user_id'",
")",
")",
"return",
"False",
"if",
"not",
"self",
".",
"_validate_user_inputs",
"(",
"attributes",
")",
":",
"return",
"False",
"feature",
"=",
"self",
".",
"config",
".",
"get_feature_from_key",
"(",
"feature_key",
")",
"if",
"not",
"feature",
":",
"return",
"False",
"feature_enabled",
"=",
"False",
"source_info",
"=",
"{",
"}",
"decision",
"=",
"self",
".",
"decision_service",
".",
"get_variation_for_feature",
"(",
"feature",
",",
"user_id",
",",
"attributes",
")",
"is_source_experiment",
"=",
"decision",
".",
"source",
"==",
"enums",
".",
"DecisionSources",
".",
"FEATURE_TEST",
"if",
"decision",
".",
"variation",
":",
"if",
"decision",
".",
"variation",
".",
"featureEnabled",
"is",
"True",
":",
"feature_enabled",
"=",
"True",
"# Send event if Decision came from an experiment.",
"if",
"is_source_experiment",
":",
"source_info",
"=",
"{",
"'experiment_key'",
":",
"decision",
".",
"experiment",
".",
"key",
",",
"'variation_key'",
":",
"decision",
".",
"variation",
".",
"key",
"}",
"self",
".",
"_send_impression_event",
"(",
"decision",
".",
"experiment",
",",
"decision",
".",
"variation",
",",
"user_id",
",",
"attributes",
")",
"if",
"feature_enabled",
":",
"self",
".",
"logger",
".",
"info",
"(",
"'Feature \"%s\" is enabled for user \"%s\".'",
"%",
"(",
"feature_key",
",",
"user_id",
")",
")",
"else",
":",
"self",
".",
"logger",
".",
"info",
"(",
"'Feature \"%s\" is not enabled for user \"%s\".'",
"%",
"(",
"feature_key",
",",
"user_id",
")",
")",
"self",
".",
"notification_center",
".",
"send_notifications",
"(",
"enums",
".",
"NotificationTypes",
".",
"DECISION",
",",
"enums",
".",
"DecisionNotificationTypes",
".",
"FEATURE",
",",
"user_id",
",",
"attributes",
"or",
"{",
"}",
",",
"{",
"'feature_key'",
":",
"feature_key",
",",
"'feature_enabled'",
":",
"feature_enabled",
",",
"'source'",
":",
"decision",
".",
"source",
",",
"'source_info'",
":",
"source_info",
"}",
")",
"return",
"feature_enabled"
] |
Returns true if the feature is enabled for the given user.
Args:
feature_key: The key of the feature for which we are determining if it is enabled or not for the given user.
user_id: ID for user.
attributes: Dict representing user attributes.
Returns:
True if the feature is enabled for the user. False otherwise.
|
[
"Returns",
"true",
"if",
"the",
"feature",
"is",
"enabled",
"for",
"the",
"given",
"user",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/optimizely.py#L408-L476
|
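A usage sketch for is_feature_enabled as recorded above, with hypothetical keys and an assumed client.
from optimizely import optimizely

client = optimizely.Optimizely(open('datafile.json').read())   # hypothetical datafile path
if client.is_feature_enabled('new_search', 'user_123', {'plan': 'premium'}):
    pass  # feature is on for this user, via a feature test or a rollout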
12,169
|
optimizely/python-sdk
|
optimizely/optimizely.py
|
Optimizely.get_enabled_features
|
def get_enabled_features(self, user_id, attributes=None):
""" Returns the list of features that are enabled for the user.
Args:
user_id: ID for user.
attributes: Dict representing user attributes.
Returns:
A list of the keys of the features that are enabled for the user.
"""
enabled_features = []
if not self.is_valid:
self.logger.error(enums.Errors.INVALID_DATAFILE.format('get_enabled_features'))
return enabled_features
if not isinstance(user_id, string_types):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id'))
return enabled_features
if not self._validate_user_inputs(attributes):
return enabled_features
for feature in self.config.feature_key_map.values():
if self.is_feature_enabled(feature.key, user_id, attributes):
enabled_features.append(feature.key)
return enabled_features
|
python
|
def get_enabled_features(self, user_id, attributes=None):
""" Returns the list of features that are enabled for the user.
Args:
user_id: ID for user.
attributes: Dict representing user attributes.
Returns:
A list of the keys of the features that are enabled for the user.
"""
enabled_features = []
if not self.is_valid:
self.logger.error(enums.Errors.INVALID_DATAFILE.format('get_enabled_features'))
return enabled_features
if not isinstance(user_id, string_types):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id'))
return enabled_features
if not self._validate_user_inputs(attributes):
return enabled_features
for feature in self.config.feature_key_map.values():
if self.is_feature_enabled(feature.key, user_id, attributes):
enabled_features.append(feature.key)
return enabled_features
|
[
"def",
"get_enabled_features",
"(",
"self",
",",
"user_id",
",",
"attributes",
"=",
"None",
")",
":",
"enabled_features",
"=",
"[",
"]",
"if",
"not",
"self",
".",
"is_valid",
":",
"self",
".",
"logger",
".",
"error",
"(",
"enums",
".",
"Errors",
".",
"INVALID_DATAFILE",
".",
"format",
"(",
"'get_enabled_features'",
")",
")",
"return",
"enabled_features",
"if",
"not",
"isinstance",
"(",
"user_id",
",",
"string_types",
")",
":",
"self",
".",
"logger",
".",
"error",
"(",
"enums",
".",
"Errors",
".",
"INVALID_INPUT_ERROR",
".",
"format",
"(",
"'user_id'",
")",
")",
"return",
"enabled_features",
"if",
"not",
"self",
".",
"_validate_user_inputs",
"(",
"attributes",
")",
":",
"return",
"enabled_features",
"for",
"feature",
"in",
"self",
".",
"config",
".",
"feature_key_map",
".",
"values",
"(",
")",
":",
"if",
"self",
".",
"is_feature_enabled",
"(",
"feature",
".",
"key",
",",
"user_id",
",",
"attributes",
")",
":",
"enabled_features",
".",
"append",
"(",
"feature",
".",
"key",
")",
"return",
"enabled_features"
] |
Returns the list of features that are enabled for the user.
Args:
user_id: ID for user.
attributes: Dict representing user attributes.
Returns:
A list of the keys of the features that are enabled for the user.
|
[
"Returns",
"the",
"list",
"of",
"features",
"that",
"are",
"enabled",
"for",
"the",
"user",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/optimizely.py#L478-L505
|
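A usage sketch for get_enabled_features as recorded above; the returned keys shown in the comment are hypothetical.
from optimizely import optimizely

client = optimizely.Optimizely(open('datafile.json').read())   # hypothetical datafile path
enabled = client.get_enabled_features('user_123', {'plan': 'premium'})
# e.g. ['new_search', 'welcome_banner']: every feature key for which is_feature_enabled() returns True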
12,170
|
optimizely/python-sdk
|
optimizely/optimizely.py
|
Optimizely.get_feature_variable_boolean
|
def get_feature_variable_boolean(self, feature_key, variable_key, user_id, attributes=None):
""" Returns value for a certain boolean variable attached to a feature flag.
Args:
feature_key: Key of the feature whose variable's value is being accessed.
variable_key: Key of the variable whose value is to be accessed.
user_id: ID for user.
attributes: Dict representing user attributes.
Returns:
Boolean value of the variable. None if:
- Feature key is invalid.
- Variable key is invalid.
- Mismatch with type of variable.
"""
variable_type = entities.Variable.Type.BOOLEAN
return self._get_feature_variable_for_type(feature_key, variable_key, variable_type, user_id, attributes)
|
python
|
def get_feature_variable_boolean(self, feature_key, variable_key, user_id, attributes=None):
""" Returns value for a certain boolean variable attached to a feature flag.
Args:
feature_key: Key of the feature whose variable's value is being accessed.
variable_key: Key of the variable whose value is to be accessed.
user_id: ID for user.
attributes: Dict representing user attributes.
Returns:
Boolean value of the variable. None if:
- Feature key is invalid.
- Variable key is invalid.
- Mismatch with type of variable.
"""
variable_type = entities.Variable.Type.BOOLEAN
return self._get_feature_variable_for_type(feature_key, variable_key, variable_type, user_id, attributes)
|
[
"def",
"get_feature_variable_boolean",
"(",
"self",
",",
"feature_key",
",",
"variable_key",
",",
"user_id",
",",
"attributes",
"=",
"None",
")",
":",
"variable_type",
"=",
"entities",
".",
"Variable",
".",
"Type",
".",
"BOOLEAN",
"return",
"self",
".",
"_get_feature_variable_for_type",
"(",
"feature_key",
",",
"variable_key",
",",
"variable_type",
",",
"user_id",
",",
"attributes",
")"
] |
Returns value for a certain boolean variable attached to a feature flag.
Args:
feature_key: Key of the feature whose variable's value is being accessed.
variable_key: Key of the variable whose value is to be accessed.
user_id: ID for user.
attributes: Dict representing user attributes.
Returns:
Boolean value of the variable. None if:
- Feature key is invalid.
- Variable key is invalid.
- Mismatch with type of variable.
|
[
"Returns",
"value",
"for",
"a",
"certain",
"boolean",
"variable",
"attached",
"to",
"a",
"feature",
"flag",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/optimizely.py#L507-L524
|
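A usage sketch for get_feature_variable_boolean as recorded above; the sibling getters recorded in the following entries share the same signature and differ only in the entities.Variable.Type constant they pass to the helper. Keys are hypothetical and client construction is assumed.
from optimizely import optimizely

client = optimizely.Optimizely(open('datafile.json').read())   # hypothetical datafile path
show_banner = client.get_feature_variable_boolean('welcome_banner', 'show_banner', 'user_123')
# get_feature_variable_double / _integer / _string follow the same call shape.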
12,171
|
optimizely/python-sdk
|
optimizely/optimizely.py
|
Optimizely.get_feature_variable_double
|
def get_feature_variable_double(self, feature_key, variable_key, user_id, attributes=None):
""" Returns value for a certain double variable attached to a feature flag.
Args:
feature_key: Key of the feature whose variable's value is being accessed.
variable_key: Key of the variable whose value is to be accessed.
user_id: ID for user.
attributes: Dict representing user attributes.
Returns:
Double value of the variable. None if:
- Feature key is invalid.
- Variable key is invalid.
- Mismatch with type of variable.
"""
variable_type = entities.Variable.Type.DOUBLE
return self._get_feature_variable_for_type(feature_key, variable_key, variable_type, user_id, attributes)
|
python
|
def get_feature_variable_double(self, feature_key, variable_key, user_id, attributes=None):
""" Returns value for a certain double variable attached to a feature flag.
Args:
feature_key: Key of the feature whose variable's value is being accessed.
variable_key: Key of the variable whose value is to be accessed.
user_id: ID for user.
attributes: Dict representing user attributes.
Returns:
Double value of the variable. None if:
- Feature key is invalid.
- Variable key is invalid.
- Mismatch with type of variable.
"""
variable_type = entities.Variable.Type.DOUBLE
return self._get_feature_variable_for_type(feature_key, variable_key, variable_type, user_id, attributes)
|
[
"def",
"get_feature_variable_double",
"(",
"self",
",",
"feature_key",
",",
"variable_key",
",",
"user_id",
",",
"attributes",
"=",
"None",
")",
":",
"variable_type",
"=",
"entities",
".",
"Variable",
".",
"Type",
".",
"DOUBLE",
"return",
"self",
".",
"_get_feature_variable_for_type",
"(",
"feature_key",
",",
"variable_key",
",",
"variable_type",
",",
"user_id",
",",
"attributes",
")"
] |
Returns value for a certain double variable attached to a feature flag.
Args:
feature_key: Key of the feature whose variable's value is being accessed.
variable_key: Key of the variable whose value is to be accessed.
user_id: ID for user.
attributes: Dict representing user attributes.
Returns:
Double value of the variable. None if:
- Feature key is invalid.
- Variable key is invalid.
- Mismatch with type of variable.
|
[
"Returns",
"value",
"for",
"a",
"certain",
"double",
"variable",
"attached",
"to",
"a",
"feature",
"flag",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/optimizely.py#L526-L543
|
12,172
|
optimizely/python-sdk
|
optimizely/optimizely.py
|
Optimizely.get_feature_variable_integer
|
def get_feature_variable_integer(self, feature_key, variable_key, user_id, attributes=None):
""" Returns value for a certain integer variable attached to a feature flag.
Args:
feature_key: Key of the feature whose variable's value is being accessed.
variable_key: Key of the variable whose value is to be accessed.
user_id: ID for user.
attributes: Dict representing user attributes.
Returns:
Integer value of the variable. None if:
- Feature key is invalid.
- Variable key is invalid.
- Mismatch with type of variable.
"""
variable_type = entities.Variable.Type.INTEGER
return self._get_feature_variable_for_type(feature_key, variable_key, variable_type, user_id, attributes)
|
python
|
def get_feature_variable_integer(self, feature_key, variable_key, user_id, attributes=None):
""" Returns value for a certain integer variable attached to a feature flag.
Args:
feature_key: Key of the feature whose variable's value is being accessed.
variable_key: Key of the variable whose value is to be accessed.
user_id: ID for user.
attributes: Dict representing user attributes.
Returns:
Integer value of the variable. None if:
- Feature key is invalid.
- Variable key is invalid.
- Mismatch with type of variable.
"""
variable_type = entities.Variable.Type.INTEGER
return self._get_feature_variable_for_type(feature_key, variable_key, variable_type, user_id, attributes)
|
[
"def",
"get_feature_variable_integer",
"(",
"self",
",",
"feature_key",
",",
"variable_key",
",",
"user_id",
",",
"attributes",
"=",
"None",
")",
":",
"variable_type",
"=",
"entities",
".",
"Variable",
".",
"Type",
".",
"INTEGER",
"return",
"self",
".",
"_get_feature_variable_for_type",
"(",
"feature_key",
",",
"variable_key",
",",
"variable_type",
",",
"user_id",
",",
"attributes",
")"
] |
Returns value for a certain integer variable attached to a feature flag.
Args:
feature_key: Key of the feature whose variable's value is being accessed.
variable_key: Key of the variable whose value is to be accessed.
user_id: ID for user.
attributes: Dict representing user attributes.
Returns:
Integer value of the variable. None if:
- Feature key is invalid.
- Variable key is invalid.
- Mismatch with type of variable.
|
[
"Returns",
"value",
"for",
"a",
"certain",
"integer",
"variable",
"attached",
"to",
"a",
"feature",
"flag",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/optimizely.py#L545-L562
|
12,173
|
optimizely/python-sdk
|
optimizely/optimizely.py
|
Optimizely.get_feature_variable_string
|
def get_feature_variable_string(self, feature_key, variable_key, user_id, attributes=None):
""" Returns value for a certain string variable attached to a feature.
Args:
feature_key: Key of the feature whose variable's value is being accessed.
variable_key: Key of the variable whose value is to be accessed.
user_id: ID for user.
attributes: Dict representing user attributes.
Returns:
String value of the variable. None if:
- Feature key is invalid.
- Variable key is invalid.
- Mismatch with type of variable.
"""
variable_type = entities.Variable.Type.STRING
return self._get_feature_variable_for_type(feature_key, variable_key, variable_type, user_id, attributes)
|
python
|
def get_feature_variable_string(self, feature_key, variable_key, user_id, attributes=None):
""" Returns value for a certain string variable attached to a feature.
Args:
feature_key: Key of the feature whose variable's value is being accessed.
variable_key: Key of the variable whose value is to be accessed.
user_id: ID for user.
attributes: Dict representing user attributes.
Returns:
String value of the variable. None if:
- Feature key is invalid.
- Variable key is invalid.
- Mismatch with type of variable.
"""
variable_type = entities.Variable.Type.STRING
return self._get_feature_variable_for_type(feature_key, variable_key, variable_type, user_id, attributes)
|
[
"def",
"get_feature_variable_string",
"(",
"self",
",",
"feature_key",
",",
"variable_key",
",",
"user_id",
",",
"attributes",
"=",
"None",
")",
":",
"variable_type",
"=",
"entities",
".",
"Variable",
".",
"Type",
".",
"STRING",
"return",
"self",
".",
"_get_feature_variable_for_type",
"(",
"feature_key",
",",
"variable_key",
",",
"variable_type",
",",
"user_id",
",",
"attributes",
")"
] |
Returns value for a certain string variable attached to a feature.
Args:
feature_key: Key of the feature whose variable's value is being accessed.
variable_key: Key of the variable whose value is to be accessed.
user_id: ID for user.
attributes: Dict representing user attributes.
Returns:
String value of the variable. None if:
- Feature key is invalid.
- Variable key is invalid.
- Mismatch with type of variable.
|
[
"Returns",
"value",
"for",
"a",
"certain",
"string",
"variable",
"attached",
"to",
"a",
"feature",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/optimizely.py#L564-L581
|
12,174
|
optimizely/python-sdk
|
optimizely/optimizely.py
|
Optimizely.set_forced_variation
|
def set_forced_variation(self, experiment_key, user_id, variation_key):
""" Force a user into a variation for a given experiment.
Args:
experiment_key: A string key identifying the experiment.
user_id: The user ID.
variation_key: A string variation key that specifies the variation which the user
will be forced into. If null, then clear the existing experiment-to-variation mapping.
Returns:
A boolean value that indicates if the set completed successfully.
"""
if not self.is_valid:
self.logger.error(enums.Errors.INVALID_DATAFILE.format('set_forced_variation'))
return False
if not validator.is_non_empty_string(experiment_key):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('experiment_key'))
return False
if not isinstance(user_id, string_types):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id'))
return False
return self.config.set_forced_variation(experiment_key, user_id, variation_key)
|
python
|
def set_forced_variation(self, experiment_key, user_id, variation_key):
""" Force a user into a variation for a given experiment.
Args:
experiment_key: A string key identifying the experiment.
user_id: The user ID.
variation_key: A string variation key that specifies the variation which the user
will be forced into. If null, then clear the existing experiment-to-variation mapping.
Returns:
A boolean value that indicates if the set completed successfully.
"""
if not self.is_valid:
self.logger.error(enums.Errors.INVALID_DATAFILE.format('set_forced_variation'))
return False
if not validator.is_non_empty_string(experiment_key):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('experiment_key'))
return False
if not isinstance(user_id, string_types):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id'))
return False
return self.config.set_forced_variation(experiment_key, user_id, variation_key)
|
[
"def",
"set_forced_variation",
"(",
"self",
",",
"experiment_key",
",",
"user_id",
",",
"variation_key",
")",
":",
"if",
"not",
"self",
".",
"is_valid",
":",
"self",
".",
"logger",
".",
"error",
"(",
"enums",
".",
"Errors",
".",
"INVALID_DATAFILE",
".",
"format",
"(",
"'set_forced_variation'",
")",
")",
"return",
"False",
"if",
"not",
"validator",
".",
"is_non_empty_string",
"(",
"experiment_key",
")",
":",
"self",
".",
"logger",
".",
"error",
"(",
"enums",
".",
"Errors",
".",
"INVALID_INPUT_ERROR",
".",
"format",
"(",
"'experiment_key'",
")",
")",
"return",
"False",
"if",
"not",
"isinstance",
"(",
"user_id",
",",
"string_types",
")",
":",
"self",
".",
"logger",
".",
"error",
"(",
"enums",
".",
"Errors",
".",
"INVALID_INPUT_ERROR",
".",
"format",
"(",
"'user_id'",
")",
")",
"return",
"False",
"return",
"self",
".",
"config",
".",
"set_forced_variation",
"(",
"experiment_key",
",",
"user_id",
",",
"variation_key",
")"
] |
Force a user into a variation for a given experiment.
Args:
experiment_key: A string key identifying the experiment.
user_id: The user ID.
variation_key: A string variation key that specifies the variation which the user
will be forced into. If null, then clear the existing experiment-to-variation mapping.
Returns:
A boolean value that indicates if the set completed successfully.
|
[
"Force",
"a",
"user",
"into",
"a",
"variation",
"for",
"a",
"given",
"experiment",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/optimizely.py#L583-L608
|
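A usage sketch for set_forced_variation as recorded above, with hypothetical keys and an assumed client; per the docstring, passing None clears the experiment-to-variation mapping.
from optimizely import optimizely

client = optimizely.Optimizely(open('datafile.json').read())   # hypothetical datafile path
ok = client.set_forced_variation('checkout_redesign', 'qa_user_1', 'treatment')   # pin for QA
client.set_forced_variation('checkout_redesign', 'qa_user_1', None)               # clear the mapping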
12,175
|
optimizely/python-sdk
|
optimizely/optimizely.py
|
Optimizely.get_forced_variation
|
def get_forced_variation(self, experiment_key, user_id):
""" Gets the forced variation for a given user and experiment.
Args:
experiment_key: A string key identifying the experiment.
user_id: The user ID.
Returns:
The forced variation key. None if no forced variation key.
"""
if not self.is_valid:
self.logger.error(enums.Errors.INVALID_DATAFILE.format('get_forced_variation'))
return None
if not validator.is_non_empty_string(experiment_key):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('experiment_key'))
return None
if not isinstance(user_id, string_types):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id'))
return None
forced_variation = self.config.get_forced_variation(experiment_key, user_id)
return forced_variation.key if forced_variation else None
|
python
|
def get_forced_variation(self, experiment_key, user_id):
""" Gets the forced variation for a given user and experiment.
Args:
experiment_key: A string key identifying the experiment.
user_id: The user ID.
Returns:
The forced variation key. None if no forced variation key.
"""
if not self.is_valid:
self.logger.error(enums.Errors.INVALID_DATAFILE.format('get_forced_variation'))
return None
if not validator.is_non_empty_string(experiment_key):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('experiment_key'))
return None
if not isinstance(user_id, string_types):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id'))
return None
forced_variation = self.config.get_forced_variation(experiment_key, user_id)
return forced_variation.key if forced_variation else None
|
[
"def",
"get_forced_variation",
"(",
"self",
",",
"experiment_key",
",",
"user_id",
")",
":",
"if",
"not",
"self",
".",
"is_valid",
":",
"self",
".",
"logger",
".",
"error",
"(",
"enums",
".",
"Errors",
".",
"INVALID_DATAFILE",
".",
"format",
"(",
"'get_forced_variation'",
")",
")",
"return",
"None",
"if",
"not",
"validator",
".",
"is_non_empty_string",
"(",
"experiment_key",
")",
":",
"self",
".",
"logger",
".",
"error",
"(",
"enums",
".",
"Errors",
".",
"INVALID_INPUT_ERROR",
".",
"format",
"(",
"'experiment_key'",
")",
")",
"return",
"None",
"if",
"not",
"isinstance",
"(",
"user_id",
",",
"string_types",
")",
":",
"self",
".",
"logger",
".",
"error",
"(",
"enums",
".",
"Errors",
".",
"INVALID_INPUT_ERROR",
".",
"format",
"(",
"'user_id'",
")",
")",
"return",
"None",
"forced_variation",
"=",
"self",
".",
"config",
".",
"get_forced_variation",
"(",
"experiment_key",
",",
"user_id",
")",
"return",
"forced_variation",
".",
"key",
"if",
"forced_variation",
"else",
"None"
] |
Gets the forced variation for a given user and experiment.
Args:
experiment_key: A string key identifying the experiment.
user_id: The user ID.
Returns:
The forced variation key. None if no forced variation key.
|
[
"Gets",
"the",
"forced",
"variation",
"for",
"a",
"given",
"user",
"and",
"experiment",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/optimizely.py#L610-L634
|
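A usage sketch for get_forced_variation as recorded above, reading back a mapping such as the one pinned in the previous sketch.
from optimizely import optimizely

client = optimizely.Optimizely(open('datafile.json').read())   # hypothetical datafile path
forced = client.get_forced_variation('checkout_redesign', 'qa_user_1')
# The variation key set via set_forced_variation, or None when no forced mapping exists.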
12,176
|
optimizely/python-sdk
|
optimizely/helpers/audience.py
|
is_user_in_experiment
|
def is_user_in_experiment(config, experiment, attributes, logger):
""" Determine for given experiment if user satisfies the audiences for the experiment.
Args:
config: project_config.ProjectConfig object representing the project.
experiment: Object representing the experiment.
attributes: Dict representing user attributes which will be used in determining
if the audience conditions are met. If not provided, default to an empty dict.
logger: Provides a logger to send log messages to.
Returns:
Boolean representing if user satisfies audience conditions for any of the audiences or not.
"""
audience_conditions = experiment.getAudienceConditionsOrIds()
logger.debug(audience_logs.EVALUATING_AUDIENCES_COMBINED.format(
experiment.key,
json.dumps(audience_conditions)
))
# Return True in case there are no audiences
if audience_conditions is None or audience_conditions == []:
logger.info(audience_logs.AUDIENCE_EVALUATION_RESULT_COMBINED.format(
experiment.key,
'TRUE'
))
return True
if attributes is None:
attributes = {}
def evaluate_custom_attr(audienceId, index):
audience = config.get_audience(audienceId)
custom_attr_condition_evaluator = condition_helper.CustomAttributeConditionEvaluator(
audience.conditionList, attributes, logger)
return custom_attr_condition_evaluator.evaluate(index)
def evaluate_audience(audienceId):
audience = config.get_audience(audienceId)
if audience is None:
return None
logger.debug(audience_logs.EVALUATING_AUDIENCE.format(audienceId, audience.conditions))
result = condition_tree_evaluator.evaluate(
audience.conditionStructure,
lambda index: evaluate_custom_attr(audienceId, index)
)
result_str = str(result).upper() if result is not None else 'UNKNOWN'
logger.info(audience_logs.AUDIENCE_EVALUATION_RESULT.format(audienceId, result_str))
return result
eval_result = condition_tree_evaluator.evaluate(
audience_conditions,
evaluate_audience
)
eval_result = eval_result or False
logger.info(audience_logs.AUDIENCE_EVALUATION_RESULT_COMBINED.format(
experiment.key,
str(eval_result).upper()
))
return eval_result
|
python
|
def is_user_in_experiment(config, experiment, attributes, logger):
""" Determine for given experiment if user satisfies the audiences for the experiment.
Args:
config: project_config.ProjectConfig object representing the project.
experiment: Object representing the experiment.
attributes: Dict representing user attributes which will be used in determining
if the audience conditions are met. If not provided, default to an empty dict.
logger: Provides a logger to send log messages to.
Returns:
Boolean representing if user satisfies audience conditions for any of the audiences or not.
"""
audience_conditions = experiment.getAudienceConditionsOrIds()
logger.debug(audience_logs.EVALUATING_AUDIENCES_COMBINED.format(
experiment.key,
json.dumps(audience_conditions)
))
# Return True in case there are no audiences
if audience_conditions is None or audience_conditions == []:
logger.info(audience_logs.AUDIENCE_EVALUATION_RESULT_COMBINED.format(
experiment.key,
'TRUE'
))
return True
if attributes is None:
attributes = {}
def evaluate_custom_attr(audienceId, index):
audience = config.get_audience(audienceId)
custom_attr_condition_evaluator = condition_helper.CustomAttributeConditionEvaluator(
audience.conditionList, attributes, logger)
return custom_attr_condition_evaluator.evaluate(index)
def evaluate_audience(audienceId):
audience = config.get_audience(audienceId)
if audience is None:
return None
logger.debug(audience_logs.EVALUATING_AUDIENCE.format(audienceId, audience.conditions))
result = condition_tree_evaluator.evaluate(
audience.conditionStructure,
lambda index: evaluate_custom_attr(audienceId, index)
)
result_str = str(result).upper() if result is not None else 'UNKNOWN'
logger.info(audience_logs.AUDIENCE_EVALUATION_RESULT.format(audienceId, result_str))
return result
eval_result = condition_tree_evaluator.evaluate(
audience_conditions,
evaluate_audience
)
eval_result = eval_result or False
logger.info(audience_logs.AUDIENCE_EVALUATION_RESULT_COMBINED.format(
experiment.key,
str(eval_result).upper()
))
return eval_result
|
[
"def",
"is_user_in_experiment",
"(",
"config",
",",
"experiment",
",",
"attributes",
",",
"logger",
")",
":",
"audience_conditions",
"=",
"experiment",
".",
"getAudienceConditionsOrIds",
"(",
")",
"logger",
".",
"debug",
"(",
"audience_logs",
".",
"EVALUATING_AUDIENCES_COMBINED",
".",
"format",
"(",
"experiment",
".",
"key",
",",
"json",
".",
"dumps",
"(",
"audience_conditions",
")",
")",
")",
"# Return True in case there are no audiences",
"if",
"audience_conditions",
"is",
"None",
"or",
"audience_conditions",
"==",
"[",
"]",
":",
"logger",
".",
"info",
"(",
"audience_logs",
".",
"AUDIENCE_EVALUATION_RESULT_COMBINED",
".",
"format",
"(",
"experiment",
".",
"key",
",",
"'TRUE'",
")",
")",
"return",
"True",
"if",
"attributes",
"is",
"None",
":",
"attributes",
"=",
"{",
"}",
"def",
"evaluate_custom_attr",
"(",
"audienceId",
",",
"index",
")",
":",
"audience",
"=",
"config",
".",
"get_audience",
"(",
"audienceId",
")",
"custom_attr_condition_evaluator",
"=",
"condition_helper",
".",
"CustomAttributeConditionEvaluator",
"(",
"audience",
".",
"conditionList",
",",
"attributes",
",",
"logger",
")",
"return",
"custom_attr_condition_evaluator",
".",
"evaluate",
"(",
"index",
")",
"def",
"evaluate_audience",
"(",
"audienceId",
")",
":",
"audience",
"=",
"config",
".",
"get_audience",
"(",
"audienceId",
")",
"if",
"audience",
"is",
"None",
":",
"return",
"None",
"logger",
".",
"debug",
"(",
"audience_logs",
".",
"EVALUATING_AUDIENCE",
".",
"format",
"(",
"audienceId",
",",
"audience",
".",
"conditions",
")",
")",
"result",
"=",
"condition_tree_evaluator",
".",
"evaluate",
"(",
"audience",
".",
"conditionStructure",
",",
"lambda",
"index",
":",
"evaluate_custom_attr",
"(",
"audienceId",
",",
"index",
")",
")",
"result_str",
"=",
"str",
"(",
"result",
")",
".",
"upper",
"(",
")",
"if",
"result",
"is",
"not",
"None",
"else",
"'UNKNOWN'",
"logger",
".",
"info",
"(",
"audience_logs",
".",
"AUDIENCE_EVALUATION_RESULT",
".",
"format",
"(",
"audienceId",
",",
"result_str",
")",
")",
"return",
"result",
"eval_result",
"=",
"condition_tree_evaluator",
".",
"evaluate",
"(",
"audience_conditions",
",",
"evaluate_audience",
")",
"eval_result",
"=",
"eval_result",
"or",
"False",
"logger",
".",
"info",
"(",
"audience_logs",
".",
"AUDIENCE_EVALUATION_RESULT_COMBINED",
".",
"format",
"(",
"experiment",
".",
"key",
",",
"str",
"(",
"eval_result",
")",
".",
"upper",
"(",
")",
")",
")",
"return",
"eval_result"
] |
Determine for given experiment if user satisfies the audiences for the experiment.
Args:
config: project_config.ProjectConfig object representing the project.
experiment: Object representing the experiment.
attributes: Dict representing user attributes which will be used in determining
if the audience conditions are met. If not provided, default to an empty dict.
logger: Provides a logger to send log messages to.
Returns:
Boolean representing if user satisfies audience conditions for any of the audiences or not.
|
[
"Determine",
"for",
"given",
"experiment",
"if",
"user",
"satisfies",
"the",
"audiences",
"for",
"the",
"experiment",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/helpers/audience.py#L21-L91
|
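A toy stand-in (not SDK code) for the result handling in is_user_in_experiment above: leaf audiences evaluate to True, False, or None, an OR over them stays None when nothing is True but something is unknown, and the helper's final `eval_result or False` coerces that None to False.
def evaluate_or(leaf_results):
    # Mimics three-valued OR: True wins; otherwise None if anything was unknown, else False.
    saw_none = False
    for result in leaf_results:
        if result is True:
            return True
        if result is None:
            saw_none = True
    return None if saw_none else False

eval_result = evaluate_or([None, False])   # one unevaluable audience, one failed audience
eval_result = eval_result or False         # same coercion as the helper's final step
print(eval_result)                         # False: the user is treated as not in the experiment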
12,177
|
optimizely/python-sdk
|
optimizely/event_builder.py
|
BaseEventBuilder._get_common_params
|
def _get_common_params(self, user_id, attributes):
""" Get params which are used same in both conversion and impression events.
Args:
user_id: ID for user.
attributes: Dict representing user attributes and values which need to be recorded.
Returns:
Dict consisting of parameters common to both impression and conversion events.
"""
commonParams = {}
commonParams[self.EventParams.PROJECT_ID] = self._get_project_id()
commonParams[self.EventParams.ACCOUNT_ID] = self._get_account_id()
visitor = {}
visitor[self.EventParams.END_USER_ID] = user_id
visitor[self.EventParams.SNAPSHOTS] = []
commonParams[self.EventParams.USERS] = []
commonParams[self.EventParams.USERS].append(visitor)
commonParams[self.EventParams.USERS][0][self.EventParams.ATTRIBUTES] = self._get_attributes(attributes)
commonParams[self.EventParams.SOURCE_SDK_TYPE] = 'python-sdk'
commonParams[self.EventParams.ENRICH_DECISIONS] = True
commonParams[self.EventParams.SOURCE_SDK_VERSION] = version.__version__
commonParams[self.EventParams.ANONYMIZE_IP] = self._get_anonymize_ip()
commonParams[self.EventParams.REVISION] = self._get_revision()
return commonParams
|
python
|
def _get_common_params(self, user_id, attributes):
""" Get params which are used same in both conversion and impression events.
Args:
user_id: ID for user.
attributes: Dict representing user attributes and values which need to be recorded.
Returns:
Dict consisting of parameters common to both impression and conversion events.
"""
commonParams = {}
commonParams[self.EventParams.PROJECT_ID] = self._get_project_id()
commonParams[self.EventParams.ACCOUNT_ID] = self._get_account_id()
visitor = {}
visitor[self.EventParams.END_USER_ID] = user_id
visitor[self.EventParams.SNAPSHOTS] = []
commonParams[self.EventParams.USERS] = []
commonParams[self.EventParams.USERS].append(visitor)
commonParams[self.EventParams.USERS][0][self.EventParams.ATTRIBUTES] = self._get_attributes(attributes)
commonParams[self.EventParams.SOURCE_SDK_TYPE] = 'python-sdk'
commonParams[self.EventParams.ENRICH_DECISIONS] = True
commonParams[self.EventParams.SOURCE_SDK_VERSION] = version.__version__
commonParams[self.EventParams.ANONYMIZE_IP] = self._get_anonymize_ip()
commonParams[self.EventParams.REVISION] = self._get_revision()
return commonParams
|
[
"def",
"_get_common_params",
"(",
"self",
",",
"user_id",
",",
"attributes",
")",
":",
"commonParams",
"=",
"{",
"}",
"commonParams",
"[",
"self",
".",
"EventParams",
".",
"PROJECT_ID",
"]",
"=",
"self",
".",
"_get_project_id",
"(",
")",
"commonParams",
"[",
"self",
".",
"EventParams",
".",
"ACCOUNT_ID",
"]",
"=",
"self",
".",
"_get_account_id",
"(",
")",
"visitor",
"=",
"{",
"}",
"visitor",
"[",
"self",
".",
"EventParams",
".",
"END_USER_ID",
"]",
"=",
"user_id",
"visitor",
"[",
"self",
".",
"EventParams",
".",
"SNAPSHOTS",
"]",
"=",
"[",
"]",
"commonParams",
"[",
"self",
".",
"EventParams",
".",
"USERS",
"]",
"=",
"[",
"]",
"commonParams",
"[",
"self",
".",
"EventParams",
".",
"USERS",
"]",
".",
"append",
"(",
"visitor",
")",
"commonParams",
"[",
"self",
".",
"EventParams",
".",
"USERS",
"]",
"[",
"0",
"]",
"[",
"self",
".",
"EventParams",
".",
"ATTRIBUTES",
"]",
"=",
"self",
".",
"_get_attributes",
"(",
"attributes",
")",
"commonParams",
"[",
"self",
".",
"EventParams",
".",
"SOURCE_SDK_TYPE",
"]",
"=",
"'python-sdk'",
"commonParams",
"[",
"self",
".",
"EventParams",
".",
"ENRICH_DECISIONS",
"]",
"=",
"True",
"commonParams",
"[",
"self",
".",
"EventParams",
".",
"SOURCE_SDK_VERSION",
"]",
"=",
"version",
".",
"__version__",
"commonParams",
"[",
"self",
".",
"EventParams",
".",
"ANONYMIZE_IP",
"]",
"=",
"self",
".",
"_get_anonymize_ip",
"(",
")",
"commonParams",
"[",
"self",
".",
"EventParams",
".",
"REVISION",
"]",
"=",
"self",
".",
"_get_revision",
"(",
")",
"return",
"commonParams"
] |
Get params which are used same in both conversion and impression events.
Args:
user_id: ID for user.
attributes: Dict representing user attributes and values which need to be recorded.
Returns:
Dict consisting of parameters common to both impression and conversion events.
|
[
"Get",
"params",
"which",
"are",
"used",
"same",
"in",
"both",
"conversion",
"and",
"impression",
"events",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/event_builder.py#L109-L138
|
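A minimal sketch of the payload that _get_common_params assembles, for orientation; the literal key strings stand in for the EventParams constants and the sample values are placeholders, neither taken from the record above.

# Hypothetical shape of the dict returned by _get_common_params (key names assumed).
common_params = {
    'project_id': '111001',
    'account_id': '12001',
    'visitors': [{
        'visitor_id': 'user-1',
        'snapshots': [],      # filled in later by the impression/conversion helpers
        'attributes': [],     # result of self._get_attributes(attributes)
    }],
    'client_name': 'python-sdk',
    'client_version': '3.0.0',
    'enrich_decisions': True,
    'anonymize_ip': False,
    'revision': '42',
}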
12,178
|
optimizely/python-sdk
|
optimizely/event_builder.py
|
EventBuilder._get_required_params_for_impression
|
def _get_required_params_for_impression(self, experiment, variation_id):
""" Get parameters that are required for the impression event to register.
Args:
experiment: Experiment for which impression needs to be recorded.
variation_id: ID for variation which would be presented to user.
Returns:
Dict consisting of decisions and events info for impression event.
"""
snapshot = {}
snapshot[self.EventParams.DECISIONS] = [{
self.EventParams.EXPERIMENT_ID: experiment.id,
self.EventParams.VARIATION_ID: variation_id,
self.EventParams.CAMPAIGN_ID: experiment.layerId
}]
snapshot[self.EventParams.EVENTS] = [{
self.EventParams.EVENT_ID: experiment.layerId,
self.EventParams.TIME: self._get_time(),
self.EventParams.KEY: 'campaign_activated',
self.EventParams.UUID: str(uuid.uuid4())
}]
return snapshot
|
python
|
def _get_required_params_for_impression(self, experiment, variation_id):
""" Get parameters that are required for the impression event to register.
Args:
experiment: Experiment for which impression needs to be recorded.
variation_id: ID for variation which would be presented to user.
Returns:
Dict consisting of decisions and events info for impression event.
"""
snapshot = {}
snapshot[self.EventParams.DECISIONS] = [{
self.EventParams.EXPERIMENT_ID: experiment.id,
self.EventParams.VARIATION_ID: variation_id,
self.EventParams.CAMPAIGN_ID: experiment.layerId
}]
snapshot[self.EventParams.EVENTS] = [{
self.EventParams.EVENT_ID: experiment.layerId,
self.EventParams.TIME: self._get_time(),
self.EventParams.KEY: 'campaign_activated',
self.EventParams.UUID: str(uuid.uuid4())
}]
return snapshot
|
[
"def",
"_get_required_params_for_impression",
"(",
"self",
",",
"experiment",
",",
"variation_id",
")",
":",
"snapshot",
"=",
"{",
"}",
"snapshot",
"[",
"self",
".",
"EventParams",
".",
"DECISIONS",
"]",
"=",
"[",
"{",
"self",
".",
"EventParams",
".",
"EXPERIMENT_ID",
":",
"experiment",
".",
"id",
",",
"self",
".",
"EventParams",
".",
"VARIATION_ID",
":",
"variation_id",
",",
"self",
".",
"EventParams",
".",
"CAMPAIGN_ID",
":",
"experiment",
".",
"layerId",
"}",
"]",
"snapshot",
"[",
"self",
".",
"EventParams",
".",
"EVENTS",
"]",
"=",
"[",
"{",
"self",
".",
"EventParams",
".",
"EVENT_ID",
":",
"experiment",
".",
"layerId",
",",
"self",
".",
"EventParams",
".",
"TIME",
":",
"self",
".",
"_get_time",
"(",
")",
",",
"self",
".",
"EventParams",
".",
"KEY",
":",
"'campaign_activated'",
",",
"self",
".",
"EventParams",
".",
"UUID",
":",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
"}",
"]",
"return",
"snapshot"
] |
Get parameters that are required for the impression event to register.
Args:
experiment: Experiment for which impression needs to be recorded.
variation_id: ID for variation which would be presented to user.
Returns:
Dict consisting of decisions and events info for impression event.
|
[
"Get",
"parameters",
"that",
"are",
"required",
"for",
"the",
"impression",
"event",
"to",
"register",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/event_builder.py#L211-L236
|
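For reference, a sketch of the snapshot that _get_required_params_for_impression returns; the key strings are illustrative stand-ins for the EventParams constants.

import time
import uuid

# Assumed shape of the impression snapshot (key names are illustrative only).
impression_snapshot = {
    'decisions': [{
        'experiment_id': '15001',
        'variation_id': '15002',
        'campaign_id': '15003',                       # experiment.layerId
    }],
    'events': [{
        'entity_id': '15003',                         # also experiment.layerId
        'timestamp': int(round(time.time() * 1000)),  # stand-in for self._get_time()
        'key': 'campaign_activated',
        'uuid': str(uuid.uuid4()),
    }],
}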
12,179
|
optimizely/python-sdk
|
optimizely/event_builder.py
|
EventBuilder._get_required_params_for_conversion
|
def _get_required_params_for_conversion(self, event_key, event_tags):
""" Get parameters that are required for the conversion event to register.
Args:
event_key: Key representing the event which needs to be recorded.
event_tags: Dict representing metadata associated with the event.
Returns:
Dict consisting of the decisions and events info for conversion event.
"""
snapshot = {}
event_dict = {
self.EventParams.EVENT_ID: self.config.get_event(event_key).id,
self.EventParams.TIME: self._get_time(),
self.EventParams.KEY: event_key,
self.EventParams.UUID: str(uuid.uuid4())
}
if event_tags:
revenue_value = event_tag_utils.get_revenue_value(event_tags)
if revenue_value is not None:
event_dict[event_tag_utils.REVENUE_METRIC_TYPE] = revenue_value
numeric_value = event_tag_utils.get_numeric_value(event_tags, self.config.logger)
if numeric_value is not None:
event_dict[event_tag_utils.NUMERIC_METRIC_TYPE] = numeric_value
if len(event_tags) > 0:
event_dict[self.EventParams.TAGS] = event_tags
snapshot[self.EventParams.EVENTS] = [event_dict]
return snapshot
|
python
|
def _get_required_params_for_conversion(self, event_key, event_tags):
""" Get parameters that are required for the conversion event to register.
Args:
event_key: Key representing the event which needs to be recorded.
event_tags: Dict representing metadata associated with the event.
Returns:
Dict consisting of the decisions and events info for conversion event.
"""
snapshot = {}
event_dict = {
self.EventParams.EVENT_ID: self.config.get_event(event_key).id,
self.EventParams.TIME: self._get_time(),
self.EventParams.KEY: event_key,
self.EventParams.UUID: str(uuid.uuid4())
}
if event_tags:
revenue_value = event_tag_utils.get_revenue_value(event_tags)
if revenue_value is not None:
event_dict[event_tag_utils.REVENUE_METRIC_TYPE] = revenue_value
numeric_value = event_tag_utils.get_numeric_value(event_tags, self.config.logger)
if numeric_value is not None:
event_dict[event_tag_utils.NUMERIC_METRIC_TYPE] = numeric_value
if len(event_tags) > 0:
event_dict[self.EventParams.TAGS] = event_tags
snapshot[self.EventParams.EVENTS] = [event_dict]
return snapshot
|
[
"def",
"_get_required_params_for_conversion",
"(",
"self",
",",
"event_key",
",",
"event_tags",
")",
":",
"snapshot",
"=",
"{",
"}",
"event_dict",
"=",
"{",
"self",
".",
"EventParams",
".",
"EVENT_ID",
":",
"self",
".",
"config",
".",
"get_event",
"(",
"event_key",
")",
".",
"id",
",",
"self",
".",
"EventParams",
".",
"TIME",
":",
"self",
".",
"_get_time",
"(",
")",
",",
"self",
".",
"EventParams",
".",
"KEY",
":",
"event_key",
",",
"self",
".",
"EventParams",
".",
"UUID",
":",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
"}",
"if",
"event_tags",
":",
"revenue_value",
"=",
"event_tag_utils",
".",
"get_revenue_value",
"(",
"event_tags",
")",
"if",
"revenue_value",
"is",
"not",
"None",
":",
"event_dict",
"[",
"event_tag_utils",
".",
"REVENUE_METRIC_TYPE",
"]",
"=",
"revenue_value",
"numeric_value",
"=",
"event_tag_utils",
".",
"get_numeric_value",
"(",
"event_tags",
",",
"self",
".",
"config",
".",
"logger",
")",
"if",
"numeric_value",
"is",
"not",
"None",
":",
"event_dict",
"[",
"event_tag_utils",
".",
"NUMERIC_METRIC_TYPE",
"]",
"=",
"numeric_value",
"if",
"len",
"(",
"event_tags",
")",
">",
"0",
":",
"event_dict",
"[",
"self",
".",
"EventParams",
".",
"TAGS",
"]",
"=",
"event_tags",
"snapshot",
"[",
"self",
".",
"EventParams",
".",
"EVENTS",
"]",
"=",
"[",
"event_dict",
"]",
"return",
"snapshot"
] |
Get parameters that are required for the conversion event to register.
Args:
event_key: Key representing the event which needs to be recorded.
event_tags: Dict representing metadata associated with the event.
Returns:
Dict consisting of the decisions and events info for conversion event.
|
[
"Get",
"parameters",
"that",
"are",
"required",
"for",
"the",
"conversion",
"event",
"to",
"register",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/event_builder.py#L238-L270
|
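A hedged worked example for _get_required_params_for_conversion: reserved event tags are promoted into the event dict while the full tag dict is kept under the TAGS key. The 'revenue' and 'value' tag names and the output key strings are assumptions about event_tag_utils, not read from the record.

# Illustrative input and approximate output for the conversion helper above.
event_tags = {'revenue': 4200, 'value': 13.37, 'category': 'shoes'}
conversion_event = {
    'entity_id': '16001',    # self.config.get_event(event_key).id
    'key': 'purchase',       # event_key
    'revenue': 4200,         # promoted via get_revenue_value(event_tags)
    'value': 13.37,          # promoted via get_numeric_value(event_tags, logger)
    'tags': event_tags,      # the untouched tag dict
}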
12,180
|
optimizely/python-sdk
|
optimizely/event_builder.py
|
EventBuilder.create_impression_event
|
def create_impression_event(self, experiment, variation_id, user_id, attributes):
""" Create impression Event to be sent to the logging endpoint.
Args:
experiment: Experiment for which impression needs to be recorded.
variation_id: ID for variation which would be presented to user.
user_id: ID for user.
attributes: Dict representing user attributes and values which need to be recorded.
Returns:
Event object encapsulating the impression event.
"""
params = self._get_common_params(user_id, attributes)
impression_params = self._get_required_params_for_impression(experiment, variation_id)
params[self.EventParams.USERS][0][self.EventParams.SNAPSHOTS].append(impression_params)
return Event(self.EVENTS_URL,
params,
http_verb=self.HTTP_VERB,
headers=self.HTTP_HEADERS)
|
python
|
def create_impression_event(self, experiment, variation_id, user_id, attributes):
""" Create impression Event to be sent to the logging endpoint.
Args:
experiment: Experiment for which impression needs to be recorded.
variation_id: ID for variation which would be presented to user.
user_id: ID for user.
attributes: Dict representing user attributes and values which need to be recorded.
Returns:
Event object encapsulating the impression event.
"""
params = self._get_common_params(user_id, attributes)
impression_params = self._get_required_params_for_impression(experiment, variation_id)
params[self.EventParams.USERS][0][self.EventParams.SNAPSHOTS].append(impression_params)
return Event(self.EVENTS_URL,
params,
http_verb=self.HTTP_VERB,
headers=self.HTTP_HEADERS)
|
[
"def",
"create_impression_event",
"(",
"self",
",",
"experiment",
",",
"variation_id",
",",
"user_id",
",",
"attributes",
")",
":",
"params",
"=",
"self",
".",
"_get_common_params",
"(",
"user_id",
",",
"attributes",
")",
"impression_params",
"=",
"self",
".",
"_get_required_params_for_impression",
"(",
"experiment",
",",
"variation_id",
")",
"params",
"[",
"self",
".",
"EventParams",
".",
"USERS",
"]",
"[",
"0",
"]",
"[",
"self",
".",
"EventParams",
".",
"SNAPSHOTS",
"]",
".",
"append",
"(",
"impression_params",
")",
"return",
"Event",
"(",
"self",
".",
"EVENTS_URL",
",",
"params",
",",
"http_verb",
"=",
"self",
".",
"HTTP_VERB",
",",
"headers",
"=",
"self",
".",
"HTTP_HEADERS",
")"
] |
Create impression Event to be sent to the logging endpoint.
Args:
experiment: Experiment for which impression needs to be recorded.
variation_id: ID for variation which would be presented to user.
user_id: ID for user.
attributes: Dict representing user attributes and values which need to be recorded.
Returns:
Event object encapsulating the impression event.
|
[
"Create",
"impression",
"Event",
"to",
"be",
"sent",
"to",
"the",
"logging",
"endpoint",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/event_builder.py#L272-L293
|
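A self-contained sketch of what create_impression_event does with the two helpers above; the key strings are illustrative stand-ins for the EventParams constants.

params = {'visitors': [{'snapshots': [], 'attributes': []}]}   # _get_common_params(...)
impression = {
    'decisions': [{'experiment_id': '1', 'variation_id': '2', 'campaign_id': '3'}],
    'events': [{'key': 'campaign_activated'}],
}                                                              # _get_required_params_for_impression(...)
params['visitors'][0]['snapshots'].append(impression)
# The filled-in params dict is then wrapped in Event(EVENTS_URL, params, http_verb, headers)
# and handed to the event dispatcher for a POST.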
12,181
|
optimizely/python-sdk
|
optimizely/event_builder.py
|
EventBuilder.create_conversion_event
|
def create_conversion_event(self, event_key, user_id, attributes, event_tags):
""" Create conversion Event to be sent to the logging endpoint.
Args:
event_key: Key representing the event which needs to be recorded.
user_id: ID for user.
attributes: Dict representing user attributes and values.
event_tags: Dict representing metadata associated with the event.
Returns:
Event object encapsulating the conversion event.
"""
params = self._get_common_params(user_id, attributes)
conversion_params = self._get_required_params_for_conversion(event_key, event_tags)
params[self.EventParams.USERS][0][self.EventParams.SNAPSHOTS].append(conversion_params)
return Event(self.EVENTS_URL,
params,
http_verb=self.HTTP_VERB,
headers=self.HTTP_HEADERS)
|
python
|
def create_conversion_event(self, event_key, user_id, attributes, event_tags):
""" Create conversion Event to be sent to the logging endpoint.
Args:
event_key: Key representing the event which needs to be recorded.
user_id: ID for user.
attributes: Dict representing user attributes and values.
event_tags: Dict representing metadata associated with the event.
Returns:
Event object encapsulating the conversion event.
"""
params = self._get_common_params(user_id, attributes)
conversion_params = self._get_required_params_for_conversion(event_key, event_tags)
params[self.EventParams.USERS][0][self.EventParams.SNAPSHOTS].append(conversion_params)
return Event(self.EVENTS_URL,
params,
http_verb=self.HTTP_VERB,
headers=self.HTTP_HEADERS)
|
[
"def",
"create_conversion_event",
"(",
"self",
",",
"event_key",
",",
"user_id",
",",
"attributes",
",",
"event_tags",
")",
":",
"params",
"=",
"self",
".",
"_get_common_params",
"(",
"user_id",
",",
"attributes",
")",
"conversion_params",
"=",
"self",
".",
"_get_required_params_for_conversion",
"(",
"event_key",
",",
"event_tags",
")",
"params",
"[",
"self",
".",
"EventParams",
".",
"USERS",
"]",
"[",
"0",
"]",
"[",
"self",
".",
"EventParams",
".",
"SNAPSHOTS",
"]",
".",
"append",
"(",
"conversion_params",
")",
"return",
"Event",
"(",
"self",
".",
"EVENTS_URL",
",",
"params",
",",
"http_verb",
"=",
"self",
".",
"HTTP_VERB",
",",
"headers",
"=",
"self",
".",
"HTTP_HEADERS",
")"
] |
Create conversion Event to be sent to the logging endpoint.
Args:
event_key: Key representing the event which needs to be recorded.
user_id: ID for user.
attributes: Dict representing user attributes and values.
event_tags: Dict representing metadata associated with the event.
Returns:
Event object encapsulating the conversion event.
|
[
"Create",
"conversion",
"Event",
"to",
"be",
"sent",
"to",
"the",
"logging",
"endpoint",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/event_builder.py#L295-L315
|
12,182
|
optimizely/python-sdk
|
optimizely/helpers/condition.py
|
_audience_condition_deserializer
|
def _audience_condition_deserializer(obj_dict):
""" Deserializer defining how dict objects need to be decoded for audience conditions.
Args:
obj_dict: Dict representing one audience condition.
Returns:
List consisting of condition key with corresponding value, type and match.
"""
return [
obj_dict.get('name'),
obj_dict.get('value'),
obj_dict.get('type'),
obj_dict.get('match')
]
|
python
|
def _audience_condition_deserializer(obj_dict):
""" Deserializer defining how dict objects need to be decoded for audience conditions.
Args:
obj_dict: Dict representing one audience condition.
Returns:
List consisting of condition key with corresponding value, type and match.
"""
return [
obj_dict.get('name'),
obj_dict.get('value'),
obj_dict.get('type'),
obj_dict.get('match')
]
|
[
"def",
"_audience_condition_deserializer",
"(",
"obj_dict",
")",
":",
"return",
"[",
"obj_dict",
".",
"get",
"(",
"'name'",
")",
",",
"obj_dict",
".",
"get",
"(",
"'value'",
")",
",",
"obj_dict",
".",
"get",
"(",
"'type'",
")",
",",
"obj_dict",
".",
"get",
"(",
"'match'",
")",
"]"
] |
Deserializer defining how dict objects need to be decoded for audience conditions.
Args:
obj_dict: Dict representing one audience condition.
Returns:
List consisting of condition key with corresponding value, type and match.
|
[
"Deserializer",
"defining",
"how",
"dict",
"objects",
"need",
"to",
"be",
"decoded",
"for",
"audience",
"conditions",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/helpers/condition.py#L328-L342
|
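A quick worked example of the deserializer above, assuming a typical custom-attribute condition dict:

condition = {
    'name': 'browser_type',
    'value': 'safari',
    'type': 'custom_attribute',
    'match': 'exact',
}
# _audience_condition_deserializer(condition)
# -> ['browser_type', 'safari', 'custom_attribute', 'exact']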
12,183
|
optimizely/python-sdk
|
optimizely/helpers/condition.py
|
CustomAttributeConditionEvaluator._get_condition_json
|
def _get_condition_json(self, index):
""" Method to generate json for logging audience condition.
Args:
index: Index of the condition.
Returns:
String: Audience condition JSON.
"""
condition = self.condition_data[index]
condition_log = {
'name': condition[0],
'value': condition[1],
'type': condition[2],
'match': condition[3]
}
return json.dumps(condition_log)
|
python
|
def _get_condition_json(self, index):
""" Method to generate json for logging audience condition.
Args:
index: Index of the condition.
Returns:
String: Audience condition JSON.
"""
condition = self.condition_data[index]
condition_log = {
'name': condition[0],
'value': condition[1],
'type': condition[2],
'match': condition[3]
}
return json.dumps(condition_log)
|
[
"def",
"_get_condition_json",
"(",
"self",
",",
"index",
")",
":",
"condition",
"=",
"self",
".",
"condition_data",
"[",
"index",
"]",
"condition_log",
"=",
"{",
"'name'",
":",
"condition",
"[",
"0",
"]",
",",
"'value'",
":",
"condition",
"[",
"1",
"]",
",",
"'type'",
":",
"condition",
"[",
"2",
"]",
",",
"'match'",
":",
"condition",
"[",
"3",
"]",
"}",
"return",
"json",
".",
"dumps",
"(",
"condition_log",
")"
] |
Method to generate json for logging audience condition.
Args:
index: Index of the condition.
Returns:
String: Audience condition JSON.
|
[
"Method",
"to",
"generate",
"json",
"for",
"logging",
"audience",
"condition",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/helpers/condition.py#L47-L64
|
12,184
|
optimizely/python-sdk
|
optimizely/helpers/condition.py
|
CustomAttributeConditionEvaluator.is_value_type_valid_for_exact_conditions
|
def is_value_type_valid_for_exact_conditions(self, value):
""" Method to validate if the value is valid for exact match type evaluation.
Args:
value: Value to validate.
Returns:
Boolean: True if value is a string, boolean, or number. Otherwise False.
"""
# No need to check for bool since bool is a subclass of int
if isinstance(value, string_types) or isinstance(value, (numbers.Integral, float)):
return True
return False
|
python
|
def is_value_type_valid_for_exact_conditions(self, value):
""" Method to validate if the value is valid for exact match type evaluation.
Args:
value: Value to validate.
Returns:
Boolean: True if value is a string, boolean, or number. Otherwise False.
"""
# No need to check for bool since bool is a subclass of int
if isinstance(value, string_types) or isinstance(value, (numbers.Integral, float)):
return True
return False
|
[
"def",
"is_value_type_valid_for_exact_conditions",
"(",
"self",
",",
"value",
")",
":",
"# No need to check for bool since bool is a subclass of int",
"if",
"isinstance",
"(",
"value",
",",
"string_types",
")",
"or",
"isinstance",
"(",
"value",
",",
"(",
"numbers",
".",
"Integral",
",",
"float",
")",
")",
":",
"return",
"True",
"return",
"False"
] |
Method to validate if the value is valid for exact match type evaluation.
Args:
value: Value to validate.
Returns:
Boolean: True if value is a string, boolean, or number. Otherwise False.
|
[
"Method",
"to",
"validate",
"if",
"the",
"value",
"is",
"valid",
"for",
"exact",
"match",
"type",
"evaluation",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/helpers/condition.py#L66-L79
|
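A short, runnable illustration of the bool-is-an-int point made in the comment above:

import numbers

# bool is a subclass of int, so True/False already satisfy numbers.Integral
# and no separate bool check is needed.
assert isinstance(True, numbers.Integral)
assert isinstance(3.14, float)
assert not isinstance('3.14', (numbers.Integral, float))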
12,185
|
optimizely/python-sdk
|
optimizely/helpers/condition.py
|
CustomAttributeConditionEvaluator.exists_evaluator
|
def exists_evaluator(self, index):
""" Evaluate the given exists match condition for the user attributes.
Args:
index: Index of the condition to be evaluated.
Returns:
Boolean: True if the user attributes have a non-null value for the given condition,
otherwise False.
"""
attr_name = self.condition_data[index][0]
return self.attributes.get(attr_name) is not None
|
python
|
def exists_evaluator(self, index):
""" Evaluate the given exists match condition for the user attributes.
Args:
index: Index of the condition to be evaluated.
Returns:
Boolean: True if the user attributes have a non-null value for the given condition,
otherwise False.
"""
attr_name = self.condition_data[index][0]
return self.attributes.get(attr_name) is not None
|
[
"def",
"exists_evaluator",
"(",
"self",
",",
"index",
")",
":",
"attr_name",
"=",
"self",
".",
"condition_data",
"[",
"index",
"]",
"[",
"0",
"]",
"return",
"self",
".",
"attributes",
".",
"get",
"(",
"attr_name",
")",
"is",
"not",
"None"
] |
Evaluate the given exists match condition for the user attributes.
Args:
index: Index of the condition to be evaluated.
Returns:
Boolean: True if the user attributes have a non-null value for the given condition,
otherwise False.
|
[
"Evaluate",
"the",
"given",
"exists",
"match",
"condition",
"for",
"the",
"user",
"attributes",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/helpers/condition.py#L131-L142
|
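A worked example for exists_evaluator, mirroring its return expression with the list layout produced by the deserializer (name, value, type, match):

# condition_data[index] might be ['has_purchased', None, 'custom_attribute', 'exists']
attributes = {'has_purchased': False}
print(attributes.get('has_purchased') is not None)   # True: False is a non-null value
print({}.get('has_purchased') is not None)           # False: attribute missing entirely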
12,186
|
optimizely/python-sdk
|
optimizely/helpers/condition.py
|
CustomAttributeConditionEvaluator.greater_than_evaluator
|
def greater_than_evaluator(self, index):
""" Evaluate the given greater than match condition for the user attributes.
Args:
index: Index of the condition to be evaluated.
Returns:
Boolean:
- True if the user attribute value is greater than the condition value.
- False if the user attribute value is less than or equal to the condition value.
None: if the condition value isn't finite or the user attribute value isn't finite.
"""
condition_name = self.condition_data[index][0]
condition_value = self.condition_data[index][1]
user_value = self.attributes.get(condition_name)
if not validator.is_finite_number(condition_value):
self.logger.warning(audience_logs.UNKNOWN_CONDITION_VALUE.format(
self._get_condition_json(index)
))
return None
if not self.is_value_a_number(user_value):
self.logger.warning(audience_logs.UNEXPECTED_TYPE.format(
self._get_condition_json(index),
type(user_value),
condition_name
))
return None
if not validator.is_finite_number(user_value):
self.logger.warning(audience_logs.INFINITE_ATTRIBUTE_VALUE.format(
self._get_condition_json(index),
condition_name
))
return None
return user_value > condition_value
|
python
|
def greater_than_evaluator(self, index):
""" Evaluate the given greater than match condition for the user attributes.
Args:
index: Index of the condition to be evaluated.
Returns:
Boolean:
- True if the user attribute value is greater than the condition value.
- False if the user attribute value is less than or equal to the condition value.
None: if the condition value isn't finite or the user attribute value isn't finite.
"""
condition_name = self.condition_data[index][0]
condition_value = self.condition_data[index][1]
user_value = self.attributes.get(condition_name)
if not validator.is_finite_number(condition_value):
self.logger.warning(audience_logs.UNKNOWN_CONDITION_VALUE.format(
self._get_condition_json(index)
))
return None
if not self.is_value_a_number(user_value):
self.logger.warning(audience_logs.UNEXPECTED_TYPE.format(
self._get_condition_json(index),
type(user_value),
condition_name
))
return None
if not validator.is_finite_number(user_value):
self.logger.warning(audience_logs.INFINITE_ATTRIBUTE_VALUE.format(
self._get_condition_json(index),
condition_name
))
return None
return user_value > condition_value
|
[
"def",
"greater_than_evaluator",
"(",
"self",
",",
"index",
")",
":",
"condition_name",
"=",
"self",
".",
"condition_data",
"[",
"index",
"]",
"[",
"0",
"]",
"condition_value",
"=",
"self",
".",
"condition_data",
"[",
"index",
"]",
"[",
"1",
"]",
"user_value",
"=",
"self",
".",
"attributes",
".",
"get",
"(",
"condition_name",
")",
"if",
"not",
"validator",
".",
"is_finite_number",
"(",
"condition_value",
")",
":",
"self",
".",
"logger",
".",
"warning",
"(",
"audience_logs",
".",
"UNKNOWN_CONDITION_VALUE",
".",
"format",
"(",
"self",
".",
"_get_condition_json",
"(",
"index",
")",
")",
")",
"return",
"None",
"if",
"not",
"self",
".",
"is_value_a_number",
"(",
"user_value",
")",
":",
"self",
".",
"logger",
".",
"warning",
"(",
"audience_logs",
".",
"UNEXPECTED_TYPE",
".",
"format",
"(",
"self",
".",
"_get_condition_json",
"(",
"index",
")",
",",
"type",
"(",
"user_value",
")",
",",
"condition_name",
")",
")",
"return",
"None",
"if",
"not",
"validator",
".",
"is_finite_number",
"(",
"user_value",
")",
":",
"self",
".",
"logger",
".",
"warning",
"(",
"audience_logs",
".",
"INFINITE_ATTRIBUTE_VALUE",
".",
"format",
"(",
"self",
".",
"_get_condition_json",
"(",
"index",
")",
",",
"condition_name",
")",
")",
"return",
"None",
"return",
"user_value",
">",
"condition_value"
] |
Evaluate the given greater than match condition for the user attributes.
Args:
index: Index of the condition to be evaluated.
Returns:
Boolean:
- True if the user attribute value is greater than the condition value.
- False if the user attribute value is less than or equal to the condition value.
None: if the condition value isn't finite or the user attribute value isn't finite.
|
[
"Evaluate",
"the",
"given",
"greater",
"than",
"match",
"condition",
"for",
"the",
"user",
"attributes",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/helpers/condition.py#L144-L181
|
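A small numeric illustration of the three outcomes of greater_than_evaluator; the condition layout is the usual [name, value, type, match] list and the values are placeholders.

# condition: ['visits', 10, 'custom_attribute', 'gt'] (match-type string assumed)
condition_value, user_value = 10, 12
print(user_value > condition_value)   # True; a user value of 7 would give False
# A non-numeric user value (e.g. '12') or a non-finite one (float('inf')) makes the
# evaluator return None after logging a warning, so the audience result is "unknown"
# rather than a failed match.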
12,187
|
optimizely/python-sdk
|
optimizely/helpers/condition.py
|
CustomAttributeConditionEvaluator.substring_evaluator
|
def substring_evaluator(self, index):
""" Evaluate the given substring match condition for the given user attributes.
Args:
index: Index of the condition to be evaluated.
Returns:
Boolean:
- True if the condition value is a substring of the user attribute value.
- False if the condition value is not a substring of the user attribute value.
None: if the condition value isn't a string or the user attribute value isn't a string.
"""
condition_name = self.condition_data[index][0]
condition_value = self.condition_data[index][1]
user_value = self.attributes.get(condition_name)
if not isinstance(condition_value, string_types):
self.logger.warning(audience_logs.UNKNOWN_CONDITION_VALUE.format(
self._get_condition_json(index),
))
return None
if not isinstance(user_value, string_types):
self.logger.warning(audience_logs.UNEXPECTED_TYPE.format(
self._get_condition_json(index),
type(user_value),
condition_name
))
return None
return condition_value in user_value
|
python
|
def substring_evaluator(self, index):
""" Evaluate the given substring match condition for the given user attributes.
Args:
index: Index of the condition to be evaluated.
Returns:
Boolean:
- True if the condition value is a substring of the user attribute value.
- False if the condition value is not a substring of the user attribute value.
None: if the condition value isn't a string or the user attribute value isn't a string.
"""
condition_name = self.condition_data[index][0]
condition_value = self.condition_data[index][1]
user_value = self.attributes.get(condition_name)
if not isinstance(condition_value, string_types):
self.logger.warning(audience_logs.UNKNOWN_CONDITION_VALUE.format(
self._get_condition_json(index),
))
return None
if not isinstance(user_value, string_types):
self.logger.warning(audience_logs.UNEXPECTED_TYPE.format(
self._get_condition_json(index),
type(user_value),
condition_name
))
return None
return condition_value in user_value
|
[
"def",
"substring_evaluator",
"(",
"self",
",",
"index",
")",
":",
"condition_name",
"=",
"self",
".",
"condition_data",
"[",
"index",
"]",
"[",
"0",
"]",
"condition_value",
"=",
"self",
".",
"condition_data",
"[",
"index",
"]",
"[",
"1",
"]",
"user_value",
"=",
"self",
".",
"attributes",
".",
"get",
"(",
"condition_name",
")",
"if",
"not",
"isinstance",
"(",
"condition_value",
",",
"string_types",
")",
":",
"self",
".",
"logger",
".",
"warning",
"(",
"audience_logs",
".",
"UNKNOWN_CONDITION_VALUE",
".",
"format",
"(",
"self",
".",
"_get_condition_json",
"(",
"index",
")",
",",
")",
")",
"return",
"None",
"if",
"not",
"isinstance",
"(",
"user_value",
",",
"string_types",
")",
":",
"self",
".",
"logger",
".",
"warning",
"(",
"audience_logs",
".",
"UNEXPECTED_TYPE",
".",
"format",
"(",
"self",
".",
"_get_condition_json",
"(",
"index",
")",
",",
"type",
"(",
"user_value",
")",
",",
"condition_name",
")",
")",
"return",
"None",
"return",
"condition_value",
"in",
"user_value"
] |
Evaluate the given substring match condition for the given user attributes.
Args:
index: Index of the condition to be evaluated.
Returns:
Boolean:
- True if the condition value is a substring of the user attribute value.
- False if the condition value is not a substring of the user attribute value.
None: if the condition value isn't a string or the user attribute value isn't a string.
|
[
"Evaluate",
"the",
"given",
"substring",
"match",
"condition",
"for",
"the",
"given",
"user",
"attributes",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/helpers/condition.py#L222-L252
|
12,188
|
optimizely/python-sdk
|
optimizely/helpers/condition.py
|
CustomAttributeConditionEvaluator.evaluate
|
def evaluate(self, index):
""" Given a custom attribute audience condition and user attributes, evaluate the
condition against the attributes.
Args:
index: Index of the condition to be evaluated.
Returns:
Boolean:
- True if the user attributes match the given condition.
- False if the user attributes don't match the given condition.
None: if the user attributes and condition can't be evaluated.
"""
if self.condition_data[index][2] != self.CUSTOM_ATTRIBUTE_CONDITION_TYPE:
self.logger.warning(audience_logs.UNKNOWN_CONDITION_TYPE.format(self._get_condition_json(index)))
return None
condition_match = self.condition_data[index][3]
if condition_match is None:
condition_match = ConditionMatchTypes.EXACT
if condition_match not in self.EVALUATORS_BY_MATCH_TYPE:
self.logger.warning(audience_logs.UNKNOWN_MATCH_TYPE.format(self._get_condition_json(index)))
return None
if condition_match != ConditionMatchTypes.EXISTS:
attribute_key = self.condition_data[index][0]
if attribute_key not in self.attributes:
self.logger.debug(audience_logs.MISSING_ATTRIBUTE_VALUE.format(self._get_condition_json(index), attribute_key))
return None
if self.attributes.get(attribute_key) is None:
self.logger.debug(audience_logs.NULL_ATTRIBUTE_VALUE.format(self._get_condition_json(index), attribute_key))
return None
return self.EVALUATORS_BY_MATCH_TYPE[condition_match](self, index)
|
python
|
def evaluate(self, index):
""" Given a custom attribute audience condition and user attributes, evaluate the
condition against the attributes.
Args:
index: Index of the condition to be evaluated.
Returns:
Boolean:
- True if the user attributes match the given condition.
- False if the user attributes don't match the given condition.
None: if the user attributes and condition can't be evaluated.
"""
if self.condition_data[index][2] != self.CUSTOM_ATTRIBUTE_CONDITION_TYPE:
self.logger.warning(audience_logs.UNKNOWN_CONDITION_TYPE.format(self._get_condition_json(index)))
return None
condition_match = self.condition_data[index][3]
if condition_match is None:
condition_match = ConditionMatchTypes.EXACT
if condition_match not in self.EVALUATORS_BY_MATCH_TYPE:
self.logger.warning(audience_logs.UNKNOWN_MATCH_TYPE.format(self._get_condition_json(index)))
return None
if condition_match != ConditionMatchTypes.EXISTS:
attribute_key = self.condition_data[index][0]
if attribute_key not in self.attributes:
self.logger.debug(audience_logs.MISSING_ATTRIBUTE_VALUE.format(self._get_condition_json(index), attribute_key))
return None
if self.attributes.get(attribute_key) is None:
self.logger.debug(audience_logs.NULL_ATTRIBUTE_VALUE.format(self._get_condition_json(index), attribute_key))
return None
return self.EVALUATORS_BY_MATCH_TYPE[condition_match](self, index)
|
[
"def",
"evaluate",
"(",
"self",
",",
"index",
")",
":",
"if",
"self",
".",
"condition_data",
"[",
"index",
"]",
"[",
"2",
"]",
"!=",
"self",
".",
"CUSTOM_ATTRIBUTE_CONDITION_TYPE",
":",
"self",
".",
"logger",
".",
"warning",
"(",
"audience_logs",
".",
"UNKNOWN_CONDITION_TYPE",
".",
"format",
"(",
"self",
".",
"_get_condition_json",
"(",
"index",
")",
")",
")",
"return",
"None",
"condition_match",
"=",
"self",
".",
"condition_data",
"[",
"index",
"]",
"[",
"3",
"]",
"if",
"condition_match",
"is",
"None",
":",
"condition_match",
"=",
"ConditionMatchTypes",
".",
"EXACT",
"if",
"condition_match",
"not",
"in",
"self",
".",
"EVALUATORS_BY_MATCH_TYPE",
":",
"self",
".",
"logger",
".",
"warning",
"(",
"audience_logs",
".",
"UNKNOWN_MATCH_TYPE",
".",
"format",
"(",
"self",
".",
"_get_condition_json",
"(",
"index",
")",
")",
")",
"return",
"None",
"if",
"condition_match",
"!=",
"ConditionMatchTypes",
".",
"EXISTS",
":",
"attribute_key",
"=",
"self",
".",
"condition_data",
"[",
"index",
"]",
"[",
"0",
"]",
"if",
"attribute_key",
"not",
"in",
"self",
".",
"attributes",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"audience_logs",
".",
"MISSING_ATTRIBUTE_VALUE",
".",
"format",
"(",
"self",
".",
"_get_condition_json",
"(",
"index",
")",
",",
"attribute_key",
")",
")",
"return",
"None",
"if",
"self",
".",
"attributes",
".",
"get",
"(",
"attribute_key",
")",
"is",
"None",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"audience_logs",
".",
"NULL_ATTRIBUTE_VALUE",
".",
"format",
"(",
"self",
".",
"_get_condition_json",
"(",
"index",
")",
",",
"attribute_key",
")",
")",
"return",
"None",
"return",
"self",
".",
"EVALUATORS_BY_MATCH_TYPE",
"[",
"condition_match",
"]",
"(",
"self",
",",
"index",
")"
] |
Given a custom attribute audience condition and user attributes, evaluate the
condition against the attributes.
Args:
index: Index of the condition to be evaluated.
Returns:
Boolean:
- True if the user attributes match the given condition.
- False if the user attributes don't match the given condition.
None: if the user attributes and condition can't be evaluated.
|
[
"Given",
"a",
"custom",
"attribute",
"audience",
"condition",
"and",
"user",
"attributes",
"evaluate",
"the",
"condition",
"against",
"the",
"attributes",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/helpers/condition.py#L262-L298
|
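A condensed, self-contained sketch of the dispatch that evaluate performs above; the match-type strings and the simplified evaluators are assumptions, kept only to show the control flow.

EVALUATORS_BY_MATCH_TYPE = {
    'exact':     lambda cond, attrs: attrs.get(cond[0]) == cond[1],
    'exists':    lambda cond, attrs: attrs.get(cond[0]) is not None,
    'gt':        lambda cond, attrs: attrs.get(cond[0]) > cond[1],
    'lt':        lambda cond, attrs: attrs.get(cond[0]) < cond[1],
    'substring': lambda cond, attrs: cond[1] in attrs.get(cond[0], ''),
}

def evaluate_sketch(condition, attributes):
    # condition == [name, value, type, match]; a missing match defaults to 'exact'
    match = condition[3] or 'exact'
    if match != 'exists' and attributes.get(condition[0]) is None:
        return None                       # unknown, not False
    return EVALUATORS_BY_MATCH_TYPE[match](condition, attributes)

print(evaluate_sketch(['plan', 'gold', 'custom_attribute', None], {'plan': 'gold'}))  # True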
12,189
|
optimizely/python-sdk
|
optimizely/helpers/condition.py
|
ConditionDecoder.object_hook
|
def object_hook(self, object_dict):
""" Hook which when passed into a json.JSONDecoder will replace each dict
in a json string with its index and convert the dict to an object as defined
by the passed in condition_decoder. The newly created condition object is
appended to the conditions_list.
Args:
object_dict: Dict representing an object.
Returns:
An index which will be used as the placeholder in the condition_structure
"""
instance = self.decoder(object_dict)
self.condition_list.append(instance)
self.index += 1
return self.index
|
python
|
def object_hook(self, object_dict):
""" Hook which when passed into a json.JSONDecoder will replace each dict
in a json string with its index and convert the dict to an object as defined
by the passed in condition_decoder. The newly created condition object is
appended to the conditions_list.
Args:
object_dict: Dict representing an object.
Returns:
An index which will be used as the placeholder in the condition_structure
"""
instance = self.decoder(object_dict)
self.condition_list.append(instance)
self.index += 1
return self.index
|
[
"def",
"object_hook",
"(",
"self",
",",
"object_dict",
")",
":",
"instance",
"=",
"self",
".",
"decoder",
"(",
"object_dict",
")",
"self",
".",
"condition_list",
".",
"append",
"(",
"instance",
")",
"self",
".",
"index",
"+=",
"1",
"return",
"self",
".",
"index"
] |
Hook which when passed into a json.JSONDecoder will replace each dict
in a json string with its index and convert the dict to an object as defined
by the passed in condition_decoder. The newly created condition object is
appended to the conditions_list.
Args:
object_dict: Dict representing an object.
Returns:
An index which will be used as the placeholder in the condition_structure
|
[
"Hook",
"which",
"when",
"passed",
"into",
"a",
"json",
".",
"JSONDecoder",
"will",
"replace",
"each",
"dict",
"in",
"a",
"json",
"string",
"with",
"its",
"index",
"and",
"convert",
"the",
"dict",
"to",
"an",
"object",
"as",
"defined",
"by",
"the",
"passed",
"in",
"condition_decoder",
".",
"The",
"newly",
"created",
"condition",
"object",
"is",
"appended",
"to",
"the",
"conditions_list",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/helpers/condition.py#L310-L325
|
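A self-contained illustration of the object_hook pattern above: json.loads replaces every decoded dict with an index into a side list, leaving the boolean operators in place. The class here is a hypothetical mirror of the surrounding decoder, and the starting index value is an assumption.

import json

class ConditionDecoderSketch(object):
    def __init__(self, decoder):
        self.decoder = decoder
        self.condition_list = []
        self.index = -1                      # assumed start so the first hook call returns 0

    def object_hook(self, object_dict):
        self.condition_list.append(self.decoder(object_dict))
        self.index += 1
        return self.index

decoder = ConditionDecoderSketch(lambda d: (d.get('name'), d.get('value')))
structure = json.loads(
    '["and", {"name": "plan", "value": "gold"}, {"name": "beta", "value": true}]',
    object_hook=decoder.object_hook)
print(structure)                 # ['and', 0, 1]
print(decoder.condition_list)    # [('plan', 'gold'), ('beta', True)]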
12,190
|
optimizely/python-sdk
|
optimizely/decision_service.py
|
DecisionService._get_bucketing_id
|
def _get_bucketing_id(self, user_id, attributes):
""" Helper method to determine bucketing ID for the user.
Args:
user_id: ID for user.
attributes: Dict representing user attributes. May consist of bucketing ID to be used.
Returns:
String representing bucketing ID if it is a String type in attributes else return user ID.
"""
attributes = attributes or {}
bucketing_id = attributes.get(enums.ControlAttributes.BUCKETING_ID)
if bucketing_id is not None:
if isinstance(bucketing_id, string_types):
return bucketing_id
self.logger.warning('Bucketing ID attribute is not a string. Defaulted to user_id.')
return user_id
|
python
|
def _get_bucketing_id(self, user_id, attributes):
""" Helper method to determine bucketing ID for the user.
Args:
user_id: ID for user.
attributes: Dict representing user attributes. May consist of bucketing ID to be used.
Returns:
String representing bucketing ID if it is a String type in attributes else return user ID.
"""
attributes = attributes or {}
bucketing_id = attributes.get(enums.ControlAttributes.BUCKETING_ID)
if bucketing_id is not None:
if isinstance(bucketing_id, string_types):
return bucketing_id
self.logger.warning('Bucketing ID attribute is not a string. Defaulted to user_id.')
return user_id
|
[
"def",
"_get_bucketing_id",
"(",
"self",
",",
"user_id",
",",
"attributes",
")",
":",
"attributes",
"=",
"attributes",
"or",
"{",
"}",
"bucketing_id",
"=",
"attributes",
".",
"get",
"(",
"enums",
".",
"ControlAttributes",
".",
"BUCKETING_ID",
")",
"if",
"bucketing_id",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"bucketing_id",
",",
"string_types",
")",
":",
"return",
"bucketing_id",
"self",
".",
"logger",
".",
"warning",
"(",
"'Bucketing ID attribute is not a string. Defaulted to user_id.'",
")",
"return",
"user_id"
] |
Helper method to determine bucketing ID for the user.
Args:
user_id: ID for user.
attributes: Dict representing user attributes. May consist of bucketing ID to be used.
Returns:
String representing bucketing ID if it is a String type in attributes else return user ID.
|
[
"Helper",
"method",
"to",
"determine",
"bucketing",
"ID",
"for",
"the",
"user",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/decision_service.py#L36-L56
|
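A hedged, runnable mirror of the bucketing-ID override above; the reserved attribute key '$opt_bucketing_id' is an assumption about enums.ControlAttributes.BUCKETING_ID.

BUCKETING_ID_KEY = '$opt_bucketing_id'   # assumed value of ControlAttributes.BUCKETING_ID

def get_bucketing_id_sketch(user_id, attributes):
    attributes = attributes or {}
    bucketing_id = attributes.get(BUCKETING_ID_KEY)
    if isinstance(bucketing_id, str):
        return bucketing_id
    return user_id                       # missing or non-string IDs fall back to user_id

print(get_bucketing_id_sketch('user-1', {BUCKETING_ID_KEY: 'stable-device-id'}))  # 'stable-device-id'
print(get_bucketing_id_sketch('user-1', {BUCKETING_ID_KEY: 123}))                 # 'user-1'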
12,191
|
optimizely/python-sdk
|
optimizely/decision_service.py
|
DecisionService.get_forced_variation
|
def get_forced_variation(self, experiment, user_id):
""" Determine if a user is forced into a variation for the given experiment and return that variation.
Args:
experiment: Object representing the experiment for which user is to be bucketed.
user_id: ID for the user.
Returns:
Variation in which the user with ID user_id is forced into. None if no variation.
"""
forced_variations = experiment.forcedVariations
if forced_variations and user_id in forced_variations:
variation_key = forced_variations.get(user_id)
variation = self.config.get_variation_from_key(experiment.key, variation_key)
if variation:
self.logger.info('User "%s" is forced in variation "%s".' % (user_id, variation_key))
return variation
return None
|
python
|
def get_forced_variation(self, experiment, user_id):
""" Determine if a user is forced into a variation for the given experiment and return that variation.
Args:
experiment: Object representing the experiment for which user is to be bucketed.
user_id: ID for the user.
Returns:
Variation in which the user with ID user_id is forced into. None if no variation.
"""
forced_variations = experiment.forcedVariations
if forced_variations and user_id in forced_variations:
variation_key = forced_variations.get(user_id)
variation = self.config.get_variation_from_key(experiment.key, variation_key)
if variation:
self.logger.info('User "%s" is forced in variation "%s".' % (user_id, variation_key))
return variation
return None
|
[
"def",
"get_forced_variation",
"(",
"self",
",",
"experiment",
",",
"user_id",
")",
":",
"forced_variations",
"=",
"experiment",
".",
"forcedVariations",
"if",
"forced_variations",
"and",
"user_id",
"in",
"forced_variations",
":",
"variation_key",
"=",
"forced_variations",
".",
"get",
"(",
"user_id",
")",
"variation",
"=",
"self",
".",
"config",
".",
"get_variation_from_key",
"(",
"experiment",
".",
"key",
",",
"variation_key",
")",
"if",
"variation",
":",
"self",
".",
"logger",
".",
"info",
"(",
"'User \"%s\" is forced in variation \"%s\".'",
"%",
"(",
"user_id",
",",
"variation_key",
")",
")",
"return",
"variation",
"return",
"None"
] |
Determine if a user is forced into a variation for the given experiment and return that variation.
Args:
experiment: Object representing the experiment for which user is to be bucketed.
user_id: ID for the user.
Returns:
Variation in which the user with ID user_id is forced into. None if no variation.
|
[
"Determine",
"if",
"a",
"user",
"is",
"forced",
"into",
"a",
"variation",
"for",
"the",
"given",
"experiment",
"and",
"return",
"that",
"variation",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/decision_service.py#L58-L77
|
12,192
|
optimizely/python-sdk
|
optimizely/decision_service.py
|
DecisionService.get_stored_variation
|
def get_stored_variation(self, experiment, user_profile):
""" Determine if the user has a stored variation available for the given experiment and return that.
Args:
experiment: Object representing the experiment for which user is to be bucketed.
user_profile: UserProfile object representing the user's profile.
Returns:
Variation if available. None otherwise.
"""
user_id = user_profile.user_id
variation_id = user_profile.get_variation_for_experiment(experiment.id)
if variation_id:
variation = self.config.get_variation_from_id(experiment.key, variation_id)
if variation:
self.logger.info('Found a stored decision. User "%s" is in variation "%s" of experiment "%s".' % (
user_id,
variation.key,
experiment.key
))
return variation
return None
|
python
|
def get_stored_variation(self, experiment, user_profile):
""" Determine if the user has a stored variation available for the given experiment and return that.
Args:
experiment: Object representing the experiment for which user is to be bucketed.
user_profile: UserProfile object representing the user's profile.
Returns:
Variation if available. None otherwise.
"""
user_id = user_profile.user_id
variation_id = user_profile.get_variation_for_experiment(experiment.id)
if variation_id:
variation = self.config.get_variation_from_id(experiment.key, variation_id)
if variation:
self.logger.info('Found a stored decision. User "%s" is in variation "%s" of experiment "%s".' % (
user_id,
variation.key,
experiment.key
))
return variation
return None
|
[
"def",
"get_stored_variation",
"(",
"self",
",",
"experiment",
",",
"user_profile",
")",
":",
"user_id",
"=",
"user_profile",
".",
"user_id",
"variation_id",
"=",
"user_profile",
".",
"get_variation_for_experiment",
"(",
"experiment",
".",
"id",
")",
"if",
"variation_id",
":",
"variation",
"=",
"self",
".",
"config",
".",
"get_variation_from_id",
"(",
"experiment",
".",
"key",
",",
"variation_id",
")",
"if",
"variation",
":",
"self",
".",
"logger",
".",
"info",
"(",
"'Found a stored decision. User \"%s\" is in variation \"%s\" of experiment \"%s\".'",
"%",
"(",
"user_id",
",",
"variation",
".",
"key",
",",
"experiment",
".",
"key",
")",
")",
"return",
"variation",
"return",
"None"
] |
Determine if the user has a stored variation available for the given experiment and return that.
Args:
experiment: Object representing the experiment for which user is to be bucketed.
user_profile: UserProfile object representing the user's profile.
Returns:
Variation if available. None otherwise.
|
[
"Determine",
"if",
"the",
"user",
"has",
"a",
"stored",
"variation",
"available",
"for",
"the",
"given",
"experiment",
"and",
"return",
"that",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/decision_service.py#L79-L103
|
12,193
|
optimizely/python-sdk
|
optimizely/decision_service.py
|
DecisionService.get_variation
|
def get_variation(self, experiment, user_id, attributes, ignore_user_profile=False):
""" Top-level function to help determine variation user should be put in.
First, check if experiment is running.
Second, check if user is forced in a variation.
Third, check if there is a stored decision for the user and return the corresponding variation.
Fourth, figure out if user is in the experiment by evaluating audience conditions if any.
Fifth, bucket the user and return the variation.
Args:
experiment: Experiment for which user variation needs to be determined.
user_id: ID for user.
attributes: Dict representing user attributes.
ignore_user_profile: True to ignore the user profile lookup. Defaults to False.
Returns:
Variation user should see. None if user is not in experiment or experiment is not running.
"""
# Check if experiment is running
if not experiment_helper.is_experiment_running(experiment):
self.logger.info('Experiment "%s" is not running.' % experiment.key)
return None
# Check if the user is forced into a variation
variation = self.config.get_forced_variation(experiment.key, user_id)
if variation:
return variation
# Check to see if user is white-listed for a certain variation
variation = self.get_forced_variation(experiment, user_id)
if variation:
return variation
# Check to see if user has a decision available for the given experiment
user_profile = UserProfile(user_id)
if not ignore_user_profile and self.user_profile_service:
try:
retrieved_profile = self.user_profile_service.lookup(user_id)
except:
self.logger.exception('Unable to retrieve user profile for user "%s" as lookup failed.' % user_id)
retrieved_profile = None
if validator.is_user_profile_valid(retrieved_profile):
user_profile = UserProfile(**retrieved_profile)
variation = self.get_stored_variation(experiment, user_profile)
if variation:
return variation
else:
self.logger.warning('User profile has invalid format.')
# Bucket user and store the new decision
if not audience_helper.is_user_in_experiment(self.config, experiment, attributes, self.logger):
self.logger.info('User "%s" does not meet conditions to be in experiment "%s".' % (
user_id,
experiment.key
))
return None
# Determine bucketing ID to be used
bucketing_id = self._get_bucketing_id(user_id, attributes)
variation = self.bucketer.bucket(experiment, user_id, bucketing_id)
if variation:
# Store this new decision and return the variation for the user
if not ignore_user_profile and self.user_profile_service:
try:
user_profile.save_variation_for_experiment(experiment.id, variation.id)
self.user_profile_service.save(user_profile.__dict__)
except:
self.logger.exception('Unable to save user profile for user "%s".' % user_id)
return variation
return None
|
python
|
def get_variation(self, experiment, user_id, attributes, ignore_user_profile=False):
""" Top-level function to help determine variation user should be put in.
First, check if experiment is running.
Second, check if user is forced in a variation.
Third, check if there is a stored decision for the user and return the corresponding variation.
Fourth, figure out if user is in the experiment by evaluating audience conditions if any.
Fifth, bucket the user and return the variation.
Args:
experiment: Experiment for which user variation needs to be determined.
user_id: ID for user.
attributes: Dict representing user attributes.
ignore_user_profile: True to ignore the user profile lookup. Defaults to False.
Returns:
Variation user should see. None if user is not in experiment or experiment is not running.
"""
# Check if experiment is running
if not experiment_helper.is_experiment_running(experiment):
self.logger.info('Experiment "%s" is not running.' % experiment.key)
return None
# Check if the user is forced into a variation
variation = self.config.get_forced_variation(experiment.key, user_id)
if variation:
return variation
# Check to see if user is white-listed for a certain variation
variation = self.get_forced_variation(experiment, user_id)
if variation:
return variation
# Check to see if user has a decision available for the given experiment
user_profile = UserProfile(user_id)
if not ignore_user_profile and self.user_profile_service:
try:
retrieved_profile = self.user_profile_service.lookup(user_id)
except:
self.logger.exception('Unable to retrieve user profile for user "%s" as lookup failed.' % user_id)
retrieved_profile = None
if validator.is_user_profile_valid(retrieved_profile):
user_profile = UserProfile(**retrieved_profile)
variation = self.get_stored_variation(experiment, user_profile)
if variation:
return variation
else:
self.logger.warning('User profile has invalid format.')
# Bucket user and store the new decision
if not audience_helper.is_user_in_experiment(self.config, experiment, attributes, self.logger):
self.logger.info('User "%s" does not meet conditions to be in experiment "%s".' % (
user_id,
experiment.key
))
return None
# Determine bucketing ID to be used
bucketing_id = self._get_bucketing_id(user_id, attributes)
variation = self.bucketer.bucket(experiment, user_id, bucketing_id)
if variation:
# Store this new decision and return the variation for the user
if not ignore_user_profile and self.user_profile_service:
try:
user_profile.save_variation_for_experiment(experiment.id, variation.id)
self.user_profile_service.save(user_profile.__dict__)
except:
self.logger.exception('Unable to save user profile for user "%s".' % user_id)
return variation
return None
|
[
"def",
"get_variation",
"(",
"self",
",",
"experiment",
",",
"user_id",
",",
"attributes",
",",
"ignore_user_profile",
"=",
"False",
")",
":",
"# Check if experiment is running",
"if",
"not",
"experiment_helper",
".",
"is_experiment_running",
"(",
"experiment",
")",
":",
"self",
".",
"logger",
".",
"info",
"(",
"'Experiment \"%s\" is not running.'",
"%",
"experiment",
".",
"key",
")",
"return",
"None",
"# Check if the user is forced into a variation",
"variation",
"=",
"self",
".",
"config",
".",
"get_forced_variation",
"(",
"experiment",
".",
"key",
",",
"user_id",
")",
"if",
"variation",
":",
"return",
"variation",
"# Check to see if user is white-listed for a certain variation",
"variation",
"=",
"self",
".",
"get_forced_variation",
"(",
"experiment",
",",
"user_id",
")",
"if",
"variation",
":",
"return",
"variation",
"# Check to see if user has a decision available for the given experiment",
"user_profile",
"=",
"UserProfile",
"(",
"user_id",
")",
"if",
"not",
"ignore_user_profile",
"and",
"self",
".",
"user_profile_service",
":",
"try",
":",
"retrieved_profile",
"=",
"self",
".",
"user_profile_service",
".",
"lookup",
"(",
"user_id",
")",
"except",
":",
"self",
".",
"logger",
".",
"exception",
"(",
"'Unable to retrieve user profile for user \"%s\" as lookup failed.'",
"%",
"user_id",
")",
"retrieved_profile",
"=",
"None",
"if",
"validator",
".",
"is_user_profile_valid",
"(",
"retrieved_profile",
")",
":",
"user_profile",
"=",
"UserProfile",
"(",
"*",
"*",
"retrieved_profile",
")",
"variation",
"=",
"self",
".",
"get_stored_variation",
"(",
"experiment",
",",
"user_profile",
")",
"if",
"variation",
":",
"return",
"variation",
"else",
":",
"self",
".",
"logger",
".",
"warning",
"(",
"'User profile has invalid format.'",
")",
"# Bucket user and store the new decision",
"if",
"not",
"audience_helper",
".",
"is_user_in_experiment",
"(",
"self",
".",
"config",
",",
"experiment",
",",
"attributes",
",",
"self",
".",
"logger",
")",
":",
"self",
".",
"logger",
".",
"info",
"(",
"'User \"%s\" does not meet conditions to be in experiment \"%s\".'",
"%",
"(",
"user_id",
",",
"experiment",
".",
"key",
")",
")",
"return",
"None",
"# Determine bucketing ID to be used",
"bucketing_id",
"=",
"self",
".",
"_get_bucketing_id",
"(",
"user_id",
",",
"attributes",
")",
"variation",
"=",
"self",
".",
"bucketer",
".",
"bucket",
"(",
"experiment",
",",
"user_id",
",",
"bucketing_id",
")",
"if",
"variation",
":",
"# Store this new decision and return the variation for the user",
"if",
"not",
"ignore_user_profile",
"and",
"self",
".",
"user_profile_service",
":",
"try",
":",
"user_profile",
".",
"save_variation_for_experiment",
"(",
"experiment",
".",
"id",
",",
"variation",
".",
"id",
")",
"self",
".",
"user_profile_service",
".",
"save",
"(",
"user_profile",
".",
"__dict__",
")",
"except",
":",
"self",
".",
"logger",
".",
"exception",
"(",
"'Unable to save user profile for user \"%s\".'",
"%",
"user_id",
")",
"return",
"variation",
"return",
"None"
] |
Top-level function to help determine variation user should be put in.
First, check if experiment is running.
Second, check if user is forced in a variation.
Third, check if there is a stored decision for the user and return the corresponding variation.
Fourth, figure out if user is in the experiment by evaluating audience conditions if any.
Fifth, bucket the user and return the variation.
Args:
experiment: Experiment for which user variation needs to be determined.
user_id: ID for user.
attributes: Dict representing user attributes.
ignore_user_profile: True to ignore the user profile lookup. Defaults to False.
Returns:
Variation user should see. None if user is not in experiment or experiment is not running.
|
[
"Top",
"-",
"level",
"function",
"to",
"help",
"determine",
"variation",
"user",
"should",
"be",
"put",
"in",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/decision_service.py#L105-L178
|
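get_variation only needs lookup and save from the user_profile_service; a minimal in-memory sketch of that contract is below. The experiment_bucket_map/variation_id dict layout is an assumption about the stored profile format.

class InMemoryUserProfileService(object):
    """Hypothetical lookup/save backend for the profile checks in get_variation."""

    def __init__(self):
        self._profiles = {}

    def lookup(self, user_id):
        # Expected to return something like
        # {'user_id': 'user-1', 'experiment_bucket_map': {'15001': {'variation_id': '15002'}}}
        return self._profiles.get(user_id)

    def save(self, user_profile_dict):
        self._profiles[user_profile_dict['user_id']] = user_profile_dict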
12,194
|
optimizely/python-sdk
|
optimizely/decision_service.py
|
DecisionService.get_experiment_in_group
|
def get_experiment_in_group(self, group, bucketing_id):
""" Determine which experiment in the group the user is bucketed into.
Args:
group: The group to bucket the user into.
bucketing_id: ID to be used for bucketing the user.
Returns:
Experiment if the user is bucketed into an experiment in the specified group. None otherwise.
"""
experiment_id = self.bucketer.find_bucket(bucketing_id, group.id, group.trafficAllocation)
if experiment_id:
experiment = self.config.get_experiment_from_id(experiment_id)
if experiment:
self.logger.info('User with bucketing ID "%s" is in experiment %s of group %s.' % (
bucketing_id,
experiment.key,
group.id
))
return experiment
self.logger.info('User with bucketing ID "%s" is not in any experiments of group %s.' % (
bucketing_id,
group.id
))
return None
|
python
|
def get_experiment_in_group(self, group, bucketing_id):
""" Determine which experiment in the group the user is bucketed into.
Args:
group: The group to bucket the user into.
bucketing_id: ID to be used for bucketing the user.
Returns:
Experiment if the user is bucketed into an experiment in the specified group. None otherwise.
"""
experiment_id = self.bucketer.find_bucket(bucketing_id, group.id, group.trafficAllocation)
if experiment_id:
experiment = self.config.get_experiment_from_id(experiment_id)
if experiment:
self.logger.info('User with bucketing ID "%s" is in experiment %s of group %s.' % (
bucketing_id,
experiment.key,
group.id
))
return experiment
self.logger.info('User with bucketing ID "%s" is not in any experiments of group %s.' % (
bucketing_id,
group.id
))
return None
|
[
"def",
"get_experiment_in_group",
"(",
"self",
",",
"group",
",",
"bucketing_id",
")",
":",
"experiment_id",
"=",
"self",
".",
"bucketer",
".",
"find_bucket",
"(",
"bucketing_id",
",",
"group",
".",
"id",
",",
"group",
".",
"trafficAllocation",
")",
"if",
"experiment_id",
":",
"experiment",
"=",
"self",
".",
"config",
".",
"get_experiment_from_id",
"(",
"experiment_id",
")",
"if",
"experiment",
":",
"self",
".",
"logger",
".",
"info",
"(",
"'User with bucketing ID \"%s\" is in experiment %s of group %s.'",
"%",
"(",
"bucketing_id",
",",
"experiment",
".",
"key",
",",
"group",
".",
"id",
")",
")",
"return",
"experiment",
"self",
".",
"logger",
".",
"info",
"(",
"'User with bucketing ID \"%s\" is not in any experiments of group %s.'",
"%",
"(",
"bucketing_id",
",",
"group",
".",
"id",
")",
")",
"return",
"None"
] |
Determine which experiment in the group the user is bucketed into.
Args:
group: The group to bucket the user into.
bucketing_id: ID to be used for bucketing the user.
Returns:
Experiment if the user is bucketed into an experiment in the specified group. None otherwise.
|
[
"Determine",
"which",
"experiment",
"in",
"the",
"group",
"the",
"user",
"is",
"bucketed",
"into",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/decision_service.py#L238-L265
|
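A hedged illustration of the traffic-allocation walk that get_experiment_in_group delegates to the bucketer. This is not the SDK's find_bucket: the entityId/endOfRange keys and the 0-9999 example range are assumptions based on the Optimizely datafile format, and the real bucket value comes from hashing the bucketing ID, which is omitted here.

def find_bucket(bucket_value, traffic_allocation):
    # Return the first entity whose endOfRange exceeds the bucket value.
    for allocation in traffic_allocation:
        if bucket_value < allocation['endOfRange']:
            return allocation['entityId']
    return None

# e.g. a 50/50 split of two experiments in a group
allocation = [
    {'entityId': 'exp_a', 'endOfRange': 5000},
    {'entityId': 'exp_b', 'endOfRange': 10000},
]
assert find_bucket(1234, allocation) == 'exp_a'
assert find_bucket(7890, allocation) == 'exp_b'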
12,195
|
optimizely/python-sdk
|
optimizely/notification_center.py
|
NotificationCenter.add_notification_listener
|
def add_notification_listener(self, notification_type, notification_callback):
""" Add a notification callback to the notification center.
Args:
notification_type: A string representing the notification type from .helpers.enums.NotificationTypes
notification_callback: closure of function to call when event is triggered.
Returns:
Integer notification id used to remove the notification or -1 if the notification has already been added.
"""
if notification_type not in self.notifications:
self.notifications[notification_type] = [(self.notification_id, notification_callback)]
else:
if reduce(lambda a, b: a + 1,
filter(lambda tup: tup[1] == notification_callback, self.notifications[notification_type]),
0) > 0:
return -1
self.notifications[notification_type].append((self.notification_id, notification_callback))
ret_val = self.notification_id
self.notification_id += 1
return ret_val
|
python
|
def add_notification_listener(self, notification_type, notification_callback):
""" Add a notification callback to the notification center.
Args:
notification_type: A string representing the notification type from .helpers.enums.NotificationTypes
notification_callback: closure of function to call when event is triggered.
Returns:
Integer notification id used to remove the notification or -1 if the notification has already been added.
"""
if notification_type not in self.notifications:
self.notifications[notification_type] = [(self.notification_id, notification_callback)]
else:
if reduce(lambda a, b: a + 1,
filter(lambda tup: tup[1] == notification_callback, self.notifications[notification_type]),
0) > 0:
return -1
self.notifications[notification_type].append((self.notification_id, notification_callback))
ret_val = self.notification_id
self.notification_id += 1
return ret_val
|
[
"def",
"add_notification_listener",
"(",
"self",
",",
"notification_type",
",",
"notification_callback",
")",
":",
"if",
"notification_type",
"not",
"in",
"self",
".",
"notifications",
":",
"self",
".",
"notifications",
"[",
"notification_type",
"]",
"=",
"[",
"(",
"self",
".",
"notification_id",
",",
"notification_callback",
")",
"]",
"else",
":",
"if",
"reduce",
"(",
"lambda",
"a",
",",
"b",
":",
"a",
"+",
"1",
",",
"filter",
"(",
"lambda",
"tup",
":",
"tup",
"[",
"1",
"]",
"==",
"notification_callback",
",",
"self",
".",
"notifications",
"[",
"notification_type",
"]",
")",
",",
"0",
")",
">",
"0",
":",
"return",
"-",
"1",
"self",
".",
"notifications",
"[",
"notification_type",
"]",
".",
"append",
"(",
"(",
"self",
".",
"notification_id",
",",
"notification_callback",
")",
")",
"ret_val",
"=",
"self",
".",
"notification_id",
"self",
".",
"notification_id",
"+=",
"1",
"return",
"ret_val"
] |
Add a notification callback to the notification center.
Args:
notification_type: A string representing the notification type from .helpers.enums.NotificationTypes
notification_callback: closure of function to call when event is triggered.
Returns:
Integer notification id used to remove the notification or -1 if the notification has already been added.
|
[
"Add",
"a",
"notification",
"callback",
"to",
"the",
"notification",
"center",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/notification_center.py#L29-L53
|
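A usage sketch for the listener registration above. Assumptions: the NotificationCenter constructor accepts a logger (as in this version of the SDK), a standard-library logger is acceptable there, and 'ACTIVATE' is used as a plain dict key; the real constants live in optimizely.helpers.enums.NotificationTypes.

import logging

from optimizely.notification_center import NotificationCenter

nc = NotificationCenter(logging.getLogger('notification_demo'))

def on_activate(*args):
    # Listener callbacks receive whatever was passed to send_notifications.
    print('activate fired with', args)

listener_id = nc.add_notification_listener('ACTIVATE', on_activate)

# Registering the exact same callback for the same type again is rejected with -1.
assert nc.add_notification_listener('ACTIVATE', on_activate) == -1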
12,196
|
optimizely/python-sdk
|
optimizely/notification_center.py
|
NotificationCenter.remove_notification_listener
|
def remove_notification_listener(self, notification_id):
""" Remove a previously added notification callback.
Args:
notification_id: The numeric id passed back from add_notification_listener
Returns:
The function returns boolean true if found and removed, false otherwise.
"""
for v in self.notifications.values():
toRemove = list(filter(lambda tup: tup[0] == notification_id, v))
if len(toRemove) > 0:
v.remove(toRemove[0])
return True
return False
|
python
|
def remove_notification_listener(self, notification_id):
""" Remove a previously added notification callback.
Args:
notification_id: The numeric id passed back from add_notification_listener
Returns:
The function returns boolean true if found and removed, false otherwise.
"""
for v in self.notifications.values():
toRemove = list(filter(lambda tup: tup[0] == notification_id, v))
if len(toRemove) > 0:
v.remove(toRemove[0])
return True
return False
|
[
"def",
"remove_notification_listener",
"(",
"self",
",",
"notification_id",
")",
":",
"for",
"v",
"in",
"self",
".",
"notifications",
".",
"values",
"(",
")",
":",
"toRemove",
"=",
"list",
"(",
"filter",
"(",
"lambda",
"tup",
":",
"tup",
"[",
"0",
"]",
"==",
"notification_id",
",",
"v",
")",
")",
"if",
"len",
"(",
"toRemove",
")",
">",
"0",
":",
"v",
".",
"remove",
"(",
"toRemove",
"[",
"0",
"]",
")",
"return",
"True",
"return",
"False"
] |
Remove a previously added notification callback.
Args:
notification_id: The numeric id passed back from add_notification_listener
Returns:
The function returns boolean true if found and removed, false otherwise.
|
[
"Remove",
"a",
"previously",
"added",
"notification",
"callback",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/notification_center.py#L55-L71
|
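Continuing the registration sketch above: removal is keyed by the id returned at registration time, succeeds once, and reports False for an id that is no longer (or never was) registered.

assert nc.remove_notification_listener(listener_id) is True   # found and removed
assert nc.remove_notification_listener(listener_id) is False  # already gone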
12,197
|
optimizely/python-sdk
|
optimizely/notification_center.py
|
NotificationCenter.send_notifications
|
def send_notifications(self, notification_type, *args):
""" Fires off the notification for the specific event. Uses var args to pass in a
arbitrary list of parameter according to which notification type was fired.
Args:
notification_type: Type of notification to fire (String from .helpers.enums.NotificationTypes)
args: variable list of arguments to the callback.
"""
if notification_type in self.notifications:
for notification_id, callback in self.notifications[notification_type]:
try:
callback(*args)
except:
self.logger.exception('Problem calling notify callback!')
|
python
|
def send_notifications(self, notification_type, *args):
""" Fires off the notification for the specific event. Uses var args to pass in a
arbitrary list of parameter according to which notification type was fired.
Args:
notification_type: Type of notification to fire (String from .helpers.enums.NotificationTypes)
args: variable list of arguments to the callback.
"""
if notification_type in self.notifications:
for notification_id, callback in self.notifications[notification_type]:
try:
callback(*args)
except:
self.logger.exception('Problem calling notify callback!')
|
[
"def",
"send_notifications",
"(",
"self",
",",
"notification_type",
",",
"*",
"args",
")",
":",
"if",
"notification_type",
"in",
"self",
".",
"notifications",
":",
"for",
"notification_id",
",",
"callback",
"in",
"self",
".",
"notifications",
"[",
"notification_type",
"]",
":",
"try",
":",
"callback",
"(",
"*",
"args",
")",
"except",
":",
"self",
".",
"logger",
".",
"exception",
"(",
"'Problem calling notify callback!'",
")"
] |
Fires off the notification for the specific event. Uses var args to pass in a
arbitrary list of parameter according to which notification type was fired.
Args:
notification_type: Type of notification to fire (String from .helpers.enums.NotificationTypes)
args: variable list of arguments to the callback.
|
[
"Fires",
"off",
"the",
"notification",
"for",
"the",
"specific",
"event",
".",
"Uses",
"var",
"args",
"to",
"pass",
"in",
"a",
"arbitrary",
"list",
"of",
"parameter",
"according",
"to",
"which",
"notification",
"type",
"was",
"fired",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/notification_center.py#L87-L101
|
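Continuing the same sketch: re-attach the listener plus one that raises, then fire the event. Per the bare except above, the failing callback is caught and logged while the healthy one still runs, and the extra positional arguments are passed straight through to each callback.

def broken_listener(*args):
    raise RuntimeError('boom')

nc.add_notification_listener('ACTIVATE', on_activate)
nc.add_notification_listener('ACTIVATE', broken_listener)

# on_activate prints; broken_listener's error is swallowed and logged.
nc.send_notifications('ACTIVATE', 'checkout_test', 'user_1')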
12,198
|
optimizely/python-sdk
|
optimizely/helpers/condition_tree_evaluator.py
|
and_evaluator
|
def and_evaluator(conditions, leaf_evaluator):
""" Evaluates a list of conditions as if the evaluator had been applied
to each entry and the results AND-ed together.
Args:
conditions: List of conditions ex: [operand_1, operand_2].
leaf_evaluator: Function which will be called to evaluate leaf condition values.
Returns:
Boolean:
- True if all operands evaluate to True.
- False if a single operand evaluates to False.
None: if conditions couldn't be evaluated.
"""
saw_null_result = False
for condition in conditions:
result = evaluate(condition, leaf_evaluator)
if result is False:
return False
if result is None:
saw_null_result = True
return None if saw_null_result else True
|
python
|
def and_evaluator(conditions, leaf_evaluator):
""" Evaluates a list of conditions as if the evaluator had been applied
to each entry and the results AND-ed together.
Args:
conditions: List of conditions ex: [operand_1, operand_2].
leaf_evaluator: Function which will be called to evaluate leaf condition values.
Returns:
Boolean:
- True if all operands evaluate to True.
- False if a single operand evaluates to False.
None: if conditions couldn't be evaluated.
"""
saw_null_result = False
for condition in conditions:
result = evaluate(condition, leaf_evaluator)
if result is False:
return False
if result is None:
saw_null_result = True
return None if saw_null_result else True
|
[
"def",
"and_evaluator",
"(",
"conditions",
",",
"leaf_evaluator",
")",
":",
"saw_null_result",
"=",
"False",
"for",
"condition",
"in",
"conditions",
":",
"result",
"=",
"evaluate",
"(",
"condition",
",",
"leaf_evaluator",
")",
"if",
"result",
"is",
"False",
":",
"return",
"False",
"if",
"result",
"is",
"None",
":",
"saw_null_result",
"=",
"True",
"return",
"None",
"if",
"saw_null_result",
"else",
"True"
] |
Evaluates a list of conditions as if the evaluator had been applied
to each entry and the results AND-ed together.
Args:
conditions: List of conditions ex: [operand_1, operand_2].
leaf_evaluator: Function which will be called to evaluate leaf condition values.
Returns:
Boolean:
- True if all operands evaluate to True.
- False if a single operand evaluates to False.
None: if conditions couldn't be evaluated.
|
[
"Evaluates",
"a",
"list",
"of",
"conditions",
"as",
"if",
"the",
"evaluator",
"had",
"been",
"applied",
"to",
"each",
"entry",
"and",
"the",
"results",
"AND",
"-",
"ed",
"together",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/helpers/condition_tree_evaluator.py#L17-L40
|
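A usage sketch for the three-valued AND semantics above. Assumption: the module's top-level evaluate() hands any non-list condition straight to leaf_evaluator, so plain strings serve as leaf conditions here.

from optimizely.helpers.condition_tree_evaluator import and_evaluator

known = {'cond_true': True, 'cond_false': False, 'cond_unknown': None}
leaf = lambda condition: known.get(condition)

print(and_evaluator(['cond_true', 'cond_true'], leaf))     # True
print(and_evaluator(['cond_true', 'cond_false'], leaf))    # False - stops at the first False operand
print(and_evaluator(['cond_true', 'cond_unknown'], leaf))  # None - an unknown operand poisons the AND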
12,199
|
optimizely/python-sdk
|
optimizely/helpers/condition_tree_evaluator.py
|
not_evaluator
|
def not_evaluator(conditions, leaf_evaluator):
""" Evaluates a list of conditions as if the evaluator had been applied
to a single entry and NOT was applied to the result.
Args:
conditions: List of conditions ex: [operand_1, operand_2].
leaf_evaluator: Function which will be called to evaluate leaf condition values.
Returns:
Boolean:
- True if the operand evaluates to False.
- False if the operand evaluates to True.
None: if conditions is empty or condition couldn't be evaluated.
"""
if not len(conditions) > 0:
return None
result = evaluate(conditions[0], leaf_evaluator)
return None if result is None else not result
|
python
|
def not_evaluator(conditions, leaf_evaluator):
""" Evaluates a list of conditions as if the evaluator had been applied
to a single entry and NOT was applied to the result.
Args:
conditions: List of conditions ex: [operand_1, operand_2].
leaf_evaluator: Function which will be called to evaluate leaf condition values.
Returns:
Boolean:
- True if the operand evaluates to False.
- False if the operand evaluates to True.
None: if conditions is empty or condition couldn't be evaluated.
"""
if not len(conditions) > 0:
return None
result = evaluate(conditions[0], leaf_evaluator)
return None if result is None else not result
|
[
"def",
"not_evaluator",
"(",
"conditions",
",",
"leaf_evaluator",
")",
":",
"if",
"not",
"len",
"(",
"conditions",
")",
">",
"0",
":",
"return",
"None",
"result",
"=",
"evaluate",
"(",
"conditions",
"[",
"0",
"]",
",",
"leaf_evaluator",
")",
"return",
"None",
"if",
"result",
"is",
"None",
"else",
"not",
"result"
] |
Evaluates a list of conditions as if the evaluator had been applied
to a single entry and NOT was applied to the result.
Args:
conditions: List of conditions ex: [operand_1, operand_2].
leaf_evaluator: Function which will be called to evaluate leaf condition values.
Returns:
Boolean:
- True if the operand evaluates to False.
- False if the operand evaluates to True.
None: if conditions is empty or condition couldn't be evaluated.
|
[
"Evaluates",
"a",
"list",
"of",
"conditions",
"as",
"if",
"the",
"evaluator",
"had",
"been",
"applied",
"to",
"a",
"single",
"entry",
"and",
"NOT",
"was",
"applied",
"to",
"the",
"result",
"."
] |
ec028d9efcf22498c3820f2650fa10f5c30bec90
|
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/helpers/condition_tree_evaluator.py#L69-L87
|
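The NOT counterpart, under the same assumption about leaf handling: only the first operand is considered, an unknown result stays None, and an empty condition list also yields None.

from optimizely.helpers.condition_tree_evaluator import not_evaluator

known = {'cond_true': True, 'cond_unknown': None}
leaf = lambda condition: known.get(condition)

print(not_evaluator(['cond_true'], leaf))     # False
print(not_evaluator(['cond_unknown'], leaf))  # None
print(not_evaluator([], leaf))                # None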